# ConferenceWorkshop.bib


@INPROCEEDINGS{UCC-2011-Efficiency,
AUTHOR = {J. Delgado and L. Fong and Y. Liu and N. Bobroff and S. Seelam and {\bf S. Masoud Sadjadi}},
TITLE = {Efficiency Assessment of Parallel Workloads on Virtualized Resources},
YEAR = {2011},
MONTH = {December},
BOOKTITLE = {Proceedings of the 4th IEEE/ACM International Conference on Utility and Cloud Computing (UCC 2011)},
ABSTRACT = {In cloud computing, virtual containers on physical resources are provisioned to requesting users. Resource providers may pack as many containers as possible onto each of their physical machines, or may pack specific types and quantities of virtual containers based on user or system QoS objectives. Such elastic provisioning schemes for resource sharing may present major challenges to scientific parallel applications that require task synchronization during execution. Such elastic schemes may also inadvertently lower utilization of computing resources. In this paper, we describe the elasticity constraint effect and ripple effect that cause a negative impact to application response time and system utilization. We quantify the impact using real workload traces through simulation. Then, we demonstrate that some resource scheduling techniques can be effective in mitigating the impacts. We find that a tradeoff is needed among the elasticity of virtual containers, the complexity of scheduling algorithms, and the response time of applications.},
KEYWORDS = {Efficiency Assessment, Parallel Workloads, Virtual Resources.},
}

@INPROCEEDINGS{DOA-SVI-Patterns,
AUTHOR = {Ingrid Buckley and Eduardo B. Fernandez and Marco Anisetti and Claudio A. Ardagna and {\bf S. Masoud Sadjadi}},
TITLE = {Towards Pattern-based Reliability Certification of Services},
YEAR = {2011},
MONTH = {October},
BOOKTITLE = {Proceedings of the International Symposium on Distributed Objects and Applications,
first International Symposium on Secure Virtual Infrastructures (DOA-SVI'11)},
ABSTRACT = {},
KEYWORDS = {},
PDF = {DOA-SVI-2011-Security-Patterns.pdf},
}

@INPROCEEDINGS{Euro-Par-2011-Deva,
AUTHOR = {David Villegas and {\bf S. Masoud Sadjadi}},
TITLE = {DEVA: Distributed Ensembles of Virtual Appliances in the Cloud},
YEAR = {2011},
MONTH = {August},
BOOKTITLE = {Proceedings of the 17th Euro-Par Conference (Euro-Par 2011)},
PAGES = {467--478},
ABSTRACT = {Low upfront costs, rapid deployment of infrastructure and
flexible management of resources has resulted in the quick adoption of
cloud computing. Nowadays, different types of applications in areas such
as enterprise web, virtual labs and high-performance computing are
already being deployed in private and public clouds. However, one of the
remaining challenges is how to allow users to specify Quality of Service
(QoS) requirements for composite groups of virtual machines and
enforce them effectively across the deployed resources. In this paper, we
propose an Infrastructure as a Service resource manager capable of
allocating Distributed Ensembles of Virtual Appliances (DEVAs) in the
Cloud. DEVAs are groups of virtual machines and their network
connectivities instantiated on heterogeneous shared resources with QoS
specifications for individual entities as well as their connections. We discuss
the different stages in their lifecycle: declaration, scheduling,
provisioning and dynamic management, and show how this approach can be used
to maintain QoS for complex deployments of virtual resources.},
KEYWORDS = {DEVA, Cloud Computing, Virtual Appliances},
NOTE = {Part I. (acceptance rate 29.9\%)},
PDF = {Euro-Par-2011-Deva.pdf},
}

@INPROCEEDINGS{SEKE-2011-Deva-Non-Funcational-Reqs,
AUTHOR = {David Villegas and {\bf S. Masoud Sadjadi}},
TITLE = {Mapping Non-Functional Requirements to Cloud Applications},
YEAR = {2011},
MONTH = {July},
BOOKTITLE = {Proceedings of the 2011 International Conference on Software Engineering and Knowledge Engineering (SEKE 2011)},
ABSTRACT = {Cloud computing represents a solution for applications
with high scalability needs where usage patterns, and
therefore resource requirements, may fluctuate based on external
circumstances such as exposure or trending. However, in order
to take advantage of the cloud's benefits, software engineers
need to be able to express the application's needs in quantifiable
terms. Additionally, cloud providers have to understand
such requirements and offer methods to acquire the necessary
infrastructure to fulfill the users' expectations. In this paper, we
discuss the design and implementation of an Infrastructure as a
Service cloud manager such that non-functional requirements
determined during the requirements analysis phase can be
mapped to properties for a group of Virtual Appliances running
the application. The discussed management system ensures that
expected Quality of Service is maintained during execution and
can be considered during different development phases.},
KEYWORDS = {DEVA, Cloud Computing, Non-Functional Requirements, Software Engineering},
NOTE = {(acceptance rate 31\%)},
PDF = {SEKE-2011-Deva-Non-Funcational-Reqs.pdf},
}

@INPROCEEDINGS{SEKE-2011-Deva-MetaModel,
AUTHOR = {Xabriel J. Collazo-Mojica and {\bf S. Masoud Sadjadi}},
TITLE = {A Metamodel for Distributed Ensembles of Virtual Appliances},
YEAR = {2011},
MONTH = {July},
BOOKTITLE = {Proceedings of the 2011 International Conference on Software Engineering and Knowledge Engineering (SEKE 2011)},
ABSTRACT = {We present our work on modeling distributed ensembles
of virtual appliances (DEVAs) on Infrastructure as a
Service (IaaS) clouds. Designing solutions on IaaS providers
require a good understanding of the underlying details such
as the software installation or the network configuration. We
propose the use of DEVAs, a modeling approach built on top
of the notion of virtual appliances, that allows easy-to-compose
and ready-to-use cloud application architectures that are IaaS-agnostic,
and that abstract away unnecessary details for web
application developers. In this paper, we extend the definition
of a DEVA from previous work by presenting an underlying
metamodel and how that metamodel can be transformed to
an actual deployment. We also present a case study where we
model a web application architecture and we discuss how we can
instantiate it in an IaaS cloud. We argue that the DEVA modeling
approach is suitable for typical cloud use cases.},
KEYWORDS = {DEVA, Cloud Computing, Meta Model, Software Engineering},
NOTE = {(acceptance rate 31\%)},
PDF = {SEKE-2011-Deva-MetaModel.pdf},
}

@INPROCEEDINGS{SPLASH-2010-VirtualLabs,
AUTHOR = {Xabriel J. Collazo-Mojica and {\bf S. Masoud Sadjadi} and Fabio Kon and Dilma Da Silva},
TITLE = {Virtual Environments: Easy Modeling of Interdependent Virtual Appliances in the Cloud},
YEAR = {2010},
MONTH = {October},
BOOKTITLE = {Proceedings of the SPLASH 2010 Workshop on Flexible Modeling Tools (SPLASH 2010)},
ABSTRACT = {We present our ideas for modeling groups of interdependent virtual machines in the cloud. We call these models virtual environments. This abstraction is built on top of virtual appliances and the services they provide. We discuss previous attempts in this domain and present our motivations for working on an uncomplicated model for non-expert users of cloud computing such as Web developers and CS students. Visual and internal representations of the model are presented. Early work on a prototype implementation is described. We argue that easier to use models such as ours are needed for today's and tomorrow's distributed applications.},
KEYWORDS = {virtual environment, virtual appliance, flexible modeling, cloud computing.},
PDF = {FlexiTools-SPLASH2010 Virtual Labs.pdf},
}

@INPROCEEDINGS{SEKE-2010-DAGMAN,
AUTHOR = {Selim Kalayci and Gargi Dasgupta and Liana Fong and Onyeka Ezenwoye and {\bf S. Masoud Sadjadi}},
TITLE = {Distributed and Adaptive Execution of {Condor} {DAGMan} Workflows},
BOOKTITLE = {Proceedings of the 22nd International Conference on Software Engineering and Knowledge Engineering (SEKE 2010)},
ADDRESS = {San Francisco Bay, CA},
MONTH = {July},
YEAR = {2010},
ABSTRACT = {Large-scale applications, in the form of workflows, may require the coordinated usage of resources spreading across multiple administrative domains. Scalable solutions need a decentralized approach to coordinate the execution of such workflows. At runtime, adjustments to the workflow execution plan may be required to meet Quality of Service objectives. In this paper, we provide a decentralized execution approach to large-scale workflows on different resource domains. We also provide a low overhead, decentralized runtime adaptation mechanism to improve the performance of the system. Our prototype implementation is based on standard Condor DAGMan workflow execution engine and does not require any modifications to Condor or its underlying system.},
KEYWORDS = {Application workflow, resource domain, execution, decentralization, distributed system.},
PDF = {SEKE-2010-DAGMan Workflows.pdf},
}

@INPROCEEDINGS{e-Science-2010-GPU-CUDA,
AUTHOR = {Javier Delgado and Jo{\~a}o Gazolla and Esteban Clua and {\bf S. Masoud Sadjadi}},
TITLE = {An Incremental Approach to Porting Complex Scientific Applications to {GPU}/{CUDA}},
YEAR = {2010},
MONTH = {July},
BOOKTITLE = {Proceedings of the IV Brazilian e-{Science} Workshop},
ABSTRACT = {This paper proposes and describes a developed methodology to port
complex scientific applications originally written in FORTRAN to the nVidia
CUDA. The process was developed and validated by porting an existing
FORTRAN weather and forecasting algorithm to a GPU parallel paradigm.
We believe that the proposed porting methodology described can be
successfully utilized in several other existing scientific applications.},
PDF = {EScience2010.pdf},
}

@INPROCEEDINGS{IPDPS-2010-WRF,
AUTHOR = {Javier Delgado and {\bf S. Masoud Sadjadi} and others},
TITLE = {Performance Prediction of Weather Forecasting Software on Multicore Systems},
YEAR = {2010},
MONTH = {April},
BOOKTITLE = {Proceedings of the 24th IEEE International Parallel \& Distributed Processing Symposium (IPDPS-2010), 11th Parallel and Distributed Scientific and Engineering Computing (PDSEC) workshop},
}

@INPROCEEDINGS{WEBIST-2010-Data-Intensive,
AUTHOR = {Onyeka Ezenwoye and Salome Busi and {\bf S. Masoud Sadjadi}},
TITLE = {Dynamically Reconfigurable Data-Intensive Service Composition},
YEAR = {2010},
MONTH = {April},
BOOKTITLE = {Proceedings of the 6th International Conference on Web Information Systems and Technologies (WEBIST 2010)},
ABSTRACT = {The distributed nature of services poses significant challenges to building robust service-based applications. A
major aspect of this challenge is finding a model of service integration that promotes ease of dynamic reconfiguration,
in response to internal and external stimuli. Centralized models of composition are not conducive
for data-intensive applications such as those in the scientific domain. Decentralized compositions are more
complicated to manage especially since no service has a global view of the interaction. In this paper we identify
the requirements for dynamic reconfiguration of data-intensive composite services. A hybrid composition
model that combines the attributes of centralization and decentralization is proposed. We argue that this model
promotes dynamic reconfiguration of data-intensive service compositions.
},
KEYWORDS = {Service Composition Models, Scientific Workflow, Adaptability, Dynamic Reconfiguration, Choreography, Orchestration.},
PDF = {WEBIST-2010-Ezenwoye.pdf},
}

@INPROCEEDINGS{SEKE-2009-Data-Staging,
AUTHOR = {Onyeka Ezenwoye and Balaji Viswanathan and {\bf S. Masoud Sadjadi} and Liana Fong and Gargi Dasgupta and Selim Kalayci},
TITLE = {Task Decomposition for Adaptive Data Staging in
Workflows for Distributed Environments},
YEAR = {2009},
MONTH = {July},
BOOKTITLE = {Proceedings of the 21st International Conference on Software Engineering and Knowledge Engineering (SEKE 2009)},
ABSTRACT = {Scientific workflows are often composed by scientists
that are not particularly familiar with performance and fault-tolerance
issues of the underlying layer. The inherent nature
of the infrastructure and environment for scientific workflow
applications means that the movement of data comes with reliability
challenges. Improving the reliability of scientific workflows in
distributed environments calls for the decoupling of data staging
and computation activities, and each aspect needs to be addressed
separately.
In this paper, we present an approach to managing scientific
workflows that specifically provides constructs for reliable data
staging. In our framework, data staging tasks are automatically
separated from computation tasks in the definition of the workflow.
High-level policies can be provided that allow for dynamic
adaptation of the workflow to occur. Our approach permits
the separate specification of the functional and non-functional
requirements of the application and is dynamic enough to allow
for the alteration of the workflow at runtime for optimization.
},
KEYWORDS = {Data Staging, Scientific Workflow, and Distributed Systems.},
PAGES = {16--19},
}

@INPROCEEDINGS{SEKE-2009-WS-Reliability,
AUTHOR = {Ingrid Buckley and Eduardo B. Fernandez and Gustavo Rossi and {\bf S. Masoud Sadjadi}},
TITLE = {Web Services Reliability Patterns},
BOOKTITLE = {Proceedings of the 21st International Conference on Software Engineering and Knowledge Engineering (SEKE 2009)},
MONTH = {July},
YEAR = {2009},
PAGES = {4--9},
ABSTRACT = {Due to the widespread use of web services by enterprises, the need to ensure their reliability has become crucial. There are several standards that intend to govern how web services are designed and implemented, including protocols to which they must adhere. These standards include the WS-Reliability and WS-Reliable Messaging standards that define rules for reliable messaging. We present here patterns for these standards which define how to achieve reliable messaging between entities. We compare their features and use.},
KEYWORDS = {Web Services, Reliability, and Patterns.},
PDF = {WS-Reliability-SEKE-2009.pdf},
}

@INPROCEEDINGS{TG-2009-GCB,
AUTHOR = {{\bf S. Masoud Sadjadi} and Sandie Kappes and Laura F. McGinnis},
TITLE = {Grid Enablement of Scientific Applications on {TeraGrid}},
YEAR = {2009},
MONTH = {June},
BOOKTITLE = {Proceedings of the TeraGrid 2009 Conference},
ABSTRACT = {The lack of access to sufficient computational, storage, and networking resources in the past three years has proven to be the major hurdle in the rate of discovery for our GCB research projects. The TeraGrid Pathway Fellowship Program has helped us address this problem. In this presentation, we will show how this program has helped us enhance the syllabus and contents of the GCB course with the existing TeraGrid educational and training materials (e.g., the CI Tutor) so that the students taking the GCB course become able to utilize the TeraGrid resources to accelerate the rate of their findings and to be able to submit their research papers for publication within the two semesters of the GCB program.
},
KEYWORDS = {Global CyberBridges, TeraGrid, and High-Performance Computing.},
PDF = {GCB-TeraGrid-2009.pdf},
}

@INPROCEEDINGS{LSAP-2009-Grid-Meta-Broker,
AUTHOR = {Yanbin Liu and David Villegas and Norman Bobroff and Liana Fong and Ivan Rodero and Seetharami Seelam and {\bf S. Masoud Sadjadi}},
TITLE = {An Experimental System for Grid Meta-Broker Evaluation},
BOOKTITLE = {Proceedings of the ACM Large-scale System and Application Performance workshop (LSAP2009) of the International Symposium on High Performance Distributed Computing (HPDC 2009)},
MONTH = {June},
YEAR = {2009},
PAGES = {11--18},
ABSTRACT = {Grid meta-broker is a key enabler in realizing the full potential of
inter-operating grid computing systems. A challenge to properly
evaluate the effectiveness of meta-brokers is the complexity of
developing a realistic grid experimental environment. In this
paper, this challenge is addressed by a unique combination of two
approaches: using compressed workload traces to demonstrate the
resource matching and scheduling functions of the meta-broker,
and using emulation to provide a flexible and scalable modeling
and management for local resources of a grid environment. Real
workload traces are compressed while preserving their key
workload characteristics to allow exploration of various
dimensions of meta-broker functions in reasonable time.
Evaluation of round-robin, queue-length, and utilization based
meta-broker scheduling algorithms shows that they have different
},
KEYWORDS = {Grid Computing, Meta-Broker, Job Scheduling, and Experimental Evaluation.},
PDF = {GridMetaBroker-HPDC-2009.pdf},
}

@INPROCEEDINGS{ICAC-2009-VM,
AUTHOR = {Juan C. Martinez and Lixi Wang and Ming Zhao and {\bf S. Masoud Sadjadi}},
TITLE = {Experimental Study of Large-scale Computing on Virtualized Resources},
YEAR = {2009},
MONTH = {June},
PAGES = {35--41},
BOOKTITLE = {Proceedings of the 3rd International Workshop on Virtualization
Technologies in Distributed Computing (VTDC 2009) of the IEEE/ACM 6th International
Conference on Autonomic Computing and Communications (ICAC-2009)},
ABSTRACT = {Parallel applications have a pressing need for the utilization of more and more resources to meet user performance expectations. Unfortunately, these resources are not necessarily available within one single domain. Grid computing provides a solution to scaling out from a single domain; however, it also brings another problem for some applications: resource heterogeneity. Since some applications require having homogeneous resources for their execution, virtualizing the resources is a noble and viable solution.
In this paper, we present two parallel applications, namely WRF and mpiBLAST and report the results of different runs scaling them out from 2 to 128 virtual nodes. Later, we analyze the effects of scaling out based on the application's communication behavior.
},
KEYWORDS = {Large Scale Computing, Virtualized Resources, and Experimental Study.},
PDF = {LargeScaleComputing-ICAC-2009.pdf},
}

@INPROCEEDINGS{WCCCE-2009-SAGE,
AUTHOR = {Javier Delgado and Mark Joselli and Silvio Stanzani and {\bf S. Masoud Sadjadi}},
TITLE = {A Learning and Collaboration Platform Based on {SAGE}},
YEAR = {2009},
MONTH = {May},
PAGES = {70--76},
BOOKTITLE = {Proceedings of the ACM 14th Western Canadian Conference on Computing Education (WCCCE 2009)},
ABSTRACT = {In this paper, we describe the use of a tiled-display wall platform for use as a general purpose collaboration and learning platform. The main scenario of emphasis for this work is online learning by users in different countries. We describe the general efficacy of this platform for our purposes and describe its shortcomings for this purpose empirically. We discuss its advantages and also the shortcomings that we found. We also describe an enhancement made to make it more viable for our target usage scenario by implementing an interface for a modern human interface device.
},
KEYWORDS = {Cyberinfrastructure, interdisciplinary, collaboration, e-learning.},
PDF = {SAGE-WCCCE-2009.pdf},
}

@INPROCEEDINGS{PIRE-First-Year-Tapia-2009,
AUTHOR = {{\bf S. Masoud Sadjadi} and Shu-Ching Chen and Borko Furht and
Pete Martinez and Scott Graham and Steve Luis and Juan Caraballo and Yi Deng},
TITLE = {{PIRE}: A Global Living Laboratory for Cyberinfrastructure Application Enablement},
MONTH = {April},
YEAR = {2009},
PAGES = {64--69},
BOOKTITLE = {Proceedings of the ACM Tapia Celebration of Diversity in Computing 2009 (Tapia'09)},
ABSTRACT = {This Partnership for International Research and Education (PIRE) is a 5-year long project funded by the National Science Foundation that aims to provide 196 international research and training experiences to its participants by leveraging the established programs, resources, and community of the Latin American Grid (LA Grid, an international academic and industry partnership designed to promote research, education and workforce development at major institutions in the USA, Mexico, Argentina, Spain, and other locations around the world). In return, PIRE will take LA Grid to the next level of research and education excellence. Top students, particularly underrepresented minorities, are engaged and each participant will receive multiple perspectives in each of three different aspects of collaboration as they work with (1) local and international researchers, in (2) academic and industrial research labs, and on (3) basic and applied research projects. PIRE participants will engage not only in computer science research topics focused on transparent cyberinfrastructure enablement, but will also be exposed to challenging scientific areas of national importance such as meteorology, bioinformatics, and healthcare. During the first year of this project, 18 students out of a pool of 68 applicants were selected; they participated in complementary PIRE research projects, visited 7 international institutions (spanning 5 countries and 4 continents), and published 9 papers.},
KEYWORDS = {Cyberinfrastructure, university/industry partnership, IT workforce development.},
PDF = {PIRE-TAPIA-2009.pdf},
}

@INPROCEEDINGS{REU-Third-Year-Tapia-2009,
AUTHOR = {Masoud Milani and {\bf S. Masoud Sadjadi} and Raju Rangaswami and Peter Clarke and Tao Li},
TITLE = {Research Experiences for Undergraduates: Autonomic Computing Research at FIU},
MONTH = {April},
YEAR = {2009},
PAGES = {93--97},
BOOKTITLE = {Proceedings of the ACM Tapia Celebration of Diversity in Computing 2009 (Tapia'09)},
ABSTRACT = {According to Computing Research Association, between 2003
and 2007 each year fewer than 3\% of the US's Ph.D.s graduates
in computer science and computer engineering are Hispanic or
African American and fewer than 20\% are women. Such an
under-representation not only compromises the competitiveness of
the US economy, but also deepens the divide and imbalances in
our society. It is therefore imperative that undergraduate
institutions introduce students to graduate school at an early stage
of their academic careers and to provide them with the tools
necessary for the successful conduct of research in graduate
programs. The School of Computing and Information Sciences
(SCIS) at Florida International University (FIU) has been working
to strengthen the pipeline of underrepresented students to
graduate work in computer science by hosting an NSF Research
Experiences for Undergraduates (REU) site for the last three
years. Our REU site provided this opportunity to 30
undergraduate students, 23 of them were underrepresented
including 7 females, 16 Hispanics, and 4 African Americans, who
published 13 technical papers. Six of ten students who have},
KEYWORDS = {Research Experiences for Undergraduates, REU Site, Autonomic Computing, Underrepresented Students.},
PDF = {REU-TAPIA-2009.pdf},
}

@INPROCEEDINGS{JobFlowPatterns-ICSOC-2008,
AUTHOR = {Selim Kalayci and Onyeka Ezenwoye and Balaji Viswanathan and
Gargi Dasgupta and Liana Fong and {\bf S. Masoud Sadjadi}},
TITLE = {Design and Implementation of a Fault Tolerant Job Flow
Manager Using Job Flow Patterns and Recovery Policies},
MONTH = {December},
YEAR = {2008},
BOOKTITLE = {Proceedings of the 6th International Conference on Service Oriented Computing (ICSOC'08)},
PAGES = {54--69},
VOLUME = {5364/2008},
PUBLISHER = {Springer Berlin / Heidelberg},
DOI = {10.1007/978-3-540-89652-4_8},
ISBN = {978-3-540-89647-0},
ABSTRACT = {Nowadays, many grid applications are developed as job flows that are composed of multiple jobs.  The execution of job flows requires the support of a job flow manager and a job scheduler. Due to the long running nature of job flows, the support for fault tolerance and recovery policies is especially important, and yet complicated due to the sequencing and dependency of jobs within a flow, and the  required coordination between workflow engines and job schedulers.  In this paper, we describe the design and implementation of a job flow manager that supports fault tolerance.  First, we identify and label job flow patterns within a job flow during deployment time. Next, at run time, we introduce a proxy that intercepts and resolves faults using job flow patterns and their corresponding fault recovery policies.  Our design has the advantages of separation of job flow and fault handling logic, requiring no manipulation at the modeling time, and flexibility in fault resolution at run time.  We validate our design by a prototypical implementation based on the ActiveBPEL workflow engine and GridWay Metascheduler, and Montage application as the case study.},
KEYWORDS = {job flow, job flow patterns, fault tolerant patterns, recovery policies, workflow engine, job scheduler, transparent proxy, meta-scheduler.},
NOTE = {(acceptance rate 20.4\%).},
PDF = {ICSOC-2008-JFM.pdf},
}

@INPROCEEDINGS{JobFlowPatterns-SEKE-2008,
AUTHOR = {Gargi Dasgupta and Onyeka Ezenwoye and Liana Fong and Selim Kalayci and
Balaji Viswanathan and {\bf S. Masoud Sadjadi}},
TITLE = {Design of a Fault-Tolerant Job-Flow Manager for Grid Environments
Using Standard Technologies, Job-Flow Patterns, and a Transparent Proxy},
MONTH = {July},
YEAR = {2008},
ADDRESS = {San Francisco Bay, USA},
BOOKTITLE = {Proceedings of the 20th International Conference on Software Engineering and Knowledge Engineering (SEKE'2008)},
PAGES = {814--819},
ABSTRACT = {The execution of job flow applications is a reality today in academic and industrial domains. Current approaches to execution of job flows often follow proprietary solutions on expressing the job flows and do not leverage recurrent job-flow patterns to address faults in Grid computing environments. In this paper, we provide a design solution to development of job-flow managers that uses standard technologies such as BPEL and JSDL to express job flows and employs a two-layer peer-to-peer architecture with interoperable protocols for cross-domain interactions among job-flow mangers. In addition, we identify a number of recurring job-flow patterns and introduce their corresponding fault-tolerant patterns to address runtime faults and exceptions. Finally, to keep the business logic of job flows separate from their fault-tolerant behavior, we use a transparent proxy that intercepts job-flow execution at runtime to handle potential faults using a growing knowledge base that contains the most recently identified job-flow patterns and their corresponding fault-tolerant patterns.},
KEYWORDS = {Software Design, Job-Flow Patterns, Fault Tolerant, BPEL, JSDL, Grid Computing, Peer-to-Peer.},
NOTE = {(36\% acceptance rate for Full Papers.)},
PDF = {SEKE-2008-JobFlowFailure.pdf},
}

@INPROCEEDINGS{CompositeWSs-SEKE-2008,
AUTHOR = {Onyeka Ezenwoye and {\bf S. Masoud Sadjadi}},
TITLE = {A Language-based Approach to Addressing Reliability in Composite Web Services},
MONTH = {July},
YEAR = {2008},
ADDRESS = {San Francisco Bay, USA},
BOOKTITLE = {Proceedings of the 20th International Conference on Software Engineering and Knowledge Engineering (SEKE'2008)},
PAGES = {649--654},
ABSTRACT = {With Web services, distributed applications can be
encapsulated as self-contained, discoverable software components
that can be integrated to create other applications. BPEL allows
for the composition of existing Web services to create new
higher-function Web services. We identified that the techniques
currently applied at development time are not sufficient for
ensuring the reliability of composite Web services. In this paper,
we present a language-based approach to transparently adapting
BPEL processes to improve reliability. This approach addresses
reliability at the Business process layer (i.e., the language layer)
using a code generator, which weaves fault-tolerant code to the
original code and an external proxy. The generated code uses
standard BPEL constructs, and therefore, does not require any
changes to the BPEL engine.
},
NOTE = {(36\% acceptance rate for Full Papers.)},
PDF = {SEKE-2008-CompositeWebServices.pdf},
}

@INPROCEEDINGS{IGE-WRF-CGCTW-2008,
AUTHOR = {Hector A. Duran Limon and {\bf S. Masoud Sadjadi} and Raju Rangaswami and Shu Shimizu and
Liana Fong and Rosa M. Badia and Pat Welsh and Sandeep Pattnaik and Anthony Praino and Javier
Figueroa and Javier Delgado and Xabriel J. Collazo-Mojica and David Villegas and Selim
Kalayci and Gargi Dasgupta and Onyeka Ezenwoye and Khalid Saleem and Juan Carlos Martinez and
Ivan Rodero and Shuyi Chen and Javier Mu{\~n}oz and Diego Lopez and Julita Corbalan and Hugh Willoughby},
TITLE = {Grid Enablement and Resource Usage Prediction of Weather Research and Forecasting},
MONTH = {April},
YEAR = {2008},
BOOKTITLE = {Proceedings of the Collaborative and Grid Computing Technologies Workshop},
PAGES = {4},
ABSTRACT = {In the last few years we have witnessed a number of devastating hurricanes
around the world. It is believed that the global climate change is fuelling an increase in
the magnitude and also in the average number of hurricanes and tropical storms.
Therefore, there is a pressing need to provide a range of users with accurate and timely
information that can enable effective planning for and response to potential hurricane
landfalls. The Weather Research Forecast (WRF) code has been adopted worldwide by
meteorological services. The numerical model employed by WRF demands a large
amount of computing nodes. Such demands can increase dramatically if the WRF is used
to model a large geographical area with a high resolution level (e.g., $<$ 1 km). Although
WRF can be run in homogeneous clusters, it was not designed for grid environments
which can potentially offer a higher amount of computing resources. The transparent
Grid enablement of WRF includes carrying out intelligent brokering and scheduling. This
is needed to ensure that a run of WRF will take an acceptable amount of time and to
optimize the Grid resource usage. Resource usage prediction is required to achieve such
brokering and scheduling. However, current approaches to resource prediction tend to
address parts of the problem by either focusing on a specific application, or a specific
platform, or a small subset of system resources. In this paper, we present our research on
Grid enablement of WRF by leveraging our work on resource usage prediction, meta-scheduling
and job-flow management. We report on our experience on the design and
development of the La Grid WRF Portal to provide a comprehensive, but customized,
Web-based user interface for meteorologist to conduct their hurricane research and to
forecast hurricanes in near real-time. We pay a special focus on our approach for
modelling application resource usage in a platform independent manner enabling
prediction of resource usage on unseen platforms.
},
KEYWORDS = {Grid Enablement, Scientific Applications, WRF, Portal, Meta-Scheduling,
Job Flow Management, Modeling, and Profiling.},
}

@INPROCEEDINGS{ICAC-2008-SOS,
AUTHOR = {Ricardo Koller and Raju Rangaswami and Joseph Marrero and Igor Hernandez
and Geoffrey Smith and Mandy Barsilai and Silviu Necula and {\bf S. Masoud Sadjadi} and
Tao Li and Krista Merrill},
TITLE = {Anatomy of a Real-time Intrusion Prevention System},
YEAR = {2008},
MONTH = {June},
BOOKTITLE = {Proceedings of the 5th IEEE International Conference on Autonomic Computing (ICAC-2008)},
PAGES = {151--160},
ABSTRACT = {Host intrusions prevention systems for both servers and
end-hosts must address the dual challenges of accuracy and
performance. Researchers have mostly focused on addressing
the former challenge, suggesting solutions based either on
exploit-based penetration detection or anomaly-based misbehavior
detection, but yet stopping short of comprehensive solutions
that leverage merits of both approaches. The second
challenge, however, is rarely addressed; doing so comprehensively
is important for practical usability, since these systems
can introduce substantial overhead and cause system slowdown,
more so when the system load is high.
We present Rootsense, a holistic and real-time intrusion
prevention system that combines the merits of misbehavior-based
and anomaly-based detection. Four principles govern
the design and implementation of Rootsense. First, Rootsense
audits events within different subsystems of the host OS and
correlates them to comprehensively capture the global system
state. Second, Rootsense restricts the detection domain
to root compromises only; doing so reduces runtime overhead
and increases detection accuracy (root behavior is more
easily modeled than user behavior). Third, Rootsense adopts
a dual approach to intrusion detection -- a root penetration
detector detects activities that exploit system vulnerabilities
to penetrate the security perimeter, and a root misbehavior
detector that tracks misbehavior by root processes. Fourth,
Rootsense is designed to be configurable for overhead management
characteristics of the intrusion prevention system that
affect foreground task performance. A Linux implementation
of Rootsense is analyzed for both accuracy and performance,
using several real-world exploits and a range of end-host and
server benchmarks.
},
KEYWORDS = {Operating systems, security, rootsense.},
NOTE = {(25\% acceptance rate)},
PDF = {ICAC-2008-rootsense.pdf},
}

@INPROCEEDINGS{ICAC-2008-WRF-Portal,
AUTHOR = {Khalid Saleem and {\bf S. Masoud Sadjadi} and Shu-Ching Chen},
TITLE = {Towards a Self-Configurable Weather Research and Forecasting System},
YEAR = {2008},
MONTH = {June},
BOOKTITLE = {Proceedings of the 5th IEEE International Conference on Autonomic Computing (ICAC-2008)},
PAGES = {195--196},
ABSTRACT = {Current weather forecast and visualization systems lack
the scalability to support numerous customized requests for
weather research and forecasting, especially at the time of
natural disasters such as a hurricane landfall. Most of these
systems provide somewhat generic forecasts for different
types of users including meteorologists, business owners and
emergency management officials. Such forecast while may be
relevant to some specific group of users; to others it may not
provide any useful information apart from the prediction of
impending weather hazards. In other words, one size does
not fit all. Weather data and its visualization indicating
inclement weather conditions such as snow or ice storm,
tornadoes and hurricanes need to be customized for the
different type of users using such systems; thus, assisting
them in ensuring effective preparatory and meticulous
recovery plans. In this paper, we propose a self-configurable,
user specific on-demand weather research and forecasting
system that utilizes Grid computing to facilitate scalable
weather forecast data analysis and prediction.
},
KEYWORDS = {Web-based portal, weather forecasting, WRF, self-configuration, ensemble forecasting.},
NOTE = {(38\% acceptance rate for Full and Short papers together.)},
PDF = {ICAC-2008-WRF.pdf},
PS = {../Presentations/ICAC08_poster_khalid.ppt},
}

@INPROCEEDINGS{ICAC-2008-Job-Flow-Manager,
AUTHOR = {Gargi Dasgupta and Onyeka Ezenwoye and Liana Fong and Selim Kalayci and
{\bf S. Masoud Sadjadi} and others},
TITLE = {Runtime Fault-Handling for Job-Flow Management in Grid Environments},
YEAR = {2008},
MONTH = {June},
BOOKTITLE = {Proceedings of the 5th IEEE International Conference on Autonomic Computing (ICAC-2008)},
PAGES = {201--202},
ABSTRACT = {The execution of job flow applications is a reality today in
academic and industrial domains. In this paper, we propose an
approach to adding self-healing behavior to the execution of job
flows without the need to modify the job flow engines or
redevelop the job flows themselves. We show the feasibility of
our non-intrusive approach to self-healing by inserting a
generic proxy to an existing two-level job-flow management
system, which employs job flow based service orchestration at
the upper level, and service choreography at the lower level.
The generic proxy is inserted transparently between these two
layers so that it can intercept all their interactions. We
developed a prototype of our approach in a real Grid
environment to show how the proxy facilitates runtime handling
for failure recovery.
},
KEYWORDS = {job-flow management, meta-scheduler, generic proxy,
fault-tolerance, job-flows.},
NOTE = {(38\% acceptance rate for Full and Short papers together.)},
PDF = {ICAC-2008-JobFlowManager.pdf},
PS = {../Presentations/ICAC08_poster_selim.ppt},
}

@INPROCEEDINGS{ICAC-2008-Meta-Scheduler,
AUTHOR = {Yanbin Liu and {\bf S. Masoud Sadjadi} and Liana Fong and Ivan Rodero and David Villegas and
Selim Kalayci and Norman Bobroff and Juan Carlos Martinez},
TITLE = {Enabling Autonomic Meta-Scheduling in Grid Environments},
YEAR = {2008},
MONTH = {June},
BOOKTITLE = {Proceedings of the 5th IEEE International Conference on Autonomic Computing (ICAC-2008)},
PAGES = {199--200},
ABSTRACT = {Grid computing supports workload execution on computing
resources that are shared across a set of collaborative
organizations. At the core of workload management for Grid
computing is a software component, called meta-scheduler or
Grid resource broker, that provides a virtual layer on top of
heterogeneous Grid middleware, schedulers, and resources.
Meta-schedulers typically enable end-users and applications to
compete over distributed shared resources through the use of
one or more instances of the same meta-scheduler, in a
centralized or distributed manner, respectively. We propose an
approach to enabling autonomic meta-scheduling through the
use of a new communication protocol that---if adopted by
different meta-schedulers or by the applications using them---
can improve the workload execution while avoiding potential
chaos, which can be resulted from blind competition over
resources. This can be made possible by allowing the meta-schedulers
and/or their applications to engage in a process to
negotiate their roles (e.g., consumer, provider, or both),
scheduling policies, service-level agreement, etc. To show the
feasibility of our approach, we developed a prototype that
enables some preliminary autonomic management among three
different meta-schedulers, namely, GridWay, eNANOS, and
TDWB.
},
KEYWORDS = {meta-scheduler, grid resource broker, grid interoperability},
NOTE = {(38\% acceptance rate for Full and Short papers together.)},
PDF = {ICAC-2008-MetaScheduling.pdf},
PS = {../Presentations/ICAC08_poster_david.ppt},
}

@INPROCEEDINGS{CCGrid-2008,
AUTHOR = {Norman Bobroff and Liana Fong and Selim Kalayci and Yanbin Liu and
Juan Carlos Martinez and Ivan Rodero and {\bf S. Masoud Sadjadi} and David Villegas},
TITLE = {Enabling Interoperability among Meta-Schedulers},
YEAR = {2008},
BOOKTITLE = {Proceedings of 8th IEEE International Symposium on Cluster Computing and the Grid (CCGrid-2008)},
ABSTRACT = {Grid computing supports the harness of computing
resources from cooperating organizations or institutes in the form
of virtual organizations. At the core of matching the resource
requests for jobs is a resource brokering middleware, commonly
known as a meta-scheduler or a meta-broker. The recent
resource matching across multiple virtual organizations, not
limiting to a single one. Different architectures have been
proposed for these interoperating meta-scheduling systems. In
this paper, we present a hybrid approach, combining hierarchical
and peer-to-peer architectures for flexibility and extensibility of
these systems. We also define a set of protocols to allow different
meta-scheduler instances to communicate using Web Services. In
our experiments, three remote organizations using different
scheduling technologies (namely, IBM, BSC, and FIU)
interoperate using the communication protocols.
},
KEYWORDS = {meta-scheduler, resource broker, interoperable scheduling protocol.},
NOTE = {(32\% acceptance rate.)},
PAGES = {306--315},
PDF = {CCGrid-2008-LA-Grid-MetaScheduler.pdf},
}

@INPROCEEDINGS{HPGC-2008,
AUTHOR = {{\bf S. Masoud Sadjadi} and Shu Shimizu and Javier Figueroa
and Raju Rangaswami and Javier Delgado and Hector Duran and Xabriel Collazo},
TITLE = {A Modeling Approach for Estimating Execution Time of
Long-Running Scientific Applications},
MONTH = {April},
YEAR = {2008},
PAGES = {1--8},
BOOKTITLE = {Proceedings of the 22nd IEEE International Parallel \& Distributed Processing Symposium (IPDPS-2008), the Fifth High-Performance Grid Computing Workshop (HPGC-2008)},
ABSTRACT = {In a Grid computing environment, resources are
shared among a large number of applications. Brokers
and schedulers find matching resources and schedule the
execution of the applications by monitoring dynamic
resource availability and employing policies such as first-come-
first-served and back-filling. To support
applications with timeliness requirements in such an
environment, brokering and scheduling algorithms must
estimate the execution time of the application on the
currently available resources. In this paper, we present a
modeling approach to estimating the execution time of
long-running scientific applications. The modeling
approach we propose is generic; models can be
constructed by merely observing the application
execution ``externally'' without using intrusive techniques
such as code inspection or instrumentation. The model is
cross-platform; it enables prediction without the need for
the application to be profiled first on the target hardware.
To show the feasibility and effectiveness of this approach,
we developed a resource usage model that estimates the
execution time of a weather forecasting application in a
multi-cluster Grid computing environment. We validated
the model through extensive benchmarking and profiling
experiments and observed prediction errors that were
within 10% of the measured values. Based on our initial
experience, we believe that our approach can be used to
model the execution time of other time-sensitive scientific
applications; thereby, enabling the development of more
intelligent brokering and scheduling algorithms.
},
KEYWORDS = {High-Performance Computing, Profiling, Behavior Modeling, Weather Research and Forecasting.},
PDF = {HPGC-2008-ModelingWRF.pdf},
PS = {../Presentations/HPGC-2008-WRF Modeling Paper Presentationl.ppt},
}

@INPROCEEDINGS{IGE-WRF-GEA-2008,
AUTHOR = {{\bf S. Masoud Sadjadi} and Liana Fong and Rosa M. Badia and Javier Figueroa and
Javier Delgado and Xabriel J. Collazo-Mojica and Khalid Saleem and Raju Rangaswami and Shu Shimizu and
Hector A. Duran Limon and Pat Welsh and Sandeep Pattnaik and Anthony Praino and David Villegas and
Selim Kalayci and Gargi Dasgupta and Onyeka Ezenwoye and Juan Carlos Martinez and Ivan Rodero and
Shuyi Chen and Javier Mu{\~n}oz and Diego Lopez and Julita Corbalan and Hugh Willoughby and Michael McFail and others},
TITLE = {Transparent Grid Enablement of Weather Research and Forecasting},
MONTH = {January},
YEAR = {2008},
NOTE = {(8 pages)},
ADDRESS = {Baton Rouge, Louisiana, USA},
BOOKTITLE = {Proceedings of the 15th ACM Mardi Gras conference: From lightweight mash-ups to lambda
grids: Understanding the spectrum of distributed computing requirements, applications, tools,
infrastructures, interoperability, and the incremental adoption of key capabilities},
ABSTRACT = {The impact of hurricanes is so devastating throughout different levels of society that
there is a pressing need to provide a range of users with accurate and timely information that can
enable effective planning for and response to potential hurricane landfalls. The Weather Research
and Forecasting (WRF) code is the latest numerical model that has been adopted by meteorological
services worldwide. The current version of WRF has not been designed to scale out of a single
organization's local computing resources. However, the high resource requirements of WRF for
fine-resolution and ensemble forecasting demand a large number of computing nodes, which typically
cannot be found within one organization. Therefore, there is a pressing need for the Grid-enablement
of the WRF code such that it can utilize resources available in partner organizations. In this paper,
we present our research on Grid enablement of WRF by leveraging our work in transparent shaping,
GRID superscalar, profiling, code inspection, code modeling, meta-scheduling, and job flow management.
},
KEYWORDS = {Grid Enablement, Scientific Applications, WRF, Portal, Meta-Scheduling,
Job Flow Management, Modeling, and Profiling.},
PDF = {TGE-WRF-GEA-2008.pdf},
PS = {../Presentations/Mardi-Gras-GEA-2008-TGE-WRF.ppt},
}

@INPROCEEDINGS{Self-Conf-CVM-ICNSC-2008,
AUTHOR = {{\bf S. Masoud Sadjadi} and Selim Kalayci and Yi Deng},
TITLE = {A Self-Configuring Communication Virtual Machine},
MONTH = {April},
YEAR = {2008},
PAGES = {739--744},
BOOKTITLE = {Proceedings of the 2008 IEEE International Conference on Networking, Sensing and Control (ICNSC-08)},
ABSTRACT = {Today's communication-based applications are
mostly crafted in a stovepipe development paradigm, which is
inflexible to be used by various domain-specific applications and
costly in the development phase. In a previous paper [1], we
proposed a new design called CVM (Communication Virtual
Machine) to overcome these problems by having a high-level
API which can be reused and extended easily for user-centric
applications in any domain. Within CVM framework, we came
across a practical issue, which is actually the case for any
end-to-end multimedia communication, namely the NAT-traversal
(network address translation) problem that limits the reliability
and availability of CVM and variants of CVM. In this paper,
we explain about the necessity of self-configuration for the
NAT-traversal problem in end-to-end communications, and
propose a solution within the core CVM framework.
},
KEYWORDS = {Communication Virtual Machine, CVM, Self-Configuration, NAT-Resolution.},
PDF = {NCB-NAT-ICNSC-2008.pdf},
}

@INPROCEEDINGS{i-Scociety-2007-GCB-CollaborativePlatform,
AUTHOR = {Xing Hang and David Villegas Castillo and {\bf S. Masoud Sadjadi} and Heidi Alvarez},
TITLE = {Formative assessment of the effectiveness of collaboration in {GCB}},
BOOKTITLE = {Proceedings of the International Conference on Information Society (i-Society 2007)},
YEAR = {2007},
MONTH = {October},
PAGES = {103--110},
ABSTRACT = {With the rapid emergence of new communication software and hardware
tools and the improvement of telecommunication infrastructures, a new collaboration
paradigm is on the horizon that allows researchers around the globe to expand their
loop of collaborators to cross geographical and cultural boundaries. However,
much needs to be learned from the user experiences not only to improve the quality
of the collaboration facilities, but also to develop new social protocols for
distributed human interactions. In this paper, we try to analyze the usage of
cyberinfrastructure in remote collaboration among researchers. For that, we draw
on survey data and interviews with members from different collaborative projects,
and we analyze how our current communication tools meet the needs of collaborative
research activities. Then, we articulate a series of key challenges and requirements
that contemporary teams are facing. In the end, we present ideas on what sorts of
collaborative tools need to be built in order to fulfil the distributed and
interdisciplinary collaboration projects.  Our findings shed light on the factors
that drive the use of cyberinfrastructure and the effectiveness in the success of
cross-national and interdisciplinary research collaboration and distance learning,
and suggest further research topics.
},
KEYWORDS = {e-Science, formative assessment, group collaboration, distributed collaboration, distance learning.},
PDF = {i-Society-2007-GCB-CollaborationPlatforms.pdf},
}

@INPROCEEDINGS{GADA-2007,
AUTHOR = {Onyeka Ezenwoye and {\bf S. Masoud Sadjadi} and Ariel Carey and Michael Robinson},
TITLE = {Grid Service Composition in {BPEL} for Scientific Applications},
MONTH = {November},
YEAR = {2007},
PAGES = {1304--1312},
BOOKTITLE = {Proceedings of the International Conference on Grid computing, high-performAnce and Distributed Applications (GADA'07)},
ABSTRACT = {Grid computing aims to create an accessible virtual supercomputer
by integrating distributed computers to form a parallel infrastructure
for processing applications. To enable service-oriented Grid
computing, the Grid computing architecture was aligned with the current
Web service technologies; thereby, making it possible for Grid applications
to be exposed as Web services. The WSRF set of specifications
standardized the association of state information with Web services (WS-Resource)
while providing interfaces for the management of state data.
Key to the realization of the benefits of Grid computing is the ability
to integrate WS-Resources to create higher-level applications. The Business
Process Execution Language (BPEL) is the leading standard for
integrating Web services and as such has a natural affinity to the integration
of Grid services. In this paper, we share our experience on using
BPEL to integrate, create, and manage WS-Resources that implement
the factory pattern. We use a Bioinformatics application as a case study
to show how BPEL can be used to orchestrate Grid services. The execution
environment for our case study comprises the Globus Toolkit as
the Grid middleware and the ActiveBPEL as the BPEL engine. To the
best of our knowledge, this work is among the handful approaches that
successfully use BPEL for orchestrating WSRF-based services and the
only one that includes the discovery and management of instances.
},
KEYWORDS = {BPEL, Grid Computing, WSRF, OGSA-DAI, Service Composition.},
}

@INPROCEEDINGS{CoreGRID-2007,
AUTHOR = {I. Rodero and F. Guim and J. Corbalan and L. L. Fong and Y. G. Liu and {\bf S. Masoud Sadjadi}},
TITLE = {Looking for an Evolution of Grid Scheduling: Meta-brokering},
MONTH = {June},
YEAR = {2007},
PAGES = {105--119},
BOOKTITLE = {Proceedings of the Second CoreGRID Workshop on Middleware at ISC2007 (CoreGRID-2007)},
ABSTRACT = {A Grid Resource Broker, or also called meta-scheduler, is a component used for matching work to available Grid resources. The Grid resources usually have a local resource management system with a particular scheduler belonging to different IT centers or institutions. These centers or institutions may have different policies or requirements on how the resources should be used. This situation causes two main problems: the user uniform access to the Grid is lost, and the scheduling decisions are taken separately while they should be done in coordination. These problems have been observed in different efforts such as the HPC-Europa project but it is still an open problem. In this paper we discuss how to achieve a new approach in global brokering with new scheduling techniques through meta-brokering. As the result of the discussion on requirements for meta-brokering, we propose a design in two different contexts: as an extension of HPC-Europa on top of different meta-schedulers, and as a distributed model for the LA Grid meta-brokering project.},
PDF = {CoreGrid-MetaBroker-2007.pdf},
}

@INPROCEEDINGS{TGE-SEKE-2007,
AUTHOR = {{\bf S. Masoud Sadjadi} and J. Martinez and T. Soldo and L. Atencio and R. M. Badia and J. Ejarque},
TITLE = {Improving Separation of Concerns in the Development of Scientific Applications},
PAGES = {456--461},
MONTH = {July},
YEAR = {2007},
BOOKTITLE = {Proceedings of The Nineteenth International Conference on Software Engineering and Knowledge Engineering (SEKE'2007)},
ABSTRACT = {High performance computing ({HPC}) is gaining popularity in solving scientific applications. Using the current programming standards, however, it takes an HPC expert to efficiently take advantage of HPC facilities; a skill that a scientist does not necessarily have. This lack of separation of concerns has resulted in scientific applications with rigid code, which entangles non-functional concerns (i.e., the parallel code) into functional concerns (i.e., the core business logic). Effectively, this tangled code hinders the maintenance and evolution of these applications. In this paper, we introduce Transparent Grid Enabler ({TGE}) that separates the task of developing the business logic of a scientific application from the task of improving its performance. TGE achieves this goal by integrating two existing software tools, namely, {TRAP/J} and GRID superscalar. A simple matrix multiplication program is used as a case study to demonstrate the current use and capabilities of {TGE}.},
PDF = {TGE-SEKE-2007.pdf},
}

@INPROCEEDINGS{TRAP-NET-SEKE-2007,
TITLE = {{TRAP.NET}: A Realization of Transparent Shaping in {.NET}},
PAGES = {19--24},
MONTH = {July},
YEAR = {2007},
BOOKTITLE = {Proceedings of The Nineteenth International Conference on Software Engineering and Knowledge Engineering (SEKE'2007)},
ABSTRACT = {We define adaptability as the capacity of software in adjusting its behavior in response to changing conditions. To list just a few examples, adaptability is important in pervasive computing, where software in mobile devices need to adapt to dynamic changes in wireless networks; autonomic computing, where software in critical systems are required to be self-manageable; and grid computing, where software for long running scientific applications need to be resilient to hardware crashes and network outages. In this paper, we provide a realization of the transparent shaping programming model, called {TRAP.NET}, which enables transparent adaptation in existing .NET applications as a response to the changes in the application requirements and/or to the changes in their execution environment. Using {TRAP.NET}, we can adapt an application dynamically, at run time, or statically, at load time, without the need to manually modify the application original functionality---hence transparent.},
PDF = {TRAP-NET-SEKE-2007.pdf},
}

@INPROCEEDINGS{CB-CCGrid-2007,
AUTHOR = {Heidi L. Alvarez and David Chatfield and Donald A. Cox and Eric Crumpler and Cassian D'Cunha and Ronald Gutierrez and Julio Ibarra and Eric Johnson and Kuldeep Kumar and Tom Milledge and Giri Narasimhan and Rajamani S. Narayanan and Alejandro de la Puente and {\bf S. Masoud Sadjadi} and Chi Zhang},
NOTE = {(acceptance rate 33.5\%)},
TITLE = {CyberBridges: A Model Collaboration Infrastructure for {e-Science}},
MONTH = {May},
YEAR = {2007},
PAGES = {65--72},
ADDRESS = {Rio de Janeiro, Brazil},
BOOKTITLE = {Proceedings of the 7th IEEE International Symposium on Cluster Computing and the Grid (CCGrid'07)},
ABSTRACT = {The 'CyberBridges' pilot project is an innovative model for creating a new generation of scientists and engineers who are capable of fully integrating cyberinfrastructure into the whole educational, professional, and creative process of their respective disciplines. CyberBridges augments graduate student education to include a foundation of understanding in Advanced Networking and Grid Infrastructure for High Performance Computing, and bridges the divide between the information technology community and diverse science and engineering disciplines. CyberBridges is increasing the rate of discovery for science and engineering faculty by empowering them with cyberinfrastructure, fostering inter-disciplinary research collaboration, improving minority graduate education, and institutionalizing this change process. We demonstrate the effectiveness of CyberBridges by providing four case studies with graduate students of Physics, Bioinformatics, Chemistry, and Biomedical Engineering. Groundwork has begun to extend the outreach of CyberBridges for international research and education collaborations.},
PDF = {CCGrid2007-CB-Final.pdf},
}

@INPROCEEDINGS{SE-IPCCC-2007,
AUTHOR = {Raju Rangaswami and {\bf S. Masoud Sadjadi} and Nagarajan Prabakar and Yi Deng},
TITLE = {Automatic Generation of User-Centric Multimedia Communication Services},
MONTH = {April},
YEAR = {2007},
PAGES = {324--331},
ADDRESS = {New Orleans, Louisiana, USA},
BOOKTITLE = {Proceedings of the 26th IEEE International Performance Computing and Communications Conference (IPCCC)},
ABSTRACT = {Multimedia communication services today are conceived, designed, and developed in isolation, following a stovepipe approach. This has resulted in a fragmented and incompatible set of technologies and products. Building new communication services requires a lengthy and costly development cycle, which severely limits the pace of innovation. In this paper, we address the fundamental problem of automating the development of multimedia communication services. We propose a new paradigm for creating such services through declarative specification and generation, rather than through traditional design and development. Further, the proposed paradigm pays special attention to how the end-user specifies his/her communication needs, an important requirement largely ignored in existing approaches. We argue that for the domain of user-centric multimedia communication services, the proposed approach of automatic generation is not only feasible in terms of the ability to meet a range of communication needs in several domains, but is also desirable for maintaining and improving the pace of innovation in multimedia communication services.},
}

@INPROCEEDINGS{TRAP-BPEL-WEBIST-2007,
AUTHOR = {Onyeka Ezenwoye and {\bf S. Masoud Sadjadi}},
TITLE = {{TRAP/BPEL}: A Framework for Dynamic Adaptation of Composite Services},
MONTH = {March},
YEAR = {2007},
NOTE = {(17 pages.)},
BOOKTITLE = {Proceedings of the International Conference on Web Information Systems and Technologies (WEBIST 2007)},
ABSTRACT = {{TRAP/BPEL} is a framework that adds autonomic behavior into existing BPEL processes automatically and
transparently. We define an autonomic BPEL process as a composite Web service that is capable of responding
to changes in its execution environment (e.g., a failure in a partner Web service). Unlike other approaches,
{TRAP/BPEL} does not require any manual modifications to the original code of the BPEL processes and there
is no need to extend the BPEL language nor its BPEL engine. In this paper, we describe the details of the
{TRAP/BPEL} framework and use a case study to demonstrate the feasibility and effectiveness of our approach.},
KEYWORDS = {{TRAP/BPEL}, generic proxy, self-management, dynamic service discovery.},
PDF = {TRAP-BPEL-WEBIST-2007.pdf},
}

@INPROCEEDINGS{RobustBPEL2-ISADS-2007,
AUTHOR = {Onyeka Ezenwoye and {\bf S. Masoud Sadjadi}},
TITLE = {{RobustBPEL2}: Transparent Autonomization in Business Processes through Dynamic Proxies},
MONTH = {March},
YEAR = {2007},
PAGES = {17--24},
BOOKTITLE = {Proceedings of the 8th IEEE International Symposium on Autonomous Decentralized Systems (ISADS 2007)},
ABSTRACT = {Web services paradigm is allowing applications to interact with one another over the Internet. BPEL facilitates this interaction by providing a platform through which Web services can be integrated. However, the autonomous and distributed nature of the integrated services presents unique challenges to the reliability of composed services. The focus of our ongoing research is to transparently introduce autonomic behavior to BPEL processes in order to make them more resilient to the failure of partner services. In this work, we present an approach where BPEL processes are adapted by redirecting their interactions with partner services to a dynamic proxy. We describe the generative adaptation process and the architecture of the adaptive BPEL processes and their corresponding proxies. Finally, we use case studies to demonstrate how generated dynamic proxies are used to support self-healing and self-optimization in instrumented BPEL processes.},
KEYWORDS = {RobustBPEL2, dynamic proxy, self-management, dynamic service discovery.},
}

@INPROCEEDINGS{NCB-CollaborateCom-2006,
AUTHOR = {Chi Zhang and {\bf S. Masoud Sadjadi} and Weixiang Sun and Raju Rangaswami and Yi Deng},
TITLE = {A User-Centric Network Communication Broker for Multimedia Collaborative Computing},
MONTH = {November},
YEAR = {2006},
PAGES = {1--5},
ADDRESS = {Atlanta, Georgia, USA},
BOOKTITLE = {Proceedings of the Second IEEE International Conference on Collaborative Computing (CollaborateCom 2006)},
ABSTRACT = {The development of collaborative multimedia applications today follows a vertical development approach, which is a major inhibitor that drives up the cost of development and slows down the pace of innovation of new generations of collaborative applications. In this paper, we propose a network communication broker (NCB) that provides a unified higher-level abstraction that encapsulates the complexity of network-level communication control and media delivery for the class of multimedia collaborative applications. NCB expedites the development of next-generation applications with various communication logics. Furthermore, NCB-based applications can be easily ported to new network environments. In addition, the self-managing design of NCB supports dynamic adaptation in response to changes in network conditions and user requirements.},
KEYWORDS = {Network communication broker, multimedia, middleware.},
}

@INPROCEEDINGS{CVM-COMPSAC-2006,
AUTHOR = {Yi Deng and {\bf S. Masoud Sadjadi} and Peter J. Clarke and Chi Zhang and Vagelis Hristidis and Raju
Rangaswami and Nagarajan Prabakar},
TITLE = {A Communication Virtual Machine},
MONTH = {September},
YEAR = {2006},
PAGES = {521--531},
ADDRESS = {Chicago, U.S.A.},
BOOKTITLE = {Proceedings of the 30th Annual International Computer Software and Applications Conference (COMPSAC 2006)},
ABSTRACT = {
The convergence of data, voice and multimedia
communication over digital networks, coupled with
continuous improvement in network capacity and
reliability has significantly enriched the ways we
communicate. However, the stovepipe approach used
to develop today's communication applications and
tools results in rigid technology, limited utility, lengthy
and costly development cycle, difficulty in integration,
and hinders innovation. In this paper, we present a
fundamentally different approach, which we call
Communication Virtual Machine (CVM) to address
these problems. CVM provides a user-centric, model-driven
approach for conceiving, synthesizing and
delivering communication solutions across application
domains. We argue that CVM represents a far more
solutions. The concept, architecture, modeling
language, prototypical design and implementation of
CVM are discussed.},
KEYWORDS = {Model driven, communication application,
multimedia, middleware, telemedicine.},
}

@INPROCEEDINGS{MobileServiceClouds-SelfMan-2006,
AUTHOR = {Farshad A. Samimi and Philip K. McKinley and {\bf S. Masoud Sadjadi}},
TITLE = { {Mobile Service Clouds}: A Self-Managing Infrastructure for Autonomic Mobile Computing Services },
MONTH = {June},
YEAR = {2006},
PAGES = {130--141},
ADDRESS = {Dublin, Ireland},
BOOKTITLE = {Proceedings of the Second International Workshop on Self-Managed Networks, Systems \& Services (SelfMan 2006, LNCS 3996)},
PUBLISHER = {Springer-Verlag},
SERIES = {Lecture Notes in Computer Science (LNCS)},
VOLUME = {3996},
ABSTRACT = {
We recently introduced Service Clouds, a distributed infrastructure
designed to facilitate rapid prototyping and deployment of autonomic communication
services. In this paper, we propose a model that extends Service Clouds
to the wireless edge of the Internet. This model, called Mobile Service Clouds,
enables dynamic instantiation, composition, configuration, and reconfiguration
of services on an overlay network to support self-management in mobile computing.
We have implemented a prototype of this model and applied it to the problem
of dynamically instantiating and migrating proxy services for mobile hosts. We
conducted a case study involving data streaming across a combination of PlanetLab
nodes, local proxies, and wireless hosts. Results are presented demonstrating
the effectiveness of the prototype in establishing new proxies and migrating their
functionality in response to node failures.},
KEYWORDS = {autonomic networking, distributed service composition, self-managing
system, overlay network, mobile computing, quality of service.},
}

@INPROCEEDINGS{RobustBPEL-ICEIS-2006,
AUTHOR = {Onyeka Ezenwoye and {\bf S. Masoud Sadjadi}},
TITLE = {Enabling robustness in existing {BPEL} processes},
BOOKTITLE = {Proceedings of the 8th International Conference on Enterprise Information Systems},
YEAR = {2006},
MONTH = {May},
NOTE = {(8 pages)},
ABSTRACT = {
Web services are increasingly being used to expose applications
over the Internet. To promote efficiency and the reuse of software,
these Web services are being integrated both within enterprises and
across enterprises, creating higher function services. BPEL is a
workflow language that can be used to facilitate this integration.
Unfortunately, the autonomous nature of Web services leaves BPEL
processes susceptible to the failures of their constituent services.
In this paper, we present a systematic approach to making existing
BPEL processes more fault tolerant by monitoring the involved Web
services at runtime, and by replacing delinquent Web services. To show
the feasibility of our approach, we developed a prototype implementation
that generates more robust BPEL processes from existing ones automatically.
The use of the prototype is demonstrated using an existing Loan Approval
BPEL process.
},
KEYWORDS = {ECommerce, Web Service Monitoring, Robust BPEL Processes.},
}

@INPROCEEDINGS{AggregateWS-ACMSE-2006,
AUTHOR = {Onyeka Ezenwoye and {\bf S. Masoud Sadjadi}},
TITLE = {Composing Aggregate Web Services in {BPEL}},
BOOKTITLE = {Proceedings of the 44th ACM Southeast Conference (ACMSE 2006)},
YEAR = {2006},
MONTH = {March},
PAGES = {458--463},
ABSTRACT = {
Web services are increasingly being used to expose applications
over the Internet. These Web services are being integrated within
and across enterprises to create higher function services. BPEL
is a workflow language that facilitates this integration. Although
both academia and industry acknowledge the need for workflow
languages, there are few technical papers focused on BPEL. In this
paper, we provide an overview of BPEL and discuss its promises,
limitations and challenges.
},
KEYWORDS = {Web services, workflow language, BPEL, business processes, A2A
integration, and B2B integration.},
PDF = {AggregateWS-ACMSE-2006.pdf},
}

@INPROCEEDINGS{TAI-ICAC,
AUTHOR = {{\bf S. Masoud Sadjadi} and Philip K. McKinley},
TITLE = {Using Transparent Shaping and Web Services to Support
Self-Management of Composite Systems},
BOOKTITLE = {Proceedings of the International Conference on Autonomic Computing (ICAC'05)},
YEAR = {2005},
PAGES = {76--87},
MONTH = {June},
ABSTRACT = {
Increasingly, software systems are constructed by composing
multiple existing applications. The resulting complexity
increases the need for self-management of the system.
However, adding autonomic behavior to composite
systems is difficult, especially when the existing components
were not originally designed to support such interactions.
Moreover, entangling the code for integrated self-management
with the code for the business logic of the original
applications may actually increase the complexity of
the system, counter to the desired goal. In this paper, we
propose a technique to enable self-managing behavior to
be added to composite systems transparently, that is, without
requiring manual modifications to the existing code.
The technique uses transparent shaping, developed previously
to enable dynamic adaptation in existing programs,
to weave self-managing behavior into existing applications,
which interact through Web services. A case study demonstrates
the use of this technique to construct a fault-tolerant
surveillance application from two existing applications, one
developed in .NET and the other in CORBA, without the
need to modify the source code of the original applications.
},
KEYWORDS = {application integration, adaptive middleware, autonomic computing.},
PDF = {TAI-ICAC.pdf},
}

@INPROCEEDINGS{TransparentShaping-ICSE-DEAS-2005,
AUTHOR = {{\bf S. Masoud Sadjadi} and Philip K. McKinley and Betty H.C. Cheng},
TITLE = {Transparent Shaping of Existing Software to Support Pervasive and Autonomic Computing},
BOOKTITLE = {Proceedings of the first Workshop on the Design and Evolution of Autonomic
Application Software 2005 (DEAS'05), in conjunction with ICSE 2005},
YEAR = {2005},
PAGES = {1--7},
MONTH = {May},
ABSTRACT = {
The need for adaptability in software is growing, driven in part by
the emergence of pervasive and autonomic computing. In many
cases, it is desirable to enhance existing programs with adaptive
behavior, enabling them to execute effectively in dynamic environments.
In this paper, we propose a general programming model
called transparent shaping to enable dynamic adaptation in existing
programs. We describe an approach to implementing transparent
shaping that combines four key software development techniques:
aspect-oriented programming to realize separation of concerns at
development time, behavioral reflection to support software reconfiguration
at run time, component-based design to facilitate independent
middleware to encapsulate the adaptive functionality. After
presenting the general model, we discuss two specific realizations
of transparent shaping that we have developed and used to create
},
PDF = {TransparentShaping-ICSE-DEAS-2005.pdf},
}

@INPROCEEDINGS{X-Communicator-SouthEastCon-2005,
AUTHOR = {Shakil Siddique and Raimund K. Ege and {\bf S. Masoud Sadjadi}},
TITLE = {{X-Communicator}: Implementing an Advanced Adaptive {SIP}-based
User Agent for Multimedia Communication},
BOOKTITLE = {Proceedings of the SouthEastCon 2005},
YEAR = {2005},
PAGES = {271--276},
PDF = {X-Communicator.pdf},
}

@INPROCEEDINGS{TRAP-MPAC,
AUTHOR = {Farshad A. Samimi and Philip K. McKinley and {\bf S. Masoud Sadjadi} and Peng Ge},
TITLE = {Kernel-Middleware Interaction to Support Adaptation in Pervasive Computing Environments},
BOOKTITLE = {Proceedings of the Second International Workshop on
Middleware for Pervasive and Ad-Hoc Computing, a Companion Proceedings of the
fifth International Middleware Conference (Middleware'04)},
YEAR = {2004},
PAGES = {140--145},
MONTH = {October},
ABSTRACT = {
In pervasive computing environments, conditions are highly variable
and resources are limited. In order to meet the needs of applications,
systems must adapt dynamically to changing situations.
Since adaptation at one system layer may be insufficient, crosslayer,
or vertical approaches to adaptation may be needed. Moreover,
adaptation in distributed systems often requires horizontal cooperation
among hosts. This cooperation is not restricted to the
source and destination(s) of a data stream, but might also include
intermediate hosts in an overlay network or mobile ad hoc network.
We refer to this combined capability as universal adaptation.
We contend that the model defining interaction between adaptive
middleware and the operating system is critical to realizing universal
adaptation. We explore this hypothesis by evaluating the
Kernel-Middleware eXchange (KMX), a specific model for crosslayer,
cross-system adaptation. We present the KMX architecture
and discuss its potential role in supporting universal adaptation in
pervasive computing environments. We then describe a prototype
implementation of KMX and show results of an experimental case
study in which KMX is used to improve the quality of video streaming
to mobile nodes in a hybrid wired-wireless network.
},
KEYWORDS = {
quality of service, video streaming, wireless network.
},
PDF = {TRAP-MPAC-2004.pdf},
}

@INPROCEEDINGS{TRAP-DOA,
AUTHOR = {{\bf S. Masoud Sadjadi} and Philip K. McKinley and Betty H.C. Cheng and R.E. Kurt Stirewalt},
TITLE = {{TRAP/J}: Transparent Generation of Adaptable {Java} Programs},
BOOKTITLE = {Proceedings of the International Symposium on Distributed Objects and Applications (DOA'04)},
YEAR = {2004},
VOLUME = {3291},
PAGES = {1243--1261},
MONTH = {October},
ABSTRACT = {
This paper describes {TRAP/J}, a software tool that enables new
adaptable behavior to be added to existing
Java applications transparently (that is, without modifying
the application source code and without extending the JVM).
The generation process combines behavioral reflection
and aspect-oriented programming to achieve this goal.
Specifically, {TRAP/J} enables the developer to select,
at compile time, a subset of classes in the existing
program that are to be adaptable at run time.  {TRAP/J}
then generates specific aspects and reflective classes
associated with the selected classes, producing an
adapt-ready program. New behavior
can be introduced via interfaces to the adaptable classes.
A case study is presented in which {TRAP/J} is used to
introduce adaptive behavior to an existing audio-streaming
application, enabling it to operate effectively in a lossy
wireless network by detecting and responding to changing
network conditions.
},
KEYWORDS = {
generator framework, transparent adaptation, dynamic reconfiguration,
aspect-oriented programming, behavioral reflection, middleware,
mobile computing, quality-of-service.
},
PDF = {TRAP-DOA-2004.pdf},
}

@INPROCEEDINGS{QET-IWQOS,
AUTHOR = {Z. Zhou and P. K. McKinley and {\bf S. M. Sadjadi}},
TITLE = {On Quality-of-Service and Energy Consumption Tradeoffs in
FEC-Enabled Audio Streaming},
BOOKTITLE = {Proceedings of the 12th IEEE International Workshop on Quality of Service (IWQoS 2004)},
NOTE = {Winner of the IWQoS 2004 best student paper award. (acceptance rate 16.23\% or 25/154)},
YEAR = {2004},
PAGES = {161--170},
MONTH = {June},
ABSTRACT = {
This paper addresses the energy consumption of forward error
correction (FEC) protocols as used to improve quality-of-service
(QoS) for wireless computing devices. The paper also characterizes the effect on energy
consumption and QoS of the power saving mode in 802.11
wireless local area networks (WLANs). Experiments are described in
which FEC-encoded audio streams are multicast to mobile
computers across a WLAN. Results of these experiments
quantify the tradeoffs between improved QoS, due to FEC, and
additional energy consumption caused by receiving and decoding
redundant packets. Two different approaches to FEC are
compared relative to these metrics. The results of this study enable the development of
adaptive software mechanisms that attempt to manage these
tradeoffs in the presence of highly dynamic wireless environments.
},
KEYWORDS = {
energy consumption, quality-of-service, forward error correction, mobile computing,
},
PDF = {QET-IWQOS.pdf},
}

@INPROCEEDINGS{ACT-ICAC,
AUTHOR = {{\bf S. M. Sadjadi} and P. K. McKinley},
TITLE = {Transparent Self-Optimization in Existing {CORBA} Applications},
BOOKTITLE = {Proceedings of the International Conference on Autonomic Computing (ICAC-04)},
YEAR = {2004},
PAGES = {88--95},
MONTH = {May},
ABSTRACT = {
to support autonomic computing in pervasive computing
environments.
The particular problem we address here is how to support
self-optimization to changing network connection capabilities
as a mobile user interacts with heterogeneous elements
in a wireless network infrastructure.
The goal is to enable self-optimization to such changes
transparently with respect to the core application code.
We propose a solution based on the use of the {\em generic
proxy}, which is a specific CORBA object that can intercept
and process any CORBA request using rules and actions that
can be introduced to the knowledge base of the proxy during
execution.
To explore its design and operation, we have incorporated
the generic proxy into ACT [1], an adaptive
middleware framework we designed previously to support
Details of the generic proxy are presented, followed by
results of a case study enabling self-optimization for an
existing surveillance application in a heterogeneous
wireless environment.
},
KEYWORDS = {adaptive middleware, autonomic computing, self-optimization,
quality-of-service, mobile computing, CORBA.},
PDF = {ACT-ICAC.pdf},
}

@INPROCEEDINGS{TRAP-ICAC,
AUTHOR = {{\bf S. M. Sadjadi} and P. K. McKinley and R. E. K. Stirewalt and B. H.C. Cheng},
TITLE = {Generation of Self-Optimizing Wireless Network Applications},
BOOKTITLE = {Proceedings of the International Conference on Autonomic Computing (ICAC-04)},
YEAR = {2004},
MONTH = {May},
PAGES = {310--311},
ABSTRACT = {
This paper introduces {\em TRAP/J}, a software tool that
enables autonomic computing in existing Java programs by
at compile time.
The generation process is transparent to the original
program source code, in which there is no need to modify
the source code manually.
programs.
To reduce overhead, TRAP/J enables the developer to select,
at compile time, a subset of classes, constituting an
existing program, to be adaptive at run time.
To support dynamic adaptation in existing Java programs,
TRAP/J benefits from aspect-oriented programming and
behavioral reflection.
TRAP/J generates specific aspects and reflective classes associated
with the selected classes.
A case study is presented in which TRAP/J was used to
enable an existing audio-streaming application to perform
self-optimization in a wireless network environment by
},
KEYWORDS = {aspect-oriented programming, behavioral reflection, middleware,
quality-of-service, mobile computing.},
PDF = {TRAP-ICAC.pdf},
}

@INPROCEEDINGS{ACT-ICDCS,
AUTHOR = {{\bf S. M. Sadjadi} and P. K. McKinley},
NOTE = {(acceptance rate 17.7\%)},
PAGES = {74--83},
TITLE = {{ACT}: An Adaptive {CORBA} Template to Support Unanticipated Adaptation},
BOOKTITLE = {Proceedings of the 24th IEEE International Conference on Distributed Computing Systems (ICDCS'04)},
ABSTRACT = { This paper proposes an Adaptive CORBA Template (ACT), which
enables run-time improvements to CORBA applications in
response to unanticipated changes in either their functional
requirements or their execution environments. ACT enhances
CORBA applications by weaving adaptive code into the
applications' object request brokers (ORBs) at run time. The
woven code intercepts and adapts the requests, replies, and
exceptions that pass through the ORBs. ACT itself is
language- and ORB-independent. Specifically, ACT can be used
to develop an object-oriented framework in any language that
CORBA ORB that supports portable interceptors. Moreover, ACT
can be integrated with other adaptive CORBA frameworks and
can be used to support interoperation among otherwise
incompatible frameworks. To evaluate the performance and
functionality of ACT, we implemented a prototype in Java to
support unanticipated adaptation in non-functional concerns,
such as quality-of-service and system-resource management.
Our experimental results show that the overhead introduced
by the ACT infrastructure is negligible, while the
},
KEYWORDS = {middleware, CORBA, dynamic adaptation, interoperability,
request interceptor, dynamic weaving, proxy,
quality-of-service, mobile computing},
MONTH = {March},
YEAR = {2004},
AUTHOR2_URL = {http://www.cse.msu.edu/~mckinley},
AUTHOR2_EMAIL = {mckinley@cse.msu.edu},
PDF = {ACT-ICDCS.pdf},
}

@INPROCEEDINGS{MetaSockets,
AUTHOR = {{\bf S. M. Sadjadi} and P. K. McKinley and E. P. Kasten},
TITLE = {Architecture and Operation of an Adaptable Communication Substrate},
BOOKTITLE = {Proceedings of the Ninth IEEE International Workshop on Future Trends of Distributed Computing Systems (FTDCS'03)},
YEAR = {2003},
MONTH = {May},
PAGES = {46--55},
ADDRESS = {San Juan, Puerto Rico},
ABSTRACT = {This paper describes the internal architecture and operation of an adaptable communication component called the MetaSocket. MetaSockets are created using Adaptive Java, a reflective extension to Java that enables a component's internal architecture and behavior to be adapted at run time in response to external stimuli. This paper describes how adaptive behavior is implemented in MetaSockets, as well as how MetaSockets interact with other adaptive components, such as decision makers and event mediators. Results of experiments on a mobile computing testbed demonstrate how MetaSockets respond to dynamic wireless channel conditions in order to improve the quality of interactive audio streams delivered to iPAQ handheld computers. },
PDF = {ftdcs-03.pdf}
}

@INPROCEEDINGS{iswc-02,
AUTHOR = {Philip K. McKinley and {\bf S. M. Sadjadi} and E. P. Kasten and R. Kalaskar},
TITLE = {Programming Language Support for Adaptive Wearable Computing},
BOOKTITLE = {Proceedings of International Symposium on Wearable Computers (ISWC'02)},
YEAR = {2002},
MONTH = {October},
PAGES = {205--212},
ABSTRACT = {This paper investigates the use of programming language
constructs to realize adaptive behavior in support
of collaboration among users of wearable and handheld
computers. A prototype language, Adaptive Java, contains
primitives that permit programs to modify their own operation
in a principled manner. In a case study, Adaptive
Java was used to construct MetaSocket components,
whose composition and behavior can be adapted to changing
conditions during execution. MetaSockets were then
integrated into Pavilion, a web-based collaboration framework,
and experiments were conducted on a mobile computing
testbed containing wearable, handheld, and laptop
computer systems. Performance results demonstrate the
utility of MetaSockets to improving the quality of interactive
audio streams and reliable data transfers among collaborating
users.},
KEYWORDS = {adaptive middleware, reflection, wearable computing,
mobile computing, wireless networks, forward error correction.},
PDF = {iswc-02.pdf},
}

@INPROCEEDINGS{icts-10,
AUTHOR = {P. K. McKinley and {\bf S. M. Sadjadi} and E. P. Kasten},
TITLE = {An Adaptive Software Approach to Intrusion Detection and Response},
BOOKTITLE = {Proceedings of The 10th International Conference on Telecommunication Systems, Modeling and Analysis (ICTSM10)},
ABSTRACT = {This paper proposes the use of programming language constructs to support
adaptive self-monitoring and self-reporting software. The methods are
particularly well-suited to wireless mobile devices, where limited resources
may constrain the use of certain software audits. An adaptive software
architecture is described that supports run-time transformations on software
components, enabling them to report internal details on how they are
being used to other parts of the system. Effectively, any component of the
system can be turned into an ``informer'' at run time, and the nature of
the reported information can be adapted dynamically based on changing
conditions or directives from another authority, such as an intrusion
detection system. A prototype implementation is described. The operation of
the system is demonstrated through an experiment in which it detects and
responds to a malicious host that multicasts ``noise'' packets to a wireless
iPAQ handheld computer.},
YEAR = {2002},
MONTH = {October},
PAGES = {91--99},
PDF = {icts-10.pdf}
}

@INPROCEEDINGS{SHAMAN-02,
AUTHOR = {P. K. McKinley and E. P. Kasten and {\bf S. M. Sadjadi} and Z. Zhou},
TITLE = {Realizing Multi-Dimensional Software Adaptation},
BOOKTITLE = {Proceedings of the ACM Workshop on Self-Healing, Adaptive and self-MANaged Systems (SHAMAN), {\rm held in conjunction with the} 16th Annual ACM International Conference on Supercomputing},
ABSTRACT = {This paper describes the use of programming language constructs
to support run-time software adaptation. A prototype language,
Adaptive Java, contains primitives that permit programs to modify
their own operation in a principled manner. In case studies, Adaptive
Java is being used to support adaptation for different crosscutting
concerns associated with heterogeneous mobile computing
and critical infrastructure protection. Examples are described
in which Adaptive Java components support dynamic quality-of-service
on wireless networks, run-time energy management for handheld
computers, and self-auditing of potential security threats in
distributed environments.},
ADDRESS = {New York City, NY},
YEAR = {2002},
MONTH = {June},
NOTE = {(8 pages)},
PDF = {shaman02.pdf}
}

@INPROCEEDINGS{woss-02,
AUTHOR = {Z. Yang and B. H.C. Cheng and R. E. K. Stirewalt and J. Sowell and {\bf S. M. Sadjadi} and P. K. McKinley},
TITLE = {An Aspect-Oriented Approach to Dynamic Adaptation},
BOOKTITLE = {Proceedings of the ACM SIGSOFT Workshop On Self-healing Software (WOSS'02)},
YEAR = {2002},
MONTH = {November},
PAGES = {85--92},
ABSTRACT = {This paper presents an aspect-oriented approach to dynamic adaptation. A systematic process for defining where, when, and how an adaptation is to be incorporated into an application is presented. Specifically, the paper presents a two-phase approach to dynamic adaptation, where the first phase prepares a non-adaptive program for adaptation, and the second phase implements the adaptation at run time. This approach is illustrated with a distributed conferencing application.},
PDF = {woss-02.pdf}
}

@INPROCEEDINGS{icdcs-aop-02,
AUTHOR = {E. P. Kasten and P. K. McKinley and {\bf S. M. Sadjadi} and R. E. K. Stirewalt},
TITLE = {Separating Introspection and Intercession in Metamorphic Distributed Systems},
BOOKTITLE = {Proceedings of the IEEE Workshop on Aspect-Oriented Programming for Distributed Computing	 (with ICDCS'02)},
ABSTRACT = {Many middleware platforms use computational reflection
to support adaptive functionality. Most approaches intertwine
the activity of observing behavior (introspection)
with the activity of changing behavior (intercession). This
paper explores the use of language constructs to separate
these parts of reflective functionality. This separation and
``packaging'' of reflective primitives is intended to facilitate
the design of correct and consistent adaptive middleware.
A prototype implementation is described in which this
functionality is realized through extensions to the Java programming
language. A case study is described in which
``metamorphic'' socket components are created from regular
socket classes and used to realize adaptive behavior on
wireless network connections.},
KEYWORDS = {adaptive middleware, reflection, component design,
mobile computing, wireless networks, forward error correction.},
PAGES = {465--472}