tech_report.bib

@techreport{NAS-130,
  author = {Srikar Tati and Scott Rager and Thomas {La Porta}},
  title = {{netCSI: A Generic Fault Diagnosis Algorithm for Computer Networks}},
  number = {NAS-TR-0130-2010},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering, Pennsylvania State University, University Park, PA, USA},
  month = {June},
  year = {2010}
}
@techreport{NAS-0001,
  pdf = {/tech_report/NAS-TR-0001-2004.pdf},
  author = {Patrick Traynor and Heesook Choi and Guohong Cao and Sencun Zhu and Thomas F. La Porta},
  title = {Establishing Pair-Wise Keys in Heterogeneous Sensor Networks},
  number = {NAS-TR-0001-2004},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering, Pennsylvania State University, University Park, PA, USA},
  month = {December},
  year = {2004},
  note = {Updated July 6, 2005},
  abstract = {Many applications that make use of sensor networks require secure communication. Because  asymmetric-key  approaches  to  security  are  impossible to implement  in  such a resource-constrained  environment, symmetric-key methods coupled with clever a priori key distribution schemes have been  proposed to achieve the goals of data secrecy and integrity. These approaches typically assume that all sensors are similar in terms of capabilities, and hence deploy the same number of keys in all sensors in a  network to provide the aforementioned protections.  In this paper we demonstrate that a probabilistic unbalanced distribution of keys throughout the network that leverages the existence of a small percentage of more capable sensor nodes can not only provide an equal level of security but also reduce the  consequences of node compromise. We demonstrate the effectiveness of this approach on small networks  using a variety of trust models and then demonstrate the application of this method to very large systems. }
}
@techreport{NAS-0003,
  pdf = {/tech_report/NAS-TR-0003-2005.pdf},
  author = {Patrick Traynor and Guohong Cao and Thomas F. La Porta},
  title = {The Effects of Probabilistic Key Management on Secure Routing in Sensor Networks},
  number = {NAS-TR-0003-2005},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering, Pennsylvania State University, University Park, PA, USA},
  month = {January},
  year = {2005},
  abstract = {Secure data dissemination is a necessary and an extremely important component of ad hoc sensor networks and has been the topic of a large body of literature over the past few years. A variety of schemes have been proposed in order to ensure that data is delivered through these networks while maintaining message authenticity, integrity and, if so desired, privacy. The majority of approaches applied to ad hoc networks assume the presence of either public or pre-established symmetric keys. This assumption, however, is not realistic for sensor networks. In this paper, we discuss the use of probabilistic symmetric-key management schemes and the ways in which their deployment specifically affects the ability of sensor nodes to optimally route packets in a secure setting. While numerous papers have advocated such an approach, none have investigated the details of such an implementation. Specifically, we contrast pre-establishing symmetric keys with neighboring nodes to a completely reactive approach of instituting secure relationships. Through simulation, we quantify the consequences of the application of these two methods on a number of scenarios requiring secure hop-by-hop routing in sensor networks.}
}
@techreport{NAS-0002,
  pdf = {/tech_report/NAS-TR-0002-2004.pdf},
  author = {William Aiello and Kevin Butler and Patrick McDaniel},
  title = {Path Authentication in Interdomain Routing},
  number = {NAS-TR-0002-2004},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering, Pennsylvania State University, University Park, PA, USA},
  note = {Revised May 2005},
  month = {December},
  year = {2004},
  abstract = {Interdomain routing is implemented on the Internet through the Border Gateway Protocol (BGP). Many approaches have been proposed to mitigate or solve the many problems of BGP security; yet, none of the proposed solutions have been widely deployed. The lack of adoption is largely caused by a failure to find an acceptable balance between deployability, cost, and security. In this paper, we study one aspect of the BGP security puzzle: path validation. Unlike many previous works in this area, we develop a formal model of path authentication in BGP. We define and prove the security of our novel and efficient solutions under this model. We further analyze the security relevant stability of paths in the Internet and profile resource consumption of the proposed constructions via trace-based simulations. Our constructions are shown to reduce signature validation costs by as much as 97.3\% over existing proposals while requiring nominal storage resources. We conclude by considering how our solution can be deployed in the Internet.}
}
@techreport{NAS-0004,
  pdf = {/tech_report/NAS-TR-0004-2005.pdf},
  author = {Boniface Hicks and David King and Patrick McDaniel},
  title = {Declassification with Cryptographic Functions in a Security-Typed Language},
  number = {NAS-TR-0004-2005},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering, Pennsylvania State University, University Park, PA, USA},
  month = {May},
  year = {2005},
  abstract = {Security-typed languages are powerful tools for provably enforcing noninterference. Real computing systems, however, often intentionally violate noninterference by deliberately releasing (or declassifying) sensitive information. These systems frequently trust cryptographic functions to achieve declassification while still maintaining confidentiality. We introduce the notion of trusted functions that implicitly act as declassifiers within a security-typed language. Proofs of the new language's soundness and its enforcement of a weakened form of noninterference are given. Additionally, we implement trusted functions used for declassification in the Jif language. This represents a step forward in making security-typed languages more practical for use in real systems.}
}
@techreport{NAS-0007,
  pdf = {/tech_report/NAS-TR-0007-2005.pdf},
  author = {William Enck and Patrick Traynor and Patrick McDaniel and Thomas F. {La Porta}},
  title = {Exploiting Open Functionality in {SMS}-Capable Cellular Networks},
  number = {NAS-TR-0007-2005},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering, Pennsylvania State University, University Park, PA, USA},
  month = {May},
  year = {2005},
  abstract = {Cellular networks are a critical component of the economic and social infrastructures in which we live. In addition to voice services, these networks deliver alphanumeric text messages to the vast majority of
wireless subscribers. To encourage the expansion of this new service, telecommunications companies offer connections between their networks and the Internet. The ramifications of such connections, however, have not been fully recognized. In this paper, we evaluate the security impact of the SMS interface on the availability of the cellular phone network. Specifically, we demonstrate the ability to deny voice service to cities the size of Washington D.C. and Manhattan with little more than a cable modem. Moreover, attacks targeting the entire United States are feasible with resources available at most medium-sized organizations. This analysis begins with an exploration of the structure of cellular networks. We then characterize network behavior and explore a number of reconnaissance techniques aimed at effectively targeting attacks on these systems.  We conclude by discussing countermeasures that mitigate or eliminate the threats introduced by these attacks.}
}
@techreport{NAS-0005,
  pdf = {/tech_report/NAS-TR-0005-2005.pdf},
  author = {{JaeSheung} Shin and {HeeSook} Choi and Patrick Traynor and Thomas F. {La Porta}},
  title = {Network Formation Schemes for Dynamic Multi-Radio, Multi-Hop Cellular Networks},
  number = {NAS-TR-0005-2005},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering, Pennsylvania State University, University Park, PA, USA},
  month = {April},
  year = {2005},
  note = {Updated July 6, 2005},
  abstract = {Multi-hop relaying in cellular networks can greatly increase capacity and  performance by exploiting the best available links to a base station. We envision an environment in  which relay networks are dynamically formed in different frequency bands in response to the  degradation of network performance. Nodes experiencing poor service may use their agile radios to  join one of the available, non-interfering relay networks. We propose and evaluate a set of algorithms  used to form such relay networks on-demand. Each of the algorithms begins by designating the nodes  best suited for acting as gateways between the relay and cellular networks. Each scheme then  determines the order of route request initiations. These algorithms are evaluated for latency, signaling  overhead and gateway load during the network formation process, and average path length and  amount of link sharing in the resulting relay networks. The evaluation leads us to conclude that having  nodes furthest from the BS initiate route discovery first is the best approach for reducing the  formation overhead and building efficient relay networks. To our knowledge, we are the first to  propose and evaluate algorithms for the on-demand formation of multi-hop relay networks. }
}
@techreport{NAS-0006,
  pdf = {/tech_report/NAS-TR-0006-2005.pdf},
  author = {{JaeSheung} Shin and {KyoungHwan} Lee and Aylin Yener and Thomas F. {La Porta}},
  title = {On-Demand Diversity Wireless Relay Networks},
  number = {NAS-TR-0006-2005},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering, Pennsylvania State University, University Park, PA, USA},
  month = {April},
  year = {2005},
  abstract = {There has been much recent attention on using wireless relay networks to forward data  from mobile nodes to a base station. This network architecture is motivated by performance  improvements obtained by leveraging the highest quality links to a base station for data transfer.  With the advent of agile radios it is possible to improve the performance of relay networks through  intelligent frequency assignments. First, it is beneficial if the links of the relay network are  orthogonal with respect to each other so that simultaneous transmission on all links is possible.   Second, diversity can be added to hops in the relay network to reduce error rates. In this paper we  present algorithms for forming such relay networks dynamically. The formation algorithms support  intelligent frequency assignments. Our results show that algorithms that order the sequence in which  nodes join a relay network carefully achieve the highest amount of diversity and hence best  performance.}
}
@techreport{NAS-0008,
  pdf = {/tech_report/NAS-TR-0008-2005.pdf},
  author = {Patrick Traynor and Raju Kumar and Hussain Bin Saad and Guohong Cao and Thomas F. La Porta},
  title = {{LIGER}: Implementing Efficient Hybrid Security Mechanisms for Heterogeneous Sensor Networks},
  number = {NAS-TR-0008-2005},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering, Pennsylvania State University, University Park, PA, USA},
  month = {May},
  year = {2005},
  note = {Updated July 5, 2005},
  abstract = {The majority of security schemes available for sensor networks assume deployment in areas without access to a wired infrastructure. More specifically, nodes in these networks are unable to leverage key distribution centers (KDCs) to assist them with key management. In networks with a heterogeneous mix of nodes, however, it is not unrealistic to assume that some more powerful nodes have at least intermittent contact with a backbone network. For instance, an air-deployed battlefield network may have to operate securely for some time until uplinked friendly forces move through the area. We therefore propose LIGER, a hybrid key management scheme for heterogeneous sensor networks that allows systems to operate in both the presence and absence of a KDC. Specifically, when no KDC is available, nodes communicate securely with each other based upon a probabilistic unbalanced method of key management. The ability to access a KDC allows nodes to probabilistically authenticate neighboring devices with which they are communicating. We also demonstrate that this scheme is robust to the compromise of both low and high capability nodes and that the same keys can be used for both modes of operation. Detailed experiments and simulations are used to show that LIGER is a highly practical solution for the current generation of sensors and the unbalanced approach can significantly reduce the network initialization time.}
}
@techreport{NAS-0009,
  pdf = {/tech_report/NAS-TR-0009-2005.pdf},
  author = {Patrick Traynor and Kevin Butler and William Enck and Jennifer Plasterr and Scott Weaver and John van Bramer and Patrick McDaniel},
  title = {Privacy-Preserving Web-Based Email},
  number = {NAS-TR-0009-2005},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering, Pennsylvania State University, University Park, PA, USA},
  month = {June},
  year = {2005},
  abstract = {The Internet is hemorrhaging unimaginable amounts of user data. In addition to information leaked through tracking cookies and spyware, users are often required to allow the providers of online services such as web-based email access to their data. We argue that it is possible to protect this information from the dangers of data mining by external sources regardless of the arbitrary privacy policies imposed by these services. As an existence proof, we present Ketu -- an open-source, extensible tool that provides for message privacy and integrity while assuring plausible deniability for both the sender and receiver.}
}
@techreport{NAS-0010,
  pdf = {/tech_report/NAS-TR-0010-2005.pdf},
  author = {Wesam Lootah and William Enck and Patrick McDaniel},
  title = {{TARP}: Ticket-Based Address Resolution Protocol},
  number = {NAS-TR-0010-2005},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering, Pennsylvania State University, University Park, PA, USA},
  month = {June},
  year = {2005},
  abstract = {IP networks fundamentally rely on the Address Resolution Protocol (ARP) for proper operation.  Unfortunately, vulnerabilities in the ARP protocol enable a raft of IP-based impersonation, man-in-the-middle, or DoS attacks.  Proposed countermeasures to these vulnerabilities have yet to simultaneously address backward compatibility and cost requirements.  This paper introduces the {\it Ticket-based Address Resolution Protocol} (TARP).  TARP implements security by distributing centrally issued secure    MAC/IP address mapping attestations through existing ARP messages.  We detail the TARP protocol and its implementation within the Linux operating system.  Our experimental analysis shows that TARP improves the costs of implementing ARP security by as much as two orders of magnitude over existing protocols.  We conclude by exploring a range of operational issues associated with deploying and administering ARP security.}
}
@techreport{NAS-0011,
  pdf = {/tech_report/NAS-TR-0011-2005.pdf},
  author = {Boniface Hicks and Patrick McDaniel and Ali Hurson},
  title = {Information Flow Control in Database Security: A Case Study for Secure Programming with {Jif}},
  number = {NAS-TR-0011-2005},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering, Pennsylvania State University, University Park, PA, USA},
  month = {April},
  year = {2005},
  abstract = {Because of the increasing demands for privacy and integrity guarantees in applications that handle sensitive, electronic data, there is a need for automated software development tools and techniques for enforcing this security. Although there have been many attempts to apply security to existing systems after the fact, manifold failures indicate that this approach should be revisited. To the contrary, experience indicates that secure systems must be designed with explicit policies from the beginning and that there should be some automated, mathematically-verifiable mechanism to aid programmers in doing this correctly. Recent research in language-based security has made great strides towards the development of sophisticated and robust languages for programming with explicit security policies. Insufficient experience with these languages, however, has left them untested and impractical for writing real, distributed applications. In this paper, we present our experiences of working with Jif, a Java-based, security-typed language, in building a distributed, database application. Our experience has indicated the current impracticality of programming in Jif, but has helped us to identify language development tools and automation algorithms that could aid in making Jif more practical for developing real, distributed applications.}
}
@techreport{NAS-0012,
  pdf = {/tech_report/NAS-TR-0012-2005.pdf},
  author = {John J. Metzner},
  title = {Pulsed {ALOHA} -- a Form of Multiaccess {UWB} Communications},
  number = {NAS-TR-0012-2005},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering, Pennsylvania State University, University Park, PA, USA},
  month = {May},
  year = {2005},
  abstract = {Pulsed ALOHA is a form of impulse Ultra-Wideband communication where a bit is carried with each pulse, rather than using a time spreading code. Pulsed ALOHA and Wideband ALOHA are perfect fits to low energy wide-bandwidth communication. They permit energy-efficient higher data rates. Pulsed ALOHA has some unique advantages in collision avoidance and collision tolerance over Wideband ALOHA, when used with multi-receiver diversity reception.  Both systems have substantial advantages in rate, energy efficiency, and simplicity over using spreading codes.  A one-dimensional network example is given, which could be a model for a system along auto roadways.}
}
@techreport{NAS-0013,
  pdf = {/tech_report/NAS-TR-0013-2005.pdf},
  author = {John J. Metzner and Jade Bissat and Yuexin Liu},
  title = {Efficient, Secure and Reliable Ring Multicast in Wired or Wireless Networks},
  number = {NAS-TR-0013-2005},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering, Pennsylvania State University, University Park, PA, USA},
  month = {June},
  year = {2005},
  abstract = {A multicast scheme is described which merges the key distribution and acknowledgment tasks, allowing simple acknowledgment and frequent key changing, if desired. The scheme, denoted SAM for Secure Acknowledging Multicast, requires a ring organization. The key change requires only slightly more than one ring revolution, and need not interrupt data flow. Leaving and joining key changes are similarly easy to handle. Any group member can be the source, and the same acknowledgment policy can be used for reliable communication. The new key is encrypted by the old key, and only new messages use the new key. The joining and leaving methods are somewhat like the ``CLIQUES'' strategy, but SAM is more symmetrical, and directly incorporates acknowledgments as an added bonus. It can be applied to virtual rings in switched networks, or to rings in wireless networks. The basic ring procedure is ``stop and wait'', but in a modified method, denoted MSAM, channels can be interlaced for near continuous transmission or simultaneous many-to-many communication. For some wireless networks, average transmitted power is a more severe limitation on average bit rate than bandwidth, and stop-and-wait transmission is practical. Broadcast information can be combined with ring acknowledgment for further efficiency.}
}
@techreport{NAS-0014,
  pdf = {/tech_report/NAS-TR-0014-2005.pdf},
  author = {John J. Metzner},
  title = {Burst Erasure Correction to Improve the Efficiency of Broadband {CSMA/CD}},
  number = {NAS-TR-0014-2005},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering, Pennsylvania State University, University Park, PA, USA},
  month = {June},
  year = {2005},
  abstract = {Assume a broadcast network with a central station.  All senders send to the central station, and the central station rebroadcasts all receptions on a different band. In standard CSMA/CD, all senders stop when a collision is detected. In the suggested modified algorithm, exactly one continues to send. A colliding sender can know if it was the first to arrive at the central station. If so, it continues to send, else it stops. The one that continues to send incurs an observable-length erasure burst, and appends a redundant part to its frame to allow filling in the burst erasure. Also, a collision resolution-like algorithm is introduced which improves fairness and performance.}
}
@techreport{NAS-0015,
  pdf = {/tech_report/NAS-TR-0015-2005.pdf},
  author = {Hungyuan Hsu and Sencun Zhu and Ali Hurson},
  title = {{LIP}: A Lightweight Inter-layer Protocol for Network Access Control in Mobile Ad-Hoc Networks},
  number = {NAS-TR-0015-2005},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering, Pennsylvania State University, University Park, PA, USA},
  month = {May},
  year = {2005},
  abstract = {Most ad hoc networks do not implement any network access control, leaving these networks vulnerable to packet injection attacks where a malicious node injects a large number of packets into the network with the goal of depleting the resources of the nodes relaying the packets. To prevent such attacks, it is necessary to employ authentication mechanisms that ensure that only authorized nodes can inject traffic into the network. We design a Lightweight Inter-layer Protocol (LIP) for network access control based on efficient local broadcast authentication mechanisms. In addition to preventing attacks by unauthorized nodes, LIP can also detect and minimize the impersonation attacks by compromised insider nodes. Through a detailed simulation study, we show that LIP incurs small bandwidth overhead and has little impact on the traffic delivery ratio even in the case of high node mobility. Moreover, the transparency and independence properties of LIP allow it to be turned on/off as desired and to be integrated seamlessly with secure routing protocols, providing stronger security services for ad hoc networks.}
}
@techreport{NAS-0016,
  pdf = {/tech_report/NAS-TR-0016-2005.pdf},
  author = {Hui Song and Sencun Zhu and Guohong Cao},
  title = {Attack-Resilient Time Synchronization for Wireless Sensor Networks},
  number = {NAS-TR-0016-2005},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering, Pennsylvania State University, University Park, PA, USA},
  month = {May},
  year = {2005},
  abstract = {The existing time synchronization schemes in sensor networks were not designed with security in mind, thus leaving them vulnerable to security attacks. In this paper, we first identify various attacks that are effective against several representative time synchronization schemes, and then focus on a specific type of attack called the delay attack, which cannot be addressed by cryptographic techniques. We then propose two approaches to detect and accommodate delay attacks. Our first approach uses the generalized extreme studentized deviate (GESD) algorithm to detect multiple outliers introduced by the compromised nodes; our second approach uses a threshold derived using a time transformation technique to filter out the outliers. Finally, we show the effectiveness of these two schemes through extensive simulations.}
}
@techreport{NAS-0017,
  pdf = {/tech_report/NAS-TR-0017-2005.pdf},
  author = {Hosam Rowaihy and William Enck and Patrick McDaniel and Thomas {La Porta}},
  title = {Limiting {Sybil} Attacks in Structured Peer-to-Peer Networks},
  number = {NAS-TR-0017-2005},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering, Pennsylvania State University, University Park, PA, USA},
  month = {July},
  year = {2005},
  abstract = {Structured peer-to-peer networks are highly scalable, efficient, and reliable. These characteristics are achieved by deterministically replicating and recalling content within a widely distributed and decentralized network. One practical limitation of these networks is that they are frequently subject to Sybil attacks: malicious parties can compromise the network by generating and controlling large numbers of shadow identities. In this paper, we propose an admission control system that mitigates Sybil attacks by adaptively constructing a hierarchy of cooperative admission control nodes. Implemented by the peer-to-peer nodes, the admission control system vets joining nodes via client puzzles. A node wishing to join the network is serially challenged by the nodes from a leaf to the root of the hierarchy. Nodes completing the puzzles of all nodes in the chain are provided a cryptographic proof of the vetted identity. In this way, we exploit the structure of the hierarchy to distribute load and increase resilience to targeted attacks on the admission control system. We evaluate the security, fairness, and efficiency of our scheme analytically and via simulation. Centrally, we show that an adversary must perform days or weeks of effort to obtain even a small percentage of nodes in small peer-to-peer networks, and that this effort increases linearly with the size of the network. We further show that we can place a ceiling on the number of IDs any adversary may obtain by requiring periodic reassertion of an ID's continued validity. Finally, we show that participation in the admission control system does not interfere with a node's use of the peer-to-peer system: the loads placed on the nodes participating in admission control are vanishingly small.}
}
@techreport{NAS-0018,
  pdf = {/tech_report/NAS-TR-0018-2005.pdf},
  author = {Sophie Y. Qiu and Patrick D. McDaniel and Fabian Monrose and Aviel D. Rubin},
  title = {Characterizing Address Use Structure and Stability of Origin Advertisement in Inter-domain Routing},
  number = {NAS-TR-0018-2005},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering, Pennsylvania State University, University Park, PA, USA},
  month = {July},
  year = {2005},
  abstract = {The stability and robustness of BGP remains one of the most critical elements in sustaining today's Internet. In this paper, we study the structure and stability of origin advertisements in inter-domain routing. Using our q-chart IP address advertisement visualization tool, we explore the gross structure of IP address advertisements and show that it exhibits considerably consistent structure. We further quantitatively characterize the stability of origin advertisements by analyzing real-world BGP updates for a period of one year from multiple vantage points. We show that while repetitive prefix re-additions and subsequent withdrawals constitute a major volume of BGP updates -- due in part to a number of frequently flapping prefixes with short up-and-down cycles -- a significant portion of prefixes have high origin stability. In particular, origin changes account for less than 2\% of the BGP update traffic, with more than 90\% of the prefixes being consistently originated by the same AS for an entire year. For those prefixes involved in origin changes, approximately 57\% have only one change across the year, implying that these changes are indeed permanent. We also show that most ASes are involved in few, if any, prefix movement events, while a small number of ASes are responsible for most of the advertisement churn. Additionally, we find that a high volume of new prefixes can be attributed to actively evolving countries, that some abnormal prefix flapping is most likely due to misconfiguration, and that most of the origin changes are a result of multi-homed prefixes oscillating between their origins. This work not only contributes to a better understanding of BGP dynamics, but also provides insights for other research areas such as BGP security that rely on key assumptions pertaining to origin stability.}
}
@techreport{NAS-0019,
  pdf = {/tech_report/NAS-TR-0019-2005.pdf},
  author = {Kameswari Kotapati and Peng Liu and Thomas F. {La Porta}},
  title = {{CAT} -- A Practical {SDL}-Based Attack Attribution Toolkit for {3G} Networks},
  number = {NAS-TR-0019-2005},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering, Pennsylvania State University, University Park, PA, USA},
  month = {June},
  year = {2005},
  abstract = {This paper presents the Cross Infrastructure Attack Attribution Toolkit (CAT), which is a utility to analyze the vulnerability of 3G networks using telecommunication specifications. CAT analyzes vulnerabilities by generating attack graphs, which show the global view of the network in the event of an attack. The uniqueness of CAT is as follows: (1) usage of telecommunication specification written in Specification and Description Language (SDL) to derive attack graphs, (2) implementation of simple algorithms that output attack graphs irrespective of intruder profile and network configuration, and (3) generation of attack graphs that are exhaustive, succinct and loop free with low redundancy.}
}
@techreport{NAS-0020,
  pdf = {/tech_report/NAS-TR-0020-2005.pdf},
  author = {Jing Zhao and Guohong Cao},
  title = {{VADD}: Vehicle-Assisted Data Delivery in Vehicular Ad Hoc Networks},
  number = {NAS-TR-0020-2005},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering, Pennsylvania State University, University Park, PA, USA},
  month = {July},
  year = {2005},
  abstract = {Multi-hop data delivery through vehicular ad hoc networks is complicated by the fact that vehicular networks are highly mobile and frequently disconnected. To address this issue, we adopt the idea of carry and forward, where a moving vehicle carries the packet until a new vehicle moves into its vicinity and forwards the packet. Different from existing carry and forward solutions, we make use of the predictable vehicle mobility, which is limited by the traffic pattern and road layout. Based on the existing traffic pattern, a vehicle can find the next road to forward the packet to reduce the delay. We propose several vehicle-assisted data delivery (VADD) protocols to forward the packet to the best road with the lowest data delivery delay. Experimental results are used to evaluate the proposed solutions. Results show that the proposed VADD protocols outperform existing solutions in terms of packet delivery ratio, data packet delay, and protocol overhead. Among the proposed VADD protocols, the H-VADD protocol has much better performance.}
}
@techreport{NAS-0021,
  pdf = {/tech_report/NAS-TR-0021-2005.pdf},
  author = {Kameswari Kotapati and Peng Liu and Yan Sun and Thomas F. {La Porta}},
  title = {{A Taxonomy of Cyber Attacks on {3G} Networks}},
  number = {NAS-TR-0021-2005},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering, Pennsylvania State University, University Park, PA, USA},
  month = {January},
  year = {2005},
  abstract = {Cross Network Services are a new breed of services that have spawned from the merger of the Internet and the previously isolated wireless telecommunication network. These services act as a launching pad for a new type of security threat -- the Cross Infrastructure Cyber Attack. This paper is the first to propose an attack taxonomy for 3G networks. The uniqueness of this taxonomy is the inclusion of Cross Infrastructure Cyber Attacks in addition to the standard Single Infrastructure attacks. This paper also proposes an abstract model of the 3G network entities. This abstract model has been a vehicle in the development of the attack taxonomy, the detection of vulnerable points in the network, and the validation of 3G network vulnerability assessment tools. This paper examines the threats and vulnerabilities in a 3G network with special examination of the security threats and vulnerabilities introduced by the merger of the 3G network and the Internet. The abstract model aids this comprehensive study of security threats and vulnerabilities on 3G networks.}
}
@techreport{NAS-0022,
  pdf = {/tech_report/NAS-TR-0022-2005.pdf},
  author = {Bita Mortazavi and George Kesidis},
  title = {A Model of a Reputation Service for Incentive Engineering},
  number = {NAS-TR-0022-2005},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering, Pennsylvania State University, University Park, PA, USA},
  month = {July},
  year = {2005},
  abstract = {Reputation systems are used to provide incentives for cooperation among participants of, and generally help to secure, peer-to-peer networks. In this paper, a survey of such systems is provided, followed by the description of a model of a reputation framework that can capture the phenomenon of peer nodes misrepresenting reputations for malicious or selfish reasons. For a special case, the model is shown to converge in mean to reputations that ``reveal'' the true propensity of peer nodes to cooperate. The paper concludes with a simulation study that considers weighted voting, hierarchical trust groups, and misrepresentations.}
}
@techreport{NAS-0023,
  pdf = {/tech_report/NAS-TR-0023-2005.pdf},
  author = {Jisheng Wang and David J. Miller and George Kesidis},
  title = {Efficient Mining of the Multidimensional Traffic Cluster Hierarchy for  Digesting, Visualization, and Anomaly Identification},
  number = {NAS-TR-0023-2005},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering, Pennsylvania State University, University Park, PA, USA},
  month = {August},
  year = {2005},
  abstract = {Mining traffic traces to identify the dominant flows sent over a given link, over a specified time interval, is a valuable capability with applications to traffic auditing, simulation, and visualization of unexpected phenomena. Recently, Estan et al. advanced a comprehensive data mining structure tailored for networking data -- a parsimonious, multidimensional flow hierarchy, along with an algorithm for its construction. While they primarily targeted off-line auditing, interactive visualization of current traffic or of network simulations in progress will require real-time data mining. We suggest several improvements to Estan et al.'s algorithm that substantially reduce the computational complexity of multidimensional flow mining. We also propose computational and memory-efficient approaches for unidimensional clustering of the IP address spaces. For baseline implementations, evaluated on the New Zealand (NZIX) trace data, our method reduced CPU execution times of the Estan et al. method by a factor of more than eight. We also demonstrate the usefulness of our approach for anomaly and attack identification, based on traces from the Slammer and Code Red worms and the MIT Lincoln Labs DDoS data.}
}
@techreport{NAS-123,
  author = {Dave King and Susmit Jha and Divya Muthukumaran and
Trent Jaeger and Somesh Jha and Sanjit Seshia},
  title = {{Automating Security Mediation Placement}},
  number = {NAS-TR-0123-2010},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering,
Pennsylvania State University, University Park, PA, USA},
  month = {January},
  year = {2010},
  abstract = {We present a framework that automatically produces
resolution placement suggestions for type errors in security-typed
programs, enabling legacy code to be retrofit with comprehensive
security policy mediation.  Resolving such type errors requires
selecting a placement of mediation statements that implement runtime
security decisions, such as declassifiers and authorization checks.
Manually placing mediation statements in legacy code can be difficult,
as there may be several, interacting type errors.  In this paper, we
solve this problem by constructing a graph that has the property that
a vertex cut is equivalent to the points at which mediation statements
can be inserted to allow the program to satisfy the type system.  We
build a framework that produces suggestions that are minimum cuts of
this graph, and the framework can be customized to find suggestions
that satisfy functional and security requirements.  We have built a
tool that implements this framework for Java programs that computes
suggestions for 20,000 line programs in less than 100 seconds.  Even
without additional guidance, we find that this tool selects
suggestions where more than 80 percent of the selected mediation
points were classified as similar to those placed by expert
programmers, and reduces the number of locations that a programmer
would otherwise have to inspect using current security-typed language
tools by approximately 90 percent.}
}
@techreport{NAS-124,
  author = {Hayawardh Vijayakumar and
Guruprasad Jakka and Sandra Rueda and Joshua Schiffman and Trent
Jaeger},
  title = {{Integrity Walls: Finding Attack Surfaces
from Mandatory Access Control Policies}},
  number = {NAS-TR-0124-2010},
  institution = {Network and Security
Research Center},
  address = {Department of Computer Science and Engineering,
Pennsylvania State University, University Park, PA, USA},
  month = {February},
  year = {2010},
  abstract = {An attack surface defines the entry points that
attackers can use to launch attacks against a system. Researchers have
examined attack surfaces in the context of systems, by assessing the
reachability among processes given an access control policy, and in
the context of programs, by assessing the reachability of program
interfaces. However, an attacker's ability to reach a particular
program interface depends on the system's access control policy to
allow such access. In this report, we examine how a system's access
control policy may be used to guide a more accurate assessment of
attack surfaces. We propose an approach for defining an integrity
wall for every program that identifies the set of untrusted
operations that that program may perform. Also, we develop a runtime
monitoring tool for collecting the interfaces actually used to
perform such untrusted operations. We find that even with a
conservative SELinux MAC policy, the system TCB and server programs
have several interfaces that access untrusted data. Further, using
automated techniques we identified interfaces that have led to
several, known vulnerabilities, some recently discovered, and also we
identified interfaces in programs that were not found by previous
manual analysis. We envision that such an approach will be useful for
OS distributors who must make decisions about which programs should
be trusted with permissions that authorize untrusted operations.}
}
@techreport{NAS-125,
  author = {Trent Jaeger and Sandra Rueda
and Hayawardh Vijayakumar and Divya Muthukumaran and Joshua Schiffman
and Swarat Chaudhuri},
  title = {{Hierarchical Policy Compliance
for Virtual Machine Systems}},
  number = {NAS-TR-0125-2010},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering,
Pennsylvania State University, University Park, PA, USA},
  month = {February},
  year = {2010},
  abstract = {Mandatory access control (MAC) enforcement
mechanisms are now available for virtual machine monitors (VMMs) and
user-level programs, in addition to operating systems, presenting an
opportunity for comprehensive and flexible access control. However,
MAC policies for these individual components are often developed
independently. As a consequence, they may enforce inconsistent or
contradictory security goals. At present, we have no approach for
understanding the security risks that may result from these
inconsistencies, so VMs are deployed with an unknown number and
variety of risks. In this paper, we develop a method for identifying
integrity risks in multiple, independent MAC policies in VM systems
that has the following properties: (1) it is comprehensive in that all
risky information flows in a VM system are identified; (2) it is
incremental in that individual MAC policies (or even portions of MAC
policies) can be evaluated independently; and (3) it is useful because
risk identification shows where constraints are necessary to resolve
risks. We demonstrate this approach using the hierarchical compliance
analysis testing system Hippocrates that we constructed on Xen systems
enforcing XSM/Flask policies in the hypervisor and SELinux policies in
each of the VMs. Despite the complexity of the MAC policies, we are
able to identify a comprehensive set of integrity risks in 9-14
seconds per VM evaluated.}
}
@techreport{NAS-126,
  author = {Joshua Schiffman and Xinwen Zhang and Simon Gibbs},
  title = {{DAuth: Fine-grained Authorization Delegation for
Distributed Web Application Consumers}},
  number = {NAS-TR-0126-2010},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering,
Pennsylvania State University, University Park, PA, USA},
  month = {March},
  year = {2010},
  abstract = {Web applications are becoming the predominant
means by which users interact with online content. However, current
authentication approaches use a single authentication credential to
manage access permissions, which is too inflexible for distributed
programs with unique security and privacy requirements for each
component. In this paper, we introduce DAuth, an authorization
mechanism that allows fine-grained and flexible control of access
permissions derived from a single authentication credential for
distributed consumers of web applications. We implement DAuth as a
proxy for a Twitter social networking application within our
distributed Elastic Application framework and find it introduces
negligible overhead and requires only minor modification of existing
applications. Through our evaluation, we demonstrate DAuth improves on
existing web authentication mechanisms to support distributed web
application consumers and can be implemented as a proxy to web
applications that do not wish to develop their own implementation.}
}
@techreport{NAS-127,
  author = {Joshua Schiffman and Thomas Moyer and Hayawardh
Vijayakumar and Trent Jaeger and Patrick McDaniel},
  title = {{Seeding Clouds with Trust Anchors}},
  number = {NAS-TR-0127-2010},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering,
Pennsylvania State University, University Park, PA, USA},
  month = {April},
  year = {2010},
  abstract = {We are finding that customers with
security-critical data processing needs are beginning to push back
strongly against using clouds, so we examine how clouds may prove that
they enforce a customer’s data security requirements. Researchers have
proposed integrity measurement methods for building proofs of system
integrity, but while there have been some successes on single-purpose
systems or for specific threats such methods have not been adopted
broadly. However, we find that the centralized management of cloud
data centers and our proposed approach to leverage integrity
enforcement results in a practical approach for customers to verify
data security in the cloud. Specifically, we propose a cloud verifier
service that generates integrity proofs for customers to verify the
integrity and access control enforcement abilities of the cloud
platform to protect the integrity of customer’s application VMs in
IaaS clouds. While a cloud-wide verifier service could present a
significant system bottleneck, we demonstrate that aggregating proofs
enables significant overhead reductions. As a result, we find several,
promising research directions to pursue for building cloud systems for
security-critical applications and the skeptics that run them.}
}
@techreport{NAS-128,
  author = {Raju Kumar and Sharanya Eswaran and Thomas {La Porta}},
  title = {{End-to-End Rate Selection for Opportunistic
Reception in Multi-Rate Wireless Networks}},
  number = {NAS-TR-0128-2010},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering,
Pennsylvania State University, University Park, PA, USA},
  month = {April},
  year = {2010},
  abstract = {In this paper we propose an end-to-end algorithm,
called NUM-RS, that is both multi-rate and opportunistic
reception-aware, for selecting link transmission rates and source
rates in a multi-hop multi-rate wireless network. Prior works on rate
selection, including those that are opportunistic reception aware,
perform rate selection on a hop-by-hop basis, attempting to maximize
the throughput on each link. Our algorithm leverages the Network
Utility Maximization (NUM) framework, thus providing end-to-end
semantics for rate selection and proportional fairness with low
overhead. By using end-to-end semantics NUM-RS considers both source
rates and congestion on links used by a flow when selecting link
rates. Our results show that NUM-RS increasingly outperforms
contemporary hop-by-hop rate selection schemes as the number of hops
in the flows increases. For example, 20% and 50% of 8-hop flows exhibit
performance gains of at least 36% and 15%, respectively, in terms of
data delivered to the destinations.}
}
@techreport{NAS-129,
  author = {Raghav Bhaskar and Srivatsan Laxman and Adam Smith
and Abhradeep Thakurta},
  title = {{Discovering Frequent Patterns in Sensitive Data}},
  number = {NAS-TR-0129-2010},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering,
Pennsylvania State University, University Park, PA, USA},
  month = {April},
  year = {2010},
  abstract = {Discovering frequent patterns from data is a
popular exploratory  technique in data mining. However, if the data
are sensitive (e.g., patient health records, user behavior records),
releasing information about significant patterns or trends carries
significant risk to privacy. This paper shows how one can accurately
discover and release the most significant patterns along with their
frequencies in a data set containing sensitive information, while
providing rigorous guarantees of privacy for the individuals whose
information is stored there. We present two efficient algorithms for
discovering the K most frequent patterns in a data set of sensitive
records. Our algorithms satisfy differential privacy, a recently
introduced definition that provides meaningful privacy guarantees in
the presence of arbitrary external information. Differentially private
algorithms require a degree of uncertainty in their output to preserve
privacy. Our algorithms handle this by returning ‘noisy’ lists of
patterns that are close to the actual list of K most frequent patterns
in the data. We define a new notion of utility that quantifies the
output accuracy of private top K pattern mining algorithms. In typical
data sets, our utility criterion implies low false positive and false
negative rates in the reported lists. We prove that our methods meet
the new utility criterion; we also demonstrate the performance of our
algorithms through extensive experiments on the transaction data sets
from the FIMI repository. While the paper focuses on frequent pattern
mining, the techniques developed here are relevant whenever the data
mining output is a list of elements ordered according to an
appropriately ‘robust’ measure of interest.}
}
@article{TBE10,
  author = {Patrick Traynor and Kevin Butler and William Enck and Kevin Borders and Patrick McDaniel},
  title = {{malnets: Large-Scale Malicious Networks via Compromised Wireless Access Points}},
  journal = {Journal of Security and Communication Networks},
  volume = {3},
  number = {2},
  year = {2010},
  month = {March},
  pages = {102--113},
  supercedes = {NAS-0048},
  abstract = {
Densely populated areas are increasingly filled with vulnerable wireless
routers set up by unsophisticated users.  In isolation, such routers
appear to represent only a minor threat, but in aggregate, the threat
can be much greater.  We introduce the notion of malnets: networks
of adversary-controlled wireless routers targeted to a physical
geography.  Similar to Internet worms such as Slammer and Code-Red,
malnets are created by the recursive compromise of targeted devices.
However, unlike their traditionally wired counterparts, malnet worms
exploit only other routers that are within their transmission range.
The malnet thus creates a parallel wireless infrastructure that is a)
completely under control of the adversary, and b) spans a targeted
physical area, creating a valuable infrastructure for a variety of
virtual and physical attacks.  We initially study the propagation
characteristics of commercial routers and model inter-router
connectivity using publicly available war-driving data.  The resulting
characterization is applied to well-known epidemiological models to
explore the success rates and speeds of malnet creation across cities
such as New York, Atlanta, and Los Angeles.  Finally, we use a sampling
of available exploits to demonstrate the construction of multi-vector,
multi-platform worms capable of targeting wireless routers.  Our
analysis shows that an adversary can potentially deploy a malnet of over
24,000 routers in Manhattan in less than two hours.  Through this work
we show that malnets are not only feasible but can be efficiently
deployed.
}
}
@article{emm+09,
  author = {William Enck and Thomas Moyer and Patrick McDaniel and 
   Subhabrata Sen and Panagiotis Sebos and Sylke Spoerel and 
   Albert Greenberg and Yu-Wei Eric Sung and Sanjay Rao and 
   William Aiello},
  title = {Configuration Management at Massive Scale: System Design 
   and Experience},
  journal = {IEEE Journal on Selected Areas in Communications (JSAC)},
  year = {2009},
  extends = {ems+07},
  abstract = {
  The development and maintenance of network device configurations is one 
of the central challenges faced by large network providers.  Current 
network management systems fail to meet this challenge primarily because 
of their inability to adapt to rapidly evolving customer and 
provider-network needs, and because of mismatches between the conceptual 
models of the tools and the services they must support.  In this paper, 
we present the PRESTO configuration management system that attempts to 
address these failings in a comprehensive and flexible way. Developed 
for and used during the last 5 years within a large ISP network, PRESTO 
constructs device-native configurations based on the composition of   
configlets representing different services or service options. 
Configlets are compiled by extracting and manipulating data from 
external systems as directed by the PRESTO configuration scripting and 
template language.  We outline the configuration management needs of 
large-scale network providers, introduce the PRESTO system and 
configuration language, and reflect upon our experiences developing 
PRESTO configured VPN and VoIP services. In doing so, we describe how PRESTO 
promotes healthy configuration management practices.  }
}
@article{TEM+08,
  author = {Patrick Traynor and William Enck and Patrick McDaniel and Thomas {La Porta}},
  title = {Mitigating Attacks on Open Functionality in SMS-Capable Cellular Networks},
  journal = {IEEE/ACM Transactions on Networking (TON)},
  year = {2008},
  extends = {teml06},
  url = {http://www.cse.psu.edu/~traynor/job/traynor_ton08.pdf},
  abstract = {The transformation of telecommunications networks from
    homogeneous closed systems providing only voice services to
    Internet-connected open networks that provide voice and data
    services presents significant security challenges. For example,
    recent research illustrated that a carefully crafted DoS attack via
    text messaging could incapacitate all voice communications in a
    metropolitan area with little more than a cable modem. This attack
    highlights a growing threat to these systems; namely, cellular
    networks are increasingly exposed to adversaries both in and outside
    the network. In this paper, we use a combination of modeling and
    simulation to demonstrate the feasibility of targeted text messaging
    attacks. Under realistic network conditions, we show that
    adversaries can achieve blocking rates of more than 70% with only
    limited resources. We then develop and characterize five techniques
    from within two broad classes of countermeasures - queue management
    and resource provisioning. Our analysis demonstrates that these
    techniques can eliminate or extensively mitigate even the most
    intense targeted text messaging attacks. We conclude by considering
    the tradeoffs inherent to the application of these techniques in
    current and next generation telecommunications networks.}
}
@article{TCWHM08,
  author = {Patrick Traynor and Michael Chien and Scott Weaver and Boniface Hicks and Patrick McDaniel},
  title = {{Non-Invasive Methods for Host Certification}},
  journal = {{ACM Transactions on Information and System Security (TISSEC)}},
  year = {2008},
  abstract = {Determining whether a user or system is exercising
      appropriate security practices is difficult in any context.  Such
      difficulties are particularly pronounced when uncontrolled or
      unknown platforms join public networks.  Commonly practiced
      techniques used to vet these hosts, such as system scans, have the
      potential to infringe upon the privacy of users. In this paper, we
      show that it is possible for clients to prove both the presence
      and proper functioning of security infrastructure without allowing
      unrestricted access to their system. We demonstrate this approach,
      specifically applied to anti-virus security, by requiring clients
      seeking admission to a network to positively identify the presence
      or absence of malcode in a series of puzzles. The implementation
      of this mechanism and its application to real networks are also
      explored. In so doing, we demonstrate that it is not necessary for
      an administrator to be invasive to determine whether a client
      implements required security practices.}
}
@article{tem+07,
  author = {Patrick Traynor and William Enck and Patrick McDaniel and Thomas {La Porta}},
  title = {Exploiting Open Functionality in SMS-Capable Cellular Networks},
  journal = {Journal of Computer Security},
  publisher = {IOS Press},
  year = {2007},
  extends = {etml05},
  url = {http://www.cse.psu.edu/~traynor/job/traynor_jcs08.pdf},
  abstract = {Cellular networks are a critical component of the economic
  and social infrastructures in which we live. In addition to voice
  services, these networks deliver alphanumeric text messages to the
  vast majority of wireless subscribers. To encourage the expansion of
  this new service, telecommunications companies offer connections
  between their networks and the Internet. The ramifications of such
  connections, however, have not been fully recognized. In this paper,
  we evaluate the security impact of the SMS interface on the
  availability of the cellular phone network. Specifically, we describe
  the ability to deny voice service to cities the size of Washington
  D.C. and Manhattan with little more than a cable modem. Moreover,
  attacks targeting the entire United States are feasible with resources
  available to medium-sized zombie networks. This analysis begins with
  an exploration of the structure of cellular networks. We then
  characterize network behavior and explore a number of reconnaissance
  techniques aimed at effectively targeting attacks on these systems. We
  conclude by discussing countermeasures that mitigate or eliminate the
  threats introduced by these attacks.}
}
@article{lsz+07,
  author = {K.C.K. Lee and Josh Schiffman and B. Zheng and W.C. Lee},
  title = {Round-Eye: A System for Tracking Nearest Surrounders in Moving Object Environments},
  journal = {Journal of Systems and Software},
  publisher = {Elsevier},
  year = {2007},
  volume = {80},
  pages = {2063-2076},
  abstract = {This paper presents "Round-Eye", a system for tracking nearest surrounding objects
  (or nearest surrounders) in moving object environments. This system provides a platform for 
  surveillance applications. The core part of this system is the continuous nearest surrounder (NS)
  query that maintains views of the nearest objects at distinct angles from query points.
  This query differs from conventional spatial queries such as
  range queries and nearest neighbor queries in that an NS query considers both the distance and angular
  aspects of objects with respect to a query point at the same time.
  In our system framework, a centralized server is dedicated (1) to collect
  location updates of both objects and queries, (2) to determine which NS queries
  are invalidated in the presence of object/query location changes and the corresponding
  result changes, if any, and (3) to refresh the affected query answers.
  To enhance the system performance in terms of processing time and network bandwidth
  consumption, we propose various techniques, namely, safe region, partial query reevaluation,
  and incremental query result update. Through simulations, we evaluate our system with the 
  proposed techniques over a wide range of settings.}
}
@article{lem07,
  author = {Wesam Lootah and William Enck and Patrick McDaniel},
  title = {TARP: Ticket-based Address Resolution Protocol},
  journal = {Computer Networks},
  publisher = {Elsevier},
  year = {2007},
  extends = {lem05},
  url = {http://www.sciencedirect.com/science?_ob=MImg&_imagekey=B6VRG-4P0089K-1-1&_cdi=6234&_user=209810&_orig=search&_coverDate=10%2F24%2F2007&_sk=999489984&view=c&wchp=dGLbVtb-zSkWA&md5=cd0c57e0fcd12472753fb3c4933840db&ie=/sdarticle.pdf},
  abstract = {IP networks fundamentally rely on the Address Resolution
  Protocol (ARP) for proper operation. Unfortunately, vulnerabilities in
  ARP enable a raft of Internet Protocol (IP)-based impersonation,
  man-in-the-middle, or Denial of Service (DoS) attacks. Proposed
  countermeasures to these vulnerabilities have yet to simultaneously
  address backward compatibility and cost requirements. This paper
  introduces the Ticket-based Address Resolution Protocol (TARP). TARP
  implements security by distributing centrally issued secure IP/Medium
  Access Control (MAC) address mapping attestations through existing ARP
  messages. We detail TARP and its implementation within the Linux
  operating system. We also detail the integration of TARP with the
  Dynamic Host Configuration Protocol (DHCP) for dynamic ticket
  distribution. Our experimental analysis shows that TARP improves the
  costs of implementing ARP security by as much as two orders of
  magnitude over existing protocols.  We conclude by exploring a range
  of operational issues associated with deploying and administering ARP
  security.}
}
@article{TKCZC+07,
  author = {Patrick Traynor and Raju Kumar and Heesook Choi and Sencun Zhu and Guohong Cao and Thomas {La Porta}},
  title = {{Efficient Hybrid Security Mechanisms for Heterogeneous Sensor Networks}},
  journal = {{IEEE Transactions on Mobile Computing}},
  volume = {6},
  number = {6},
  month = {June},
  year = {2007},
  pages = {663-677},
  url = {http://siis.cse.psu.edu/pubs/tkczc07.pdf},
  abstract = {Many applications that make use of sensor networks require
  secure communication. Because asymmetric-key solutions are difficult
  to implement in such a resource-constrained environment, symmetric-key
  methods coupled with a priori key distribution schemes have been
  proposed to achieve the goals of data secrecy and integrity. These
  approaches typically assume that all nodes are similar in terms of
  capabilities and, hence, deploy the same number of keys in all sensors
  in a network to provide the aforementioned protections. In this paper,
  we demonstrate that a probabilistic unbalanced distribution of keys
  throughout the network that leverages the existence of a small
  percentage of more capable sensor nodes can not only provide an equal
  level of security, but also reduce the consequences of node
  compromise. To fully characterize the effects of the unbalanced key
  management system, we design, implement, and measure the performance
  of a complementary suite of key establishment protocols known as
  LIGER. Using their predeployed keys, nodes operating in isolation from
  external networks can securely and efficiently establish keys with
  each other. Should resources such as a backhaul link to a key
  distribution center (KDC) become available, networks implementing
  LIGER automatically incorporate and benefit from such facilities.
  Detailed experiments demonstrate that the unbalanced distribution in
  combination with the multimodal LIGER suite offers a robust and
  practical solution to the security needs in sensor networks.}
}
@article{ces+07,
  author = {Heesook Choi and William Enck and Jaesheung Shin and Patrick McDaniel and Thomas {La Porta}},
  title = {ASR: Anonymous and Secure Reporting of Traffic Forwarding Activity in Mobile Ad Hoc Networks},
  journal = {Wireless Networks (WINET)},
  publisher = {ACM/Kluwer},
  year = {2007},
  month = {May},
  extends = {ces+05},
  url = {http://www.cse.psu.edu/~enck/pubs/asr_winet.pdf},
  abstract = {Nodes forward data on behalf of each other in mobile ad
  hoc networks. In a civilian application, nodes are assumed to be
  selfish and rational, i.e., they pursue their own self-interest.
  Hence, the ability to accurately measure traffic forwarding is
  critical to ensure proper network operation. These measurements are
  also often used to credit nodes based on their level of participation,
  or to detect loss. Past solutions employ neighbor monitoring and
  reporting on traffic forwarding of nodes. These methods are not
  applicable in civilian networks in which neighbor nodes lack the
  desire or ability to perform the monitoring function. Such
  environments occur frequently where neighbor hosts are resource
  constrained, or in networks where directional antennas are used and
  reliable eavesdropping is difficult or impossible.

In this paper, we propose a protocol that uses nodes on the data path to securely produce packet forwarding reports. Reporting nodes are chosen randomly and secretly so that malicious nodes cannot modify their behavior based upon the monitoring point. The integrity and authenticity of reports are preserved through the use of secure link layer acknowledgments and monitoring reports. The robustness of the reporting mechanism is strengthened by forwarding the report to multiple destinations (source and destination). We explore the security, cost, and accuracy of our protocol.}
}
@article{mabi06,
  author = {Patrick McDaniel and W. Aiello and Kevin Butler and J. Ioannidis},
  title = {Origin Authentication in Interdomain Routing},
  journal = {Computer Networks},
  year = {2006},
  month = {November},
  pages = {2953-2980},
  volume = {50},
  number = {16},
  url = {http://www.patrickmcdaniel.org/pubs/comnet06.pdf},
  abstract = {Attacks against Internet routing are increasing in number
  and severity. Contributing greatly to these attacks is the absence of
  origin authentication; there is no way to validate claims of address
  ownership or location. The lack of such services not only enables
  attacks by malicious entities, but also indirectly allows seemingly
  inconsequential misconfigurations to disrupt large portions of the
  Internet. This paper considers the semantics, design, and costs of
  origin authentication in interdomain routing. We formalize the
  semantics of address delegation and use on the Internet, and develop
  and characterize original, broad classes of origin authentication
  proof systems. We estimate the address delegation graph representing
  the current use of IPv4 address space using available routing data.
  This effort reveals that current address delegation is dense and
  relatively static: as few as 16 entities perform 80% of the delegation
  on the Internet. We conclude by evaluating the proposed services via
  trace-based simulation, which demonstrates that the enhanced proof
  systems can significantly reduce resource costs associated with origin
  authentication.}
}
@article{mp06a,
  author = {Patrick McDaniel and Atul Prakash},
  title = {{Enforcing Provisioning and Authorization Policy in the Antigone System}},
  journal = {{Journal of Computer Security}},
  volume = {14},
  number = {9},
  month = {November},
  pages = {483-511},
  year = {2006},
  extends = {mp03,mp02},
  url = {http://www.patrickmcdaniel.org/pubs/jcs05.pdf},
  abstract = {Works in communication security policy have recently
  focused on general-purpose policy languages and evaluation
  algorithms. However, because the supporting frameworks often defer 
  enforcement, the correctness of a realization of these policies in
  software is limited by the quality of domain-specific implementations.
  This paper introduces the Antigone communication security policy
  enforcement framework. The Antigone framework fills the gap between
  representations and enforcement by implementing and integrating the
  diverse security services needed by policy.  Policy is enforced by the
  run-time composition, configuration, and regulation of security
  services.  We present the Antigone architecture, and demonstrate
  non-trivial applications and policies. A profile of policy enforcement
  performance is developed, and key architectural enhancements identified. 
  We conclude by considering the advantages and disadvantages of
  a broad range of software architectures appropriate for policy
  enforcement.}
}
@article{mp06b,
  author = {Patrick McDaniel and Atul Prakash},
  title = {{Methods and Limitations of Security Policy Reconciliation}},
  journal = {{ACM Transactions on Information and System Security (TISSEC)}},
  volume = {9},
  number = {3},
  month = {August},
  pages = {259-291},
  year = {2006},
  extends = {mp02},
  url = {http://www.patrickmcdaniel.org/pubs/tissec06.pdf},
  abstract = {A security policy specifies session participant
  requirements. However, existing frameworks provide limited
  facilities for the automated reconciliation of participant policies.
  This paper considers the limits and methods of reconciliation in a
  general-purpose policy model. We identify an algorithm for efficient
  two-policy reconciliation, and show that, in the worst-case,
  reconciliation of three or more policies is intractable. Further, we
  suggest efficient heuristics for the detection and resolution of
  intractable reconciliation. Based upon the policy model, we describe
  the design and implementation of the Ismene policy language. The
  expressiveness of Ismene, and indirectly of our model, is demonstrated
  through the representation and exposition of policies supported by
  existing policy languages. We conclude with brief notes on the
  integration and enforcement of Ismene policy within the Antigone
  communication system.}
}
@article{mp05,
  author = {P. McDaniel and A. Prakash},
  title = {Security Policy Enforcement in the Antigone System},
  journal = {Journal of Computer Security},
  note = {Accepted for publication. {\it Draft}},
  year = {2005},
  url = {http://siis.cse.psu.edu/pubs/jcs05.pdf},
  abstract = {Works in communication security policy have recently
  focused on general-purpose policy languages and evaluation algorithms.
  However, because the supporting frameworks often defer enforcement,
  the correctness of a realization of these policies in software is
  limited by the quality of domain-specific implementations.  This paper
  introduces the Antigone communication security policy enforcement
  framework.  The Antigone framework fills the gap between
  representations and enforcement by implementing and integrating the
  diverse security services needed by policy.  Policy is enforced by the
  run-time composition, configuration, and regulation of security
  services.  We present the Antigone architecture, and demonstrate
  non-trivial applications and policies.  A profile of policy
  enforcement performance is developed, and key architectural
  enhancements identified.  We conclude by considering the advantages
  and disadvantages of a broad range of software architectures
  appropriate for policy enforcement.}
}
@article{pznm+05,
  author = {Matthew Pirretti and Sencun Zhu and Vijaykrishnan
  Narayanan and Patrick McDaniel and Mahmut Kandemir and Richard
  Brooks},
  title = {{The Sleep Deprivation Attack in Sensor Networks: Analysis and Methods of Defense}},
  year = {2005},
  month = {June},
  pages = {267-287},
  volume = {2},
  number = {3},
  journal = {International Journal of Distributed Sensor Networks},
  url = {http://www.patrickmcdaniel.org/pubs/ijdsn05.pdf},
  abstract = {The ability of sensor nodes to enter a low power sleep
  mode is very useful for extending network longevity. We show how
  adversary nodes can exploit clustering algorithms to ensure their
  selection as cluster heads for the purpose of launching attacks that
  prevent victim nodes from sleeping. We present two such attacks: the
  barrage attack and the sleep deprivation attack. The barrage attack
  bombards victim nodes with legitimate requests, whereas the sleep
  deprivation attack makes requests of victim nodes only as often as is
  necessary to keep the victims awake. We show that while the barrage
  attack causes its victims to spend slightly more energy, it is more
  easily detected and requires more effort on behalf of the attacker.
  Thus we have focused our research on the sleep deprivation attack. Our
  analysis indicates that this attack can nullify any energy savings
  obtained by allowing sensor nodes to enter sleep mode. We also analyze
  three separate methods for mitigating this attack: the random vote
  scheme, the round robin scheme, and the hash-based scheme. We have
  evaluated these schemes based upon their ability to reduce the
  adversary's attack, the amount of time required to select a cluster
  head, and the amount of energy required to perform each scheme. We
  have found that of the three clustering methods analyzed, the
  hash-based scheme is the best at mitigating the sleep deprivation
  attack.}
}
@article{bcc+04,
  author = {S. Byers and L. Cranor and E. Cronin and D. Kormann and
  P. McDaniel},
  title = {{Analysis of Security Vulnerabilities in the Movie Production
  and Distribution Process}},
  year = {2004},
  month = {August},
  pages = {619-644},
  volume = {28},
  number = {8},
  journal = {Telecommunications Policy},
  url = {http://www.patrickmcdaniel.org/pubs/tcpolicy04.pdf},
  abstract = {Unauthorized copying of movies is a major concern for the
  motion picture industry.  While unauthorized copies of movies have
  been distributed via portable physical media for some time, low-cost,
  high-bandwidth Internet connections and peer-to-peer file sharing
  networks provide highly efficient distribution media.  Many movies are
  showing up on file sharing networks shortly after, and in some cases
  prior to, theatrical release.  It has been argued that the
  availability of unauthorized copies directly affects theater
  attendance and DVD sales, and hence represents a major financial
  threat to the movie industry.  Our research attempts to determine the
  source of unauthorized copies by studying the availability and
  characteristics of recent popular movies in file sharing networks.  We
  developed a data set of 312 popular movies and located one or more
  samples of 183 of these movies on file sharing networks, for a total
  of 285 movie samples. 77% of these samples appear to have been leaked
  by industry insiders. Most of our samples appeared on file sharing
  networks prior to their official consumer DVD release date. Indeed, of
  the movies that had been released on DVD as of the time of our study,
  only 5% first appeared after their DVD release date on a web site that
  indexes file sharing networks, indicating that consumer DVD copying
  currently represents a relatively minor factor compared with insider
  leaks. We perform a brief analysis of the movie production and
  distribution process and identify potential security vulnerabilities
  that may lead to unauthorized copies becoming available to those who
  may wish to redistribute them. Finally, we offer recommendations for
  reducing security vulnerabilities in the movie production and
  distribution process.}
}
@article{jez04,
  author = {T. Jaeger and A. Edwards and X. Zhang},
  title = {Consistency Analysis of Authorization Hook Placement in the {Linux}
  Security Modules Framework},
  journal = {ACM Transactions on Information and System Security (TISSEC)},
  volume = {7},
  number = {2},
  pages = {175-205},
  month = {May},
  year = {2004},
  url = {http://www.cse.psu.edu/~tjaeger/papers/tissec2004.pdf},
  abstract = {We present a consistency analysis approach to assist the
  Linux community in verifying the correctness of authorization hook
  placement in the Linux Security Modules (LSM) framework. The LSM
  framework consists of a set of authorization hooks inserted into the
  Linux kernel to enable additional authorizations to be performed
  (e.g., for mandatory access control). When compared to system call
  interposition, authorization within the kernel has both security and
  performance advantages, but it is more difficult to verify that
  placement of the LSM hooks ensures that all the kernel's
  security-sensitive operations are authorized. Static analysis has been
  used previously to verify mediation (i.e., that some hook mediates
  access to a security-sensitive operation), but that work did not
  determine whether the necessary set of authorizations was checked. In
  this paper, we develop an approach to test the consistency of the
  relationships between security-sensitive operations and LSM hooks. The
  idea is that whenever a security-sensitive operation is performed as
  part of specifiable event, a particular set of LSM hooks must have
  mediated that operation. This work demonstrates that the number of
  events that impact consistency is manageable and that the notion of
  consistency is useful for verifying correctness. We describe our
  consistency approach for performing verification, the implementation
  of run-time tools that implement this approach, the anomalous
  situations found in an LSM-patched Linux 2.4.16 kernel, and an
  implementation of a static analysis version of this approach.}
}
@article{jze03,
  author = {T. Jaeger and X. Zhang and A. Edwards},
  title = {Policy Management Using Access Control Spaces},
  journal = {ACM Transactions on Information and System Security (TISSEC)},
  volume = {6},
  number = {3},
  pages = {327-364},
  month = {August},
  year = {2003},
  url = {http://www.cse.psu.edu/~tjaeger/papers/tissec2003.pdf},
  abstract = { We present the concept of an access control space and
  investigate how it may be useful in managing access control policies.
  An access control space represents the permission assignment state of
  a subject or role. For example, the set of permissions explicitly
  assigned to a role defines its specified subspace, and the set of
  constraints precluding assignment to that role defines its prohibited
  subspace. In analyzing these subspaces, we identify two problems: (1)
  often a significant portion of an access control space has unknown
  assignment semantics, which indicates that the policy is
  underspecified; and (2) often high-level assignments and constraints
  that are easily understood result in conflicts, where resolution often
  leads to significantly more complex specifications. We have developed
  a prototype system, called Gokyo, that computes access control spaces.
  Gokyo identifies the unknown subspace to assist system administrators
  in developing more complete policy specifications. Also, Gokyo
  identifies conflicting subspaces and enables system administrators to
  resolve conflicts in a variety of ways in order to preserve the
  simplicity of constraint specification. We demonstrate Gokyo by
  analyzing a Web server policy example and examine its utility by
  applying it to the SELinux example policy. Even for the extensive
  SELinux example policy, we find that only eight additional expressions
  are necessary to resolve Apache administrator policy conflicts.}
}
@article{JT01,
  title = {Practical safety in flexible access control models},
  author = {T. Jaeger and J. Tidswell},
  journal = {ACM Transactions on Information and System Security (TISSEC)},
  number = {2},
  volume = {4},
  pages = {158-190},
  year = {2001},
  url = {http://www.cse.psu.edu/~tjaeger/papers/tissec2001.pdf},
  abstract = {Assurance that an access control configuration will not
  result in the leakage of a right to an unauthorized principal, called
  safety, is fundamental to ensuring that the most basic of access
  control policies can be enforced. It has been proven that the safety
  of an access control configuration cannot be decided for a general
  access control model, such as Lampson's access matrix, so safety is
  achieved either through the use of limited access control models or
  the verification of safety via constraints. Currently, almost all
  safety critical systems use limited access control models, such as
  Bell--LaPadula or Domain and Type Enforcement, because constraint
  expression languages are far too complex for typical administrators to
  use properly. However, researchers have identified that most
  constraints belong to one of a few basic types, so our goal is to
  develop a constraint expression model in which these constraints can
  be expressed in a straightforward way and extensions can be made to
  add other constraints, if desired. Our approach to expressing
  constraints has the following properties: (1) an access control policy
  is expressed using a graphical model in which the nodes represent sets
  (e.g., of subjects, objects, etc.) and the edges represent binary
  relationships on those sets and (2) constraints are expressed using a
  few, simple set operators on graph nodes. The basic graphical model is
  very simple, and we extend this model only as necessary to satisfy the
  identified constraint types. Since the basic graphical model is also
  general, further extension to support other constraints is possible,
  but such extensions should be made with caution as each increases the
  complexity of the model. Our hope is that by keeping the complexity of
  constraint expression in check, flexible access control models, such
  as role-based access control, may also be used for expressing access
  control policy for safety-critical systems.}
}
@article{FHW00,
  author = {Moreno Falaschi and Patrick Hicks and William Winsborough},
  journal = {Journal of Logic Programming},
  month = {March},
  number = {3},
  pages = {185-215},
  pdf = {http://www.cse.psu.edu/~phicks/papers/demandTransformAnalysis.pdf},
  title = {Demand Transformation Analysis for Concurrent Constraint Programs},
  volume = {41},
  year = {2000},
  abstract = {
This paper presents a demand transformation analysis that maps a
predicate's output demands to its input demands. This backward dataflow
analysis for concurrent constraint programs is constructed in the
framework of abstract interpretation. In the context of stream
parallelism, this analysis identifies an amount of input data for which
predicate execution can safely wait without danger of introducing
deadlock. We assume that programs are well-moded and prove that our
analysis is safe. We have constructed an implementation of this analysis
and tested it on some small, illustrative programs and have determined
that it gives useful results in practice. We identify several
applications of the analysis results to distributed implementations of
concurrent constraint languages, including thread construction and
communication granularity. This analysis will enable existing
computation cost estimation analyses to be applied to stream-parallel
logic languages. }
}
@article{HH00,
  author = {John Hannan and Patrick Hicks},
  journal = {Journal of Higher Order and Symbolic Computation},
  number = {3},
  pages = {179--216},
  pdf = {http://www.cse.psu.edu/~phicks/papers/HOUncurryingJournal.pdf},
  title = {Higher-Order Uncurrying},
  volume = {13},
  year = {2000},
  abstract = {
We present a formal specification of unCurrying for a higher-order,
functional language with ML-style let-polymorphism. This specification
supports the general unCurrying of functions, even for functions that
are passed as arguments or returned as values. The specification also
supports partial unCurrying of any consecutive parameters of a function,
rather than only unCurrying all of a function's parameters. We present
the specification as a deductive system that axiomatizes a judgment
relating a source term with an unCurried form of the term. We prove that
this system relates only typable terms and that it is correct with
respect to an operational semantics. We define a practical algorithm,
based on algorithm W, that implements unCurrying and prove this
algorithm sound and complete with respect to the deductive system. This
algorithm generates maximally unCurried forms of source terms. These
results provide a declarative framework for reasoning about unCurrying
and support a richer form of unCurrying than is currently found in
compilers for functional languages. }
}
@article{jpl+99,
  author = {T. Jaeger and A. Prakash and J. Liedtke and N. Islam},
  title = {Flexible Control of Downloaded Executable Content},
  journal = {ACM Transactions on Information and System Security},
  volume = {2},
  number = {2},
  pages = {177-228},
  month = {May},
  year = {1999},
  url = {http://www.cse.psu.edu/~tjaeger/papers/tissec1999.pdf},
  abstract = {We present a security architecture that enables system and
  application access control requirements to be enforced on
  applications composed from downloaded executable content. Downloaded
  executable content consists of messages downloaded from remote hosts
  that contain executables that run, upon receipt, on the downloading
  principal's machine. Unless restricted, this content can perform
  malicious actions, including accessing its downloading principal's
  private data and sending messages on this principal's behalf. Current
  security architectures for controlling downloaded executable content
  (e.g., JDK 1.2) enable specification of access control requirements
  for content based on its provider and identity. Since these access
  control requirements must cover every legal use of the class, they may
  include rights that are not necessary for a particular application of
  content. Therefore, using these systems, an application composed from
  downloaded executable content cannot enforce its access control
  requirements without the addition of application-specific security
  mechanisms. In this paper, we define an access control model with the
  following properties: (1) system administrators can define system
  access control requirements on applications and (2) application
  developers can use the same model to enforce application access
  control requirements without the need for ad hoc security mechanisms.
  This access control model uses features of role-based access control
  models to enable (1) specification of a single role that applies to
  multiple application instances; (2) selection of a content's access
  rights based on the content's application and role in the application;
  (3) consistency maintained between application state and content
  access rights; and (4) control of role administration. We detail a
  system architecture that uses this access control model to implement
  secure collaborative applications. Lastly, we describe an
  implementation of this architecture, called the Lava security
  architecture.}
}
@incollection{j99a,
  title = {Access Control in Configurable Systems},
  author = {Trent Jaeger},
  year = {1999},
  booktitle = {Secure Internet Programming},
  pages = {289-316}
}
@article{IAJ+97,
  author = {Nayeem Islam and Rangachari Anand and Trent Jaeger and Josyula R. Rao},
  title = {{A flexible security system for using Internet content}},
  year = {1997},
  month = {September},
  pages = {52-59},
  volume = {14},
  number = {5},
  journal = {IEEE Software},
  abstract = {The Internet and World Wide Web have introduced a powerful
  new way to acquire content. Previously, users had to either buy or
  order content on coded disks. They can now download executable code
  and applications directly. From the users' viewpoint, this decreases
  the amount of software stored on their machines. It also lets content
  providers customize applications by combining different vendors'
  content. Downloading content from the Internet brings risks: content
  that appears reputable may be malicious; given system access, it can
  damage or destroy data. To contend with this risk, the authors
  developed FlexxGuard, a flexible interpreter that dynamically derives
  protection domains and uses those domains to authorize content
  operations. }
}
@inproceedings{SMSJM09,
  author = {Joshua Schiffman and Thomas Moyer and Christopher Shal and 
           Trent Jaeger and Patrick McDaniel},
  title = {{Justifying Integrity Using a Virtual Machine Verifier}},
  booktitle = {25th Annual Computer Security Applications Conference (ACSAC)},
  month = {December},
  year = {2009},
  abstract = {
Emerging distributed computing architectures, such as grid and cloud 
computing, depend on the high integrity execution of each system in 
the computation. While integrity measurement enables systems to generate 
proofs of their integrity to remote parties, we find that current 
integrity measurement approaches are insufficient to prove runtime 
integrity for systems in these architectures. Integrity measurement 
approaches that are flexible enough have an incomplete view of runtime 
integrity, possibly leading to false integrity claims, and approaches 
that provide comprehensive integrity do so only for computing environments 
that are too restrictive. In this paper, we propose an architecture 
for building comprehensive runtime integrity proofs for general purpose 
systems in distributed computing architectures. In this architecture, 
we strive for classical integrity, using an approximation of the 
Clark-Wilson integrity model as our target. Key to building such 
integrity proofs is a carefully crafted host system whose long-term 
integrity can be justified easily using current techniques and a new 
component, called a VM verifier, that can enforce our integrity target 
on VMs comprehensively. We have built a prototype based 
on the Xen virtual machine system for SELinux VMs, and find distributed 
compilation can be implemented, providing accurate proofs of our 
integrity target with less than 4% overhead.
}
}
@inproceedings{MBSMJ09,
  author = {Thomas Moyer and Kevin Butler and Joshua Schiffman and Patrick McDaniel and Trent Jaeger},
  title = {{Scalable Web Content Attestation}},
  booktitle = {25th Annual Computer Security Applications Conference (ACSAC)},
  month = {December},
  year = {2009},
  abstract = {
The web is a primary means of information sharing for most organizations 
and people. Currently, a recipient of web content knows nothing about 
the environment in which that information was generated other than the 
specific server from whence it came (and even that information can be 
unreliable).   In this paper, we develop and evaluate the Spork system 
that uses the Trusted Platform Module (TPM) to tie the web server  
integrity state to the web content delivered to browsers, thus allowing  
a client to verify that the origin of the content was functioning 
properly when the received content was generated and/or delivered. We 
discuss the design and implementation of the Spork service and its 
browser-side Firefox validation extension.  In particular, we explore 
the challenges and solutions of scaling the delivery of mixed static and 
dynamic content using exceptionally slow TPM hardware. We perform an 
in-depth empirical analysis of the Spork system within Apache web 
servers.  This analysis shows Spork can deliver nearly 8,000 static or 
over 7,000 dynamic integrity-measured web objects per second.  More broadly, 
we identify how TPM-based web content services can scale and deliver 
integrity-measured content with manageable overhead.
}
}
@inproceedings{OMEM09,
  author = {Machigar Ongtang and Stephen McLaughlin and William Enck and Patrick McDaniel},
  title = {{Semantically Rich Application-Centric Security in Android}},
  booktitle = {25th Annual Computer Security Applications Conference (ACSAC)},
  month = {December},
  year = {2009},
  abstract = {
Smartphones are now ubiquitous. However, the security requirements of 
these relatively new systems and the applications they support are 
still being understood. As a result, the security infrastructure 
available in current smartphone operating systems is largely 
underdeveloped. In this paper, we consider the security requirements 
of smartphone applications and augment the existing Android operating 
system with a framework to meet them. Named Secure Application INTeraction 
(Saint), this modified infrastructure governs install-time permission 
assignment and their run-time use as dictated by application provider 
policy. An in-depth description of the semantics of application policy 
is presented. The architecture and technical detail of Saint is given, 
and areas for extension, optimization, and improvement explored. The 
paper concludes by illustrating the usefulness of this framework via 
concrete example.
}
}
@inproceedings{EOM09,
  author = {William Enck and Machigar Ongtang and Patrick McDaniel},
  title = {{On Lightweight Mobile Phone Application Certification}},
  booktitle = {16th ACM Conference on Computer and Communications Security (CCS)},
  month = {November},
  year = {2009},
  abstract = {
Users have begun downloading an increasingly large number of 
mobile phone applications in response to advancements in handsets 
and wireless networks. The increased number of applications 
results in a greater chance of installing Trojans and similar malware. 
In this paper, we propose the Kirin security service for Android, 
which performs lightweight certification of applications to 
mitigate malware at install time. Kirin certification uses security 
rules, which are templates designed to conservatively match undesirable 
properties in security configuration bundled with applications. 
We use a variant of security requirements engineering techniques 
to perform an in-depth security analysis of Android to produce 
a set of rules that match malware characteristics. In a sample 
of 311 of the most popular applications downloaded from the 
official Android Market, Kirin and our rules found 5 applications 
that implement dangerous functionality and therefore should be installed 
with extreme caution. Upon close inspection, another five 
applications asserted dangerous rights, but were within the scopea 
of reasonable functional needs. These results indicate that security 
configuration bundled with Android applications provides practical 
means of detecting malware. 
}
}
@inproceedings{TLOR09,
  author = {Patrick Traynor and Michael Lin and Machigar Ongtang and Vikhyath Rao and Trent Jaeger and Patrick McDaniel},
  title = {{On Cellular Botnets: Measuring the Impact of Malicious Devices on a Cellular Network Core}},
  booktitle = {16th ACM Conference on Computer and Communications Security (CCS)},
  month = {November},
  year = {2009},
  abstract = {
The vast expansion of interconnectivity with the Internet and the rapid 
evolution of highly-capable but largely insecure mobile devices 
threatens cellular networks.  In this paper, we characterize the impact 
of the large scale compromise and coordination of mobile phones in 
attacks against the core of these networks. Through a combination of 
measurement, simulation and analysis, we demonstrate the ability of a 
botnet composed of as few as 11,750 compromised mobile phones to degrade 
service to area-code sized regions by 93%.  As such attacks are 
accomplished through the execution of network service requests and not a 
constant stream of phone calls, users are unlikely to be aware of their 
occurrence. We then investigate a number of significant network 
bottlenecks, their impact on the density of compromised nodes per base 
station and how they can be avoided.  We conclude by discussing a number 
of countermeasures that may help to partially mitigate the threats posed 
by such attacks.
 }
}
@inproceedings{RVJ09,
  author = {Sandra Rueda and Hayawardh Vijayakumar and Trent Jaeger},
  title = {{Analysis of Virtual Machine System Policies}},
  booktitle = {14th ACM Symposium on Access Control Models and Technologies (SACMAT)},
  month = {June},
  year = {2009},
  abstract = {
The recent emergence of mandatory access control (MAC) enforcement for virtual 
machine monitors (VMMs) presents an opportunity to enforce a security 
goal over all its virtual machines (VMs).  However, these VMs also 
have MAC enforcement, so to determine whether the overall system 
(VM-system) is secure requires an evaluation of whether this 
combination of MAC policies, as a whole, complies with a given 
security goal.  Previous MAC policy analyses either consider a single 
policy at a time or do not represent the interaction between different 
policy layers (VMM and VM). We observe that we can analyze the VMM 
policy and the labels used for communications between VMs to create an 
inter-VM flow graph that we use to identify safe, unsafe, 
and {\em ambiguous} VM interactions.  A VM with only safe interactions 
is compliant with the goal; a VM with any unsafe interaction violates 
the goal. For a VM with ambiguous interactions, we analyze its local 
MAC policy to determine whether or not it is compliant with the goal. 
We used this observation to develop an analytical model of a 
VM-system and to evaluate whether it is compliant with a security goal. We 
implemented the model and an evaluation tool in Prolog. We evaluate 
our implementation by checking whether a VM-system running XSM/Flask 
policy at the VMM layer and SELinux policies at the VM layer satisfies 
a given integrity goal.  This work is the first step toward developing 
layered, multi-policy analyses.
 }
}
@inproceedings{KHHJ08,
  author = {Dave King and Boniface Hicks and Michael Hicks and Trent Jaeger},
  title = {{Implicit Flows: Can't Live With 'Em, Can't Live Without 'Em}},
  booktitle = {4th International Conference on Information and Systems
 Security (ICISS 2008)},
  month = {December},
  year = {2008},
  abstract = {
Verifying that programs trusted to enforce security actually enforce 
security is a practical concern for programmers and administrators. 
However, there is a disconnect between the kinds of tools that have been 
successfully applied to real software systems (such as taint mode in 
Perl and Ruby), and information-flow compilers that enforce a variant of 
the stronger security property of noninterference.  Tools that have been 
successfully used to find security violations have focused on  
explicit flows of information, where high-security information is 
directly leaked to output.  Analysis tools that enforce stronger 
security guarantees also prevent  implicit flows of information, 
where high-security information can be inferred from a program's flow of 
control.  However, these tools have not been used to find bugs in real 
programs, despite the stronger guarantees that they provide.  We 
investigate both classes of flows that are flagged by a security-typed 
language compiler when applied to implementations of authentication and 
cryptographic functions.  We conclude that the extremely large number of 
false alarms makes checking production code for stronger 
information-flow security properties, such as noninterference, currently 
infeasible.  This false alarm rate must be improved if technology from 
security-typed languages is to be adopted to guarantee security 
properties of existing programs. 
}
}
@inproceedings{TTHMJ08,
  author = {Albert Tannous and Jonathan Trostle and Mohamed Hassan and Stephen McLaughlin and Trent Jaeger},
  title = {{New Side Channel Attacks Targeting Passwords}},
  booktitle = {Proceedings of the 24th Annual Computer Security Applications
     Conference (ACSAC)},
  month = {December},
  year = {2008}
}
@inproceedings{EMT08,
  author = {William Enck and Patrick McDaniel and Trent Jaeger},
  title = {{PinUP: Pinning User Files to Known Applications}},
  booktitle = {Proceedings of the 24th Annual Computer Security Applications
     Conference (ACSAC)},
  month = {December},
  year = {2008}
}
@inproceedings{EBR+08,
  author = {William Enck and Kevin R. B. Butler and Thomas Richardson and Patrick McDaniel and Adam Smith},
  title = {{Defending Against Attacks on Main Memory Persistence}},
  booktitle = {Proceedings of the 24th Annual Computer Security Applications
     Conference (ACSAC)},
  month = {December},
  year = {2008}
}
@inproceedings{BMM+08,
  author = {Kevin R. B. Butler and Stephen McLaughlin and Patrick D. McDaniel},
  booktitle = {Proceedings of the 15th ACM Conference on Computer and Communications Security
               (CCS)},
  month = {October},
  title = {Rootkit-Resistant Disks},
  year = {2008},
  abstract = {
Rootkits are now prevalent in the wild.  Users affected by rootkits are 
subject to the abuse of their data and resources, often unknowingly. Such 
malware becomes even more dangerous when it is 
persistent--infected disk images allow the malware to exist across 
reboots and prevent patches or system repairs from being successfully 
applied. In this paper, we introduce rootkit-resistant disks (RRD) that 
label all immutable system binaries and configuration files at installation 
time.  During normal operation, the disk controller inspects all write 
operations received from the host operating system and  denies those 
made for labeled blocks.  To upgrade, the host is booted into a safe 
state and system blocks can only be modified if a security token is it 
attached to the disk controller. By enforcing immutability at the disk 
controller, we prevent a compromised operating system from infecting its 
on-disk image. 

We implement the RRD on a Linksys NSLU2 network storage device by 
extending the I/O processing on the embedded disk controller running the 
SlugOS Linux distribution. Our performance evaluation shows that the RRD 
exhibits an overhead of less than 1% for filesystem creation and less 
than 1.5% during I/O intensive Postmark benchmarking. We further  
demonstrate the viability of our approach by preventing a rootkit  
collected from the wild from infecting the OS image.  In this way, we 
show that RRDs not only prevent rootkit persistence, but do so in an 
efficient way.
  }
}
@inproceedings{KJJS+08,
  author = {Srivatsava Ranjit Ganta and Shiva Kasiviswanathan and Adam Smith},
  booktitle = {14th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining (KDD'08)},
  month = {August},
  title = {Composition Attacks and Auxiliary Information in Data Privacy},
  year = {2008},
  abstract = {
Privacy is an increasingly important aspect of data publishing. 
Reasoning about privacy, however, is fraught with pitfalls. One of the 
most significant is the auxiliary information (also called external 
knowledge, background knowledge, or side 
information) that an adversary gleans from other channels such as the 
web, public records, or domain knowledge. This paper explores how one 
can reason about privacy in the face of rich, realistic sources of 
auxiliary information. Specifically, we investigate the effectiveness 
of current anonymization schemes in preserving privacy when multiple 
organizations independently release anonymized data about 
overlapping populations.
  • We investigate composition attacks, in which an adversary uses independently anonymized releases to breach privacy. We explain why recently proposed models of limited auxiliary information fail to capture composition attacks. Our experiments demonstrate that even a simple instance of a composition attack can breach privacy in practice, for a large class of currently proposed techniques. The class includes $k$-anonymity and several recent variants.
  • On a more positive note, certain randomization-based notions of privacy (such as differential privacy) provably resist composition attacks and, in fact, the use of arbitrary side information. This resistance enables ``stand-alone" design of anonymization schemes, without the need for explicitly keeping track of other releases. We provide a precise formulation of this property, and prove that an important class of relaxations of differential privacy also satisfy the property. This significantly enlarges the class of protocols known to enable modular design.
}
}
@inproceedings{KJJSdave+08,
  author = {Dave King and Trent Jaeger and Somesh Jha and Sanjit A. Seshia},
  booktitle = {16th ACM SIGSOFT International Symposium on Foundations 
               of Software Engineering},
  month = {November},
  title = {Effective Blame for Information-Flow Violations},
  note = {To appear},
  year = {2008},
  abstract = {
Programs trusted with secure data should not release this data in ways 
contrary to system policy.  However, when a program contains an illegal 
flow of information, current information-flow reporting techniques are 
inadequate for determining the cause of the error.  Reasoning about 
information-flow errors can be difficult, as the flows involved can be 
quite subtle. We present a general model for information-flow 
blame that can explain the source of such security errors in code. 
This model is implemented by changing the information-flow verification 
procedure to: (1) generate supplementary information to reveal otherwise 
hidden program dependencies; (2) modify the constraint solver to 
construct a blame dependency graph; and (3) develop an explanation 
procedure that returns a {\em complete} and {\em minimal} error report. 
Our experiments show that information-flow errors can generally be 
explained and resolved by viewing only a small fraction of the total 
code.
  }
}
@inproceedings{RKJ+08,
  address = {California, USA},
  author = {Sandra Rueda and Dave King and Trent Jaeger},
  booktitle = {17th USENIX Security Symposium},
  month = {July},
  title = {Verifying Compliance of Trusted Programs},
  note = {To appear},
  year = {2008},
  abstract = {Every system depends on trusted programs for its secure operation.  A trusted
program is trusted to perform only safe operations despite having the
authority to perform unsafe operations; examples include administrative 
programs, root network daemons, and many others.  Currently, these programs 
are trusted without concrete justification.  The emergence of tools for building 
applications that guarantee policy enforcement, such as security-typed languages 
(STLs), and mandatory access control systems, such as user-level policy servers, 
finally offers a basis for justifying trust in such programs.  If the 
operations authorized by the program policy are also authorized by system 
policy, then the program complies with the system. 
However, program policy is not directly comparable with system policy, making 
compliance checking difficult in general.  We observe that the integrity of 
trusted programs must dominate the integrity of system data (the PIDSI 
approach). Using this insight, we assign trusted program objects to a higher
integrity level than system data objects, resulting in a simplified program
policy that enables automated compliance verification.  We find that this
assumption is consistent with the SELinux reference policy for its trusted
programs.
  }
}
@inproceedings{MSSJJ+08,
  address = {Colorado, USA},
  author = {Divya Muthukumaran and Anuj Sawani and Joshua Schiffman and Brian M. Jung and Trent Jaeger},
  booktitle = {13th ACM Symposium on Access Control Models and Technologies (SACMAT)},
  month = {June},
  title = {Measuring Integrity on Mobile Phone Systems},
  note = {To appear},
  year = {2008},
  abstract = {Mobile phone security is a relatively new field that is gathering 
momentum in the wake of rapid advancements in phone system technology. 
Mobile phones are now becoming sophisticated smart phones that provide 
services beyond basic telephony, such as supporting third-party 
applications.  Such third-party applications may be security-critical, 
such as mobile banking, or may be untrusted applications, such as 
downloaded games.  Our goal is to protect the integrity of such  
critical applications from potentially untrusted functionality, but we 
find that existing mandatory access control approaches are too complex 
and do not provide formal integrity guarantees.  In this work, we 
leverage the simplicity inherent to phone system environments to 
develop a compact SELinux policy that can be used to justify the 
integrity of a phone system using the Policy Reduced Integrity  
Measurement Architecture (PRIMA) approach.  We show that the resultant 
policy enables systems to be proven secure to remote parties, enables 
the desired functionality for installing and running trusted programs, 
and yields an SELinux policy that is over 90% smaller in size.  We 
envision that this approach can provide an outline for how to build 
high integrity phone systems.
}
}
@inproceedings{TBE+07,
  address = {San Diego, CA},
  author = {Patrick Traynor and Kevin Butler and William Enck and Patrick McDaniel},
  booktitle = {ISOC Network & Distributed System Security Symposium (NDSS)},
  month = {February},
  title = {Realizing Massive-Scale Conditional Access Systems Through Attribute-Based Cryptosystems},
  year = {2008},
  url = {http://www.cse.psu.edu/~traynor/job/traynor_ndss08.pdf},
  abstract = {The enormous growth in the diversity of content services
  such as IPTV has highlighted the inadequacy of the accompanying
  content security: existing security mechanisms scale poorly, require
  complex and often costly dedicated hardware, or fail to meet basic
  security requirements.  New security methods are needed.  In this
  paper, we explore the ability of attribute-based encryption (ABE) to
  meet the unique performance and security requirements of conditional
  access systems such as subscription radio and pay-per-view television.
  We show through empirical study that costs of ABE make its direct
  application inappropriate, but present constructions that mitigate its
  incumbent costs.  We develop an extensive simulation that allows us to
  explore the performance of a number of virtual hardware configurations
  and construction parameters over workloads developed from real
  subscription and television audiences.  These simulations show that we
  can securely deliver high quality content to viewerships of the
  highest rated shows being broadcast today, some in excess of
  26,000,000 viewers.  It is through these experiments that we expose
  the viability of not only ABE-based content delivery, but also the
  applicability of ABE systems to large-scale distributed systems.}
}
@inproceedings{HMM07,
  address = {Miami, FL},
  author = {Boniface Hicks and Timothy Misiak and Patrick McDaniel},
  booktitle = {23rd Annual Computer Security Applications Conference (ACSAC)},
  month = {December},
  title = {Channels: Runtime System Infrastructure for Security-typed Languages},
  year = {2007},
  url = {http://siis.cse.psu.edu/pubs/hmm07.pdf},
  abstract = {Security-typed languages (STLs) are powerful tools for
  provably implementing policy in applications. The programmer maps
  policy onto programs by annotating types with information flow labels,
  and the STL compiler guarantees that data always obeys its label as it
  flows within an application. As data flows into or out of an
  application, however, a runtime system is needed to mediate between
  the information flow world within the application and the
  non-information flow world of the operating system. In the few
  existing STL applications, this problem has been handled in ad hoc
  ways that hindered software engineering and security analysis. In this
  paper, we present a principled approach to STL runtime system
  development along with policy infrastructure and class abstractions
  for the STL, Jif, that implement these principles. We demonstrate the
  effectiveness of our approach by using our infrastructure to develop a
  firewall application, FLOWWALL, that provably enforces its policy.}
}
@inproceedings{SSJD07,
  address = {Miami, FL},
  author = {Luke St. Clair and Joshua Schiffman and Trent Jaeger and Patrick McDaniel},
  booktitle = {23rd Annual Computer Security Applications Conference (ACSAC)},
  month = {December},
  title = {Establishing and Sustaining System Integrity via Root of Trust Installation},
  year = {2007},
  url = {http://www.cse.psu.edu/~tjaeger/papers/acsac07.pdf},
  abstract = {Integrity measurements provide a means by which
  distributed systems can assess the trustability of potentially
  compromised remote hosts. However, current measurement techniques
  simply assert the identity of software, but provide no indication of
  the ongoing status of the system or its data.  As a result, a number
  of significant vulnerabilities can result if the system is not
  configured and managed carefully. To improve the management of a
  system's integrity, we propose a Root of Trust Installation (ROTI) as
  a foundation for high integrity systems. A ROTI is a trusted system
  installer that also asserts the integrity of the trusted computing
  base software and data that it installs to enable straightforward,
  comprehensive integrity verification for a system. The ROTI addresses
  a historically limiting problem in integrity measurement: determining
  what constitutes a trusted system state in any heterogeneous, evolving
  environment. Using the ROTI, a high integrity system state is defined
  by its installer, thus enabling a remote party to verify integrity
  guarantees that approximate classical integrity models (e.g., Biba).
  In this paper, we examine what is necessary to prove the integrity of
  the trusted computing base (sCore) of a distributed security
  architecture, called the SRM. We describe the design and
  implementation of our custom ROTI sCore installer and study the costs
  and effectiveness of binding system integrity to installation in the
  distributed SRM. This demonstration shows that strong integrity
  guarantees can be efficiently achieved in large, diverse environments
  with limited administrative overhead.}
}
@inproceedings{nrs07,
  address = {San Diego, CA},
  author = {K. Nissim and S. Raskhodnikova and A. Smith},
  title = {{Smooth Sensitivity and Sampling in Private Data Analysis}},
  booktitle = {{The 39th ACM Symposium on Theory of Computing (STOC 2007)}},
  url = {http://siis.cse.psu.edu/pubs/nrs07.pdf},
  month = {June},
  year = {2007},
  abstract = {We introduce a new, generic framework for private data
  analysis. The goal of private data analysis is to release aggregate
  information about a data set while protecting the privacy of the
  individuals whose information the data set contains. Our framework
  allows one to release functions f of the data with instance-based
  additive noise. That is, the noise magnitude is determined not only by
  the function we want to release, but also by the database itself. One
  of the challenges is to ensure that the noise magnitude does not leak
  information about the database. To address that, we calibrate the
  noise magnitude to the smooth sensitivity of f on the database x -- a
  measure of variability of f in the neighborhood of the instance x. The
  new framework greatly expands the applicability of output
  perturbation, a technique for protecting individuals' privacy by
  adding a small amount of random noise to the released statistics. To
  our knowledge, this is the first formal analysis of the effect of
  instance-based noise in the context of data privacy.

  Our framework raises many interesting algorithmic questions. Namely, to
  apply the framework one must compute or approximate the smooth
  sensitivity of f on x. We show how to do this efficiently for several
  different functions, including the median and the cost of the minimum
  spanning tree. We also give a generic procedure based on sampling that
  allows one to release f(x) accurately on many databases x. This
  procedure is applicable even when no efficient algorithm for
  approximating smooth sensitivity of f is known or when f is given as a
  black box. We illustrate the procedure by applying it to k-SED
  (k-means) clustering and learning mixtures of Gaussians.}
}
@inproceedings{TML07,
  author = {Patrick Traynor and Patrick McDaniel and Thomas {La Porta}},
  title = {{On Attack Causality in Internet-Connected Cellular Networks}},
  booktitle = {{Proceedings of the USENIX Security Symposium (Sec'07)}},
  month = {August},
  year = {2007},
  url = {http://www.patrickmcdaniel.org/pubs/usec07.pdf},
  abstract = {The emergence of connections between telecommunications
  networks and the Internet creates significant avenues for
  exploitation. For example, through the use of small volumes of
  targeted traffic, researchers have demonstrated a number of attacks
  capable of denying service to users in major metropolitan areas. While
  such investigations have explored the impact of specific
  vulnerabilities, they neglect to address a larger issue - how the
  architecture of cellular networks makes these systems susceptible to
  denial of service attacks. As we show in this paper, these problems
  have little to do with a mismatch of available bandwidth. Instead,
  they are the result of the pairing of two networks built on
  fundamentally opposing design philosophies. We support this claim by
  presenting two new attacks on cellular data services.  These attacks
  are capable of preventing the use of high-bandwidth cellular data
  services throughout an area the size of Manhattan with less than
  200Kbps of malicious traffic. We then examine the characteristics
  common to these and previous attacks as a means of explaining why such
  vulnerabilities are artifacts of design rigidity.  Specifically, we
  show that the shoehorning of data communications protocols onto a
  network rigorously optimized for the delivery of voice causes that
  network to fail under modest loads.}
}
@inproceedings{JRB+07,
  author = {Lisa Johansen and Michael Rowell and Kevin Butler and Patrick McDaniel},
  booktitle = {Fourth Conference on Email and Anti-Spam (CEAS 2007)},
  title = {Email Communities of Interest},
  month = {August},
  year = {2007},
  url = {http://siis.cse.psu.edu/pubs/jrb07_emailcoi.pdf},
  abstract = {Email has become an integral and sometimes overwhelming part of
  users' personal and professional lives.  In this paper, we measure
  the flow and frequency of user email toward the identification of
  communities of interest (COI)--groups of users that share common
  goals or associations.  If detectable, such associations will be
  useful in automating email management, e.g., topical classification,
  flagging important missives, and SPAM mitigation.  An analysis of a
  large corpus of university email is used to drive the generation and
  validation of algorithms for automatically determining COIs.  We
  examine the effect of the structure and transience of COIs with the
  algorithms and validate the algorithms using user-labeled data.  Our
  analysis shows that the proposed algorithms correctly identify email
  as being sent from the human-identified COI with high accuracy.  The
  structure and characteristics of COIs are explored analytically and
  broader conclusions about email use are posited.}
}
@inproceedings{ABM+07,
  author = {Anusha Sriraman and Kevin Butler and Patrick McDaniel and Padma Raghavan},
  title = {{Analysis of IPv4 Address Space Delegation Structure}},
  booktitle = {{12th IEEE Symposium on Computers and Communications (ISCC)}},
  month = {July},
  year = {2007},
  url = {http://www.patrickmcdaniel.org/pubs/iscc07.pdf},
  abstract = {The Internet has grown tremendously in terms of the
    number of users who rely on it and the number of organizations that
    are connected to it.  Characterizing how this growth affects its
    structure and topology is vitally important to determine the
    fundamental characteristics and limitations that must be handled,
    such as address space exhaustion; understanding the process of
    allocating and delegating address space can help to answer these
    questions. In this paper, we analyze BGP routing data to study the
    structure and growth of IPv4 address space allocation, fragmentation
    and usage. We explore the notion of delegation relationships among
    prefixes and use this information to construct an autonomous system
    (AS) delegation tree. We show that delegation in the Internet is not
    significantly correlated to the underlying topology or AS
    customer-provider relationships. We also analyze the fragmentation
    and usage of address space over a period of five years and examine
    prefixes that are delegated by organizations vs.  those that are not
    delegated. We notice that the address space usage due to delegating
    prefixes is increasing at the same rate as the address space usage
    due to non-delegating prefixes. This indicates that the fragmentation
    rate of the address space is almost constant with respect
    to total address usage. Additionally, we show that most delegation
    is performed by a small number of organizations, which may aid in
    the implementation of a public-key infrastructure for the Internet.}
}
@inproceedings{HRJ+07,
  author = {Boniface Hicks and Sandra Rueda and Trent Jaeger and Patrick McDaniel},
  title = {{From Trusted to Secure: Building and Executing Applications that Enforce System Security}},
  booktitle = {{Proceedings of the USENIX Annual Technical Conference}},
  month = {June},
  year = {2007},
  url = {pubs/tech07-hicks.pdf},
  abstract = {Commercial operating systems have recently introduced
    mandatory access controls (MAC) that can be used to ensure
    system-wide data confidentiality and integrity. These protections
    rely on restricting the flow of information between processes based
    on security levels.  The problem is, there are many applications
    that defy simple classification by security level, some of them
    essential for system operation. Surprisingly, the common practice
    among these operating systems is simply to mark these applications
    as "trusted", and thus allow them to bypass label protections.
    This compromise is not a limitation of MAC or the operating system
    services that enforce it, but simply a fundamental inability of any
    operating system to reason about how applications treat sensitive
    data internally---and thus the OS must either restrict the data that
    they receive or trust them to handle it correctly.  These practices
    were developed prior to the advent of security-typed languages. These
    languages provide a means of reasoning about how the OS's sensitive
    data is handled within applications. Thus, applications can be shown
    to enforce system security by guaranteeing, in advance of execution,
    that they will adhere to the OS's MAC policy. In this paper, we
    provide an architecture for an operating system service that
    integrates security-typed languages with operating system MAC
    services. We have built an implementation of this service, called
    SIESTA, which handles applications developed in the security-typed
    language, Jif, running on the SELinux operating system. We also
    provide some sample applications to demonstrate the security,
    flexibility and efficiency of our approach.}
}
@inproceedings{EMS+07,
  author = {William Enck and Patrick McDaniel and Shubho Sen and Panagiotis
    Sebos and Sylke Spoerel and Albert Greenberg and Sanjay Rao and William Aiello},
  title = {{Configuration Management at Massive Scale: System Design
    and Experience}},
  booktitle = {{Proceedings of the USENIX Annual Technical Conference}},
  month = {June},
  year = {2007},
  url = {http://www.cse.psu.edu/~enck/pubs/usenix07.pdf},
  abstract = {The development and maintenance of network device
    configurations is one of the central challenges faced by large
    network providers. Current network management systems fail to meet
    this challenge primarily because of their inability to adapt to
    rapidly evolving customer and provider-network needs, and because of
    mismatches between the conceptual models of the tools and the
    services they must support. In this paper, we present the PRESTO
    configuration management system that attempts to address these
    failings in a comprehensive and flexible way.  Developed for and
    deployed over the last 4 years within a large ISP network, PRESTO
    constructs device-native configurations based on the composition of
    configlets representing different services or service options.
    Configlets are compiled by extracting and manipulating data from
    external systems as directed by the PRESTO configuration scripting
    and template language. We outline the configuration management needs
    of large-scale network providers, introduce the PRESTO system and
    configuration language, and demonstrate the use, workflows, and
    ultimately the platform's flexibility via an example of VPN service.
    We conclude by considering future work and reflecting on the
    operators' experiences with PRESTO.}
}
@inproceedings{HRS+07,
  author = {Boniface Hicks and Sandra Rueda and Luke St. Clair and Trent Jaeger and Patrick McDaniel},
  booktitle = {Proceedings of the {ACM} Symposium on Access Control Models and Technologies ({SACMAT})},
  month = {June},
  title = {A Logical Specification and Analysis for {SELinux} {MLS} Policy},
  year = {2007},
  url = {http://www.patrickmcdaniel.org/pubs/sacmat07.pdf},
  abstract = {The SELinux mandatory access control (MAC) policy has
    recently added a multi-level security (MLS) model which is able to
    express a fine granularity of control over a subject's access
    rights.  The problem is that the richness of this policy makes it
    impractical to verify, by hand, that a given policy has certain
    important information flow properties or is compliant with another
    policy.  To address this, we have modeled the SELinux MLS policy
    using a logical specification and implemented that specification in
    the Prolog language.  Furthermore, we have developed some analyses
    for testing the properties of a given policy as well as an algorithm to
    determine whether one policy is compliant with another.  We have
    implemented these analyses in Prolog and compiled our implementation
    into a tool for SELinux MLS policy analysis, called PALMS.  Using
    PALMS, we verified some important properties of the SELinux MLS
    reference policy, namely that it satisfies the simple security
    condition and *-property defined by Bell and LaPadula.}
}
@inproceedings{JSS07,
  author = {Trent Jaeger and Reiner Sailer and Yogesh Sreenivasan},
  title = {{Managing the Risk of Covert Information Flows in Virtual Machine Systems}},
  booktitle = {{ACM Symposium on Access Control Models and Technologies (SACMAT)}},
  month = {June},
  year = {2007},
  abstract = {Flexible mandatory access control (MAC) enforcement is now
available for virtual machine systems. For example, the sHype MAC system
for the Xen virtual machine monitor is part of the mainline Xen
distribution. Such systems offer the isolation of VM systems with the
flexible security of MAC enforcement. A problem is that such MAC VM
systems will only be assured at modest levels (e.g., Common Criteria
EAL4), so they may contain covert channels. Covert channels are often
difficult to identify and harder to remove, so we propose an approach to
manage possible covert leakage to enable verification of security
guarantees. Typically, covert channels are outside of access control
policies, but we propose an approach that includes both overt flows and
covert flows to assess the possible risk of information leakage due to
their combination. We define the concept of a risk flow policy that
describes the authorized risks due to covert flows. In this paper, we
evaluate the ability of four policy models to express risk flow
policies. Further, we examine how such policies will be enforced in VM
systems. We find that variants of the Chinese Wall model and
Bell-LaPadula model have features necessary to express risk flow
policies. Further, we find that such policies can be enforced in the
context of sHype's Type Enforcement model.}
}
@inproceedings{CLM07,
  author = {Heesook Choi and Thomas F. La Porta and Patrick McDaniel},
  title = {{Privacy Preserving Communication in MANETs}},
  booktitle = {{Proceedings of Fourth Annual IEEE Communications
    Society Conference on Sensor, Mesh, and Ad Hoc Communications and Networks}},
  month = {June},
  year = {2007},
  url = {http://www.patrickmcdaniel.org/pubs/smac07.pdf},
  abstract = {Mobile ad hoc networks often support sensitive
    applications. These applications may require that users' identity,
    location, and correspondents be kept secret. This is a challenge in
    a MANET because of the cooperative nature of the network and
    broadcast nature of the communication media. In this paper, we
    propose a Privacy Preserving Communication System (PPCS) which
    provides a comprehensive solution to anonymize communication
    endpoints, keep the location and identifier of a node unlinkable,
    and mask the existence of communication flows. We present an
    analysis of the security of PPCS against passive internal attackers,
    provide a qualitative discussion on its strength against external
    attackers, and characterize its performance trade-offs. The
    simulation results demonstrate that PPCS has only 3% lower packet
    delivery ratio than existing multi-path routing protocols, while
    effectively providing privacy service in MANETs.}
}
@inproceedings{QMF07,
  author = {Sophie Qiu and Patrick McDaniel and Fabian Monrose},
  title = {{Toward Valley-Free Inter-domain Routing}},
  booktitle = {{Proceedings of 2007 IEEE International Conference on Communications (ICC 2007)}},
  month = {June},
  year = {2007},
  url = {http://www.patrickmcdaniel.org/pubs/icc07.pdf},
  abstract = {ASes in inter-domain routing receive little information
    about the quality of the routes they receive. This lack of
    information can lead to inefficient and even incorrect routing. In
    this paper, we quantitatively characterize BGP announcements that
    violate the so-called valley-free property - an indicator that
    universal best practices are not being preserved in the propagation
    of routes. Our analysis indicates that valley announcements are more
    pervasive than expected.  Approximately ten thousand valley
    announcements appear every day and involve a substantial number of
    prefixes. 11% of provider ASes propagate valley announcements, with
    a majority of violations happening at intermediate providers. We
    find that large surges of violating announcements can be attributed
    to transient configuration errors. We further propose a dynamic
    mechanism that provides route propagation information as transitive
    attributes of BGP. This information implicitly reflects the policies
    of the ASes along the path, without revealing the relationship of
    each AS pair.  BGP-speaking routers use this information to identify
    (and presumably avoid) routes that violate the valley-free
    property.}
}
@inproceedings{gkj+07,
  author = {Vinod Ganapathy and Dave King and Trent Jaeger and Somesh Jha},
  title = {{Mining Security-Sensitive Operations in Legacy Code using Concept Analysis}},
  booktitle = {Proceedings of the 29th International Conference on Software Engineering (ICSE '07)},
  month = {May},
  year = {2007},
  url = {http://www.cse.psu.edu/~tjaeger/papers/icse07.pdf},
  abstract = {This paper presents an approach to statically retrofit
    legacy servers with mechanisms for authorization policy enforcement.
    The approach is based upon the observation that security-sensitive
    operations performed by a server are characterized by idiomatic
    resource manipulations, called fingerprints. Candidate fingerprints
    are automatically mined by clustering resource manipulations using
    concept analysis. These fingerprints are then used to identify
    security-sensitive operations performed by the server.  Case studies
    with three real-world servers show that the approach can be used to
    identify security-sensitive operations with a few hours of manual
    effort and modest domain knowledge.}
}
@inproceedings{RBT+07,
  author = {S. Ryu and K. Butler and P. Traynor and P. McDaniel},
  title = {{Leveraging Identity-based Cryptography for Node ID Assignment in Structured P2P Systems}},
  booktitle = {IEEE International Symposium on Security in Networks and Distributed Systems (SSNDS)},
  month = {May},
  year = {2007},
  url = {http://www.patrickmcdaniel.org/pubs/ssnds07.pdf},
  abstract = {Structured peer-to-peer systems have grown
	enormously because of their scalability, efficiency and
	reliability.  These systems assign a unique identifier to each
	user and object.  However, current assignment schemes allow an
	adversary to carefully select user IDs and/or simultaneously
	obtain many pseudo-identities -- leading ultimately to an ability
	to disrupt the P2P system in very targeted (and dangerous) ways.
	In this paper, we propose novel ID assignment protocols based on
	identity-based cryptography. This approach permits the
	acquisition of node IDs to be tightly regulated without many of
	the complexities and costs associated with traditional
	certificate solutions. We broadly consider the security
	requirements of ID assignment and present three protocols
	representing distinct threat and trust models. A detailed
	empirical study of the protocols is given. Our analysis shows
	that the cost of our identity-based protocols is nominal, and
	that the associated identity services can scale to millions of
	users using a limited number of servers.}
}
@inproceedings{REM+07,
  author = {Hosam Rowaihy and William Enck and Patrick McDaniel and
    Thomas La Porta},
  title = {{Limiting Sybil Attacks in Structured Peer-to-Peer Networks}},
  booktitle = {{Proceedings of IEEE INFOCOM 2007 MiniSymposium}},
  month = {May},
  year = {2007},
  url = {http://www.cse.psu.edu/~enck/pubs/infocom07.pdf},
  abstract = {One practical limitation of structured peer-to-peer
    (P2P) networks is that they are frequently subject to Sybil attacks:
    malicious parties can compromise the network by generating and
    controlling large numbers of shadow identities.  In this paper, we
    propose an admission control system that mitigates Sybil attacks by
    adaptively constructing a hierarchy of cooperative peers. The
    admission control system vets joining nodes via client puzzles. A
    node wishing to join the network is serially challenged by the nodes
    from a leaf to the root of the hierarchy. Nodes completing the
    puzzles of all nodes in the chain are provided a cryptographic proof
    of the vetted identity. We evaluate our solution and show that an
    adversary must perform days or weeks of effort to obtain even a
    small percentage of nodes in small P2P networks, and that this
    effort increases linearly with the size of the network. We further
    show that we can place a ceiling on the number of IDs any adversary
    may obtain by requiring periodic reassertion of the IDs' continued
    validity.}
}
@inproceedings{HRJ06,
  abstract = {Traditionally, operating systems have enforced MAC and
    information flow policies with minimal dependence on application
    programs. However, there are many cases where systems depend on
    user-level programs to enforce information flows.  Previous
    approaches to handling this problem, such as privilege-separation of
    application components or assuming trust in application information
    flow enforcement, are prone to error and cumbersome to manage.  On
    the other hand, recent advances in the area of security-typed
    languages have enabled the development of realistic applications
    with formally and automatically verified information flow controls.
    In this paper, we examine what it takes to integrate information
    flow enforcement of applications written in a security-typed
    extension of Java (called Jif) with SELinux.  To this end, we have
    extended the Jif infrastructure to support interaction with SELinux
    security contexts, and we describe the SELinux policy and system
    calls which are necessary for a successful integration.  We have
    also identified the need for further services, such as a means of
    formally verifying compliance between information flow policies.  We
    have demonstrated the utility, flexibility and security of our
    approach by constructing a prototype multi-level secure email
    client.},
  author = {Boniface Hicks and Sandra Rueda and Trent Jaeger and Patrick McDaniel},
  booktitle = {Third Annual Security Enhanced Linux Symposium},
  month = {March},
  title = {Integrating SELinux with Security-typed Languages},
  year = {2007},
  url = {http://siis.cse.psu.edu/pubs/jpselinux06.pdf}
}
@inproceedings{s07,
  address = {New Orleans, LA},
  author = {Adam Smith},
  title = {{Scrambling Adversarial Errors Using Few Random Bits}},
  booktitle = {{The ACM-SIAM Symposium on Discrete Algorithms (SODA 2007)}},
  url = {pubs/s07.pdf},
  month = {January},
  year = {2007},
  abstract = {When communicating over a noisy channel, it is typically
  much easier to deal with random, independent errors with a known
  distribution than with adversarial errors. This paper looks at how one
  can use schemes designed for random errors in an adversarial context,
  at the cost of relatively few additional random bits and without using
  unproven computational assumptions.

  The basic approach is to permute the positions of a bit string using a
  permutation drawn from a $t$-wise independent family, where $t=o(n)$.
  This leads to two new results:

  1. We construct *computationally efficient* information reconciliation
  protocols correcting $pn$ adversarial binary Hamming errors with
  optimal communication and entropy loss $n(h(p)+o(1))$ bits, where $n$
  is the length of the strings and $h()$ is the binary entropy function.
  Information reconciliation protocols are important tools for dealing
  with noisy secrets in cryptography; they are also used to synchronize
  remote copies of large files.

  2. We improve the randomness complexity (key length) of efficiently
  decodable capacity-approaching "private codes" from $\Theta(n\log n)$
  to $n+o(n)$.

  We also present a simplified proof of an existential result on private
  codes due to Langberg (FOCS '04).}
}
@inproceedings{HAM06,
  abstract = {Security-typed languages are an evolving tool for
  implementing systems with provable security guarantees. However, to
  date, these tools have only been used to build simple "toy" programs.
  As described in this paper, we have developed the first real-world,
  security-typed application: a secure email system written in the Java
  language variant Jif.  Real-world policies are mapped onto the
  information flows controlled by the language primitives, and we
  consider the process and tractability of broadly enforcing security
  policy in commodity applications. We find that while the language
  provided the rudimentary tools to achieve low-level security goals,
  additional tools, services, and language extensions were necessary to
  formulate and enforce application policy. We detail the design and use
  of these tools. We also show how the strong guarantees of Jif in
  conjunction with our policy tools can be used to evaluate security.
  This work serves as a starting point--we have demonstrated that it is
  possible to implement real-world systems and policy using
  security-typed languages. However, further investigation of the
  developer tools and supporting policy infrastructure is necessary
  before they can fulfill their considerable promise of enabling more
  secure systems.},
  address = {Miami, FL},
  author = {Boniface Hicks and Kiyan Ahmadizadeh and Patrick McDaniel},
  booktitle = {22nd Annual Computer Security Applications Conference (ACSAC)},
  month = {December},
  url = {http://www.cse.psu.edu/~phicks/pubs/papers/hicks-acsac06jpmail.pdf},
  title = {Understanding Practical Application Development in Security-typed Languages},
  year = {2006}
}
@inproceedings{mbc+06,
  author = {Jonathan McCune and Stefan Berger and Ramon Caceres and Trent Jaeger and Reiner Sailer},
  title = {Shamon: A System for Distributed Mandatory Access Control},
  booktitle = {The Proceedings of the 2006 Annual Computer Security Applications Conference},
  month = {December},
  year = {2006},
  url = {http://www.cse.psu.edu/~tjaeger/papers/acsac06.pdf},
  abstract = {We define and demonstrate an approach to securing 
    distributed computation based on a shared reference monitor (Shamon)
    that enforces mandatory access control (MAC) policies across a
    distributed set of machines. The Shamon enables local reference
    monitor guarantees to be attained for a set of reference monitors on
    these machines. We implement a prototype system on the Xen
    hypervisor with a trusted MAC virtual machine built on Linux 2.6
    whose reference monitor design requires only 13 authorization
    checks, only 5 of which apply to normal processing (others are for
    policy setup). We show that, through our architecture, distributed
    computations can be protected and controlled coherently across all
    the machines involved in the computation.}
}
@inproceedings{bep++06,
  author = {Kevin Butler and William Enck and Jennifer Plasterr and
    Patrick Traynor and Patrick McDaniel},
  title = {Privacy-Preserving Web-Based Email},
  booktitle = {2nd International Conference on Information Systems
    Security (ICISS 2006)},
  address = {Kolkata, India},
  month = {December},
  year = {2006},
  url = {http://www.cse.psu.edu/~enck/pubs/iciss06b.pdf},
  abstract = {Recent web-based applications offer users free service
    in exchange for access to personal communication, such as online
    email services and instant messaging. The inspection and retention
    of user communication is generally intended to enable targeted
    marketing.  However, unless specifically stated otherwise by the
    collecting service's privacy policy, such records have an indefinite
    lifetime and may be later used or sold without restriction. In this
    paper, we show that it is possible to protect a user's privacy from
    these risks by exploiting mutually oblivious, competing
    communication channels. We create virtual channels over online
    services (e.g., Google's Gmail, Microsoft's Hotmail) through which
    messages and cryptographic keys are delivered. The message recipient
    uses a shared secret to identify the shares and ultimately recover
    the original plaintext. In so doing, we create a wired
    "spread-spectrum" mechanism for protecting the privacy of web-based
    communication. We discuss the design and implementation of our
    open-source Java applet, Aquinas, and consider ways that the myriad
    of communication channels present on the Internet can be exploited
    to preserve privacy.}
}
@inproceedings{sje+06,
  author = {Luke {St. Clair} and Lisa Johansen and William Enck and
    Matthew Pirretti and Patrick Traynor and Patrick McDaniel and Trent
    Jaeger},
  title = {{Password Exhaustion: Predicting the End of Password
    Usefulness}},
  booktitle = {2nd International Conference on Information Systems
    Security (ICISS 2006)},
  address = {Kolkata, India},
  month = {December},
  year = {2006},
  note = {Invited Paper},
  url = {http://www.cse.psu.edu/~enck/pubs/iciss06a.pdf},
  abstract = {Passwords are currently the dominant authentication
    mechanism in computing systems. However, users are unwilling or
    unable to retain passwords with a large amount of entropy. This
    reality is exacerbated by the increasing ability of systems to mount
    offline attacks. In this paper, we evaluate the degree to which the
    previous statements are true and attempt to ascertain the point at
    which passwords are no longer sufficient to securely mediate
    authentication. In order to demonstrate this, we develop an
    analytical model for computation to understand the time required to
    recover random passwords. Further, an empirical study suggests the
    situation is much worse. In fact, we found that past systems
    vulnerable to offline attacks will be obsolete in 5-15 years, and
    our study suggests that a large number of these systems are already
    obsolete. We conclude that we must discard or fundamentally change
    these systems, and to that effect, we suggest a number of ways to
    prevent offline attacks.}
}
@inproceedings{bma06,
  author = {Kevin Butler and Patrick McDaniel and William Aiello},
  title = {Optimizing BGP Security by Exploiting Path Stability},
  booktitle = {13th ACM Conference on Computer and Communications Security (CCS'06)},
  month = {November},
  year = {2006},
  url = {http://www.patrickmcdaniel.org/pubs/ccs06a.pdf},
  abstract = {The Border Gateway Protocol (BGP) is the de facto
    interdomain routing protocol on the Internet. While the serious
    vulnerabilities of BGP are well known, no security solution has been
    widely deployed.  The lack of adoption is largely caused by a
    failure to find a balance between deployability, cost, and security.
    In this paper, we consider the design and performance of BGP path
    authentication constructions that limit resource costs by exploiting
    route stability.  Based on a year-long study of BGP traffic and
    indirectly supported by findings within the networking community, we
    observe that routing paths are highly stable. This observation leads
    to comprehensive and efficient constructions for path
    authentication.  We empirically analyze the resource consumption of
    the proposed constructions via trace-based simulations. This latter
    study indicates that our constructions can reduce validation costs
    by as much as 97.3% over existing proposals while requiring nominal
    storage resources. We conclude by considering operational issues
    related to incremental deployment of our solution.}
}
@inproceedings{ptmw06,
  author = {Matthew Pirretti and Patrick Traynor and Patrick McDaniel
    and Brent Waters},
  title = {{Secure Attribute-Based Systems}},
  booktitle = {13th ACM Conference on Computer and Communications
    Security (CCS'06)},
  month = {November},
  year = {2006},
  url = {http://www.patrickmcdaniel.org/pubs/ccs06b.pdf},
  abstract = {Attributes define, classify, or annotate the datum to
    which they are assigned. However, traditional attribute
    architectures and cryptosystems are ill-equipped to provide security
    in the face of diverse access requirements and environments.  In
    this paper, we introduce a novel secure information management
    architecture based on emerging attribute-based encryption (ABE)
    primitives. A policy system that meets the needs of complex policies
    is defined and illustrated. Based on the needs of those policies, we
    propose cryptographic optimizations that vastly improve enforcement
    efficiency. We further explore the use of such policies in two
    example applications: a HIPAA compliant distributed file system and
    a social network. A performance analysis of our ABE system and
    example applications demonstrates the ability to reduce
    cryptographic costs by as much as 98% over previously proposed
    constructions. Through this, we demonstrate that our attribute
    system is an efficient solution for securely managing information in
    large, loosely-coupled, distributed systems.}
}
@inproceedings{bcg+06,
  author = {Michael Ben-Or and Claude Crepeau and Daniel Gottesman and Avinatan Hassidim and Adam Smith},
  title = {{Secure Multiparty Quantum Computation with (Only) a Strict Honest Majority}},
  booktitle = {{Foundations of Computer Science (FOCS 2006)}},
  month = {October},
  year = {2006},
  url = {http://crypto.cs.mcgill.ca/~crepeau/PDF/BCGHS06.pdf},
  abstract = {Secret sharing and multiparty computation (also called
    "secure function evaluation") are fundamental primitives in modern
    cryptography, allowing a group of mutually distrustful players to
    perform correct, distributed computations under the sole assumption
    that some number of them will follow the protocol honestly. This
    paper investigates how much trust is necessary -- that is, how many
    players must remain honest -- in order for distributed quantum
    computations to be possible. We present a verifiable quantum secret
    sharing (VQSS) protocol, and a general secure multiparty quantum
    computation (MPQC) protocol, which can tolerate any (n-1)/2 (rounded
    down) cheaters among n players. Previous protocols for these tasks
    tolerated (n-1)/4 (rounded down) and (n-1)/6 (rounded down)
    cheaters, respectively. The threshold we achieve is tight - even in
    the classical case, "fair" multiparty computation is not possible
    if any set of n/2 players can cheat. Our protocols rely on
    approximate quantum error-correcting codes, which can tolerate a
    larger fraction of errors than traditional, exact codes. We
    introduce new families of authentication schemes and approximate
    codes tailored to the needs of our protocols, as well as new state
    purification techniques along the lines of those used in
    fault-tolerant quantum circuits. }
}
@inproceedings{teml06,
  author = {Patrick Traynor and William Enck and Patrick McDaniel and
    Thomas {La Porta}},
  title = {{Mitigating Open Functionality in SMS-Capable Cellular
    Networks}},
  booktitle = {{Proceedings of the ACM Twelfth Annual International
    Conference on Mobile Computing and Networking (MobiCom)}},
  month = {September},
  year = {2006},
  url = {http://www.cse.psu.edu/~enck/pubs/mobicom06.pdf},
  abstract = {The transformation of telecommunications networks from
    homogeneous closed systems providing only voice services to
    Internet-connected open networks that provide voice and data services
    presents significant security challenges. For example, recent
    research illustrated that a carefully crafted DoS attack via text
    messaging could incapacitate all voice communications in a
    metropolitan area with little more than a cable modem. This attack
    highlights a growing threat to these systems; namely, cellular
    networks are increasingly exposed to adversaries both in and outside
    the network. In this paper, we use a combination of modeling and
    simulation to demonstrate the feasibility of targeted text messaging
    attacks. Under realistic network conditions, we show that
    adversaries can achieve blocking rates of more than 70% with only
    limited resources. We then develop and characterize five techniques
    from within two broad classes of countermeasures - queue management
    and resource provisioning.  Our analysis demonstrates that these
    techniques can eliminate or extensively mitigate even the most
    intense targeted text messaging attacks. We conclude by considering
    the tradeoffs inherent to the application of these techniques in
    current and next generation telecommunications networks.}
}
@inproceedings{TSMPL06,
  author = {Patrick Traynor and JaeShung Shin and Bharat Madan and Shashi Phoha and Thomas {La Porta}},
  title = {{Efficient Group Mobility for Heterogeneous Sensor Networks}},
  booktitle = {{Proceedings of the IEEE Vehicular Technology Conference (VTC Fall)}},
  month = {September},
  year = {2006},
  url = {http://www.cse.psu.edu/~traynor/papers/vtc06.pdf},
  abstract = {Mobility management protocols allow wireless devices to
  move between networks. These protocols have traditionally supported
  the mobility of individual nodes and are therefore not optimized to
  support the migration of groups.  Accordingly, the time required to
  re-establish connectivity, frequency of dropped packets and contention
  for the air interface increase significantly for mobile groups. We
  propose a protocol for mobile groups that reduces all of the above by
  allowing a single node to perform handoffs on behalf of all group
  members. This "gateway" node eliminates the need for multiple handoff
  messages by obscuring group membership to external parties. Through
  extensive simulation and implementation, we show significant reduction
  in handoff times, message complexity and packet loss for groups of
  heterogeneous, mobile sensors running AODV and DSDV. By leveraging the
  naturally occurring hierarchy, we demonstrate that it is possible for
  groups to efficiently use traditional mobility protocols to support
  their collective movements.}
}
@inproceedings{nss06,
  author = {Moni Naor and Gil Segev and Adam Smith},
  title = {{Tight Bounds for Unconditional Authentication Protocols in
  the Manual Channel and Shared Key Models}},
  booktitle = {{The 26th Annual International Cryptology Conference
  (CRYPTO'06)}},
  month = {August},
  year = {2006},
  url = {http://eprint.iacr.org/2006/175.pdf},
  abstract = {We address the message authentication problem in two seemingly different communication 
  models. In the first model, the sender and receiver are connected by an insecure channel and by a 
  low-bandwidth auxiliary channel, that enables the sender to ``manually'' authenticate one short 
  message to the receiver (for example, by typing a short string or comparing two short strings). 
  We consider this model in a setting where no computational assumptions are made, and prove that 
  for any $0 < \epsilon < 1$ there exists a $\log^* n$-round protocol for authenticating $n$-bit 
  messages, in which only $2 \log(1 / \epsilon) + O(1)$ bits are manually authenticated, and any 
  adversary (even computationally unbounded) has probability of at most $\epsilon$ to cheat the 
  receiver into accepting a fraudulent message. Moreover, we develop a proof technique showing that
  our protocol is essentially optimal by providing a lower bound of $2 \log(1 / \epsilon) - O(1)$ on
  the required length of the manually authenticated string.

  The second model we consider is the traditional message authentication
  model. In this model the sender and the receiver share a short secret
  key; however, they are connected only by an insecure channel. We apply
  the proof technique above to obtain a lower bound of
  $2 \log(1 / \epsilon) - 2$ on the required Shannon entropy of the
  shared key. This settles an open question posed by Gemmell and Naor
  (CRYPTO '93).

  Finally, we prove that one-way functions are necessary (and sufficient)
  for the existence of protocols breaking the above lower bounds in the
  computational setting.}
}
@inproceedings{dkr+06,
  author = {Yevgeniy Dodis and Jonathan Katz and Leonid Reyzin and Adam
  Smith},
  title = {{Robust Fuzzy Extractors and Authenticated Key Agreement from
  Close Secrets}},
  booktitle = {{The 26th Annual International Cryptology Conference
  (CRYPTO'06)}},
  month = {August},
  year = {2006},
  url = {http://people.csail.mit.edu/dodis/ps/ake-close.ps}
}
@inproceedings{jbkmc06,
  author = {Trent Jaeger and  Kevin Butler and David King and Jonathan
    McCune and Ramon Caceres and Serge Hallyn and Joy Latten and Reiner
    Sailer and Xiaolan Zhang},
  title = {{Leveraging IPsec for Distributed Authorization}},
  booktitle = {2nd IEEE Communications Society/CreateNet International
    Conference on Security and Privacy in Communication Networks
    (SecureComm'06)},
  month = {August},
  year = {2006},
  url = {http://www.cse.psu.edu/~tjaeger/papers/securecomm06.pdf},
  abstract = {Mandatory access control (MAC) enforcement is becoming
    available for commercial environments. For example, Linux 2.6
    includes the Linux Security Modules (LSM) framework that enables the
    enforcement of MAC policies (e.g., Type Enforcement or Multi-Level
    Security) for individual systems. While this is a start, we envision
    that MAC enforcement should span multiple machines. The goal is to
    be able to control interaction between applications on different
    machines based on MAC policy. In this paper, we describe a recent
    extension of the LSM framework that enables labeled network
    communication via IPsec that is now available in mainline Linux as
    of version 2.6.16. This functionality enables machines to control
    communication with processes on other machines based on the security
    label assigned to an IPsec security association. We outline a
    security architecture based on labeled IPsec to enable distributed
    MAC authorization. In particular, we examine the construction of a
    xinetd service that uses labeled IPsec to limit client access on
    Linux 2.6.16 systems. We also discuss the application of labeled
    IPsec to distributed storage and virtual machine access control.}
}
@inproceedings{tcw06,
  author = {Patrick Traynor and Michael Chien and Scott Weaver and
    Boniface Hicks and Patrick McDaniel},
  title = {{Non-Invasive Methods for Host Certification}},
  booktitle = {2nd IEEE Communications Society/CreateNet International
    Conference on Security and Privacy in Communication Networks
    (SecureComm'06)},
  month = {August},
  year = {2006},
  url = {http://www.patrickmcdaniel.org/pubs/securecomm06b.pdf},
  abstract = {Determining whether a user or system is exercising
    appropriate security practices is difficult in any context. Such
    difficulties are particularly pronounced when uncontrolled or
    unknown platforms join public networks. Commonly practiced
    techniques used to vet these hosts, such as system scans, have the
    potential to infringe upon the privacy of users. In this paper, we
    show that it is possible for clients to prove both the presence and
    proper functioning of security infrastructure without allowing
    unrestricted access to their system. We demonstrate this approach,
    specifically applied to anti-virus security, by requiring clients
    seeking admission to a network to positively identify the presence
    or absence of malcode in a series of puzzles. The implementation of
    this mechanism and its application to real networks are also
    explored. In so doing, we demonstrate that it is not necessary for
    an administrator to be invasive to determine whether a client
    implements good security practices.}
}
@inproceedings{zkp+06,
  author = {Xiaolan Zhang and Larry Koved and Marco Pistoia and Sam
    Weber and Trent Jaeger and Guillaume Marceau},
  title = {{The case for analysis preserving language
    transformations}},
  booktitle = {{Proceedings of the 2006 International Symposium on
    Software Testing and Analysis}},
  pages = {191-201},
  month = {July},
  year = {2006}
}
@inproceedings{TKBCL06,
  author = {Patrick Traynor and Raju Kumar and Hussain {Bin Saad} and
  Guohong Cao and Thomas {La Porta}},
  title = {{LIGER: Implementing Efficient Hybrid Security Mechanisms for
  Heterogeneous Sensor Networks}},
  booktitle = {{Proceedings of the 4th ACM International Conference on
  Mobile Systems, Applications and Services (MobiSys)}},
  month = {June},
  year = {2006},
  url = {http://www.cse.psu.edu/~traynor/papers/mobisys06.pdf},
  abstract = {The majority of security schemes available for sensor
  networks assume deployment in areas without access to a wired
  infrastructure.  More specifically, nodes in these networks are unable
  to leverage key distribution centers (KDCs) to assist them with key
  management.  In networks with a heterogeneous mix of nodes, however,
  it is not unrealistic to assume that some more powerful nodes have at
  least intermittent contact with a backbone network. For instance, an
  air-deployed battlefield network may have to operate securely for some
  time until uplinked friendly forces move through the area.  We
  therefore propose LIGER, a hybrid key management scheme for
  heterogeneous sensor networks that allows systems to operate in both
  the presence and absence of a KDC. Specifically, when no KDC is
  available, nodes communicate securely with each other based upon a
  probabilistic unbalanced method of key management.  The ability to
  access a KDC allows nodes to probabilistically authenticate
  neighboring devices with which they are communicating.  We also
  demonstrate that this scheme is robust to the compromise of both low
  and high capability nodes and that the same keys can be used for both
  modes of operation. Detailed experiments and simulations are used to
  show that LIGER is a highly practical solution for the current
  generation of sensors and the unbalanced approach can significantly
  reduce network initialization time.}
}
@inproceedings{qmm+06,
  author = {Sophie Qiu and Patrick McDaniel and Fabian Monrose and Avi
  Rubin},
  title = {{Characterizing Address Use Structure and Stability of
  Origin Advertisement in Interdomain Routing}},
  booktitle = {{11th IEEE Symposium on Computers and Communications}},
  month = {June},
  pages = {489-496},
  year = {2006},
  url = {http://www.patrickmcdaniel.org/pubs/icss06.pdf},
  abstract = {The stability and robustness of BGP remains one of the
  most critical elements in sustaining today's Internet. In this paper,
  we study the structure and stability of origin advertisements in
  inter-domain routing. We visualize and quantitatively characterize the
  frequency, size, and effect of address assignment and origin changes
  by analyzing real-world BGP updates for a period of one year from
  multiple vantage points. Broad classes of prefix behaviors are
  developed.  We show that a significant portion of BGP traffic is due
  to prefix flapping and explore the contributing factors which include
  a number of prefixes with abnormal short up-and-down cycles. A
  significant portion of prefixes have high origin stability. Most ASes
  are involved in few, if any, prefix movement events, while a small
  number of ASes are responsible for most of the origin churn.
  Additionally, we find that a high volume of new prefixes can be
  attributed to actively evolving countries, that some abnormal prefix
  flapping is most likely due to misconfiguration, and that some culprit
  ASes characterize the places where multi-origin prefixes oscillate.}
}
@inproceedings{gjj06,
  author = {V. Ganapathy and T. Jaeger and S. Jha},
  title = {Retrofitting Legacy Code for Authorization Policy
  Enforcement},
  booktitle = {Proceedings of the 2006 IEEE Symposium on Security and
  Privacy},
  month = {May},
  year = {2006},
  url = {http://www.cse.psu.edu/~tjaeger/papers/oakland06.pdf},
  abstract = {Researchers have argued that the best way to construct a
  secure system is to proactively integrate security into the design of
  the system. However, this tenet is rarely followed because of
  economic and practical considerations.  Instead, security mechanisms
  are added as the need arises, by retrofitting legacy code. Existing
  techniques to do so are manual and ad hoc, and often result in
  security holes.  We present program analysis techniques to assist the
  process of retrofitting legacy code for authorization policy 
  enforcement. These techniques can be used to retrofit legacy servers,
  such as X window, web, proxy, and cache servers.  Because such servers
  manage multiple clients simultaneously, and offer shared resources
  to clients, they must have the ability to enforce authorization
  policies. A developer can use our techniques to identify
  security-sensitive locations in legacy servers, and place reference
  monitor calls to mediate these locations. We demonstrate our
  techniques by retrofitting the X11 server to enforce authorization
  policies on its X clients.}
}
@inproceedings{TCCZL06,
  author = {Patrick Traynor and Heesook Choi and Guohong Cao and Sencun
  Zhu and Thomas {La Porta}},
  title = {{Establishing Pair-Wise Keys In Heterogeneous Sensor
  Networks}},
  booktitle = {{Proceedings of the 25th Annual IEEE Conference on
  Computer Communications (INFOCOM)}},
  month = {April},
  year = {2006},
  url = {http://www.cse.psu.edu/~traynor/papers/infocom06.pdf},
  abstract = {Many applications that make use of sensor networks require
  secure communication. Because asymmetric-key solutions are difficult
  to implement in such a resource-constrained environment, symmetric-key
  methods coupled with a priori key distribution schemes have been
  proposed to achieve the goals of data secrecy and integrity. These
  approaches typically assume that all sensors are similar in terms of
  capabilities, and hence deploy the same number of keys in all sensors
  in a network to provide the aforementioned protections. In this paper
  we demonstrate that a probabilistic unbalanced distribution of keys
  throughout the network that leverages the existence of a small
  percentage of more capable sensor nodes can not only provide an equal
  level of security but also reduce the consequences of node compromise.
  We demonstrate the effectiveness of this approach on small networks
  using a variety of trust models and then demonstrate the application
  of this method to very large systems.  The approach and analysis
  presented in this paper can be applied to all protocols that use
  probabilistic keys including those that employ broadcast mechanisms,
  hash functions or polynomials for the generation of keys.}
}
@inproceedings{TCL06,
  author = {Patrick Traynor and Guohong Cao and Thomas {La Porta}},
  title = {{The Effects of Probabilistic Key Management on Secure
  Routing in Sensor Networks}},
  booktitle = {{Proceedings of the 2006 IEEE Wireless Communications and
  Networking Conference (WCNC)}},
  month = {April},
  year = {2006},
  url = {http://www.cse.psu.edu/~traynor/papers/wcnc06.pdf},
  abstract = {Secure data dissemination in wireless ad hoc and sensor
  networks has recently received a great deal of attention.  A variety
  of protocols have been proposed in order to ensure secure data
  delivery across these systems; however, the majority of these schemes
  assume the presence of public or pre-established symmetric keys.
  Accordingly, the cost of key management has not been incorporated into
  secure routing mechanisms in this setting.  This paper considers the
  expenses incurred by sensor networks implementing secure routing
  schemes on top of probabilistic symmetric key management schemes.
  Specifically, we examine the overhead observed from proactive and
  reactive key establishment mechanisms for networks using a balanced
  method of key management. Through extensive simulation, we quantify
  more realistic costs for the application of secure hop-by-hop routing
  in sensor networks.}
}
@inproceedings{dmn+06,
  author = {Cynthia Dwork and Frank McSherry and Kobbi Nissim and Adam
  Smith},
  title = {{Calibrating Noise to Sensitivity in Private Data Analysis}},
  booktitle = {{Theory of Cryptography Conference (TCC '06)}},
  month = {March},
  year = {2006},
  url = {http://www.csail.mit.edu/~asmith/PS/sensitivity-tcc-final.pdf},
  abstract = {We continue a line of research initiated in [10, 11] on
  privacy-preserving statistical databases. Consider a trusted server
  that holds a database of sensitive information. Given a query function
  f mapping databases to reals, the so-called true answer is the result
  of applying f to the database. To protect privacy, the true answer is
  perturbed by the addition of random noise generated according to a
  carefully chosen distribution, and this response, the true answer plus
  noise, is returned to the user.

  Previous work focused on the case of noisy sums, in which
  $f = \sum_i g(x_i)$, where $x_i$ denotes the $i$th row of the database
  and $g$ maps database rows to $[0, 1]$. We extend the study to general
  functions $f$, proving that privacy can be preserved by calibrating the
  standard deviation of the noise according to the sensitivity of the
  function $f$. Roughly speaking, this is the amount that any single
  argument to $f$ can change its output. The new analysis shows that for
  several particular applications substantially less noise is needed than
  was previously understood to be the case.

  The first step is a very clean characterization of privacy in terms of
  indistinguishability of transcripts. Additionally, we obtain separation
  results showing the increased value of interactive sanitization
  mechanisms over non-interactive.}
}
@inproceedings{mso+06,
  author = {Patrick McDaniel and Shubho Sen and Oliver Spatscheck and
    Jacobus Van der Merwe and Bill Aiello and Charles Kalmanek},
  title = {Enterprise Security: A Community of Interest Based Approach},
  booktitle = {Proceedings of Network and Distributed Systems Security
    2006 (NDSS)},
  address = {San Diego, CA},
  month = {February},
  year = {2006},
  url = {http://www.patrickmcdaniel.org/pubs/ndss06.pdf},
  abstract = {Enterprise networks today carry a range of mission
    critical communications. A successful worm attack within an
    enterprise network can be substantially more devastating to most
    companies than attacks on the larger Internet. In this paper we
    explore a brownfield approach to hardening an enterprise network
    against active malware such as worms.  The premise of our approach
    is that if future communication patterns are constrained to
    historical "normal" communication patterns, then the ability of
    malware to exploit vulnerabilities in the enterprise can be severely
    curtailed.  We present techniques for automatically deriving
    individual host profiles that capture historical communication
    patterns (i.e., community of interest (COI)) of end hosts within an
    enterprise network. Using traces from a large enterprise network, we
    investigate how a range of different security policies based on
    these profiles impact usability (as valid communications may get
    restricted) and security (how well the policies contain malware).
    Our evaluations indicate that a simple security policy comprised of
    our Extended COI-based profile and Relaxed Throttling Discipline can
    effectively contain worm behavior within an enterprise without
    significantly impairing normal network operation.}
}
@inproceedings{sjs06,
  author = {U. Shankar and T. Jaeger and R. Sailer},
  title = {Toward Automated Information-Flow Integrity Verification for
  Security-Critical Applications},
  booktitle = {Proceedings of the 2006 ISOC Networked and Distributed
  Systems Security Symposium},
  month = {February},
  year = {2006},
  url = {http://www.cse.psu.edu/~tjaeger/papers/ndss06.pdf},
  abstract = {We provide a largely automated system for verifying
  Clark-Wilson interprocess information-flow integrity. Information-flow
  integrity properties are essential to isolate trusted processes from
  untrusted ones by removing unwanted input dependences.  For example,
  an untrusted user process should not be able to write to sshd config
  via a cron script. A useful notion of integrity is the Clark-Wilson
  integrity model [7], which allows trusted processes to accept
  necessary untrusted inputs (e.g., network data or print jobs) via
  filtering interfaces that sanitize the data. However, Clark-Wilson has
  the requirement that programs undergo formal semantic verification; in
  practice, this kind of burden has meant that no information-flow
  integrity property is verified on most widely-used systems. We define
  a weaker version of Clark-Wilson integrity, called CW-Lite, which has
  the same interprocess information-flow guarantees, but which requires
  less filtering, only small changes to existing applications, and which
  we can check using automated tools. We modify the SELinux user library
  and kernel module in order to support CW-Lite integrity verification
  and develop new software tools to aid developers in finding and
  enabling filtering interfaces. Using our toolset, we found and fixed
  several integrity-violating configuration errors in the default
  SELinux policies for OpenSSH and vsftpd.}
}
@inproceedings{bm05,
  author = {Kevin Butler and Patrick McDaniel},
  title = {Understanding Mutable Internet Pathogens, or How I Learned to
  Stop Worrying and Love Parasitic Behavior},
  booktitle = {Proceedings of 1st International Conference on
  Information Systems Security (ICISS)},
  address = {Kolkata, India},
  month = {December},
  year = {2005},
  note = {Invited Paper},
  url = {http://www.patrickmcdaniel.org/pubs/iciss05.pdf},
  abstract = {Worms are becoming increasingly hostile.  The exponential
  growth of infection rates allows small outbreaks to have worldwide
  consequences within minutes.  Moreover, the collateral damage caused
  by infections can cripple the entire Internet.  While harmful, such
  behaviors have historically been short-lived.  We assert the future
  holds much more caustic malware.  Attacks based on mutation and covert
  propagation are likely to be ultimately more damaging and long
  lasting.  This assertion is supported by observations of natural
  systems, where similarly behaving parasites represent by far
  the most successful class of living creatures.  This talk considers a
  parasite for the Internet, providing biological metaphors for its
  behavior and demonstrating the structure of pathogens.  Through
  simulation, we show that even with low infection rates, a mutating
  pathogen will eventually infect an entire community.  We posit the
  inevitability of such parasites, and consider ways that they can be
  mitigated.}
}
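
As a rough illustration of the simulation claim in this abstract (a toy model, not the authors'), the Python run below shows a pathogen with a low per-contact infection rate still saturating a fully mixed community, while covert mutation multiplies the number of strains a signature-based defense would have to track; all parameters are invented.

# Toy epidemic: infected hosts contact random peers; infections succeed
# with low probability, and each success occasionally spawns a mutated
# strain that an old signature would miss. Parameters are illustrative.
import random

random.seed(1)
N, P_INFECT, P_MUTATE = 200, 0.05, 0.05
strain_of = {0: 0}             # host -> strain id; host 0 is patient zero
hosts = [0]                    # infected hosts, kept in a list for sampling
strain, contacts = 0, 0
while len(strain_of) < N:
    contacts += 1
    src = hosts[random.randrange(len(hosts))]
    dst = random.randrange(N)
    if dst not in strain_of and random.random() < P_INFECT:
        s = strain_of[src]
        if random.random() < P_MUTATE:
            strain += 1        # covert mutation: new signature required
            s = strain
        strain_of[dst] = s
        hosts.append(dst)
print(f"community of {N} fully infected after {contacts} contacts, "
      f"{strain + 1} distinct strains")
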
@inproceedings{sjv+05,
  author = {R. Sailer and T. Jaeger and E. Valdez and R. Caceres and R. Perez
  and S. Berger and J. L. Griffin and L. van Doorn},
  title = {Building a {MAC}-based Security Architecture for the {Xen}
  Open-Source Hypervisor},
  booktitle = {Proceedings of the 21st Annual Computer Security Applications
  Conference},
  month = {December},
  year = {2005},
  url = {http://www.cse.psu.edu/~tjaeger/papers/acsac05.pdf},
  abstract = {We present the sHype hypervisor security architecture and
  examine in detail its mandatory access control facilities.  While
  existing hypervisor security approaches aiming at high assurance have
  been proven useful for high-security environments that prioritize
  security over performance and code reuse, our approach aims at
  commercial security where near-zero performance overhead,
  non-intrusive implementation, and usability are of paramount
  importance.  sHype enforces strong isolation at the granularity of a
  virtual machine, thus providing a robust foundation on which higher
  software layers can enact finer-grained controls. We provide the
  rationale behind the sHype design and describe and evaluate our
  implementation for the Xen open-source hypervisor.}
}
@inproceedings{lem05,
  author = {Wesam Lootah and William Enck and Patrick McDaniel},
  title = {{TARP}: Ticket-Based Address Resolution Protocol},
  booktitle = {21st Annual Computer Security Applications Conference
  (ACSAC)},
  address = {Tucson, AZ},
  month = {December},
  year = {2005},
  supercedes = {lem05a},
  url = {http://www.cse.psu.edu/~enck/pubs/acsac05.pdf},
  abstract = { IP networks fundamentally rely on the Address Resolution
  Protocol (ARP) for proper operation.  Unfortunately, vulnerabilities
  in the ARP protocol enable a raft of IP-based impersonation,
  man-in-the-middle, or DoS attacks.  Proposed countermeasures to these
  vulnerabilities have yet to simultaneously address backward
  compatibility and cost requirements.  This paper introduces the
  Ticket-based Address Resolution Protocol (TARP).  TARP
  implements security by distributing centrally issued secure MAC/IP
  address mapping attestations through existing ARP messages.  We
  detail the TARP protocol and its implementation within the Linux
  operating system.  Our experimental analysis shows that TARP reduces
  the cost of implementing ARP security by as much as two orders of
  magnitude over existing protocols.  We conclude by exploring a range
  of operational issues associated with deploying and administering ARP
  security. }
}
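
To make the TARP idea concrete, here is a minimal sketch of a ticket issue/verify cycle, not the paper's wire format: a local ticketing agent signs a MAC/IP binding plus an expiry, and any host holding the agent's public key can validate the attestation carried in an ARP reply. Ed25519 from the 'cryptography' package stands in for whatever signature scheme a deployment would actually use; field layout and lifetimes are illustrative.

# Sketch of a TARP-style ticket: the agent signs "mac|ip|expiry", and
# verifiers check the signature and the expiry. Ed25519 signatures are
# exactly 64 bytes, which makes parsing trivial.
from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.primitives.asymmetric import ed25519

agent_sk = ed25519.Ed25519PrivateKey.generate()   # local ticketing agent
agent_pk = agent_sk.public_key()

def issue_ticket(mac: str, ip: str, expiry: int) -> bytes:
    msg = f"{mac}|{ip}|{expiry}".encode()
    return msg + agent_sk.sign(msg)

def verify_ticket(ticket: bytes, now: int) -> bool:
    msg, sig = ticket[:-64], ticket[-64:]
    try:
        agent_pk.verify(sig, msg)                 # authentic binding?
    except InvalidSignature:
        return False
    expiry = int(msg.decode().rsplit("|", 1)[1])
    return now < expiry                           # and not yet expired

t = issue_ticket("00:11:22:33:44:55", "10.0.0.7", expiry=1_700_000_000)
print(verify_ticket(t, now=1_600_000_000))        # True
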
@inproceedings{gjj05,
  author = {V. Ganapathy and T. Jaeger and S. Jha},
  title = {Automatic placement of authorization hooks in the {Linux}
  security modules framework},
  booktitle = {Proceedings of the 12th ACM Conference on Computer and
  Communications Security},
  month = {November},
  year = {2005},
  url = {http://www.cse.psu.edu/~tjaeger/papers/ccs05.pdf},
  abstract = { We present a technique for automatic placement of
  authorization hooks, and apply it to the Linux security modules (LSM)
  framework.  LSM is a generic framework which allows diverse
  authorization policies to be enforced by the Linux kernel. It consists
  of a kernel module which encapsulates an authorization policy, and
  hooks into the kernel module placed at appropriate locations in the
  Linux kernel. The kernel enforces the authorization policy using hook
  calls. In current practice, hooks are placed manually in the kernel.
  This approach is tedious, and as prior work has shown, is prone to
  security holes.


  Our technique uses static analysis of the Linux kernel and the kernel module to automate hook placement. Given a non-hook-placed version of the Linux kernel, and a kernel module that implements an authorization policy, our technique infers the set of operations authorized by each hook, and the set of operations performed by each function in the kernel. It uses this information to infer the set of hooks that must guard each kernel function. We describe the design and implementation of a prototype tool called TAHOE (Tool for Authorization Hook Placement) that uses this technique. We demonstrate the effectiveness of TAHOE by using it with the LSM implementation of security-enhanced Linux (SELinux). While our exposition in this paper focuses on hook placement for LSM, our technique can be used to place hooks in other LSM-like architectures as well.}
}
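
The core inference step the abstract describes can be pictured in a few lines of Python; this is a toy stand-in for TAHOE, with invented operation and hook names: given the security-sensitive operations each kernel function performs and the operations each hook authorizes, every function must be guarded by hooks that jointly cover its operations.

# Toy hook-placement inference: pick hooks whose authorized operations
# cover each function's performed operations; leftover operations mean
# an unmediated (and thus potentially exploitable) path.
FUNC_OPS = {
    "vfs_read":   {"FILE_READ"},
    "vfs_rename": {"DIR_SEARCH", "FILE_WRITE"},
}
HOOK_OPS = {
    "hook_file_read":  {"FILE_READ"},
    "hook_file_write": {"FILE_WRITE"},
    "hook_dir_search": {"DIR_SEARCH"},
}

def hooks_for(func):
    needed, chosen = set(FUNC_OPS[func]), []
    for hook, ops in HOOK_OPS.items():
        if ops & needed:
            chosen.append(hook)
            needed -= ops
    if needed:
        raise ValueError(f"{func}: unmediated operations {needed}")
    return chosen

for f in FUNC_OPS:
    print(f, "->", hooks_for(f))
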
@inproceedings{etml05,
  author = {William Enck and Patrick Traynor and Patrick McDaniel and
  Thomas {La Porta}},
  title = {Exploiting Open Functionality in SMS-Capable Cellular
  Networks},
  booktitle = {Proceedings of the 12th ACM Conference on Computer and
  Communications Security (CCS)},
  month = {November},
  year = {2005},
  supercedes = {etml05b},
  url = {http://www.cse.psu.edu/~enck/pubs/ccs05.pdf},
  abstract = { Cellular networks are a critical component of the
  economic and social infrastructures in which we live. In addition to
  voice services, these networks deliver alphanumeric text messages to
  the vast majority of wireless subscribers. To encourage the expansion
  of this new service, telecommunications companies offer connections
  between their networks and the Internet. The ramifications of such
  connections, however, have not been fully recognized. In this paper,
  we evaluate the security impact of the SMS interface on the
  availability of the cellular phone network. Specifically, we
  demonstrate the ability to deny voice service to cities the size of
  Washington D.C. and Manhattan with little more than a cable modem.
  Moreover, attacks targeting the entire United States are feasible with
  resources available to medium-sized zombie networks.  This analysis
  begins with an exploration of the structure of cellular networks. We
  then characterize network behavior and explore a number of
  reconnaissance techniques aimed at effectively targeting attacks on
  these systems. We conclude by discussing countermeasures that mitigate
  or eliminate the threats introduced by these attacks. }
}
@inproceedings{pzn+05,
  author = {Matthew Pirretti and Sencun Zhu and Vijaykrishnan
  Narayanan and Patrick McDaniel and Mahmut Kandemir and Richard
  Brooks},
  title = {The Sleep Deprivation Attack in Sensor Networks: Analysis
  and Methods of Defense},
  booktitle = {Proceedings of the Innovations and Commercial
  Applications of Distributed Sensor Networks Symposium},
  month = {October},
  address = {Bethesda, Maryland},
  note = {({\it best paper})},
  year = {2005},
  url = {http://www.patrickmcdaniel.org/pubs/ICA_DSN_05b.pdf},
  abstract = { The ability of sensor nodes to enter a low power sleep
  mode is very useful for extending network longevity. We show how
  adversary nodes can exploit clustering algorithms to ensure their
  selection as cluster heads for the purpose of launching attacks that
  prevent victim nodes from sleeping. We present two such attacks: the
  barrage attack and the sleep deprivation attack. The barrage attack
  bombards victim nodes with legitimate requests, whereas the sleep
  deprivation attack makes requests of victim nodes only as often as is
  necessary to keep the victims awake. We show that while the barrage
  attack causes its victims to spend slightly more energy, it is more
  easily detected and requires more effort on behalf of the attacker.
  Thus we have focused our research on the sleep deprivation attack. Our
  analysis indicates that this attack can nullify any energy savings
  obtained by allowing sensor nodes to enter sleep mode. We also analyze
  three separate methods for mitigating this attack: the random vote
  scheme, the round robin scheme, and the hash-based scheme. We have
  evaluated these schemes based upon their ability to reduce the
  adversary's attack, the amount of time required to select a cluster
  head, and the amount of energy required to perform each scheme. We
  have found that of the three clustering methods analyzed, the
  hash-based scheme is the best at mitigating the sleep deprivation
  attack. }
}
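
A hash-based cluster-head election of the general kind this abstract favors can be sketched in a few lines; the exact scheme in the paper may differ, and the digest-comparison rule here is only illustrative. Because the winner is determined by an unpredictable hash of the round number and node identity, an adversary cannot reliably bias elections toward itself the way it can under vote-driven clustering.

# Hash-based cluster-head election: each round, every node computes
# H(round || node_id); the smallest digest wins. No node can choose its
# own digest, so repeated self-selection by an adversary is thwarted.
import hashlib

def elect_head(node_ids, round_no):
    def score(n):
        return hashlib.sha256(f"{round_no}|{n}".encode()).digest()
    return min(node_ids, key=score)

nodes = [f"node{i}" for i in range(8)]
for r in range(3):
    print(f"round {r}: head = {elect_head(nodes, r)}")
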
@inproceedings{kjm05,
  author = {Luis Kruger and Somesh Jha and Patrick McDaniel},
  title = {Privacy Preserving Clustering},
  booktitle = {10th European Symposium on Research in Computer
  Security (ESORICS '05)},
  month = {September},
  year = {2005},
  note = {Milan, Italy},
  url = {http://siis.cse.psu.edu/pubs/esorics05.pdf},
  abstract = {The freedom and transparency of information flow on the
  Internet has heightened concerns of privacy.  Given a set of data
  items, clustering algorithms group similar items
  together. Clustering has many applications, such as
  customer-behavior analysis, targeted marketing, forensics, and
  bioinformatics. In this paper, we present the design and analysis of
  a privacy-preserving k-means clustering algorithm, where
  only the cluster means at the various steps of the algorithm are
  revealed to the participating parties. The crucial step in our
  privacy-preserving k-means is privacy-preserving
  computation of cluster means. We present two protocols (one based on
  oblivious polynomial evaluation and the second based on homomorphic
  encryption) for privacy-preserving computation of cluster means. We
  have a JAVA implementation of our algorithm. Using our
  implementation, we have performed a thorough evaluation of our
  privacy-preserving clustering algorithm on three data sets. Our
  evaluation demonstrates that privacy-preserving clustering is
  feasible, i.e., our homomorphic-encryption based algorithm finished
  clustering a large data set in approximately 66 seconds.}
}
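
For intuition about the privacy-preserving cluster-mean step, the sketch below uses additive secret sharing over a prime field as a simple stand-in; the paper's actual protocols are based on oblivious polynomial evaluation and homomorphic encryption. Each party splits its per-cluster (sum, count) into random shares, only share-sums are published, and the mean is reconstructed without exposing any single party's contribution.

# Additive-sharing stand-in for privacy-preserving cluster means: the
# revealed values are uniformly random shares; only their total (the
# cluster sum and count) becomes known, as the protocol intends.
import random

P = 2**61 - 1                         # prime modulus (illustrative)

def share(value, n_parties):
    shares = [random.randrange(P) for _ in range(n_parties - 1)]
    shares.append((value - sum(shares)) % P)
    return shares

parties = [(120, 4), (300, 6)]        # (sum of points, count) per party
n = len(parties)
sum_shares = [share(s, n) for s, _ in parties]
cnt_shares = [share(c, n) for _, c in parties]
# party j publishes only the sum of the j-th shares it received
pub_sum = [sum(sh[j] for sh in sum_shares) % P for j in range(n)]
pub_cnt = [sum(sh[j] for sh in cnt_shares) % P for j in range(n)]
total_sum, total_cnt = sum(pub_sum) % P, sum(pub_cnt) % P
print("cluster mean:", total_sum / total_cnt)   # 420 / 10 = 42.0
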
@inproceedings{ces+05,
  author = {Heesook Choi and William Enck and Jaesheung Shin and
  Patrick McDaniel and Thomas F. La Porta},
  title = {Secure Reporting of Traffic Forwarding Activity in Mobile Ad Hoc Networks},
  booktitle = {MobiQuitous 2005},
  month = {July},
  year = {2005},
  note = {San Diego, CA},
  url = {http://siis.cse.psu.edu/pubs/mobiq05.pdf},
  abstract = {
  Nodes forward data on behalf of each other in mobile ad hoc
  networks.  In a civilian application, nodes are assumed to be
  selfish and rational, i.e., they pursue their own
  self-interest. Hence, the ability to accurately measure traffic
  forwarding is critical to ensure proper network operation.  These
  measurements are often used to credit nodes based on their level of
  participation, or to detect loss.  Past solutions employ neighbor
  monitoring and reporting on node forwarding traffic.  These methods
  are not applicable in civilian networks where neighbor nodes lack
  the desire or ability to perform the monitoring function.  Such
  environments occur frequently in which neighbor hosts are resource
  constrained, or in networks where directional antennas are used and
  reliable monitoring is difficult or impossible.


In this paper, we propose a protocol that uses nodes on the data path to securely produce packet forwarding reports. Reporting nodes are chosen randomly and secretly so that malicious nodes cannot modify their behavior based upon the monitoring point. The integrity and authenticity of reports are preserved through the use of secure link layer acknowledgments and monitoring reports. The robustness of the reporting mechanism is strengthened by forwarding the report to multiple destinations (source and destination). We explore the security, cost, and accuracy of our protocol.}
}
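
One simple way to choose reporters "randomly and secretly", in the spirit of this abstract though not necessarily the paper's exact mechanism: each on-path node shares a key with the source and reports on a packet iff a keyed hash of the packet identifier falls below a threshold, so a malicious forwarder cannot tell which neighbor is the monitoring point. The threshold and key setup below are illustrative assumptions.

# Secret reporter selection via a keyed PRF: without node_key, the
# reporting decision for any given packet is unpredictable.
import hmac, hashlib

REPORT_PROB = 0.25
THRESHOLD = int(REPORT_PROB * 2**32)

def is_reporter(node_key: bytes, flow_id: str, seq: int) -> bool:
    tag = hmac.new(node_key, f"{flow_id}|{seq}".encode(),
                   hashlib.sha256).digest()
    return int.from_bytes(tag[:4], "big") < THRESHOLD

key = b"shared-with-source-only"
hits = sum(is_reporter(key, "flowA", s) for s in range(1000))
print(f"{hits}/1000 packets reported")   # roughly 250 expected
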
@inproceedings{ds05a,
  author = {Yevgeniy Dodis and Adam Smith},
  title = {Correcting Errors Without Leaking Partial Information},
  booktitle = {ACM Symposium on Theory of Computing (STOC)},
  month = {May},
  year = {2005},
  note = {Baltimore, MD},
  url = {http://www.csail.mit.edu/~asmith/PS/proxobf-stoc-proc.pdf},
  abstract = {
  This paper explores what kinds of information two parties
must communicate in order to correct errors which occur in
a shared secret string W. Any bits they communicate must
leak a significant amount of information about W; that
is, from the adversary's point of view, the entropy of W will
drop significantly. Nevertheless, we construct schemes with
which Alice and Bob can prevent an adversary from learning
any useful information about W. Specifically, if the entropy
of W is sufficiently high, then there is no function f(W)
which the adversary can learn from the error-correction information
with significant probability. This leads to several
new results: (a) the design of noise-tolerant perfectly one-way
hash functions in the sense of Canetti et al. [7], which
in turn leads to obfuscation of proximity queries for high
entropy secrets W; (b) private fuzzy extractors [11], which
allow one to extract uniformly random bits from noisy and
nonuniform data W, while also ensuring that no sensitive
information about W is leaked; and (c) noise tolerance and
stateless key re-use in the Bounded Storage Model, resolving
the main open problem of Ding [10].



The heart of our constructions is the design of strong randomness extractors with the property that the source W can be recovered from the extracted randomness and any string W' which is close to W.}
}
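
The error-correction step this paper starts from is the standard code-offset construction from the fuzzy-extractor literature; the sketch below implements it with a 3x repetition code. Note that this basic construction is exactly what the paper improves upon: here the helper string can leak partial information about W, whereas the paper's schemes provably leak no function of W.

# Code-offset secure sketch with a 3x repetition code: publish
# s = w XOR C(x); given a noisy reading w' within the code's tolerance,
# compute w' XOR s, decode to the nearest codeword, re-encode, and XOR
# with s to recover w exactly.
import random

def enc(bits):                       # 3x repetition encode
    return [b for b in bits for _ in range(3)]

def dec(bits):                       # majority decode per 3-bit block
    return [int(sum(bits[i:i+3]) >= 2) for i in range(0, len(bits), 3)]

def xor(a, b):
    return [x ^ y for x, y in zip(a, b)]

random.seed(0)
w = [random.randint(0, 1) for _ in range(12)]   # secret string
x = [random.randint(0, 1) for _ in range(4)]    # fresh random message
sketch = xor(w, enc(x))                         # public helper string

w_noisy = list(w); w_noisy[5] ^= 1              # one bit of noise
codeword = xor(w_noisy, sketch)                 # = enc(x) plus the noise
recovered = xor(enc(dec(codeword)), sketch)     # decode, shift back
print(recovered == w)                           # True
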
@inproceedings{cgs05,
  author = {Claude Crépeau and Daniel Gottesman and Adam Smith},
  title = {Approximate Quantum Error Correcting Codes and Verifiable Secret Sharing},
  booktitle = {Eurocrypt 2005},
  month = {May},
  year = {2005},
  note = {Aarhus, Denmark},
  url = {http://www.csail.mit.edu/~asmith/PS/aqecc-proc-Eurocrypt-2005.pdf},
  abstract = {
  It is a standard result in the theory of quantum error-correcting
codes that no code of length n can fix more than n/4 arbitrary errors,
regardless of the dimension of the coding and encoded Hilbert spaces.
However, this bound only applies to codes which recover the message
exactly. Naively, one might expect that correcting errors to very high
fidelity would only allow small violations of this bound. This intuition
is incorrect: in this paper we describe quantum error-correcting codes
capable of correcting up to ⌊(n-1)/2⌋ arbitrary errors with fidelity exponentially
close to 1, at the price of increasing the size of the registers
(i.e., the coding alphabet). This demonstrates a sharp distinction between
exact and approximate quantum error correction. The codes have
the property that any t components reveal no information about the
message, and so they can also be viewed as error-tolerant secret sharing
schemes.



The construction has several interesting implications for cryptography and quantum information theory. First, it suggests that secret sharing is a better classical analogue to quantum error correction than is classical error correction. Second, it highlights an error in a purported proof that verifiable quantum secret sharing (VQSS) is impossible when the number of cheaters t is n/4. In particular, the construction directly yields an honest-dealer VQSS scheme for t = ⌊(n-1)/2⌋. We believe the codes could also potentially lead to improved protocols for dishonest-dealer VQSS and secure multi-party quantum computation.

More generally, the construction illustrates a difference between exact and approximate requirements in quantum cryptography and (yet again) the delicacy of security proofs and impossibility results in the quantum model.}
}
@inproceedings{bdk+05,
  author = {Xavier Boyen and Yevgeniy Dodis and Jonathan Katz and Rafail Ostrovsky and Adam Smith},
  title = {Secure Remote Authentication Using Biometric Data},
  booktitle = {Eurocrypt 2005},
  month = {May},
  year = {2005},
  note = {Aarhus, Denmark},
  url = {http://www.csail.mit.edu/~asmith/PS/fuzzy-remote-proceedings-Eurocrypt-2005.pdf},
  abstract = {
  Biometric data offer a potential source of high-entropy, secret
information that can be used in cryptographic protocols provided two
issues are addressed: (1) biometric data are not uniformly distributed;
and (2) they are not exactly reproducible. Recent work, most notably
that of Dodis, Reyzin, and Smith, has shown how these obstacles may
be overcome by allowing some auxiliary public information to be reliably
sent from a server to the human user. Subsequent work of Boyen has
shown how to extend these techniques, in the random oracle model, to
enable unidirectional authentication from the user to the server without
the assumption of a reliable communication channel.



We show two efficient techniques enabling the use of biometric data to achieve mutual authentication or authenticated key exchange over a completely insecure (i.e., adversarially controlled) channel. In addition to achieving stronger security guarantees than the work of Boyen, we improve upon his solution in a number of other respects: we tolerate a broader class of errors and, in one case, improve upon the parameters of his solution and give a proof of security in the standard model.}
}
@inproceedings{cdm+05,
  author = {Shuchi Chawla and Cynthia Dwork and Frank McSherry and Adam Smith and Hoeteck Wee},
  title = {Towards Privacy in Public Databases},
  booktitle = {Theory of Cryptography (TCC) 2005},
  month = {February},
  year = {2005},
  note = {Cambridge, MA},
  url = {http://www.csail.mit.edu/~asmith/PS/sdb-tcc-2005-almost-final-proceedings.pdf},
  abstract = {
  We initiate a theoretical study of the census problem. Informally,
in a census individual respondents give private information to a
trusted party (the census bureau), who publishes a sanitized version of
the data. There are two fundamentally conflicting requirements: privacy
for the respondents and utility of the sanitized data. Unlike in the study
of secure function evaluation, in which privacy is preserved to the extent
possible given a specific functionality goal, in the census problem privacy
is paramount; intuitively, things that cannot be learned "safely" should
not be learned at all.



An important contribution of this work is a definition of privacy (and privacy compromise) for statistical databases, together with a method for describing and comparing the privacy offered by specific sanitization techniques. We obtain several privacy results using two different sanitization techniques, and then show how to combine them via cross training. We also obtain two utility results involving clustering.}
}
@inproceedings{ds05b,
  author = {Yevgeniy Dodis and Adam Smith},
  title = { Entropic Security and the Encryption of High Entropy Messages},
  booktitle = {Theory of Cryptography (TCC) 2005},
  month = {February},
  year = {2005},
  note = {Cambridge, MA},
  url = {http://eprint.iacr.org/2004/219.pdf},
  abstract = {
  Russell and Wang (Eurocrypt 2002) recently introduced an elegant,
  information-theoretic notion called entropic security of encryption:
  they required that the cipher text leak no predicate of the plaintext
  (similar to semantic security (Goldwasser and Micali, JCSS 1984)) but
  only as long as the distribution on messages has high entropy from the
  adversary's point of view. They show that this notion of security can
  be achieved with very short keys for entropically rich message spaces.
  Canetti, Micciancio and Reingold (STOC, 1998) had previously
  constructed hash functions which satisfy a similar entropic security
  condition. The output of such a hash function leaks no partial
  information about the input, provided the input has sufficiently high
  entropy. This paper studies entropic security in general, and its
  application to the encryption of high-entropy messages.



(1) We elucidate the notion of entropic security. Our results apply to all entropically-secure primitives, including both encryption and hash functions. We strengthen the formulation of Canetti and Russell and Wang: we require that an entropically-secure primitive leak no function whatsoever of its input, while the previous works focus only on predicates. This stronger formulation is much closer to the original notion of semantic security. We also show that entropic security is equivalent to indistinguishability on pairs of input distributions of sufficiently high entropy. This equivalence generalizes the conventional equivalence between indistinguishability and semantic security. Indistinguishability, in turn, is related to randomness extraction from non-uniform distributions. The proof of the equivalence of hiding predicates to hiding all functions is quite involved, and requires very different techniques from those of Goldwasser and Micali.

(2) We use the equivalence above, and the connection to randomness extraction, to prove several new results on entropically-secure encryption. We obtain:

(a) Two general frameworks for constructing entropically secure encryption schemes, one based on expander graphs and the other on XOR-universal hash functions. These schemes generalize the schemes of Russell and Wang, yielding simpler constructions and proofs as well as improved parameters. The best scheme uses a key of length k=n-t+2log(1/eps)+O(1), where eps is a measure of leakage and t is the initial min-entropy of the message.

(b) Lower bounds on the key length k for entropic security and indistinguishability. In particular, we show near tightness of Russell-Wang constructions: k > n-t. (In fact, for a large class of schemes k >= n-t + log(1/eps).)}
}
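
The key-length bounds quoted above can be made concrete; the numbers below are illustrative, chosen only to show the gap between the construction and the lower bound.

% Illustrative instantiation of the abstract's bounds: message length
% n = 1024 bits, min-entropy t = 768, leakage eps = 2^{-64}.
\[
  k = n - t + 2\log\tfrac{1}{\varepsilon} + O(1)
    = 1024 - 768 + 2 \cdot 64 + O(1) \approx 384 \text{ bits},
\]
\[
  \text{versus the lower bound (for a large class of schemes)}\quad
  k \ge n - t + \log\tfrac{1}{\varepsilon} = 256 + 64 = 320 \text{ bits}.
\]
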
@inproceedings{bcc+04b,
  author = {S. Byers and L. Cranor and E. Cronin and D. Kormann and P. McDaniel},
  title = {Exposing Digital Content Piracy: Approaches, Issues and Experiences},
  booktitle = {Thirty-Eighth Conference on Signals, Systems, and Computers},
  month = {Nov},
  year = {2004},
  note = {Monterey, CA. Invited paper}
}
@inproceedings{sjz+04,
  title = {Attestation-based policy enforcement for remote access},
  author = {R. Sailer and T. Jaeger and X. Zhang and L. van Doorn},
  booktitle = {ACM Conference on Computer and Communications
    Security},
  pages = {308-317},
  year = {2004},
  url = {http://www.cse.psu.edu/~tjaeger/papers/ccs04.pdf},
  abstract = {Intranet access has become an essential function for
    corporate users. At the same time, corporations' security
    administrators have little ability to control access to corporate
    data once it is released to remote clients. At present, no
    confidentiality or integrity guarantees about the remote access
    clients are made, so it is possible that an attacker may have
    compromised a client process and is now downloading or modifying
    corporate data. Even though we have corporate-wide access control
    over remote users, the access control approach is currently
    insufficient to stop these malicious processes.  We have designed
    and implemented a novel system that empowers corporations to verify
    client integrity properties and establish trust upon the client
    policy enforcement before allowing clients (remote) access to
    corporate Intranet services. Client integrity is measured using a
    Trusted Platform Module (TPM), a new security technology that is
    becoming broadly available on client systems, and our system uses
    these measurements for access policy decisions enforced upon the
    client's processes. We have implemented a Linux 2.6 prototype system
    that utilizes the TPM measurement and attestation, existing Linux
    network control (Netfilter), and existing corporate policy
    management tools in the Tivoli Access Manager to control remote
    client access to corporate data. This prototype illustrates that our
    solution integrates seamlessly into scalable corporate policy
    management and introduces only a minor performance overhead.}
}
@inproceedings{szj+04,
  title = {Design and Implementation of a {TCG}-based Integrity
    Measurement Architecture},
  author = {R. Sailer and X. Zhang and T. Jaeger and L. van Doorn},
  booktitle = {USENIX Security Symposium},
  pages = {223-238},
  year = {2004},
  url = {http://www.cse.psu.edu/~tjaeger/papers/usenix_sec2004.pdf},
  abstract = {We present the design and implementation of a secure
    integrity measurement system for Linux. All executable content that
    is loaded onto the Linux system is measured before execution and
    these measurements are protected by the Trusted Platform Module
    (TPM) that is part of the Trusted Computing Group (TCG) standards.
    Our system is the first to extend the TCG trust measurement concepts
    to dynamic executable content from the BIOS all the way up into the
    application layer. In effect, we show that many of the Microsoft
    NGSCB guarantees can be obtained on today's hardware and today's
    software and that these guarantees do not require a new CPU mode or
    operating system but merely depend on the availability of an
    independent trusted entity, a TPM for example. We apply our trust
    measurement architecture to a web server application where we show
    how our system can detect undesirable invocations, such as rootkit
    programs, and that our measurement architecture is practical in
    terms of the number of measurements taken and the performance impact
    of making them.}
}
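
The aggregation at the heart of this measurement architecture fits in a few lines: each piece of executable content is hashed before loading, appended to a measurement list, and folded into a PCR with the TPM extend operation, so the final PCR value commits to the entire load sequence and its order. The sketch below uses SHA-1 to match TCG 1.2-era TPMs; the measured "files" are illustrative.

# TCG-style measurement: PCR <- SHA1(PCR || SHA1(content)) per load.
# A verifier replays the measurement list and accepts only if the
# recomputed aggregate matches the TPM-quoted PCR value.
import hashlib

def sha1(data: bytes) -> bytes:
    return hashlib.sha1(data).digest()

pcr = b"\x00" * 20                      # PCRs reset to zero at boot
measurement_list = []
for name, content in [("bootloader", b"..."), ("kernel", b"vmlinuz"),
                      ("httpd", b"apache binary")]:
    m = sha1(content)
    measurement_list.append((name, m.hex()))
    pcr = sha1(pcr + m)                 # TPM_Extend

print("final PCR:", pcr.hex())
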
@inproceedings{jsz04,
  author = {T. Jaeger and R. Sailer and X. Zhang},
  title = {Resolving Constraint Conflicts},
  booktitle = {Proceedings of the 2004 ACM Symposium on Access Control Models
  and Technologies},
  month = {June},
  year = {2004},
  url = {http://www.cse.psu.edu/~tjaeger/papers/sacmat04.pdf},
  abstract = {In this paper, we define constraint conflicts and examine properties
  that may aid in guiding their resolution. A constraint conflict is an
  inconsistency between the access control policy and the constraints
  specified to limit that policy. For example, a policy that permits
  a high integrity subject to access low integrity data is in conflict
  with a Biba integrity constraint. Constraint conflicts differ from
  typical policy conflicts in that constraints are never supposed to be
  violated. That is, a conflict with a constraint results in a policy
  compilation error, whereas policy conflicts are resolved at runtime.
  As we have found in the past, when constraint conflicts occur in
  a specification a variety of resolutions are both possible and practical.
  In this paper, we detail some key formal properties of constraint
  conflicts and show how these are useful in guiding conflict
  resolution. We use the SELinux example policy for Linux 2.4.19 as
  the source of our constraint conflicts and resolution examples. The
  formal properties are used to guide the selection of resolutions and
  provide a basis for a resolution language that we apply to resolve
  conflicts in the SELinux example policy.}
}
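
The paper's notion of a constraint conflict can be shown in miniature: permitted access rules are checked against a Biba-style integrity constraint (a subject may not read data of lower integrity), and every violating rule is a compile-time conflict that must be explicitly resolved. The types and integrity levels below are invented for illustration.

# Miniature constraint-conflict detection against a Biba read rule.
LEVEL = {"kernel_t": 3, "sshd_t": 2, "user_t": 1, "untrusted_t": 0}

rules = [
    ("sshd_t", "untrusted_t", "read"),   # conflict: reads lower integrity
    ("user_t", "sshd_t", "read"),        # ok: reads higher integrity
    ("sshd_t", "kernel_t", "read"),      # ok
]

conflicts = [(s, o, op) for s, o, op in rules
             if op == "read" and LEVEL[o] < LEVEL[s]]
for s, o, op in conflicts:
    print(f"constraint conflict: {s} may {op} lower-integrity {o}")
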
@inproceedings{aim03,
  author = {W. Aiello and J. Ioannidis and P. McDaniel},
  title = {{Origin Authentication in Interdomain Routing}},
  booktitle = {Proceedings of 10th ACM Conference on Computer and
  Communications Security},
  organization = {ACM},
  month = {October},
  year = {2003},
  pages = {165-178},
  note = {Washington, DC},
  url = {http://www.patrickmcdaniel.org/pubs/ccs03a.pdf},
  abstract = {Attacks against Internet routing are increasing in
  number and severity.  Contributing greatly to these attacks is the
  absence of origin authentication: there is no way to validate
  claims of address ownership or location.  The lack of such services
  enables not only attacks by malicious entities, but indirectly allows
  seemingly inconsequential misconfigurations to disrupt large portions
  of the Internet.  This paper considers the semantics, design, and
  costs of origin authentication in interdomain routing.  We formalize
  the semantics of address delegation and use on the Internet, and
  develop and characterize broad classes of origin authentication
  proof systems.  We estimate the address delegation graph
  representing the current use of IPv4 address space using available
  routing data.  This effort reveals that current address delegation
  is dense and relatively static: as few as 16 entities perform 80%
  of the delegation on the Internet.  We conclude by evaluating the
  proposed services via trace-based simulation.  This simulation
  shows that the enhanced proof systems can reduce resource consumption by
  an order of magnitude over currently proposed solutions.}
}
@inproceedings{cjmm03,
  author = {E. Cronin and S. Jamin and T. Malkin and P. McDaniel},
  title = {{On the Performance, Feasibility, and Use of Forward Secure
  Signatures}},
  booktitle = {Proceedings of 10th ACM Conference on Computer and
  Communications Security},
  organization = {ACM},
  month = {October},
  year = {2003},
  pages = {131-144},
  note = {Washington, DC},
  url = {http://www.patrickmcdaniel.org/pubs/ccs03b.pdf},
  abstract = {Forward-secure signatures (FSSs) have recently received much
  attention from the cryptographic theory community as a potentially
  realistic way to mitigate many of the difficulties digital
  signatures face with key exposure.  However, no previous works have
  explored the practical performance of these proposed constructions
  in real-world applications, nor have they compared FSS to
  traditional, non-forward-secure, signatures in a non-asymptotic way.


We present an empirical evaluation of several FSS schemes that looks at the relative performance among different types of FSS as well as between FSS and traditional signatures. Our study provides the following contributions: first, a new methodology for comparing the performance of signature schemes, and second, a thorough examination of the practical performance of FSS. We show that for many cases the best FSS scheme has essentially identical performance to traditional schemes, and even in the worst case is only 2-4 times slower. On the other hand, we also show that if the wrong FSS configuration is used, the performance can be orders of magnitude slower. Our methodology provides a way to prevent such misconfigurations, and we examine common applications of digital signatures using it. In addition to the evaluation methodology and empirical study, a third contribution of this paper is the open-source FSS library developed for the study.

We conclude that not only are forward-secure signatures a useful theoretical construct as previous works have shown, but they are also, when used correctly, a very practical solution to some of the problems associated with key exposure in real-world applications. Through our metrics and our reference implementation we provide the tools necessary for developers to efficiently use forward-secure signatures.}
}
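
For context on what "forward secure" buys, here is the folklore certify-and-delete construction in Python; it is not one of the schemes the paper benchmarks, and it trades bulky key material for simplicity. One keypair is generated per period and certified by a master key that is then destroyed; each period's secret key is erased when its period ends, so exposure at period i cannot forge signatures for earlier periods. Ed25519 comes from the 'cryptography' package (public_bytes_raw needs a recent version).

# Folklore forward-secure signatures: certify per-period keys, delete
# the master, and erase each period key at period end.
from cryptography.hazmat.primitives.asymmetric import ed25519

T = 4                                        # number of time periods
period_sks = [ed25519.Ed25519PrivateKey.generate() for _ in range(T)]
master = ed25519.Ed25519PrivateKey.generate()
certs = [master.sign(sk.public_key().public_bytes_raw())
         for sk in period_sks]
master_pk = master.public_key()
del master                                   # master key destroyed

def sign(period, msg):
    # in a real deployment period_sks[period] is erased when the period
    # ends; we keep all keys here only so the demo runs
    return period_sks[period].sign(msg)

def verify(period, msg, sig, pk_raw, cert):
    master_pk.verify(cert, pk_raw)           # key was certified up front
    ed25519.Ed25519PublicKey.from_public_bytes(pk_raw).verify(sig, msg)

pk0 = period_sks[0].public_key().public_bytes_raw()
verify(0, b"hello", sign(0, b"hello"), pk0, certs[0])
print("period-0 signature verified")
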
@inproceedings{jsz03,
  author = {T. Jaeger and R. Sailer and X. Zhang},
  title = {Analyzing Integrity Protection in the {SELinux} Example Policy},
  booktitle = {Proceedings of the 12th USENIX Security Symposium},
  pages = {59-74},
  month = {August},
  year = {2003},
  abstract = {In this paper, we present an approach for analyzing the
  integrity protection in the SELinux example policy. The SELinux
  example policy is intended as a starting point that administrators
  customize to create a policy for their site's security goals, but the
  complexity of the model and the size of the policy make this quite
  difficult. Our aim is to provide an access control model to express site
  security goals and resolve them against the SELinux policy.
  Ultimately, we aim to define a minimal trusted computing base (TCB)
  that satisfies Clark-Wilson integrity, by first testing for the more
  restrictive Biba integrity policy and resolving conflicts using
  Clark-Wilson semantics. Our policy analysis tool, Gokyo, implements
  the following approach: (1) it represents the SELinux example policy
  and our integrity goals; (2) it identifies conflicts between them; (3)
  it estimates the resolutions to these conflicts; and (4) provides
  information for deciding upon a resolution. Using Gokyo, we derive a
  proposal for a minimal TCB for SELinux that includes 30 subject types, and
  we identify the work remaining to ensure that this TCB is
  integrity-protected. Our analysis is performed on the SELinux example
  policy for Linux 2.4.19.},
  url = {pubs/jsz03.pdf}
}
@inproceedings{gag+03,
  author = {Geoff Goodell and William Aiello and Tim Griffin and John
    Ioannidis and Patrick McDaniel and Avi Rubin},
  title = {{Working Around BGP: An Incremental Approach to Improving
    Security and Accuracy of Interdomain Routing}},
  booktitle = {Proceedings of Network and Distributed Systems Security
    2003 (NDSS)},
  organization = {Internet Society},
  pages = {75-85},
  month = {February},
  year = {2003},
  note = {San Diego, CA},
  url = {http://www.patrickmcdaniel.org/pubs/ndss03.pdf},
  abstract = {BGP is essential to the operation of the Internet, but
    is vulnerable to both accidental failures and malicious attacks.  We
    propose a new protocol that works in concert with BGP, which
    Autonomous Systems will use to help detect and mitigate accidentally
    or maliciously introduced faulty routing information.  The protocol
    differs from previous efforts at securing BGP in that it is
    receiver-driven, meaning that there is a mechanism for recipients of
    BGP UPDATE messages to corroborate the information they receive and
    to provide feedback. We argue that our new protocol can be adopted
    incrementally, and we show that there is incentive for network
    operators to do so. We also describe our prototype implementation.}
}
@inproceedings{ezj02,
  author = {A. Edwards and X. Zhang and T. Jaeger},
  title = {Runtime Verification of Authorization Hook Placement for the Linux Security Modules Framework},
  booktitle = {Proceedings of the 9th ACM Conference on Computer and Communications Security},
  pages = {225-234},
  month = {October},
  year = {2002},
  note = {Washington, DC},
  url = {http://www.cse.psu.edu/~tjaeger/papers/ccs02.pdf},
  abstract = {We present runtime tools to assist the Linux community in
  verifying the correctness of the Linux Security Modules (LSM)
  framework. The LSM framework consists of a set of authorization hooks
  inserted into the Linux kernel to enable additional authorizations to
  be performed (e.g., for mandatory access control). When compared to
  system call interposition, authorization within the kernel has both
  security and performance advantages, but it is more difficult to
  verify that placement of the LSM hooks ensures that all the kernel's
  security-sensitive operations are authorized. We have examined both
  static and runtime analysis techniques for this verification, and have
  found them to be complementary. Static analysis is more complex to
  implement and tends to generate more false positives, but coverage of
  all type-safe execution paths is possible. Runtime analysis lacks the
  code and input coverage of static analysis, but tends to be simpler to
  gather useful information. The major simplifying factor in our runtime
  verification approach is that we can leverage the fact that most of
  the LSM hooks are properly placed to identify misplaced hooks. Our
  runtime verification tools collect the current LSM authorizations and
  find inconsistencies in these authorizations. We describe our approach
  for performing runtime verification, the design of the tools that
  implement this approach, and the anomalous situations found in an
  LSM-patched Linux 2.4.16 kernel.}
}
@inproceedings{zej02,
  author = {X. Zhang and A. Edwards and T. Jaeger},
  title = {Using {CQUAL} for Static Analysis of Authorization Hook
  Placement},
  booktitle = {Proceedings of the 11th USENIX Security Symposium},
  pages = {33-48},
  month = {August},
  year = {2002},
  url = {http://www.cse.psu.edu/~tjaeger/papers/usenix_sec2002.pdf},
  abstract = {The Linux Security Modules (LSM) framework is a set of
  authorization hooks for implementing flexible access control in the
  Linux kernel. While much effort has been devoted to defining the
  module interfaces, little attention has been paid to verifying the
  correctness of hook placement. This paper presents a novel approach to
  the verification of LSM authorization hook placement using CQUAL, a
  type-based static analysis tool. With a simple CQUAL lattice
  configuration and some GCC-based analyses, we are able to verify
  complete mediation of operations on key kernel data structures. Our
  results reveal some potential security vulnerabilities of the current
  LSM framework, one of which we demonstrate to be exploitable.  Our
  experiences demonstrate that combinations of conceptually simple tools
  can be used to perform fairly complex analyses.}
}
@inproceedings{mp02,
  author = {Patrick McDaniel and Atul Prakash},
  title = {{Methods and Limitations of Security Policy
    Reconciliation}},
  booktitle = {2002 IEEE Symposium on Security and Privacy},
  organization = {IEEE Computer Society Press},
  pages = {73-87},
  month = {MAY},
  year = {2002},
  note = {Oakland, CA},
  url = {http://www.patrickmcdaniel.org/pubs/oak2002.pdf},
  abstract = {A security policy is a means by which participant
    session requirements are specified. However, existing frameworks
    provide limited facilities for the automated reconciliation of
    participant policies. This paper considers the limits and methods of
    reconciliation in a general-purpose policy model. We identify an
    algorithm for efficient two-policy reconciliation, and show that, in
    the worst-case, reconciliation of three or more policies is
    intractable. Further, we suggest efficient heuristics for the
    detection and resolution of intractable reconciliation. Based upon
    the policy model, we describe the design and implementation of the
    Ismene policy language. The expressiveness of Ismene, and indirectly
    of our model, is demonstrated through the representation and
    exposition of policies supported by existing policy languages.  We
    conclude with brief notes on the integration and enforcement of
    Ismene policy within the Antigone communication system.}
}
@inproceedings{mpi+01,
  author = {Patrick McDaniel and Atul Prakash and Jim Irrer and  Sharad
  Mittal and Thai-Chuin Thuang},
  title = {{Flexibly Constructing Secure Groups in Antigone 2.0}},
  booktitle = {Proceedings of DARPA Information Survivability
  Conference and Exposition II},
  organization = {IEEE Computer Society Press},
  pages = {55-67},
  month = {June},
  year = {2001},
  note = {Los Angeles, CA},
  url = {http://www.patrickmcdaniel.org/pubs/discex01.pdf},
  abstract = {Group communication is increasingly used as a low cost
  building block for the development of highly available and survivable
  services in dynamic environments. However, contemporary frameworks
  often provide limited facilities for the definition and enforcement of
  precise security policies.  This paper presents the Antigone 2.0
  framework that allows the flexible specification and enforcement of
  group security policies. Enforcement is achieved through the policy
  directed composition and configuration of sets of basic security
  services implementing the group. We summarize the design of the
  Antigone 2.0 architecture, its use, and the Application Programming
  Interface (API). The use of the API is illustrated through two
  applications built on Antigone; a reliable multicast system and host
  level multicast security service. We conclude with a description of
  current status and plans for future work.}
}
@inproceedings{j01,
  author = {Trent Jaeger},
  title = {Managing access control complexity using metrics},
  pages = {131-152},
  booktitle = {Proceedings of the Sixth {ACM} Symposium on Access
		 Control Models and Technologies ({SACMAT}-01)},
  month = {May},
  year = {2001},
  abstract = {General access control models enable flexible expression
  of access control policies, but they make the verification of whether
  a particular access control configuration is safe (i.e., prevents the
  leakage of a permission to an unauthorized subject) difficult. The
  current approach to expressing safety policy in such models is to use
  constraints. When the constraints are verified, then the configuration
  is verified to be safe. However, the addition of constraints to an
  access control configuration significantly increases its complexity,
  so it quickly becomes difficult to understand the access control
  policy expressed in the configuration such that future changes can be
  made correctly. We propose an approach whereby the complexity of each
  access control configuration is estimated, so the administrators can
  see the effect of a configuration change on the future ability to
  maintain the configuration. We identify metrics for making complexity
  estimates and evaluate these metrics on some constraint examples. Our
  goal is to enable the use of flexible access control models for
  safety-critical systems by permitting limited use of constraints that
  do not complicate the configuration beyond a maintainable complexity.},
  url = {http://www.cse.psu.edu/~tjaeger/papers/sacmat01.pdf}
}
@inproceedings{hcm01,
  author = {Hugh Harney and Andrea Colegrove and Patrick McDaniel},
  title = {{Principles of Policy in Secure Groups}},
  booktitle = {Proceedings of Network and Distributed Systems Security
  2001 (NDSS)},
  organization = {Internet Society},
  month = {February},
  year = {2001},
  note = {San Diego, CA},
  url = {http://www.patrickmcdaniel.org/pubs/ndss01.pdf},
  abstract = {Security policy is increasingly being used as a vehicle
  for specifying complex entity relationships. When used to define group
  security, policy must be extended to state the entirety of the
  security context. For this reason, the policy requirements of secure
  groups are more complex than found in traditional peer communication;
  group policies convey information about associations greater and more
  abstract than their pair-wise counterparts. This paper identifies and
  illustrates universal requirements of secure group policy and reasons
  about the adherence of the Group Security Association Key Management
  Protocol (GSAKMP) to these principles.}
}
@inproceedings{ale+01,
  title = {The SawMill Framework for Virtual Memory Diversity},
  author = {Mohit Aron and Jochen Liedtke and Kevin Elphinstone
		 and Yoonho Park and Trent Jaeger and Luke Deller},
  year = {2001},
  booktitle = {Proceedings of the 2001 Australian Computer Systems Architecture Conference},
  pages = {3-10}
}
@inproceedings{tj00a,
  title = {An access control model for simplifying constraint
		 expression},
  author = {Jonathon Tidswell and Trent Jaeger},
  year = {2000},
  booktitle = {Proceedings of the ACM Conference on Computer and Communications
		 Security (CCS)},
  pages = {154-163},
  url = {http://www.cse.psu.edu/~tjaeger/papers/ccs00.pdf}
}
@inproceedings{mj00,
  author = {Patrick McDaniel and Sugih Jamin},
  title = {{Windowed Certificate Revocation}},
  booktitle = {Proceedings of IEEE INFOCOM 2000},
  organization = {IEEE},
  pages = {1406-1414},
  month = {March},
  year = {2000},
  note = {Tel Aviv, Israel},
  abstract = {The advent of electronic commerce and personal
  communications on the Internet heightens concerns over the lack of
  privacy and security. Network services providing a wide range of
  security related guarantees are increasingly based on public key
  certificates. A fundamental problem inhibiting the wide acceptance of
  existing certificate distribution services is the lack of a scalable
  certificate revocation mechanism. We argue in this paper that the
  resource requirements of extant revocation mechanisms place
  significant burden on certificate servers and network resources. We
  propose a novel mechanism called windowed revocation that satisfies
  the security policies and requirements of existing mechanisms and, at
  the same time, reduces the burden on certificate servers and network
  resources. We include a proof of correctness of windowed revocation
  and analyze worst case performance scenarios.},
  url = {http://www.patrickmcdaniel.org/pubs/info00.pdf}
}
@inproceedings{mr00,
  author = {Patrick McDaniel and Avi Rubin},
  title = {{A Response to `Can We Eliminate Certificate Revocation  Lists?'}},
  booktitle = {Proceedings of Financial Cryptography 2000},
  organization = {International Financial Cryptography Association  (IFCA)},
  month = {February},
  year = {2000},
  pages = {245-258},
  note = {Anguilla, British West Indies},
  url = {http://www.patrickmcdaniel.org/pubs/finc00.pdf},
  abstract = {The massive growth of electronic commerce on the Internet
  heightens concerns over the lack of meaningful certificate management.
  One issue limiting the availability of such services is the absence of
  scalable certificate revocation. The use of certificate revocation
  lists (CRLs) to convey revocation state in public key infrastructures
  has long been the subject of debate. Centrally, opponents of the
  technology attribute a range of semantic and technical limitations to
  CRLs. In this paper, we consider arguments advising against the use of
  CRLs made principally by Rivest in his paper "Can we eliminate
  certificate revocation lists?" [1]. Specifically, the assumptions and
  environments on which these arguments are based are separated from
  those features inherent to CRLs. We analyze the requirements and
  potential solutions for three distinct PKI environments. The
  fundamental tradeoffs between revocation technologies are identified.
  From the case study analysis we show how, in some environments, CRLs
  are the most efficient vehicle for distributing revocation state. The
  lessons learned from our case studies are applied to a realistic PKI
  environment. The result, revocation on demand, is a CRL based
  mechanism providing timely revocation information.}
}
@inproceedings{aac99,
  author = {Andrew Adamson and C.J. Antonelli and Kevin Coffman and Patrick McDaniel and Jim Rees},
  title = {{Secure Distributed Virtual Conferencing}},
  booktitle = {{Proceedings of Communications and Multimedia Security  {(CMS '99)}}},
  pages = {176-190},
  month = {September},
  year = {1999},
  note = {Katholieke Universiteit, Leuven, Belgium},
  url = {http://www.patrickmcdaniel.org/pubs/cms99.pdf},
  abstract = {We describe a secure distributed virtual conferencing application
  (SDVC) that provides high quality streaming video
  and audio using IP multicast for efficient distribution, uses
  strong authentication via cryptographic means, and (optionally) provides 
  fully encrypted communication without sacrificing the quality of 
  the medium or the user experience. We summarize our experiences with SDVC
  in a recent live demonstration and conclude with a discussion of future plans.}
}
@inproceedings{mph99,
  author = {Patrick McDaniel and Atul Prakash and Peter Honeyman},
  title = {{Antigone: A Flexible Framework for Secure Group  Communication}},
  booktitle = {{Proceedings of the 8th USENIX Security Symposium}},
  pages = {99-114},
  month = {August},
  year = {1999},
  note = {Washington, DC},
  url = {http://www.patrickmcdaniel.org/pubs/usec99.pdf},
  abstract = {Many emerging applications on the Internet requiring group
  communication have varying security requirements.  Significant strides
  have been made in achieving strong semantics and security guarantees
  within group environments.  However, in existing solutions, the scope
  of available security policies is often limited. This paper presents
  the Antigone framework. Antigone provides a suite of mechanisms from
  which flexible application security policies may be implemented. Using
  this approach, developers may choose a policy that best addresses their
  security and performance requirements. We describe Antigone's
  mechanisms, consisting of a set of microprotocols, and show how
  different security policies can be implemented using those mechanisms.
  We also present a performance study illustrating the
  security/performance tradeoffs that can be made using Antigone.}
}
@techreport{NAS-00103,
  author = {Joshua Schiffman and Thomas Moyer and Christopher Shal and Trent Jaeger and Patrick McDaniel},
  title = {{No Node Is an Island: Shamon Integrity Monitoring Approach}},
  number = {NAS-TR-0103-2009},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering, 
                  Pennsylvania State University, University Park, PA, USA},
  month = {February},
  year = {2009},
  abstract = {
Many users obtain critical information from distributed systems. As more 
Internet services leverage distributed computing approaches, like mashups and 
service-oriented architectures, the need for users to verify the authenticity 
of data from distributed systems will increase. While a variety of mechanisms 
are broadly used to authenticate principals in distributed systems, mechanisms 
to verify the integrity of the distributed computation are only used in 
limited cases. Hardware-based attestation is still the most promising approach 
for integrity verification of remote systems, but current attestation 
mechanisms only verify a single node or only apply to limited distributed 
systems. In this paper, we propose a Shamon Integrity Monitoring Approach 
(SHIMA) for integrity verification in distributed systems. SHIMA prescribes 
that specialized services, called Virtual Machine Verifiers, on each 
distributed node enforce an integrity criterion over the execution of 
system VMs and collaborate to maintain the integrity of the distributed 
system. This approach enables any system node to build a global integrity 
proof that the entire distributed system satisfies that integrity criterion. 
SHIMA is based on two insights: (1) that having a specialized component whose 
long-term integrity can be verified using traditional attestation provides a 
foundation for distributed system verification and (2) that the specialized 
component can be entrusted with the task of enforcing integrity and reporting 
enforcement status to others via attestations. We deploy a SHIMA prototype to 
verify that a Tor anonymity system runs only Tor servers that satisfy an 
integrity criterion. The resultant Tor system detects malware and policy 
misconfigurations that would enable compromise of a Tor server while adding 
negligible overhead to request processing. Thus, with SHIMA, we provide a low 
overhead mechanism for integrity verification of distributed systems that 
enforces a meaningful integrity criterion. 
}
}
@techreport{NAS-00100,
  author = {Dave King and Susmit Jha and Trent Jaeger and Somesh  
Jha and Sanjit A. Seshia},
  title = {{Towards Automated Security Mediation Placement}},
  number = {NAS-TR-0100-2008},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering,  
Pennsylvania State University, University Park, PA, USA},
  month = {November},
  year = {2008},
  abstract = {
It is important to verify that programs protect the security of the data 
they process.  Security-typed languages use type systems that augment  
the types of data with security labels to statically verify that a program 
satisfies a security property.	However, many programs exhibit behavior 
that is not compatible with a static type system.  For example, the   
actual security labels of data may not be known until runtime, requiring an 
authorization check. Also, secret data may need to be released under 
controlled conditions, requiring a declassification that changes the   
data's security label.  To resolve these conflicts within the type system, 
programmers insert mediation statements.  Placing these statements   
manually can be difficult, especially in legacy code, because of the 
complexity of security type inference. In this paper, we present a method  
that automatically determines locations in a program where mediation   
statements should be placed to resolve type system conflicts.   
To achieve this, we construct a graph that has the property that a vertex 
cut is equivalent to 
the points at which mediation statements can be inserted to allow the 
program to satisfy the type system. We output suggestions associated with 
minimum cuts of this graph, and find that this technique selects   
mediation points that are similar to mediation points selected by expert   
programmers.  
In our experiments, our tool reduces the number of locations that a 
programmer would otherwise have to inspect using current security-typed  
language tools by 85%, and more than 95% of the selected mediation   
points were classified as similar to those placed by expert programmers. 
}
}
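
The cut-based suggestion step can be sketched directly, assuming networkx is available; the tiny program graph below is invented, and minimum_node_cut stands in for the paper's own cut computation over its information-flow graph. A vertex cut between secret sources and public sinks is exactly a set of program points at which inserting mediation statements makes the program typable.

# Mediation placement as a minimum vertex cut between the secret source
# and the public sink of a small, invented program graph.
import networkx as nx

G = nx.DiGraph()
G.add_edges_from([
    ("secret_input", "parse"),
    ("parse", "check_password"),
    ("check_password", "log_result"),
    ("check_password", "reply"),
    ("reply", "public_output"),
    ("log_result", "public_output"),
])
cut = nx.minimum_node_cut(G, "secret_input", "public_output")
print("suggested mediation points:", cut)   # e.g. {'check_password'}
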
@techreport{NAS-0094,
  author = {William Enck and Machigar Ongtang and Patrick McDaniel},
  title = {{Mitigating Android Software Misuse Before It Happens}},
  number = {NAS-TR-0094-2008},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering, Pennsylvania State University, University Park, PA, USA},
  month = {September},
  year = {2008},
  note = {Updated November 2008},
  abstract = {
Mobile phones running open operating systems such as Google Android will 
soon be the norm in cellular networks. These systems expose previously 
unavailable phone and network resources to application developers. 
However, with increased exposure comes increased risk.	Poorly or 
maliciously designed applications can compromise the phone and network. 
While Android defines a base set of permissions to protect phone resources 
and core applications, it does not define what a secure phone is, relying 
on the applications to act together securely. In this paper, we develop the 
Kirin security framework to enforce policy that transcends applications, 
called policy invariants, and provides an at-installation 
self-certification process to ensure that only policy-compliant applications 
will be installed.  We begin by describing the Google Android security 
model and formally model its existing policy.  Using relatively simple 
policy invariants describing realistic security requirements, Kirin 
identified insecure policy configurations within Android leading to 
vulnerabilities in core phone services, thereby motivating an additional 
security framework defining system-wide policy. 
}
}
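
An install-time check in the spirit of Kirin's policy invariants fits in a few lines; the permission names and the invariants themselves are invented for illustration. Each invariant names a combination of permissions no single application may hold, and an application is certified only if its requested permissions violate no invariant.

# Install-time self-certification against policy invariants.
INVARIANTS = [
    {"RECEIVE_SMS", "SEND_SMS", "INTERNET"},   # e.g. an SMS-relay pattern
    {"RECORD_AUDIO", "INTERNET"},
]

def certify(app_name, requested):
    requested = set(requested)
    violations = [inv for inv in INVARIANTS if inv <= requested]
    if violations:
        print(f"{app_name}: REJECT, violates {violations}")
        return False
    print(f"{app_name}: install permitted")
    return True

certify("weather", {"INTERNET", "ACCESS_FINE_LOCATION"})
certify("spyphone", {"RECORD_AUDIO", "INTERNET", "SEND_SMS"})
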
@techreport{NAS-0096,
  author = {Divya Muthukumaran and Mohamed Hassan and Vikhyath Rao and Trent Jaeger},
  title = {Protecting Telephony Services in Mobile Phones},
  number = {NAS-TR-0096-2008},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering, Pennsylvania State University, University Park, PA, USA},
  month = {September},
  year = {2008},
  abstract = {
Mobile phones have evolved into indispensable devices encompassing a wide 
variety of features and enabling exciting new applications. The adoption of 
general-purpose operating systems for mobile devices, like Symbian, Windows, 
and Linux, has heralded the emergence of third-party applications, some of 
which may be malware or themselves vulnerable to attack. The growing 
reliance on mobile phones implies that we need to be able to protect 
fundamental functionality from being misused by these untrusted 
applications. However, much of the fundamental functionality of mobile 
phones is implemented in user-level services, such as audio, windowing, and 
telephony services. The problem is that these services have historically 
been designed on systems where the vendors controlled all the code, so they 
trust all their clients. Further, the operating system trusts all the 
clients as well, so the system is quite vulnerable to confused deputy  
attacks. In this paper, we focus on how to convert mobile phone systems to 
protect the fundamental function of telephony. Our insight is that the 
telephony services and the operating system controls need to be coordinated 
to provide comprehensive protection from attack. We extend an Openmoko 
Linux mobile phone system with SELinux protection over the system objects, 
and integrate a reference monitor into the Openmoko GSM telephony server 
that can enforce an SELinux MAC policy on telephony requests. We examine 
how the resultant system is secure against the threats from untrusted 
applications and measure its performance to show that this additional 
mediation introduces negligible overhead. While this is just one of several 
user-level services that must be secured on mobile phone devices, we hope 
that our approach provides guidance for how to build secure mobile systems. 
}
}
@techreport{NAS-0095,
  author = {Thomas Moyer and Kevin Butler and Joshua Schiffman and Patrick McDaniel and Trent Jaeger},
  title = {Scalable Asynchronous Web Content Attestation},
  number = {NAS-TR-0095-2008},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering, Pennsylvania State University, University Park, PA, USA},
  month = {September},
  year = {2008},
  abstract = {
The web is the primary means of information sharing for most organizations
and people. However, at present, there are few means of validating the
authenticity and integrity of the received content beyond identifying the
source from whence it was received (i.e., via server-side SSL certificate
validation). In this paper, we develop and evaluate a system enabling the
use of a TPM platform to tie the web server integrity state to the web
content delivered to browsers. We discuss the design and implementation of
the web server authentication services and the browser-side Firefox
validation extension. Our empirical evaluation shows that the system incurs
a modest 12.5% overhead, sustaining the retrieval of quoted documents at
near line speed. More broadly, we identify how TPM-based content services
can scale without unmanageable overheads.
}
}
@techreport{NAS-0093,
  author = {Dave King and Boniface Hicks and Michael Hicks and Trent Jaeger},
  title = {{Implicit Flows: Can't Live With `Em, Can't Live Without `Em}},
  number = {NAS-TR-0093-2008},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering, Pennsylvania State University, University Park, PA, USA},
  month = {July},
  year = {2008},
  abstract = {
Verifying that programs trusted to enforce security actually enforce 
security is a practical concern for programmers and administrators. However,
there is a disconnect between the kinds of tools that have been 
successfully applied to real software systems (such as taint mode in Perl 
and Ruby), and information-flow compilers that enforce a variant of the 
stronger security property of noninterference. Tools that have been 
successfully used to find security violations have focused on explicit 
flows of information, where high-security information is directly leaked to 
output. Analysis tools that enforce stronger security guarantees also 
prevent implicit flows of information, where high-security information can 
be inferred from a program's flow of control. We investigate both classes 
of flows that are flagged by a security-typed language compiler when 
applied to an SSH server and conclude that the extremely large number of 
false alarms makes checking production code for stronger information-flow
security properties currently infeasible. This false alarm rate must be 
improved if technology from security-typed languages is to be adopted for 
the retrofitting of legacy code in mainstream programs. 
        }
}
@techreport{NAS-0092,
  author = {Boniface Hicks and David King and Patrick McDaniel and Michael Hicks},
  title = {{Trusted Declassification: Policy Infrastructure for a Security-Typed Language}},
  number = {NAS-TR-0092-2008},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering, Pennsylvania State University, University Park, PA, USA},
  month = {July},
  year = {2008},
  abstract = {
Security-typed languages are a powerful tool for developing verifiably 
secure software applications. Programs written in these languages enforce a 
strong, global policy of noninterference which ensures that high-security 
data will not be observable on low-security channels. Because 
noninterference is typically too strong a property, most programs use some 
form of declassification to selectively leak high security information, 
e.g. when performing a password check or data encryption. Unfortunately, 
such a declassification is often expressed as an operation within a given 
program, rather than as part of a global policy, making reasoning about the 
security implications of a policy difficult. In this paper, we propose a 
simple idea we call trusted declassification in which special declassifier 
functions are specified as part of the global policy. In particular, 
individual principals declaratively specify which declassifiers they trust 
so all information flows implied by the policy can be reasoned about in 
absence of a particular program. We formalize our approach for a Java-like
language and prove a modified form of noninterference which we call 
noninterference modulo trusted methods. We have implemented our approach as 
an extension to Jif, a security-typed variant of Java, and provide our 
experience using it to build a secure email client, JPmail.
        }
}
@techreport{NAS-0091,
  author = {Boniface Hicks and Sandra Rueda and Luke {St. Clair} and Trent Jaeger and Patrick McDaniel},
  title = {{A Logical Specification and Analysis for SELinux MLS Policy}},
  number = {NAS-TR-0091-2008},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering, Pennsylvania State University, University Park, PA, USA},
  month = {July},
  year = {2008},
  abstract = {
The SELinux mandatory access control (MAC) policy has recently added a 
multilevel security (MLS) model which is able to express a fine granularity 
of control over a subject's access rights. The problem is that the richness 
of the SELinux MLS model makes it impractical to manually evaluate that a 
given policy meets certain specific properties. To address this issue, we 
have modeled the SELinux MLS model using a logical specification and 
implemented that specification in the Prolog language. Furthermore, we have 
developed some analyses for testing information flow properties of a given 
policy as well as an algorithm to determine whether one policy is compliant
with another. We have implemented these analyses in Prolog and compiled our 
implementation into a tool for SELinux MLS policy analysis, called PALMS. 
Using PALMS, we verified some important properties of the SELinux MLS 
reference policy, namely that it satisfies the simple security condition 
and *-property defined by Bell and LaPadula. We also evaluated whether the 
policy associated with a given application is compliant with the policy of
the SELinux system in which it would be deployed. 
        }
}
@techreport{NAS-0083,
  author = {Dave King and Susmit Jha and Trent Jaeger and Somesh Jha and Sanjit A. Seshia},
  title = {{On Automatic Placement of Declassifiers for Information-Flow Security}},
  number = {NAS-TR-0083-2007},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering, Pennsylvania State University, University Park, PA, USA},
  month = {November},
  year = {2007},
  note = {Updated January 2008. In submission},
  abstract = {Security-typed languages can be used to build programs that are information-flow
        secure, meaning that they do not allow secret data to leak. Declassification
        allows programs to leak secret information in carefully prescribed ways. Manually
        placing declassifiers to authorize certain flows of information can be dangerous
        because an incorrectly placed declassifier can leak far more secure data than
        intended. Additionally, the sheer number of runtime flows that can cause an
        error means that determining where to place declassifiers can be difficult. We
        present a new approach for constructing information-flow secure programs where
        declassifiers are placed such that no unintended leakage occurs. Leakage
        restrictions are specified using hard constraints and potential
        declassifier locations are ranked using soft constraints. Finally, the
        placement problem is submitted to a pseudo-Boolean optimizing SAT solver that
        selects a minimal set of declassifiers that prevent unauthorized data leakage.
        These declassifiers can be reviewed by the programmer to ensure that they
        correspond with acceptable declassification points: if not, new hard constraints
        can be added and the optimization framework can be re-invoked. Our experimental
        results indicate that our analysis suggests declassifiers that will cause no
        more leakage than those placed by programmers in a fraction of the time it would take
        to perform a manual analysis. This work provides a foundation for less expert
        programmers to build information-flow secure programs and to convert existing
        programs to be information-flow secure.}
}
@techreport{NAS-0074,
  author = {Kevin Butler and Stephen McLaughlin and Patrick McDaniel},
  title = {{Non-Volatile Memory and Disks: Avenues for Policy Architectures}},
  number = {NAS-TR-0074-2007},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering, Pennsylvania State University, University Park, PA, USA},
  month = {June},
  year = {2007},
  abstract = {As computing models change, so too do the demands on storage. Distributed and 
	    virtualized systems introduce new vulnerabilities, assumptions, and performance requirements
	    on disks. However, traditional storage systems have very limited capacity to implement needed 
	    "advanced storage" features such as integrity and data isolation. This is largely due to the
	    simple interfaces and limited computing resources provided by commodity hard-drives. A new 
	    generation of storage devices affords better opportunities to meet these new models, but little
	    is known about how to exploit them.  In this paper, we show that the recently introduced fast-access
	    non-volatile RAM-enhanced hybrid (HHD) disk architectures can be used to implement a range of
	    valuable storage-security services. We specifically discuss the use of these new architectures
	    to provide data integrity, capability-based access control, and labeled information flow at
	    the disk access layer. In this, we introduce systems that place a security perimeter at the disk
	    interface--and deal with the parent operating system only as a largely untrusted entity.}
}
@techreport{NAS-0073,
  author = {William Enck and Sandra Rueda and Joshua Schiffman and Yogesh Sreenivasan and Luke {St. Clair} and Trent Jaeger and Patrick McDaniel},
  title = {{Protecting Users From "Themselves"}},
  number = {NAS-TR-0073-2007},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering, Pennsylvania State University, University Park, PA, USA},
  month = {June},
  year = {2007},
  abstract = {Computer usage and threat models have changed drastically since the advent
	    of access control systems in the 1960s.  Instead of multiple users sharing a single file system,
	    each user has many devices with their own storage.  Thus, a user's fear has shifted away from 
	    other users' impact on the same system to the threat of malice in the software they intentionally
	    or even inadvertently run.  As a result, we propose a new vision for access control: one where
	    individual users are isolated by default and where the access of individual user applications
	    is carefully managed.  A key question is how much user administration effort would be required
	    if a system implementing this vision were constructed.  In this paper, we outline our work on
	    just such a system, called PinUP, which manages file access on a per application basis for each user.
	    We use historical data from our lab's users to explore how much user and system administration effort
	    is required.  Since administration is required for user sharing in PinUP, we find that sharing via mail
	    and file repositories requires a modest amount of administrative effort, a system policy change every
	    couple of days and a small number of user administrative operations a day.  We are encouraged that 
	    practical administration on such a scale is possible given an appropriate and secure user approach.}
}
@techreport{NAS-0071,
  author = {Dhananjay Bapat and Kevin Butler and Patrick McDaniel},
  title = {{Towards Automated Privilege Separation}},
  number = {NAS-TR-0071-2007},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering, Pennsylvania State University, University Park, PA, USA},
  month = {May},
  year = {2007},
  abstract = { Applications are subject to threat from a number of
	    attack vectors, and limiting their attack surface is vital. By
	    using privilege separation to constrain application access to
	    protected resources, we can mitigate the threats against the
	    application. Previous examinations of privilege separation
	    either entailed significant manual effort or required access to
	    the source code. We consider a method of performing privilege
	    separation through black-box analysis. We consider similar
	    applications to the target and infer states of execution, and
	    determine unique trigger system calls that cause transitions. We
	    use these for the basis of state-based policy enforcement by
	    leveraging the Systrace policy enforcement mechanism. Our
	    results show that we can infer state transitions with a high
	    degree of accuracy, while our modifications to Systrace result
	    in more granular protection by limiting system calls depending
	    on the application's state. The modified Systrace increases the
	    size of the Apache web server's policy file by less than 17.5%. }
}
@techreport{NAS-0069,
  author = {Dave King and Trent Jaeger and Somesh Jha and Sanjit A. Seshia},
  title = {{Effective Blame for Information-Flow Violations}},
  number = {NAS-TR-0069-2007},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering, Pennsylvania State University, University Park, PA, USA},
  month = {May},
  year = {2007},
  note = {Updated March 2008},
  abstract = {Programs trusted with secure data should not release this data in ways contrary
        to system policy. However, when a program contains an illegal flow of
        information, current information-flow reporting techniques are inadequate for
        determining the cause of the error. Reasoning about information-flow errors can
        be difficult, as the flows involved can be quite subtle. We present a general
        model for information-flow blame that can explain the source of such
        security errors in code. This model is implemented by changing the
        information-flow verification procedure to: (1) generate supplementary
        information to reveal otherwise hidden program dependencies; (2) modify the
        constraint solver to construct a blame dependency graph; and (3) develop an
        explanation procedure that returns a complete and minimal error
        report. Our experiments show that information-flow errors can generally be
        explained and resolved by viewing only a small fraction of the total code.}
}
@techreport{NAS-0068,
  author = {Luke {St. Clair} and Josh Schiffman and Trent Jaeger and Patrick McDaniel},
  title = {{Establishing and Sustaining System Integrity via Root of Trust Installation}},
  number = {NAS-TR-0067-2007},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering, Pennsylvania State University, University Park, PA, USA},
  month = {April},
  year = {2007},
  abstract = {Integrity measurements provide a means by which
        distributed systems can assess the trustability of potentially
        compromised remote hosts. However, current measurement
        techniques simply assert the identity of software, but provide
        no indication of the ongoing status of the system or its data.
        As a result, a number of significant vulnerabilities can result
        if the system is not configured and managed carefully. In this
        paper, we examine what is necessary to prove the integrity of
        the core trusted computing base (sCore) of a distributed
        security architecture, called the Shamon. We identify a specific
        set of vulnerabilities that would result from the use of current
        integrity measurement techniques, and propose a Root of Trust
        Installation (ROTI) as a foundation for high integrity systems.
        A ROTI binds the trusted computing base software and data to a
        trusted installer to enable straightforward, comprehensive
        integrity verification for a system. We built a custom installer
        for our sCore and show that it is possible to bind system
        integrity to installation for a usable sCore system with nominal
        performance penalties. The resulting approach shows that
        practical systems management approaches can achieve high degrees
        of integrity with a modest administrative effort and modest
        overhead at installation and boot.  }
}
@techreport{NAS-0063,
  author = {William Enck and Patrick McDaniel and Trent Jaeger},
  title = {{PinUP: Protecting User Files by Reducing Application Access}},
  number = {NAS-TR-0063-2007},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering, Pennsylvania State University, University Park, PA, USA},
  month = {February},
  year = {2007},
  note = {Updated January 2008},
  abstract = { Users commonly download, patch, and use applications
    such as email clients, office applications, and media-players from
    the Internet.  Such applications are run with the user's full
    permissions.  Because system protections do not differentiate
    applications from each other, any malcode present in the downloaded
    software can compromise or otherwise leak all user data.
    Interestingly, our investigations show that common applications
    adhere to recognizable workflows on user data.  In this paper, we
    take advantage of this reality by developing protection mechanisms
    that ``pin'' user files to the applications that may use them.
    These mechanisms restrict access to user data to explicitly stated
    workflows--thus preventing malcode from exploiting user data not
    associated with that application. We describe our implementation of
    PinUP on the Linux Security Modules framework, explore its
    performance, and study several practical use cases.  Through these
    activities, we show that user data can be protected from untrusted
    applications while retaining the ability to receive the benefits of
    those applications. }
}
@techreport{NAS-0060,
  author = {Lisa Johansen and Kevin Butler and William Enck and Patrick Traynor and Patrick McDaniel},
  title = {{Grains of SANs: Building Storage Area Networks from Memory Spots}},
  number = {NAS-TR-0060-2007},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering, Pennsylvania State University, University Park, PA, USA},
  month = {January},
  year = {2007},
  abstract = {Emerging storage platforms, such as HP's memory spot, are
    increasingly becoming smaller, faster and less expensive. Whether
    intended for holding digital media or personal documents, such
    systems currently function as independent receptacles of data. As we
    demonstrate in this paper, however, the true power of these devices
    is their ability to form the flexible building blocks of larger
    logical storage systems. Such systems allow for the creation of
    continuously reconfigurable storage devices, capable of the dynamic
    addition, removal and repurposing of component nodes. Moreover, such
    changes can be made transparent to the user's view of the storage
    system. To illustrate these properties, we design and implement a
    granular storage system based on memory spots. In so doing, we
    identify and address significant challenges in areas including
    organization, security and reliability. We then conduct an extensive
    analysis of performance and demonstrate the ability to achieve
    throughputs of greater than 3 Mbps to our unoptimized logical
    storage device. These results demonstrate the potential for new
    applications and systems built on granular storage.  }
}
@techreport{NAS-0059,
  author = {Patrick Traynor and Vikhyath Rao and Trent Jaeger and Patrick McDaniel and Thomas {La Porta}},
  title = {{From Mobile Phones to Responsible Devices}},
  number = {NAS-TR-0059-2007},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering, Pennsylvania State University, University Park, PA, USA},
  month = {January},
  year = {2007},
  abstract = {Mobile phones have evolved from simple voice terminals
    into highly-capable, general-purpose computing platforms. While
    people are becoming increasingly more dependent on such devices to
    perform sensitive operations, protect secret data, and be available
    for emergency use, it is clear that phone operating systems are not
    ready for such responsibilities. Through a pair of vulnerabilities,
    we demonstrate that there are a myriad of unmanaged mechanisms on a
    phone system, and that control of these mechanisms is vital to
    achieving reliable use. Further, we find that a number of features
    of mobile phones are beyond the controls of traditional operating
    systems as well.  In this paper, we examine the requirements  for
    providing effective mediation, access control, resource management
    and quality of service for phone systems. Also, we investigate the
    convergence of cellular networks with the Internet, and its impact
    on effective resource management. Based on these results, we argue
    for phone systems that enable predictable behavior in a network --
    they can protect themselves and ensure predictable network impact
    from their applications.  }
}
@techreport{IBM-TR01,
  author = {Trent Jaeger and Reiner Sailer and Yogesh Sreenivasan},
  title = {{Managing the Risk of Covert Information Flows in Virtual Machine Systems}},
  number = {RC24154},
  institution = {IBM},
  month = {January},
  year = {2007},
  url = {http://nsrc.cse.psu.edu/tech_report/rc24154.pdf},
  abstract = {Flexible mandatory access control (MAC) enforcement is
    now available for virtual machine systems.  For example, the sHype
    MAC system for the Xen virtual machine monitor is part of the
    mainline Xen distribution.  Such systems offer the isolation of VM
    systems with the flexible security of MAC enforcement.  A problem is
    that such MAC VM systems will only be assured at modest levels (e.g.,
    Common Criteria EAL4), so they may contain covert channels.  Covert
    channels are often difficult to identify and harder to remove, so we
    propose an approach to manage possible covert leakage to enable
    verification of security guarantees.  Typically, covert channels are
    outside of access control policies, but we propose an approach that
    includes both overt flows and covert flows to assess the possible
    risk of information leakage due to their combination.  We define the concept
    of a risk flow policy that describes the authorized risks due to
    covert flows.  In this paper, we evaluate the ability of four policy
    models to express risk flow policies.  Further, we examine how such
    policies will be enforced in VM systems.  We find that variants of
    the Chinese Wall model and Bell-LaPadula model have features that
    enable such policies to be enforced in the context of sHype's Type
    Enforcement model.  }
}
@techreport{NAS-0061,
  author = {Boniface Hicks and Sandra Rueda and Trent Jaeger and Patrick McDaniel},
  title = {{From Trusted to Secure: Building and Executing Applications that Enforce System Security}},
  number = {NAS-TR-0061-2007},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering, Pennsylvania State University, University Park, PA, USA},
  month = {January},
  year = {2007}
}
@techreport{NAS-0056,
  author = {Luke {St. Clair} and Josh Schiffman and Trent Jaeger and Patrick McDaniel},
  title = {{Sum of the Parts: Composing Trust from Validation Primitives}},
  number = {NAS-TR-0056-2006},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering, Pennsylvania State University, University Park, PA, USA},
  month = {November},
  year = {2006},
  abstract = {A recent proposal advocates the construction of a shared
    reference monitor, Shamon, as the basis for enforcing mandatory
    access control in Internet-wide distributed systems. In a sense, the
    Shamon effort explored how distributed system policies are enforced
    on individual systems, where the trust in those systems is based on
    hardware-based integrity measurement. However, the detailed
    requirements for trust in individual systems are not defined, and
    importantly, the development of that trust across large distributed
    systems is not discussed. In this paper, we develop the requirements
    for verifying trust in individual systems, and how the methods used
    to verify trust may enable scalable trust in large, distributed
    systems. Based on an initial prototype for conveying attestations
    during IPsec negotiation, we outline the integrity measurements that
    we would take to justify trust to a remote party. We identify
    measurements that convey trust in the system's enforcement mechanism
    that can be verified by as many remote parties as possible, and
    further can be used to convey trust transitively to other parties.
    We also identify distributed system (coalition) measurements that
    enable members of the distributed system to verify that coalition
    security requirements are being enforced correctly on each
    component.}
}
@techreport{NAS-0051,
  author = {Patrick Traynor and William Enck and Patrick McDaniel and Thomas La Porta},
  title = {{Mitigating Attacks on Open Functionality in SMS-Capable Cellular Networks}},
  number = {NAS-TR-0051-2006},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering, Pennsylvania State University, University Park, PA, USA},
  month = {October},
  year = {2006}
}
@techreport{NAS-0049,
  author = {Boniface Hicks and Sandra Rueda and Trent Jaeger and Patrick McDaniel},
  title = {{Breaking Down the Walls of Mutual Distrust: Security-typed Email Using Labeled IPsec}},
  number = {NAS-TR-0049-2006},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering, Pennsylvania State University, University Park, PA, USA},
  month = {September},
  year = {2006}
}
@techreport{NAS-0044,
  author = {Patrick Traynor and Raju Kumar and Heesook Choi and Guohong Cao and Sencun Zhu and Thomas La Porta},
  title = {{Efficient Hybrid Security Mechanisms for Heterogeneous Sensor Networks}},
  number = {NAS-TR-0044-2006},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering, Pennsylvania State University, University Park, PA, USA},
  month = {August},
  year = {2006}
}
@techreport{NAS-0042,
  author = {Patrick McDaniel},
  title = {{Understanding Equivalence in High-Level and Information Flow Policy}},
  number = {NAS-TR-0042-2006},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering, Pennsylvania State University, University Park, PA, USA},
  month = {July},
  year = {2006}
}
@techreport{NAS-0040,
  author = {Lisa Johansen and Kevin Butler and Mike Rowell and Patrick McDaniel},
  title = {{Email Communities of Interest}},
  number = {NAS-TR-0040-2006},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering, Pennsylvania State University, University Park, PA, USA},
  month = {May},
  year = {2006}
}
@techreport{NAS-0035,
  author = {Boniface Hicks and Kiyan Ahmadizadeh and Patrick McDaniel},
  title = {{From Languages to Systems: Understanding Practical Application Development in Security-typed Languages}},
  number = {NAS-TR-0035-2006},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering, Pennsylvania State University, University Park, PA, USA},
  month = {April},
  year = {2006}
}
@techreport{NAS-0034,
  author = {Heesook Choi and William Enck and Jaesheung Shin and Patrick McDaniel and Thomas {La Porta}},
  title = {{ASR: Anonymous and Secure Reporting of Traffic Forwarding Activity in Mobile Ad Hoc Networks}},
  number = {NAS-TR-0034-2006},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering, Pennsylvania State University, University Park, PA, USA},
  month = {March},
  year = {2006}
}
@techreport{MBCJS2006,
  author = {J. {McCune} and S. Berger and R. Caceres and T. Jaeger and R. Sailer},
  title = {DeuTeRiuM: A system for distributed mandatory access control},
  institution = {IBM T.J. Watson Research Center},
  number = {RC23865},
  month = {February},
  note = {Submitted for publication},
  year = {2006}
}
@techreport{NAS-0029,
  author = {William Enck and Kevin Butler and Thomas Richardson and Patrick McDaniel},
  title = {{Securing Non-Volatile Main Memory}},
  number = {NAS-TR-0029-2006},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering, Pennsylvania State University, University Park, PA, USA},
  month = {February},
  year = {2006}
}
@techreport{JHL2005,
  author = {T. Jaeger and S. Hallyn and J. Latten},
  title = {Leveraging IPsec for Mandatory Access Control of Linux Network Communications},
  institution = {IBM T.J. Watson Research Center},
  number = {RC23642},
  month = {April},
  note = {Presented at 21st Annual Computer Security Applications 
		 Conference; Tucson, Arizona; December 2005},
  year = {2005}
}
@techreport{hmh05,
  author = {Boniface Hicks and Patrick McDaniel and Ali Hurson},
  title = {{Information flow control in database security: A case study for secure
  programming with JIF}},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science, Pennsylvania State University},
  number = {NAS-TR-0011-2005},
  month = {April},
  year = {2005},
  url = {http://www.cse.psu.edu/~phicks/pubs/papers/hicks05tr0011-2005.pdf},
  abstract = {Because of the increasing demands for privacy and
  integrity guarantees in applications that handle sensitive, electronic
  data, there is a need for automated software development tools and
  techniques for enforcing this security. Although there have been many
  attempts to apply security to existing systems after the fact,
  manifold failures indicate that this approach should be revisited. To
  the contrary, experience indicates that secure systems must be
  designed with explicit policies from the beginning and that there
  should be some automated, mathematically-verifiable mechanism to aid
  programmers in doing this correctly. Recent research in language-based
  security has made great strides towards the development of
  sophisticated and robust languages for programming with explicit
  security policies. Insufficient experience with these languages,
  however, has left them untested and impractical for writing real,
  distributed applications. In this paper, we present our experiences of
  working with Jif, a Java-based, security-typed language, in building a
  distributed, database application. Our experience has indicated the
  current impracticality of programming in Jif, but has helped us to
  identify language development tools and automation algorithms that
  could aid in making Jif more practical for developing real,
  distributed applications.}
}
@techreport{hkm05,
  author = {Boniface Hicks and Dave King and Patrick McDaniel},
  title = {{Declassification with Cryptographic Functions in a
  Security-Typed Language}},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science, Pennsylvania State University},
  number = {NAS-TR-0004-2005},
  month = {January},
  year = {2005},
  note = {({\it updated May 2005})},
  abstract = {Security-typed languages are powerful tools for provably
  enforcing noninterference.  Real computing systems, however, often
  intentionally violate noninterference by deliberately releasing (or
  declassifying) sensitive information.  These systems frequently
  trust cryptographic functions to achieve declassification while
  still maintaining confidentiality.  We introduce the notion of
  trusted functions that implicitly act as declassifiers within a
  security-typed language.  Proofs of the new language's soundness and
  its enforcement of a weakened form of noninterference are given.
  Additionally, we implement trusted functions used for
  declassification in the Jif language.  This represents a step
  forward in making security-typed languages more practical for use in
  real systems.}
}
@techreport{abm04,
  author = {W. Aiello and K. Butler and P. McDaniel},
  title = {{Path Authentication in Interdomain Routing}},
  number = {NAS-TR-0002-2004},
  institution = {Network and Security Research Center},
  address = {Department of Computer Science and Engineering, Pennsylvania State University, University Park, PA, USA},
  month = {November},
  year = {2004},
  url = {http://www.patrickmcdaniel.org/pubs/nas-tr-0002-2004.pdf},
  abstract = {Interdomain routing is implemented on the Internet
  through the Border Gateway Protocol (BGP).  Many approaches have
  been proposed to mitigate or solve the many problems of BGP
  security; yet, none of the proposed solutions have been widely
  deployed.  The lack of adoption is largely caused by a failure to
  find an acceptable balance between deployability, cost, and
  security.  In this paper, we study one aspect of the BGP security
  puzzle: path validation.  We develop formal semantics for path and
  route attestations, which provide the basis for a suite of
  cryptographic proof systems for path validation.  We analyze the
  security relevant stability of paths in the Internet and profile
  resource consumption of the proposed constructions via trace-based
  simulations.  Our constructions are shown through these experiments
  to reduce signature validation costs by as much as 97.3% over
  existing proposals while requiring nominal storage resources.  We
  conclude by considering how our solution can be deployed in the
  Internet.}
}
@techreport{bfm+04,
  author = {Kevin Butler and Toni Farley and Patrick McDaniel and Jennifer Rexford},
  title = {{A Survey of BGP Security Issues and Solutions}},
  number = {TD-5UGJ33},
  institution = {AT&T Labs - Research, Florham Park, NJ},
  month = {February},
  year = {2004},
  note = {({\it updated June 2004})}
}
@inproceedings{MBL10,
  author = {Patrick McDaniel and Kevin Butler and Stephen McLaughlin
            and Radu Sion and Erez Zadok and Marianne Winslett},
  title = {Towards a Secure and Efficient System for End-to-End Provenance},
  booktitle = {2nd USENIX Workshop on the Theory and Practice of Provenance},
  month = {February},
  year = {2010},
  abstract = {
Work on the End-to-End Provenance System (EEPS) began in the late 
summer of 2009. The EEPS effort seeks to explore the three central 
questions in provenance systems: (1) Where and how do I design secure 
host-level provenance collecting instruments (called provenance monitors)?; 
(2) How do I extend completeness and accuracy guarantees to distributed 
systems and computations?; and (3) What are the costs associated 
with provenance collection? This position paper discusses our initial 
exploration into these issues and posits several challenges to the 
realization of the EEPS vision.
  }
}
@inproceedings{beh+08,
  author = {Kevin Butler and William Enck and Harri Hursti and Stephen 
            McLaughlin and Patrick Traynor and Patrick McDaniel},
  title = {{Systemic Issues in the Hart InterCivic and Premier Voting 
            Systems: Reflections Following Project EVEREST}},
  booktitle = {Proceedings of the USENIX/ACCURATE Electronic Voting 
               Technology (EVT) Workshop},
  month = {July},
  supercedes = {NAS-0087,NAS-0088},
  year = {2008},
  abstract = {
The State of Ohio commissioned the EVEREST study in late summer of 2007. The 
study participants were charged with an analysis of the usability, stability, 
and security of all voting systems used in Ohio elections. This paper details 
the approach and results of the security analysis of the Premier and Hart 
systems within the EVEREST effort. As in previous studies, we found the
election systems to be critically flawed in ways that are practically and easily
exploitable.  Such exploits could affect election results, prevent legitimate
votes from being cast, or simply cast doubt on the legitimacy of the election 
itself.  In this effort we identified new areas of concern including novel 
exploitable failures of software and election data integrity protection and 
the discovery of dangerous hidden software features. We begin by describing in 
depth our systematic methodology for identifying and validating 
vulnerabilities appropriate for  the current complex political climate, and 
detail and illustrate broad classes of vulnerabilities uncovered using this 
approach.  We conclude by considering the impact of this study both in terms 
of the tangible vulnerabilities discovered and as a model for performing 
future analyses. 
  }
}
@inproceedings{ERSSSJD07,
  author = {William Enck and Sandra Rueda and Yogesh Sreenivasan and Joshua Schiffman and Luke St. Clair and Trent Jaeger and Patrick McDaniel},
  title = {Protecting Users from "Themselves"},
  booktitle = {Proceedings of the 1st ACM Computer Security Architectures Workshop},
  month = {November},
  year = {2007},
  address = {Alexandria, VA},
  url = {http://www.cse.psu.edu/~enck/pubs/csaw07.pdf},
  abstract = {Computer usage and threat models have changed
    drastically since the advent of access control systems in the 1960s.
    Instead of multiple users sharing a single file system, each user
    has many devices with their own storage. Thus, a user's fear has
    shifted away from other users' impact on the same system to the
    threat of malice in the software they intentionally or even
    inadvertently run. As a result, we propose a new vision for access
    control: one where individual users are isolated by default and
    where the access of individual user applications is carefully
    managed. A key question is how much user administration effort would
    be required if a system implementing this vision were constructed.
    In this paper, we outline our work on just such a system, called
    PinUP, which manages file access on a per application basis for each
    user. We use historical data from our lab's users to explore how
    much user and system administration effort is required. Since
    administration is required for user sharing in PinUP, we find that
    sharing via mail and file repositories requires a modest amount of
    administrative effort, a system policy change every couple of days
    and a small number of user administrative operations a day. We are
    encouraged that practical administration on such a scale is possible
    given an appropriate and secure user approach.}
}
@inproceedings{bmm07,
  address = {Alexandria, VA},
  author = {Kevin Butler and Stephen McLaughlin and Patrick McDaniel},
  title = {{Non-Volatile Memory and Disks: Avenues for Policy Architectures}},
  booktitle = {{First Computer Security Architecture Workshop (CSAW 2007)}},
  month = {November},
  year = {2007},
  url = {http://siis.cse.psu.edu/pubs/csaw07.pdf},
  abstract = {As computing models change, so too do the demands on
  storage. Distributed and virtualized systems introduce new
  vulnerabilities, assumptions, and performance requirements on disks.
  However, traditional storage systems have very limited capacity to
  implement needed advanced storage features such as integrity and
  data isolation. This is largely due to the simple interfaces and
  limited computing resources provided by commodity hard-drives. A new
  generation of storage devices affords better opportunities to meet
  these new models, but little is known about how to exploit them. In
  this paper, we show that the recently introduced fast-access
  non-volatile RAM-enhanced hybrid (HHD) disk architectures can be used
  to implement a range of valuable storage-security services. We
  specifically discuss the use of these new architectures to provide
  data integrity, capability-based access control, and labeled
  information flow at the disk access layer. In this, we introduce
  systems that place a security perimeter at the disk interface--and deal
  with the parent operating system only as a largely untrusted entity.}
}
@inproceedings{rrr+07,
  address = {Princeton, NJ},
  author = {S. Raskhodnikova and D. Ron and R. Rubinfeld and A. Smith},
  title = {{Sublinear Algorithms for Approximating String Compressibility}},
  booktitle = {{The 11th International Workshop on Randomization and Computation (RANDOM 2007)}},
  month = {August},
  year = {2007},
  url = {http://www.cse.psu.edu/~asmith/pubs/2008/comp-random-full-Nov-07.pdf},
  abstract = {This work raises the question of approximating the
  compressibility of a string with respect to a fixed compression
  scheme, in sublinear time. This question is studied in detail for two
  popular lossless compression schemes: run-length encoding (RLE) and
  Lempel-Ziv (LZ). Sublinear algorithms are presented for approximating
  compressibility with respect to both schemes. Several lower bounds are
  also proven that show that our algorithms for both schemes cannot be
  improved significantly.

This investigation of LZ yields results whose interest goes beyond the
  initial questions it set out to study. In particular, it leads to
  combinatorial structural lemmas that relate the compressibility of a
  string with respect to Lempel-Ziv to the number of distinct short
  substrings contained in it. In addition, it is shown that approximating
  the compressibility with respect to LZ is related to approximating the
  support size of a distribution.}
}
@inproceedings{HKM07,
  address = {San Diego, CA},
  author = {Boniface Hicks and Dave King and Patrick McDaniel},
  booktitle = {Proceedings of the 2nd ACM SIGPLAN Workshop on
    Programming Languages and Analysis for Security (PLAS '07)},
  editor = {Michael Hicks},
  month = {June 14},
  publisher = {ACM Press},
  title = {Jifclipse: Development Tools for Security-Typed
    Applications},
  year = {2007},
  url = {http://siis.cse.psu.edu/pubs/plas07.pdf},
  abstract = {Security-typed languages such as Jif require the
    programmer to label variables with information flow security
    policies as part of application development. The compiler then flags
    errors wherever information leaks may occur. Resolving these
    information leaks is a critical task in security-typed language
    application development. Unfortunately, because information flows
    can be quite subtle, simple error messages tend to be insufficient
    for finding and resolving the source of information leaks; more
    sophisticated development tools are needed for this task. To this
    end we provide a set of principles to guide the development of such
    tools. Furthermore, we implement a subset of these principles in an
    integrated development environment (IDE) for Jif, called Jifclipse,
    which is built on the Eclipse extensible development platform. Our
    plug-in provides a Jif programmer with additional tools to view
    hidden information generated by a Jif compilation, to suggest fixes
    for errors, and to get more specific information behind an error
    message.  Better development tools are essential for making
    security-typed application development practical; Jifclipse is a
    first step in this process.}
}
@inproceedings{cbs+06,
  address = {Alexandria, VA},
  author = {Shiva Chaitanya and Kevin Butler and Anand Sivasubramaniam
    and Patrick McDaniel and Murali Vilayannur},
  title = {Design, implementation and evaluation of security in
    iSCSI-based network storage systems},
  booktitle = {The 2nd International Workshop on Storage Security and
    Survivability (StorageSS 2006)},
  year = {2006},
  month = {October},
  url = {http://www.patrickmcdaniel.org/pubs/storss06.pdf},
  abstract = {This paper studies the performance and security aspects
    of the iSCSI protocol in a network storage based system.  Ethernet
    speeds have been improving rapidly and network throughput is no
    longer considered a bottleneck when compared to Fibre-channel
    based storage area networks. However, when security of the data
    traffic is taken into consideration, existing protocols like IPSec
    prove to be a major hindrance to the overall throughput. In this
    paper, we evaluate the performance of iSCSI when deployed over
    standard security protocols and suggest lazy crypto approaches to
    alleviate the processing needs at the server. The testbed consists 
    of a cluster of Linux machines directly connected to the
    server through a Gigabit Ethernet network. Micro and application
    benchmarks like BTIO and dbench were used to analyze the performance
    and scalability of the different approaches. Our proposed lazy
    approaches improved throughput by as much as 46% for
    microbenchmarks and 30% for application benchmarks in comparison to
    the IPSec based approaches.}
}
@inproceedings{jms+06,
  address = {Vancouver, B.C., Canada},
  author = {Trent Jaeger and Patrick McDaniel and Luke {St. Clair} and
    Ramon Caceres and Reiner Sailer},
  title = {Shame on Trust in Distributed Systems},
  booktitle = {Proceedings of the First Workshop on Hot Topics in
    Security (HotSec '06)},
  year = {2006},
  month = {July},
  url = {http://www.patrickmcdaniel.org/pubs/hotsec06.pdf},
  abstract = {Approaches for building secure, distributed systems have
    fundamental limitations that prevent the construction of dynamic,
    Internet-scale systems. In this paper, we propose a concept of a
    shared reference monitor or Shamon that we believe will provide a
    basis for overcoming these limitations. First, distributed systems
    lack a principled basis for trust in the trusted computing bases of
    member machines. In most distributed systems, a trusted computing
    base is assumed. However, the fear of compromise due to
    misconfiguration or vulnerable software limits the cases where this
    assumption can be applied in practice. Where such trust is not
    assumed, current solutions are not scalable to large systems [7,
    20]. Second, current systems do not ensure the enforcement of the
    flexible, distributed system security goals. Mandatory access
    control (MAC) policies aim to describe enforceable security goals,
    but flexible MAC solutions, such as SELinux, do not even provide a
    scalable solution for a single machine (due to the complexity of
    UNIX systems), much less a distributed system. A significant change
    in approach is necessary to develop a principled trusted computing
    base that enforces system security goals and scales to large
    distributed systems. }
}
@inproceedings{bm06,
  address = {Arlington, VA},
  author = {Kevin Butler and Patrick McDaniel},
  title = {Testing Large Scale BGP Security in Replayable Network
    Environments},
  booktitle = {DETER Community Workshop on Cyber Security
    Experimentation and Test},
  year = {2006},
  month = {June},
  url = {http://www.patrickmcdaniel.org/pubs/deter06.pdf},
  abstract = {Understanding the operation of BGP and providing for its
    security is essential to the well-being of the Internet. To date,
    however, simulating every autonomous system comprising the Internet,
    in order to test proposals and security solutions, has been
    infeasible. We have developed lseb, a large-scale BGP simulator
    that generates realistic network topologies from routing data,
    provides the ability to replay network events from any point in the
    past, and allows what-if scenarios, such as simulated attacks or
    defense mechanisms, to test the resilience of the critical network
    infrastructure. We describe topology extraction tools that we have
    developed and the design and implementation of lseb. We also discuss
    visualization tools that allow a graphical topology representation
    and provide an example of an attack scenario that can be modeled
    with lseb.}
}
@inproceedings{bqm06,
  address = {Arlington, VA},
  author = {Kevin Butler and Patrick McDaniel and Sophie Qiu},
  title = {BGPRV: Retrieving and Processing BGP Data with Efficiency
    and Convenience},
  booktitle = {DETER Community Workshop on Cyber Security
    Experimentation and Test},
  year = {2006},
  month = {June},
  url = {http://www.patrickmcdaniel.org/pubs/deter06b.pdf},
  abstract = {BGPRV is a tool aimed at aiding the analysis of BGP updates
    or routing table snapshots. It provides a set of library functions
    that make it possible to retrieve and process archived BGP data with
    efficiency and convenience.  It encapsulates the functions of
    scanning the Route Views route repository, downloading data for a
    specified time frame, processing the binary MRT format, and
    filtering incomplete or undesired data, and returns BGP data as a
    single stream. With the abstraction of operations and simplified
    usage, it provides users with clean and organized BGP data that is
    ready for further processing and analysis.}
}
@inproceedings{HKM+06,
  address = {Ottawa, Canada},
  author = {Boniface Hicks and Dave King and Patrick McDaniel and Michael Hicks},
  title = {Trusted Declassification: High-level policy for a security-typed language},
  booktitle = {Proceedings of the 1st ACM SIGPLAN Workshop on Programming Languages and Analysis for Security ({PLAS} '06)},
  publisher = {ACM Press},
  month = {June},
  year = {2006},
  url = {http://www.cse.psu.edu/~phicks/pubs/papers/hicks06trustedDeclass.pdf},
  abstract = {Security-typed languages promise to be a powerful tool
    with which provably secure software applications may be developed.
    Programs written in these languages enforce a strong, global policy
    of noninterference which ensures that high-security data will not be
    observable on low-security channels. Because noninterference is
    typically too strong a property, most programs use some form of
    declassification to selectively leak high security information, e.g.
    when performing a password check or data encryption. Unfortunately,
    such a declassification is often expressed as an operation within a
    given program, rather than as part of a global policy, making
    reasoning about the security implications of a policy more
    difficult.

In this paper, we propose a simple idea we call trusted declassification
    in which special declassifier functions are specified as part of the
    global policy. In particular, individual principals declaratively
    specify which declassifiers they trust so that all information flows
    implied by the policy can be reasoned about in absence of a
    particular program. We formalize our approach for a Java-like
    language and prove a modified form of noninterference which we call
    noninterference modulo trusted methods. We have implemented our
    approach as an extension to Jif and provide some of our experience
    using it to build a secure e-mail client.}
}
@inproceedings{alm05,
  author = {Ali Al-Lawati and Dongwon Lee and Patrick McDaniel},
  title = {Blocking in Private Information Matching},
  booktitle = {Proceedings of Second International ACM SIGMOD Workshop
  on Information Quality in Information Systems},
  month = {July},
  year = {2005},
  note = {Baltimore, MD},
  url = {http://siis.cse.psu.edu/pubs/iqis05.pdf},
  abstract = {In this paper, the problem of quickly matching records
  (i.e., the record linkage problem) from two autonomous sources without
  revealing privacy to the other parties is considered. In particular,
  our focus is to devise a secure blocking scheme to improve the
  performance of record linkage significantly while remaining secure.
  Although there has been work on private record linkage, none of it has
  considered adopting the blocking framework. Therefore, our proposed
  blocking-aware private record linkage can perform large-scale record
  linkage without revealing privacy. Preliminary experimental results
  showing the potential of the proposal are reported.}
}
@inproceedings{GJPSDC2005,
  author = {J. Linwood Griffin and T. Jaeger and R. Perez
		 and R. Sailer and L. van Doorn and R.
		 Caceres},
  title = {Trusted Virtual Domains: Toward secure distributed services},
  booktitle = {Proceedings of the First Workshop on Hot Topics in Systems Dependability},
  month = {June},
  year = {2005},
  url = {http://siis.cse.psu.edu/pubs/GJPSDC2005.pdf},
  abstract = {The focus of trusted computing efforts to date has been to
  create islands of trust in a sea of distrust, identifying these
  islands as dependable domains with a solid base that can be used for
  building applications upon which critical services depend. The aim of
  our work is to extend this solid base by building "bridges" among
  trusted islands, with such goals as enabling meaningful trade
  agreements between islands, enabling migration of individual island
  inhabitants, and enabling geography-independent affiliation among
  inhabitants of different islands.}
}
@inproceedings{akm+05,
  author = {William Aiello and Chuck Kalmanek and Patrick McDaniel and
  Shubo Sen and Oliver Spatscheck},
  title = {Analysis of Communities Of Interest in Data Networks},
  booktitle = {Passive and Active Measurement Workshop 2005},
  month = {March},
  year = {2005},
  note = {Boston, MA},
  url = {http://www.patrickmcdaniel.org/pubs/pam05.pdf},
  abstract = {Communities of interest (COI) have been applied in a
  variety of environments ranging from characterizing the online buying
  behavior of individuals to detecting fraud in telephone networks. The
  common thread among these applications is that the historical COI of
  an individual can be used to predict future behavior as well as the
  behavior of other members of the COI. It would clearly be beneficial
  if COIs can be used in the same manner to characterize and predict the
  behavior of hosts within a data network. In this paper, we introduce a
  methodology for evaluating various aspects of COIs of hosts within an
  IP network. In the context of this study, we broadly define a COI as a
  collection of interacting hosts.  We apply our methodology using data
  collected from a large enterprise network over an eleven-week period.
  First, we study the distributions and stability of the size of COIs.
  Second, we evaluate multiple heuristics to determine a stable core set
  of COIs and determine the stability of these sets over time. Third, we
  evaluate how much of the communication is not captured by these core
  COI sets.}
}
@inproceedings{hth+05,
  author = {Michael Hicks and Stephen Tse and Boniface Hicks and Steve Zdancewic},
  title = {Dynamic updating of information-flow policies},
  booktitle = {Proceedings of the Foundations of Computer Security Workshop (FCS '05)},
  month = {March},
  year = {2005},
  url = {http://www.cse.psu.edu/~phicks/pubs/papers/dynUpdateInfoflow.pdf},
  abstract = {Applications that manipulate sensitive information should
  ensure end-to-end security by satisfying two properties: sound
  execution and some form of noninterference. By the former, we mean the
  program should always perform actions in keeping with its current
  policy, and by the latter we mean that these actions should never
  cause high-security information to be visible to a low-security
  observer. Over the last decade, security-typed languages have been
  developed that exhibit these properties, increasingly improving so as
  to model important features of real programs. No current
  security-typed language, however, permits general changes to security
  policies in use by running programs. This paper presents a simple
  information flow type system that allows for dynamic security
  policy updates while ensuring sound execution and a relaxed form of
  noninterference we term noninterference between updates. We see this
  work as an important step toward using language-based techniques to
  ensure end-to-end security for realistic applications.}
}
@inproceedings{wjml04,
  author = {H.B. Wang and S. Jha and P. McDaniel and M. Livny},
  title = {Security Policy Reconciliation in Distributed Computing
  Environments},
  booktitle = {Proceedings of 5th International Workshop on Policies
  for Distributed Systems and Networks (Policy 2004)},
  organization = {IEEE},
  month = {June},
  year = {2004},
  url = {http://www.patrickmcdaniel.org/pubs/policy04.pdf},
  abstract = {A major hurdle in sharing resources between
  organizations is heterogeneity. Therefore, in order for two
  organizations to collaborate, their policies have to be resolved. The
  process of resolving different policies is known as policy
  reconciliation, which in general is an intractable problem. This
  paper addresses policy reconciliation in the context of security. We
  present a formal framework and hierarchical representation for
  security policies.  Our hierarchical representation exposes the
  structure of the policies and leads to an efficient reconciliation
  algorithm.  We also demonstrate that agent preferences for security
  mechanisms can be readily incorporated into our framework. We have
  implemented our reconciliation algorithm in a library called the
  Policy Reconciliation Engine or PRE.  In order to test the
  implementation and measure the overhead of our reconciliation
  algorithm, we have integrated PRE into a distributed high-throughput
  system called Condor.}
}
@inproceedings{bckm04,
  author = {Simon Byers and Lorrie Faith Cranor and Dave Kormann and
  Patrick McDaniel},
  title = {Searching for Privacy: Design and Implementation of a
  P3P-Enabled Search Engine},
  booktitle = {Proceedings of the 2004 Workshop on Privacy Enhancing
  Technologies (PETS)},
  month = {May},
  year = {2004},
  url = {http://www.patrickmcdaniel.org/pubs/pets04.pdf},
  note = {Toronto, Canada},
  abstract = {Although the number of online privacy policies is
  increasing, it remains difficult for Internet users to understand
  them, let alone to compare policies across sites or identify sites
  with the best privacy practices. The World Wide Web Consortium (W3C)
  developed the Platform for Privacy Preferences (P3P 1.0)
  specification to provide a standard computer-readable format for
  privacy policies.  This standard enables web browsers and other user
  agents to interpret privacy policies on behalf of their users.  This
  paper introduces our prototype P3P-enabled Privacy Bird Search
  engine.  Users of this search service are given visual indicators of
  the privacy policies at sites included in query results.  Our system
  acts as a front end to a general search engine by evaluating the P3P
  policies associated with search results against a user's privacy
  preference settings. To improve system performance we cache
  unexpired P3P policy information (including information about the
  absence of P3P policies) for thousands of the most popular sites as
  well as for sites that have been returned in previous search
  results.  We discuss the system architecture and its implementation,
  and consider the work necessary to evolve our prototype into a fully
  functional and efficient service.}
}
@inproceedings{bcc+03,
  author = {Simon Byers and Lorrie Cranor and Eric Cronin and Dave Kormann and
  Patrick McDaniel},
  title = {Analysis of Security Vulnerabilities in the Movie Production and Distribution Process},
  booktitle = {Proceedings of the 2003 ACM Workshop on Digital Rights Management},
  month = {October},
  year = {2003},
  url = {http://www.patrickmcdaniel.org/pubs/drm03.pdf},
  note = {Washington, DC},
  abstract = {Unauthorized copying of movies is a major concern for the
  motion picture industry. While unauthorized copies of movies have been
  distributed via portable physical media for some time, low-cost,
  high-bandwidth Internet connections and peer-to-peer file sharing
  networks provide highly efficient distribution media. Many movies are
  showing up on file sharing networks shortly after, and in some cases
  prior to, theatrical release. It has been argued that the availability
  of unauthorized copies directly affects theater attendance and DVD
  sales, and hence represents a major financial threat to the movie
  industry. Our research attempts to determine the source of
  unauthorized copies by studying the availability and characteristics
  of recent popular movies in file sharing networks. We developed a data
  set of 312 popular movies and located one or more samples of 183 of
  these movies on file sharing networks, for a total of 285 movie
  samples. 77% of these samples appear to have been leaked by industry
  insiders. Most of our samples appeared on file sharing networks prior
  to their official consumer DVD release date. Indeed, of the movies
  that had been released on DVD as of the time of our study, only 5%
  first appeared after their DVD release date on a web site that indexes
  file sharing networks, indicating that consumer DVD copying currently
  represents a relatively minor factor compared with insider leaks. We
  perform a brief analysis of the movie production and distribution
  process and identify potential security vulnerabilities that may lead
  to unauthorized copies becoming available to those who may wish to
  redistribute them. Finally, we offer recommendations for reducing
  security vulnerabilities in the movie production and distribution
  process.}
}
@inproceedings{JTG+00,
  title = {Synchronous {IPC} over transparent monitors},
  author = {T. Jaeger and J. Tidswell and A. Gefflaut and Y. Park and K. Elphinstone and J. Liedtke},
  booktitle = {ACM SIGOPS European Workshop},
  pages = {189-194},
  year = {2000}
}
@inproceedings{gjp+00,
  title = {The SawMill multiserver approach},
  author = {Alain Gefflaut and Trent Jaeger and Yoonho Park and Jochen Liedtke and Kevin Elphinstone and Volkmar Uhlig and Jonathon Tidswell and Luke Deller and Lars Reuther},
  year = {2000},
  booktitle = {Proceedings of the ACM SIGOPS European Workshop},
  pages = {109-114},
  url = {http://siis.cse.psu.edu/pubs/gjp00_sigops.pdf}
}
@inproceedings{tj00b,
  author = {Jonathan F. Tidswell and Trent Jaeger},
  title = {Integrated Constraints and Inheritance in {DTAC}},
  pages = {93-102},
  booktitle = {Proceedings of the 5th {ACM} Workshop on Role-Based
		 Access Control ({RBAC}-00)},
  month = {July},
  year = {2000},
  abstract = {Inheritance and constraints are two common techniques for
  safely managing the complexity of large access control configurations.
  Inheritance is used to help factor the model, while constraints are
  used to help ensure that the complexity will not result in an unsafe
  configuration arising in the future evolution of the system. In this
  paper we develop an integrated mathematical approach to defining both
  inheritance and constraints in the dynamically typed access control
  (DTAC) model. In the process we identify several useful relationships
  among DTAC objects. The combination of DTAC and our new relationships
  allows us to graphically construct a greater variety and complexity of
  efficiently verifiable separation of duty constraints than any other
  model we are aware of.}
}
@inproceedings{J99b,
  author = {Trent Jaeger},
  title = {On the Increasing Importance of Constraints},
  pages = {33-42},
  booktitle = {Proceedings of the Fourth {ACM} Workshop on Role-Based Access Control},
  month = {October},
  year = {1999}
}
@inproceedings{jmr99,
  title = {Access Control in a Virtual University},
  author = {Trent Jaeger and Tony Michailidis and Roy Rada},
  year = {1999},
  booktitle = {Proceedings of the Workshops on Enabling Technologies: Infrastructures for Collaborative Enterprises},
  pages = {135-140}
}
@inproceedings{lue+99,
  title = {How To Schedule Unlimited Memory Pinning of Untrusted
		 Processes Or Provisional Ideas about Service-Neutrality},
  author = {Jochen Liedtke and Volkmar Uhlig and Kevin Elphinstone
		 and Trent Jaeger and Yoonho Park},
  year = {1999},
  booktitle = {Proceedings of the Workshop on Hot Topics in Operating Systems},
  pages = {153-158},
  abstract = {You can read it as a paper that treats a concrete problem
  motivated in the first section: how can we permit untrusted user
  processes to pin their virtual pages in memory most flexibly and as
  unlimited as possible? From this point of view, the paper presents a
  general solution that is theoretically and experimentally reasonably
  substantiated. However, you can also read the paper as an approach to
  solve the more general problem of how an existing system can be
  extended by new operations while preserving the original system's QoS
  properties. From this point of view, the paper is highly speculative.
  The presented principle of service-neutral operations can successfully
  solve the concrete problem of dynamic pinning. However we still have
  no sound evidence that it is useful for a much broader class of
  problems. Nevertheless, we strongly suspect it is.}
}
@inproceedings{JEL+99,
  title = {Flexible Access Control using {IPC} Redirection},
  author = {T. Jaeger and K. Elphinstone and J. Liedtke and V. Panteleenko and Y. Park},
  booktitle = {Workshop on Hot Topics in Operating Systems},
  year = {1999},
  abstract = {We present a mechanism for inter-process communication
  (IPC) redirection that enables efficient and flexible access control
  for micro-kernel systems. In such systems, services are implemented at
  user level, so IPC is the only means of communication between them.
  Thus, the system must be able to mediate IPCs to enforce its access
  control policy. Such mediation must enable enforcement of security
  policy with as little performance overhead as possible, but current
  mechanisms either: (1) place significant access control functionality
  in the kernel which increases IPC cost or (2) are static and require
  more IPCs than necessary to enforce access control. We define an IPC
  redirection mechanism that makes two improvements: (1) it removes the
  management of redirection policy from the kernel, so access control
  enforcement can be implemented outside the kernel; and (2) it
  separates the notion of who controls the redirection policy from the
  redirections themselves, so redirections can be configured arbitrarily
  and dynamically. We define our redirection mechanism, demonstrate its
  use, and examine possible efficient implementations.}
}
@book{book-traynor08,
  author = {Patrick Traynor and Patrick McDaniel and Thomas {La Porta}},
  title = {Security for Telecommunications Networks},
  publisher = {Springer},
  series = {Advances in Information Security},
  volume = {40},
  year = {2008},
  url = {http://www.springer.com/computer/security+and+cryptology/book/978-0-387-72441-6}
}
@book{book-jaeger08,
  author = {Trent Jaeger},
  title = {Operating System Security},
  publisher = {Morgan and Claypool},
  series = {Synthesis Lectures on Information Security, Privacy and Trust},
  year = {2008},
  url = {http://www.morganclaypool.com/doi/abs/10.2200/S00126ED1V01Y200808SPT001}
}