-
Notifications
You must be signed in to change notification settings - Fork 9
/
generals.bib
1461 lines (1270 loc) · 60 KB
/
generals.bib
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
% This file should be better organized, and perhaps eliminated with its
% parts being put in more logical places.
%% See prefetch.bib
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% Run-time disambiguation
%%%
@Article{Nicolau89,
author = "Alexandru Nicolau",
title = "Run-Time Disambiguation: Coping with Statically
Unpredictable Dependencies",
pages = "663--678",
journal = "IEEE Transactions on Computers",
volume = 38,
number = 5,
month = may,
year = 1989,
publisher = "IEEE Computer Society",
address = "Washington, DC",
}
% NOTE(review): the crossref key is "MICRO97" but this Su/Habib/Zhao/Wang/Wu
% paper dates from 1994 (MICRO-27) -- confirm the crossref points at the
% intended proceedings entry.
@InProceedings{SuEtAl94,
author = "Bogong Su and Stanley Habib and Wei Zhao and Jian Wang
and Youfeng Wu",
title = "A Study of Pointer Aliasing for Software Pipelining
using Run-Time Disambiguation",
crossref = "MICRO97",
pages = "112--117",
}
@InProceedings{HuangSlaShe94,
author = "Andrew S. Huang and Gert Slavenburg and John Paul
Shen",
title = "Speculative Disambiguation: A Compilation Technique
for Dynamic Memory Disambiguation",
crossref = "ISCA94",
pages = "200--210",
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% Dependence analysis
%%%
@Book{Banerjee:1988:DAS,
author = "Utpal Banerjee",
title = "Dependence analysis for supercomputing",
publisher = "Kluwer Academic",
address = "Boston, MA, USA",
pages = "x + 155",
year = 1988,
ISBN = "0-89838-289-0",
LCCN = "QA76.5 .B2641 1988",
series = "The Kluwer international series in engineering and
computer science. Parallel processing and fifth
generation computing",
keywords = "Parallel processing (Electronic computers);
Supercomputer systems; Supercomputers",
}
@InProceedings{PLDI91*15,
author = "Gina Goff and Ken Kennedy and Chau-Wen Tseng",
title = "Practical Dependence Testing",
crossref = "PLDI91",
pages = "15--29",
}
@Article{Maydan:1995:EDD,
author = "Dror E. Maydan and John L. Hennessy and Monica S.
Lam",
title = "Effectiveness of Data Dependence Analysis",
journal = "International Journal of Parallel Programming",
volume = 23,
number = 1,
pages = "63--81",
month = feb,
year = 1995,
coden = "IJPPE5",
ISSN = "0885-7458",
affiliation = "Stanford Univ",
affiliationaddress = "Stanford, CA, USA",
classification = "722.1; 722.4; 723.1; 723.2; 921.6; C6110P (Parallel
programming); C6130 (Data handling techniques); C6150C
(Compilers, interpreters and other processors); C6150G
(Diagnostic, testing, debugging and evaluating
systems)",
corpsource = "Comput. Syst. Lab., Stanford Univ., CA, USA",
journalabr = "Int J Parallel Program",
keywords = "11pp system; affine approximation; affine memory
disambiguation; affine memory disambiguation system;
Approximation theory; array references; data analysis;
Data dependence; data dependence analysis; data
dependence testing; data flow analysis; Data reduction;
Data storage equipment; dataflow information;
decidability; flow insensitive; Larus; Larus's llpp
system; linear integer functions; Loop level
parallelism; loop level parallelism detection; loop
variables; loops bounds; memory disambiguation;
Numerical analysis; numerical programs; Parallel
processing systems; parallel programming; Program
compilers; program compilers; program testing;
undecidable",
treatment = "P Practical",
}
@Article{Pugh:1992:PAE,
author = "William Pugh",
title = "A practical algorithm for exact array dependence
analysis",
journal = "Communications of the ACM",
volume = 35,
number = 8,
pages = "102--114",
month = aug,
year = 1992,
coden = "CACMA2",
ISSN = "0001-0782",
url = "http://www.acm.org/pubs/toc/Abstracts/0001-0782/135233.html",
keywords = "algorithms; experimentation; languages; measurement;
performance",
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% Subscript checks
%%%
@InProceedings{Kolte:1995:ERA,
author = "Priyadarshan Kolte and Michael Wolfe",
title = "Elimination of redundant array subscript range
checks",
crossref = "PLDI95",
pages = "270--278",
coden = "SINODQ",
ISSN = "0362-1340",
abstract = "The paper presents a compiler optimization algorithm
to reduce the run time overhead of array subscript
range checks in programs without compromising safety.
The algorithm is based on partial redundancy
elimination and it incorporates previously developed
algorithms for range check optimization. We implemented
the algorithm in our research compiler, Nascent, and
conducted experiments on a suite of 10 benchmark
programs to obtain four results: the execution overhead
of naive range checking is high enough to merit
optimization; there are substantial differences between
various optimizations; loop based optimizations that
hoist checks out of loops are effective in eliminating
about 98\% of the range checks; and more sophisticated
analysis and optimization algorithms produce very
marginal benefits. (18 Refs.)",
affiliation = "Oregon Graduate Inst. of Sci. and Technol., Beaverton,
OR, USA",
classification = "C6150C (Compilers, interpreters and other
processors); C6150G (Diagnostic, testing, debugging and
evaluating systems)",
keywords = "Benchmark programs; Compiler optimization algorithm;
Execution overhead; Loop based optimizations; Naive
range checking; Nascent; Optimization algorithms;
Partial redundancy elimination; Range check
optimization; Redundant array subscript range check
elimination; Research compiler; Run time overhead",
thesaurus = "Optimising compilers; Program diagnostics;
Redundancy",
}
@Article{Gupta:1993:OAB,
author = "Rajiv Gupta",
title = "Optimizing array bound checks using flow analysis",
journal = "ACM Letters on Programming Languages and Systems",
volume = 2,
number = 4,
pages = "135--150",
month = mar,
year = 1993,
coden = "ALPSE8",
ISSN = "1057-4514",
url = "http://www.acm.org/pubs/toc/Abstracts/1057-4514/176507.html",
abstract = "Bound checks are introduced in programs for the
run-time detection of array bound violations.
Compile-time optimizations are employed to reduce the
execution-time overhead due to bound checks. The
optimizations reduce the program execution time through
{\em elimination} of checks and {\em propagation} of
checks out of loops. An execution of the optimized
program terminates with an array bound violation if and
only if the same outcome would have resulted during the
execution of the program containing all array bound
checks. However, the point at which the array bound
violation occurs may not be the same. Experimental
results indicate that the number of bound checks
performed during the execution of a program is greatly
reduced using these techniques.",
keywords = "languages, reliability",
subject = "{\bf D.3.4}: Software, PROGRAMMING LANGUAGES,
Processors, Optimization. {\bf D.2.5}: Software,
SOFTWARE ENGINEERING, Testing and Debugging, Error
handling and recovery. {\bf D.3.4}: Software,
PROGRAMMING LANGUAGES, Processors, Compilers.",
}
@InProceedings{PLDI90*272,
author = "Rajiv Gupta",
title = "A fresh look at optimizing array bound checking",
crossref = "PLDI90",
pages = "272--282",
abstract = "This paper describes techniques for optimizing range
checks performed to detect array bound violations. In
addition to the elimination of range checks, the
optimizations discussed in this paper also reduce the
overhead due to range checks that cannot be eliminated
by compile-time analysis. The optimizations reduce the
program execution time and the object code size through
elimination of redundant checks, propagation of checks
out of loops, and combination of multiple checks into a
single check. A minimal control flow graph (MCFG) is
constructed using which the minimal amount of data flow
information required for range check optimizations is
computed. The range check optimizations are performed
using the MCFG rather the CFG for the entire program.
This allows the global range check optimizations to be
performed efficiently since the MCFG is significantly
smaller than the CFG.",
affiliation = "Philips Lab",
affiliationaddress = "Briarcliff Manor, NY, USA",
classification = 723,
conference = "Proceedings of the ACM SIGPLAN '90 Conference on
Programming Language Design and Implementation",
conferenceyear = 1990,
journalabr = "SIGPLAN Not",
keywords = "Arrays; Computer Operating Systems; Data Processing
--- Data Structures; Optimizing Compilers; Program
Compilers",
meetingaddress = "White Plains, NY, USA",
meetingdate = "Jun 20--22 1990",
meetingdate2 = "06/20--22/90",
sponsor = "Assoc for Computing Machinery, Special Interest Group
on Programming Languages",
}
@InProceedings{HackettDWY2006,
author = "Hackett, Brian and Das, Manuvir and Wang, Daniel and Yang, Zhe",
title = "Modular checking for buffer overflows in the large",
crossref = "ICSE2006",
pages = "232--241",
doi = {10.1145/1134285.1134319},
}
@InProceedings{evans94a,
author = "David Evans and John Guttag and James Horning and Yang
Meng Tan",
title = "{LCLint}: A Tool for Using Specifications to Check
Code",
crossref = "FSE94",
pages = "87--97",
}
@InProceedings{Evans96,
key = "Evans",
author = "David Evans",
title = "Static detection of dynamic memory errors",
crossref = "PLDI96",
pages = "44--53",
}
% IEEE Software 19(1), 2002.  DOI is stored bare (no resolver prefix) so
% bibliography styles and URL packages can format/link it themselves.
@Article{EvansL2002,
author = "David Evans and David Larochelle",
title = "Improving security using extensible lightweight static analysis",
journal = IEEESoftware,
year = 2002,
volume = 19,
number = 1,
pages = "42--51",
doi = {10.1109/52.976940},
}
% David Evans' MEng thesis; the basis of the LCLint tool (cf. evans94a,
% Evans96).  Transcription typos in the abstract fixed ("thy" -> "they",
% "too lightweight" -> "to do lightweight").
@TechReport{MIT-LCS//MIT/LCS/TR-628,
author = "David Evans",
title = "Using Specifications to Check Source Code",
institution = "Massachusetts Institute of Technology, Laboratory for
Computer Science",
number = "MIT-LCS//MIT/LCS/TR-628",
month = jun,
year = 1994,
abstract = "Traditional static checkers are limited to detecting
simple anomalies since they have no information
regarding the intent of the code. Program verifiers are
too expensive for nearly all applications. This thesis
investigates the possibilities of using specifications
to do lightweight static checks to detect inconsistencies
between specifications and implementations. A tool,
LCLint, was developed to do static checks on C source
code using LCL specifications. It is similar to
traditional lint, except it uses information in
specifications to do more powerful checks. Some typical
problems detected by LCLint include violations of
abstraction barriers and modifications of
caller-visible state that are inconsistent with the
specification. Experience using LCLint to check a
specified program and to understand and maintain a
program with no specifications illustrate some
applications of LCLint and suggest future directions
for using specifications to check source code.",
}
% Yang Meng Tan's thesis on LCL/LCLint and claims about specifications.
% Transcription errors in the abstract fixed ("Second it" -> "Second, it",
% "with changing" -> "without changing", "specifications mistakes" ->
% "specification mistakes", missing final period).
@TechReport{MIT-LCS//MIT/LCS/TR-619,
author = "Yang Meng Tan",
title = "Formal Specification Techniques for Promoting Software
Modularity, Enhancing Documentation, and Testing
Specifications",
institution = "Massachusetts Institute of Technology, Laboratory for
Computer Science",
number = "MIT-LCS//MIT/LCS/TR-619",
month = jun,
year = 1994,
abstract = "This thesis presents three ideas. First, it presents a
novel use of formal specification to promote a
programming style based on specified interfaces and
data abstraction in a programming language that lacks
such supports. Second, it illustrates the uses of claims
about specifications. Third, it describes a software
reengineering process for making existing software
easier to maintain and reuse. The process centers
around specifying existing software modules and using
the specifications to drive the code improvement
process. The Larch/C Interface Language, or LCL, is a
formal specification language for documenting ANSI C
software modules. Although C does not support abstract
types, LCL is designed to support abstract types. A
lint-like program, LCLint, enforces type discipline in
clients of LCL abstract types. LCL is structured in a
way that enables LCLint to extract information from an
LCL specification for performing some consistency
checks between the specification and its
implementation. LCL also provides facilities to state
claims, or redundant, problem-specific assertions about
a specification. Claims enhance the role of
specifications as a software documentation tool. Claims
can be used to highlight important or unusual
properties, promote design coherence of software
modules, and aid in program reasoning. In addition,
claims about a specification can be used to test the
specification by proving that they follow semantically
from the specification. A semantics of LCL suitable for
reasoning about claims is given. A software
reengineering process developed around LCL and claims
is effective for improving existing programs. The
impact of the process applied to an existing C program
is described. The process improved the modularity and
robustness of the program without changing its essential
functionality or performance. A major product of the
process is the specifications of the main modules of
the reengineered program. A proof checker was used to
verify some claims about the specifications; and in the
process, several specification mistakes were found.
The specifications are also used to illustrate
specification writing techniques and heuristics.",
}
% Region-based memory management (POPL '94).  The title previously read
% "$lambda$" (the word set in math italics); corrected to the Greek letter,
% braced so style recasing cannot break the math.
@InProceedings{Tofte:1994:ITC,
author = "Mads Tofte and Jean-Pierre Talpin",
title = "Implementation of the typed call-by-value {$\lambda$}-calculus
using a stack of regions",
crossref = "POPL94",
pages = "188--201",
abstract = "Presents a translation scheme for the polymorphically
typed call-by-value lambda -calculus. All runtime
values, including function closures, are put into
regions. The store consists of a stack of regions.
Region inference and effect inference are used to infer
where regions can be allocated and de-allocated.
Recursive functions are handled using a limited form of
polymorphic recursion. The translation is proved
correct with respect to a store semantics, which models
a region-based run-time system. Experimental results
suggest that regions tend to be small, that region
allocation is frequent and that overall memory demands
are usually modest, even without garbage collection.
(31 Refs.)",
affiliation = "Dept. of Comput. Sci., Copenhagen Univ., Denmark",
classification = "C4210 (Formal logic); C4210L (Formal languages and
computational linguistics); C6120 (File organisation);
C6150C (Compilers, interpreters and other processors)",
keywords = "Allocation; Effect inference; Function closures;
Lambda calculus; Memory demand; Memory management;
Polymorphic recursion; Polymorphically typed; Process
algebra; Recursive functions; Region inference; Runtime
value; Stack of regions; Storage; Store semantics;
Translation scheme; Typed call-by-value lambda
-calculus",
thesaurus = "Inference mechanisms; Lambda calculus; Program
interpreters; Recursive functions; Storage management",
}
% Old key: AikenAandF1995a,
% PLDI '95.  Page range fixed to use the en-dash convention (-- not -),
% and a trailing comma added for consistency with the rest of the file.
@InProceedings{AikenFL1995,
author = "Alex Aiken and Manuel F{\"a}hndrich and Raph Levien",
title = "Better Static Memory Management: Improving
Region-Based Analysis of Higher-Order Languages",
year = "1995",
abstracturl = "http://http.cs.berkeley.edu/~aiken/pubs.html",
crossref = "PLDI95",
pages = "174--185",
}
@Article{Schnorf93,
author = "P. Schnorf and M. Ganapathi and J. L. Hennessy",
title = "Compile-Time Copy Elimination",
journal = j-SPE,
volume = 23,
number = 11,
pages = "1175--1200",
year = 1993,
keywords = "functional parallel",
abstract = "Single-assignment and functional languages have value
semantics that do not permit side-effects. This lack of
side-effects makes automatic detection of parallelism
and optimization for data locality in programs much
easier. However, the same property poses a challenge in
implementing these languages efficiently. The authors
describe an optimizing compiler system that solves the
key problem of aggregate copy elimination. The methods
developed rely exclusively on compile-time algorithms,
including interprocedural analysis, that are applied to
an intermediate data flow representation. By dividing
the problem into update-in-place and build-in-place
analysis, a small set of relatively simple
techniques-edge substitution, graph pattern matching,
substructure sharing and substructure targeting-was
found to be very powerful. If combined properly and
implemented carefully, the algorithms eliminate
unnecessary copy operations to a very high degree. No
run-time overhead is imposed on the compiled
programs.",
}
@InCollection{Hudak:aiodl:1987,
author = "Paul Hudak",
title = "A semantic model of reference counting and its
abstraction",
editor = "Samson Abramsky and Chris Hankin",
booktitle = "Abstract Interpretation of Declarative Languages",
publisher = "Ellis Horwood",
year = 1987,
pages = "45--62",
chapter = 3
}
@InProceedings{KhurshidS2005,
author = "Sarfraz Khurshid and Yuk Lai Suen",
title = "Generalizing symbolic execution to library classes",
crossref = "PASTE2005",
pages = "103--110",
}
@InProceedings{BodikGS2000,
author = "Bod\'{\i}k, Rastislav and Gupta, Rajiv and Sarkar, Vivek",
title = "{ABCD}: Eliminating Array Bounds Checks on Demand",
crossref = "PLDI2000",
pages = "321--333",
}
% NOTE(review): duplicate of Kolte:1995:ERA above (same paper, same crossref
% and pages).  Both keys are kept because either may be cited; prefer
% Kolte:1995:ERA for new citations, and consider merging the two entries
% (biblatex "ids" key aliasing) when citations allow.
@InProceedings{PLDI::KolteW1995,
title = "Elimination of Redundant Array Subscript Range
Checks",
author = "Priyadarshan Kolte and Michael Wolfe",
crossref = "PLDI95",
pages = "270--278",
}
@InProceedings{PLDI::XiP1998,
title = "Eliminating Array Bound Checking through Dependent
Types",
author = "Hongwei Xi and Frank Pfenning",
crossref = "PLDI98",
pages = "249--257",
references = "\cite{PLDI::FreemanP1991} \cite{LOPLAS::Gupta1994}
\cite{POPL::Necula1997} \cite{PLDI::NeculaL1998}
\cite{PLDI::Pugh1992} \cite{JACM::Shostak1977}
\cite{POPL::SusukiI1977}",
}
@InProceedings{Xi-Pfenning99,
key = "Xi \& Pfenning",
author = "Hongwei Xi and Frank Pfenning",
title = "Dependent Types in Practical Programming",
crossref = "POPL99",
pages = "214--227",
annote = "28 references.",
}
% Zenger's indexed types: Hindley/Milner extended with value-indexed
% algebraic types (e.g. size-indexed vectors); background for the
% dependent-type bound-check entries in this section.
@Article{Zenger97,
author = "Christoph Zenger",
title = "Indexed Types",
journal = "Theoretical Computer Science",
volume = 187,
pages = "147--165",
year = 1997,
abstract = "A new extension of the Hindley/Milner type system is
proposed. The type system has algebraic types, that have not only type
parameters, but also value parameters (indices). This allows for example to
parameterize matrices and vectors by their size and to check size
compatibility statically. This is especially of interest in computer
algebra.",
}
@InProceedings{SuzukiIsh77,
author = "Norihisa Suzuki and Kiyoshi Ishihata",
title = "Implementation of an array bound checker",
crossref = "POPL77",
pages = "132--143",
}
@Article{Aggarwal:2001:RFA,
author = "Aneesh Aggarwal and Keith Randall",
title = "Related Field Analysis",
journal = "ACM SIG{\-}PLAN Notices",
volume = "36",
number = "5",
pages = "214--220",
month = may,
year = "2001",
coden = "SINODQ",
ISSN = "0362-1340",
bibdate = "Wed Jul 11 12:48:53 2001",
acknowledgement = ack-nhfb,
}
@InProceedings{QuanHV2002,
author = "Qian, Feng and Hendren, Laurie and Verbrugge, Clark",
title = "A comprehensive approach to array bounds check elimination for {Java}",
crossref = "CC2002",
pages = "325--341",
}
@InProceedings{WeiMRFH2018,
author = "Shiyi Wei and Piotr Mardziel and Andrew Ruef and Jeffrey S. Foster and Michael Hicks",
title = "Evaluating design tradeoffs in numeric static analysis for {Java}",
crossref = "ESOP2018",
NEEDpages = "*",
}
@InProceedings{ElliottRHT2018,
author = "Archibald Samuel Elliott and Andrew Ruef and Michael Hicks and David Tarditi",
title = "Checked {C}: Making {C} safe by extension",
crossref = "SecDev2018",
NEEDpages = "*",
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% Dynamic
%%%
@InProceedings{sosp93*203,
author = "Robert Wahbe and Steven Lucco and Thomas E. Anderson
and Susan L. Graham",
title = "Efficient Software-Based Fault Isolation",
pages = "203--216",
ISBN = "0-89791-632-8",
booktitle = "Proceedings of the 14th Symposium on Operating Systems
Principles",
month = dec,
address = "New York, NY, USA",
year = 1993,
}
@InProceedings{Hastings:1992:PAT,
author = "Reed Hastings and Bob Joyce",
title = "{Purify}: A tool for detecting memory leaks and
access errors in {C} and {C++} programs",
crossref = "USENIX92Winter",
pages = "125--138",
affiliation = "Pure Software Inc.",
}
@InProceedings{XuDS2004,
author = "Wei Xu and Daniel C. DuVarney and R. Sekar",
title = "An efficient and backwards-compatible transformation to
ensure memory safety of {C} programs",
crossref = "FSE2004",
pages = "117--126",
}
@Misc{KolawaH,
author = "Adam Kolawa and Arthur Hicken",
title = "Insure++: A Tool To Support Total Quality Software",
howpublished = "ParaSoft Corporation white paper",
note = "http://www.parasoft.com/insure/papers/tech.htm"
}
@TechReport{feuer85,
author = "Alan R. Feuer",
title = "Introduction to the {S}afe {C} Runtime Analyzer",
institution = "Catalytix Corporation",
year = 1985,
month = jan
}
@Manual{ThirdDegreeMan,
title = "Third Degree User Manual",
organization = "Digital Equipment Corporation",
year = 1994,
month = may # "~24,"
}
@Manual{DigitalUnixThirdDegree,
title = "Digital Unix Programmer's Guide",
note = "Chapter 7, ``Debugging Programs with Third Degree''",
year = 1996,
month = mar,
organization = "{Digital Equipment Corporation}"
}
@InProceedings{AustinBS94,
author = "Todd M. Austin and Scott E. Breach and Gurindar S.
Sohi",
title = "Efficient Detection of all Pointer and Array Access
Errors",
crossref = "PLDI94",
pages = "290--301",
}
@Misc{Austin97,
author = "Todd M. Austin",
title = "Compile-time check optimization for {Safe-C}",
howpublished = "Personal communication",
year = 1997,
month = oct # "~6,"
}
@TechReport{ncstrl.uwmadison//CS-TR-93-1197,
number = "CS-TR-93-1197",
institution = "University of Wisconsin, Madison",
title = "Efficient Detection of All Pointer and Array Access
Errors",
year = 1993,
month = dec,
author = "Todd M. Austin and Scott E. Breach and Gurindar S.
Sohi",
abstract = "In this paper, we present a pointer and array access
checking technique that provides complete error
coverage through a simple set of program
transformations. Our technique, based on an extended
safe pointer representation, has a number of novel
aspects. Foremost, it is the first technique that
detects all spatial and temporal access errors. Its use
is not limited by the expressiveness of the language;
that is, it can be applied successfully to compiled or
interpreted languages with subscripted and mutable
pointers, local references, and explicit and typeless
dynamic storage management, e.g., C. Because it is a
source level transformation, it is amenable to both
compile- and run-time optimization. Finally, its
performance, even without compile-time optimization, is
quite good. We implemented a prototype translator for
the C language and analyzed the checking overheads of
six non-trivial, pointer intensive programs. Execution
overheads range from 130\% to 540\%; with text and data
size overheads typically below 100\%.",
}
@Misc{heapagent_comparison97,
author = "{MicroQuill Software Publishing, Inc.}",
title = "Comparing and contrasting the
runtime error detection
technologies used in
{H}eap{A}gent 3.0,
{P}urify {NT} 4.0, and
{B}ounds{C}hecker {P}ro 4.0",
howpublished = "White paper",
year = 1997,
note = "http://www.heapagent.com/prod\_ha/ha\_comp.htm"
}
@InProceedings{Wahbe:1993:PDB,
author = "R. Wahbe and S. Lucco and S. L. Graham",
title = "Practical data breakpoints: design and
implementation",
crossref = "PLDI93",
pages = "1--12",
coden = "SINODQ",
ISSN = "0362-1340",
abstract = "A data breakpoint associates debugging actions with
programmer-specified conditions on the memory state of
an executing program. The authors present the design
and implementation of a practical data breakpoint
facility. A data breakpoint facility must monitor all
memory updates performed by the program being debugged.
The authors implemented and evaluated two complementary
techniques for reducing the overhead of monitoring
memory updates. First, they checked write instructions
by inserting checking code directly into the program
being debugged. The checks use a segmented bitmap data
structure that minimizes address lookup complexity.
Second, they developed data flow algorithms that
eliminate checks on some classes of write instructions
but may increase the complexity of the remaining
checks. They evaluated these techniques on the SPARC
using the SPEC benchmarks. (19 Refs.)",
affiliation = "Div. of Comput. Sci., California Univ., Berkeley, CA,
USA",
classification = "C4240 (Programming and algorithm theory); C6110
(Systems analysis and programming); C6120 (File
organisation); C6150G (Diagnostic, testing, debugging
and evaluating systems)",
confdate = "23-25 June 1993",
confaddress = "Albuquerque, NM, USA",
confsponsor = "ACM",
keywords = "Address lookup complexity; Checking code;
Complementary techniques; Complexity; Data flow
algorithms; Debugging actions; Executing program;
Memory state; Memory updates; Practical data breakpoint
facility; Programmer-specified conditions; Segmented
bitmap data structure; SPARC; SPEC benchmarks; Write
instructions",
thesaurus = "Computational complexity; Data structures; Program
debugging; Storage management; System monitoring",
}
@Article{Steffen92,
key = "Steffen",
author = "J. L. Steffen",
title = "Adding Run-time Checking to the {P}ortable {C}
{C}ompiler",
journal = j-SPE,
pages = "305--316",
volume = 22,
number = 4,
month = apr,
year = 1992,
}
@InProceedings{Kaufer:1988:SIP,
author = "Stephen Kaufer and Russell Lopez and Sesha Pratap",
title = "{Saber-C} --- An Interpreter-based Programming
Environment for the {C} Language",
booktitle = "{USENIX} Conference Proceedings",
address = "San Francisco, CA",
month = "Summer",
year = 1988,
pages = "161--171",
affiliation = "Saber Software, Inc.",
}
@InProceedings{Kendall:1983:BRC,
author = "Samuel C. Kendall",
title = "{Bcc}: Runtime Checking for {C} Programs",
booktitle = "Proceedings: {USENIX} Association [and] Software Tools
Users Group Summer Conference",
address = "Toronto, Ontario, Canada",
pages = "5--16",
month = jul,
year = 1983,
affiliation = "Delft Consulting Corporation",
}
@Article{FischerL1980,
title = "The Implementation of Run-Time Diagnostics in
{Pascal}",
author = "Charles N. Fischer and Richard J. LeBlanc",
journal = "IEEE Transactions on Software Engineering",
pages = "313--319",
month = jul,
year = "1980",
volume = "6",
number = "4",
}
@InProceedings{usenix_su88*223,
author = "Benjamin G. Zorn and Paul N. Hilfinger",
title = "A Memory Allocation Profiler for {C} and Lisp
Programs",
booktitle = "Proceedings of the Summer USENIX Conference",
pages = "223--238",
month = jun,
publisher = "USENIX Association",
address = "San Francisco, CA, USA",
year = 1988,
}
@Misc{Geodesic,
author = "{Geodesic Systems}",
title = "Automatic Memory Management in C/C++ with Garbage Collection",
howpublished = "White paper"
}
@Article{BW:GCIUE,
author = "Hans-Juergen Boehm and Mark Weiser",
title = "Garbage Collection in an Uncooperative Environment",
journal = j-SPE,
volume = 18,
number = 9,
month = sep,
year = 1988,
pages = "807--820",
abstract = "We describe a technique for storage allocation and
garbage collection in the absence of significant
co-operation from the code using the allocator. This
limits garbage collection overhead to the time actually
required for garbage collection. In particular,
application programs that rarely or never make use of
the collector no longer encounter a substantial
performance penalty. This approach greatly simplifies
the implementation of languages supporting garbage
collection. It further allows conventional compilers to
be used with a garbage collector, either as the primary
means of storage reclamation, or as a debugging tool.
Our approach has two potential disadvantages. First,
some garbage may fail to be reclaimed. Secondly, we use
a 'stop and collect' approach, thus making the strategy
unsuitable for applications with severe real-time
constraints. We argue that the first problem is, to
some extent, inherent in any garbage collection system.
Furthermore, based on our experience, it is usually not
significant in practice. In spite of the second
problem, we have had favourable experiences with
interactive applications, including some that use a
heap of several megabytes.",
keywords = "garbage collection, storage management, debugging",
}
% Software: Practice and Experience 27(1), 1997.  Journal macro normalized
% to j-SPE, the @string name used by Schnorf93, Steffen92 and BW:GCIUE in
% this file; the bare "SPE" macro is not used anywhere else here and would
% expand to empty text if undefined.
@Article{PatilF97,
author = "Harish Patil and Charles Fischer",
title = "Low-cost, concurrent checking of pointer and array
accesses in {C} programs",
journal = j-SPE,
year = 1997,
volume = 27,
number = 1,
pages = "87--110",
month = jan,
}
@InProceedings{NeculaMW2002,
author = "George C. Necula and Scott McPeak and Westley Weimer",
title = "{CCured}: Type-safe retrofitting of legacy code",
crossref = "POPL2002",
pages = "128--139",
}
@InProceedings{ConditHMNW2003,
author = {Jeremy Condit and Mathew Harren and Scott McPeak
and George C. Necula and Westley Weimer},
title = {{CCured} in the Real World},
crossref = "PLDI2003",
pages = "232--244",
}
% RV 2003 (ENTCS 89).  Page range fixed to the -- convention; trailing
% comma added for consistency with the rest of the file.
@InProceedings{NethercoteS2003,
author = {Nicholas Nethercote and Julian Seward},
title = {Valgrind: A Program Supervision Framework},
crossref = "RV2003",
pages = "44--66",
}
% PLDI 2007.  Title typo fixed: "Insrumentation" -> "Instrumentation"
% (canonical title of the Valgrind PLDI paper).
@InProceedings{NethercoteS2007,
author = {Nicholas Nethercote and Julian Seward},
title = {Valgrind: A Framework for Heavyweight Dynamic Binary Instrumentation},
crossref = "PLDI2007",
pages = "89--100",
}
@InProceedings{SewardN2005,
author = {Julian Seward and Nicholas Nethercote},
title = {Using {Valgrind} to Detect Undefined Value Errors with Bit-Precision},
crossref = "USENIX2005",
pages = "17--30",
}
% Apparently no page numbers, since the proceedings were only
% published electronically (in ENTCS 89)
% RV 2003 (ENTCS 89; see the comment above about electronic-only pages).
% Page range fixed to the -- convention.
@InProceedings{NethercoteM2003,
author = {Nicholas Nethercote and Alan Mycroft},
title = {Redux: A Dynamic Dataflow Tracer},
crossref = "RV2003",
pages = "149--170",
summary = {Description of Redux. Lots of examples, particularly fac(5)
essence comparisons in C, stack machine interpreter, and
Haskell. Also demonstrates program slicing, and discusses
other possible uses of Redux.}
}
@InProceedings{ArthoB,
author = "C. Artho and A. Biere",
title = "Combined static and dynamic analysis",
booktitle = "1st International Workshop on
Abstract Interpretation of Object-Oriented Languages",
NEEDpages = "*",
year = 2005,
address = "Paris, France",
month = jan # "~21,",
abstract =
"Static analysis is usually faster than dynamic analysis but less
precise. Therefore it is often desirable to retain information from
static analysis for run-time verification, or to compare the results of
both techniques. However, this requires writing two [analysis]
programs, which may not act identically under the same conditions. It
would be desirable to share the same generic algorithm by static and
dynamic analysis. In JNuke, a framework for static and dynamic analysis
of Java programs, this has been achieved. By keeping the architecture
of static analysis similar to a virtual machine, the only key
difference between abstract interpretation and execution remains the
nature of program states. In dynamic analysis, concrete states are
available, while in static analysis, sets of (abstract) states are
considered. Our new analysis is generic because it can re-use the same
algorithm in static analysis and dynamic analysis. This paper describes
the architecture of such a generic analysis. To our knowledge, JNuke is
the first tool that has achieved this integration, which enables static
and dynamic analysis to interact in novel ways.",
}
@InProceedings{SullivanBBGA2003,
author = {Gregory T. Sullivan and Derek L. Bruening and Iris
Baron and Timothy Garnett and Saman Amarasinghe},
title = {Dynamic Native Optimization of Interpreters},
crossref = "IVME2003",
pages = "50--57",
}
@InProceedings{DhurjatiA06,