@inproceedings{Kluyver2016,
address = {G{\"{o}}ttingen, Germany},
author = {Kluyver, Thomas and Ragan-Kelley, Benjamin and Pérez, Fernando and Granger, Brian and Bussonnier, Matthias and Frederic, Jonathan and Kelley, Kyle and Hamrick, Jessica and Grout, Jason and Corlay, Sylvain and Ivanov, Paul and Avila, Damián and Abdalla, Safia and Willing, Carol},
booktitle = {Positioning and Power in Academic Publishing: Players, Agents and Agendas},
doi = {10.3233/978-1-61499-649-1-87},
editor = {Loizides, Fernando and Schmidt, Birgit},
keywords = {jupyter,notebook,reproducibility,research code,thesisc2},
mendeley-groups = {CIMAT/Thesis},
mendeley-tags = {jupyter,thesisc2},
pages = {87--90},
publisher = {IOS Press BV},
title = {{Jupyter Notebooks - a publishing format for reproducible computational workflows}},
year = {2016}
}
@inproceedings{Randles2017,
author = {Randles, Bernadette M. and Pasquetto, Irene V. and Golshan, Milena S. and Borgman, Christine L.},
booktitle = {2017 ACM/IEEE Joint Conference on Digital Libraries (JCDL)},
doi = {10.1109/JCDL.2017.7991618},
isbn = {978-1-5386-3861-3},
mendeley-groups = {CIMAT/Thesis},
month = {jun},
pages = {1--2},
publisher = {IEEE},
title = {{Using the Jupyter Notebook as a Tool for Open Science: An Empirical Study}},
url = {http://ieeexplore.ieee.org/document/7991618/},
year = {2017}
}
@article{Pfenninger2017,
author = {Pfenninger, Stefan and DeCarolis, Joseph and Hirth, Lion and Quoilin, Sylvain and Staffell, Iain},
doi = {10.1016/j.enpol.2016.11.046},
issn = {03014215},
journal = {Energy Policy},
keywords = {open-science,thesisc2},
mendeley-groups = {CIMAT/Thesis},
mendeley-tags = {open-science,thesisc2},
month = {feb},
pages = {211--215},
title = {{The importance of open data and software: Is energy research lagging behind?}},
url = {http://linkinghub.elsevier.com/retrieve/pii/S0301421516306516},
volume = {101},
year = {2017}
}
@misc{FOSTER2015,
author = {FOSTER},
booktitle = {FOSTER Consortium},
keywords = {thesisc2,web},
mendeley-groups = {CIMAT/Thesis},
mendeley-tags = {thesisc2,web},
title = {{Open Data Definition}},
url = {https://www.fosteropenscience.eu/taxonomy/term/110},
urldate = {2018},
year = {2015}
}
@inproceedings{Pontika2015,
address = {New York, New York, USA},
author = {Pontika, Nancy and Knoth, Petr and Cancellieri, Matteo and Pearce, Samuel},
booktitle = {Proceedings of the 15th International Conference on Knowledge Technologies and Data-driven Business - i-KNOW '15},
doi = {10.1145/2809563.2809571},
isbn = {9781450337212},
mendeley-groups = {CIMAT/Thesis},
pages = {1--8},
publisher = {ACM Press},
title = {{Fostering open science to research using a taxonomy and an eLearning portal}},
url = {http://dl.acm.org/citation.cfm?doid=2809563.2809571},
year = {2015}
}
@book{Nielsen2015,
author = {Nielsen, Michael A.},
mendeley-groups = {CIMAT/Thesis},
publisher = {Determination Press},
title = {{Neural Networks and Deep Learning}},
url = {http://neuralnetworksanddeeplearning.com/},
year = {2015}
}
@article{Agatonovic-Kustrin2000,
author = {Agatonovic-Kustrin, S and Beresford, R},
doi = {10.1016/S0731-7085(99)00272-1},
issn = {07317085},
journal = {Journal of Pharmaceutical and Biomedical Analysis},
keywords = {thesisc2},
mendeley-groups = {CIMAT/Thesis},
mendeley-tags = {thesisc2},
month = {jun},
number = {5},
pages = {717--727},
title = {{Basic concepts of artificial neural network (ANN) modeling and its application in pharmaceutical research}},
url = {http://linkinghub.elsevier.com/retrieve/pii/S0731708599002721},
volume = {22},
year = {2000}
}
@incollection{Adankon2009,
address = {Boston, MA},
author = {Adankon, Mathias M. and Cheriet, Mohamed},
booktitle = {Encyclopedia of Biometrics},
doi = {10.1007/978-0-387-73003-5_299},
mendeley-groups = {CIMAT/Thesis},
pages = {1303--1308},
publisher = {Springer US},
title = {{Support Vector Machine}},
url = {http://www.springerlink.com/index/10.1007/978-0-387-73003-5{\_}299},
year = {2009}
}
@incollection{Koza1996,
abstract = {Paraphrasing Arthur Samuel (1959), the question is: How can computers learn to solve problems without being explicitly programmed?},
address = {Dordrecht},
author = {Koza, John R. and Bennett, Forrest H. and Andre, David and Keane, Martin A.},
booktitle = {Artificial Intelligence in Design '96},
doi = {10.1007/978-94-009-0279-4_9},
keywords = {thesisc2,wiki},
mendeley-groups = {CIMAT/Thesis},
mendeley-tags = {thesisc2,wiki},
pages = {151--170},
publisher = {Springer Netherlands},
title = {{Automated Design of Both the Topology and Sizing of Analog Electrical Circuits Using Genetic Programming}},
url = {http://www.springerlink.com/index/10.1007/978-94-009-0279-4{\_}9},
year = {1996}
}
@misc{Kohavi1998,
author = {Kohavi, Ron and Provost, Foster},
booktitle = {Machine Learning},
keywords = {thesisc2,wiki},
mendeley-groups = {CIMAT/Thesis},
mendeley-tags = {thesisc2,wiki},
pages = {271--274},
title = {{Glossary of Terms}},
url = {http://ai.stanford.edu/{~}ronnyk/glossary.html},
urldate = {2018-02-19},
year = {1998}
}
@book{Davey2014,
address = {Hoboken, NJ, USA},
author = {Davey, Kevin J.},
doi = {10.1002/9781118778944},
edition = {First},
isbn = {9781118778944},
keywords = {monte carlo,trading,trading strategies},
mendeley-groups = {CIMAT/Thesis},
mendeley-tags = {monte carlo,trading,trading strategies},
month = {jul},
publisher = {John Wiley {\&} Sons, Inc.},
title = {{Building Winning Algorithmic Trading Systems}},
url = {http://doi.wiley.com/10.1002/9781118778944},
year = {2014}
}
@book{Bandy2011,
address = {Eugene, Oregon, US},
author = {Bandy, Howard B.},
edition = {First},
isbn = {9780979183829},
keywords = {monte carlo simulation,statistics,trading,trading strategies},
mendeley-groups = {CIMAT/Thesis},
mendeley-tags = {monte carlo simulation,statistics,trading,trading strategies},
publisher = {Blue Owl Press, Inc.},
title = {{Modeling Trading System Performance: Monte Carlo simulation, position sizing, risk management and statistics}},
year = {2011}
}
@article{Kara2011,
abstract = {Prediction of stock price index movement is regarded as a challenging task of financial time series prediction. An accurate prediction of stock price movement may yield profits for investors. Due to the complexity of stock market data, development of efficient models for predicting is very difficult. This study attempted to develop two efficient models and compared their performances in predicting the direction of movement in the daily Istanbul Stock Exchange (ISE) National 100 Index. The models are based on two classification techniques, artificial neural networks (ANN) and support vector machines (SVM). Ten technical indicators were selected as inputs of the proposed models. Two comprehensive parameter setting experiments for both models were performed to improve their prediction performances. Experimental results showed that average performance of ANN model (75.74{\%}) was found significantly better than that of SVM model (71.52{\%}). {\textcopyright} 2010 Elsevier Ltd. All rights reserved.},
author = {Kara, Yakup and {Acar Boyacioglu}, Melek and Baykan, {\"{O}}mer Kaan},
doi = {10.1016/j.eswa.2010.10.027},
issn = {09574174},
journal = {Expert Systems with Applications},
keywords = {Artificial neural networks,Prediction,Stock price index,Support vector machines,ann,artificial intelligence,finance,machine learning,svm},
mendeley-groups = {CIMAT/Thesis},
mendeley-tags = {ann,artificial intelligence,finance,machine learning,svm},
number = {5},
pages = {5311--5319},
publisher = {Elsevier Ltd},
title = {{Predicting direction of stock price index movement using artificial neural networks and support vector machines: The sample of the Istanbul Stock Exchange}},
url = {http://dx.doi.org/10.1016/j.eswa.2010.10.027},
volume = {38},
year = {2011}
}
@article{Pyo2017,
abstract = {The prediction of the trends of stocks and index prices is one of the important issues to market participants. Investors have set trading or fiscal strategies based on the trends, and considerable research in various academic fields has been studied to forecast financial markets. This study predicts the trends of the Korea Composite Stock Price Index 200 (KOSPI 200) prices using nonparametric machine learning models: artificial neural network, support vector machines with polynomial and radial basis function kernels. In addition, this study states controversial issues and tests hypotheses about the issues. Accordingly, our results are inconsistent with those of the precedent research, which are generally considered to have high prediction performance. Moreover, Google Trends proved that they are not effective factors in predicting the KOSPI 200 index prices in our frameworks. Furthermore, the ensemble methods did not improve the accuracy of the prediction.},
author = {Pyo, Sujin and Lee, Jaewook and Cha, Mincheol and Jang, Huisu},
doi = {10.1371/journal.pone.0188107},
issn = {19326203},
journal = {PLoS ONE},
keywords = {ann,artificial intelligence,finance,machine learning,svm},
mendeley-groups = {CIMAT/Thesis},
mendeley-tags = {ann,artificial intelligence,finance,machine learning,svm},
number = {11},
pages = {1--17},
title = {{Predictability of machine learning techniques to forecast the trends of market index prices: Hypothesis testing for the Korean stock markets}},
url = {https://doi.org/10.1371/journal.pone.0188107},
volume = {12},
year = {2017}
}
@book{Dominik2018,
author = {Dominik, Carsten and Goaziou, Nicolas and Schulte, Eric and Jambunathan, K},
isbn = {978-1-906966-08-9},
mendeley-groups = {CIMAT/Thesis},
pages = {282},
publisher = {Network Theory Ltd},
title = {{The Org Mode Reference Manual}},
url = {https://orgmode.org/org.pdf},
year = {2010}
}
@article{Schulte2011,
author = {Schulte, Eric and Davison, Dan},
doi = {10.1109/MCSE.2011.41},
issn = {1521-9615},
journal = {Computing in Science {\&} Engineering},
mendeley-groups = {CIMAT/Thesis},
month = {may},
number = {3},
pages = {66--73},
title = {{Active Documents with Org-Mode}},
url = {http://ieeexplore.ieee.org/document/5756277/},
volume = {13},
year = {2011}
}
@misc{Community2018,
author = {{Project Jupyter Community}},
booktitle = {Project Jupyter},
mendeley-groups = {CIMAT/Thesis},
title = {{Project Jupyter}},
url = {http://jupyter.org/},
urldate = {May 28, 2018},
year = {2018}
}
@book{Goodfellow2016,
address = {Cambridge, Massachusetts},
author = {Goodfellow, Ian and Bengio, Yoshua and Courville, Aaron},
isbn = {9780262035613},
mendeley-groups = {CIMAT},
publisher = {MIT Press},
title = {{Deep Learning}},
url = {http://www.deeplearningbook.org/},
year = {2016}
}
@book{Mitchell1997,
author = {Mitchell, Tom},
editor = {Liu, C.L. and Tucker, Allen B.},
isbn = {0070428077},
mendeley-groups = {CIMAT},
pages = {414},
publisher = {McGraw Hill},
title = {{Machine Learning}},
url = {http://www.cs.cmu.edu/{~}tom/mlbook.html},
year = {1997}
}
@article{Batista2004,
author = {Batista, Gustavo E. A. P. A. and Prati, Ronaldo C. and Monard, Maria Carolina},
doi = {10.1145/1007730.1007735},
issn = {19310145},
journal = {ACM SIGKDD Explorations},
mendeley-groups = {CIMAT/Thesis},
number = {1},
pages = {20--29},
title = {{A study of the behavior of several methods for balancing machine learning training data}},
url = {http://dl.acm.org/citation.cfm?id=1007730.1007735},
volume = {6},
year = {2004}
}
@book{Bishop2013,
abstract = {The dramatic growth in practical applications for machine learning over the last ten years has been accompanied by many important developments in the underlying algorithms and techniques. For example, Bayesian methods have grown from a specialist niche to become mainstream, while graphical models have emerged as a general framework for describing and applying probabilistic techniques. The practical applicability of Bayesian methods has been greatly enhanced by the development of a range of approximate inference algorithms such as variational Bayes and expectation propagation, while new models based on kernels have had a significant impact on both algorithms and applications. This completely new textbook reflects these recent developments while providing a comprehensive introduction to the fields of pattern recognition and machine learning. It is aimed at advanced undergraduates or first-year PhD students, as well as researchers and practitioners. No previous knowledge of pattern recognition or machine learning concepts is assumed. Familiarity with multivariate calculus and basic linear algebra is required, and some experience in the use of probabilities would be helpful though not essential as the book includes a self-contained introduction to basic probability theory. The book is suitable for courses on machine learning, statistics, computer science, signal processing, computer vision, data mining, and bioinformatics. Extensive support is provided for course instructors, including more than 400 exercises, graded according to difficulty. Example solutions for a subset of the exercises are available from the book web site, while solutions for the remainder can be obtained by instructors from the publisher. The book is supported by a great deal of additional material, and the reader is encouraged to visit the book web site for the latest information. Christopher M. Bishop is Deputy Director of Microsoft Research Cambridge, and holds a Chair in Computer Science at the University of Edinburgh. He is a Fellow of Darwin College Cambridge, a Fellow of the Royal Academy of Engineering, and a Fellow of the Royal Society of Edinburgh. His previous textbook "Neural Networks for Pattern Recognition" has been widely adopted.},
author = {Bishop, Christopher M.},
isbn = {978-0-387-31073-2},
mendeley-groups = {CIMAT/Thesis},
publisher = {Springer},
title = {{Pattern Recognition and Machine Learning}},
year = {2006}
}
@book{Hastie2009,
abstract = {During the past decade there has been an explosion in computation and information technology. With it has come vast amounts of data in a variety of fields such as medicine, biology, finance, and marketing. The challenge of understanding these data has led to the development of new tools in the field of statistics, and spawned new areas such as data mining, machine learning, and bioinformatics. Many of these tools have common underpinnings but are often expressed with different terminology. This book describes the important ideas in these areas in a common conceptual framework. While the approach is statistical, the emphasis is on concepts rather than mathematics. Many examples are given, with a liberal use of color graphics. It should be a valuable resource for statisticians and anyone interested in data mining in science or industry. The book's coverage is broad, from supervised learning (prediction) to unsupervised learning. The many topics include neural networks, support vector machines, classification trees and boosting-the first comprehensive treatment of this topic in any book. Trevor Hastie, Robert Tibshirani, and Jerome Friedman are professors of statistics at Stanford University. They are prominent researchers in this area: Hastie and Tibshirani developed generalized additive models and wrote a popular book of that title. Hastie wrote much of the statistical modeling software in S-PLUS and invented principal curves and surfaces. Tibshirani proposed the Lasso and is co-author of the very successful An Introduction to the Bootstrap. Friedman is the co-inventor of many data-mining tools including CART, MARS, and projection pursuit. FROM THE REVIEWS: TECHNOMETRICS "This is a vast and complex book. Generally, it concentrates on explaining why and how the methods work, rather than how to use them. Examples and especially the visualizations are principle features...As a source for the methods of statistical learning...it will probably be a long time before there is a competitor to this book."},
author = {Hastie, Trevor and Tibshirani, Robert and Friedman, Jerome},
doi = {10.1007/b94608},
edition = {Second},
isbn = {978-0-387-84857-0},
mendeley-groups = {CIMAT/Thesis},
publisher = {Springer},
series = {Springer Series in Statistics},
title = {{The Elements of Statistical Learning}},
url = {http://www.springerlink.com/index/10.1007/b94608},
year = {2009}
}
@book{P.Murphy2012,
author = {Murphy, Kevin P.},
isbn = {9780262018029},
mendeley-groups = {CIMAT/Thesis},
publisher = {The MIT Press},
title = {{Machine Learning: A Probabilistic Perspective}},
year = {2012}
}
@article{Chandola2009,
author = {Chandola, Varun and Banerjee, Arindam and Kumar, Vipin},
doi = {10.1145/1541880.1541882},
issn = {03600300},
journal = {ACM Computing Surveys},
mendeley-groups = {CIMAT/Thesis},
number = {3},
pages = {1--58},
title = {{Anomaly Detection: A Survey}},
url = {http://portal.acm.org/citation.cfm?doid=1541880.1541882},
volume = {41},
year = {2009}
}
@article{Liu2006,
author = {Liu, Yang and Chawla, Nitesh V. and Harper, Mary P. and Shriberg, Elizabeth and Stolcke, Andreas},
doi = {10.1016/j.csl.2005.06.002},
issn = {08852308},
journal = {Computer Speech {\&} Language},
mendeley-groups = {CIMAT/Thesis},
month = {oct},
number = {4},
pages = {468--494},
title = {{A study in machine learning from imbalanced data for sentence boundary detection in speech}},
url = {http://linkinghub.elsevier.com/retrieve/pii/S0885230805000306},
volume = {20},
year = {2006}
}
@article{Goodfellow2013,
abstract = {Recognizing arbitrary multi-character text in unconstrained natural photographs is a hard problem. In this paper, we address an equally hard sub-problem in this domain viz. recognizing arbitrary multi-digit numbers from Street View imagery. Traditional approaches to solve this problem typically separate out the localization, segmentation, and recognition steps. In this paper we propose a unified approach that integrates these three steps via the use of a deep convolutional neural network that operates directly on the image pixels. We employ the DistBelief implementation of deep neural networks in order to train large, distributed neural networks on high quality images. We find that the performance of this approach increases with the depth of the convolutional network, with the best performance occurring in the deepest architecture we trained, with eleven hidden layers. We evaluate this approach on the publicly available SVHN dataset and achieve over {\$}96\backslash{\%}{\$} accuracy in recognizing complete street numbers. We show that on a per-digit recognition task, we improve upon the state-of-the-art, achieving {\$}97.84\backslash{\%}{\$} accuracy. We also evaluate this approach on an even more challenging dataset generated from Street View imagery containing several tens of millions of street number annotations and achieve over {\$}90\backslash{\%}{\$} accuracy. To further explore the applicability of the proposed system to broader text recognition tasks, we apply it to synthetic distorted text from reCAPTCHA. reCAPTCHA is one of the most secure reverse turing tests that uses distorted text to distinguish humans from bots. We report a {\$}99.8\backslash{\%}{\$} accuracy on the hardest category of reCAPTCHA. Our evaluations on both tasks indicate that at specific operating thresholds, the performance of the proposed system is comparable to, and in some cases exceeds, that of human operators.},
archivePrefix = {arXiv},
arxivId = {1312.6082},
author = {Goodfellow, Ian J and Bulatov, Yaroslav and Ibarz, Julian and Arnoud, Sacha and Shet, Vinay},
eprint = {1312.6082},
journal = {CoRR},
mendeley-groups = {CIMAT/Thesis},
pages = {1--13},
title = {{Multi-digit Number Recognition from Street View Imagery using Deep Convolutional Neural Networks}},
url = {http://arxiv.org/abs/1312.6082},
volume = {abs/1312.6082},
year = {2013}
}
@article{Low2010,
abstract = {Designing and implementing efficient, provably correct parallel machine learning (ML) algorithms is challenging. Existing high-level parallel abstractions like MapReduce are insufficiently expressive while low-level tools like MPI and Pthreads leave ML experts repeatedly solving the same design challenges. By targeting common patterns in ML, we developed GraphLab, which improves upon abstractions like MapReduce by compactly expressing asynchronous iterative algorithms with sparse computational dependencies while ensuring data consistency and achieving a high degree of parallel performance. We demonstrate the expressiveness of the GraphLab framework by designing and implementing parallel versions of belief propagation, Gibbs sampling, Co-EM, Lasso and Compressed Sensing. We show that using GraphLab we can achieve excellent parallel performance on large scale real-world problems.},
archivePrefix = {arXiv},
arxivId = {1006.4990},
author = {Low, Yucheng and Gonzalez, Joseph and Kyrola, Aapo and Bickson, Danny and Guestrin, Carlos and Hellerstein, Joseph M.},
eprint = {1006.4990},
isbn = {978-0-9749039-6-5},
issn = {2150-8097},
journal = {Proceedings of the 26th Conference on Uncertainty in Artificial Intelligence},
mendeley-groups = {CIMAT/Thesis},
pages = {8--11},
title = {{GraphLab: A New Framework for Parallel Machine Learning}},
url = {http://arxiv.org/abs/1006.4990},
year = {2010}
}
@article{Jiao2016,
author = {Jiao, Yasen and Du, Pufeng},
doi = {10.1007/s40484-016-0081-2},
issn = {2095-4689},
journal = {Quantitative Biology},
mendeley-groups = {CIMAT/Thesis},
month = {dec},
number = {4},
pages = {320--330},
title = {{Performance measures in evaluating machine learning based bioinformatics predictors for classifications}},
url = {http://link.springer.com/10.1007/s40484-016-0081-2},
volume = {4},
year = {2016}
}
@article{Fawcett2006,
author = {Fawcett, Tom},
doi = {10.1016/j.patrec.2005.10.010},
issn = {01678655},
journal = {Pattern Recognition Letters},
mendeley-groups = {CIMAT/Thesis},
month = {jun},
number = {8},
pages = {861--874},
title = {{An introduction to ROC analysis}},
url = {http://linkinghub.elsevier.com/retrieve/pii/S016786550500303X},
volume = {27},
year = {2006}
}
@book{Hosmer2000,
author = {Hosmer, D. and Lemeshow, S. and Sturdivant, R.},
edition = {3rd},
isbn = {978-0-470-58247-3},
mendeley-groups = {CIMAT/Thesis},
publisher = {John Wiley {\&} Sons, Inc.},
title = {{Applied Logistic Regression}},
year = {2013}
}
@book{Kleinbaum2010,
address = {New York, NY},
author = {Kleinbaum, David G. and Klein, Mitchel},
doi = {10.1007/978-1-4419-1742-3},
edition = {3rd},
isbn = {978-1-4419-1741-6},
mendeley-groups = {CIMAT/Thesis},
publisher = {Springer New York},
series = {Statistics for Biology and Health},
title = {{Logistic Regression}},
url = {http://link.springer.com/10.1007/978-1-4419-1742-3},
volume = {1},
year = {2010}
}
@book{Kowalczyk2017,
author = {Kowalczyk, Alexandre},
mendeley-groups = {CIMAT/Thesis},
publisher = {Syncfusion},
title = {{Support Vector Machines Succinctly}},
year = {2017}
}
@misc{Ng2016,
author = {Ng, Andrew},
mendeley-groups = {CIMAT/Thesis},
publisher = {Coursera.org, Youtube.com},
title = {{Lecture 12 Support Vector Machines}},
year = {2016}
}
@misc{Wikipedia2018,
author = {Wikipedia},
booktitle = {Wikipedia: The Free Encyclopedia},
mendeley-groups = {CIMAT/Thesis},
publisher = {Wikimedia Foundation Inc.},
title = {{Support vector machine}},
url = {https://en.wikipedia.org/wiki/Support{\_}vector{\_}machine},
year = {2018}
}
@article{Burges1998,
abstract = {The tutorial starts with an overview of the concepts of VC dimension and structural risk minimization. We then describe linear Support Vector Machines (SVMs) for separable and non-separable data, working through a non-trivial example in detail. We describe a mechanical analogy, and discuss when SVM solutions are unique and when they are global. We describe how support vector training can be practically implemented, and discuss in detail the kernel mapping technique which is used to construct SVM solutions which are nonlinear in the data. We show how Support Vector machines can have very large (even infinite) VC dimension by computing the VC dimension for homogeneous polynomial and Gaussian radial basis function kernels. While very high VC dimension would normally bode ill for generalization performance, and while at present there exists no theory which shows that good generalization performance is guaranteed for SVMs, there are several arguments which support the observed high accuracy of SVMs, which we review. Results of some experiments which were inspired by these arguments are also presented. We give numerous examples and proofs of most of the key theorems. There is new material, and I hope that the reader will find that even old material is cast in a fresh light.},
author = {Burges, Christopher J. C.},
doi = {10.1023/A:1009715923555},
issn = {13845810},
journal = {Data Mining and Knowledge Discovery},
keywords = {pattern recognition,statistical learning theory,support vector machines,vc dimension},
mendeley-groups = {CIMAT/Thesis},
number = {2},
pages = {121--167},
title = {{A tutorial on support vector machines for pattern recognition}},
url = {http://www.springerlink.com/index/Q87856173126771Q.pdf},
volume = {2},
year = {1998}
}
@misc{Developers,
author = {{Scikit-learn developers}},
booktitle = {scikit-learn.org},
mendeley-groups = {CIMAT/Thesis},
title = {{RBF SVM parameters}},
url = {http://scikit-learn.org/stable/auto{\_}examples/svm/plot{\_}rbf{\_}parameters.html}
}
@book{Russell2010,
abstract = {The long-anticipated revision of this best-selling book offers the most comprehensive, up-to-date introduction to the theory and practice of artificial intelligence. Intelligent Agents. Solving Problems by Searching. Informed Search Methods. Game Playing. Agents that Reason Logically. First-order Logic. Building a Knowledge Base. Inference in First-Order Logic. Logical Reasoning Systems. Practical Planning. Planning and Acting. Uncertainty. Probabilistic Reasoning Systems. Making Simple Decisions. Making Complex Decisions. Learning from Observations. Learning with Neural Networks. Reinforcement Learning. Knowledge in Learning. Agents that Communicate. Practical Communication in English. Perception. Robotics. For those interested in artificial intelligence.},
author = {Russell, Stuart J. and Norvig, Peter},
edition = {Third},
isbn = {978-0-13-604259-4},
mendeley-groups = {CIMAT/Thesis},
pages = {1132},
publisher = {Prentice Hall},
title = {{Artificial Intelligence: A Modern Approach}},
year = {2010}
}
@book{Gareth2013,
author = {James, Gareth and Witten, Daniela and Hastie, Trevor and Tibshirani, Robert},
publisher = {Springer},
title = {{An Introduction to Statistical Learning: with Applications in R}},
url = {http://books.google.com/books?id=9tv0taI8l6YC},
year = {2013}
}
@inproceedings{Simard2003,
author = {Simard, Patrice Y. and Steinkraus, David and Platt, John C.},
booktitle = {Proceedings of the Seventh International Conference on Document Analysis and Recognition (ICDAR)},
mendeley-groups = {CIMAT/Thesis},
pages = {958--962},
title = {{Best Practices for Convolutional Neural Networks Applied to Visual Document Analysis}},
url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.160.8494{\&}rep=rep1{\&}type=pdf},
volume = {3},
year = {2003}
}
@article{Golik2013,
abstract = {In this paper we investigate the error criteria that are optimized during the training of artificial neural networks (ANN). We compare the bounds of the squared error (SE) and the crossentropy (CE) criteria being the most popular choices in stateof- The art implementations. The evaluation is performed on automatic speech recognition (ASR) and handwriting recognition (HWR) tasks using a hybrid HMM-ANN model. We find that with randomly initialized weights, the squared error based ANN does not converge to a good local optimum. However, with a good initialization by pre-training, the word error rate of our best CE trained system could be reduced from 30.9{\%} to 30.5{\%} on the ASR, and from 22.7{\%} to 21.9{\%} on the HWR task by performing a few additional "fine-tuning" iterations with the SE criterion. Copyright {\textcopyright} 2013 ISCA.},
author = {Golik, Pavel and Doetsch, Patrick and Ney, Hermann},
issn = {19909772},
journal = {Interspeech 2013},
keywords = {Automatic speech recognition,Handwriting recognition,Hybrid approach,Training criterion for ANN training},
mendeley-groups = {CIMAT/Thesis},
number = {August},
pages = {1756--1760},
title = {{Cross-entropy vs. Squared error training: A theoretical and experimental comparison}},
year = {2013}
}
@book{Newman1992,
address = {London},
doi = {10.1007/978-1-349-11721-5},
editor = {Newman, Peter and Milgate, Murray and Eatwell, John},
isbn = {978-1-349-11723-9},
mendeley-groups = {CIMAT/Thesis},
publisher = {Palgrave Macmillan UK},
title = {{The New Palgrave Dictionary of Money {\&} Finance}},
url = {http://link.springer.com/10.1007/978-1-349-11721-5},
year = {1992}
}
@article{Timmermann2004,
abstract = {The efficient market hypothesis gives rise to forecasting tests that mirror those adopted when testing the optimality of a forecast in the context of a given information set. However, there are also important differences arising from the fact that market efficiency tests rely on establishing profitable trading opportunities in 'real time'. Forecasters constantly search for predictable patterns and affect prices when they attempt to exploit trading opportunities. Stable forecasting patterns are therefore unlikely to persist for long periods of time and will self-destruct when discovered by a large number of investors. This gives rise to non-stationarities in the time series of financial returns and complicates both formal tests of market efficiency and the search for successful forecasting approaches. {\textcopyright} 2003 International Institute of Forecasters. Published by Elsevier B.V. All rights reserved.},
author = {Timmermann, Allan and Granger, Clive W.J.},
doi = {10.1016/S0169-2070(03)00012-8},
issn = {01692070},
journal = {International Journal of Forecasting},
keywords = {Efficient market hypothesis,Forecast evaluation,Learning,Model specification},
mendeley-groups = {CIMAT/Thesis},
number = {1},
pages = {15--27},
title = {{Efficient market hypothesis and forecasting}},
volume = {20},
year = {2004}
}
@book{Shanmugam2016,
abstract = {Some of the key mathematical results are stated without proof in order to make the underlying theory accessible to a wider audience. The book assumes a knowledge only of basic calculus, matrix algebra, and elementary statistics. The emphasis is on methods and the analysis of data sets. The logic and tools of model-building for stationary and nonstationary time series are developed in detail and numerous exercises, many of which make use of the included computer package, provide the reader with ample opportunity to develop skills in this area. The core of the book covers stationary processes, ARMA and ARIMA processes, multivariate time series and state-space models, with an optional chapter on spectral analysis. Additional topics include harmonic regression, the Burg and Hannan-Rissanen algorithms, unit roots, regression with ARMA errors, structural models, the EM algorithm, generalized state-space models with applications to time series of count data, exponential smoothing, the Holt-Winters and ARAR forecasting algorithms, transfer function models and intervention analysis. Brief introductions are also given to cointegration and to nonlinear, continuous-time and long-memory models. The time series package included in the back of the book is a slightly modified version of the package ITSM, published separately as ITSM for Windows, by Springer-Verlag, 1994. It does not handle such large data sets as ITSM for Windows, but like the latter, runs on IBM-PC compatible computers under either DOS or Windows (version 3.1 or later). The programs are all menu-driven so that the reader can immediately apply the techniques in the book to time series data, with a minimal investment of time in the computational and algorithmic aspects of the analysis.},
author = {Brockwell, Peter J. and Davis, Richard A.},
edition = {Third},
isbn = {978-3-319-29852-8},
mendeley-groups = {CIMAT/Thesis},
publisher = {Springer},
series = {Springer Texts in Statistics},
title = {{Introduction to Time Series and Forecasting}},
year = {2016}
}
@article{Hong2014,
abstract = {Value-at-risk (VaR) and conditional value-at-risk (CVaR) are two widely used risk measures of large losses and are employed in the financial industry for risk management purposes. In practice, loss distributions typically do not have closed-form expressions, but they can often be simulated (i.e., random observations of the loss distribution may be obtained by running a computer program). Therefore, Monte Carlo methods that design simulation experiments and utilize simulated observations are often employed in estimation, sensitivity analysis, and optimization of VaRs and CVaRs. In this article, we review some of the recent devel- opments in these methods, provide a unified framework to understand them, and discuss their applications in financial risk management.},
archivePrefix = {arXiv},
arxivId = {1308.0889},
author = {Hong, L. Jeff and Hu, Zhaolin and Liu, Guangwu},
doi = {10.1145/2661631},
eprint = {1308.0889},
issn = {10493301},
journal = {ACM Transactions on Modeling and Computer Simulation},
mendeley-groups = {CIMAT/Thesis},
number = {4},
pages = {1--37},
title = {{Monte Carlo Methods for Value-at-Risk and Conditional Value-at-Risk}},
url = {http://dl.acm.org/citation.cfm?doid=2617568.2661631},
volume = {24},
year = {2014}
}
@misc{Strong2009,
abstract = {The fundamental principles of financial risk assessment are discussed, with primary emphasis on using simulation to evaluate and compare alternative investments. First we introduce the key measures of performance for such investments, including net present value, internal rate of return, and modified internal rate of return. Next we discuss types of risk and the key measures of risk, including expected present value; the mean, standard deviation, and coefficient of variation of the rate of return; and the risk premium. Finally we detail the following applications: (i) stand-alone risk assessment for a capital-budgeting problem; (ii) comparison of risk-free and risky investment strategies designed merely to keep up with the cost of living; (iii) value-at-risk (VAR) analysis for a single-stock investment; (iv) VAR analyses for two-asset portfolios consisting of stock and either call or put options; and (v) VAR analyses for two-asset portfolios consisting of both puts and calls.},
author = {Strong, Robert A. and Steiger, Natalie M. and Wilson, James R.},
booktitle = {Proceedings - Winter Simulation Conference},
doi = {10.1109/WSC.2009.5429323},
isbn = {9781424457700},
issn = {08917736},
mendeley-groups = {CIMAT/Thesis},
pages = {99--118},
title = {{Introduction to financial risk assessment using Monte Carlo simulation}},
year = {2009}
}
@book{Bandy2015,
address = {Eugene, Oregon, US},
author = {Bandy, Howard B.},
edition = {First},
isbn = {9780979183850},
keywords = {trading,trading management,trading strategies,trading system development},
mendeley-groups = {CIMAT/Thesis},
mendeley-tags = {trading system development,trading management,trading,trading strategies},
publisher = {Blue Owl Press, Inc.},
title = {{Quantitative Technical Analysis}},
year = {2015}
}
@misc{Bloomberg2018,
author = {Bloomberg},
booktitle = {Bloomberg L.P.},
mendeley-groups = {CIMAT/Thesis},
title = {{S{\&}P 500 Index - Bloomberg}},
url = {https://www.bloomberg.com/quote/SPX:IND},
urldate = {2011-08-20},
year = {2018}
}
@misc{Shiller2016,
author = {Shiller, Robert},
booktitle = {GitHub},
mendeley-groups = {CIMAT/Thesis},
title = {{Investing Returns on the S{\&}P500}},
urldate = {October 2018},
year = {2016}
}
@book{Fabozzi2010,
author = {Fabozzi, Frank J. and Modigliani, Franco P. and Jones, Frank J.},
edition = {Fourth},
isbn = {9780136135319},
mendeley-groups = {CIMAT/Thesis},
publisher = {Prentice Hall},
title = {{Foundations of Financial Markets and Institutions}},
year = {2010}
}
@article{Hendershott2011,
author = {Hendershott, Terrence and Jones, Charles M. and Menkveld, Albert J.},
doi = {10.1111/j.1540-6261.2010.01624.x},
issn = {00221082},
journal = {The Journal of Finance},
mendeley-groups = {CIMAT/Thesis},
month = {feb},
number = {1},
pages = {1--33},
title = {{Does Algorithmic Trading Improve Liquidity?}},
url = {http://doi.wiley.com/10.1111/j.1540-6261.2010.01624.x},
volume = {66},
year = {2011}
}
@book{Taleb2005,
author = {Taleb, Nassim Nicholas},
edition = {Second},
isbn = {0812975219},
mendeley-groups = {CIMAT/Thesis},
publisher = {Penguin Random House LLC},
title = {{Fooled by Randomness: The Hidden Role of Chance in Life and in the Markets}},
year = {2005}
}
@inproceedings{Bajpai2017,
address = {New York, New York, USA},
author = {Bajpai, Vaibhav and K{\"{u}}hlewind, Mirja and Ott, J{\"{o}}rg and Sch{\"{o}}nw{\"{a}}lder, J{\"{u}}rgen and Sperotto, Anna and Trammell, Brian},
booktitle = {Proceedings of the Reproducibility Workshop (Reproducibility '17)},
doi = {10.1145/3097766.3097767},
isbn = {9781450350600},
keywords = {thesisc2},
mendeley-groups = {CIMAT/Thesis},
mendeley-tags = {thesisc2},
pages = {1--4},
publisher = {ACM Press},
title = {{Challenges with Reproducibility}},
url = {http://dl.acm.org/citation.cfm?doid=3097766.3097767},
year = {2017}
}