[ { "id": "6dea352af7", "title": "(Not) Bounding the True Error", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/98c7242894844ecd6ec94af67ac8247d-Abstract.html", "author": "John Langford; Rich Caruana", "abstract": "We present a new approach to bounding the true error rate of a continuous valued classi\ufb01er based upon PAC-Bayes bounds. The method \ufb01rst con- structs a distribution over classi\ufb01ers by determining how sensitive each parameter in the model is to noise. The true error rate of the stochastic classi\ufb01er found with the sensitivity analysis can then be tightly bounded using a PAC-Bayes bound. In this paper we demonstrate the method on arti\ufb01cial neural networks with results of a order of magnitude im- provement vs. the best deterministic neural net bounds.", "bibtex": "@inproceedings{NIPS2001_98c72428,\n author = {Langford, John and Caruana, Rich},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {(Not) Bounding the True Error},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/98c7242894844ecd6ec94af67ac8247d-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/98c7242894844ecd6ec94af67ac8247d-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/98c7242894844ecd6ec94af67ac8247d-Metadata.json", "review": "", "metareview": "", "pdf_size": 98228, "gs_citation": 36, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=412832143647801361&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Department of Computer Science, Carnegie-Mellon University, Pittsburgh, PA 15213; Department of Computer Science, Cornell University, Ithaca, NY 14853", "aff_domain": "cs.cmu.edu;cs.cornell.edu", "email": "cs.cmu.edu;cs.cornell.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "Carnegie Mellon University;Cornell University", "aff_unique_dep": "Department of Computer Science;Department of Computer Science", "aff_unique_url": "https://www.cmu.edu;https://www.cornell.edu", "aff_unique_abbr": "CMU;Cornell", "aff_campus_unique_index": "0;1", "aff_campus_unique": "Pittsburgh;Ithaca", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "a2d0e6e077", "title": "3 state neurons for contextual processing", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/bdc4626aa1d1df8e14d80d345b2a442d-Abstract.html", "author": "\u00c1d\u00e1m Kepecs; S. Raghavachari", "abstract": "Neurons receive excitatory inputs via both fast AMPA and slow NMDA type receptors. We find that neurons receiving input via NMDA receptors can have two stable membrane states which are input dependent. Action potentials can only be initiated from the higher voltage state. 
Similar observations have been made in several brain areas which might be explained by our model. The interactions between the two kinds of inputs lead us to suggest that some neurons may operate in 3 states: disabled, enabled and firing. Such enabled, but non-firing modes can be used to introduce context-dependent processing in neural networks. We provide a simple example and discuss possible implications for neuronal processing and response variability.", "bibtex": "@inproceedings{NIPS2001_bdc4626a,\n author = {Kepecs, \\'{A}d\\'{a}m and Raghavachari, S.},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {3 state neurons for contextual processing},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/bdc4626aa1d1df8e14d80d345b2a442d-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/bdc4626aa1d1df8e14d80d345b2a442d-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/bdc4626aa1d1df8e14d80d345b2a442d-Metadata.json", "review": "", "metareview": "", "pdf_size": 1440160, "gs_citation": 5, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4265960785275754229&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": ";", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster" }, { "id": "b161dcea92", "title": "A Bayesian Model Predicts Human Parse Preference and Reading Times in Sentence Processing", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/f15d337c70078947cfe1b5d6f0ed3f13-Abstract.html", "author": "S. 
Narayanan; Daniel Jurafsky", "abstract": "Narayanan and Jurafsky (1998) proposed that human language compre- hension can be modeled by treating human comprehenders as Bayesian reasoners, and modeling the comprehension process with Bayesian de- cision trees. In this paper we extend the Narayanan and Jurafsky model to make further predictions about reading time given the probability of difference parses or interpretations, and test the model against reading time data from a psycholinguistic experiment.", "bibtex": "@inproceedings{NIPS2001_f15d337c,\n author = {Narayanan, S. and Jurafsky, Daniel},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {A Bayesian Model Predicts Human Parse Preference and Reading Times in Sentence Processing},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/f15d337c70078947cfe1b5d6f0ed3f13-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/f15d337c70078947cfe1b5d6f0ed3f13-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/f15d337c70078947cfe1b5d6f0ed3f13-Metadata.json", "review": "", "metareview": "", "pdf_size": 77371, "gs_citation": 100, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11935136689944898034&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 17, "aff": "SRI International and ICSI Berkeley; University of Colorado, Boulder", "aff_domain": "cs.berkeley.edu;colorado.edu", "email": "cs.berkeley.edu;colorado.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "SRI International;University of Colorado", "aff_unique_dep": ";", "aff_unique_url": "https://www.sri.com;https://www.colorado.edu", "aff_unique_abbr": "SRI;CU", "aff_campus_unique_index": "1", "aff_campus_unique": ";Boulder", 
"aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "78e46e9a81", "title": "A Bayesian Network for Real-Time Musical Accompaniment", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/2b0f658cbffd284984fb11d90254081f-Abstract.html", "author": "Christopher Raphael", "abstract": "We describe a computer system that provides a real-time musical accompaniment for a live soloist in a piece of non-improvised music for soloist and accompaniment. A Bayesian network is developed that represents the joint distribution on the times at which the solo and accompaniment notes are played, relating the two parts through a layer of hidden variables. The network is first constructed using the rhythmic information contained in the musical score. The network is then trained to capture the musical interpretations of the soloist and accompanist in an off-line rehearsal phase. During live accompaniment the learned distribution of the network is combined with a real-time analysis of the soloist's acoustic signal, performed with a hidden Markov model, to generate a musically principled accompaniment that respects all available sources of knowledge. A live demonstration will be provided.", "bibtex": "@inproceedings{NIPS2001_2b0f658c,\n author = {Raphael, Christopher},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {A Bayesian Network for Real-Time Musical Accompaniment},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/2b0f658cbffd284984fb11d90254081f-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/2b0f658cbffd284984fb11d90254081f-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/2b0f658cbffd284984fb11d90254081f-Metadata.json", "review": "", "metareview": "", "pdf_size": 1357175, "gs_citation": 91, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7923819975504069157&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "Department of Mathematics and Statistics, University of Massachusetts at Amherst", "aff_domain": "math.umass.edu", "email": "math.umass.edu", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "University of Massachusetts Amherst", "aff_unique_dep": "Department of Mathematics and Statistics", "aff_unique_url": "https://www.umass.edu", "aff_unique_abbr": "UMass Amherst", "aff_campus_unique_index": "0", "aff_campus_unique": "Amherst", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "id": "b8d41a47a3", "title": "A Dynamic HMM for On-line Segmentation of Sequential Data", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/a48564053b3c7b54800246348c7fa4a0-Abstract.html", "author": "Jens Kohlmorgen; Steven Lemm", "abstract": "We propose a novel method for the analysis of sequential data that exhibits an inherent mode switching. In particular, the data might be a non-stationary time series from a dynamical system that switches between multiple operating modes. Unlike other ap(cid:173) proaches, our method processes the data incrementally and without any training of internal parameters. 
We use an HMM with a dy(cid:173) namically changing number of states and an on-line variant of the Viterbi algorithm that performs an unsupervised segmentation and classification of the data on-the-fly, i.e. the method is able to pro(cid:173) cess incoming data in real-time. The main idea of the approach is to track and segment changes of the probability density of the data in a sliding window on the incoming data stream. The usefulness of the algorithm is demonstrated by an application to a switching dynamical system.", "bibtex": "@inproceedings{NIPS2001_a4856405,\n author = {Kohlmorgen, Jens and Lemm, Steven},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {A Dynamic HMM for On-line Segmentation of Sequential Data},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/a48564053b3c7b54800246348c7fa4a0-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/a48564053b3c7b54800246348c7fa4a0-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/a48564053b3c7b54800246348c7fa4a0-Metadata.json", "review": "", "metareview": "", "pdf_size": 1529387, "gs_citation": 103, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5639868560461261645&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 14, "aff": "Fraunhofer FIRST.IDA; Fraunhofer FIRST.IDA", "aff_domain": "first\u00b7fraunhofer.de;first\u00b7fraunhofer.de", "email": "first\u00b7fraunhofer.de;first\u00b7fraunhofer.de", "github": "", "project": "http://www.first.fraunhofer.de/..-.jek", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Fraunhofer Institute for Software and Systems Engineering", "aff_unique_dep": "FIRST.IDA", "aff_unique_url": "https://www.first.ida.fraunhofer.de/", "aff_unique_abbr": "Fraunhofer FIRST.IDA", 
"aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "Germany" }, { "id": "ea035bb572", "title": "A General Greedy Approximation Algorithm with Applications", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/a9813e9550fee3110373c21fa012eee7-Abstract.html", "author": "T. Zhang", "abstract": "Greedy approximation algorithms have been frequently used to obtain sparse solutions to learning problems. In this paper, we present a general greedy algorithm for solving a class of convex optimization problems. We derive a bound on the rate of approximation for this algorithm, and show that our algorithm includes a number of earlier studies as special cases.", "bibtex": "@inproceedings{NIPS2001_a9813e95,\n author = {Zhang, T.},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {A General Greedy Approximation Algorithm with Applications},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/a9813e9550fee3110373c21fa012eee7-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/a9813e9550fee3110373c21fa012eee7-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/a9813e9550fee3110373c21fa012eee7-Metadata.json", "review": "", "metareview": "", "pdf_size": 79205, "gs_citation": 16, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4246648729417124251&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 13, "aff": "IBM T.J. 
Watson Research Center", "aff_domain": "watson.ibm.com", "email": "watson.ibm.com", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "IBM", "aff_unique_dep": "Research Center", "aff_unique_url": "https://www.ibm.com/research/watson", "aff_unique_abbr": "IBM", "aff_campus_unique_index": "0", "aff_campus_unique": "T.J. Watson", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "id": "8bcaf82f4e", "title": "A Generalization of Principal Components Analysis to the Exponential Family", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/f410588e48dc83f2822a880a68f78923-Abstract.html", "author": "Michael Collins; S. Dasgupta; Robert E. Schapire", "abstract": "Principal component analysis (PCA) is a commonly applied technique for dimensionality reduction. PCA implicitly minimizes a squared loss function, which may be inappropriate for data that is not real-valued, such as binary-valued data. This paper draws on ideas from the Exponen- tial family, Generalized linear models, and Bregman distances, to give a generalization of PCA to loss functions that we argue are better suited to other data types. We describe algorithms for minimizing the loss func- tions, and give examples on simulated data.", "bibtex": "@inproceedings{NIPS2001_f410588e,\n author = {Collins, Michael and Dasgupta, S. and Schapire, Robert E},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {A Generalization of Principal Components Analysis to the Exponential Family},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/f410588e48dc83f2822a880a68f78923-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/f410588e48dc83f2822a880a68f78923-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/f410588e48dc83f2822a880a68f78923-Metadata.json", "review": "", "metareview": "", "pdf_size": 140745, "gs_citation": 681, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17498632288980371395&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 35, "aff": ";;", "aff_domain": ";;", "email": ";;", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster" }, { "id": "c670b9eb34", "title": "A Hierarchical Model of Complex Cells in Visual Cortex for the Binocular Perception of Motion-in-Depth", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/03e7d2ebec1e820ac34d054df7e68f48-Abstract.html", "author": "Silvio P. Sabatini; Fabio Solari; Giulia Andreani; Chiara Bartolozzi; Giacomo M. Bisio", "abstract": "A cortical model for motion-in-depth selectivity of complex cells in the visual cortex is proposed. The model is based on a time ex(cid:173) tension of the phase-based techniques for disparity estimation. We consider the computation of the total temporal derivative of the time-varying disparity through the combination of the responses of disparity energy units. To take into account the physiological plau(cid:173) sibility, the model is based on the combinations of binocular cells characterized by different ocular dominance indices. 
The resulting cortical units of the model show a sharp selectivity for motion-in(cid:173) depth that has been compared with that reported in the literature for real cortical cells.", "bibtex": "@inproceedings{NIPS2001_03e7d2eb,\n author = {Sabatini, Silvio and Solari, Fabio and Andreani, Giulia and Bartolozzi, Chiara and Bisio, Giacomo},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {A Hierarchical Model of Complex Cells in Visual Cortex for the Binocular Perception of Motion-in-Depth},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/03e7d2ebec1e820ac34d054df7e68f48-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/03e7d2ebec1e820ac34d054df7e68f48-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/03e7d2ebec1e820ac34d054df7e68f48-Metadata.json", "review": "", "metareview": "", "pdf_size": 1273103, "gs_citation": 14, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3411014853619549724&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 18, "aff": "Department of Biophysical and Electronic Engineering, University of Genoa, 1-16145 Genova, ITALY; Department of Biophysical and Electronic Engineering, University of Genoa, 1-16145 Genova, ITALY; Department of Biophysical and Electronic Engineering, University of Genoa, 1-16145 Genova, ITALY; Department of Biophysical and Electronic Engineering, University of Genoa, 1-16145 Genova, ITALY; Department of Biophysical and Electronic Engineering, University of Genoa, 1-16145 Genova, ITALY", "aff_domain": "dibe.unige.it; ; ; ; ", "email": "dibe.unige.it; ; ; ; ", "github": "", "project": "", "author_num": 5, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;0;0", "aff_unique_norm": "University of Genoa", "aff_unique_dep": "Department of Biophysical and 
Electronic Engineering", "aff_unique_url": "https://www.unige.it", "aff_unique_abbr": "", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "Italy" }, { "id": "d7ad1576e0", "title": "A Maximum-Likelihood Approach to Modeling Multisensory Enhancement", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/fb87582825f9d28a8d42c5e5e5e8b23d-Abstract.html", "author": "H. Colonius; A. Diederich", "abstract": "Multisensory response enhancement (MRE) is the augmentation of the response of a neuron to sensory input of one modality by si(cid:173) multaneous input from another modality. The maximum likelihood (ML) model presented here modifies the Bayesian model for MRE (Anastasio et al.) by incorporating a decision strategy to maximize the number of correct decisions. Thus the ML model can also deal with the important tasks of stimulus discrimination and identifi(cid:173) cation in the presence of incongruent visual and auditory cues. It accounts for the inverse effectiveness observed in neurophysiolog(cid:173) ical recording data, and it predicts a functional relation between uni- and bimodal levels of discriminability that is testable both in neurophysiological and behavioral experiments.", "bibtex": "@inproceedings{NIPS2001_fb875828,\n author = {Colonius, H. and Diederich, A.},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {A Maximum-Likelihood Approach to Modeling Multisensory Enhancement},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/fb87582825f9d28a8d42c5e5e5e8b23d-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/fb87582825f9d28a8d42c5e5e5e8b23d-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/fb87582825f9d28a8d42c5e5e5e8b23d-Metadata.json", "review": "", "metareview": "", "pdf_size": 1244121, "gs_citation": 29, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14766627667488690530&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 16, "aff": "Institut fUr Kognitionsforschung, Carl von Ossietzky Universitat Oldenburg; School of Social Sciences, International University Bremen", "aff_domain": "uni-oldenburg.de;iu-bremen.de", "email": "uni-oldenburg.de;iu-bremen.de", "github": "", "project": "www.uni-oldenburg.de/psychologie/hans.colonius/index.html", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "Carl von Ossietzky University of Oldenburg;International University Bremen", "aff_unique_dep": "Institut f\u00fcr Kognitionsforschung;School of Social Sciences", "aff_unique_url": "https://www.uol.de;https://www.iubremen.de", "aff_unique_abbr": ";", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "Germany" }, { "id": "8faa3b2654", "title": "A Model of the Phonological Loop: Generalization and Binding", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/312351bff07989769097660a56395065-Abstract.html", "author": "Randall C. O'Reilly; R. 
Soto", "abstract": "We present a neural network model that shows how the prefrontal cortex, interacting with the basal ganglia, can maintain a sequence of phonological information in activation-based working memory (i.e., the phonological loop). The primary function of this phono(cid:173) logical loop may be to transiently encode arbitrary bindings of information necessary for tasks - the combinatorial expressive power of language enables very flexible binding of essentially ar(cid:173) bitrary pieces of information. Our model takes advantage of the closed-class nature of phonemes, which allows different neural rep(cid:173) resentations of all possible phonemes at each sequential position to be encoded. To make this work, we suggest that the basal ganglia provide a region-specific update signal that allocates phonemes to the appropriate sequential coding slot. To demonstrate that flexi(cid:173) ble, arbitrary binding of novel sequences can be supported by this mechanism, we show that the model can generalize to novel se(cid:173) quences after moderate amounts of training.", "bibtex": "@inproceedings{NIPS2001_312351bf,\n author = {O\\textquotesingle Reilly, Randall and Soto, R.},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {A Model of the Phonological Loop: Generalization and Binding},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/312351bff07989769097660a56395065-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/312351bff07989769097660a56395065-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/312351bff07989769097660a56395065-Metadata.json", "review": "", "metareview": "", "pdf_size": 1570938, "gs_citation": 32, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6029609666683750106&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "Department of Psychology, University of Colorado Boulder; Department of Psychology, University of Colorado Boulder", "aff_domain": "psych.colorado.edu; ", "email": "psych.colorado.edu; ", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of Colorado Boulder", "aff_unique_dep": "Department of Psychology", "aff_unique_url": "https://www.colorado.edu", "aff_unique_abbr": "CU Boulder", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Boulder", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "82c3bb1631", "title": "A Natural Policy Gradient", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/4b86abe48d358ecf194c56c69108433e-Abstract.html", "author": "Sham M. Kakade", "abstract": "We provide a natural gradient method that represents the steepest descent direction based on the underlying structure of the param(cid:173) eter space. Although gradient methods cannot make large changes in the values of the parameters, we show that the natural gradi(cid:173) ent is moving toward choosing a greedy optimal action rather than just a better action. 
These greedy optimal actions are those that would be chosen under one improvement step of policy iteration with approximate, compatible value functions, as defined by Sut(cid:173) ton et al. [9]. We then show drastic performance improvements in simple MDPs and in the more challenging MDP of Tetris.", "bibtex": "@inproceedings{NIPS2001_4b86abe4,\n author = {Kakade, Sham M},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {A Natural Policy Gradient},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/4b86abe48d358ecf194c56c69108433e-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/4b86abe48d358ecf194c56c69108433e-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/4b86abe48d358ecf194c56c69108433e-Metadata.json", "review": "", "metareview": "", "pdf_size": 1517587, "gs_citation": 1720, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2230462086005572185&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 20, "aff": "Gatsby Computational Neuroscience Unit", "aff_domain": "gatsby.ucl.ac.uk", "email": "gatsby.ucl.ac.uk", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "University College London", "aff_unique_dep": "Gatsby Computational Neuroscience Unit", "aff_unique_url": "https://www.ucl.ac.uk", "aff_unique_abbr": "UCL", "aff_country_unique_index": "0", "aff_country_unique": "United Kingdom" }, { "id": "8aae80ee4d", "title": "A Neural Oscillator Model of Auditory Selective Attention", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/c3535febaff29fcb7c0d20cbe94391c7-Abstract.html", "author": "Stuart N. Wrigley; Guy J. Brown", "abstract": "A model of auditory grouping is described in which auditory attention plays a key role. 
The model is based upon an oscillatory correlation framework, in which neural oscillators representing a single perceptual stream are synchronised, and are desynchronised from oscillators representing other streams. The model suggests a mechanism by which attention can be directed to the high or low tones in a repeating sequence of tones with alternating frequencies. In addition, it simulates the perceptual segregation of a mistuned harmonic from a complex tone.", "bibtex": "@inproceedings{NIPS2001_c3535feb,\n author = {Wrigley, Stuart and Brown, Guy},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {A Neural Oscillator Model of Auditory Selective Attention},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/c3535febaff29fcb7c0d20cbe94391c7-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/c3535febaff29fcb7c0d20cbe94391c7-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/c3535febaff29fcb7c0d20cbe94391c7-Metadata.json", "review": "", "metareview": "", "pdf_size": 101049, "gs_citation": 5, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8206790148069603225&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "Department of Computer Science, University of Sheffield; Department of Computer Science, University of Sheffield", "aff_domain": "dcs.shef.ac.uk;dcs.shef.ac.uk", "email": "dcs.shef.ac.uk;dcs.shef.ac.uk", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of Sheffield", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.sheffield.ac.uk", "aff_unique_abbr": "Sheffield", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": 
"United Kingdom" }, { "id": "2852d2f9d9", "title": "A New Discriminative Kernel From Probabilistic Models", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/cee8d6b7ce52554fd70354e37bbf44a2-Abstract.html", "author": "Koji Tsuda; Motoaki Kawanabe; Gunnar R\u00e4tsch; S\u00f6ren Sonnenburg; Klaus-Robert M\u00fcller", "abstract": "Recently, Jaakkola and Haussler proposed a method for construct(cid:173) ing kernel functions from probabilistic models. Their so called \"Fisher kernel\" has been combined with discriminative classifiers such as SVM and applied successfully in e.g. DNA and protein analysis. Whereas the Fisher kernel (FK) is calculated from the marginal log-likelihood, we propose the TOP kernel derived from Tangent vectors Of Posterior log-odds. Furthermore we develop a theoretical framework on feature extractors from probabilistic models and use it for analyzing FK and TOP. In experiments our new discriminative TOP kernel compares favorably to the Fisher kernel.", "bibtex": "@inproceedings{NIPS2001_cee8d6b7,\n author = {Tsuda, Koji and Kawanabe, Motoaki and R\\\"{a}tsch, Gunnar and Sonnenburg, S\\\"{o}ren and M\\\"{u}ller, Klaus-Robert},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {A New Discriminative Kernel From Probabilistic Models},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/cee8d6b7ce52554fd70354e37bbf44a2-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/cee8d6b7ce52554fd70354e37bbf44a2-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/cee8d6b7ce52554fd70354e37bbf44a2-Metadata.json", "review": "", "metareview": "", "pdf_size": 1427217, "gs_citation": 214, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16416430334570113117&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 29, "aff": ";;;;", "aff_domain": ";;;;", "email": ";;;;", "github": "", "project": "", "author_num": 5, "track": "main", "status": "Poster" }, { "id": "3750a281dc", "title": "A Parallel Mixture of SVMs for Very Large Scale Problems", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/36ac8e558ac7690b6f44e2cb5ef93322-Abstract.html", "author": "Ronan Collobert; Samy Bengio; Yoshua Bengio", "abstract": "Support Vector Machines (SVMs) are currently the state-of-the-art models for many classification problems but they suffer from the complexity of their train(cid:173) ing algorithm which is at least quadratic with respect to the number of examples. Hence, it is hopeless to try to solve real-life problems having more than a few hundreds of thousands examples with SVMs. The present paper proposes a new mixture of SVMs that can be easily implemented in parallel and where each SVM is trained on a small subset of the whole dataset. Experiments on a large benchmark dataset (Forest) as well as a difficult speech database, yielded significant time improvement (time complexity appears empirically to locally grow linearly with the number of examples) . 
In addition, and that is a surprise, a significant improvement in generalization was observed on Forest.", "bibtex": "@inproceedings{NIPS2001_36ac8e55,\n author = {Collobert, Ronan and Bengio, Samy and Bengio, Yoshua},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {A Parallel Mixture of SVMs for Very Large Scale Problems},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/36ac8e558ac7690b6f44e2cb5ef93322-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/36ac8e558ac7690b6f44e2cb5ef93322-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/36ac8e558ac7690b6f44e2cb5ef93322-Metadata.json", "review": "", "metareview": "", "pdf_size": 1474109, "gs_citation": 579, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2901384550363666862&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 47, "aff": "Universite de Montreal, DIRG; IDIAP; Universite de Montreal, DIRG", "aff_domain": "iro.umontreal.ca;idiap.ch;iro.umontreal.ca", "email": "iro.umontreal.ca;idiap.ch;iro.umontreal.ca", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;0", "aff_unique_norm": "Universite de Montreal;Idiap Research Institute", "aff_unique_dep": "DIRG;", "aff_unique_url": "https://www.umontreal.ca;https://www.idiap.ch", "aff_unique_abbr": "UM;IDIAP", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1;0", "aff_country_unique": "Canada;Switzerland" }, { "id": "22f0f8ac27", "title": "A Quantitative Model of Counterfactual Reasoning", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/c3395dd46c34fa7fd8d729d8cf88b7a8-Abstract.html", "author": "Daniel Yarlett; Michael Ramscar", "abstract": "In this paper we explore two quantitative approaches to the 
modelling of counterfactual reasoning \u2013 a linear and a noisy-OR model \u2013 based on in- formation contained in conceptual dependency networks. Empirical data is acquired in a study and the \ufb01t of the models compared to it. We con- clude by considering the appropriateness of non-parametric approaches to counterfactual reasoning, and examining the prospects for other para- metric approaches in the future.", "bibtex": "@inproceedings{NIPS2001_c3395dd4,\n author = {Yarlett, Daniel and Ramscar, Michael},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {A Quantitative Model of Counterfactual Reasoning},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/c3395dd46c34fa7fd8d729d8cf88b7a8-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/c3395dd46c34fa7fd8d729d8cf88b7a8-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/c3395dd46c34fa7fd8d729d8cf88b7a8-Metadata.json", "review": "", "metareview": "", "pdf_size": 71669, "gs_citation": 3, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12749924587496382010&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "Division of Informatics, University of Edinburgh, Edinburgh, Scotland; Division of Informatics, University of Edinburgh, Edinburgh, Scotland", "aff_domain": "cogsci.ed.ac.uk;dai.ed.ac.uk", "email": "cogsci.ed.ac.uk;dai.ed.ac.uk", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of Edinburgh", "aff_unique_dep": "Division of Informatics", "aff_unique_url": "https://www.ed.ac.uk", "aff_unique_abbr": "Edinburgh", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Edinburgh", "aff_country_unique_index": "0;0", "aff_country_unique": "United Kingdom" }, { "id": 
"44ad9f77c8", "title": "A Rational Analysis of Cognitive Control in a Speeded Discrimination Task", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/4a3e00961a08879c34f91ca0070ea2f5-Abstract.html", "author": "Michael Mozer; Michael D. Colagrosso; David E. Huber", "abstract": "We are interested in the mechanisms by which individuals monitor and adjust their performance of simple cognitive tasks. We model a speeded discrimination task in which individuals are asked to classify a sequence of stimuli (Jones & Braver, 2001). Response con\ufb02ict arises when one stimulus class is infrequent relative to another, resulting in more errors and slower reaction times for the infrequent class. How do control pro- cesses modulate behavior based on the relative class frequencies? We explain performance from a rational perspective that casts the goal of individuals as minimizing a cost that depends both on error rate and re- action time. With two additional assumptions of rationality\u2014that class prior probabilities are accurately estimated and that inference is optimal subject to limitations on rate of information transmission\u2014we obtain a good \ufb01t to overall RT and error data, as well as trial-by-trial variations in performance.", "bibtex": "@inproceedings{NIPS2001_4a3e0096,\n author = {Mozer, Michael C and Colagrosso, Michael and Huber, David},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {A Rational Analysis of Cognitive Control in a Speeded Discrimination Task},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/4a3e00961a08879c34f91ca0070ea2f5-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/4a3e00961a08879c34f91ca0070ea2f5-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/4a3e00961a08879c34f91ca0070ea2f5-Metadata.json", "review": "", "metareview": "", "pdf_size": 74167, "gs_citation": 44, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=201289780147307737&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 17, "aff": "Department of Computer Science + Department of Psychology + Institute of Cognitive Science, University of Colorado, Boulder, CO 80309; Department of Computer Science + Department of Psychology + Institute of Cognitive Science, University of Colorado, Boulder, CO 80309; Department of Computer Science + Department of Psychology + Institute of Cognitive Science, University of Colorado, Boulder, CO 80309", "aff_domain": "colorado.edu;colorado.edu;colorado.edu", "email": "colorado.edu;colorado.edu;colorado.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0+1+2;0+1+2;0+1+2", "aff_unique_norm": "Unknown Institution;University Affiliation Not Specified;University of Colorado Boulder", "aff_unique_dep": "Department of Computer Science;Department of Psychology;Institute of Cognitive Science", "aff_unique_url": ";;https://www.colorado.edu", "aff_unique_abbr": ";;CU Boulder", "aff_campus_unique_index": "1;1;1", "aff_campus_unique": ";Boulder", "aff_country_unique_index": "1;1;1", "aff_country_unique": ";United States" }, { "id": "1f256c256b", "title": "A Rotation and Translation Invariant Discrete Saliency Network", "site": 
"https://papers.nips.cc/paper_files/paper/2001/hash/96055f5b06bf9381ac43879351642cf5-Abstract.html", "author": "Lance R. Williams; John W. Zweck", "abstract": "We describe a neural network which enhances and completes salient closed contours. Our work is different from all previous work in three important ways. First, like the input provided to V1 by LGN, the in- put to our computation is isotropic. That is, the input is composed of spots not edges. Second, our network computes a well de\ufb01ned function of the input based on a distribution of closed contours characterized by a random process. Third, even though our computation is implemented in a discrete network, its output is invariant to continuous rotations and translations of the input pattern.", "bibtex": "@inproceedings{NIPS2001_96055f5b,\n author = {Williams, Lance and Zweck, John W.},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {A Rotation and Translation Invariant Discrete Saliency Network},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/96055f5b06bf9381ac43879351642cf5-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/96055f5b06bf9381ac43879351642cf5-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/96055f5b06bf9381ac43879351642cf5-Metadata.json", "review": "", "metareview": "", "pdf_size": 108663, "gs_citation": 17, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2338166233388521969&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 27, "aff": "Dept. of Computer Science, Univ. of New Mexico, Albuquerque, NM 87131; Dept. of CS and EE, Univ. 
of Maryland Baltimore County, Baltimore, MD 21250", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "University of New Mexico;University of Maryland, Baltimore County", "aff_unique_dep": "Department of Computer Science;Department of Computer Science and Electrical Engineering", "aff_unique_url": "https://www.unm.edu;https://www.umbc.edu", "aff_unique_abbr": "UNM;UMBC", "aff_campus_unique_index": "0;1", "aff_campus_unique": "Albuquerque;Baltimore", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "04e1734932", "title": "A Sequence Kernel and its Application to Speaker Recognition", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/6a508a60aa3bf9510ea6acb021c94b48-Abstract.html", "author": "William M. Campbell", "abstract": "A novel approach for comparing sequences of observations using an explicit-expansion kernel is demonstrated. The kernel is derived using the assumption of the independence of the sequence of observations and a mean-squared error training criterion. The use of an explicit expan- sion kernel reduces classi\ufb01er model size and computation dramatically, resulting in model sizes and computation one-hundred times smaller in our application. The explicit expansion also preserves the computational advantages of an earlier architecture based on mean-squared error train- ing. Training using standard support vector machine methodology gives accuracy that signi\ufb01cantly exceeds the performance of state-of-the-art mean-squared error training for a speaker recognition task.", "bibtex": "@inproceedings{NIPS2001_6a508a60,\n author = {Campbell, William},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {A Sequence Kernel and its Application to Speaker Recognition},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/6a508a60aa3bf9510ea6acb021c94b48-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/6a508a60aa3bf9510ea6acb021c94b48-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/6a508a60aa3bf9510ea6acb021c94b48-Metadata.json", "review": "", "metareview": "", "pdf_size": 110610, "gs_citation": 30, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13811644126518266391&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "Motorola Human Interface Lab", "aff_domain": "motorola.com", "email": "motorola.com", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "Motorola", "aff_unique_dep": "Human Interface Lab", "aff_unique_url": "https://www.motorola.com", "aff_unique_abbr": "Motorola", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "id": "9562e1378b", "title": "A Variational Approach to Learning Curves", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/26f5bd4aa64fdadf96152ca6e6408068-Abstract.html", "author": "D\u00f6rthe Malzahn; Manfred Opper", "abstract": "We combine the replica approach from statistical physics with a varia- tional approach to analyze learning curves analytically. We apply the method to Gaussian process regression. As a main result we derive ap- proximative relations between empirical error measures, the generaliza- tion error and the posterior variance.", "bibtex": "@inproceedings{NIPS2001_26f5bd4a,\n author = {Malzahn, D\\\"{o}rthe and Opper, Manfred},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {A Variational Approach to Learning Curves},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/26f5bd4aa64fdadf96152ca6e6408068-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/26f5bd4aa64fdadf96152ca6e6408068-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/26f5bd4aa64fdadf96152ca6e6408068-Metadata.json", "review": "", "metareview": "", "pdf_size": 111007, "gs_citation": 29, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18368729062902188973&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 17, "aff": "Neural Computing Research Group, School of Engineering and Applied Science, Aston University, Birmingham B4 7ET, United Kingdom; Neural Computing Research Group, School of Engineering and Applied Science, Aston University, Birmingham B4 7ET, United Kingdom", "aff_domain": "aston.ac.uk;aston.ac.uk", "email": "aston.ac.uk;aston.ac.uk", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Aston University", "aff_unique_dep": "School of Engineering and Applied Science", "aff_unique_url": "https://www.aston.ac.uk", "aff_unique_abbr": "Aston", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Birmingham", "aff_country_unique_index": "0;0", "aff_country_unique": "United Kingdom" }, { "id": "7b81186db7", "title": "A kernel method for multi-labelled classification", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/39dcaf7a053dc372fbc391d4e6b5d693-Abstract.html", "author": "Andr\u00e9 Elisseeff; Jason Weston", "abstract": "This article presents a Support Vector Machine (SVM) like learning sys- tem to handle multi-label problems. Such problems are usually decom- posed into many two-class problems but the expressive power of such a system can be weak [5, 7]. We explore a new direct approach. 
It is based on a large margin ranking system that shares a lot of common proper- ties with SVMs. We tested it on a Yeast gene functional classi\ufb01cation problem with positive results.", "bibtex": "@inproceedings{NIPS2001_39dcaf7a,\n author = {Elisseeff, Andr\\'{e} and Weston, Jason},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {A kernel method for multi-labelled classification},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/39dcaf7a053dc372fbc391d4e6b5d693-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/39dcaf7a053dc372fbc391d4e6b5d693-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/39dcaf7a053dc372fbc391d4e6b5d693-Metadata.json", "review": "", "metareview": "", "pdf_size": 91437, "gs_citation": 2107, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18189365690785623389&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 15, "aff": ";", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster" }, { "id": "b3b6230f0b", "title": "A theory of neural integration in the head-direction system", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/978d76676f5e7918f81d28e7d092ca0d-Abstract.html", "author": "Richard Hahnloser; Xiaohui Xie; H. S. Seung", "abstract": "Integration in the head-direction system is a computation by which hor- izontal angular head velocity signals from the vestibular nuclei are in- tegrated to yield a neural representation of head direction. In the thala- mus, the postsubiculum and the mammillary nuclei, the head-direction representation has the form of a place code: neurons have a preferred head direction in which their \ufb01ring is maximal [Blair and Sharp, 1995, Blair et al., 1998, ?]. 
Integration is a dif\ufb01cult computation, given that head-velocities can vary over a large range. Previous models of the head-direction system relied on the assumption that the integration is achieved in a \ufb01ring-rate-based attractor network with a ring structure. In order to correctly integrate head-velocity signals during high-speed head rotations, very fast synaptic dynamics had to be assumed. Here we address the question whether integration in the head-direction system is possible with slow synapses, for example excitatory NMDA and inhibitory GABA(B) type synapses. For neural networks with such slow synapses, rate-based dynamics are a good approximation of spik- ing neurons [Ermentrout, 1994]. We \ufb01nd that correct integration during high-speed head rotations imposes strong constraints on possible net- work architectures.", "bibtex": "@inproceedings{NIPS2001_978d7667,\n author = {Hahnloser, Richard and Xie, Xiaohui and Seung, H.},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {A theory of neural integration in the head-direction system},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/978d76676f5e7918f81d28e7d092ca0d-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/978d76676f5e7918f81d28e7d092ca0d-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/978d76676f5e7918f81d28e7d092ca0d-Metadata.json", "review": "", "metareview": "", "pdf_size": 78665, "gs_citation": 1, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16892336897690348032&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 12, "aff": "Howard Hughes Medical Institute+Dept. of Brain and Cognitive Sciences+Massachusetts Institute of Technology; Howard Hughes Medical Institute+Dept. 
of Brain and Cognitive Sciences+Massachusetts Institute of Technology; Howard Hughes Medical Institute+Dept. of Brain and Cognitive Sciences+Massachusetts Institute of Technology", "aff_domain": "mit.edu;mit.edu;mit.edu", "email": "mit.edu;mit.edu;mit.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0+1+1;0+1+1;0+1+1", "aff_unique_norm": "Howard Hughes Medical Institute;Massachusetts Institute of Technology", "aff_unique_dep": ";Department of Brain and Cognitive Sciences", "aff_unique_url": "https://www.hhmi.org;https://bcsl.mit.edu/", "aff_unique_abbr": "HHMI;MIT", "aff_campus_unique_index": "1;1;1", "aff_campus_unique": ";Cambridge", "aff_country_unique_index": "0+0+0;0+0+0;0+0+0", "aff_country_unique": "United States" }, { "id": "86a87061bb", "title": "ACh, Uncertainty, and Cortical Inference", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/7b1ce3d73b70f1a7246e7b76a35fb552-Abstract.html", "author": "Peter Dayan; Angela J. Yu", "abstract": "Acetylcholine (ACh) has been implicated in a wide variety of tasks involving attentional processes and plasticity. Following extensive animal studies, it has previously been suggested that ACh reports on uncertainty and controls hippocampal, cortical and cortico-amygdalar plasticity. We extend this view and consider its effects on cortical representational inference, arguing that ACh controls the balance between bottom-up inference, in(cid:3)uenced by input stimuli, and top-down inference, in(cid:3)uenced by contextual information. We illustrate our proposal using a hierarchical hid- den Markov model.", "bibtex": "@inproceedings{NIPS2001_7b1ce3d7,\n author = {Dayan, Peter and Yu, Angela J},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {ACh, Uncertainty, and Cortical Inference},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/7b1ce3d73b70f1a7246e7b76a35fb552-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/7b1ce3d73b70f1a7246e7b76a35fb552-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/7b1ce3d73b70f1a7246e7b76a35fb552-Metadata.json", "review": "", "metareview": "", "pdf_size": 219218, "gs_citation": 37, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5438280089320732671&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 16, "aff": "Gatsby Computational Neuroscience Unit; Gatsby Computational Neuroscience Unit", "aff_domain": "gatsby.ucl.ac.uk;gatsby.ucl.ac.uk", "email": "gatsby.ucl.ac.uk;gatsby.ucl.ac.uk", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University College London", "aff_unique_dep": "Gatsby Computational Neuroscience Unit", "aff_unique_url": "https://www.ucl.ac.uk", "aff_unique_abbr": "UCL", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United Kingdom" }, { "id": "77d82ae609", "title": "ALGONQUIN - Learning Dynamic Noise Models From Noisy Speech for Robust Speech Recognition", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/95192c98732387165bf8e396c0f2dad2-Abstract.html", "author": "Brendan J. Frey; Trausti T. Kristjansson; Li Deng; Alex Acero", "abstract": "A challenging, unsolved problem in the speech recognition com(cid:173) munity is recognizing speech signals that are corrupted by loud, highly nonstationary noise. One approach to noisy speech recog(cid:173) nition is to automatically remove the noise from the cepstrum se(cid:173) quence before feeding it in to a clean speech recognizer. 
In previous work published in Eurospeech, we showed how a probability model trained on clean speech and a separate probability model trained on noise could be combined for the purpose of estimating the noise(cid:173) free speech from the noisy speech. We showed how an iterative 2nd order vector Taylor series approximation could be used for prob(cid:173) abilistic inference in this model. In many circumstances, it is not possible to obtain examples of noise without speech. Noise statis(cid:173) tics may change significantly during an utterance, so that speech(cid:173) free frames are not sufficient for estimating the noise model. In this paper, we show how the noise model can be learned even when the data contains speech. In particular, the noise model can be learned from the test utterance and then used to de noise the test utterance. The approximate inference technique is used as an approximate E step in a generalized EM algorithm that learns the parameters of the noise model from a test utterance. For both Wall Street J our(cid:173) nal data with added noise samples and the Aurora benchmark, we show that the new noise adaptive technique performs as well as or significantly better than the non-adaptive algorithm, without the need for a separate training set of noise examples.", "bibtex": "@inproceedings{NIPS2001_95192c98,\n author = {Frey, Brendan J and Kristjansson, Trausti and Deng, Li and Acero, Alex},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {ALGONQUIN - Learning Dynamic Noise Models From Noisy Speech for Robust Speech Recognition},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/95192c98732387165bf8e396c0f2dad2-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/95192c98732387165bf8e396c0f2dad2-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/95192c98732387165bf8e396c0f2dad2-Metadata.json", "review": "", "metareview": "", "pdf_size": 1384710, "gs_citation": 71, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13519186568353269964&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": ";;;", "aff_domain": ";;;", "email": ";;;", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster" }, { "id": "5d161e1988", "title": "Active Information Retrieval", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/5a7f963e5e0504740c3a6b10bb6d4fa5-Abstract.html", "author": "Tommi Jaakkola; Hava T. Siegelmann", "abstract": "In classical large information retrieval systems, the system responds to a user initiated query with a list of results ranked by relevance. The users may further refine their query as needed. This process may result in a lengthy correspondence without conclusion. We propose an alternative active learning approach, where the sys(cid:173) tem responds to the initial user's query by successively probing the user for distinctions at multiple levels of abstraction. The system's initiated queries are optimized for speedy recovery and the user is permitted to respond with multiple selections or may reject the query. The information is in each case unambiguously incorporated by the system and the subsequent queries are adjusted to minimize the need for further exchange. 
The system's initiated queries are subject to resource constraints pertaining to the amount of infor(cid:173) mation that can be presented to the user per iteration.", "bibtex": "@inproceedings{NIPS2001_5a7f963e,\n author = {Jaakkola, Tommi and Siegelmann, Hava},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Active Information Retrieval},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/5a7f963e5e0504740c3a6b10bb6d4fa5-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/5a7f963e5e0504740c3a6b10bb6d4fa5-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/5a7f963e5e0504740c3a6b10bb6d4fa5-Metadata.json", "review": "", "metareview": "", "pdf_size": 1434402, "gs_citation": 25, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10410325896515219137&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "MIT AI Lab, Cambridge, MA; MIT LIDS, Cambridge, MA", "aff_domain": "ai.mit.edu;mit.edu", "email": "ai.mit.edu;mit.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Massachusetts Institute of Technology", "aff_unique_dep": "AI Lab", "aff_unique_url": "http://web.mit.edu/", "aff_unique_abbr": "MIT", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Cambridge", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "7f78522768", "title": "Active Learning in the Drug Discovery Process", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/a03fa30821986dff10fc66647c84c9c3-Abstract.html", "author": "Manfred K. 
Warmuth; Gunnar R\u00e4tsch; Michael Mathieson; Jun Liao; Christian Lemmen", "abstract": "We investigate the following data mining problem from Computational Chemistry: From a large data set of compounds, \ufb01nd those that bind to a target molecule in as few iterations of biological testing as possible. In each iteration a comparatively small batch of compounds is screened for binding to the target. We apply active learning techniques for selecting the successive batches. One selection strategy picks unlabeled examples closest to the maximum margin hyperplane. Another produces many weight vectors by running perceptrons over multiple permutations of the data. Each weight vector prediction and we pick the unlabeled examples for which votes with its the prediction is most evenly split between . For a third selec- tion strategy note that each unlabeled example bisects the version space of consistent weight vectors. We estimate the volume on both sides of the split by bouncing a billiard through the version space and select un- labeled examples that cause the most even split of the version space. We demonstrate that on two data sets provided by DuPont Pharmaceu- ticals that all three selection strategies perform comparably well and are much better than selecting random batches for testing.", "bibtex": "@inproceedings{NIPS2001_a03fa308,\n author = {Warmuth, Manfred K. K and R\\\"{a}tsch, Gunnar and Mathieson, Michael and Liao, Jun and Lemmen, Christian},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Active Learning in the Drug Discovery Process},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/a03fa30821986dff10fc66647c84c9c3-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/a03fa30821986dff10fc66647c84c9c3-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/a03fa30821986dff10fc66647c84c9c3-Metadata.json", "review": "", "metareview": "", "pdf_size": 127037, "gs_citation": 134, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13718121729249456322&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 13, "aff": "Computer Science Dep., Univ. of Calif. at Santa Cruz; FHG FIRST, Kekul\u00b4 estr. 7, Berlin, Germany + Austrialian National University, Canberra, Austrialia; DuPont Pharmaceuticals,150 California St. San Francisco + BioSolveIT GmbH,An der Ziegelei75, Sankt Augustin, Germany; Computer Science Dep., Univ. of Calif. at Santa Cruz; FHG FIRST, Kekul\u00b4 estr. 
7, Berlin, Germany", "aff_domain": "cse.ucsc.edu;cse.ucsc.edu;cse.ucsc.edu;anu.edu.au;biosolveit.de", "email": "cse.ucsc.edu;cse.ucsc.edu;cse.ucsc.edu;anu.edu.au;biosolveit.de", "github": "", "project": "", "author_num": 5, "track": "main", "status": "Poster", "aff_unique_index": "0;1+2;3+4;0;1", "aff_unique_norm": "University of California, Santa Cruz;Fraunhofer Institute for Computer Graphics Research IGD;Australian National University;DuPont Pharmaceuticals;BioSolveIT GmbH", "aff_unique_dep": "Computer Science Department;;;;", "aff_unique_url": "https://www.ucsc.edu;https://www.igd.fraunhofer.de;https://www.anu.edu.au;;", "aff_unique_abbr": "UCSC;FHG IGD;ANU;;", "aff_campus_unique_index": "0;2;;0", "aff_campus_unique": "Santa Cruz;;Canberra", "aff_country_unique_index": "0;1+2;0+1;0;1", "aff_country_unique": "United States;Germany;Australia" }, { "id": "23733ee416", "title": "Active Portfolio-Management based on Error Correction Neural Networks", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/4d8556695c262ab91ff51a943fdd6058-Abstract.html", "author": "Hans-Georg Zimmermann; Ralph Neuneier; Ralph Grothmann", "abstract": "This paper deals with a neural network architecture which establishes a portfolio management system similar to the Black / Litterman approach. This allocation scheme distributes funds across various securities or \ufb01- nancial markets while simultaneously complying with speci\ufb01c allocation constraints which meet the requirements of an investor. The portfolio optimization algorithm is modeled by a feedforward neural network. The underlying expected return forecasts are based on error correction neural networks (ECNN), which utilize the last model error as an auxiliary input to evaluate their own misspeci\ufb01cation. The portfolio optimization is implemented such that (i.) the allocations comply with investor\u2019s constraints and that (ii.) the risk of the portfo- lio can be controlled. 
We demonstrate the pro\ufb01tability of our approach by constructing internationally diversi\ufb01ed portfolios across different \ufb01nancial markets of the G7 countries. It turns out, that our approach is superior to a preset benchmark portfolio.", "bibtex": "@inproceedings{NIPS2001_4d855669,\n author = {Zimmermann, Hans-Georg and Neuneier, Ralph and Grothmann, Ralph},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Active Portfolio-Management based on Error Correction Neural Networks},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/4d8556695c262ab91ff51a943fdd6058-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/4d8556695c262ab91ff51a943fdd6058-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/4d8556695c262ab91ff51a943fdd6058-Metadata.json", "review": "", "metareview": "", "pdf_size": 95225, "gs_citation": 46, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10207288581097830501&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Siemens AG Corporate Technology; Siemens AG Corporate Technology; Siemens AG Corporate Technology", "aff_domain": "mchp.siemens.de; ; ", "email": "mchp.siemens.de; ; ", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "Siemens AG", "aff_unique_dep": "Corporate Technology", "aff_unique_url": "https://www.siemens.com", "aff_unique_abbr": "Siemens", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Germany" }, { "id": "fc0454fb1e", "title": "Activity Driven Adaptive Stochastic Resonance", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/a29d1598024f9e87beab4b98411d48ce-Abstract.html", "author": "Gregor Wenning; Klaus 
Obermayer", "abstract": "Cortical neurons might be considered as threshold elements inte(cid:173) grating in parallel many excitatory and inhibitory inputs. Due to the apparent variability of cortical spike trains this yields a strongly fluctuating membrane potential, such that threshold crossings are highly irregular. Here we study how a neuron could maximize its sensitivity w.r.t. a relatively small subset of excitatory input. Weak signals embedded in fluctuations is the natural realm of stochastic resonance. The neuron's response is described in a hazard-function approximation applied to an Ornstein-Uhlenbeck process. We an(cid:173) alytically derive an optimality criterium and give a learning rule for the adjustment of the membrane fluctuations, such that the sensitivity is maximal exploiting stochastic resonance. We show that adaptation depends only on quantities that could easily be estimated locally (in space and time) by the neuron. The main results are compared with simulations of a biophysically more re(cid:173) alistic neuron model.", "bibtex": "@inproceedings{NIPS2001_a29d1598,\n author = {Wenning, Gregor and Obermayer, Klaus},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Activity Driven Adaptive Stochastic Resonance},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/a29d1598024f9e87beab4b98411d48ce-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/a29d1598024f9e87beab4b98411d48ce-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/a29d1598024f9e87beab4b98411d48ce-Metadata.json", "review": "", "metareview": "", "pdf_size": 1299462, "gs_citation": 71, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17502359438931090540&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Department of Electrical Engineering and Computer Science, Technical University of Berlin; Department of Electrical Engineering and Computer Science, Technical University of Berlin", "aff_domain": "cs.tu-berlin.de;cs.tu-berlin.de", "email": "cs.tu-berlin.de;cs.tu-berlin.de", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Technical University of Berlin", "aff_unique_dep": "Department of Electrical Engineering and Computer Science", "aff_unique_url": "https://www.tu-berlin.de", "aff_unique_abbr": "TU Berlin", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "Germany" }, { "id": "39279647d5", "title": "Adaptive Nearest Neighbor Classification Using Support Vector Machines", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/955cb567b6e38f4c6b3f28cc857fc38c-Abstract.html", "author": "Carlotta Domeniconi; Dimitrios Gunopulos", "abstract": "The nearest neighbor technique is a simple and appealing method to address classification problems. It relies on t he assumption of locally constant class conditional probabilities. 
This assumption becomes invalid in high dimensions with a finite number of exam(cid:173) ples due to the curse of dimensionality. We propose a technique that computes a locally flexible metric by means of Support Vector Machines (SVMs). The maximum margin boundary found by the SVM is used to determine the most discriminant direction over the query's neighborhood. Such direction provides a local weighting scheme for input features. We present experimental evidence of classification performance improvement over the SVM algorithm alone and over a variety of adaptive learning schemes, by using both simulated and real data sets.", "bibtex": "@inproceedings{NIPS2001_955cb567,\n author = {Domeniconi, Carlotta and Gunopulos, Dimitrios},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Adaptive Nearest Neighbor Classification Using Support Vector Machines},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/955cb567b6e38f4c6b3f28cc857fc38c-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/955cb567b6e38f4c6b3f28cc857fc38c-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/955cb567b6e38f4c6b3f28cc857fc38c-Metadata.json", "review": "", "metareview": "", "pdf_size": 1628890, "gs_citation": 143, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12045178011089583886&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "Dept. of Computer Science, University of California, Riverside, CA 92521; Dept. 
of Computer Science, University of California, Riverside, CA 92521", "aff_domain": "cs.ucr.edu;cs.ucr.edu", "email": "cs.ucr.edu;cs.ucr.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of California, Riverside", "aff_unique_dep": "Dept. of Computer Science", "aff_unique_url": "https://www.ucr.edu", "aff_unique_abbr": "UCR", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Riverside", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "05323738b6", "title": "Adaptive Sparseness Using Jeffreys Prior", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/dd055f53a45702fe05e449c30ac80df9-Abstract.html", "author": "M\u00e1rio Figueiredo", "abstract": "In this paper we introduce a new sparseness inducing prior which does not involve any (hy- per)parameters that need to be adjusted or estimated. Although other applications are possi- ble, we focus here on supervised learning problems: regression and classi\ufb01cation. Experi- ments with several publicly available benchmark data sets show that the proposed approach yields state-of-the-art performance. In particular, our method outperforms support vector machines and performs competitively with the best alternative techniques, both in terms of error rates and sparseness, although it involves no tuning or adjusting of sparseness- controlling hyper-parameters.", "bibtex": "@inproceedings{NIPS2001_dd055f53,\n author = {Figueiredo, M\\'{a}rio},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Adaptive Sparseness Using Jeffreys Prior},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/dd055f53a45702fe05e449c30ac80df9-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/dd055f53a45702fe05e449c30ac80df9-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/dd055f53a45702fe05e449c30ac80df9-Metadata.json", "review": "", "metareview": "", "pdf_size": 105074, "gs_citation": 204, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6688678991143145558&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 13, "aff": "Institute of Telecommunications, and Department of Electrical and Computer Engineering. Instituto Superior T\u00e9cnico, 1049-001 Lisboa, Portugal", "aff_domain": "lx.it.pt", "email": "lx.it.pt", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "Instituto Superior T\u00e9cnico", "aff_unique_dep": "Department of Electrical and Computer Engineering", "aff_unique_url": "https://www.ist.utl.pt", "aff_unique_abbr": "IST", "aff_campus_unique_index": "0", "aff_campus_unique": "Lisboa", "aff_country_unique_index": "0", "aff_country_unique": "Portugal" }, { "id": "e42c998d1f", "title": "Agglomerative Multivariate Information Bottleneck", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/1113d7a76ffceca1bb350bfe145467c6-Abstract.html", "author": "Noam Slonim; Nir Friedman; Naftali Tishby", "abstract": "The information bottleneck method is an unsupervised model independent data organization technique. Given a joint distribution peA, B), this method con(cid:173) structs a new variable T that extracts partitions, or clusters, over the values of A that are informative about B. 
In a recent paper, we introduced a general princi(cid:173) pled framework for multivariate extensions of the information bottleneck method that allows us to consider multiple systems of data partitions that are inter-related. In this paper, we present a new family of simple agglomerative algorithms to construct such systems of inter-related clusters. We analyze the behavior of these algorithms and apply them to several real-life datasets.", "bibtex": "@inproceedings{NIPS2001_1113d7a7,\n author = {Slonim, Noam and Friedman, Nir and Tishby, Naftali},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Agglomerative Multivariate Information Bottleneck},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/1113d7a76ffceca1bb350bfe145467c6-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/1113d7a76ffceca1bb350bfe145467c6-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/1113d7a76ffceca1bb350bfe145467c6-Metadata.json", "review": "", "metareview": "", "pdf_size": 1667958, "gs_citation": 48, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13339949111939439563&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 13, "aff": "School of Computer Science & Engineering, Hebrew University, Jerusalem 91904, Israel; School of Computer Science & Engineering, Hebrew University, Jerusalem 91904, Israel; School of Computer Science & Engineering, Hebrew University, Jerusalem 91904, Israel", "aff_domain": "cs.huji.ac.il;cs.huji.ac.il;cs.huji.ac.il", "email": "cs.huji.ac.il;cs.huji.ac.il;cs.huji.ac.il", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "Hebrew University", "aff_unique_dep": "School of Computer Science & Engineering", "aff_unique_url": 
"http://www.huji.ac.il", "aff_unique_abbr": "HUJI", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Jerusalem", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Israel" }, { "id": "46c0eb02b0", "title": "Algorithmic Luckiness", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/24f0d2c90473b2bc949ad962e61d9bcb-Abstract.html", "author": "Ralf Herbrich; Robert C. Williamson", "abstract": "In contrast to standard statistical learning theory which studies uniform bounds on the expected error we present a framework that exploits the specific learning algorithm used. Motivated by the luckiness framework [8] we are also able to exploit the serendipity of the training sample. The main difference to previous approaches lies in the complexity measure; rather than covering all hypothe(cid:173) ses in a given hypothesis space it is only necessary to cover the functions which could have been learned using the fixed learning algorithm. We show how the resulting framework relates to the VC, luckiness and compression frameworks. Finally, we present an application of this framework to the maximum margin algorithm for linear classifiers which results in a bound that exploits both the margin and the distribution of the data in feature space.", "bibtex": "@inproceedings{NIPS2001_24f0d2c9,\n author = {Herbrich, Ralf and Williamson, Robert C},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Algorithmic Luckiness},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/24f0d2c90473b2bc949ad962e61d9bcb-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/24f0d2c90473b2bc949ad962e61d9bcb-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/24f0d2c90473b2bc949ad962e61d9bcb-Metadata.json", "review": "", "metareview": "", "pdf_size": 1390123, "gs_citation": 81, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18267082226709507782&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 26, "aff": "Microsoft Research Ltd.; Australian National University", "aff_domain": "microsoft.com;anu.edu.au", "email": "microsoft.com;anu.edu.au", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "Microsoft;Australian National University", "aff_unique_dep": "Microsoft Research;", "aff_unique_url": "https://www.microsoft.com/en-us/research;https://www.anu.edu.au", "aff_unique_abbr": "MSR;ANU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1", "aff_country_unique": "United Kingdom;Australia" }, { "id": "662d5a6876", "title": "An Efficient Clustering Algorithm Using Stochastic Association Model and Its Implementation Using Nanostructures", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/de03beffeed9da5f3639a621bcab5dd4-Abstract.html", "author": "Takashi Morie; Tomohiro Matsuura; Makoto Nagata; Atsushi Iwata", "abstract": "This paper describes a clustering algorithm for vector quantizers using a \u201cstochastic association model\u201d. It offers a new simple and powerful soft- max adaptation rule. The adaptation process is the same as the on-line K-means clustering method except for adding random \ufb02uctuation in the distortion error evaluation process. 
Simulation results demonstrate that the new algorithm can achieve ef\ufb01cient adaptation as high as the \u201cneural gas\u201d algorithm, which is reported as one of the most ef\ufb01cient clustering methods. It is a key to add uncorrelated random \ufb02uctuation in the simi- larity evaluation process for each reference vector. For hardware imple- mentation of this process, we propose a nanostructure, whose operation is described by a single-electron circuit. It positively uses \ufb02uctuation in quantum mechanical tunneling processes.", "bibtex": "@inproceedings{NIPS2001_de03beff,\n author = {Morie, Takashi and Matsuura, Tomohiro and Nagata, Makoto and Iwata, Atsushi},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {An Efficient Clustering Algorithm Using Stochastic Association Model and Its Implementation Using Nanostructures},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/de03beffeed9da5f3639a621bcab5dd4-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/de03beffeed9da5f3639a621bcab5dd4-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/de03beffeed9da5f3639a621bcab5dd4-Metadata.json", "review": "", "metareview": "", "pdf_size": 230447, "gs_citation": 3, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7125392457397054081&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "Graduate School of Advanced Sciences of Matter, Hiroshima University; Graduate School of Advanced Sciences of Matter, Hiroshima University; Graduate School of Advanced Sciences of Matter, Hiroshima University; Graduate School of Advanced Sciences of Matter, Hiroshima University", "aff_domain": "dsl.hiroshima-u.ac.jp; ; ; ", "email": "dsl.hiroshima-u.ac.jp; ; ; ", "github": "", "project": "http://www.dsl.hiroshima-u.ac.jp", 
"author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Hiroshima University", "aff_unique_dep": "Graduate School of Advanced Sciences of Matter", "aff_unique_url": "https://www.hiroshima-u.ac.jp", "aff_unique_abbr": "Hiroshima U", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "Japan" }, { "id": "ff68569274", "title": "An Efficient, Exact Algorithm for Solving Tree-Structured Graphical Games", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/c5866e93cab1776890fe343c9e7063fb-Abstract.html", "author": "Michael L. Littman; Michael J. Kearns; Satinder P. Singh", "abstract": "We describe a new algorithm for computing a Nash equilibrium in graphical games, a compact representation for multi-agent systems that we introduced in previous work. The algorithm is the first to compute equilibria both efficiently and exactly for a non-trivial class of graphical games.", "bibtex": "@inproceedings{NIPS2001_c5866e93,\n author = {Littman, Michael and Kearns, Michael and Singh, Satinder},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {An Efficient, Exact Algorithm for Solving Tree-Structured Graphical Games},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/c5866e93cab1776890fe343c9e7063fb-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/c5866e93cab1776890fe343c9e7063fb-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/c5866e93cab1776890fe343c9e7063fb-Metadata.json", "review": "", "metareview": "", "pdf_size": 1425292, "gs_citation": 63, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7573153820019053318&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "AT&T Labs-Research, Florham Park, NJ 07932-0971; Department of Computer & Information Science, University of Pennsylvania, Philadelphia, PA 19104-6389; Syntek Capital, New York, NY 10019-4460", "aff_domain": "research.att.com;cis.upenn.edu;cs.colorado.edu", "email": "research.att.com;cis.upenn.edu;cs.colorado.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;2", "aff_unique_norm": "AT&T Labs-Research;University of Pennsylvania;Syntek Capital", "aff_unique_dep": ";Department of Computer & Information Science;", "aff_unique_url": "https://www.att.com/labs;https://www.upenn.edu;", "aff_unique_abbr": "AT&T Labs;UPenn;", "aff_campus_unique_index": "0;1", "aff_campus_unique": "Florham Park;Philadelphia;", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "f5c9dc333c", "title": "Analog Soft-Pattern-Matching Classifier using Floating-Gate MOS Technology", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/51174add1c52758f33d414ceaf3fe6ba-Abstract.html", "author": "Toshihiko Yamasaki; Tadashi Shibata", "abstract": "A flexible pattern-matching analog classifier is presented in con- junction with a robust image representation algorithm called 
Principal Axes Projection (PAP). In the circuit, the functional form of matching is configurable in terms of the peak position, the peak height and the sharpness of the similarity evaluation. The test chip was fabricated in a 0.6-\u00b5m CMOS technology and successfully applied to hand-written pattern recognition and medical radiograph analysis using PAP as a feature extraction pre-processing step for robust image coding. The separation and classification of overlapping patterns is also experimentally demonstrated.", "bibtex": "@inproceedings{NIPS2001_51174add,\n author = {Yamasaki, Toshihiko and Shibata, Tadashi},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Analog Soft-Pattern-Matching Classifier using Floating-Gate MOS Technology},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/51174add1c52758f33d414ceaf3fe6ba-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/51174add1c52758f33d414ceaf3fe6ba-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/51174add1c52758f33d414ceaf3fe6ba-Metadata.json", "review": "", "metareview": "", "pdf_size": 357409, "gs_citation": 75, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17298340618087187244&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "Department of Electronic Engineering, School of Engineering; Department of Frontier Informatics, School of Frontier Science", "aff_domain": "if.t.u-tokyo.ac.jp;ee.t.u-tokyo.ac.jp", "email": "if.t.u-tokyo.ac.jp;ee.t.u-tokyo.ac.jp", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "School of Engineering;School of Frontier Science", "aff_unique_dep": "Department of Electronic Engineering;Department of Frontier Informatics", 
"aff_unique_url": ";", "aff_unique_abbr": ";", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "", "aff_country_unique": "" }, { "id": "79e45de448", "title": "Analysis of Sparse Bayesian Learning", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/02b1be0d48924c327124732726097157-Abstract.html", "author": "Anita C. Faul; Michael E. Tipping", "abstract": "The recent introduction of the 'relevance vector machine' has effec(cid:173) tively demonstrated how sparsity may be obtained in generalised linear models within a Bayesian framework. Using a particular form of Gaussian parameter prior, 'learning' is the maximisation, with respect to hyperparameters, of the marginal likelihood of the data. This paper studies the properties of that objective func(cid:173) tion, and demonstrates that conditioned on an individual hyper(cid:173) parameter, the marginal likelihood has a unique maximum which is computable in closed form. It is further shown that if a derived 'sparsity criterion' is satisfied, this maximum is exactly equivalent to 'pruning' the corresponding parameter from the model.", "bibtex": "@inproceedings{NIPS2001_02b1be0d,\n author = {Faul, Anita and Tipping, Michael},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Analysis of Sparse Bayesian Learning},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/02b1be0d48924c327124732726097157-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/02b1be0d48924c327124732726097157-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/02b1be0d48924c327124732726097157-Metadata.json", "review": "", "metareview": "", "pdf_size": 1102795, "gs_citation": 419, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3422255107655621887&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 12, "aff": ";", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster" }, { "id": "1f53e6a8ed", "title": "Approximate Dynamic Programming via Linear Programming", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/cd4bb35c75ba84b4f39e547b1416fd35-Abstract.html", "author": "Daniela Farias; Benjamin V. Roy", "abstract": "The curse of dimensionality gives rise to prohibitive computational requirements that render infeasible the exact solution of large- scale stochastic control problems. We study an efficient method based on linear programming for approximating solutions to such prob(cid:173) lems. The approach \"fits\" a linear combination of pre- selected basis functions to the dynamic programming cost- to- go function. We develop bounds on the approximation error and present experi(cid:173) mental results in the domain of queueing network control, providing empirical support for the methodology.", "bibtex": "@inproceedings{NIPS2001_cd4bb35c,\n author = {Farias, Daniela and Roy, Benjamin},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Approximate Dynamic Programming via Linear Programming},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/cd4bb35c75ba84b4f39e547b1416fd35-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/cd4bb35c75ba84b4f39e547b1416fd35-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/cd4bb35c75ba84b4f39e547b1416fd35-Metadata.json", "review": "", "metareview": "", "pdf_size": 1216479, "gs_citation": 21, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=497865119400578674&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "Department of Management Science and Engineering, Stanford University; Department of Management Science and Engineering, Stanford University", "aff_domain": "stanford.edu;stanford.edu", "email": "stanford.edu;stanford.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Stanford University", "aff_unique_dep": "Department of Management Science and Engineering", "aff_unique_url": "https://www.stanford.edu", "aff_unique_abbr": "Stanford", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Stanford", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "291049723c", "title": "Associative memory in realistic neuronal networks", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/a96d3afec184766bfeca7a9f989fc7e7-Abstract.html", "author": "Peter E. Latham", "abstract": "Almost two decades ago, Hopfield [1] showed that networks of highly reduced model neurons can exhibit multiple attracting fixed points, thus providing a substrate for associative memory. It is still not clear, however, whether realistic neuronal networks can support multiple attractors. 
The main difficulty is that neuronal networks in vivo exhibit a stable background state at low firing rate, typ(cid:173) ically a few Hz. Embedding attractor is easy; doing so without destabilizing the background is not. Previous work [2, 3] focused on the sparse coding limit, in which a vanishingly small number of neurons are involved in any memory. Here we investigate the case in which the number of neurons involved in a memory scales with the number of neurons in the network. In contrast to the sparse coding limit, we find that multiple attractors can co-exist robustly with a stable background state. Mean field theory is used to under(cid:173) stand how the behavior of the network scales with its parameters, and simulations with analog neurons are presented.", "bibtex": "@inproceedings{NIPS2001_a96d3afe,\n author = {Latham, Peter},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Associative memory in realistic neuronal networks},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/a96d3afec184766bfeca7a9f989fc7e7-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/a96d3afec184766bfeca7a9f989fc7e7-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/a96d3afec184766bfeca7a9f989fc7e7-Metadata.json", "review": "", "metareview": "", "pdf_size": 1417006, "gs_citation": 4, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17051609112282215208&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Department of Neurobiology, University of California at Los Angeles", "aff_domain": "ucla.edu", "email": "ucla.edu", "github": "", "project": "http://culture.neurobio.ucla.edu/", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "University of California, Los Angeles", 
"aff_unique_dep": "Department of Neurobiology", "aff_unique_url": "https://www.ucla.edu", "aff_unique_abbr": "UCLA", "aff_campus_unique_index": "0", "aff_campus_unique": "Los Angeles", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "id": "3140b996ec", "title": "Asymptotic Universality for Learning Curves of Support Vector Machines", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/5a1e3a5aede16d438c38862cac1a78db-Abstract.html", "author": "Manfred Opper; Robert Urbanczik", "abstract": "Using methods of Statistical Physics, we investigate the rOle of model complexity in learning with support vector machines (SVMs). We show the advantages of using SVMs with kernels of infinite complexity on noisy target rules, which, in contrast to common theoretical beliefs, are found to achieve optimal general(cid:173) ization error although the training error does not converge to the generalization error. Moreover, we find a universal asymptotics of the learning curves which only depend on the target rule but not on the SVM kernel.", "bibtex": "@inproceedings{NIPS2001_5a1e3a5a,\n author = {Opper, Manfred and Urbanczik, Robert},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Asymptotic Universality for Learning Curves of Support Vector Machines},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/5a1e3a5aede16d438c38862cac1a78db-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/5a1e3a5aede16d438c38862cac1a78db-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/5a1e3a5aede16d438c38862cac1a78db-Metadata.json", "review": "", "metareview": "", "pdf_size": 1444397, "gs_citation": 0, "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:TLuby-hRpnUJ:scholar.google.com/&scioq=Asymptotic+Universality+for+Learning+Curves+of+Support+Vector+Machines&hl=en&as_sdt=0,5", "gs_version_total": 7, "aff": "Neural Computing Research Group, School of Engineering and Applied Science, Aston University, Birmingham B4 7ET, UK; Institut f\u00fcr Theoretische Physik, Universit\u00e4t W\u00fcrzburg, Am Hubland, D-97074 W\u00fcrzburg, Germany", "aff_domain": "aston.ac.uk;physik.uni-wuerzburg.de", "email": "aston.ac.uk;physik.uni-wuerzburg.de", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "Aston University;University of W\u00fcrzburg", "aff_unique_dep": "School of Engineering and Applied Science;Institut f\u00fcr Theoretische Physik", "aff_unique_url": "https://www.aston.ac.uk;https://www.uni-wuerzburg.de", "aff_unique_abbr": "Aston;", "aff_campus_unique_index": "0;1", "aff_campus_unique": "Birmingham;W\u00fcrzburg", "aff_country_unique_index": "0;1", "aff_country_unique": "United Kingdom;Germany" }, { "id": "2e152efb97", "title": "Audio-Visual Sound Separation Via Hidden Markov Models", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/d47268e9db2e9aa3827bba3afb7ff94a-Abstract.html", "author": "John R. 
Hershey; Michael Casey", "abstract": "It is well known that under noisy conditions we can hear speech much more clearly when we read the speaker's lips. This sug(cid:173) gests the utility of audio-visual information for the task of speech enhancement. We propose a method to exploit audio-visual cues to enable speech separation under non-stationary noise and with a single microphone. We revise and extend HMM-based speech enhancement techniques, in which signal and noise models are fac(cid:173) tori ally combined, to incorporate visual lip information and em(cid:173) ploy novel signal HMMs in which the dynamics of narrow-band and wide band components are factorial. We avoid the combina(cid:173) torial explosion in the factorial model by using a simple approxi(cid:173) mate inference technique to quickly estimate the clean signals in a mixture. We present a preliminary evaluation of this approach using a small-vocabulary audio-visual database, showing promising improvements in machine intelligibility for speech enhanced using audio and visual information.", "bibtex": "@inproceedings{NIPS2001_d47268e9,\n author = {Hershey, John and Casey, Michael},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Audio-Visual Sound Separation Via Hidden Markov Models},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/d47268e9db2e9aa3827bba3afb7ff94a-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/d47268e9db2e9aa3827bba3afb7ff94a-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/d47268e9db2e9aa3827bba3afb7ff94a-Metadata.json", "review": "", "metareview": "", "pdf_size": 1593766, "gs_citation": 90, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=489074804947294743&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Department of Cognitive Science, University of California San Diego; Mitsubishi Electric Research Labs, Cambridge, Massachussets", "aff_domain": "cogsci.ucsd.edu;merl.com", "email": "cogsci.ucsd.edu;merl.com", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "University of California, San Diego;Mitsubishi Electric Research Labs", "aff_unique_dep": "Department of Cognitive Science;", "aff_unique_url": "https://ucsd.edu;https://www.merl.com", "aff_unique_abbr": "UCSD;MERL", "aff_campus_unique_index": "0;1", "aff_campus_unique": "San Diego;Cambridge", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "be815148ab", "title": "Batch Value Function Approximation via Support Vectors", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/4ba3c163cd1efd4c14e3a415fa0a3010-Abstract.html", "author": "Thomas G. Dietterich; Xin Wang", "abstract": "We present three ways of combining linear programming with the kernel trick to find value function approximations for reinforcement learning. One formulation is based on SVM regression; the second is based on the Bellman equation; and the third seeks only to ensure that good moves have an advantage over bad moves. 
All formu(cid:173) lations attempt to minimize the number of support vectors while fitting the data. Experiments in a difficult, synthetic maze problem show that all three formulations give excellent performance, but the advantage formulation is much easier to train. Unlike policy gradi(cid:173) ent methods, the kernel methods described here can easily 'adjust the complexity of the function approximator to fit the complexity of the value function.", "bibtex": "@inproceedings{NIPS2001_4ba3c163,\n author = {Dietterich, Thomas and Wang, Xin},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Batch Value Function Approximation via Support Vectors},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/4ba3c163cd1efd4c14e3a415fa0a3010-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/4ba3c163cd1efd4c14e3a415fa0a3010-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/4ba3c163cd1efd4c14e3a415fa0a3010-Metadata.json", "review": "", "metareview": "", "pdf_size": 804407, "gs_citation": 93, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13345130705717475318&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Department of Computer Science, Oregon State University; Department of Computer Science, Oregon State University", "aff_domain": "cs.orst.edu;cs.orst.edu", "email": "cs.orst.edu;cs.orst.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Oregon State University", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://oregonstate.edu", "aff_unique_abbr": "OSU", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Corvallis", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { 
"id": "eddc0c5e5a", "title": "Bayesian Predictive Profiles With Applications to Retail Transaction Data", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/44cd7a8f7f9f85129b9953950665064d-Abstract.html", "author": "Igor V. Cadez; Padhraic Smyth", "abstract": "Massive transaction data sets are recorded in a routine manner in telecommunications, retail commerce, and Web site management. In this paper we address the problem of inferring predictive in- dividual pro\ufb02les from such historical transaction data. We de- scribe a generative mixture model for count data and use an an approximate Bayesian estimation framework that e\ufb01ectively com- bines an individual\u2019s speci\ufb02c history with more general population patterns. We use a large real-world retail transaction data set to illustrate how these pro\ufb02les consistently outperform non-mixture and non-Bayesian techniques in predicting customer behavior in out-of-sample data.", "bibtex": "@inproceedings{NIPS2001_44cd7a8f,\n author = {Cadez, Igor and Smyth, Padhraic},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Bayesian Predictive Profiles With Applications to Retail Transaction Data},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/44cd7a8f7f9f85129b9953950665064d-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/44cd7a8f7f9f85129b9953950665064d-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/44cd7a8f7f9f85129b9953950665064d-Metadata.json", "review": "", "metareview": "", "pdf_size": 177869, "gs_citation": 3, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18060490325707635671&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "Information and Computer Science, University of California, Irvine, CA 92697-3425, U.S.A.; Information and Computer Science, University of California, Irvine, CA 92697-3425, U.S.A.", "aff_domain": "ics.uci.edu;ics.uci.edu", "email": "ics.uci.edu;ics.uci.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of California, Irvine", "aff_unique_dep": "Department of Information and Computer Science", "aff_unique_url": "https://www.uci.edu", "aff_unique_abbr": "UCI", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Irvine", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "5792429555", "title": "Bayesian morphometry of hippocampal cells suggests same-cell somatodendritic repulsion", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/d3a7f48c12e697d50c8a7ae7684644ef-Abstract.html", "author": "Giorgio A. Ascoli; Alexei V. Samsonovich", "abstract": "Visual inspection of neurons suggests that dendritic orientation may be determined both by internal constraints (e.g. membrane tension) and by external vector fields (e.g. neurotrophic gradients). 
For example, basal dendrites of pyramidal cells appear nicely fan-out. This regular orientation is hard to justify completely with a general tendency to grow straight, given the zigzags observed experimentally. Instead, dendrites could (A) favor a fixed (\u201cexternal\u201d) direction, or (B) repel from their own soma. To investigate these possibilities quantitatively, reconstructed hippocampal cells were subjected to Bayesian analysis. The statistical model combined linearly factors A and B, as well as the tendency to grow straight. For all morphological classes, B was found to be significantly positive and consistently greater than A. In addition, when dendrites were artificially re-oriented according to this model, the resulting structures closely resembled real morphologies. These results suggest that somatodendritic repulsion may play a role in determining dendritic orientation. Since hippocampal cells are very densely packed and their dendritic trees highly overlap, the repulsion must be cell- specific. We discuss possible mechanisms underlying such specificity.", "bibtex": "@inproceedings{NIPS2001_d3a7f48c,\n author = {Ascoli, Giorgio and Samsonovich, Alexei},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Bayesian morphometry of hippocampal cells suggests same-cell somatodendritic repulsion},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/d3a7f48c12e697d50c8a7ae7684644ef-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/d3a7f48c12e697d50c8a7ae7684644ef-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/d3a7f48c12e697d50c8a7ae7684644ef-Metadata.json", "review": "", "metareview": "", "pdf_size": 154549, "gs_citation": 8, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11359057564137804743&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 12, "aff": "Krasnow Institute for Advanced Study at George Mason University; Krasnow Institute for Advanced Study at George Mason University", "aff_domain": "gmu.edu;gmu.edu", "email": "gmu.edu;gmu.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "George Mason University", "aff_unique_dep": "Krasnow Institute for Advanced Study", "aff_unique_url": "https://krasnow.gmu.edu", "aff_unique_abbr": "GMU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "240821e294", "title": "Bayesian time series classification", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/194cf6c2de8e00c05fcf16c498adc7bf-Abstract.html", "author": "Peter Sykacek; Stephen J. Roberts", "abstract": "This paper proposes an approach to classi\ufb01cation of adjacent segments of a time series as being either of classes. We use a hierarchical model that consists of a feature extraction stage and a generative classi\ufb01er which is built on top of these features. Such two stage approaches are often used in signal and image processing. 
The novel part of our work is that we link these stages probabilistically by using a latent feature space. To use one joint model is a Bayesian requirement, which has the advantage to fuse information according to its certainty. The classi\ufb01er is implemented as hidden Markov model with Gaussian and Multinomial observation distributions de\ufb01ned on a suitably chosen representation of autoregressive models. The Markov dependency is mo- tivated by the assumption that successive classi\ufb01cations will be corre- lated. Inference is done with Markov chain Monte Carlo (MCMC) tech- niques. We apply the proposed approach to synthetic data and to classi- \ufb01cation of EEG that was recorded while the subjects performed different cognitive tasks. All experiments show that using a latent feature space results in a signi\ufb01cant improvement in generalization accuracy. Hence we expect that this idea generalizes well to other hierarchical models.", "bibtex": "@inproceedings{NIPS2001_194cf6c2,\n author = {Sykacek, Peter and Roberts, Stephen J},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Bayesian time series classification},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/194cf6c2de8e00c05fcf16c498adc7bf-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/194cf6c2de8e00c05fcf16c498adc7bf-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/194cf6c2de8e00c05fcf16c498adc7bf-Metadata.json", "review": "", "metareview": "", "pdf_size": 117521, "gs_citation": 50, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5798069644426220279&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 17, "aff": "Department of Engineering Science, University of Oxford, Oxford, OX1 3PJ, UK; Department of Engineering Science, University of Oxford, Oxford, OX1 3PJ, UK", "aff_domain": "robots.ox.ac.uk;robots.ox.ac.uk", "email": "robots.ox.ac.uk;robots.ox.ac.uk", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of Oxford", "aff_unique_dep": "Department of Engineering Science", "aff_unique_url": "https://www.ox.ac.uk", "aff_unique_abbr": "Oxford", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Oxford", "aff_country_unique_index": "0;0", "aff_country_unique": "United Kingdom" }, { "id": "d5baefdefb", "title": "Blind Source Separation via Multinode Sparse Representation", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/f80bf05527157a8c2a7bb63b22f49aaa-Abstract.html", "author": "Michael Zibulevsky; Pavel Kisilev; Yehoshua Y. Zeevi; Barak A. Pearlmutter", "abstract": "We consider a problem of blind source separation from a set of instan(cid:173) taneous linear mixtures, where the mixing matrix is unknown. 
It was discovered recently, that exploiting the sparsity of sources in an appro(cid:173) priate representation according to some signal dictionary, dramatically improves the quality of separation. In this work we use the property of multi scale transforms, such as wavelet or wavelet packets, to decompose signals into sets of local features with various degrees of sparsity. We use this intrinsic property for selecting the best (most sparse) subsets of features for further separation. The performance of the algorithm is ver(cid:173) ified on noise-free and noisy data. Experiments with simulated signals, musical sounds and images demonstrate significant improvement of sep(cid:173) aration quality over previously reported results.", "bibtex": "@inproceedings{NIPS2001_f80bf055,\n author = {Zibulevsky, Michael and Kisilev, Pavel and Zeevi, Yehoshua and Pearlmutter, Barak},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Blind Source Separation via Multinode Sparse Representation},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/f80bf05527157a8c2a7bb63b22f49aaa-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/f80bf05527157a8c2a7bb63b22f49aaa-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/f80bf05527157a8c2a7bb63b22f49aaa-Metadata.json", "review": "", "metareview": "", "pdf_size": 1563370, "gs_citation": 130, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11408130479855104031&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 17, "aff": "Department of Electrical Engineering, Technion, Haifa 32000, Israel; Department of Electrical Engineering, Technion, Haifa 32000, Israel; Department of Electrical Engineering, Technion, Haifa 32000, Israel; Department of Computer Science, University of New Mexico, Albuquerque, 
NM 87131 USA", "aff_domain": "ee.technion.ac.il;ee.technion.ac.il;tx.technion.ac.il;cs.unm.edu", "email": "ee.technion.ac.il;ee.technion.ac.il;tx.technion.ac.il;cs.unm.edu", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;1", "aff_unique_norm": "Technion;University of New Mexico", "aff_unique_dep": "Department of Electrical Engineering;Department of Computer Science", "aff_unique_url": "https://www.technion.ac.il;https://www.unm.edu", "aff_unique_abbr": "Technion;UNM", "aff_campus_unique_index": "0;0;0;1", "aff_campus_unique": "Haifa;Albuquerque", "aff_country_unique_index": "0;0;0;1", "aff_country_unique": "Israel;United States" }, { "id": "9375d0633a", "title": "Boosting and Maximum Likelihood for Exponential Models", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/71e09b16e21f7b6919bbfc43f6a5b2f0-Abstract.html", "author": "Guy Lebanon; John D. Lafferty", "abstract": "We derive an equivalence between AdaBoost and the dual of a convex optimization problem, showing that the only difference between mini- mizing the exponential loss used by AdaBoost and maximum likelihood for exponential models is that the latter requires the model to be normal- ized to form a conditional probability distribution over labels. In addi- tion to establishing a simple and easily understood connection between the two methods, this framework enables us to derive new regularization procedures for boosting that directly correspond to penalized maximum likelihood. Experiments on UCI datasets support our theoretical analy- sis and give additional insight into the relationship between boosting and logistic regression.", "bibtex": "@inproceedings{NIPS2001_71e09b16,\n author = {Lebanon, Guy and Lafferty, John},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Boosting and Maximum Likelihood for Exponential Models},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/71e09b16e21f7b6919bbfc43f6a5b2f0-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/71e09b16e21f7b6919bbfc43f6a5b2f0-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/71e09b16e21f7b6919bbfc43f6a5b2f0-Metadata.json", "review": "", "metareview": "", "pdf_size": 143531, "gs_citation": 237, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16664284258974478894&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 23, "aff": "School of Computer Science, Carnegie Mellon University; School of Computer Science, Carnegie Mellon University", "aff_domain": "cs.cmu.edu;cs.cmu.edu", "email": "cs.cmu.edu;cs.cmu.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Carnegie Mellon University", "aff_unique_dep": "School of Computer Science", "aff_unique_url": "https://www.cmu.edu", "aff_unique_abbr": "CMU", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Pittsburgh", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "c6a03c69a2", "title": "Categorization by Learning and Combining Object Parts", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/05546b0e38ab9175cd905eebcc6ebb76-Abstract.html", "author": "Bernd Heisele; Thomas Serre; Massimiliano Pontil; Thomas Vetter; Tomaso Poggio", "abstract": "We describe an algorithm for automatically learning discriminative com- ponents of objects with SVM classi\ufb01ers. It is based on growing image parts by minimizing theoretical bounds on the error probability of an SVM. Component-based face classi\ufb01ers are then combined in a second stage to yield a hierarchical SVM classi\ufb01er. 
Experimental results in face classi\ufb01cation show considerable robustness against rotations in depth and suggest performance at signi\ufb01cantly better level than other face detection systems. Novel aspects of our approach are: a) an algorithm to learn component-based classi\ufb01cation experts and their combination, b) the use of 3-D morphable models for training, and c) a maximum operation on the output of each component classi\ufb01er which may be relevant for bio- logical models of visual recognition.", "bibtex": "@inproceedings{NIPS2001_05546b0e,\n author = {Heisele, Bernd and Serre, Thomas and Pontil, Massimiliano and Vetter, Thomas and Poggio, Tomaso},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Categorization by Learning and Combining Object Parts},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/05546b0e38ab9175cd905eebcc6ebb76-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/05546b0e38ab9175cd905eebcc6ebb76-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/05546b0e38ab9175cd905eebcc6ebb76-Metadata.json", "review": "", "metareview": "", "pdf_size": 947786, "gs_citation": 181, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16907609826148495629&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 22, "aff": "Center for Biological and Computational Learning, M.I.T., Cambridge, MA, USA; Honda R&D Americas, Inc., Boston, MA, USA; Department of Information Engineering, University of Siena, Siena, Italy; Computer Graphics Research Group, University of Freiburg, Freiburg, Germany; Center for Biological and Computational Learning, M.I.T., Cambridge, MA, USA", "aff_domain": "ai.mit.edu;ai.mit.edu;ing.unisi.it;informatik.uni-freiburg.de;ai.mit.edu", "email": 
"ai.mit.edu;ai.mit.edu;ing.unisi.it;informatik.uni-freiburg.de;ai.mit.edu", "github": "", "project": "", "author_num": 5, "track": "main", "status": "Poster", "aff_unique_index": "0;1;2;3;0", "aff_unique_norm": "Massachusetts Institute of Technology;Honda R&D Americas, Inc.;University of Siena;University of Freiburg", "aff_unique_dep": "Center for Biological and Computational Learning;;Department of Information Engineering;Computer Graphics Research Group", "aff_unique_url": "https://www.mit.edu;https://www.honda.com;https://www.unisi.it;https://www.uni-freiburg.de", "aff_unique_abbr": "MIT;Honda R&D;;", "aff_campus_unique_index": "0;1;2;3;0", "aff_campus_unique": "Cambridge;Boston;Siena;Freiburg", "aff_country_unique_index": "0;0;1;2;0", "aff_country_unique": "United States;Italy;Germany" }, { "id": "3b102b70f3", "title": "Causal Categorization with Bayes Nets", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/c5a4e7e6882845ea7bb4d9462868219b-Abstract.html", "author": "Bob Rehder", "abstract": "A theory of categorization is presented in which knowledge of causal relationships between category features is represented as a Bayesian network. Referred to as causal-model theory, this theory predicts that objects are classified as category members to the extent they are likely to have been produced by a categorys causal model. On this view, people have models of the world that lead them to expect a certain distribution of features in category members (e.g., correlations between feature pairs that are directly connected by causal relationships), and consider exemplars good category members when they manifest those expectations. These expectations include sensitivity to higher-order feature interactions that emerge from the asymmetries inherent in causal relationships.", "bibtex": "@inproceedings{NIPS2001_c5a4e7e6,\n author = {Rehder, Bob},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Causal Categorization with Bayes Nets},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/c5a4e7e6882845ea7bb4d9462868219b-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/c5a4e7e6882845ea7bb4d9462868219b-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/c5a4e7e6882845ea7bb4d9462868219b-Metadata.json", "review": "", "metareview": "", "pdf_size": 1452509, "gs_citation": 1, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2175833549003729399&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "Department of Psychology, New York University", "aff_domain": "nyu.edu", "email": "nyu.edu", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "New York University", "aff_unique_dep": "Department of Psychology", "aff_unique_url": "https://www.nyu.edu", "aff_unique_abbr": "NYU", "aff_campus_unique_index": "0", "aff_campus_unique": "New York", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "id": "370391831a", "title": "Characterizing Neural Gain Control using Spike-triggered Covariance", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/7d2b92b6726c241134dae6cd3fb8c182-Abstract.html", "author": "Odelia Schwartz; E. J. Chichilnisky; Eero P. Simoncelli", "abstract": "Spike-triggered averaging techniques are effective for linear characterization of neural responses. But neurons exhibit important nonlinear behaviors, such as gain control, that are not captured by such analyses. We describe a spike-triggered covariance method for retrieving suppressive components of the gain control signal in a neuron. We demonstrate the method in simulation and on retinal ganglion cell data. 
Analysis of physiological data reveals significant suppressive axes and explains neural nonlinearities. This method should be applicable to other sensory areas and modalities.", "bibtex": "@inproceedings{NIPS2001_7d2b92b6,\n author = {Schwartz, Odelia and Chichilnisky, E.J. and Simoncelli, Eero},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Characterizing Neural Gain Control using Spike-triggered Covariance},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/7d2b92b6726c241134dae6cd3fb8c182-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/7d2b92b6726c241134dae6cd3fb8c182-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/7d2b92b6726c241134dae6cd3fb8c182-Metadata.json", "review": "", "metareview": "", "pdf_size": 1035979, "gs_citation": 120, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3296338831979082603&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 21, "aff": "Center for Neural Science, New York University; Systems Neurobiology, The Salk Institute; Howard Hughes Medical Inst. 
+ Center for Neural Science, New York University", "aff_domain": "cns.nyu.edu;salk.edu;nyu.edu", "email": "cns.nyu.edu;salk.edu;nyu.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;2+0", "aff_unique_norm": "New York University;Salk Institute;Howard Hughes Medical Institute", "aff_unique_dep": "Center for Neural Science;Systems Neurobiology;", "aff_unique_url": "https://www.nyu.edu;https://www.salk.edu;https://www.hhmi.org", "aff_unique_abbr": "NYU;Salk Institute;HHMI", "aff_campus_unique_index": "0;0", "aff_campus_unique": "New York;", "aff_country_unique_index": "0;0;0+0", "aff_country_unique": "United States" }, { "id": "8acde2bfc0", "title": "Citcuits for VLSI Implementation of Temporally Asymmetric Hebbian Learning", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/ef41d488755367316f04fc0e0e9dc9fc-Abstract.html", "author": "A. Bofill; D. P. Thompson; Alan F. Murray", "abstract": "Experimental data has shown that synaptic strength modification in some types of biological neurons depends upon precise spike timing differences between presynaptic and postsynaptic spikes. Several temporally-asymmetric Hebbian learning rules motivated by this data have been proposed. We argue that such learning rules are suitable to analog VLSI implementation. We describe an easily tunable circuit to modify the weight of a silicon spiking neuron according to those learning rules. Test results from the fabrication of the circuit using a 0.6\u00b5m CMOS process are given.", "bibtex": "@inproceedings{NIPS2001_ef41d488,\n author = {Bofill, A. and Thompson, D. and Murray, Alan},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Citcuits for VLSI Implementation of Temporally Asymmetric Hebbian Learning},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/ef41d488755367316f04fc0e0e9dc9fc-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/ef41d488755367316f04fc0e0e9dc9fc-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/ef41d488755367316f04fc0e0e9dc9fc-Metadata.json", "review": "", "metareview": "", "pdf_size": 1176427, "gs_citation": 43, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1270482691834921522&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "Dept. of Electrical Engineering, The University of Edinburgh; Dept. of Electrical Engineering, The University of Edinburgh; Dept. of Electrical Engineering, The University of Edinburgh", "aff_domain": "ee.ed.ac.uk;ee.ed.ac.uk;ee.ed.ac.uk", "email": "ee.ed.ac.uk;ee.ed.ac.uk;ee.ed.ac.uk", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of Edinburgh", "aff_unique_dep": "Dept. of Electrical Engineering", "aff_unique_url": "https://www.ed.ac.uk", "aff_unique_abbr": "Edinburgh", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United Kingdom" }, { "id": "826f26c39a", "title": "Classifying Single Trial EEG: Towards Brain Computer Interfacing", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/2d579dc29360d8bbfbb4aa541de5afa9-Abstract.html", "author": "Benjamin Blankertz; Gabriel Curio; Klaus-Robert M\u00fcller", "abstract": "Driven by the progress in the \ufb01eld of single-trial analysis of EEG, there is a growing interest in brain computer interfaces (BCIs), i.e., systems that enable human subjects to control a computer only by means of their brain signals. 
In a pseudo-online simulation our BCI detects upcoming \ufb01nger movements in a natural keyboard typing condition and predicts their lat- erality. This can be done on average 100\u2013230 ms before the respective key is actually pressed, i.e., long before the onset of EMG. Our approach is appealing for its short response time and high classi\ufb01cation accuracy (>96%) in a binary decision where no human training is involved. We compare discriminative classi\ufb01ers like Support Vector Machines (SVMs) and different variants of Fisher Discriminant that possess favorable reg- ularization properties for dealing with high noise cases (inter-trial vari- ablity).", "bibtex": "@inproceedings{NIPS2001_2d579dc2,\n author = {Blankertz, Benjamin and Curio, Gabriel and M\\\"{u}ller, Klaus-Robert},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Classifying Single Trial EEG: Towards Brain Computer Interfacing},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/2d579dc29360d8bbfbb4aa541de5afa9-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/2d579dc29360d8bbfbb4aa541de5afa9-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/2d579dc29360d8bbfbb4aa541de5afa9-Metadata.json", "review": "", "metareview": "", "pdf_size": 116728, "gs_citation": 707, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=619509154074779848&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 17, "aff": "Fraunhofer-FIRST.IDA, Kekul\u00e9str. 7, 12489 Berlin, Germany; Neurophysics Group, Dept. of Neurology, Klinikum Benjamin Franklin, Freie Universit\u00e4t Berlin, Hindenburgdamm 30, 12203 Berlin, Germany; Fraunhofer-FIRST.IDA, Kekul\u00e9str. 
7, 12489 Berlin, Germany + University of Potsdam, Am Neuen Palais 10, 14469 Potsdam, Germany", "aff_domain": "\u0003; \u0004; \u0006\u0005", "email": "\u0003; \u0004; \u0006\u0005", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;0+2", "aff_unique_norm": "Fraunhofer Institute for Software and Systems Engineering;Freie Universit\u00e4t Berlin;University of Potsdam", "aff_unique_dep": "FIRST.IDA;Dept. of Neurology;", "aff_unique_url": "https://www.first.ida.fraunhofer.de/;https://www.fu-berlin.de;https://www.uni-potsdam.de", "aff_unique_abbr": "Fraunhofer-FIRST.IDA;FU Berlin;", "aff_campus_unique_index": "1;", "aff_campus_unique": ";Berlin", "aff_country_unique_index": "0;0;0+0", "aff_country_unique": "Germany" }, { "id": "5444b1e675", "title": "Cobot: A Social Reinforcement Learning Agent", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/92bbd31f8e0e43a7da8a6295b251725f-Abstract.html", "author": "Charles Lee Isbell Jr.; Christian R. Shelton", "abstract": "We report on the use of reinforcement learning with Cobot, a software agent residing in the well-known online community LambdaMOO. Our initial work on Cobot (Isbell et al.2000) provided him with the ability to collect social statistics and report them to users. Here we describe an application of RL allowing Cobot to take proactive actions in this complex social environment, and adapt behavior from multiple sources of human reward. After 5 months of training, and 3171 reward and punishment events from 254 different LambdaMOO users, Cobot learned nontrivial preferences for a number of users, modi\ufb01ng his behavior based on his current state. 
Here we describe LambdaMOO and the state and action spaces of Cobot, and report the statistical results of the learning experiment.", "bibtex": "@inproceedings{NIPS2001_92bbd31f,\n author = {Isbell, Charles and Shelton, Christian},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Cobot: A Social Reinforcement Learning Agent},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/92bbd31f8e0e43a7da8a6295b251725f-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/92bbd31f8e0e43a7da8a6295b251725f-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/92bbd31f8e0e43a7da8a6295b251725f-Metadata.json", "review": "", "metareview": "", "pdf_size": 135619, "gs_citation": 232, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=766832207153063511&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 44, "aff": ";", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster" }, { "id": "09a414e0d2", "title": "Computing Time Lower Bounds for Recurrent Sigmoidal Neural Networks", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/1a0a283bfe7c549dee6c638a05200e32-Abstract.html", "author": "M. Schmitt", "abstract": "Recurrent neural networks of analog units are computers for real(cid:173) valued functions. We study the time complexity of real computa(cid:173) tion in general recurrent neural networks. These have sigmoidal, linear, and product units of unlimited order as nodes and no re(cid:173) strictions on the weights. For networks operating in discrete time, we exhibit a family of functions with arbitrarily high complexity, and we derive almost tight bounds on the time required to compute these functions. 
Thus, evidence is given of the computational lim(cid:173) itations that time-bounded analog recurrent neural networks are subject to.", "bibtex": "@inproceedings{NIPS2001_1a0a283b,\n author = {Schmitt, M.},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Computing Time Lower Bounds for Recurrent Sigmoidal Neural Networks},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/1a0a283bfe7c549dee6c638a05200e32-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/1a0a283bfe7c549dee6c638a05200e32-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/1a0a283bfe7c549dee6c638a05200e32-Metadata.json", "review": "", "metareview": "", "pdf_size": 1457364, "gs_citation": 0, "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:NX-LQ_rD_FwJ:scholar.google.com/&scioq=Computing+Time+Lower+Bounds+for+Recurrent+Sigmoidal+Neural+Networks&hl=en&as_sdt=0,5", "gs_version_total": 13, "aff": "Lehrstuhl Mathematik und Informatik, Fakultat fUr Mathematik Ruhr-Universitat Bochum, D-44780 Bochum, Germany", "aff_domain": "lmi.ruhr-uni-bochum.de", "email": "lmi.ruhr-uni-bochum.de", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "Ruhr-Universitat Bochum", "aff_unique_dep": "Lehrstuhl Mathematik und Informatik", "aff_unique_url": "https://www.ruhr-uni-bochum.de", "aff_unique_abbr": "", "aff_campus_unique_index": "0", "aff_campus_unique": "Bochum", "aff_country_unique_index": "0", "aff_country_unique": "Germany" }, { "id": "de1c1257d3", "title": "Constructing Distributed Representations Using Additive Clustering", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/819e3d6c1381eac87c17617e5165f38c-Abstract.html", "author": "Wheeler Ruml", "abstract": "If the promise 
of computational modeling is to be fully realized in higher- level cognitive domains such as language processing, principled methods must be developed to construct the semantic representations used in such models. In this paper, we propose the use of an established formalism from mathematical psychology, additive clustering, as a means of auto- matically constructing binary representations for objects using only pair- wise similarity data. However, existing methods for the unsupervised learning of additive clustering models do not scale well to large prob- lems. We present a new algorithm for additive clustering, based on a novel heuristic technique for combinatorial optimization. The algorithm is simpler than previous formulations and makes fewer independence as- sumptions. Extensive empirical tests on both human and synthetic data suggest that it is more effective than previous methods and that it also scales better to larger problems. By making additive clustering practical, we take a significant step toward scaling connectionist models beyond hand-coded examples. 1 Introduction", "bibtex": "@inproceedings{NIPS2001_819e3d6c,\n author = {Ruml, Wheeler},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Constructing Distributed Representations Using Additive Clustering},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/819e3d6c1381eac87c17617e5165f38c-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/819e3d6c1381eac87c17617e5165f38c-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/819e3d6c1381eac87c17617e5165f38c-Metadata.json", "review": "", "metareview": "", "pdf_size": 56384, "gs_citation": 12, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2128774404716642481&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 16, "aff": "Division of Engineering and Applied Sciences, Harvard University", "aff_domain": "eecs.harvard.edu", "email": "eecs.harvard.edu", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "Harvard University", "aff_unique_dep": "Division of Engineering and Applied Sciences", "aff_unique_url": "https://www.harvard.edu", "aff_unique_abbr": "Harvard", "aff_campus_unique_index": "0", "aff_campus_unique": "Cambridge", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "id": "8ea49b7e8c", "title": "Contextual Modulation of Target Saliency", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/1f4fe6a4411edc2ff625888b4093e917-Abstract.html", "author": "Antonio Torralba", "abstract": "The most popular algorithms for object detection require the use of exhaustive spatial and scale search procedures. In such approaches, an object is defined by means of local features. fu this paper we show that including contextual information in object detection pro(cid:173) cedures provides an efficient way of cutting down the need for exhaustive search. 
We present results with real images showing that the proposed scheme is able to accurately predict likely object classes, locations and sizes.", "bibtex": "@inproceedings{NIPS2001_1f4fe6a4,\n author = {Torralba, Antonio},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Contextual Modulation of Target Saliency},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/1f4fe6a4411edc2ff625888b4093e917-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/1f4fe6a4411edc2ff625888b4093e917-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/1f4fe6a4411edc2ff625888b4093e917-Metadata.json", "review": "", "metareview": "", "pdf_size": 1576292, "gs_citation": 44, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16615303204075155604&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Dept. of Brain and Cognitive Sciences, MIT, Cambridge, MA 02139", "aff_domain": "ai.mit.edu", "email": "ai.mit.edu", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "Massachusetts Institute of Technology", "aff_unique_dep": "Department of Brain and Cognitive Sciences", "aff_unique_url": "https://www.mit.edu", "aff_unique_abbr": "MIT", "aff_campus_unique_index": "0", "aff_campus_unique": "Cambridge", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "id": "5663dc7339", "title": "Convergence of Optimistic and Incremental Q-Learning", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/6f2688a5fce7d48c8d19762b88c32c3b-Abstract.html", "author": "Eyal Even-dar; Yishay Mansour", "abstract": "Vie sho,v the convergence of tV/O deterministic variants of Q(cid:173) learning. 
The first is the widely used optimistic Q-learning, which initializes the Q-values to large initial values and then follows a greedy policy with respect to the Q-values. We show that setting the initial value sufficiently large guarantees the converges to an E(cid:173) optimal policy. The second is a new and novel algorithm incremen(cid:173) tal Q-learning, which gradually promotes the values of actions that are not taken. We show that incremental Q-learning converges, in the limit, to the optimal policy. Our incremental Q-learning algo(cid:173) rithm can be viewed as derandomization of the E-greedy Q-learning.", "bibtex": "@inproceedings{NIPS2001_6f2688a5,\n author = {Even-dar, Eyal and Mansour, Yishay},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Convergence of Optimistic and Incremental Q-Learning},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/6f2688a5fce7d48c8d19762b88c32c3b-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/6f2688a5fce7d48c8d19762b88c32c3b-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/6f2688a5fce7d48c8d19762b88c32c3b-Metadata.json", "review": "", "metareview": "", "pdf_size": 759282, "gs_citation": 88, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12615567948704785061&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "School of Computer Science, Tel-Aviv University, Tel-Aviv, Israel; School of Computer Science, Tel-Aviv University, Israel", "aff_domain": "cs.tau.ac.il;cs.tau.ac.il", "email": "cs.tau.ac.il;cs.tau.ac.il", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Tel-Aviv University", "aff_unique_dep": "School of Computer Science", "aff_unique_url": "https://www.tau.ac.il", 
"aff_unique_abbr": "TAU", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Tel-Aviv", "aff_country_unique_index": "0;0", "aff_country_unique": "Israel" }, { "id": "09dd961e23", "title": "Convolution Kernels for Natural Language", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/bf424cb7b0dea050a42b9739eb261a3a-Abstract.html", "author": "Michael Collins; Nigel Duffy", "abstract": "We describe the application of kernel methods to Natural Language Pro- cessing (NLP) problems. In many NLP tasks the objects being modeled are strings, trees, graphs or other discrete structures which require some mechanism to convert them into feature vectors. We describe kernels for various natural language structures, allowing rich, high dimensional rep- resentations of these structures. We show how a kernel over trees can be applied to parsing using the voted perceptron algorithm, and we give experimental results on the ATIS corpus of parse trees.", "bibtex": "@inproceedings{NIPS2001_bf424cb7,\n author = {Collins, Michael and Duffy, Nigel},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Convolution Kernels for Natural Language},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/bf424cb7b0dea050a42b9739eb261a3a-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/bf424cb7b0dea050a42b9739eb261a3a-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/bf424cb7b0dea050a42b9739eb261a3a-Metadata.json", "review": "", "metareview": "", "pdf_size": 94761, "gs_citation": 1267, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9481191489485235027&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 31, "aff": "AT&T Labs\u2013Research; Department of Computer Science, University of California at Santa Cruz", "aff_domain": "research.att.com;cse.ucsc.edu", "email": "research.att.com;cse.ucsc.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "AT&T Labs;University of California, Santa Cruz", "aff_unique_dep": "Research;Department of Computer Science", "aff_unique_url": "https://www.att.com/labs/research;https://www.ucsc.edu", "aff_unique_abbr": "AT&T Labs;UCSC", "aff_campus_unique_index": "1", "aff_campus_unique": ";Santa Cruz", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "f3e9812636", "title": "Correlation Codes in Neuronal Populations", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/88ef51f0bf911e452e8dbb1d807a81ab-Abstract.html", "author": "Maoz Shamir; Haim Sompolinsky", "abstract": "Population codes often rely on the tuning of the mean responses to the stimulus parameters. However, this information can be greatly sup- pressed by long range correlations. Here we study the ef\ufb01ciency of cod- ing information in the second order statistics of the population responses. 
We show that the Fisher Information of this system grows linearly with the size of the system. We propose a bilinear readout model for extract- ing information from correlation codes, and evaluate its performance in discrimination and estimation tasks. It is shown that the main source of information in this system is the stimulus dependence of the variances of the single neuron responses.", "bibtex": "@inproceedings{NIPS2001_88ef51f0,\n author = {Shamir, Maoz and Sompolinsky, Haim},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Correlation Codes in Neuronal Populations},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/88ef51f0bf911e452e8dbb1d807a81ab-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/88ef51f0bf911e452e8dbb1d807a81ab-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/88ef51f0bf911e452e8dbb1d807a81ab-Metadata.json", "review": "", "metareview": "", "pdf_size": 103679, "gs_citation": 20, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11977573038192242388&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 14, "aff": "Racah Institute of Physics and Center for Neural Computation, The Hebrew University of Jerusalem, Jerusalem 91904, Israel; Racah Institute of Physics and Center for Neural Computation, The Hebrew University of Jerusalem, Jerusalem 91904, Israel", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Hebrew University of Jerusalem", "aff_unique_dep": "Racah Institute of Physics", "aff_unique_url": "http://www.huji.ac.il", "aff_unique_abbr": "HUJI", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Jerusalem", "aff_country_unique_index": "0;0", "aff_country_unique": 
"Israel" }, { "id": "ca3c76547f", "title": "Covariance Kernels from Bayesian Generative Models", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/c902b497eb972281fb5b4e206db38ee6-Abstract.html", "author": "Matthias Seeger", "abstract": "We propose the framework of mutual information kernels for learning covariance kernels, as used in Support Vector machines and Gaussian process classifiers, from unlabeled task data using Bayesian techniques. We describe an implementation of this frame(cid:173) work which uses variational Bayesian mixtures of factor analyzers in order to attack classification problems in high-dimensional spaces where labeled data is sparse, but unlabeled data is abundant.", "bibtex": "@inproceedings{NIPS2001_c902b497,\n author = {Seeger, Matthias},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Covariance Kernels from Bayesian Generative Models},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/c902b497eb972281fb5b4e206db38ee6-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/c902b497eb972281fb5b4e206db38ee6-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/c902b497eb972281fb5b4e206db38ee6-Metadata.json", "review": "", "metareview": "", "pdf_size": 1561415, "gs_citation": 105, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17464748208998687216&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 11, "aff": "Institute for Adaptive and Neural Computation, University of Edinburgh", "aff_domain": "dai.ed.ac.uk", "email": "dai.ed.ac.uk", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "University of Edinburgh", "aff_unique_dep": "Institute for Adaptive and Neural Computation", "aff_unique_url": 
"https://www.ed.ac.uk", "aff_unique_abbr": "Edinburgh", "aff_campus_unique_index": "0", "aff_campus_unique": "Edinburgh", "aff_country_unique_index": "0", "aff_country_unique": "United Kingdom" }, { "id": "d66ee90fed", "title": "Direct value-approximation for factored MDPs", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/b3b4d2dbedc99fe843fd3dedb02f086f-Abstract.html", "author": "Dale Schuurmans; Relu Patrascu", "abstract": "We present a simple approach for computing reasonable policies for factored Markov decision processes (MDPs), when the opti(cid:173) mal value function can be approximated by a compact linear form. Our method is based on solving a single linear program that ap(cid:173) proximates the best linear fit to the optimal value function. By applying an efficient constraint generation procedure we obtain an iterative solution method that tackles concise linear programs. This direct linear programming approach experimentally yields a signif(cid:173) icant reduction in computation time over approximate value- and policy-iteration methods (sometimes reducing several hours to a few seconds). However, the quality of the solutions produced by linear programming is weaker-usually about twice the approxi(cid:173) mation error for the same approximating class. Nevertheless, the speed advantage allows one to use larger approximation classes to achieve similar error in reasonable time.", "bibtex": "@inproceedings{NIPS2001_b3b4d2db,\n author = {Schuurmans, Dale and Patrascu, Relu},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Direct value-approximation for factored MDPs},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/b3b4d2dbedc99fe843fd3dedb02f086f-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/b3b4d2dbedc99fe843fd3dedb02f086f-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/b3b4d2dbedc99fe843fd3dedb02f086f-Metadata.json", "review": "", "metareview": "", "pdf_size": 824177, "gs_citation": 115, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8244853058301707713&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Department ofComputer Science, University ofWaterloo; Department ofComputer Science, University ofWaterloo", "aff_domain": "cs.uwaterloo.ca;cs.uwaterloo.ca", "email": "cs.uwaterloo.ca;cs.uwaterloo.ca", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of Waterloo", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://uwaterloo.ca", "aff_unique_abbr": "UW", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "Canada" }, { "id": "7e92884cab", "title": "Discriminative Direction for Kernel Classifiers", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/1f36c15d6a3d18d52e8d493bc8187cb9-Abstract.html", "author": "Polina Golland", "abstract": "In many scientific and engineering applications, detecting and under- standing differences between two groups of examples can be reduced to a classical problem of training a classifier for labeling new examples while making as few mistakes as possible. In the traditional classifi- cation setting, the resulting classifier is rarely analyzed in terms of the properties of the input data captured by the discriminative model. 
How- ever, such analysis is crucial if we want to understand and visualize the detected differences. We propose an approach to interpretation of the sta- tistical model in the original feature space that allows us to argue about the model in terms of the relevant changes to the input vectors. For each point in the input space, we define a discriminative direction to be the direction that moves the point towards the other class while introducing as little irrelevant change as possible with respect to the classifier func- tion. We derive the discriminative direction for kernel-based classifiers, demonstrate the technique on several examples and briefly discuss its use in the statistical shape analysis, an application that originally motivated this work. 1 Introduction", "bibtex": "@inproceedings{NIPS2001_1f36c15d,\n author = {Golland, Polina},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Discriminative Direction for Kernel Classifiers},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/1f36c15d6a3d18d52e8d493bc8187cb9-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/1f36c15d6a3d18d52e8d493bc8187cb9-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/1f36c15d6a3d18d52e8d493bc8187cb9-Metadata.json", "review": "", "metareview": "", "pdf_size": 121863, "gs_citation": 39, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16177971365572847231&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "", "aff_domain": "", "email": "", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster" }, { "id": "935a837573", "title": "Distribution of Mutual Information", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/fb2e203234df6dee15934e448ee88971-Abstract.html", "author": "Marcus 
Hutter", "abstract": "The mutual information of two random variables z and J with joint probabilities {7rij} is commonly used in learning Bayesian nets as well as in many other fields. The chances 7rij are usually estimated by the empirical sampling frequency nij In leading to a point es(cid:173) timate J(nij In) for the mutual information. To answer questions like \"is J (nij In) consistent with zero?\" or \"what is the probability that the true mutual information is much larger than the point es(cid:173) timate?\" one has to go beyond the point estimate. In the Bayesian framework one can answer these questions by utilizing a (second order) prior distribution p( 7r) comprising prior information about 7r. From the prior p(7r) one can compute the posterior p(7rln), from which the distribution p(Iln) of the mutual information can be cal(cid:173) culated. We derive reliable and quickly computable approximations for p(Iln). We concentrate on the mean, variance, skewness, and kurtosis, and non-informative priors. For the mean we also give an exact expression. Numerical issues and the range of validity are discussed.", "bibtex": "@inproceedings{NIPS2001_fb2e2032,\n author = {Hutter, Marcus},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Distribution of Mutual Information},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/fb2e203234df6dee15934e448ee88971-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/fb2e203234df6dee15934e448ee88971-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/fb2e203234df6dee15934e448ee88971-Metadata.json", "review": "", "metareview": "", "pdf_size": 1353500, "gs_citation": 110, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10014114349587526188&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 16, "aff": "IDSIA, Galleria 2, CH-6928 Manno-Lugano, Switzerland", "aff_domain": "idsia.ch", "email": "idsia.ch", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "IDSIA", "aff_unique_dep": "", "aff_unique_url": "https://www.idsia.ch", "aff_unique_abbr": "", "aff_country_unique_index": "0", "aff_country_unique": "Switzerland" }, { "id": "bef5fa949b", "title": "Duality, Geometry, and Support Vector Regression", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/f6e794a75c5d51de081dbefa224304f9-Abstract.html", "author": "J. Bi; Kristin P. Bennett", "abstract": "We develop an intuitive geometric framework for support vector regression (SVR). By examining when (cid:15)-tubes exist, we show that SVR can be regarded as a classi(cid:12)cation problem in the dual space. Hard and soft (cid:15)-tubes are constructed by separating the convex or reduced convex hulls respectively of the training data with the response variable shifted up and down by (cid:15). A novel SVR model is proposed based on choosing the max-margin plane between the two shifted datasets. Maximizing the margin corresponds to shrinking the e(cid:11)ective (cid:15)-tube. 
In the proposed approach the e(cid:11)ects of the choices of all parameters become clear geometrically.", "bibtex": "@inproceedings{NIPS2001_f6e794a7,\n author = {Bi, J. and Bennett, Kristin},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Duality, Geometry, and Support Vector Regression},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/f6e794a75c5d51de081dbefa224304f9-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/f6e794a75c5d51de081dbefa224304f9-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/f6e794a75c5d51de081dbefa224304f9-Metadata.json", "review": "", "metareview": "", "pdf_size": 163667, "gs_citation": 37, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5420835823907203472&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 13, "aff": "Department of Mathematical Sciences, Rensselaer Polytechnic Institute; Department of Mathematical Sciences, Rensselaer Polytechnic Institute", "aff_domain": "rpi.edu;rpi.edu", "email": "rpi.edu;rpi.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Rensselaer Polytechnic Institute", "aff_unique_dep": "Department of Mathematical Sciences", "aff_unique_url": "https://www.rpi.edu", "aff_unique_abbr": "RPI", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "a8a0494670", "title": "Dynamic Time-Alignment Kernel in Support Vector Machine", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/a869ccbcbd9568808b8497e28275c7c8-Abstract.html", "author": "Hiroshi Shimodaira; Ken-ichi Noma; Mitsuru Nakai; Shigeki Sagayama", "abstract": "A new class of Support Vector Machine (SVM) that is applica- 
ble to sequential-pattern recognition such as speech recognition is developed by incorporating an idea of non-linear time alignment into the kernel function. Since the time-alignment operation of sequential pattern is embedded in the new kernel function, stan- dard SVM training and classification algorithms can be employed without further modifications. The proposed SVM (DTAK-SVM) is evaluated in speaker-dependent speech recognition experiments of hand-segmented phoneme recognition. Preliminary experimen- tal results show comparable recognition performance with hidden Markov models (HMMs). 1 Introduction", "bibtex": "@inproceedings{NIPS2001_a869ccbc,\n author = {Shimodaira, Hiroshi and Noma, Ken-ichi and Nakai, Mitsuru and Sagayama, Shigeki},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Dynamic Time-Alignment Kernel in Support Vector Machine},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/a869ccbcbd9568808b8497e28275c7c8-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/a869ccbcbd9568808b8497e28275c7c8-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/a869ccbcbd9568808b8497e28275c7c8-Metadata.json", "review": "", "metareview": "", "pdf_size": 83822, "gs_citation": 320, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17907042747376544500&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 19, "aff": "School of Information Science, Japan Advanced Institute of Science and Technology; School of Information Science, Japan Advanced Institute of Science and Technology; School of Information Science, Japan Advanced Institute of Science and Technology; Graduate School of Information Science and Technology, The University of Tokyo", "aff_domain": "jaist.ac.jp;jaist.ac.jp;jaist.ac.jp;hil.t.u-tokyo.ac.jp", "email": 
"jaist.ac.jp;jaist.ac.jp;jaist.ac.jp;hil.t.u-tokyo.ac.jp", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;1", "aff_unique_norm": "Japan Advanced Institute of Science and Technology;University of Tokyo", "aff_unique_dep": "School of Information Science;Graduate School of Information Science and Technology", "aff_unique_url": "https://www.jaist.ac.jp;https://www.u-tokyo.ac.jp", "aff_unique_abbr": "JAIST;UTokyo", "aff_campus_unique_index": "1", "aff_campus_unique": ";Tokyo", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "Japan" }, { "id": "ab41ecde3d", "title": "EM-DD: An Improved Multiple-Instance Learning Technique", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/e4dd5528f7596dcdf871aa55cfccc53c-Abstract.html", "author": "Qi Zhang; Sally A. Goldman", "abstract": "We present a new multiple-instance (MI) learning technique (EM(cid:173) DD) that combines EM with the diverse density (DD) algorithm. EM-DD is a general-purpose MI algorithm that can be applied with boolean or real-value labels and makes real-value predictions. On the boolean Musk benchmarks, the EM-DD algorithm without any tuning significantly outperforms all previous algorithms. EM-DD is relatively insensitive to the number of relevant attributes in the data set and scales up well to large bag sizes. Furthermore, EM(cid:173) DD provides a new framework for MI learning, in which the MI problem is converted to a single-instance setting by using EM to estimate the instance responsible for the label of the bag.", "bibtex": "@inproceedings{NIPS2001_e4dd5528,\n author = {Zhang, Qi and Goldman, Sally},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {EM-DD: An Improved Multiple-Instance Learning Technique},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/e4dd5528f7596dcdf871aa55cfccc53c-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/e4dd5528f7596dcdf871aa55cfccc53c-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/e4dd5528f7596dcdf871aa55cfccc53c-Metadata.json", "review": "", "metareview": "", "pdf_size": 1503027, "gs_citation": 961, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=941056925189880708&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "Department of Computer Science, Washington University, St. Louis, MO 63130-4899; Department of Computer Science, Washington University, St. Louis, MO 63130-4899", "aff_domain": "cs.wustl.edu;cs.wustl.edu", "email": "cs.wustl.edu;cs.wustl.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Washington University", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://wustl.edu", "aff_unique_abbr": "WU", "aff_campus_unique_index": "0;0", "aff_campus_unique": "St. Louis", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "f27219917d", "title": "Effective Size of Receptive Fields of Inferior Temporal Visual Cortex Neurons in Natural Scenes", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/3937230de3c8041e4da6ac3246a888e8-Abstract.html", "author": "Thomas P. Trappenberg; Edmund T. Rolls; Simon M. Stringer", "abstract": "Inferior temporal cortex (IT) neurons have large receptive \ufb01elds when a single effective object stimulus is shown against a blank background, but have much smaller receptive \ufb01elds when the object is placed in a natural scene. 
Thus, translation invariant object recognition is reduced in natural scenes, and this may help object selection. We describe a model which accounts for this by competition within an attractor in which the neurons are tuned to different objects in the scene, and the fovea has a higher cortical magni\ufb01cation factor than the peripheral visual \ufb01eld. Further- more, we show that top-down object bias can increase the receptive \ufb01eld size, facilitating object search in complex visual scenes, and providing a model of object-based attention. The model leads to the prediction that introduction of a second object into a scene with blank background will reduce the receptive \ufb01eld size to values that depend on the closeness of the second object to the target stimulus. We suggest that mechanisms of this type enable the output of IT to be primarily about one object, so that the areas that receive from IT can select the object as a potential target for action.", "bibtex": "@inproceedings{NIPS2001_3937230d,\n author = {Trappenberg, Thomas and Rolls, Edmund and Stringer, Simon},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Effective Size of Receptive Fields of Inferior Temporal Visual Cortex Neurons in Natural Scenes},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/3937230de3c8041e4da6ac3246a888e8-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/3937230de3c8041e4da6ac3246a888e8-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/3937230de3c8041e4da6ac3246a888e8-Metadata.json", "review": "", "metareview": "", "pdf_size": 90239, "gs_citation": 26, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1686596234766274580&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 22, "aff": "Dalhousie University, Faculty of Computer Science, 5060 University Avenue, Halifax B3H 1W5, Canada; University of Oxford, Centre for Computational Neuroscience, Department of Experimental Psychology, South Parks Road, Oxford OX1 3UD, UK; University of Oxford, Centre for Computational Neuroscience, Department of Experimental Psychology, South Parks Road, Oxford OX1 3UD, UK", "aff_domain": "cs.dal.ca;psy.ox.ac.uk;psy.ox.ac.uk", "email": "cs.dal.ca;psy.ox.ac.uk;psy.ox.ac.uk", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;1", "aff_unique_norm": "Dalhousie University;University of Oxford", "aff_unique_dep": "Faculty of Computer Science;Department of Experimental Psychology", "aff_unique_url": "https://www.dal.ca;https://www.ox.ac.uk", "aff_unique_abbr": "Dalhousie;Oxford", "aff_campus_unique_index": "0;1;1", "aff_campus_unique": "Halifax;Oxford", "aff_country_unique_index": "0;1;1", "aff_country_unique": "Canada;United Kingdom" }, { "id": "a1f0d6db9e", "title": "Efficiency versus Convergence of Boolean Kernels for On-Line Learning Algorithms", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/2cad8fa47bbef282badbb8de5374b894-Abstract.html", "author": 
"Roni Khardon; Dan Roth; Rocco A. Servedio", "abstract": "We study online learning in Boolean domains using kernels which cap- ture feature expansions equivalent to using conjunctions over basic fea- tures. We demonstrate a tradeoff between the computational ef\ufb01ciency with which these kernels can be computed and the generalization abil- ity of the resulting classi\ufb01er. We \ufb01rst describe several kernel functions which capture either limited forms of conjunctions or all conjunctions. We show that these kernels can be used to ef\ufb01ciently run the Percep- tron algorithm over an exponential number of conjunctions; however we also prove that using such kernels the Perceptron algorithm can make an exponential number of mistakes even when learning simple func- tions. We also consider an analogous use of kernel functions to run the multiplicative-update Winnow algorithm over an expanded feature space of exponentially many conjunctions. While known upper bounds imply that Winnow can learn DNF formulae with a polynomial mistake bound in this setting, we prove that it is computationally hard to simulate Win- now\u2019s behavior for learning DNF over such a feature set, and thus that such kernel functions for Winnow are not ef\ufb01ciently computable.", "bibtex": "@inproceedings{NIPS2001_2cad8fa4,\n author = {Khardon, Roni and Roth, Dan and Servedio, Rocco A},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Efficiency versus Convergence of Boolean Kernels for On-Line Learning Algorithms},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/2cad8fa47bbef282badbb8de5374b894-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/2cad8fa47bbef282badbb8de5374b894-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/2cad8fa47bbef282badbb8de5374b894-Metadata.json", "review": "", "metareview": "", "pdf_size": 130839, "gs_citation": 50, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10279596400194986128&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 26, "aff": "Tufts University; University of Illinois; Harvard University", "aff_domain": "eecs.tufts.edu;cs.uiuc.edu;deas.harvard.edu", "email": "eecs.tufts.edu;cs.uiuc.edu;deas.harvard.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;2", "aff_unique_norm": "Tufts University;University of Illinois;Harvard University", "aff_unique_dep": ";;", "aff_unique_url": "https://www.tufts.edu;https://www.illinois.edu;https://www.harvard.edu", "aff_unique_abbr": "Tufts;UIUC;Harvard", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "c108322bea", "title": "Efficient Resources Allocation for Markov Decision Processes", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/d79c6256b9bdac53a55801a066b70da3-Abstract.html", "author": "R\u00e9mi Munos", "abstract": "It is desirable that a complex decision-making problem in an uncer(cid:173) tain world be adequately modeled by a Markov Decision Process (MDP) whose structural representation is adaptively designed by a parsimonious resources allocation process. 
Resources include time and cost of exploration, amount of memory and computational time allowed for the policy or value function representation. Concerned about making the best use of the available resources, we address the problem of efficiently estimating where adding extra resources is highly needed in order to improve the expected performance of the resulting policy. Possible application in reinforcement learning (RL) , when real-world exploration is highly costly, concerns the de(cid:173) tection of those areas of the state-space that need primarily to be explored in order to improve the policy. Another application con(cid:173) cerns approximation of continuous state-space stochastic control problems using adaptive discretization techniques for which highly efficient grid points allocation is mandatory to survive high dimen(cid:173) sionality. Maybe surprisingly these two problems can be formu(cid:173) lated under a common framework: for a given resource allocation, which defines a belief state over possible MDPs, find where adding new resources (thus decreasing the uncertainty of some parame(cid:173) ters -transition probabilities or rewards) will most likely increase the expected performance of the new policy. To do so, we use sam(cid:173) pling techniques for estimating the contribution of each parameter's probability distribution function (Pdf) to the expected loss of us(cid:173) ing an approximate policy (such as the optimal policy of the most probable MDP) instead of the true (but unknown) policy.", "bibtex": "@inproceedings{NIPS2001_d79c6256,\n author = {Munos, R\\'{e}mi},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Efficient Resources Allocation for Markov Decision Processes},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/d79c6256b9bdac53a55801a066b70da3-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/d79c6256b9bdac53a55801a066b70da3-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/d79c6256b9bdac53a55801a066b70da3-Metadata.json", "review": "", "metareview": "", "pdf_size": 727263, "gs_citation": 17, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13523666309874842977&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "CMAP, Ecole Polytechnique, 91128 Palaiseau, France", "aff_domain": "polytechnique.fr", "email": "polytechnique.fr", "github": "", "project": "http://www.cmap.polytechnique.fr/", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "Ecole Polytechnique", "aff_unique_dep": "CMAP", "aff_unique_url": "https://www.polytechnique.edu", "aff_unique_abbr": "Polytechnique", "aff_campus_unique_index": "0", "aff_campus_unique": "Palaiseau", "aff_country_unique_index": "0", "aff_country_unique": "France" }, { "id": "dffeca4e1f", "title": "Entropy and Inference, Revisited", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/d46e1fcf4c07ce4a69ee07e4134bcef1-Abstract.html", "author": "Ilya Nemenman; F. Shafee; William Bialek", "abstract": "We study properties of popular near\u2013uniform (Dirichlet) priors for learn- ing undersampled probability distributions on discrete nonmetric spaces and show that they lead to disastrous results. However, an Occam\u2013style phase space argument expands the priors into their in\ufb01nite mixture and resolves most of the observed problems. 
This leads to a surprisingly good estimator of entropies of discrete distributions.", "bibtex": "@inproceedings{NIPS2001_d46e1fcf,\n author = {Nemenman, Ilya and Shafee, F. and Bialek, William},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Entropy and Inference, Revisited},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/d46e1fcf4c07ce4a69ee07e4134bcef1-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/d46e1fcf4c07ce4a69ee07e4134bcef1-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/d46e1fcf4c07ce4a69ee07e4134bcef1-Metadata.json", "review": "", "metareview": "", "pdf_size": 130195, "gs_citation": 369, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10614066825843699598&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 22, "aff": "NEC Research Institute, 4 Independence Way, Princeton, New Jersey 08540 + Institute for Theoretical Physics, University of California, Santa Barbara, CA 93106; Department of Physics, Princeton University, Princeton, New Jersey 08544; NEC Research Institute, 4 Independence Way, Princeton, New Jersey 08540 + Department of Physics, Princeton University, Princeton, New Jersey 08544", "aff_domain": "itp.ucsb.edu;princeton.edu;princeton.edu", "email": "itp.ucsb.edu;princeton.edu;princeton.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0+1;2;0+2", "aff_unique_norm": "NEC Research Institute;University of California, Santa Barbara;Princeton University", "aff_unique_dep": ";Institute for Theoretical Physics;Department of Physics", "aff_unique_url": "https://www.nec.com/research/;https://www.ucsb.edu;https://www.princeton.edu", "aff_unique_abbr": "NEC RI;UCSB;Princeton", "aff_campus_unique_index": "1;2;2", "aff_campus_unique": 
";Santa Barbara;Princeton", "aff_country_unique_index": "0+0;0;0+0", "aff_country_unique": "United States" }, { "id": "13baacc4ca", "title": "Escaping the Convex Hull with Extrapolated Vector Machines", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/23d2e1578544b172cca332ff74bddf5f-Abstract.html", "author": "Patrick Haffner", "abstract": "Maximum margin classifiers such as Support Vector Machines (SVMs) critically depends upon the convex hulls of the training samples of each class, as they implicitly search for the minimum distance between the convex hulls. We propose Extrapolated Vec(cid:173) tor Machines (XVMs) which rely on extrapolations outside these convex hulls. XVMs improve SVM generalization very significantly on the MNIST [7] OCR data. They share similarities with the Fisher discriminant: maximize the inter-class margin while mini(cid:173) mizing the intra-class disparity.", "bibtex": "@inproceedings{NIPS2001_23d2e157,\n author = {Haffner, Patrick},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Escaping the Convex Hull with Extrapolated Vector Machines},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/23d2e1578544b172cca332ff74bddf5f-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/23d2e1578544b172cca332ff74bddf5f-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/23d2e1578544b172cca332ff74bddf5f-Metadata.json", "review": "", "metareview": "", "pdf_size": 1452926, "gs_citation": 18, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13065296389215336502&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "AT&T Labs-Research, 200 Laurel Ave, Middletown, NJ 07748", "aff_domain": "research.att.com", "email": "research.att.com", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "AT&T Labs-Research", "aff_unique_dep": "", "aff_unique_url": "https://www.att.com/labs", "aff_unique_abbr": "AT&T Labs", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "id": "bfc3fedbeb", "title": "Estimating Car Insurance Premia: a Case Study in High-Dimensional Data Inference", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/2d405b367158e3f12d7c1e31a96b3af3-Abstract.html", "author": "Nicolas Chapados; Yoshua Bengio; Pascal Vincent; Joumana Ghosn; Charles Dugas; Ichiro Takeuchi; Linyan Meng", "abstract": "Estimating insurance premia from data is a difficult regression problem for several reasons: the large number of variables, many of which are .discrete, and the very peculiar shape of the noise distri(cid:173) bution, asymmetric with fat tails, with a large majority zeros and a few unreliable and very large values. We compare several machine learning methods for estimating insurance premia, and test them on a large data base of car insurance policies. 
We find that func(cid:173) tion approximation methods that do not optimize a squared loss, like Support Vector Machines regression, do not work well in this context. Compared methods include decision trees and generalized linear models. The best results are obtained with a mixture of experts, which better identifies the least and most risky contracts, and allows to reduce the median premium by charging more to the most risky customers.", "bibtex": "@inproceedings{NIPS2001_2d405b36,\n author = {Chapados, Nicolas and Bengio, Yoshua and Vincent, Pascal and Ghosn, Joumana and Dugas, Charles and Takeuchi, Ichiro and Meng, Linyan},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Estimating Car Insurance Premia: a Case Study in High-Dimensional Data Inference},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/2d405b367158e3f12d7c1e31a96b3af3-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/2d405b367158e3f12d7c1e31a96b3af3-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/2d405b367158e3f12d7c1e31a96b3af3-Metadata.json", "review": "", "metareview": "", "pdf_size": 800211, "gs_citation": 45, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4266807477439379921&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 15, "aff": ";;;;;;", "aff_domain": ";;;;;;", "email": ";;;;;;", "github": "", "project": "", "author_num": 7, "track": "main", "status": "Poster" }, { "id": "123d54a713", "title": "Estimating the Reliability of ICA Projections", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/f80ff32e08a25270b5f252ce39522f72-Abstract.html", "author": "Frank C. 
Meinecke; Andreas Ziehe; Motoaki Kawanabe; Klaus-Robert M\u00fcller", "abstract": "When applying unsupervised learning techniques like ICA or tem(cid:173) poral decorrelation, a key question is whether the discovered pro(cid:173) jections are reliable. In other words: can we give error bars or can we assess the quality of our separation? We use resampling meth(cid:173) ods to tackle these questions and show experimentally that our proposed variance estimations are strongly correlated to the sepa(cid:173) ration error. We demonstrate that this reliability estimation can be used to choose the appropriate ICA-model, to enhance signifi(cid:173) cantly the separation performance, and, most important, to mark the components that have a actual physical meaning. Application to 49-channel-data from an magneto encephalography (MEG) ex(cid:173) periment underlines the usefulness of our approach.", "bibtex": "@inproceedings{NIPS2001_f80ff32e,\n author = {Meinecke, Frank and Ziehe, Andreas and Kawanabe, Motoaki and M\\\"{u}ller, Klaus-Robert},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Estimating the Reliability of ICA Projections},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/f80ff32e08a25270b5f252ce39522f72-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/f80ff32e08a25270b5f252ce39522f72-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/f80ff32e08a25270b5f252ce39522f72-Metadata.json", "review": "", "metareview": "", "pdf_size": 1499626, "gs_citation": 16, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11347839052581685364&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Fraunhofer FIRST.IDA; Fraunhofer FIRST.IDA + University of Potsdam; Fraunhofer FIRST.IDA; Fraunhofer FIRST.IDA + University of Potsdam", "aff_domain": "first.fhg.de;first.fhg.de;first.fhg.de;first.fhg.de", "email": "first.fhg.de;first.fhg.de;first.fhg.de;first.fhg.de", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;0+1;0;0+1", "aff_unique_norm": "Fraunhofer Institute for Software and Systems Engineering;University of Potsdam", "aff_unique_dep": "FIRST.IDA;", "aff_unique_url": "https://www.first.ida.fraunhofer.de/;https://www.uni-potsdam.de", "aff_unique_abbr": "Fraunhofer FIRST.IDA;UP", "aff_campus_unique_index": ";", "aff_campus_unique": "", "aff_country_unique_index": "0;0+0;0;0+0", "aff_country_unique": "Germany" }, { "id": "fd33eb72b7", "title": "Exact differential equation population dynamics for integrate-and-fire neurons", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/6492d38d732122c58b44e3fdc3e9e9f3-Abstract.html", "author": "Julian Eggert; Berthold B\u00e4uml", "abstract": "In our previous work, integral equation formulations for", "bibtex": "@inproceedings{NIPS2001_6492d38d,\n author = {Eggert, Julian and B\\\"{a}uml, Berthold},\n booktitle = {Advances in Neural Information Processing 
Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Exact differential equation population dynamics for integrate-and-fire neurons},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/6492d38d732122c58b44e3fdc3e9e9f3-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/6492d38d732122c58b44e3fdc3e9e9f3-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/6492d38d732122c58b44e3fdc3e9e9f3-Metadata.json", "review": "", "metareview": "", "pdf_size": 1436532, "gs_citation": 1, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14799220626055001514&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 12, "aff": "HONDA R&D Europe (Deutschland) GmbH Future Technology Research; Institut fur Robotik und Mechatronik Deutsches Zentrum fur Luft und Raumfahrt (DLR) uberpfaffenhofen", "aff_domain": "hre-ftr.f.rd.honda.co.jp;dlr.de", "email": "hre-ftr.f.rd.honda.co.jp;dlr.de", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "Honda R&D Europe;Deutsches Zentrum fur Luft und Raumfahrt", "aff_unique_dep": "Future Technology Research;Institut fur Robotik und Mechatronik", "aff_unique_url": "https://www.honda-rc.de;https://www.dlr.de", "aff_unique_abbr": "Honda R&D Europe;DLR", "aff_campus_unique_index": "1", "aff_campus_unique": ";Uberpfaffenhofen", "aff_country_unique_index": "0;0", "aff_country_unique": "Germany" }, { "id": "498c14e687", "title": "Eye movements and the maturation of cortical orientation selectivity", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/008bd5ad93b754d500338c253d9c1770-Abstract.html", "author": "Antonino Casile; Michele Rucci", "abstract": "Neural activity appears to be a crucial component for shaping the recep- tive \ufb01elds of cortical simple cells into adjacent, oriented 
subregions alter- nately receiving ON- and OFF-center excitatory geniculate inputs. It is known that the orientation selective responses of V1 neurons are re\ufb01ned by visual experience. After eye opening, the spatiotemporal structure of neural activity in the early stages of the visual pathway depends both on the visual environment and on how the environment is scanned. We have used computational modeling to investigate how eye movements might affect the re\ufb01nement of the orientation tuning of simple cells in the pres- ence of a Hebbian scheme of synaptic plasticity. Levels of correlation be- tween the activity of simulated cells were examined while natural scenes were scanned so as to model sequences of saccades and \ufb01xational eye movements, such as microsaccades, tremor and ocular drift. The speci\ufb01c patterns of activity required for a quantitatively accurate development of simple cell receptive \ufb01elds with segregated ON and OFF subregions were observed during \ufb01xational eye movements, but not in the presence of saccades or with static presentation of natural visual input. These re- sults suggest an important role for the eye movements occurring during visual \ufb01xation in the re\ufb01nement of orientation selectivity.", "bibtex": "@inproceedings{NIPS2001_008bd5ad,\n author = {Casile, Antonino and Rucci, Michele},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Eye movements and the maturation of cortical orientation selectivity},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/008bd5ad93b754d500338c253d9c1770-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/008bd5ad93b754d500338c253d9c1770-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/008bd5ad93b754d500338c253d9c1770-Metadata.json", "review": "", "metareview": "", "pdf_size": 92677, "gs_citation": 1, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8524197895010677457&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 16, "aff": "Department of Cognitive and Neural Systems, Boston University, Boston, MA 02215; Scuola Superiore S. Anna, Pisa, Italy", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "Boston University;Scuola Superiore S. Anna", "aff_unique_dep": "Department of Cognitive and Neural Systems;", "aff_unique_url": "https://www.bu.edu;https://www.sssup.it", "aff_unique_abbr": "BU;SSSA", "aff_campus_unique_index": "0;1", "aff_campus_unique": "Boston;Pisa", "aff_country_unique_index": "0;1", "aff_country_unique": "United States;Italy" }, { "id": "c95200ac64", "title": "Face Recognition Using Kernel Methods", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/4d6b3e38b952600251ee92fe603170ff-Abstract.html", "author": "Ming-Hsuan Yang", "abstract": "Principal Component Analysis and Fisher Linear Discriminant methods have demonstrated their success in face detection, recog(cid:173) nition, and tracking. The representation in these subspace methods is based on second order statistics of the image set, and does not address higher order statistical dependencies such as the relation(cid:173) ships among three or more pixels. 
Recently Higher Order Statistics and Independent Component Analysis (ICA) have been used as in(cid:173) formative low dimensional representations for visual recognition. In this paper, we investigate the use of Kernel Principal Compo(cid:173) nent Analysis and Kernel Fisher Linear Discriminant for learning low dimensional representations for face recognition, which we call Kernel Eigenface and Kernel Fisherface methods. While Eigenface and Fisherface methods aim to find projection directions based on the second order correlation of samples, Kernel Eigenface and Ker(cid:173) nel Fisherface methods provide generalizations which take higher order correlations into account. We compare the performance of kernel methods with Eigenface, Fisherface and ICA-based meth(cid:173) ods for face recognition with variation in pose, scale, lighting and expression. Experimental results show that kernel methods pro(cid:173) vide better representations and achieve lower error rates for face recognition.", "bibtex": "@inproceedings{NIPS2001_4d6b3e38,\n author = {Yang, Ming-Hsuan},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Face Recognition Using Kernel Methods},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/4d6b3e38b952600251ee92fe603170ff-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/4d6b3e38b952600251ee92fe603170ff-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/4d6b3e38b952600251ee92fe603170ff-Metadata.json", "review": "", "metareview": "", "pdf_size": 925273, "gs_citation": 1147, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10781137757630704359&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 16, "aff": "Honda Fundamental Research Labs", "aff_domain": "hra.com", "email": "hra.com", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "Honda Research Institute", "aff_unique_dep": "Fundamental Research Labs", "aff_unique_url": "https://www.honda-ri.com", "aff_unique_abbr": "Honda R&D", "aff_country_unique_index": "0", "aff_country_unique": "Japan" }, { "id": "91a659059c", "title": "Fast Parameter Estimation Using Green's Functions", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/e7e23670481ac78b3c4122a99ba60573-Abstract.html", "author": "K. Wong; F. Li", "abstract": "We propose a method for the fast estimation of hyperparameters in large networks, based on the linear response relation in the cav(cid:173) ity method, and an empirical measurement of the Green's func(cid:173) tion. Simulation results show that it is efficient and precise, when compared with cross-validation and other techniques which require matrix inversion.", "bibtex": "@inproceedings{NIPS2001_e7e23670,\n author = {Wong, K. and Li, F.},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Fast Parameter Estimation Using Green\textquotesingle s Functions},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/e7e23670481ac78b3c4122a99ba60573-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/e7e23670481ac78b3c4122a99ba60573-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/e7e23670481ac78b3c4122a99ba60573-Metadata.json", "review": "", "metareview": "", "pdf_size": 1432818, "gs_citation": 6, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1562679859493390509&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "Department of Physics, Hong Kong University of Science and Technology, Clear Water Bay, Hong Kong; Department of Applied Physics, Xian Jiaotong University, Xian, China 710049", "aff_domain": "ust.hk;xjtu.edu.cn", "email": "ust.hk;xjtu.edu.cn", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "Hong Kong University of Science and Technology;Xian Jiao Tong University", "aff_unique_dep": "Department of Physics;Department of Applied Physics", "aff_unique_url": "https://www.ust.hk;http://www.xjtu.edu.cn", "aff_unique_abbr": "HKUST;XJTU", "aff_campus_unique_index": "0;1", "aff_campus_unique": "Hong Kong SAR;Xian", "aff_country_unique_index": "0;0", "aff_country_unique": "China" }, { "id": "c362b59a99", "title": "Fast and Robust Classification using Asymmetric AdaBoost and a Detector Cascade", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/0b1ec366924b26fc98fa7b71a9c249cf-Abstract.html", "author": "Paul Viola; Michael Jones", "abstract": "This paper develops a new approach for extremely fast detection in do- mains where the distribution of positive and negative examples is highly skewed (e.g. face detection or database retrieval). 
In such domains a cascade of simple classi\ufb01ers each trained to achieve high detection rates and modest false positive rates can yield a \ufb01nal detector with many desir- able features: including high detection rates, very low false positive rates, and fast performance. Achieving extremely high detection rates, rather than low error, is not a task typically addressed by machine learning al- gorithms. We propose a new variant of AdaBoost as a mechanism for training the simple classi\ufb01ers used in the cascade. Experimental results in the domain of face detection show the training algorithm yields sig- ni\ufb01cant improvements in performance over conventional AdaBoost. The \ufb01nal face detection system can process 15 frames per second, achieves over 90% detection, and a false positive rate of 1 in a 1,000,000.", "bibtex": "@inproceedings{NIPS2001_0b1ec366,\n author = {Viola, Paul and Jones, Michael},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Fast and Robust Classification using Asymmetric AdaBoost and a Detector Cascade},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/0b1ec366924b26fc98fa7b71a9c249cf-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/0b1ec366924b26fc98fa7b71a9c249cf-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/0b1ec366924b26fc98fa7b71a9c249cf-Metadata.json", "review": "", "metareview": "", "pdf_size": 139289, "gs_citation": 858, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3935261915708343946&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 16, "aff": "Mitsubishi Electric Research Lab; Mitsubishi Electric Research Lab", "aff_domain": "merl.com;merl.com", "email": "merl.com;merl.com", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Mitsubishi Electric Research Laboratories", "aff_unique_dep": "Research Lab", "aff_unique_url": "https://www.merl.com", "aff_unique_abbr": "MERL", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "2baa13b37a", "title": "Fast, Large-Scale Transformation-Invariant Clustering", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/95f6870ff3dcd442254e334a9033d349-Abstract.html", "author": "Brendan J. Frey; Nebojsa Jojic", "abstract": "In previous work on", "bibtex": "@inproceedings{NIPS2001_95f6870f,\n author = {Frey, Brendan J and Jojic, Nebojsa},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Fast, Large-Scale Transformation-Invariant Clustering},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/95f6870ff3dcd442254e334a9033d349-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/95f6870ff3dcd442254e334a9033d349-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/95f6870ff3dcd442254e334a9033d349-Metadata.json", "review": "", "metareview": "", "pdf_size": 122686, "gs_citation": 48, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4302433881080183501&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "Machine Learning Group, University of Toronto; Vision Technology Group, Microsoft Research", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "University of Toronto;Microsoft", "aff_unique_dep": "Machine Learning Group;Vision Technology Group", "aff_unique_url": "https://www.utoronto.ca;https://www.microsoft.com/en-us/research", "aff_unique_abbr": "U of T;MSR", "aff_campus_unique_index": "0", "aff_campus_unique": "Toronto;", "aff_country_unique_index": "0;1", "aff_country_unique": "Canada;United States" }, { "id": "42849e1707", "title": "Fragment Completion in Humans and Machines", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/0070d23b06b1486a538c0eaa45dd167a-Abstract.html", "author": "David Jacobs; Bas Rokers; Archisman Rudra; Zili Liu", "abstract": "Partial information can trigger a complete memory. At the same time, human memory is not perfect. A cue can contain enough information to specify an item in memory, but fail to trigger that item. In the context of word memory, we present experiments that demonstrate some basic patterns in human memory errors. We use cues that consist of word frag- ments. 
We show that short and long cues are completed more accurately than medium length ones and study some of the factors that lead to this behavior. We then present a novel computational model that shows some of the \ufb02exibility and patterns of errors that occur in human memory. This model iterates between bottom-up and top-down computations. These are tied together using a Markov model of words that allows memory to be accessed with a simple feature set, and enables a bottom-up process to compute a probability distribution of possible completions of word frag- ments, in a manner similar to models of visual perceptual completion.", "bibtex": "@inproceedings{NIPS2001_0070d23b,\n author = {Jacobs, David and Rokers, Bas and Rudra, Archisman and Liu, Zili},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Fragment Completion in Humans and Machines},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/0070d23b06b1486a538c0eaa45dd167a-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/0070d23b06b1486a538c0eaa45dd167a-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/0070d23b06b1486a538c0eaa45dd167a-Metadata.json", "review": "", "metareview": "", "pdf_size": 52357, "gs_citation": 7, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16713559911522169839&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 21, "aff": "NEC Research Institute; Psychology Department at UCLA; CS Department at NYU; Psychology Department at UCLA", "aff_domain": "research.nj.nec.com;psych.ucla.edu;cs.nyu.edu;psych.ucla.edu", "email": "research.nj.nec.com;psych.ucla.edu;cs.nyu.edu;psych.ucla.edu", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;1;2;1", "aff_unique_norm": "NEC Research 
Institute;University of California, Los Angeles;New York University", "aff_unique_dep": ";Psychology Department;Computer Science Department", "aff_unique_url": "https://www.neclab.eu;https://www.ucla.edu;https://www.nyu.edu", "aff_unique_abbr": "NEC RI;UCLA;NYU", "aff_campus_unique_index": "1;2;1", "aff_campus_unique": ";Los Angeles;New York", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "id": "41da3054f8", "title": "Gaussian Process Regression with Mismatched Models", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/d68a18275455ae3eaa2c291eebb46e6d-Abstract.html", "author": "Peter Sollich", "abstract": "Learning curves for Gaussian process regression are well understood when the 'student' model happens to match the 'teacher' (true data generation process). I derive approximations to the learning curves for the more generic case of mismatched models, and find very rich behaviour: For large input space dimensionality, where the results become exact, there are universal (student-independent) plateaux in the learning curve, with transitions in between that can exhibit arbitrarily many over-fitting maxima; over-fitting can occur even if the student estimates the teacher noise level correctly. In lower dimensions, plateaux also appear, and the learning curve remains dependent on the mismatch between student and teacher even in the asymptotic limit of a large number of training examples. Learn(cid:173) ing with excessively strong smoothness assumptions can be partic(cid:173) ularly dangerous: For example, a student with a standard radial basis function covariance function will learn a rougher teacher func(cid:173) tion only logarithmically slowly. All predictions are confirmed by simulations.", "bibtex": "@inproceedings{NIPS2001_d68a1827,\n author = {Sollich, Peter},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Gaussian Process Regression with Mismatched Models},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/d68a18275455ae3eaa2c291eebb46e6d-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/d68a18275455ae3eaa2c291eebb46e6d-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/d68a18275455ae3eaa2c291eebb46e6d-Metadata.json", "review": "", "metareview": "", "pdf_size": 1616658, "gs_citation": 50, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2748808864444000315&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 16, "aff": "Department of Mathematics, King's College London", "aff_domain": "kcl.ac.uk", "email": "kcl.ac.uk", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "King's College London", "aff_unique_dep": "Department of Mathematics", "aff_unique_url": "https://www.kcl.ac.uk", "aff_unique_abbr": "KCL", "aff_campus_unique_index": "0", "aff_campus_unique": "London", "aff_country_unique_index": "0", "aff_country_unique": "United Kingdom" }, { "id": "96c5de354d", "title": "Generalizable Relational Binding from Coarse-coded Distributed Representations", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/519c84155964659375821f7ca576f095-Abstract.html", "author": "Randall C. O'Reilly; R. S. Busby", "abstract": "We present a model of binding of relationship information in a spatial domain (e.g., square above triangle) that uses low-order coarse-coded", "bibtex": "@inproceedings{NIPS2001_519c8415,\n author = {O\\textquotesingle Reilly, Randall and Busby, R.},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Generalizable Relational Binding from Coarse-coded Distributed Representations},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/519c84155964659375821f7ca576f095-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/519c84155964659375821f7ca576f095-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/519c84155964659375821f7ca576f095-Metadata.json", "review": "", "metareview": "", "pdf_size": 161309, "gs_citation": 77, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3660873289356016695&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 15, "aff": "Department of Psychology, University of Colorado Boulder; Department of Psychology, University of Colorado Boulder", "aff_domain": "psych.colorado.edu;Colorado.EDU", "email": "psych.colorado.edu;Colorado.EDU", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of Colorado Boulder", "aff_unique_dep": "Department of Psychology", "aff_unique_url": "https://www.colorado.edu", "aff_unique_abbr": "CU Boulder", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Boulder", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "3c12852b86", "title": "Generalization Performance of Some Learning Problems in Hilbert Functional Spaces", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/4e62e752ae53fb6a6eebd0f6146aa702-Abstract.html", "author": "T. Zhang", "abstract": "We investigate the generalization performance of some learning prob- lems in Hilbert functional Spaces. We introduce a notion of convergence of the estimated functional predictor to the best underlying predictor, and obtain an estimate on the rate of the convergence. 
This estimate allows us to derive generalization bounds on some learning formulations.", "bibtex": "@inproceedings{NIPS2001_4e62e752,\n author = {Zhang, T.},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Generalization Performance of Some Learning Problems in Hilbert Functional Spaces},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/4e62e752ae53fb6a6eebd0f6146aa702-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/4e62e752ae53fb6a6eebd0f6146aa702-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/4e62e752ae53fb6a6eebd0f6146aa702-Metadata.json", "review": "", "metareview": "", "pdf_size": 91941, "gs_citation": 6, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5496390813706980109&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 16, "aff": "IBM T.J. Watson Research Center", "aff_domain": "watson.ibm.com", "email": "watson.ibm.com", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "IBM", "aff_unique_dep": "Research Center", "aff_unique_url": "https://www.ibm.com/research/watson", "aff_unique_abbr": "IBM", "aff_campus_unique_index": "0", "aff_campus_unique": "T.J. Watson", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "id": "0f1952774d", "title": "Generating velocity tuning by asymmetric recurrent connections", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/3f088ebeda03513be71d34d214291986-Abstract.html", "author": "Xiaohui Xie; Martin A. Giese", "abstract": "Asymmetric lateral connections are one possible mechanism that can ac- count for the direction selectivity of cortical neurons. We present a math- ematical analysis for a class of these models. 
Contrasting with earlier theoretical work that has relied on methods from linear systems theory, we study the network\u2019s nonlinear dynamic properties that arise when the threshold nonlinearity of the neurons is taken into account. We show that such networks have stimulus-locked traveling pulse solutions that are appropriate for modeling the responses of direction selective cortical neurons. In addition, our analysis shows that outside a certain regime of stimulus speeds the stability of this solutions breaks down giving rise to another class of solutions that are characterized by speci\ufb01c spatio- temporal periodicity. This predicts that if direction selectivity in the cor- tex is mainly achieved by asymmetric lateral connections lurching activ- ity waves might be observable in ensembles of direction selective cortical neurons within appropriate regimes of the stimulus speed.", "bibtex": "@inproceedings{NIPS2001_3f088ebe,\n author = {Xie, Xiaohui and Giese, Martin},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Generating velocity tuning by asymmetric recurrent connections},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/3f088ebeda03513be71d34d214291986-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/3f088ebeda03513be71d34d214291986-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/3f088ebeda03513be71d34d214291986-Metadata.json", "review": "", "metareview": "", "pdf_size": 143353, "gs_citation": 0, "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:0ylRdsSzIXgJ:scholar.google.com/&scioq=Generating+velocity+tuning+by+asymmetric+recurrent+connections&hl=en&as_sdt=0,14", "gs_version_total": 11, "aff": "Dept. 
of Brain and Cognitive Sciences and CBCL, Massachusetts Institute of Technology, Cambridge, MA 02139; Dept. for Cognitive Neurology, University Clinic T\u00fcbingen, Max-Planck-Institute for Biological Cybernetics, 72076 T\u00fcbingen, Germany", "aff_domain": "mit.edu;mit.edu", "email": "mit.edu;mit.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "Massachusetts Institute of Technology;University Clinic T\u00fcbingen", "aff_unique_dep": "Dept. of Brain and Cognitive Sciences;Dept. for Cognitive Neurology", "aff_unique_url": "https://www.mit.edu;https://www.uniklinik-tuebingen.de", "aff_unique_abbr": "MIT;", "aff_campus_unique_index": "0;1", "aff_campus_unique": "Cambridge;T\u00fcbingen", "aff_country_unique_index": "0;1", "aff_country_unique": "United States;Germany" }, { "id": "2c8ae1c799", "title": "Geometrical Singularities in the Neuromanifold of Multilayer Perceptrons", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/65d2ea03425887a717c435081cfc5dbb-Abstract.html", "author": "Shun-ichi Amari; Hyeyoung Park; Tomoko Ozeki", "abstract": "Singularities are ubiquitous in the parameter space of hierarchical models such as multilayer perceptrons. At singularities, the Fisher information matrix degenerates, and the Cramer-Rao paradigm does no more hold, implying that the classical model selection the(cid:173) ory such as AIC and MDL cannot be applied. It is important to study the relation between the generalization error and the training error at singularities. The present paper demonstrates a method of analyzing these errors both for the maximum likelihood estima(cid:173) tor and the Bayesian predictive distribution in terms of Gaussian random fields, by using simple models.", "bibtex": "@inproceedings{NIPS2001_65d2ea03,\n author = {Amari, Shun-ichi and Park, Hyeyoung and Ozeki, Tomoko},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. 
Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Geometrical Singularities in the Neuromanifold of Multilayer Perceptrons},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/65d2ea03425887a717c435081cfc5dbb-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/65d2ea03425887a717c435081cfc5dbb-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/65d2ea03425887a717c435081cfc5dbb-Metadata.json", "review": "", "metareview": "", "pdf_size": 1175414, "gs_citation": 33, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18217856875412602323&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 14, "aff": ";;", "aff_domain": ";;", "email": ";;", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster" }, { "id": "7e9c6751af", "title": "Global Coordination of Local Linear Models", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/850af92f8d9903e7a4e0559a98ecc857-Abstract.html", "author": "Sam T. Roweis; Lawrence K. Saul; Geoffrey E. Hinton", "abstract": "High dimensional data that lies on or near a low dimensional manifold can be de- scribed by a collection of local linear models. Such a description, however, does not provide a global parameterization of the manifold\u2014arguably an important goal of unsupervised learning. In this paper, we show how to learn a collection of local linear models that solves this more dif\ufb01cult problem. Our local linear models are represented by a mixture of factor analyzers, and the \u201cglobal coordi- nation\u201d of these models is achieved by adding a regularizing term to the standard maximum likelihood objective function. The regularizer breaks a degeneracy in the mixture model\u2019s parameter space, favoring models whose internal coor- dinate systems are aligned in a consistent way. 
As a result, the internal coor- dinates change smoothly and continuously as one traverses a connected path on the manifold\u2014even when the path crosses the domains of many different local models. The regularizer takes the form of a Kullback-Leibler divergence and illustrates an unexpected application of variational methods: not to perform ap- proximate inference in intractable probabilistic models, but to learn more useful internal representations in tractable ones.", "bibtex": "@inproceedings{NIPS2001_850af92f,\n author = {Roweis, Sam and Saul, Lawrence and Hinton, Geoffrey E},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Global Coordination of Local Linear Models},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/850af92f8d9903e7a4e0559a98ecc857-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/850af92f8d9903e7a4e0559a98ecc857-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/850af92f8d9903e7a4e0559a98ecc857-Metadata.json", "review": "", "metareview": "", "pdf_size": 304366, "gs_citation": 312, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4085684365904418224&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 18, "aff": "Department of Computer Science, University of Toronto; Department of Computer and Information Science, University of Pennsylvania; Department of Computer Science, University of Toronto", "aff_domain": ";;", "email": ";;", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;0", "aff_unique_norm": "University of Toronto;University of Pennsylvania", "aff_unique_dep": "Department of Computer Science;Department of Computer and Information Science", "aff_unique_url": "https://www.utoronto.ca;https://www.upenn.edu", "aff_unique_abbr": 
"U of T;UPenn", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Toronto;", "aff_country_unique_index": "0;1;0", "aff_country_unique": "Canada;United States" }, { "id": "780adb8579", "title": "Grammar Transfer in a Second Order Recurrent Neural Network", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/de73998802680548b916f1947ffbad76-Abstract.html", "author": "Michiro Negishi; Stephen J. Hanson", "abstract": "It has been known that people, after being exposed to sentences generated by an artificial grammar, acquire implicit grammatical knowledge and are able to transfer the knowledge to inputs that are generated by a modified grammar. We show that a second order recurrent neural network is able to transfer grammatical knowledge from one language (generated by a Finite State Machine) to another language which differ both in vocabularies and syntax. Representa(cid:173) tion of the grammatical knowledge in the network is analyzed using linear discriminant analysis.", "bibtex": "@inproceedings{NIPS2001_de739988,\n author = {Negishi, Michiro and Hanson, Stephen},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Grammar Transfer in a Second Order Recurrent Neural Network},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/de73998802680548b916f1947ffbad76-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/de73998802680548b916f1947ffbad76-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/de73998802680548b916f1947ffbad76-Metadata.json", "review": "", "metareview": "", "pdf_size": 1258832, "gs_citation": 0, "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:rAwT0dBoXKsJ:scholar.google.com/&scioq=Grammar+Transfer+in+a+Second+Order+Recurrent+Neural+Network&hl=en&as_sdt=0,5", "gs_version_total": 11, "aff": "Department of Psychology, Rutgers University; Department of Psychology, Rutgers University", "aff_domain": "psychology.rutgers.edu;psychology.rutgers.edu", "email": "psychology.rutgers.edu;psychology.rutgers.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Rutgers University", "aff_unique_dep": "Department of Psychology", "aff_unique_url": "https://www.rutgers.edu", "aff_unique_abbr": "Rutgers", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "7a4965560f", "title": "Grammatical Bigrams", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/89885ff2c83a10305ee08bd507c1049c-Abstract.html", "author": "Mark A. Paskin", "abstract": "Unsupervised learning algorithms have been derived for several sta(cid:173) tistical models of English grammar, but their computational com(cid:173) plexity makes applying them to large data sets intractable. This paper presents a probabilistic model of English grammar that is much simpler than conventional models, but which admits an effi(cid:173) cient EM training algorithm. 
The model is based upon grammat(cid:173) ical bigrams, i.e. , syntactic relationships between pairs of words. We present the results of experiments that quantify the represen(cid:173) tational adequacy of the grammatical bigram model, its ability to generalize from labelled data, and its ability to induce syntactic structure from large amounts of raw text.", "bibtex": "@inproceedings{NIPS2001_89885ff2,\n author = {Paskin, Mark},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Grammatical Bigrams},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/89885ff2c83a10305ee08bd507c1049c-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/89885ff2c83a10305ee08bd507c1049c-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/89885ff2c83a10305ee08bd507c1049c-Metadata.json", "review": "", "metareview": "", "pdf_size": 1440545, "gs_citation": 70, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2653353872922186996&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 12, "aff": "Computer Science Division, University of California, Berkeley", "aff_domain": "cs.berkeley.edu", "email": "cs.berkeley.edu", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "University of California, Berkeley", "aff_unique_dep": "Computer Science Division", "aff_unique_url": "https://www.berkeley.edu", "aff_unique_abbr": "UC Berkeley", "aff_campus_unique_index": "0", "aff_campus_unique": "Berkeley", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "id": "702d6fa0d2", "title": "Group Redundancy Measures Reveal Redundancy Reduction in the Auditory Pathway", "site": 
"https://papers.nips.cc/paper_files/paper/2001/hash/05a5cf06982ba7892ed2a6d38fe832d6-Abstract.html", "author": "Gal Chechik; Amir Globerson; M. J. Anderson; E. D. Young; Israel Nelken; Naftali Tishby", "abstract": "The way groups of auditory neurons interact to code acoustic in(cid:173) formation is investigated using an information theoretic approach. We develop measures of redundancy among groups of neurons, and apply them to the study of collaborative coding efficiency in two processing stations in the auditory pathway: the inferior colliculus (IC) and the primary auditory cortex (AI). Under two schemes for the coding of the acoustic content, acoustic segments coding and stimulus identity coding, we show differences both in information content and group redundancies between IC and AI neurons. These results provide for the first time a direct evidence for redundancy reduction along the ascending auditory pathway, as has been hy(cid:173) pothesized for theoretical considerations [Barlow 1959,2001]. The redundancy effects under the single-spikes coding scheme are signif(cid:173) icant only for groups larger than ten cells, and cannot be revealed with the redundancy measures that use only pairs of cells. The results suggest that the auditory system transforms low level rep(cid:173) resentations that contain redundancies due to the statistical struc(cid:173) ture of natural stimuli, into a representation in which cortical neu(cid:173) rons extract rare and independent component of complex acoustic signals, that are useful for auditory scene analysis.", "bibtex": "@inproceedings{NIPS2001_05a5cf06,\n author = {Chechik, Gal and Globerson, Amir and Anderson, M. and Young, E. and Nelken, Israel and Tishby, Naftali},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Group Redundancy Measures Reveal Redundancy Reduction in the Auditory Pathway},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/05a5cf06982ba7892ed2a6d38fe832d6-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/05a5cf06982ba7892ed2a6d38fe832d6-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/05a5cf06982ba7892ed2a6d38fe832d6-Metadata.json", "review": "", "metareview": "", "pdf_size": 1523499, "gs_citation": 59, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8726945086771422634&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 14, "aff": "School of Computer Science and Engineering and The Interdisciplinary Center for Neural Computation Hebrew University of Jerusalem, Israel; School of Computer Science and Engineering and The Interdisciplinary Center for Neural Computation Hebrew University of Jerusalem, Israel; School of Computer Science and Engineering and The Interdisciplinary Center for Neural Computation Hebrew University of Jerusalem, Israel; Department of Biomedical Engineering Johns Hopkins University, Baltimore, MD, USA; Department of Biomedical Engineering Johns Hopkins University, Baltimore, MD, USA; Department of Physiology, Hadassah Medical School and The Interdisciplinary Center for Neural Computation Hebrew University of Jerusalem, Israel", "aff_domain": "cs.huji.ac.il; ; ; ; ; ", "email": "cs.huji.ac.il; ; ; ; ; ", "github": "", "project": "", "author_num": 6, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;1;1;0", "aff_unique_norm": "Hebrew University of Jerusalem;Johns Hopkins University", "aff_unique_dep": "School of Computer Science and Engineering;Department of Biomedical Engineering", "aff_unique_url": "http://www.huji.ac.il;https://www.jhu.edu", "aff_unique_abbr": "HUJI;JHU", "aff_campus_unique_index": "0;0;0;1;1;0", 
"aff_campus_unique": "Jerusalem;Baltimore", "aff_country_unique_index": "0;0;0;1;1;0", "aff_country_unique": "Israel;United States" }, { "id": "b226584c51", "title": "Grouping and dimensionality reduction by locally linear embedding", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/a5a61717dddc3501cfdf7a4e22d7dbaa-Abstract.html", "author": "Marzia Polito; Pietro Perona", "abstract": "(LLE)", "bibtex": "@inproceedings{NIPS2001_a5a61717,\n author = {Polito, Marzia and Perona, Pietro},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Grouping and dimensionality reduction by locally linear embedding},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/a5a61717dddc3501cfdf7a4e22d7dbaa-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/a5a61717dddc3501cfdf7a4e22d7dbaa-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/a5a61717dddc3501cfdf7a4e22d7dbaa-Metadata.json", "review": "", "metareview": "", "pdf_size": 1269438, "gs_citation": 210, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12104719857390471042&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 9, "aff": "Division of Physics, Mathematics and Astronomy, California Institute of Technology; Division of Engineering and Applied Mathematics, California Institute of Technology", "aff_domain": "caltech.edu;caltech.edu", "email": "caltech.edu;caltech.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "California Institute of Technology", "aff_unique_dep": "Division of Physics, Mathematics and Astronomy", "aff_unique_url": "https://www.caltech.edu", "aff_unique_abbr": "Caltech", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Pasadena", "aff_country_unique_index": 
"0;0", "aff_country_unique": "United States" }, { "id": "63bac3fa97", "title": "Grouping with Bias", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/8038da89e49ac5eabb489cfc6cea9fc1-Abstract.html", "author": "Stella X. Yu; Jianbo Shi", "abstract": "With the optimization of pattern discrimination as a goal, graph partitioning approaches often lack the capability to integrate prior knowledge to guide grouping. In this paper, we consider priors from unitary generative models, partially labeled data and spatial attention. These priors are modelled as constraints in the solution space. By imposing uniformity condition on the constraints, we restrict the feasible space to one of smooth solutions. A subspace projection method is developed to solve this constrained eigenprob(cid:173) lema We demonstrate that simple priors can greatly improve image segmentation results.", "bibtex": "@inproceedings{NIPS2001_8038da89,\n author = {Yu, Stella X. and Shi, Jianbo},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Grouping with Bias},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/8038da89e49ac5eabb489cfc6cea9fc1-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/8038da89e49ac5eabb489cfc6cea9fc1-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/8038da89e49ac5eabb489cfc6cea9fc1-Metadata.json", "review": "", "metareview": "", "pdf_size": 1817264, "gs_citation": 106, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1376898102286627163&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Robotics Institute, Carnegie Mellon University, Center for the Neural Basis of Cognition, Pittsburgh, PA 15213-3890; Robotics Institute, Carnegie Mellon University, 5000 Forbes Ave, Pittsburgh, PA 15213-3890", "aff_domain": "cs.cmu.edu;cs.cmu.edu", "email": "cs.cmu.edu;cs.cmu.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Carnegie Mellon University", "aff_unique_dep": "Robotics Institute", "aff_unique_url": "https://www.cmu.edu", "aff_unique_abbr": "CMU", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Pittsburgh", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "f8d659d7b0", "title": "Hyperbolic Self-Organizing Maps for Semantic Navigation", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/093b60fd0557804c8ba0cbf1453da22f-Abstract.html", "author": "Jorg Ontrup; Helge Ritter", "abstract": "We introduce a new type of Self-Organizing Map (SOM) to navigate in the Semantic Space of large text collections. We propose a \u201chyper- bolic SOM\u201d (HSOM) based on a regular tesselation of the hyperbolic plane, which is a non-euclidean space characterized by constant negative gaussian curvature. 
The exponentially increasing size of a neighborhood around a point in hyperbolic space provides more freedom to map the complex information space arising from language into spatial relations. We describe experiments, showing that the HSOM can successfully be applied to text categorization tasks and yields results comparable to other state-of-the-art methods.", "bibtex": "@inproceedings{NIPS2001_093b60fd,\n author = {Ontrup, Jorg and Ritter, Helge},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Hyperbolic Self-Organizing Maps for Semantic Navigation},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/093b60fd0557804c8ba0cbf1453da22f-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/093b60fd0557804c8ba0cbf1453da22f-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/093b60fd0557804c8ba0cbf1453da22f-Metadata.json", "review": "", "metareview": "", "pdf_size": 531769, "gs_citation": 71, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17614706185221238254&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Neuroinformatics Group, Faculty of Technology, Bielefeld University; Neuroinformatics Group, Faculty of Technology, Bielefeld University", "aff_domain": "techfak.uni-bielefeld.de;techfak.uni-bielefeld.de", "email": "techfak.uni-bielefeld.de;techfak.uni-bielefeld.de", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Bielefeld University", "aff_unique_dep": "Neuroinformatics Group, Faculty of Technology", "aff_unique_url": "https://www.uni-bielefeld.de", "aff_unique_abbr": "Uni Bielefeld", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "Germany" }, { "id": 
"f10948464f", "title": "Improvisation and Learning", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/654ad60ebd1ae29cedc37da04b6b0672-Abstract.html", "author": "Judy A Franklin", "abstract": "This article presents a 2-phase computational learning model and appli- cation. As a demonstration, a system has been built, called CHIME for Computer Human Interacting Musical Entity. In phase 1 of training, re- current back-propagation trains the machine to reproduce 3 jazz melodies. The recurrent network is expanded and is further trained in phase 2 with a reinforcement learning algorithm and a critique produced by a set of basic rules for jazz improvisation. After each phase CHIME can interactively improvise with a human in real time.", "bibtex": "@inproceedings{NIPS2001_654ad60e,\n author = {Franklin, Judy A},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Improvisation and Learning},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/654ad60ebd1ae29cedc37da04b6b0672-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/654ad60ebd1ae29cedc37da04b6b0672-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/654ad60ebd1ae29cedc37da04b6b0672-Metadata.json", "review": "", "metareview": "", "pdf_size": 367798, "gs_citation": -1, "gs_cited_by_link": "", "gs_version_total": -1, "aff": "Computer Science Department, Smith College, Northampton, MA 01063", "aff_domain": "cs.smith.edu", "email": "cs.smith.edu", "github": "", "project": "http://www.cs.smith.edu/~jfrankli", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "Smith College", "aff_unique_dep": "Computer Science Department", "aff_unique_url": "https://www.smith.edu", "aff_unique_abbr": "Smith College", "aff_campus_unique_index": 
"0", "aff_campus_unique": "Northampton", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "id": "cdf3b46455", "title": "Incorporating Invariances in Non-Linear Support Vector Machines", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/07811dc6c422334ce36a09ff5cd6fe71-Abstract.html", "author": "Olivier Chapelle; Bernhard Sch\u00f6lkopf", "abstract": "The choice of an SVM kernel corresponds to the choice of a rep(cid:173) resentation of the data in a feature space and, to improve per(cid:173) formance, it should therefore incorporate prior knowledge such as known transformation invariances. We propose a technique which extends earlier work and aims at incorporating invariances in non(cid:173) linear kernels. We show on a digit recognition task that the pro(cid:173) posed approach is superior to the Virtual Support Vector method, which previously had been the method of choice.", "bibtex": "@inproceedings{NIPS2001_07811dc6,\n author = {Chapelle, Olivier and Sch\\\"{o}lkopf, Bernhard},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Incorporating Invariances in Non-Linear Support Vector Machines},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/07811dc6c422334ce36a09ff5cd6fe71-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/07811dc6c422334ce36a09ff5cd6fe71-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/07811dc6c422334ce36a09ff5cd6fe71-Metadata.json", "review": "", "metareview": "", "pdf_size": 1329166, "gs_citation": 97, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10532041035766820703&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "LIP6, Paris, France + Biowulf Technologies; Max-Planck-Institute, T\u00fcbingen, Germany + Biowulf Technologies", "aff_domain": "lip6.fr;tuebingen.mpg.de", "email": "lip6.fr;tuebingen.mpg.de", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0+1;2+1", "aff_unique_norm": "Laboratoire d'Informatique de Paris 6;Biowulf Technologies;Max-Planck-Institute", "aff_unique_dep": "LIP6;;", "aff_unique_url": "https://www.lip6.fr;https://www.biowulf.com;https://www.mpg.de", "aff_unique_abbr": "LIP6;;MPI", "aff_campus_unique_index": "0;2", "aff_campus_unique": "Paris;;T\u00fcbingen", "aff_country_unique_index": "0+1;2+1", "aff_country_unique": "France;United States;Germany" }, { "id": "d88c689bab", "title": "Incremental A*", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/a591024321c5e2bdbd23ed35f0574dde-Abstract.html", "author": "S. Koenig; M. Likhachev", "abstract": "Incremental search techniques \ufb01nd optimal solutions to series of similar search tasks much faster than is possible by solving each search task from scratch. 
While researchers have developed incremental versions of uninformed search methods, we develop an incremental version of A", "bibtex": "@inproceedings{NIPS2001_a5910243,\n author = {Koenig, S. and Likhachev, M.},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Incremental A\\ast },\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/a591024321c5e2bdbd23ed35f0574dde-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/a591024321c5e2bdbd23ed35f0574dde-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/a591024321c5e2bdbd23ed35f0574dde-Metadata.json", "review": "", "metareview": "", "pdf_size": 133601, "gs_citation": 232, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1695392208448765360&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 16, "aff": ";", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster" }, { "id": "61d87351c0", "title": "Incremental Learning and Selective Sampling via Parametric Optimization Framework for SVM", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/405e28906322882c5be9b4b27f4c35fd-Abstract.html", "author": "Shai Fine; Katya Scheinberg", "abstract": "We propose a framework based on a parametric quadratic program(cid:173) ming (QP) technique to solve the support vector machine (SVM) training problem. This framework, can be specialized to obtain two SVM optimization methods. The first solves the fixed bias prob(cid:173) lem, while the second starts with an optimal solution for a fixed bias problem and adjusts the bias until the optimal value is found. The later method can be applied in conjunction with any other ex(cid:173) isting technique which obtains a fixed bias solution. 
Moreover, the second method can also be used independently to solve the com(cid:173) plete SVM training problem. A combination of these two methods is more flexible than each individual method and, among other things, produces an incremental algorithm which exactly solve the 1-Norm Soft Margin SVM optimization problem. Applying Selec(cid:173) tive Sampling techniques may further boost convergence.", "bibtex": "@inproceedings{NIPS2001_405e2890,\n author = {Fine, Shai and Scheinberg, Katya},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Incremental Learning and Selective Sampling via Parametric Optimization Framework for SVM},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/405e28906322882c5be9b4b27f4c35fd-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/405e28906322882c5be9b4b27f4c35fd-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/405e28906322882c5be9b4b27f4c35fd-Metadata.json", "review": "", "metareview": "", "pdf_size": 1433225, "gs_citation": 29, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6454446958344758913&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "IBM T. J. Watson Research Center; IBM T. J. Watson Research Center", "aff_domain": "us.ibm.com;us.ibm.com", "email": "us.ibm.com;us.ibm.com", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "IBM", "aff_unique_dep": "IBM", "aff_unique_url": "https://www.ibm.com/research/watson", "aff_unique_abbr": "IBM", "aff_campus_unique_index": "0;0", "aff_campus_unique": "T. J. 
Watson", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "ef3853f24a", "title": "Infinite Mixtures of Gaussian Process Experts", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/9afefc52942cb83c7c1f14b2139b09ba-Abstract.html", "author": "Carl E. Rasmussen; Zoubin Ghahramani", "abstract": "We present an extension to the Mixture of Experts (ME) model, where the individual experts are Gaussian Process (GP) regression models. Us- ing an input-dependent adaptation of the Dirichlet Process, we imple- ment a gating network for an in\ufb01nite number of Experts. Inference in this model may be done ef\ufb01ciently using a Markov Chain relying on Gibbs sampling. The model allows the effective covariance function to vary with the inputs, and may handle large datasets \u2013 thus potentially over- coming two of the biggest hurdles with GP models. Simulations show the viability of this approach.", "bibtex": "@inproceedings{NIPS2001_9afefc52,\n author = {Rasmussen, Carl and Ghahramani, Zoubin},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Infinite Mixtures of Gaussian Process Experts},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/9afefc52942cb83c7c1f14b2139b09ba-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/9afefc52942cb83c7c1f14b2139b09ba-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/9afefc52942cb83c7c1f14b2139b09ba-Metadata.json", "review": "", "metareview": "", "pdf_size": 135239, "gs_citation": 697, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1411083338283657159&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": ";", "aff_domain": ";", "email": ";", "github": "", "project": "http://www.gatsby.ucl.ac.uk", "author_num": 2, "track": "main", "status": "Poster" }, { "id": "6c10fa3667", "title": "Information Geometrical Framework for Analyzing Belief Propagation Decoder", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/d7a84628c025d30f7b2c52c958767e76-Abstract.html", "author": "Shiro Ikeda; Toshiyuki Tanaka; Shun-ichi Amari", "abstract": "The mystery of belief propagation (BP) decoder, especially of the turbo decoding, is studied from information geometrical viewpoint. The loopy belief network (BN) of turbo codes makes it dif\ufb01cult to obtain the true \u201cbelief\u201d by BP, and the characteristics of the algorithm and its equilib- rium are not clearly understood. Our study gives an intuitive understand- ing of the mechanism, and a new framework for the analysis. Based on the framework, we reveal basic properties of the turbo decoding.", "bibtex": "@inproceedings{NIPS2001_d7a84628,\n author = {Ikeda, Shiro and Tanaka, Toshiyuki and Amari, Shun-ichi},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Information Geometrical Framework for Analyzing Belief Propagation Decoder},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/d7a84628c025d30f7b2c52c958767e76-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/d7a84628c025d30f7b2c52c958767e76-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/d7a84628c025d30f7b2c52c958767e76-Metadata.json", "review": "", "metareview": "", "pdf_size": 148181, "gs_citation": 22, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16057723148870332361&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 17, "aff": "Kyushu Inst. of Tech., & PRESTO, JST; Tokyo Metropolitan Univ.; RIKEN BSI", "aff_domain": "brain.kyutech.ac.jp;eei.metro-u.ac.jp;brain.riken.go.jp", "email": "brain.kyutech.ac.jp;eei.metro-u.ac.jp;brain.riken.go.jp", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;2", "aff_unique_norm": "Kyushu Institute of Technology;Tokyo Metropolitan University;RIKEN", "aff_unique_dep": ";;RIKEN BSI", "aff_unique_url": "https://www.kyutech.ac.jp;https://www.tmuc.ac.jp;https://www.riken.jp", "aff_unique_abbr": "Kyutech;TMU;RIKEN", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Japan" }, { "id": "5659662a0c", "title": "Information-Geometric Decomposition in Spike Analysis", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/ea5a486c712a91e48443cd802642223d-Abstract.html", "author": "Hiroyuki Nakahara; Shun-ichi Amari", "abstract": "We present an information-geometric measure to systematically investigate neuronal firing patterns, taking account not only of the second-order but also of higher-order interactions. 
We begin with the case of two neurons for illustration and show how to test whether or not any pairwise correlation in one period is significantly different from that in the other period. In order to test such a hy(cid:173) pothesis of different firing rates, the correlation term needs to be singled out 'orthogonally' to the firing rates, where the null hypoth(cid:173) esis might not be of independent firing. This method is also shown to directly associate neural firing with behavior via their mutual information, which is decomposed into two types of information, conveyed by mean firing rate and coincident firing, respectively. Then, we show that these results, using the 'orthogonal' decompo(cid:173) sition, are naturally extended to the case of three neurons and n neurons in general.", "bibtex": "@inproceedings{NIPS2001_ea5a486c,\n author = {Nakahara, Hiroyuki and Amari, Shun-ichi},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Information-Geometric Decomposition in Spike Analysis},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/ea5a486c712a91e48443cd802642223d-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/ea5a486c712a91e48443cd802642223d-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/ea5a486c712a91e48443cd802642223d-Metadata.json", "review": "", "metareview": "", "pdf_size": 1505813, "gs_citation": 13, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13031363911352463028&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "Lab. for Mathematical Neuroscience, RIKEN Brain Science Institute; Lab. for Mathematical Neuroscience, RIKEN Brain Science Institute + Dept. of Knowledge Sci., Japan Advanced Inst. of Sci. 
& Tech.", "aff_domain": "brain.riken.go.jp;brain.riken.go.jp", "email": "brain.riken.go.jp;brain.riken.go.jp", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0+1", "aff_unique_norm": "RIKEN Brain Science Institute;Japan Advanced Institute of Science and Technology", "aff_unique_dep": "Lab. for Mathematical Neuroscience;Department of Knowledge Science", "aff_unique_url": "https://bSI.riken.jp;https://www.jaist.ac.jp", "aff_unique_abbr": "RIKEN BSI;JAIST", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0+0", "aff_country_unique": "Japan" }, { "id": "692f53dfa4", "title": "Information-Geometrical Significance of Sparsity in Gallager Codes", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/dc513ea4fbdaa7a14786ffdebc4ef64e-Abstract.html", "author": "Toshiyuki Tanaka; Shiro Ikeda; Shun-ichi Amari", "abstract": "We report a result of perturbation analysis on decoding error of the belief propagation decoder for Gallager codes. The analysis is based on infor- mation geometry, and it shows that the principal term of decoding error at equilibrium comes from the m-embedding curvature of the log-linear submanifold spanned by the estimated pseudoposteriors, one for the full marginal, and K for partial posteriors, each of which takes a single check into account, where K is the number of checks in the Gallager code. It is then shown that the principal error term vanishes when the parity-check matrix of the code is so sparse that there are no two columns with overlap greater than 1.", "bibtex": "@inproceedings{NIPS2001_dc513ea4,\n author = {Tanaka, Toshiyuki and Ikeda, Shiro and Amari, Shun-ichi},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Information-Geometrical Significance of Sparsity in Gallager Codes},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/dc513ea4fbdaa7a14786ffdebc4ef64e-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/dc513ea4fbdaa7a14786ffdebc4ef64e-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/dc513ea4fbdaa7a14786ffdebc4ef64e-Metadata.json", "review": "", "metareview": "", "pdf_size": 71900, "gs_citation": 10, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15915596204401656266&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 12, "aff": "Department of Electronics and Information Engineering, Tokyo Metropolitan University; Kyushu Institute of Technology & JST; RIKEN, Brain Science Institute", "aff_domain": "eei.metro-u.ac.jp;brain.kyutech.ac.jp;brain.riken.go.jp", "email": "eei.metro-u.ac.jp;brain.kyutech.ac.jp;brain.riken.go.jp", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;2", "aff_unique_norm": "Tokyo Metropolitan University;Kyushu Institute of Technology;RIKEN", "aff_unique_dep": "Department of Electronics and Information Engineering;;Brain Science Institute", "aff_unique_url": "https://www.tmuc.ac.jp;https://www.kyutech.ac.jp;https://www.riken.jp", "aff_unique_abbr": "TMU;Kyutech;RIKEN", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Japan" }, { "id": "df04514d84", "title": "Intransitive Likelihood-Ratio Classifiers", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/a088ea2078cd92b0b8a0e78a32c5c082-Abstract.html", "author": "Jeff Bilmes; Gang Ji; Marina Meila", "abstract": "In this work, we introduce an information-theoretic based correction term to the likelihood ratio classi\ufb01cation method for multiple classes. 
Under certain conditions, the term is suf\ufb01cient for optimally correcting the dif- ference between the true and estimated likelihood ratio, and we analyze this in the Gaussian case. We \ufb01nd that the new correction term signif- icantly improves the classi\ufb01cation results when tested on medium vo- cabulary speech recognition tasks. Moreover, the addition of this term makes the class comparisons analogous to an intransitive game and we therefore use several tournament-like strategies to deal with this issue. We \ufb01nd that further small improvements are obtained by using an appro- priate tournament. Lastly, we \ufb01nd that intransitivity appears to be a good measure of classi\ufb01cation con\ufb01dence.", "bibtex": "@inproceedings{NIPS2001_a088ea20,\n author = {Bilmes, Jeff and Ji, Gang and Meila, Marina},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Intransitive Likelihood-Ratio Classifiers},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/a088ea2078cd92b0b8a0e78a32c5c082-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/a088ea2078cd92b0b8a0e78a32c5c082-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/a088ea2078cd92b0b8a0e78a32c5c082-Metadata.json", "review": "", "metareview": "", "pdf_size": 85908, "gs_citation": 8, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6444083843072916056&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 12, "aff": "Department of Electrical Engineering, University of Washington; Department of Electrical Engineering, University of Washington; Department of Statistics, University of Washington", "aff_domain": "ee.washington.edu;ee.washington.edu;stat.washington.edu", "email": "ee.washington.edu;ee.washington.edu;stat.washington.edu", "github": "", "project": "", 
"author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of Washington", "aff_unique_dep": "Department of Electrical Engineering", "aff_unique_url": "https://www.washington.edu", "aff_unique_abbr": "UW", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Seattle", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "da8fab89af", "title": "Iterative Double Clustering for Unsupervised and Semi-Supervised Learning", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/798cebccb32617ad94123450fd137104-Abstract.html", "author": "Ran El-Yaniv; Oren Souroujon", "abstract": "We present a powerful meta-clustering technique called Iterative Dou- ble Clustering (IDC). The IDC method is a natural extension of the recent Double Clustering (DC) method of Slonim and Tishby that ex- hibited impressive performance on text categorization tasks [12]. Us- ing synthetically generated data we empirically find that whenever the DC procedure is successful in recovering some of the structure hidden in the data, the extended IDC procedure can incrementally compute a significantly more accurate classification. IDC is especially advan- tageous when the data exhibits high attribute noise. Our simulation results also show the effectiveness of IDC in text categorization prob- lems. Surprisingly, this unsupervised procedure can be competitive with a (supervised) SVM trained with a small training set. Finally, we propose a simple and natural extension of IDC for semi-supervised and transductive learning where we are given both labeled and unla- beled examples.", "bibtex": "@inproceedings{NIPS2001_798cebcc,\n author = {El-Yaniv, Ran and Souroujon, Oren},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Iterative Double Clustering for Unsupervised and Semi-Supervised Learning},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/798cebccb32617ad94123450fd137104-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/798cebccb32617ad94123450fd137104-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/798cebccb32617ad94123450fd137104-Metadata.json", "review": "", "metareview": "", "pdf_size": 112331, "gs_citation": 107, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8365517231607899963&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 21, "aff": "Computer Science Department, Technion-Israel Institute of Technology; Computer Science Department, Technion-Israel Institute of Technology", "aff_domain": "cs.technion.ac.il;cs.technion.ac.il", "email": "cs.technion.ac.il;cs.technion.ac.il", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Technion-Israel Institute of Technology", "aff_unique_dep": "Computer Science Department", "aff_unique_url": "https://www.technion.ac.il", "aff_unique_abbr": "Technion", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "Israel" }, { "id": "9cbb7e7459", "title": "K-Local Hyperplane and Convex Distance Nearest Neighbor Algorithms", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/1359aa933b48b754a2f54adb688bfa77-Abstract.html", "author": "Pascal Vincent; Yoshua Bengio", "abstract": "Guided by an initial idea of building a complex (non linear) decision surface with maximal local margin in input space, we give a possible geometrical intuition as to why K-Nearest Neighbor (KNN) algorithms often perform more poorly than SVMs on classi\ufb01cation tasks. 
We then propose modi\ufb01ed K-Nearest Neighbor algorithms to overcome the per- ceived problem. The approach is similar in spirit to Tangent Distance, but with invariances inferred from the local neighborhood rather than prior knowledge. Experimental results on real world classi\ufb01cation tasks sug- gest that the modi\ufb01ed KNN algorithms often give a dramatic improve- ment over standard KNN and perform as well or better than SVMs.", "bibtex": "@inproceedings{NIPS2001_1359aa93,\n author = {Vincent, Pascal and Bengio, Yoshua},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {K-Local Hyperplane and Convex Distance Nearest Neighbor Algorithms},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/1359aa933b48b754a2f54adb688bfa77-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/1359aa933b48b754a2f54adb688bfa77-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/1359aa933b48b754a2f54adb688bfa77-Metadata.json", "review": "", "metareview": "", "pdf_size": 96488, "gs_citation": 271, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13825031176264460979&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 18, "aff": "Dept. IRO, Universit\u00e9 de Montr\u00e9al; Dept. IRO, Universit\u00e9 de Montr\u00e9al", "aff_domain": "iro.umontreal.ca;iro.umontreal.ca", "email": "iro.umontreal.ca;iro.umontreal.ca", "github": "", "project": "http://www.iro.umontreal.ca/~vincentp", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Universit\u00e9 de Montr\u00e9al", "aff_unique_dep": "Dept. 
IRO", "aff_unique_url": "https://www.umontreal.ca", "aff_unique_abbr": "UdeM", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Montr\u00e9al", "aff_country_unique_index": "0;0", "aff_country_unique": "Canada" }, { "id": "f8aff5aecf", "title": "KLD-Sampling: Adaptive Particle Filters", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/c5b2cebf15b205503560c4e8e6d1ea78-Abstract.html", "author": "Dieter Fox", "abstract": "Over the last years, particle \ufb01lters have been applied with great success to a variety of state estimation problems. We present a statistical approach to increasing the ef\ufb01ciency of particle \ufb01lters by adapting the size of sample sets on-the-\ufb02y. The key idea of the KLD-sampling method is to bound the approximation error introduced by the sample-based representation of the particle \ufb01lter. The name KLD-sampling is due to the fact that we measure the approximation error by the Kullback-Leibler distance. Our adaptation approach chooses a small number of samples if the density is focused on a small part of the state space, and it chooses a large number of samples if the state uncertainty is high. Both the implementation and computation overhead of this approach are small. Extensive experiments using mobile robot localization as a test application show that our approach yields drastic improvements over particle \ufb01lters with \ufb01xed sample set sizes and over a previously introduced adaptation technique.", "bibtex": "@inproceedings{NIPS2001_c5b2cebf,\n author = {Fox, Dieter},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {KLD-Sampling: Adaptive Particle Filters},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/c5b2cebf15b205503560c4e8e6d1ea78-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/c5b2cebf15b205503560c4e8e6d1ea78-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/c5b2cebf15b205503560c4e8e6d1ea78-Metadata.json", "review": "", "metareview": "", "pdf_size": 957347, "gs_citation": 790, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9189096957975981493&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 21, "aff": "Department of Computer Science & Engineering, University of Washington", "aff_domain": "cs.washington.edu", "email": "cs.washington.edu", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "University of Washington", "aff_unique_dep": "Department of Computer Science & Engineering", "aff_unique_url": "https://www.washington.edu", "aff_unique_abbr": "UW", "aff_campus_unique_index": "0", "aff_campus_unique": "Seattle", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "id": "8e55963a51", "title": "Kernel Feature Spaces and Nonlinear Blind Souce Separation", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/cf2226ddd41b1a2d0ae51dab54d32c36-Abstract.html", "author": "Stefan Harmeling; Andreas Ziehe; Motoaki Kawanabe; Klaus-Robert M\u00fcller", "abstract": "In kernel based learning the data is mapped to a kernel feature space of a dimension that corresponds to the number of training data points. In practice, however, the data forms a smaller submanifold in feature space, a fact that has been used e.g. by reduced set techniques for SVMs. 
We propose a new mathematical construction that permits to adapt to the in- trinsic dimension and to \ufb01nd an orthonormal basis of this submanifold. In doing so, computations get much simpler and more important our theoretical framework allows to derive elegant kernelized blind source separation (BSS) algorithms for arbitrary invertible nonlinear mixings. Experiments demonstrate the good performance and high computational ef\ufb01ciency of our kTDSEP algorithm for the problem of nonlinear BSS.", "bibtex": "@inproceedings{NIPS2001_cf2226dd,\n author = {Harmeling, Stefan and Ziehe, Andreas and Kawanabe, Motoaki and M\\\"{u}ller, Klaus-Robert},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Kernel Feature Spaces and Nonlinear Blind Souce Separation},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/cf2226ddd41b1a2d0ae51dab54d32c36-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/cf2226ddd41b1a2d0ae51dab54d32c36-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/cf2226ddd41b1a2d0ae51dab54d32c36-Metadata.json", "review": "", "metareview": "", "pdf_size": 585523, "gs_citation": 42, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10520641770376335627&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 17, "aff": "Fraunhofer FIRST.IDA; Fraunhofer FIRST.IDA; Fraunhofer FIRST.IDA; Fraunhofer FIRST.IDA+University of Potsdam, Department of Computer Science", "aff_domain": "first.fhg.de;first.fhg.de;first.fhg.de;first.fhg.de", "email": "first.fhg.de;first.fhg.de;first.fhg.de;first.fhg.de", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;0+1", "aff_unique_norm": "Fraunhofer Institute for Software and Systems Engineering;University of Potsdam", 
"aff_unique_dep": "FIRST.IDA;Department of Computer Science", "aff_unique_url": "https://www.first.ida.fraunhofer.de/;https://www.uni-potsdam.de", "aff_unique_abbr": "Fraunhofer FIRST.IDA;", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0+0", "aff_country_unique": "Germany" }, { "id": "0caf55ba4e", "title": "Kernel Logistic Regression and the Import Vector Machine", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/2eace51d8f796d04991c831a07059758-Abstract.html", "author": "Ji Zhu; Trevor Hastie", "abstract": "The support vector machine (SVM) is known for its good performance in binary classi\ufb01cation, but its extension to multi-class classi\ufb01cation is still an on-going research issue. In this paper, we propose a new approach for classi\ufb01cation, called the import vector machine (IVM), which is built on kernel logistic regression (KLR). We show that the IVM not only per- forms as well as the SVM in binary classi\ufb01cation, but also can naturally be generalized to the multi-class case. Furthermore, the IVM provides an estimate of the underlying probability. Similar to the \u201csupport points\u201d of the SVM, the IVM model uses only a fraction of the training data to index kernel basis functions, typically a much smaller fraction than the SVM. This gives the IVM a computational advantage over the SVM, especially when the size of the training data set is large.", "bibtex": "@inproceedings{NIPS2001_2eace51d,\n author = {Zhu, Ji and Hastie, Trevor},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Kernel Logistic Regression and the Import Vector Machine},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/2eace51d8f796d04991c831a07059758-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/2eace51d8f796d04991c831a07059758-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/2eace51d8f796d04991c831a07059758-Metadata.json", "review": "", "metareview": "", "pdf_size": 145641, "gs_citation": 477, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12453922707126147272&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 14, "aff": "Department of Statistics, Stanford University; Department of Statistics, Stanford University", "aff_domain": "stat.stanford.edu;stat.stanford.edu", "email": "stat.stanford.edu;stat.stanford.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Stanford University", "aff_unique_dep": "Department of Statistics", "aff_unique_url": "https://www.stanford.edu", "aff_unique_abbr": "Stanford", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Stanford", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "c75a50d401", "title": "Kernel Machines and Boolean Functions", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/d77f00766fd3be3f2189c843a6af3fb2-Abstract.html", "author": "Adam Kowalczyk; Alex J. Smola; Robert C. Williamson", "abstract": "We give results about the learnability and required complexity of logical formulae to solve classi\ufb01cation problems. These results are obtained by linking propositional logic with kernel machines. In particular we show that decision trees and disjunctive normal forms (DNF) can be repre- sented by the help of a special kernel, linking regularized risk to separa- tion margin. 
Subsequently we derive a number of lower bounds on the required complexity of logic formulae using properties of algorithms for generation of linear estimators, such as perceptron and maximal percep- tron learning.", "bibtex": "@inproceedings{NIPS2001_d77f0076,\n author = {Kowalczyk, Adam and Smola, Alex and Williamson, Robert C},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Kernel Machines and Boolean Functions},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/d77f00766fd3be3f2189c843a6af3fb2-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/d77f00766fd3be3f2189c843a6af3fb2-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/d77f00766fd3be3f2189c843a6af3fb2-Metadata.json", "review": "", "metareview": "", "pdf_size": 108422, "gs_citation": 23, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13640810249840440304&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 18, "aff": "Telstra Research Laboratories, Telstra, Clayton, VIC 3168; RSISE, MLG and TelEng, ANU, Canberra, ACT, 0200; RSISE, MLG and TelEng, ANU, Canberra, ACT, 0200", "aff_domain": "trl.oz.au;anu.edu.au;anu.edu.au", "email": "trl.oz.au;anu.edu.au;anu.edu.au", "github": "", "project": "http://www.kernel-machines.org", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;1", "aff_unique_norm": "Telstra;Australian National University", "aff_unique_dep": "Research Laboratories;Research School of Information Sciences and Engineering", "aff_unique_url": "https://www.telstra.com.au;https://www.anu.edu.au", "aff_unique_abbr": "Telstra;ANU", "aff_campus_unique_index": "0;1;1", "aff_campus_unique": "Clayton;Canberra", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Australia" }, { "id": "be40383cd8", "title": "Laplacian 
Eigenmaps and Spectral Techniques for Embedding and Clustering", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/f106b7f99d2cb30c3db1c3cc0fde9ccb-Abstract.html", "author": "Mikhail Belkin; Partha Niyogi", "abstract": "Drawing on the correspondence between the graph Laplacian, the Laplace-Beltrami operator on a manifold , and the connections to the heat equation , we propose a geometrically motivated algorithm for constructing a representation for data sampled from a low di(cid:173) mensional manifold embedded in a higher dimensional space. The algorithm provides a computationally efficient approach to non(cid:173) linear dimensionality reduction that has locality preserving prop(cid:173) erties and a natural connection to clustering. Several applications are considered.", "bibtex": "@inproceedings{NIPS2001_f106b7f9,\n author = {Belkin, Mikhail and Niyogi, Partha},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Laplacian Eigenmaps and Spectral Techniques for Embedding and Clustering},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/f106b7f99d2cb30c3db1c3cc0fde9ccb-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/f106b7f99d2cb30c3db1c3cc0fde9ccb-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/f106b7f99d2cb30c3db1c3cc0fde9ccb-Metadata.json", "review": "", "metareview": "", "pdf_size": 1023288, "gs_citation": 6423, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6189682175754530914&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 18, "aff": "Depts. of Mathematics and Computer Science, The University of Chicago; Depts. 
of Mathematics and Computer Science, The University of Chicago", "aff_domain": "math.uchicago.edu;cs.uchicago.edu", "email": "math.uchicago.edu;cs.uchicago.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of Chicago", "aff_unique_dep": "Depts. of Mathematics and Computer Science", "aff_unique_url": "https://www.uchicago.edu", "aff_unique_abbr": "UChicago", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "c903a50c05", "title": "Latent Dirichlet Allocation", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/296472c9542ad4d4788d543508116cbc-Abstract.html", "author": "David M. Blei; Andrew Y. Ng; Michael I. Jordan", "abstract": "We propose a generative model for text and other collections of dis(cid:173) crete data that generalizes or improves on several previous models including naive Bayes/unigram, mixture of unigrams [6], and Hof(cid:173) mann's aspect model, also known as probabilistic latent semantic indexing (pLSI) [3]. In the context of text modeling, our model posits that each document is generated as a mixture of topics, where the continuous-valued mixture proportions are distributed as a latent Dirichlet random variable. Inference and learning are carried out efficiently via variational algorithms. We present em(cid:173) pirical results on applications of this model to problems in text modeling, collaborative filtering, and text classification.", "bibtex": "@inproceedings{NIPS2001_296472c9,\n author = {Blei, David and Ng, Andrew and Jordan, Michael},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Latent Dirichlet Allocation},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/296472c9542ad4d4788d543508116cbc-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/296472c9542ad4d4788d543508116cbc-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/296472c9542ad4d4788d543508116cbc-Metadata.json", "review": "", "metareview": "", "pdf_size": 1474593, "gs_citation": 57563, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17756175773309118945&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 88, "aff": ";;", "aff_domain": ";;", "email": ";;", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster" }, { "id": "8cad4e9ef7", "title": "Learning Body Pose via Specialized Maps", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/ea6b2efbdd4255a9f1b3bbc6399b58f4-Abstract.html", "author": "R\u00f3mer Rosales; Stan Sclaroff", "abstract": "A nonlinear supervised learning model, the Specialized Mappings Architecture (SMA), is described and applied to the estimation of human body pose from monocular images. The SMA consists of several specialized forward mapping functions and an inverse map(cid:173) ping function. Each specialized function maps certain domains of the input space (image features) onto the output space (body pose parameters). The key algorithmic problems faced are those of learning the specialized domains and mapping functions in an op(cid:173) timal way, as well as performing inference given inputs and knowl(cid:173) edge of the inverse function. Solutions to these problems employ the EM algorithm and alternating choices of conditional indepen(cid:173) dence assumptions. 
Performance of the approach is evaluated with synthetic and real video sequences of human motion.", "bibtex": "@inproceedings{NIPS2001_ea6b2efb,\n author = {Rosales, R\\'{o}mer and Sclaroff, Stan},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Learning Body Pose via Specialized Maps},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/ea6b2efbdd4255a9f1b3bbc6399b58f4-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/ea6b2efbdd4255a9f1b3bbc6399b58f4-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/ea6b2efbdd4255a9f1b3bbc6399b58f4-Metadata.json", "review": "", "metareview": "", "pdf_size": 1459955, "gs_citation": 154, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13229138672472669948&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 11, "aff": "Department of Computer Science, Boston University, Boston, MA 02215; Department of Computer Science, Boston University, Boston, MA 02215", "aff_domain": "cs.bu.edu;cs.bu.edu", "email": "cs.bu.edu;cs.bu.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Boston University", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.bu.edu", "aff_unique_abbr": "BU", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Boston", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "19367ff094", "title": "Learning Discriminative Feature Transforms to Low Dimensions in Low Dimentions", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/d860edd1dd83b36f02ce52bde626c653-Abstract.html", "author": "Kari Torkkola", "abstract": "The marriage of Renyi entropy with Parzen density estimation has been shown to be a viable tool in 
learning discriminative feature transforms. However, it suffers from computational complexity proportional to the square of the number of samples in the training data. This sets a practical limit to using large databases. We suggest immediate divorce of the two methods and remarriage of Renyi entropy with a semi-parametric density estimation method, such as a Gaussian Mixture Models (GMM). This al- lows all of the computation to take place in the low dimensional target space, and it reduces computational complexity proportional to square of the number of components in the mixtures. Furthermore, a conve- nient extension to Hidden Markov Models as commonly used in speech recognition becomes possible.", "bibtex": "@inproceedings{NIPS2001_d860edd1,\n author = {Torkkola, Kari},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Learning Discriminative Feature Transforms to Low Dimensions in Low Dimentions},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/d860edd1dd83b36f02ce52bde626c653-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/d860edd1dd83b36f02ce52bde626c653-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/d860edd1dd83b36f02ce52bde626c653-Metadata.json", "review": "", "metareview": "", "pdf_size": 83402, "gs_citation": 38, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2809095486458899499&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 15, "aff": "Motorola Labs, 7700 South River Parkway, MD ML28, Tempe AZ 85284, USA", "aff_domain": "motorola.com", "email": "motorola.com", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "Motorola Labs", "aff_unique_dep": "", "aff_unique_url": "https://www.motorola.com", "aff_unique_abbr": 
"Motorola", "aff_campus_unique_index": "0", "aff_campus_unique": "Tempe", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "id": "f17dd0cb7d", "title": "Learning Hierarchical Structures with Linear Relational Embedding", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/814a9c18f5abff398787c9cfcbf3d80c-Abstract.html", "author": "Alberto Paccanaro; Geoffrey E. Hinton", "abstract": "We present Linear Relational Embedding (LRE), a new method of learn- ing a distributed representation of concepts from data consisting of in- stances of relations between given concepts. Its \ufb01nal goal is to be able to generalize, i.e. infer new instances of these relations among the con- cepts. On a task involving family relationships we show that LRE can generalize better than any previously published method. We then show how LRE can be used effectively to \ufb01nd compact distributed representa- tions for variable-sized recursive data structures, such as trees and lists.", "bibtex": "@inproceedings{NIPS2001_814a9c18,\n author = {Paccanaro, Alberto and Hinton, Geoffrey E},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Learning Hierarchical Structures with Linear Relational Embedding},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/814a9c18f5abff398787c9cfcbf3d80c-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/814a9c18f5abff398787c9cfcbf3d80c-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/814a9c18f5abff398787c9cfcbf3d80c-Metadata.json", "review": "", "metareview": "", "pdf_size": 118775, "gs_citation": 29, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3999853003368094439&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 15, "aff": "Gatsby Computational Neuroscience Unit, UCL, 17 Queen Square, London, UK; Gatsby Computational Neuroscience Unit, UCL, 17 Queen Square, London, UK", "aff_domain": "gatsby.ucl.ac.uk;gatsby.ucl.ac.uk", "email": "gatsby.ucl.ac.uk;gatsby.ucl.ac.uk", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University College London", "aff_unique_dep": "Gatsby Computational Neuroscience Unit", "aff_unique_url": "https://www.ucl.ac.uk", "aff_unique_abbr": "UCL", "aff_campus_unique_index": "0;0", "aff_campus_unique": "London", "aff_country_unique_index": "0;0", "aff_country_unique": "United Kingdom" }, { "id": "a9146eaaa9", "title": "Learning Lateral Interactions for Feature Binding and Sensory Segmentation", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/3a824154b16ed7dab899bf000b80eeee-Abstract.html", "author": "Heiko Wersing", "abstract": "We present a new approach to the supervised learning of lateral inter- actions for the competitive layer model (CLM) dynamic feature binding architecture. The method is based on consistency conditions, which were recently shown to characterize the attractor states of this linear threshold recurrent network. 
For a given set of training examples the learning prob- lem is formulated as a convex quadratic optimization problem in the lat- eral interaction weights. An ef\ufb01cient dimension reduction of the learning problem can be achieved by using a linear superposition of basis inter- actions. We show the successful application of the method to a medical image segmentation problem of \ufb02uorescence microscope cell images.", "bibtex": "@inproceedings{NIPS2001_3a824154,\n author = {Wersing, Heiko},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Learning Lateral Interactions for Feature Binding and Sensory Segmentation},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/3a824154b16ed7dab899bf000b80eeee-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/3a824154b16ed7dab899bf000b80eeee-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/3a824154b16ed7dab899bf000b80eeee-Metadata.json", "review": "", "metareview": "", "pdf_size": 159439, "gs_citation": 14, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12030863694437812821&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 12, "aff": "HONDA R&D Europe GmbH", "aff_domain": "hre-ftr.f.rd.honda.co.jp", "email": "hre-ftr.f.rd.honda.co.jp", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "Honda R&D Europe GmbH", "aff_unique_dep": "", "aff_unique_url": "https://www.honda-rc.de", "aff_unique_abbr": "HRE", "aff_country_unique_index": "0", "aff_country_unique": "Germany" }, { "id": "2273c348d3", "title": "Learning Spike-Based Correlations and Conditional Probabilities in Silicon", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/4afd521d77158e02aed37e2274b90c9c-Abstract.html", "author": 
"Aaron P. Shon; David Hsu; Chris Diorio", "abstract": "Abstract Unavailable", "bibtex": "@inproceedings{NIPS2001_4afd521d,\n author = {Shon, Aaron and Hsu, David and Diorio, Chris},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Learning Spike-Based Correlations and Conditional Probabilities in Silicon},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/4afd521d77158e02aed37e2274b90c9c-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/4afd521d77158e02aed37e2274b90c9c-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/4afd521d77158e02aed37e2274b90c9c-Metadata.json", "review": "", "metareview": "", "pdf_size": 60673, "gs_citation": 11, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14468438738464599098&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": ";;", "aff_domain": ";;", "email": ";;", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster" }, { "id": "bbb26175d2", "title": "Learning a Gaussian Process Prior for Automatically Generating Music Playlists", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/6351bf9dce654515bf1ddbd6426dfa97-Abstract.html", "author": "John C. Platt; Christopher J. C. Burges; Steven Swenson; Christopher Weare; Alice Zheng", "abstract": "This paper presents AutoDJ: a system for automatically generating mu- sic playlists based on one or more seed songs selected by a user. AutoDJ uses Gaussian Process Regression to learn a user preference function over songs. This function takes music metadata as inputs. This paper further introduces Kernel Meta-Training, which is a method of learning a Gaussian Process kernel from a distribution of functions that generates the learned function. 
For playlist generation, AutoDJ learns a kernel from a large set of albums. This learned kernel is shown to be more effective at predicting users\u2019 playlists than a reasonable hand-designed kernel.", "bibtex": "@inproceedings{NIPS2001_6351bf9d,\n author = {Platt, John and Burges, Christopher J. C. and Swenson, Steven and Weare, Christopher and Zheng, Alice},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Learning a Gaussian Process Prior for Automatically Generating Music Playlists},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/6351bf9dce654515bf1ddbd6426dfa97-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/6351bf9dce654515bf1ddbd6426dfa97-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/6351bf9dce654515bf1ddbd6426dfa97-Metadata.json", "review": "", "metareview": "", "pdf_size": 79353, "gs_citation": 180, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=836740085129337161&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "Microsoft Corporation; Microsoft Corporation; Microsoft Corporation; Microsoft Corporation; Microsoft Corporation + Department of Electrical Engineering and Computer Science, University of California at Berkeley", "aff_domain": "microsoft.com;microsoft.com;microsoft.com;microsoft.com;cs.berkeley.edu", "email": "microsoft.com;microsoft.com;microsoft.com;microsoft.com;cs.berkeley.edu", "github": "", "project": "", "author_num": 5, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;0;0+1", "aff_unique_norm": "Microsoft;University of California, Berkeley", "aff_unique_dep": "Microsoft Corporation;Department of Electrical Engineering and Computer Science", "aff_unique_url": "https://www.microsoft.com;https://www.berkeley.edu", "aff_unique_abbr": "Microsoft;UC 
Berkeley", "aff_campus_unique_index": "1", "aff_campus_unique": ";Berkeley", "aff_country_unique_index": "0;0;0;0;0+0", "aff_country_unique": "United States" }, { "id": "5a9001bea6", "title": "Learning from Infinite Data in Finite Time", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/f8bf09f5fceaea80e1f864a1b48938bf-Abstract.html", "author": "Pedro Domingos; Geoff Hulten", "abstract": "We propose the following general method for scaling learning algorithms to arbitrarily large data sets. Consider the model Mii learned by the algorithm using ni examples in step i (ii = (nl , ... ,nm)) , and the model Moo that would be learned using in(cid:173) finite examples. Upper-bound the loss L(Mii' M oo ) between them as a function of ii, and then minimize the algorithm's time com(cid:173) plexity f(ii) subject to the constraint that L(Moo , Mii ) be at most f with probability at most 8. We apply this method to the EM algorithm for mixtures of Gaussians. Preliminary experiments on a series of large data sets provide evidence of the potential of this approach.", "bibtex": "@inproceedings{NIPS2001_f8bf09f5,\n author = {Domingos, Pedro and Hulten, Geoff},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Learning from Infinite Data in Finite Time},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/f8bf09f5fceaea80e1f864a1b48938bf-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/f8bf09f5fceaea80e1f864a1b48938bf-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/f8bf09f5fceaea80e1f864a1b48938bf-Metadata.json", "review": "", "metareview": "", "pdf_size": 1548938, "gs_citation": 41, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10194733183046017702&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": ";", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster" }, { "id": "3cf9733cf2", "title": "Linear-time inference in Hierarchical HMMs", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/aebf7782a3d445f43cf30ee2c0d84dee-Abstract.html", "author": "Kevin P. Murphy; Mark A. Paskin", "abstract": "The hierarchical hidden Markov model (HHMM) is a generalization of the hidden Markov model (HMM) that models sequences with structure at many length/time scales [FST98]. Unfortunately, the original infer- is ence algorithm is rather complicated, and takes the length of the sequence, making it impractical for many domains. In this paper, we show how HHMMs are a special kind of dynamic Bayesian network (DBN), and thereby derive a much simpler inference algorithm, which only takes time. Furthermore, by drawing the connection between HHMMs and DBNs, we enable the application of many stan- dard approximation techniques to further speed up inference.", "bibtex": "@inproceedings{NIPS2001_aebf7782,\n author = {Murphy, Kevin P and Paskin, Mark},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Linear-time inference in Hierarchical HMMs},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/aebf7782a3d445f43cf30ee2c0d84dee-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/aebf7782a3d445f43cf30ee2c0d84dee-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/aebf7782a3d445f43cf30ee2c0d84dee-Metadata.json", "review": "", "metareview": "", "pdf_size": 99194, "gs_citation": 294, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16571681644837807047&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 18, "aff": "Computer Science Department, University of California, Berkeley, CA 94720-1776; Computer Science Department, University of California, Berkeley, CA 94720-1776", "aff_domain": "cs.berkeley.edu;cs.berkeley.edu", "email": "cs.berkeley.edu;cs.berkeley.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of California, Berkeley", "aff_unique_dep": "Computer Science Department", "aff_unique_url": "https://www.berkeley.edu", "aff_unique_abbr": "UC Berkeley", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Berkeley", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "49e7d27e17", "title": "Linking Motor Learning to Function Approximation: Learning in an Unlearnable Force Field", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/3683af9d6f6c06acee72992f2977f67e-Abstract.html", "author": "O. Donchin; Reza Shadmehr", "abstract": "Reaching movements require the brain to generate motor com- mands that rely on an internal model of the task's dynamics. Here we consider the errors that subjects make early in their reaching trajectories to various targets as they learn an internal model. 
Using a framework from function approximation, we argue that the sequence of errors should reflect the process of gradient descent. If so, then the sequence of errors should obey hidden state transitions of a simple dynamical system. Fitting the system to human data, we find a surprisingly good fit accounting for 98% of the variance. This allows us to draw tentative conclusions about the basis elements used by the brain in transforming sensory space to motor commands. To test the robustness of the results, we estimate the shape of the basis elements under two conditions: in a traditional learning paradigm with a consistent force field, and in a random sequence of force fields where learning is not possible. Remarkably, we find that the basis remains invariant.", "bibtex": "@inproceedings{NIPS2001_3683af9d,\n author = {Donchin, O. and Shadmehr, Reza},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Linking Motor Learning to Function Approximation: Learning in an Unlearnable Force Field},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/3683af9d6f6c06acee72992f2977f67e-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/3683af9d6f6c06acee72992f2977f67e-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/3683af9d6f6c06acee72992f2977f67e-Metadata.json", "review": "", "metareview": "", "pdf_size": 105155, "gs_citation": 18, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15587898190859868095&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 19, "aff": "Dept. of Biomedical Engineering, Johns Hopkins University, Baltimore, MD 21205; Dept. 
of Biomedical Engineering, Johns Hopkins University, Baltimore, MD 21205", "aff_domain": "bme.jhu.edu;bme.jhu.edu", "email": "bme.jhu.edu;bme.jhu.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Johns Hopkins University", "aff_unique_dep": "Dept. of Biomedical Engineering", "aff_unique_url": "https://www.jhu.edu", "aff_unique_abbr": "JHU", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Baltimore", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "d0d8656411", "title": "MIME: Mutual Information Minimization and Entropy Maximization for Bayesian Belief Propagation", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/f1981e4bd8a0d6d8462016d2fc6276b3-Abstract.html", "author": "Anand Rangarajan; Alan L. Yuille", "abstract": "Bayesian belief propagation in graphical models has been recently shown to have very close ties to inference methods based in statis- tical physics. After Yedidia et al. demonstrated that belief prop- agation (cid:12)xed points correspond to extrema of the so-called Bethe free energy, Yuille derived a double loop algorithm that is guar- anteed to converge to a local minimum of the Bethe free energy. Yuille\u2019s algorithm is based on a certain decomposition of the Bethe free energy and he mentions that other decompositions are possi- ble and may even be fruitful. In the present work, we begin with the Bethe free energy and show that it has a principled interpre- tation as pairwise mutual information minimization and marginal entropy maximization (MIME). Next, we construct a family of free energy functions from a spectrum of decompositions of the original Bethe free energy. For each free energy in this family, we develop a new algorithm that is guaranteed to converge to a local min- imum. 
Preliminary computer simulations are in agreement with this theoretical development.", "bibtex": "@inproceedings{NIPS2001_f1981e4b,\n author = {Rangarajan, Anand and Yuille, Alan L},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {MIME: Mutual Information Minimization and Entropy Maximization for Bayesian Belief Propagation},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/f1981e4bd8a0d6d8462016d2fc6276b3-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/f1981e4bd8a0d6d8462016d2fc6276b3-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/f1981e4bd8a0d6d8462016d2fc6276b3-Metadata.json", "review": "", "metareview": "", "pdf_size": 146445, "gs_citation": 4, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18010641020714124587&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 15, "aff": "Dept. of Computer and Information Science and Engineering, University of Florida; Smith-Kettlewell Eye Research Institute", "aff_domain": "cise.ufl.edu;ski.org", "email": "cise.ufl.edu;ski.org", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "University of Florida;Smith-Kettlewell Eye Research Institute", "aff_unique_dep": "Dept. 
of Computer and Information Science and Engineering;", "aff_unique_url": "https://www.ufl.edu;https://www.ski.org", "aff_unique_abbr": "UF;SKI", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "5f8daf9179", "title": "Matching Free Trees with Replicator Equations", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/df4fe8a8bcd5c95cdb640aa9793bb32b-Abstract.html", "author": "Marcello Pelillo", "abstract": "Motivated by our recent work on rooted tree matching, in this paper we provide a solution to the problem of matching two free (i.e., unrooted) trees by constructing an association graph whose maximal cliques are in one-to-one correspondence with maximal common subtrees. We then solve the problem using simple replicator dynamics from evolutionary game theory. Experiments on hundreds of uniformly random trees are presented. The results are impressive: despite the inherent inability of these simple dynamics to escape from local optima, they always returned a globally optimal solution.", "bibtex": "@inproceedings{NIPS2001_df4fe8a8,\n author = {Pelillo, Marcello},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Matching Free Trees with Replicator Equations},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/df4fe8a8bcd5c95cdb640aa9793bb32b-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/df4fe8a8bcd5c95cdb640aa9793bb32b-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/df4fe8a8bcd5c95cdb640aa9793bb32b-Metadata.json", "review": "", "metareview": "", "pdf_size": 85145, "gs_citation": 14, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4933431963578151577&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 16, "aff": "Dipartimento di Informatica, Universit` a Ca\u2019 Foscari di Venezia", "aff_domain": "dsi.unive.it", "email": "dsi.unive.it", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "Universit` a Ca\u2019 Foscari di Venezia", "aff_unique_dep": "Dipartimento di Informatica", "aff_unique_url": "https://www.unive.it", "aff_unique_abbr": "UNIVE", "aff_campus_unique_index": "0", "aff_campus_unique": "Venezia", "aff_country_unique_index": "0", "aff_country_unique": "Italy" }, { "id": "5512a06087", "title": "Means, Correlations and Bounds", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/0004d0b59e19461ff126e3a08a814c33-Abstract.html", "author": "Martijn Leisink; Bert Kappen", "abstract": "The partition function for a Boltzmann machine can be bounded from above and below. We can use this to bound the means and the correlations. For networks with small weights, the values of these statistics can be restricted to non-trivial regions (i.e. a subset of [-1 , 1]). 
Experimental results show that reasonable bounding occurs for weight sizes where mean field expansions generally give good results.", "bibtex": "@inproceedings{NIPS2001_0004d0b5,\n author = {Leisink, Martijn and Kappen, Bert},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Means, Correlations and Bounds},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/0004d0b59e19461ff126e3a08a814c33-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/0004d0b59e19461ff126e3a08a814c33-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/0004d0b59e19461ff126e3a08a814c33-Metadata.json", "review": "", "metareview": "", "pdf_size": 1156959, "gs_citation": 6, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12765330534741671228&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "Department of Biophysics, University of Nijmegen; Department of Biophysics, University of Nijmegen", "aff_domain": "mbfys.kun.nl;mbfys.kun.nl", "email": "mbfys.kun.nl;mbfys.kun.nl", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of Nijmegen", "aff_unique_dep": "Department of Biophysics", "aff_unique_url": "https://www.rug.nl", "aff_unique_abbr": "", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "Netherlands" }, { "id": "fccc103d24", "title": "Minimax Probability Machine", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/f48c04ffab49ff0e5d1176244fdfb65c-Abstract.html", "author": "Gert Lanckriet; Laurent E. Ghaoui; Chiranjib Bhattacharyya; Michael I. 
Jordan", "abstract": "When constructing a classifier, the probability of correct classifi(cid:173) cation of future data points should be maximized. In the current paper this desideratum is translated in a very direct way into an optimization problem, which is solved using methods from con(cid:173) vex optimization. We also show how to exploit Mercer kernels in this setting to obtain nonlinear decision boundaries. A worst-case bound on the probability of misclassification of future data is ob(cid:173) tained explicitly.", "bibtex": "@inproceedings{NIPS2001_f48c04ff,\n author = {Lanckriet, Gert and Ghaoui, Laurent and Bhattacharyya, Chiranjib and Jordan, Michael},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Minimax Probability Machine},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/f48c04ffab49ff0e5d1176244fdfb65c-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/f48c04ffab49ff0e5d1176244fdfb65c-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/f48c04ffab49ff0e5d1176244fdfb65c-Metadata.json", "review": "", "metareview": "", "pdf_size": 1215263, "gs_citation": 197, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7277777379861640648&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 15, "aff": "Department of EECS, University of California, Berkeley; Department of EECS, University of California, Berkeley; Department of EECS, University of California, Berkeley; Computer Science and Statistics, University of California, Berkeley", "aff_domain": "eecs.berkeley.edu;eecs.berkeley.edu;eecs.berkeley.edu;cs.berkeley.edu", "email": "eecs.berkeley.edu;eecs.berkeley.edu;eecs.berkeley.edu;cs.berkeley.edu", "github": "", "project": "http://robotics.eecs.berkeley.edu/~gert/", "author_num": 4, "track": "main", "status": 
"Poster", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "University of California, Berkeley", "aff_unique_dep": "Department of Electrical Engineering and Computer Sciences", "aff_unique_url": "https://www.berkeley.edu", "aff_unique_abbr": "UC Berkeley", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Berkeley", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "id": "2856e2ff80", "title": "Model Based Population Tracking and Automatic Detection of Distribution Changes", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/ef8446f35513a8d6aa2308357a268a7e-Abstract.html", "author": "Igor V. Cadez; P. S. Bradley", "abstract": "Probabilistic mixture models are used for a broad range of data anal- ysis tasks such as clustering, classi\ufb01cation, predictive modeling, etc. Due to their inherent probabilistic nature, mixture models can easily be combined with other probabilistic or non-probabilistic techniques thus forming more complex data analysis systems. In the case of online data (where there is a stream of data available) models can be constantly up- dated to re\ufb02ect the most current distribution of the incoming data. How- ever, in many business applications the models themselves represent a parsimonious summary of the data and therefore it is not desirable to change models frequently, much less with every new data point. In such a framework it becomes crucial to track the applicability of the mixture model and detect the point in time when the model fails to adequately represent the data. In this paper we formulate the problem of change detection and propose a principled solution. Empirical results over both synthetic and real-life data sets are presented.", "bibtex": "@inproceedings{NIPS2001_ef8446f3,\n author = {Cadez, Igor and Bradley, P. S.},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Model Based Population Tracking and Automatic Detection of Distribution Changes},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/ef8446f35513a8d6aa2308357a268a7e-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/ef8446f35513a8d6aa2308357a268a7e-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/ef8446f35513a8d6aa2308357a268a7e-Metadata.json", "review": "", "metareview": "", "pdf_size": 162181, "gs_citation": 7, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10052425901189012734&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Dept. of Information and Computer Science, University of California, Irvine, CA 92612 + digiMine, Inc., Bellevue, WA 98004-4332; digiMine, Inc., Bellevue, WA 98004-4332", "aff_domain": "ics.uci.edu;digimine.com", "email": "ics.uci.edu;digimine.com", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0+1;1", "aff_unique_norm": "University of California, Irvine;digiMine, Inc.", "aff_unique_dep": "Dept. of Information and Computer Science;", "aff_unique_url": "https://www.uci.edu;", "aff_unique_abbr": "UCI;", "aff_campus_unique_index": "0", "aff_campus_unique": "Irvine;", "aff_country_unique_index": "0+0;0", "aff_country_unique": "United States" }, { "id": "2ae194e23c", "title": "Model-Free Least-Squares Policy Iteration", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/fca0789e7891cbc0583298a238316122-Abstract.html", "author": "Michail G. Lagoudakis; Ronald Parr", "abstract": "We propose a new approach to reinforcement learning which combines least squares function approximation with policy iteration. Our method is model-free and completely off policy. 
We are motivated by the least squares temporal difference learning algorithm (LSTD), which is known for its ef\ufb01cient use of sample experiences compared to pure temporal difference algorithms. LSTD is ideal for prediction problems, however it heretofore has not had a straightforward application to control problems. Moreover, approximations learned by LSTD are strongly in\ufb02uenced by the visitation distribution over states. Our new algorithm, Least Squares Policy Iteration (LSPI) addresses these issues. The result is an off-policy method which can use (or reuse) data collected from any source. We have tested LSPI on several problems, including a bicycle simulator in which it learns to guide the bicycle to a goal ef\ufb01ciently by merely observing a relatively small number of completely random trials.", "bibtex": "@inproceedings{NIPS2001_fca0789e,\n author = {Lagoudakis, Michail G. and Parr, Ronald},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Model-Free Least-Squares Policy Iteration},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/fca0789e7891cbc0583298a238316122-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/fca0789e7891cbc0583298a238316122-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/fca0789e7891cbc0583298a238316122-Metadata.json", "review": "", "metareview": "", "pdf_size": 164799, "gs_citation": 116, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16416507513099161143&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 17, "aff": "Department of Computer Science, Duke University; Department of Computer Science, Duke University", "aff_domain": "cs.duke.edu;cs.duke.edu", "email": "cs.duke.edu;cs.duke.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Duke University", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.duke.edu", "aff_unique_abbr": "Duke", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "ee9e376a01", "title": "Modeling Temporal Structure in Classical Conditioning", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/c92a10324374fac681719d63979d00fe-Abstract.html", "author": "Aaron C. Courville; David S. Touretzky", "abstract": "The Temporal Coding Hypothesis of Miller and colleagues [7] sug(cid:173) gests that animals integrate related temporal patterns of stimuli into single memory representations. We formalize this concept using quasi-Bayes estimation to update the parameters of a con(cid:173) strained hidden Markov model. This approach allows us to account for some surprising temporal effects in the second order condition(cid:173) ing experiments of Miller et al. 
[1 , 2, 3], which other models are unable to explain.", "bibtex": "@inproceedings{NIPS2001_c92a1032,\n author = {Courville, Aaron C and Touretzky, David},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Modeling Temporal Structure in Classical Conditioning},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/c92a10324374fac681719d63979d00fe-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/c92a10324374fac681719d63979d00fe-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/c92a10324374fac681719d63979d00fe-Metadata.json", "review": "", "metareview": "", "pdf_size": 1575792, "gs_citation": 13, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2937686046042548885&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 14, "aff": "Robotics Institute + Center for the Neural Basis of Cognition; Computer Science Department + Center for the Neural Basis of Cognition", "aff_domain": "es.emu.edu;es.emu.edu", "email": "es.emu.edu;es.emu.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0+1;2+1", "aff_unique_norm": "Robotics Institute;Center for the Neural Basis of Cognition;Computer Science Department", "aff_unique_dep": ";;Computer Science", "aff_unique_url": ";;", "aff_unique_abbr": ";;", "aff_campus_unique_index": ";", "aff_campus_unique": "", "aff_country_unique_index": "1;1", "aff_country_unique": ";United States" }, { "id": "9962e297a9", "title": "Modeling the Modulatory Effect of Attention on Human Spatial Vision", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/cbef46321026d8404bc3216d4774c8a9-Abstract.html", "author": "Laurent Itti; Jochen Braun; Christof Koch", "abstract": "We present new simulation results, in which a computational model of 
interacting visual neurons simultaneously predicts the modula(cid:173) tion of spatial vision thresholds by focal visual attention, for five dual-task human psychophysics experiments. This new study com(cid:173) plements our previous findings that attention activates a winner(cid:173) take-all competition among early visual neurons within one cortical hypercolumn. This \"intensified competition\" hypothesis assumed that attention equally affects all neurons, and yielded two single(cid:173) unit predictions: an increase in gain and a sharpening of tuning with attention. While both effects have been separately observed in electrophysiology, no single-unit study has yet shown them si(cid:173) multaneously. Hence, we here explore whether our model could still predict our data if attention might only modulate neuronal gain, but do so non-uniformly across neurons and tasks. Specifically, we investigate whether modulating the gain of only the neurons that are loudest, best-tuned, or most informative about the stimulus, or of all neurons equally but in a task-dependent manner, may ac(cid:173) count for the data. We find that none of these hypotheses yields predictions as plausible as the intensified competition hypothesis, hence providing additional support for our original findings.", "bibtex": "@inproceedings{NIPS2001_cbef4632,\n author = {Itti, Laurent and Braun, Jochen and Koch, Christof},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Modeling the Modulatory Effect of Attention on Human Spatial Vision},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/cbef46321026d8404bc3216d4774c8a9-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/cbef46321026d8404bc3216d4774c8a9-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/cbef46321026d8404bc3216d4774c8a9-Metadata.json", "review": "", "metareview": "", "pdf_size": 1374855, "gs_citation": 21, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4798510045629539197&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": ";;", "aff_domain": ";;", "email": ";;", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster" }, { "id": "eaf29c9138", "title": "Modularity in the motor system: decomposition of muscle patterns as combinations of time-varying synergies", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/3d863b367aa379f71c7afc0c9cdca41d-Abstract.html", "author": "A. D'avella; M. C. Tresch", "abstract": "The question of whether the nervous system produces movement through the combination of a few discrete elements has long been central to the study of motor control. Muscle synergies, i.e. coordinated patterns of muscle activity, have been proposed as possible building blocks. Here we propose a model based on combinations of muscle synergies with a spe- ci\ufb01c amplitude and temporal structure. Time-varying synergies provide a realistic basis for the decomposition of the complex patterns observed in natural behaviors. To extract time-varying synergies from simultane- ous recording of EMG activity we developed an algorithm which extends existing non-negative matrix factorization techniques.", "bibtex": "@inproceedings{NIPS2001_3d863b36,\n author = {D\\textquotesingle avella, A. 
and Tresch, M.},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Modularity in the motor system: decomposition of muscle patterns as combinations of time-varying synergies},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/3d863b367aa379f71c7afc0c9cdca41d-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/3d863b367aa379f71c7afc0c9cdca41d-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/3d863b367aa379f71c7afc0c9cdca41d-Metadata.json", "review": "", "metareview": "", "pdf_size": 104645, "gs_citation": 141, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9386672048977911984&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 13, "aff": "Department of Brain and Cognitive Sciences, Massachusetts Institute of Technology; Department of Brain and Cognitive Sciences, Massachusetts Institute of Technology", "aff_domain": "ai.mit.edu;ai.mit.edu", "email": "ai.mit.edu;ai.mit.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Massachusetts Institute of Technology", "aff_unique_dep": "Department of Brain and Cognitive Sciences", "aff_unique_url": "https://web.mit.edu", "aff_unique_abbr": "MIT", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Cambridge", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "3870028dce", "title": "Motivated Reinforcement Learning", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/051928341be67dcba03f0e04104d9047-Abstract.html", "author": "Peter Dayan", "abstract": "The standard reinforcement learning view of the involvement of neuromodulatory systems in instrumental conditioning in(cid:173) cludes a rather straightforward conception of motivation as 
prediction of sum future reward. Competition between actions is based on the motivating characteristics of their consequent states in this sense. Substantial, careful, experiments reviewed in Dickinson & Balleine, 12,13 into the neurobiology and psychol(cid:173) ogy of motivation shows that this view is incomplete. In many cases, animals are faced with the choice not between many dif(cid:173) ferent actions at a given state, but rather whether a single re(cid:173) sponse is worth executing at all. Evidence suggests that the motivational process underlying this choice has different psy(cid:173) chological and neural properties from that underlying action choice. We describe and model these motivational systems, and consider the way they interact.", "bibtex": "@inproceedings{NIPS2001_05192834,\n author = {Dayan, Peter},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Motivated Reinforcement Learning},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/051928341be67dcba03f0e04104d9047-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/051928341be67dcba03f0e04104d9047-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/051928341be67dcba03f0e04104d9047-Metadata.json", "review": "", "metareview": "", "pdf_size": 1879019, "gs_citation": 66, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11362499911488779489&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 11, "aff": "Gatsby Computational Neuroscience Unit", "aff_domain": "gatsby.ucl.ac.uk", "email": "gatsby.ucl.ac.uk", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "University College London", "aff_unique_dep": "Gatsby Computational Neuroscience Unit", "aff_unique_url": 
"https://www.ucl.ac.uk", "aff_unique_abbr": "UCL", "aff_country_unique_index": "0", "aff_country_unique": "United Kingdom" }, { "id": "00f96e51c9", "title": "Multi Dimensional ICA to Separate Correlated Sources", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/3875115bacc48cca24ac51ee4b0e7975-Abstract.html", "author": "Roland Vollgraf; Klaus Obermayer", "abstract": "We present a new method for the blind separation of sources, which do not fulfill the independence assumption. In contrast to standard methods we consider groups of neighboring samples (\"patches\") within the observed mixtures. First we extract independent features from the observed patches. It turns out that the average dependencies between these features in different sources is in general lower than the dependencies be(cid:173) tween the amplitudes of different sources. We show that it might be the case that most of the dependencies is carried by only a small number of features. In this case - provided these features can be identified by some heuristic - we project all patches into the subspace which is orthogonal to the subspace spanned by the \"correlated\" features. Standard ICA is then performed on the elements of the transformed patches (for which the independence assumption holds) and ro(cid:173) bustly yields a good estimate of the mixing matrix.", "bibtex": "@inproceedings{NIPS2001_3875115b,\n author = {Vollgraf, Roland and Obermayer, Klaus},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Multi Dimensional ICA to Separate Correlated Sources},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/3875115bacc48cca24ac51ee4b0e7975-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/3875115bacc48cca24ac51ee4b0e7975-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/3875115bacc48cca24ac51ee4b0e7975-Metadata.json", "review": "", "metareview": "", "pdf_size": 1491736, "gs_citation": 25, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=638010870434643987&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Department of Electrical Engineering and Computer Science, Technical University of Berlin Germany; Department of Electrical Engineering and Computer Science, Technical University of Berlin Germany", "aff_domain": "cs.tu-berlin.de;cs.tu-berlin.de", "email": "cs.tu-berlin.de;cs.tu-berlin.de", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Technical University of Berlin", "aff_unique_dep": "Department of Electrical Engineering and Computer Science", "aff_unique_url": "https://www.tu-berlin.de", "aff_unique_abbr": "TU Berlin", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Berlin", "aff_country_unique_index": "0;0", "aff_country_unique": "Germany" }, { "id": "37399be60e", "title": "Multiagent Planning with Factored MDPs", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/7af6266cc52234b5aa339b16695f7fc4-Abstract.html", "author": "Carlos Guestrin; Daphne Koller; Ronald Parr", "abstract": "We present a principled and ef\ufb01cient planning algorithm for cooperative multia- gent dynamic systems. 
A striking feature of our method is that the coordination and communication between the agents is not imposed, but derived directly from the system dynamics and function approximation architecture. We view the en- tire multiagent system as a single, large Markov decision process (MDP), which we assume can be represented in a factored way using a dynamic Bayesian net- work (DBN). The action space of the resulting MDP is the joint action space of the entire set of agents. Our approach is based on the use of factored linear value functions as an approximation to the joint value function. This factorization of the value function allows the agents to coordinate their actions at runtime using a natural message passing scheme. We provide a simple and ef\ufb01cient method for computing such an approximate value function by solving a single linear pro- gram, whose size is determined by the interaction between the value function structure and the DBN. We thereby avoid the exponential blowup in the state and action space. We show that our approach compares favorably with approaches based on reward sharing. We also show that our algorithm is an ef\ufb01cient alterna- tive to more complicated algorithms even in the single agent case.", "bibtex": "@inproceedings{NIPS2001_7af6266c,\n author = {Guestrin, Carlos and Koller, Daphne and Parr, Ronald},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Multiagent Planning with Factored MDPs},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/7af6266cc52234b5aa339b16695f7fc4-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/7af6266cc52234b5aa339b16695f7fc4-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/7af6266cc52234b5aa339b16695f7fc4-Metadata.json", "review": "", "metareview": "", "pdf_size": 350800, "gs_citation": 675, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3613781715619627904&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 15, "aff": "Computer Science Dept, Stanford University; Computer Science Dept, Stanford University; Computer Science Dept, Duke University", "aff_domain": "cs.stanford.edu;cs.stanford.edu;cs.duke.edu", "email": "cs.stanford.edu;cs.stanford.edu;cs.duke.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;1", "aff_unique_norm": "Stanford University;Duke University", "aff_unique_dep": "Computer Science Dept;Computer Science Dept", "aff_unique_url": "https://www.stanford.edu;https://www.duke.edu", "aff_unique_abbr": "Stanford;Duke", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Stanford;", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "ae181a7111", "title": "Multiplicative Updates for Classification by Mixture Models", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/e0a209539d1e74ab9fe46b9e01a19a97-Abstract.html", "author": "Lawrence K. Saul; Daniel D. Lee", "abstract": "We investigate a learning algorithm for the classi\ufb01cation of nonnegative data by mixture models. Multiplicative update rules are derived that directly optimize the performance of these models as classi\ufb01ers. The update rules have a simple closed form and an intuitive appeal. 
Our algorithm retains the main virtues of the Expectation-Maximization (EM) algorithm\u2014its guarantee of monotonic im- provement, and its absence of tuning parameters\u2014with the added advantage of optimizing a discriminative objective function. The algorithm reduces as a spe- cial case to the method of generalized iterative scaling for log-linear models. The learning rate of the algorithm is controlled by the sparseness of the training data. We use the method of nonnegative matrix factorization (NMF) to discover sparse distributed representations of the data. This form of feature selection greatly accelerates learning and makes the algorithm practical on large problems. Ex- periments show that discriminatively trained mixture models lead to much better classi\ufb01cation than comparably sized models trained by EM.", "bibtex": "@inproceedings{NIPS2001_e0a20953,\n author = {Saul, Lawrence and Lee, Daniel},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Multiplicative Updates for Classification by Mixture Models},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/e0a209539d1e74ab9fe46b9e01a19a97-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/e0a209539d1e74ab9fe46b9e01a19a97-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/e0a209539d1e74ab9fe46b9e01a19a97-Metadata.json", "review": "", "metareview": "", "pdf_size": 105081, "gs_citation": 63, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4981286832133469521&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 15, "aff": "Department of Computer and Information Science + Department of Electrical Engineering, University of Pennsylvania, Philadelphia, PA 19104; Department of Computer and Information Science + Department of Electrical Engineering, University of Pennsylvania, Philadelphia, PA 19104", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "1;1", "aff_unique_norm": ";University of Pennsylvania", "aff_unique_dep": ";Department of Electrical Engineering", "aff_unique_url": ";https://www.upenn.edu", "aff_unique_abbr": ";UPenn", "aff_campus_unique_index": "1;1", "aff_campus_unique": ";Philadelphia", "aff_country_unique_index": "1;1", "aff_country_unique": ";United States" }, { "id": "0317a54463", "title": "Natural Language Grammar Induction Using a Constituent-Context Model", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/2d00f43f07911355d4151f13925ff292-Abstract.html", "author": "Dan Klein; Christopher D. Manning", "abstract": "This paper presents a novel approach to the unsupervised learning of syn- tactic analyses of natural language text. Most previous work has focused on maximizing likelihood according to generative PCFG models. 
In con- trast, we employ a simpler probabilistic model over trees based directly on constituent identity and linear context, and use an EM-like iterative procedure to induce structure. This method produces much higher qual- ity analyses, giving the best published results on the ATIS dataset. 1 Overview", "bibtex": "@inproceedings{NIPS2001_2d00f43f,\n author = {Klein, Dan and Manning, Christopher D},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Natural Language Grammar Induction Using a Constituent-Context Model},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/2d00f43f07911355d4151f13925ff292-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/2d00f43f07911355d4151f13925ff292-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/2d00f43f07911355d4151f13925ff292-Metadata.json", "review": "", "metareview": "", "pdf_size": 77686, "gs_citation": 94, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11743570335478920847&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 28, "aff": "Computer Science Department, Stanford University; Computer Science Department, Stanford University", "aff_domain": "cs.stanford.edu;cs.stanford.edu", "email": "cs.stanford.edu;cs.stanford.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Stanford University", "aff_unique_dep": "Computer Science Department", "aff_unique_url": "https://www.stanford.edu", "aff_unique_abbr": "Stanford", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Stanford", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "7843fe5b3d", "title": "Neural Implementation of Bayesian Inference in Population Codes", "site": 
"https://papers.nips.cc/paper_files/paper/2001/hash/9d7311ba459f9e45ed746755a32dcd11-Abstract.html", "author": "Si Wu; Shun-ichi Amari", "abstract": "This study investigates a population decoding paradigm, in which the estimation of stimulus in the previous step is used as prior knowledge for consecutive decoding. We analyze the decoding accu(cid:173) racy of such a Bayesian decoder (Maximum a Posteriori Estimate), and show that it can be implemented by a biologically plausible recurrent network, where the prior knowledge of stimulus is con(cid:173) veyed by the change in recurrent interactions as a result of Hebbian learning.", "bibtex": "@inproceedings{NIPS2001_9d7311ba,\n author = {Wu, Si and Amari, Shun-ichi},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Neural Implementation of Bayesian Inference in Population Codes},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/9d7311ba459f9e45ed746755a32dcd11-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/9d7311ba459f9e45ed746755a32dcd11-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/9d7311ba459f9e45ed746755a32dcd11-Metadata.json", "review": "", "metareview": "", "pdf_size": 1226872, "gs_citation": 13, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18404736966647900957&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "Computer Science Department, Sheffield University, UK; Lab. for Mathematic Neuroscience, RIKEN Brain Science Institute, JAPAN", "aff_domain": "; ", "email": "; ", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "Sheffield University;RIKEN Brain Science Institute", "aff_unique_dep": "Computer Science Department;Lab. 
for Mathematic Neuroscience", "aff_unique_url": "https://www.sheffield.ac.uk;https://bri.riken.jp", "aff_unique_abbr": "Sheffield;RIKEN BSI", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1", "aff_country_unique": "United Kingdom;Japan" }, { "id": "70dcfd07bb", "title": "Novel iteration schemes for the Cluster Variation Method", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/9a49a25d845a483fae4be7e341368e36-Abstract.html", "author": "Hilbert J. Kappen; Wim Wiegerinck", "abstract": "The Cluster Variation method is a class of approximation meth(cid:173) ods containing the Bethe and Kikuchi approximations as special cases. We derive two novel iteration schemes for the Cluster Vari(cid:173) ation Method. One is a fixed point iteration scheme which gives a significant improvement over loopy BP, mean field and TAP meth(cid:173) ods on directed graphical models. The other is a gradient based method, that is guaranteed to converge and is shown to give useful results on random graphs with mild frustration. We conclude that the methods are of significant practical value for large inference problems.", "bibtex": "@inproceedings{NIPS2001_9a49a25d,\n author = {Kappen, Hilbert and Wiegerinck, Wim},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Novel iteration schemes for the Cluster Variation Method},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/9a49a25d845a483fae4be7e341368e36-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/9a49a25d845a483fae4be7e341368e36-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/9a49a25d845a483fae4be7e341368e36-Metadata.json", "review": "", "metareview": "", "pdf_size": 1341387, "gs_citation": 38, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18076357102051340183&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 13, "aff": "Department of Biophysics, Nijmegen University, Nijmegen, the Netherlands; Department of Biophysics, Nijmegen University, Nijmegen, the Netherlands", "aff_domain": "mbfys.kun.nl; mbfys.kun.nl", "email": "mbfys.kun.nl; mbfys.kun.nl", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Nijmegen University", "aff_unique_dep": "Department of Biophysics", "aff_unique_url": "https://www.ru.nl/", "aff_unique_abbr": "", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Nijmegen", "aff_country_unique_index": "0;0", "aff_country_unique": "Netherlands" }, { "id": "3394003642", "title": "On Discriminative vs. Generative Classifiers: A comparison of logistic regression and naive Bayes", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/7b7a53e239400a13bd6be6c91c4f6c4e-Abstract.html", "author": "Andrew Y. Ng; Michael I. Jordan", "abstract": "We compare discriminative and generative learning as typified by logistic regression and naive Bayes. 
We show, contrary to a widely(cid:173) held belief that discriminative classifiers are almost always to be preferred, that there can often be two distinct regimes of per(cid:173) formance as the training set size is increased, one in which each algorithm does better. This stems from the observation- which is borne out in repeated experiments- that while discriminative learning has lower asymptotic error, a generative classifier may also approach its (higher) asymptotic error much faster.", "bibtex": "@inproceedings{NIPS2001_7b7a53e2,\n author = {Ng, Andrew and Jordan, Michael},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {On Discriminative vs. Generative Classifiers: A comparison of logistic regression and naive Bayes},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/7b7a53e239400a13bd6be6c91c4f6c4e-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/7b7a53e239400a13bd6be6c91c4f6c4e-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/7b7a53e239400a13bd6be6c91c4f6c4e-Metadata.json", "review": "", "metareview": "", "pdf_size": 1608131, "gs_citation": 3859, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14707489917023208212&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 15, "aff": "Computer Science Division, University of California, Berkeley; C.S. Div. & Dept. 
of Stat., University of California, Berkeley", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of California, Berkeley", "aff_unique_dep": "Computer Science Division", "aff_unique_url": "https://www.berkeley.edu", "aff_unique_abbr": "UC Berkeley", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Berkeley", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "068c34f0ec", "title": "On Kernel-Target Alignment", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/1f71e393b3809197ed66df836fe833e5-Abstract.html", "author": "Nello Cristianini; John Shawe-Taylor; Andr\u00e9 Elisseeff; Jaz S. Kandola", "abstract": "We introduce the notion of kernel-alignment, a measure of similar(cid:173) ity between two kernel functions or between a kernel and a target function. This quantity captures the degree of agreement between a kernel and a given learning task, and has very natural interpre(cid:173) tations in machine learning, leading also to simple algorithms for model selection and learning. We analyse its theoretical properties, proving that it is sharply concentrated around its expected value, and we discuss its relation with other standard measures of per(cid:173) formance. Finally we describe some of the algorithms that can be obtained within this framework, giving experimental results show(cid:173) ing that adapting the kernel to improve alignment on the labelled data significantly increases the alignment on the test set, giving improved classification accuracy. Hence, the approach provides a principled method of performing transduction.", "bibtex": "@inproceedings{NIPS2001_1f71e393,\n author = {Cristianini, Nello and Shawe-Taylor, John and Elisseeff, Andr\\'{e} and Kandola, Jaz},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {On Kernel-Target Alignment},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/1f71e393b3809197ed66df836fe833e5-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/1f71e393b3809197ed66df836fe833e5-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/1f71e393b3809197ed66df836fe833e5-Metadata.json", "review": "", "metareview": "", "pdf_size": 1375117, "gs_citation": 1484, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18079505215581227669&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 14, "aff": "BIOwulf Technologies; BIOwulf Technologies; Royal Holloway, University of London; Royal Holloway, University of London", "aff_domain": "support-vector.net;barnhilltechnologies.com;cs.rhul.ac.uk;cs.rhul.ac.uk", "email": "support-vector.net;barnhilltechnologies.com;cs.rhul.ac.uk;cs.rhul.ac.uk", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;0;1;1", "aff_unique_norm": "Biowulf Technologies;University of London", "aff_unique_dep": ";", "aff_unique_url": "http://biowulf.com;https://www.royalholloway.ac.uk", "aff_unique_abbr": ";RHUL", "aff_campus_unique_index": "1;1", "aff_campus_unique": ";Royal Holloway", "aff_country_unique_index": "0;0;1;1", "aff_country_unique": "United States;United Kingdom" }, { "id": "4fdae119e3", "title": "On Spectral Clustering: Analysis and an algorithm", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/801272ee79cfde7fa5960571fee36b9b-Abstract.html", "author": "Andrew Y. Ng; Michael I. Jordan; Yair Weiss", "abstract": "Despite many empirical successes of spectral clustering methods(cid:173) algorithms that cluster points using eigenvectors of matrices de(cid:173) rived from the data- there are several unresolved issues. 
First, there are a wide variety of algorithms that use the eigenvectors in slightly different ways. Second, many of these algorithms have no proof that they will actually compute a reasonable clustering. In this paper, we present a simple spectral clustering algorithm that can be implemented using a few lines of Matlab. Using tools from matrix perturbation theory, we analyze the algorithm, and give conditions under which it can be expected to do well. We also show surprisingly good experimental results on a number of challenging clustering problems.", "bibtex": "@inproceedings{NIPS2001_801272ee,\n author = {Ng, Andrew and Jordan, Michael and Weiss, Yair},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {On Spectral Clustering: Analysis and an algorithm},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/801272ee79cfde7fa5960571fee36b9b-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/801272ee79cfde7fa5960571fee36b9b-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/801272ee79cfde7fa5960571fee36b9b-Metadata.json", "review": "", "metareview": "", "pdf_size": 1470733, "gs_citation": 13104, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18377783760711975365&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 16, "aff": "CS Division U.C. Berkeley; CS Div. & Dept. of Stat. U.C. Berkeley; School of CS & Engr. 
The Hebrew Univ.", "aff_domain": "cs.berkeley.edu;cs.berkeley.edu;cs.huji.ac.il", "email": "cs.berkeley.edu;cs.berkeley.edu;cs.huji.ac.il", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;1", "aff_unique_norm": "University of California, Berkeley;Hebrew University of Jerusalem", "aff_unique_dep": "Computer Science Division;School of Computer Science and Engineering", "aff_unique_url": "https://www.berkeley.edu;https://www.huji.ac.il", "aff_unique_abbr": "UC Berkeley;HUJI", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Berkeley;", "aff_country_unique_index": "0;0;1", "aff_country_unique": "United States;Israel" }, { "id": "14f3a0b8c0", "title": "On the Concentration of Spectral Properties", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/5227b6aaf294f5f027273aebf16015f2-Abstract.html", "author": "John Shawe-Taylor; Nello Cristianini; Jaz S. Kandola", "abstract": "We consider the problem of measuring the eigenvalues of a ran(cid:173) domly drawn sample of points. We show that these values can be reliably estimated as can the sum of the tail of eigenvalues. Fur(cid:173) thermore, the residuals when data is projected into a subspace is shown to be reliably estimated on a random sample. Experiments are presented that confirm the theoretical results.", "bibtex": "@inproceedings{NIPS2001_5227b6aa,\n author = {Shawe-Taylor, John and Cristianini, Nello and Kandola, Jaz},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {On the Concentration of Spectral Properties},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/5227b6aaf294f5f027273aebf16015f2-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/5227b6aaf294f5f027273aebf16015f2-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/5227b6aaf294f5f027273aebf16015f2-Metadata.json", "review": "", "metareview": "", "pdf_size": 1023292, "gs_citation": 43, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4830459806866577012&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "Royal Holloway, University of London; BIOwulf Technologies + Royal Holloway, University of London; Royal Holloway, University of London", "aff_domain": "cs.rhul.ac.uk;cs.rhul.ac.uk;support-vector.net", "email": "cs.rhul.ac.uk;cs.rhul.ac.uk;support-vector.net", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1+0;0", "aff_unique_norm": "University of London;Biowulf Technologies", "aff_unique_dep": ";", "aff_unique_url": "https://www.royalholloway.ac.uk;http://biowulf.com", "aff_unique_abbr": "RHUL;", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Royal Holloway;", "aff_country_unique_index": "0;1+0;0", "aff_country_unique": "United Kingdom;United States" }, { "id": "55991d14be", "title": "On the Convergence of Leveraging", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/c215b446bcdf956d848a8419c1b5a920-Abstract.html", "author": "Gunnar R\u00e4tsch; Sebastian Mika; Manfred K. Warmuth", "abstract": "We give an unified convergence analysis of ensemble learning meth- ods including e.g. AdaBoost, Logistic Regression and the Least-Square- Boost algorithm for regression. 
These methods have in common that they iteratively call a base learning algorithm which returns hypotheses that are then linearly combined. We show that these methods are related to the Gauss-Southwell method known from numerical optimization and state non-asymptotical convergence results for all these methods. Our analysis includes ` 1 -norm regularized cost functions leading to a clean and general way to regularize ensemble learning. 1 Introduction", "bibtex": "@inproceedings{NIPS2001_c215b446,\n author = {R\\\"{a}tsch, Gunnar and Mika, Sebastian and Warmuth, Manfred K. K},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {On the Convergence of Leveraging},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/c215b446bcdf956d848a8419c1b5a920-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/c215b446bcdf956d848a8419c1b5a920-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/c215b446bcdf956d848a8419c1b5a920-Metadata.json", "review": "", "metareview": "", "pdf_size": 264336, "gs_citation": 49, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17052983262240039696&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 25, "aff": "RSISE, Australian National University, Canberra, ACT 0200 Australia; Fraunhofer FIRST, Kekul\u00b4 estr. 
7, 12489 Berlin, Germany + University of California at Santa Cruz, CA 95060, USA; University of California at Santa Cruz, CA 95060, USA", "aff_domain": "csl.anu.edu.au; first.fhg.de;cse.ucsc.edu", "email": "csl.anu.edu.au; first.fhg.de;cse.ucsc.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1+2;2", "aff_unique_norm": "Australian National University;Fraunhofer Institute for Software and Systems Engineering;University of California, Santa Cruz", "aff_unique_dep": "RSISE;;", "aff_unique_url": "https://www.anu.edu.au;https://www.first.fraunhofer.de/;https://www.ucsc.edu", "aff_unique_abbr": "ANU;Fraunhofer FIRST;UCSC", "aff_campus_unique_index": "0;2;2", "aff_campus_unique": "Canberra;;Santa Cruz", "aff_country_unique_index": "0;1+2;2", "aff_country_unique": "Australia;Germany;United States" }, { "id": "087f0a35ab", "title": "On the Generalization Ability of On-Line Learning Algorithms", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/01931a6925d3de09e5f87419d9d55055-Abstract.html", "author": "Nicol\u00f2 Cesa-bianchi; Alex Conconi; Claudio Gentile", "abstract": "In this paper we show that on-line algorithms for classi\ufb01cation and re- gression can be naturally used to obtain hypotheses with good data- dependent tail bounds on their risk. Our results are proven without re- quiring complicated concentration-of-measure arguments and they hold for arbitrary on-line learning algorithms. Furthermore, when applied to concrete on-line algorithms, our results yield tail bounds that in many cases are comparable or better than the best known bounds.", "bibtex": "@inproceedings{NIPS2001_01931a69,\n author = {Cesa-bianchi, Nicol\\`{o} and Conconi, Alex and Gentile, Claudio},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {On the Generalization Ability of On-Line Learning Algorithms},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/01931a6925d3de09e5f87419d9d55055-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/01931a6925d3de09e5f87419d9d55055-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/01931a6925d3de09e5f87419d9d55055-Metadata.json", "review": "", "metareview": "", "pdf_size": 106788, "gs_citation": 652, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13389885368466823178&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 18, "aff": "DTI, University of Milan; DTI, University of Milan; DSI, University of Milan", "aff_domain": "dti.unimi.it;dti.unimi.it;dsi.unimi.it", "email": "dti.unimi.it;dti.unimi.it;dsi.unimi.it", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of Milan", "aff_unique_dep": "DTI", "aff_unique_url": "https://www.unimi.it", "aff_unique_abbr": "UniMi", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Italy" }, { "id": "9c6bd4f84d", "title": "Online Learning with Kernels", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/bd5af7cd922fd2603be4ee3dc43b0b77-Abstract.html", "author": "Jyrki Kivinen; Alex J. Smola; Robert C. Williamson", "abstract": "We consider online learning in a Reproducing Kernel Hilbert Space. Our method is computationally ef\ufb01cient and leads to simple algorithms. In particular we derive update equations for classi\ufb01cation, regression, and novelty detection. The inclusion of the -trick allows us to give a robust parameterization. 
Moreover, unlike in batch learning where the -trick only applies to the -insensitive loss function we are able to derive gen- eral trimmed-mean types of estimators such as for Huber\u2019s robust loss.", "bibtex": "@inproceedings{NIPS2001_bd5af7cd,\n author = {Kivinen, Jyrki and Smola, Alex and Williamson, Robert C},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Online Learning with Kernels},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/bd5af7cd922fd2603be4ee3dc43b0b77-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/bd5af7cd922fd2603be4ee3dc43b0b77-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/bd5af7cd922fd2603be4ee3dc43b0b77-Metadata.json", "review": "", "metareview": "", "pdf_size": 260758, "gs_citation": 1209, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14302554628563658353&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 14, "aff": ";;", "aff_domain": ";;", "email": ";;", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster" }, { "id": "4d21f7f55e", "title": "Optimising Synchronisation Times for Mobile Devices", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/253614bbac999b38b5b60cae531c4969-Abstract.html", "author": "Neil D. Lawrence; Antony I. T. Rowstron; Christopher M. Bishop; Michael J. Taylor", "abstract": "With the increasing number of users of mobile computing devices (e.g. personal digital assistants) and the advent of third generation mobile phones, wireless communications are becoming increasingly important. Many applications rely on the device maintaining a replica of a data-structure which is stored on a server, for exam(cid:173) ple news databases, calendars and e-mail. 
In this paper we explore the question of the optimal strategy for synchronising such replicas. We utilise probabilistic models to represent how the data-structures evolve and to model user behaviour. We then formulate objective functions which can be minimised with respect to the synchronisation timings. We demonstrate, using two real world data-sets, that a user can obtain more up-to-date information using our approach.", "bibtex": "@inproceedings{NIPS2001_253614bb,\n author = {Lawrence, Neil and Rowstron, Antony I. T. and Bishop, Christopher and Taylor, Michael J.},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Optimising Synchronisation Times for Mobile Devices},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/253614bbac999b38b5b60cae531c4969-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/253614bbac999b38b5b60cae531c4969-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/253614bbac999b38b5b60cae531c4969-Metadata.json", "review": "", "metareview": "", "pdf_size": 1617522, "gs_citation": 7, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4685967261451452617&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "Department of Computer Science, Regent Court, 211 Portobello Road, Sheffield, S1 4DP, U.K.; Microsoft Research; Microsoft Research; Microsoft Research", "aff_domain": "dcs.shef.ac.uk;microsoft.com;microsoft.com;microsoft.com", "email": "dcs.shef.ac.uk;microsoft.com;microsoft.com;microsoft.com", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;1;1;1", "aff_unique_norm": "University of Sheffield;Microsoft", "aff_unique_dep": "Department of Computer Science;Microsoft Research", "aff_unique_url": 
"https://www.sheffield.ac.uk;https://www.microsoft.com/en-us/research", "aff_unique_abbr": "Sheffield;MSR", "aff_campus_unique_index": "0", "aff_campus_unique": "Sheffield;", "aff_country_unique_index": "0;1;1;1", "aff_country_unique": "United Kingdom;United States" }, { "id": "5092bcdf8c", "title": "Orientation-Selective aVLSI Spiking Neurons", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/0189caa552598b845b29b17a427692d1-Abstract.html", "author": "Shih-Chii Liu; J\u00f6rg Kramer; Giacomo Indiveri; Tobi Delbr\u00fcck; Rodney J. Douglas", "abstract": "We describe a programmable multi-chip VLSI neuronal system that can be used for exploring spike-based information processing models. The system consists of a silicon retina, a PIC microcontroller, and a transceiver chip whose integrate-and-\ufb01re neurons are connected in a soft winner-take-all architecture. The circuit on this multi-neuron chip ap- proximates a cortical microcircuit. The neurons can be con\ufb01gured for different computational properties by the virtual connections of a se- lected set of pixels on the silicon retina. The virtual wiring between the different chips is effected by an event-driven communication pro- tocol that uses asynchronous digital pulses, similar to spikes in a neu- ronal system. We used the multi-chip spike-based system to synthe- size orientation-tuned neurons using both a feedforward model and a feedback model. The performance of our analog hardware spiking model matched the experimental observations and digital simulations of continuous-valued neurons. 
The multi-chip VLSI system has advantages over computer neuronal models in that it is real-time, and the computa- tional time does not scale with the size of the neuronal network.", "bibtex": "@inproceedings{NIPS2001_0189caa5,\n author = {Liu, Shih-Chii and Kramer, J\\\"{o}rg and Indiveri, Giacomo and Delbr\\\"{u}ck, Tobi and Douglas, Rodney},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Orientation-Selective aVLSI Spiking Neurons},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/0189caa552598b845b29b17a427692d1-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/0189caa552598b845b29b17a427692d1-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/0189caa552598b845b29b17a427692d1-Metadata.json", "review": "", "metareview": "", "pdf_size": 78823, "gs_citation": 138, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8343694037471871664&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 24, "aff": "Institute of Neuroinformatics, University of Zurich and ETH Zurich; Institute of Neuroinformatics, University of Zurich and ETH Zurich; Institute of Neuroinformatics, University of Zurich and ETH Zurich; Institute of Neuroinformatics, University of Zurich and ETH Zurich; Institute of Neuroinformatics, University of Zurich and ETH Zurich", "aff_domain": ";;;;", "email": ";;;;", "github": "", "project": "", "author_num": 5, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;0;0", "aff_unique_norm": "University of Zurich", "aff_unique_dep": "Institute of Neuroinformatics", "aff_unique_url": "https://www.neuro.ethz.ch/", "aff_unique_abbr": "UZH", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "Switzerland" }, { "id": "c80a43714b", "title": 
"Orientational and Geometric Determinants of Place and Head-direction", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/c8758b517083196f05ac29810b924aca-Abstract.html", "author": "Neil Burgess; Tom Hartley", "abstract": "We present a model of the firing of place and head-direction cells in rat hippocampus. The model can predict the response of individual cells and populations to parametric manipulations of both geometric (e.g. O'Keefe & Burgess, 1996) and orientational (Fenton et al., 2000a) cues, extending a previous geometric model (Hartley et al., 2000). It provides a functional description of how these cells' spatial responses are derived from the rat's environment and makes easily testable quantitative predictions. Consideration of the phenomenon of remapping (Muller & Kubie, 1987; Bostock et al., 1991) indicates that the model may also be consistent with non-parametric changes in firing, and provides constraints for its future development.", "bibtex": "@inproceedings{NIPS2001_c8758b51,\n author = {Burgess, Neil and Hartley, Tom},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Orientational and Geometric Determinants of Place and Head-direction},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/c8758b517083196f05ac29810b924aca-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/c8758b517083196f05ac29810b924aca-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/c8758b517083196f05ac29810b924aca-Metadata.json", "review": "", "metareview": "", "pdf_size": 923712, "gs_citation": 33, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13574420340699375077&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 14, "aff": "Institute of Cognitive Neuroscience & Department of Anatomy, UCL; Institute of Cognitive Neuroscience & Department of Anatomy, UCL", "aff_domain": "ucl.ac.uk;ucl.ac.uk", "email": "ucl.ac.uk;ucl.ac.uk", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University College London", "aff_unique_dep": "Institute of Cognitive Neuroscience & Department of Anatomy", "aff_unique_url": "https://www.ucl.ac.uk", "aff_unique_abbr": "UCL", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United Kingdom" }, { "id": "efb912f39f", "title": "PAC Generalization Bounds for Co-training", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/4c144c47ecba6f8318128703ca9e2601-Abstract.html", "author": "Sanjoy Dasgupta; Michael L. Littman; David A. McAllester", "abstract": "The rule-based bootstrapping introduced by Yarowsky, and its co- training variant by Blum and Mitchell, have met with considerable em- pirical success. Earlier work on the theory of co-training has been only loosely related to empirically useful co-training algorithms. 
Here we give a new PAC-style bound on generalization error which justi\ufb01es both the use of con\ufb01dences \u2014 partial rules and partial labeling of the unlabeled data \u2014 and the use of an agreement-based objective function as sug- gested by Collins and Singer. Our bounds apply to the multiclass case, i.e., where instances are to be assigned one of", "bibtex": "@inproceedings{NIPS2001_4c144c47,\n author = {Dasgupta, Sanjoy and Littman, Michael and McAllester, David},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {PAC Generalization Bounds for Co-training},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/4c144c47ecba6f8318128703ca9e2601-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/4c144c47ecba6f8318128703ca9e2601-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/4c144c47ecba6f8318128703ca9e2601-Metadata.json", "review": "", "metareview": "", "pdf_size": 90821, "gs_citation": 427, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17949972967515886217&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 19, "aff": "AT&T Labs\u2013Research; AT&T Labs\u2013Research; AT&T Labs\u2013Research", "aff_domain": "research.att.com;research.att.com;research.att.com", "email": "research.att.com;research.att.com;research.att.com", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "AT&T Labs", "aff_unique_dep": "Research", "aff_unique_url": "https://www.att.com/labs/research", "aff_unique_abbr": "AT&T Labs", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "c17a88d5d5", "title": "Partially labeled classification with Markov random walks", 
"site": "https://papers.nips.cc/paper_files/paper/2001/hash/a82d922b133be19c1171534e6594f754-Abstract.html", "author": "Martin Szummer; Tommi Jaakkola", "abstract": "To classify a large number of unlabeled examples we combine a lim- ited number of labeled examples with a Markov random walk represen- tation over the unlabeled examples. The random walk representation ex- ploits any low dimensional structure in the data in a robust, probabilistic manner. We develop and compare several estimation criteria/algorithms suited to this representation. This includes in particular multi-way clas- si\ufb01cation with an average margin criterion which permits a closed form solution. The time scale of the random walk regularizes the representa- tion and can be set through a margin-based criterion favoring unambigu- ous classi\ufb01cation. We also extend this basic regularization by adapting time scales for individual examples. We demonstrate the approach on synthetic examples and on text classi\ufb01cation problems.", "bibtex": "@inproceedings{NIPS2001_a82d922b,\n author = {Szummer, Martin and Jaakkola, Tommi},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Partially labeled classification with Markov random walks},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/a82d922b133be19c1171534e6594f754-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/a82d922b133be19c1171534e6594f754-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/a82d922b133be19c1171534e6594f754-Metadata.json", "review": "", "metareview": "", "pdf_size": 110696, "gs_citation": 851, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6059498259680434901&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 19, "aff": "MIT AI Lab & CBCL; MIT AI Lab", "aff_domain": "ai.mit.edu;ai.mit.edu", "email": "ai.mit.edu;ai.mit.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Massachusetts Institute of Technology", "aff_unique_dep": "Artificial Intelligence Laboratory & Center for Biological and Computational Learning", "aff_unique_url": "http://www.ai.mit.edu", "aff_unique_abbr": "MIT AI Lab", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Cambridge", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "e8a6cbb443", "title": "Perceptual Metamers in Stereoscopic Vision", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/b5f1e8fb36cd7fbeb7988e8639ac79e9-Abstract.html", "author": "B. T. Backus", "abstract": "Abstract Unavailable", "bibtex": "@inproceedings{NIPS2001_b5f1e8fb,\n author = {Backus, B.},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Perceptual Metamers in Stereoscopic Vision},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/b5f1e8fb36cd7fbeb7988e8639ac79e9-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/b5f1e8fb36cd7fbeb7988e8639ac79e9-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/b5f1e8fb36cd7fbeb7988e8639ac79e9-Metadata.json", "review": "", "metareview": "", "pdf_size": 53062, "gs_citation": 13, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5113326772405907270&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "Department of Psychology, University of Pennsylvania", "aff_domain": "psych.upenn.edu", "email": "psych.upenn.edu", "github": "", "project": "http://psych.upenn.edu/~backus", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "University of Pennsylvania", "aff_unique_dep": "Department of Psychology", "aff_unique_url": "https://www.upenn.edu", "aff_unique_abbr": "UPenn", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "id": "3aee15a0f4", "title": "Playing is believing: The role of beliefs in multi-agent learning", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/1b36ea1c9b7a1c3ad668b8bb5df7963f-Abstract.html", "author": "Yu-Han Chang; Leslie Pack Kaelbling", "abstract": "We propose a new classi\ufb01cation for multi-agent learning algorithms, with each league of players characterized by both their possible strategies and possible beliefs. Using this classi\ufb01cation, we review the optimality of ex- isting algorithms, including the case of interleague play. 
We propose an incremental improvement to the existing algorithms that seems to achieve average payoffs that are at least the Nash equilibrium payoffs in the long- run against fair opponents.", "bibtex": "@inproceedings{NIPS2001_1b36ea1c,\n author = {Chang, Yu-Han and Kaelbling, Leslie Pack},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Playing is believing: The role of beliefs in multi-agent learning},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/1b36ea1c9b7a1c3ad668b8bb5df7963f-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/1b36ea1c9b7a1c3ad668b8bb5df7963f-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/1b36ea1c9b7a1c3ad668b8bb5df7963f-Metadata.json", "review": "", "metareview": "", "pdf_size": 83925, "gs_citation": 68, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3831949848116783883&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 15, "aff": "Artificial Intelligence Laboratory, Massachusetts Institute of Technology, Cambridge, Massachusetts 02139; Artificial Intelligence Laboratory, Massachusetts Institute of Technology, Cambridge, Massachusetts 02139", "aff_domain": "ai.mit.edu;ai.mit.edu", "email": "ai.mit.edu;ai.mit.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Massachusetts Institute of Technology", "aff_unique_dep": "Artificial Intelligence Laboratory", "aff_unique_url": "https://www.mit.edu", "aff_unique_abbr": "MIT", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Cambridge", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "a904ef71c3", "title": "Pranking with Ranking", "site": 
"https://papers.nips.cc/paper_files/paper/2001/hash/5531a5834816222280f20d1ef9e95f69-Abstract.html", "author": "Koby Crammer; Yoram Singer", "abstract": "We discuss the problem of ranking instances. In our framework each instance is associated with a rank or a rating, which is an integer from 1 to k. Our goal is to find a rank-prediction rule that assigns each instance a rank which is as close as possible to the instance's true rank. We describe a simple and efficient online al(cid:173) gorithm, analyze its performance in the mistake bound model, and prove its correctness. We describe two sets of experiments, with synthetic data and with the EachMovie dataset for collaborative filtering. In the experiments we performed, our algorithm outper(cid:173) forms online algorithms for regression and classification applied to ranking.", "bibtex": "@inproceedings{NIPS2001_5531a583,\n author = {Crammer, Koby and Singer, Yoram},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Pranking with Ranking},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/5531a5834816222280f20d1ef9e95f69-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/5531a5834816222280f20d1ef9e95f69-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/5531a5834816222280f20d1ef9e95f69-Metadata.json", "review": "", "metareview": "", "pdf_size": 1492616, "gs_citation": 904, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7522107179612302256&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 17, "aff": "School of Computer Science & Engineering, The Hebrew University, Jerusalem 91904, Israel; School of Computer Science & Engineering, The Hebrew University, Jerusalem 91904, Israel", "aff_domain": "cs.huji.ac.il;cs.huji.ac.il", "email": "cs.huji.ac.il;cs.huji.ac.il", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Hebrew University", "aff_unique_dep": "School of Computer Science & Engineering", "aff_unique_url": "http://www.huji.ac.il", "aff_unique_abbr": "HUJI", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Jerusalem", "aff_country_unique_index": "0;0", "aff_country_unique": "Israel" }, { "id": "e8ce9ee359", "title": "Predictive Representations of State", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/1e4d36177d71bbb3558e43af9577d70e-Abstract.html", "author": "Michael L. Littman; Richard S. Sutton", "abstract": "We show that states of a dynamical system can be usefully repre(cid:173) sented by multi-step, action-conditional predictions of future ob(cid:173) servations. State representations that are grounded in data in this way may be easier to learn, generalize better, and be less depen(cid:173) dent on accurate prior models than, for example, POMDP state representations. 
Building on prior work by Jaeger and by Rivest and Schapire, in this paper we compare and contrast a linear spe(cid:173) cialization of the predictive approach with the state representa(cid:173) tions used in POMDPs and in k-order Markov models. Ours is the first specific formulation of the predictive idea that includes both stochasticity and actions (controls). We show that any system has a linear predictive state representation with number of predictions no greater than the number of states in its minimal POMDP model.", "bibtex": "@inproceedings{NIPS2001_1e4d3617,\n author = {Littman, Michael and Sutton, Richard S},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Predictive Representations of State},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/1e4d36177d71bbb3558e43af9577d70e-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/1e4d36177d71bbb3558e43af9577d70e-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/1e4d36177d71bbb3558e43af9577d70e-Metadata.json", "review": "", "metareview": "", "pdf_size": 797664, "gs_citation": 747, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4041253610301763162&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 23, "aff": ";", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster" }, { "id": "d891a12cf1", "title": "Probabilistic Abstraction Hierarchies", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/0ae3f79a30234b6c45a6f7d298ba1310-Abstract.html", "author": "Eran Segal; Daphne Koller; Dirk Ormoneit", "abstract": "Part of", "bibtex": "@inproceedings{NIPS2001_0ae3f79a,\n author = {Segal, Eran and Koller, Daphne and Ormoneit, Dirk},\n booktitle = {Advances in Neural Information Processing Systems},\n 
editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Probabilistic Abstraction Hierarchies},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/0ae3f79a30234b6c45a6f7d298ba1310-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/0ae3f79a30234b6c45a6f7d298ba1310-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/0ae3f79a30234b6c45a6f7d298ba1310-Metadata.json", "review": "", "metareview": "", "pdf_size": 126967, "gs_citation": 47, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7501823798265405780&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 15, "aff": "Computer Science Dept., Stanford University; Computer Science Dept., Stanford University; Computer Science Dept., Stanford University", "aff_domain": "cs.stanford.edu;cs.stanford.edu;cs.stanford.edu", "email": "cs.stanford.edu;cs.stanford.edu;cs.stanford.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "Stanford University", "aff_unique_dep": "Computer Science Dept.", "aff_unique_url": "https://www.stanford.edu", "aff_unique_abbr": "Stanford", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Stanford", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "b86db289ef", "title": "Probabilistic Inference of Hand Motion from Neural Activity in Motor Cortex", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/06964dce9addb1c5cb5d6e3d9838f733-Abstract.html", "author": "Yun Gao; Michael J. Black; Elie Bienenstock; Shy Shoham; John P. Donoghue", "abstract": "Statistical learning and probabilistic inference techniques are used to in- fer the hand position of a subject from multi-electrode recordings of neu- ral activity in motor cortex. 
First, an array of electrodes provides train- ing data of neural \ufb01ring conditioned on hand kinematics. We learn a non- parametric representation of this \ufb01ring activity using a Bayesian model and rigorously compare it with previous models using cross-validation. Second, we infer a posterior probability distribution over hand motion conditioned on a sequence of neural test data using Bayesian inference. The learned \ufb01ring models of multiple cells are used to de\ufb01ne a non- Gaussian likelihood term which is combined with a prior probability for the kinematics. A particle \ufb01ltering method is used to represent, update, and propagate the posterior distribution over time. The approach is com- pared with traditional linear \ufb01ltering methods; the results suggest that it may be appropriate for neural prosthetic applications.", "bibtex": "@inproceedings{NIPS2001_06964dce,\n author = {Gao, Yun and Black, Michael and Bienenstock, Elie and Shoham, Shy and Donoghue, John},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Probabilistic Inference of Hand Motion from Neural Activity in Motor Cortex},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/06964dce9addb1c5cb5d6e3d9838f733-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/06964dce9addb1c5cb5d6e3d9838f733-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/06964dce9addb1c5cb5d6e3d9838f733-Metadata.json", "review": "", "metareview": "", "pdf_size": 224228, "gs_citation": 119, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9864550475015680283&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 20, "aff": ";;;;", "aff_domain": ";;;;", "email": ";;;;", "github": "", "project": "", "author_num": 5, "track": "main", "status": "Poster" }, { "id": "4a2fa9bbd7", "title": "Probabilistic principles in unsupervised learning of visual structure: human data and a model", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/1b5230e3ea6d7123847ad55a1e06fffd-Abstract.html", "author": "Shimon Edelman; Benjamin P. Hiles; Hwajin Yang; Nathan Intrator", "abstract": "To \ufb01nd out how the representations of structured visual objects depend on the co-occurrence statistics of their constituents, we exposed subjects to a set of composite images with tight control exerted over (1) the condi- tional probabilities of the constituent fragments, and (2) the value of Bar- low\u2019s criterion of \u201csuspicious coincidence\u201d (the ratio of joint probability to the product of marginals). We then compared the part veri\ufb01cation re- sponse times for various probe/target combinations before and after the exposure. For composite probes, the speedup was much larger for tar- gets that contained pairs of fragments perfectly predictive of each other, compared to those that did not. 
This effect was modulated by the sig- ni\ufb01cance of their co-occurrence as estimated by Barlow\u2019s criterion. For lone-fragment probes, the speedup in all conditions was generally lower than for composites. These results shed light on the brain\u2019s strategies for unsupervised acquisition of structural information in vision.", "bibtex": "@inproceedings{NIPS2001_1b5230e3,\n author = {Edelman, Shimon and Hiles, Benjamin and Yang, Hwajin and Intrator, Nathan},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Probabilistic principles in unsupervised learning of visual structure: human data and a model},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/1b5230e3ea6d7123847ad55a1e06fffd-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/1b5230e3ea6d7123847ad55a1e06fffd-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/1b5230e3ea6d7123847ad55a1e06fffd-Metadata.json", "review": "", "metareview": "", "pdf_size": 129196, "gs_citation": 24, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17963248000259764201&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 22, "aff": "Department of Psychology, Cornell University, Ithaca, NY 14853; Department of Psychology, Cornell University, Ithaca, NY 14853; Department of Psychology, Cornell University, Ithaca, NY 14853; Institute for Brain and Neural Systems, Box 1843, Brown University, Providence, RI 02912", "aff_domain": "cornell.edu;cornell.edu;cornell.edu;brown.edu", "email": "cornell.edu;cornell.edu;cornell.edu;brown.edu", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;1", "aff_unique_norm": "Cornell University;Brown University", "aff_unique_dep": "Department of Psychology;Institute for Brain and Neural 
Systems", "aff_unique_url": "https://www.cornell.edu;https://www.brown.edu", "aff_unique_abbr": "Cornell;Brown", "aff_campus_unique_index": "0;0;0;1", "aff_campus_unique": "Ithaca;Providence", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "id": "ad4347b6a7", "title": "Prodding the ROC Curve: Constrained Optimization of Classifier Performance", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/2cd4e8a2ce081c3d7c32c3cde4312ef7-Abstract.html", "author": "Michael Mozer; Robert Dodier; Michael D. Colagrosso; Cesar Guerra-Salcedo; Richard Wolniewicz", "abstract": "When designing a two-alternative classi\ufb01er, one ordinarily aims to maximize the classi\ufb01er\u2019s ability to discriminate between members of the two classes. We describe a situation in a real-world business application of machine-learning prediction in which an additional constraint is placed on the nature of the solu- tion: that the classi\ufb01er achieve a speci\ufb01ed correct acceptance or correct rejection rate (i.e., that it achieve a \ufb01xed accuracy on members of one class or the other). Our domain is predicting churn in the telecommunications industry. Churn refers to customers who switch from one service provider to another. We pro- pose four algorithms for training a classi\ufb01er subject to this domain constraint, and present results showing that each algorithm yields a reliable improvement in performance. Although the improvement is modest in magnitude, it is nonethe- less impressive given the dif\ufb01culty of the problem and the \ufb01nancial return that it achieves to the service provider.", "bibtex": "@inproceedings{NIPS2001_2cd4e8a2,\n author = {Mozer, Michael C and Dodier, Robert and Colagrosso, Michael and Guerra-Salcedo, Cesar and Wolniewicz, Richard},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Prodding the ROC Curve: Constrained Optimization of Classifier Performance},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/2cd4e8a2ce081c3d7c32c3cde4312ef7-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/2cd4e8a2ce081c3d7c32c3cde4312ef7-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/2cd4e8a2ce081c3d7c32c3cde4312ef7-Metadata.json", "review": "", "metareview": "", "pdf_size": 133376, "gs_citation": 64, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2370566188198042724&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 14, "aff": "Advanced Technology Group; Advanced Technology Group; Advanced Technology Group + Department of Computer Science; Advanced Technology Group; Advanced Technology Group", "aff_domain": ";;;;", "email": ";;;;", "github": "", "project": "", "author_num": 5, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0+1;0;0", "aff_unique_norm": "Advanced Technology Group;Unknown Institution", "aff_unique_dep": ";Department of Computer Science", "aff_unique_url": ";", "aff_unique_abbr": ";", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "", "aff_country_unique": "" }, { "id": "d309195b1e", "title": "Product Analysis: Learning to Model Observations as Products of Hidden Variables", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/deb54ffb41e085fd7f69a75b6359c989-Abstract.html", "author": "Brendan J. Frey; Anitha Kannan; Nebojsa Jojic", "abstract": "Factor analysis and principal components analysis can be used to model linear relationships between observed variables and linearly map high-dimensional data to a lower-dimensional hidden space. In factor analysis, the observations are modeled as a linear com(cid:173) bination of normally distributed hidden variables. 
We describe a nonlinear generalization of factor analysis, called \"product analy(cid:173) sis\", that models the observed variables as a linear combination of products of normally distributed hidden variables. Just as fac(cid:173) tor analysis can be viewed as unsupervised linear regression on unobserved, normally distributed hidden variables, product anal(cid:173) ysis can be viewed as unsupervised linear regression on products of unobserved, normally distributed hidden variables. The map(cid:173) ping between the data and the hidden space is nonlinear, so we use an approximate variational technique for inference and learn(cid:173) ing. Since product analysis is a generalization of factor analysis, product analysis always finds a higher data likelihood than factor analysis. We give results on pattern recognition and illumination(cid:173) invariant image clustering.", "bibtex": "@inproceedings{NIPS2001_deb54ffb,\n author = {Frey, Brendan J and Kannan, Anitha and Jojic, Nebojsa},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Product Analysis: Learning to Model Observations as Products of Hidden Variables},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/deb54ffb41e085fd7f69a75b6359c989-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/deb54ffb41e085fd7f69a75b6359c989-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/deb54ffb41e085fd7f69a75b6359c989-Metadata.json", "review": "", "metareview": "", "pdf_size": 1246198, "gs_citation": 7, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10862005363181449311&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": ";;", "aff_domain": ";;", "email": ";;", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster" }, { "id": "f13ea70f47", "title": "Products of Gaussians", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/8232e119d8f59aa83050a741631803a6-Abstract.html", "author": "Christopher Williams; Felix V. Agakov; Stephen N. Felderhof", "abstract": "Recently Hinton (1999) has introduced the Products of Experts (PoE) model in which several individual probabilistic models for data are combined to provide an overall model of the data. Be(cid:173) low we consider PoE models in which each expert is a Gaussian. Although the product of Gaussians is also a Gaussian, if each Gaus(cid:173) sian has a simple structure the product can have a richer structure. We examine (1) Products of Gaussian pancakes which give rise to probabilistic Minor Components Analysis, (2) products of I-factor PPCA models and (3) a products of experts construction for an AR(l) process.", "bibtex": "@inproceedings{NIPS2001_8232e119,\n author = {Williams, Christopher and Agakov, Felix and Felderhof, Stephen},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Products of Gaussians},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/8232e119d8f59aa83050a741631803a6-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/8232e119d8f59aa83050a741631803a6-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/8232e119d8f59aa83050a741631803a6-Metadata.json", "review": "", "metareview": "", "pdf_size": 1414725, "gs_citation": 22, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3932476093387162191&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Division of Informatics, University of Edinburgh; System Engineering Research Group, Chair of Manufacturing Technology, Universit\u00e4t Erlangen-N\u00fcrnberg; Division of Informatics, University of Edinburgh", "aff_domain": "ed.ac.uk;lft.uni-erlangen.de;dai.ed.ac.uk", "email": "ed.ac.uk;lft.uni-erlangen.de;dai.ed.ac.uk", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;0", "aff_unique_norm": "University of Edinburgh;Universit\u00e4t Erlangen-N\u00fcrnberg", "aff_unique_dep": "Division of Informatics;System Engineering Research Group, Chair of Manufacturing Technology", "aff_unique_url": "https://www.ed.ac.uk;https://www.uni-erlangen.de/", "aff_unique_abbr": "Edinburgh;", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Edinburgh;", "aff_country_unique_index": "0;1;0", "aff_country_unique": "United Kingdom;Germany" }, { "id": "32e60b7ea1", "title": "Quantizing Density Estimators", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/a00e5eb0973d24649a4a920fc53d9564-Abstract.html", "author": "Peter Meinicke; Helge Ritter", "abstract": "We suggest a nonparametric framework for unsupervised learning of projection models in terms of density estimation on quantized sample spaces. 
The objective is not to optimally reconstruct the data but in- stead the quantizer is chosen to optimally reconstruct the density of the data. For the resulting quantizing density estimator (QDE) we present a general method for parameter estimation and model selection. We show how projection sets which correspond to traditional unsupervised meth- ods like vector quantization or PCA appear in the new framework. For a principal component quantizer we present results on synthetic and real- world data, which show that the QDE can improve the generalization of the kernel density estimator although its estimate is based on signi\ufb01cantly lower-dimensional projection indices of the data.", "bibtex": "@inproceedings{NIPS2001_a00e5eb0,\n author = {Meinicke, Peter and Ritter, Helge},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Quantizing Density Estimators},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/a00e5eb0973d24649a4a920fc53d9564-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/a00e5eb0973d24649a4a920fc53d9564-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/a00e5eb0973d24649a4a920fc53d9564-Metadata.json", "review": "", "metareview": "", "pdf_size": 89294, "gs_citation": 12, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17838011611762309931&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "Neuroinformatics Group, University of Bielefeld, Bielefeld, Germany; Neuroinformatics Group, University of Bielefeld, Bielefeld, Germany", "aff_domain": "techfak.uni-bielefeld.de;techfak.uni-bielefeld.de", "email": "techfak.uni-bielefeld.de;techfak.uni-bielefeld.de", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": 
"University of Bielefeld", "aff_unique_dep": "Neuroinformatics Group", "aff_unique_url": "https://www.uni-bielefeld.de", "aff_unique_abbr": "", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Bielefeld", "aff_country_unique_index": "0;0", "aff_country_unique": "Germany" }, { "id": "1c331a2413", "title": "Rao-Blackwellised Particle Filtering via Data Augmentation", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/6f4920ea25403ec77bee9efce43ea25e-Abstract.html", "author": "Christophe Andrieu; Nando D. Freitas; Arnaud Doucet", "abstract": "EE Engineering", "bibtex": "@inproceedings{NIPS2001_6f4920ea,\n author = {Andrieu, Christophe and Freitas, Nando and Doucet, Arnaud},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Rao-Blackwellised Particle Filtering via Data Augmentation},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/6f4920ea25403ec77bee9efce43ea25e-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/6f4920ea25403ec77bee9efce43ea25e-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/6f4920ea25403ec77bee9efce43ea25e-Metadata.json", "review": "", "metareview": "", "pdf_size": 1161600, "gs_citation": 41, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=406407417086484087&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "Statistics Group, University of Bristol; Computer Science, UC Berkeley; EE Engineering, University of Melbourne", "aff_domain": "bristol.ac.uk;cs.berkeley.edu;ee.mu.oz.au", "email": "bristol.ac.uk;cs.berkeley.edu;ee.mu.oz.au", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;2", "aff_unique_norm": "University of Bristol;University of California, Berkeley;University of Melbourne", "aff_unique_dep": 
"Statistics Group;Department of Computer Science;EE Engineering", "aff_unique_url": "https://www.bristol.ac.uk;https://www.berkeley.edu;https://www.unimelb.edu.au", "aff_unique_abbr": "UoB;UC Berkeley;UniMelb", "aff_campus_unique_index": "1", "aff_campus_unique": ";Berkeley", "aff_country_unique_index": "0;1;2", "aff_country_unique": "United Kingdom;United States;Australia" }, { "id": "9a1e7db38a", "title": "Rates of Convergence of Performance Gradient Estimates Using Function Approximation and Bias in Reinforcement Learning", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/f3e52c300b822a8123e7ace55fe15c08-Abstract.html", "author": "Gregory Z. Grudic; Lyle H. Ungar", "abstract": "", "bibtex": "@inproceedings{NIPS2001_f3e52c30,\n author = {Grudic, Gregory and Ungar, Lyle},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Rates of Convergence of Performance Gradient Estimates Using Function Approximation and Bias in Reinforcement Learning},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/f3e52c300b822a8123e7ace55fe15c08-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/f3e52c300b822a8123e7ace55fe15c08-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/f3e52c300b822a8123e7ace55fe15c08-Metadata.json", "review": "", "metareview": "", "pdf_size": 108401, "gs_citation": 5, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4800811934836578957&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "University of Colorado, Boulder; University of Pennsylvania", "aff_domain": "cs.colorado.edu;cis.upenn.edu", "email": "cs.colorado.edu;cis.upenn.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "University of 
Colorado;University of Pennsylvania", "aff_unique_dep": ";", "aff_unique_url": "https://www.colorado.edu;https://www.upenn.edu", "aff_unique_abbr": "CU;UPenn", "aff_campus_unique_index": "0", "aff_campus_unique": "Boulder;", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "6b32b227a3", "title": "Receptive field structure of flow detectors for heading perception", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/d198bd736a97e7cecfdf8f4f2027ef80-Abstract.html", "author": "J. A. Beintema; M. Lappe; Alexander C. Berg", "abstract": "Observer translation relative to the world creates image flow that expands from the observer's direction of translation (heading) from which the observer can recover heading direction. Yet, the image flow is often more complex, depending on rotation of the eye, scene layout and translation velocity. A number of models [1-4] have been proposed on how the human visual system extracts heading from flow in a neurophysiologically plausible way. These models represent heading by a set of neurons that respond to large image flow patterns and receive input from motion sensed at different image locations. We analysed these models to determine the exact receptive field of these heading detectors. We find most models predict that, contrary to widespread belief, the contributing motion sensors have a preferred motion directed circularly rather than radially around the detector's preferred heading. Moreover, the results suggest to look for more refined structure within the circular flow, such as bi-circularity or local motion-opponency.", "bibtex": "@inproceedings{NIPS2001_d198bd73,\n author = {Beintema, J. and Lappe, M. and Berg, Alexander},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Receptive field structure of flow detectors for heading perception},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/d198bd736a97e7cecfdf8f4f2027ef80-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/d198bd736a97e7cecfdf8f4f2027ef80-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/d198bd736a97e7cecfdf8f4f2027ef80-Metadata.json", "review": "", "metareview": "", "pdf_size": 1441014, "gs_citation": 7, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=728770260569739361&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "Dept. Zoology & Neurobiology, Ruhr University Bochum, Germany, 44780; Dept. of Neuro-ethology, Helmholtz Institute, Utrecht University, The Netherlands; Dept. Zoology & Neurobiology, Ruhr University Bochum, Germany, 44780", "aff_domain": "neurobiologie.ruhr-uni-bochum.de;bio.uu.nl;neurobiologie.ruhr-uni-bochum.de", "email": "neurobiologie.ruhr-uni-bochum.de;bio.uu.nl;neurobiologie.ruhr-uni-bochum.de", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;0", "aff_unique_norm": "Ruhr University Bochum;Utrecht University", "aff_unique_dep": "Dept. Zoology & Neurobiology;Dept. of Neuro-ethology", "aff_unique_url": "https://www.ruhr-uni-bochum.de;https://www.uu.nl", "aff_unique_abbr": ";UU", "aff_campus_unique_index": "1", "aff_campus_unique": ";Utrecht", "aff_country_unique_index": "0;1;0", "aff_country_unique": "Germany;Netherlands" }, { "id": "6696f58472", "title": "Reducing multiclass to binary by coupling probability estimates", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/abdbeb4d8dbe30df8430a8394b7218ef-Abstract.html", "author": "B. 
Zadrozny", "abstract": "This paper presents a method for obtaining class membership probability esti- mates for multiclass classi\ufb01cation problems by coupling the probability estimates produced by binary classi\ufb01ers. This is an extension for arbitrary code matrices of a method due to Hastie and Tibshirani for pairwise coupling of probability estimates. Experimental results with Boosted Naive Bayes show that our method produces calibrated class membership probability estimates, while having similar classi\ufb01cation accuracy as loss-based decoding, a method for obtaining the most likely class that does not generate probability estimates.", "bibtex": "@inproceedings{NIPS2001_abdbeb4d,\n author = {Zadrozny, B.},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Reducing multiclass to binary by coupling probability estimates},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/abdbeb4d8dbe30df8430a8394b7218ef-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/abdbeb4d8dbe30df8430a8394b7218ef-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/abdbeb4d8dbe30df8430a8394b7218ef-Metadata.json", "review": "", "metareview": "", "pdf_size": 80848, "gs_citation": 126, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5009288889584929216&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 14, "aff": "Department of Computer Science and Engineering, University of California, San Diego", "aff_domain": "cs.ucsd.edu", "email": "cs.ucsd.edu", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "University of California, San Diego", "aff_unique_dep": "Department of Computer Science and Engineering", "aff_unique_url": "https://www.ucsd.edu", 
"aff_unique_abbr": "UCSD", "aff_campus_unique_index": "0", "aff_campus_unique": "San Diego", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "id": "8f637722df", "title": "Reinforcement Learning and Time Perception -- a Model of Animal Experiments", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/08f90c1a417155361a5c4b8d297e0d78-Abstract.html", "author": "Jonathan L. Shapiro; J. Wearden", "abstract": "Animal data on delayed-reward conditioning experiments shows a striking property - the data for different time intervals collapses into a single curve when the data is scaled by the time interval. This is called the scalar property of interval timing. Here a simple model of a neural clock is presented and shown to give rise to the scalar property. The model is an accumulator consisting of noisy, linear spiking neurons. It is analytically tractable and contains only three parameters. When coupled with reinforcement learning it simulates peak procedure experiments, producing both the scalar property and the pattern of single trial covariances.", "bibtex": "@inproceedings{NIPS2001_08f90c1a,\n author = {Shapiro, Jonathan and Wearden, J.},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Reinforcement Learning and Time Perception -- a Model of Animal Experiments},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/08f90c1a417155361a5c4b8d297e0d78-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/08f90c1a417155361a5c4b8d297e0d78-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/08f90c1a417155361a5c4b8d297e0d78-Metadata.json", "review": "", "metareview": "", "pdf_size": 1434111, "gs_citation": 13, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3449418200683116821&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 15, "aff": "Department of Computer Science, University of Manchester; Department of Psychology, University of Manchester", "aff_domain": "cs.man.ac.uk; ", "email": "cs.man.ac.uk; ", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of Manchester", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.manchester.ac.uk", "aff_unique_abbr": "UoM", "aff_campus_unique_index": "1", "aff_campus_unique": ";Manchester", "aff_country_unique_index": "0;0", "aff_country_unique": "United Kingdom" }, { "id": "0e19367ca7", "title": "Reinforcement Learning with Long Short-Term Memory", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/a38b16173474ba8b1a95bcbc30d3b8a5-Abstract.html", "author": "Bram Bakker", "abstract": "This paper presents reinforcement learning with a Long Short(cid:173) Term Memory recurrent neural network: RL-LSTM. Model-free RL-LSTM using Advantage(,x) learning and directed exploration can solve non-Markovian tasks with long-term dependencies be(cid:173) tween relevant events. 
This is demonstrated in a T-maze task, as well as in a difficult variation of the pole balancing task.", "bibtex": "@inproceedings{NIPS2001_a38b1617,\n author = {Bakker, Bram},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Reinforcement Learning with Long Short-Term Memory},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/a38b16173474ba8b1a95bcbc30d3b8a5-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/a38b16173474ba8b1a95bcbc30d3b8a5-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/a38b16173474ba8b1a95bcbc30d3b8a5-Metadata.json", "review": "", "metareview": "", "pdf_size": 961980, "gs_citation": 448, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14749078181704391479&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 11, "aff": "Dept. of Psychology, Leiden University / IDSIA", "aff_domain": "fsw.leidenuniv.nl", "email": "fsw.leidenuniv.nl", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "Leiden University", "aff_unique_dep": "Department of Psychology", "aff_unique_url": "https://www.universiteitleiden.nl", "aff_unique_abbr": "Leiden U.", "aff_country_unique_index": "0", "aff_country_unique": "Netherlands" }, { "id": "e2dc10491c", "title": "Relative Density Nets: A New Way to Combine Backpropagation with HMM's", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/9bb6dee73b8b0ca97466ccb24fff3139-Abstract.html", "author": "Andrew D. Brown; Geoffrey E. Hinton", "abstract": "Logistic units in the first hidden layer of a feedforward neural network compute the relative probability of a data point under two Gaussians. This leads us to consider substituting other density models. 
We present an architecture for performing discriminative learning of Hidden Markov Models using a network of many small HMM's. Experiments on speech data show it to be superior to the standard method of discriminatively training HMM's.", "bibtex": "@inproceedings{NIPS2001_9bb6dee7,\n author = {Brown, Andrew and Hinton, Geoffrey E},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Relative Density Nets: A New Way to Combine Backpropagation with HMM\\textquotesingle s},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/9bb6dee73b8b0ca97466ccb24fff3139-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/9bb6dee73b8b0ca97466ccb24fff3139-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/9bb6dee73b8b0ca97466ccb24fff3139-Metadata.json", "review": "", "metareview": "", "pdf_size": 1391078, "gs_citation": 7, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2487754433505879676&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Department of Computer Science, University of Toronto; Gatsby Unit, UCL", "aff_domain": "cs.utoronto.ca;gatsby.ucl.ac.uk", "email": "cs.utoronto.ca;gatsby.ucl.ac.uk", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "University of Toronto;University College London", "aff_unique_dep": "Department of Computer Science;Gatsby Unit", "aff_unique_url": "https://www.utoronto.ca;https://www.ucl.ac.uk", "aff_unique_abbr": "U of T;UCL", "aff_campus_unique_index": "0", "aff_campus_unique": "Toronto;", "aff_country_unique_index": "0;1", "aff_country_unique": "Canada;United Kingdom" }, { "id": "e65e2a41c1", "title": "Risk Sensitive Particle Filters", "site": 
"https://papers.nips.cc/paper_files/paper/2001/hash/7ca57a9f85a19a6e4b9a248c1daca185-Abstract.html", "author": "Sebastian Thrun; John Langford; Vandi Verma", "abstract": "We propose a new particle \ufb01lter that incorporates a model of costs when generating particles. The approach is motivated by the observation that the costs of accidentally not tracking hypotheses might be signi\ufb01cant in some areas of state space, and next to irrelevant in others. By incorporat- ing a cost model into particle \ufb01ltering, states that are more critical to the system performance are more likely to be tracked. Automatic calculation of the cost model is implemented using an MDP value function calcula- tion that estimates the value of tracking a particular state. Experiments in two mobile robot domains illustrate the appropriateness of the approach.", "bibtex": "@inproceedings{NIPS2001_7ca57a9f,\n author = {Thrun, Sebastian and Langford, John and Verma, Vandi},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Risk Sensitive Particle Filters},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/7ca57a9f85a19a6e4b9a248c1daca185-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/7ca57a9f85a19a6e4b9a248c1daca185-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/7ca57a9f85a19a6e4b9a248c1daca185-Metadata.json", "review": "", "metareview": "", "pdf_size": 178997, "gs_citation": 123, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2219275781834506459&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 19, "aff": "School of Computer Science, Carnegie Mellon University, Pittsburgh, PA 15213; School of Computer Science, Carnegie Mellon University, Pittsburgh, PA 15213; School of Computer Science, Carnegie Mellon University, Pittsburgh, PA 15213", "aff_domain": "cs.cmu.edu;cs.cmu.edu;cs.cmu.edu", "email": "cs.cmu.edu;cs.cmu.edu;cs.cmu.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "Carnegie Mellon University", "aff_unique_dep": "School of Computer Science", "aff_unique_url": "https://www.cmu.edu", "aff_unique_abbr": "CMU", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Pittsburgh", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "fade371731", "title": "Sampling Techniques for Kernel Methods", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/07cb5f86508f146774a2fac4373a8e50-Abstract.html", "author": "Dimitris Achlioptas; Frank Mcsherry; Bernhard Sch\u00f6lkopf", "abstract": "We propose randomized techniques for speeding up Kernel Principal Component Analysis on three levels: sampling and quantization of the Gram matrix in training, randomized rounding in evaluating the kernel expansions, and random projections in evaluating the kernel itself. 
In all three cases, we give sharp bounds on the accuracy of the obtained ap- proximations. Rather intriguingly, all three techniques can be viewed as instantiations of the following idea: replace the kernel function by a \u201crandomized kernel\u201d which behaves like", "bibtex": "@inproceedings{NIPS2001_07cb5f86,\n author = {Achlioptas, Dimitris and Mcsherry, Frank and Sch\\\"{o}lkopf, Bernhard},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Sampling Techniques for Kernel Methods},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/07cb5f86508f146774a2fac4373a8e50-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/07cb5f86508f146774a2fac4373a8e50-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/07cb5f86508f146774a2fac4373a8e50-Metadata.json", "review": "", "metareview": "", "pdf_size": 94973, "gs_citation": 254, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8929612554397011018&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 14, "aff": "Microsoft Research; University of Washington; Biowulf Technologies NY", "aff_domain": "microsoft.com;cs.washington.edu;conclu.de", "email": "microsoft.com;cs.washington.edu;conclu.de", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;2", "aff_unique_norm": "Microsoft;University of Washington;Biowulf Technologies", "aff_unique_dep": "Microsoft Research;;", "aff_unique_url": "https://www.microsoft.com/en-us/research;https://www.washington.edu;", "aff_unique_abbr": "MSR;UW;", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "b20c275010", "title": "Scaling Laws and Local Minima in Hebbian ICA", "site": 
"https://papers.nips.cc/paper_files/paper/2001/hash/b8b4b727d6f5d1b61fff7be687f7970f-Abstract.html", "author": "Magnus Rattray; Gleb Basalyga", "abstract": "We study the dynamics of a Hebbian ICA algorithm extracting a sin- gle non-Gaussian component from a high-dimensional Gaussian back- ground. For both on-line and batch learning we \ufb01nd that a surprisingly large number of examples are required to avoid trapping in a sub-optimal state close to the initial conditions. To extract a skewed signal at least examples are required for -dimensional data and", "bibtex": "@inproceedings{NIPS2001_b8b4b727,\n author = {Rattray, Magnus and Basalyga, Gleb},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Scaling Laws and Local Minima in Hebbian ICA},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/b8b4b727d6f5d1b61fff7be687f7970f-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/b8b4b727d6f5d1b61fff7be687f7970f-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/b8b4b727d6f5d1b61fff7be687f7970f-Metadata.json", "review": "", "metareview": "", "pdf_size": 203677, "gs_citation": 8, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6964549817566619422&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 13, "aff": "Department of Computer Science, University of Manchester; Department of Computer Science, University of Manchester", "aff_domain": "cs.man.ac.uk;cs.man.ac.uk", "email": "cs.man.ac.uk;cs.man.ac.uk", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of Manchester", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.manchester.ac.uk", "aff_unique_abbr": "UoM", "aff_campus_unique_index": "", 
"aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United Kingdom" }, { "id": "782584dc16", "title": "Self-regulation Mechanism of Temporally Asymmetric Hebbian Plasticity", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/c4de8ced6214345614d33fb0b16a8acd-Abstract.html", "author": "N. Matsumoto; M. Okada", "abstract": "Recent biological experimental (cid:12)ndings have shown that the synap- tic plasticity depends on the relative timing of the pre- and post- synaptic spikes which determines whether Long Term Potentiation (LTP) occurs or Long Term Depression (LTD) does. The synaptic plasticity has been called \\Temporally Asymmetric Hebbian plas- ticity (TAH)\". Many authors have numerically shown that spatio- temporal patterns can be stored in neural networks. However, the mathematical mechanism for storage of the spatio-temporal pat- terns is still unknown, especially the e(cid:11)ects of LTD. In this paper, we employ a simple neural network model and show that inter- ference of LTP and LTD disappears in a sparse coding scheme. On the other hand, it is known that the covariance learning is in- dispensable for storing sparse patterns. We also show that TAH qualitatively has the same e(cid:11)ect as the covariance learning when spatio-temporal patterns are embedded in the network.", "bibtex": "@inproceedings{NIPS2001_c4de8ced,\n author = {Matsumoto, N. and Okada, M.},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Self-regulation Mechanism of Temporally Asymmetric Hebbian Plasticity},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/c4de8ced6214345614d33fb0b16a8acd-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/c4de8ced6214345614d33fb0b16a8acd-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/c4de8ced6214345614d33fb0b16a8acd-Metadata.json", "review": "", "metareview": "", "pdf_size": 99463, "gs_citation": 15, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4289208845377296002&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 19, "aff": "Graduate School of Science and Engineering, Saitama University + RIKEN Brain Science Institute; RIKEN Brain Science Institute", "aff_domain": "brain.riken.go.jp;brain.riken.go.jp", "email": "brain.riken.go.jp;brain.riken.go.jp", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0+1;1", "aff_unique_norm": "Saitama University;RIKEN", "aff_unique_dep": "Graduate School of Science and Engineering;Brain Science Institute", "aff_unique_url": "https://www.saitama-u.ac.jp;https://briken.org", "aff_unique_abbr": ";RIKEN", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0+0;0", "aff_country_unique": "Japan" }, { "id": "b6e2cf3f16", "title": "Semi-supervised MarginBoost", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/931af583573227f0220bc568c65ce104-Abstract.html", "author": "Florence d'Alch\u00e9-Buc; Yves Grandvalet; Christophe Ambroise", "abstract": "In many discrimination problems a large amount of data is available but only a few of them are labeled. This provides a strong motivation to improve or develop methods for semi-supervised learning. In this paper, boosting is generalized to this task within the optimization framework of MarginBoost . 
We extend the margin definition to unlabeled data and develop the gradient descent algorithm that corresponds to the resulting margin cost function. This meta-learning scheme can be applied to any base classifier able to benefit from unlabeled data. We propose here to apply it to mixture models trained with an Expectation-Maximization algorithm. Promising results are presented on benchmarks with different rates of labeled data.", "bibtex": "@inproceedings{NIPS2001_931af583,\n author = {d\\textquotesingle Alch\\'{e}-Buc, Florence and Grandvalet, Yves and Ambroise, Christophe},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Semi-supervised MarginBoost},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/931af583573227f0220bc568c65ce104-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/931af583573227f0220bc568c65ce104-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/931af583573227f0220bc568c65ce104-Metadata.json", "review": "", "metareview": "", "pdf_size": 1301992, "gs_citation": 122, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2132241354893889700&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "LIP6,UMR CNRS 7606, Universite P. et M. Curie 75252 Paris Cedex, France; Heudiasyc, UMR CNRS 6599, Universite de Technologie de Compiegne, BP 20.529, 60205 Compiegne cedex, France; Heudiasyc, UMR CNRS 6599, Universite de Technologie de Compiegne, BP 20.529, 60205 Compiegne cedex, France", "aff_domain": "lip6.fr;hds.utc.fr;hds.utc.fr", "email": "lip6.fr;hds.utc.fr;hds.utc.fr", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;1", "aff_unique_norm": "Universite P. et M. 
Curie;Universite de Technologie de Compiegne", "aff_unique_dep": "LIP6;Heudiasyc", "aff_unique_url": "https://www.upmc.fr;https://www.utt.fr", "aff_unique_abbr": "UPMC;UTC", "aff_campus_unique_index": "0;1;1", "aff_campus_unique": "Paris;Compiegne", "aff_country_unique_index": "0;0;0", "aff_country_unique": "France" }, { "id": "9b565efc44", "title": "Sequential Noise Compensation by Sequential Monte Carlo Method", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/b2ab001909a8a6f04b51920306046ce5-Abstract.html", "author": "K. Yao; S. Nakamura", "abstract": "We present a sequential Monte Carlo method applied to additive noise compensation for robust speech recognition in time-varying noise. The method generates a set of samples according to the prior distribution given by clean speech models and noise prior evolved from previous estimation. An explicit model representing noise ef- fects on speech features is used, so that an extended Kalman filter is constructed for each sample, generating the updated continuous state estimate as the estimation of the noise parameter, and predic- tion likelihood for weighting each sample. Minimum mean square error (MMSE) inference of the time-varying noise parameter is car- ried out over these samples by fusion the estimation of samples ac- cording to their weights. A residual resampling selection step and a Metropolis-Hastings smoothing step are used to improve calcula- tion e#ciency. Experiments were conducted on speech recognition in simulated non-stationary noises, where noise power changed ar- tificially, and highly non-stationary Machinegun noise. In all the experiments carried out, we observed that the method can have sig- nificant recognition performance improvement, over that achieved by noise compensation with stationary noise assumption. 1 Introduction", "bibtex": "@inproceedings{NIPS2001_b2ab0019,\n author = {Yao, K. 
and Nakamura, S.},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Sequential Noise Compensation by Sequential Monte Carlo Method},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/b2ab001909a8a6f04b51920306046ce5-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/b2ab001909a8a6f04b51920306046ce5-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/b2ab001909a8a6f04b51920306046ce5-Metadata.json", "review": "", "metareview": "", "pdf_size": 385854, "gs_citation": 38, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9298077456956744026&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": ";", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster" }, { "id": "052e7b51ea", "title": "Small-World Phenomena and the Dynamics of Information", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/52dbb0686f8bd0c0c757acf716e28ec0-Abstract.html", "author": "Jon M. Kleinberg", "abstract": "The problem of searching for information in networks like the World Wide Web can\nbe approached in a variety of ways, ranging from centralized indexing schemes to\ndecentralized mechanisms that navigate the underlying network without knowledge\nof its global structure. The decentralized approach appears in a variety of settings:\nin the behavior of users browsing the Web by following hyperlinks; in the design of", "bibtex": "@inproceedings{NIPS2001_52dbb068,\n author = {Kleinberg, Jon},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Small-World Phenomena and the Dynamics of Information},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/52dbb0686f8bd0c0c757acf716e28ec0-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/52dbb0686f8bd0c0c757acf716e28ec0-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/52dbb0686f8bd0c0c757acf716e28ec0-Metadata.json", "review": "", "metareview": "", "pdf_size": 92503, "gs_citation": 551, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2426584865774857307&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 18, "aff": "Department of Computer Science, Cornell University", "aff_domain": "", "email": "", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "Cornell University", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.cornell.edu", "aff_unique_abbr": "Cornell", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "id": "47cd3e792a", "title": "Spectral Kernel Methods for Clustering", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/4ba29b9f9e5732ed33761840f4ba6c53-Abstract.html", "author": "Nello Cristianini; John Shawe-Taylor; Jaz S. Kandola", "abstract": "In this paper we introduce new algorithms for unsupervised learn(cid:173) ing based on the use of a kernel matrix. All the information re(cid:173) quired by such algorithms is contained in the eigenvectors of the matrix or of closely related matrices. We use two different but re(cid:173) lated cost functions, the Alignment and the 'cut cost'. The first one is discussed in a companion paper [3], the second one is based on graph theoretic concepts. Both functions measure the level of clustering of a labeled dataset, or the correlation between data clus(cid:173) ters and labels. 
We state the problem of unsupervised learning as assigning labels so as to optimize these cost functions. We show how the optimal solution can be approximated by slightly relaxing the corresponding optimization problem, and how this corresponds to using eigenvector information. The resulting simple algorithms are tested on real world data with positive results.", "bibtex": "@inproceedings{NIPS2001_4ba29b9f,\n author = {Cristianini, Nello and Shawe-Taylor, John and Kandola, Jaz},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Spectral Kernel Methods for Clustering},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/4ba29b9f9e5732ed33761840f4ba6c53-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/4ba29b9f9e5732ed33761840f4ba6c53-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/4ba29b9f9e5732ed33761840f4ba6c53-Metadata.json", "review": "", "metareview": "", "pdf_size": 1339487, "gs_citation": 145, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1484731687009188475&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "BIOwulf Technologies; Royal Holloway, University of London; Royal Holloway, University of London", "aff_domain": "support-vector.net;cs.rhul.ac.uk;cs.rhul.ac.uk", "email": "support-vector.net;cs.rhul.ac.uk;cs.rhul.ac.uk", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;1", "aff_unique_norm": "Biowulf Technologies;University of London", "aff_unique_dep": ";", "aff_unique_url": "http://biowulf.com;https://www.royalholloway.ac.uk", "aff_unique_abbr": ";RHUL", "aff_campus_unique_index": "1;1", "aff_campus_unique": ";Royal Holloway", "aff_country_unique_index": "0;1;1", "aff_country_unique": "United States;United Kingdom" }, { "id": 
"75521207b7", "title": "Spectral Relaxation for K-means Clustering", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/d5c186983b52c4551ee00f72316c6eaa-Abstract.html", "author": "Hongyuan Zha; Xiaofeng He; Chris Ding; Ming Gu; Horst D. Simon", "abstract": "The popular K-means clustering partitions a data set by minimiz(cid:173) ing a sum-of-squares cost function. A coordinate descend method is then used to find local minima. In this paper we show that the minimization can be reformulated as a trace maximization problem associated with the Gram matrix of the data vectors. Furthermore, we show that a relaxed version of the trace maximization problem possesses global optimal solutions which can be obtained by com(cid:173) puting a partial eigendecomposition of the Gram matrix, and the cluster assignment for each data vectors can be found by comput(cid:173) ing a pivoted QR decomposition of the eigenvector matrix. As a by-product we also derive a lower bound for the minimum of the sum-of-squares cost function.", "bibtex": "@inproceedings{NIPS2001_d5c18698,\n author = {Zha, Hongyuan and He, Xiaofeng and Ding, Chris and Gu, Ming and Simon, Horst},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Spectral Relaxation for K-means Clustering},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/d5c186983b52c4551ee00f72316c6eaa-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/d5c186983b52c4551ee00f72316c6eaa-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/d5c186983b52c4551ee00f72316c6eaa-Metadata.json", "review": "", "metareview": "", "pdf_size": 1283444, "gs_citation": 938, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=192982896818124105&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 13, "aff": "Dept. 
of Compo Sci. & Eng., The Pennsylvania State University, University Park, PA 16802; Dept. of Compo Sci. & Eng., The Pennsylvania State University, University Park, PA 16802; NERSC Division, Lawrence Berkeley National Lab., UC Berkeley, Berkeley, CA 94720; NERSC Division, Lawrence Berkeley National Lab., UC Berkeley, Berkeley, CA 94720; Dept. of Mathematics, UC Berkeley, Berkeley, CA 95472", "aff_domain": "cse.psu.edu;cse.psu.edu;lbl.gov;lbl.gov;math.berkeley.edu", "email": "cse.psu.edu;cse.psu.edu;lbl.gov;lbl.gov;math.berkeley.edu", "github": "", "project": "", "author_num": 5, "track": "main", "status": "Poster", "aff_unique_index": "0;0;1;1;2", "aff_unique_norm": "Pennsylvania State University;Lawrence Berkeley National Laboratory;University of California, Berkeley", "aff_unique_dep": "Department of Computer Science and Engineering;NERSC Division;Department of Mathematics", "aff_unique_url": "https://www.psu.edu;https://www.lbl.gov;https://www.berkeley.edu", "aff_unique_abbr": "PSU;LBL;UC Berkeley", "aff_campus_unique_index": "0;0;1;1;1", "aff_campus_unique": "University Park;Berkeley", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "id": "49341c4496", "title": "Speech Recognition using SVMs", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/d8330f857a17c53d217014ee776bfd50-Abstract.html", "author": "N. Smith; Mark Gales", "abstract": "An important issue in applying SVMs to speech recognition is the ability to classify variable length sequences. This paper presents extensions to a standard scheme for handling this variable length data, the Fisher score. A more useful mapping is introduced based on the likelihood-ratio. The score-space defined by this mapping avoids some limitations of the Fisher score. Class-conditional gen(cid:173) erative models are directly incorporated into the definition of the score-space. 
The mapping, and appropriate normalisation schemes, are evaluated on a speaker-independent isolated letter task where the new mapping outperforms both the Fisher score and HMMs trained to maximise likelihood.", "bibtex": "@inproceedings{NIPS2001_d8330f85,\n author = {Smith, N. and Gales, Mark},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Speech Recognition using SVMs},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/d8330f857a17c53d217014ee776bfd50-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/d8330f857a17c53d217014ee776bfd50-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/d8330f857a17c53d217014ee776bfd50-Metadata.json", "review": "", "metareview": "", "pdf_size": 1746918, "gs_citation": 237, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10038071267272269728&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 7, "aff": "Cambridge University Engineering Dept Cambridge, CB2 1PZ, U.K.; Cambridge University Engineering Dept Cambridge, CB2 1PZ, U.K.", "aff_domain": "eng.cam.ac.uk;eng.cam.ac.uk", "email": "eng.cam.ac.uk;eng.cam.ac.uk", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Cambridge University", "aff_unique_dep": "Engineering Dept", "aff_unique_url": "https://www.cam.ac.uk", "aff_unique_abbr": "Cambridge", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Cambridge", "aff_country_unique_index": "0;0", "aff_country_unique": "United Kingdom" }, { "id": "fdeb1963fa", "title": "Speech Recognition with Missing Data using Recurrent Neural Nets", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/7f16109f1619fd7a733daf5a84c708c1-Abstract.html", "author": "S. Parveen; P. 
Green", "abstract": "In the", "bibtex": "@inproceedings{NIPS2001_7f16109f,\n author = {Parveen, S. and Green, P.},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Speech Recognition with Missing Data using Recurrent Neural Nets},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/7f16109f1619fd7a733daf5a84c708c1-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/7f16109f1619fd7a733daf5a84c708c1-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/7f16109f1619fd7a733daf5a84c708c1-Metadata.json", "review": "", "metareview": "", "pdf_size": 91653, "gs_citation": 58, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15218619931897961007&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "Speech and Hearing Research Group, Department of Computer Science, University of Shef\u00deeld, Shef\u00deeld S14DP, UK; Speech and Hearing Research Group, Department of Computer Science, University of Shef\u00deeld, Shef\u00deeld S14DP, UK", "aff_domain": "dcs.shef.ac.uk;dcs.shef.ac.uk", "email": "dcs.shef.ac.uk;dcs.shef.ac.uk", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of Sheffield", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.sheffield.ac.uk", "aff_unique_abbr": "Sheffield", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Sheffield", "aff_country_unique_index": "0;0", "aff_country_unique": "United Kingdom" }, { "id": "4e996f635e", "title": "Spike timing and the coding of naturalistic sounds in a central auditory area of songbirds", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/2557911c1bf75c2b643afb4ecbfc8ec2-Abstract.html", "author": "B. D. 
Wright; Kamal Sen; William Bialek; A. J. Doupe", "abstract": "In nature, animals encounter high dimensional sensory stimuli that have complex statistical and dynamical structure. Attempts to study the neu- ral coding of these natural signals face challenges both in the selection of the signal ensemble and in the analysis of the resulting neural responses. For zebra \ufb01nches, naturalistic stimuli can be de\ufb01ned as sounds that they encounter in a colony of conspeci\ufb01c birds. We assembled an ensemble of these sounds by recording groups of 10-40 zebra \ufb01nches, and then ana- lyzed the response of single neurons in the songbird central auditory area (\ufb01eld L) to continuous playback of long segments from this ensemble. Following methods developed in the \ufb02y visual system, we measured the information that spike trains provide about the acoustic stimulus with- out any assumptions about which features of the stimulus are relevant. Preliminary results indicate that large amounts of information are carried by spike timing, with roughly half of the information accessible only at time resolutions better than 10 ms; additional information is still be- ing revealed as time resolution is improved to 2 ms. Information can be decomposed into that carried by the locking of individual spikes to the stimulus (or modulations of spike rate) vs. that carried by timing in spike patterns. Initial results show that in \ufb01eld L, temporal patterns give at least", "bibtex": "@inproceedings{NIPS2001_2557911c,\n author = {Wright, B. and Sen, Kamal and Bialek, William and Doupe, A.},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Spike timing and the coding of naturalistic sounds in a central auditory area of songbirds},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/2557911c1bf75c2b643afb4ecbfc8ec2-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/2557911c1bf75c2b643afb4ecbfc8ec2-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/2557911c1bf75c2b643afb4ecbfc8ec2-Metadata.json", "review": "", "metareview": "", "pdf_size": 222432, "gs_citation": 23, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17134327300444243598&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 19, "aff": "Sloan\u2013Swartz Center for Theoretical Neurobiology, Departments of Physiology and Psychiatry, University of California at San Francisco, San Francisco, California 94143\u20130444; NEC Research Institute, 4 Independence Way, Princeton, New Jersey 08540; Department of Physics, Princeton University, Princeton, New Jersey 08544; Sloan\u2013Swartz Center for Theoretical Neurobiology, Departments of Physiology and Psychiatry, University of California at San Francisco, San Francisco, California 94143\u20130444", "aff_domain": "phy.ucsf.edu;phy.ucsf.edu;phy.ucsf.edu;princeton.edu", "email": "phy.ucsf.edu;phy.ucsf.edu;phy.ucsf.edu;princeton.edu", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;1;2;0", "aff_unique_norm": "University of California at San Francisco;NEC Research Institute;Princeton University", "aff_unique_dep": "Departments of Physiology and Psychiatry;;Department of Physics", "aff_unique_url": "https://www.ucsf.edu;https://www.nec.com/research/;https://www.princeton.edu", "aff_unique_abbr": "UCSF;NEC RI;Princeton", "aff_campus_unique_index": "0;2;0", "aff_campus_unique": "San Francisco;;Princeton", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": 
"United States" }, { "id": "bc7fafd37b", "title": "Stabilizing Value Function Approximation with the BFBP Algorithm", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/8d8818c8e140c64c743113f563cf750f-Abstract.html", "author": "Xin Wang; Thomas G. Dietterich", "abstract": "We address the problem of non-convergence of online reinforcement learning algorithms (e.g., Q learning and SARSA(A)) by adopt(cid:173) ing an incremental-batch approach that separates the exploration process from the function fitting process. Our BFBP (Batch Fit to Best Paths) algorithm alternates between an exploration phase (during which trajectories are generated to try to find fragments of the optimal policy) and a function fitting phase (during which a function approximator is fit to the best known paths from start states to terminal states). An advantage of this approach is that batch value-function fitting is a global process, which allows it to address the tradeoffs in function approximation that cannot be handled by local, online algorithms. This approach was pioneered by Boyan and Moore with their GROWSUPPORT and ROUT al(cid:173) gorithms. We show how to improve upon their work by applying a better exploration process and by enriching the function fitting procedure to incorporate Bellman error and advantage error mea(cid:173) sures into the objective function. The results show improved per(cid:173) formance on several benchmark problems.", "bibtex": "@inproceedings{NIPS2001_8d8818c8,\n author = {Wang, Xin and Dietterich, Thomas},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Stabilizing Value Function Approximation with the BFBP Algorithm},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/8d8818c8e140c64c743113f563cf750f-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/8d8818c8e140c64c743113f563cf750f-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/8d8818c8e140c64c743113f563cf750f-Metadata.json", "review": "", "metareview": "", "pdf_size": 840867, "gs_citation": 6, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=901845851621099621&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Department of Computer Science, Oregon State University; Department of Computer Science, Oregon State University", "aff_domain": "cs.orst.edu;cs.orst.edu", "email": "cs.orst.edu;cs.orst.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Oregon State University", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://oregonstate.edu", "aff_unique_abbr": "OSU", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Corvallis", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "8c9ec4d063", "title": "Stochastic Mixed-Signal VLSI Architecture for High-Dimensional Kernel Machines", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/5352696a9ca3397beb79f116f3a33991-Abstract.html", "author": "Roman Genov; Gert Cauwenberghs", "abstract": "A mixed-signal paradigm is presented for high-resolution parallel inner- product computation in very high dimensions, suitable for ef\ufb01cient im- plementation of kernels in image processing. At the core of the externally digital architecture is a high-density, low-power analog array performing binary-binary partial matrix-vector multiplication. 
Full digital resolution is maintained even with low-resolution analog-to-digital conversion, ow- ing to random statistics in the analog summation of binary products. A random modulation scheme produces near-Bernoulli statistics even for highly correlated inputs. The approach is validated with real image data, and with experimental results from a CID/DRAM analog array prototype in 0.5", "bibtex": "@inproceedings{NIPS2001_5352696a,\n author = {Genov, Roman and Cauwenberghs, Gert},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Stochastic Mixed-Signal VLSI Architecture for High-Dimensional Kernel Machines},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/5352696a9ca3397beb79f116f3a33991-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/5352696a9ca3397beb79f116f3a33991-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/5352696a9ca3397beb79f116f3a33991-Metadata.json", "review": "", "metareview": "", "pdf_size": 210162, "gs_citation": 27, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7278257196738024838&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": ";", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster" }, { "id": "af8050d949", "title": "Switch Packet Arbitration via Queue-Learning", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/378a063b8fdb1db941e34f4bde584c7d-Abstract.html", "author": "Timothy X. Brown", "abstract": "In packet switches, packets queue at switch inputs and contend for out- puts. The contention arbitration policy directly affects switch perfor- mance. The best policy depends on the current state of the switch and current traf\ufb01c patterns. 
This problem is hard because the state space, possible transitions, and set of actions all grow exponentially with the size of the switch. We present a reinforcement learning formulation of the problem that decomposes the value function into many small inde- pendent value functions and enables an ef\ufb01cient action selection.", "bibtex": "@inproceedings{NIPS2001_378a063b,\n author = {Brown, Timothy},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Switch Packet Arbitration via Queue-Learning},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/378a063b8fdb1db941e34f4bde584c7d-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/378a063b8fdb1db941e34f4bde584c7d-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/378a063b8fdb1db941e34f4bde584c7d-Metadata.json", "review": "", "metareview": "", "pdf_size": 76678, "gs_citation": 17, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7297460312090416005&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "", "aff_domain": "", "email": "", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster" }, { "id": "84fe5a3eae", "title": "TAP Gibbs Free Energy, Belief Propagation and Sparsity", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/9f62b8625f914a002496335037e9ad97-Abstract.html", "author": "Lehel Csat\u00f3; Manfred Opper; Ole Winther", "abstract": "The adaptive TAP Gibbs free energy for a general densely connected probabilistic model with quadratic interactions and arbritary single site constraints is derived. We show how a speci\ufb01c sequential minimization of the free energy leads to a generalization of Minka\u2019s expectation propa- gation. Lastly, we derive a sparse representation version of the sequential algorithm. 
The usefulness of the approach is demonstrated on classi\ufb01cation and density estimation with Gaussian processes and on an independent component analysis problem.", "bibtex": "@inproceedings{NIPS2001_9f62b862,\n author = {Csat\\'{o}, Lehel and Opper, Manfred and Winther, Ole},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {TAP Gibbs Free Energy, Belief Propagation and Sparsity},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/9f62b8625f914a002496335037e9ad97-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/9f62b8625f914a002496335037e9ad97-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/9f62b8625f914a002496335037e9ad97-Metadata.json", "review": "", "metareview": "", "pdf_size": 131839, "gs_citation": 46, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14643587587023883980&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 18, "aff": "Neural Computing Research Group, School of Engineering and Applied Science, Aston University, Birmingham B4 7ET, UK; Neural Computing Research Group, School of Engineering and Applied Science, Aston University, Birmingham B4 7ET, UK; Center for Biological Sequence Analysis, BioCentrum, Technical University of Denmark, B208, 2800 Lyngby, Denmark", "aff_domain": "aston.ac.uk;aston.ac.uk;cbs.dtu.dk", "email": "aston.ac.uk;aston.ac.uk;cbs.dtu.dk", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;1", "aff_unique_norm": "Aston University;Technical University of Denmark", "aff_unique_dep": "School of Engineering and Applied Science;Center for Biological Sequence Analysis", "aff_unique_url": "https://www.aston.ac.uk;https://www.dtu.dk", "aff_unique_abbr": "Aston;DTU", "aff_campus_unique_index": "0;0;1", 
"aff_campus_unique": "Birmingham;Lyngby", "aff_country_unique_index": "0;0;1", "aff_country_unique": "United Kingdom;Denmark" }, { "id": "93096fab51", "title": "Tempo tracking and rhythm quantization by sequential Monte Carlo", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/5ec829debe54b19a5f78d9a65b900a39-Abstract.html", "author": "Ali Taylan Cemgil; Bert Kappen", "abstract": "We present a probabilistic generative model for timing deviations in expressive music performance. The structure of the proposed model is equivalent to a switching state space model. We formulate two well known music recognition problems, namely tempo tracking and automatic transcription (rhythm quantization) as filtering and maximum a posteriori (MAP) state estimation tasks. The inferences are carried out using sequential Monte Carlo integration (particle filtering) techniques. For this purpose, we have derived a novel Viterbi algorithm for Rao-Blackwellized particle filters, where a subset of the hidden variables is integrated out. The resulting model is suitable for realtime tempo tracking and transcription and hence useful in a number of music applications such as adaptive automatic accompaniment and score typesetting.", "bibtex": "@inproceedings{NIPS2001_5ec829de,\n author = {Cemgil, Ali Taylan and Kappen, Bert},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Tempo tracking and rhythm quantization by sequential Monte Carlo},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/5ec829debe54b19a5f78d9a65b900a39-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/5ec829debe54b19a5f78d9a65b900a39-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/5ec829debe54b19a5f78d9a65b900a39-Metadata.json", "review": "", "metareview": "", "pdf_size": 741477, "gs_citation": 14, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4489859216564197091&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "SNN, University of Nijmegen; SNN, University of Nijmegen", "aff_domain": "mbfys.kun.nl;mbfys.kun.nl", "email": "mbfys.kun.nl;mbfys.kun.nl", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of Nijmegen", "aff_unique_dep": "SNN", "aff_unique_url": "https://www.ru.nl/", "aff_unique_abbr": "", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "Netherlands" }, { "id": "1e5c1d94bd", "title": "The Concave-Convex Procedure (CCCP)", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/a012869311d64a44b5a0d567cd20de04-Abstract.html", "author": "Alan L. Yuille; Anand Rangarajan", "abstract": "We introduce the Concave-Convex procedure (CCCP) which con(cid:173) structs discrete time iterative dynamical systems which are guar(cid:173) anteed to monotonically decrease global optimization/energy func(cid:173) tions. It can be applied to (almost) any optimization problem and many existing algorithms can be interpreted in terms of CCCP. In particular, we prove relationships to some applications of Legendre transform techniques. 
We then illustrate CCCP by applications to Potts models, linear assignment, EM algorithms, and Generalized Iterative Scaling (GIS). CCCP can be used both as a new way to understand existing optimization algorithms and as a procedure for generating new algorithms.", "bibtex": "@inproceedings{NIPS2001_a0128693,\n author = {Yuille, Alan L and Rangarajan, Anand},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {The Concave-Convex Procedure (CCCP)},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/a012869311d64a44b5a0d567cd20de04-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/a012869311d64a44b5a0d567cd20de04-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/a012869311d64a44b5a0d567cd20de04-Metadata.json", "review": "", "metareview": "", "pdf_size": 1427124, "gs_citation": 519, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8719008099818458055&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 13, "aff": "Smith-Kettlewell Eye Research Institute; Dept. of CISE, Univ. 
of Florida", "aff_domain": "ski.org;cise.ufl.edu", "email": "ski.org;cise.ufl.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "Smith-Kettlewell Eye Research Institute;University of Florida", "aff_unique_dep": ";Department of Computer and Information Science and Engineering", "aff_unique_url": "https://www.ski.org;https://www.ufl.edu", "aff_unique_abbr": "SKI;UF", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "e9d863868c", "title": "The Emergence of Multiple Movement Units in the Presence of Noise and Feedback Delay", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/54ff9e9e3a2ec0300d4ce11261f5169f-Abstract.html", "author": "Michael Kositsky; Andrew G. Barto", "abstract": "Tangential hand velocity pro\ufb01les of rapid human arm movements of- ten appear as sequences of several bell-shaped acceleration-deceleration phases called submovements or movement units. This suggests how the nervous system might ef\ufb01ciently control a motor plant in the presence of noise and feedback delay. Another critical observation is that stochastic- ity in a motor control problem makes the optimal control policy essen- tially different from the optimal control policy for the deterministic case. We use a simpli\ufb01ed dynamic model of an arm and address rapid aimed arm movements. We use reinforcement learning as a tool to approximate the optimal policy in the presence of noise and feedback delay. Using a simpli\ufb01ed model we show that multiple submovements emerge as an optimal policy in the presence of noise and feedback delay. The optimal policy in this situation is to drive the arm\u2019s end point close to the target by one fast submovement and then apply a few slow submovements to accu- rately drive the arm\u2019s end point into the target region. 
In our simulations, the controller sometimes generates corrective submovements before the initial fast submovement is completed, much like the predictive correc- tions observed in a number of psychophysical experiments.", "bibtex": "@inproceedings{NIPS2001_54ff9e9e,\n author = {Kositsky, Michael and Barto, Andrew},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {The Emergence of Multiple Movement Units in the Presence of Noise and Feedback Delay},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/54ff9e9e3a2ec0300d4ce11261f5169f-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/54ff9e9e3a2ec0300d4ce11261f5169f-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/54ff9e9e3a2ec0300d4ce11261f5169f-Metadata.json", "review": "", "metareview": "", "pdf_size": 103877, "gs_citation": 10, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11151812719373397675&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 15, "aff": "Department of Computer Science, University of Massachusetts; Department of Computer Science, University of Massachusetts", "aff_domain": "cs.umass.edu;cs.umass.edu", "email": "cs.umass.edu;cs.umass.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of Massachusetts", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.umass.edu", "aff_unique_abbr": "UMass", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "e55175cd1f", "title": "The Fidelity of Local Ordinal Encoding", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/84ddfb34126fc3a48ee38d7044e87276-Abstract.html", "author": "Javid 
Sadr; Sayan Mukherjee; Keith Thoresz; Pawan Sinha", "abstract": "A key question in neuroscience is how to encode sensory stimuli such as images and sounds. Motivated by studies of response prop- erties of neurons in the early cortical areas, we propose an encoding scheme that dispenses with absolute measures of signal intensity or contrast and uses, instead, only local ordinal measures. In this scheme, the structure of a signal is represented by a set of equalities and inequalities across adjacent regions. In this paper, we focus on characterizing the (cid:12)delity of this representation strategy. We develop a regularization approach for image reconstruction from ordinal measures and thereby demonstrate that the ordinal repre- sentation scheme can faithfully encode signal structure. We also present a neurally plausible implementation of this computation that uses only local update rules. The results highlight the robust- ness and generalization ability of local ordinal encodings for the task of pattern classi(cid:12)cation.", "bibtex": "@inproceedings{NIPS2001_84ddfb34,\n author = {Sadr, Javid and Mukherjee, Sayan and Thoresz, Keith and Sinha, Pawan},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {The Fidelity of Local Ordinal Encoding},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/84ddfb34126fc3a48ee38d7044e87276-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/84ddfb34126fc3a48ee38d7044e87276-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/84ddfb34126fc3a48ee38d7044e87276-Metadata.json", "review": "", "metareview": "", "pdf_size": 291882, "gs_citation": 27, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9417120135019400673&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 13, "aff": "Center for Biological and Computational Learning; Center for Biological and Computational Learning; Center for Biological and Computational Learning; Center for Biological and Computational Learning", "aff_domain": "ai.mit.edu;ai.mit.edu;ai.mit.edu;ai.mit.edu", "email": "ai.mit.edu;ai.mit.edu;ai.mit.edu;ai.mit.edu", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Center for Biological and Computational Learning", "aff_unique_dep": "", "aff_unique_url": "", "aff_unique_abbr": "", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "id": "5b3c58103c", "title": "The Infinite Hidden Markov Model", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/e3408432c1a48a52fb6c74d926b38886-Abstract.html", "author": "Matthew J. Beal; Zoubin Ghahramani; Carl E. Rasmussen", "abstract": "We show that it is possible to extend hidden Markov models to have a countably in\ufb01nite number of hidden states. By using the theory of Dirichlet processes we can implicitly integrate out the in\ufb01nitely many transition parameters, leaving only three hyperparameters which can be learned from data. 
These three hyperparameters de\ufb01ne a hierarchical Dirichlet process capable of capturing a rich set of transition dynamics. The three hyperparameters control the time scale of the dynamics, the sparsity of the underlying state-transition matrix, and the expected num- ber of distinct hidden states in a \ufb01nite sequence. In this framework it is also natural to allow the alphabet of emitted symbols to be in\ufb01nite\u2014 consider, for example, symbols being possible words appearing in En- glish text.", "bibtex": "@inproceedings{NIPS2001_e3408432,\n author = {Beal, Matthew and Ghahramani, Zoubin and Rasmussen, Carl},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {The Infinite Hidden Markov Model},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/e3408432c1a48a52fb6c74d926b38886-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/e3408432c1a48a52fb6c74d926b38886-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/e3408432c1a48a52fb6c74d926b38886-Metadata.json", "review": "", "metareview": "", "pdf_size": 318650, "gs_citation": 908, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1070100767734863690&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 27, "aff": "Gatsby Computational Neuroscience Unit; Gatsby Computational Neuroscience Unit; Gatsby Computational Neuroscience Unit", "aff_domain": "gatsby.ucl.ac.uk;gatsby.ucl.ac.uk;gatsby.ucl.ac.uk", "email": "gatsby.ucl.ac.uk;gatsby.ucl.ac.uk;gatsby.ucl.ac.uk", "github": "", "project": "http://www.gatsby.ucl.ac.uk", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "University College London", "aff_unique_dep": "Gatsby Computational Neuroscience Unit", "aff_unique_url": "https://www.ucl.ac.uk", 
"aff_unique_abbr": "UCL", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United Kingdom" }, { "id": "62e8caf016", "title": "The Intelligent surfer: Probabilistic Combination of Link and Content Information in PageRank", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/a501bebf79d570651ff601788ea9d16d-Abstract.html", "author": "Matthew Richardson; Pedro Domingos", "abstract": "The PageRank algorithm, used in the Google search engine, greatly improves the results of Web search by taking into account the link structure of the Web. PageRank assigns to a page a score propor- tional to the number of times a random surfer would visit that page, if it surfed indefinitely from page to page, following all outlinks from a page with equal probability. We propose to improve Page- Rank by using a more intelligent surfer, one that is guided by a probabilistic model of the relevance of a page to a query. Efficient execution of our algorithm at query time is made possible by pre- computing at crawl time (and thus once for all queries) the neces- sary terms. Experiments on two large subsets of the Web indicate that our algorithm significantly outperforms PageRank in the (hu- man-rated) quality of the pages returned, while remaining efficient enough to be used in today\u2019s large search engines.", "bibtex": "@inproceedings{NIPS2001_a501bebf,\n author = {Richardson, Matthew and Domingos, Pedro},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {The Intelligent surfer: Probabilistic Combination of Link and Content Information in PageRank},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/a501bebf79d570651ff601788ea9d16d-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/a501bebf79d570651ff601788ea9d16d-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/a501bebf79d570651ff601788ea9d16d-Metadata.json", "review": "", "metareview": "", "pdf_size": 53043, "gs_citation": 645, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5303210589161464062&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 25, "aff": ";", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster" }, { "id": "fad9d7393e", "title": "The Method of Quantum Clustering", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/3e9e39fed3b8369ed940f52cf300cf88-Abstract.html", "author": "David Horn; Assaf Gottlieb", "abstract": "We propose a novel clustering method that is an extension of ideas inher- ent to scale-space clustering and support-vector clustering. Like the lat- ter, it associates every data point with a vector in Hilbert space, and like the former it puts emphasis on their total sum, that is equal to the scale- space probability function. The novelty of our approach is the study of an operator in Hilbert space, represented by the Schr\u00a8odinger equation of which the probability function is a solution. This Schr\u00a8odinger equation contains a potential function that can be derived analytically from the probability function. We associate minima of the potential with cluster centers. The method has one variable parameter, the scale of its Gaussian kernel. We demonstrate its applicability on known data sets. 
By limiting the evaluation of the Schr\u00a8odinger potential to the locations of data points, we can apply this method to problems in high dimensions.", "bibtex": "@inproceedings{NIPS2001_3e9e39fe,\n author = {Horn, David and Gottlieb, Assaf},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {The Method of Quantum Clustering},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/3e9e39fed3b8369ed940f52cf300cf88-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/3e9e39fed3b8369ed940f52cf300cf88-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/3e9e39fed3b8369ed940f52cf300cf88-Metadata.json", "review": "", "metareview": "", "pdf_size": 885499, "gs_citation": 101, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17427273712715007578&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 17, "aff": "School of Physics and Astronomy, Raymond and Beverly Sackler Faculty of Exact Sciences, Tel Aviv University, Tel Aviv 69978, Israel; School of Physics and Astronomy, Raymond and Beverly Sackler Faculty of Exact Sciences, Tel Aviv University, Tel Aviv 69978, Israel", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Tel Aviv University", "aff_unique_dep": "School of Physics and Astronomy", "aff_unique_url": "https://www.tau.ac.il", "aff_unique_abbr": "TAU", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Tel Aviv", "aff_country_unique_index": "0;0", "aff_country_unique": "Israel" }, { "id": "e009d8314b", "title": "The Noisy Euclidean Traveling Salesman Problem and Learning", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/06b1338ba02add2b5d2da67663b19ebe-Abstract.html", "author": "Mikio L. 
Braun; Joachim M. Buhmann", "abstract": "We consider noisy Euclidean traveling salesman problems in the plane, which are random combinatorial problems with underlying structure. Gibbs sampling is used to compute average trajectories, which estimate the underlying structure common to all instances. This procedure requires identifying the exact relationship between permutations and tours. In a learning setting, the average trajec(cid:173) tory is used as a model to construct solutions to new instances sampled from the same source. Experimental results show that the average trajectory can in fact estimate the underlying structure and that overfitting effects occur if the trajectory adapts too closely to a single instance.", "bibtex": "@inproceedings{NIPS2001_06b1338b,\n author = {Braun, Mikio and Buhmann, Joachim},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {The Noisy Euclidean Traveling Salesman Problem and Learning},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/06b1338ba02add2b5d2da67663b19ebe-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/06b1338ba02add2b5d2da67663b19ebe-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/06b1338ba02add2b5d2da67663b19ebe-Metadata.json", "review": "", "metareview": "", "pdf_size": 1550422, "gs_citation": 18, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13912396312880912432&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "Institute for Computer Science, Dept. III, University of Bonn; Institute for Computer Science, Dept. 
III, University of Bonn", "aff_domain": "cs.uni-bonn.de;cs.uni-bonn.de", "email": "cs.uni-bonn.de;cs.uni-bonn.de", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of Bonn", "aff_unique_dep": "Institute for Computer Science, Dept. III", "aff_unique_url": "https://www.uni-bonn.de", "aff_unique_abbr": "", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "Germany" }, { "id": "65c7f320ad", "title": "The Steering Approach for Multi-Criteria Reinforcement Learning", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/8c249675aea6c3cbd91661bbae767ff1-Abstract.html", "author": "Shie Mannor; Nahum Shimkin", "abstract": "We consider the problem of learning to attain multiple goals in a dynamic envi- ronment, which is initially unknown. In addition, the environment may contain arbitrarily varying elements related to actions of other agents or to non-stationary moves of Nature. This problem is modelled as a stochastic (Markov) game between the learning agent and an arbitrary player, with a vector-valued reward function. The objective of the learning agent is to have its long-term average reward vector belong to a given target set. We devise an algorithm for achieving this task, which is based on the theory of approachability for stochastic games. This algorithm com- bines, in an appropriate way, a \ufb02nite set of standard, scalar-reward learning algo- rithms. Su\u2013cient conditions are given for the convergence of the learning algorithm to a general target set. The specialization of these results to the single-controller Markov decision problem are discussed as well.", "bibtex": "@inproceedings{NIPS2001_8c249675,\n author = {Mannor, Shie and Shimkin, Nahum},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {The Steering Approach for Multi-Criteria Reinforcement Learning},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/8c249675aea6c3cbd91661bbae767ff1-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/8c249675aea6c3cbd91661bbae767ff1-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/8c249675aea6c3cbd91661bbae767ff1-Metadata.json", "review": "", "metareview": "", "pdf_size": 102900, "gs_citation": 65, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3874241652999312570&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 9, "aff": "Department of Electrical Engineering, Technion, Haifa 32000, Israel; Department of Electrical Engineering, Technion, Haifa 32000, Israel", "aff_domain": "ftx.technion.ac.il;eeg.technion.ac.il", "email": "ftx.technion.ac.il;eeg.technion.ac.il", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Technion", "aff_unique_dep": "Department of Electrical Engineering", "aff_unique_url": "https://www.technion.ac.il", "aff_unique_abbr": "Technion", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Haifa", "aff_country_unique_index": "0;0", "aff_country_unique": "Israel" }, { "id": "5c9d2108c6", "title": "The Unified Propagation and Scaling Algorithm", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/d0fb963ff976f9c37fc81fe03c21ea7b-Abstract.html", "author": "Yee W. Teh; Max Welling", "abstract": "In this paper we will show that a restricted class of constrained mini- mum divergence problems, named generalized inference problems, can be solved by approximating the KL divergence with a Bethe free energy. The algorithm we derive is closely related to both loopy belief propaga- tion and iterative scaling. 
This uni\ufb01ed propagation and scaling algorithm reduces to a convergent alternative to loopy belief propagation when no constraints are present. Experiments show the viability of our algorithm.", "bibtex": "@inproceedings{NIPS2001_d0fb963f,\n author = {Teh, Yee and Welling, Max},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {The Unified Propagation and Scaling Algorithm},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/d0fb963ff976f9c37fc81fe03c21ea7b-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/d0fb963ff976f9c37fc81fe03c21ea7b-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/d0fb963ff976f9c37fc81fe03c21ea7b-Metadata.json", "review": "", "metareview": "", "pdf_size": 167869, "gs_citation": 81, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9810040780097527632&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 15, "aff": "Department of Computer Science, University of Toronto; Gatsby Computational Neuroscience Unit, University College London", "aff_domain": "cs.toronto.edu;gatsby.ucl.ac.uk", "email": "cs.toronto.edu;gatsby.ucl.ac.uk", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "University of Toronto;University College London", "aff_unique_dep": "Department of Computer Science;Gatsby Computational Neuroscience Unit", "aff_unique_url": "https://www.utoronto.ca;https://www.ucl.ac.uk", "aff_unique_abbr": "U of T;UCL", "aff_campus_unique_index": "0;1", "aff_campus_unique": "Toronto;London", "aff_country_unique_index": "0;1", "aff_country_unique": "Canada;United Kingdom" }, { "id": "fb218b8640", "title": "The g Factor: Relating Distributions on Features to Distributions on Images", "site": 
"https://papers.nips.cc/paper_files/paper/2001/hash/f8c0c968632845cd133308b1a494967f-Abstract.html", "author": "James M. Coughlan; Alan L. Yuille", "abstract": "We describe the g-factor, which relates probability distributions on image features to distributions on the images themselves. The g-factor depends only on our choice of features and lattice quanti(cid:173) zation and is independent of the training image data. We illustrate the importance of the g-factor by analyzing how the parameters of Markov Random Field (i.e. Gibbs or log-linear) probability models of images are learned from data by maximum likelihood estimation. In particular, we study homogeneous MRF models which learn im(cid:173) age distributions in terms of clique potentials corresponding to fea(cid:173) ture histogram statistics (d. Minimax Entropy Learning (MEL) by Zhu, Wu and Mumford 1997 [11]) . We first use our analysis of the g-factor to determine when the clique potentials decouple for different features . Second, we show that clique potentials can be computed analytically by approximating the g-factor. Third, we demonstrate a connection between this approximation and the Generalized Iterative Scaling algorithm (GIS), due to Darroch and Ratcliff 1972 [2], for calculating potentials. This connection en(cid:173) ables us to use GIS to improve our multinomial approximation, using Bethe-Kikuchi[8] approximations to simplify the GIS proce(cid:173) dure. We support our analysis by computer simulations.", "bibtex": "@inproceedings{NIPS2001_f8c0c968,\n author = {Coughlan, James and Yuille, Alan L},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {The g Factor: Relating Distributions on Features to Distributions on Images},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/f8c0c968632845cd133308b1a494967f-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/f8c0c968632845cd133308b1a494967f-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/f8c0c968632845cd133308b1a494967f-Metadata.json", "review": "", "metareview": "", "pdf_size": 1376942, "gs_citation": 2, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9191959856300594818&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": ";", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster" }, { "id": "c9e1231082", "title": "Thin Junction Trees", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/325995af77a0e8b06d1204a171010b3a-Abstract.html", "author": "Francis R. Bach; Michael I. Jordan", "abstract": "We present an algorithm that induces a class of models with thin junction trees\u2014models that are characterized by an upper bound on the size of the maximal cliques of their triangulated graph. By ensuring that the junction tree is thin, inference in our models remains tractable throughout the learning process. This allows both an ef\ufb01cient implementation of an iterative scaling parameter estimation algorithm and also ensures that inference can be performed ef\ufb01ciently with the \ufb01nal model. We illustrate the approach with applications in handwritten digit recognition and DNA splice site detection.", "bibtex": "@inproceedings{NIPS2001_325995af,\n author = {Bach, Francis and Jordan, Michael},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Thin Junction Trees},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/325995af77a0e8b06d1204a171010b3a-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/325995af77a0e8b06d1204a171010b3a-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/325995af77a0e8b06d1204a171010b3a-Metadata.json", "review": "", "metareview": "", "pdf_size": 75713, "gs_citation": 211, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3741461827468314004&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 15, "aff": "Computer Science Division, University of California, Berkeley, CA 94720; Computer Science and Statistics, University of California, Berkeley, CA 94720", "aff_domain": "cs.berkeley.edu;cs.berkeley.edu", "email": "cs.berkeley.edu;cs.berkeley.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of California, Berkeley", "aff_unique_dep": "Computer Science Division", "aff_unique_url": "https://www.berkeley.edu", "aff_unique_abbr": "UC Berkeley", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Berkeley", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "76e8c909d0", "title": "Transform-invariant Image Decomposition with Similarity Templates", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/a1d7311f2a312426d710e1c617fcbc8c-Abstract.html", "author": "Chris Stauffer; Erik Miller; Kinh Tieu", "abstract": "Recent work has shown impressive transform-invariant modeling and clustering for sets of images of objects with similar appearance. We seek to expand these capabilities to sets of images of an object class that show considerable variation across individual instances (e.g. 
pedestrian images) using a representation based on pixel-wise similarities, similarity templates. Because of its invariance to the colors of particular components of an object, this representation enables detection of instances of an object class and enables alignment of those instances. Further, this model implicitly represents the regions of color regularity in the class-specific image set enabling a decomposition of that object class into component regions.", "bibtex": "@inproceedings{NIPS2001_a1d7311f,\n author = {Stauffer, Chris and Miller, Erik and Tieu, Kinh},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Transform-invariant Image Decomposition with Similarity Templates},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/a1d7311f2a312426d710e1c617fcbc8c-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/a1d7311f2a312426d710e1c617fcbc8c-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/a1d7311f2a312426d710e1c617fcbc8c-Metadata.json", "review": "", "metareview": "", "pdf_size": 425443, "gs_citation": 9, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8768405422874215289&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 11, "aff": "MIT Artificial Intelligence Lab; MIT Artificial Intelligence Lab; MIT Artificial Intelligence Lab", "aff_domain": "ai.mit.edu;ai.mit.edu;ai.mit.edu", "email": "ai.mit.edu;ai.mit.edu;ai.mit.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "Massachusetts Institute of Technology", "aff_unique_dep": "Artificial Intelligence Lab", "aff_unique_url": "http://web.mit.edu/", "aff_unique_abbr": "MIT", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Cambridge", 
"aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "fb63accd30", "title": "Tree-based reparameterization for approximate inference on loopy graphs", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/9185f3ec501c674c7c788464a36e7fb3-Abstract.html", "author": "Martin J. Wainwright; Tommi Jaakkola; Alan S. Willsky", "abstract": "We develop a tree-based reparameterization framework that provides a new conceptual view of a large class of iterative algorithms for computing approximate marginals in graphs with cycles. It includes belief propagation (BP), which can be reformulated as a very local form of reparameterization. More generally, we consider algorithms that perform exact computations over spanning trees of the full graph. On the practical side, we find that such tree reparameterization (TRP) algorithms have convergence properties superior to BP. The reparameterization perspective also provides a number of theoretical insights into approximate inference, including a new characterization of fixed points; and an invariance intrinsic to TRP/BP. These two properties enable us to analyze and bound the error between the TRP/BP approximations and the actual marginals. While our results arise naturally from the TRP perspective, most of them apply in an algorithm-independent manner to any local minimum of the Bethe free energy. Our results also have natural extensions to more structured approximations [e.g., 1, 2].", "bibtex": "@inproceedings{NIPS2001_9185f3ec,\n author = {Wainwright, Martin J and Jaakkola, Tommi and Willsky, Alan},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Tree-based reparameterization for approximate inference on loopy graphs},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/9185f3ec501c674c7c788464a36e7fb3-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/9185f3ec501c674c7c788464a36e7fb3-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/9185f3ec501c674c7c788464a36e7fb3-Metadata.json", "review": "", "metareview": "", "pdf_size": 1542693, "gs_citation": 115, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4459495732798035041&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Department of Electrical Engineering and Computer Science; Department of Electrical Engineering and Computer Science; Department of Electrical Engineering and Computer Science", "aff_domain": "mit.edu;ai.mit.edu;mit.edu", "email": "mit.edu;ai.mit.edu;mit.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "Massachusetts Institute of Technology", "aff_unique_dep": "Department of Electrical Engineering and Computer Science", "aff_unique_url": "https://web.mit.edu", "aff_unique_abbr": "MIT", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Cambridge", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "7e942ca7e2", "title": "Unsupervised Learning of Human Motion Models", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/c6335734dbc0b1ded766421cfc611750-Abstract.html", "author": "Yang Song; Luis Goncalves; Pietro Perona", "abstract": "This paper presents an unsupervised learning algorithm that can derive the probabilistic dependence structure of parts of an object (a moving hu- man body in our examples) automatically from unlabeled data. 
The distinguished part of this work is that it is based on unlabeled data, i.e., the training features include both useful foreground parts and background clutter and the correspondence between the parts and detected features are unknown. We use decomposable triangulated graphs to depict the probabilistic independence of parts, but the unsupervised technique is not limited to this type of graph. In the new approach, labeling of the data (part assignments) is taken as hidden variables and the EM algorithm is applied. A greedy algorithm is developed to select parts and to search for the optimal structure based on the differential entropy of these variables. The success of our algorithm is demonstrated by applying it to generate models of human motion automatically from unlabeled real image sequences.", "bibtex": "@inproceedings{NIPS2001_c6335734,\n author = {Song, Yang and Goncalves, Luis and Perona, Pietro},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Unsupervised Learning of Human Motion Models},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/c6335734dbc0b1ded766421cfc611750-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/c6335734dbc0b1ded766421cfc611750-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/c6335734dbc0b1ded766421cfc611750-Metadata.json", "review": "", "metareview": "", "pdf_size": 211851, "gs_citation": 12, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18152875247010575930&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 14, "aff": ";;", "aff_domain": ";;", "email": ";;", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster" }, { "id": "1ddd367d48", "title": "Using Vocabulary Knowledge in Bayesian Multinomial Estimation", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/16ba72172e6a4f1de54d11ab6967e371-Abstract.html", "author": "Thomas L. Griffiths; Joshua B. Tenenbaum", "abstract": "Estimating the parameters of sparse multinomial distributions is an important component of many statistical learning tasks. Recent approaches have used uncertainty over the vocabulary of symbols in a multinomial distribution as a means of accounting for sparsity. We present a Bayesian approach that allows weak prior knowledge, in the form of a small set of approximate candidate vocabularies, to be used to dramatically improve the resulting estimates. We demonstrate these improvements in applications to text compres(cid:173) sion and estimating distributions over words in newsgroup data.", "bibtex": "@inproceedings{NIPS2001_16ba7217,\n author = {Griffiths, Thomas and Tenenbaum, Joshua},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Using Vocabulary Knowledge in Bayesian Multinomial Estimation},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/16ba72172e6a4f1de54d11ab6967e371-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/16ba72172e6a4f1de54d11ab6967e371-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/16ba72172e6a4f1de54d11ab6967e371-Metadata.json", "review": "", "metareview": "", "pdf_size": 818773, "gs_citation": 7, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16120644340321215110&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Department of Psychology, Stanford University, Stanford, CA 94305; Department of Psychology, Stanford University, Stanford, CA 94305", "aff_domain": "psych.stanford.edu;psych.stanford.edu", "email": "psych.stanford.edu;psych.stanford.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Stanford University", "aff_unique_dep": "Department of Psychology", "aff_unique_url": "https://www.stanford.edu", "aff_unique_abbr": "Stanford", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Stanford", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "72f53a9037", "title": "Variance Reduction Techniques for Gradient Estimates in Reinforcement Learning", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/584b98aac2dddf59ee2cf19ca4ccb75e-Abstract.html", "author": "Evan Greensmith; Peter L. Bartlett; Jonathan Baxter", "abstract": "We consider the use of two additive control variate methods to reduce the variance of performance gradient estimates in reinforcement learn- ing problems. The \ufb01rst approach we consider is the baseline method, in which a function of the current state is added to the discounted value estimate. 
We relate the performance of these methods, which use sample paths, to the variance of estimates based on iid data. We derive the baseline function that minimizes this variance, and we show that the variance for any baseline is the sum of the optimal variance and a weighted squared distance to the optimal baseline. We show that the widely used average discounted value baseline (where the reward is replaced by the difference between the reward and its expectation) is suboptimal. The second approach we consider is the actor-critic method, which uses an approximate value function. We give bounds on the expected squared error of its estimates. We show that minimizing distance to the true value function is suboptimal in general; we provide an example for which the true value function gives an estimate with positive variance, but the optimal value function gives an unbiased estimate with zero variance. Our bounds suggest algorithms to estimate the gradient of the performance of parameterized baseline or value functions. We present preliminary experiments that illustrate the performance improvements on a simple control problem.", "bibtex": "@inproceedings{NIPS2001_584b98aa,\n author = {Greensmith, Evan and Bartlett, Peter and Baxter, Jonathan},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Variance Reduction Techniques for Gradient Estimates in Reinforcement Learning},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/584b98aac2dddf59ee2cf19ca4ccb75e-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/584b98aac2dddf59ee2cf19ca4ccb75e-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/584b98aac2dddf59ee2cf19ca4ccb75e-Metadata.json", "review": "", "metareview": "", "pdf_size": 101804, "gs_citation": 602, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16146102359910917743&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "Australian National University; BIOwulf Technologies + Research School of Information Sciences and Engineering at the Australian National University; WhizBang! Labs, East", "aff_domain": "csl.anu.edu.au;anu.edu.au;whizbang.com", "email": "csl.anu.edu.au;anu.edu.au;whizbang.com", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1+0;2", "aff_unique_norm": "Australian National University;Biowulf Technologies;WhizBang! Labs", "aff_unique_dep": ";;", "aff_unique_url": "https://www.anu.edu.au;http://biowulf.com;", "aff_unique_abbr": "ANU;;", "aff_campus_unique_index": ";1", "aff_campus_unique": ";East", "aff_country_unique_index": "0;1+0", "aff_country_unique": "Australia;United States;" }, { "id": "4f3b289f35", "title": "Very loopy belief propagation for unwrapping phase images", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/3b92d18aa7a6176dd37d372bc2f1eb71-Abstract.html", "author": "Brendan J. 
Frey; Ralf Koetter; Nemanja Petrovic", "abstract": "Since the discovery that the best error-correcting decoding algorithm can be viewed as belief propagation in a cycle-bound graph, researchers have been trying to determine under what circumstances \"loopy belief propagation\" is effective for probabilistic inference. Despite several theoretical advances in our understanding of loopy belief propagation, to our knowledge, the only problem that has been solved using loopy belief propagation is error-correcting decoding on Gaussian channels. We propose a new representation for the two-dimensional phase unwrapping problem, and we show that loopy belief propagation produces results that are superior to existing techniques. This is an important result, since many imaging techniques, including magnetic resonance imaging and interferometric synthetic aperture radar, produce phase-wrapped images. Interestingly, the graph that we use has a very large number of very short cycles, supporting evidence that a large minimum cycle length is not needed for excellent results using belief propagation.", "bibtex": "@inproceedings{NIPS2001_3b92d18a,\n author = {Frey, Brendan J and Koetter, Ralf and Petrovic, Nemanja},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. 
Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Very loopy belief propagation for unwrapping phase images},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/3b92d18aa7a6176dd37d372bc2f1eb71-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/3b92d18aa7a6176dd37d372bc2f1eb71-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/3b92d18aa7a6176dd37d372bc2f1eb71-Metadata.json", "review": "", "metareview": "", "pdf_size": 1389687, "gs_citation": 89, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2204085185691899998&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": ";;", "aff_domain": ";;", "email": ";;", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster" }, { "id": "cc306bb7a8", "title": "Why Neuronal Dynamics Should Control Synaptic Learning Rules", "site": "https://papers.nips.cc/paper_files/paper/2001/hash/277a78fc05c8864a170e9a56ceeabc4c-Abstract.html", "author": "Jesper Tegn\u00e9r; \u00c1d\u00e1m Kepecs", "abstract": "Hebbian learning rules are generally formulated as static rules. Under changing condition (e.g. neuromodulation, input statistics) most rules are sensitive to parameters. In particular, recent work has focused on two different formulations of spike-timing-dependent plasticity rules. Additive STDP [1] is remarkably versatile but also very fragile, whereas multiplicative STDP [2, 3] is more robust but lacks attractive features such as synaptic competition and rate stabilization. Here we address the problem of robustness in the additive STDP rule. We derive an adaptive control scheme, where the learning function is under fast dynamic control by postsynaptic activity to stabilize learning under a variety of conditions. Such a control scheme can be implemented using known biophysical mechanisms of synapses. 
We show that this adaptive rule makes the additive STDP more robust. Finally, we give an example how metaplasticity of the adaptive rule can be used to guide STDP into different type of learning regimes.", "bibtex": "@inproceedings{NIPS2001_277a78fc,\n author = {Tegn\\'{e}r, Jesper and Kepecs, \\'{A}d\\'{a}m},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {T. Dietterich and S. Becker and Z. Ghahramani},\n pages = {},\n publisher = {MIT Press},\n title = {Why Neuronal Dynamics Should Control Synaptic Learning Rules},\n url = {https://proceedings.neurips.cc/paper_files/paper/2001/file/277a78fc05c8864a170e9a56ceeabc4c-Paper.pdf},\n volume = {14},\n year = {2001}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2001/file/277a78fc05c8864a170e9a56ceeabc4c-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2001/file/277a78fc05c8864a170e9a56ceeabc4c-Metadata.json", "review": "", "metareview": "", "pdf_size": 1571725, "gs_citation": 11, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=682608007111068088&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 13, "aff": "Stockholm Bioinformatics Center, Dept. of Numerical Analysis & Computing Science, Royal Institute for Technology, S-10044 Stockholm, Sweden; Volen Center for Complex Systems, Brandeis University, Waltham, MA 02454", "aff_domain": "nada.kth.se;brandeis.edu", "email": "nada.kth.se;brandeis.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "Royal Institute for Technology;Brandeis University", "aff_unique_dep": "Dept. of Numerical Analysis & Computing Science;Volen Center for Complex Systems", "aff_unique_url": "https://www.kth.se;https://www.brandeis.edu", "aff_unique_abbr": "KTH;Brandeis", "aff_campus_unique_index": "0;1", "aff_campus_unique": "Stockholm;Waltham", "aff_country_unique_index": "0;1", "aff_country_unique": "Sweden;United States" } ]