[ { "id": "5a44e759d2", "title": "1-norm Support Vector Machines", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/49d4b2faeb4b7b9e745775793141e2b2-Abstract.html", "author": "Ji Zhu; Saharon Rosset; Robert Tibshirani; Trevor J. Hastie", "abstract": "The standard 2-norm SVM is known for its good performance in two- In this paper, we consider the 1-norm SVM. We class classi\u00a3cation. argue that the 1-norm SVM may have some advantage over the standard 2-norm SVM, especially when there are redundant noise features. We also propose an ef\u00a3cient algorithm that computes the whole solution path of the 1-norm SVM, hence facilitates adaptive selection of the tuning parameter for the 1-norm SVM.", "bibtex": "@inproceedings{NIPS2003_49d4b2fa,\n author = {Zhu, Ji and Rosset, Saharon and Tibshirani, Robert and Hastie, Trevor},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {1-norm Support Vector Machines},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/49d4b2faeb4b7b9e745775793141e2b2-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/49d4b2faeb4b7b9e745775793141e2b2-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/49d4b2faeb4b7b9e745775793141e2b2-Metadata.json", "review": "", "metareview": "", "pdf_size": 107008, "gs_citation": 1293, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1117203360703180446&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "Department of Statistics, Stanford University; Department of Statistics, Stanford University; Department of Statistics, Stanford University; Department of Statistics, Stanford University", "aff_domain": "stat.stanford.edu;stat.stanford.edu;stat.stanford.edu;stat.stanford.edu", "email": "stat.stanford.edu;stat.stanford.edu;stat.stanford.edu;stat.stanford.edu", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Stanford University", "aff_unique_dep": "Department of Statistics", "aff_unique_url": "https://www.stanford.edu", "aff_unique_abbr": "Stanford", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Stanford", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "id": "61066fe587", "title": "A Biologically Plausible Algorithm for Reinforcement-shaped Representational Learning", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/186fb23a33995d91ce3c2212189178c8-Abstract.html", "author": "Maneesh Sahani", "abstract": "Signi\ufb01cant plasticity in sensory cortical representations can be driven in mature animals either by behavioural tasks that pair sensory stimuli with reinforcement, or by electrophysiological experiments that pair sensory input with direct stimulation of neuromodulatory nuclei, but usually not by sensory stimuli presented alone. Biologically motivated theories of representational learning, however, have tended to focus on unsupervised mechanisms, which may play a signi\ufb01cant role on evolutionary or devel- opmental timescales, but which neglect this essential role of reinforce- ment in adult plasticity. By contrast, theoretical reinforcement learning has generally dealt with the acquisition of optimal policies for action in an uncertain world, rather than with the concurrent shaping of sensory representations. 
This paper develops a framework for representational learning which builds on the relative success of unsupervised generative- modelling accounts of cortical encodings to incorporate the effects of reinforcement in a biologically plausible way.", "bibtex": "@inproceedings{NIPS2003_186fb23a,\n author = {Sahani, Maneesh},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {A Biologically Plausible Algorithm for Reinforcement-shaped Representational Learning},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/186fb23a33995d91ce3c2212189178c8-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/186fb23a33995d91ce3c2212189178c8-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/186fb23a33995d91ce3c2212189178c8-Metadata.json", "review": "", "metareview": "", "pdf_size": 105860, "gs_citation": 7, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12775318012550598538&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "W.M. Keck Foundation Center for Integrative Neuroscience, University of California, San Francisco, CA 94143-0732", "aff_domain": "phy.ucsf.edu", "email": "phy.ucsf.edu", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "University of California, San Francisco", "aff_unique_dep": "W.M. Keck Foundation Center for Integrative Neuroscience", "aff_unique_url": "https://www.ucsf.edu", "aff_unique_abbr": "UCSF", "aff_campus_unique_index": "0", "aff_campus_unique": "San Francisco", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "id": "df3394b3f3", "title": "A Classification-based Cocktail-party Processor", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/cb2c2041d9763d84d7d655e81178f444-Abstract.html", "author": "Nicoleta Roman; Deliang Wang; Guy J. Brown", "abstract": "Guy J. Brown", "bibtex": "@inproceedings{NIPS2003_cb2c2041,\n author = {Roman, Nicoleta and Wang, Deliang and Brown, Guy},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {A Classification-based Cocktail-party Processor},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/cb2c2041d9763d84d7d655e81178f444-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/cb2c2041d9763d84d7d655e81178f444-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/cb2c2041d9763d84d7d655e81178f444-Metadata.json", "review": "", "metareview": "", "pdf_size": 1246926, "gs_citation": 35, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10696039156738739443&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": ";;", "aff_domain": ";;", "email": ";;", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster" }, { "id": "5650a14338", "title": "A Fast Multi-Resolution Method for Detection of Significant Spatial Disease Clusters", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/82ca5dd156cc926b2992f73c2896f761-Abstract.html", "author": "Daniel B. Neill; Andrew W. 
Moore", "abstract": "Given an N(cid:2)N grid of squares, where each square has a count and an un- derlying population, our goal is to \ufb01nd the square region with the highest density, and to calculate its signi\ufb01cance by randomization. Any density measure D, dependent on the total count and total population of a re- gion, can be used. For example, if each count represents the number of disease cases occurring in that square, we can use Kulldorff\u2019s spatial scan statistic DK to \ufb01nd the most signi\ufb01cant spatial disease cluster. A naive approach to \ufb01nding the maximum density region requires O(N 3) time, and is generally computationally infeasible. We present a novel algorithm which partitions the grid into overlapping regions, bounds the maximum score of subregions contained in each region, and prunes re- gions which cannot contain the maximum density region. For suf\ufb01ciently dense regions, this method \ufb01nds the maximum density region in optimal O(N2) time, in practice resulting in signi\ufb01cant (10-200x) speedups.", "bibtex": "@inproceedings{NIPS2003_82ca5dd1,\n author = {Neill, Daniel and Moore, Andrew},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {A Fast Multi-Resolution Method for Detection of Significant Spatial Disease Clusters},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/82ca5dd156cc926b2992f73c2896f761-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/82ca5dd156cc926b2992f73c2896f761-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/82ca5dd156cc926b2992f73c2896f761-Metadata.json", "review": "", "metareview": "", "pdf_size": 111101, "gs_citation": 89, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1534805890118691839&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Department of Computer Science, Carnegie Mellon University; Department of Computer Science, Carnegie Mellon University", "aff_domain": "cs.cmu.edu;cs.cmu.edu", "email": "cs.cmu.edu;cs.cmu.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Carnegie Mellon University", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.cmu.edu", "aff_unique_abbr": "CMU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "5f216dbb0d", "title": "A Functional Architecture for Motion Pattern Processing in MSTd", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/fec87a37cdeec1c6ecf8181c0aa2d3bf-Abstract.html", "author": "Scott A. Beardsley; Lucia M. Vaina", "abstract": "Lucia M. Vaina", "bibtex": "@inproceedings{NIPS2003_fec87a37,\n author = {Beardsley, Scott and Vaina, Lucia},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {A Functional Architecture for Motion Pattern Processing in MSTd},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/fec87a37cdeec1c6ecf8181c0aa2d3bf-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/fec87a37cdeec1c6ecf8181c0aa2d3bf-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/fec87a37cdeec1c6ecf8181c0aa2d3bf-Metadata.json", "review": "", "metareview": "", "pdf_size": 312400, "gs_citation": 3, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7107507478648863837&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 9, "aff": "Dept. of Biomedical Engineering, Boston University; Dept. of Biomedical Engineering, Boston University", "aff_domain": "bu.edu;bu.edu", "email": "bu.edu;bu.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Boston University", "aff_unique_dep": "Dept. of Biomedical Engineering", "aff_unique_url": "https://www.bu.edu", "aff_unique_abbr": "BU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "5718276a1f", "title": "A Holistic Approach to Compositional Semantics: a connectionist model and robot experiments", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/3baa271bc35fe054c86928f7016e8ae6-Abstract.html", "author": "Yuuya Sugita; Jun Tani", "abstract": "We present a novel connectionist model for acquiring the semantics of a simple language through the behavioral experiences of a real robot. We focus on the \u201ccompositionality\u201d of semantics, a fundamental character- istic of human language, which is the ability to understand the meaning of a sentence as a combination of the meanings of words. We also pay much attention to the \u201cembodiment\u201d of a robot, which means that the robot should acquire semantics which matches its body, or sensory-motor system. The essential claim is that an embodied compositional semantic representation can be self-organized from generalized correspondences between sentences and behavioral patterns. This claim is examined and con\ufb01rmed through simple experiments in which a robot generates corre- sponding behaviors from unlearned sentences by analogy with the corre- spondences between learned sentences and behaviors.", "bibtex": "@inproceedings{NIPS2003_3baa271b,\n author = {Sugita, Yuuya and Tani, Jun},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {A Holistic Approach to Compositional Semantics: a connectionist model and robot experiments},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/3baa271bc35fe054c86928f7016e8ae6-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/3baa271bc35fe054c86928f7016e8ae6-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/3baa271bc35fe054c86928f7016e8ae6-Metadata.json", "review": "", "metareview": "", "pdf_size": 74503, "gs_citation": 21, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8741217131559896332&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "BSI, RIKEN; BSI, RIKEN", "aff_domain": "bdc.brain.riken.go.jp;bdc.brain.riken.go.jp", "email": "bdc.brain.riken.go.jp;bdc.brain.riken.go.jp", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "RIKEN", "aff_unique_dep": "BSI", "aff_unique_url": "https://www.riken.jp", "aff_unique_abbr": "RIKEN", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "Japan" }, { "id": "436c551c9b", "title": "A Kullback-Leibler Divergence Based Kernel for SVM Classification in Multimedia Applications", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/0abdc563a06105aee3c6136871c9f4d1-Abstract.html", "author": "Pedro J. Moreno; Purdy P. Ho; Nuno Vasconcelos", "abstract": "Over the last years signi\ufb01cant efforts have been made to develop kernels that can be applied to sequence data such as DNA, text, speech, video and images. The Fisher Kernel and similar variants have been suggested as good ways to combine an underlying generative model in the feature space and discriminant classi\ufb01ers such as SVM\u2019s. In this paper we sug- gest an alternative procedure to the Fisher kernel for systematically \ufb01nd- ing kernel functions that naturally handle variable length sequence data in multimedia domains. In particular for domains such as speech and images we explore the use of kernel functions that take full advantage of well known probabilistic models such as Gaussian Mixtures and sin- gle full covariance Gaussian models. We derive a kernel distance based on the Kullback-Leibler (KL) divergence between generative models. In effect our approach combines the best of both generative and discrim- inative methods and replaces the standard SVM kernels. We perform experiments on speaker identi\ufb01cation/veri\ufb01cation and image classi\ufb01ca- tion tasks and show that these new kernels have the best performance in speaker veri\ufb01cation and mostly outperform the Fisher kernel based SVM\u2019s and the generative classi\ufb01ers in speaker identi\ufb01cation and image classi\ufb01cation.", "bibtex": "@inproceedings{NIPS2003_0abdc563,\n author = {Moreno, Pedro and Ho, Purdy and Vasconcelos, Nuno},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {A Kullback-Leibler Divergence Based Kernel for SVM Classification in Multimedia Applications},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/0abdc563a06105aee3c6136871c9f4d1-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/0abdc563a06105aee3c6136871c9f4d1-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/0abdc563a06105aee3c6136871c9f4d1-Metadata.json", "review": "", "metareview": "", "pdf_size": 101574, "gs_citation": 593, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7599225152025850277&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 22, "aff": "Hewlett-Packard Cambridge Research Laboratory; Hewlett-Packard Cambridge Research Laboratory; UCSD ECE Department", "aff_domain": "hp.com;hp.com;ece.ucsd.edu", "email": "hp.com;hp.com;ece.ucsd.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;1", "aff_unique_norm": "Hewlett-Packard;University of California, San Diego", "aff_unique_dep": "Research Laboratory;Department of Electrical and Computer Engineering", "aff_unique_url": "https://www.hpe.com;https://www.ucsd.edu", "aff_unique_abbr": "HP;UCSD", "aff_campus_unique_index": "0;0;1", "aff_campus_unique": "Cambridge;La Jolla", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "5d0ad506e1", "title": "A Low-Power Analog VLSI Visual Collision Detector", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/46515dcd99ea50dd0671bc6840830404-Abstract.html", "author": "Reid R. Harrison", "abstract": "We have designed and tested a single-chip analog VLSI sensor that detects imminent collisions by measuring radially expansive optic flow. The design of the chip is based on a model proposed to explain leg-extension behavior in flies during landing approaches. A new elementary motion detector (EMD) circuit was developed to measure optic flow. This EMD circuit models the bandpass nature of large monopolar cells (LMCs) immediately postsynaptic to photoreceptors in the fly visual system. A 16 \u00d7 16 array of 2-D motion detectors was fabricated on a 2.24 mm \u00d7 2.24 mm die in a standard 0.5-\u00b5m CMOS process. The chip consumes 140 \u00b5W of power from a 5 V supply. With the addition of wide-angle optics, the sensor is able to detect collisions around 500 ms before impact in complex, real-world scenes.", "bibtex": "@inproceedings{NIPS2003_46515dcd,\n author = {Harrison, Reid},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {A Low-Power Analog VLSI Visual Collision Detector},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/46515dcd99ea50dd0671bc6840830404-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/46515dcd99ea50dd0671bc6840830404-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/46515dcd99ea50dd0671bc6840830404-Metadata.json", "review": "", "metareview": "", "pdf_size": 186228, "gs_citation": 18, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1315322135916581412&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "", "aff_domain": "", "email": "", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster" }, { "id": "68945823de", "title": "A Mixed-Signal VLSI for Real-Time Generation of Edge-Based Image Vectors", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/b3f61131b6eceeb2b14835fa648a48ff-Abstract.html", "author": "Masakazu Yagi; Hideo Yamasaki; Tadashi Shibata", "abstract": "A mixed-signal image filtering VLSI has been developed aiming at real-time generation of edge-based image vectors for robust image recognition. A four-stage asynchronous median detection architec- ture based on analog digital mixed-signal circuits has been intro- duced to determine the threshold value of edge detection, the key processing parameter in vector generation. As a result, a fully seamless pipeline processing from threshold detection to edge fea- ture map generation has been established. A prototype chip was designed in a 0.35-\u00b5m double-polysilicon three-metal-layer CMOS technology and the concept was verified by the fabricated chip. The chip generates a 64-dimension feature vector from a 64x64-pixel gray scale image every 80\u00b5sec. This is about 104 times faster than the software computation, making a real-time image recognition system feasible.", "bibtex": "@inproceedings{NIPS2003_b3f61131,\n author = {Yagi, Masakazu and Yamasaki, Hideo and Shibata, Tadashi},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {A Mixed-Signal VLSI for Real-Time Generation of Edge-Based Image Vectors},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/b3f61131b6eceeb2b14835fa648a48ff-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/b3f61131b6eceeb2b14835fa648a48ff-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/b3f61131b6eceeb2b14835fa648a48ff-Metadata.json", "review": "", "metareview": "", "pdf_size": 298385, "gs_citation": 10, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10298455979419338365&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Department of Electronic Engineering; Department of Electronic Engineering; Department of Frontier Informatics + Department of Electronic Engineering", "aff_domain": "dent.osaka-u.ac.jp;if.t.u-tokyo.ac.jp;ee.t.u-tokyo.ac.jp", "email": "dent.osaka-u.ac.jp;if.t.u-tokyo.ac.jp;ee.t.u-tokyo.ac.jp", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;1+0", "aff_unique_norm": "Institution Name Not Provided;Department of Frontier Informatics", "aff_unique_dep": "Department of Electronic Engineering;Frontier Informatics", "aff_unique_url": ";", "aff_unique_abbr": ";", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "", "aff_country_unique": "" }, { "id": "3d2a897a6f", "title": "A Model for Learning the Semantics of Pictures", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/0bf727e907c5fc9d5356f11e4c45d613-Abstract.html", "author": "Victor Lavrenko; R. Manmatha; Jiwoon Jeon", "abstract": "We propose an approach to learning the semantics of images which al- lows us to automatically annotate an image with keywords and to retrieve images based on text queries. We do this using a formalism that models the generation of annotated images. We assume that every image is di- vided into regions, each described by a continuous-valued feature vector. Given a training set of images with annotations, we compute a joint prob- abilistic model of image features and words which allow us to predict the probability of generating a word given the image regions. This may be used to automatically annotate and retrieve images given a word as a query. Experiments show that our model signi\ufb01cantly outperforms the best of the previously reported results on the tasks of automatic image annotation and retrieval.", "bibtex": "@inproceedings{NIPS2003_0bf727e9,\n author = {Lavrenko, Victor and Manmatha, R. and Jeon, Jiwoon},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {A Model for Learning the Semantics of Pictures},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/0bf727e907c5fc9d5356f11e4c45d613-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/0bf727e907c5fc9d5356f11e4c45d613-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/0bf727e907c5fc9d5356f11e4c45d613-Metadata.json", "review": "", "metareview": "", "pdf_size": 110122, "gs_citation": 977, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=748085819773672814&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 13, "aff": "Center for Intelligent Information Retrieval, Computer Science Department, University of Massachusetts Amherst; Center for Intelligent Information Retrieval, Computer Science Department, University of Massachusetts Amherst; Center for Intelligent Information Retrieval, Computer Science Department, University of Massachusetts Amherst", "aff_domain": "cs.umass.edu;cs.umass.edu;cs.umass.edu", "email": "cs.umass.edu;cs.umass.edu;cs.umass.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of Massachusetts Amherst", "aff_unique_dep": "Computer Science Department", "aff_unique_url": "https://www.umass.edu", "aff_unique_abbr": "UMass Amherst", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Amherst", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "84e6da0f2c", "title": "A Neuromorphic Multi-chip Model of a Disparity Selective Complex Cell", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/2f4fe03d77724a7217006e5d16728874-Abstract.html", "author": "Bertram E. Shi; Eric K. Tsang", "abstract": "The relative depth of objects causes small shifts in the left and right ret- inal positions of these objects, called binocular disparity. Here, we describe a neuromorphic implementation of a disparity selective com- plex cell using the binocular energy model, which has been proposed to model the response of disparity selective cells in the visual cortex. Our system consists of two silicon chips containing spiking neurons with monocular Gabor-type spatial receptive fields (RF) and circuits that combine the spike outputs to compute a disparity selective complex cell response. The disparity selectivity of the cell can be adjusted by both position and phase shifts between the monocular RF profiles, which are both used in biology. Our neuromorphic system performs better with phase encoding, because the relative responses of neurons tuned to dif- ferent disparities by phase shifts are better matched than the responses of neurons tuned by position shifts.", "bibtex": "@inproceedings{NIPS2003_2f4fe03d,\n author = {Shi, Bertram and Tsang, Eric},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {A Neuromorphic Multi-chip Model of a Disparity Selective Complex Cell},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/2f4fe03d77724a7217006e5d16728874-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/2f4fe03d77724a7217006e5d16728874-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/2f4fe03d77724a7217006e5d16728874-Metadata.json", "review": "", "metareview": "", "pdf_size": 85330, "gs_citation": 10, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12125635626251266111&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Dept. of Electrical and Electronic Engineering, Hong Kong University of Science and Technology, Kowloon, HONG KONG SAR; Dept. of Electrical and Electronic Engineering, Hong Kong University of Science and Technology, Kowloon, HONG KONG SAR", "aff_domain": "ust.hk;ust.hk", "email": "ust.hk;ust.hk", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Hong Kong University of Science and Technology", "aff_unique_dep": "Dept. of Electrical and Electronic Engineering", "aff_unique_url": "https://www.ust.hk", "aff_unique_abbr": "HKUST", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Hong Kong SAR", "aff_country_unique_index": "0;0", "aff_country_unique": "China" }, { "id": "b2066f0f4a", "title": "A Nonlinear Predictive State Representation", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/72e6d3238361fe70f22fb0ac624a7072-Abstract.html", "author": "Matthew R. Rudary; Satinder P. Singh", "abstract": "Predictive state representations (PSRs) use predictions of a set of tests to represent the state of controlled dynamical systems. One reason why this representation is exciting as an alternative to partially observable Markov decision processes (POMDPs) is that PSR models of dynamical systems may be much more compact than POMDP models. Empirical work on PSRs to date has focused on linear PSRs, which have not allowed for compression relative to POMDPs. We introduce a new notion of tests which allows us to de\ufb01ne a new type of PSR that is nonlinear in general and allows for exponential compression in some deterministic dynami- cal systems. These new tests, called e-tests, are related to the tests used by Rivest and Schapire [1] in their work with the diversity representation, but our PSR avoids some of the pitfalls of their representation\u2014in partic- ular, its potential to be exponentially larger than the equivalent POMDP.", "bibtex": "@inproceedings{NIPS2003_72e6d323,\n author = {Rudary, Matthew and Singh, Satinder},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {A Nonlinear Predictive State Representation},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/72e6d3238361fe70f22fb0ac624a7072-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/72e6d3238361fe70f22fb0ac624a7072-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/72e6d3238361fe70f22fb0ac624a7072-Metadata.json", "review": "", "metareview": "", "pdf_size": 104944, "gs_citation": 45, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12762464370547083861&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "Computer Science and Engineering, University of Michigan; Computer Science and Engineering, University of Michigan", "aff_domain": "umich.edu;umich.edu", "email": "umich.edu;umich.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of Michigan", "aff_unique_dep": "Computer Science and Engineering", "aff_unique_url": "https://www.umich.edu", "aff_unique_abbr": "UM", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Ann Arbor", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "25549dbe95", "title": "A Probabilistic Model of Auditory Space Representation in the Barn Owl", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/959ef477884b6ac2241b19ee4fb776ae-Abstract.html", "author": "Brian J. Fischer; Charles H. Anderson", "abstract": "The barn owl is a nocturnal hunter, capable of capturing prey using au- ditory information alone [1]. The neural basis for this localization be- havior is the existence of auditory neurons with spatial receptive \ufb01elds [2]. We provide a mathematical description of the operations performed on auditory input signals by the barn owl that facilitate the creation of a representation of auditory space. To develop our model, we \ufb01rst formu- late the sound localization problem solved by the barn owl as a statistical estimation problem. The implementation of the solution is constrained by the known neurobiology.", "bibtex": "@inproceedings{NIPS2003_959ef477,\n author = {Fischer, Brian and Anderson, Charles},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {A Probabilistic Model of Auditory Space Representation in the Barn Owl},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/959ef477884b6ac2241b19ee4fb776ae-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/959ef477884b6ac2241b19ee4fb776ae-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/959ef477884b6ac2241b19ee4fb776ae-Metadata.json", "review": "", "metareview": "", "pdf_size": 202711, "gs_citation": 1, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3004903101073067712&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Dept. of Electrical and Systems Eng., Washington University in St. Louis, St. Louis, MO 63110; Department of Anatomy and Neurbiology, Washington University in St. Louis, St. 
Louis, MO 63110", "aff_domain": "pcg.wustl.edu;pcg.wustl.edu", "email": "pcg.wustl.edu;pcg.wustl.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Washington University in St. Louis", "aff_unique_dep": "Dept. of Electrical and Systems Eng.", "aff_unique_url": "https://wustl.edu", "aff_unique_abbr": "WUSTL", "aff_campus_unique_index": "0;0", "aff_campus_unique": "St. Louis", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "74c8dcbc95", "title": "A Recurrent Model of Orientation Maps with Simple and Complex Cells", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/73f490f3f868edbcd80b5d3f7cedc403-Abstract.html", "author": "Paul Merolla; Kwabena A. Boahen", "abstract": "that utilizes", "bibtex": "@inproceedings{NIPS2003_73f490f3,\n author = {Merolla, Paul and Boahen, Kwabena A},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {A Recurrent Model of Orientation Maps with Simple and Complex Cells},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/73f490f3f868edbcd80b5d3f7cedc403-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/73f490f3f868edbcd80b5d3f7cedc403-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/73f490f3f868edbcd80b5d3f7cedc403-Metadata.json", "review": "", "metareview": "", "pdf_size": 654855, "gs_citation": 78, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10653282080468444990&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 14, "aff": "Department of Bioengineering, University of Pennsylvania; Department of Bioengineering, University of Pennsylvania", "aff_domain": "seas.upenn.edu;seas.upenn.edu", "email": "seas.upenn.edu;seas.upenn.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of Pennsylvania", "aff_unique_dep": "Department of Bioengineering", "aff_unique_url": "https://www.upenn.edu", "aff_unique_abbr": "UPenn", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "6e393ae8e4", "title": "A Sampled Texture Prior for Image Super-Resolution", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/7b66b4fd401a271a1c7224027ce111bc-Abstract.html", "author": "Lyndsey C. Pickup; Stephen J. Roberts; Andrew Zisserman", "abstract": "Super-resolution aims to produce a high-resolution image from a set of one or more low-resolution images by recovering or inventing plausible high-frequency image content. Typical approaches try to reconstruct a high-resolution image using the sub-pixel displacements of several low- resolution images, usually regularized by a generic smoothness prior over the high-resolution image space. Other methods use training data to learn low-to-high-resolution matches, and have been highly successful even in the single-input-image case. Here we present a domain-speci\ufb01c im- age prior in the form of a p.d.f. 
based upon sampled images, and show that for certain types of super-resolution problems, this sample-based prior gives a signi\ufb01cant improvement over other common multiple-image super-resolution techniques.", "bibtex": "@inproceedings{NIPS2003_7b66b4fd,\n author = {Pickup, Lyndsey and Roberts, Stephen J and Zisserman, Andrew},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {A Sampled Texture Prior for Image Super-Resolution},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/7b66b4fd401a271a1c7224027ce111bc-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/7b66b4fd401a271a1c7224027ce111bc-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/7b66b4fd401a271a1c7224027ce111bc-Metadata.json", "review": "", "metareview": "", "pdf_size": 231961, "gs_citation": 120, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9418748548102091129&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 15, "aff": "Robotics Research Group, Department of Engineering Science, University of Oxford; Robotics Research Group, Department of Engineering Science, University of Oxford; Robotics Research Group, Department of Engineering Science, University of Oxford", "aff_domain": "robots.ox.ac.uk;robots.ox.ac.uk;robots.ox.ac.uk", "email": "robots.ox.ac.uk;robots.ox.ac.uk;robots.ox.ac.uk", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of Oxford", "aff_unique_dep": "Department of Engineering Science", "aff_unique_url": "https://www.ox.ac.uk", "aff_unique_abbr": "Oxford", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United Kingdom" }, { "id": "51a715e318", "title": "A Summating, Exponentially-Decaying CMOS Synapse for Spiking Neural Systems", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/e3ca0449fa2ea7701a7ac53fb719c51a-Abstract.html", "author": "Rock Z. Shi; Timothy K. Horiuchi", "abstract": "Synapses are a critical element of biologically-realistic, spike-based neu- ral computation, serving the role of communication, computation, and modi\ufb01cation. Many different circuit implementations of synapse func- tion exist with different computational goals in mind. In this paper we describe a new CMOS synapse design that separately controls quiescent leak current, synaptic gain, and time-constant of decay. This circuit im- plements part of a commonly-used kinetic model of synaptic conduc- tance. We show a theoretical analysis and experimental data for proto- types fabricated in a commercially-available 1.5\u00b5m CMOS process.", "bibtex": "@inproceedings{NIPS2003_e3ca0449,\n author = {Shi, Z. and Horiuchi, Timothy},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {A Summating, Exponentially-Decaying CMOS Synapse for Spiking Neural Systems},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/e3ca0449fa2ea7701a7ac53fb719c51a-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/e3ca0449fa2ea7701a7ac53fb719c51a-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/e3ca0449fa2ea7701a7ac53fb719c51a-Metadata.json", "review": "", "metareview": "", "pdf_size": 288672, "gs_citation": 37, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16461346841444505379&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "Electrical and Computer Engineering Department + Institute for Systems Research + Neuroscience and Cognitive Science Program; Electrical and Computer Engineering Department + Institute for Systems Research + Neuroscience and Cognitive Science Program", "aff_domain": "glue.umd.edu;isr.umd.edu", "email": "glue.umd.edu;isr.umd.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0+1+1;0+1+1", "aff_unique_norm": "Electrical and Computer Engineering Department;University of Maryland, College Park", "aff_unique_dep": "Electrical and Computer Engineering;Institute for Systems Research", "aff_unique_url": ";https://www.isr.umd.edu", "aff_unique_abbr": ";ISR", "aff_campus_unique_index": "1+1;1+1", "aff_campus_unique": ";College Park", "aff_country_unique_index": "1+1;1+1", "aff_country_unique": ";United States" }, { "id": "6c390bd872", "title": "ARA*: Anytime A* with Provable Bounds on Sub-Optimality", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/ee8fe9093fbbb687bef15a38facc44d2-Abstract.html", "author": "Maxim Likhachev; Geoffrey J. Gordon; Sebastian Thrun", "abstract": "In real world planning problems, time for deliberation is often limited. Anytime planners are well suited for these problems: they \ufb01nd a feasi- ble solution quickly and then continually work on improving it until time runs out. In this paper we propose an anytime heuristic search, ARA", "bibtex": "@inproceedings{NIPS2003_ee8fe909,\n author = {Likhachev, Maxim and Gordon, Geoffrey J and Thrun, Sebastian},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {ARA\\ast : Anytime A\\ast with Provable Bounds on Sub-Optimality},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/ee8fe9093fbbb687bef15a38facc44d2-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/ee8fe9093fbbb687bef15a38facc44d2-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/ee8fe9093fbbb687bef15a38facc44d2-Metadata.json", "review": "", "metareview": "", "pdf_size": 198420, "gs_citation": 1099, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10629907110119427375&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 20, "aff": "School of Computer Science, Carnegie Mellon University; School of Computer Science, Carnegie Mellon University; School of Computer Science, Carnegie Mellon University", "aff_domain": "cs.cmu.edu;cs.cmu.edu;cs.cmu.edu", "email": "cs.cmu.edu;cs.cmu.edu;cs.cmu.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "Carnegie Mellon University", "aff_unique_dep": "School of Computer Science", "aff_unique_url": "https://www.cmu.edu", "aff_unique_abbr": "CMU", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Pittsburgh", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "a8750b22e1", "title": "AUC Optimization vs. Error Rate Minimization", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/6ef80bb237adf4b6f77d0700e1255907-Abstract.html", "author": "Corinna Cortes; Mehryar Mohri", "abstract": "The area under an ROC curve (AUC) is a criterion used in many appli- cations to measure the quality of a classi\ufb01cation algorithm. However, the objective function optimized in most of these algorithms is the error rate and not the AUC value. We give a detailed statistical analysis of the relationship between the AUC and the error rate, including the \ufb01rst exact expression of the expected value and the variance of the AUC for a \ufb01xed error rate. Our results show that the average AUC is monotonically in- creasing as a function of the classi\ufb01cation accuracy, but that the standard deviation for uneven distributions and higher error rates is noticeable. Thus, algorithms designed to minimize the error rate may not lead to the best possible AUC values. We show that, under certain conditions, the global function optimized by the RankBoost algorithm is exactly the AUC. We report the results of our experiments with RankBoost in several datasets demonstrating the bene\ufb01ts of an algorithm speci\ufb01cally designed to globally optimize the AUC over other existing algorithms optimizing an approximation of the AUC or only locally optimizing the AUC.", "bibtex": "@inproceedings{NIPS2003_6ef80bb2,\n author = {Cortes, Corinna and Mohri, Mehryar},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {AUC Optimization vs. 
Error Rate Minimization},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/6ef80bb237adf4b6f77d0700e1255907-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/6ef80bb237adf4b6f77d0700e1255907-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/6ef80bb237adf4b6f77d0700e1255907-Metadata.json", "review": "", "metareview": "", "pdf_size": 92423, "gs_citation": 858, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10491239852521352274&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 13, "aff": ";", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster" }, { "id": "47d1fe9ad2", "title": "Algorithms for Interdependent Security Games", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/33bb83720ba9d2b6da87114380314af5-Abstract.html", "author": "Michael Kearns; Luis E. Ortiz", "abstract": "Inspired by events ranging from 9/11 to the collapse of the accounting firm Arthur Andersen, economists Kunreuther and Heal [5] recently introduced an interesting game-theoretic model for problems of interdependent security (IDS), in which a large number of players must make individual investment decisions related to security \u2014 whether physical, financial, medical, or some other type \u2014 but in which the ultimate safety of each participant may depend in a complex way on the actions of the entire population. A simple example is the choice of whether to install a fire sprinkler system in an individual condominium in a large building. While such a system might greatly reduce the chances of the owner\u2019s property being destroyed by a fire originating within their own unit, it might do little or nothing to reduce the chances of damage caused by fires originating in other units (since sprinklers can usually only douse small fires early). If \u201cenough\u201d other unit owners have not made the investment in sprinklers, it may not be cost-effective for any individual to do so.", "bibtex": "@inproceedings{NIPS2003_33bb8372,\n author = {Kearns, Michael and Ortiz, Luis E},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Algorithms for Interdependent Security Games},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/33bb83720ba9d2b6da87114380314af5-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/33bb83720ba9d2b6da87114380314af5-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/33bb83720ba9d2b6da87114380314af5-Metadata.json", "review": "", "metareview": "", "pdf_size": 179916, "gs_citation": 106, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6365671560316317189&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 15, "aff": "Department of Computer and Information Science; Department of Computer and Information Science", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "", "aff_unique_norm": "", "aff_unique_dep": "", "aff_unique_url": "", "aff_unique_abbr": "", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "", "aff_country_unique": "" }, { "id": "98ccd97961", "title": "All learning is Local: Multi-agent Learning in Global Reward Games", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/c8067ad1937f728f51288b3eb986afaa-Abstract.html", "author": "Yu-han Chang; Tracey Ho; Leslie P. Kaelbling", "abstract": "In large multiagent games, partial observability, coordination, and credit assignment persistently plague attempts to design good learning algo- rithms. We provide a simple and ef\ufb01cient algorithm that in part uses a linear system to model the world from a single agent\u2019s limited per- spective, and takes advantage of Kalman \ufb01ltering to allow an agent to construct a good training signal and learn an effective policy.", "bibtex": "@inproceedings{NIPS2003_c8067ad1,\n author = {Chang, Yu-han and Ho, Tracey and Kaelbling, Leslie},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {All learning is Local: Multi-agent Learning in Global Reward Games},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/c8067ad1937f728f51288b3eb986afaa-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/c8067ad1937f728f51288b3eb986afaa-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/c8067ad1937f728f51288b3eb986afaa-Metadata.json", "review": "", "metareview": "", "pdf_size": 989029, "gs_citation": 240, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14712211212649321566&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "MIT CSAIL; LIDS, MIT; MIT CSAIL", "aff_domain": "csail.mit.edu;mit.edu;csail.mit.edu", "email": "csail.mit.edu;mit.edu;csail.mit.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "Massachusetts Institute of Technology", "aff_unique_dep": "Computer Science and Artificial Intelligence Laboratory", "aff_unique_url": "https://www.csail.mit.edu", "aff_unique_abbr": "MIT CSAIL", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Cambridge", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "2ed19af1f9", "title": "Ambiguous Model Learning Made Unambiguous with 1/f Priors", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/8bdb5058376143fa358981954e7626b8-Abstract.html", "author": "Gurinder S. Atwal; William Bialek", "abstract": "What happens to the optimal interpretation of noisy data when there exists more than one equally plausible interpretation of the data? In a Bayesian model-learning framework the answer depends on the prior ex- pectations of the dynamics of the model parameter that is to be inferred from the data. Local time constraints on the priors are insuf\ufb01cient to pick one interpretation over another. On the other hand, nonlocal time constraints, induced by a 1/f noise spectrum of the priors, is shown to permit learning of a speci\ufb01c model parameter even when there are in- \ufb01nitely many equally plausible interpretations of the data. This transition is inferred by a remarkable mapping of the model estimation problem to a dissipative physical system, allowing the use of powerful statisti- cal mechanical methods to uncover the transition from indeterminate to determinate model learning.", "bibtex": "@inproceedings{NIPS2003_8bdb5058,\n author = {Atwal, Gurinder and Bialek, William},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Ambiguous Model Learning Made Unambiguous with 1/f Priors},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/8bdb5058376143fa358981954e7626b8-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/8bdb5058376143fa358981954e7626b8-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/8bdb5058376143fa358981954e7626b8-Metadata.json", "review": "", "metareview": "", "pdf_size": 66666, "gs_citation": 4, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4501492096182951919&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 14, "aff": ";", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster" }, { "id": "d5491ecef1", "title": "An Autonomous Robotic System for Mapping Abandoned Mines", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/54b2b21af94108d83c2a909d5b0a6a50-Abstract.html", "author": "David Ferguson; Aaron Morris; Dirk H\u00e4hnel; Christopher Baker; Zachary Omohundro; Carlos Reverte; Scott Thayer; Charles Whittaker; William Whittaker; Wolfram Burgard; Sebastian Thrun", "abstract": "We present the software architecture of a robotic system for mapping abandoned mines. The software is capable of acquiring consistent 2D maps of large mines with many cycles, represented as Markov random \u00a3elds. 3D C-space maps are acquired from local 3D range scans, which are used to identify navigable paths using A* search. Our system has been deployed in three abandoned mines, two of which inaccessible to people, where it has acquired maps of unprecedented detail and accuracy.", "bibtex": "@inproceedings{NIPS2003_54b2b21a,\n author = {Ferguson, David and Morris, Aaron and H\\\"{a}hnel, Dirk and Baker, Christopher and Omohundro, Zachary and Reverte, Carlos and Thayer, Scott and Whittaker, Charles and Whittaker, William and Burgard, Wolfram and Thrun, Sebastian},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {An Autonomous Robotic System for Mapping Abandoned Mines},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/54b2b21af94108d83c2a909d5b0a6a50-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/54b2b21af94108d83c2a909d5b0a6a50-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/54b2b21af94108d83c2a909d5b0a6a50-Metadata.json", "review": "", "metareview": "", "pdf_size": 296628, "gs_citation": 81, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5544391519715573667&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 19, "aff": ";;;;;;;;;;", "aff_domain": ";;;;;;;;;;", "email": ";;;;;;;;;;", "github": "", "project": "", "author_num": 11, "track": "main", "status": "Poster" }, { "id": "f8bc39ed0c", "title": "An Improved Scheme for Detection and Labelling in Johansson Displays", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/466accbac9a66b805ba50e42ad715740-Abstract.html", "author": "Claudio Fanti; Marzia Polito; Pietro Perona", "abstract": "Consider a number of moving points, where each point is attached to a joint of the human body and projected onto an image plane. Johannson showed that humans can e\ufb00ortlessly detect and recog- nize the presence of other humans from such displays. 
This is true even when some of the body points are missing (e.g. because of occlusion) and unrelated clutter points are added to the display. We are interested in replicating this ability in a machine. To this end, we present a labelling and detection scheme in a probabilistic framework. Our method is based on representing the joint prob- ability density of positions and velocities of body points with a graphical model, and using Loopy Belief Propagation to calculate a likely interpretation of the scene. Furthermore, we introduce a global variable representing the body\u2019s centroid. Experiments on one motion-captured sequence suggest that our scheme improves on the accuracy of a previous approach based on triangulated graph- ical models, especially when very few parts are visible. The im- provement is due both to the more general graph structure we use and, more signi\ufb01cantly, to the introduction of the centroid variable.", "bibtex": "@inproceedings{NIPS2003_466accba,\n author = {Fanti, Claudio and Polito, Marzia and Perona, Pietro},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {An Improved Scheme for Detection and Labelling in Johansson Displays},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/466accbac9a66b805ba50e42ad715740-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/466accbac9a66b805ba50e42ad715740-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/466accbac9a66b805ba50e42ad715740-Metadata.json", "review": "", "metareview": "", "pdf_size": 217281, "gs_citation": 2, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9255977870105355460&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 11, "aff": "Computational Vision Lab, 136-93 California Institute of Technology, Pasadena, CA 91125, USA; Intel Corporation, SC12-303 2200 Mission College Blvd., Santa Clara, CA 95054, USA; Computational Vision Lab, 136-93 California Institute of Technology, Pasadena, CA 91125, USA", "aff_domain": "vision.caltech.edu;intel.com;vision.caltech.edu", "email": "vision.caltech.edu;intel.com;vision.caltech.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;0", "aff_unique_norm": "California Institute of Technology;Intel", "aff_unique_dep": "Computational Vision Lab;Intel Corporation", "aff_unique_url": "https://www.caltech.edu;https://www.intel.com", "aff_unique_abbr": "Caltech;Intel", "aff_campus_unique_index": "0;1;0", "aff_campus_unique": "Pasadena;Santa Clara", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "79780a33ae", "title": "An Infinity-sample Theory for Multi-category Large Margin Classification", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/2dbf21633f03afcf882eaf10e4b5caca-Abstract.html", "author": "Tong Zhang", "abstract": "The purpose of this paper is to investigate in\ufb01nity-sample properties of risk minimization based multi-category classi\ufb01cation methods. These methods can be considered as natural extensions to binary large margin classi\ufb01cation. We establish conditions that guarantee the in\ufb01nity-sample consistency of classi\ufb01ers obtained in the risk minimization framework. Examples are provided for two speci\ufb01c forms of the general formulation, which extend a number of known methods. 
Using these examples, we show that some risk minimization formulations can also be used to obtain conditional probability estimates for the underlying problem. Such conditional probability information will be useful for statistical inferencing tasks beyond classi\ufb01cation.", "bibtex": "@inproceedings{NIPS2003_2dbf2163,\n author = {Zhang, Tong},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {An Infinity-sample Theory for Multi-category Large Margin Classification},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/2dbf21633f03afcf882eaf10e4b5caca-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/2dbf21633f03afcf882eaf10e4b5caca-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/2dbf21633f03afcf882eaf10e4b5caca-Metadata.json", "review": "", "metareview": "", "pdf_size": 94034, "gs_citation": 10, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15942244045953444876&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "IBM T.J. Watson Research Center", "aff_domain": "watson.ibm.com", "email": "watson.ibm.com", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "IBM", "aff_unique_dep": "Research Center", "aff_unique_url": "https://www.ibm.com/research/watson", "aff_unique_abbr": "IBM", "aff_campus_unique_index": "0", "aff_campus_unique": "T.J. Watson", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "id": "cf0079367b", "title": "An Iterative Improvement Procedure for Hierarchical Clustering", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/f7696a9b362ac5a51c3dc8f098b73923-Abstract.html", "author": "David Kauchak; Sanjoy Dasgupta", "abstract": "We describe a procedure which \ufb01nds a hierarchical clustering by hill-climbing. The cost function we use is a hierarchical extension of the k-means cost; our local moves are tree restructurings and node reorderings. We show these can be accomplished ef\ufb01ciently, by exploiting special properties of squared Euclidean distances and by using techniques from scheduling algorithms.", "bibtex": "@inproceedings{NIPS2003_f7696a9b,\n author = {Kauchak, David and Dasgupta, Sanjoy},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {An Iterative Improvement Procedure for Hierarchical Clustering},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/f7696a9b362ac5a51c3dc8f098b73923-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/f7696a9b362ac5a51c3dc8f098b73923-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/f7696a9b362ac5a51c3dc8f098b73923-Metadata.json", "review": "", "metareview": "", "pdf_size": 74949, "gs_citation": 6, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17880558209452716145&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "Department of Computer Science, University of California, San Diego; Department of Computer Science, University of California, San Diego", "aff_domain": "cs.ucsd.edu;cs.ucsd.edu", "email": "cs.ucsd.edu;cs.ucsd.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of California, San Diego", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.ucsd.edu", "aff_unique_abbr": "UCSD", "aff_campus_unique_index": "0;0", "aff_campus_unique": "San Diego", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "8222d7c4d6", "title": "An MCMC-Based Method of Comparing Connectionist Models in Cognitive Science", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/8cbd005a556ccd4211ce43f309bc0eac-Abstract.html", "author": "Woojae Kim; Daniel J. Navarro; Mark A. Pitt; In J. Myung", "abstract": "Despite the popularity of connectionist models in cognitive science, their performance can often be di\ufb03cult to evaluate. Inspired by the geometric approach to statistical model selection, we introduce a conceptually similar method to examine the global behavior of a connectionist model, by counting the number and types of response patterns it can simulate. The Markov Chain Monte Carlo-based algorithm that we constructed (cid:222)nds these patterns e\ufb03ciently. We demonstrate the approach using two localist network models of speech perception.", "bibtex": "@inproceedings{NIPS2003_8cbd005a,\n author = {Kim, Woojae and Navarro, Daniel and Pitt, Mark and Myung, In},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {An MCMC-Based Method of Comparing Connectionist Models in Cognitive Science},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/8cbd005a556ccd4211ce43f309bc0eac-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/8cbd005a556ccd4211ce43f309bc0eac-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/8cbd005a556ccd4211ce43f309bc0eac-Metadata.json", "review": "", "metareview": "", "pdf_size": 161440, "gs_citation": 11, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9026500449803362573&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Department of Psychology, Ohio State University; Department of Psychology, Ohio State University; Department of Psychology, Ohio State University; Department of Psychology, Ohio State University", "aff_domain": "osu.edu;osu.edu;osu.edu;osu.edu", "email": "osu.edu;osu.edu;osu.edu;osu.edu", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Ohio State University", "aff_unique_dep": "Department of Psychology", "aff_unique_url": "https://www.osu.edu", "aff_unique_abbr": "OSU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "id": "3d6d27cad3", "title": "An MDP-Based Approach to Online Mechanism Design", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/d16509f6eaca1022bd8f28d6bc582cae-Abstract.html", "author": "David C. Parkes; Satinder P. Singh", "abstract": "Online mechanism design (MD) considers the problem of provid- ing incentives to implement desired system-wide outcomes in sys- tems with self-interested agents that arrive and depart dynami- cally. Agents can choose to misrepresent their arrival and depar- ture times, in addition to information about their value for di(cid:11)erent outcomes. We consider the problem of maximizing the total long- term value of the system despite the self-interest of agents. The online MD problem induces a Markov Decision Process (MDP), which when solved can be used to implement optimal policies in a truth-revealing Bayesian-Nash equilibrium.", "bibtex": "@inproceedings{NIPS2003_d16509f6,\n author = {Parkes, David C and Singh, Satinder},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {An MDP-Based Approach to Online Mechanism Design},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/d16509f6eaca1022bd8f28d6bc582cae-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/d16509f6eaca1022bd8f28d6bc582cae-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/d16509f6eaca1022bd8f28d6bc582cae-Metadata.json", "review": "", "metareview": "", "pdf_size": 101346, "gs_citation": 252, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=606721034218316420&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 19, "aff": "Division of Engineering and Applied Sciences, Harvard University; Computer Science and Engineering, University of Michigan", "aff_domain": "eecs.harvard.edu;umich.edu", "email": "eecs.harvard.edu;umich.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "Harvard University;University of Michigan", "aff_unique_dep": "Division of Engineering and Applied Sciences;Computer Science and Engineering", "aff_unique_url": "https://www.harvard.edu;https://www.umich.edu", "aff_unique_abbr": "Harvard;UM", "aff_campus_unique_index": "0;1", "aff_campus_unique": "Cambridge;Ann Arbor", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "430d700925", "title": "Analytical Solution of Spike-timing Dependent Plasticity Based on Synaptic Biophysics", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/7cac11e2f46ed46c339ec3d569853759-Abstract.html", "author": "Bernd Porr; Ausra Saudargiene; Florentin W\u00f6rg\u00f6tter", "abstract": "Spike timing plasticity (STDP) is a special form of synaptic plasticity where the relative timing of post- and presynaptic activity determines the change of the synaptic weight. On the postsynaptic side, active back- propagating spikes in dendrites seem to play a crucial role in the induc- tion of spike timing dependent plasticity. We argue that postsynaptically the temporal change of the membrane potential determines the weight change. Coming from the presynaptic side induction of STDP is closely related to the activation of NMDA channels. Therefore, we will calculate analytically the change of the synaptic weight by correlating the deriva- tive of the membrane potential with the activity of the NMDA channel. Thus, for this calculation we utilise biophysical variables of the physi- ological cell. The \ufb01nal result shows a weight change curve which con- forms with measurements from biology. The positive part of the weight change curve is determined by the NMDA activation. The negative part of the weight change curve is determined by the membrane potential change. Therefore, the weight change curve should change its shape de- pending on the distance from the soma of the postsynaptic cell. We \ufb01nd temporally asymmetric weight change close to the soma and temporally symmetric weight change in the distal dendrite.", "bibtex": "@inproceedings{NIPS2003_7cac11e2,\n author = {Porr, Bernd and Saudargiene, Ausra and W\\\"{o}rg\\\"{o}tter, Florentin},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Analytical Solution of Spike-timing Dependent Plasticity Based on Synaptic Biophysics},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/7cac11e2f46ed46c339ec3d569853759-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/7cac11e2f46ed46c339ec3d569853759-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/7cac11e2f46ed46c339ec3d569853759-Metadata.json", "review": "", "metareview": "", "pdf_size": 391540, "gs_citation": 18, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4984659572372230265&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "University of Stirling; University of Stirling; University of Stirling", "aff_domain": "cn.stir.ac.uk;cn.stir.ac.uk;cn.stir.ac.uk", "email": "cn.stir.ac.uk;cn.stir.ac.uk;cn.stir.ac.uk", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of Stirling", "aff_unique_dep": "", "aff_unique_url": "https://www.stir.ac.uk", "aff_unique_abbr": "Stirling", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United Kingdom" }, { "id": "6d3411c2f3", "title": "Application of SVMs for Colour Classification and Collision Detection with AIBO Robots", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/6a4cbdaedcbda0fa8ddc7ea32073c475-Abstract.html", "author": "Michael J. Quinlan; Stephan K. Chalup; Richard H. Middleton", "abstract": "This article addresses the issues of colour classi\ufb01cation and collision de- tection as they occur in the legged league robot soccer environment of RoboCup. We show how the method of one-class classi\ufb01cation with sup- port vector machines (SVMs) can be applied to solve these tasks satisfac- torily using the limited hardware capacity of the prescribed Sony AIBO quadruped robots. The experimental evaluation shows an improvement over our previous methods of ellipse \ufb01tting for colour classi\ufb01cation and the statistical approach used for collision detection.", "bibtex": "@inproceedings{NIPS2003_6a4cbdae,\n author = {Quinlan, Michael and Chalup, Stephan and Middleton, Richard},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Application of SVMs for Colour Classification and Collision Detection with AIBO Robots},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/6a4cbdaedcbda0fa8ddc7ea32073c475-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/6a4cbdaedcbda0fa8ddc7ea32073c475-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/6a4cbdaedcbda0fa8ddc7ea32073c475-Metadata.json", "review": "", "metareview": "", "pdf_size": 273595, "gs_citation": 46, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13255766893337095928&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 17, "aff": "School of Electrical Engineering & Computer Science, The University of Newcastle, Callaghan 2308, Australia; School of Electrical Engineering & Computer Science, The University of Newcastle, Callaghan 2308, Australia; School of Electrical Engineering & Computer Science, The University of Newcastle, Callaghan 2308, Australia", "aff_domain": "eecs.newcastle.edu.au;eecs.newcastle.edu.au;eecs.newcastle.edu.au", "email": "eecs.newcastle.edu.au;eecs.newcastle.edu.au;eecs.newcastle.edu.au", "github": "", "project": "http://www.robots.newcastle.edu.au", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of Newcastle", "aff_unique_dep": "School of Electrical Engineering & Computer Science", "aff_unique_url": "https://www.newcastle.edu.au", "aff_unique_abbr": "UON", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Callaghan", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Australia" }, { "id": "2ed61ecd44", "title": "Applying Metric-Trees to Belief-Point POMDPs", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/6547884cea64550284728eb26b0947ef-Abstract.html", "author": "Joelle Pineau; Geoffrey J. Gordon; Sebastian Thrun", "abstract": "Recent developments in grid-based and point-based approximation algo- rithms for POMDPs have greatly improved the tractability of POMDP planning. These approaches operate on sets of belief points by individ- ually learning a value function for each point. In reality, belief points exist in a highly-structured metric simplex, but current POMDP algo- rithms do not exploit this property. This paper presents a new metric-tree algorithm which can be used in the context of POMDP planning to sort belief points spatially, and then perform fast value function updates over groups of points. We present results showing that this approach can re- duce computation in point-based POMDP algorithms for a wide range of problems.", "bibtex": "@inproceedings{NIPS2003_6547884c,\n author = {Pineau, Joelle and Gordon, Geoffrey J and Thrun, Sebastian},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Applying Metric-Trees to Belief-Point POMDPs},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/6547884cea64550284728eb26b0947ef-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/6547884cea64550284728eb26b0947ef-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/6547884cea64550284728eb26b0947ef-Metadata.json", "review": "", "metareview": "", "pdf_size": 112847, "gs_citation": 14, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10136160915630130668&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 19, "aff": "School of Computer Science, Carnegie Mellon University; School of Computer Science, Carnegie Mellon University; Computer Science Department, Stanford University", "aff_domain": "cs.cmu.edu;cs.cmu.edu;stanford.edu", "email": "cs.cmu.edu;cs.cmu.edu;stanford.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;1", "aff_unique_norm": "Carnegie Mellon University;Stanford University", "aff_unique_dep": "School of Computer Science;Computer Science Department", "aff_unique_url": "https://www.cmu.edu;https://www.stanford.edu", "aff_unique_abbr": "CMU;Stanford", "aff_campus_unique_index": "0;0;1", "aff_campus_unique": "Pittsburgh;Stanford", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "6ac0b9a249", "title": "Approximability of Probability Distributions", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/9af76329c78e28c977ab1bcd1c3fe9b8-Abstract.html", "author": "Alina Beygelzimer; Irina Rish", "abstract": "We consider the question of how well a given distribution can be approx- imated with probabilistic graphical models. We introduce a new param- eter, effective treewidth, that captures the degree of approximability as a tradeoff between the accuracy and the complexity of approximation. We present a simple approach to analyzing achievable tradeoffs that ex- ploits the threshold behavior of monotone graph properties, and provide experimental results that support the approach.", "bibtex": "@inproceedings{NIPS2003_9af76329,\n author = {Beygelzimer, Alina and Rish, Irina},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Approximability of Probability Distributions},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/9af76329c78e28c977ab1bcd1c3fe9b8-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/9af76329c78e28c977ab1bcd1c3fe9b8-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/9af76329c78e28c977ab1bcd1c3fe9b8-Metadata.json", "review": "", "metareview": "", "pdf_size": 96388, "gs_citation": 14, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17418785919016956373&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "IBM T. J. Watson Research Center+Department of Computer Science, University of Rochester; IBM T. J. 
Watson Research Center", "aff_domain": "cs.rochester.edu;us.ibm.com", "email": "cs.rochester.edu;us.ibm.com", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0+1;0", "aff_unique_norm": "IBM;University of Rochester", "aff_unique_dep": "IBM;Department of Computer Science", "aff_unique_url": "https://www.ibm.com/research/watson;https://www.rochester.edu", "aff_unique_abbr": "IBM;U of R", "aff_campus_unique_index": "0;0", "aff_campus_unique": "T. J. Watson;", "aff_country_unique_index": "0+0;0", "aff_country_unique": "United States" }, { "id": "47283960d6", "title": "Approximate Analytical Bootstrap Averages for Support Vector Classifiers", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/2c6ae45a3e88aee548c0714fad7f8269-Abstract.html", "author": "D\u00f6rthe Malzahn; Manfred Opper", "abstract": "We compute approximate analytical bootstrap averages for support vec- tor classi\ufb01cation using a combination of the replica method of statistical physics and the TAP approach for approximate inference. We test our method on a few datasets and compare it with exact averages obtained by extensive Monte-Carlo sampling.", "bibtex": "@inproceedings{NIPS2003_2c6ae45a,\n author = {Malzahn, D\\\"{o}rthe and Opper, Manfred},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Approximate Analytical Bootstrap Averages for Support Vector Classifiers},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/2c6ae45a3e88aee548c0714fad7f8269-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/2c6ae45a3e88aee548c0714fad7f8269-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/2c6ae45a3e88aee548c0714fad7f8269-Metadata.json", "review": "", "metareview": "", "pdf_size": 113714, "gs_citation": 10, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18298028696642012099&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Informatics and Mathematical Modelling, Technical University of Denmark; Institute of Mathematical Stochastics, University of Karlsruhe + Neural Computing Research Group, School of Engineering and Applied Science, Aston University", "aff_domain": "isp.imm.dtu.dk;aston.ac.uk", "email": "isp.imm.dtu.dk;aston.ac.uk", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1+2", "aff_unique_norm": "Technical University of Denmark;University of Karlsruhe;Aston University", "aff_unique_dep": "Informatics and Mathematical Modelling;Institute of Mathematical Stochastics;School of Engineering and Applied Science", "aff_unique_url": "https://www.tu-dresden.de;https://www.kit.edu;https://www.aston.ac.uk", "aff_unique_abbr": "DTU;KIT;Aston", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1+2", "aff_country_unique": "Denmark;Germany;United Kingdom" }, { "id": "02f99c0a3c", "title": "Approximate Expectation Maximization", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/8208974663db80265e9bfe7b222dcb18-Abstract.html", "author": "Tom Heskes; Onno Zoeter; Wim Wiegerinck", "abstract": "We discuss the integration of the expectation-maximization (EM) algorithm for maximum likelihood learning of Bayesian networks with belief propagation algorithms for approximate inference. 
Specifically we propose to combine the outer-loop step of convergent belief propagation algorithms with the M-step of the EM algorithm. This then yields an approximate EM algorithm that is essentially still double loop, with the important advantage of an inner loop that is guaranteed to converge. Simulations illustrate the merits of such an approach.", "bibtex": "@inproceedings{NIPS2003_82089746,\n author = {Heskes, Tom and Zoeter, Onno and Wiegerinck, Wim},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Approximate Expectation Maximization},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/8208974663db80265e9bfe7b222dcb18-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/8208974663db80265e9bfe7b222dcb18-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/8208974663db80265e9bfe7b222dcb18-Metadata.json", "review": "", "metareview": "", "pdf_size": 854137, "gs_citation": 38, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17472936224201093674&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": ";;", "aff_domain": ";;", "email": ";;", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster" }, { "id": "ee69f436c9", "title": "Approximate Planning in POMDPs with Macro-Actions", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/ea4eb49329550caaa1d2044105223721-Abstract.html", "author": "Georgios Theocharous; Leslie P. Kaelbling", "abstract": "Leslie Pack Kaelbling", "bibtex": "@inproceedings{NIPS2003_ea4eb493,\n author = {Theocharous, Georgios and Kaelbling, Leslie},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Approximate Planning in POMDPs with Macro-Actions},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/ea4eb49329550caaa1d2044105223721-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/ea4eb49329550caaa1d2044105223721-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/ea4eb49329550caaa1d2044105223721-Metadata.json", "review": "", "metareview": "", "pdf_size": 99210, "gs_citation": 124, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7461040198175246536&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "MIT AI Lab; MIT AI Lab", "aff_domain": "ai.mit.edu;ai.mit.edu", "email": "ai.mit.edu;ai.mit.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Massachusetts Institute of Technology", "aff_unique_dep": "Artificial Intelligence Laboratory", "aff_unique_url": "http://www.ai.mit.edu", "aff_unique_abbr": "MIT AI Lab", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Cambridge", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "b974b3057b", "title": "Approximate Policy Iteration with a Policy Language Bias", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/7cf64379eb6f29a4d25c4b6a2df713e4-Abstract.html", "author": "Alan Fern; Sungwook Yoon; Robert Givan", "abstract": "We explore approximate policy iteration, replacing the usual cost- function learning step with a learning step in policy space. 
We give policy-language biases that enable solution of very large relational Markov decision processes (MDPs) that no previous technique can solve. In particular, we induce high-quality domain-speci\ufb01c planners for clas- sical planning domains (both deterministic and stochastic variants) by solving such domains as extremely large MDPs.", "bibtex": "@inproceedings{NIPS2003_7cf64379,\n author = {Fern, Alan and Yoon, Sungwook and Givan, Robert},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Approximate Policy Iteration with a Policy Language Bias},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/7cf64379eb6f29a4d25c4b6a2df713e4-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/7cf64379eb6f29a4d25c4b6a2df713e4-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/7cf64379eb6f29a4d25c4b6a2df713e4-Metadata.json", "review": "", "metareview": "", "pdf_size": 162565, "gs_citation": 278, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8686789908870901391&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 20, "aff": ";;", "aff_domain": ";;", "email": ";;", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster" }, { "id": "29082b7835", "title": "Attractive People: Assembling Loose-Limbed Models using Non-parametric Belief Propagation", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/cd10c7f376188a4a2ca3e8fea2c03aeb-Abstract.html", "author": "Leonid Sigal; Michael Isard; Benjamin H. Sigelman; Michael J. Black", "abstract": "The detection and pose estimation of people in images and video is made challenging by the variability of human appearance, the complexity of natural scenes, and the high dimensionality of articulated body mod- els. To cope with these problems we represent the 3D human body as a graphical model in which the relationships between the body parts are represented by conditional probability distributions. We formulate the pose estimation problem as one of probabilistic inference over a graphi- cal model where the random variables correspond to the individual limb parameters (position and orientation). Because the limbs are described by 6-dimensional vectors encoding pose in 3-space, discretization is im- practical and the random variables in our model must be continuous- valued. To approximate belief propagation in such a graph we exploit a recently introduced generalization of the particle \ufb01lter. This framework facilitates the automatic initialization of the body-model from low level cues and is robust to occlusion of body parts and scene clutter.", "bibtex": "@inproceedings{NIPS2003_cd10c7f3,\n author = {Sigal, Leonid and Isard, Michael and Sigelman, Benjamin and Black, Michael},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Attractive People: Assembling Loose-Limbed Models using Non-parametric Belief Propagation},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/cd10c7f376188a4a2ca3e8fea2c03aeb-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/cd10c7f376188a4a2ca3e8fea2c03aeb-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/cd10c7f376188a4a2ca3e8fea2c03aeb-Metadata.json", "review": "", "metareview": "", "pdf_size": 249782, "gs_citation": 187, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12462121060734780783&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 22, "aff": "Department of Computer Science, Brown University; Microsoft Research Silicon Valley; Department of Computer Science, Brown University; Department of Computer Science, Brown University", "aff_domain": "cs.brown.edu;microsoft.com;cs.brown.edu;cs.brown.edu", "email": "cs.brown.edu;microsoft.com;cs.brown.edu;cs.brown.edu", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;1;0;0", "aff_unique_norm": "Brown University;Microsoft", "aff_unique_dep": "Department of Computer Science;Microsoft Research", "aff_unique_url": "https://www.brown.edu;https://www.microsoft.com/en-us/research/group/microsoft-research-silicon-valley", "aff_unique_abbr": "Brown;MSR SV", "aff_campus_unique_index": "1", "aff_campus_unique": ";Silicon Valley", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "id": "9f4eaaeaa1", "title": "Auction Mechanism Design for Multi-Robot Coordination", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/40c48dab939a482f04dcecde07e27de6-Abstract.html", "author": "Curt Bererton; Geoffrey J. Gordon; Sebastian Thrun", "abstract": "The design of cooperative multi-robot systems is a highly active research area in robotics. Two lines of research in particular have generated inter- est: the solution of large, weakly coupled MDPs, and the design and im- plementation of market architectures. We propose a new algorithm which joins together these two lines of research. For a class of coupled MDPs, our algorithm automatically designs a market architecture which causes a decentralized multi-robot system to converge to a consistent policy. We can show that this policy is the same as the one which would be produced by a particular centralized planning algorithm. We demonstrate the new algorithm on three simulation examples: multi-robot towing, multi-robot path planning with a limited fuel resource, and coordinating behaviors in a game of paint ball.", "bibtex": "@inproceedings{NIPS2003_40c48dab,\n author = {Bererton, Curt and Gordon, Geoffrey J and Thrun, Sebastian},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Auction Mechanism Design for Multi-Robot Coordination},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/40c48dab939a482f04dcecde07e27de6-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/40c48dab939a482f04dcecde07e27de6-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/40c48dab939a482f04dcecde07e27de6-Metadata.json", "review": "", "metareview": "", "pdf_size": 122784, "gs_citation": 70, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1982143542572029781&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 18, "aff": ";;", "aff_domain": ";;", "email": ";;", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster" }, { "id": "c816931140", "title": "Automatic Annotation of Everyday Movements", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/ca43108ded5aabc7793d3f9b928cdd54-Abstract.html", "author": "Deva Ramanan; David A. Forsyth", "abstract": "This paper describes a system that can annotate a video sequence with: a description of the appearance of each actor; when the actor is in view; and a representation of the actor\u2019s activity while in view. The system does not require a \ufb01xed background, and is automatic. The system works by (1) tracking people in 2D and then, using an annotated motion capture dataset, (2) synthesizing an annotated 3D motion sequence matching the 2D tracks. The 3D motion capture data is manually annotated off-line using a class structure that describes everyday motions and allows mo- tion annotations to be composed \u2014 one may jump while running, for example. Descriptions computed from video of real motions show that the method is accurate.", "bibtex": "@inproceedings{NIPS2003_ca43108d,\n author = {Ramanan, Deva and Forsyth, David},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Automatic Annotation of Everyday Movements},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/ca43108ded5aabc7793d3f9b928cdd54-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/ca43108ded5aabc7793d3f9b928cdd54-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/ca43108ded5aabc7793d3f9b928cdd54-Metadata.json", "review": "", "metareview": "", "pdf_size": 131378, "gs_citation": 244, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8489102694110764115&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 21, "aff": "Computer Science Division, University of California, Berkeley; Computer Science Division, University of California, Berkeley", "aff_domain": "cs.berkeley.edu;cs.berkeley.edu", "email": "cs.berkeley.edu;cs.berkeley.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of California, Berkeley", "aff_unique_dep": "Computer Science Division", "aff_unique_url": "https://www.berkeley.edu", "aff_unique_abbr": "UC Berkeley", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Berkeley", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "c39c63d21f", "title": "Autonomous Helicopter Flight via Reinforcement Learning", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/b427426b8acd2c2e53827970f2c2f526-Abstract.html", "author": "H. J. Kim; Michael I. Jordan; Shankar Sastry; Andrew Y. Ng", "abstract": "Autonomous helicopter \ufb02ight represents a challenging control problem, with complex, noisy, dynamics. In this paper, we describe a successful application of reinforcement learning to autonomous helicopter \ufb02ight. We \ufb01rst \ufb01t a stochastic, nonlinear model of the helicopter dynamics. We then use the model to learn to hover in place, and to \ufb02y a number of maneuvers taken from an RC helicopter competition.", "bibtex": "@inproceedings{NIPS2003_b427426b,\n author = {Kim, H. and Jordan, Michael and Sastry, Shankar and Ng, Andrew},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Autonomous Helicopter Flight via Reinforcement Learning},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/b427426b8acd2c2e53827970f2c2f526-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/b427426b8acd2c2e53827970f2c2f526-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/b427426b8acd2c2e53827970f2c2f526-Metadata.json", "review": "", "metareview": "", "pdf_size": 286853, "gs_citation": 109, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17319417255609162178&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 19, "aff": ";;;", "aff_domain": ";;;", "email": ";;;", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster" }, { "id": "0abd22648b", "title": "Bayesian Color Constancy with Non-Gaussian Models", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/c65d7bd70fe3e5e3a2f3de681edc193d-Abstract.html", "author": "Charles Rosenberg; Alok Ladsariya; Tom Minka", "abstract": "We present a Bayesian approach to color constancy which utilizes a non- Gaussian probabilistic model of the image formation process. The pa- rameters of this model are estimated directly from an uncalibrated image set and a small number of additional algorithmic parameters are chosen using cross validation. The algorithm is empirically shown to exhibit RMS error lower than other color constancy algorithms based on the Lambertian surface re\ufb02ectance model when estimating the illuminants of a set of test images. This is demonstrated via a direct performance comparison utilizing a publicly available set of real world test images and code base.", "bibtex": "@inproceedings{NIPS2003_c65d7bd7,\n author = {Rosenberg, Charles and Ladsariya, Alok and Minka, Tom},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Bayesian Color Constancy with Non-Gaussian Models},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/c65d7bd70fe3e5e3a2f3de681edc193d-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/c65d7bd70fe3e5e3a2f3de681edc193d-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/c65d7bd70fe3e5e3a2f3de681edc193d-Metadata.json", "review": "", "metareview": "", "pdf_size": 98705, "gs_citation": 117, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16771660476788566284&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "Computer Science Department, Carnegie Mellon University; Statistics Department, Carnegie Mellon University; Computer Science Department, Carnegie Mellon University", "aff_domain": "cs.cmu.edu;stat.cmu.edu;cs.cmu.edu", "email": "cs.cmu.edu;stat.cmu.edu;cs.cmu.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "Carnegie Mellon University", "aff_unique_dep": "Computer Science Department", "aff_unique_url": "https://www.cmu.edu", "aff_unique_abbr": "CMU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "a8cf2f6903", "title": "Bias-Corrected Bootstrap and Model Uncertainty", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/2aaaddf27344ee54058548dc081c6541-Abstract.html", "author": "Harald Steck; Tommi S. Jaakkola", "abstract": "The bootstrap has become a popular method for exploring model (structure) uncertainty. Our experiments with arti\ufb01cial and real- world data demonstrate that the graphs learned from bootstrap samples can be severely biased towards too complex graphical mod- els. Accounting for this bias is hence essential, e.g., when explor- ing model uncertainty. We \ufb01nd that this bias is intimately tied to (well-known) spurious dependences induced by the bootstrap. The leading-order bias-correction equals one half of Akaike\u2019s penalty for model complexity. We demonstrate the e\ufb00ect of this simple bias-correction in our experiments. We also relate this bias to the bias of the plug-in estimator for entropy, as well as to the di\ufb00er- ence between the expected test and training errors of a graphical model, which asymptotically equals Akaike\u2019s penalty (rather than one half).", "bibtex": "@inproceedings{NIPS2003_2aaaddf2,\n author = {Steck, Harald and Jaakkola, Tommi},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Bias-Corrected Bootstrap and Model Uncertainty},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/2aaaddf27344ee54058548dc081c6541-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/2aaaddf27344ee54058548dc081c6541-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/2aaaddf27344ee54058548dc081c6541-Metadata.json", "review": "", "metareview": "", "pdf_size": 186441, "gs_citation": 73, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5663772861264547805&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "MIT CSAIL+ETH Zurich, Institute for Computational Science; MIT CSAIL", "aff_domain": "ai.mit.edu;ai.mit.edu", "email": "ai.mit.edu;ai.mit.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0+1;0", "aff_unique_norm": "Massachusetts Institute of Technology;ETH Zurich", "aff_unique_dep": "Computer Science and Artificial Intelligence Laboratory;Institute for Computational Science", "aff_unique_url": "https://www.csail.mit.edu;https://www.ethz.ch", "aff_unique_abbr": "MIT CSAIL;ETHZ", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Cambridge;", "aff_country_unique_index": "0+1;0", "aff_country_unique": "United States;Switzerland" }, { "id": "b5ec1511ef", "title": "Boosting versus Covering", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/9824f9c1543628a85bb51d2dd6fcf8a3-Abstract.html", "author": "Kohei Hatano; Manfred K. Warmuth", "abstract": "We investigate improvements of AdaBoost that can exploit the fact that the weak hypotheses are one-sided, i.e. either all its positive (or negative) predictions are correct. In particular, for any set of m labeled examples consistent with a disjunction of k literals (which are one-sided in this case), AdaBoost constructs a consistent hypothesis by using O(k2 log m) iterations. On the other hand, a greedy set covering algorithm \ufb01nds a consistent hypothesis of size O(k log m). Our primary question is whether there is a simple boosting algorithm that performs as well as the greedy set covering. We \ufb01rst show that InfoBoost, a modi\ufb01cation of AdaBoost pro- posed by Aslam for a di\ufb00erent purpose, does perform as well as the greedy set covering algorithm. We then show that AdaBoost requires \u2126(k2 log m) iterations for learning k-literal disjunctions. We achieve this with an adversary construction and as well as in simple experiments based on arti\ufb01cial data. Further we give a vari- ant called SemiBoost that can handle the degenerate case when the given examples all have the same label. We conclude by showing that SemiBoost can be used to produce small conjunctions as well.", "bibtex": "@inproceedings{NIPS2003_9824f9c1,\n author = {Hatano, Kohei and Warmuth, Manfred K. K},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Boosting versus Covering},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/9824f9c1543628a85bb51d2dd6fcf8a3-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/9824f9c1543628a85bb51d2dd6fcf8a3-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/9824f9c1543628a85bb51d2dd6fcf8a3-Metadata.json", "review": "", "metareview": "", "pdf_size": 126948, "gs_citation": 7, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16673606686932962823&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 11, "aff": "Tokyo Institute of Technology; UC Santa Cruz", "aff_domain": "is.titech.ac.jp;cse.ucsc.edu", "email": "is.titech.ac.jp;cse.ucsc.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "Tokyo Institute of Technology;University of California, Santa Cruz", "aff_unique_dep": ";", "aff_unique_url": "https://www.titech.ac.jp;https://www.ucsc.edu", "aff_unique_abbr": "Titech;UCSC", "aff_campus_unique_index": "1", "aff_campus_unique": ";Santa Cruz", "aff_country_unique_index": "0;1", "aff_country_unique": "Japan;United States" }, { "id": "27160566bc", "title": "Bounded Finite State Controllers", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/4c5bcfec8584af0d967f1ab10179ca4b-Abstract.html", "author": "Pascal Poupart; Craig Boutilier", "abstract": "We describe a new approximation algorithm for solving partially observ- able MDPs. Our bounded policy iteration approach searches through the space of bounded-size, stochastic \ufb01nite state controllers, combining sev- eral advantages of gradient ascent (ef\ufb01ciency, search through restricted controller space) and policy iteration (less vulnerability to local optima).", "bibtex": "@inproceedings{NIPS2003_4c5bcfec,\n author = {Poupart, Pascal and Boutilier, Craig},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Bounded Finite State Controllers},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/4c5bcfec8584af0d967f1ab10179ca4b-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/4c5bcfec8584af0d967f1ab10179ca4b-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/4c5bcfec8584af0d967f1ab10179ca4b-Metadata.json", "review": "", "metareview": "", "pdf_size": 98946, "gs_citation": 271, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1835140708944181204&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 16, "aff": "Department of Computer Science, University of Toronto, Toronto, ON M5S 3H5; Department of Computer Science, University of Toronto, Toronto, ON M5S 3H5", "aff_domain": "cs.toronto.edu;cs.toronto.edu", "email": "cs.toronto.edu;cs.toronto.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of Toronto", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.utoronto.ca", "aff_unique_abbr": "U of T", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Toronto", "aff_country_unique_index": "0;0", "aff_country_unique": "Canada" }, { "id": "bdf7c957f3", "title": "Bounded Invariance and the Formation of Place Fields", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/f83630579d055dc5843ae693e7cdafe0-Abstract.html", "author": "Reto Wyss; Paul F. Verschure", "abstract": "One current explanation of the view independent representation of space by the place-cells of the hippocampus is that they arise out of the summation of view dependent Gaussians. This proposal as- sumes that visual representations show bounded invariance. Here we investigate whether a recently proposed visual encoding scheme called the temporal population code can provide such representa- tions. Our analysis is based on the behavior of a simulated robot in a virtual environment containing speci(cid:12)c visual cues. Our re- sults show that the temporal population code provides a represen- tational substrate that can naturally account for the formation of place (cid:12)elds.", "bibtex": "@inproceedings{NIPS2003_f8363057,\n author = {Wyss, Reto and Verschure, Paul},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Bounded Invariance and the Formation of Place Fields},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/f83630579d055dc5843ae693e7cdafe0-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/f83630579d055dc5843ae693e7cdafe0-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/f83630579d055dc5843ae693e7cdafe0-Metadata.json", "review": "", "metareview": "", "pdf_size": 290875, "gs_citation": 16, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=724901061605822753&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "Institute of Neuroinformatics, University/ETH Zurich, Zurich, Switzerland; Institute of Neuroinformatics, University/ETH Zurich, Zurich, Switzerland", "aff_domain": "ini.phys.ethz.ch;ini.phys.ethz.ch", "email": "ini.phys.ethz.ch;ini.phys.ethz.ch", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "ETH Zurich", "aff_unique_dep": "Institute of Neuroinformatics", "aff_unique_url": "https://www.ethz.ch", "aff_unique_abbr": "ETHZ", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Zurich", "aff_country_unique_index": "0;0", "aff_country_unique": "Switzerland" }, { "id": "d585e5d0f1", "title": "Can We Learn to Beat the Best Stock", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/8c9f32e03aeb2e3000825c8c875c4edd-Abstract.html", "author": "Allan Borodin; Ran El-Yaniv; Vincent Gogan", "abstract": "A novel algorithm for actively trading stocks is presented. While tradi- tional universal algorithms (and technical trading heuristics) attempt to predict winners or trends, our approach relies on predictable statistical relations between all pairs of stocks in the market. Our empirical results on historical markets provide strong evidence that this type of techni- cal trading can \u201cbeat the market\u201d and moreover, can beat the best stock in the market. In doing so we utilize a new idea for smoothing critical parameters in the context of expert learning.", "bibtex": "@inproceedings{NIPS2003_8c9f32e0,\n author = {Borodin, Allan and El-Yaniv, Ran and Gogan, Vincent},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Can We Learn to Beat the Best Stock},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/8c9f32e03aeb2e3000825c8c875c4edd-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/8c9f32e03aeb2e3000825c8c875c4edd-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/8c9f32e03aeb2e3000825c8c875c4edd-Metadata.json", "review": "", "metareview": "", "pdf_size": 151921, "gs_citation": 334, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14101788789683158218&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 27, "aff": "Department of Computer Science, University of Toronto1; Technion- Israel Institute of Technology2; Department of Computer Science, University of Toronto1", "aff_domain": "cs.toronto.edu;cs.technion.ac.il;cs.toronto.edu", "email": "cs.toronto.edu;cs.technion.ac.il;cs.toronto.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;0", "aff_unique_norm": "University of Toronto;Technion - Israel Institute of Technology", "aff_unique_dep": "Department of Computer Science;", "aff_unique_url": "https://www.utoronto.ca;https://www.technion.ac.il/en/", "aff_unique_abbr": "U of T;Technion", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Toronto;", "aff_country_unique_index": "0;1;0", "aff_country_unique": "Canada;Israel" }, { "id": "4fa9f252cd", "title": "Circuit Optimization Predicts Dynamic Networks for Chemosensory Orientation in Nematode C. elegans", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/ab8aa05e782481f55fc1412a97e7ac34-Abstract.html", "author": "Nathan A. Dunn; John S. Conery; Shawn R. Lockery", "abstract": "The connectivity of the nervous system of the nematode Caenorhabdi- tis elegans has been described completely, but the analysis of the neu- ronal basis of behavior in this system is just beginning. Here, we used an optimization algorithm to search for patterns of connectivity suf\ufb01- cient to compute the sensorimotor transformation underlying C. elegans chemotaxis, a simple form of spatial orientation behavior in which turn- ing probability is modulated by the rate of change of chemical concen- tration. Optimization produced differentiator networks with inhibitory feedback among all neurons. Further analysis showed that feedback reg- ulates the latency between sensory input and behavior. Common patterns of connectivity between the model and biological networks suggest new functions for previously identi\ufb01ed connections in the C. elegans nervous system.", "bibtex": "@inproceedings{NIPS2003_ab8aa05e,\n author = {Dunn, Nathan A. and Conery, John S. and Lockery, Shawn},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Circuit Optimization Predicts Dynamic Networks for Chemosensory Orientation in Nematode C. 
elegans},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/ab8aa05e782481f55fc1412a97e7ac34-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/ab8aa05e782481f55fc1412a97e7ac34-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/ab8aa05e782481f55fc1412a97e7ac34-Metadata.json", "review": "", "metareview": "", "pdf_size": 410517, "gs_citation": 6, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1176515738083111877&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 2, "aff": "Dept. of Computer Science, University of Oregon; Dept. of Computer Science, University of Oregon; Institute of Neuroscience, University of Oregon", "aff_domain": "cs.uoregon.edu;cs.uoregon.edu;lox.uoregon.edu", "email": "cs.uoregon.edu;cs.uoregon.edu;lox.uoregon.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of Oregon", "aff_unique_dep": "Dept. of Computer Science", "aff_unique_url": "https://www.uoregon.edu", "aff_unique_abbr": "UO", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "a3221a210f", "title": "Classification with Hybrid Generative/Discriminative Models", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/b53477c2821c1bf0da5d40e57b870d35-Abstract.html", "author": "Rajat Raina; Yirong Shen; Andrew McCallum; Andrew Y. Ng", "abstract": "Although discriminatively trained classi\ufb01ers are usually more accurate when labeled training data is abundant, previous work has shown that when training data is limited, generative classi\ufb01ers can out-perform them. This paper describes a hybrid model in which a high-dimensional subset of the parameters are trained to maximize generative likelihood, and another, small, subset of parameters are discriminatively trained to maximize conditional likelihood. We give a sample complexity bound showing that in order to \ufb01t the discriminative parameters well, the num- ber of training examples required depends only on the logarithm of the number of feature occurrences and feature set size. Experimental results show that hybrid models can provide lower test error and can produce better accuracy/coverage curves than either their purely generative or purely discriminative counterparts. We also discuss several advantages of hybrid models, and advocate further work in this area.", "bibtex": "@inproceedings{NIPS2003_b53477c2,\n author = {Raina, Rajat and Shen, Yirong and McCallum, Andrew and Ng, Andrew},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Classification with Hybrid Generative/Discriminative Models},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/b53477c2821c1bf0da5d40e57b870d35-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/b53477c2821c1bf0da5d40e57b870d35-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/b53477c2821c1bf0da5d40e57b870d35-Metadata.json", "review": "", "metareview": "", "pdf_size": 89575, "gs_citation": 335, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12582495627708914368&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 17, "aff": ";;;", "aff_domain": ";;;", "email": ";;;", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster" }, { "id": "1fe2dd81b0", "title": "Clustering with the Connectivity Kernel", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/cc0991344c3d760ae42259064406bae1-Abstract.html", "author": "Bernd Fischer; Volker Roth; Joachim M. Buhmann", "abstract": "Clustering aims at extracting hidden structure in dataset. While the prob- lem of \ufb01nding compact clusters has been widely studied in the litera- ture, extracting arbitrarily formed elongated structures is considered a much harder problem. In this paper we present a novel clustering algo- rithm which tackles the problem by a two step procedure: \ufb01rst the data are transformed in such a way that elongated structures become compact ones. In a second step, these new objects are clustered by optimizing a compactness-based criterion. The advantages of the method over related approaches are threefold: (i) robustness properties of compactness-based criteria naturally transfer to the problem of extracting elongated struc- tures, leading to a model which is highly robust against outlier objects; (ii) the transformed distances induce a Mercer kernel which allows us to formulate a polynomial approximation scheme to the generally NP- hard clustering problem; (iii) the new method does not contain free kernel parameters in contrast to methods like spectral clustering or mean-shift clustering.", "bibtex": "@inproceedings{NIPS2003_cc099134,\n author = {Fischer, Bernd and Roth, Volker and Buhmann, Joachim},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Clustering with the Connectivity Kernel},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/cc0991344c3d760ae42259064406bae1-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/cc0991344c3d760ae42259064406bae1-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/cc0991344c3d760ae42259064406bae1-Metadata.json", "review": "", "metareview": "", "pdf_size": 232890, "gs_citation": 120, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1651320960637107780&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 12, "aff": "Institute of Computational Science, Swiss Federal Institute of Technology Zurich; Institute of Computational Science, Swiss Federal Institute of Technology Zurich; Institute of Computational Science, Swiss Federal Institute of Technology Zurich", "aff_domain": "inf.ethz.ch;inf.ethz.ch;inf.ethz.ch", "email": "inf.ethz.ch;inf.ethz.ch;inf.ethz.ch", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "Swiss Federal Institute of Technology Zurich", "aff_unique_dep": "Institute of Computational Science", "aff_unique_url": "https://www.ethz.ch", "aff_unique_abbr": "ETH Zurich", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Zurich", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Switzerland" }, { "id": "0d03a758e6", "title": "Computing Gaussian Mixture Models with EM Using Equivalence Constraints", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/831caa1b600f852b7844499430ecac17-Abstract.html", "author": "Noam Shental; Aharon Bar-hillel; Tomer Hertz; Daphna Weinshall", "abstract": "Density estimation with Gaussian Mixture Models is a popular gener- ative technique used also for clustering. We develop a framework to incorporate side information in the form of equivalence constraints into the model estimation procedure. Equivalence constraints are de\ufb01ned on pairs of data points, indicating whether the points arise from the same source (positive constraints) or from different sources (negative con- straints). Such constraints can be gathered automatically in some learn- ing problems, and are a natural form of supervision in others. For the estimation of model parameters we present a closed form EM procedure which handles positive constraints, and a Generalized EM procedure us- ing a Markov net which handles negative constraints. Using publicly available data sets we demonstrate that such side information can lead to considerable improvement in clustering tasks, and that our algorithm is preferable to two other suggested methods using the same type of side information.", "bibtex": "@inproceedings{NIPS2003_831caa1b,\n author = {Shental, Noam and Bar-hillel, Aharon and Hertz, Tomer and Weinshall, Daphna},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Computing Gaussian Mixture Models with EM Using Equivalence Constraints},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/831caa1b600f852b7844499430ecac17-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/831caa1b600f852b7844499430ecac17-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/831caa1b600f852b7844499430ecac17-Metadata.json", "review": "", "metareview": "", "pdf_size": 328488, "gs_citation": 394, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=255954276712204037&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 17, "aff": "Computer Science & Eng. + Center for Neural Computation, Hebrew University of Jerusalem, Jerusalem, Israel 91904; Computer Science & Eng. + Center for Neural Computation, Hebrew University of Jerusalem, Jerusalem, Israel 91904; Computer Science & Eng. + Center for Neural Computation, Hebrew University of Jerusalem, Jerusalem, Israel 91904; Computer Science & Eng. + Center for Neural Computation, Hebrew University of Jerusalem, Jerusalem, Israel 91904", "aff_domain": "cs.huji.ac.il;cs.huji.ac.il;cs.huji.ac.il;cs.huji.ac.il", "email": "cs.huji.ac.il;cs.huji.ac.il;cs.huji.ac.il;cs.huji.ac.il", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0+1;0+1;0+1;0+1", "aff_unique_norm": "Computer Science & Engineering;Hebrew University of Jerusalem", "aff_unique_dep": "Computer Science & Engineering;Center for Neural Computation", "aff_unique_url": ";https://www.huji.ac.il", "aff_unique_abbr": ";HUJI", "aff_campus_unique_index": "1;1;1;1", "aff_campus_unique": ";Jerusalem", "aff_country_unique_index": "1;1;1;1", "aff_country_unique": ";Israel" }, { "id": "f7198979fe", "title": "Convex Methods for Transduction", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/83691715fdc5baf20ed0742b0b85785b-Abstract.html", "author": "Tijl D. Bie; Nello Cristianini", "abstract": "The 2-class transduction problem, as formulated by Vapnik [1], involves \ufb01nding a separating hyperplane for a labelled data set that is also maximally distant from a given set of unlabelled test points. In this form, the problem has exponential computational complexity in the size of the working set. So far it has been attacked by means of integer programming techniques [2] that do not scale to reasonable problem sizes, or by local search procedures [3]. In this paper we present a relaxation of this task based on semi- de\ufb01nite programming (SDP), resulting in a convex optimization problem that has polynomial complexity in the size of the data set. The results are very encouraging for mid sized data sets, however the cost is still too high for large scale problems, due to the high di- mensional search space. To this end, we restrict the feasible region by introducing an approximation based on solving an eigenproblem. With this approximation, the computational cost of the algorithm is such that problems with more than 1000 points can be treated.", "bibtex": "@inproceedings{NIPS2003_83691715,\n author = {Bie, Tijl and Cristianini, Nello},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Convex Methods for Transduction},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/83691715fdc5baf20ed0742b0b85785b-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/83691715fdc5baf20ed0742b0b85785b-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/83691715fdc5baf20ed0742b0b85785b-Metadata.json", "review": "", "metareview": "", "pdf_size": 138119, "gs_citation": 170, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12314533080268810887&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 11, "aff": "SAT-SCD/SISTA, K.U.Leuven; Department of Statistics, U.C.Davis", "aff_domain": "esat.kuleuven.ac.be;support-vector.net", "email": "esat.kuleuven.ac.be;support-vector.net", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "KU Leuven;University of California, Davis", "aff_unique_dep": "SAT-SCD/SISTA;Department of Statistics", "aff_unique_url": "https://www.kuleuven.be;https://www.ucdavis.edu", "aff_unique_abbr": "KU Leuven;UC Davis", "aff_campus_unique_index": "1", "aff_campus_unique": ";Davis", "aff_country_unique_index": "0;1", "aff_country_unique": "Belgium;United States" }, { "id": "9b0a560830", "title": "Decoding V1 Neuronal Activity using Particle Filtering with Volterra Kernels", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/01a0683665f38d8e5e567b3b15ca98bf-Abstract.html", "author": "Ryan C. Kelly; Tai Sing Lee", "abstract": "Decoding is a strategy that allows us to assess the amount of information neurons can provide about certain aspects of the visual scene. In this study, we develop a method based on Bayesian sequential updating and the particle \ufb01ltering algorithm to decode the activity of V1 neurons in awake monkeys. A distinction in our method is the use of Volterra ker- nels to \ufb01lter the particles, which live in a high dimensional space. This parametric Bayesian decoding scheme is compared to the optimal linear decoder and is shown to work consistently better than the linear optimal decoder. Interestingly, our results suggest that for decoding in real time, spike trains of as few as 10 independent but similar neurons would be suf\ufb01cient for decoding a critical scene variable in a particular class of visual stimuli. The reconstructed variable can predict the neural activity about as well as the actual signal with respect to the Volterra kernels.", "bibtex": "@inproceedings{NIPS2003_01a06836,\n author = {Kelly, Ryan and Lee, Tai Sing},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Decoding V1 Neuronal Activity using Particle Filtering with Volterra Kernels},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/01a0683665f38d8e5e567b3b15ca98bf-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/01a0683665f38d8e5e567b3b15ca98bf-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/01a0683665f38d8e5e567b3b15ca98bf-Metadata.json", "review": "", "metareview": "", "pdf_size": 277168, "gs_citation": 17, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16450292816048931247&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 10, "aff": "Center for the Neural Basis of Cognition, Carnegie-Mellon University; Center for the Neural Basis of Cognition, Carnegie-Mellon University", "aff_domain": "cs.cmu.edu;cnbc.cmu.edu", "email": "cs.cmu.edu;cnbc.cmu.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Carnegie Mellon University", "aff_unique_dep": "Center for the Neural Basis of Cognition", "aff_unique_url": "https://www.cmu.edu", "aff_unique_abbr": "CMU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "713ea6cbe3", "title": "Denoising and Untangling Graphs Using Degree Priors", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/d254c8a084d4545bd80577481aa03076-Abstract.html", "author": "Quaid D. Morris; Brendan J. Frey", "abstract": "This paper addresses the problem of untangling hidden graphs from a set of noisy detections of undirected edges. We present a model of the generation of the observed graph that includes degree-based structure priors on the hidden graphs. Exact inference in the model is intractable; we present an efficient approximate inference algorithm to compute edge appearance posteriors. We evaluate our model and algorithm on a biological graph inference problem.", "bibtex": "@inproceedings{NIPS2003_d254c8a0,\n author = {Morris, Quaid and Frey, Brendan J},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Denoising and Untangling Graphs Using Degree Priors},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/d254c8a084d4545bd80577481aa03076-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/d254c8a084d4545bd80577481aa03076-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/d254c8a084d4545bd80577481aa03076-Metadata.json", "review": "", "metareview": "", "pdf_size": 131529, "gs_citation": 20, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9200901089783557068&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": ";", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster" }, { "id": "ef37b293b2", "title": "Design of Experiments via Information Theory", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/5e6bd7a6970cd4325e587f02667f7f73-Abstract.html", "author": "Liam Paninski", "abstract": "We discuss an idea for collecting data in a relatively efficient manner. 
Our point of view is Bayesian and information-theoretic: on any given trial, we want to adaptively choose the input in such a way that the mutual in- formation between the (unknown) state of the system and the (stochastic) output is maximal, given any prior information (including data collected on any previous trials). We prove a theorem that quanti\ufb01es the effective- ness of this strategy and give a few illustrative examples comparing the performance of this adaptive technique to that of the more usual nonadap- tive experimental design. For example, we are able to explicitly calculate the asymptotic relative ef\ufb01ciency of the \u201cstaircase method\u201d widely em- ployed in psychophysics research, and to demonstrate the dependence of this ef\ufb01ciency on the form of the \u201cpsychometric function\u201d underlying the output responses.", "bibtex": "@inproceedings{NIPS2003_5e6bd7a6,\n author = {Paninski, Liam},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Design of Experiments via Information Theory},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/5e6bd7a6970cd4325e587f02667f7f73-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/5e6bd7a6970cd4325e587f02667f7f73-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/5e6bd7a6970cd4325e587f02667f7f73-Metadata.json", "review": "", "metareview": "", "pdf_size": 71030, "gs_citation": 12, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11578467957379774172&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Center for Neural Science, New York University", "aff_domain": "cns.nyu.edu", "email": "cns.nyu.edu", "github": "", "project": "http://www.cns.nyu.edu/~liam", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "New York University", "aff_unique_dep": "Center for Neural Science", "aff_unique_url": "https://www.nyu.edu", "aff_unique_abbr": "NYU", "aff_campus_unique_index": "0", "aff_campus_unique": "New York", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "id": "25fabd41ae", "title": "Different Cortico-Basal Ganglia Loops Specialize in Reward Prediction at Different Time Scales", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/7827d1ec626c891d4b61a15c9dff296e-Abstract.html", "author": "Saori C. Tanaka; Kenji Doya; Go Okada; Kazutaka Ueda; Yasumasa Okamoto; Shigeto Yamawaki", "abstract": "To understand the brain mechanisms involved in reward prediction on different time scales, we developed a Markov decision task that requires prediction of both immediate and future rewards, and ana- lyzed subjects\u2019 brain activities using functional MRI. We estimated the time course of reward prediction and reward prediction error on different time scales from subjects' performance data, and used them as the explanatory variables for SPM analysis. We found topog- raphic maps of different time scales in medial frontal cortex and striatum. 
The result suggests that different cortico-basal ganglia loops are specialized for reward prediction on different time scales.", "bibtex": "@inproceedings{NIPS2003_7827d1ec,\n author = {Tanaka, Saori and Doya, Kenji and Okada, Go and Ueda, Kazutaka and Okamoto, Yasumasa and Yamawaki, Shigeto},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Different Cortico-Basal Ganglia Loops Specialize in Reward Prediction at Different Time Scales},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/7827d1ec626c891d4b61a15c9dff296e-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/7827d1ec626c891d4b61a15c9dff296e-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/7827d1ec626c891d4b61a15c9dff296e-Metadata.json", "review": "", "metareview": "", "pdf_size": 664382, "gs_citation": 5, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3772402946878326186&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": ";;;;;", "aff_domain": ";;;;;", "email": ";;;;;", "github": "", "project": "", "author_num": 6, "track": "main", "status": "Poster" }, { "id": "747c64353d", "title": "Discriminating Deformable Shape Classes", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/3f53d7190148675e3cd472fc826828c5-Abstract.html", "author": "Salvador Ruiz-correa; Linda G. Shapiro; Marina Meila; Gabriel Berson", "abstract": "We present and empirically test a novel approach for categorizing 3-D free form ob- ject shapes represented by range data . In contrast to traditional surface-signature based systems that use alignment to match speci\ufb01c objects, we adapted the newly introduced symbolic-signature representation to classify deformable shapes [10]. Our approach con- structs an abstract description of shape classes using an ensemble of classi\ufb01ers that learn object class parts and their corresponding geometrical relationships from a set of numeric and symbolic descriptors. We used our classi\ufb01cation engine in a series of large scale dis- crimination experiments on two well-de\ufb01ned classes that share many common distinctive features. The experimental results suggest that our method outperforms traditional numeric signature-based methodologies. 1", "bibtex": "@inproceedings{NIPS2003_3f53d719,\n author = {Ruiz-correa, Salvador and Shapiro, Linda and Meila, Marina and Berson, Gabriel},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Discriminating Deformable Shape Classes},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/3f53d7190148675e3cd472fc826828c5-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/3f53d7190148675e3cd472fc826828c5-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/3f53d7190148675e3cd472fc826828c5-Metadata.json", "review": "", "metareview": "", "pdf_size": 197857, "gs_citation": 12, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5302383183423808946&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": ";;;", "aff_domain": ";;;", "email": ";;;", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster" }, { "id": "298b200268", "title": "Discriminative Fields for Modeling Spatial Dependencies in Natural Images", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/92049debbe566ca5782a3045cf300a3c-Abstract.html", "author": "Sanjiv Kumar; Martial Hebert", "abstract": "In this paper we present Discriminative Random Fields (DRF), a discrim- inative framework for the classi\ufb01cation of natural image regions by incor- porating neighborhood spatial dependencies in the labels as well as the observed data. The proposed model exploits local discriminative models and allows to relax the assumption of conditional independence of the observed data given the labels, commonly used in the Markov Random Field (MRF) framework. The parameters of the DRF model are learned using penalized maximum pseudo-likelihood method. Furthermore, the form of the DRF model allows the MAP inference for binary classi\ufb01ca- tion problems using the graph min-cut algorithms. The performance of the model was veri\ufb01ed on the synthetic as well as the real-world images. The DRF model outperforms the MRF model in the experiments.", "bibtex": "@inproceedings{NIPS2003_92049deb,\n author = {Kumar, Sanjiv and Hebert, Martial},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Discriminative Fields for Modeling Spatial Dependencies in Natural Images},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/92049debbe566ca5782a3045cf300a3c-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/92049debbe566ca5782a3045cf300a3c-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/92049debbe566ca5782a3045cf300a3c-Metadata.json", "review": "", "metareview": "", "pdf_size": 177418, "gs_citation": 319, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10778440019733852858&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 24, "aff": "The Robotics Institute, Carnegie Mellon University; The Robotics Institute, Carnegie Mellon University", "aff_domain": "ri.cmu.edu;ri.cmu.edu", "email": "ri.cmu.edu;ri.cmu.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Carnegie Mellon University", "aff_unique_dep": "The Robotics Institute", "aff_unique_url": "https://www.cmu.edu", "aff_unique_abbr": "CMU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "286f431b51", "title": "Distributed Optimization in Adaptive Networks", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/07a9d3fed4c5ea6b17e80258dee231fa-Abstract.html", "author": "Ciamac C. Moallemi; Benjamin V. Roy", "abstract": "We develop a protocol for optimizing dynamic behavior of a network of simple electronic components, such as a sensor network, an ad hoc network of mobile devices, or a network of communication switches. This protocol requires only local communication and simple computa- tions which are distributed among devices. The protocol is scalable to large networks. As a motivating example, we discuss a problem involv- ing optimization of power consumption, delay, and buffer over\ufb02ow in a sensor network. Our approach builds on policy gradient methods for optimization of Markov decision processes. The protocol can be viewed as an extension of policy gradient methods to a context involving a team of agents op- timizing aggregate performance through asynchronous distributed com- munication and computation. We establish that the dynamics of the pro- tocol approximate the solution to an ordinary differential equation that follows the gradient of the performance objective.", "bibtex": "@inproceedings{NIPS2003_07a9d3fe,\n author = {Moallemi, Ciamac C and Roy, Benjamin},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Distributed Optimization in Adaptive Networks},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/07a9d3fed4c5ea6b17e80258dee231fa-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/07a9d3fed4c5ea6b17e80258dee231fa-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/07a9d3fed4c5ea6b17e80258dee231fa-Metadata.json", "review": "", "metareview": "", "pdf_size": 117947, "gs_citation": 52, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13608337785791116212&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "Electrical Engineering, Stanford University; Management Science and Engineering + Electrical Engineering, Stanford University", "aff_domain": "stanford.edu;stanford.edu", "email": "stanford.edu;stanford.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0+0", "aff_unique_norm": "Stanford University", "aff_unique_dep": "Electrical Engineering", "aff_unique_url": "https://www.stanford.edu", "aff_unique_abbr": "Stanford", "aff_campus_unique_index": "0;0+0", "aff_campus_unique": "Stanford", "aff_country_unique_index": "0;0+0", "aff_country_unique": "United States" }, { "id": "07a92565f6", "title": "Dopamine Modulation in a Basal Ganglio-Cortical Network of Working Memory", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/ea204361fe7f024b130143eb3e189a18-Abstract.html", "author": "Aaron J. Gruber; Peter Dayan; Boris S. Gutkin; Sara A. Solla", "abstract": "Dopamine exerts two classes of effect on the sustained neural activity in prefrontal cortex that underlies working memory. Direct release in the cortex increases the contrast of prefrontal neurons, enhancing the ro- bustness of storage. Release of dopamine in the striatum is associated with salient stimuli and makes medium spiny neurons bistable; this mod- ulation of the output of spiny neurons affects prefrontal cortex so as to indirectly gate access to working memory and additionally damp sensi- tivity to noise. Existing models have treated dopamine in one or other structure, or have addressed basal ganglia gating of working memory ex- clusive of dopamine effects. In this paper we combine these mechanisms and explore their joint effect. We model a memory-guided saccade task to illustrate how dopamine\u2019s actions lead to working memory that is se- lective for salient input and has increased robustness to distraction.", "bibtex": "@inproceedings{NIPS2003_ea204361,\n author = {Gruber, Aaron and Dayan, Peter and Gutkin, Boris and Solla, Sara},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Dopamine Modulation in a Basal Ganglio-Cortical Network of Working Memory},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/ea204361fe7f024b130143eb3e189a18-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/ea204361fe7f024b130143eb3e189a18-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/ea204361fe7f024b130143eb3e189a18-Metadata.json", "review": "", "metareview": "", "pdf_size": 1451696, "gs_citation": 4, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5068781869512113058&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "Biomedical Engineering1, Physiology2, and Physics and Astronomy4, Northwestern University, Chicago, IL, USA; Gatsby Computational Neuroscience Unit3, University College London, London, UK; Gatsby Computational Neuroscience Unit3, University College London, London, UK; Biomedical Engineering1, Physiology2, and Physics and Astronomy4, Northwestern University, Chicago, IL, USA", "aff_domain": "northwestern.edu;gatsby.ucl.ac.uk;gatsby.ucl.ac.uk;northwestern.edu", "email": "northwestern.edu;gatsby.ucl.ac.uk;gatsby.ucl.ac.uk;northwestern.edu", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;1;1;0", "aff_unique_norm": "Northwestern University;University College London", "aff_unique_dep": "Biomedical Engineering;Gatsby Computational Neuroscience Unit", "aff_unique_url": "https://www.northwestern.edu;https://www.ucl.ac.uk", "aff_unique_abbr": "NU;UCL", "aff_campus_unique_index": "0;1;1;0", "aff_campus_unique": "Chicago;London", "aff_country_unique_index": "0;1;1;0", "aff_country_unique": "United States;United Kingdom" }, { "id": "a62434603a", "title": "Dynamical Modeling with Kernels for Nonlinear Time Series Prediction", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/8d09e4b85c783cbc30c9b8ae175f2d33-Abstract.html", "author": "Liva Ralaivola; Florence d'Alch\u00e9-Buc", "abstract": "We consider the question of predicting nonlinear time series. Kernel Dy- namical Modeling (KDM), a new method based on kernels, is proposed as an extension to linear dynamical models. The kernel trick is used twice: \ufb01rst, to learn the parameters of the model, and second, to compute preimages of the time series predicted in the feature space by means of Support Vector Regression. Our model shows strong connection with the classic Kalman Filter model, with the kernel feature space as hidden state space. Kernel Dynamical Modeling is tested against two benchmark time series and achieves high quality predictions.", "bibtex": "@inproceedings{NIPS2003_8d09e4b8,\n author = {Ralaivola, Liva and d\\textquotesingle Alch\\'{e}-Buc, Florence},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Dynamical Modeling with Kernels for Nonlinear Time Series Prediction},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/8d09e4b85c783cbc30c9b8ae175f2d33-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/8d09e4b85c783cbc30c9b8ae175f2d33-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/8d09e4b85c783cbc30c9b8ae175f2d33-Metadata.json", "review": "", "metareview": "", "pdf_size": 98723, "gs_citation": 78, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1344971728391758822&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 12, "aff": "Laboratoire d\u2019Informatique de Paris 6 + Universit\u00e9 Pierre et Marie Curie; Laboratoire d\u2019Informatique de Paris 6 + Universit\u00e9 Pierre et Marie Curie", "aff_domain": "lip6.fr;lip6.fr", "email": "lip6.fr;lip6.fr", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0+0;0+0", "aff_unique_norm": "Universit\u00e9 Pierre et Marie Curie", "aff_unique_dep": "Laboratoire d\u2019Informatique de Paris 6", "aff_unique_url": "https://www.upmc.fr", "aff_unique_abbr": "UPMC", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Paris;", "aff_country_unique_index": "0+0;0+0", "aff_country_unique": "France" }, { "id": "9ac10076a0", "title": "Efficient Multiscale Sampling from Products of Gaussian Mixtures", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/b1301141feffabac455e1f90a7de2054-Abstract.html", "author": "Alexander T. Ihler; Erik B. Sudderth; William T. Freeman; Alan S. Willsky", "abstract": "The problem of approximating the product of several Gaussian mixture distributions arises in a number of contexts, including the nonparametric belief propagation (NBP) inference algorithm and the training of product of experts models. This paper develops two multiscale algorithms for sampling from a product of Gaussian mixtures, and compares their performance to existing methods. The first is a multiscale variant of previously proposed Monte Carlo techniques, with comparable theoretical guarantees but improved empirical convergence rates. The second makes use of approximate kernel density evaluation methods to construct a fast approximate sampler, which is guaranteed to sample points to within a tunable parameter \u03b5 of their true probability. We compare both multiscale samplers on a set of computational examples motivated by NBP, demonstrating significant improvements over existing methods.", "bibtex": "@inproceedings{NIPS2003_b1301141,\n author = {Ihler, Alexander and Sudderth, Erik and Freeman, William and Willsky, Alan},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Efficient Multiscale Sampling from Products of Gaussian Mixtures},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/b1301141feffabac455e1f90a7de2054-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/b1301141feffabac455e1f90a7de2054-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/b1301141feffabac455e1f90a7de2054-Metadata.json", "review": "", "metareview": "", "pdf_size": 233648, "gs_citation": 115, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14752952291336747035&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 18, "aff": "Department of Electrical Engineering and Computer Science; Department of Electrical Engineering and Computer Science; Department of Electrical Engineering and Computer Science; Department of Electrical Engineering and Computer Science", "aff_domain": "mit.edu;mit.edu;ai.mit.edu;mit.edu", "email": "mit.edu;mit.edu;ai.mit.edu;mit.edu", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Massachusetts Institute of Technology", "aff_unique_dep": "Department of Electrical Engineering and Computer Science", "aff_unique_url": "https://web.mit.edu", "aff_unique_abbr": "MIT", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Cambridge", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "id": "fe9bc1f9d8", "title": "Efficient and Robust Feature Extraction by Maximum Margin Criterion", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/6048ff4e8cb07aa60b6777b6f7384d52-Abstract.html", "author": "Haifeng Li; Tao Jiang; Keshu Zhang", "abstract": "A new feature extraction criterion, maximum margin criterion (MMC), is proposed in this paper. This new criterion is general in the sense that, when combined with a suitable constraint, it can actually give rise to the most popular feature extractor in the literature, linear discriminate analysis (LDA). We derive a new feature extractor based on MMC using a different constraint that does not depend on the nonsingularity of the within-class scatter matrix Sw. Such a dependence is a major drawback of LDA especially when the sample size is small. The kernelized (nonlin- ear) counterpart of this linear feature extractor is also established in this paper. Our preliminary experimental results on face images demonstrate that the new feature extractors are ef\ufb01cient and stable.", "bibtex": "@inproceedings{NIPS2003_6048ff4e,\n author = {Li, Haifeng and Jiang, Tao and Zhang, Keshu},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Efficient and Robust Feature Extraction by Maximum Margin Criterion},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/6048ff4e8cb07aa60b6777b6f7384d52-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/6048ff4e8cb07aa60b6777b6f7384d52-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/6048ff4e8cb07aa60b6777b6f7384d52-Metadata.json", "review": "", "metareview": "", "pdf_size": 81180, "gs_citation": 1188, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12507439161026456656&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 22, "aff": "Department of Computer Science, University of California, Riverside, CA 92521; Department of Computer Science, University of California, Riverside, CA 92521; Department of Electrical Engineering, University of New Orleans, New Orleans, LA 70148", "aff_domain": "cs.ucr.edu;cs.ucr.edu;uno.edu", "email": "cs.ucr.edu;cs.ucr.edu;uno.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;1", "aff_unique_norm": "University of California, Riverside;University of New Orleans", "aff_unique_dep": "Department of Computer Science;Department of Electrical Engineering", "aff_unique_url": "https://www.ucr.edu;https://www.uno.edu", "aff_unique_abbr": "UCR;UNO", "aff_campus_unique_index": "0;0;1", "aff_campus_unique": "Riverside;New Orleans", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "7683cb411a", "title": "Eigenvoice Speaker Adaptation via Composite Kernel Principal Component Analysis", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/e139c454239bfde741e893edb46a06cc-Abstract.html", "author": "James T. Kwok; Brian Mak; Simon Ho", "abstract": "Eigenvoice speaker adaptation has been shown to be effective when only a small amount of adaptation data is available. At the heart of the method is principal component analysis (PCA) employed to \ufb01nd the most im- portant eigenvoices. In this paper, we postulate that nonlinear PCA, in particular kernel PCA, may be even more effective. One major challenge is to map the feature-space eigenvoices back to the observation space so that the state observation likelihoods can be computed during the estima- tion of eigenvoice weights and subsequent decoding. Our solution is to compute kernel PCA using composite kernels, and we will call our new method kernel eigenvoice speaker adaptation. On the TIDIGITS corpus, we found that compared with a speaker-independent model, our kernel eigenvoice adaptation method can reduce the word error rate by 28\u201333% while the standard eigenvoice approach can only match the performance of the speaker-independent model.", "bibtex": "@inproceedings{NIPS2003_e139c454,\n author = {Kwok, James and Mak, Brian and Ho, Simon},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Eigenvoice Speaker Adaptation via Composite Kernel Principal Component Analysis},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/e139c454239bfde741e893edb46a06cc-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/e139c454239bfde741e893edb46a06cc-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/e139c454239bfde741e893edb46a06cc-Metadata.json", "review": "", "metareview": "", "pdf_size": 85435, "gs_citation": 25, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4451613605683684189&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 17, "aff": "Department of Computer Science, Hong Kong University of Science and Technology; Department of Computer Science, Hong Kong University of Science and Technology; Department of Computer Science, Hong Kong University of Science and Technology", "aff_domain": "cs.ust.hk;cs.ust.hk;cs.ust.hk", "email": "cs.ust.hk;cs.ust.hk;cs.ust.hk", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "Hong Kong University of Science and Technology", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.ust.hk", "aff_unique_abbr": "HKUST", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Hong Kong SAR", "aff_country_unique_index": "0;0;0", "aff_country_unique": "China" }, { "id": "c099e7ff1e", "title": "Entrainment of Silicon Central Pattern Generators for Legged Locomotory Control", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/4de754248c196c85ee4fbdcee89179bd-Abstract.html", "author": "Francesco Tenore; Ralph Etienne-Cummings; M. A. Lewis", "abstract": "We have constructed a second generation CPG chip capable of generating the necessary timing to control the leg of a walking machine. We demonstrate improvements over a previous chip by moving toward a significantly more versatile device. This includes a larger number of silicon neurons, more sophisticated neurons including voltage dependent charging and relative and absolute refractory periods, and enhanced programmability of neural networks. This chip builds on the basic results achieved on a previous chip and expands its versatility to get closer to a self-contained locomotion controller for walking robots. 1", "bibtex": "@inproceedings{NIPS2003_4de75424,\n author = {Tenore, Francesco and Etienne-Cummings, Ralph and Lewis, M.},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Entrainment of Silicon Central Pattern Generators for Legged Locomotory Control},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/4de754248c196c85ee4fbdcee89179bd-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/4de754248c196c85ee4fbdcee89179bd-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/4de754248c196c85ee4fbdcee89179bd-Metadata.json", "review": "", "metareview": "", "pdf_size": 81845, "gs_citation": 24, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16392607592536896884&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "Dept. of Electrical & Computer Eng., Johns Hopkins University; Dept. 
of Electrical & Computer Eng., Johns Hopkins University + Institute of Systems Research, University of Maryland; Iguana Robotics, Inc.", "aff_domain": "jhu.edu;jhu.edu;iguana-robotics.com", "email": "jhu.edu;jhu.edu;iguana-robotics.com", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0+1;2", "aff_unique_norm": "Johns Hopkins University;University of Maryland;Iguana Robotics", "aff_unique_dep": "Dept. of Electrical & Computer Eng.;Institute of Systems Research;", "aff_unique_url": "https://www.jhu.edu;https://www.umd.edu;", "aff_unique_abbr": "JHU;UMD;Iguana Robotics", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0+0;0", "aff_country_unique": "United States" }, { "id": "a2e3ca148f", "title": "Envelope-based Planning in Relational MDPs", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/4a06d868d044c50af0cf9bc82d2fc19f-Abstract.html", "author": "Natalia H. Gardiol; Leslie P. Kaelbling", "abstract": "A mobile robot acting in the world is faced with a large amount of sen- sory data and uncertainty in its action outcomes. Indeed, almost all in- teresting sequential decision-making domains involve large state spaces and large, stochastic action sets. We investigate a way to act intelli- gently as quickly as possible in domains where \ufb01nding a complete policy would take a hopelessly long time. This approach, Relational Envelope- based Planning (REBP) tackles large, noisy problems along two axes. First, describing a domain as a relational MDP (instead of as an atomic or propositionally-factored MDP) allows problem structure and dynam- ics to be captured compactly with a small set of probabilistic, relational rules. Second, an envelope-based approach to planning lets an agent be- gin acting quickly within a restricted part of the full state space and to judiciously expand its envelope as resources permit.", "bibtex": "@inproceedings{NIPS2003_4a06d868,\n author = {Gardiol, Natalia and Kaelbling, Leslie},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Envelope-based Planning in Relational MDPs},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/4a06d868d044c50af0cf9bc82d2fc19f-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/4a06d868d044c50af0cf9bc82d2fc19f-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/4a06d868d044c50af0cf9bc82d2fc19f-Metadata.json", "review": "", "metareview": "", "pdf_size": 996428, "gs_citation": 56, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17556365621646777965&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "MIT AI Lab; MIT AI Lab", "aff_domain": "ai.mit.edu;ai.mit.edu", "email": "ai.mit.edu;ai.mit.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Massachusetts Institute of Technology", "aff_unique_dep": "Artificial Intelligence Laboratory", "aff_unique_url": "http://www.ai.mit.edu", "aff_unique_abbr": "MIT AI Lab", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Cambridge", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "52035536fc", "title": "Error Bounds for Transductive Learning via Compression and Clustering", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/81b073de9370ea873f548e31b8adc081-Abstract.html", "author": "Philip Derbeko; Ran El-Yaniv; Ron Meir", "abstract": "This paper is concerned with transductive learning. Although transduc- tion appears to be an easier task than induction, there have not been many provably useful algorithms and bounds for transduction. We present ex- plicit error bounds for transduction and derive a general technique for devising bounds within this setting. The technique is applied to derive error bounds for compression schemes such as (transductive) SVMs and for transduction algorithms based on clustering.", "bibtex": "@inproceedings{NIPS2003_81b073de,\n author = {Derbeko, Philip and El-Yaniv, Ran and Meir, Ron},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Error Bounds for Transductive Learning via Compression and Clustering},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/81b073de9370ea873f548e31b8adc081-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/81b073de9370ea873f548e31b8adc081-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/81b073de9370ea873f548e31b8adc081-Metadata.json", "review": "", "metareview": "", "pdf_size": 132893, "gs_citation": 19, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=712317251280099973&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "Technion- Israel Institute of Technology; Technion- Israel Institute of Technology; Technion- Israel Institute of Technology", "aff_domain": "cs.technion.ac.il;cs.technion.ac.il;ee.technion.ac.il", "email": "cs.technion.ac.il;cs.technion.ac.il;ee.technion.ac.il", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "Technion - Israel Institute of Technology", "aff_unique_dep": "", "aff_unique_url": "https://www.technion.ac.il/en/", "aff_unique_abbr": "Technion", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Israel" }, { "id": "029e862056", "title": "Estimating Internal Variables and Paramters of a Learning Agent by a Particle Filter", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/db60b95decdeed944b4cd8685417cfdc-Abstract.html", "author": "Kazuyuki Samejima; Kenji Doya; Yasumasa Ueda; Minoru Kimura", "abstract": "When we model a higher order functions, such as learning and memory, we face a dif\ufb01culty of comparing neural activities with hidden variables that depend on the history of sensory and motor signals and the dynam- ics of the network. Here, we propose novel method for estimating hidden variables of a learning agent, such as connection weights from sequences of observable variables. Bayesian estimation is a method to estimate the posterior probability of hidden variables from observable data sequence using a dynamic model of hidden and observable variables. In this pa- per, we apply particle \ufb01lter for estimating internal parameters and meta- parameters of a reinforcement learning model. We veri\ufb01ed the effective- ness of the method using both arti\ufb01cial data and real animal behavioral data.", "bibtex": "@inproceedings{NIPS2003_db60b95d,\n author = {Samejima, Kazuyuki and Doya, Kenji and Ueda, Yasumasa and Kimura, Minoru},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Estimating Internal Variables and Paramters of a Learning Agent by a Particle Filter},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/db60b95decdeed944b4cd8685417cfdc-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/db60b95decdeed944b4cd8685417cfdc-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/db60b95decdeed944b4cd8685417cfdc-Metadata.json", "review": "", "metareview": "", "pdf_size": 284232, "gs_citation": 30, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17971536066912223409&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": ";;;", "aff_domain": ";;;", "email": ";;;", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster" }, { "id": "497cc6410c", "title": "Extending Q-Learning to General Adaptive Multi-Agent Systems", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/e71e5cd119bbc5797164fb0cd7fd94a4-Abstract.html", "author": "Gerald Tesauro", "abstract": "Recent multi-agent extensions of Q-Learning require knowledge of other agents\u2019 payoffs and Q-functions, and assume game-theoretic play at all times by all other agents. This paper proposes a fundamentally different approach, dubbed \u201cHyper-Q\u201d Learning, in which values of mixed strategies rather than base actions are learned, and in which other agents\u2019 strategies are estimated from observed actions via Bayesian in- ference. Hyper-Q may be effective against many different types of adap- tive agents, even if they are persistently dynamic. Against certain broad categories of adaptation, it is argued that Hyper-Q may converge to ex- act optimal time-varying policies. In tests using Rock-Paper-Scissors, Hyper-Q learns to significantly exploit an Infinitesimal Gradient Ascent (IGA) player, as well as a Policy Hill Climber (PHC) player. Preliminary analysis of Hyper-Q against itself is also presented.", "bibtex": "@inproceedings{NIPS2003_e71e5cd1,\n author = {Tesauro, Gerald},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Extending Q-Learning to General Adaptive Multi-Agent Systems},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/e71e5cd119bbc5797164fb0cd7fd94a4-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/e71e5cd119bbc5797164fb0cd7fd94a4-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/e71e5cd119bbc5797164fb0cd7fd94a4-Metadata.json", "review": "", "metareview": "", "pdf_size": 202655, "gs_citation": 313, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14035548583933363387&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "IBM Thomas J.Watson Research Center", "aff_domain": "watson.ibm.com", "email": "watson.ibm.com", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "IBM", "aff_unique_dep": "Thomas J. 
Watson Research Center", "aff_unique_url": "https://www.ibm.com/research", "aff_unique_abbr": "IBM", "aff_campus_unique_index": "0", "aff_campus_unique": "Yorktown Heights", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "id": "c14fc669a9", "title": "Extreme Components Analysis", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/c5dc3e08849bec07e33ca353de62ea04-Abstract.html", "author": "Max Welling; Christopher Williams; Felix V. Agakov", "abstract": "Principal components analysis (PCA) is one of the most widely used techniques in machine learning and data mining. Minor components analysis (MCA) is less well known, but can also play an important role in the presence of constraints on the data distribution. In this paper we present a probabilistic model for \u201cextreme components analysis\u201d (XCA) which at the maximum likelihood solution extracts an optimal combina- tion of principal and minor components. For a given number of compo- nents, the log-likelihood of the XCA model is guaranteed to be larger or equal than that of the probabilistic models for PCA and MCA. We de- scribe an ef\ufb01cient algorithm to solve for the globally optimal solution. For log-convex spectra we prove that the solution consists of principal components only, while for log-concave spectra the solution consists of minor components. In general, the solution admits a combination of both. In experiments we explore the properties of XCA on some synthetic and real-world datasets.", "bibtex": "@inproceedings{NIPS2003_c5dc3e08,\n author = {Welling, Max and Williams, Christopher and Agakov, Felix},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Extreme Components Analysis},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/c5dc3e08849bec07e33ca353de62ea04-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/c5dc3e08849bec07e33ca353de62ea04-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/c5dc3e08849bec07e33ca353de62ea04-Metadata.json", "review": "", "metareview": "", "pdf_size": 162188, "gs_citation": 33, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13377558578657923473&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 17, "aff": "Department of Computer Science, University of Toronto; Institute for Adaptive and Neural Computation, School of Informatics, University of Edinburgh; Institute for Adaptive and Neural Computation, School of Informatics, University of Edinburgh", "aff_domain": "cs.toronto.edu;inf.ed.ac.uk;inf.ed.ac.uk", "email": "cs.toronto.edu;inf.ed.ac.uk;inf.ed.ac.uk", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;1", "aff_unique_norm": "University of Toronto;University of Edinburgh", "aff_unique_dep": "Department of Computer Science;School of Informatics", "aff_unique_url": "https://www.utoronto.ca;https://www.ed.ac.uk", "aff_unique_abbr": "U of T;Edinburgh", "aff_campus_unique_index": "0;1;1", "aff_campus_unique": "Toronto;Edinburgh", "aff_country_unique_index": "0;1;1", "aff_country_unique": "Canada;United Kingdom" }, { "id": "59e0b6a6a3", "title": "Eye Micro-movements Improve Stimulus Detection Beyond the Nyquist Limit in the Peripheral Retina", "site": 
"https://papers.nips.cc/paper_files/paper/2003/hash/b06f50d1f89bd8b2a0fb771c1a69c2b0-Abstract.html", "author": "Matthias H. Hennig; Florentin W\u00f6rg\u00f6tter", "abstract": "Even under perfect \ufb01xation the human eye is under steady motion (tremor, microsaccades, slow drift). The \u201cdynamic\u201d theory of vi- sion [1, 2] states that eye-movements can improve hyperacuity. Accord- ing to this theory, eye movements are thought to create variable spatial excitation patterns on the photoreceptor grid, which will allow for better spatiotemporal summation at later stages. We reexamine this theory us- ing a realistic model of the vertebrate retina by comparing responses of a resting and a moving eye. The performance of simulated ganglion cells in a hyperacuity task is evaluated by ideal observer analysis. We \ufb01nd that in the central retina eye-micromovements have no effect on the perfor- mance. Here optical blurring limits vernier acuity. In the retinal periph- ery however, eye-micromovements clearly improve performance. Based on ROC analysis, our predictions are quantitatively testable in electro- physiological and psychophysical experiments.", "bibtex": "@inproceedings{NIPS2003_b06f50d1,\n author = {Hennig, Matthias and W\\\"{o}rg\\\"{o}tter, Florentin},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Eye Micro-movements Improve Stimulus Detection Beyond the Nyquist Limit in the Peripheral Retina},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/b06f50d1f89bd8b2a0fb771c1a69c2b0-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/b06f50d1f89bd8b2a0fb771c1a69c2b0-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/b06f50d1f89bd8b2a0fb771c1a69c2b0-Metadata.json", "review": "", "metareview": "", "pdf_size": 467897, "gs_citation": 50, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9913982528567642008&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 13, "aff": "Computational Neuroscience, University of Stirling; Psychology, University of Stirling", "aff_domain": "cn.stir.ac.uk;cn.stir.ac.uk", "email": "cn.stir.ac.uk;cn.stir.ac.uk", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of Stirling", "aff_unique_dep": "Computational Neuroscience", "aff_unique_url": "https://www.stirling.ac.uk", "aff_unique_abbr": "", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United Kingdom" }, { "id": "52bc6493d9", "title": "Eye Movements for Reward Maximization", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/da4902cb0bc38210839714ebdcf0efc3-Abstract.html", "author": "Nathan Sprague; Dana Ballard", "abstract": "University of Rochester Rochester, NY 14627 dana@cs.rochester.edu", "bibtex": "@inproceedings{NIPS2003_da4902cb,\n author = {Sprague, Nathan and Ballard, Dana},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Eye Movements for Reward Maximization},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/da4902cb0bc38210839714ebdcf0efc3-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/da4902cb0bc38210839714ebdcf0efc3-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/da4902cb0bc38210839714ebdcf0efc3-Metadata.json", "review": "", "metareview": "", "pdf_size": 214524, "gs_citation": 179, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11247570711223261308&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "Computer Science Department, University of Rochester, Rochester, NY 14627; Computer Science Department, University of Rochester, Rochester, NY 14627", "aff_domain": "cs.rochester.edu;cs.rochester.edu", "email": "cs.rochester.edu;cs.rochester.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of Rochester", "aff_unique_dep": "Computer Science Department", "aff_unique_url": "https://www.rochester.edu", "aff_unique_abbr": "U of R", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Rochester", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "d3876fa328", "title": "Factorization with Uncertainty and Missing Data: Exploiting Temporal Coherence", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/8a20a8621978632d76c43dfd28b67767-Abstract.html", "author": "Amit Gruber; Yair Weiss", "abstract": "The problem of \\Structure From Motion\" is a central problem in vision: given the 2D locations of certain points we wish to recover the camera motion and the 3D coordinates of the points. Un- der simpli\ufb02ed camera models, the problem reduces to factorizing a measurement matrix into the product of two low rank matrices. Each element of the measurement matrix contains the position of a point in a particular image. When all elements are observed, the problem can be solved trivially using SVD, but in any realistic sit- uation many elements of the matrix are missing and the ones that are observed have a di\ufb01erent directional uncertainty. Under these conditions, most existing factorization algorithms fail while human perception is relatively unchanged. In this paper we use the well known EM algorithm for factor analy- sis to perform factorization. This allows us to easily handle missing data and measurement uncertainty and more importantly allows us to place a prior on the temporal trajectory of the latent variables (the camera position). We show that incorporating this prior gives a signi\ufb02cant improvement in performance in challenging image se- quences.", "bibtex": "@inproceedings{NIPS2003_8a20a862,\n author = {Gruber, Amit and Weiss, Yair},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Factorization with Uncertainty and Missing Data: Exploiting Temporal Coherence},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/8a20a8621978632d76c43dfd28b67767-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/8a20a8621978632d76c43dfd28b67767-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/8a20a8621978632d76c43dfd28b67767-Metadata.json", "review": "", "metareview": "", "pdf_size": 121821, "gs_citation": 33, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17537142790508410358&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": ";", "aff_domain": ";", "email": ";", "github": "", "project": "aris.ss.uci.edu/cogsci/personnel/ho\u00aeman/cylinderapplet.html", "author_num": 2, "track": "main", "status": "Poster" }, { "id": "cee2e33f59", "title": "Fast Algorithms for Large-State-Space HMMs with Applications to Web Usage Analysis", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/9407c826d8e3c07ad37cb2d13d1cb641-Abstract.html", "author": "Pedro F. Felzenszwalb; Daniel P. Huttenlocher; Jon M. Kleinberg", "abstract": "In applying Hidden Markov Models to the analysis of massive data streams, it is often necessary to use an arti(cid:12)cially reduced set of states; this is due in large part to the fact that the basic HMM estimation algorithms have a quadratic dependence on the size of the state set. We present algorithms that reduce this computational bottleneck to linear or near-linear time, when the states can be embedded in an underlying grid of parameters. This type of state representation arises in many domains; in particular, we show an application to tra(cid:14)c analysis at a high-volume Web site.", "bibtex": "@inproceedings{NIPS2003_9407c826,\n author = {Felzenszwalb, Pedro and Huttenlocher, Daniel and Kleinberg, Jon},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Fast Algorithms for Large-State-Space HMMs with Applications to Web Usage Analysis},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/9407c826d8e3c07ad37cb2d13d1cb641-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/9407c826d8e3c07ad37cb2d13d1cb641-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/9407c826d8e3c07ad37cb2d13d1cb641-Metadata.json", "review": "", "metareview": "", "pdf_size": 130608, "gs_citation": 89, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1611728174409664328&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": ";;", "aff_domain": ";;", "email": ";;", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster" }, { "id": "d08a804e97", "title": "Fast Embedding of Sparse Similarity Graphs", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/4e0223a87610176ef0d24ef6d2dcde3a-Abstract.html", "author": "John C. Platt", "abstract": "This paper applies fast sparse multidimensional scaling (MDS) to a large graph of music similarity, with 267K vertices that represent artists, al- bums, and tracks; and 3.22M edges that represent similarity between those entities. Once vertices are assigned locations in a Euclidean space, the locations can be used to browse music and to generate playlists. 
MDS on very large sparse graphs can be effectively performed by a family of algorithms called Rectangular Dijkstra (RD) MDS algorithms. These RD algorithms operate on a dense rectangular slice of the distance matrix, created by calling Dijkstra a constant number of times. Two RD algorithms are compared: Landmark MDS, which uses the Nystr\u00f6m approximation to perform MDS; and a new algorithm called Fast Sparse Embedding, which uses FastMap. These algorithms compare favorably to Laplacian Eigenmaps, both in terms of speed and embedding quality.", "bibtex": "@inproceedings{NIPS2003_4e0223a8,\n author = {Platt, John},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Fast Embedding of Sparse Similarity Graphs},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/4e0223a87610176ef0d24ef6d2dcde3a-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/4e0223a87610176ef0d24ef6d2dcde3a-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/4e0223a87610176ef0d24ef6d2dcde3a-Metadata.json", "review": "", "metareview": "", "pdf_size": 129611, "gs_citation": 32, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14245776573241675654&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 2, "aff": "Microsoft Research", "aff_domain": "microsoft.com", "email": "microsoft.com", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "Microsoft", "aff_unique_dep": "Microsoft Research", "aff_unique_url": "https://www.microsoft.com/en-us/research", "aff_unique_abbr": "MSR", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "id": "09d7d8d79b", "title": "Fast Feature Selection from Microarray Expression Data via Multiplicative Large Margin Algorithms", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/ba3e9b6a519cfddc560b5d53210df1bd-Abstract.html", "author": "Claudio Gentile", "abstract": "New feature selection algorithms for linear threshold functions are described which combine backward elimination with an adaptive regularization method. This makes them particularly suitable to the classification of microarray expression data, where the goal is to obtain accurate rules depending on few genes only. Our algorithms are fast and easy to implement, since they center on an incremental (large margin) algorithm which allows us to avoid linear, quadratic or higher-order programming methods. We report on preliminary experiments with five known DNA microarray datasets. These experiments suggest that multiplicative large margin algorithms tend to outperform additive algorithms (such as SVM) on feature selection tasks.", "bibtex": "@inproceedings{NIPS2003_ba3e9b6a,\n author = {Gentile, Claudio},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Fast Feature Selection from Microarray Expression Data via Multiplicative Large Margin Algorithms},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/ba3e9b6a519cfddc560b5d53210df1bd-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/ba3e9b6a519cfddc560b5d53210df1bd-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/ba3e9b6a519cfddc560b5d53210df1bd-Metadata.json", "review": "", "metareview": "", "pdf_size": 94233, "gs_citation": 15, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3167663991738633602&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 11, "aff": "DICOM, Universit `a dell\u2019Insubria", "aff_domain": "dsi.unimi.it", "email": "dsi.unimi.it", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "Universit\u00e0 dell\u2019Insubria", "aff_unique_dep": "DICOM", "aff_unique_url": "https://www.uninsubria.it", "aff_unique_abbr": "", "aff_country_unique_index": "0", "aff_country_unique": "Italy" }, { "id": "2641cbd656", "title": "Feature Selection in Clustering Problems", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/bb03e43ffe34eeb242a2ee4a4f125e56-Abstract.html", "author": "Volker Roth; Tilman Lange", "abstract": "A novel approach to combining clustering and feature selection is pre- sented. It implements a wrapper strategy for feature selection, in the sense that the features are directly selected by optimizing the discrimina- tive power of the used partitioning algorithm. On the technical side, we present an ef\ufb01cient optimization algorithm with guaranteed local con- vergence property. The only free parameter of this method is selected by a resampling-based stability analysis. Experiments with real-world datasets demonstrate that our method is able to infer both meaningful partitions and meaningful subsets of features.", "bibtex": "@inproceedings{NIPS2003_bb03e43f,\n author = {Roth, Volker and Lange, Tilman},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Feature Selection in Clustering Problems},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/bb03e43ffe34eeb242a2ee4a4f125e56-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/bb03e43ffe34eeb242a2ee4a4f125e56-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/bb03e43ffe34eeb242a2ee4a4f125e56-Metadata.json", "review": "", "metareview": "", "pdf_size": 154226, "gs_citation": 204, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11199842206999101843&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "ETH Zurich, Institut f. Computational Science; ETH Zurich, Institut f. 
Computational Science", "aff_domain": "inf.ethz.ch;inf.ethz.ch", "email": "inf.ethz.ch;inf.ethz.ch", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "ETH Zurich", "aff_unique_dep": "Institut f\u00fcr Computational Science", "aff_unique_url": "https://www.ethz.ch", "aff_unique_abbr": "ETHZ", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "Switzerland" }, { "id": "45a740c494", "title": "Finding the M Most Probable Configurations using Loopy Belief Propagation", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/70fcb77e6349f4467edd7227baa73222-Abstract.html", "author": "Chen Yanover; Yair Weiss", "abstract": "Loopy belief propagation (BP) has been successfully used in a number of difficult graphical models to find the most probable configuration of the hidden variables. In applications ranging from protein folding to image analysis one would like to find not just the best configuration but rather the top M. While this problem has been solved using the junction tree formalism, in many real world problems the clique size in the junction tree is prohibitively large. In this work we address the problem of finding the M best configurations when exact inference is impossible. We start by developing a new exact inference algorithm for calculating the best configurations that uses only max-marginals. For approximate inference, we replace the max-marginals with the beliefs calculated using max-product BP and generalized BP. We show empirically that the algorithm can accurately and rapidly approximate the M best configurations in graphs with hundreds of variables.", "bibtex": "@inproceedings{NIPS2003_70fcb77e,\n author = {Yanover, Chen and Weiss, Yair},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Finding the M Most Probable Configurations using Loopy Belief Propagation},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/70fcb77e6349f4467edd7227baa73222-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/70fcb77e6349f4467edd7227baa73222-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/70fcb77e6349f4467edd7227baa73222-Metadata.json", "review": "", "metareview": "", "pdf_size": 201953, "gs_citation": 124, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10705592588520492656&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 8, "aff": ";", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster" }, { "id": "7a957ee827", "title": "From Algorithmic to Subjective Randomness", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/b06b5541a62ed438f956b662b4e1ec28-Abstract.html", "author": "Thomas L. Griffiths; Joshua B. Tenenbaum", "abstract": "We explore the phenomena of subjective randomness as a case study in understanding how people discover structure embedded in noise. We present a rational account of randomness perception based on the statistical problem of model selection: given a stimulus, inferring whether the process that generated it was random or regular. 
Inspired by the mathe- matical definition of randomness given by Kolmogorov complexity, we characterize regularity in terms of a hierarchy of automata that augment a finite controller with different forms of memory. We find that the reg- ularities detected in binary sequences depend upon presentation format, and that the kinds of automata that can identify these regularities are in- formative about the cognitive processes engaged by different formats.", "bibtex": "@inproceedings{NIPS2003_b06b5541,\n author = {Griffiths, Thomas and Tenenbaum, Joshua},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {From Algorithmic to Subjective Randomness},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/b06b5541a62ed438f956b662b4e1ec28-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/b06b5541a62ed438f956b662b4e1ec28-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/b06b5541a62ed438f956b662b4e1ec28-Metadata.json", "review": "", "metareview": "", "pdf_size": 76941, "gs_citation": 36, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14721764738308036578&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 12, "aff": "Massachusetts Institute of Technology; Massachusetts Institute of Technology", "aff_domain": "mit.edu;mit.edu", "email": "mit.edu;mit.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Massachusetts Institute of Technology", "aff_unique_dep": "", "aff_unique_url": "https://web.mit.edu", "aff_unique_abbr": "MIT", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "2ed1e241fe", "title": "GPPS: A Gaussian Process Positioning System for Cellular Networks", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/e92d74ccacdc984afa0c517ad0d557a6-Abstract.html", "author": "Anton Schwaighofer; Marian Grigoras; Volker Tresp; Clemens Hoffmann", "abstract": "In this article, we present a novel approach to solving the localization problem in cellular networks. The goal is to estimate a mobile user\u2019s position, based on measurements of the signal strengths received from network base stations. Our solution works by building Gaussian process models for the distribution of signal strengths, as obtained in a series of calibration measurements. In the localization stage, the user\u2019s posi- tion can be estimated by maximizing the likelihood of received signal strengths with respect to the position. We investigate the accuracy of the proposed approach on data obtained within a large indoor cellular network.", "bibtex": "@inproceedings{NIPS2003_e92d74cc,\n author = {Schwaighofer, Anton and Grigoras, Marian and Tresp, Volker and Hoffmann, Clemens},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {GPPS: A Gaussian Process Positioning System for Cellular Networks},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/e92d74ccacdc984afa0c517ad0d557a6-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/e92d74ccacdc984afa0c517ad0d557a6-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/e92d74ccacdc984afa0c517ad0d557a6-Metadata.json", "review": "", "metareview": "", "pdf_size": 120338, "gs_citation": 183, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15609690581376137342&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "Siemens Corporate Technology, Information and Communications+Institute for Theoretical Computer Science, Graz University of Technology; Siemens Corporate Technology, Information and Communications; Siemens Corporate Technology, Information and Communications; Siemens Corporate Technology, Information and Communications", "aff_domain": "igi.tugraz.at; ; ; ", "email": "igi.tugraz.at; ; ; ", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0+1;0;0;0", "aff_unique_norm": "Siemens AG;Graz University of Technology", "aff_unique_dep": "Corporate Technology;Institute for Theoretical Computer Science", "aff_unique_url": "https://www.siemens.com;https://www.tugraz.at", "aff_unique_abbr": "Siemens;TU Graz", "aff_campus_unique_index": "1", "aff_campus_unique": ";Graz", "aff_country_unique_index": "0+1;0;0;0", "aff_country_unique": "Germany;Austria" }, { "id": "2233e89378", "title": "Gaussian Process Latent Variable Models for Visualisation of High Dimensional Data", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/9657c1fffd38824e5ab0472e022e577e-Abstract.html", "author": "Neil D. Lawrence", "abstract": "In this paper we introduce a new underlying probabilistic model for prin- cipal component analysis (PCA). Our formulation interprets PCA as a particular Gaussian process prior on a mapping from a latent space to the observed data-space. We show that if the prior\u2019s covariance func- tion constrains the mappings to be linear the model is equivalent to PCA, we then extend the model by considering less restrictive covariance func- tions which allow non-linear mappings. This more general Gaussian pro- cess latent variable model (GPLVM) is then evaluated as an approach to the visualisation of high dimensional data for three different data-sets. Additionally our non-linear algorithm can be further kernelised leading to \u2018twin kernel PCA\u2019 in which a mapping between feature spaces occurs.", "bibtex": "@inproceedings{NIPS2003_9657c1ff,\n author = {Lawrence, Neil},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Gaussian Process Latent Variable Models for Visualisation of High Dimensional Data},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/9657c1fffd38824e5ab0472e022e577e-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/9657c1fffd38824e5ab0472e022e577e-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/9657c1fffd38824e5ab0472e022e577e-Metadata.json", "review": "", "metareview": "", "pdf_size": 546904, "gs_citation": 1235, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15815256923191800798&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 11, "aff": "Department of Computer Science, University of Shef\ufb01eld", "aff_domain": "dcs.shef.ac.uk", "email": "dcs.shef.ac.uk", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "University of Sheffield", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.sheffield.ac.uk", "aff_unique_abbr": "Sheffield", "aff_country_unique_index": "0", "aff_country_unique": "United Kingdom" }, { "id": "2063187578", "title": "Gaussian Processes in Reinforcement Learning", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/7993e11204b215b27694b6f139e34ce8-Abstract.html", "author": "Malte Kuss; Carl E. Rasmussen", "abstract": "We exploit some useful properties of Gaussian process (GP) regression models for reinforcement learning in continuous state spaces and dis- crete time. We demonstrate how the GP model allows evaluation of the value function in closed form. The resulting policy iteration algorithm is demonstrated on a simple problem with a two dimensional state space. Further, we speculate that the intrinsic ability of GP models to charac- terise distributions of functions would allow the method to capture entire distributions over future values instead of merely their expectation, which has traditionally been the focus of much of reinforcement learning.", "bibtex": "@inproceedings{NIPS2003_7993e112,\n author = {Kuss, Malte and Rasmussen, Carl},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Gaussian Processes in Reinforcement Learning},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/7993e11204b215b27694b6f139e34ce8-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/7993e11204b215b27694b6f139e34ce8-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/7993e11204b215b27694b6f139e34ce8-Metadata.json", "review": "", "metareview": "", "pdf_size": 213269, "gs_citation": 388, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7352429115160881544&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": ";", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster" }, { "id": "352fbb9a1e", "title": "Gene Expression Clustering with Functional Mixture Models", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/9ed9328611fe3f45b3cce8ffe386ee97-Abstract.html", "author": "Darya Chudova; Christopher Hart; Eric Mjolsness; Padhraic Smyth", "abstract": "We propose a functional mixture model for simultaneous clustering and alignment of sets of curves measured on a discrete time grid. The model is speci\ufb01cally tailored to gene expression time course data. Each func- tional cluster center is a nonlinear combination of solutions of a simple linear differential equation that describes the change of individual mRNA levels when the synthesis and decay rates are constant. The mixture of continuous time parametric functional forms allows one to (a) account for the heterogeneity in the observed pro\ufb01les, (b) align the pro\ufb01les in time by estimating real-valued time shifts, (c) capture the synthesis and decay of mRNA in the course of an experiment, and (d) regularize noisy pro\ufb01les by enforcing smoothness in the mean curves. We derive an EM algo- rithm for estimating the parameters of the model, and apply the proposed approach to the set of cycling genes in yeast. The experiments show consistent improvement in predictive power and within cluster variance compared to regular Gaussian mixtures.", "bibtex": "@inproceedings{NIPS2003_9ed93286,\n author = {Chudova, Darya and Hart, Christopher and Mjolsness, Eric and Smyth, Padhraic},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Gene Expression Clustering with Functional Mixture Models},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/9ed9328611fe3f45b3cce8ffe386ee97-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/9ed9328611fe3f45b3cce8ffe386ee97-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/9ed9328611fe3f45b3cce8ffe386ee97-Metadata.json", "review": "", "metareview": "", "pdf_size": 91812, "gs_citation": 17, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15980466343375548257&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 18, "aff": "Department of Computer Science, University of California, Irvine; Division of Biology, California Institute of Technology; Department of Computer Science, University of California, Irvine; Department of Computer Science, University of California, Irvine", "aff_domain": "ics.uci.edu;caltech.edu;uci.edu;ics.uci.edu", "email": "ics.uci.edu;caltech.edu;uci.edu;ics.uci.edu", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;1;0;0", "aff_unique_norm": "University of California, Irvine;California Institute of Technology", "aff_unique_dep": "Department of Computer Science;Division of Biology", "aff_unique_url": "https://www.uci.edu;https://www.caltech.edu", "aff_unique_abbr": "UCI;Caltech", "aff_campus_unique_index": "0;1;0;0", "aff_campus_unique": "Irvine;Pasadena", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "id": "849556aede", "title": "Generalised Propagation for Fast Fourier Transforms with Partial or Missing Data", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/8c1b6fa97c4288a4514365198566c6fa-Abstract.html", "author": "Amos J. Storkey", "abstract": "Discrete Fourier transforms and other related Fourier methods have been practically implementable due to the fast Fourier transform (FFT). However there are many situations where doing fast Fourier transforms without complete data would be desirable. In this pa- per it is recognised that formulating the FFT algorithm as a belief network allows suitable priors to be set for the Fourier coe(cid:14)cients. Furthermore e(cid:14)cient generalised belief propagation methods be- tween clusters of four nodes enable the Fourier coe(cid:14)cients to be inferred and the missing data to be estimated in near to O(n log n) time, where n is the total of the given and missing data points. This method is compared with a number of common approaches such as setting missing data to zero or to interpolation. It is tested on generated data and for a Fourier analysis of a damaged audio signal.", "bibtex": "@inproceedings{NIPS2003_8c1b6fa9,\n author = {Storkey, Amos J},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Generalised Propagation for Fast Fourier Transforms with Partial or Missing Data},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/8c1b6fa97c4288a4514365198566c6fa-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/8c1b6fa97c4288a4514365198566c6fa-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/8c1b6fa97c4288a4514365198566c6fa-Metadata.json", "review": "", "metareview": "", "pdf_size": 97534, "gs_citation": 14, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11345072916613723957&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 15, "aff": "School of Informatics, University of Edinburgh", "aff_domain": "ed.ac.uk", "email": "ed.ac.uk", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "University of Edinburgh", "aff_unique_dep": "School of Informatics", "aff_unique_url": "https://www.ed.ac.uk", "aff_unique_abbr": "Edinburgh", "aff_campus_unique_index": "0", "aff_campus_unique": "Edinburgh", "aff_country_unique_index": "0", "aff_country_unique": "United Kingdom" }, { "id": "1fcbe41e4d", "title": "Geometric Analysis of Constrained Curves", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/415e1af7ea95f89f4e375162b21ae38c-Abstract.html", "author": "Anuj Srivastava; Washington Mio; Xiuwen Liu; Eric Klassen", "abstract": "We present a geometric approach to statistical shape analysis of closed curves in images. The basic idea is to specify a space of closed curves satisfying given constraints, and exploit the differential geometry of this space to solve optimization and inference problems. We demonstrate this approach by: (i) de\ufb01ning and computing statistics of observed shapes, (ii) de\ufb01ning and learning a parametric probability model on shape space, and (iii) designing a binary hypothesis test on this space.", "bibtex": "@inproceedings{NIPS2003_415e1af7,\n author = {Srivastava, Anuj and Mio, Washington and Liu, Xiuwen and Klassen, Eric},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Geometric Analysis of Constrained Curves},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/415e1af7ea95f89f4e375162b21ae38c-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/415e1af7ea95f89f4e375162b21ae38c-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/415e1af7ea95f89f4e375162b21ae38c-Metadata.json", "review": "", "metareview": "", "pdf_size": 217011, "gs_citation": 0, "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:YzlR8KvVsP0J:scholar.google.com/&scioq=Geometric+Analysis+of+Constrained+Curves&hl=en&as_sdt=0,5", "gs_version_total": 5, "aff": "Department of Statistics, Florida State University; Department of Mathematics, Florida State University; Department of Computer Science, Florida State University; Department of Mathematics, Florida State University", "aff_domain": "stat.fsu.edu;math.fsu.edu;cs.fsu.edu;math.fsu.edu", "email": "stat.fsu.edu;math.fsu.edu;cs.fsu.edu;math.fsu.edu", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Florida State University", "aff_unique_dep": "Department of Statistics", "aff_unique_url": "https://www.fsu.edu", "aff_unique_abbr": "FSU", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Tallahassee;", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "id": "a7ac7a8418", "title": "Geometric Clustering Using the Information Bottleneck Method", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/794288f252f45d35735a13853e605939-Abstract.html", "author": "Susanne Still; William Bialek; L\u00e9on Bottou", "abstract": "We argue that K\u2013means and deterministic annealing algorithms for geo- metric clustering can be derived from the more general Information Bot- tleneck approach. If we cluster the identities of data points to preserve information about their location, the set of optimal solutions is massively degenerate. But if we treat the equations that de\ufb01ne the optimal solution as an iterative algorithm, then a set of \u201csmooth\u201d initial conditions selects solutions with the desired geometrical properties. In addition to concep- tual uni\ufb01cation, we argue that this approach can be more ef\ufb01cient and robust than classic algorithms.", "bibtex": "@inproceedings{NIPS2003_794288f2,\n author = {Still, Susanne and Bialek, William and Bottou, L\\'{e}on},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Geometric Clustering Using the Information Bottleneck Method},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/794288f252f45d35735a13853e605939-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/794288f252f45d35735a13853e605939-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/794288f252f45d35735a13853e605939-Metadata.json", "review": "", "metareview": "", "pdf_size": 115210, "gs_citation": 59, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12997680476277309939&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 15, "aff": "Department of Physics, Princeton University, Princeton, NJ 08544; Department of Physics, Princeton University, Princeton, NJ 08544; NEC Laboratories America, 4 Independence Way, Princeton, NJ 08540", "aff_domain": "princeton.edu;princeton.edu;bottou.org", "email": "princeton.edu;princeton.edu;bottou.org", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;1", "aff_unique_norm": "Princeton University;NEC Laboratories America", "aff_unique_dep": "Department of Physics;", "aff_unique_url": "https://www.princeton.edu;https://www.nec-labs.com", "aff_unique_abbr": "Princeton;NEC Labs", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Princeton", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "d37b69aae5", "title": "Hierarchical Topic Models and the Nested Chinese Restaurant Process", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/7b41bfa5085806dfa24b8c9de0ce567f-Abstract.html", "author": "Thomas L. Griffiths; Michael I. Jordan; Joshua B. Tenenbaum; David M. Blei", "abstract": "We address the problem of learning topic hierarchies from data. The model selection problem in this domain is daunting\u2014which of the large collection of possible trees to use? We take a Bayesian approach, gen- erating an appropriate prior via a distribution on partitions that we refer to as the nested Chinese restaurant process. This nonparametric prior al- lows arbitrarily large branching factors and readily accommodates grow- ing data collections. We build a hierarchical topic model by combining this prior with a likelihood that is based on a hierarchical variant of latent Dirichlet allocation. We illustrate our approach on simulated data and with an application to the modeling of NIPS abstracts.", "bibtex": "@inproceedings{NIPS2003_7b41bfa5,\n author = {Griffiths, Thomas and Jordan, Michael and Tenenbaum, Joshua and Blei, David},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Hierarchical Topic Models and the Nested Chinese Restaurant Process},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/7b41bfa5085806dfa24b8c9de0ce567f-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/7b41bfa5085806dfa24b8c9de0ce567f-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/7b41bfa5085806dfa24b8c9de0ce567f-Metadata.json", "review": "", "metareview": "", "pdf_size": 83723, "gs_citation": 1603, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3610011262516386804&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 28, "aff": "University of California, Berkeley; Massachusetts Institute of Technology; University of California, Berkeley; Massachusetts Institute of Technology", "aff_domain": "cs.berkeley.edu;mit.edu;cs.berkeley.edu;mit.edu", "email": "cs.berkeley.edu;mit.edu;cs.berkeley.edu;mit.edu", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;1;0;1", "aff_unique_norm": "University of California, Berkeley;Massachusetts Institute of Technology", "aff_unique_dep": ";", "aff_unique_url": "https://www.berkeley.edu;https://web.mit.edu", "aff_unique_abbr": "UC Berkeley;MIT", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Berkeley;", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "id": "0a2d2d2443", "title": "How to Combine Expert (and Novice) Advice when Actions Impact the Environment?", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/3430095c577593aad3c39c701712bcfe-Abstract.html", "author": "Daniela Pucci de Farias; Nimrod Megiddo", "abstract": "The so-called \u201cexperts algorithms\u201d constitute a methodology for choos- ing actions repeatedly, when the rewards depend both on the choice of action and on the unknown current state of the environment. An experts algorithm has access to a set of strategies (\u201cexperts\u201d), each of which may recommend which action to choose. The algorithm learns how to com- bine the recommendations of individual experts so that, in the long run, for any \ufb01xed sequence of states of the environment, it does as well as the best expert would have done relative to the same sequence. This method- ology may not be suitable for situations where the evolution of states of the environment depends on past chosen actions, as is usually the case, for example, in a repeated non-zero-sum game. A new experts algorithm is presented and analyzed in the context of re- peated games. It is shown that asymptotically, under certain conditions, it performs as well as the best available expert. This algorithm is quite different from previously proposed experts algorithms. It represents a shift from the paradigms of regret minimization and myopic optimiza- tion to consideration of the long-term effect of a player\u2019s actions on the opponent\u2019s actions or the environment. The importance of this shift is demonstrated by the fact that this algorithm is capable of inducing co- operation in the repeated Prisoner\u2019s Dilemma game, whereas previous experts algorithms converge to the suboptimal non-cooperative play.", "bibtex": "@inproceedings{NIPS2003_3430095c,\n author = {de Farias, Daniela and Megiddo, Nimrod},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {How to Combine Expert (and Novice) Advice when Actions Impact the Environment?},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/3430095c577593aad3c39c701712bcfe-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/3430095c577593aad3c39c701712bcfe-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/3430095c577593aad3c39c701712bcfe-Metadata.json", "review": "", "metareview": "", "pdf_size": 138266, "gs_citation": 58, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7763793169045761653&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 11, "aff": "Department of Mechanical Engineering, Massachusetts Institute of Technology; IBM Almaden Research Center", "aff_domain": "mit.edu;almaden.ibm.com", "email": "mit.edu;almaden.ibm.com", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "Massachusetts Institute of Technology;IBM", "aff_unique_dep": "Department of Mechanical Engineering;Research Center", "aff_unique_url": "https://web.mit.edu;https://www.ibm.com/research", "aff_unique_abbr": "MIT;IBM", "aff_campus_unique_index": "0;1", "aff_campus_unique": "Cambridge;Almaden", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "d1a5210b7a", "title": "Human and Ideal Observers for Detecting Image Curves", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/1f34004ebcb05f9acda6016d5cc52d5e-Abstract.html", "author": "Fang Fang; Daniel Kersten; Paul R. Schrater; Alan L. Yuille", "abstract": "This paper compares the ability of human observers to detect target im- age curves with that of an ideal observer. The target curves are sam- pled from a generative model which speci\ufb01es (probabilistically) the ge- ometry and local intensity properties of the curve. The ideal observer performs Bayesian inference on the generative model using MAP esti- mation. Varying the probability model for the curve geometry enables us investigate whether human performance is best for target curves that obey speci\ufb01c shape statistics, in particular those observed on natural shapes. Experiments are performed with data on both rectangular and hexagonal lattices. Our results show that human observers\u2019 performance approaches that of the ideal observer and are, in general, closest to the ideal for con- ditions where the target curve tends to be straight or similar to natural statistics on curves. This suggests a bias of human observers towards straight curves and natural statistics.", "bibtex": "@inproceedings{NIPS2003_1f34004e,\n author = {Fang, Fang and Kersten, Daniel and Schrater, Paul R and Yuille, Alan L},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Human and Ideal Observers for Detecting Image Curves},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/1f34004ebcb05f9acda6016d5cc52d5e-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/1f34004ebcb05f9acda6016d5cc52d5e-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/1f34004ebcb05f9acda6016d5cc52d5e-Metadata.json", "review": "", "metareview": "", "pdf_size": 244538, "gs_citation": 24, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9938638694911011423&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 21, "aff": "Department of Statistics & Psychology, University of California Los Angeles; Psychology, University of Minnesota; Psychology, University of Minnesota; Psychology, University of Minnesota", "aff_domain": "stat.ucla.edu;tc.umn.edu;umn.edu;umn.edu", "email": "stat.ucla.edu;tc.umn.edu;umn.edu;umn.edu", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;1;1;1", "aff_unique_norm": "University of California, Los Angeles;University of Minnesota", "aff_unique_dep": "Department of Statistics & Psychology;Department of Psychology", "aff_unique_url": "https://www.ucla.edu;https://www.minnesota.edu", "aff_unique_abbr": "UCLA;UMN", "aff_campus_unique_index": "0", "aff_campus_unique": "Los Angeles;", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "id": "89a96a15b7", "title": "ICA-based Clustering of Genes from Microarray Expression Data", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/dcf6070a4ab7f3afbfd2809173e0824b-Abstract.html", "author": "Su-in Lee; Serafim Batzoglou", "abstract": "We propose an unsupervised methodology using independent component analysis (ICA) to cluster genes from DNA microarray data. Based on an ICA mixture model of genomic expression patterns, linear and nonlinear ICA finds components that are specific to certain biological processes. Genes that exhibit significant up-regulation or down-regulation within each component are grouped into clusters. We test the statistical significance of enrichment of gene annotations within each cluster. ICA-based clustering outperformed other leading methods in constructing functionally coherent clusters on various datasets. This result supports our model of genomic expression data as composite effect of independent biological processes. Comparison of clustering performance among various including a kernel-based nonlinear ICA algorithm shows that nonlinear ICA performed the best for small datasets and natural-gradient maximization-likelihood worked well for all the datasets.", "bibtex": "@inproceedings{NIPS2003_dcf6070a,\n author = {Lee, Su-in and Batzoglou, Serafim},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {ICA-based Clustering of Genes from Microarray Expression Data},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/dcf6070a4ab7f3afbfd2809173e0824b-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/dcf6070a4ab7f3afbfd2809173e0824b-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/dcf6070a4ab7f3afbfd2809173e0824b-Metadata.json", "review": "", "metareview": "", "pdf_size": 302677, "gs_citation": 11, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15607002219983948996&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 9, "aff": "Department of Electrical Engineering; Department of Computer Science", "aff_domain": "stanford.edu;cs.stanford.edu", "email": "stanford.edu;cs.stanford.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "Institution not specified;Unknown Institution", "aff_unique_dep": "Department of Electrical Engineering;Department of Computer Science", "aff_unique_url": ";", "aff_unique_abbr": ";", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "", "aff_country_unique": "" }, { "id": "ba8ad66de5", "title": "Identifying Structure across Pre-partitioned Data", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/de7092ba6df4276921d27a3704c57998-Abstract.html", "author": "Zvika Marx; Ido Dagan; Eli Shamir", "abstract": "We propose an information-theoretic clustering approach that incorporates a pre-known partition of the data, aiming to identify common clusters that cut across the given partition. In the standard clustering setting the formation of clusters is guided by a single source of feature information. The newly utilized pre-partition factor introduces an additional bias that counterbalances the impact of the features whenever they become correlated with this known partition. The resulting algorithmic framework was applied successfully to synthetic data, as well as to identifying text-based cross-religion correspondences.", "bibtex": "@inproceedings{NIPS2003_de7092ba,\n author = {Marx, Zvika and Dagan, Ido and Shamir, Eli},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Identifying Structure across Pre-partitioned Data},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/de7092ba6df4276921d27a3704c57998-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/de7092ba6df4276921d27a3704c57998-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/de7092ba6df4276921d27a3704c57998-Metadata.json", "review": "", "metareview": "", "pdf_size": 171734, "gs_citation": 3, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7659701453110978476&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 6, "aff": ";;", "aff_domain": ";;", "email": ";;", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster" }, { "id": "77614c825a", "title": "Image Reconstruction by Linear Programming", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/c3a690be93aa602ee2dc0ccab5b7b67e-Abstract.html", "author": "Koji Tsuda; Gunnar R\u00e4tsch", "abstract": "A common way of image denoising is to project a noisy image to the sub- space of admissible images made for instance by PCA. However, a major drawback of this method is that all pixels are updated by the projection, even when only a few pixels are corrupted by noise or occlusion. We pro- pose a new method to identify the noisy pixels by (cid:1) 1-norm penalization and update the identi\ufb01ed pixels only. The identi\ufb01cation and updating of noisy pixels are formulated as one linear program which can be solved ef\ufb01ciently. Especially, one can apply the \u03bd-trick to directly specify the fraction of pixels to be reconstructed. Moreover, we extend the linear program to be able to exploit prior knowledge that occlusions often ap- pear in contiguous blocks (e.g. sunglasses on faces). The basic idea is to penalize boundary points and interior points of the occluded area dif- ferently. We are able to show the \u03bd-property also for this extended LP leading a method which is easy to use. Experimental results impressively demonstrate the power of our approach.", "bibtex": "@inproceedings{NIPS2003_c3a690be,\n author = {Tsuda, Koji and R\\\"{a}tsch, Gunnar},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Image Reconstruction by Linear Programming},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/c3a690be93aa602ee2dc0ccab5b7b67e-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/c3a690be93aa602ee2dc0ccab5b7b67e-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/c3a690be93aa602ee2dc0ccab5b7b67e-Metadata.json", "review": "", "metareview": "", "pdf_size": 115509, "gs_citation": 39, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10768832952524154894&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 25, "aff": "Max Planck Institute for Biological Cybernetics+ AIST CBRC; Max Planck Institute for Biological Cybernetics+ Fraunhofer FIRST", "aff_domain": "tuebingen.mpg.de;tuebingen.mpg.de", "email": "tuebingen.mpg.de;tuebingen.mpg.de", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0+1;0+2", "aff_unique_norm": "Max Planck Institute for Biological Cybernetics;AIST;Fraunhofer Institute for Software and Systems Engineering", "aff_unique_dep": "Biological Cybernetics;CBRC;", "aff_unique_url": "https://www.biocybernetics.mpg.de;https://www.aist.go.jp;https://www.first.fraunhofer.de/", "aff_unique_abbr": "MPIBC;AIST;Fraunhofer FIRST", "aff_campus_unique_index": ";", "aff_campus_unique": "", "aff_country_unique_index": "0+1;0+0", "aff_country_unique": "Germany;Japan" }, { "id": "d05fef5812", "title": "Impact of an Energy Normalization Transform on the Performance of the LF-ASD Brain Computer Interface", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/be1df9a5d08724971f64a511e24fc904-Abstract.html", "author": "Yu Zhou; Steven G. Mason; Gary E. Birch", "abstract": "This paper presents an energy normalization transform as a method to reduce system errors in the LF-ASD brain-computer interface. The energy normalization transform has two major benefits to the system performance. First, it can increase class separation between the active and idle EEG data. Second, it can desensitize the system to the signal amplitude variability. For four subjects in the study, the benefits resulted in the performance improvement of the LF-ASD in the range from 7.7% to 18.9%, while for the fifth subject, who had the highest non-normalized accuracy of 90.5%, the performance did not change notably with normalization.", "bibtex": "@inproceedings{NIPS2003_be1df9a5,\n author = {Zhou, Yu and Mason, Steven and Birch, Gary},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Impact of an Energy Normalization Transform on the Performance of the LF-ASD Brain Computer Interface},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/be1df9a5d08724971f64a511e24fc904-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/be1df9a5d08724971f64a511e24fc904-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/be1df9a5d08724971f64a511e24fc904-Metadata.json", "review": "", "metareview": "", "pdf_size": 273248, "gs_citation": 7, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7916051770517683509&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": ";;", "aff_domain": ";;", "email": ";;", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster" }, { "id": "ce2c437fdb", "title": "Increase Information Transfer Rates in BCI by CSP Extension to Multi-class", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/ea159dc9788ffac311592613b7f71fbb-Abstract.html", "author": "Guido Dornhege; Benjamin Blankertz; Gabriel Curio; Klaus-Robert M\u00fcller", "abstract": "Brain-Computer Interfaces (BCI) are an interesting emerging technology that is driven by the motivation to develop an effective communication in- terface translating human intentions into a control signal for devices like computers or neuroprostheses. If this can be done bypassing the usual hu- man output pathways like peripheral nerves and muscles it can ultimately become a valuable tool for paralyzed patients. Most activity in BCI re- search is devoted to \ufb01nding suitable features and algorithms to increase information transfer rates (ITRs). The present paper studies the implica- tions of using more classes, e.g., left vs. right hand vs. foot, for operating a BCI. We contribute by (1) a theoretical study showing under some mild assumptions that it is practically not useful to employ more than three or four classes, (2) two extensions of the common spatial pattern (CSP) algorithm, one interestingly based on simultaneous diagonalization, and (3) controlled EEG experiments that underline our theoretical \ufb01ndings and show excellent improved ITRs.", "bibtex": "@inproceedings{NIPS2003_ea159dc9,\n author = {Dornhege, Guido and Blankertz, Benjamin and Curio, Gabriel and M\\\"{u}ller, Klaus-Robert},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Increase Information Transfer Rates in BCI by CSP Extension to Multi-class},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/ea159dc9788ffac311592613b7f71fbb-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/ea159dc9788ffac311592613b7f71fbb-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/ea159dc9788ffac311592613b7f71fbb-Metadata.json", "review": "", "metareview": "", "pdf_size": 206520, "gs_citation": 156, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6052952403985345536&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 10, "aff": "Fraunhofer FIRST.IDA; Fraunhofer FIRST.IDA; Neurophysics Group, Dept. 
of Neurology, Klinikum Benjamin Franklin, Freie Universit\u00e4t Berlin; Fraunhofer FIRST.IDA+University of Potsdam", "aff_domain": "first.fraunhofer.de;first.fraunhofer.de;zedat.fu-berlin.de;first.fraunhofer.de", "email": "first.fraunhofer.de;first.fraunhofer.de;zedat.fu-berlin.de;first.fraunhofer.de", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;0;1;0+2", "aff_unique_norm": "Fraunhofer Institute for Software and Systems Engineering;Freie Universit\u00e4t Berlin;University of Potsdam", "aff_unique_dep": "FIRST.IDA;Dept. of Neurology;", "aff_unique_url": "https://www.first.ida.fraunhofer.de/;https://www.fu-berlin.de;https://www.uni-potsdam.de", "aff_unique_abbr": "Fraunhofer FIRST.IDA;FU Berlin;UP", "aff_campus_unique_index": "1;", "aff_campus_unique": ";Berlin", "aff_country_unique_index": "0;0;0;0+0", "aff_country_unique": "Germany" }, { "id": "59d9db36ec", "title": "Inferring State Sequences for Non-linear Systems with Embedded Hidden Markov Models", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/102f0bb6efb3a6128a3c750dd16729be-Abstract.html", "author": "Radford M. Neal; Matthew J. Beal; Sam T. Roweis", "abstract": "We describe a Markov chain method for sampling from the distribution of the hidden state sequence in a non-linear dynamical system, given a sequence of observations. This method updates all states in the sequence simultaneously using an embedded Hidden Markov Model (HMM). An update begins with the creation of \u201cpools\u201d of candidate states at each time. We then de\ufb01ne an embedded HMM whose states are indexes within these pools. Using a forward-backward dynamic programming algo- rithm, we can ef\ufb01ciently choose a state sequence with the appropriate probabilities from the exponentially large number of state sequences that pass through states in these pools. We illustrate the method in a simple one-dimensional example, and in an example showing how an embed- ded HMM can be used to in effect discretize the state space without any discretization error. We also compare the embedded HMM to a particle smoother on a more substantial problem of inferring human motion from 2D traces of markers.", "bibtex": "@inproceedings{NIPS2003_102f0bb6,\n author = {Neal, Radford and Beal, Matthew and Roweis, Sam},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Inferring State Sequences for Non-linear Systems with Embedded Hidden Markov Models},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/102f0bb6efb3a6128a3c750dd16729be-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/102f0bb6efb3a6128a3c750dd16729be-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/102f0bb6efb3a6128a3c750dd16729be-Metadata.json", "review": "", "metareview": "", "pdf_size": 206761, "gs_citation": 60, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7328084055728772505&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 24, "aff": "Department of Computer Science, University of Toronto; Department of Computer Science, University of Toronto; Department of Computer Science, University of Toronto", "aff_domain": "cs.utoronto.ca;cs.utoronto.ca;cs.utoronto.ca", "email": "cs.utoronto.ca;cs.utoronto.ca;cs.utoronto.ca", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of Toronto", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.utoronto.ca", "aff_unique_abbr": "U of T", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Toronto", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Canada" }, { "id": "12a8e11bf6", "title": "Information Bottleneck for Gaussian Variables", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/7e05d6f828574fbc975a896b25bb011e-Abstract.html", "author": "Gal Chechik; Amir Globerson; Naftali Tishby; Yair Weiss", "abstract": "The problem of extracting the relevant aspects of data was ad- dressed through the information bottleneck (IB) method, by (soft) clustering one variable while preserving information about another - relevance - variable. An interesting question addressed in the current work is the extension of these ideas to obtain continuous representations that preserve relevant information, rather than dis- crete clusters. We give a formal de\ufb02nition of the general continuous IB problem and obtain an analytic solution for the optimal repre- sentation for the important case of multivariate Gaussian variables. The obtained optimal representation is a noisy linear projection to eigenvectors of the normalized correlation matrix \u00a7xjy\u00a7\u00a11 x , which is also the basis obtained in Canonical Correlation Analysis. How- ever, in Gaussian IB, the compression tradeo\ufb01 parameter uniquely determines the dimension, as well as the scale of each eigenvector. This introduces a novel interpretation where solutions of di\ufb01erent ranks lie on a continuum parametrized by the compression level. Our analysis also provides an analytic expression for the optimal tradeo\ufb01 - the information curve - in terms of the eigenvalue spec- trum.", "bibtex": "@inproceedings{NIPS2003_7e05d6f8,\n author = {Chechik, Gal and Globerson, Amir and Tishby, Naftali and Weiss, Yair},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Information Bottleneck for Gaussian Variables},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/7e05d6f828574fbc975a896b25bb011e-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/7e05d6f828574fbc975a896b25bb011e-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/7e05d6f828574fbc975a896b25bb011e-Metadata.json", "review": "", "metareview": "", "pdf_size": 297697, "gs_citation": 379, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9340000231144655111&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 26, "aff": "School of Computer Science and Engineering and The Interdisciplinary Center for Neural Computation The Hebrew University of Jerusalem, 91904, Israel; School of Computer Science and Engineering and The Interdisciplinary Center for Neural Computation The Hebrew University of Jerusalem, 91904, Israel; School of Computer Science and Engineering and The Interdisciplinary Center for Neural Computation The Hebrew University of Jerusalem, 91904, Israel; School of Computer Science and Engineering and The Interdisciplinary Center for Neural Computation The Hebrew University of Jerusalem, 91904, Israel", "aff_domain": "cs.huji.ac.il;cs.huji.ac.il;cs.huji.ac.il;cs.huji.ac.il", "email": "cs.huji.ac.il;cs.huji.ac.il;cs.huji.ac.il;cs.huji.ac.il", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Hebrew University of Jerusalem", "aff_unique_dep": "School of Computer Science and Engineering", "aff_unique_url": "http://www.huji.ac.il", "aff_unique_abbr": "HUJI", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Jerusalem", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "Israel" }, { "id": "d5d8bf801e", "title": "Information Dynamics and Emergent Computation in Recurrent Circuits of Spiking Neurons", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/063e26c670d07bb7c4d30e6fc69fe056-Abstract.html", "author": "Thomas Natschl\u00e4ger; Wolfgang Maass", "abstract": "We employ an ef\ufb01cient method using Bayesian and linear classi\ufb01ers for analyzing the dynamics of information in high-dimensional states of generic cortical microcircuit models. It is shown that such recurrent cir- cuits of spiking neurons have an inherent capability to carry out rapid computations on complex spike patterns, merging information contained in the order of spike arrival with previously acquired context information.", "bibtex": "@inproceedings{NIPS2003_063e26c6,\n author = {Natschl\\\"{a}ger, Thomas and Maass, Wolfgang},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Information Dynamics and Emergent Computation in Recurrent Circuits of Spiking Neurons},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/063e26c670d07bb7c4d30e6fc69fe056-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/063e26c670d07bb7c4d30e6fc69fe056-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/063e26c670d07bb7c4d30e6fc69fe056-Metadata.json", "review": "", "metareview": "", "pdf_size": 139287, "gs_citation": 19, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8193051486467074354&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": ";", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster" }, { "id": "17c8ae40fd", "title": "Information Maximization in Noisy Channels : A Variational Approach", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/a6ea8471c120fe8cc35a2954c9b9c595-Abstract.html", "author": "David Barber; Felix V. Agakov", "abstract": "The maximisation of information transmission over noisy channels is a common, albeit generally computationally di\ufb03cult problem. We approach the di\ufb03culty of computing the mutual information for noisy channels by using a variational approximation. The re- sulting IM algorithm is analagous to the EM algorithm, yet max- imises mutual information, as opposed to likelihood. We apply the method to several practical examples, including linear compression, population encoding and CDMA.", "bibtex": "@inproceedings{NIPS2003_a6ea8471,\n author = {Barber, David and Agakov, Felix},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Information Maximization in Noisy Channels : A Variational Approach},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/a6ea8471c120fe8cc35a2954c9b9c595-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/a6ea8471c120fe8cc35a2954c9b9c595-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/a6ea8471c120fe8cc35a2954c9b9c595-Metadata.json", "review": "", "metareview": "", "pdf_size": 133452, "gs_citation": 38, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9240454488466434462&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "Institute for Adaptive and Neural Computation, Edinburgh University, EH1 2QL, U.K.; Institute for Adaptive and Neural Computation, Edinburgh University, EH1 2QL, U.K.", "aff_domain": ";", "email": ";", "github": "", "project": "www.anc.ed.ac.uk", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Edinburgh University", "aff_unique_dep": "Institute for Adaptive and Neural Computation", "aff_unique_url": "https://www.ed.ac.uk", "aff_unique_abbr": "Edinburgh", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Edinburgh", "aff_country_unique_index": "0;0", "aff_country_unique": "United Kingdom" }, { "id": "541ca30559", "title": "Insights from Machine Learning Applied to Human Visual Classification", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/b4baaff0e2f11b5356193849021d641f-Abstract.html", "author": "Felix A. Wichmann; Arnulf B. 
Graf", "abstract": "We attempt to understand visual classi\ufb01cation in humans using both psy- chophysical and machine learning techniques. Frontal views of human faces were used for a gender classi\ufb01cation task. Human subjects classi- \ufb01ed the faces and their gender judgment, reaction time and con\ufb01dence rating were recorded. Several hyperplane learning algorithms were used on the same classi\ufb01cation task using the Principal Components of the texture and shape representation of the faces. The classi\ufb01cation perfor- mance of the learning algorithms was estimated using the face database with the true gender of the faces as labels, and also with the gender es- timated by the subjects. We then correlated the human responses to the distance of the stimuli to the separating hyperplane of the learning algo- rithms. Our results suggest that human classi\ufb01cation can be modeled by some hyperplane algorithms in the feature space we used. For classi\ufb01ca- tion, the brain needs more processing for stimuli close to that hyperplane than for those further away.", "bibtex": "@inproceedings{NIPS2003_b4baaff0,\n author = {Wichmann, Felix A. and Graf, Arnulf},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Insights from Machine Learning Applied to Human Visual Classification},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/b4baaff0e2f11b5356193849021d641f-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/b4baaff0e2f11b5356193849021d641f-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/b4baaff0e2f11b5356193849021d641f-Metadata.json", "review": "", "metareview": "", "pdf_size": 136613, "gs_citation": 9, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5030844147841552332&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 7, "aff": "Max Planck Institute for Biological Cybernetics; Max Planck Institute for Biological Cybernetics", "aff_domain": "tuebingen.mpg.de;tuebingen.mpg.de", "email": "tuebingen.mpg.de;tuebingen.mpg.de", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Max Planck Institute for Biological Cybernetics", "aff_unique_dep": "Biological Cybernetics", "aff_unique_url": "https://www.biocybernetics.mpg.de", "aff_unique_abbr": "MPIBC", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "Germany" }, { "id": "db5d6f6f88", "title": "Invariant Pattern Recognition by Semi-Definite Programming Machines", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/bd7db7397f7d83052f829816ecc7f004-Abstract.html", "author": "Thore Graepel; Ralf Herbrich", "abstract": "invariances with respect to given pattern Knowledge about local transformations can greatly improve the accuracy of classi\ufb01cation. Previous approaches are either based on regularisation or on the gen- eration of virtual (transformed) examples. We develop a new frame- work for learning linear classi\ufb01ers under known transformations based on semide\ufb01nite programming. We present a new learning algorithm\u2014 the Semide\ufb01nite Programming Machine (SDPM)\u2014which is able to \ufb01nd a maximum margin hyperplane when the training examples are polynomial trajectories instead of single points. 
The solution is found to be sparse in dual variables and allows to identify those points on the trajectory with minimal real-valued output as virtual support vec- tors. Extensions to segments of trajectories, to more than one trans- formation parameter, and to learning with kernels are discussed. In experiments we use a Taylor expansion to locally approximate rota- tional invariance in pixel images from USPS and \ufb01nd improvements over known methods.", "bibtex": "@inproceedings{NIPS2003_bd7db739,\n author = {Graepel, Thore and Herbrich, Ralf},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Invariant Pattern Recognition by Semi-Definite Programming Machines},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/bd7db7397f7d83052f829816ecc7f004-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/bd7db7397f7d83052f829816ecc7f004-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/bd7db7397f7d83052f829816ecc7f004-Metadata.json", "review": "", "metareview": "", "pdf_size": 245221, "gs_citation": 71, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4835680440053779320&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "Microsoft Research Ltd., Cambridge, UK; Microsoft Research Ltd., Cambridge, UK", "aff_domain": "microsoft.com;microsoft.com", "email": "microsoft.com;microsoft.com", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Microsoft", "aff_unique_dep": "Microsoft Research", "aff_unique_url": "https://www.microsoft.com/en-us/research", "aff_unique_abbr": "MSR", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Cambridge", "aff_country_unique_index": "0;0", "aff_country_unique": "United Kingdom" }, { "id": "678320fd9f", "title": "Iterative Scaled Trust-Region Learning in Krylov Subspaces via Pearlmutter's Implicit Sparse Hessian", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/6ee69d3769e832ec77c9584e0b7ba112-Abstract.html", "author": "Eiji Mizutani; James Demmel", "abstract": "The online incremental gradient (or backpropagation) algorithm is widely considered to be the fastest method for solving large-scale neural-network (NN) learning problems. In contrast, we show that an appropriately implemented iterative batch-mode (or block-mode) learning method can be much faster. For example, it is three times faster in the UCI letter classi\ufb01cation problem (26 outputs, 16,000 data items, 6,066 parameters with a two-hidden-layer multilayer perceptron) and 353 times faster in a nonlinear regression problem arising in color recipe prediction (10 outputs, 1,000 data items, 2,210 parameters with a neuro-fuzzy modular network). The three principal innovative ingredients in our algorithm are the following: First, we use scaled trust-region regularization with inner-outer it- eration to solve the associated \u201coverdetermined\u201d nonlinear least squares problem, where the inner iteration performs a truncated (or inexact) Newton method. Second, we employ Pearlmutter\u2019s implicit sparse Hessian matrix-vector multiply algorithm to con- struct the Krylov subspaces used to solve for the truncated New- ton update. 
Third, we exploit sparsity (for preconditioning) in the matrices resulting from the NNs having many outputs.", "bibtex": "@inproceedings{NIPS2003_6ee69d37,\n author = {Mizutani, Eiji and Demmel, James},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Iterative Scaled Trust-Region Learning in Krylov Subspaces via Pearlmutter\\textquotesingle s Implicit Sparse Hessian},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/6ee69d3769e832ec77c9584e0b7ba112-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/6ee69d3769e832ec77c9584e0b7ba112-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/6ee69d3769e832ec77c9584e0b7ba112-Metadata.json", "review": "", "metareview": "", "pdf_size": 156354, "gs_citation": 17, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11750144234363060143&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "Department of Computer Science, Tsing Hua University, Hsinchu, 300 TAIWAN R.O.C.; Mathematics and Computer Science, University of California at Berkeley, Berkeley, CA 94720 USA", "aff_domain": "wayne.cs.nthu.edu.tw;cs.berkeley.edu", "email": "wayne.cs.nthu.edu.tw;cs.berkeley.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "Tsing Hua University;University of California, Berkeley", "aff_unique_dep": "Department of Computer Science;Department of Mathematics and Computer Science", "aff_unique_url": "https://www.thu.edu.tw;https://www.berkeley.edu", "aff_unique_abbr": "THU;UC Berkeley", "aff_campus_unique_index": "0;1", "aff_campus_unique": "Taiwan;Berkeley", "aff_country_unique_index": "0;1", "aff_country_unique": "China;United States" }, { "id": "ea78c87b84", "title": "Kernel Dimensionality Reduction for Supervised Learning", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/84b20b1f5a0d103f5710bb67a043cd78-Abstract.html", "author": "Kenji Fukumizu; Francis R. Bach; Michael I. Jordan", "abstract": "We propose a novel method of dimensionality reduction for supervised learning. Given a regression or classi\ufb01cation problem in which we wish to predict a variable Y from an explanatory vector X, we treat the prob- lem of dimensionality reduction as that of \ufb01nding a low-dimensional \u201cef- fective subspace\u201d of X which retains the statistical relationship between X and Y . We show that this problem can be formulated in terms of conditional independence. To turn this formulation into an optimization problem, we characterize the notion of conditional independence using covariance operators on reproducing kernel Hilbert spaces; this allows us to derive a contrast function for estimation of the effective subspace. Un- like many conventional methods, the proposed method requires neither assumptions on the marginal distribution of X, nor a parametric model of the conditional distribution of Y .", "bibtex": "@inproceedings{NIPS2003_84b20b1f,\n author = {Fukumizu, Kenji and Bach, Francis and Jordan, Michael},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Kernel Dimensionality Reduction for Supervised Learning},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/84b20b1f5a0d103f5710bb67a043cd78-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/84b20b1f5a0d103f5710bb67a043cd78-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/84b20b1f5a0d103f5710bb67a043cd78-Metadata.json", "review": "", "metareview": "", "pdf_size": 134796, "gs_citation": 27, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7877748939499027698&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 13, "aff": "Institute of Statistical Mathematics; CS Division, University of California; CS Division and Statistics, University of California", "aff_domain": "ism.ac.jp;cs.berkeley.edu;cs.berkeley.edu", "email": "ism.ac.jp;cs.berkeley.edu;cs.berkeley.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;1", "aff_unique_norm": "Institute of Statistical Mathematics;University of California", "aff_unique_dep": ";CS Division", "aff_unique_url": "https://www.ism.ac.jp;https://www.universityofcalifornia.edu", "aff_unique_abbr": "ISM;UC", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1;1", "aff_country_unique": "Japan;United States" }, { "id": "722bcbd31a", "title": "Kernels for Structured Natural Language Data", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/f7ac67a9aa8d255282de7d11391e1b69-Abstract.html", "author": "Jun Suzuki; Yutaka Sasaki; Eisaku Maeda", "abstract": "This paper devises a novel kernel function for structured natural language data. In the \ufb01eld of Natural Language Processing, feature extraction consists of the following two steps: (1) syntactically and semantically analyzing raw data, i.e., character strings, then representing the results as discrete structures, such as parse trees and dependency graphs with part-of-speech tags; (2) creating (possibly high-dimensional) numerical feature vectors from the discrete structures. The new kernels, called Hier- archical Directed Acyclic Graph (HDAG) kernels, directly accept DAGs whose nodes can contain DAGs. HDAG data structures are needed to fully re\ufb02ect the syntactic and semantic structures that natural language data inherently have. In this paper, we de\ufb01ne the kernel function and show how it permits ef\ufb01cient calculation. Experiments demonstrate that the proposed kernels are superior to existing kernel functions, e.g., se- quence kernels, tree kernels, and bag-of-words kernels.", "bibtex": "@inproceedings{NIPS2003_f7ac67a9,\n author = {Suzuki, Jun and Sasaki, Yutaka and Maeda, Eisaku},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Kernels for Structured Natural Language Data},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/f7ac67a9aa8d255282de7d11391e1b69-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/f7ac67a9aa8d255282de7d11391e1b69-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/f7ac67a9aa8d255282de7d11391e1b69-Metadata.json", "review": "", "metareview": "", "pdf_size": 437247, "gs_citation": 22, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11176251929860813525&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": ";;", "aff_domain": ";;", "email": ";;", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster" }, { "id": "52e1350403", "title": "Laplace Propagation", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/7fd804295ef7f6a2822bf4c61f9dc4a8-Abstract.html", "author": "Eleazar Eskin; Alex J. Smola; S.v.n. Vishwanathan", "abstract": "We present a novel method for approximate inference in Bayesian mod- els and regularized risk functionals. It is based on the propagation of mean and variance derived from the Laplace approximation of condi- tional probabilities in factorizing distributions, much akin to Minka\u2019s Expectation Propagation. In the jointly normal case, it coincides with the latter and belief propagation, whereas in the general case, it provides an optimization strategy containing Support Vector chunking, the Bayes Committee Machine, and Gaussian Process chunking as special cases.", "bibtex": "@inproceedings{NIPS2003_7fd80429,\n author = {Eskin, Eleazar and Smola, Alex and Vishwanathan, S.v.n.},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Laplace Propagation},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/7fd804295ef7f6a2822bf4c61f9dc4a8-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/7fd804295ef7f6a2822bf4c61f9dc4a8-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/7fd804295ef7f6a2822bf4c61f9dc4a8-Metadata.json", "review": "", "metareview": "", "pdf_size": 109951, "gs_citation": 64, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=553279663092931869&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 11, "aff": "Machine Learning Group, ANU and National ICT Australia, Canberra, ACT, 0200; Machine Learning Group, ANU and National ICT Australia, Canberra, ACT, 0200; Department of Computer Science, Hebrew University Jerusalem, Jerusalem, Israel, 91904", "aff_domain": "axiom.anu.edu.au;axiom.anu.edu.au;cs.columbia.edu", "email": "axiom.anu.edu.au;axiom.anu.edu.au;cs.columbia.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;1", "aff_unique_norm": "Australian National University;Hebrew University of Jerusalem", "aff_unique_dep": "Machine Learning Group;Department of Computer Science", "aff_unique_url": "https://www.anu.edu.au;https://www.huji.ac.il", "aff_unique_abbr": "ANU;HUJI", "aff_campus_unique_index": "0;0;1", "aff_campus_unique": "Canberra;Jerusalem", "aff_country_unique_index": "0;0;1", "aff_country_unique": "Australia;Israel" }, { "id": "aefbfe9ae1", "title": "Large Margin Classifiers: Convex Loss, Low Noise, and Convergence Rates", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/5dec707028b05bcbd3a1db5640f842c5-Abstract.html", "author": "Peter L. Bartlett; Michael I. Jordan; Jon D. Mcauliffe", "abstract": "Many classi\ufb01cation algorithms, including the support vector machine, boosting and logistic regression, can be viewed as minimum contrast methods that minimize a convex surrogate of the 0-1 loss function. We characterize the statistical consequences of using such a surrogate by pro- viding a general quantitative relationship between the risk as assessed us- ing the 0-1 loss and the risk as assessed using any nonnegative surrogate loss function. We show that this relationship gives nontrivial bounds un- der the weakest possible condition on the loss function\u2014that it satisfy a pointwise form of Fisher consistency for classi\ufb01cation. The relationship is based on a variational transformation of the loss function that is easy to compute in many applications. We also present a re\ufb01ned version of this result in the case of low noise. Finally, we present applications of our results to the estimation of convergence rates in the general setting of function classes that are scaled hulls of a \ufb01nite-dimensional base class.", "bibtex": "@inproceedings{NIPS2003_5dec7070,\n author = {Bartlett, Peter and Jordan, Michael and Mcauliffe, Jon},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Large Margin Classifiers: Convex Loss, Low Noise, and Convergence Rates},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/5dec707028b05bcbd3a1db5640f842c5-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/5dec707028b05bcbd3a1db5640f842c5-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/5dec707028b05bcbd3a1db5640f842c5-Metadata.json", "review": "", "metareview": "", "pdf_size": 89110, "gs_citation": 67, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15299482549210646284&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 11, "aff": ";;", "aff_domain": ";;", "email": ";;", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster" }, { "id": "c19f82a6e0", "title": "Large Scale Online Learning", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/9fb7b048c96d44a0337f049e0a61ff06-Abstract.html", "author": "L\u00e9on Bottou; Yann L. Cun", "abstract": "We consider situations where training data is abundant and computing resources are comparatively scarce. We argue that suitably designed on- line learning algorithms asymptotically outperform any batch learning algorithm. Both theoretical and experimental evidences are presented.", "bibtex": "@inproceedings{NIPS2003_9fb7b048,\n author = {Bottou, L\\'{e}on and Cun, Yann},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Large Scale Online Learning},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/9fb7b048c96d44a0337f049e0a61ff06-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/9fb7b048c96d44a0337f049e0a61ff06-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/9fb7b048c96d44a0337f049e0a61ff06-Metadata.json", "review": "", "metareview": "", "pdf_size": 86849, "gs_citation": 625, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=225691450898595589&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 18, "aff": "NEC Labs America; NEC Labs America", "aff_domain": "bottou.org;lecun.com", "email": "bottou.org;lecun.com", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "NEC Labs America", "aff_unique_dep": "", "aff_unique_url": "https://www.nec-labs.com", "aff_unique_abbr": "NEC LA", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "aa9e636709", "title": "Learning Bounds for a Generalized Family of Bayesian Posterior Distributions", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/fc79250f8c5b804390e8da280b4cf06e-Abstract.html", "author": "Tong Zhang", "abstract": "In this paper we obtain convergence bounds for the concentration of Bayesian posterior distributions (around the true distribution) using a novel method that simpli\ufb01es and enhances previous results. Based on the analysis, we also introduce a generalized family of Bayesian posteriors, and show that the convergence behavior of these generalized posteriors is completely determined by the local prior structure around the true distri- bution. 
This important and surprising robustness property does not hold for the standard Bayesian posterior in that it may not concentrate when there exist \u201cbad\u201d prior structures even at places far away from the true distribution.", "bibtex": "@inproceedings{NIPS2003_fc79250f,\n author = {Zhang, Tong},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Learning Bounds for a Generalized Family of Bayesian Posterior Distributions},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/fc79250f8c5b804390e8da280b4cf06e-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/fc79250f8c5b804390e8da280b4cf06e-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/fc79250f8c5b804390e8da280b4cf06e-Metadata.json", "review": "", "metareview": "", "pdf_size": 112301, "gs_citation": 26, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1370098849443291347&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "IBM T.J. Watson Research Center", "aff_domain": "watson.ibm.com", "email": "watson.ibm.com", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "IBM", "aff_unique_dep": "Research Center", "aff_unique_url": "https://www.ibm.com/research/watson", "aff_unique_abbr": "IBM", "aff_campus_unique_index": "0", "aff_campus_unique": "T.J. Watson", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "id": "6920d0c558", "title": "Learning Curves for Stochastic Gradient Descent in Linear Feedforward Networks", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/f8b932c70d0b2e6bf071729a4fa68dfc-Abstract.html", "author": "Justin Werfel; Xiaohui Xie; H. S. Seung", "abstract": "Dept. of Brain & Cog. Sci.", "bibtex": "@inproceedings{NIPS2003_f8b932c7,\n author = {Werfel, Justin and Xie, Xiaohui and Seung, H.},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Learning Curves for Stochastic Gradient Descent in Linear Feedforward Networks},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/f8b932c70d0b2e6bf071729a4fa68dfc-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/f8b932c70d0b2e6bf071729a4fa68dfc-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/f8b932c70d0b2e6bf071729a4fa68dfc-Metadata.json", "review": "", "metareview": "", "pdf_size": 227483, "gs_citation": 151, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5138754023274782346&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 23, "aff": "Dept. of EECS, MIT; Dept. of Molecular Biology, Princeton University; HHMI, Dept. of Brain & Cog. Sci., MIT", "aff_domain": "mit.edu;princeton.edu;mit.edu", "email": "mit.edu;princeton.edu;mit.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;0", "aff_unique_norm": "Massachusetts Institute of Technology;Princeton University", "aff_unique_dep": "Department of Electrical Engineering and Computer Science;Dept. 
of Molecular Biology", "aff_unique_url": "https://web.mit.edu;https://www.princeton.edu", "aff_unique_abbr": "MIT;Princeton", "aff_campus_unique_index": "0", "aff_campus_unique": "Cambridge;", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "0bf1ef1eaa", "title": "Learning Near-Pareto-Optimal Conventions in Polynomial Time", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/999600eb275cc7196161261972daa59b-Abstract.html", "author": "Xiaofeng Wang; Tuomas Sandholm", "abstract": "We study how to learn to play a Pareto-optimal strict Nash equilibrium when there exist multiple equilibria and agents may have different pref- erences among the equilibria. We focus on repeated coordination games of non-identical interest where agents do not know the game structure up front and receive noisy payoffs. We design ef\ufb01cient near-optimal al- gorithms for both the perfect monitoring and the imperfect monitoring setting(where the agents only observe their own payoffs and the joint actions).", "bibtex": "@inproceedings{NIPS2003_999600eb,\n author = {Wang, Xiaofeng and Sandholm, Tuomas},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Learning Near-Pareto-Optimal Conventions in Polynomial Time},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/999600eb275cc7196161261972daa59b-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/999600eb275cc7196161261972daa59b-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/999600eb275cc7196161261972daa59b-Metadata.json", "review": "", "metareview": "", "pdf_size": 85334, "gs_citation": 20, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3398196491614327342&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 14, "aff": "ECE Department, Carnegie Mellon University, Pittsburgh, PA 15213; CS Department, Carnegie Mellon University, Pittsburgh, PA 15213", "aff_domain": "andrew.cmu.edu;cs.cmu.edu", "email": "andrew.cmu.edu;cs.cmu.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Carnegie Mellon University", "aff_unique_dep": "ECE Department", "aff_unique_url": "https://www.cmu.edu", "aff_unique_abbr": "CMU", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Pittsburgh", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "a72b8560d4", "title": "Learning Non-Rigid 3D Shape from 2D Motion", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/8db9264228dc48fbf47535e888c02ae0-Abstract.html", "author": "Lorenzo Torresani; Aaron Hertzmann; Christoph Bregler", "abstract": "This paper presents an algorithm for learning the time-varying shape of a non-rigid 3D object from uncalibrated 2D tracking data. We model shape motion as a rigid component (rotation and translation) combined with a non-rigid deformation. Reconstruction is ill-posed if arbitrary deforma- tions are allowed. We constrain the problem by assuming that the object shape at each time instant is drawn from a Gaussian distribution. Based on this assumption, the algorithm simultaneously estimates 3D shape and motion for each time frame, learns the parameters of the Gaussian, and robustly \ufb01lls-in missing data points. 
We then extend the algorithm to model temporal smoothness in object shape, thus allowing it to handle severe cases of missing data.", "bibtex": "@inproceedings{NIPS2003_8db92642,\n author = {Torresani, Lorenzo and Hertzmann, Aaron and Bregler, Christoph},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Learning Non-Rigid 3D Shape from 2D Motion},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/8db9264228dc48fbf47535e888c02ae0-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/8db9264228dc48fbf47535e888c02ae0-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/8db9264228dc48fbf47535e888c02ae0-Metadata.json", "review": "", "metareview": "", "pdf_size": 141042, "gs_citation": 223, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4721329474410046335&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 18, "aff": "Stanford University; University of Toronto; New York University", "aff_domain": "cs.stanford.edu;dgp.toronto.edu;nyu.edu", "email": "cs.stanford.edu;dgp.toronto.edu;nyu.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;2", "aff_unique_norm": "Stanford University;University of Toronto;New York University", "aff_unique_dep": ";;", "aff_unique_url": "https://www.stanford.edu;https://www.utoronto.ca;https://www.nyu.edu", "aff_unique_abbr": "Stanford;U of T;NYU", "aff_campus_unique_index": "0", "aff_campus_unique": "Stanford;", "aff_country_unique_index": "0;1;0", "aff_country_unique": "United States;Canada" }, { "id": "f71ef2d9e2", "title": "Learning Spectral Clustering", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/d04863f100d59b3eb688a11f95b0ae60-Abstract.html", "author": "Francis R. Bach; Michael I. Jordan", "abstract": "Spectral clustering refers to a class of techniques which rely on the eigen- structure of a similarity matrix to partition points into disjoint clusters with points in the same cluster having high similarity and points in dif- ferent clusters having low similarity. In this paper, we derive a new cost function for spectral clustering based on a measure of error between a given partition and a solution of the spectral relaxation of a minimum normalized cut problem. Minimizing this cost function with respect to the partition leads to a new spectral clustering algorithm. Minimizing with respect to the similarity matrix leads to an algorithm for learning the similarity matrix. We develop a tractable approximation of our cost function that is based on the power method of computing eigenvectors.", "bibtex": "@inproceedings{NIPS2003_d04863f1,\n author = {Bach, Francis and Jordan, Michael},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Learning Spectral Clustering},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/d04863f100d59b3eb688a11f95b0ae60-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/d04863f100d59b3eb688a11f95b0ae60-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/d04863f100d59b3eb688a11f95b0ae60-Metadata.json", "review": "", "metareview": "", "pdf_size": 94334, "gs_citation": 684, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3683566240147668678&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 24, "aff": "Computer Science, University of California, Berkeley, CA 94720; Computer Science and Statistics, University of California, Berkeley, CA 94720", "aff_domain": "cs.berkeley.edu;cs.berkeley.edu", "email": "cs.berkeley.edu;cs.berkeley.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of California, Berkeley", "aff_unique_dep": "Computer Science", "aff_unique_url": "https://www.berkeley.edu", "aff_unique_abbr": "UC Berkeley", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Berkeley", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "611ffc36f5", "title": "Learning a Distance Metric from Relative Comparisons", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/d3b1fb02964aa64e257f9f26a31f72cf-Abstract.html", "author": "Matthew Schultz; Thorsten Joachims", "abstract": "This paper presents a method for learning a distance metric from rel- ative comparison such as \u201cA is closer to B than A is to C\u201d. Taking a Support Vector Machine (SVM) approach, we develop an algorithm that provides a \ufb02exible way of describing qualitative training data as a set of constraints. We show that such constraints lead to a convex quadratic programming problem that can be solved by adapting standard meth- ods for SVM training. We empirically evaluate the performance and the modelling \ufb02exibility of the algorithm on a collection of text documents.", "bibtex": "@inproceedings{NIPS2003_d3b1fb02,\n author = {Schultz, Matthew and Joachims, Thorsten},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Learning a Distance Metric from Relative Comparisons},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/d3b1fb02964aa64e257f9f26a31f72cf-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/d3b1fb02964aa64e257f9f26a31f72cf-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/d3b1fb02964aa64e257f9f26a31f72cf-Metadata.json", "review": "", "metareview": "", "pdf_size": 91574, "gs_citation": 958, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15235829403929436565&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": ";", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster" }, { "id": "1f558791e5", "title": "Learning a Rare Event Detection Cascade by Direct Feature Selection", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/23af4b45f1e166141a790d1a3126e77a-Abstract.html", "author": "Jianxin Wu; James M. Rehg; Matthew D. 
Mullin", "abstract": "Face detection is a canonical example of a rare event detection prob- lem, in which target patterns occur with much lower frequency than non- targets. Out of millions of face-sized windows in an input image, for ex- ample, only a few will typically contain a face. Viola and Jones recently proposed a cascade architecture for face detection which successfully ad- dresses the rare event nature of the task. A central part of their method is a feature selection algorithm based on AdaBoost. We present a novel cascade learning algorithm based on forward feature selection which is two orders of magnitude faster than the Viola-Jones approach and yields classi\ufb01ers of equivalent quality. This faster method could be used for more demanding classi\ufb01cation tasks, such as on-line learning.", "bibtex": "@inproceedings{NIPS2003_23af4b45,\n author = {Wu, Jianxin and Rehg, James M and Mullin, Matthew},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Learning a Rare Event Detection Cascade by Direct Feature Selection},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/23af4b45f1e166141a790d1a3126e77a-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/23af4b45f1e166141a790d1a3126e77a-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/23af4b45f1e166141a790d1a3126e77a-Metadata.json", "review": "", "metareview": "", "pdf_size": 103927, "gs_citation": 177, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13035181626079711213&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 17, "aff": ";;", "aff_domain": ";;", "email": ";;", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster" }, { "id": "c0f5e5b778", "title": "Learning a World Model and Planning with a Self-Organizing, Dynamic Neural System", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/28b60a16b55fd531047c0c958ce14b95-Abstract.html", "author": "Marc Toussaint", "abstract": "We present a connectionist architecture that can learn a model of the relations between perceptions and actions and use this model for be- havior planning. State representations are learned with a growing self- organizing layer which is directly coupled to a perception and a motor layer. Knowledge about possible state transitions is encoded in the lat- eral connectivity. Motor signals modulate this lateral connectivity and a dynamic \ufb01eld on the layer organizes a planning process. All mecha- nisms are local and adaptation is based on Hebbian ideas. The model is continuous in the action, perception, and time domain.", "bibtex": "@inproceedings{NIPS2003_28b60a16,\n author = {Toussaint, Marc},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Learning a World Model and Planning with a Self-Organizing, Dynamic Neural System},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/28b60a16b55fd531047c0c958ce14b95-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/28b60a16b55fd531047c0c958ce14b95-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/28b60a16b55fd531047c0c958ce14b95-Metadata.json", "review": "", "metareview": "", "pdf_size": 217084, "gs_citation": 35, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2897018960819197110&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "Institut f\u00a8ur Neuroinformatik, Ruhr-Universit \u00a8at Bochum, ND 04", "aff_domain": "neuroinformatik.rub.de", "email": "neuroinformatik.rub.de", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "Ruhr-Universit\u00e4t Bochum", "aff_unique_dep": "Institut f\u00fcr Neuroinformatik", "aff_unique_url": "https://www.ruhr-uni-bochum.de", "aff_unique_abbr": "RUB", "aff_campus_unique_index": "0", "aff_campus_unique": "Bochum", "aff_country_unique_index": "0", "aff_country_unique": "Germany" }, { "id": "21a1a94972", "title": "Learning the k in k-means", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/234833147b97bb6aed53a8f4f1c7a7d8-Abstract.html", "author": "Greg Hamerly; Charles Elkan", "abstract": "When clustering a dataset, the right number k of clusters to use is often not obvious, and choosing k automatically is a hard algorithmic prob- lem. In this paper we present an improved algorithm for learning k while clustering. The G-means algorithm is based on a statistical test for the hypothesis that a subset of data follows a Gaussian distribution. G-means runs k-means with increasing k in a hierarchical fashion until the test ac- cepts the hypothesis that the data assigned to each k-means center are Gaussian. Two key advantages are that the hypothesis test does not limit the covariance of the data and does not compute a full covariance matrix. Additionally, G-means only requires one intuitive parameter, the stand- ard statistical signi\ufb01cance level \u03b1. We present results from experiments showing that the algorithm works well, and better than a recent method based on the BIC penalty for model complexity. In these experiments, we show that the BIC is ineffective as a scoring function, since it does not penalize strongly enough the model\u2019s complexity.", "bibtex": "@inproceedings{NIPS2003_23483314,\n author = {Hamerly, Greg and Elkan, Charles},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Learning the k in k-means},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/234833147b97bb6aed53a8f4f1c7a7d8-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/234833147b97bb6aed53a8f4f1c7a7d8-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/234833147b97bb6aed53a8f4f1c7a7d8-Metadata.json", "review": "", "metareview": "", "pdf_size": 939299, "gs_citation": 1513, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3502385374571296093&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 13, "aff": "Department of Computer Science and Engineering, University of California, San Diego; Department of Computer Science and Engineering, University of California, San Diego", "aff_domain": "cs.ucsd.edu;cs.ucsd.edu", "email": "cs.ucsd.edu;cs.ucsd.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of California, San Diego", "aff_unique_dep": "Department of Computer Science and Engineering", "aff_unique_url": "https://www.ucsd.edu", "aff_unique_abbr": "UCSD", "aff_campus_unique_index": "0;0", "aff_campus_unique": "San Diego", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "43038daf30", "title": "Learning to Find Pre-Images", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/ac1ad983e08ad3304a97e147f522747e-Abstract.html", "author": "Jason Weston; Bernhard Sch\u00f6lkopf; G\u00f6khan H. Bakir", "abstract": "We consider the problem of reconstructing patterns from a feature map. Learning algorithms using kernels to operate in a reproducing kernel Hilbert space (RKHS) express their solutions in terms of input points mapped into the RKHS. We introduce a technique based on kernel princi- pal component analysis and regression to reconstruct corresponding pat- terns in the input space (aka pre-images) and review its performance in several applications requiring the construction of pre-images. The intro- duced technique avoids dif\ufb01cult and/or unstable numerical optimization, is easy to implement and, unlike previous methods, permits the compu- tation of pre-images in discrete input spaces.", "bibtex": "@inproceedings{NIPS2003_ac1ad983,\n author = {Weston, Jason and Sch\\\"{o}lkopf, Bernhard and Bakir, G\\\"{o}khan},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Learning to Find Pre-Images},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/ac1ad983e08ad3304a97e147f522747e-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/ac1ad983e08ad3304a97e147f522747e-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/ac1ad983e08ad3304a97e147f522747e-Metadata.json", "review": "", "metareview": "", "pdf_size": 133566, "gs_citation": 259, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9801281826406716563&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": ";;", "aff_domain": ";;", "email": ";;", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster" }, { "id": "611b8ff6b1", "title": "Learning with Local and Global Consistency", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/87682805257e619d49b8e0dfdc14affa-Abstract.html", "author": "Dengyong Zhou; Olivier Bousquet; Thomas N. Lal; Jason Weston; Bernhard Sch\u00f6lkopf", "abstract": "We consider the general problem of learning from labeled and unlabeled data, which is often called semi-supervised learning or transductive in- ference. A principled approach to semi-supervised learning is to design a classifying function which is suf(cid:2)ciently smooth with respect to the intrinsic structure collectively revealed by known labeled and unlabeled points. We present a simple algorithm to obtain such a smooth solution. Our method yields encouraging experimental results on a number of clas- si(cid:2)cation problems and demonstrates effective use of unlabeled data.", "bibtex": "@inproceedings{NIPS2003_87682805,\n author = {Zhou, Dengyong and Bousquet, Olivier and Lal, Thomas and Weston, Jason and Sch\\\"{o}lkopf, Bernhard},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Learning with Local and Global Consistency},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/87682805257e619d49b8e0dfdc14affa-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/87682805257e619d49b8e0dfdc14affa-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/87682805257e619d49b8e0dfdc14affa-Metadata.json", "review": "", "metareview": "", "pdf_size": 211917, "gs_citation": 5697, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2302411469055510933&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 24, "aff": "Max Planck Institute for Biological Cybernetics, 72076 Tuebingen, Germany; Max Planck Institute for Biological Cybernetics, 72076 Tuebingen, Germany; Max Planck Institute for Biological Cybernetics, 72076 Tuebingen, Germany; Max Planck Institute for Biological Cybernetics, 72076 Tuebingen, Germany; Max Planck Institute for Biological Cybernetics, 72076 Tuebingen, Germany", "aff_domain": "tuebingen.mpg.de;tuebingen.mpg.de;tuebingen.mpg.de;tuebingen.mpg.de;tuebingen.mpg.de", "email": "tuebingen.mpg.de;tuebingen.mpg.de;tuebingen.mpg.de;tuebingen.mpg.de;tuebingen.mpg.de", "github": "", "project": "", "author_num": 5, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;0;0", "aff_unique_norm": "Max Planck Institute for Biological Cybernetics", "aff_unique_dep": "Biological Cybernetics", "aff_unique_url": "https://www.biocybernetics.mpg.de", "aff_unique_abbr": "MPIBC", "aff_campus_unique_index": "0;0;0;0;0", "aff_campus_unique": "Tuebingen", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "Germany" }, { "id": "fbd160fb8c", "title": "Limiting Form of the Sample Covariance Eigenspectrum in PCA and Kernel PCA", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/3f998e713a6e02287c374fd26835d87e-Abstract.html", "author": "David Hoyle; Magnus Rattray", "abstract": "We derive the limiting form of the eigenvalue spectrum for sample co- variance matrices produced from non-isotropic data. For the analysis of standard PCA we study the case where the data has increased variance along a small number of symmetry-breaking directions. The spectrum depends on the strength of the symmetry-breaking signals and on a pa- rameter (cid:11) which is the ratio of sample size to data dimension. Results are derived in the limit of large data dimension while keeping (cid:11) \ufb01xed. As (cid:11) increases there are transitions in which delta functions emerge from the upper end of the bulk spectrum, corresponding to the symmetry-breaking directions in the data, and we calculate the bias in the corresponding eigenvalues. For kernel PCA the covariance matrix in feature space may contain symmetry-breaking structure even when the data components are independently distributed with equal variance. We show examples of phase-transition behaviour analogous to the PCA results in this case.", "bibtex": "@inproceedings{NIPS2003_3f998e71,\n author = {Hoyle, David and Rattray, Magnus},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Limiting Form of the Sample Covariance Eigenspectrum in PCA and Kernel PCA},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/3f998e713a6e02287c374fd26835d87e-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/3f998e713a6e02287c374fd26835d87e-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/3f998e713a6e02287c374fd26835d87e-Metadata.json", "review": "", "metareview": "", "pdf_size": 105392, "gs_citation": 43, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2831891029015237311&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "Department of Computer Science, University of Manchester, Manchester M13 9PL, UK; Department of Computer Science, University of Manchester, Manchester M13 9PL, UK", "aff_domain": "man.ac.uk;cs.man.ac.uk", "email": "man.ac.uk;cs.man.ac.uk", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of Manchester", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.manchester.ac.uk", "aff_unique_abbr": "UoM", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Manchester", "aff_country_unique_index": "0;0", "aff_country_unique": "United Kingdom" }, { "id": "120111cf38", "title": "Linear Dependent Dimensionality Reduction", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/cbf8710b43df3f2c1553e649403426df-Abstract.html", "author": "Nathan Srebro; Tommi S. Jaakkola", "abstract": "We formulate linear dimensionality reduction as a semi-parametric esti- mation problem, enabling us to study its asymptotic behavior. We gen- eralize the problem beyond additive Gaussian noise to (unknown) non- Gaussian additive noise, and to unbiased non-additive models.", "bibtex": "@inproceedings{NIPS2003_cbf8710b,\n author = {Srebro, Nathan and Jaakkola, Tommi},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Linear Dependent Dimensionality Reduction},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/cbf8710b43df3f2c1553e649403426df-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/cbf8710b43df3f2c1553e649403426df-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/cbf8710b43df3f2c1553e649403426df-Metadata.json", "review": "", "metareview": "", "pdf_size": 328679, "gs_citation": 13, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5147232862283920988&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "Department of Electrical Engineering and Computer Science; Department of Electrical Engineering and Computer Science", "aff_domain": "mit.edu;ai.mit.edu", "email": "mit.edu;ai.mit.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Massachusetts Institute of Technology", "aff_unique_dep": "Department of Electrical Engineering and Computer Science", "aff_unique_url": "https://web.mit.edu", "aff_unique_abbr": "MIT", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Cambridge", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "ac07ff9f4a", "title": "Linear Program Approximations for Factored Continuous-State Markov Decision Processes", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/1fb2a1c37b18aa4611c3949d6148d0f8-Abstract.html", "author": "Milos Hauskrecht; Branislav Kveton", "abstract": "Approximate linear programming (ALP) has emerged recently as one of the most promising methods for solving complex factored MDPs with (cid:2)nite state spaces. In this work we show that ALP solutions are not limited only to MDPs with (cid:2)nite state spaces, but that they can also be applied successfully to factored continuous-state MDPs (CMDPs). We show how one can build an ALP-based approximation for such a model and contrast it to existing solution methods. We argue that this approach offers a robust alternative for solving high dimensional continuous-state space problems. The point is supported by experiments on three CMDP problems with 24-25 continuous state factors.", "bibtex": "@inproceedings{NIPS2003_1fb2a1c3,\n author = {Hauskrecht, Milos and Kveton, Branislav},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Linear Program Approximations for Factored Continuous-State Markov Decision Processes},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/1fb2a1c37b18aa4611c3949d6148d0f8-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/1fb2a1c37b18aa4611c3949d6148d0f8-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/1fb2a1c37b18aa4611c3949d6148d0f8-Metadata.json", "review": "", "metareview": "", "pdf_size": 201596, "gs_citation": 77, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7724222608936010107&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 11, "aff": "Department of Computer Science and Intelligent Systems Program, University of Pittsburgh; Department of Computer Science and Intelligent Systems Program, University of Pittsburgh", "aff_domain": "cs.pitt.edu;cs.pitt.edu", "email": "cs.pitt.edu;cs.pitt.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of Pittsburgh", "aff_unique_dep": "Department of Computer Science and Intelligent Systems Program", "aff_unique_url": "https://www.pitt.edu", "aff_unique_abbr": "Pitt", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "dea5095637", "title": "Linear Response for Approximate Inference", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/020bf2c45e7bb322f89a226bd2c5d41b-Abstract.html", "author": "Max Welling; Yee W. Teh", "abstract": "Belief propagation on cyclic graphs is an ef\ufb01cient algorithm for comput- ing approximate marginal probability distributions over single nodes and neighboring nodes in the graph. In this paper we propose two new al- gorithms for approximating joint probabilities of arbitrary pairs of nodes and prove a number of desirable properties that these estimates ful\ufb01ll. The \ufb01rst algorithm is a propagation algorithm which is shown to con- verge if belief propagation converges to a stable \ufb01xed point. The second algorithm is based on matrix inversion. Experiments compare a number of competing methods.", "bibtex": "@inproceedings{NIPS2003_020bf2c4,\n author = {Welling, Max and Teh, Yee},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Linear Response for Approximate Inference},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/020bf2c45e7bb322f89a226bd2c5d41b-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/020bf2c45e7bb322f89a226bd2c5d41b-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/020bf2c45e7bb322f89a226bd2c5d41b-Metadata.json", "review": "", "metareview": "", "pdf_size": 133136, "gs_citation": 7, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10201557640885017058&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 12, "aff": "Department of Computer Science, University of Toronto; Computer Science Division, University of California at Berkeley", "aff_domain": "cs.utoronto.ca;eecs.berkeley.edu", "email": "cs.utoronto.ca;eecs.berkeley.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "University of Toronto;University of California, Berkeley", "aff_unique_dep": "Department of Computer Science;Computer Science Division", "aff_unique_url": "https://www.utoronto.ca;https://www.berkeley.edu", "aff_unique_abbr": "U of T;UC Berkeley", "aff_campus_unique_index": "0;1", "aff_campus_unique": "Toronto;Berkeley", "aff_country_unique_index": "0;1", "aff_country_unique": "Canada;United States" }, { "id": "bce411a0bd", "title": "Link Prediction in Relational Data", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/1e0a84051e6a4a7381473328f43c4884-Abstract.html", "author": "Ben Taskar; Ming-fai Wong; Pieter Abbeel; Daphne Koller", "abstract": "Many real-world domains are relational in nature, consisting of a set of objects related to each other in complex ways. This paper focuses on predicting the existence and the type of links between entities in such domains. We apply the relational Markov network framework of Taskar et al. to de\ufb01ne a joint probabilis- tic model over the entire link graph \u2014 entity attributes and links. The application of the RMN algorithm to this task requires the de\ufb01nition of probabilistic patterns over subgraph structures. We apply this method to two new relational datasets, one involving university webpages, and the other a social network. We show that the collective classi\ufb01cation approach of RMNs, and the introduction of subgraph patterns over link labels, provide signi\ufb01cant improvements in accuracy over \ufb02at classi\ufb01cation, which attempts to predict each link in isolation.", "bibtex": "@inproceedings{NIPS2003_1e0a8405,\n author = {Taskar, Ben and Wong, Ming-fai and Abbeel, Pieter and Koller, Daphne},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Link Prediction in Relational Data},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/1e0a84051e6a4a7381473328f43c4884-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/1e0a84051e6a4a7381473328f43c4884-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/1e0a84051e6a4a7381473328f43c4884-Metadata.json", "review": "", "metareview": "", "pdf_size": 65836, "gs_citation": 675, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15698674262635722928&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 23, "aff": "Stanford University; Stanford University; Stanford University; Stanford University", "aff_domain": "cs.stanford.edu;cs.stanford.edu;cs.stanford.edu;cs.stanford.edu", "email": "cs.stanford.edu;cs.stanford.edu;cs.stanford.edu;cs.stanford.edu", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Stanford University", "aff_unique_dep": "", "aff_unique_url": "https://www.stanford.edu", "aff_unique_abbr": "Stanford", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Stanford", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "id": "bfaccc0087", "title": "Local Phase Coherence and the Perception of Blur", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/565030e1fce4e481f9823a7de3b8a047-Abstract.html", "author": "Zhou Wang; Eero P. Simoncelli", "abstract": "Abstract Unavailable", "bibtex": "@inproceedings{NIPS2003_565030e1,\n author = {Wang, Zhou and Simoncelli, Eero},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Local Phase Coherence and the Perception of Blur},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/565030e1fce4e481f9823a7de3b8a047-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/565030e1fce4e481f9823a7de3b8a047-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/565030e1fce4e481f9823a7de3b8a047-Metadata.json", "review": "", "metareview": "", "pdf_size": 103770, "gs_citation": 217, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3500774477324882431&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Howard Hughes Medical Institute + Center for Neural Science and Courant Institute of Mathematical Sciences, New York University, New York, NY 10003; Howard Hughes Medical Institute + Center for Neural Science and Courant Institute of Mathematical Sciences, New York University, New York, NY 10003", "aff_domain": "ieee.org;nyu.edu", "email": "ieee.org;nyu.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0+1;0+1", "aff_unique_norm": "Howard Hughes Medical Institute;New York University", "aff_unique_dep": ";Center for Neural Science, Courant Institute of Mathematical Sciences", "aff_unique_url": "https://www.hhmi.org;https://www.nyu.edu", "aff_unique_abbr": "HHMI;NYU", "aff_campus_unique_index": "1;1", "aff_campus_unique": ";New York", "aff_country_unique_index": "0+0;0+0", "aff_country_unique": "United States" }, { "id": "a897dd674f", "title": "Locality Preserving Projections", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/d69116f8b0140cdeb1f99a4d5096ffe4-Abstract.html", "author": "Xiaofei He; Partha Niyogi", "abstract": "Many problems in information processing involve some form of dimen- sionality reduction. In this paper, we introduce Locality Preserving Pro- jections (LPP). These are linear projective maps that arise by solving a variational problem that optimally preserves the neighborhood structure of the data set. LPP should be seen as an alternative to Principal Com- ponent Analysis (PCA) \u2013 a classical linear technique that projects the data along the directions of maximal variance. When the high dimen- sional data lies on a low dimensional manifold embedded in the ambient space, the Locality Preserving Projections are obtained by \ufb01nding the optimal linear approximations to the eigenfunctions of the Laplace Bel- trami operator on the manifold. As a result, LPP shares many of the data representation properties of nonlinear techniques such as Laplacian Eigenmaps or Locally Linear Embedding. Yet LPP is linear and more crucially is de\ufb01ned everywhere in ambient space rather than just on the training data points. This is borne out by illustrative examples on some high dimensional data sets.", "bibtex": "@inproceedings{NIPS2003_d69116f8,\n author = {He, Xiaofei and Niyogi, Partha},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Locality Preserving Projections},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/d69116f8b0140cdeb1f99a4d5096ffe4-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/d69116f8b0140cdeb1f99a4d5096ffe4-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/d69116f8b0140cdeb1f99a4d5096ffe4-Metadata.json", "review": "", "metareview": "", "pdf_size": 137633, "gs_citation": 5752, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8704156794478468024&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 11, "aff": "Department of Computer Science, The University of Chicago, Chicago, IL 60637; Department of Computer Science, The University of Chicago, Chicago, IL 60637", "aff_domain": "cs.uchicago.edu;cs.uchicago.edu", "email": "cs.uchicago.edu;cs.uchicago.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of Chicago", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.uchicago.edu", "aff_unique_abbr": "UChicago", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Chicago", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "75732a3216", "title": "Log-Linear Models for Label Ranking", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/217c0e01c1828e7279051f1b6675745d-Abstract.html", "author": "Ofer Dekel; Yoram Singer; Christopher D. Manning", "abstract": "Label ranking is the task of inferring a total order over a prede\ufb01ned set of labels for each given instance. We present a general framework for batch learning of label ranking functions from supervised data. We assume that each instance in the training data is associated with a list of preferences over the label-set, however we do not assume that this list is either com- plete or consistent. This enables us to accommodate a variety of ranking problems. In contrast to the general form of the supervision, our goal is to learn a ranking function that induces a total order over the entire set of labels. Special cases of our setting are multilabel categorization and hierarchical classi\ufb01cation. We present a general boosting-based learning algorithm for the label ranking problem and prove a lower bound on the progress of each boosting iteration. The applicability of our approach is demonstrated with a set of experiments on a large-scale text corpus.", "bibtex": "@inproceedings{NIPS2003_217c0e01,\n author = {Dekel, Ofer and Singer, Yoram and Manning, Christopher D},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Log-Linear Models for Label Ranking},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/217c0e01c1828e7279051f1b6675745d-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/217c0e01c1828e7279051f1b6675745d-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/217c0e01c1828e7279051f1b6675745d-Metadata.json", "review": "", "metareview": "", "pdf_size": 90582, "gs_citation": 245, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16983233081357139706&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 17, "aff": "Computer Science & Eng., Hebrew University; Computer Science Dept., Stanford University; Computer Science & Eng., Hebrew University", "aff_domain": "cs.huji.ac.il;cs.stanford.edu;cs.huji.ac.il", "email": "cs.huji.ac.il;cs.stanford.edu;cs.huji.ac.il", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;0", "aff_unique_norm": "Hebrew University;Stanford University", "aff_unique_dep": "Computer Science & Eng.;Computer Science Dept.", "aff_unique_url": "http://www.huji.ac.il;https://www.stanford.edu", "aff_unique_abbr": "HUJI;Stanford", "aff_campus_unique_index": "1", "aff_campus_unique": ";Stanford", "aff_country_unique_index": "0;1;0", "aff_country_unique": "Israel;United States" }, { "id": "f14f8f42bd", "title": "Margin Maximizing Loss Functions", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/0fe473396242072e84af286632d3f0ff-Abstract.html", "author": "Saharon Rosset; Ji Zhu; Trevor J. Hastie", "abstract": "Margin maximizing properties play an important role in the analysis of classi\u00a3- cation models, such as boosting and support vector machines. Margin maximiza- tion is theoretically interesting because it facilitates generalization error analysis, and practically interesting because it presents a clear geometric interpretation of the models being built. We formulate and prove a suf\u00a3cient condition for the solutions of regularized loss functions to converge to margin maximizing separa- tors, as the regularization vanishes. This condition covers the hinge loss of SVM, the exponential loss of AdaBoost and logistic regression loss. We also generalize it to multi-class classi\u00a3cation problems, and present margin maximizing multi- class versions of logistic regression and support vector machines.", "bibtex": "@inproceedings{NIPS2003_0fe47339,\n author = {Rosset, Saharon and Zhu, Ji and Hastie, Trevor},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Margin Maximizing Loss Functions},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/0fe473396242072e84af286632d3f0ff-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/0fe473396242072e84af286632d3f0ff-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/0fe473396242072e84af286632d3f0ff-Metadata.json", "review": "", "metareview": "", "pdf_size": 87136, "gs_citation": 177, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3558383796545036823&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 12, "aff": "Watson Research Center, IBM, Yorktown, NY, 10598; Department of Statistics, University of Michigan, Ann Arbor, MI, 48109; Department of Statistics, Stanford University, Stanford, CA, 94305", "aff_domain": "us.ibm.com;umich.edu;stat.stanford.edu", "email": "us.ibm.com;umich.edu;stat.stanford.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;2", "aff_unique_norm": "IBM;University of Michigan;Stanford University", "aff_unique_dep": "Watson Research Center;Department of Statistics;Department of Statistics", "aff_unique_url": "https://www.ibm.com;https://www.umich.edu;https://www.stanford.edu", "aff_unique_abbr": "IBM;UM;Stanford", "aff_campus_unique_index": "0;1;2", "aff_campus_unique": "Yorktown;Ann Arbor;Stanford", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "4822a0aea2", "title": "Markov Models for Automated ECG Interval Analysis", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/b23975176653284f1f7356ba5539cfcb-Abstract.html", "author": "Nicholas P. Hughes; Lionel Tarassenko; Stephen J. Roberts", "abstract": "We examine the use of hidden Markov and hidden semi-Markov mod- els for automatically segmenting an electrocardiogram waveform into its constituent waveform features. An undecimated wavelet transform is used to generate an overcomplete representation of the signal that is more appropriate for subsequent modelling. We show that the state dura- tions implicit in a standard hidden Markov model are ill-suited to those of real ECG features, and we investigate the use of hidden semi-Markov models for improved state duration modelling.", "bibtex": "@inproceedings{NIPS2003_b2397517,\n author = {Hughes, Nicholas and Tarassenko, Lionel and Roberts, Stephen J},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Markov Models for Automated ECG Interval Analysis},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/b23975176653284f1f7356ba5539cfcb-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/b23975176653284f1f7356ba5539cfcb-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/b23975176653284f1f7356ba5539cfcb-Metadata.json", "review": "", "metareview": "", "pdf_size": 78160, "gs_citation": 144, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3661566752891128027&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 13, "aff": "Department of Engineering Science, University of Oxford; Department of Engineering Science, University of Oxford; Department of Engineering Science, University of Oxford", "aff_domain": "robots.ox.ac.uk;robots.ox.ac.uk;robots.ox.ac.uk", "email": "robots.ox.ac.uk;robots.ox.ac.uk;robots.ox.ac.uk", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of Oxford", "aff_unique_dep": "Department of Engineering Science", "aff_unique_url": "https://www.ox.ac.uk", "aff_unique_abbr": "Oxford", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Oxford", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United Kingdom" }, { "id": "7dfb0e9f93", "title": "Max-Margin Markov Networks", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/878d5691c824ee2aaf770f7d36c151d6-Abstract.html", "author": "Ben Taskar; Carlos Guestrin; Daphne Koller", "abstract": "In typical classi\ufb01cation tasks, we seek a function which assigns a label to a sin- gle object. Kernel-based approaches, such as support vector machines (SVMs), which maximize the margin of con\ufb01dence of the classi\ufb01er, are the method of choice for many such tasks. Their popularity stems both from the ability to use high-dimensional feature spaces, and from their strong theoretical guaran- tees. However, many real-world tasks involve sequential, spatial, or structured data, where multiple labels must be assigned. Existing kernel-based methods ig- nore structure in the problem, assigning labels independently to each object, los- ing much useful information. Conversely, probabilistic graphical models, such as Markov networks, can represent correlations between labels, by exploiting problem structure, but cannot handle high-dimensional feature spaces, and lack strong theoretical generalization guarantees. In this paper, we present a new framework that combines the advantages of both approaches: Maximum mar- gin Markov (M3) networks incorporate both kernels, which ef\ufb01ciently deal with high-dimensional features, and the ability to capture correlations in structured data. We present an ef\ufb01cient algorithm for learning M3 networks based on a compact quadratic program formulation. We provide a new theoretical bound for generalization in structured domains. Experiments on the task of handwrit- ten character recognition and collective hypertext classi\ufb01cation demonstrate very signi\ufb01cant gains over previous approaches.", "bibtex": "@inproceedings{NIPS2003_878d5691,\n author = {Taskar, Ben and Guestrin, Carlos and Koller, Daphne},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Max-Margin Markov Networks},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/878d5691c824ee2aaf770f7d36c151d6-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/878d5691c824ee2aaf770f7d36c151d6-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/878d5691c824ee2aaf770f7d36c151d6-Metadata.json", "review": "", "metareview": "", "pdf_size": 112450, "gs_citation": 1827, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13190351765342352422&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 25, "aff": "Stanford University; Stanford University; Stanford University", "aff_domain": "cs.stanford.edu;cs.stanford.edu;cs.stanford.edu", "email": "cs.stanford.edu;cs.stanford.edu;cs.stanford.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "Stanford University", "aff_unique_dep": "", "aff_unique_url": "https://www.stanford.edu", "aff_unique_abbr": "Stanford", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Stanford", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "30b7e4da25", "title": "Maximum Likelihood Estimation of a Stochastic Integrate-and-Fire Neural Model", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/d0921d442ee91b896ad95059d13df618-Abstract.html", "author": "Liam Paninski; Eero P. Simoncelli; Jonathan W. Pillow", "abstract": "Recent work has examined the estimation of models of stimulus-driven neural activity in which some linear \ufb01ltering process is followed by a nonlinear, probabilistic spiking stage. We analyze the estimation of one such model for which this nonlinear step is implemented by a noisy, leaky, integrate-and-\ufb01re mechanism with a spike-dependent after- current. This model is a biophysically plausible alternative to models with Poisson (memory-less) spiking, and has been shown to effectively reproduce various spiking statistics of neurons in vivo. However, the problem of estimating the model from extracellular spike train data has not been examined in depth. We formulate the problem in terms of max- imum likelihood estimation, and show that the computational problem of maximizing the likelihood is tractable. Our main contribution is an algorithm and a proof that this algorithm is guaranteed to \ufb01nd the global optimum with reasonable speed. We demonstrate the effectiveness of our estimator with numerical simulations.", "bibtex": "@inproceedings{NIPS2003_d0921d44,\n author = {Paninski, Liam and Simoncelli, Eero and Pillow, Jonathan},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Maximum Likelihood Estimation of a Stochastic Integrate-and-Fire Neural Model},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/d0921d442ee91b896ad95059d13df618-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/d0921d442ee91b896ad95059d13df618-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/d0921d442ee91b896ad95059d13df618-Metadata.json", "review": "", "metareview": "", "pdf_size": 236697, "gs_citation": 351, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14634764014878664016&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 21, "aff": "Howard Hughes Medical Institute + Center for Neural Science, New York University; Howard Hughes Medical Institute + Center for Neural Science, New York University; Howard Hughes Medical Institute + Center for Neural Science, New York University", "aff_domain": "cns.nyu.edu;cns.nyu.edu;cns.nyu.edu", "email": "cns.nyu.edu;cns.nyu.edu;cns.nyu.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0+1;0+1;0+1", "aff_unique_norm": "Howard Hughes Medical Institute;New York University", "aff_unique_dep": ";Center for Neural Science", "aff_unique_url": "https://www.hhmi.org;https://www.nyu.edu", "aff_unique_abbr": "HHMI;NYU", "aff_campus_unique_index": "1;1;1", "aff_campus_unique": ";New York", "aff_country_unique_index": "0+0;0+0;0+0", "aff_country_unique": "United States" }, { "id": "d150f1ce04", "title": "Measure Based Regularization", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/7884a9652e94555c70f96b6be63be216-Abstract.html", "author": "Olivier Bousquet; Olivier Chapelle; Matthias Hein", "abstract": "We address in this paper the question of how the knowledge of the marginal distribution P (x) can be incorporated in a learning algorithm. We suggest three theoretical methods for taking into account this distribution for regularization and provide links to existing graph-based semi-supervised learning algorithms. We also propose practical implementations.", "bibtex": "@inproceedings{NIPS2003_7884a965,\n author = {Bousquet, Olivier and Chapelle, Olivier and Hein, Matthias},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Measure Based Regularization},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/7884a9652e94555c70f96b6be63be216-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/7884a9652e94555c70f96b6be63be216-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/7884a9652e94555c70f96b6be63be216-Metadata.json", "review": "", "metareview": "", "pdf_size": 155370, "gs_citation": 162, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15657968621517831297&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": ";;", "aff_domain": ";;", "email": ";;", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster" }, { "id": "0ffceaaf2d", "title": "Mechanism of Neural Interference by Transcranial Magnetic Stimulation: Network or Single Neuron?", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/b090409688550f3cc93f4ed88ec6cafb-Abstract.html", "author": "Yoichi Miyawaki; Masato Okada", "abstract": "This paper proposes neural mechanisms of transcranial magnetic stim- ulation (TMS). TMS can stimulate the brain non-invasively through a brief magnetic pulse delivered by a coil placed on the scalp, interfering with speci\ufb01c cortical functions with a high temporal resolution. Due to these advantages, TMS has been a popular experimental tool in various neuroscience \ufb01elds. However, the neural mechanisms underlying TMS- induced interference are still unknown; a theoretical basis for TMS has not been developed. This paper provides computational evidence that in- hibitory interactions in a neural population, not an isolated single neuron, play a critical role in yielding the neural interference induced by TMS.", "bibtex": "@inproceedings{NIPS2003_b0904096,\n author = {Miyawaki, Yoichi and Okada, Masato},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Mechanism of Neural Interference by Transcranial Magnetic Stimulation: Network or Single Neuron?},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/b090409688550f3cc93f4ed88ec6cafb-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/b090409688550f3cc93f4ed88ec6cafb-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/b090409688550f3cc93f4ed88ec6cafb-Metadata.json", "review": "", "metareview": "", "pdf_size": 763603, "gs_citation": 6, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12742080417628486171&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "RIKEN Brain Science Institute, Wako, Saitama 351-0198, JAPAN; RIKEN Brain Science Institute, PRESTO, JST, Wako, Saitama 351-0198, JAPAN", "aff_domain": "brain.riken.jp;brain.riken.jp", "email": "brain.riken.jp;brain.riken.jp", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "RIKEN Brain Science Institute;", "aff_unique_dep": "Brain Science Institute;", "aff_unique_url": "https://briken.org;", "aff_unique_abbr": "RIKEN BSI;", "aff_campus_unique_index": "0", "aff_campus_unique": "Wako;", "aff_country_unique_index": "0", "aff_country_unique": "Japan;" }, { "id": "e5191f585b", "title": "Minimax Embeddings", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/5f14615696649541a025d3d0f8e0447f-Abstract.html", "author": "Matthew Brand", "abstract": "Spectral methods for nonlinear dimensionality reduction (NLDR) impose a neighborhood graph on point data and compute eigenfunctions of a quadratic form generated from the graph. We introduce a more general and more robust formulation of NLDR based on the singular value de- composition (SVD). In this framework, most spectral NLDR principles can be recovered by taking a subset of the constraints in a quadratic form built from local nullspaces on the manifold. The minimax formulation also opens up an interesting class of methods in which the graph is \u201cdec- orated\u201d with information at the vertices, offering discrete or continuous maps, reduced computational complexity, and immunity to some solu- tion instabilities of eigenfunction approaches. Apropos, we show almost all NLDR methods based on eigenvalue decompositions (EVD) have a so- lution instability that increases faster than problem size. This pathology can be observed (and corrected via the minimax formulation) in problems as small as N < 100 points.", "bibtex": "@inproceedings{NIPS2003_5f146156,\n author = {Brand, Matthew},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Minimax Embeddings},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/5f14615696649541a025d3d0f8e0447f-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/5f14615696649541a025d3d0f8e0447f-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/5f14615696649541a025d3d0f8e0447f-Metadata.json", "review": "", "metareview": "", "pdf_size": 260310, "gs_citation": 10, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11212818423998424426&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "Mitsubishi Electric Research Labs", "aff_domain": "", "email": "", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "Mitsubishi Electric Research Laboratories", "aff_unique_dep": "", "aff_unique_url": "https://www.merl.com", "aff_unique_abbr": "MERL", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "id": "bbeba90ef5", "title": "Minimising Contrastive Divergence in Noisy, Mixed-mode VLSI Neurons", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/d9ff90f4000eacd3a6c9cb27f78994cf-Abstract.html", "author": "Hsin Chen; Patrice Fleury; Alan F. Murray", "abstract": "This paper presents VLSI circuits with continuous-valued proba- bilistic behaviour realized by injecting noise into each computing unit(neuron). Interconnecting the noisy neurons forms a Contin- uous Restricted Boltzmann Machine (CRBM), which has shown promising performance in modelling and classifying noisy biomed- ical data. The Minimising-Contrastive-Divergence learning algo- rithm for CRBM is also implemented in mixed-mode VLSI, to adapt the noisy neurons\u2019 parameters on-chip.", "bibtex": "@inproceedings{NIPS2003_d9ff90f4,\n author = {Chen, Hsin and Fleury, Patrice and Murray, Alan},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Minimising Contrastive Divergence in Noisy, Mixed-mode VLSI Neurons},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/d9ff90f4000eacd3a6c9cb27f78994cf-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/d9ff90f4000eacd3a6c9cb27f78994cf-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/d9ff90f4000eacd3a6c9cb27f78994cf-Metadata.json", "review": "", "metareview": "", "pdf_size": 317150, "gs_citation": 13, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7714002634442308532&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": ";;", "aff_domain": ";;", "email": ";;", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster" }, { "id": "e5143dba99", "title": "Model Uncertainty in Classical Conditioning", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/55a988dfb00a914717b3000a3374694c-Abstract.html", "author": "Aaron C. Courville; Geoffrey J. Gordon; David S. Touretzky; Nathaniel D. Daw", "abstract": "We develop a framework based on Bayesian model averaging to explain how animals cope with uncertainty about contingencies in classical con- ditioning experiments. 
Traditional accounts of conditioning fit parameters within a fixed generative model of reinforcer delivery; uncertainty over the model structure is not considered. We apply the theory to explain the puzzling relationship between second-order conditioning and conditioned inhibition, two similar conditioning regimes that nonetheless result in strongly divergent behavioral outcomes. According to the theory, second-order conditioning results when limited experience leads animals to prefer a simpler world model that produces spurious correlations; conditioned inhibition results when a more complex model is justified by additional experience.", "bibtex": "@inproceedings{NIPS2003_55a988df,\n author = {Courville, Aaron C and Gordon, Geoffrey J and Touretzky, David and Daw, Nathaniel},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Model Uncertainty in Classical Conditioning},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/55a988dfb00a914717b3000a3374694c-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/55a988dfb00a914717b3000a3374694c-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/55a988dfb00a914717b3000a3374694c-Metadata.json", "review": "", "metareview": "", "pdf_size": 110053, "gs_citation": 70, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8406609124370367770&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 15, "aff": "Robotics Institute; Computer Science Department; Center for the Neural Basis of Cognition; Center for Automated Learning and Discovery", "aff_domain": "cs.cmu.edu;cs.cmu.edu;cs.cmu.edu;cs.cmu.edu", "email": "cs.cmu.edu;cs.cmu.edu;cs.cmu.edu;cs.cmu.edu", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;1;2;3", "aff_unique_norm": "Robotics Institute;Computer Science Department;Center for the Neural Basis of Cognition;Carnegie Mellon University", "aff_unique_dep": ";Computer Science;;Center for Automated Learning and Discovery", "aff_unique_url": ";;;https://www.cmu.edu", "aff_unique_abbr": ";;;CMU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "1;1", "aff_country_unique": ";United States" }, { "id": "4903abffc8", "title": "Modeling User Rating Profiles For Collaborative Filtering", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/269d837afada308dd4aeab28ca2d57e4-Abstract.html", "author": "Benjamin M. Marlin", "abstract": "In this paper we present a generative latent variable model for rating-based collaborative filtering called the User Rating Profile model (URP). The generative process which underlies URP is designed to produce complete user rating profiles, an assignment of one rating to each item for each user. Our model represents each user as a mixture of user attitudes, and the mixing proportions are distributed according to a Dirichlet random variable. The rating for each item is generated by selecting a user attitude for the item, and then selecting a rating according to the preference pattern associated with that attitude. 
URP is related to several models including a multinomial mixture model, the aspect model [7], and LDA [1], but has clear advantages over each.", "bibtex": "@inproceedings{NIPS2003_269d837a,\n author = {Marlin, Benjamin M},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Modeling User Rating Profiles For Collaborative Filtering},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/269d837afada308dd4aeab28ca2d57e4-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/269d837afada308dd4aeab28ca2d57e4-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/269d837afada308dd4aeab28ca2d57e4-Metadata.json", "review": "", "metareview": "", "pdf_size": 94534, "gs_citation": 475, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2239963210002812914&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "Department of Computer Science, University of Toronto", "aff_domain": "cs.toronto.edu", "email": "cs.toronto.edu", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "University of Toronto", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.utoronto.ca", "aff_unique_abbr": "U of T", "aff_campus_unique_index": "0", "aff_campus_unique": "Toronto", "aff_country_unique_index": "0", "aff_country_unique": "Canada" }, { "id": "ecf6154b31", "title": "Multiple Instance Learning via Disjunctive Programming Boosting", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/53f0d7c537d99b3824f0f99d62ea2428-Abstract.html", "author": "Stuart Andrews; Thomas Hofmann", "abstract": "Learning from ambiguous training data is highly relevant in many applications. We present a new learning algorithm for classi\ufb01cation problems where labels are associated with sets of pattern instead of individual patterns. This encompasses multiple instance learn- ing as a special case. Our approach is based on a generalization of linear programming boosting and uses results from disjunctive programming to generate successively stronger linear relaxations of a discrete non-convex problem.", "bibtex": "@inproceedings{NIPS2003_53f0d7c5,\n author = {Andrews, Stuart and Hofmann, Thomas},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Multiple Instance Learning via Disjunctive Programming Boosting},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/53f0d7c537d99b3824f0f99d62ea2428-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/53f0d7c537d99b3824f0f99d62ea2428-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/53f0d7c537d99b3824f0f99d62ea2428-Metadata.json", "review": "", "metareview": "", "pdf_size": 107342, "gs_citation": 63, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11987627867489863963&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "Department of Computer Science, Brown University, Providence, RI, 02912; Department of Computer Science, Brown University, Providence, RI, 02912", "aff_domain": "cs.brown.edu;cs.brown.edu", "email": "cs.brown.edu;cs.brown.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Brown University", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.brown.edu", "aff_unique_abbr": "Brown", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Providence", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "2031c39d1f", "title": "Mutual Boosting for Contextual Inference", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/070dbb6024b5ef93784428afc71f2146-Abstract.html", "author": "Michael Fink; Pietro Perona", "abstract": "Mutual Boosting is a method aimed at incorporating contextual information to augment object detection. When multiple detectors of objects and parts are trained in parallel using AdaBoost [1], object detectors might use the remaining intermediate detectors to enrich the weak learner set. This method generalizes the efficient features suggested by Viola and Jones thus enabling information inference between parts and objects in a compositional hierarchy. In our experiments eye-, nose-, mouth- and face detectors are trained using the Mutual Boosting framework. Results show the method outperforms applications overlooking contextual information. We suggest that achieving contextual integration is a step toward human-like detection capabilities.", "bibtex": "@inproceedings{NIPS2003_070dbb60,\n author = {Fink, Michael and Perona, Pietro},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Mutual Boosting for Contextual Inference},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/070dbb6024b5ef93784428afc71f2146-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/070dbb6024b5ef93784428afc71f2146-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/070dbb6024b5ef93784428afc71f2146-Metadata.json", "review": "", "metareview": "", "pdf_size": 462105, "gs_citation": 144, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10544007731461470074&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 13, "aff": "Center for Neural Computation, Hebrew University of Jerusalem; Electrical Engineering Department, California Institute of Technology", "aff_domain": "huji.ac.il;vision.caltech.edu", "email": "huji.ac.il;vision.caltech.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "Hebrew University of Jerusalem;California Institute of Technology", "aff_unique_dep": "Center for Neural Computation;Electrical Engineering Department", "aff_unique_url": "https://www.huji.ac.il;https://www.caltech.edu", "aff_unique_abbr": "HUJI;Caltech", "aff_campus_unique_index": "0;1", "aff_campus_unique": "Jerusalem;Pasadena", "aff_country_unique_index": "0;1", "aff_country_unique": "Israel;United States" }, { "id": "b5027ee86d", "title": "Near-Minimax Optimal Classification with Dyadic Classification Trees", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/f7e2b2b75b04175610e5a00c1e221ebb-Abstract.html", "author": "Clayton Scott; Robert Nowak", "abstract": "This paper reports on a family of computationally practical classi\ufb01ers that converge to the Bayes error at near-minimax optimal rates for a va- riety of distributions. The classi\ufb01ers are based on dyadic classi\ufb01cation trees (DCTs), which involve adaptively pruned partitions of the feature space. A key aspect of DCTs is their spatial adaptivity, which enables lo- cal (rather than global) \ufb01tting of the decision boundary. Our risk analysis involves a spatial decomposition of the usual concentration inequalities, leading to a spatially adaptive, data-dependent pruning criterion. For any distribution on (X, Y ) whose Bayes decision boundary behaves locally like a Lipschitz smooth function, we show that the DCT error converges to the Bayes error at a rate within a logarithmic factor of the minimax optimal rate. We also study DCTs equipped with polynomial classi\ufb01ca- tion rules at each leaf, and show that as the smoothness of the boundary increases their errors converge to the Bayes error at a rate approaching n\u22121/2, the parametric rate. We are not aware of any other practical classi- \ufb01ers that provide similar rate of convergence guarantees. Fast algorithms for tree pruning are discussed.", "bibtex": "@inproceedings{NIPS2003_f7e2b2b7,\n author = {Scott, Clayton and Nowak, Robert},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Near-Minimax Optimal Classification with Dyadic Classification Trees},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/f7e2b2b75b04175610e5a00c1e221ebb-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/f7e2b2b75b04175610e5a00c1e221ebb-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/f7e2b2b75b04175610e5a00c1e221ebb-Metadata.json", "review": "", "metareview": "", "pdf_size": 79160, "gs_citation": 21, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8610807458636725976&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "Electrical and Computer Engineering, Rice University; Electrical and Computer Engineering, University of Wisconsin", "aff_domain": "rice.edu;engr.wisc.edu", "email": "rice.edu;engr.wisc.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "Rice University;University of Wisconsin", "aff_unique_dep": "Electrical and Computer Engineering;Electrical and Computer Engineering", "aff_unique_url": "https://www.rice.edu;https://www.wisc.edu", "aff_unique_abbr": "Rice;UW", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "e9cbbe1fb2", "title": "Necessary Intransitive Likelihood-Ratio Classifiers", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/4a1590df1d5968d41b855005bb8b67bf-Abstract.html", "author": "Gang Ji; Jeff A. Bilmes", "abstract": "In pattern classi\ufb01cation tasks, errors are introduced because of differ- ences between the true model and the one obtained via model estimation. Using likelihood-ratio based classi\ufb01cation, it is possible to correct for this discrepancy by \ufb01nding class-pair speci\ufb01c terms to adjust the likelihood ratio directly, and that can make class-pair preference relationships in- transitive. In this work, we introduce new methodology that makes nec- essary corrections to the likelihood ratio, speci\ufb01cally those that are nec- essary to achieve perfect classi\ufb01cation (but not perfect likelihood-ratio correction which can be overkill). The new corrections, while weaker than previously reported such adjustments, are analytically challenging since they involve discontinuous functions, therefore requiring several approximations. We test a number of these new schemes on an isolated- word speech recognition task as well as on the UCI machine learning data sets. Results show that by using the bias terms calculated in this new way, classi\ufb01cation accuracy can substantially improve over both the baseline and over our previous results.", "bibtex": "@inproceedings{NIPS2003_4a1590df,\n author = {Ji, Gang and Bilmes, Jeff A},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Necessary Intransitive Likelihood-Ratio Classifiers},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/4a1590df1d5968d41b855005bb8b67bf-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/4a1590df1d5968d41b855005bb8b67bf-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/4a1590df1d5968d41b855005bb8b67bf-Metadata.json", "review": "", "metareview": "", "pdf_size": 100823, "gs_citation": 1, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5576979243401848613&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "SSLI-Lab, Department of Electrical Engineering, University of Washington; SSLI-Lab, Department of Electrical Engineering, University of Washington", "aff_domain": "ee.washington.edu;ee.washington.edu", "email": "ee.washington.edu;ee.washington.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of Washington", "aff_unique_dep": "Department of Electrical Engineering", "aff_unique_url": "https://www.washington.edu", "aff_unique_abbr": "UW", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Seattle", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "c4f2c00a15", "title": "New Algorithms for Efficient High Dimensional Non-parametric Classification", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/57bafb2c2dfeefba931bb03a835b1fa9-Abstract.html", "author": "Ting liu; Andrew W. Moore; Alexander Gray", "abstract": "Alexander Gray", "bibtex": "@inproceedings{NIPS2003_57bafb2c,\n author = {liu, Ting and Moore, Andrew and Gray, Alexander},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {New Algorithms for Efficient High Dimensional Non-parametric Classification},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/57bafb2c2dfeefba931bb03a835b1fa9-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/57bafb2c2dfeefba931bb03a835b1fa9-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/57bafb2c2dfeefba931bb03a835b1fa9-Metadata.json", "review": "", "metareview": "", "pdf_size": 75683, "gs_citation": 215, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13487353261236300699&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 13, "aff": "Computer Science Dept., Carnegie Mellon University, Pittsburgh, PA 15213; Computer Science Dept., Carnegie Mellon University, Pittsburgh, PA 15213; Computer Science Dept., Carnegie Mellon University, Pittsburgh, PA 15213", "aff_domain": "cs.cmu.edu;cs.cmu.edu;cs.cmu.edu", "email": "cs.cmu.edu;cs.cmu.edu;cs.cmu.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "Carnegie Mellon University", "aff_unique_dep": "Computer Science Dept.", "aff_unique_url": "https://www.cmu.edu", "aff_unique_abbr": "CMU", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Pittsburgh", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "1450cdaa78", "title": "No Unbiased Estimator of the Variance of K-Fold Cross-Validation", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/e82c4b19b8151ddc25d4d93baf7b908f-Abstract.html", "author": "Yoshua Bengio; Yves Grandvalet", "abstract": "Most machine learning researchers perform quantitative experiments to estimate generalization error and compare algorithm performances. In order to draw statistically convincing conclusions, it is important to esti- mate the uncertainty of such estimates. This paper studies the estimation of uncertainty around the K-fold cross-validation estimator. The main theorem shows that there exists no universal unbiased estimator of the variance of K-fold cross-validation. An analysis based on the eigende- composition of the covariance matrix of errors helps to better understand the nature of the problem and shows that naive estimators may grossly underestimate variance, as con\u00a3rmed by numerical experiments.", "bibtex": "@inproceedings{NIPS2003_e82c4b19,\n author = {Bengio, Yoshua and Grandvalet, Yves},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {No Unbiased Estimator of the Variance of K-Fold Cross-Validation},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/e82c4b19b8151ddc25d4d93baf7b908f-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/e82c4b19b8151ddc25d4d93baf7b908f-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/e82c4b19b8151ddc25d4d93baf7b908f-Metadata.json", "review": "", "metareview": "", "pdf_size": 94278, "gs_citation": 1526, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7208882207537029325&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 33, "aff": ";", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster" }, { "id": "03bdea1962", "title": "Non-linear CCA and PCA by Alignment of Local Models", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/24e27b869b66e9e62724bd7725d5d9c1-Abstract.html", "author": "Jakob J. Verbeek; Sam T. Roweis; Nikos Vlassis", "abstract": "We propose a non-linear Canonical Correlation Analysis (CCA) method which works by coordinating or aligning mixtures of linear models. In the same way that CCA extends the idea of PCA, our work extends re- cent methods for non-linear dimensionality reduction to the case where multiple embeddings of the same underlying low dimensional coordi- nates are observed, each lying on a different high dimensional manifold. We also show that a special case of our method, when applied to only a single manifold, reduces to the Laplacian Eigenmaps algorithm. As with previous alignment schemes, once the mixture models have been estimated, all of the parameters of our model can be estimated in closed form without local optima in the learning. Experimental results illustrate the viability of the approach as a non-linear extension of CCA.", "bibtex": "@inproceedings{NIPS2003_24e27b86,\n author = {Verbeek, Jakob and Roweis, Sam and Vlassis, Nikos},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Non-linear CCA and PCA by Alignment of Local Models},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/24e27b869b66e9e62724bd7725d5d9c1-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/24e27b869b66e9e62724bd7725d5d9c1-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/24e27b869b66e9e62724bd7725d5d9c1-Metadata.json", "review": "", "metareview": "", "pdf_size": 248980, "gs_citation": 68, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17268144082943231858&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 13, "aff": "Informatics Institute, University of Amsterdam; Department of Computer Science, University of Toronto; Informatics Institute, University of Amsterdam", "aff_domain": "uva.nl;cs.toronto.edu;uva.nl", "email": "uva.nl;cs.toronto.edu;uva.nl", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;0", "aff_unique_norm": "University of Amsterdam;University of Toronto", "aff_unique_dep": "Informatics Institute;Department of Computer Science", "aff_unique_url": "https://www.uva.nl;https://www.utoronto.ca", "aff_unique_abbr": "UvA;U of T", "aff_campus_unique_index": "1", "aff_campus_unique": ";Toronto", "aff_country_unique_index": "0;1;0", "aff_country_unique": "Netherlands;Canada" }, { "id": "01c3db7b3c", "title": "Nonlinear Filtering of Electron Micrographs by Means of Support Vector Regression", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/696b35cc35e710279b9c2dedc08e22d7-Abstract.html", "author": "Roland Vollgraf; Michael Scholz; Ian A. Meinertzhagen; Klaus Obermayer", "abstract": "Nonlinear (cid:12)ltering can solve very complex problems, but typically involve very time consuming calculations. Here we show that for (cid:12)lters that are constructed as a RBF network with Gaussian basis functions, a decomposition into linear (cid:12)lters exists, which can be computed e(cid:14)ciently in the frequency domain, yielding dramatic improvement in speed. We present an application of this idea to image processing. In electron micrograph images of photoreceptor terminals of the fruit (cid:13)y, Drosophila, synaptic vesicles containing neurotransmitter should be detected and labeled automatically. We use hand labels, provided by human experts, to learn a RBF (cid:12)lter using Support Vector Regression with Gaussian kernels. We will show that the resulting nonlinear (cid:12)lter solves the task to a degree of accuracy, which is close to what can be achieved by human experts. This allows the very time consuming task of data evaluation to be done e(cid:14)ciently.", "bibtex": "@inproceedings{NIPS2003_696b35cc,\n author = {Vollgraf, Roland and Scholz, Michael and Meinertzhagen, Ian and Obermayer, Klaus},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Nonlinear Filtering of Electron Micrographs by Means of Support Vector Regression},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/696b35cc35e710279b9c2dedc08e22d7-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/696b35cc35e710279b9c2dedc08e22d7-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/696b35cc35e710279b9c2dedc08e22d7-Metadata.json", "review": "", "metareview": "", "pdf_size": 211679, "gs_citation": 9, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17001110124354370280&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 6, "aff": "Department of Electrical Engineering and Computer Science, Berlin University of Technology, Germany; Department of Electrical Engineering and Computer Science, Berlin University of Technology, Germany; Dalhousie University, Halifax, Canada; Department of Electrical Engineering and Computer Science, Berlin University of Technology, Germany", "aff_domain": "cs.tu-berlin.de;cs.tu-berlin.de;is.dal.ca;cs.tu-berlin.de", "email": "cs.tu-berlin.de;cs.tu-berlin.de;is.dal.ca;cs.tu-berlin.de", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;0;1;0", "aff_unique_norm": "Berlin University of Technology;Dalhousie University", "aff_unique_dep": "Department of Electrical Engineering and Computer Science;", "aff_unique_url": "https://www.tu-berlin.de;https://www.dal.ca", "aff_unique_abbr": "TU Berlin;Dal", "aff_campus_unique_index": "0;0;1;0", "aff_campus_unique": "Berlin;Halifax", "aff_country_unique_index": "0;0;1;0", "aff_country_unique": "Germany;Canada" }, { "id": "d7e68627b9", "title": "Nonlinear Processing in LGN Neurons", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/2aedcba61ca55ceb62d785c6b7f10a83-Abstract.html", "author": "Vincent Bonin; Valerio Mante; Matteo Carandini", "abstract": "According to a widely held view, neurons in lateral geniculate nucleus (LGN) operate on visual stimuli in a linear fashion. There is ample evidence, however, that LGN responses are not entirely linear. To account for nonlinearities we propose a model that synthesizes more than 30 years of research in the field. Model neurons have a linear receptive field, and a nonlinear, divisive suppressive field. The suppressive field computes local root-mean- square contrast. To test this model we recorded responses from LGN of anesthetized paralyzed cats. We estimate model parameters from a basic set of measurements and show that the model can accurately predict responses to novel stimuli. The model might serve as the new standard model of LGN responses. It specifies how visual processing in LGN involves both linear filtering and divisive gain control.", "bibtex": "@inproceedings{NIPS2003_2aedcba6,\n author = {Bonin, Vincent and Mante, Valerio and Carandini, Matteo},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Nonlinear Processing in LGN Neurons},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/2aedcba61ca55ceb62d785c6b7f10a83-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/2aedcba61ca55ceb62d785c6b7f10a83-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/2aedcba61ca55ceb62d785c6b7f10a83-Metadata.json", "review": "", "metareview": "", "pdf_size": 128982, "gs_citation": 10, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2010855200833389550&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Smith-Kettlewell Eye Research Institute; Institute of Neuroinformatics; Institute of Neuroinformatics", "aff_domain": "ski.org;ski.org;ski.org", "email": "ski.org;ski.org;ski.org", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;1", "aff_unique_norm": "Smith-Kettlewell Eye Research Institute;Institute of Neuroinformatics", "aff_unique_dep": ";", "aff_unique_url": "https://www.ski.org;https://www.ini.uzh.ch", "aff_unique_abbr": "SKI;", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1;1", "aff_country_unique": "United States;Switzerland" }, { "id": "085a7d9183", "title": "Nonstationary Covariance Functions for Gaussian Process Regression", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/326a8c055c0d04f5b06544665d8bb3ea-Abstract.html", "author": "Christopher J. Paciorek; Mark J. Schervish", "abstract": "We introduce a class of nonstationary covariance functions for Gaussian process (GP) regression. Nonstationary covariance functions allow the model to adapt to functions whose smoothness varies with the inputs. The class includes a nonstationary version of the Mat\u00e9rn stationary co- variance, in which the differentiability of the regression function is con- trolled by a parameter, freeing one from \ufb01xing the differentiability in advance. In experiments, the nonstationary GP regression model per- forms well when the input space is two or three dimensions, outperform- ing a neural network model and Bayesian free-knot spline models, and competitive with a Bayesian neural network, but is outperformed in one dimension by a state-of-the-art Bayesian free-knot spline model. The model readily generalizes to non-Gaussian data. Use of computational methods for speeding GP \ufb01tting may allow for implementation of the method on larger datasets.", "bibtex": "@inproceedings{NIPS2003_326a8c05,\n author = {Paciorek, Christopher and Schervish, Mark},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Nonstationary Covariance Functions for Gaussian Process Regression},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/326a8c055c0d04f5b06544665d8bb3ea-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/326a8c055c0d04f5b06544665d8bb3ea-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/326a8c055c0d04f5b06544665d8bb3ea-Metadata.json", "review": "", "metareview": "", "pdf_size": 128671, "gs_citation": 554, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8009646219532351500&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "Department of Statistics, Carnegie Mellon University; Department of Statistics, Carnegie Mellon University", "aff_domain": "alumni.cmu.edu;stat.cmu.edu", "email": "alumni.cmu.edu;stat.cmu.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Carnegie Mellon University", "aff_unique_dep": "Department of Statistics", "aff_unique_url": "https://www.cmu.edu", "aff_unique_abbr": "CMU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "d6c74938e8", "title": "On the Concentration of Expectation and Approximate Inference in Layered Networks", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/ddeebdeefdb7e7e7a697e1c3e3d8ef54-Abstract.html", "author": "Xuanlong Nguyen; Michael I. Jordan", "abstract": "We present an analysis of concentration-of-expectation phenomena in layered Bayesian networks that use generalized linear models as the local conditional probabilities. This framework encompasses a wide variety of probability distributions, including both discrete and continuous random variables. We utilize ideas from large deviation analysis and the delta method to devise and evaluate a class of approximate inference algo- rithms for layered Bayesian networks that have superior asymptotic error bounds and very fast computation time.", "bibtex": "@inproceedings{NIPS2003_ddeebdee,\n author = {Nguyen, XuanLong and Jordan, Michael},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {On the Concentration of Expectation and Approximate Inference in Layered Networks},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/ddeebdeefdb7e7e7a697e1c3e3d8ef54-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/ddeebdeefdb7e7e7a697e1c3e3d8ef54-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/ddeebdeefdb7e7e7a697e1c3e3d8ef54-Metadata.json", "review": "", "metareview": "", "pdf_size": 88189, "gs_citation": 5, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12166983100529657988&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 14, "aff": "University of California, Berkeley, CA 94720; University of California, Berkeley, CA 94720", "aff_domain": "cs.berkeley.edu;cs.berkeley.edu", "email": "cs.berkeley.edu;cs.berkeley.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of California, Berkeley", "aff_unique_dep": "", "aff_unique_url": "https://www.berkeley.edu", "aff_unique_abbr": "UC Berkeley", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Berkeley", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "c52e9ddb23", "title": "On the Dynamics of Boosting", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/0747b9be4f90056c30eb5241f06bfe9b-Abstract.html", "author": "Cynthia Rudin; Ingrid Daubechies; Robert E. Schapire", "abstract": "In order to understand AdaBoost\u2019s dynamics, especially its ability to maximize margins, we derive an associated simpli\ufb01ed nonlinear iterated map and analyze its behavior in low-dimensional cases. We \ufb01nd stable cycles for these cases, which can explicitly be used to solve for Ada- Boost\u2019s output. By considering AdaBoost as a dynamical system, we are able to prove R\u00a8atsch and Warmuth\u2019s conjecture that AdaBoost may fail to converge to a maximal-margin combined classi\ufb01er when given a \u2018non- optimal\u2019 weak learning algorithm. AdaBoost is known to be a coordinate descent method, but other known algorithms that explicitly aim to max- imize the margin (such as AdaBoost\u2044 and arc-gv) are not. We consider a differentiable function for which coordinate ascent will yield a maxi- mum margin solution. We then make a simple approximation to derive a new boosting algorithm whose updates are slightly more aggressive than those of arc-gv.", "bibtex": "@inproceedings{NIPS2003_0747b9be,\n author = {Rudin, Cynthia and Daubechies, Ingrid and Schapire, Robert E},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {On the Dynamics of Boosting},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/0747b9be4f90056c30eb5241f06bfe9b-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/0747b9be4f90056c30eb5241f06bfe9b-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/0747b9be4f90056c30eb5241f06bfe9b-Metadata.json", "review": "", "metareview": "", "pdf_size": 203406, "gs_citation": 19, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10623372125417083682&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 17, "aff": "Princeton University; Princeton University; Princeton University", "aff_domain": "math.princeton.edu;math.princeton.edu;cs.princeton.edu", "email": "math.princeton.edu;math.princeton.edu;cs.princeton.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "Princeton University", "aff_unique_dep": "", "aff_unique_url": "https://www.princeton.edu", "aff_unique_abbr": "Princeton", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "288f1b0fd4", "title": "One Microphone Blind Dereverberation Based on Quasi-periodicity of Speech Signals", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/5314b9674c86e3f9d1ba25ef9bb32895-Abstract.html", "author": "Tomohiro Nakatani; Masato Miyoshi; Keisuke Kinoshita", "abstract": "Speech dereverberation is desirable with a view to achieving, for exam- ple, robust speech recognition in the real world. However, it is still a chal- lenging problem, especially when using a single microphone. Although blind equalization techniques have been exploited, they cannot deal with speech signals appropriately because their assumptions are not satis\ufb01ed by speech signals. We propose a new dereverberation principle based on an inherent property of speech signals, namely quasi-periodicity. The present methods learn the dereverberation \ufb01lter from a lot of speech data with no prior knowledge of the data, and can achieve high quality speech dereverberation especially when the reverberation time is long.", "bibtex": "@inproceedings{NIPS2003_5314b967,\n author = {Nakatani, Tomohiro and Miyoshi, Masato and Kinoshita, Keisuke},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {One Microphone Blind Dereverberation Based on Quasi-periodicity of Speech Signals},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/5314b9674c86e3f9d1ba25ef9bb32895-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/5314b9674c86e3f9d1ba25ef9bb32895-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/5314b9674c86e3f9d1ba25ef9bb32895-Metadata.json", "review": "", "metareview": "", "pdf_size": 208844, "gs_citation": 33, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3116487895262360482&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": ";;", "aff_domain": ";;", "email": ";;", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster" }, { "id": "0d3bcf5a5c", "title": "Online Classification on a Budget", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/1a68e5f4ade56ed1d4bf273e55510750-Abstract.html", "author": "Koby Crammer; Jaz Kandola; Yoram Singer", "abstract": "Online algorithms for classi\ufb01cation often require vast amounts of mem- ory and computation time when employed in conjunction with kernel functions. In this paper we describe and analyze a simple approach for an on-the-\ufb02y reduction of the number of past examples used for prediction. Experiments performed with real datasets show that using the proposed algorithmic approach with a single epoch is competitive with the sup- port vector machine (SVM) although the latter, being a batch algorithm, accesses each training example multiple times.", "bibtex": "@inproceedings{NIPS2003_1a68e5f4,\n author = {Crammer, Koby and Kandola, Jaz and Singer, Yoram},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Online Classification on a Budget},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/1a68e5f4ade56ed1d4bf273e55510750-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/1a68e5f4ade56ed1d4bf273e55510750-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/1a68e5f4ade56ed1d4bf273e55510750-Metadata.json", "review": "", "metareview": "", "pdf_size": 85951, "gs_citation": 215, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=887485722333863444&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "Computer Sci. & Eng., Hebrew University, Jerusalem 91904, Israel; Royal Holloway, University of London, Egham, UK; Computer Sci. & Eng., Hebrew University, Jerusalem 91904, Israel", "aff_domain": "cs.huji.ac.il;cs.rhul.ac.uk;cs.huji.ac.il", "email": "cs.huji.ac.il;cs.rhul.ac.uk;cs.huji.ac.il", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;0", "aff_unique_norm": "Hebrew University;University of London", "aff_unique_dep": "Computer Sci. 
& Eng.;Royal Holloway", "aff_unique_url": "http://www.huji.ac.il;https://www.royalholloway.ac.uk", "aff_unique_abbr": "HUJI;RHUL", "aff_campus_unique_index": "0;1;0", "aff_campus_unique": "Jerusalem;Egham", "aff_country_unique_index": "0;1;0", "aff_country_unique": "Israel;United Kingdom" }, { "id": "8a91d439b4", "title": "Online Learning of Non-stationary Sequences", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/feecee9f1643651799ede2740927317a-Abstract.html", "author": "Claire Monteleoni; Tommi S. Jaakkola", "abstract": "We consider an online learning scenario in which the learner can make predictions on the basis of a \ufb01xed set of experts. We derive upper and lower relative loss bounds for a class of universal learning algorithms in- volving a switching dynamics over the choice of the experts. On the basis of the performance bounds we provide the optimal a priori discretiza- tion for learning the parameter that governs the switching dynamics. We demonstrate the new algorithm in the context of wireless networks.", "bibtex": "@inproceedings{NIPS2003_feecee9f,\n author = {Monteleoni, Claire and Jaakkola, Tommi},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Online Learning of Non-stationary Sequences},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/feecee9f1643651799ede2740927317a-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/feecee9f1643651799ede2740927317a-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/feecee9f1643651799ede2740927317a-Metadata.json", "review": "", "metareview": "", "pdf_size": 115716, "gs_citation": 76, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4250815118906167114&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 19, "aff": "MIT Computer Science and Artificial Intelligence Laboratory; MIT Computer Science and Artificial Intelligence Laboratory", "aff_domain": "ai.mit.edu;ai.mit.edu", "email": "ai.mit.edu;ai.mit.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Massachusetts Institute of Technology", "aff_unique_dep": "Computer Science and Artificial Intelligence Laboratory", "aff_unique_url": "https://www.csail.mit.edu", "aff_unique_abbr": "MIT CSAIL", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Cambridge", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "21ed87eea5", "title": "Online Learning via Global Feedback for Phrase Recognition", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/6be93f7a96fed60c477d30ae1de032fd-Abstract.html", "author": "Xavier Carreras; Llu\u00eds M\u00e0rquez", "abstract": "This work presents an architecture based on perceptrons to recognize phrase structures, and an online learning algorithm to train the percep- trons together and dependently. The recognition strategy applies learning in two layers: a \ufb01ltering layer, which reduces the search space by identi- fying plausible phrase candidates, and a ranking layer, which recursively builds the optimal phrase structure. We provide a recognition-based feed- back rule which re\ufb02ects to each local function its committed errors from a global point of view, and allows to train them together online as percep- trons. 
Experimentation on a syntactic parsing problem, the recognition of clause hierarchies, improves state-of-the-art results and evinces the advantages of our global training method over optimizing each function locally and independently.", "bibtex": "@inproceedings{NIPS2003_6be93f7a,\n author = {Carreras, Xavier and M\\`{a}rquez, Llu\\'{\\i}s},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Online Learning via Global Feedback for Phrase Recognition},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/6be93f7a96fed60c477d30ae1de032fd-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/6be93f7a96fed60c477d30ae1de032fd-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/6be93f7a96fed60c477d30ae1de032fd-Metadata.json", "review": "", "metareview": "", "pdf_size": 106215, "gs_citation": 15, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10170328343918101805&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "TALP Research Center, LSI Department, Technical University of Catalonia (UPC); TALP Research Center, LSI Department, Technical University of Catalonia (UPC)", "aff_domain": "lsi.upc.es;lsi.upc.es", "email": "lsi.upc.es;lsi.upc.es", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Technical University of Catalonia", "aff_unique_dep": "LSI Department", "aff_unique_url": "https://www.upc.edu", "aff_unique_abbr": "UPC", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "Spain" }, { "id": "f9e36b3953", "title": "Online Passive-Aggressive Algorithms", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/4ebd440d99504722d80de606ea8507da-Abstract.html", "author": "Shai Shalev-shwartz; Koby Crammer; Ofer Dekel; Yoram Singer", "abstract": "We present a uni\ufb01ed view for online classi\ufb01cation, regression, and uni- class problems. This view leads to a single algorithmic framework for the three problems. We prove worst case loss bounds for various algorithms for both the realizable case and the non-realizable case. A conversion of our main online algorithm to the setting of batch learning is also dis- cussed. The end result is new algorithms and accompanying loss bounds for the hinge-loss.", "bibtex": "@inproceedings{NIPS2003_4ebd440d,\n author = {Shalev-shwartz, Shai and Crammer, Koby and Dekel, Ofer and Singer, Yoram},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Online Passive-Aggressive Algorithms},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/4ebd440d99504722d80de606ea8507da-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/4ebd440d99504722d80de606ea8507da-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/4ebd440d99504722d80de606ea8507da-Metadata.json", "review": "", "metareview": "", "pdf_size": 80628, "gs_citation": 2702, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12223766143218461673&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 31, "aff": "School of Computer Science & Engineering, The Hebrew University, Jerusalem 91904, Israel; School of Computer Science & Engineering, The Hebrew University, Jerusalem 91904, Israel; School of Computer Science & Engineering, The Hebrew University, Jerusalem 91904, Israel; School of Computer Science & Engineering, The Hebrew University, Jerusalem 91904, Israel", "aff_domain": "cs.huji.ac.il;cs.huji.ac.il;cs.huji.ac.il;cs.huji.ac.il", "email": "cs.huji.ac.il;cs.huji.ac.il;cs.huji.ac.il;cs.huji.ac.il", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Hebrew University", "aff_unique_dep": "School of Computer Science & Engineering", "aff_unique_url": "http://www.huji.ac.il", "aff_unique_abbr": "HUJI", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Jerusalem", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "Israel" }, { "id": "dc90606630", "title": "Optimal Manifold Representation of Data: An Information Theoretic Approach", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/bc7f621451b4f5df308a8e098112185d-Abstract.html", "author": "Denis V. Chigirev; William Bialek", "abstract": "We introduce an information theoretic method for nonparametric, non- linear dimensionality reduction, based on the in\ufb01nite cluster limit of rate distortion theory. By constraining the information available to manifold coordinates, a natural probabilistic map emerges that assigns original data to corresponding points on a lower dimensional manifold. With only the information-distortion trade off as a parameter, our method de- termines the shape of the manifold, its dimensionality, the probabilistic map and the prior that provide optimal description of the data.", "bibtex": "@inproceedings{NIPS2003_bc7f6214,\n author = {Chigirev, Denis and Bialek, William},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Optimal Manifold Representation of Data: An Information Theoretic Approach},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/bc7f621451b4f5df308a8e098112185d-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/bc7f621451b4f5df308a8e098112185d-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/bc7f621451b4f5df308a8e098112185d-Metadata.json", "review": "", "metareview": "", "pdf_size": 537426, "gs_citation": 27, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4620766094602822193&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 14, "aff": "Department of Physics and the Lewis-Sigler Institute for Integrative Genomics, Princeton University, Princeton, New Jersey 08544; Department of Physics and the Lewis-Sigler Institute for Integrative Genomics, Princeton University, Princeton, New Jersey 08544", "aff_domain": "princeton.edu;princeton.edu", "email": "princeton.edu;princeton.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Princeton University", "aff_unique_dep": "Department of Physics", "aff_unique_url": "https://www.princeton.edu", "aff_unique_abbr": "Princeton", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Princeton", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "b36ddcf06a", "title": "Out-of-Sample Extensions for LLE, Isomap, MDS, Eigenmaps, and Spectral Clustering", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/cf05968255451bdefe3c5bc64d550517-Abstract.html", "author": "Yoshua Bengio; Jean-fran\u00e7cois Paiement; Pascal Vincent; Olivier Delalleau; Nicolas L. Roux; Marie Ouimet", "abstract": "Several unsupervised learning algorithms based on an eigendecompo- sition provide either an embedding or a clustering only for given train- ing points, with no straightforward extension for out-of-sample examples short of recomputing eigenvectors. This paper provides a uni\ufb01ed frame- work for extending Local Linear Embedding (LLE), Isomap, Laplacian Eigenmaps, Multi-Dimensional Scaling (for dimensionality reduction) as well as for Spectral Clustering. This framework is based on seeing these algorithms as learning eigenfunctions of a data-dependent kernel. Numerical experiments show that the generalizations performed have a level of error comparable to the variability of the embedding algorithms due to the choice of training data.", "bibtex": "@inproceedings{NIPS2003_cf059682,\n author = {Bengio, Yoshua and Paiement, Jean-fran\\c{c}cois and Vincent, Pascal and Delalleau, Olivier and Roux, Nicolas and Ouimet, Marie},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Out-of-Sample Extensions for LLE, Isomap, MDS, Eigenmaps, and Spectral Clustering},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/cf05968255451bdefe3c5bc64d550517-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/cf05968255451bdefe3c5bc64d550517-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/cf05968255451bdefe3c5bc64d550517-Metadata.json", "review": "", "metareview": "", "pdf_size": 77687, "gs_citation": 1458, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11495997787599276624&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 20, "aff": "D\u00b4epartement d\u2019Informatique et Recherche Op\u00b4erationnelle, Universit\u00b4e de Montr\u00b4eal; D\u00b4epartement d\u2019Informatique et Recherche Op\u00b4erationnelle, Universit\u00b4e de Montr\u00b4eal; D\u00b4epartement d\u2019Informatique et Recherche Op\u00b4erationnelle, Universit\u00b4e de Montr\u00b4eal; D\u00b4epartement d\u2019Informatique et Recherche Op\u00b4erationnelle, Universit\u00b4e de Montr\u00b4eal; D\u00b4epartement d\u2019Informatique et Recherche Op\u00b4erationnelle, Universit\u00b4e de Montr\u00b4eal; D\u00b4epartement d\u2019Informatique et Recherche Op\u00b4erationnelle, Universit\u00b4e de Montr\u00b4eal", "aff_domain": "iro.umontreal.ca;iro.umontreal.ca;iro.umontreal.ca;iro.umontreal.ca;iro.umontreal.ca;iro.umontreal.ca", "email": "iro.umontreal.ca;iro.umontreal.ca;iro.umontreal.ca;iro.umontreal.ca;iro.umontreal.ca;iro.umontreal.ca", "github": "", "project": "", "author_num": 6, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;0;0;0", "aff_unique_norm": "Universit u00e9 de Montr u00eal", "aff_unique_dep": "D u00e9partement d\u2019Informatique et Recherche Op u00e9rationnelle", "aff_unique_url": "https://www.umontreal.ca", "aff_unique_abbr": "UdeM", "aff_campus_unique_index": "0;0;0;0;0;0", "aff_campus_unique": "Montr u00eal", "aff_country_unique_index": "0;0;0;0;0;0", "aff_country_unique": "Canada" }, { "id": "215693bb45", "title": "PAC-Bayesian Generic Chaining", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/8b4224068a41c5d37f5e2d54f3995089-Abstract.html", "author": "Jean-yves Audibert; Olivier Bousquet", "abstract": "There exist many different generalization error bounds for classi\ufb01cation. Each of these bounds contains an improvement over the others for cer- tain situations. Our goal is to combine these different improvements into a single bound. In particular we combine the PAC-Bayes approach intro- duced by McAllester [1], which is interesting for averaging classi\ufb01ers, with the optimal union bound provided by the generic chaining technique developed by Fernique and Talagrand [2]. This combination is quite nat- ural since the generic chaining is based on the notion of majorizing mea- sures, which can be considered as priors on the set of classi\ufb01ers, and such priors also arise in the PAC-bayesian setting.", "bibtex": "@inproceedings{NIPS2003_8b422406,\n author = {Audibert, Jean-yves and Bousquet, Olivier},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {PAC-Bayesian Generic Chaining},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/8b4224068a41c5d37f5e2d54f3995089-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/8b4224068a41c5d37f5e2d54f3995089-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/8b4224068a41c5d37f5e2d54f3995089-Metadata.json", "review": "", "metareview": "", "pdf_size": 93117, "gs_citation": 20, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6047860493130808936&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "Universit \u00b4e Paris 6, Laboratoire de Probabilit \u00b4es et Mod `eles al \u00b4eatoires+CREST, ENSAE, Laboratoire de Finance et Assurance; Max Planck Institute for Biological Cybernetics", "aff_domain": "ccr.jussieu.fr;tuebingen.mpg.de", "email": "ccr.jussieu.fr;tuebingen.mpg.de", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0+1;2", "aff_unique_norm": "Universit\u00e9 Paris 6;CREST;Max Planck Institute for Biological Cybernetics", "aff_unique_dep": "Laboratoire de Probabilit\u00e9s et Mod\u00e8les al\u00e9atoires;Laboratoire de Finance et Assurance;Biological Cybernetics", "aff_unique_url": "https://www.upmc.fr;https://www.crest.fr;https://www.biocybernetics.mpg.de", "aff_unique_abbr": "UP6;;MPIBC", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0+0;1", "aff_country_unique": "France;Germany" }, { "id": "d878454461", "title": "Pairwise Clustering and Graphical Models", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/1bf0c59238dd24a7f09a889483a50e8f-Abstract.html", "author": "Noam Shental; Assaf Zomet; Tomer Hertz; Yair Weiss", "abstract": "Signi(cid:2)cant progress in clustering has been achieved by algorithms that are based on pairwise af(cid:2)nities between the datapoints. In particular, spectral clustering methods have the advantage of being able to divide arbitrarily shaped clusters and are based on ef(cid:2)cient eigenvector calcu- lations. However, spectral methods lack a straightforward probabilistic interpretation which makes it dif(cid:2)cult to automatically set parameters us- ing training data. In this paper we use the previously proposed typical cut framework for pairwise clustering. We show an equivalence between calculating the typical cut and inference in an undirected graphical model. We show that for clustering problems with hundreds of datapoints exact inference may still be possible. For more complicated datasets, we show that loopy be- lief propagation (BP) and generalized belief propagation (GBP) can give excellent results on challenging clustering problems. We also use graph- ical models to derive a learning algorithm for af(cid:2)nity matrices based on labeled data.", "bibtex": "@inproceedings{NIPS2003_1bf0c592,\n author = {Shental, Noam and Zomet, Assaf and Hertz, Tomer and Weiss, Yair},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Pairwise Clustering and Graphical Models},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/1bf0c59238dd24a7f09a889483a50e8f-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/1bf0c59238dd24a7f09a889483a50e8f-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/1bf0c59238dd24a7f09a889483a50e8f-Metadata.json", "review": "", "metareview": "", "pdf_size": 882894, "gs_citation": 43, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13169155853953932847&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 17, "aff": "Computer Science & Eng. + Center for Neural Computation, Hebrew University of Jerusalem, Jerusalem, Israel 91904; Computer Science & Eng., Hebrew University of Jerusalem, Jerusalem, Israel 91904; Computer Science & Eng. + Center for Neural Computation, Hebrew University of Jerusalem, Jerusalem, Israel 91904; Computer Science & Eng. + Center for Neural Computation, Hebrew University of Jerusalem, Jerusalem, Israel 91904", "aff_domain": "cs.huji.ac.il;cs.huji.ac.il;cs.huji.ac.il;cs.huji.ac.il", "email": "cs.huji.ac.il;cs.huji.ac.il;cs.huji.ac.il;cs.huji.ac.il", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0+1;1;0+1;0+1", "aff_unique_norm": "Computer Science & Engineering;Hebrew University of Jerusalem", "aff_unique_dep": "Computer Science & Engineering;Center for Neural Computation", "aff_unique_url": ";https://www.huji.ac.il", "aff_unique_abbr": ";HUJI", "aff_campus_unique_index": "1;1;1;1", "aff_campus_unique": ";Jerusalem", "aff_country_unique_index": "1;1;1;1", "aff_country_unique": ";Israel" }, { "id": "e475b426d4", "title": "Parameterized Novelty Detectors for Environmental Sensor Monitoring", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/30aaf34d6afd4b11cc3b3ac4704c7908-Abstract.html", "author": "Cynthia Archer; Todd K. Leen; Ant\u00f3nio M. Baptista", "abstract": "As part of an environmental observation and forecasting system, sensors deployed in the Columbia RIver Estuary (CORIE) gather information on physical dynamics and changes in estuary habi- tat. Of these, salinity sensors are particularly susceptible to bio- fouling, which gradually degrades sensor response and corrupts crit- ical data. Automatic fault detectors have the capability to identify bio-fouling early and minimize data loss. Complicating the devel- opment of discriminatory classi(cid:12)ers is the scarcity of bio-fouling onset examples and the variability of the bio-fouling signature. To solve these problems, we take a novelty detection approach that incorporates a parameterized bio-fouling model. These detectors identify the occurrence of bio-fouling, and its onset time as reliably as human experts. Real-time detectors installed during the sum- mer of 2001 produced no false alarms, yet detected all episodes of sensor degradation before the (cid:12)eld sta(cid:11) scheduled these sensors for cleaning. From this initial deployment through February 2003, our bio-fouling detectors have essentially doubled the amount of useful data coming from the CORIE sensors.", "bibtex": "@inproceedings{NIPS2003_30aaf34d,\n author = {Archer, Cynthia and Leen, Todd and Baptista, Ant\\'{o}nio},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Parameterized Novelty Detectors for Environmental Sensor Monitoring},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/30aaf34d6afd4b11cc3b3ac4704c7908-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/30aaf34d6afd4b11cc3b3ac4704c7908-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/30aaf34d6afd4b11cc3b3ac4704c7908-Metadata.json", "review": "", "metareview": "", "pdf_size": 154255, "gs_citation": 7, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=334725374008962378&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": ";;", "aff_domain": ";;", "email": ";;", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster" }, { "id": "9df24e5c76", "title": "Perception of the Structure of the Physical World Using Unknown Multimodal Sensors and Effectors", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/20c9f5700da1088260df60fcc5df2b53-Abstract.html", "author": "D. Philipona; J.k. O'regan; J.-p. Nadal; Olivier Coenen", "abstract": "Is there a way for an algorithm linked to an unknown body to infer by itself information about this body and the world it is in? Taking the case of space for example, is there a way for this algorithm to realize that its body is in a three dimensional world? Is it possible for this algorithm to discover how to move in a straight line? And more basically: do these questions make any sense at all given that the algorithm only has access to the very high-dimensional data consisting of its sensory inputs and motor outputs? We demonstrate in this article how these questions can be given a positive answer. We show that it is possible to make an algorithm that, by ana- lyzing the law that links its motor outputs to its sensory inputs, discovers information about the structure of the world regardless of the devices constituting the body it is linked to. We present results from simulations demonstrating a way to issue motor orders resulting in \u201cfundamental\u201d movements of the body as regards the structure of the physical world.", "bibtex": "@inproceedings{NIPS2003_20c9f570,\n author = {Philipona, D. and O\\textquotesingle regan, J.k. and Nadal, J.-p. and Coenen, Olivier},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Perception of the Structure of the Physical World Using Unknown Multimodal Sensors and Effectors},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/20c9f5700da1088260df60fcc5df2b53-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/20c9f5700da1088260df60fcc5df2b53-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/20c9f5700da1088260df60fcc5df2b53-Metadata.json", "review": "", "metareview": "", "pdf_size": 168842, "gs_citation": 105, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2694340780306411921&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 16, "aff": ";;;", "aff_domain": ";;;", "email": ";;;", "github": "", "project": "http://nivea.psycho.univ-paris5.fr", "author_num": 4, "track": "main", "status": "Poster" }, { "id": "9863f9115f", "title": "Perspectives on Sparse Bayesian Learning", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/52cf49fea5ff66588408852f65cf8272-Abstract.html", "author": "Jason Palmer; Bhaskar D. Rao; David P. Wipf", "abstract": "Recently, relevance vector machines (RVM) have been fashioned from a sparse Bayesian learning (SBL) framework to perform supervised learn- ing using a weight prior that encourages sparsity of representation. The methodology incorporates an additional set of hyperparameters govern- ing the prior, one for each weight, and then adopts a speci\ufb01c approxi- mation to the full marginalization over all weights and hyperparameters. Despite its empirical success however, no rigorous motivation for this particular approximation is currently available. To address this issue, we demonstrate that SBL can be recast as the application of a rigorous vari- ational approximation to the full model by expressing the prior in a dual form. This formulation obviates the necessity of assuming any hyperpri- ors and leads to natural, intuitive explanations of why sparsity is achieved in practice.", "bibtex": "@inproceedings{NIPS2003_52cf49fe,\n author = {Palmer, Jason and Rao, Bhaskar and Wipf, David},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Perspectives on Sparse Bayesian Learning},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/52cf49fea5ff66588408852f65cf8272-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/52cf49fea5ff66588408852f65cf8272-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/52cf49fea5ff66588408852f65cf8272-Metadata.json", "review": "", "metareview": "", "pdf_size": 143822, "gs_citation": 92, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5508726819902475232&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Department of Electrical and Computer Engineering, University of California, San Diego, CA 92092; Department of Electrical and Computer Engineering, University of California, San Diego, CA 92092; Department of Electrical and Computer Engineering, University of California, San Diego, CA 92092", "aff_domain": "ucsd.edu;ucsd.edu;ece.ucsd.edu", "email": "ucsd.edu;ucsd.edu;ece.ucsd.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of California, San Diego", "aff_unique_dep": "Department of Electrical and Computer Engineering", "aff_unique_url": "https://www.ucsd.edu", "aff_unique_abbr": "UCSD", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "San Diego", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "4479782259", "title": "Phonetic Speaker Recognition with Support Vector Machines", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/a6d259bfbfa2062843ef543e21d7ec8e-Abstract.html", "author": "William M. Campbell; Joseph P. Campbell; Douglas A. Reynolds; Douglas A. Jones; Timothy R. Leek", "abstract": "A recent area of signi\ufb01cant progress in speaker recognition is the use of high level features\u2014idiolect, phonetic relations, prosody, discourse structure, etc. A speaker not only has a distinctive acoustic sound but uses language in a characteristic manner. Large corpora of speech data available in recent years allow experimentation with long term statistics of phone patterns, word patterns, etc. of an individual. We propose the use of support vector machines and term frequency analysis of phone se- quences to model a given speaker. To this end, we explore techniques for text categorization applied to the problem. We derive a new kernel based upon a linearization of likelihood ratio scoring. We introduce a new phone-based SVM speaker recognition approach that halves the er- ror rate of conventional phone-based approaches.", "bibtex": "@inproceedings{NIPS2003_a6d259bf,\n author = {Campbell, William and Campbell, Joseph and Reynolds, Douglas and Jones, Douglas and Leek, Timothy},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Phonetic Speaker Recognition with Support Vector Machines},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/a6d259bfbfa2062843ef543e21d7ec8e-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/a6d259bfbfa2062843ef543e21d7ec8e-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/a6d259bfbfa2062843ef543e21d7ec8e-Metadata.json", "review": "", "metareview": "", "pdf_size": 137177, "gs_citation": 209, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4664291305384792296&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": ";;;;", "aff_domain": ";;;;", "email": ";;;;", "github": "", "project": "", "author_num": 5, "track": "main", "status": "Poster" }, { "id": "d273adecd5", "title": "Plasticity Kernels and Temporal Statistics", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/a554f89dd61cabd2ff833d3468e2008a-Abstract.html", "author": "Peter Dayan; Michael H\u00e4usser; Michael London", "abstract": "Computational mysteries surround the kernels relating the magnitude and sign of changes in efficacy as a function of the time difference between pre- and post-synaptic activity at a synapse. One important idea34 is that kernels result from fil(cid:173) tering, ie an attempt by synapses to eliminate noise corrupting learning. This idea has hitherto been applied to trace learning rules; we apply it to experimentally-defined kernels, using it to reverse-engineer assumed signal statistics. We also extend it to consider the additional goal for filtering of weighting learning according to statistical surprise, as in the Z-score transform. This provides a fresh view of observed kernels and can lead to different, and more natural, signal statistics.", "bibtex": "@inproceedings{NIPS2003_a554f89d,\n author = {Dayan, Peter and H\\\"{a}usser, Michael and London, Michael},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Plasticity Kernels and Temporal Statistics},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/a554f89dd61cabd2ff833d3468e2008a-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/a554f89dd61cabd2ff833d3468e2008a-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/a554f89dd61cabd2ff833d3468e2008a-Metadata.json", "review": "", "metareview": "", "pdf_size": 877971, "gs_citation": 14, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5019929615714568497&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "GCNU; WIBR, Dept of Physiology; GCNU+WIBR, Dept of Physiology", "aff_domain": "gats5y.ucl.ac.uk;ucl.ac.uk;ucl.ac.uk", "email": "gats5y.ucl.ac.uk;ucl.ac.uk;ucl.ac.uk", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;0+1", "aff_unique_norm": "GCNU;WIBR", "aff_unique_dep": ";Dept of Physiology", "aff_unique_url": ";", "aff_unique_abbr": ";", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "", "aff_country_unique": "" }, { "id": "f512bea188", "title": "Policy Search by Dynamic Programming", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/3837a451cd0abc5ce4069304c5442c87-Abstract.html", "author": "J. A. Bagnell; Sham M. 
Kakade; Jeff G. Schneider; Andrew Y. Ng", "abstract": "We consider the policy search approach to reinforcement learning. We show that if a \u201cbaseline distribution\u201d is given (indicating roughly how often we expect a good policy to visit each state), then we can derive a policy search algorithm that terminates in a \ufb01nite number of steps, and for which we can provide non-trivial performance guarantees. We also demonstrate this algorithm on several grid-world POMDPs, a planar biped walking robot, and a double-pole balancing problem.", "bibtex": "@inproceedings{NIPS2003_3837a451,\n author = {Bagnell, J. and Kakade, Sham M and Schneider, Jeff and Ng, Andrew},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Policy Search by Dynamic Programming},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/3837a451cd0abc5ce4069304c5442c87-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/3837a451cd0abc5ce4069304c5442c87-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/3837a451cd0abc5ce4069304c5442c87-Metadata.json", "review": "", "metareview": "", "pdf_size": 187304, "gs_citation": 258, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13956646554137349512&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 31, "aff": ";;;", "aff_domain": ";;;", "email": ";;;", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster" }, { "id": "744640eb38", "title": "Predicting Speech Intelligibility from a Population of Neurons", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/777669af68dbccabc30c3b6bcaa81825-Abstract.html", "author": "Jeff Bondy; Ian Bruce; Suzanna Becker; Simon Haykin", "abstract": "Simon Haykin", "bibtex": "@inproceedings{NIPS2003_777669af,\n author = {Bondy, Jeff and Bruce, Ian and Becker, Suzanna and Haykin, Simon},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Predicting Speech Intelligibility from a Population of Neurons},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/777669af68dbccabc30c3b6bcaa81825-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/777669af68dbccabc30c3b6bcaa81825-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/777669af68dbccabc30c3b6bcaa81825-Metadata.json", "review": "", "metareview": "", "pdf_size": 223150, "gs_citation": 19, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12232831550767689435&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 12, "aff": "Dept. of Electrical Engineering, McMaster University, Hamilton, ON; Dept. of Electrical Engineering, McMaster University, Hamilton, ON; Dept. of Psychology, McMaster University; Dept. 
of Electrical Engineering, McMaster University, Hamilton, ON", "aff_domain": "soma.crl.mcmaster.ca;ieee.org;mcmaster.ca;mcmaster.ca", "email": "soma.crl.mcmaster.ca;ieee.org;mcmaster.ca;mcmaster.ca", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "McMaster University", "aff_unique_dep": "Department of Electrical Engineering", "aff_unique_url": "https://www.mcmaster.ca", "aff_unique_abbr": "McMaster", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Hamilton;", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "Canada" }, { "id": "c29a2a2571", "title": "Prediction on Spike Data Using Kernel Algorithms", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/d095a94d20dcaf7aa07301948549bede-Abstract.html", "author": "Jan Eichhorn; Andreas Tolias; Alexander Zien; Malte Kuss; Jason Weston; Nikos Logothetis; Bernhard Sch\u00f6lkopf; Carl E. Rasmussen", "abstract": "We report and compare the performance of different learning algorithms based on data from cortical recordings. The task is to predict the orienta- tion of visual stimuli from the activity of a population of simultaneously recorded neurons. We compare several ways of improving the coding of the input (i.e., the spike data) as well as of the output (i.e., the orienta- tion), and report the results obtained using different kernel algorithms.", "bibtex": "@inproceedings{NIPS2003_d095a94d,\n author = {Eichhorn, Jan and Tolias, Andreas and Zien, Alexander and Kuss, Malte and Weston, Jason and Logothetis, Nikos and Sch\\\"{o}lkopf, Bernhard and Rasmussen, Carl},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Prediction on Spike Data Using Kernel Algorithms},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/d095a94d20dcaf7aa07301948549bede-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/d095a94d20dcaf7aa07301948549bede-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/d095a94d20dcaf7aa07301948549bede-Metadata.json", "review": "", "metareview": "", "pdf_size": 84282, "gs_citation": 45, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2092199063846624784&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 13, "aff": "Max Planck Institute for Biological Cybernetics; Max Planck Institute for Biological Cybernetics; Max Planck Institute for Biological Cybernetics; Max Planck Institute for Biological Cybernetics; Max Planck Institute for Biological Cybernetics; Max Planck Institute for Biological Cybernetics; Max Planck Institute for Biological Cybernetics; Max Planck Institute for Biological Cybernetics", "aff_domain": "tuebingen.mpg.de;tuebingen.mpg.de;tuebingen.mpg.de;tuebingen.mpg.de;tuebingen.mpg.de;tuebingen.mpg.de;tuebingen.mpg.de;tuebingen.mpg.de", "email": "tuebingen.mpg.de;tuebingen.mpg.de;tuebingen.mpg.de;tuebingen.mpg.de;tuebingen.mpg.de;tuebingen.mpg.de;tuebingen.mpg.de;tuebingen.mpg.de", "github": "", "project": "", "author_num": 8, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;0;0;0;0;0", "aff_unique_norm": "Max Planck Institute for Biological Cybernetics", "aff_unique_dep": "Biological Cybernetics", "aff_unique_url": "https://www.biocybernetics.mpg.de", "aff_unique_abbr": "MPIBC", "aff_campus_unique_index": "", "aff_campus_unique": "", 
"aff_country_unique_index": "0;0;0;0;0;0;0;0", "aff_country_unique": "Germany" }, { "id": "cf2a2cb04a", "title": "Probabilistic Inference in Human Sensorimotor Processing", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/af5d5ef24881f3c3049a7b9bfe74d58b-Abstract.html", "author": "Konrad P. K\u00f6rding; Daniel M. Wolpert", "abstract": "When we learn a new motor skill, we have to contend with both the vari- ability inherent in our sensors and the task. The sensory uncertainty can be reduced by using information about the distribution of previously ex- perienced tasks. Here we impose a distribution on a novel sensorimotor task and manipulate the variability of the sensory feedback. We show that subjects internally represent both the distribution of the task as well as their sensory uncertainty. Moreover, they combine these two sources of information in a way that is qualitatively predicted by optimal Bayesian processing. We further analyze if the subjects can represent multimodal distributions such as mixtures of Gaussians. The results show that the CNS employs probabilistic models during sensorimotor learning even when the priors are multimodal.", "bibtex": "@inproceedings{NIPS2003_af5d5ef2,\n author = {K\\\"{o}rding, Konrad and Wolpert, Daniel M},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Probabilistic Inference in Human Sensorimotor Processing},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/af5d5ef24881f3c3049a7b9bfe74d58b-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/af5d5ef24881f3c3049a7b9bfe74d58b-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/af5d5ef24881f3c3049a7b9bfe74d58b-Metadata.json", "review": "", "metareview": "", "pdf_size": 582057, "gs_citation": 11, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9554448201338890598&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 17, "aff": "Institute of Neurology, UCL London, London WC1N 3BG, UK; Institute of Neurology, UCL London, London WC1N 3BG, UK", "aff_domain": "koerding.com;ion.ucl.ac.uk", "email": "koerding.com;ion.ucl.ac.uk", "github": "", "project": "www.koerding.com; www.wolpertlab.com", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University College London", "aff_unique_dep": "Institute of Neurology", "aff_unique_url": "https://www.ucl.ac.uk", "aff_unique_abbr": "UCL", "aff_campus_unique_index": "0;0", "aff_campus_unique": "London", "aff_country_unique_index": "0;0", "aff_country_unique": "United Kingdom" }, { "id": "1f0b4cfd91", "title": "Probabilistic Inference of Speech Signals from Phaseless Spectrograms", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/d20be76a86c0d71c75035fced631f874-Abstract.html", "author": "Kannan Achan; Sam T. Roweis; Brendan J. Frey", "abstract": "Many techniques for complex speech processing such as denoising and deconvolution, time/frequency warping, multiple speaker separation, and multiple microphone analysis operate on sequences of short-time power spectra (spectrograms), a representation which is often well-suited to these tasks. 
However, a signi\ufb01cant problem with algorithms that manipu- late spectrograms is that the output spectrogram does not include a phase component, which is needed to create a time-domain signal that has good perceptual quality. Here we describe a generative model of time-domain speech signals and their spectrograms, and show how an ef\ufb01cient opti- mizer can be used to \ufb01nd the maximum a posteriori speech signal, given the spectrogram. In contrast to techniques that alternate between esti- mating the phase and a spectrally-consistent signal, our technique di- rectly infers the speech signal, thus jointly optimizing the phase and a spectrally-consistent signal. We compare our technique with a standard method using signal-to-noise ratios, but we also provide audio \ufb01les on the web for the purpose of demonstrating the improvement in perceptual quality that our technique offers.", "bibtex": "@inproceedings{NIPS2003_d20be76a,\n author = {Achan, Kannan and Roweis, Sam and Frey, Brendan J},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Probabilistic Inference of Speech Signals from Phaseless Spectrograms},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/d20be76a86c0d71c75035fced631f874-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/d20be76a86c0d71c75035fced631f874-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/d20be76a86c0d71c75035fced631f874-Metadata.json", "review": "", "metareview": "", "pdf_size": 356179, "gs_citation": 40, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11047384119938344451&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": ";;", "aff_domain": ";;", "email": ";;", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster" }, { "id": "7636872958", "title": "Probability Estimates for Multi-Class Classification by Pairwise Coupling", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/03e7ef47cee6fa4ae7567394b99912b7-Abstract.html", "author": "Ting-fan Wu; Chih-jen Lin; Ruby C. Weng", "abstract": "Pairwise coupling is a popular multi-class classi\ufb01cation method that combines together all pairwise comparisons for each pair of classes. This paper presents two approaches for obtaining class probabilities. Both methods can be reduced to linear systems and are easy to implement. We show conceptually and experimentally that the proposed approaches are more stable than two existing popular methods: voting and [3].", "bibtex": "@inproceedings{NIPS2003_03e7ef47,\n author = {Wu, Ting-fan and Lin, Chih-jen and Weng, Ruby},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Probability Estimates for Multi-Class Classification by Pairwise Coupling},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/03e7ef47cee6fa4ae7567394b99912b7-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/03e7ef47cee6fa4ae7567394b99912b7-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/03e7ef47cee6fa4ae7567394b99912b7-Metadata.json", "review": "", "metareview": "", "pdf_size": 92712, "gs_citation": 2549, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15497451921352377561&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 21, "aff": ";;", "aff_domain": ";;", "email": ";;", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster" }, { "id": "543ae954ff", "title": "Ranking on Data Manifolds", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/2c3ddf4bf13852db711dd1901fb517fa-Abstract.html", "author": "Dengyong Zhou; Jason Weston; Arthur Gretton; Olivier Bousquet; Bernhard Sch\u00f6lkopf", "abstract": "The Google search engine has enjoyed huge success with its web page ranking algorithm, which exploits global, rather than local, hyperlink structure of the web using random walks. Here we propose a simple universal ranking algorithm for data lying in the Euclidean space, such as text or image data. The core idea of our method is to rank the data with respect to the intrinsic manifold structure collectively revealed by a great amount of data. Encouraging experimental results from synthetic, image, and text data illustrate the validity of our method.", "bibtex": "@inproceedings{NIPS2003_2c3ddf4b,\n author = {Zhou, Dengyong and Weston, Jason and Gretton, Arthur and Bousquet, Olivier and Sch\\\"{o}lkopf, Bernhard},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Ranking on Data Manifolds},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/2c3ddf4bf13852db711dd1901fb517fa-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/2c3ddf4bf13852db711dd1901fb517fa-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/2c3ddf4bf13852db711dd1901fb517fa-Metadata.json", "review": "", "metareview": "", "pdf_size": 235964, "gs_citation": 1001, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15742391550555919687&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 24, "aff": "Max Planck Institute for Biological Cybernetics, 72076 Tuebingen, Germany; Max Planck Institute for Biological Cybernetics, 72076 Tuebingen, Germany; Max Planck Institute for Biological Cybernetics, 72076 Tuebingen, Germany; Max Planck Institute for Biological Cybernetics, 72076 Tuebingen, Germany; Max Planck Institute for Biological Cybernetics, 72076 Tuebingen, Germany", "aff_domain": "tuebingen.mpg.de;tuebingen.mpg.de;tuebingen.mpg.de;tuebingen.mpg.de;tuebingen.mpg.de", "email": "tuebingen.mpg.de;tuebingen.mpg.de;tuebingen.mpg.de;tuebingen.mpg.de;tuebingen.mpg.de", "github": "", "project": "", "author_num": 5, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;0;0", "aff_unique_norm": "Max Planck Institute for Biological Cybernetics", "aff_unique_dep": "Biological Cybernetics", "aff_unique_url": "https://www.biocybernetics.mpg.de", "aff_unique_abbr": "MPIBC", "aff_campus_unique_index": "0;0;0;0;0", "aff_campus_unique": "Tuebingen", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "Germany" }, { "id": "a0f5da305f", "title": "Reasoning about Time and Knowledge in Neural Symbolic Learning Systems", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/347665597cbfaef834886adbb848011f-Abstract.html", "author": "Artur Garcez; Luis C. Lamb", "abstract": "We show that temporal logic and combinations of temporal logics and modal logics of knowledge can be effectively represented in ar(cid:173) tificial neural networks. We present a Translation Algorithm from temporal rules to neural networks, and show that the networks compute a fixed-point semantics of the rules. We also apply the translation to the muddy children puzzle, which has been used as a testbed for distributed multi-agent systems. We provide a complete solution to the puzzle with the use of simple neural networks, capa(cid:173) ble of reasoning about time and of knowledge acquisition through inductive learning.", "bibtex": "@inproceedings{NIPS2003_34766559,\n author = {Garcez, Artur and Lamb, Luis},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Reasoning about Time and Knowledge in Neural Symbolic Learning Systems},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/347665597cbfaef834886adbb848011f-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/347665597cbfaef834886adbb848011f-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/347665597cbfaef834886adbb848011f-Metadata.json", "review": "", "metareview": "", "pdf_size": 1617307, "gs_citation": 50, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11139163612546458665&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Dept. of Computing, City University London; Dept. of Computing Theory, PPGC-II-UFRGS", "aff_domain": "soi.city.ac.uk;inf.ufrgs.br", "email": "soi.city.ac.uk;inf.ufrgs.br", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "City University London;Universidade Federal do Rio Grande do Sul", "aff_unique_dep": "Dept. of Computing;Dept. of Computing Theory", "aff_unique_url": "https://www.city.ac.uk;https://www.ufrgs.br", "aff_unique_abbr": "City, University of London;UFRGS", "aff_campus_unique_index": "0", "aff_campus_unique": "London;", "aff_country_unique_index": "0;1", "aff_country_unique": "United Kingdom;Brazil" }, { "id": "86a8b9407d", "title": "Reconstructing MEG Sources with Unknown Correlations", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/e9412ee564384b987d086df32d4ce6b7-Abstract.html", "author": "Maneesh Sahani; Srikantan S. Nagarajan", "abstract": "Existing source location and recovery algorithms used in magnetoen- cephalographic imaging generally assume that the source activity at dif- ferent brain locations is independent or that the correlation structure is known. However, electrophysiological recordings of local \ufb01eld poten- tials show strong correlations in aggregate activity over signi\ufb01cant dis- tances. Indeed, it seems very likely that stimulus-evoked activity would follow strongly correlated time-courses in different brain areas. Here, we present, and validate through simulations, a new approach to source reconstruction in which the correlation between sources is modelled and estimated explicitly by variational Bayesian methods, facilitating accu- rate recovery of source locations and the time-courses of their activation.", "bibtex": "@inproceedings{NIPS2003_e9412ee5,\n author = {Sahani, Maneesh and Nagarajan, Srikantan},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Reconstructing MEG Sources with Unknown Correlations},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/e9412ee564384b987d086df32d4ce6b7-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/e9412ee564384b987d086df32d4ce6b7-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/e9412ee564384b987d086df32d4ce6b7-Metadata.json", "review": "", "metareview": "", "pdf_size": 241958, "gs_citation": 28, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14235822305143222959&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 13, "aff": "W. M. 
Keck Foundation Center for Integrative Neuroscience, UC, San Francisco, CA 94143-0732; Biomagnetic Imaging Laboratory, Department of Radiology, UC, San Francisco, CA 94143-0628", "aff_domain": "phy.ucsf.edu;radiology.ucsf.edu", "email": "phy.ucsf.edu;radiology.ucsf.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of California, San Francisco", "aff_unique_dep": "W. M. Keck Foundation Center for Integrative Neuroscience", "aff_unique_url": "https://www.ucsf.edu", "aff_unique_abbr": "UCSF", "aff_campus_unique_index": "0;0", "aff_campus_unique": "San Francisco", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "5440f135da", "title": "Robustness in Markov Decision Problems with Uncertain Transition Matrices", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/300891a62162b960cf02ce3827bb363c-Abstract.html", "author": "Arnab Nilim; Laurent El Ghaoui", "abstract": "Optimal solutions to Markov Decision Problems (MDPs) are very sen- sitive with respect to the state transition probabilities. In many practi- cal problems, the estimation of those probabilities is far from accurate. Hence, estimation errors are limiting factors in applying MDPs to real- world problems. We propose an algorithm for solving \ufb01nite-state and \ufb01nite-action MDPs, where the solution is guaranteed to be robust with respect to estimation errors on the state transition probabilities. Our al- gorithm involves a statistically accurate yet numerically ef\ufb01cient repre- sentation of uncertainty, via Kullback-Leibler divergence bounds. The worst-case complexity of the robust algorithm is the same as the origi- nal Bellman recursion. Hence, robustness can be added at practically no extra computing cost.", "bibtex": "@inproceedings{NIPS2003_300891a6,\n author = {Nilim, Arnab and Ghaoui, Laurent},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Robustness in Markov Decision Problems with Uncertain Transition Matrices},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/300891a62162b960cf02ce3827bb363c-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/300891a62162b960cf02ce3827bb363c-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/300891a62162b960cf02ce3827bb363c-Metadata.json", "review": "", "metareview": "", "pdf_size": 116329, "gs_citation": 150, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=584439841835502819&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Department of EECS, University of California, Berkeley, CA 94720; Department of EECS, University of California, Berkeley, CA 94720", "aff_domain": "eecs.berkeley.edu;eecs.berkeley.edu", "email": "eecs.berkeley.edu;eecs.berkeley.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of California, Berkeley", "aff_unique_dep": "Department of Electrical Engineering and Computer Sciences", "aff_unique_url": "https://www.berkeley.edu", "aff_unique_abbr": "UC Berkeley", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Berkeley", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "b92d99b8ec", "title": "Salient Boundary Detection using Ratio Contour", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/f55cadb97eaff2ba1980e001b0bd9842-Abstract.html", "author": "Song Wang; Toshiro Kubota; Jeffrey M. Siskind", "abstract": "This paper presents a novel graph-theoretic approach, named ratio con- tour, to extract perceptually salient boundaries from a set of noisy bound- ary fragments detected in real images. The boundary saliency is de\ufb01ned using the Gestalt laws of closure, proximity, and continuity. This pa- per \ufb01rst constructs an undirected graph with two different sets of edges: solid edges and dashed edges. The weights of solid and dashed edges measure the local saliency in and between boundary fragments, respec- tively. Then the most salient boundary is detected by searching for an optimal cycle in this graph with minimum average weight. The proposed approach guarantees the global optimality without introducing any biases related to region area or boundary length. We collect a variety of images for testing the proposed approach with encouraging results.", "bibtex": "@inproceedings{NIPS2003_f55cadb9,\n author = {Wang, Song and Kubota, Toshiro and Siskind, Jeffrey},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Salient Boundary Detection using Ratio Contour},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/f55cadb97eaff2ba1980e001b0bd9842-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/f55cadb97eaff2ba1980e001b0bd9842-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/f55cadb97eaff2ba1980e001b0bd9842-Metadata.json", "review": "", "metareview": "", "pdf_size": 174358, "gs_citation": 32, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2865435734165093027&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 14, "aff": "Dept. 
Computer Science & Engineering, University of South Carolina, Columbia, SC 29208; Dept. Computer Science & Engineering, University of South Carolina, Columbia, SC 29208; School Electrical & Comput. Engr., Purdue University, West Lafayette, IN 47906", "aff_domain": "cse.sc.edu;cse.sc.edu;purdue.edu", "email": "cse.sc.edu;cse.sc.edu;purdue.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;1", "aff_unique_norm": "University of South Carolina;Purdue University", "aff_unique_dep": "Department of Computer Science & Engineering;School of Electrical and Computer Engineering", "aff_unique_url": "https://www.sc.edu;https://www.purdue.edu", "aff_unique_abbr": "USC;Purdue", "aff_campus_unique_index": "0;0;1", "aff_campus_unique": "Columbia;West Lafayette", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "8c67508875", "title": "Sample Propagation", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/7989edad14ebcd3adfacc7344dc6b739-Abstract.html", "author": "Mark A. Paskin", "abstract": "Rao\u2013Blackwellization is an approximation technique for probabilistic in- ference that \ufb02exibly combines exact inference with sampling. It is useful in models where conditioning on some of the variables leaves a sim- pler inference problem that can be solved tractably. This paper presents Sample Propagation, an ef\ufb01cient implementation of Rao\u2013Blackwellized approximate inference for a large class of models. Sample Propagation tightly integrates sampling with message passing in a junction tree, and is named for its simple, appealing structure: it walks the clusters of a junction tree, sampling some of the current cluster\u2019s variables and then passing a message to one of its neighbors. We discuss the application of Sample Propagation to conditional Gaussian inference problems such as switching linear dynamical systems.", "bibtex": "@inproceedings{NIPS2003_7989edad,\n author = {Paskin, Mark},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Sample Propagation},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/7989edad14ebcd3adfacc7344dc6b739-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/7989edad14ebcd3adfacc7344dc6b739-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/7989edad14ebcd3adfacc7344dc6b739-Metadata.json", "review": "", "metareview": "", "pdf_size": 153541, "gs_citation": 18, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12870581540174382623&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Computer Science Division, University of California, Berkeley", "aff_domain": "paskin.org", "email": "paskin.org", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "University of California, Berkeley", "aff_unique_dep": "Computer Science Division", "aff_unique_url": "https://www.berkeley.edu", "aff_unique_abbr": "UC Berkeley", "aff_campus_unique_index": "0", "aff_campus_unique": "Berkeley", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "id": "1dc77ce513", "title": "Self-calibrating Probability Forecasting", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/10c66082c124f8afe3df4886f5e516e0-Abstract.html", "author": "Vladimir Vovk; Glenn Shafer; Ilia Nouretdinov", "abstract": "In the problem of probability forecasting the learner\u2019s goal is to output, given a training set and a new object, a suitable probability measure on the possible values of the new object\u2019s label. An on-line algorithm for probability forecasting is said to be well-calibrated if the probabilities it outputs agree with the observed frequencies. We give a natural non- asymptotic formalization of the notion of well-calibratedness, which we then study under the assumption of randomness (the object/label pairs are independent and identically distributed). It turns out that, although no probability forecasting algorithm is automatically well-calibrated in our sense, there exists a wide class of algorithms for \u201cmultiprobability forecasting\u201d (such algorithms are allowed to output a set, ideally very narrow, of probability measures) which satisfy this property; we call the algorithms in this class \u201cVenn probability machines\u201d. Our experimental results demonstrate that a 1-Nearest Neighbor Venn probability machine performs reasonably well on a standard benchmark data set, and one of our theoretical results asserts that a simple Venn probability machine asymptotically approaches the true conditional probabilities regardless, and without knowledge, of the true probability measure generating the examples.", "bibtex": "@inproceedings{NIPS2003_10c66082,\n author = {Vovk, Vladimir and Shafer, Glenn and Nouretdinov, Ilia},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Self-calibrating Probability Forecasting},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/10c66082c124f8afe3df4886f5e516e0-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/10c66082c124f8afe3df4886f5e516e0-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/10c66082c124f8afe3df4886f5e516e0-Metadata.json", "review": "", "metareview": "", "pdf_size": 184344, "gs_citation": 79, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1639317379133128055&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "Computer Learning Research Centre, Department of Computer Science, Royal Holloway, University of London, Egham, Surrey TW20 0EX, UK; Rutgers School of Business, Newark and New Brunswick, 180 University Avenue, Newark, NJ 07102, USA; Computer Learning Research Centre, Department of Computer Science, Royal Holloway, University of London, Egham, Surrey TW20 0EX, UK", "aff_domain": "cs.rhul.ac.uk;andromeda.rutgers.edu;cs.rhul.ac.uk", "email": "cs.rhul.ac.uk;andromeda.rutgers.edu;cs.rhul.ac.uk", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;0", "aff_unique_norm": "Royal Holloway, University of London;Rutgers University", "aff_unique_dep": "Department of Computer Science;School of Business", "aff_unique_url": "https://www.royalholloway.ac.uk;https://business.rutgers.edu", "aff_unique_abbr": "RHUL;Rutgers", "aff_campus_unique_index": "0;1;0", "aff_campus_unique": "Egham;Newark and New Brunswick", "aff_country_unique_index": "0;1;0", "aff_country_unique": "United Kingdom;United States" }, { "id": "2dec5ad6d7", "title": "Semi-Definite Programming by Perceptron Learning", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/f708f064faaf32a43e4d3c784e6af9ea-Abstract.html", "author": "Thore Graepel; Ralf Herbrich; Andriy Kharechko; John S. Shawe-taylor", "abstract": "We present a modi\ufb01ed version of the perceptron learning algorithm (PLA) which solves semide\ufb01nite programs (SDPs) in polynomial time. The algorithm is based on the following three observations: (i) Semide\ufb01nite programs are linear programs with in\ufb01nitely many (linear) constraints; (ii) every linear program can be solved by a sequence of constraint satisfaction problems with linear constraints; (iii) in general, the perceptron learning algorithm solves a constraint satisfaction problem with linear constraints in \ufb01nitely many updates. Combining the PLA with a probabilistic rescaling algorithm (which, on average, increases the size of the feasable region) results in a prob- abilistic algorithm for solving SDPs that runs in polynomial time. We present preliminary results which demonstrate that the algo- rithm works, but is not competitive with state-of-the-art interior point methods.", "bibtex": "@inproceedings{NIPS2003_f708f064,\n author = {Graepel, Thore and Herbrich, Ralf and Kharechko, Andriy and Shawe-taylor, John},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Semi-Definite Programming by Perceptron Learning},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/f708f064faaf32a43e4d3c784e6af9ea-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/f708f064faaf32a43e4d3c784e6af9ea-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/f708f064faaf32a43e4d3c784e6af9ea-Metadata.json", "review": "", "metareview": "", "pdf_size": 228413, "gs_citation": 2, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1667252278490403345&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 13, "aff": "Microsoft Research Ltd., Cambridge, UK; Microsoft Research Ltd., Cambridge, UK; Royal Holloway, University of London, UK; Royal Holloway, University of London, UK", "aff_domain": "microsoft.com;microsoft.com;ecs.soton.ac.uk;ecs.soton.ac.uk", "email": "microsoft.com;microsoft.com;ecs.soton.ac.uk;ecs.soton.ac.uk", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;0;1;1", "aff_unique_norm": "Microsoft;University of London", "aff_unique_dep": "Microsoft Research;", "aff_unique_url": "https://www.microsoft.com/en-us/research;https://www.royalholloway.ac.uk", "aff_unique_abbr": "MSR;RHUL", "aff_campus_unique_index": "0;0;1;1", "aff_campus_unique": "Cambridge;Royal Holloway", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United Kingdom" }, { "id": "16094b7795", "title": "Semi-Supervised Learning with Trees", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/70c445ee64b1ed0583367a12a79a9ef2-Abstract.html", "author": "Charles Kemp; Thomas L. Griffiths; Sean Stromsten; Joshua B. Tenenbaum", "abstract": "We describe a nonparametric Bayesian approach to generalizing from few labeled examples, guided by a larger set of unlabeled objects and the assumption of a latent tree-structure to the domain. The tree (or a distribution over trees) may be inferred using the unlabeled data. A prior over concepts generated by a mutation process on the inferred tree(s) allows ef\ufb01cient computation of the optimal Bayesian classi\ufb01cation func- tion from the labeled examples. We test our approach on eight real-world datasets.", "bibtex": "@inproceedings{NIPS2003_70c445ee,\n author = {Kemp, Charles and Griffiths, Thomas and Stromsten, Sean and Tenenbaum, Joshua},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Semi-Supervised Learning with Trees},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/70c445ee64b1ed0583367a12a79a9ef2-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/70c445ee64b1ed0583367a12a79a9ef2-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/70c445ee64b1ed0583367a12a79a9ef2-Metadata.json", "review": "", "metareview": "", "pdf_size": 113098, "gs_citation": 91, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2517930564848625876&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 13, "aff": "Department of Brain and Cognitive Sciences, MIT, Cambridge, MA 02139; Department of Brain and Cognitive Sciences, MIT, Cambridge, MA 02139; Department of Brain and Cognitive Sciences, MIT, Cambridge, MA 02139; Department of Brain and Cognitive Sciences, MIT, Cambridge, MA 02139", "aff_domain": "mit.edu;mit.edu;mit.edu;mit.edu", "email": "mit.edu;mit.edu;mit.edu;mit.edu", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Massachusetts Institute of Technology", "aff_unique_dep": "Department of Brain and Cognitive Sciences", "aff_unique_url": "https://www.mit.edu", "aff_unique_abbr": "MIT", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Cambridge", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "id": "4b40b7af18", "title": "Semi-supervised Protein Classification Using Cluster Kernels", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/12ffb0968f2f56e51a59a6beb37b2859-Abstract.html", "author": "Jason Weston; Dengyong Zhou; Andr\u00e9 Elisseeff; William S. Noble; Christina S. Leslie", "abstract": "A key issue in supervised protein classi\ufb01cation is the representation of in- put sequences of amino acids. Recent work using string kernels for pro- tein data has achieved state-of-the-art classi\ufb01cation performance. How- ever, such representations are based only on labeled data \u2014 examples with known 3D structures, organized into structural classes \u2014 while in practice, unlabeled data is far more plentiful. In this work, we de- velop simple and scalable cluster kernel techniques for incorporating un- labeled data into the representation of protein sequences. We show that our methods greatly improve the classi\ufb01cation performance of string ker- nels and outperform standard approaches for using unlabeled data, such as adding close homologs of the positive examples to the training data. We achieve equal or superior performance to previously presented cluster kernel methods while achieving far greater computational ef\ufb01ciency.", "bibtex": "@inproceedings{NIPS2003_12ffb096,\n author = {Weston, Jason and Zhou, Dengyong and Elisseeff, Andr\\'{e} and Noble, William and Leslie, Christina},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Semi-supervised Protein Classification Using Cluster Kernels},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/12ffb0968f2f56e51a59a6beb37b2859-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/12ffb0968f2f56e51a59a6beb37b2859-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/12ffb0968f2f56e51a59a6beb37b2859-Metadata.json", "review": "", "metareview": "", "pdf_size": 88205, "gs_citation": 324, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=634404288324326161&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 33, "aff": ";;;;", "aff_domain": ";;;;", "email": ";;;;", "github": "", "project": "http://www.kyb.tuebingen.mpg.de/bs/people/weston/semiprot", "author_num": 5, "track": "main", "status": "Poster" }, { "id": "1d5935ef16", "title": "Semidefinite Relaxations for Approximate Inference on Graphs with Cycles", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/3cf2559725a9fdfa602ec8c887440f32-Abstract.html", "author": "Michael I. Jordan; Martin J. Wainwright", "abstract": "We present a new method for calculating approximate marginals for probability distributions de\ufb01ned by graphs with cycles, based on a Gaus- sian entropy bound combined with a semide\ufb01nite outer bound on the marginal polytope. This combination leads to a log-determinant max- imization problem that can be solved by ef\ufb01cient interior point meth- ods [8]. As with the Bethe approximation and its generalizations [12], the optimizing arguments of this problem can be taken as approximations to the exact marginals. In contrast to Bethe/Kikuchi approaches, our vari- ational problem is strictly convex and so has a unique global optimum. An additional desirable feature is that the value of the optimal solution is guaranteed to provide an upper bound on the log partition function. In experimental trials, the performance of the log-determinant relaxation is comparable to or better than the sum-product algorithm, and by a sub- stantial margin for certain problem classes. Finally, the zero-temperature limit of our log-determinant relaxation recovers a class of well-known semide\ufb01nite relaxations for integer programming [e.g., 3].", "bibtex": "@inproceedings{NIPS2003_3cf25597,\n author = {Jordan, Michael and Wainwright, Martin J},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Semidefinite Relaxations for Approximate Inference on Graphs with Cycles},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/3cf2559725a9fdfa602ec8c887440f32-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/3cf2559725a9fdfa602ec8c887440f32-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/3cf2559725a9fdfa602ec8c887440f32-Metadata.json", "review": "", "metareview": "", "pdf_size": 99775, "gs_citation": 39, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1327826211659816818&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 12, "aff": "Electrical Engineering and Computer Science, UC Berkeley, Berkeley, CA 94720; Computer Science and Statistics, UC Berkeley, Berkeley, CA 94720", "aff_domain": "eecs.berkeley.edu;cs.berkeley.edu", "email": "eecs.berkeley.edu;cs.berkeley.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of California, Berkeley", "aff_unique_dep": "Electrical Engineering and Computer Science", "aff_unique_url": "https://www.berkeley.edu", "aff_unique_abbr": "UC Berkeley", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Berkeley", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "8160642184", "title": "Sensory Modality Segregation", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/31c97cbb941d3e92d0e6f9925e9bc4d7-Abstract.html", "author": "Virginia Sa", "abstract": "Why are sensory modalities segregated the way they are? In this paper we show that sensory modalities are well designed for self-supervised cross-modal learning. Using the Minimizing-Disagreement algorithm on an unsupervised speech categorization task with visual (moving lips) and auditory (sound signal) inputs, we show that very informative auditory dimensions actually harm performance when moved to the visual side of the network. It is better to throw them away than to consider them part of the \u201cvisual input\u201d. We explain this \ufb01nding in terms of the statistical structure in sensory inputs.", "bibtex": "@inproceedings{NIPS2003_31c97cbb,\n author = {Sa, Virginia},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Sensory Modality Segregation},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/31c97cbb941d3e92d0e6f9925e9bc4d7-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/31c97cbb941d3e92d0e6f9925e9bc4d7-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/31c97cbb941d3e92d0e6f9925e9bc4d7-Metadata.json", "review": "", "metareview": "", "pdf_size": 110789, "gs_citation": 8, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9448467778493551598&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "Department of Cognitive Science, University of California, San Diego", "aff_domain": "ucsd.edu", "email": "ucsd.edu", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "University of California, San Diego", "aff_unique_dep": "Department of Cognitive Science", "aff_unique_url": "https://ucsd.edu", "aff_unique_abbr": "UCSD", "aff_campus_unique_index": "0", "aff_campus_unique": "San Diego", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "id": "2902dd1c94", "title": "Sequential Bayesian Kernel Regression", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/8aec51422b30d61bce078b27f0babeb1-Abstract.html", "author": "Jaco Vermaak; Simon J. Godsill; Arnaud Doucet", "abstract": "We propose a method for sequential Bayesian kernel regression. As is the case for the popular Relevance Vector Machine (RVM) [10, 11], the method automatically identi\ufb01es the number and locations of the kernels. Our algorithm overcomes some of the computational dif\ufb01culties related to batch methods for kernel regression. It is non-iterative, and requires only a single pass over the data. It is thus applicable to truly sequen- tial data sets and batch data sets alike. The algorithm is based on a generalisation of Importance Sampling, which allows the design of in- tuitively simple and ef\ufb01cient proposal distributions for the model param- eters. Comparative results on two standard data sets show our algorithm to compare favourably with existing batch estimation strategies.", "bibtex": "@inproceedings{NIPS2003_8aec5142,\n author = {Vermaak, Jaco and Godsill, Simon and Doucet, Arnaud},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Sequential Bayesian Kernel Regression},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/8aec51422b30d61bce078b27f0babeb1-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/8aec51422b30d61bce078b27f0babeb1-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/8aec51422b30d61bce078b27f0babeb1-Metadata.json", "review": "", "metareview": "", "pdf_size": 156164, "gs_citation": 21, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16371950501418068013&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 11, "aff": "Cambridge University Engineering Department; Cambridge University Engineering Department; Cambridge University Engineering Department", "aff_domain": "eng.cam.ac.uk;eng.cam.ac.uk;eng.cam.ac.uk", "email": "eng.cam.ac.uk;eng.cam.ac.uk;eng.cam.ac.uk", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "Cambridge University", "aff_unique_dep": "Engineering Department", "aff_unique_url": "https://www.cam.ac.uk", "aff_unique_abbr": "Cambridge", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Cambridge", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United Kingdom" }, { "id": "d980a3bc9a", "title": "Simplicial Mixtures of Markov Chains: Distributed Modelling of Dynamic User Profiles", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/4a8423d5e91fda00bb7e46540e2b0cf1-Abstract.html", "author": "Mark Girolami; Ata Kab\u00e1n", "abstract": "To provide a compact generative representation of the sequential activ- ity of a number of individuals within a group there is a tradeoff between the de\ufb01nition of individual speci\ufb01c and global models. This paper pro- poses a linear-time distributed model for \ufb01nite state symbolic sequences representing traces of individual user activity by making the assump- tion that heterogeneous user behavior may be \u2018explained\u2019 by a relatively small number of common structurally simple behavioral patterns which may interleave randomly in a user-speci\ufb01c proportion. The results of an empirical study on three different sources of user traces indicates that this modelling approach provides an ef\ufb01cient representation scheme, re- \ufb02ected by improved prediction performance as well as providing low- complexity and intuitively interpretable representations.", "bibtex": "@inproceedings{NIPS2003_4a8423d5,\n author = {Girolami, Mark and Kab\\'{a}n, Ata},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Simplicial Mixtures of Markov Chains: Distributed Modelling of Dynamic User Profiles},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/4a8423d5e91fda00bb7e46540e2b0cf1-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/4a8423d5e91fda00bb7e46540e2b0cf1-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/4a8423d5e91fda00bb7e46540e2b0cf1-Metadata.json", "review": "", "metareview": "", "pdf_size": 130465, "gs_citation": 52, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1673989039876856105&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Department of Computing Science, University of Glasgow, Glasgow, UK; School of Computer Science, University of Birmingham, Birmingham, UK", "aff_domain": "dcs.gla.ac.uk;cs.bham.ac.uk", "email": "dcs.gla.ac.uk;cs.bham.ac.uk", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "University of Glasgow;University of Birmingham", "aff_unique_dep": "Department of Computing Science;School of Computer Science", "aff_unique_url": "https://www.gla.ac.uk;https://www.birmingham.ac.uk", "aff_unique_abbr": "UofG;UoB", "aff_campus_unique_index": "0;1", "aff_campus_unique": "Glasgow;Birmingham", "aff_country_unique_index": "0;0", "aff_country_unique": "United Kingdom" }, { "id": "0de8d0b5eb", "title": "Sparse Greedy Minimax Probability Machine Classification", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/7cc234202e98d2722580858573fd0817-Abstract.html", "author": "Thomas R. Strohmann; Andrei Belitski; Gregory Z. Grudic; Dennis DeCoste", "abstract": "The Minimax Probability Machine Classi\ufb01cation (MPMC) framework [Lanckriet et al., 2002] builds classi\ufb01ers by minimizing the maximum probability of misclassi\ufb01cation, and gives direct estimates of the proba- bilistic accuracy bound \u2126. The only assumptions that MPMC makes is that good estimates of means and covariance matrices of the classes exist. However, as with Support Vector Machines, MPMC is computationally expensive and requires extensive cross validation experiments to choose kernels and kernel parameters that give good performance. In this paper we address the computational cost of MPMC by proposing an algorithm that constructs nonlinear sparse MPMC (SMPMC) models by incremen- tally adding basis functions (i.e. kernels) one at a time \u2013 greedily select- ing the next one that maximizes the accuracy bound \u2126. SMPMC auto- matically chooses both kernel parameters and feature weights without us- ing computationally expensive cross validation. Therefore the SMPMC algorithm simultaneously addresses the problem of kernel selection and feature selection (i.e. feature weighting), based solely on maximizing the accuracy bound \u2126. Experimental results indicate that we can obtain reli- able bounds \u2126, as well as test set accuracies that are comparable to state of the art classi\ufb01cation algorithms.", "bibtex": "@inproceedings{NIPS2003_7cc23420,\n author = {Strohmann, Thomas R. and Belitski, Andrei and Grudic, Gregory and DeCoste, Dennis},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Sparse Greedy Minimax Probability Machine Classification},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/7cc234202e98d2722580858573fd0817-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/7cc234202e98d2722580858573fd0817-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/7cc234202e98d2722580858573fd0817-Metadata.json", "review": "", "metareview": "", "pdf_size": 153302, "gs_citation": 24, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13283656546587170165&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "Department of Computer Science, University of Colorado, Boulder; Department of Computer Science, University of Colorado, Boulder; Department of Computer Science, University of Colorado, Boulder; Machine Learning Systems Group, NASA Jet Propulsion Laboratory", "aff_domain": "cs.colorado.edu;colorado.edu;cs.colorado.edu;aig.jpl.nasa.gov", "email": "cs.colorado.edu;colorado.edu;cs.colorado.edu;aig.jpl.nasa.gov", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;1", "aff_unique_norm": "University of Colorado;NASA Jet Propulsion Laboratory", "aff_unique_dep": "Department of Computer Science;Machine Learning Systems Group", "aff_unique_url": "https://www.colorado.edu;https://www.jpl.nasa.gov", "aff_unique_abbr": "CU;JPL", "aff_campus_unique_index": "0;0;0;1", "aff_campus_unique": "Boulder;Pasadena", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "id": "b7fbf5c0e0", "title": "Sparse Representation and Its Applications in Blind Source Separation", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/add217938e07bb1fd8796e0315b88c10-Abstract.html", "author": "Yuanqing Li; Shun-ichi Amari; Sergei Shishkin; Jianting Cao; Fanji Gu; Andrzej S. Cichocki", "abstract": "In this paper, sparse representation (factorization) of a data matrix is \ufb01rst discussed. An overcomplete basis matrix is estimated by using the K(cid:0)means method. We have proved that for the estimated overcom- plete basis matrix, the sparse solution (coef\ufb01cient matrix) with minimum l1(cid:0)norm is unique with probability of one, which can be obtained using a linear programming algorithm. The comparisons of the l1(cid:0)norm so- lution and the l0(cid:0)norm solution are also presented, which can be used in recoverability analysis of blind source separation (BSS). Next, we ap- ply the sparse matrix factorization approach to BSS in the overcomplete case. Generally, if the sources are not suf\ufb01ciently sparse, we perform blind separation in the time-frequency domain after preprocessing the observed data using the wavelet packets transformation. Third, an EEG experimental data analysis example is presented to illustrate the useful- ness of the proposed approach and demonstrate its performance. Two almost independent components obtained by the sparse representation method are selected for phase synchronization analysis, and their peri- ods of signi\ufb01cant phase synchronization are found which are related to tasks. 
Finally, concluding remarks review the approach and state areas that require further study.", "bibtex": "@inproceedings{NIPS2003_add21793,\n author = {Li, Yuanqing and Amari, Shun-ichi and Shishkin, Sergei and Cao, Jianting and Gu, Fanji and Cichocki, Andrzej},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Sparse Representation and Its Applications in Blind Source Separation},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/add217938e07bb1fd8796e0315b88c10-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/add217938e07bb1fd8796e0315b88c10-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/add217938e07bb1fd8796e0315b88c10-Metadata.json", "review": "", "metareview": "", "pdf_size": 714994, "gs_citation": 46, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11679741881334944434&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "RIKEN Brain Science Institute, Saitama, 3510198, Japan; RIKEN Brain Science Institute, Saitama, 3510198, Japan; RIKEN Brain Science Institute, Saitama, 3510198, Japan; RIKEN Brain Science Institute, Saitama, 3510198, Japan; Department of Electronic Engineering, Saitama Institute of Technology, Saitama, 3510198, Japan; Department of Physiology and Biophysics, Fudan University, Shanghai, China", "aff_domain": ";;;;;", "email": ";;;;;", "github": "", "project": "", "author_num": 6, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;0;1;2", "aff_unique_norm": "RIKEN Brain Science Institute;Saitama Institute of Technology;Fudan University", "aff_unique_dep": "Brain Science Institute;Department of Electronic Engineering;Department of Physiology and Biophysics", "aff_unique_url": "https://briken.org;;https://www.fudan.edu.cn", "aff_unique_abbr": "RIKEN BSI;;Fudan", "aff_campus_unique_index": "0;0;0;0;0;1", "aff_campus_unique": "Saitama;Shanghai", "aff_country_unique_index": "0;0;0;0;0;1", "aff_country_unique": "Japan;China" }, { "id": "e2e4fda2e1", "title": "Sparseness of Support Vector Machines---Some Asymptotically Sharp Bounds", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/4c8c76b39d294759a9000cbda3a6571a-Abstract.html", "author": "Ingo Steinwart", "abstract": "The decision functions constructed by support vector machines (SVM\u2019s) usually depend only on a subset of the training set\u2014the so-called support vectors. We derive asymptotically sharp lower and upper bounds on the number of support vectors for several standard types of SVM\u2019s. In par- ticular, we show for the Gaussian RBF kernel that the fraction of support vectors tends to twice the Bayes risk for the L1-SVM, to the probability of noise for the L2-SVM, and to 1 for the LS-SVM.", "bibtex": "@inproceedings{NIPS2003_4c8c76b3,\n author = {Steinwart, Ingo},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Sparseness of Support Vector Machines---Some Asymptotically Sharp Bounds},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/4c8c76b39d294759a9000cbda3a6571a-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/4c8c76b39d294759a9000cbda3a6571a-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/4c8c76b39d294759a9000cbda3a6571a-Metadata.json", "review": "", "metareview": "", "pdf_size": 83025, "gs_citation": 106, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5849478337661803478&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 11, "aff": "Modeling, Algorithms, and Informatics Group, CCS-3, Mail Stop B256, Los Alamos National Laboratory, Los Alamos, NM 87545, USA", "aff_domain": "lanl.gov", "email": "lanl.gov", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "Los Alamos National Laboratory", "aff_unique_dep": "Modeling, Algorithms, and Informatics Group", "aff_unique_url": "https://www.lanl.gov", "aff_unique_abbr": "LANL", "aff_campus_unique_index": "0", "aff_campus_unique": "Los Alamos", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "id": "b9061cff00", "title": "Statistical Debugging of Sampled Programs", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/0a65e195cb51418279b6fa8d96847a60-Abstract.html", "author": "Alice X. Zheng; Michael I. Jordan; Ben Liblit; Alex Aiken", "abstract": "We present a novel strategy for automatically debugging programs given sampled data from thousands of actual user runs. Our goal is to pinpoint those features that are most correlated with crashes. This is accomplished by maximizing an appropriately de\ufb01ned utility function. It has analogies with intuitive debugging heuristics, and, as we demonstrate, is able to deal with various types of bugs that occur in real programs.", "bibtex": "@inproceedings{NIPS2003_0a65e195,\n author = {Zheng, Alice and Jordan, Michael and Liblit, Ben and Aiken, Alex},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Statistical Debugging of Sampled Programs},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/0a65e195cb51418279b6fa8d96847a60-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/0a65e195cb51418279b6fa8d96847a60-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/0a65e195cb51418279b6fa8d96847a60-Metadata.json", "review": "", "metareview": "", "pdf_size": 163301, "gs_citation": 96, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12631374821157002623&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 15, "aff": "EE Division, UC Berkeley; CS Division and Department of Statistics, UC Berkeley; CS Division, UC Berkeley; CS Division, UC Berkeley", "aff_domain": "cs.berkeley.edu;cs.berkeley.edu;cs.berkeley.edu;cs.berkeley.edu", "email": "cs.berkeley.edu;cs.berkeley.edu;cs.berkeley.edu;cs.berkeley.edu", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "University of California, Berkeley", "aff_unique_dep": "Electrical Engineering and Computer Sciences", "aff_unique_url": "https://www.berkeley.edu", "aff_unique_abbr": "UC Berkeley", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Berkeley", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "id": "d51d73c9e0", "title": "Subject-Independent Magnetoencephalographic Source Localization by a Multilayer Perceptron", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/c30fb4dc55d801fc7473840b5b161dfa-Abstract.html", "author": "Sung C. Jun; Barak A. Pearlmutter", "abstract": "We describe a system that localizes a single dipole to reasonable accu- racy from noisy magnetoencephalographic (MEG) measurements in real time. At its core is a multilayer perceptron (MLP) trained to map sen- sor signals and head position to dipole location. Including head position overcomes the previous need to retrain the MLP for each subject and ses- sion. The training dataset was generated by mapping randomly chosen dipoles and head positions through an analytic model and adding noise from real MEG recordings. After training, a localization took 0.7 ms with an average error of 0.90 cm. A few iterations of a Levenberg-Marquardt routine using the MLP\u2019s output as its initial guess took 15 ms and im- proved the accuracy to 0.53 cm, only slightly above the statistical limits on accuracy imposed by the noise. We applied these methods to localize single dipole sources from MEG components isolated by blind source separation and compared the estimated locations to those generated by standard manually-assisted commercial software.", "bibtex": "@inproceedings{NIPS2003_c30fb4dc,\n author = {Jun, Sung and Pearlmutter, Barak},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Subject-Independent Magnetoencephalographic Source Localization by a Multilayer Perceptron},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/c30fb4dc55d801fc7473840b5b161dfa-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/c30fb4dc55d801fc7473840b5b161dfa-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/c30fb4dc55d801fc7473840b5b161dfa-Metadata.json", "review": "", "metareview": "", "pdf_size": 72726, "gs_citation": 0, "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:FtH92eNx0i8J:scholar.google.com/&scioq=Subject-Independent+Magnetoencephalographic+Source+Localization+by+a+Multilayer+Perceptron&hl=en&as_sdt=0,33", "gs_version_total": 23, "aff": "Biological and Quantum Physics Group, MS-D454, Los Alamos National Laboratory, Los Alamos, NM 87545, USA; Hamilton Institute, NUI Maynooth, Maynooth, Co. Kildare, Ireland", "aff_domain": "lanl.gov;cs.may.ie", "email": "lanl.gov;cs.may.ie", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "Los Alamos National Laboratory;National University of Ireland Maynooth", "aff_unique_dep": "Biological and Quantum Physics Group;Hamilton Institute", "aff_unique_url": "https://www.lanl.gov;https://www.nuim.ie", "aff_unique_abbr": "LANL;NUI Maynooth", "aff_campus_unique_index": "0;1", "aff_campus_unique": "Los Alamos;Maynooth", "aff_country_unique_index": "0;1", "aff_country_unique": "United States;Ireland" }, { "id": "b9ca5dddae", "title": "Synchrony Detection by Analogue VLSI Neurons with Bimodal STDP Synapses", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/123b7f02433572a0a560e620311a469c-Abstract.html", "author": "Adria Bofill-i-petit; Alan F. Murray", "abstract": "We present test results from spike-timing correlation learning ex- periments carried out with silicon neurons with STDP (Spike Tim- ing Dependent Plasticity) synapses. The weight change scheme of the STDP synapses can be set to either weight-independent or weight-dependent mode. We present results that characterise the learning window implemented for both modes of operation. When presented with spike trains with di(cid:11)erent types of synchronisation the neurons develop bimodal weight distributions. We also show that a 2-layered network of silicon spiking neurons with STDP synapses can perform hierarchical synchrony detection.", "bibtex": "@inproceedings{NIPS2003_123b7f02,\n author = {Bofill-i-petit, Adria and Murray, Alan},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Synchrony Detection by Analogue VLSI Neurons with Bimodal STDP Synapses},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/123b7f02433572a0a560e620311a469c-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/123b7f02433572a0a560e620311a469c-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/123b7f02433572a0a560e620311a469c-Metadata.json", "review": "", "metareview": "", "pdf_size": 588043, "gs_citation": 22, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1247053539326595877&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "The University of Edinburgh; The University of Edinburgh", "aff_domain": "ee.ed.ac.uk;ee.ed.ac.uk", "email": "ee.ed.ac.uk;ee.ed.ac.uk", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of Edinburgh", "aff_unique_dep": "", "aff_unique_url": "https://www.ed.ac.uk", "aff_unique_abbr": "Edinburgh", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United Kingdom" }, { "id": "944fb01c46", "title": "The Diffusion-Limited Biochemical Signal-Relay Channel", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/0d7363894acdee742caf7fe4e97c4d49-Abstract.html", "author": "Peter J. Thomas; Donald J. Spencer; Sierra K. Hampton; Peter Park; Joseph P. Zurkus", "abstract": "are", "bibtex": "@inproceedings{NIPS2003_0d736389,\n author = {Thomas, Peter and Spencer, Donald and Hampton, Sierra and Park, Peter and Zurkus, Joseph},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {The Diffusion-Limited Biochemical Signal-Relay Channel},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/0d7363894acdee742caf7fe4e97c4d49-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/0d7363894acdee742caf7fe4e97c4d49-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/0d7363894acdee742caf7fe4e97c4d49-Metadata.json", "review": "", "metareview": "", "pdf_size": 296989, "gs_citation": 41, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1446118786899253592&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "Salk Institute for Biological Studies; Salk Institute for Biological Studies; Department of Electrical and Computer Engineering, University of California San Diego; Department of Electrical and Computer Engineering, University of California San Diego; Department of Electrical and Computer Engineering, University of California San Diego", "aff_domain": "salk.edu;salk.edu; ; ; ", "email": "salk.edu;salk.edu; ; ; ", "github": "", "project": "", "author_num": 5, "track": "main", "status": "Poster", "aff_unique_index": "0;0;1;1;1", "aff_unique_norm": "Salk Institute for Biological Studies;University of California, San Diego", "aff_unique_dep": ";Department of Electrical and Computer Engineering", "aff_unique_url": "https://www.salk.edu;https://www.ucsd.edu", "aff_unique_abbr": "Salk Institute;UCSD", "aff_campus_unique_index": "1;1;1", "aff_campus_unique": ";San Diego", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "id": "205c3b7b88", "title": "The Doubly Balanced Network of Spiking Neurons: A Memory Model with High Capacity", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/7a68443f5c80d181c42967cd71612af1-Abstract.html", "author": "Yuval Aviel; David Horn; Moshe Abeles", "abstract": "A balanced network leads to contradictory constraints on memory models, as exemplified in previous work on accommodation of synfire chains. Here we show that these constraints can be overcome by introducing a 'shadow' inhibitory pattern for each excitatory pattern of the model. This is interpreted as a double- balance principle, whereby there exists both global balance between average excitatory and inhibitory currents and local balance between the currents carrying coherent activity at any given time frame. This principle can be applied to networks with Hebbian cell assemblies, leading to a high capacity of the associative memory. The number of possible patterns is limited by a combinatorial constraint that turns out to be P=0.06N within the specific model that we employ. This limit is reached by the Hebbian cell assembly network. To the best of our knowledge this is the first time that such high memory capacities are demonstrated in the asynchronous state of models of spiking neurons.", "bibtex": "@inproceedings{NIPS2003_7a68443f,\n author = {Aviel, Yuval and Horn, David and Abeles, Moshe},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {The Doubly Balanced Network of Spiking Neurons: A Memory Model with High Capacity},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/7a68443f5c80d181c42967cd71612af1-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/7a68443f5c80d181c42967cd71612af1-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/7a68443f5c80d181c42967cd71612af1-Metadata.json", "review": "", "metareview": "", "pdf_size": 241584, "gs_citation": 5, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10252465164703242021&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 11, "aff": "Interdisciplinary Center for Neural Computation, Hebrew University; School of Physics, Tel Aviv University; Interdisciplinary Center for Neural Computation, Hebrew University", "aff_domain": "cc.huji.ac.il;post.tau.ac.il;vms.huji.ac.il", "email": "cc.huji.ac.il;post.tau.ac.il;vms.huji.ac.il", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;0", "aff_unique_norm": "Hebrew University;Tel Aviv University", "aff_unique_dep": "Interdisciplinary Center for Neural Computation;School of Physics", "aff_unique_url": "http://www.huji.ac.il;https://www.tau.ac.il", "aff_unique_abbr": "HUJI;TAU", "aff_campus_unique_index": "1", "aff_campus_unique": ";Tel Aviv", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Israel" }, { "id": "98f126efd8", "title": "Towards Social Robots: Automatic Evaluation of Human-Robot Interaction by Facial Expression Classification", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/4dd9cec1c21bc54eecb53786a2c5fa09-Abstract.html", "author": "G.C. Littlewort; M.S. Bartlett; I.R. Fasel; J. Chenu; T. Kanda; H. Ishiguro; J.R. Movellan", "abstract": "Computer animated agents and robots bring a social dimension to hu- man computer interaction and force us to think in new ways about how computers could be used in daily life. Face to face communication is a real-time process operating at a time scale of less than a second. In this paper we present progress on a perceptual primitive to automatically detect frontal faces in the video stream and code them with respect to 7 dimensions in real time: neutral, anger, disgust, fear, joy, sadness, sur- prise. The face \ufb01nder employs a cascade of feature detectors trained with boosting techniques [13, 2]. The expression recognizer employs a novel combination of Adaboost and SVM\u2019s. The generalization performance to new subjects for a 7-way forced choice was 93.3% and 97% correct on two publicly available datasets. The outputs of the classi\ufb01er change smoothly as a function of time, providing a potentially valuable repre- sentation to code facial expression dynamics in a fully automatic and unobtrusive manner. The system was deployed and evaluated for mea- suring spontaneous facial expressions in the \ufb01eld in an application for automatic assessment of human-robot interaction.", "bibtex": "@inproceedings{NIPS2003_4dd9cec1,\n author = {Littlewort, G.C. and Bartlett, M.S. and Fasel, I.R. and Chenu, J. and Kanda, T. and Ishiguro, H. and Movellan, J.R.},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Towards Social Robots: Automatic Evaluation of Human-Robot Interaction by Facial Expression Classification},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/4dd9cec1c21bc54eecb53786a2c5fa09-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/4dd9cec1c21bc54eecb53786a2c5fa09-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/4dd9cec1c21bc54eecb53786a2c5fa09-Metadata.json", "review": "", "metareview": "", "pdf_size": 140602, "gs_citation": 107, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13240657032075669556&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "Institute forNeural Computation,Universityof California,San Diego; IntelligentRobotics andCommunication Laboratory,ATR, KyotoJapan; Institute forNeural Computation,Universityof California,San Diego; IntelligentRobotics andCommunication Laboratory,ATR, KyotoJapan; Institute forNeural Computation,Universityof California,San Diego; IntelligentRobotics andCommunication Laboratory,ATR, KyotoJapan; Institute forNeural Computation,Universityof California,San Diego", "aff_domain": "inc.ucsd.edu;inc.ucsd.edu;inc.ucsd.edu;inc.ucsd.edu;inc.ucsd.edu; ; ", "email": "inc.ucsd.edu;inc.ucsd.edu;inc.ucsd.edu;inc.ucsd.edu;inc.ucsd.edu; ; ", "github": "", "project": "http://mplab.ucsd.edu", "author_num": 7, "track": "main", "status": "Poster", "aff_unique_index": "0;1;0;1;0;1;0", "aff_unique_norm": "University of California, San Diego;ATR", "aff_unique_dep": "Institute for Neural Computation;Intelligent Robotics and Communication Laboratory", "aff_unique_url": "https://www.ucsd.edu;https://www.atr.jp", "aff_unique_abbr": "UCSD;ATR", "aff_campus_unique_index": "0;1;0;1;0;1;0", "aff_campus_unique": "San Diego;Kyoto", "aff_country_unique_index": "0;1;0;1;0;1;0", "aff_country_unique": "United States;Japan" }, { "id": "189d309799", "title": "Training a Quantum Neural Network", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/505259756244493872b7709a8a01b536-Abstract.html", "author": "Bob Ricks; Dan Ventura", "abstract": "Most proposals for quantum neural networks have skipped over the prob- lem of how to train the networks. The mechanics of quantum computing are different enough from classical computing that the issue of training should be treated in detail. We propose a simple quantum neural network and a training method for it. It can be shown that this algorithm works in quantum systems. Results on several real-world data sets show that this algorithm can train the proposed quantum neural networks, and that it has some advantages over classical learning algorithms.", "bibtex": "@inproceedings{NIPS2003_50525975,\n author = {Ricks, Bob and Ventura, Dan},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Training a Quantum Neural Network},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/505259756244493872b7709a8a01b536-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/505259756244493872b7709a8a01b536-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/505259756244493872b7709a8a01b536-Metadata.json", "review": "", "metareview": "", "pdf_size": 79246, "gs_citation": 111, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5053895571600456683&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 12, "aff": "Department of Computer Science, Brigham Young University; Department of Computer Science, Brigham Young University", "aff_domain": "cs.byu.edu;cs.byu.edu", "email": "cs.byu.edu;cs.byu.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Brigham Young University", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.byu.edu", "aff_unique_abbr": "BYU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "d2174aaddd", "title": "Training fMRI Classifiers to Detect Cognitive States across Multiple Human Subjects", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/61d77652c97ef636343742fc3dcf3ba9-Abstract.html", "author": "Xuerui Wang; Rebecca Hutchinson; Tom M. Mitchell", "abstract": "We consider learning to classify cognitive states of human subjects, based on their brain activity observed via functional Magnetic Resonance Imaging (fMRI). This problem is important because such classi\ufb01ers con- stitute \u201cvirtual sensors\u201d of hidden cognitive states, which may be useful in cognitive science research and clinical applications. In recent work, Mitchell, et al. [6,7,9] have demonstrated the feasibility of training such classi\ufb01ers for individual human subjects (e.g., to distinguish whether the subject is reading an ambiguous or unambiguous sentence, or whether they are reading a noun or a verb). Here we extend that line of research, exploring how to train classi\ufb01ers that can be applied across multiple hu- man subjects, including subjects who were not involved in training the classi\ufb01er. We describe the design of several machine learning approaches to training multiple-subject classi\ufb01ers, and report experimental results demonstrating the success of these methods in learning cross-subject classi\ufb01ers for two different fMRI data sets.", "bibtex": "@inproceedings{NIPS2003_61d77652,\n author = {Wang, Xuerui and Hutchinson, Rebecca and Mitchell, Tom M},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Training fMRI Classifiers to Detect Cognitive States across Multiple Human Subjects},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/61d77652c97ef636343742fc3dcf3ba9-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/61d77652c97ef636343742fc3dcf3ba9-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/61d77652c97ef636343742fc3dcf3ba9-Metadata.json", "review": "", "metareview": "", "pdf_size": 97985, "gs_citation": 138, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11595042180509785124&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 14, "aff": ";;", "aff_domain": ";;", "email": ";;", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster" }, { "id": "a15259dc8b", "title": "Tree-structured Approximations by Expectation Propagation", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/97416ac0f58056947e2eb5d5d253d4f2-Abstract.html", "author": "Yuan Qi; Tom Minka", "abstract": "Approximation structure plays an important role in inference on loopy graphs. As a tractable structure, tree approximations have been utilized in the variational method of Ghahramani & Jordan (1997) and the se- quential projection method of Frey et al. (2000). However, belief propa- gation represents each factor of the graph with a product of single-node messages. In this paper, belief propagation is extended to represent fac- tors with tree approximations, by way of the expectation propagation framework. That is, each factor sends a \u201cmessage\u201d to all pairs of nodes in a tree structure. The result is more accurate inferences and more fre- quent convergence than ordinary belief propagation, at a lower cost than variational trees or double-loop algorithms.", "bibtex": "@inproceedings{NIPS2003_97416ac0,\n author = {Qi, Yuan and Minka, Tom},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Tree-structured Approximations by Expectation Propagation},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/97416ac0f58056947e2eb5d5d253d4f2-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/97416ac0f58056947e2eb5d5d253d4f2-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/97416ac0f58056947e2eb5d5d253d4f2-Metadata.json", "review": "", "metareview": "", "pdf_size": 77473, "gs_citation": 103, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2975115100523473631&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 13, "aff": "Department of Statistics, Carnegie Mellon University; Media Laboratory, Massachusetts Institute of Technology", "aff_domain": "stat.cmu.edu;media.mit.edu", "email": "stat.cmu.edu;media.mit.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "Carnegie Mellon University;Massachusetts Institute of Technology", "aff_unique_dep": "Department of Statistics;Media Laboratory", "aff_unique_url": "https://www.cmu.edu;https://www.mit.edu", "aff_unique_abbr": "CMU;MIT", "aff_campus_unique_index": "1", "aff_campus_unique": ";Cambridge", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "9e0a33c4d3", "title": "Unsupervised Color Decomposition Of Histologically Stained Tissue Samples", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/d9fc0cdb67638d50f411432d0d41d0ba-Abstract.html", "author": "Andrew Rabinovich; Sameer Agarwal; Casey Laris; Jeffrey H. Price; Serge J. Belongie", "abstract": "Accurate spectral decomposition is essential for the analysis and diagnosis of histologically stained tissue sections. In this paper we present the \ufb01rst automated system for performing this decompo- sition. We compare the performance of our system with ground truth data and report favorable results.", "bibtex": "@inproceedings{NIPS2003_d9fc0cdb,\n author = {Rabinovich, Andrew and Agarwal, Sameer and Laris, Casey and Price, Jeffrey and Belongie, Serge},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Unsupervised Color Decomposition Of Histologically Stained Tissue Samples},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/d9fc0cdb67638d50f411432d0d41d0ba-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/d9fc0cdb67638d50f411432d0d41d0ba-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/d9fc0cdb67638d50f411432d0d41d0ba-Metadata.json", "review": "", "metareview": "", "pdf_size": 170531, "gs_citation": 162, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2059602936937382664&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 11, "aff": "Department of Computer Science, University of California, San Diego; Department of Computer Science, University of California, San Diego; Q3DM, Inc.; Department of Bioengineering, University of California, San Diego; Department of Computer Science, University of California, San Diego", "aff_domain": "ucsd.edu;cs.ucsd.edu;q3dm.com;ucsd.edu;cs.ucsd.edu", "email": "ucsd.edu;cs.ucsd.edu;q3dm.com;ucsd.edu;cs.ucsd.edu", "github": "", "project": "", "author_num": 5, "track": "main", "status": "Poster", "aff_unique_index": "0;0;1;0;0", "aff_unique_norm": "University of California, San Diego;Q3DM, Inc.", "aff_unique_dep": "Department of Computer Science;", "aff_unique_url": "https://www.ucsd.edu;", "aff_unique_abbr": "UCSD;", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "San Diego;", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "id": "2e3ed2e832", "title": "Unsupervised Context Sensitive Language Acquisition from a Large Corpus", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/250413d2982f1f83aa62a3a323cd2a87-Abstract.html", "author": "Zach Solan; David Horn; Eytan Ruppin; Shimon Edelman", "abstract": "We describe a pattern acquisition algorithm that learns, in an unsuper- vised fashion, a streamlined representation of linguistic structures from a plain natural-language corpus. This paper addresses the issues of learn- ing structured knowledge from a large-scale natural language data set, and of generalization to unseen text. The implemented algorithm repre- sents sentences as paths on a graph whose vertices are words (or parts of words). Signi\ufb01cant patterns, determined by recursive context-sensitive statistical inference, form new vertices. Linguistic constructions are rep- resented by trees composed of signi\ufb01cant patterns and their associated equivalence classes. An input module allows the algorithm to be sub- jected to a standard test of English as a Second Language (ESL) pro\ufb01- ciency. The results are encouraging: the model attains a level of per- formance considered to be \u201cintermediate\u201d for 9th-grade students, de- spite having been trained on a corpus (CHILDES) containing transcribed speech of parents directed to small children.", "bibtex": "@inproceedings{NIPS2003_250413d2,\n author = {Solan, Zach and Horn, David and Ruppin, Eytan and Edelman, Shimon},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Unsupervised Context Sensitive Language Acquisition from a Large Corpus},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/250413d2982f1f83aa62a3a323cd2a87-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/250413d2982f1f83aa62a3a323cd2a87-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/250413d2982f1f83aa62a3a323cd2a87-Metadata.json", "review": "", "metareview": "", "pdf_size": 130935, "gs_citation": 42, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3191245861952396723&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 16, "aff": "Sackler Faculty of Exact Sciences, Tel Aviv University, Tel Aviv, Israel 69978; Sackler Faculty of Exact Sciences, Tel Aviv University, Tel Aviv, Israel 69978; Sackler Faculty of Exact Sciences, Tel Aviv University, Tel Aviv, Israel 69978; Department of Psychology, Cornell University, Ithaca, NY 14853, USA", "aff_domain": "post.tau.ac.il;post.tau.ac.il;post.tau.ac.il;cornell.edu", "email": "post.tau.ac.il;post.tau.ac.il;post.tau.ac.il;cornell.edu", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;1", "aff_unique_norm": "Tel Aviv University;Cornell University", "aff_unique_dep": "Sackler Faculty of Exact Sciences;Department of Psychology", "aff_unique_url": "https://www.tau.ac.il;https://www.cornell.edu", "aff_unique_abbr": "TAU;Cornell", "aff_campus_unique_index": "0;0;0;1", "aff_campus_unique": "Tel Aviv;Ithaca", "aff_country_unique_index": "0;0;0;1", "aff_country_unique": "Israel;United States" }, { "id": "314d312879", "title": "Using the Forest to See the Trees: A Graphical Model Relating Features, Objects, and Scenes", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/99f59c0842e83c808dd1813b48a37c6a-Abstract.html", "author": "Kevin P. Murphy; Antonio Torralba; William T. Freeman", "abstract": "Standard approaches to object detection focus on local patches of the image, and try to classify them as background or not. We propose to use the scene context (image as a whole) as an extra source of (global) information, to help resolve local ambiguities. We present a conditional random \ufb01eld for jointly solving the tasks of object detection and scene classi\ufb01cation.", "bibtex": "@inproceedings{NIPS2003_99f59c08,\n author = {Murphy, Kevin P and Torralba, Antonio and Freeman, William},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Using the Forest to See the Trees: A Graphical Model Relating Features, Objects, and Scenes},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/99f59c0842e83c808dd1813b48a37c6a-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/99f59c0842e83c808dd1813b48a37c6a-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/99f59c0842e83c808dd1813b48a37c6a-Metadata.json", "review": "", "metareview": "", "pdf_size": 145936, "gs_citation": 558, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9666872637046479725&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 18, "aff": "MIT AI lab; MIT AI lab; MIT AI lab", "aff_domain": "ai.mit.edu;ai.mit.edu;ai.mit.edu", "email": "ai.mit.edu;ai.mit.edu;ai.mit.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "Massachusetts Institute of Technology", "aff_unique_dep": "Artificial Intelligence Laboratory", "aff_unique_url": "http://www.ai.mit.edu", "aff_unique_abbr": "MIT AI lab", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Cambridge", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "8fc748a2e1", "title": "Variational Linear Response", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/a9365bd906e11324065c35be476beb0c-Abstract.html", "author": "Manfred Opper; Ole Winther", "abstract": "A general linear response method for deriving improved estimates of cor- relations in the variational Bayes framework is presented. Three applica- tions are given and it is discussed how to use linear response as a general principle for improving mean \ufb01eld approximations.", "bibtex": "@inproceedings{NIPS2003_a9365bd9,\n author = {Opper, Manfred and Winther, Ole},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Variational Linear Response},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/a9365bd906e11324065c35be476beb0c-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/a9365bd906e11324065c35be476beb0c-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/a9365bd906e11324065c35be476beb0c-Metadata.json", "review": "", "metareview": "", "pdf_size": 95928, "gs_citation": 13, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8729818681019900642&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 15, "aff": "Neural Computing Research Group, School of Engineering and Applied Science, Aston University, Birmingham B4 7ET, United Kingdom; Informatics and Mathematical Modelling, Technical University of Denmark, R. 
Petersens Plads, Building 321, DK-2800 Lyngby, Denmark", "aff_domain": "aston.ac.uk;imm.dtu.dk", "email": "aston.ac.uk;imm.dtu.dk", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "Aston University;Technical University of Denmark", "aff_unique_dep": "School of Engineering and Applied Science;Informatics and Mathematical Modelling", "aff_unique_url": "https://www.aston.ac.uk;https://www.dtu.dk", "aff_unique_abbr": "Aston;DTU", "aff_campus_unique_index": "0;1", "aff_campus_unique": "Birmingham;Lyngby", "aff_country_unique_index": "0;1", "aff_country_unique": "United Kingdom;Denmark" }, { "id": "621b909a80", "title": "Warped Gaussian Processes", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/6b5754d737784b51ec5075c0dc437bf0-Abstract.html", "author": "Edward Snelson; Zoubin Ghahramani; Carl E. Rasmussen", "abstract": "We generalise the Gaussian process (GP) framework for regression by learning a nonlinear transformation of the GP outputs. This allows for non-Gaussian processes and non-Gaussian noise. The learning algorithm chooses a nonlinear transformation such that transformed data is well-modelled by a GP. This can be seen as including a preprocessing transformation as an integral part of the probabilistic modelling problem, rather than as an ad-hoc step. We demonstrate on several real regression problems that learning the transformation can lead to significantly better performance than using a regular GP, or a GP with a fixed transformation.", "bibtex": "@inproceedings{NIPS2003_6b5754d7,\n author = {Snelson, Edward and Ghahramani, Zoubin and Rasmussen, Carl},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Warped Gaussian Processes},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/6b5754d737784b51ec5075c0dc437bf0-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/6b5754d737784b51ec5075c0dc437bf0-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/6b5754d737784b51ec5075c0dc437bf0-Metadata.json", "review": "", "metareview": "", "pdf_size": 94303, "gs_citation": 492, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6418795309177964859&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "Gatsby Computational Neuroscience Unit, University College London; Max Planck Institute for Biological Cybernetics; Gatsby Computational Neuroscience Unit, University College London", "aff_domain": "gatsby.ucl.ac.uk;tuebingen.mpg.de;gatsby.ucl.ac.uk", "email": "gatsby.ucl.ac.uk;tuebingen.mpg.de;gatsby.ucl.ac.uk", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;0", "aff_unique_norm": "University College London;Max Planck Institute for Biological Cybernetics", "aff_unique_dep": "Gatsby Computational Neuroscience Unit;Biological Cybernetics", "aff_unique_url": "https://www.ucl.ac.uk;https://www.biocybernetics.mpg.de", "aff_unique_abbr": "UCL;MPIBC", "aff_campus_unique_index": "0;0", "aff_campus_unique": "London;", "aff_country_unique_index": "0;1;0", "aff_country_unique": "United Kingdom;Germany" }, { "id": "13d856f305", "title": "When Does Non-Negative Matrix Factorization Give a Correct Decomposition into Parts?", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/1843e35d41ccf6e63273495ba42df3c1-Abstract.html", "author": "David Donoho; Victoria Stodden", "abstract": "We interpret non-negative matrix factorization geometrically, as the problem of \ufb01nding a simplicial cone which contains a cloud of data points and which is contained in the positive orthant. We show that under certain conditions, basically requiring that some of the data are spread across the faces of the positive orthant, there is a unique such simpli- cial cone. We give examples of synthetic image articulation databases which obey these conditions; these require separated support and facto- rial sampling. For such databases there is a generative model in terms of \u2018parts\u2019 and NMF correctly identi\ufb01es the \u2018parts\u2019. We show that our theoretical results are predictive of the performance of published NMF code, by running the published algorithms on one of our synthetic image articulation databases.", "bibtex": "@inproceedings{NIPS2003_1843e35d,\n author = {Donoho, David and Stodden, Victoria},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. 
Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {When Does Non-Negative Matrix Factorization Give a Correct Decomposition into Parts?},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/1843e35d41ccf6e63273495ba42df3c1-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/1843e35d41ccf6e63273495ba42df3c1-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/1843e35d41ccf6e63273495ba42df3c1-Metadata.json", "review": "", "metareview": "", "pdf_size": 62932, "gs_citation": 1185, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16286744853814618414&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 22, "aff": "Department of Statistics, Stanford University; Department of Statistics, Stanford University", "aff_domain": "stat.stanford.edu;stat.stanford.edu", "email": "stat.stanford.edu;stat.stanford.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Stanford University", "aff_unique_dep": "Department of Statistics", "aff_unique_url": "https://www.stanford.edu", "aff_unique_abbr": "Stanford", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Stanford", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "07a4aaebfd", "title": "Wormholes Improve Contrastive Divergence", "site": "https://papers.nips.cc/paper_files/paper/2003/hash/03cf87174debaccd689c90c34577b82f-Abstract.html", "author": "Max Welling; Andriy Mnih; Geoffrey E. Hinton", "abstract": "In models that de\ufb01ne probabilities via energies, maximum likelihood learning typically involves using Markov Chain Monte Carlo to sample from the model\u2019s distribution. If the Markov chain is started at the data distribution, learning often works well even if the chain is only run for a few time steps [3]. But if the data distribution contains modes separated by regions of very low density, brief MCMC will not ensure that different modes have the correct relative energies because it cannot move particles from one mode to another. We show how to improve brief MCMC by allowing long-range moves that are suggested by the data distribution. If the model is approximately correct, these long-range moves have a reasonable acceptance rate.", "bibtex": "@inproceedings{NIPS2003_03cf8717,\n author = {Welling, Max and Mnih, Andriy and Hinton, Geoffrey E},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Thrun and L. Saul and B. Sch\\\"{o}lkopf},\n pages = {},\n publisher = {MIT Press},\n title = {Wormholes Improve Contrastive Divergence},\n url = {https://proceedings.neurips.cc/paper_files/paper/2003/file/03cf87174debaccd689c90c34577b82f-Paper.pdf},\n volume = {16},\n year = {2003}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2003/file/03cf87174debaccd689c90c34577b82f-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2003/file/03cf87174debaccd689c90c34577b82f-Metadata.json", "review": "", "metareview": "", "pdf_size": 244741, "gs_citation": 23, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18005493299835231968&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 15, "aff": ";;", "aff_domain": ";;", "email": ";;", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster" } ]