[ { "id": "1891503f90", "title": "A Cost-Shaping LP for Bellman Error Minimization with Performance Guarantees", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/cc70903297fe1e25537ae50aea186306-Abstract.html", "author": "Daniela D. Farias; Benjamin V. Roy", "abstract": "We introduce a new algorithm based on linear programming that approximates the differential value function of an average-cost Markov decision process via a linear combination of pre-selected basis functions. The algorithm carries out a form of cost shaping and minimizes a version of Bellman error. We establish an error bound that scales gracefully with the number of states without imposing the (strong) Lyapunov condition required by its counter- part in [6]. We propose a path-following method that automates selection of important algorithm parameters which represent coun- terparts to the \"state-relevance weights\" studied in [6].", "bibtex": "@inproceedings{NIPS2004_cc709032,\n author = {Farias, Daniela and Roy, Benjamin},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {A Cost-Shaping LP for Bellman Error Minimization with Performance Guarantees},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/cc70903297fe1e25537ae50aea186306-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/cc70903297fe1e25537ae50aea186306-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/cc70903297fe1e25537ae50aea186306-Metadata.json", "review": "", "metareview": "", "pdf_size": 92140, "gs_citation": 4, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7495966707254917692&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Mechanical Engineering, Massachusetts Institute of Technology; Management Science and Engineering and Electrical Engineering, Stanford University", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "Massachusetts Institute of Technology;Stanford University", "aff_unique_dep": "Mechanical Engineering;Management Science and Engineering, Electrical Engineering", "aff_unique_url": "https://web.mit.edu;https://www.stanford.edu", "aff_unique_abbr": "MIT;Stanford", "aff_campus_unique_index": "0;1", "aff_campus_unique": "Cambridge;Stanford", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "766541a0a8", "title": "A Direct Formulation for Sparse PCA Using Semidefinite Programming", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/8e065119c74efe3a47aec8796964cf8b-Abstract.html", "author": "Alexandre D'aspremont; Laurent E. Ghaoui; Michael I. Jordan; Gert R. Lanckriet", "abstract": "We examine the problem of approximating, in the Frobenius-norm sense, a positive, semidefinite symmetric matrix by a rank-one matrix, with an upper bound on the cardinality of its eigenvector. The problem arises in the decomposition of a covariance matrix into sparse factors, and has wide applications ranging from biology to finance. 
We use a modifica- tion of the classical variational representation of the largest eigenvalue of a symmetric matrix, where cardinality is constrained, and derive a semidefinite programming based relaxation for our problem.", "bibtex": "@inproceedings{NIPS2004_8e065119,\n author = {D\\textquotesingle aspremont, Alexandre and Ghaoui, Laurent and Jordan, Michael and Lanckriet, Gert},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {A Direct Formulation for Sparse PCA Using Semidefinite Programming},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/8e065119c74efe3a47aec8796964cf8b-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/8e065119c74efe3a47aec8796964cf8b-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/8e065119c74efe3a47aec8796964cf8b-Metadata.json", "review": "", "metareview": "", "pdf_size": 62181, "gs_citation": 1286, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13451428275026699787&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 35, "aff": "EECS Dept., U.C. Berkeley; SAC Capital + EECS, U.C. Berkeley; EECS and Statistics Depts., U.C. Berkeley; EECS Dept., U.C. Berkeley", "aff_domain": "m4x.org;sac.com;cs.berkeley.edu;eecs.berkeley.edu", "email": "m4x.org;sac.com;cs.berkeley.edu;eecs.berkeley.edu", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;1+0;0;0", "aff_unique_norm": "University of California, Berkeley;SAC Capital", "aff_unique_dep": "Electrical Engineering and Computer Sciences;", "aff_unique_url": "https://www.berkeley.edu;", "aff_unique_abbr": "UC Berkeley;", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Berkeley;", "aff_country_unique_index": "0;0+0;0;0", "aff_country_unique": "United States" }, { "id": "e8bb159e8a", "title": "A Feature Selection Algorithm Based on the Global Minimization of a Generalization Error Bound", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/8cff9bf6694dccfc3b6a613d05d51d16-Abstract.html", "author": "Dori Peleg; Ron Meir", "abstract": "A novel linear feature selection algorithm is presented based on the global minimization of a data-dependent generalization error bound. Feature selection and scaling algorithms often lead to non-convex opti- mization problems, which in many previous approaches were addressed through gradient descent procedures that can only guarantee convergence to a local minimum. We propose an alternative approach, whereby the global solution of the non-convex optimization problem is derived via an equivalent optimization problem. Moreover, the convex optimization task is reduced to a conic quadratic programming problem for which effi- cient solvers are available. Highly competitive numerical results on both artificial and real-world data sets are reported.", "bibtex": "@inproceedings{NIPS2004_8cff9bf6,\n author = {Peleg, Dori and Meir, Ron},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {A Feature Selection Algorithm Based on the Global Minimization of a Generalization Error Bound},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/8cff9bf6694dccfc3b6a613d05d51d16-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/8cff9bf6694dccfc3b6a613d05d51d16-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/8cff9bf6694dccfc3b6a613d05d51d16-Metadata.json", "review": "", "metareview": "", "pdf_size": 127514, "gs_citation": 17, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4775024212185148623&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Department of Electrical Engineering, Technion, Haifa, Israel; Department of Electrical Engineering, Technion, Haifa, Israel", "aff_domain": "tx.technion.ac.il;tx.technion.ac.il", "email": "tx.technion.ac.il;tx.technion.ac.il", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Technion", "aff_unique_dep": "Department of Electrical Engineering", "aff_unique_url": "https://www.technion.ac.il", "aff_unique_abbr": "Technion", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Haifa", "aff_country_unique_index": "0;0", "aff_country_unique": "Israel" }, { "id": "fa008c3781", "title": "A Generalized Bradley-Terry Model: From Group Competition to Individual Skill", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/825f9cd5f0390bc77c1fed3c94885c87-Abstract.html", "author": "Tzu-kuo Huang; Chih-jen Lin; Ruby C. Weng", "abstract": "The Bradley-Terry model for paired comparison has been popular in many areas. We propose a generalized version in which paired individual comparisons are extended to paired team comparisons. We introduce a simple algorithm with convergence proofs to solve the model and obtain individual skill. A useful application to multi-class probability estimates using error-correcting codes is demonstrated.", "bibtex": "@inproceedings{NIPS2004_825f9cd5,\n author = {Huang, Tzu-kuo and Lin, Chih-jen and Weng, Ruby},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {A Generalized Bradley-Terry Model: From Group Competition to Individual Skill},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/825f9cd5f0390bc77c1fed3c94885c87-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/825f9cd5f0390bc77c1fed3c94885c87-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/825f9cd5f0390bc77c1fed3c94885c87-Metadata.json", "review": "", "metareview": "", "pdf_size": 91024, "gs_citation": 52, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10329837826717833283&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": ";;", "aff_domain": ";;", "email": ";;", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster" }, { "id": "5b4425f6f5", "title": "A Harmonic Excitation State-Space Approach to Blind Separation of Speech", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/bbeb0c1b1fd44e392c7ce2fdbd137e87-Abstract.html", "author": "Rasmus K. Olsson; Lars K. Hansen", "abstract": "We discuss an identi\ufb01cation framework for noisy speech mixtures. 
A block-based generative model is formulated that explicitly incorporates the time-varying harmonic plus noise (H+N) model for a number of latent sources observed through noisy convolutive mixtures. All parameters including the pitches of the source signals, the amplitudes and phases of the sources, the mixing filters and the noise statistics are estimated by maximum likelihood, using an EM-algorithm. Exact averaging over the hidden sources is obtained using the Kalman smoother. We show that pitch estimation and source separation can be performed simultaneously. The pitch estimates are compared to laryngograph (EGG) measurements. Artificial and real room mixtures are used to demonstrate the viability of the approach. Intelligible speech signals are re-synthesized from the estimated H+N models.", "bibtex": "@inproceedings{NIPS2004_bbeb0c1b,\n author = {Olsson, Rasmus and Hansen, Lars K},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {A Harmonic Excitation State-Space Approach to Blind Separation of Speech},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/bbeb0c1b1fd44e392c7ce2fdbd137e87-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/bbeb0c1b1fd44e392c7ce2fdbd137e87-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/bbeb0c1b1fd44e392c7ce2fdbd137e87-Metadata.json", "review": "", "metareview": "", "pdf_size": 298276, "gs_citation": 6, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11739889786386075028&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "Informatics and Mathematical Modelling, Technical University of Denmark, 2800 Lyngby, Denmark; Informatics and Mathematical Modelling, Technical University of Denmark, 2800 Lyngby, Denmark", "aff_domain": "imm.dtu.dk;imm.dtu.dk", "email": "imm.dtu.dk;imm.dtu.dk", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Technical University of Denmark", "aff_unique_dep": "Informatics and Mathematical Modelling", "aff_unique_url": "https://www.dtu.dk", "aff_unique_abbr": "DTU", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Lyngby", "aff_country_unique_index": "0;0", "aff_country_unique": "Denmark" }, { "id": "8c4a2a54d1", "title": "A Hidden Markov Model for de Novo Peptide Sequencing", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/5ac8bb8a7d745102a978c5f8ccdb61b8-Abstract.html", "author": "Bernd Fischer; Volker Roth; Jonas Grossmann; Sacha Baginsky; Wilhelm Gruissem; Franz Roos; Peter Widmayer; Joachim M. Buhmann", "abstract": "De novo Sequencing of peptides is a challenging task in proteome research. While there exist reliable DNA-sequencing methods, the high-throughput de novo sequencing of proteins by mass spectrometry is still an open problem. Current approaches suffer from a lack in precision to detect mass peaks in the spectrograms. In this paper we present a novel method for de novo peptide sequencing based on a hidden Markov model. 
Experiments effectively demonstrate that this new method significantly outperforms standard approaches in matching quality.", "bibtex": "@inproceedings{NIPS2004_5ac8bb8a,\n author = {Fischer, Bernd and Roth, Volker and Grossmann, Jonas and Baginsky, Sacha and Gruissem, Wilhelm and Roos, Franz and Widmayer, Peter and Buhmann, Joachim},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {A Hidden Markov Model for de Novo Peptide Sequencing},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/5ac8bb8a7d745102a978c5f8ccdb61b8-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/5ac8bb8a7d745102a978c5f8ccdb61b8-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/5ac8bb8a7d745102a978c5f8ccdb61b8-Metadata.json", "review": "", "metareview": "", "pdf_size": 134694, "gs_citation": 11, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3220976806320289074&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "Institute of Computational Science, ETH Zurich; Institute of Computational Science, ETH Zurich; Institute of Computational Science, ETH Zurich; Institute of Plant Sciences, ETH Zurich; Institute of Plant Sciences, ETH Zurich; Institute of Plant Sciences, ETH Zurich; Inst. of Theoretical Computer Science, ETH Zurich; Inst. of Theoretical Computer Science, ETH Zurich", "aff_domain": "inf.ethz.ch; ; ; ; ; ; ; ", "email": "inf.ethz.ch; ; ; ; ; ; ; ", "github": "", "project": "", "author_num": 8, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;0;0;0;0;0", "aff_unique_norm": "ETH Zurich", "aff_unique_dep": "Institute of Computational Science", "aff_unique_url": "https://www.ethz.ch", "aff_unique_abbr": "ETHZ", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0;0;0;0", "aff_country_unique": "Switzerland" }, { "id": "d3ccbba2a0", "title": "A Large Deviation Bound for the Area Under the ROC Curve", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/f0f6ba4b5e0000340312d33c212c3ae8-Abstract.html", "author": "Shivani Agarwal; Thore Graepel; Ralf Herbrich; Dan Roth", "abstract": "The area under the ROC curve (AUC) has been advocated as an evaluation criterion for the bipartite ranking problem. We study large deviation properties of the AUC; in particular, we derive a distribution-free large deviation bound for the AUC which serves to bound the expected accuracy of a ranking function in terms of its empirical AUC on an independent test sequence. A comparison of our result with a corresponding large deviation result for the classification error rate suggests that the test sample size required to obtain an \u03b5-accurate estimate of the expected accuracy of a ranking function with \u03b4-confidence is larger than that required to obtain an \u03b5-accurate estimate of the expected error rate of a classification function with the same confidence. A simple application of the union bound allows the large deviation bound to be extended to learned ranking functions chosen from finite function classes.", "bibtex": "@inproceedings{NIPS2004_f0f6ba4b,\n author = {Agarwal, Shivani and Graepel, Thore and Herbrich, Ralf and Roth, Dan},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {A Large Deviation Bound for the Area Under the ROC Curve},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/f0f6ba4b5e0000340312d33c212c3ae8-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/f0f6ba4b5e0000340312d33c212c3ae8-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/f0f6ba4b5e0000340312d33c212c3ae8-Metadata.json", "review": "", "metareview": "", "pdf_size": 154543, "gs_citation": 13, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2606608693863330780&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 14, "aff": "Dept. of Computer Science, University of Illinois, Urbana, IL 61801, USA; Microsoft Research, 7 JJ Thomson Avenue, Cambridge CB3 0FB, UK; Microsoft Research, 7 JJ Thomson Avenue, Cambridge CB3 0FB, UK; Dept. of Computer Science, University of Illinois, Urbana, IL 61801, USA", "aff_domain": "cs.uiuc.edu;microsoft.com;microsoft.com;cs.uiuc.edu", "email": "cs.uiuc.edu;microsoft.com;microsoft.com;cs.uiuc.edu", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;1;1;0", "aff_unique_norm": "University of Illinois;Microsoft", "aff_unique_dep": "Dept. of Computer Science;Microsoft Research", "aff_unique_url": "https://illinois.edu;https://www.microsoft.com/en-us/research", "aff_unique_abbr": "UIUC;MSR", "aff_campus_unique_index": "0;1;1;0", "aff_campus_unique": "Urbana;Cambridge", "aff_country_unique_index": "0;1;1;0", "aff_country_unique": "United States;United Kingdom" }, { "id": "fb6a3d0cbb", "title": "A Machine Learning Approach to Conjoint Analysis", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/4bbdcc0e821637155ac4217bdab70d2e-Abstract.html", "author": "Olivier Chapelle; Za\u00efd Harchaoui", "abstract": "Choice-based conjoint analysis builds models of consumer preferences over products with answers gathered in questionnaires. Our main goal is to bring tools from the machine learning community to solve this prob- lem more efficiently. Thus, we propose two algorithms to quickly and accurately estimate consumer preferences.", "bibtex": "@inproceedings{NIPS2004_4bbdcc0e,\n author = {Chapelle, Olivier and Harchaoui, Za\\\"{\\i}d},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {A Machine Learning Approach to Conjoint Analysis},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/4bbdcc0e821637155ac4217bdab70d2e-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/4bbdcc0e821637155ac4217bdab70d2e-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/4bbdcc0e821637155ac4217bdab70d2e-Metadata.json", "review": "", "metareview": "", "pdf_size": 162061, "gs_citation": 77, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18023743319787237383&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": ";", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster" }, { "id": "a8a2069277", "title": "A Method for Inferring Label Sampling Mechanisms in Semi-Supervised Learning", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/eaa52f3366768bca401dca9ea5b181dd-Abstract.html", "author": "Saharon Rosset; Ji Zhu; Hui Zou; Trevor J. 
Hastie", "abstract": "We consider the situation in semi-supervised learning, where the \"label sampling\" mechanism stochastically depends on the true response (as well as potentially on the features). We suggest a method of moments for estimating this stochastic dependence using the unlabeled data. This is potentially useful for two distinct purposes: a. As an input to a supervised learning procedure which can be used to \"de-bias\" its results using labeled data only and b. As a potentially interesting learning task in itself. We present several examples to illustrate the practical usefulness of our method.", "bibtex": "@inproceedings{NIPS2004_eaa52f33,\n author = {Rosset, Saharon and Zhu, Ji and Zou, Hui and Hastie, Trevor},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {A Method for Inferring Label Sampling Mechanisms in Semi-Supervised Learning},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/eaa52f3366768bca401dca9ea5b181dd-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/eaa52f3366768bca401dca9ea5b181dd-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/eaa52f3366768bca401dca9ea5b181dd-Metadata.json", "review": "", "metareview": "", "pdf_size": 118531, "gs_citation": 70, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11420136477627507624&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "Data Analytics Research Group, IBM T.J. Watson Research Center, Yorktown Heights, NY 10598; Department of Statistics, University of Michigan, Ann Arbor, MI 48109; Department of Statistics, Stanford University, Stanford, CA 94305; Department of Statistics, Stanford University, Stanford, CA 94305", "aff_domain": "us.ibm.com;umich.edu;stat.stanford.com;stanford.edu", "email": "us.ibm.com;umich.edu;stat.stanford.com;stanford.edu", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;1;2;2", "aff_unique_norm": "IBM;University of Michigan;Stanford University", "aff_unique_dep": "Data Analytics Research Group;Department of Statistics;Department of Statistics", "aff_unique_url": "https://www.ibm.com/research/watson;https://www.umich.edu;https://www.stanford.edu", "aff_unique_abbr": "IBM Watson;UM;Stanford", "aff_campus_unique_index": "0;1;2;2", "aff_campus_unique": "Yorktown Heights;Ann Arbor;Stanford", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "id": "54b85751da", "title": "A Probabilistic Model for Online Document Clustering with Application to Novelty Detection", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/09a5e2a11bea20817477e0b1dfe2cc21-Abstract.html", "author": "Jian Zhang; Zoubin Ghahramani; Yiming Yang", "abstract": "In this paper we propose a probabilistic model for online document clustering. We use non-parametric Dirichlet process prior to model the growing number of clusters, and use a prior of general English language model as the base distribution to handle the generation of novel clusters. Furthermore, cluster uncertainty is modeled with a Bayesian Dirichlet-multinomial distribution. We use empirical Bayes method to estimate hyperparameters based on a historical dataset. 
Our probabilistic model is applied to the novelty detection task in Topic Detection and Tracking (TDT) and compared with existing approaches in the literature.", "bibtex": "@inproceedings{NIPS2004_09a5e2a1,\n author = {Zhang, Jian and Ghahramani, Zoubin and Yang, Yiming},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {A Probabilistic Model for Online Document Clustering with Application to Novelty Detection},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/09a5e2a11bea20817477e0b1dfe2cc21-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/09a5e2a11bea20817477e0b1dfe2cc21-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/09a5e2a11bea20817477e0b1dfe2cc21-Metadata.json", "review": "", "metareview": "", "pdf_size": 86648, "gs_citation": 199, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13895735807507610976&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "School of Computer Science, Carnegie Mellon University, Pittsburgh, PA 15213; Gatsby Computational Neuroscience Unit, University College London, London WC1N 3AR, UK; School of Computer Science, Carnegie Mellon University, Pittsburgh, PA 15213", "aff_domain": "cs.cmu.edu;gatsby.ucl.ac.uk;cs.cmu.edu", "email": "cs.cmu.edu;gatsby.ucl.ac.uk;cs.cmu.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;0", "aff_unique_norm": "Carnegie Mellon University;University College London", "aff_unique_dep": "School of Computer Science;Gatsby Computational Neuroscience Unit", "aff_unique_url": "https://www.cmu.edu;https://www.ucl.ac.uk", "aff_unique_abbr": "CMU;UCL", "aff_campus_unique_index": "0;1;0", "aff_campus_unique": "Pittsburgh;London", "aff_country_unique_index": "0;1;0", "aff_country_unique": "United States;United Kingdom" }, { "id": "25cc8ab13c", "title": "A Second Order Cone programming Formulation for Classifying Missing Data", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/028ee724157b05d04e7bdcf237d12e60-Abstract.html", "author": "Chiranjib Bhattacharyya; Pannagadatta K. Shivaswamy; Alex J. Smola", "abstract": "We propose a convex optimization based strategy to deal with uncertainty in the observations of a classification problem. We assume that instead of a sample (xi, yi) a distribution over (xi, yi) is specified. In particu- lar, we derive a robust formulation when the distribution is given by a normal distribution. It leads to Second Order Cone Programming formu- lation. Our method is applied to the problem of missing data, where it outperforms direct imputation.", "bibtex": "@inproceedings{NIPS2004_028ee724,\n author = {Bhattacharyya, Chiranjib and Shivaswamy, Pannagadatta and Smola, Alex},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {A Second Order Cone programming Formulation for Classifying Missing Data},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/028ee724157b05d04e7bdcf237d12e60-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/028ee724157b05d04e7bdcf237d12e60-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/028ee724157b05d04e7bdcf237d12e60-Metadata.json", "review": "", "metareview": "", "pdf_size": 78803, "gs_citation": 94, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9588171600983634313&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 13, "aff": "Department of Computer Science and Automation, Indian Institute of Science, Bangalore, 560 012, India; Department of Electrical Engineering, Indian Institute of Science, Bangalore, 560 012, India; Machine Learning Program, National ICT Australia and ANU, Canberra, ACT 0200, Australia", "aff_domain": "csa.iisc.ernet.in;ee.iisc.ernet.in;anu.edu.au", "email": "csa.iisc.ernet.in;ee.iisc.ernet.in;anu.edu.au", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;1", "aff_unique_norm": "Indian Institute of Science;Australian National University", "aff_unique_dep": "Department of Computer Science and Automation;Machine Learning Program", "aff_unique_url": "https://www.iisc.ac.in;https://www.anu.edu.au", "aff_unique_abbr": "IISc;ANU", "aff_campus_unique_index": "0;0;1", "aff_campus_unique": "Bangalore;Canberra", "aff_country_unique_index": "0;0;1", "aff_country_unique": "India;Australia" }, { "id": "98e32e7180", "title": "A Temporal Kernel-Based Model for Tracking Hand Movements from Neural Activities", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/f35a2bc72dfdc2aae569a0c7370bd7f5-Abstract.html", "author": "Lavi Shpigelman; Koby Crammer; Rony Paz; Eilon Vaadia; Yoram Singer", "abstract": "We devise and experiment with a dynamical kernel-based system for tracking hand movements from neural activity. The state of the system corresponds to the hand location, velocity, and acceleration, while the system's input are the instantaneous spike rates. The system's state dy- namics is defined as a combination of a linear mapping from the previous estimated state and a kernel-based mapping tailored for modeling neural activities. In contrast to generative models, the activity-to-state mapping is learned using discriminative methods by minimizing a noise-robust loss function. We use this approach to predict hand trajectories on the basis of neural activity in motor cortex of behaving monkeys and find that the proposed approach is more accurate than both a static approach based on support vector regression and the Kalman filter.", "bibtex": "@inproceedings{NIPS2004_f35a2bc7,\n author = {Shpigelman, Lavi and Crammer, Koby and Paz, Rony and Vaadia, Eilon and Singer, Yoram},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {A Temporal Kernel-Based Model for Tracking Hand Movements from Neural Activities},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/f35a2bc72dfdc2aae569a0c7370bd7f5-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/f35a2bc72dfdc2aae569a0c7370bd7f5-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/f35a2bc72dfdc2aae569a0c7370bd7f5-Metadata.json", "review": "", "metareview": "", "pdf_size": 107053, "gs_citation": 12, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5667043053272419712&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 13, "aff": "School of Computer Science and Engineering; School of Computer Science and Engineering; Interdisciplinary Center for Neural Computation + Dept. of Physiology, Hadassah Medical School; Interdisciplinary Center for Neural Computation + Dept. of Physiology, Hadassah Medical School; School of Computer Science and Engineering", "aff_domain": "cs.huji.ac.il; ; ; ; ", "email": "cs.huji.ac.il; ; ; ; ", "github": "", "project": "", "author_num": 5, "track": "main", "status": "Poster", "aff_unique_index": "0;0;1+2;1+2;0", "aff_unique_norm": "School of Computer Science and Engineering;Interdisciplinary Center for Neural Computation;Hadassah Medical School", "aff_unique_dep": "Computer Science and Engineering;Interdisciplinary Center for Neural Computation;Dept. of Physiology", "aff_unique_url": ";;https://www.hadassah.org.il", "aff_unique_abbr": ";;", "aff_campus_unique_index": ";", "aff_campus_unique": "", "aff_country_unique_index": "1+1;1+1", "aff_country_unique": ";Israel" }, { "id": "db2b1ed38d", "title": "A Three Tiered Approach for Articulated Object Action Modeling and Recognition", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/9a5748a2fbaa6564d05d7f2ae29a9355-Abstract.html", "author": "Le Lu; Gregory D. Hager; Laurent Younes", "abstract": "Visual action recognition is an important problem in computer vision. In this paper, we propose a new method to probabilistically model and recognize actions of articulated objects, such as hand or body gestures, in image sequences. Our method consists of three levels of representation. At the low level, we first extract a feature vector invariant to scale and in-plane rotation by using the Fourier transform of a circular spatial histogram. Then, spectral partitioning [20] is utilized to obtain an initial clustering; this clustering is then refined using a temporal smoothness constraint. Gaussian mixture model (GMM) based clustering and density estimation in the subspace of linear discriminant analysis (LDA) are then applied to thousands of image feature vectors to obtain an intermediate level representation. Finally, at the high level we build a temporal multi-resolution histogram model for each action by aggregating the clustering weights of sampled images belonging to that action. We discuss how this high level representation can be extended to achieve temporal scaling invariance and to include Bi-gram or Multi-gram transition information. Both image clustering and action recognition/segmentation results are given to show the validity of our three tiered representation.", "bibtex": "@inproceedings{NIPS2004_9a5748a2,\n author = {Lu, Le and Hager, Gregory and Younes, Laurent},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {A Three Tiered Approach for Articulated Object Action Modeling and Recognition},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/9a5748a2fbaa6564d05d7f2ae29a9355-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/9a5748a2fbaa6564d05d7f2ae29a9355-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/9a5748a2fbaa6564d05d7f2ae29a9355-Metadata.json", "review": "", "metareview": "", "pdf_size": 194135, "gs_citation": 11, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11946080120327387706&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "Department of Computer Science, Johns Hopkins University; Department of Computer Science, Johns Hopkins University; Center of Imaging Science, Johns Hopkins University", "aff_domain": "cs.jhu.edu;cs.jhu.edu;cis.jhu.edu", "email": "cs.jhu.edu;cs.jhu.edu;cis.jhu.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "Johns Hopkins University", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.jhu.edu", "aff_unique_abbr": "JHU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "ad0a253e0b", "title": "A Topographic Support Vector Machine: Classification Using Local Label Configurations", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/217e342fc01668b10cb1188d40d3370e-Abstract.html", "author": "Johannes Mohr; Klaus Obermayer", "abstract": "The standard approach to the classification of objects is to consider the examples as independent and identically distributed (iid). In many real world settings, however, this assumption is not valid, because a topo- graphical relationship exists between the objects. In this contribution we consider the special case of image segmentation, where the objects are pixels and where the underlying topography is a 2D regular rectangular grid. We introduce a classification method which not only uses measured vectorial feature information but also the label configuration within a to- pographic neighborhood. Due to the resulting dependence between the labels of neighboring pixels, a collective classification of a set of pixels becomes necessary. We propose a new method called 'Topographic Sup- port Vector Machine' (TSVM), which is based on a topographic kernel and a self-consistent solution to the label assignment shown to be equiv- alent to a recurrent neural network. The performance of the algorithm is compared to a conventional SVM on a cell image segmentation task.", "bibtex": "@inproceedings{NIPS2004_217e342f,\n author = {Mohr, Johannes and Obermayer, Klaus},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {A Topographic Support Vector Machine: Classification Using Local Label Configurations},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/217e342fc01668b10cb1188d40d3370e-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/217e342fc01668b10cb1188d40d3370e-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/217e342fc01668b10cb1188d40d3370e-Metadata.json", "review": "", "metareview": "", "pdf_size": 160031, "gs_citation": 5, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4842591397254805347&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Clinic for Psychiatry and Psychotherapy, Charit\u00e9 Medical School and Bernstein Center for Computational Neuroscience Berlin; Department of Electrical Engineering and Computer Science, Berlin University of Technology and Bernstein Center for Computational Neuroscience Berlin", "aff_domain": "cs.tu-berlin.de;cs.tu-berlin.de", "email": "cs.tu-berlin.de;cs.tu-berlin.de", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "Charit\u00e9 Medical School;Berlin University of Technology", "aff_unique_dep": "Clinic for Psychiatry and Psychotherapy;Department of Electrical Engineering and Computer Science", "aff_unique_url": "https://www.charite.de;https://www.tu-berlin.de", "aff_unique_abbr": "Charit\u00e9;TU Berlin", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Berlin", "aff_country_unique_index": "0;0", "aff_country_unique": "Germany" }, { "id": "7d2c5a85fa", "title": "Active Learning for Anomaly and Rare-Category Detection", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/8c59fd6fbe0e9793ec2b27971221cace-Abstract.html", "author": "Dan Pelleg; Andrew W. Moore", "abstract": "We introduce a novel active-learning scenario in which a user wants to work with a learning algorithm to identify useful anomalies. These are distinguished from the traditional statistical definition of anomalies as outliers or merely ill-modeled points. Our distinction is that the usefulness of anomalies is categorized subjectively by the user. We make two additional assumptions. First, there exist extremely few useful anomalies to be hunted down within a massive dataset. Second, both useful and useless anomalies may sometimes exist within tiny classes of similar anomalies. The challenge is thus to identify \"rare category\" records in an unlabeled noisy set with help (in the form of class labels) from a human expert who has a small budget of datapoints that they are prepared to categorize. We propose a technique to meet this challenge, which assumes a mixture model fit to the data, but otherwise makes no assumptions on the particular form of the mixture components. This property promises wide applicability in real-life scenarios and for various statistical models. We give an overview of several alternative methods, highlighting their strengths and weaknesses, and conclude with a detailed empirical analysis. We show that our method can quickly zoom in on an anomaly set containing a few tens of points in a dataset of hundreds of thousands.", "bibtex": "@inproceedings{NIPS2004_8c59fd6f,\n author = {Pelleg, Dan and Moore, Andrew},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Active Learning for Anomaly and Rare-Category Detection},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/8c59fd6fbe0e9793ec2b27971221cace-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/8c59fd6fbe0e9793ec2b27971221cace-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/8c59fd6fbe0e9793ec2b27971221cace-Metadata.json", "review": "", "metareview": "", "pdf_size": 69177, "gs_citation": 240, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8765213269342511081&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 14, "aff": "School of Computer Science, Carnegie-Mellon University; School of Computer Science, Carnegie-Mellon University", "aff_domain": "cs.cmu.edu;cs.cmu.edu", "email": "cs.cmu.edu;cs.cmu.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Carnegie Mellon University", "aff_unique_dep": "School of Computer Science", "aff_unique_url": "https://www.cmu.edu", "aff_unique_abbr": "CMU", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Pittsburgh", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "e436a4a733", "title": "Adaptive Discriminative Generative Model and Its Applications", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/f12f2b34a0c3174269c19e21c07dee68-Abstract.html", "author": "Ruei-sung Lin; David A. Ross; Jongwoo Lim; Ming-Hsuan Yang", "abstract": "This paper presents an adaptive discriminative generative model that generalizes the conventional Fisher Linear Discriminant algorithm and renders a proper probabilistic interpretation. Within the context of object tracking, we aim to find a discriminative generative model that best separates the target from the background. We present a computationally efficient algorithm to constantly update this discriminative model as time progresses. While most tracking algorithms operate on the premise that the object appearance or ambient lighting condition does not significantly change as time progresses, our method adapts a discriminative generative model to reflect appearance variation of the target and background, thereby facilitating the tracking task in ever-changing environments. Numerous experiments show that our method is able to learn a discriminative generative model for tracking target objects undergoing large pose and lighting changes.", "bibtex": "@inproceedings{NIPS2004_f12f2b34,\n author = {Lin, Ruei-sung and Ross, David and Lim, Jongwoo and Yang, Ming-Hsuan},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Adaptive Discriminative Generative Model and Its Applications},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/f12f2b34a0c3174269c19e21c07dee68-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/f12f2b34a0c3174269c19e21c07dee68-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/f12f2b34a0c3174269c19e21c07dee68-Metadata.json", "review": "", "metareview": "", "pdf_size": 208515, "gs_citation": 130, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9142739616194011791&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 17, "aff": "University of Illinois; University of Toronto; University of Illinois + Honda Research Institute; Honda Research Institute", "aff_domain": "uiuc.edu;cs.toronto.edu;uiuc.edu;honda-ri.com", "email": "uiuc.edu;cs.toronto.edu;uiuc.edu;honda-ri.com", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;1;0+2;2", "aff_unique_norm": "University of Illinois;University of Toronto;Honda Research Institute", "aff_unique_dep": ";;", "aff_unique_url": "https://www.illinois.edu;https://www.utoronto.ca;https://www.honda-ri.com", "aff_unique_abbr": "UIUC;U of T;HRI", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1;0+2;2", "aff_country_unique": "United States;Canada;Japan" }, { "id": "2d2f902017", "title": "Adaptive Manifold Learning", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/eb0ecdb070a1a0ac46de0cd733d39cf3-Abstract.html", "author": "Jing Wang; Zhenyue Zhang; Hongyuan Zha", "abstract": "Recently, there have been several advances in the machine learning and pattern recognition communities for developing manifold learning algo- rithms to construct nonlinear low-dimensional manifolds from sample data points embedded in high-dimensional spaces. In this paper, we de- velop algorithms that address two key issues in manifold learning: 1) the adaptive selection of the neighborhood sizes; and 2) better fitting the local geometric structure to account for the variations in the curvature of the manifold and its interplay with the sampling density of the data set. We also illustrate the effectiveness of our methods on some synthetic data sets.", "bibtex": "@inproceedings{NIPS2004_eb0ecdb0,\n author = {Wang, Jing and Zhang, Zhenyue and Zha, Hongyuan},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Adaptive Manifold Learning},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/eb0ecdb070a1a0ac46de0cd733d39cf3-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/eb0ecdb070a1a0ac46de0cd733d39cf3-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/eb0ecdb070a1a0ac46de0cd733d39cf3-Metadata.json", "review": "", "metareview": "", "pdf_size": 874005, "gs_citation": 273, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1499379387817738936&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Department of Mathematics, Zhejiang University; Department of Mathematics, Zhejiang University; Department of Computer Science, Pennsylvania State University", "aff_domain": "sohu.com;zju.edu.cn;cse.psu.edu", "email": "sohu.com;zju.edu.cn;cse.psu.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;1", "aff_unique_norm": "Zhejiang University;Pennsylvania State University", "aff_unique_dep": "Department of Mathematics;Department of Computer Science", "aff_unique_url": "http://www.zju.edu.cn;https://www.psu.edu", "aff_unique_abbr": "ZJU;PSU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;1", "aff_country_unique": "China;United States" }, { "id": "b1f8eb85d4", "title": "Algebraic Set Kernels with Application to Inference Over Local Image Representations", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/df0e09d6f25a15a815563df9827f48fa-Abstract.html", "author": "Amnon Shashua; Tamir Hazan", "abstract": "This paper presents a general family of algebraic positive definite simi- larity functions over spaces of matrices with varying column rank. The columns can represent local regions in an image (whereby images have varying number of local parts), images of an image sequence, motion tra- jectories in a multibody motion, and so forth. The family of set kernels we derive is based on a group invariant tensor product lifting with param- eters that can be naturally tuned to provide a cook-book of sorts covering the possible \"wish lists\" from similarity measures over sets of varying cardinality. We highlight the strengths of our approach by demonstrat- ing the set kernels for visual recognition of pedestrians using local parts representations.", "bibtex": "@inproceedings{NIPS2004_df0e09d6,\n author = {Shashua, Amnon and Hazan, Tamir},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Algebraic Set Kernels with Application to Inference Over Local Image Representations},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/df0e09d6f25a15a815563df9827f48fa-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/df0e09d6f25a15a815563df9827f48fa-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/df0e09d6f25a15a815563df9827f48fa-Metadata.json", "review": "", "metareview": "", "pdf_size": 144259, "gs_citation": 24, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1513744098572390029&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "School of Engineering and Computer Science, Hebrew University of Jerusalem; School of Engineering and Computer Science, Hebrew University of Jerusalem", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Hebrew University of Jerusalem", "aff_unique_dep": "School of Engineering and Computer Science", "aff_unique_url": "https://www.huji.ac.il", "aff_unique_abbr": "HUJI", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Jerusalem", "aff_country_unique_index": "0;0", "aff_country_unique": "Israel" }, { "id": "fb4fe8983d", "title": "An Application of Boosting to Graph Classification", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/56584778d5a8ab88d6393cc4cd11e090-Abstract.html", "author": "Taku Kudo; Eisaku Maeda; Yuji Matsumoto", "abstract": "This paper presents an application of Boosting for classifying labeled graphs, general structures for modeling a number of real-world data, such as chemical compounds, natural language texts, and bio sequences. The proposal consists of i) decision stumps that use subgraph as features, and ii) a Boosting algorithm in which subgraph-based decision stumps are used as weak learners. We also discuss the relation between our al- gorithm and SVMs with convolution kernels. Two experiments using natural language data and chemical compounds show that our method achieves comparable or even better performance than SVMs with convo- lution kernels as well as improves the testing efficiency.", "bibtex": "@inproceedings{NIPS2004_56584778,\n author = {Kudo, Taku and Maeda, Eisaku and Matsumoto, Yuji},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {An Application of Boosting to Graph Classification},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/56584778d5a8ab88d6393cc4cd11e090-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/56584778d5a8ab88d6393cc4cd11e090-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/56584778d5a8ab88d6393cc4cd11e090-Metadata.json", "review": "", "metareview": "", "pdf_size": 246053, "gs_citation": 247, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15247327867329026550&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 15, "aff": "NTT Communication Science Laboratories; NTT Communication Science Laboratories; Nara Institute of Science and Technology", "aff_domain": "cslab.kecl.ntt.co.jp;cslab.kecl.ntt.co.jp;is.naist.jp", "email": "cslab.kecl.ntt.co.jp;cslab.kecl.ntt.co.jp;is.naist.jp", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;1", "aff_unique_norm": "NTT Communication Science Laboratories;Nara Institute of Science and Technology", "aff_unique_dep": ";", "aff_unique_url": "https://www.ntt-csl.com;https://www.naist.jp", "aff_unique_abbr": "NTT CSL;NAIST", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Japan" }, { "id": "b464e44ba3", "title": "An Auditory Paradigm for Brain-Computer Interfaces", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/921c2dc40d0b979c2910298d2f880152-Abstract.html", "author": "N. J. Hill; Thomas N. Lal; Karin Bierig; Niels Birbaumer; Bernhard Sch\u00f6lkopf", "abstract": "Motivated by the particular problems involved in communicating with \"locked-in\" paralysed patients, we aim to develop a brain-computer interface that uses auditory stimuli. We describe a paradigm that allows a user to make a binary decision by focusing attention on one of two concurrent auditory stimulus sequences. Using Support Vector Machine classification and Recursive Channel Elimination on the independent components of averaged event-related potentials, we show that an untrained user's EEG data can be classified with an encouragingly high level of accuracy. This suggests that it is possible for users to modulate EEG signals in a single trial by the conscious direction of attention, well enough to be useful in BCI.", "bibtex": "@inproceedings{NIPS2004_921c2dc4,\n author = {Hill, N. and Lal, Thomas and Bierig, Karin and Birbaumer, Niels and Sch\\\"{o}lkopf, Bernhard},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {An Auditory Paradigm for Brain-Computer Interfaces},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/921c2dc40d0b979c2910298d2f880152-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/921c2dc40d0b979c2910298d2f880152-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/921c2dc40d0b979c2910298d2f880152-Metadata.json", "review": "", "metareview": "", "pdf_size": 132856, "gs_citation": 149, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=332647847121413235&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 14, "aff": "Max Planck Institute for Biological Cybernetics; Max Planck Institute for Biological Cybernetics; Max Planck Institute for Biological Cybernetics; Institute for Medical Psychology and Behavioural Neurobiology, University of T\u00a8ubingen; Max Planck Institute for Biological Cybernetics", "aff_domain": "tuebingen.mpg.de;tuebingen.mpg.de;tuebingen.mpg.de;uni-tuebingen.de;tuebingen.mpg.de", "email": "tuebingen.mpg.de;tuebingen.mpg.de;tuebingen.mpg.de;uni-tuebingen.de;tuebingen.mpg.de", "github": "", "project": "", "author_num": 5, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;1;0", "aff_unique_norm": "Max Planck Institute for Biological Cybernetics;University of T\u00fcbingen", "aff_unique_dep": "Biological Cybernetics;Institute for Medical Psychology and Behavioural Neurobiology", "aff_unique_url": "https://www.biocybernetics.mpg.de;https://www.uni-tuebingen.de", "aff_unique_abbr": "MPIBC;", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "Germany" }, { "id": "bb7fca434d", "title": "An Information Maximization Model of Eye Movements", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/1b9812b99fe2672af746cefda86be5f9-Abstract.html", "author": "Laura W. Renninger; James M. Coughlan; Preeti Verghese; Jitendra Malik", "abstract": "We propose a sequential information maximization model as a general strategy for programming eye movements. The model reconstructs high-resolution visual information from a sequence of fixations, taking into account the fall-off in resolution from the fovea to the periphery. From this framework we get a simple rule for predicting fixation sequences: after each fixation, fixate next at the location that minimizes uncertainty (maximizes information) about the stimulus. By comparing our model performance to human eye movement data and to predictions from a saliency and random model, we demonstrate that our model is best at predicting fixation locations. Modeling additional biological constraints will improve the prediction of fixation sequences. Our results suggest that information maximization is a useful principle for programming eye movements.", "bibtex": "@inproceedings{NIPS2004_1b9812b9,\n author = {Renninger, Laura and Coughlan, James and Verghese, Preeti and Malik, Jitendra},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {An Information Maximization Model of Eye Movements},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/1b9812b99fe2672af746cefda86be5f9-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/1b9812b99fe2672af746cefda86be5f9-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/1b9812b99fe2672af746cefda86be5f9-Metadata.json", "review": "", "metareview": "", "pdf_size": 293507, "gs_citation": 156, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15877957422374876108&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 19, "aff": "Smith-Kettlewell Eye Research Institute; Smith-Kettlewell Eye Research Institute; Smith-Kettlewell Eye Research Institute; University of California, Berkeley", "aff_domain": "ski.org;ski.org;ski.org;eecs.berkeley.edu", "email": "ski.org;ski.org;ski.org;eecs.berkeley.edu", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;1", "aff_unique_norm": "Smith-Kettlewell Eye Research Institute;University of California, Berkeley", "aff_unique_dep": ";", "aff_unique_url": "https://www.ski.org;https://www.berkeley.edu", "aff_unique_abbr": "SKI;UC Berkeley", "aff_campus_unique_index": "1", "aff_campus_unique": ";Berkeley", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "id": "26f3206627", "title": "An Investigation of Practical Approximate Nearest Neighbor Algorithms", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/1102a326d5f7c9e04fc3c89d0ede88c9-Abstract.html", "author": "Ting Liu; Andrew W. Moore; Ke Yang; Alexander G. Gray", "abstract": "This paper concerns approximate nearest neighbor searching algorithms, which have become increasingly important, especially in high dimen- sional perception areas such as computer vision, with dozens of publica- tions in recent years. Much of this enthusiasm is due to a successful new approximate nearest neighbor approach called Locality Sensitive Hash- ing (LSH). In this paper we ask the question: can earlier spatial data structure approaches to exact nearest neighbor, such as metric trees, be altered to provide approximate answers to proximity queries and if so, how? We introduce a new kind of metric tree that allows overlap: certain datapoints may appear in both the children of a parent. We also intro- duce new approximate k-NN search algorithms on this structure. We show why these structures should be able to exploit the same random- projection-based approximations that LSH enjoys, but with a simpler al- gorithm and perhaps with greater efficiency. We then provide a detailed empirical evaluation on five large, high dimensional datasets which show up to 31-fold accelerations over LSH. This result holds true throughout the spectrum of approximation levels.", "bibtex": "@inproceedings{NIPS2004_1102a326,\n author = {Liu, Ting and Moore, Andrew and Yang, Ke and Gray, Alexander},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {An Investigation of Practical Approximate Nearest Neighbor Algorithms},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/1102a326d5f7c9e04fc3c89d0ede88c9-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/1102a326d5f7c9e04fc3c89d0ede88c9-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/1102a326d5f7c9e04fc3c89d0ede88c9-Metadata.json", "review": "", "metareview": "", "pdf_size": 99627, "gs_citation": 638, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17377384765419801604&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 11, "aff": "School of Computer Science, Carnegie-Mellon University; School of Computer Science, Carnegie-Mellon University; School of Computer Science, Carnegie-Mellon University; School of Computer Science, Carnegie-Mellon University", "aff_domain": "cs.cmu.edu;cs.cmu.edu;cs.cmu.edu;cs.cmu.edu", "email": "cs.cmu.edu;cs.cmu.edu;cs.cmu.edu;cs.cmu.edu", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Carnegie Mellon University", "aff_unique_dep": "School of Computer Science", "aff_unique_url": "https://www.cmu.edu", "aff_unique_abbr": "CMU", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Pittsburgh", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "id": "3229e0a714", "title": "Analysis of a greedy active learning strategy", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/c61fbef63df5ff317aecdc3670094472-Abstract.html", "author": "Sanjoy Dasgupta", "abstract": "We abstract out the core search problem of active learning schemes, to better understand the extent to which adaptive labeling can improve sam- ple complexity. We give various upper and lower bounds on the number of labels which need to be queried, and we prove that a popular greedy active learning rule is approximately as good as any other strategy for minimizing this number of labels.", "bibtex": "@inproceedings{NIPS2004_c61fbef6,\n author = {Dasgupta, Sanjoy},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Analysis of a greedy active learning strategy},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/c61fbef63df5ff317aecdc3670094472-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/c61fbef63df5ff317aecdc3670094472-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/c61fbef63df5ff317aecdc3670094472-Metadata.json", "review": "", "metareview": "", "pdf_size": 82199, "gs_citation": 424, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5113272675781942630&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 11, "aff": "University of California, San Diego", "aff_domain": "cs.ucsd.edu", "email": "cs.ucsd.edu", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "University of California, San Diego", "aff_unique_dep": "", "aff_unique_url": "https://www.ucsd.edu", "aff_unique_abbr": "UCSD", "aff_campus_unique_index": "0", "aff_campus_unique": "San Diego", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "id": "70d3709896", "title": "Approximately Efficient Online Mechanism Design", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/fc03d48253286a798f5116ec00e99b2b-Abstract.html", "author": "David C. Parkes; Dimah Yanovsky; Satinder P. Singh", "abstract": "Online mechanism design (OMD) addresses the problem of sequential decision making in a stochastic environment with multiple self-interested agents. The goal in OMD is to make value-maximizing decisions despite this self-interest. In previous work we presented a Markov decision pro- cess (MDP)-based approach to OMD in large-scale problem domains. In practice the underlying MDP needed to solve OMD is too large and hence the mechanism must consider approximations. This raises the pos- sibility that agents may be able to exploit the approximation for selfish gain. We adopt sparse-sampling-based MDP algorithms to implement - efficient policies, and retain truth-revelation as an approximate Bayesian- Nash equilibrium. Our approach is empirically illustrated in the context of the dynamic allocation of WiFi connectivity to users in a coffeehouse.", "bibtex": "@inproceedings{NIPS2004_fc03d482,\n author = {Parkes, David C and Yanovsky, Dimah and Singh, Satinder},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Approximately Efficient Online Mechanism Design},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/fc03d48253286a798f5116ec00e99b2b-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/fc03d48253286a798f5116ec00e99b2b-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/fc03d48253286a798f5116ec00e99b2b-Metadata.json", "review": "", "metareview": "", "pdf_size": 86030, "gs_citation": 61, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4077555252942500878&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 18, "aff": "DEAS, Maxwell-Dworkin, Harvard University; Comp. 
Science and Engin., University of Michigan; Harvard College", "aff_domain": "eecs.harvard.edu;umich.edu;fas.harvard.edu", "email": "eecs.harvard.edu;umich.edu;fas.harvard.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;0", "aff_unique_norm": "Harvard University;University of Michigan", "aff_unique_dep": "Division of Engineering and Applied Sciences;Department of Computer Science and Engineering", "aff_unique_url": "https://www.harvard.edu;https://www.umich.edu", "aff_unique_abbr": "Harvard;UM", "aff_campus_unique_index": "0;1;0", "aff_campus_unique": "Cambridge;Ann Arbor", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "ba656632f2", "title": "Assignment of Multiplicative Mixtures in Natural Images", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/880610aa9f9de9ea7c545169c716f477-Abstract.html", "author": "Odelia Schwartz; Terrence J. Sejnowski; Peter Dayan", "abstract": "In the analysis of natural images, Gaussian scale mixtures (GSM) have been used to account for the statistics of filter responses, and to inspire hierarchical cortical representational learning schemes. GSMs pose a critical assignment problem, working out which filter responses were generated by a common multiplicative factor. We present a new approach to solving this assignment problem through a probabilistic extension to the basic GSM, and show how to perform inference in the model using Gibbs sampling. We demonstrate the efficacy of the approach on both synthetic and image data.", "bibtex": "@inproceedings{NIPS2004_880610aa,\n author = {Schwartz, Odelia and Sejnowski, Terrence J and Dayan, Peter},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Assignment of Multiplicative Mixtures in Natural Images},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/880610aa9f9de9ea7c545169c716f477-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/880610aa9f9de9ea7c545169c716f477-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/880610aa9f9de9ea7c545169c716f477-Metadata.json", "review": "", "metareview": "", "pdf_size": 206147, "gs_citation": 6, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5500256906743127963&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "HHMI and Salk Institute, La Jolla, CA 92014; HHMI and Salk Institute, La Jolla, CA 92014; GCNU, UCL, 17 Queen Square, London", "aff_domain": "salk.edu;salk.edu;gatsby.ucl.ac.uk", "email": "salk.edu;salk.edu;gatsby.ucl.ac.uk", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;1", "aff_unique_norm": "Salk Institute;University College London", "aff_unique_dep": ";", "aff_unique_url": "https://www.salk.edu;https://www.ucl.ac.uk", "aff_unique_abbr": "Salk;UCL", "aff_campus_unique_index": "0;0;1", "aff_campus_unique": "La Jolla;London", "aff_country_unique_index": "0;0;1", "aff_country_unique": "United States;United Kingdom" }, { "id": "c93d96f514", "title": "At the Edge of Chaos: Real-time Computations and Self-Organized Criticality in Recurrent Neural Networks", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/f8da71e562ff44a2bc7edf3578c593da-Abstract.html", "author": "Nils Bertschinger; Thomas Natschl\u00e4ger; Robert A. 
Legenstein", "abstract": "In this paper we analyze the relationship between the computational ca- pabilities of randomly connected networks of threshold gates in the time- series domain and their dynamical properties. In particular we propose a complexity measure which we find to assume its highest values near the edge of chaos, i.e. the transition from ordered to chaotic dynamics. Furthermore we show that the proposed complexity measure predicts the computational capabilities very well: only near the edge of chaos are such networks able to perform complex computations on time series. Ad- ditionally a simple synaptic scaling rule for self-organized criticality is presented and analyzed.", "bibtex": "@inproceedings{NIPS2004_f8da71e5,\n author = {Bertschinger, Nils and Natschl\\\"{a}ger, Thomas and Legenstein, Robert},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {At the Edge of Chaos: Real-time Computations and Self-Organized Criticality in Recurrent Neural Networks},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/f8da71e562ff44a2bc7edf3578c593da-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/f8da71e562ff44a2bc7edf3578c593da-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/f8da71e562ff44a2bc7edf3578c593da-Metadata.json", "review": "", "metareview": "", "pdf_size": 761413, "gs_citation": 1035, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3454489914770678259&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 14, "aff": "Software Competence Center Hagenberg; Max Planck Institute for Mathematics in the Sciences; Institute for Theoretical Computer Science, TU Graz", "aff_domain": "scch.at;mis.mpg.de;igi.tu-graz.ac.at", "email": "scch.at;mis.mpg.de;igi.tu-graz.ac.at", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;2", "aff_unique_norm": "Software Competence Center Hagenberg;Max Planck Institute for Mathematics in the Sciences;Graz University of Technology", "aff_unique_dep": ";Mathematics;Institute for Theoretical Computer Science", "aff_unique_url": "https://www.scc-hagenberg.at;https://www.mis.mpg.de;https://www.tugraz.at", "aff_unique_abbr": ";MPI MIS;TU Graz", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1;0", "aff_country_unique": "Austria;Germany" }, { "id": "9c962cee5f", "title": "Bayesian Regularization and Nonnegative Deconvolution for Time Delay Estimation", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/de594ef5c314372edec29b93cab9d72e-Abstract.html", "author": "Yuanqing Lin; Daniel D. Lee", "abstract": "Bayesian Regularization and Nonnegative Deconvolution (BRAND) is proposed for estimating time delays of acoustic signals in reverberant environments. Sparsity of the nonnegative filter coefficients is enforced using an L1-norm regularization. A probabilistic generative model is used to simultaneously estimate the regularization parameters and filter coefficients from the signal data. Iterative update rules are derived under a Bayesian framework using the Expectation-Maximization procedure. 
The resulting time delay estimation algorithm is demonstrated on noisy acoustic data.", "bibtex": "@inproceedings{NIPS2004_de594ef5,\n author = {Lin, Yuanqing and Lee, Daniel},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Bayesian Regularization and Nonnegative Deconvolution for Time Delay Estimation},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/de594ef5c314372edec29b93cab9d72e-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/de594ef5c314372edec29b93cab9d72e-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/de594ef5c314372edec29b93cab9d72e-Metadata.json", "review": "", "metareview": "", "pdf_size": 154944, "gs_citation": 10, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2023898222565168166&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "GRASP Laboratory, Department of Electrical and System Engineering, University of Pennsylvania, Philadelphia, PA 19104; GRASP Laboratory, Department of Electrical and System Engineering, University of Pennsylvania, Philadelphia, PA 19104", "aff_domain": "seas.upenn.edu;seas.upenn.edu", "email": "seas.upenn.edu;seas.upenn.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of Pennsylvania", "aff_unique_dep": "Department of Electrical and System Engineering", "aff_unique_url": "https://www.upenn.edu", "aff_unique_abbr": "UPenn", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Philadelphia", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "6b369ee1ac", "title": "Bayesian inference in spiking neurons", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/cdd96eedd7f695f4d61802f8105ba2b0-Abstract.html", "author": "Sophie Deneve", "abstract": "We propose a new interpretation of spiking neurons as Bayesian integra- tors accumulating evidence over time about events in the external world or the body, and communicating to other neurons their certainties about these events. In this model, spikes signal the occurrence of new infor- mation, i.e. what cannot be predicted from the past activity. As a result, firing statistics are close to Poisson, albeit providing a deterministic rep- resentation of probabilities. We proceed to develop a theory of Bayesian inference in spiking neural networks, recurrent interactions implement- ing a variant of belief propagation.", "bibtex": "@inproceedings{NIPS2004_cdd96eed,\n author = {Deneve, Sophie},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Bayesian inference in spiking neurons},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/cdd96eedd7f695f4d61802f8105ba2b0-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/cdd96eedd7f695f4d61802f8105ba2b0-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/cdd96eedd7f695f4d61802f8105ba2b0-Metadata.json", "review": "", "metareview": "", "pdf_size": 245575, "gs_citation": 111, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12838063750191855244&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "Gatsby Computational Neuroscience Unit, University College London, London, UK WC1N 3AR + Institute of Cognitive Science, 69645 Bron, France", "aff_domain": "gatsby.ucl.ac.uk", "email": "gatsby.ucl.ac.uk", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0+1", "aff_unique_norm": "University College London;Institute of Cognitive Science", "aff_unique_dep": "Gatsby Computational Neuroscience Unit;Cognitive Science", "aff_unique_url": "https://www.ucl.ac.uk;", "aff_unique_abbr": "UCL;", "aff_campus_unique_index": "0", "aff_campus_unique": "London;", "aff_country_unique_index": "0+1", "aff_country_unique": "United Kingdom;France" }, { "id": "54a4d73a48", "title": "Beat Tracking the Graphical Model Way", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/531db99cb00833bcd414459069dc7387-Abstract.html", "author": "Dustin Lang; Nando D. Freitas", "abstract": "We present a graphical model for beat tracking in recorded music. Using a probabilistic graphical model allows us to incorporate local information and global smoothness constraints in a principled manner. We evaluate our model on a set of varied and difficult examples, and achieve impres- sive results. By using a fast dual-tree algorithm for graphical model in- ference, our system runs in less time than the duration of the music being processed.", "bibtex": "@inproceedings{NIPS2004_531db99c,\n author = {Lang, Dustin and Freitas, Nando},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Beat Tracking the Graphical Model Way},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/531db99cb00833bcd414459069dc7387-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/531db99cb00833bcd414459069dc7387-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/531db99cb00833bcd414459069dc7387-Metadata.json", "review": "", "metareview": "", "pdf_size": 199523, "gs_citation": 24, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2291399750663405595&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "Department of Computer Science, University of British Columbia, Vancouver, BC; Department of Computer Science, University of British Columbia, Vancouver, BC", "aff_domain": "cs.ubc.ca;cs.ubc.ca", "email": "cs.ubc.ca;cs.ubc.ca", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of British Columbia", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.ubc.ca", "aff_unique_abbr": "UBC", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Vancouver", "aff_country_unique_index": "0;0", "aff_country_unique": "Canada" }, { "id": "3b7ef8b0b8", "title": "Binet-Cauchy Kernels", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/058d6f2fbe951a5a56d96b1f1a6bca1c-Abstract.html", "author": "Alex J. Smola; S.v.n. Vishwanathan", "abstract": "We propose a family of kernels based on the Binet-Cauchy theorem and its ex- tension to Fredholm operators. This includes as special cases all currently known kernels derived from the behavioral framework, diffusion processes, marginalized kernels, kernels on graphs, and the kernels on sets arising from the subspace angle approach. Many of these kernels can be seen as the extrema of a new continuum of kernel functions, which leads to numerous new special cases. As an application, we apply the new class of kernels to the problem of clustering of video sequences with encouraging results.", "bibtex": "@inproceedings{NIPS2004_058d6f2f,\n author = {Smola, Alex and Vishwanathan, S.v.n.},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Binet-Cauchy Kernels},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/058d6f2fbe951a5a56d96b1f1a6bca1c-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/058d6f2fbe951a5a56d96b1f1a6bca1c-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/058d6f2fbe951a5a56d96b1f1a6bca1c-Metadata.json", "review": "", "metareview": "", "pdf_size": 116078, "gs_citation": 30, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6207199025376146390&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "National ICT Australia, Machine Learning Program, Canberra, ACT 0200, Australia; National ICT Australia, Machine Learning Program, Canberra, ACT 0200, Australia", "aff_domain": "nicta.com.au;nicta.com.au", "email": "nicta.com.au;nicta.com.au", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "National ICT Australia", "aff_unique_dep": "Machine Learning Program", "aff_unique_url": "https://www.nicta.com.au", "aff_unique_abbr": "NICTA", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Canberra", "aff_country_unique_index": "0;0", "aff_country_unique": "Australia" }, { "id": "096e33f393", "title": "Blind One-microphone Speech Separation: A Spectral Learning Approach", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/c0e90532fb42ac6de18e25e95db73047-Abstract.html", "author": "Francis R. Bach; Michael I. Jordan", "abstract": "We present an algorithm to perform blind, one-microphone speech sep- aration. Our algorithm separates mixtures of speech without modeling individual speakers. Instead, we formulate the problem of speech sep- aration as a problem in segmenting the spectrogram of the signal into two or more disjoint sets. We build feature sets for our segmenter using classical cues from speech psychophysics. We then combine these fea- tures into parameterized affinity matrices. We also take advantage of the fact that we can generate training examples for segmentation by artifi- cially superposing separately-recorded signals. Thus the parameters of the affinity matrices can be tuned using recent work on learning spectral clustering [1]. This yields an adaptive, speech-specific segmentation al- gorithm that can successfully separate one-microphone speech mixtures.", "bibtex": "@inproceedings{NIPS2004_c0e90532,\n author = {Bach, Francis and Jordan, Michael},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Blind One-microphone Speech Separation: A Spectral Learning Approach},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/c0e90532fb42ac6de18e25e95db73047-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/c0e90532fb42ac6de18e25e95db73047-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/c0e90532fb42ac6de18e25e95db73047-Metadata.json", "review": "", "metareview": "", "pdf_size": 132003, "gs_citation": 144, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6194885216502065463&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 15, "aff": "Computer Science, University of California, Berkeley, CA 94720; Computer Science and Statistics, University of California, Berkeley, CA 94720", "aff_domain": "cs.berkeley.edu;cs.berkeley.edu", "email": "cs.berkeley.edu;cs.berkeley.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of California, Berkeley", "aff_unique_dep": "Computer Science", "aff_unique_url": "https://www.berkeley.edu", "aff_unique_abbr": "UC Berkeley", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Berkeley", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "78efd918ad", "title": "Boosting on Manifolds: Adaptive Regularization of Base Classifiers", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/6aed000af86a084f9cb0264161e29dd3-Abstract.html", "author": "Ligen Wang; Bal\u00e1zs K\u00e9gl", "abstract": "In this paper we propose to combine two powerful ideas, boosting and manifold learning. On the one hand, we improve ADABOOST by incor- porating knowledge on the structure of the data into base classifier design and selection. On the other hand, we use ADABOOST's efficient learn- ing mechanism to significantly improve supervised and semi-supervised algorithms proposed in the context of manifold learning. Beside the spe- cific manifold-based penalization, the resulting algorithm also accommo- dates the boosting of a large family of regularized learning algorithms.", "bibtex": "@inproceedings{NIPS2004_6aed000a,\n author = {Wang, Ligen and K\\'{e}gl, Bal\\'{a}zs},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Boosting on Manifolds: Adaptive Regularization of Base Classifiers},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/6aed000af86a084f9cb0264161e29dd3-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/6aed000af86a084f9cb0264161e29dd3-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/6aed000af86a084f9cb0264161e29dd3-Metadata.json", "review": "", "metareview": "", "pdf_size": 174997, "gs_citation": 27, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8683688664073437024&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": ";", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster" }, { "id": "2b2b3784ae", "title": "Brain Inspired Reinforcement Learning", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/d37b3ca37106b2bfdeaa12647e3bb1c9-Abstract.html", "author": "Fran\u00e7ois Rivest; Yoshua Bengio; John Kalaska", "abstract": "Successful application of reinforcement learning algorithms often involves considerable hand-crafting of the necessary non-linear features to reduce the complexity of the value functions and hence to promote convergence of the algorithm. In contrast, the human brain readily and autonomously finds the complex features when provided with sufficient training. Recent work in machine learning and neurophysiology has demonstrated the role of the basal ganglia and the frontal cortex in mammalian reinforcement learning. This paper develops and explores new learning algorithms that provides potential new approaches to the feature construction problem. The algorithms are compared and evaluated on the Acrobot task.", "bibtex": "@inproceedings{NIPS2004_d37b3ca3,\n author = {Rivest, Fran\\c{c}ois and Bengio, Yoshua and Kalaska, John},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Brain Inspired Reinforcement Learning},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/d37b3ca37106b2bfdeaa12647e3bb1c9-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/d37b3ca37106b2bfdeaa12647e3bb1c9-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/d37b3ca37106b2bfdeaa12647e3bb1c9-Metadata.json", "review": "", "metareview": "", "pdf_size": 223646, "gs_citation": 34, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=873151280750153788&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "D\u00e9partement d\u2019informatique et de recherche op\u00e9rationnelle, Universit\u00e9 de Montr\u00e9al; D\u00e9partement d\u2019informatique et de recherche op\u00e9rationnelle, Universit\u00e9 de Montr\u00e9al; D\u00e9partement de physiologie, Universit\u00e9 de Montr\u00e9al", "aff_domain": "mail.mcgill.ca;iro.umontreal.ca;physio.umontreal.ca", "email": "mail.mcgill.ca;iro.umontreal.ca;physio.umontreal.ca", "github": "", "project": "http://www.iro.umontreal.ca/~rivestfr", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "Universit\u00e9 de Montr\u00e9al", "aff_unique_dep": "D\u00e9partement d\u2019informatique et de recherche op\u00e9rationnelle", "aff_unique_url": "https://www.umontreal.ca", "aff_unique_abbr": "UdeM", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Montr\u00e9al", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Canada" }, { "id": "ac82540947", "title": "Breaking SVM Complexity with Cross-Training", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/dbbf603ff0e99629dda5d75b6f75f966-Abstract.html", "author": "L\u00e9on Bottou; Jason Weston; G\u00f6khan H. Bakir", "abstract": "We propose to selectively remove examples from the training set using probabilistic estimates related to editing algorithms (Devijver and Kittler, 1982). This heuristic procedure aims at creating a separable distribution of training examples with minimal impact on the position of the decision boundary. It breaks the linear dependency between the number of SVs and the number of training examples, and sharply reduces the complexity of SVMs during both the training and prediction stages.", "bibtex": "@inproceedings{NIPS2004_dbbf603f,\n author = {Bottou, L\\'{e}on and Weston, Jason and Bakir, G\\\"{o}khan},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Breaking SVM Complexity with Cross-Training},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/dbbf603ff0e99629dda5d75b6f75f966-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/dbbf603ff0e99629dda5d75b6f75f966-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/dbbf603ff0e99629dda5d75b6f75f966-Metadata.json", "review": "", "metareview": "", "pdf_size": 224815, "gs_citation": 109, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4545924136242166646&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 14, "aff": "Max Planck Institute for Biological Cybernetics, T\u00a8ubingen, Germany; NEC Labs America, Princeton NJ, USA; NEC Labs America, Princeton NJ, USA", "aff_domain": "tuebingen.mpg.de;bottou.org;nec-labs.com", "email": "tuebingen.mpg.de;bottou.org;nec-labs.com", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;1", "aff_unique_norm": "Max Planck Institute for Biological Cybernetics;NEC Labs America", "aff_unique_dep": ";", "aff_unique_url": "https://www.biocybernetics.mpg.de;https://www.nec-labs.com", "aff_unique_abbr": "MPIBC;NEC LA", "aff_campus_unique_index": "0;1;1", "aff_campus_unique": "T\u00fcbingen;Princeton", "aff_country_unique_index": "0;1;1", "aff_country_unique": "Germany;United States" }, { "id": "e03b661878", "title": "Chemosensory Processing in a Spiking Model of the Olfactory Bulb: Chemotopic Convergence and Center Surround Inhibition", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/7e83722522e8aeb7512b7075311316b7-Abstract.html", "author": "Baranidharan Raman; Ricardo Gutierrez-osuna", "abstract": "This paper presents a neuromorphic model of two olfactory signal- processing primitives: chemotopic convergence of olfactory receptor neurons, and center on-off surround lateral inhibition in the olfactory bulb. A self-organizing model of receptor convergence onto glomeruli is used to generate a spatially organized map, an olfactory image. This map serves as input to a lattice of spiking neurons with lateral connections. The dynamics of this recurrent network transforms the initial olfactory image into a spatio-temporal pattern that evolves and stabilizes into odor- and intensity-coding attractors. The model is validated using experimental data from an array of temperature-modulated gas sensors. Our results are consistent with recent neurobiological findings on the antennal lobe of the honeybee and the locust.", "bibtex": "@inproceedings{NIPS2004_7e837225,\n author = {Raman, Baranidharan and Gutierrez-osuna, Ricardo},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Chemosensory Processing in a Spiking Model of the Olfactory Bulb: Chemotopic Convergence and Center Surround Inhibition},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/7e83722522e8aeb7512b7075311316b7-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/7e83722522e8aeb7512b7075311316b7-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/7e83722522e8aeb7512b7075311316b7-Metadata.json", "review": "", "metareview": "", "pdf_size": 845969, "gs_citation": 15, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10139709142025612546&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 13, "aff": "Department of Computer Science Texas A&M University; Department of Computer Science Texas A&M University", "aff_domain": "cs.tamu.edu;cs.tamu.edu", "email": "cs.tamu.edu;cs.tamu.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Texas A&M University", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.tamu.edu", "aff_unique_abbr": "TAMU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "2fc116779e", "title": "Class-size Independent Generalization Analsysis of Some Discriminative Multi-Category Classification", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/b096577e264d1ebd6b41041f392eec23-Abstract.html", "author": "Tong Zhang", "abstract": "We consider the problem of deriving class-size independent generaliza- tion bounds for some regularized discriminative multi-category classi- fication methods. In particular, we obtain an expected generalization bound for a standard formulation of multi-category support vector ma- chines. Based on the theoretical result, we argue that the formula- tion over-penalizes misclassification error, which in theory may lead to poor generalization performance. A remedy, based on a generalization of multi-category logistic regression (conditional maximum entropy), is then proposed, and its theoretical properties are examined.", "bibtex": "@inproceedings{NIPS2004_b096577e,\n author = {Zhang, Tong},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Class-size Independent Generalization Analsysis of Some Discriminative Multi-Category Classification},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/b096577e264d1ebd6b41041f392eec23-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/b096577e264d1ebd6b41041f392eec23-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/b096577e264d1ebd6b41041f392eec23-Metadata.json", "review": "", "metareview": "", "pdf_size": 143925, "gs_citation": 19, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16562452627114876085&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "IBM T.J. 
Watson Research Center", "aff_domain": "watson.ibm.com", "email": "watson.ibm.com", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "IBM", "aff_unique_dep": "Research Center", "aff_unique_url": "https://www.ibm.com/research/watson", "aff_unique_abbr": "IBM", "aff_campus_unique_index": "0", "aff_campus_unique": "T.J. Watson", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "id": "5795068090", "title": "Co-Training and Expansion: Towards Bridging Theory and Practice", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/9457fc28ceb408103e13533e4a5b6bd1-Abstract.html", "author": "Maria-florina Balcan; Avrim Blum; Ke Yang", "abstract": "Ke Yang", "bibtex": "@inproceedings{NIPS2004_9457fc28,\n author = {Balcan, Maria-florina and Blum, Avrim and Yang, Ke},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Co-Training and Expansion: Towards Bridging Theory and Practice},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/9457fc28ceb408103e13533e4a5b6bd1-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/9457fc28ceb408103e13533e4a5b6bd1-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/9457fc28ceb408103e13533e4a5b6bd1-Metadata.json", "review": "", "metareview": "", "pdf_size": 105569, "gs_citation": 405, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2349327494698924665&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 18, "aff": "Computer Science Dept., Carnegie Mellon Univ., Pittsburgh, PA 15213; Computer Science Dept., Carnegie Mellon Univ., Pittsburgh, PA 15213; Computer Science Dept., Carnegie Mellon Univ., Pittsburgh, PA 15213", "aff_domain": "cs.cmu.edu;cs.cmu.edu;cs.cmu.edu", "email": "cs.cmu.edu;cs.cmu.edu;cs.cmu.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "Carnegie Mellon University", "aff_unique_dep": "Computer Science Department", "aff_unique_url": "https://www.cmu.edu", "aff_unique_abbr": "CMU", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Pittsburgh", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "eb32255155", "title": "Co-Validation: Using Model Disagreement on Unlabeled Data to Validate Classification Algorithms", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/92f54963fc39a9d87c2253186808ea61-Abstract.html", "author": "Omid Madani; David M. Pennock; Gary W. Flake", "abstract": "In the context of binary classification, we define disagreement as a mea- sure of how often two independently-trained models differ in their clas- sification of unlabeled data. We explore the use of disagreement for error estimation and model selection. We call the procedure co-validation, since the two models effectively (in)validate one another by comparing results on unlabeled data, which we assume is relatively cheap and plen- tiful compared to labeled data. We show that per-instance disagreement is an unbiased estimate of the variance of error for that instance. 
We also show that disagreement provides a lower bound on the prediction (gen- eralization) error, and a tight upper bound on the \"variance of prediction error\", or the variance of the average error across instances, where vari- ance is measured across training sets. We present experimental results on several data sets exploring co-validation for error estimation and model selection. The procedure is especially effective in active learning set- tings, where training sets are not drawn at random and cross validation overestimates error.", "bibtex": "@inproceedings{NIPS2004_92f54963,\n author = {Madani, Omid and Pennock, David and Flake, Gary},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Co-Validation: Using Model Disagreement on Unlabeled Data to Validate Classification Algorithms},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/92f54963fc39a9d87c2253186808ea61-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/92f54963fc39a9d87c2253186808ea61-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/92f54963fc39a9d87c2253186808ea61-Metadata.json", "review": "", "metareview": "", "pdf_size": 85946, "gs_citation": 63, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2039585297170748584&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 12, "aff": "Yahoo! Research Labs; Yahoo! Research Labs; Yahoo! Research Labs", "aff_domain": "yahoo-inc.com;yahoo-inc.com;yahoo-inc.com", "email": "yahoo-inc.com;yahoo-inc.com;yahoo-inc.com", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "Yahoo!", "aff_unique_dep": "Research Labs", "aff_unique_url": "https://research.yahoo.com", "aff_unique_abbr": "Yahoo! Research", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "cdd9b2e9d5", "title": "Coarticulation in Markov Decision Processes", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/ed57844fa5e051809ead5aa7e3e1d555-Abstract.html", "author": "Khashayar Rohanimanesh; Robert Platt; Sridhar Mahadevan; Roderic Grupen", "abstract": "We investigate an approach for simultaneously committing to mul- tiple activities, each modeled as a temporally extended action in a semi-Markov decision process (SMDP). For each activity we de- fine a set of admissible solutions consisting of the redundant set of optimal policies, and those policies that ascend the optimal state- value function associated with them. A plan is then generated by merging them in such a way that the solutions to the subordinate activities are realized in the set of admissible solutions satisfying the superior activities. We present our theoretical results and em- pirically evaluate our approach in a simulated domain.", "bibtex": "@inproceedings{NIPS2004_ed57844f,\n author = {Rohanimanesh, Khashayar and Platt, Robert and Mahadevan, Sridhar and Grupen, Roderic},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Coarticulation in Markov Decision Processes},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/ed57844fa5e051809ead5aa7e3e1d555-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/ed57844fa5e051809ead5aa7e3e1d555-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/ed57844fa5e051809ead5aa7e3e1d555-Metadata.json", "review": "", "metareview": "", "pdf_size": 153796, "gs_citation": 12, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13016590321837577136&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 11, "aff": "Department of Computer Science, University of Massachusetts; Department of Computer Science, University of Massachusetts; Department of Computer Science, University of Massachusetts; Department of Computer Science, University of Massachusetts", "aff_domain": "cs.umass.edu;cs.umass.edu;cs.umass.edu;cs.umass.edu", "email": "cs.umass.edu;cs.umass.edu;cs.umass.edu;cs.umass.edu", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "University of Massachusetts", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.umass.edu", "aff_unique_abbr": "UMass", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "id": "71a3f9018b", "title": "Common-Frame Model for Object Recognition", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/d37124c4c79f357cb02c655671a432fa-Abstract.html", "author": "Pierre Moreels; Pietro Perona", "abstract": "A generative probabilistic model for objects in images is presented. An object consists of a constellation of features. Feature appearance and pose are modeled probabilistically. Scene images are generated by draw- ing a set of objects from a given database, with random clutter sprinkled on the remaining image surface. Occlusion is allowed. We study the case where features from the same object share a common reference frame. Moreover, parameters for shape and appearance den- sities are shared across features. This is to be contrasted with previous work on probabilistic `constellation' models where features depend on each other, and each feature and model have different pose and appear- ance statistics [1, 2]. These two differences allow us to build models containing hundreds of features, as well as to train each model from a single example. Our model may also be thought of as a probabilistic revisitation of Lowe's model [3, 4]. We propose an efficient entropy-minimization inference algorithm that constructs the best interpretation of a scene as a collection of objects and clutter. We test our ideas with experiments on two image databases. We compare with Lowe's algorithm and demonstrate better performance, in particular in presence of large amounts of background clutter.", "bibtex": "@inproceedings{NIPS2004_d37124c4,\n author = {Moreels, Pierre and Perona, Pietro},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Common-Frame Model for Object Recognition},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/d37124c4c79f357cb02c655671a432fa-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/d37124c4c79f357cb02c655671a432fa-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/d37124c4c79f357cb02c655671a432fa-Metadata.json", "review": "", "metareview": "", "pdf_size": 321224, "gs_citation": 31, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8686546297788539924&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "California Institute of Technology - Pasadena CA91125 - USA; California Institute of Technology - Pasadena CA91125 - USA", "aff_domain": "vision.caltech.edu;vision.caltech.edu", "email": "vision.caltech.edu;vision.caltech.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "California Institute of Technology", "aff_unique_dep": "", "aff_unique_url": "https://www.caltech.edu", "aff_unique_abbr": "Caltech", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Pasadena", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "59f485f51a", "title": "Comparing Beliefs, Surveys, and Random Walks", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/fecf2c550171d3195c879d115440ae45-Abstract.html", "author": "Erik Aurell; Uri Gordon; Scott Kirkpatrick", "abstract": "Survey propagation is a powerful technique from statistical physics that has been applied to solve the 3-SAT problem both in principle and in practice. We give, using only probability arguments, a common derivation of survey propagation, belief propagation and several interesting hybrid methods. We then present numerical experiments which use WSAT (a widely used random-walk based SAT solver) to quantify the complexity of the 3-SAT formulae as a function of their parameters, both as randomly generated and after simplification, guided by survey propagation. Some properties of WSAT which have not previously been reported make it an ideal tool for this purpose: its mean cost is proportional to the number of variables in the formula (at a fixed ratio of clauses to variables) in the easy-SAT regime and slightly beyond, and its behavior in the hard-SAT regime appears to reflect the underlying structure of the solution space that has been predicted by replica symmetry-breaking arguments. An analysis of the tradeoffs between the various methods of search for satisfying assignments shows WSAT to be far more powerful than has been appreciated, and suggests some interesting new directions for practical algorithm development.", "bibtex": "@inproceedings{NIPS2004_fecf2c55,\n author = {Aurell, Erik and Gordon, Uri and Kirkpatrick, Scott},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Comparing Beliefs, Surveys, and Random Walks},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/fecf2c550171d3195c879d115440ae45-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/fecf2c550171d3195c879d115440ae45-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/fecf2c550171d3195c879d115440ae45-Metadata.json", "review": "", "metareview": "", "pdf_size": 88282, "gs_citation": 52, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7484294159721573850&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 13, "aff": "SICS, Swedish Institute of Computer Science + Dept. of Physics, KTH \u2013 Royal Institute of Technology; School of Engineering and Computer Science, Hebrew University of Jerusalem; School of Engineering and Computer Science, Hebrew University of Jerusalem", "aff_domain": "sics.se;cs.huji.ac.il;cs.huji.ac.il", "email": "sics.se;cs.huji.ac.il;cs.huji.ac.il", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0+1;2;2", "aff_unique_norm": "Swedish Institute of Computer Science;KTH \u2013 Royal Institute of Technology;Hebrew University of Jerusalem", "aff_unique_dep": ";Dept. of Physics;School of Engineering and Computer Science", "aff_unique_url": "https://www.sics.se;https://www.kth.se;https://www.huji.ac.il", "aff_unique_abbr": "SICS;KTH;HUJI", "aff_campus_unique_index": ";1;1", "aff_campus_unique": ";Jerusalem", "aff_country_unique_index": "0+0;1;1", "aff_country_unique": "Sweden;Israel" }, { "id": "1eb2335fef", "title": "Computing regularization paths for learning multiple kernels", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/ce758408f6ef98d7c7a7b786eca7b3a8-Abstract.html", "author": "Francis R. Bach; Romain Thibaux; Michael I. Jordan", "abstract": "The problem of learning a sparse conic combination of kernel functions or kernel matrices for classification or regression can be achieved via the regularization by a block 1-norm [1]. In this paper, we present an al- gorithm that computes the entire regularization path for these problems. The path is obtained by using numerical continuation techniques, and involves a running time complexity that is a constant times the complex- ity of solving the problem for one value of the regularization parameter. Working in the setting of kernel linear regression and kernel logistic re- gression, we show empirically that the effect of the block 1-norm reg- ularization differs notably from the (non-block) 1-norm regularization commonly used for variable selection, and that the regularization path is of particular value in the block case.", "bibtex": "@inproceedings{NIPS2004_ce758408,\n author = {Bach, Francis and Thibaux, Romain and Jordan, Michael},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Computing regularization paths for learning multiple kernels},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/ce758408f6ef98d7c7a7b786eca7b3a8-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/ce758408f6ef98d7c7a7b786eca7b3a8-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/ce758408f6ef98d7c7a7b786eca7b3a8-Metadata.json", "review": "", "metareview": "", "pdf_size": 107686, "gs_citation": 146, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2397065157847749678&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 15, "aff": "Computer Science, University of California, Berkeley, CA 94720; Computer Science, University of California, Berkeley, CA 94720; Computer Science and Statistics, University of California, Berkeley, CA 94720", "aff_domain": "cs.berkeley.edu;cs.berkeley.edu;cs.berkeley.edu", "email": "cs.berkeley.edu;cs.berkeley.edu;cs.berkeley.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of California, Berkeley", "aff_unique_dep": "Computer Science", "aff_unique_url": "https://www.berkeley.edu", "aff_unique_abbr": "UC Berkeley", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Berkeley", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "09e85fb1bf", "title": "Conditional Models of Identity Uncertainty with Application to Noun Coreference", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/1680829293f2a8541efa2647a0290f88-Abstract.html", "author": "Andrew McCallum; Ben Wellner", "abstract": "Coreference analysis, also known as record linkage or identity uncer- tainty, is a difficult and important problem in natural language process- ing, databases, citation matching and many other tasks. This paper intro- duces several discriminative, conditional-probability models for coref- erence analysis, all examples of undirected graphical models. Unlike many historical approaches to coreference, the models presented here are relational--they do not assume that pairwise coreference decisions should be made independently from each other. Unlike other relational models of coreference that are generative, the conditional model here can incorporate a great variety of features of the input without having to be concerned about their dependencies--paralleling the advantages of con- ditional random fields over hidden Markov models. We present positive results on noun phrase coreference in two standard text data sets.", "bibtex": "@inproceedings{NIPS2004_16808292,\n author = {McCallum, Andrew and Wellner, Ben},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Conditional Models of Identity Uncertainty with Application to Noun Coreference},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/1680829293f2a8541efa2647a0290f88-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/1680829293f2a8541efa2647a0290f88-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/1680829293f2a8541efa2647a0290f88-Metadata.json", "review": "", "metareview": "", "pdf_size": 105233, "gs_citation": 323, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9870516205762738295&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 13, "aff": "Department of Computer Science, University of Massachusetts Amherst; The MITRE Corporation + Department of Computer Science, University of Massachusetts Amherst", "aff_domain": "cs.umass.edu;cs.umass.edu", "email": "cs.umass.edu;cs.umass.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1+0", "aff_unique_norm": "University of Massachusetts Amherst;MITRE Corporation", "aff_unique_dep": "Department of Computer Science;", "aff_unique_url": "https://www.umass.edu;https://www.mitre.org", "aff_unique_abbr": "UMass Amherst;MITRE", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Amherst;", "aff_country_unique_index": "0;0+0", "aff_country_unique": "United States" }, { "id": "ef35666657", "title": "Conditional Random Fields for Object Recognition", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/0c215f194276000be6a6df6528067151-Abstract.html", "author": "Ariadna Quattoni; Michael Collins; Trevor Darrell", "abstract": "We present a discriminative part-based approach for the recognition of object classes from unsegmented cluttered scenes. Objects are modeled as flexible constellations of parts conditioned on local observations found by an interest operator. For each object class the probability of a given assignment of parts to local features is modeled by a Conditional Ran- dom Field (CRF). We propose an extension of the CRF framework that incorporates hidden variables and combines class conditional CRFs into a unified framework for part-based object recognition. The parameters of the CRF are estimated in a maximum likelihood framework and recogni- tion proceeds by finding the most likely class under our model. The main advantage of the proposed CRF framework is that it allows us to relax the assumption of conditional independence of the observed data (i.e. local features) often used in generative approaches, an assumption that might be too restrictive for a considerable number of object classes.", "bibtex": "@inproceedings{NIPS2004_0c215f19,\n author = {Quattoni, Ariadna and Collins, Michael and Darrell, Trevor},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Conditional Random Fields for Object Recognition},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/0c215f194276000be6a6df6528067151-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/0c215f194276000be6a6df6528067151-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/0c215f194276000be6a6df6528067151-Metadata.json", "review": "", "metareview": "", "pdf_size": 194084, "gs_citation": 1143, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7777260979101125233&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 30, "aff": ";;", "aff_domain": ";;", "email": ";;", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster" }, { "id": "dcba59001c", "title": "Confidence Intervals for the Area Under the ROC Curve", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/a7789ef88d599b8df86bbee632b2994d-Abstract.html", "author": "Corinna Cortes; Mehryar Mohri", "abstract": "In many applications, good ranking is a highly desirable performance for a classifier. The criterion commonly used to measure the ranking quality of a classification algorithm is the area under the ROC curve (AUC). To report it properly, it is crucial to determine an interval of confidence for its value. This paper provides confidence intervals for the AUC based on a statistical and combinatorial analysis using only simple parameters such as the error rate and the number of positive and negative examples. The analysis is distribution-independent, it makes no assumption about the distribution of the scores of negative or positive examples. The results are of practical use and can be viewed as the equivalent for AUC of the standard confidence intervals given in the case of the error rate. They are compared with previous approaches in several standard classification tasks demonstrating the benefits of our analysis.", "bibtex": "@inproceedings{NIPS2004_a7789ef8,\n author = {Cortes, Corinna and Mohri, Mehryar},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Confidence Intervals for the Area Under the ROC Curve},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/a7789ef88d599b8df86bbee632b2994d-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/a7789ef88d599b8df86bbee632b2994d-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/a7789ef88d599b8df86bbee632b2994d-Metadata.json", "review": "", "metareview": "", "pdf_size": 88699, "gs_citation": 271, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16960859504537703971&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "Google Research; Courant Institute, NYU", "aff_domain": "google.com;cs.nyu.edu", "email": "google.com;cs.nyu.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "Google;New York University", "aff_unique_dep": "Google Research;Courant Institute", "aff_unique_url": "https://research.google;https://www.courant.nyu.edu", "aff_unique_abbr": "Google Research;NYU", "aff_campus_unique_index": "0;1", "aff_campus_unique": "Mountain View;New York", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "746ab17341", "title": "Constraining a Bayesian Model of Human Visual Speed Perception", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/852c44ddce7e0c7e4c64d86147300831-Abstract.html", "author": "Alan Stocker; Eero P. Simoncelli", "abstract": "It has been demonstrated that basic aspects of human visual motion per- ception are qualitatively consistent with a Bayesian estimation frame- work, where the prior probability distribution on velocity favors slow speeds. Here, we present a refined probabilistic model that can account for the typical trial-to-trial variabilities observed in psychophysical speed perception experiments. We also show that data from such experiments can be used to constrain both the likelihood and prior functions of the model. Specifically, we measured matching speeds and thresholds in a two-alternative forced choice speed discrimination task. Parametric fits to the data reveal that the likelihood function is well approximated by a LogNormal distribution with a characteristic contrast-dependent vari- ance, and that the prior distribution on velocity exhibits significantly heavier tails than a Gaussian, and approximately follows a power-law function.", "bibtex": "@inproceedings{NIPS2004_852c44dd,\n author = {Stocker, Alan A and Simoncelli, Eero},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Constraining a Bayesian Model of Human Visual Speed Perception},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/852c44ddce7e0c7e4c64d86147300831-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/852c44ddce7e0c7e4c64d86147300831-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/852c44ddce7e0c7e4c64d86147300831-Metadata.json", "review": "", "metareview": "", "pdf_size": 521704, "gs_citation": 32, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4925267999516825359&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 19, "aff": ";", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster" }, { "id": "2d06a62bc6", "title": "Contextual Models for Object Detection Using Boosted Random Fields", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/908a6f6a6c131a850ecb0e3f11b08189-Abstract.html", "author": "Antonio Torralba; Kevin P. Murphy; William T. Freeman", "abstract": "We seek to both detect and segment objects in images. To exploit both lo- cal image data as well as contextual information, we introduce Boosted Random Fields (BRFs), which uses Boosting to learn the graph struc- ture and local evidence of a conditional random field (CRF). The graph structure is learned by assembling graph fragments in an additive model. The connections between individual pixels are not very informative, but by using dense graphs, we can pool information from large regions of the image; dense models also support efficient inference. We show how contextual information from other objects can improve detection perfor- mance, both in terms of accuracy and speed, by using a computational cascade. We apply our system to detect stuff and things in office and street scenes. 1 Introduction", "bibtex": "@inproceedings{NIPS2004_908a6f6a,\n author = {Torralba, Antonio and Murphy, Kevin P and Freeman, William},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Contextual Models for Object Detection Using Boosted Random Fields},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/908a6f6a6c131a850ecb0e3f11b08189-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/908a6f6a6c131a850ecb0e3f11b08189-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/908a6f6a6c131a850ecb0e3f11b08189-Metadata.json", "review": "", "metareview": "", "pdf_size": 272126, "gs_citation": 490, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6788893579183333418&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 20, "aff": "MIT, CSAIL; UBC, CS; MIT, CSAIL", "aff_domain": "mit.edu;cs.ubc.edu;mit.edu", "email": "mit.edu;cs.ubc.edu;mit.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;0", "aff_unique_norm": "Massachusetts Institute of Technology;University of British Columbia", "aff_unique_dep": "Computer Science and Artificial Intelligence Laboratory;Department of Computer Science", "aff_unique_url": "https://www.csail.mit.edu;https://www.ubc.ca", "aff_unique_abbr": "MIT;UBC", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1;0", "aff_country_unique": "United States;Canada" }, { "id": "60579d43a2", "title": "Convergence and No-Regret in Multiagent Learning", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/88fee0421317424e4469f33a48f50cb0-Abstract.html", "author": "Michael Bowling", "abstract": "Learning in a multiagent system is a challenging problem due to two key factors. First, if other agents are simultaneously learning then the envi- ronment is no longer stationary, thus undermining convergence guaran- tees. Second, learning is often susceptible to deception, where the other agents may be able to exploit a learner's particular dynamics. In the worst case, this could result in poorer performance than if the agent was not learning at all. These challenges are identifiable in the two most com- mon evaluation criteria for multiagent learning algorithms: convergence and regret. Algorithms focusing on convergence or regret in isolation are numerous. In this paper, we seek to address both criteria in a single algorithm by introducing GIGA-WoLF, a learning algorithm for normal- form games. We prove the algorithm guarantees at most zero average regret, while demonstrating the algorithm converges in many situations of self-play. We prove convergence in a limited setting and give empir- ical results in a wider variety of situations. These results also suggest a third new learning criterion combining convergence and regret, which we call negative non-convergence regret (NNR).", "bibtex": "@inproceedings{NIPS2004_88fee042,\n author = {Bowling, Michael},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Convergence and No-Regret in Multiagent Learning},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/88fee0421317424e4469f33a48f50cb0-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/88fee0421317424e4469f33a48f50cb0-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/88fee0421317424e4469f33a48f50cb0-Metadata.json", "review": "", "metareview": "", "pdf_size": 98119, "gs_citation": 390, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16695297044639977347&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "Department of Computing Science, University of Alberta", "aff_domain": "cs.ualberta.ca", "email": "cs.ualberta.ca", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "University of Alberta", "aff_unique_dep": "Department of Computing Science", "aff_unique_url": "https://www.ualberta.ca", "aff_unique_abbr": "UAlberta", "aff_country_unique_index": "0", "aff_country_unique": "Canada" }, { "id": "9dbc4fbdf7", "title": "Density Level Detection is Classification", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/339a18def9898dd60a634b2ad8fbbd58-Abstract.html", "author": "Ingo Steinwart; Don Hush; Clint Scovel", "abstract": "We show that anomaly detection can be interpreted as a binary classifi- cation problem. Using this interpretation we propose a support vector machine (SVM) for anomaly detection. We then present some theoret- ical results which include consistency and learning rates. Finally, we experimentally compare our SVM with the standard one-class SVM.", "bibtex": "@inproceedings{NIPS2004_339a18de,\n author = {Steinwart, Ingo and Hush, Don and Scovel, Clint},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Density Level Detection is Classification},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/339a18def9898dd60a634b2ad8fbbd58-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/339a18def9898dd60a634b2ad8fbbd58-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/339a18def9898dd60a634b2ad8fbbd58-Metadata.json", "review": "", "metareview": "", "pdf_size": 97149, "gs_citation": 36, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10414309155575650040&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 16, "aff": "Modeling, Algorithms and Informatics Group, CCS-3; Modeling, Algorithms and Informatics Group, CCS-3; Modeling, Algorithms and Informatics Group, CCS-3", "aff_domain": "lanl.gov;lanl.gov;lanl.gov", "email": "lanl.gov;lanl.gov;lanl.gov", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "Los Alamos National Laboratory", "aff_unique_dep": "Modeling, Algorithms and Informatics Group", "aff_unique_url": "https://www.lanl.gov", "aff_unique_abbr": "LANL", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "2612773431", "title": "Dependent Gaussian Processes", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/59eb5dd36914c29b299c84b7ddaf08ec-Abstract.html", "author": "Phillip Boyle; Marcus Frean", "abstract": "Gaussian processes are usually parameterised in terms of their covari- ance functions. However, this makes it difficult to deal with multiple outputs, because ensuring that the covariance matrix is positive definite is problematic. An alternative formulation is to treat Gaussian processes as white noise sources convolved with smoothing kernels, and to param- eterise the kernel instead. Using this, we extend Gaussian processes to handle multiple, coupled outputs.", "bibtex": "@inproceedings{NIPS2004_59eb5dd3,\n author = {Boyle, Phillip and Frean, Marcus},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Dependent Gaussian Processes},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/59eb5dd36914c29b299c84b7ddaf08ec-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/59eb5dd36914c29b299c84b7ddaf08ec-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/59eb5dd36914c29b299c84b7ddaf08ec-Metadata.json", "review": "", "metareview": "", "pdf_size": 216871, "gs_citation": 407, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2366425289121721379&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 14, "aff": "School of Mathematical and Computing Sciences, Victoria University of Wellington, Wellington, New Zealand; School of Mathematical and Computing Sciences, Victoria University of Wellington, Wellington, New Zealand", "aff_domain": "mcs.vuw.ac.nz;mcs.vuw.ac.nz", "email": "mcs.vuw.ac.nz;mcs.vuw.ac.nz", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Victoria University of Wellington", "aff_unique_dep": "School of Mathematical and Computing Sciences", "aff_unique_url": "https://www.victoria.ac.nz", "aff_unique_abbr": "VUW", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Wellington", "aff_country_unique_index": "0;0", "aff_country_unique": "New Zealand" }, { "id": "059551d97f", "title": "Detecting Significant Multidimensional Spatial Clusters", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/978fce5bcc4eccc88ad48ce3914124a2-Abstract.html", "author": "Daniel B. Neill; Andrew W. Moore; Francisco Pereira; Tom M. Mitchell", "abstract": "Assume a uniform, multidimensional grid of bivariate data, where each cell of the grid has a count ci and a baseline bi. Our goal is to find spatial regions (d-dimensional rectangles) where the ci are significantly higher than expected given bi. We focus on two applications: detection of clusters of disease cases from epidemiological data (emergency depart- ment visits, over-the-counter drug sales), and discovery of regions of in- creased brain activity corresponding to given cognitive tasks (from fMRI data). Each of these problems can be solved using a spatial scan statistic (Kulldorff, 1997), where we compute the maximum of a likelihood ratio statistic over all spatial regions, and find the significance of this region by randomization. However, computing the scan statistic for all spatial regions is generally computationally infeasible, so we introduce a novel fast spatial scan algorithm, generalizing the 2D scan algorithm of (Neill and Moore, 2004) to arbitrary dimensions. Our new multidimensional multiresolution algorithm allows us to find spatial clusters up to 1400x faster than the naive spatial scan, without any loss of accuracy.", "bibtex": "@inproceedings{NIPS2004_978fce5b,\n author = {Neill, Daniel and Moore, Andrew and Pereira, Francisco and Mitchell, Tom M},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Detecting Significant Multidimensional Spatial Clusters},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/978fce5bcc4eccc88ad48ce3914124a2-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/978fce5bcc4eccc88ad48ce3914124a2-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/978fce5bcc4eccc88ad48ce3914124a2-Metadata.json", "review": "", "metareview": "", "pdf_size": 91197, "gs_citation": 76, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3811677871911026430&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 14, "aff": "School of Computer Science, Carnegie Mellon University; School of Computer Science, Carnegie Mellon University; School of Computer Science, Carnegie Mellon University; School of Computer Science, Carnegie Mellon University", "aff_domain": "cs.cmu.edu;cs.cmu.edu;cs.cmu.edu;cs.cmu.edu", "email": "cs.cmu.edu;cs.cmu.edu;cs.cmu.edu;cs.cmu.edu", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Carnegie Mellon University", "aff_unique_dep": "School of Computer Science", "aff_unique_url": "https://www.cmu.edu", "aff_unique_abbr": "CMU", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Pittsburgh", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "id": "0f8bc67501", "title": "Discrete profile alignment via constrained information bottleneck", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/1175defd049d3301e047ce50d93e9c7a-Abstract.html", "author": "Sean O'rourke; Gal Chechik; Robin Friedman; Eleazar Eskin", "abstract": "Amino acid profiles, which capture position-specific mutation prob- abilities, are a richer encoding of biological sequences than the in- dividual sequences themselves. However, profile comparisons are much more computationally expensive than discrete symbol com- parisons, making profiles impractical for many large datasets. Fur- thermore, because they are such a rich representation, profiles can be difficult to visualize. To overcome these problems, we propose a discretization for profiles using an expanded alphabet representing not just individual amino acids, but common profiles. By using an extension of information bottleneck (IB) incorporating constraints and priors on the class distributions, we find an informationally optimal alphabet. This discretization yields a concise, informative textual representation for profile sequences. Also alignments be- tween these sequences, while nearly as accurate as the full profile- profile alignments, can be computed almost as quickly as those between individual or consensus sequences. A full pairwise align- ment of SwissProt would take years using profiles, but less than 3 days using a discrete IB encoding, illustrating how discrete en- coding can expand the range of sequence problems to which profile information can be applied.", "bibtex": "@inproceedings{NIPS2004_1175defd,\n author = {O\\textquotesingle rourke, Sean and Chechik, Gal and Friedman, Robin and Eskin, Eleazar},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Discrete profile alignment via constrained information bottleneck},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/1175defd049d3301e047ce50d93e9c7a-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/1175defd049d3301e047ce50d93e9c7a-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/1175defd049d3301e047ce50d93e9c7a-Metadata.json", "review": "", "metareview": "", "pdf_size": 256675, "gs_citation": 3, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9546851776406645810&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "Department of Computer Science and Engineering, University of California San Diego; Department of Computer Science, Stanford University; Department of Computer Science and Engineering, University of California San Diego; Department of Computer Science and Engineering, University of California San Diego", "aff_domain": "cs.ucsd.edu;stanford.edu;ucsd.edu;cs.ucsd.edu", "email": "cs.ucsd.edu;stanford.edu;ucsd.edu;cs.ucsd.edu", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;1;0;0", "aff_unique_norm": "University of California, San Diego;Stanford University", "aff_unique_dep": "Department of Computer Science and Engineering;Department of Computer Science", "aff_unique_url": "https://www.ucsd.edu;https://www.stanford.edu", "aff_unique_abbr": "UCSD;Stanford", "aff_campus_unique_index": "0;1;0;0", "aff_campus_unique": "San Diego;Stanford", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "id": "84d4bbb31c", "title": "Discriminant Saliency for Visual Recognition from Cluttered Scenes", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/dda04f9d634145a9c68d5dfe53b21272-Abstract.html", "author": "Dashan Gao; Nuno Vasconcelos", "abstract": "Saliency mechanisms play an important role when visual recognition must be performed in cluttered scenes. We propose a computational defi- nition of saliency that deviates from existing models by equating saliency to discrimination. In particular, the salient attributes of a given visual class are defined as the features that enable best discrimination between that class and all other classes of recognition interest. It is shown that this definition leads to saliency algorithms of low complexity, that are scalable to large recognition problems, and is compatible with existing models of early biological vision. Experimental results demonstrating success in the context of challenging recognition problems are also pre- sented.", "bibtex": "@inproceedings{NIPS2004_dda04f9d,\n author = {Gao, Dashan and Vasconcelos, Nuno},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Discriminant Saliency for Visual Recognition from Cluttered Scenes},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/dda04f9d634145a9c68d5dfe53b21272-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/dda04f9d634145a9c68d5dfe53b21272-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/dda04f9d634145a9c68d5dfe53b21272-Metadata.json", "review": "", "metareview": "", "pdf_size": 521216, "gs_citation": 280, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4730297018470577343&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 13, "aff": "Department of Electrical and Computer Engineering, University of California, San Diego; Department of Electrical and Computer Engineering, University of California, San Diego", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of California, San Diego", "aff_unique_dep": "Department of Electrical and Computer Engineering", "aff_unique_url": "https://www.ucsd.edu", "aff_unique_abbr": "UCSD", "aff_campus_unique_index": "0;0", "aff_campus_unique": "San Diego", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "4222f555a6", "title": "Distributed Information Regularization on Graphs", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/75455e062929d32a333868084286bb68-Abstract.html", "author": "Adrian Corduneanu; Tommi S. Jaakkola", "abstract": "We provide a principle for semi-supervised learning based on optimizing the rate of communicating labels for unlabeled points with side informa- tion. The side information is expressed in terms of identities of sets of points or regions with the purpose of biasing the labels in each region to be the same. The resulting regularization objective is convex, has a unique solution, and the solution can be found with a pair of local prop- agation operations on graphs induced by the regions. We analyze the properties of the algorithm and demonstrate its performance on docu- ment classification tasks.", "bibtex": "@inproceedings{NIPS2004_75455e06,\n author = {Corduneanu, Adrian and Jaakkola, Tommi},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Distributed Information Regularization on Graphs},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/75455e062929d32a333868084286bb68-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/75455e062929d32a333868084286bb68-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/75455e062929d32a333868084286bb68-Metadata.json", "review": "", "metareview": "", "pdf_size": 105583, "gs_citation": 49, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7886692200181421955&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "CSAIL MIT; CSAIL MIT", "aff_domain": "csail.mit.edu;csail.mit.edu", "email": "csail.mit.edu;csail.mit.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Massachusetts Institute of Technology", "aff_unique_dep": "Computer Science and Artificial Intelligence Laboratory", "aff_unique_url": "https://www.csail.mit.edu", "aff_unique_abbr": "MIT", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Cambridge", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "0bba1b7bab", "title": "Distributed Occlusion Reasoning for Tracking with Nonparametric Belief Propagation", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/1006ff12c465532f8c574aeaa4461b16-Abstract.html", "author": "Erik B. Sudderth; Michael I. Mandel; William T. Freeman; Alan S. Willsky", "abstract": "We describe a threedimensional geometric hand model suitable for vi- sual tracking applications. The kinematic constraints implied by the model's joints have a probabilistic structure which is well described by a graphical model. Inference in this model is complicated by the hand's many degrees of freedom, as well as multimodal likelihoods caused by ambiguous image measurements. We use nonparametric belief propaga- tion (NBP) to develop a tracking algorithm which exploits the graph's structure to control complexity, while avoiding costly discretization. While kinematic constraints naturally have a local structure, self occlusions created by the imaging process lead to complex interpenden- cies in color and edgebased likelihood functions. However, we show that local structure may be recovered by introducing binary hidden vari- ables describing the occlusion state of each pixel. We augment the NBP algorithm to infer these occlusion variables in a distributed fashion, and then analytically marginalize over them to produce hand position esti- mates which properly account for occlusion events. We provide simula- tions showing that NBP may be used to refine inaccurate model initializa- tions, as well as track hand motion through extended image sequences.", "bibtex": "@inproceedings{NIPS2004_1006ff12,\n author = {Sudderth, Erik and Mandel, Michael and Freeman, William and Willsky, Alan},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Distributed Occlusion Reasoning for Tracking with Nonparametric Belief Propagation},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/1006ff12c465532f8c574aeaa4461b16-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/1006ff12c465532f8c574aeaa4461b16-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/1006ff12c465532f8c574aeaa4461b16-Metadata.json", "review": "", "metareview": "", "pdf_size": 183003, "gs_citation": 1, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7864743782753371652&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 0, "aff": "Department of Electrical Engineering and Computer Science; Department of Electrical Engineering and Computer Science; Department of Electrical Engineering and Computer Science; Department of Electrical Engineering and Computer Science", "aff_domain": "mit.edu;alum.mit.edu;mit.edu;mit.edu", "email": "mit.edu;alum.mit.edu;mit.edu;mit.edu", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Massachusetts Institute of Technology", "aff_unique_dep": "Department of Electrical Engineering and Computer Science", "aff_unique_url": "https://web.mit.edu", "aff_unique_abbr": "MIT", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Cambridge", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "id": "a8d46fc223", "title": "Dynamic Bayesian Networks for Brain-Computer Interfaces", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/daaaf13651380465fc284db6940d8478-Abstract.html", "author": "Pradeep Shenoy; Rajesh P. Rao", "abstract": "We describe an approach to building brain-computer interfaces (BCI) based on graphical models for probabilistic inference and learning. We show how a dynamic Bayesian network (DBN) can be used to infer probability distributions over brain- and body-states during planning and execution of actions. The DBN is learned directly from observed data and allows measured signals such as EEG and EMG to be interpreted in terms of internal states such as intent to move, preparatory activity, and movement execution. Unlike traditional classification-based approaches to BCI, the proposed approach (1) allows continuous tracking and predic- tion of internal states over time, and (2) generates control signals based on an entire probability distribution over states rather than binary yes/no decisions. We present preliminary results of brain- and body-state es- timation using simultaneous EEG and EMG signals recorded during a self-paced left/right hand movement task.", "bibtex": "@inproceedings{NIPS2004_daaaf136,\n author = {Shenoy, Pradeep and Rao, Rajesh PN},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Dynamic Bayesian Networks for Brain-Computer Interfaces},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/daaaf13651380465fc284db6940d8478-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/daaaf13651380465fc284db6940d8478-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/daaaf13651380465fc284db6940d8478-Metadata.json", "review": "", "metareview": "", "pdf_size": 448787, "gs_citation": 48, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2034764468272641662&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 11, "aff": "Department of Computer Science, University of Washington; Department of Computer Science, University of Washington", "aff_domain": "cs.washington.edu;cs.washington.edu", "email": "cs.washington.edu;cs.washington.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of Washington", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.washington.edu", "aff_unique_abbr": "UW", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Seattle", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "98e3c9f9be", "title": "Economic Properties of Social Networks", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/50abc3e730e36b387ca8e02c26dc0a22-Abstract.html", "author": "Sham M. Kakade; Michael Kearns; Luis E. Ortiz; Robin Pemantle; Siddharth Suri", "abstract": "We examine the marriage of recent probabilistic generative models for social networks with classical frameworks from mathematical eco- nomics. We are particularly interested in how the statistical structure of such networks influences global economic quantities such as price vari- ation. Our findings are a mixture of formal analysis, simulation, and experiments on an international trade data set from the United Nations.", "bibtex": "@inproceedings{NIPS2004_50abc3e7,\n author = {Kakade, Sham M and Kearns, Michael and Ortiz, Luis E and Pemantle, Robin and Suri, Siddharth},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Economic Properties of Social Networks},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/50abc3e730e36b387ca8e02c26dc0a22-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/50abc3e730e36b387ca8e02c26dc0a22-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/50abc3e730e36b387ca8e02c26dc0a22-Metadata.json", "review": "", "metareview": "", "pdf_size": 97743, "gs_citation": 117, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14697907479513564226&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 23, "aff": "University of Pennsylvania; University of Pennsylvania; University of Pennsylvania; University of Pennsylvania; University of Pennsylvania", "aff_domain": ";;;;", "email": ";;;;", "github": "", "project": "", "author_num": 5, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;0;0", "aff_unique_norm": "University of Pennsylvania", "aff_unique_dep": "", "aff_unique_url": "https://www.upenn.edu", "aff_unique_abbr": "UPenn", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "id": "b78f10b33f", "title": "Edge of Chaos Computation in Mixed-Mode VLSI - A Hard Liquid", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/dbab2adc8f9d078009ee3fa810bea142-Abstract.html", "author": "Felix Sch\u00fcrmann; Karlheinz Meier; Johannes Schemmel", "abstract": "Computation without stable states is a computing paradigm dif- ferent from Turing's and has been demonstrated for various types of simulated neural networks. This publication transfers this to a hardware implemented neural network. Results of a software im- plementation are reproduced showing that the performance peaks when the network exhibits dynamics at the edge of chaos. The liquid computing approach seems well suited for operating analog computing devices such as the used VLSI neural network.", "bibtex": "@inproceedings{NIPS2004_dbab2adc,\n author = {Sch\\\"{u}rmann, Felix and Meier, Karlheinz and Schemmel, Johannes},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Edge of Chaos Computation in Mixed-Mode VLSI - A Hard Liquid},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/dbab2adc8f9d078009ee3fa810bea142-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/dbab2adc8f9d078009ee3fa810bea142-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/dbab2adc8f9d078009ee3fa810bea142-Metadata.json", "review": "", "metareview": "", "pdf_size": 394783, "gs_citation": 78, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8682890942159185862&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 13, "aff": ";;", "aff_domain": ";;", "email": ";;", "github": "", "project": "http://www.kip.uni-heidelberg.de/vision", "author_num": 3, "track": "main", "status": "Poster" }, { "id": "12df4be3fa", "title": "Efficient Kernel Discriminant Analysis via QR Decomposition", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/3a0844cee4fcf57de0c71e9ad3035478-Abstract.html", "author": "Tao Xiong; Jieping Ye; Qi Li; Ravi Janardan; Vladimir Cherkassky", "abstract": "Linear Discriminant Analysis (LDA) is a well-known method for fea- ture extraction and dimension reduction. It has been used widely in many applications such as face recognition. Recently, a novel LDA algo- rithm based on QR Decomposition, namely LDA/QR, has been proposed, which is competitive in terms of classification accuracy with other LDA algorithms, but it has much lower costs in time and space. However, LDA/QR is based on linear projection, which may not be suitable for data with nonlinear structure. This paper first proposes an algorithm called KDA/QR, which extends the LDA/QR algorithm to deal with nonlin- ear data by using the kernel operator. Then an efficient approximation of KDA/QR called AKDA/QR is proposed. Experiments on face image data show that the classification accuracy of both KDA/QR and AKDA/QR are competitive with Generalized Discriminant Analysis (GDA), a gen- eral kernel discriminant analysis algorithm, while AKDA/QR has much lower time and space costs.", "bibtex": "@inproceedings{NIPS2004_3a0844ce,\n author = {Xiong, Tao and Ye, Jieping and Li, Qi and Janardan, Ravi and Cherkassky, Vladimir},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Efficient Kernel Discriminant Analysis via QR Decomposition},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/3a0844cee4fcf57de0c71e9ad3035478-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/3a0844cee4fcf57de0c71e9ad3035478-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/3a0844cee4fcf57de0c71e9ad3035478-Metadata.json", "review": "", "metareview": "", "pdf_size": 81169, "gs_citation": 71, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2672324444410935279&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "Department of ECE, University of Minnesota; Department of CSE, University of Minnesota; Department of CIS, University of Delaware; Department of ECE, University of Minnesota; Department of CSE, University of Minnesota", "aff_domain": "ece.umn.edu;cs.umn.edu;cis.udel.edu;ece.umn.edu;cs.umn.edu", "email": "ece.umn.edu;cs.umn.edu;cis.udel.edu;ece.umn.edu;cs.umn.edu", "github": "", "project": "", "author_num": 5, "track": "main", "status": "Poster", "aff_unique_index": "0;0;1;0;0", "aff_unique_norm": "University of Minnesota;University of Delaware", "aff_unique_dep": "Department of Electrical and Computer Engineering;Department of CIS", "aff_unique_url": "https://www.umn.edu;https://www.udel.edu", "aff_unique_abbr": "UMN;UD", "aff_campus_unique_index": "1;1", "aff_campus_unique": ";Minneapolis", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "id": "870a89852d", "title": "Efficient Kernel Machines Using the Improved Fast Gauss Transform", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/85353d3b2f39b9c9b5ee3576578c04b7-Abstract.html", "author": "Changjiang Yang; Ramani Duraiswami; Larry S. Davis", "abstract": "The computation and memory required for kernel machines with N train- ing samples is at least O(N 2). Such a complexity is significant even for moderate size problems and is prohibitive for large datasets. We present an approximation technique based on the improved fast Gauss transform to reduce the computation to O(N ). We also give an error bound for the approximation, and provide experimental results on the UCI datasets.", "bibtex": "@inproceedings{NIPS2004_85353d3b,\n author = {Yang, Changjiang and Duraiswami, Ramani and Davis, Larry S},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Efficient Kernel Machines Using the Improved Fast Gauss Transform},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/85353d3b2f39b9c9b5ee3576578c04b7-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/85353d3b2f39b9c9b5ee3576578c04b7-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/85353d3b2f39b9c9b5ee3576578c04b7-Metadata.json", "review": "", "metareview": "", "pdf_size": 108993, "gs_citation": 194, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16166470723127742094&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": ";;", "aff_domain": ";;", "email": ";;", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster" }, { "id": "51a1bbd61d", "title": "Efficient Out-of-Sample Extension of Dominant-Set Clusters", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/e354fd90b2d5c777bfec87a352a18976-Abstract.html", "author": "Massimiliano Pavan; Marcello Pelillo", "abstract": "Dominant sets are a new graph-theoretic concept that has proven to be relevant in pairwise data clustering problems, such as image seg- mentation. They generalize the notion of a maximal clique to edge- weighted graphs and have intriguing, non-trivial connections to continu- ous quadratic optimization and spectral-based grouping. We address the problem of grouping out-of-sample examples after the clustering process has taken place. This may serve either to drastically reduce the compu- tational burden associated to the processing of very large data sets, or to efficiently deal with dynamic situations whereby data sets need to be updated continually. We show that the very notion of a dominant set of- fers a simple and efficient way of doing this. Numerical experiments on various grouping problems show the effectiveness of the approach.", "bibtex": "@inproceedings{NIPS2004_e354fd90,\n author = {Pavan, Massimiliano and Pelillo, Marcello},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Efficient Out-of-Sample Extension of Dominant-Set Clusters},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/e354fd90b2d5c777bfec87a352a18976-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/e354fd90b2d5c777bfec87a352a18976-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/e354fd90b2d5c777bfec87a352a18976-Metadata.json", "review": "", "metareview": "", "pdf_size": 150401, "gs_citation": 64, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18200244579516849325&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": ";", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster" }, { "id": "6fee1a09bb", "title": "Euclidean Embedding of Co-Occurrence Data", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/ec1f850d934f440cfa8e4a18d2cf5463-Abstract.html", "author": "Amir Globerson; Gal Chechik; Fernando Pereira; Naftali Tishby", "abstract": "Embedding algorithms search for low dimensional structure in complex data, but most algorithms only handle objects of a single type for which pairwise distances are specified. 
This paper describes a method for em- bedding objects of different types, such as images and text, into a single common Euclidean space based on their co-occurrence statistics. The joint distributions are modeled as exponentials of Euclidean distances in the low-dimensional embedding space, which links the problem to con- vex optimization over positive semidefinite matrices. The local struc- ture of our embedding corresponds to the statistical correlations via ran- dom walks in the Euclidean space. We quantify the performance of our method on two text datasets, and show that it consistently and signifi- cantly outperforms standard methods of statistical correspondence mod- eling, such as multidimensional scaling and correspondence analysis.", "bibtex": "@inproceedings{NIPS2004_ec1f850d,\n author = {Globerson, Amir and Chechik, Gal and Pereira, Fernando and Tishby, Naftali},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Euclidean Embedding of Co-Occurrence Data},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/ec1f850d934f440cfa8e4a18d2cf5463-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/ec1f850d934f440cfa8e4a18d2cf5463-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/ec1f850d934f440cfa8e4a18d2cf5463-Metadata.json", "review": "", "metareview": "", "pdf_size": 184140, "gs_citation": 365, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8523850141429484808&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 23, "aff": ";;;", "aff_domain": ";;;", "email": ";;;", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster" }, { "id": "f3dfbfb7bd", "title": "Expectation Consistent Free Energies for Approximate Inference", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/2417dc8af8570f274e6775d4d60496da-Abstract.html", "author": "Manfred Opper; Ole Winther", "abstract": "We propose a novel a framework for deriving approximations for in- tractable probabilistic models. This framework is based on a free energy (negative log marginal likelihood) and can be seen as a generalization of adaptive TAP [1, 2, 3] and expectation propagation (EP) [4, 5]. The free energy is constructed from two approximating distributions which encode different aspects of the intractable model such a single node con- straints and couplings and are by construction consistent on a chosen set of moments. We test the framework on a difficult benchmark problem with binary variables on fully connected graphs and 2D grid graphs. We find good performance using sets of moments which either specify fac- torized nodes or a spanning tree on the nodes (structured approximation). Surprisingly, the Bethe approximation gives very inferior results even on grids.", "bibtex": "@inproceedings{NIPS2004_2417dc8a,\n author = {Opper, Manfred and Winther, Ole},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Expectation Consistent Free Energies for Approximate Inference},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/2417dc8af8570f274e6775d4d60496da-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/2417dc8af8570f274e6775d4d60496da-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/2417dc8af8570f274e6775d4d60496da-Metadata.json", "review": "", "metareview": "", "pdf_size": 82468, "gs_citation": 33, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17131905216385167558&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 15, "aff": "ISIS School of Electronics and Computer Science University of Southampton SO17 1BJ, United Kingdom; Informatics and Mathematical Modelling Technical University of Denmark DK-2800 Lyngby, Denmark", "aff_domain": "ecs.soton.ac.uk;imm.dtu.dk", "email": "ecs.soton.ac.uk;imm.dtu.dk", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "University of Southampton;Technical University of Denmark", "aff_unique_dep": "ISIS School of Electronics and Computer Science;Informatics and Mathematical Modelling", "aff_unique_url": "https://www.southampton.ac.uk;https://www.tu-dresden.de", "aff_unique_abbr": ";DTU", "aff_campus_unique_index": "0;1", "aff_campus_unique": "Southampton;Lyngby", "aff_country_unique_index": "0;1", "aff_country_unique": "United Kingdom;Denmark" }, { "id": "c4be304919", "title": "Experts in a Markov Decision Process", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/421b3ac5c24ee992edd6087611c60dbb-Abstract.html", "author": "Eyal Even-dar; Sham M. Kakade; Yishay Mansour", "abstract": "We consider an MDP setting in which the reward function is allowed to change during each time step of play (possibly in an adversarial manner), yet the dynamics remain fixed. Similar to the experts setting, we address the question of how well can an agent do when compared to the reward achieved under the best stationary policy over time. We provide efficient algorithms, which have regret bounds with no dependence on the size of state space. Instead, these bounds depend only on a certain horizon time of the process and logarithmically on the number of actions. We also show that in the case that the dynamics change over time, the problem becomes computationally hard.", "bibtex": "@inproceedings{NIPS2004_421b3ac5,\n author = {Even-dar, Eyal and Kakade, Sham M and Mansour, Yishay},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Experts in a Markov Decision Process},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/421b3ac5c24ee992edd6087611c60dbb-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/421b3ac5c24ee992edd6087611c60dbb-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/421b3ac5c24ee992edd6087611c60dbb-Metadata.json", "review": "", "metareview": "", "pdf_size": 75400, "gs_citation": 114, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3403945721669167727&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 17, "aff": "Computer Science, Tel-Aviv University; Computer and Information Science, University of Pennsylvania; Computer Science, Tel-Aviv University", "aff_domain": "post.tau.ac.il;linc.cis.upenn.edu;post.tau.ac.il", "email": "post.tau.ac.il;linc.cis.upenn.edu;post.tau.ac.il", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;0", "aff_unique_norm": "Tel-Aviv University;University of Pennsylvania", "aff_unique_dep": "Computer Science;Computer and Information Science", "aff_unique_url": "https://www.tau.ac.il;https://www.upenn.edu", "aff_unique_abbr": "TAU;UPenn", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1;0", "aff_country_unique": "Israel;United States" }, { "id": "28a809af67", "title": "Exploration-Exploitation Tradeoffs for Experts Algorithms in Reactive Environments", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/7f7c351ee977c765aa8cd5c7020bc38f-Abstract.html", "author": "Daniela D. Farias; Nimrod Megiddo", "abstract": "A reactive environment is one that responds to the actions of an agent rather than evolving obliviously. In reactive environments, experts algorithms must balance exploration and exploitation of experts more carefully than in oblivious ones. In addition, a more subtle definition of a learnable value of an expert is required. A general exploration-exploitation experts method is presented along with a proper definition of value. The method is shown to asymptotically perform as well as the best available expert. Several variants are analyzed from the viewpoint of the exploration-exploitation tradeoff, including explore-then-exploit, polynomially vanishing exploration, constant-frequency exploration, and constant-size explo- ration phases. Complexity and performance bounds are proven.", "bibtex": "@inproceedings{NIPS2004_7f7c351e,\n author = {Farias, Daniela and Megiddo, Nimrod},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Exploration-Exploitation Tradeoffs for Experts Algorithms in Reactive Environments},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/7f7c351ee977c765aa8cd5c7020bc38f-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/7f7c351ee977c765aa8cd5c7020bc38f-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/7f7c351ee977c765aa8cd5c7020bc38f-Metadata.json", "review": "", "metareview": "", "pdf_size": 117188, "gs_citation": 54, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5057150648764954204&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Department of Mechanical Engineering, Massachusetts Institute of Technology; IBM Almaden Research Center", "aff_domain": "mit.edu;almaden.ibm.com", "email": "mit.edu;almaden.ibm.com", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "Massachusetts Institute of Technology;IBM", "aff_unique_dep": "Department of Mechanical Engineering;Research Center", "aff_unique_url": "https://web.mit.edu;https://www.ibm.com/research", "aff_unique_abbr": "MIT;IBM", "aff_campus_unique_index": "0;1", "aff_campus_unique": "Cambridge;Almaden", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "f7542b7e9f", "title": "Exponential Family Harmoniums with an Application to Information Retrieval", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/0e900ad84f63618452210ab8baae0218-Abstract.html", "author": "Max Welling; Michal Rosen-zvi; Geoffrey E. Hinton", "abstract": "Directed graphical models with one layer of observed random variables and one or more layers of hidden random variables have been the dom- inant modelling paradigm in many research \ufb01elds. Although this ap- proach has met with considerable success, the causal semantics of these models can make it dif\ufb01cult to infer the posterior distribution over the hidden variables. In this paper we propose an alternative two-layer model based on exponential family distributions and the semantics of undi- rected models. Inference in these \u201cexponential family harmoniums\u201d is fast while learning is performed by minimizing contrastive divergence. A member of this family is then studied as an alternative probabilistic model for latent semantic indexing. In experiments it is shown that they perform well on document retrieval tasks and provide an elegant solution to searching with keywords.", "bibtex": "@inproceedings{NIPS2004_0e900ad8,\n author = {Welling, Max and Rosen-zvi, Michal and Hinton, Geoffrey E},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Exponential Family Harmoniums with an Application to Information Retrieval},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/0e900ad84f63618452210ab8baae0218-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/0e900ad84f63618452210ab8baae0218-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/0e900ad84f63618452210ab8baae0218-Metadata.json", "review": "", "metareview": "", "pdf_size": 542814, "gs_citation": 615, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8430163586994787595&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 19, "aff": "Information and Computer Science, University of California, Irvine CA 92697-3425 USA; Information and Computer Science, University of California, Irvine CA 92697-3425 USA; Department of Computer Science, University of Toronto, Toronto, 290G M5S 3G4, Canada", "aff_domain": "ics.uci.edu; ;cs.toronto.edu", "email": "ics.uci.edu; ;cs.toronto.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;1", "aff_unique_norm": "University of California, Irvine;University of Toronto", "aff_unique_dep": "Department of Information and Computer Science;Department of Computer Science", "aff_unique_url": "https://www.uci.edu;https://www.utoronto.ca", "aff_unique_abbr": "UCI;U of T", "aff_campus_unique_index": "0;0;1", "aff_campus_unique": "Irvine;Toronto", "aff_country_unique_index": "0;0;1", "aff_country_unique": "United States;Canada" }, { "id": "e55e7221c2", "title": "Exponentiated Gradient Algorithms for Large-margin Structured Classification", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/e97c864e8ac67f7aed5ce53ec28638f5-Abstract.html", "author": "Peter L. Bartlett; Michael Collins; Ben Taskar; David A. McAllester", "abstract": "We consider the problem of structured classi\ufb01cation, where the task is to predict a label y from an input x, and y has meaningful internal struc- ture. Our framework includes supervised training of Markov random \ufb01elds and weighted context-free grammars as special cases. We describe an algorithm that solves the large-margin optimization problem de\ufb01ned in [12], using an exponential-family (Gibbs distribution) representation of structured objects. The algorithm is ef\ufb01cient\u2014even in cases where the number of labels y is exponential in size\u2014provided that certain expecta- tions under Gibbs distributions can be calculated ef\ufb01ciently. The method for structured labels relies on a more general result, speci\ufb01cally the ap- plication of exponentiated gradient updates [7, 8] to quadratic programs.", "bibtex": "@inproceedings{NIPS2004_e97c864e,\n author = {Bartlett, Peter and Collins, Michael and Taskar, Ben and McAllester, David},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Exponentiated Gradient Algorithms for Large-margin Structured Classification},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/e97c864e8ac67f7aed5ce53ec28638f5-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/e97c864e8ac67f7aed5ce53ec28638f5-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/e97c864e8ac67f7aed5ce53ec28638f5-Metadata.json", "review": "", "metareview": "", "pdf_size": 114701, "gs_citation": 95, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1588277107245499089&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 23, "aff": "U.C.Berkeley; MIT CSAIL; Stanford University; TTI at Chicago", "aff_domain": "stat.berkeley.edu;csail.mit.edu;cs.stanford.edu;tti-c.org", "email": "stat.berkeley.edu;csail.mit.edu;cs.stanford.edu;tti-c.org", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;1;2;3", "aff_unique_norm": "University of California, Berkeley;Massachusetts Institute of Technology;Stanford University;Toyota Technological Institute at Chicago", "aff_unique_dep": ";Computer Science and Artificial Intelligence Laboratory;;", "aff_unique_url": "https://www.berkeley.edu;https://www.csail.mit.edu;https://www.stanford.edu;https://www.tti-chicago.org", "aff_unique_abbr": "UC Berkeley;MIT CSAIL;Stanford;TTI", "aff_campus_unique_index": "0;1;2;3", "aff_campus_unique": "Berkeley;Cambridge;Stanford;Chicago", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "id": "d0f9ddb7f1", "title": "Face Detection --- Efficient and Rank Deficient", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/f2e43fa3400d826df4195a9ac70dca62-Abstract.html", "author": "Wolf Kienzle; Matthias O. Franz; Bernhard Sch\u00f6lkopf; G\u00f6khan H. Bakir", "abstract": "This paper proposes a method for computing fast approximations to sup- port vector decision functions in the field of object detection. In the present approach we are building on an existing algorithm where the set of support vectors is replaced by a smaller, so-called reduced set of syn- thesized input space points. In contrast to the existing method that finds the reduced set via unconstrained optimization, we impose a structural constraint on the synthetic points such that the resulting approximations can be evaluated via separable filters. For applications that require scan- ning large images, this decreases the computational complexity by a sig- nificant amount. Experimental results show that in face detection, rank deficient approximations are 4 to 6 times faster than unconstrained re- duced set systems.", "bibtex": "@inproceedings{NIPS2004_f2e43fa3,\n author = {Kienzle, Wolf and Franz, Matthias and Sch\\\"{o}lkopf, Bernhard and Bakir, G\\\"{o}khan},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Face Detection --- Efficient and Rank Deficient},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/f2e43fa3400d826df4195a9ac70dca62-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/f2e43fa3400d826df4195a9ac70dca62-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/f2e43fa3400d826df4195a9ac70dca62-Metadata.json", "review": "", "metareview": "", "pdf_size": 135228, "gs_citation": 187, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7780720596643513182&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 13, "aff": "Max-Planck-Institute for Biological Cybernetics; Max-Planck-Institute for Biological Cybernetics; Max-Planck-Institute for Biological Cybernetics; Max-Planck-Institute for Biological Cybernetics", "aff_domain": "tuebingen.mpg.de;tuebingen.mpg.de;tuebingen.mpg.de;tuebingen.mpg.de", "email": "tuebingen.mpg.de;tuebingen.mpg.de;tuebingen.mpg.de;tuebingen.mpg.de", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Max-Planck-Institute for Biological Cybernetics", "aff_unique_dep": "Biological Cybernetics", "aff_unique_url": "https://www.biolcyber.mpg.de", "aff_unique_abbr": "MPIBC", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "Germany" }, { "id": "e579085ca6", "title": "Fast Rates to Bayes for Kernel Machines", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/dd17e652cd2a08fdb8bf7f68e2ad3814-Abstract.html", "author": "Ingo Steinwart; Clint Scovel", "abstract": "We establish learning rates to the Bayes risk for support vector machines (SVMs) with hinge loss. In particular, for SVMs with Gaussian RBF kernels we propose a geometric condition for distributions which can be used to determine approximation properties of these kernels. Finally, we compare our methods with a recent paper of G. Blanchard et al..", "bibtex": "@inproceedings{NIPS2004_dd17e652,\n author = {Steinwart, Ingo and Scovel, Clint},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Fast Rates to Bayes for Kernel Machines},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/dd17e652cd2a08fdb8bf7f68e2ad3814-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/dd17e652cd2a08fdb8bf7f68e2ad3814-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/dd17e652cd2a08fdb8bf7f68e2ad3814-Metadata.json", "review": "", "metareview": "", "pdf_size": 85174, "gs_citation": 13, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11515097318727166034&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Modeling, Algorithms and Informatics Group, CCS-3; Modeling, Algorithms and Informatics Group, CCS-3", "aff_domain": "lanl.gov;lanl.gov", "email": "lanl.gov;lanl.gov", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Los Alamos National Laboratory", "aff_unique_dep": "Modeling, Algorithms and Informatics Group", "aff_unique_url": "https://www.lanl.gov", "aff_unique_abbr": "LANL", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "dd79dd6f34", "title": "Following Curved Regularized Optimization Solution Paths", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/32b991e5d77ad140559ffb95522992d0-Abstract.html", "author": "Saharon Rosset", "abstract": "Regularization plays a central role in the analysis of modern data, where non-regularized fitting is likely to lead to over-fitted models, useless for both prediction and interpretation. We consider the design of incremen- tal algorithms which follow paths of regularized solutions, as the regu- larization varies. These approaches often result in methods which are both efficient and highly flexible. We suggest a general path-following algorithm based on second-order approximations, prove that under mild conditions it remains \"very close\" to the path of optimal solutions and illustrate it with examples.", "bibtex": "@inproceedings{NIPS2004_32b991e5,\n author = {Rosset, Saharon},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Following Curved Regularized Optimization Solution Paths},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/32b991e5d77ad140559ffb95522992d0-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/32b991e5d77ad140559ffb95522992d0-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/32b991e5d77ad140559ffb95522992d0-Metadata.json", "review": "", "metareview": "", "pdf_size": 146509, "gs_citation": 68, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9332819643855473331&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "IBM T.J. Watson Research Center, Yorktown Heights, NY 10598", "aff_domain": "us.ibm.com", "email": "us.ibm.com", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "IBM", "aff_unique_dep": "IBM T.J. 
Watson Research Center", "aff_unique_url": "https://www.ibm.com/research/watson", "aff_unique_abbr": "IBM Watson", "aff_campus_unique_index": "0", "aff_campus_unique": "Yorktown Heights", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "id": "397b30df52", "title": "Generalization Error Bounds for Collaborative Prediction with Low-Rank Matrices", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/f1b0775946bc0329b35b823b86eeb5f5-Abstract.html", "author": "Nathan Srebro; Noga Alon; Tommi S. Jaakkola", "abstract": "We prove generalization error bounds for predicting entries in a partially observed matrix by fitting the observed entries with a low-rank matrix. In justifying the analysis approach we take to obtain the bounds, we present an example of a class of functions of finite pseudodimension such that the sums of functions from this class have unbounded pseudodimension.", "bibtex": "@inproceedings{NIPS2004_f1b07759,\n author = {Srebro, Nathan and Alon, Noga and Jaakkola, Tommi},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Generalization Error Bounds for Collaborative Prediction with Low-Rank Matrices},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/f1b0775946bc0329b35b823b86eeb5f5-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/f1b0775946bc0329b35b823b86eeb5f5-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/f1b0775946bc0329b35b823b86eeb5f5-Metadata.json", "review": "", "metareview": "", "pdf_size": 178284, "gs_citation": 147, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3220331559279018992&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 21, "aff": "Department of Computer Science, University of Toronto, Toronto, ON, Canada; School of Mathematical Sciences, Tel Aviv University, Ramat Aviv, Israel; Computer Science and Artificial Intelligence Laboratory, Massachusetts Institute of Technology, Cambridge, MA, USA", "aff_domain": "cs.toronto.edu;tau.ac.il;csail.mit.edu", "email": "cs.toronto.edu;tau.ac.il;csail.mit.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;2", "aff_unique_norm": "University of Toronto;Tel Aviv University;Massachusetts Institute of Technology", "aff_unique_dep": "Department of Computer Science;School of Mathematical Sciences;Computer Science and Artificial Intelligence Laboratory", "aff_unique_url": "https://www.utoronto.ca;https://www.tau.ac.il;https://www.mit.edu", "aff_unique_abbr": "U of T;TAU;MIT", "aff_campus_unique_index": "0;1;2", "aff_campus_unique": "Toronto;Ramat Aviv;Cambridge", "aff_country_unique_index": "0;1;2", "aff_country_unique": "Canada;Israel;United States" }, { "id": "babb6859a8", "title": "Generalization Error and Algorithmic Convergence of Median Boosting", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/a431d70133ef6cf688bc4f6093922b48-Abstract.html", "author": "Bal\u00e1zs K\u00e9gl", "abstract": "We have recently proposed an extension of ADABOOST to regression that uses the median of the base regressors as the final regressor. In this paper we extend theoretical results obtained for ADABOOST to median boosting and to its localized variant. 
First, we extend recent results on ef- ficient margin maximizing to show that the algorithm can converge to the maximum achievable margin within a preset precision in a finite number of steps. Then we provide confidence-interval-type bounds on the gener- alization error.", "bibtex": "@inproceedings{NIPS2004_a431d701,\n author = {K\\'{e}gl, Bal\\'{a}zs},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Generalization Error and Algorithmic Convergence of Median Boosting},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/a431d70133ef6cf688bc4f6093922b48-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/a431d70133ef6cf688bc4f6093922b48-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/a431d70133ef6cf688bc4f6093922b48-Metadata.json", "review": "", "metareview": "", "pdf_size": 122265, "gs_citation": 2, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12268838049541326802&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Department of Computer Science and Operations Research, University of Montreal", "aff_domain": "iro.umontreal.ca", "email": "iro.umontreal.ca", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "University of Montreal", "aff_unique_dep": "Department of Computer Science and Operations Research", "aff_unique_url": "https://www.umontreal.ca", "aff_unique_abbr": "UM", "aff_country_unique_index": "0", "aff_country_unique": "Canada" }, { "id": "a59e3133fa", "title": "Generative Affine Localisation and Tracking", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/18ad9899f3f21a5a1583584d5f11c0c0-Abstract.html", "author": "John Winn; Andrew Blake", "abstract": "We present an extension to the Jojic and Frey (2001) layered sprite model which allows for layers to undergo affine transformations. This extension allows for affine object pose to be inferred whilst simultaneously learn- ing the object shape and appearance. Learning is carried out by applying an augmented variational inference algorithm which includes a global search over a discretised transform space followed by a local optimisa- tion. To aid correct convergence, we use bottom-up cues to restrict the space of possible affine transformations. We present results on a number of video sequences and show how the model can be extended to track an object whose appearance changes throughout the sequence.", "bibtex": "@inproceedings{NIPS2004_18ad9899,\n author = {Winn, John and Blake, Andrew},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Generative Affine Localisation and Tracking},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/18ad9899f3f21a5a1583584d5f11c0c0-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/18ad9899f3f21a5a1583584d5f11c0c0-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/18ad9899f3f21a5a1583584d5f11c0c0-Metadata.json", "review": "", "metareview": "", "pdf_size": 328375, "gs_citation": 28, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3498094178664102694&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": ";", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster" }, { "id": "46f223c9af", "title": "Harmonising Chorales by Probabilistic Inference", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/b628386c9b92481fab68fbf284bd6a64-Abstract.html", "author": "Moray Allan; Christopher Williams", "abstract": "We describe how we used a data set of chorale harmonisations composed by Johann Sebastian Bach to train Hidden Markov Models. Using a prob- abilistic framework allows us to create a harmonisation system which learns from examples, and which can compose new harmonisations. We make a quantitative comparison of our system's harmonisation perfor- mance against simpler models, and provide example harmonisations.", "bibtex": "@inproceedings{NIPS2004_b628386c,\n author = {Allan, Moray and Williams, Christopher},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Harmonising Chorales by Probabilistic Inference},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/b628386c9b92481fab68fbf284bd6a64-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/b628386c9b92481fab68fbf284bd6a64-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/b628386c9b92481fab68fbf284bd6a64-Metadata.json", "review": "", "metareview": "", "pdf_size": 85477, "gs_citation": 276, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12360883840399409403&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 18, "aff": "School of Informatics, University of Edinburgh; School of Informatics, University of Edinburgh", "aff_domain": "ed.ac.uk;ed.ac.uk", "email": "ed.ac.uk;ed.ac.uk", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of Edinburgh", "aff_unique_dep": "School of Informatics", "aff_unique_url": "https://www.ed.ac.uk", "aff_unique_abbr": "Edinburgh", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Edinburgh", "aff_country_unique_index": "0;0", "aff_country_unique": "United Kingdom" }, { "id": "9d1b5405e3", "title": "Heuristics for Ordering Cue Search in Decision Making", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/ed277964a8959e72a0d987e598dfbe72-Abstract.html", "author": "Peter M. Todd; Anja Dieckmann", "abstract": "Simple lexicographic decision heuristics that consider cues one at a \n time in a particular order and stop searching for cues as soon as a \n decision can be made have been shown to be both accurate and \n frugal in their use of information. 
But much of the simplicity and \n success of these heuristics comes from using an appropriate cue \n order. For instance, the Take The Best heuristic uses validity order \n for cues, which requires considerable computation, potentially \n undermining the computational advantages of the simple decision \n mechanism. But many cue orders can achieve good decision \n performance, and studies of sequential search for data records have \n proposed a number of simple ordering rules that may be of use in \n constructing appropriate decision cue orders as well. Here we \n consider a range of simple cue ordering mechanisms, including \n tallying, swapping, and move-to-front rules, and show that they can \n find cue orders that lead to reasonable accuracy and considerable \n frugality when used with lexicographic decision heuristics.", "bibtex": "@inproceedings{NIPS2004_ed277964,\n author = {Todd, Peter and Dieckmann, Anja},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Heuristics for Ordering Cue Search in Decision Making},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/ed277964a8959e72a0d987e598dfbe72-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/ed277964a8959e72a0d987e598dfbe72-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/ed277964a8959e72a0d987e598dfbe72-Metadata.json", "review": "", "metareview": "", "pdf_size": 179835, "gs_citation": 50, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1285428433339552377&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "Center for Adaptive Behavior and Cognition MPI for Human Development Lentzeallee 94, 14195 Berlin, Germany; Center for Adaptive Behavior and Cognition MPI for Human Development Lentzeallee 94, 14195 Berlin, Germany", "aff_domain": "mpib-berlin.mpg.de;mpib-berlin.mpg.de", "email": "mpib-berlin.mpg.de;mpib-berlin.mpg.de", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Max Planck Institute for Human Development", "aff_unique_dep": "Center for Adaptive Behavior and Cognition", "aff_unique_url": "https://www.mpi-hd.mpg.de", "aff_unique_abbr": "MPI for Human Development", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Berlin", "aff_country_unique_index": "0;0", "aff_country_unique": "Germany" }, { "id": "154a386d01", "title": "Hierarchical Bayesian Inference in Networks of Spiking Neurons", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/38181d991caac98be8fb2ecb8bd0f166-Abstract.html", "author": "Rajesh P. Rao", "abstract": "There is growing evidence from psychophysical and neurophysiological studies that the brain utilizes Bayesian principles for inference and de- cision making. An important open question is how Bayesian inference for arbitrary graphical models can be implemented in networks of spik- ing neurons. In this paper, we show that recurrent networks of noisy integrate-and-fire neurons can perform approximate Bayesian inference for dynamic and hierarchical graphical models. The membrane potential dynamics of neurons is used to implement belief propagation in the log domain. The spiking probability of a neuron is shown to approximate the posterior probability of the preferred state encoded by the neuron, given past inputs. 
We illustrate the model using two examples: (1) a motion de- tection network in which the spiking probability of a direction-selective neuron becomes proportional to the posterior probability of motion in a preferred direction, and (2) a two-level hierarchical network that pro- duces attentional effects similar to those observed in visual cortical areas V2 and V4. The hierarchical model offers a new Bayesian interpretation of attentional modulation in V2 and V4.", "bibtex": "@inproceedings{NIPS2004_38181d99,\n author = {Rao, Rajesh PN},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Hierarchical Bayesian Inference in Networks of Spiking Neurons},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/38181d991caac98be8fb2ecb8bd0f166-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/38181d991caac98be8fb2ecb8bd0f166-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/38181d991caac98be8fb2ecb8bd0f166-Metadata.json", "review": "", "metareview": "", "pdf_size": 112588, "gs_citation": 113, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15318499934034914518&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 13, "aff": "Department of Computer Science and Engineering, University of Washington, Seattle, WA 98195", "aff_domain": "cs.washington.edu", "email": "cs.washington.edu", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "University of Washington", "aff_unique_dep": "Department of Computer Science and Engineering", "aff_unique_url": "https://www.washington.edu", "aff_unique_abbr": "UW", "aff_campus_unique_index": "0", "aff_campus_unique": "Seattle", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "id": "fbb4d891d3", "title": "Hierarchical Clustering of a Mixture Model", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/36e729ec173b94133d8fa552e4029f8b-Abstract.html", "author": "Jacob Goldberger; Sam T. Roweis", "abstract": "In this paper we propose an efficient algorithm for reducing a large mixture of Gaussians into a smaller mixture while still preserv- ing the component structure of the original model; this is achieved by clustering (grouping) the components. The method minimizes a new, easily computed distance measure between two Gaussian mixtures that can be motivated from a suitable stochastic model and the iterations of the algorithm use only the model parameters, avoiding the need for explicit resampling of datapoints. We demon- strate the method by performing hierarchical clustering of scenery images and handwritten digits.", "bibtex": "@inproceedings{NIPS2004_36e729ec,\n author = {Goldberger, Jacob and Roweis, Sam},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Hierarchical Clustering of a Mixture Model},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/36e729ec173b94133d8fa552e4029f8b-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/36e729ec173b94133d8fa552e4029f8b-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/36e729ec173b94133d8fa552e4029f8b-Metadata.json", "review": "", "metareview": "", "pdf_size": 164015, "gs_citation": 225, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11199131867866979883&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Department of Computer Science, University of Toronto; Department of Computer Science, University of Toronto", "aff_domain": "cs.toronto.edu;cs.toronto.edu", "email": "cs.toronto.edu;cs.toronto.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of Toronto", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.utoronto.ca", "aff_unique_abbr": "U of T", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Toronto", "aff_country_unique_index": "0;0", "aff_country_unique": "Canada" }, { "id": "aaeead2b3e", "title": "Hierarchical Distributed Representations for Statistical Language Modeling", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/110eec23201d80e40d0c4a48954e2ff5-Abstract.html", "author": "John Blitzer; Fernando Pereira; Kilian Q. Weinberger; Lawrence K. Saul", "abstract": "Statistical language models estimate the probability of a word occurring in a given context. The most common language models rely on a discrete enumeration of predictive contexts (e.g., n-grams) and consequently fail to capture and exploit statistical regularities across these contexts. In this paper, we show how to learn hierarchical, distributed representations of word contexts that maximize the predictive value of a statistical language model. The representations are initialized by unsupervised algorithms for linear and nonlinear dimensionality reduction [14], then fed as input into a hierarchical mixture of experts, where each expert is a multinomial dis- tribution over predicted words [12]. While the distributed representations in our model are inspired by the neural probabilistic language model of Bengio et al. [2, 3], our particular architecture enables us to work with significantly larger vocabularies and training corpora. For example, on a large-scale bigram modeling task involving a sixty thousand word vocab- ulary and a training corpus of three million sentences, we demonstrate consistent improvement over class-based bigram models [10, 13]. We also discuss extensions of our approach to longer multiword contexts.", "bibtex": "@inproceedings{NIPS2004_110eec23,\n author = {Blitzer, John and Pereira, Fernando and Weinberger, Kilian Q and Saul, Lawrence},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Hierarchical Distributed Representations for Statistical Language Modeling},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/110eec23201d80e40d0c4a48954e2ff5-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/110eec23201d80e40d0c4a48954e2ff5-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/110eec23201d80e40d0c4a48954e2ff5-Metadata.json", "review": "", "metareview": "", "pdf_size": 726424, "gs_citation": 46, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12676223952189814628&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 18, "aff": "Department of Computer and Information Science, University of Pennsylvania; Department of Computer and Information Science, University of Pennsylvania; Department of Computer and Information Science, University of Pennsylvania; Department of Computer and Information Science, University of Pennsylvania", "aff_domain": "cis.upenn.edu;cis.upenn.edu;cis.upenn.edu;cis.upenn.edu", "email": "cis.upenn.edu;cis.upenn.edu;cis.upenn.edu;cis.upenn.edu", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "University of Pennsylvania", "aff_unique_dep": "Department of Computer and Information Science", "aff_unique_url": "https://www.upenn.edu", "aff_unique_abbr": "UPenn", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "id": "ee235325df", "title": "Hierarchical Eigensolver for Transition Matrices in Spectral Methods", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/e727fa59ddefcefb5d39501167623132-Abstract.html", "author": "Chakra Chennubhotla; Allan D. Jepson", "abstract": "We show how to build hierarchical, reduced-rank representation for large stochastic matrices and use this representation to design an ef\ufb01cient al- gorithm for computing the largest eigenvalues, and the corresponding eigenvectors. In particular, the eigen problem is \ufb01rst solved at the coars- est level of the representation. The approximate eigen solution is then interpolated over successive levels of the hierarchy. A small number of power iterations are employed at each stage to correct the eigen solution. The typical speedups obtained by a Matlab implementation of our fast eigensolver over a standard sparse matrix eigensolver [13] are at least a factor of ten for large image sizes. The hierarchical representation has proven to be effective in a min-cut based segmentation algorithm that we proposed recently [8].", "bibtex": "@inproceedings{NIPS2004_e727fa59,\n author = {Chennubhotla, Chakra and Jepson, Allan},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Hierarchical Eigensolver for Transition Matrices in Spectral Methods},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/e727fa59ddefcefb5d39501167623132-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/e727fa59ddefcefb5d39501167623132-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/e727fa59ddefcefb5d39501167623132-Metadata.json", "review": "", "metareview": "", "pdf_size": 238122, "gs_citation": 28, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14211858440609333651&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "Department of Computational Biology, University of Pittsburgh; Department of Computer Science, University of Toronto", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "University of Pittsburgh;University of Toronto", "aff_unique_dep": "Department of Computational Biology;Department of Computer Science", "aff_unique_url": "https://www.pitt.edu;https://www.utoronto.ca", "aff_unique_abbr": "Pitt;U of T", "aff_campus_unique_index": "1", "aff_campus_unique": ";Toronto", "aff_country_unique_index": "0;1", "aff_country_unique": "United States;Canada" }, { "id": "7ee4ecb05e", "title": "Identifying Protein-Protein Interaction Sites on a Genome-Wide Scale", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/dc2b690516158a874dd8aabe1365c6a0-Abstract.html", "author": "Haidong Wang; Eran Segal; Asa Ben-Hur; Daphne Koller; Douglas L. Brutlag", "abstract": "Protein interactions typically arise from a physical interaction of one or more small sites on the surface of the two proteins. Identifying these sites is very important for drug and protein design. In this paper, we propose a computational method based on probabilistic relational model that at- tempts to address this task using high-throughput protein interaction data and a set of short sequence motifs. We learn the model using the EM algorithm, with a branch-and-bound algorithm as an approximate infer- ence for the E-step. Our method searches for motifs whose presence in a pair of interacting proteins can explain their observed interaction. It also tries to determine which motif pairs have high affinity, and can therefore lead to an interaction. We show that our method is more accurate than others at predicting new protein-protein interactions. More importantly, by examining solved structures of protein complexes, we find that 2/3 of the predicted active motifs correspond to actual interaction sites.", "bibtex": "@inproceedings{NIPS2004_dc2b6905,\n author = {Wang, Haidong and Segal, Eran and Ben-Hur, Asa and Koller, Daphne and Brutlag, Douglas},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Identifying Protein-Protein Interaction Sites on a Genome-Wide Scale},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/dc2b690516158a874dd8aabe1365c6a0-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/dc2b690516158a874dd8aabe1365c6a0-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/dc2b690516158a874dd8aabe1365c6a0-Metadata.json", "review": "", "metareview": "", "pdf_size": 103276, "gs_citation": 42, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11846787281092153439&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "Computer Science Department, Stanford University, CA 94305; Computer Science Department, Stanford University, CA 94305 + Center for Studies in Physics and Biology, Rockefeller University, NY 10021; Department of Genome Sciences, University of Washington, WA 98195; Computer Science Department, Stanford University, CA 94305; Department of Biochemistry, Stanford University, CA 94305", "aff_domain": "cs.stanford.edu;cs.stanford.edu;gs.washington.edu;cs.stanford.edu;stanford.edu", "email": "cs.stanford.edu;cs.stanford.edu;gs.washington.edu;cs.stanford.edu;stanford.edu", "github": "", "project": "", "author_num": 5, "track": "main", "status": "Poster", "aff_unique_index": "0;0+1;2;0;0", "aff_unique_norm": "Stanford University;Rockefeller University;University of Washington", "aff_unique_dep": "Computer Science Department;Center for Studies in Physics and Biology;Department of Genome Sciences", "aff_unique_url": "https://www.stanford.edu;https://www.rockefeller.edu;https://www.washington.edu", "aff_unique_abbr": "Stanford;RU;UW", "aff_campus_unique_index": "0;0+1;2;0;0", "aff_campus_unique": "Stanford;New York;Seattle", "aff_country_unique_index": "0;0+0;0;0;0", "aff_country_unique": "United States" }, { "id": "befe2844cc", "title": "Implicit Wiener Series for Higher-Order Image Analysis", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/321cf86b4c9f5ddd04881a44067c2a5a-Abstract.html", "author": "Matthias O. Franz; Bernhard Sch\u00f6lkopf", "abstract": "The computation of classical higher-order statistics such as higher-order moments or spectra is difficult for images due to the huge number of terms to be estimated and interpreted. We propose an alternative ap- proach in which multiplicative pixel interactions are described by a se- ries of Wiener functionals. Since the functionals are estimated implicitly via polynomial kernels, the combinatorial explosion associated with the classical higher-order statistics is avoided. First results show that image structures such as lines or corners can be predicted correctly, and that pixel interactions up to the order of five play an important role in natural images.", "bibtex": "@inproceedings{NIPS2004_321cf86b,\n author = {Franz, Matthias and Sch\\\"{o}lkopf, Bernhard},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Implicit Wiener Series for Higher-Order Image Analysis},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/321cf86b4c9f5ddd04881a44067c2a5a-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/321cf86b4c9f5ddd04881a44067c2a5a-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/321cf86b4c9f5ddd04881a44067c2a5a-Metadata.json", "review": "", "metareview": "", "pdf_size": 220606, "gs_citation": 38, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2593601374207896311&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": ";", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster" }, { "id": "8c01e0a814", "title": "Incremental Algorithms for Hierarchical Classification", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/78f7d96ea21ccae89a7b581295f34135-Abstract.html", "author": "Nicol\u00f2 Cesa-bianchi; Claudio Gentile; Andrea Tironi; Luca Zaniboni", "abstract": "We study the problem of hierarchical classification when labels corre- sponding to partial and/or multiple paths in the underlying taxonomy are allowed. We introduce a new hierarchical loss function, the H-loss, im- plementing the simple intuition that additional mistakes in the subtree of a mistaken class should not be charged for. Based on a probabilistic data model introduced in earlier work, we derive the Bayes-optimal classifier for the H-loss. We then empirically compare two incremental approx- imations of the Bayes-optimal classifier with a flat SVM classifier and with classifiers obtained by using hierarchical versions of the Perceptron and SVM algorithms. The experiments show that our simplest incremen- tal approximation of the Bayes-optimal classifier performs, after just one training epoch, nearly as well as the hierarchical SVM classifier (which performs best). For the same incremental algorithm we also derive an H-loss bound showing, when data are generated by our probabilistic data model, exponentially fast convergence to the H-loss of the hierarchical classifier based on the true model parameters.", "bibtex": "@inproceedings{NIPS2004_78f7d96e,\n author = {Cesa-bianchi, Nicol\\`{o} and Gentile, Claudio and Tironi, Andrea and Zaniboni, Luca},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Incremental Algorithms for Hierarchical Classification},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/78f7d96ea21ccae89a7b581295f34135-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/78f7d96ea21ccae89a7b581295f34135-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/78f7d96ea21ccae89a7b581295f34135-Metadata.json", "review": "", "metareview": "", "pdf_size": 98808, "gs_citation": 372, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18279155889357549467&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 26, "aff": ";;;", "aff_domain": ";;;", "email": ";;;", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster" }, { "id": "c5c3c79599", "title": "Incremental Learning for Visual Tracking", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/f21e255f89e0f258accbe4e984eef486-Abstract.html", "author": "Jongwoo Lim; David A. Ross; Ruei-sung Lin; Ming-Hsuan Yang", "abstract": "Most existing tracking algorithms construct a representation of a target object prior to the tracking task starts, and utilize invariant features to handle appearance variation of the target caused by lighting, pose, and view angle change. In this paper, we present an efficient and effec- tive online algorithm that incrementally learns and adapts a low dimen- sional eigenspace representation to reflect appearance changes of the tar- get, thereby facilitating the tracking task. Furthermore, our incremental method correctly updates the sample mean and the eigenbasis, whereas existing incremental subspace update methods ignore the fact the sample mean varies over time. The tracking problem is formulated as a state inference problem within a Markov Chain Monte Carlo framework and a particle filter is incorporated for propagating sample distributions over time. Numerous experiments demonstrate the effectiveness of the pro- posed tracking algorithm in indoor and outdoor environments where the target objects undergo large pose and lighting changes.", "bibtex": "@inproceedings{NIPS2004_f21e255f,\n author = {Lim, Jongwoo and Ross, David and Lin, Ruei-sung and Yang, Ming-Hsuan},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Incremental Learning for Visual Tracking},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/f21e255f89e0f258accbe4e984eef486-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/f21e255f89e0f258accbe4e984eef486-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/f21e255f89e0f258accbe4e984eef486-Metadata.json", "review": "", "metareview": "", "pdf_size": 296524, "gs_citation": 422, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1324961078262771712&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 19, "aff": "University of Illinois; University of Toronto; University of Illinois + Honda Research Institute; Honda Research Institute", "aff_domain": "uiuc.edu;cs.toronto.edu;uiuc.edu;honda-ri.com", "email": "uiuc.edu;cs.toronto.edu;uiuc.edu;honda-ri.com", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;1;0+2;2", "aff_unique_norm": "University of Illinois;University of Toronto;Honda Research Institute", "aff_unique_dep": ";;", "aff_unique_url": "https://www.illinois.edu;https://www.utoronto.ca;https://www.honda-ri.com", "aff_unique_abbr": "UIUC;U of T;HRI", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1;0+2;2", "aff_country_unique": "United States;Canada;Japan" }, { "id": "77fb215ff7", "title": "Inference, Attention, and Decision in a Bayesian Neural Architecture", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/0e4a2c65bdaddd66a53422d93daebe68-Abstract.html", "author": "Angela J. Yu; Peter Dayan", "abstract": "We study the synthesis of neural coding, selective attention and percep- tual decision making. A hierarchical neural architecture is proposed, which implements Bayesian integration of noisy sensory input and top- down attentional priors, leading to sound perceptual discrimination. The model offers an explicit explanation for the experimentally observed modulation that prior information in one stimulus feature (location) can have on an independent feature (orientation). The network's intermediate levels of representation instantiate known physiological properties of vi- sual cortical neurons. The model also illustrates a possible reconciliation of cortical and neuromodulatory representations of uncertainty.", "bibtex": "@inproceedings{NIPS2004_0e4a2c65,\n author = {Yu, Angela J and Dayan, Peter},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Inference, Attention, and Decision in a Bayesian Neural Architecture},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/0e4a2c65bdaddd66a53422d93daebe68-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/0e4a2c65bdaddd66a53422d93daebe68-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/0e4a2c65bdaddd66a53422d93daebe68-Metadata.json", "review": "", "metareview": "", "pdf_size": 251809, "gs_citation": 102, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6771841461904306005&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 12, "aff": "Gatsby Computational Neuroscience Unit, UCL; Gatsby Computational Neuroscience Unit, UCL", "aff_domain": "gatsby.ucl.ac.uk;gatsby.ucl.ac.uk", "email": "gatsby.ucl.ac.uk;gatsby.ucl.ac.uk", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University College London", "aff_unique_dep": "Gatsby Computational Neuroscience Unit", "aff_unique_url": "https://www.ucl.ac.uk", "aff_unique_abbr": "UCL", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United Kingdom" }, { "id": "a256167ee9", "title": "Instance-Based Relevance Feedback for Image Retrieval", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/5acdc9ca5d99ae66afdfe1eea0e3b26b-Abstract.html", "author": "Giorgio Gia\\-cin\\-to; Fabio Roli", "abstract": "High retrieval precision in content-based image retrieval can be attained by adopting relevance feedback mechanisms. These mechanisms require that the user judges the quality of the results of the query by marking all the retrieved images as being either relevant or not. Then, the search engine exploits this information to adapt the search to better meet user's needs. At present, the vast majority of proposed relevance feedback mechanisms are formulated in terms of search model that has to be optimized. Such an optimization involves the modification of some search parameters so that the nearest neighbor of the query vector contains the largest number of relevant images. In this paper, a different approach to relevance feedback is proposed. After the user provides the first feedback, following retrievals are not based on k- nn search, but on the computation of a relevance score for each image of the database. This score is computed as a function of two distances, namely the distance from the nearest non-relevant image and the distance from the nearest relevant one. Images are then ranked according to this score and the top k images are displayed. Reported results on three image data sets show that the proposed mechanism outperforms other state-of-the-art relevance feedback mechanisms.", "bibtex": "@inproceedings{NIPS2004_5acdc9ca,\n author = {Gia\\textbackslash -cin\\textbackslash -to, Giorgio and Roli, Fabio},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Instance-Based Relevance Feedback for Image Retrieval},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/5acdc9ca5d99ae66afdfe1eea0e3b26b-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/5acdc9ca5d99ae66afdfe1eea0e3b26b-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/5acdc9ca5d99ae66afdfe1eea0e3b26b-Metadata.json", "review": "", "metareview": "", "pdf_size": 1043424, "gs_citation": 65, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5365565510895295390&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 12, "aff": "Department of Electrical and Electronic Engineering, University of Cagliari; Department of Electrical and Electronic Engineering, University of Cagliari", "aff_domain": "diee.unica.it;diee.unica.it", "email": "diee.unica.it;diee.unica.it", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of Cagliari", "aff_unique_dep": "Department of Electrical and Electronic Engineering", "aff_unique_url": "https://www.unica.it", "aff_unique_abbr": "", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "Italy" }, { "id": "5aad82e591", "title": "Instance-Specific Bayesian Model Averaging for Classification", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/e6acf4b0f69f6f6e60e9a815938aa1ff-Abstract.html", "author": "Shyam Visweswaran; Gregory F. Cooper", "abstract": "Classification algorithms typically induce population-wide models that are trained to perform well on average on expected future instances. We introduce a Bayesian framework for learning instance-specific models from data that are optimized to predict well for a particular instance. Based on this framework, we present a that performs selective model averaging over a restricted class of Bayesian networks. On experimental evaluation, this algorithm shows superior performance over model selection. We intend to apply such instance-specific algorithms to improve the performance of patient-specific predictive models induced from medical data.", "bibtex": "@inproceedings{NIPS2004_e6acf4b0,\n author = {Visweswaran, Shyam and Cooper, Gregory},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Instance-Specific Bayesian Model Averaging for Classification},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/e6acf4b0f69f6f6e60e9a815938aa1ff-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/e6acf4b0f69f6f6e60e9a815938aa1ff-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/e6acf4b0f69f6f6e60e9a815938aa1ff-Metadata.json", "review": "", "metareview": "", "pdf_size": 169407, "gs_citation": 18, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13486627389544901450&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Center for Biomedical Informatics + Intelligent Systems Program; Center for Biomedical Informatics + Intelligent Systems Program", "aff_domain": "cbmi.pitt.edu;cbmi.pitt.edu", "email": "cbmi.pitt.edu;cbmi.pitt.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0+1;0+1", "aff_unique_norm": "Center for Biomedical Informatics;Intelligent Systems Program", "aff_unique_dep": "Biomedical Informatics;Intelligent Systems", "aff_unique_url": ";", "aff_unique_abbr": ";", "aff_campus_unique_index": ";", "aff_campus_unique": "", "aff_country_unique_index": ";", "aff_country_unique": "" }, { "id": "fd121f6552", "title": "Integrating Topics and Syntax", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/ef0917ea498b1665ad6c701057155abe-Abstract.html", "author": "Thomas L. Griffiths; Mark Steyvers; David M. Blei; Joshua B. Tenenbaum", "abstract": "Statistical approaches to language learning typically focus on either short-range syntactic dependencies or long-range semantic dependencies between words. We present a generative model that uses both kinds of dependencies, and can be used to simultaneously find syntactic classes and semantic topics despite having no representation of syntax or seman- tics beyond statistical dependency. This model is competitive on tasks like part-of-speech tagging and document classification with models that exclusively use short- and long-range dependencies respectively.", "bibtex": "@inproceedings{NIPS2004_ef0917ea,\n author = {Griffiths, Thomas and Steyvers, Mark and Blei, David and Tenenbaum, Joshua},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Integrating Topics and Syntax},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/ef0917ea498b1665ad6c701057155abe-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/ef0917ea498b1665ad6c701057155abe-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/ef0917ea498b1665ad6c701057155abe-Metadata.json", "review": "", "metareview": "", "pdf_size": 70083, "gs_citation": 756, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7764814070934529683&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 22, "aff": "Massachusetts Institute of Technology; University of California, Irvine; University of California, Berkeley; Massachusetts Institute of Technology", "aff_domain": "mit.edu;uci.edu;cs.berkeley.edu;mit.edu", "email": "mit.edu;uci.edu;cs.berkeley.edu;mit.edu", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;1;2;0", "aff_unique_norm": "Massachusetts Institute of Technology;University of California, Irvine;University of California, Berkeley", "aff_unique_dep": ";;", "aff_unique_url": "https://web.mit.edu;https://www.uci.edu;https://www.berkeley.edu", "aff_unique_abbr": "MIT;UCI;UC Berkeley", "aff_campus_unique_index": "1;2", "aff_campus_unique": ";Irvine;Berkeley", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "id": "099744dc73", "title": "Intrinsically Motivated Reinforcement Learning", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/4be5a36cbaca8ab9d2066debfe4e65c1-Abstract.html", "author": "Nuttapong Chentanez; Andrew G. Barto; Satinder P. Singh", "abstract": "Psychologists call behavior intrinsically motivated when it is engaged in for its own sake rather than as a step toward solving a specific problem of clear practical value. But what we learn during intrinsically motivated behavior is essential for our development as competent autonomous en- tities able to efficiently solve a wide range of practical problems as they arise. In this paper we present initial results from a computational study of intrinsically motivated reinforcement learning aimed at allowing arti- ficial agents to construct and extend hierarchies of reusable skills that are needed for competent autonomy.", "bibtex": "@inproceedings{NIPS2004_4be5a36c,\n author = {Chentanez, Nuttapong and Barto, Andrew and Singh, Satinder},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Intrinsically Motivated Reinforcement Learning},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/4be5a36cbaca8ab9d2066debfe4e65c1-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/4be5a36cbaca8ab9d2066debfe4e65c1-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/4be5a36cbaca8ab9d2066debfe4e65c1-Metadata.json", "review": "", "metareview": "", "pdf_size": 821317, "gs_citation": 1097, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9736217847061704054&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 20, "aff": "Computer Science & Eng., University of Michigan; Dept. 
of Computer Science, University of Massachusetts; Computer Science & Eng., University of Michigan", "aff_domain": "umich.edu;cs.umass.edu;umich.edu", "email": "umich.edu;cs.umass.edu;umich.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;0", "aff_unique_norm": "University of Michigan;University of Massachusetts", "aff_unique_dep": "Computer Science & Engineering;Dept. of Computer Science", "aff_unique_url": "https://www.umich.edu;https://www.umass.edu", "aff_unique_abbr": "UM;UMass", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Ann Arbor;", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "42b306ad57", "title": "Joint MRI Bias Removal Using Entropy Minimization Across Images", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/7c4bf50b715509a963ce81b168ca674b-Abstract.html", "author": "Erik G. Learned-miller; Parvez Ahammad", "abstract": "The correction of bias in magnetic resonance images is an important problem in medical image processing. Most previous approaches have used a maximum likelihood method to increase the likelihood of the pix- els in a single image by adaptively estimating a correction to the unknown image bias field. The pixel likelihoods are defined either in terms of a pre-existing tissue model, or non-parametrically in terms of the image's own pixel values. In both cases, the specific location of a pixel in the im- age is not used to calculate the likelihoods. We suggest a new approach in which we simultaneously eliminate the bias from a set of images of the same anatomy, but from different patients. We use the statistics from the same location across different images, rather than within an image, to eliminate bias fields from all of the images simultaneously. The method builds a \"multi-resolution\" non-parametric tissue model conditioned on image location while eliminating the bias fields associated with the orig- inal image set. We present experiments on both synthetic and real MR data sets, and present comparisons with other methods.", "bibtex": "@inproceedings{NIPS2004_7c4bf50b,\n author = {Learned-miller, Erik and Ahammad, Parvez},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Joint MRI Bias Removal Using Entropy Minimization Across Images},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/7c4bf50b715509a963ce81b168ca674b-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/7c4bf50b715509a963ce81b168ca674b-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/7c4bf50b715509a963ce81b168ca674b-Metadata.json", "review": "", "metareview": "", "pdf_size": 886592, "gs_citation": 24, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6666578177655230300&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "Department of Computer Science, University of Massachusetts, Amherst; Division of Electrical Engineering, University of California, Berkeley", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "University of Massachusetts Amherst;University of California, Berkeley", "aff_unique_dep": "Department of Computer Science;Division of Electrical Engineering", "aff_unique_url": "https://www.umass.edu;https://www.berkeley.edu", "aff_unique_abbr": "UMass Amherst;UC Berkeley", "aff_campus_unique_index": "0;1", "aff_campus_unique": "Amherst;Berkeley", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "16f4a9fea0", "title": "Joint Probabilistic Curve Clustering and Alignment", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/4de81d9105c85bca6e6e4666e6dd536a-Abstract.html", "author": "Scott J. Gaffney; Padhraic Smyth", "abstract": "Clustering and prediction of sets of curves is an important problem in many areas of science and engineering. It is often the case that curves tend to be misaligned from each other in a continuous manner, either in space (across the measurements) or in time. We develop a probabilistic framework that allows for joint clustering and continuous alignment of sets of curves in curve space (as opposed to a fixed-dimensional feature- vector space). The proposed methodology integrates new probabilistic alignment models with model-based curve clustering algorithms. The probabilistic approach allows for the derivation of consistent EM learn- ing algorithms for the joint clustering-alignment problem. Experimental results are shown for alignment of human growth data, and joint cluster- ing and alignment of gene expression time-course data.", "bibtex": "@inproceedings{NIPS2004_4de81d91,\n author = {Gaffney, Scott and Smyth, Padhraic},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Joint Probabilistic Curve Clustering and Alignment},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/4de81d9105c85bca6e6e4666e6dd536a-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/4de81d9105c85bca6e6e4666e6dd536a-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/4de81d9105c85bca6e6e4666e6dd536a-Metadata.json", "review": "", "metareview": "", "pdf_size": 171366, "gs_citation": 129, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14603824681432211931&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "School of Information and Computer Science, University of California, Irvine, CA 92697-3425; School of Information and Computer Science, University of California, Irvine, CA 92697-3425", "aff_domain": "ics.uci.edu;ics.uci.edu", "email": "ics.uci.edu;ics.uci.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of California, Irvine", "aff_unique_dep": "School of Information and Computer Science", "aff_unique_url": "https://www.uci.edu", "aff_unique_abbr": "UCI", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Irvine", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "14327ab5d9", "title": "Joint Tracking of Pose, Expression, and Texture using Conditionally Gaussian Filters", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/361440528766bbaaaa1901845cf4152b-Abstract.html", "author": "Tim K. Marks; J. C. Roddey; Javier R. Movellan; John R. Hershey", "abstract": "We present a generative model and stochastic filtering algorithm for si- multaneous tracking of 3D position and orientation, non-rigid motion, object texture, and background texture using a single camera. We show that the solution to this problem is formally equivalent to stochastic fil- tering of conditionally Gaussian processes, a problem for which well known approaches exist [3, 8]. We propose an approach based on Monte Carlo sampling of the nonlinear component of the process (object mo- tion) and exact filtering of the object and background textures given the sampled motion. The smoothness of image sequences in time and space is exploited by using Laplace's method to generate proposal distributions for importance sampling [7]. The resulting inference algorithm encom- passes both optic flow and template-based tracking as special cases, and elucidates the conditions under which these methods are optimal. We demonstrate an application of the system to 3D non-rigid face tracking.", "bibtex": "@inproceedings{NIPS2004_36144052,\n author = {Marks, Tim and Roddey, J. and Movellan, Javier and Hershey, John},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Joint Tracking of Pose, Expression, and Texture using Conditionally Gaussian Filters},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/361440528766bbaaaa1901845cf4152b-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/361440528766bbaaaa1901845cf4152b-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/361440528766bbaaaa1901845cf4152b-Metadata.json", "review": "", "metareview": "", "pdf_size": 153043, "gs_citation": 12, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4419628474255803138&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Department of Cognitive Science, University of California San Diego; Microsoft; Institute for Neural Computation, University of California San Diego; Institute for Neural Computation, University of California San Diego", "aff_domain": "cogsci.ucsd.edu;microsoft.com;sccn.ucsd.edu;mplab.ucsd.edu", "email": "cogsci.ucsd.edu;microsoft.com;sccn.ucsd.edu;mplab.ucsd.edu", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;1;0;0", "aff_unique_norm": "University of California, San Diego;Microsoft", "aff_unique_dep": "Department of Cognitive Science;Microsoft Corporation", "aff_unique_url": "https://ucsd.edu;https://www.microsoft.com", "aff_unique_abbr": "UCSD;Microsoft", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "San Diego;", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "id": "a780edb935", "title": "Kernel Methods for Implicit Surface Modeling", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/64a08e5f1e6c39faeb90108c430eb120-Abstract.html", "author": "Joachim Giesen; Simon Spalinger; Bernhard Sch\u00f6lkopf", "abstract": "We describe methods for computing an implicit model of a hypersurface that is given only by a finite sampling. The methods work by mapping the sample points into a reproducing kernel Hilbert space and then deter- mining regions in terms of hyperplanes.", "bibtex": "@inproceedings{NIPS2004_64a08e5f,\n author = {Giesen, Joachim and Spalinger, Simon and Sch\\\"{o}lkopf, Bernhard},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Kernel Methods for Implicit Surface Modeling},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/64a08e5f1e6c39faeb90108c430eb120-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/64a08e5f1e6c39faeb90108c430eb120-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/64a08e5f1e6c39faeb90108c430eb120-Metadata.json", "review": "", "metareview": "", "pdf_size": 174591, "gs_citation": 89, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2177986613903272319&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Max Planck Institute for Biological Cybernetics, 72076 T\u00fcbingen, Germany; Department of Computer Science, ETH Z\u00fcrich, Switzerland; Department of Computer Science, ETH Z\u00fcrich, Switzerland", "aff_domain": "tuebingen.mpg.de;inf.ethz.ch;inf.ethz.ch", "email": "tuebingen.mpg.de;inf.ethz.ch;inf.ethz.ch", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;1", "aff_unique_norm": "Max Planck Institute for Biological Cybernetics;ETH Zurich", "aff_unique_dep": ";Department of Computer Science", "aff_unique_url": "https://www.biocybernetics.mpg.de;https://www.ethz.ch", "aff_unique_abbr": "MPIBC;ETHZ", "aff_campus_unique_index": "0", "aff_campus_unique": "T\u00fcbingen;", "aff_country_unique_index": "0;1;1", "aff_country_unique": "Germany;Switzerland" }, { "id": "9d13453dd3", "title": "Kernel Projection Machine: a New Tool for Pattern Recognition", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/17b3c7061788dbe82de5abe9f6fe22b3-Abstract.html", "author": "Laurent Zwald; Gilles Blanchard; Pascal Massart; R\u00e9gis Vert", "abstract": "This paper investigates the effect of Kernel Principal Component Analy- sis (KPCA) within the classification framework, essentially the regular- ization properties of this dimensionality reduction method. KPCA has been previously used as a pre-processing step before applying an SVM but we point out that this method is somewhat redundant from a reg- ularization point of view and we propose a new algorithm called Ker- nel Projection Machine to avoid this redundancy, based on an analogy with the statistical framework of regression for a Gaussian white noise model. Preliminary experimental results show that this algorithm reaches the same performances as an SVM.", "bibtex": "@inproceedings{NIPS2004_17b3c706,\n author = {Zwald, Laurent and Blanchard, Gilles and Massart, Pascal and Vert, R\\'{e}gis},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Kernel Projection Machine: a New Tool for Pattern Recognition},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/17b3c7061788dbe82de5abe9f6fe22b3-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/17b3c7061788dbe82de5abe9f6fe22b3-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/17b3c7061788dbe82de5abe9f6fe22b3-Metadata.json", "review": "", "metareview": "", "pdf_size": 79830, "gs_citation": 49, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3846167966768308799&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 16, "aff": "Fraunhofer First (IDA), K\u00b4ekul\u00b4estr. 
7, D-12489 Berlin, Germany; D\u00e9partement de Math\u00e9matiques, Universit\u00e9 Paris-Sud, Bat. 425, F-91405 Orsay, France; LRI, Universit\u00e9 Paris-Sud, Bat. 490, F-91405 Orsay, France + Masagroup, 24 Bd de l\u2019Hopital, F-75005 Paris, France; D\u00e9partement de Math\u00e9matiques, Universit\u00e9 Paris-Sud, Bat. 425, F-91405 Orsay, France", "aff_domain": "first.fhg.de;math.u-psud.fr;lri.fr;math.u-psud.fr", "email": "first.fhg.de;math.u-psud.fr;lri.fr;math.u-psud.fr", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;1;2+3;1", "aff_unique_norm": "Fraunhofer Institute for Digital Data Space (IDA);Universit\u00e9 Paris-Sud;Universit\u00e9 Paris-Sud;Masagroup", "aff_unique_dep": ";D\u00e9partement de Math\u00e9matiques;LRI;", "aff_unique_url": "https://www.iza.berlin/;https://www.universite-paris-sud.fr;https://www.universite-paris-sud.fr;", "aff_unique_abbr": "Fraunhofer IDA;UPS;UPS;", "aff_campus_unique_index": "1;1;1", "aff_campus_unique": ";Orsay", "aff_country_unique_index": "0;1;1+1;1", "aff_country_unique": "Germany;France" }, { "id": "371c046f55", "title": "Kernels for Multi--task Learning", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/c4f796afbc6267501964b46427b3f6ba-Abstract.html", "author": "Charles A. Micchelli; Massimiliano Pontil", "abstract": "This paper provides a foundation for multitask learning using reproducing ker- nel Hilbert spaces of vectorvalued functions. In this setting, the kernel is a matrixvalued function. Some explicit examples will be described which go be- yond our earlier results in [7]. In particular, we characterize classes of matrix valued kernels which are linear and are of the dot product or the translation invari- ant type. We discuss how these kernels can be used to model relations between the tasks and present linear multitask learning algorithms. Finally, we present a novel proof of the representer theorem for a minimizer of a regularization func- tional which is based on the notion of minimal norm interpolation.", "bibtex": "@inproceedings{NIPS2004_c4f796af,\n author = {Micchelli, Charles and Pontil, Massimiliano},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Kernels for Multi--task Learning},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/c4f796afbc6267501964b46427b3f6ba-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/c4f796afbc6267501964b46427b3f6ba-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/c4f796afbc6267501964b46427b3f6ba-Metadata.json", "review": "", "metareview": "", "pdf_size": 80883, "gs_citation": 212, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11013483784093798200&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 18, "aff": "Department of Mathematics and Statistics, State University of New York, The University at Albany; Department of Computer Sciences, University College London", "aff_domain": "; ", "email": "; ", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "State University of New York at Albany;University College London", "aff_unique_dep": "Department of Mathematics and Statistics;Department of Computer Sciences", "aff_unique_url": "https://www.albany.edu;https://www.ucl.ac.uk", "aff_unique_abbr": "SUNY Albany;UCL", "aff_campus_unique_index": "0;1", "aff_campus_unique": "Albany;London", "aff_country_unique_index": "0;1", "aff_country_unique": "United States;United Kingdom" }, { "id": "484194c73b", "title": "Large-Scale Prediction of Disulphide Bond Connectivity", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/53c6de78244e9f528eb3e1cda69699bb-Abstract.html", "author": "Jianlin Cheng; Alessandro Vullo; Pierre F. Baldi", "abstract": "The formation of disulphide bridges among cysteines is an important fea- ture of protein structures. Here we develop new methods for the predic- tion of disulphide bond connectivity. We first build a large curated data set of proteins containing disulphide bridges and then use 2-Dimensional Recursive Neural Networks to predict bonding probabilities between cys- teine pairs. These probabilities in turn lead to a weighted graph matching problem that can be addressed efficiently. We show how the method con- sistently achieves better results than previous approaches on the same validation data. In addition, the method can easily cope with chains with arbitrary numbers of bonded cysteines. Therefore, it overcomes one of the major limitations of previous approaches restricting predictions to chains containing no more than 10 oxidized cysteines. The method can be applied both to situations where the bonded state of each cysteine is known or unknown, in which case bonded state can be predicted with 85% precision and 90% recall. The method also yields an estimate for the total number of disulphide bridges in each chain.", "bibtex": "@inproceedings{NIPS2004_53c6de78,\n author = {Cheng, Jianlin and Vullo, Alessandro and Baldi, Pierre},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Large-Scale Prediction of Disulphide Bond Connectivity},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/53c6de78244e9f528eb3e1cda69699bb-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/53c6de78244e9f528eb3e1cda69699bb-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/53c6de78244e9f528eb3e1cda69699bb-Metadata.json", "review": "", "metareview": "", "pdf_size": 93972, "gs_citation": 68, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1822290458593175054&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "School of Information and Computer Science, University of California, Irvine; School of Information and Computer Science, University of California, Irvine; Computer Science Department, University College Dublin", "aff_domain": "ics.uci.edu;ics.uci.edu;ucd.ie", "email": "ics.uci.edu;ics.uci.edu;ucd.ie", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;1", "aff_unique_norm": "University of California, Irvine;University College Dublin", "aff_unique_dep": "School of Information and Computer Science;Computer Science Department", "aff_unique_url": "https://www.uci.edu;https://www.ucd.ie", "aff_unique_abbr": "UCI;UCD", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Irvine;", "aff_country_unique_index": "0;0;1", "aff_country_unique": "United States;Ireland" }, { "id": "89b7ba208f", "title": "Learning Efficient Auditory Codes Using Spikes Predicts Cochlear Filters", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/8bd39eae38511daad6152e84545e504d-Abstract.html", "author": "Evan C. Smith; Michael S. Lewicki", "abstract": "The representation of acoustic signals at the cochlear nerve must serve a wide range of auditory tasks that require exquisite sensitivity in both time and frequency. Lewicki (2002) demonstrated that many of the filtering properties of the cochlea could be explained in terms of efficient coding of natural sounds. This model, however, did not account for properties such as phase-locking or how sound could be encoded in terms of action potentials. Here, we extend this theoretical approach with algorithm for learning efficient auditory codes using a spiking population code. Here, we propose an algorithm for learning efficient auditory codes using a theoretical model for coding sound in terms of spikes. In this model, each spike encodes the precise time position and magnitude of a local- ized, time varying kernel function. By adapting the kernel functions to the statistics natural sounds, we show that, compared to conventional signal representations, the spike code achieves far greater coding effi- ciency. Furthermore, the inferred kernels show both striking similarities to measured cochlear filters and a similar bandwidth versus frequency dependence.", "bibtex": "@inproceedings{NIPS2004_8bd39eae,\n author = {Smith, Evan and Lewicki, Michael},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Learning Efficient Auditory Codes Using Spikes Predicts Cochlear Filters},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/8bd39eae38511daad6152e84545e504d-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/8bd39eae38511daad6152e84545e504d-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/8bd39eae38511daad6152e84545e504d-Metadata.json", "review": "", "metareview": "", "pdf_size": 249605, "gs_citation": 20, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14114083134105247814&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Departments of Psychology1& Computer Science2; Departments of Psychology1& Computer Science2", "aff_domain": "cnbc.cmu.edu;cnbc.cmu.edu", "email": "cnbc.cmu.edu;cnbc.cmu.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Departments of Psychology and Computer Science", "aff_unique_dep": "Psychology, Computer Science", "aff_unique_url": "", "aff_unique_abbr": "", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "", "aff_country_unique": "" }, { "id": "58908f6a85", "title": "Learning Gaussian Process Kernels via Hierarchical Bayes", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/453fadbd8a1a3af50a9df4df899537b5-Abstract.html", "author": "Anton Schwaighofer; Volker Tresp; Kai Yu", "abstract": "We present a novel method for learning with Gaussian process regres- sion in a hierarchical Bayesian framework. In a first step, kernel matri- ces on a fixed set of input points are learned from data using a simple and efficient EM algorithm. This step is nonparametric, in that it does not require a parametric form of covariance function. In a second step, kernel functions are fitted to approximate the learned covariance matrix using a generalized Nystrom method, which results in a complex, data driven kernel. We evaluate our approach as a recommendation engine for art images, where the proposed hierarchical Bayesian method leads to excellent prediction performance.", "bibtex": "@inproceedings{NIPS2004_453fadbd,\n author = {Schwaighofer, Anton and Tresp, Volker and Yu, Kai},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Learning Gaussian Process Kernels via Hierarchical Bayes},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/453fadbd8a1a3af50a9df4df899537b5-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/453fadbd8a1a3af50a9df4df899537b5-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/453fadbd8a1a3af50a9df4df899537b5-Metadata.json", "review": "", "metareview": "", "pdf_size": 140390, "gs_citation": 237, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=910447207371028615&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 16, "aff": "Fraunhofer FIRST Intelligent Data Analysis (IDA) Kekul\u00b4estrasse 7, 12489 Berlin; Siemens Corporate Technology Information and Communications 81730 Munich, Germany; Siemens Corporate Technology Information and Communications 81730 Munich, Germany", "aff_domain": "first.fhg.de;siemens.com;siemens.com", "email": "first.fhg.de;siemens.com;siemens.com", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;1", "aff_unique_norm": "Fraunhofer Institute for Software and Systems Engineering;Siemens AG", "aff_unique_dep": "Intelligent Data Analysis (IDA);Corporate Technology Information and Communications", "aff_unique_url": "https://www.first.fraunhofer.de/;https://www.siemens.com", "aff_unique_abbr": "Fraunhofer FIRST;Siemens", "aff_campus_unique_index": "1;1", "aff_campus_unique": ";Munich", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Germany" }, { "id": "2effccab12", "title": "Learning Hyper-Features for Visual Identification", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/1d49780520898fe37f0cd6b41c5311bf-Abstract.html", "author": "Andras D. Ferencz; Erik G. Learned-miller; Jitendra Malik", "abstract": "We address the problem of identifying specific instances of a class (cars) from a set of images all belonging to that class. Although we cannot build a model for any particular instance (as we may be provided with only one \"training\" example of it), we can use information extracted from observ- ing other members of the class. We pose this task as a learning problem, in which the learner is given image pairs, labeled as matching or not, and must discover which image features are most consistent for matching in- stances and discriminative for mismatches. We explore a patch based representation, where we model the distributions of similarity measure- ments defined on the patches. Finally, we describe an algorithm that selects the most salient patches based on a mutual information criterion. This algorithm performs identification well for our challenging dataset of car images, after matching only a few, well chosen patches.", "bibtex": "@inproceedings{NIPS2004_1d497805,\n author = {Ferencz, Andras and Learned-miller, Erik and Malik, Jitendra},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Learning Hyper-Features for Visual Identification},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/1d49780520898fe37f0cd6b41c5311bf-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/1d49780520898fe37f0cd6b41c5311bf-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/1d49780520898fe37f0cd6b41c5311bf-Metadata.json", "review": "", "metareview": "", "pdf_size": 241375, "gs_citation": 45, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8513902080207356612&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 13, "aff": ";;", "aff_domain": ";;", "email": ";;", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster" }, { "id": "b44fded17a", "title": "Learning Preferences for Multiclass Problems", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/5b168fdba5ee5ea262cc2d4c0b457697-Abstract.html", "author": "Fabio Aiolli; Alessandro Sperduti", "abstract": "Many interesting multiclass problems can be cast in the general frame- work of label ranking defined on a given set of classes. The evaluation for such a ranking is generally given in terms of the number of violated order constraints between classes. In this paper, we propose the Prefer- ence Learning Model as a unifying framework to model and solve a large class of multiclass problems in a large margin perspective. In addition, an original kernel-based method is proposed and evaluated on a ranking dataset with state-of-the-art results.", "bibtex": "@inproceedings{NIPS2004_5b168fdb,\n author = {Aiolli, Fabio and Sperduti, Alessandro},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Learning Preferences for Multiclass Problems},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/5b168fdba5ee5ea262cc2d4c0b457697-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/5b168fdba5ee5ea262cc2d4c0b457697-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/5b168fdba5ee5ea262cc2d4c0b457697-Metadata.json", "review": "", "metareview": "", "pdf_size": 90833, "gs_citation": 40, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8003365103382261096&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 13, "aff": "Dept. of Computer Science, University of Pisa, Italy; Dept. of Pure and Applied Mathematics, University of Padova, Italy", "aff_domain": "di.unipi.it;math.unipd.it", "email": "di.unipi.it;math.unipd.it", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "University of Pisa;University of Padova", "aff_unique_dep": "Dept. of Computer Science;Dept. of Pure and Applied Mathematics", "aff_unique_url": "https://www.unipi.it;https://www.unipd.it", "aff_unique_abbr": "UNIP;UP", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "Italy" }, { "id": "eeb785d982", "title": "Learning Syntactic Patterns for Automatic Hypernym Discovery", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/358aee4cc897452c00244351e4d91f69-Abstract.html", "author": "Rion Snow; Daniel Jurafsky; Andrew Y. 
Ng", "abstract": "Semantic taxonomies such as WordNet provide a rich source of knowl-\n edge for natural language processing applications, but are expensive to\n build, maintain, and extend. Motivated by the problem of automatically\n constructing and extending such taxonomies, in this paper we present a\n new algorithm for automatically learning hypernym (is-a) relations from\n text. Our method generalizes earlier work that had relied on using small\n numbers of hand-crafted regular expression patterns to identify hyper-\n nym pairs. Using \"dependency path\" features extracted from parse trees,\n we introduce a general-purpose formalization and generalization of these\n patterns. Given a training set of text containing known hypernym pairs,\n our algorithm automatically extracts useful dependency paths and applies\n them to new corpora to identify novel pairs. On our evaluation task (de-\n termining whether two nouns in a news article participate in a hypernym\n relationship), our automatically extracted database of hypernyms attains\n both higher precision and higher recall than WordNet.", "bibtex": "@inproceedings{NIPS2004_358aee4c,\n author = {Snow, Rion and Jurafsky, Daniel and Ng, Andrew},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Learning Syntactic Patterns for Automatic Hypernym Discovery},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/358aee4cc897452c00244351e4d91f69-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/358aee4cc897452c00244351e4d91f69-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/358aee4cc897452c00244351e4d91f69-Metadata.json", "review": "", "metareview": "", "pdf_size": 187141, "gs_citation": 1054, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15961022435297501470&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 29, "aff": "Computer Science Department, Stanford University; Linguistics Department, Stanford University; Computer Science Department, Stanford University", "aff_domain": "cs.stanford.edu;stanford.edu;cs.stanford.edu", "email": "cs.stanford.edu;stanford.edu;cs.stanford.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "Stanford University", "aff_unique_dep": "Computer Science Department", "aff_unique_url": "https://www.stanford.edu", "aff_unique_abbr": "Stanford", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Stanford", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "1d4c816297", "title": "Learning first-order Markov models for control", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/02f657d55eaf1c4840ce8d66fcdaf90c-Abstract.html", "author": "Pieter Abbeel; Andrew Y. Ng", "abstract": "First-order Markov models have been successfully applied to many prob- lems, for example in modeling sequential data using Markov chains, and modeling control problems using the Markov decision processes (MDP) formalism. If a \ufb01rst-order Markov model\u2019s parameters are estimated from data, the standard maximum likelihood estimator considers only the \ufb01rst-order (single-step) transitions. 
But for many problems, the \ufb01rst- order conditional independence assumptions are not satis\ufb01ed, and as a re- sult the higher order transition probabilities may be poorly approximated. Motivated by the problem of learning an MDP\u2019s parameters for control, we propose an algorithm for learning a \ufb01rst-order Markov model that ex- plicitly takes into account higher order interactions during training. Our algorithm uses an optimization criterion different from maximum likeli- hood, and allows us to learn models that capture longer range effects, but without giving up the bene\ufb01ts of using \ufb01rst-order Markov models. Our experimental results also show the new algorithm outperforming conven- tional maximum likelihood estimation in a number of control problems where the MDP\u2019s parameters are estimated from data.", "bibtex": "@inproceedings{NIPS2004_02f657d5,\n author = {Abbeel, Pieter and Ng, Andrew},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Learning first-order Markov models for control},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/02f657d55eaf1c4840ce8d66fcdaf90c-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/02f657d55eaf1c4840ce8d66fcdaf90c-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/02f657d55eaf1c4840ce8d66fcdaf90c-Metadata.json", "review": "", "metareview": "", "pdf_size": 91345, "gs_citation": 52, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5557612827646246751&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "Computer Science Department, Stanford University, Stanford, CA 94305; Computer Science Department, Stanford University, Stanford, CA 94305", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Stanford University", "aff_unique_dep": "Computer Science Department", "aff_unique_url": "https://www.stanford.edu", "aff_unique_abbr": "Stanford", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Stanford", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "458afc4372", "title": "Learning, Regularization and Ill-Posed Inverse Problems", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/33267e5dc58fad346e92471c43fcccdc-Abstract.html", "author": "Lorenzo Rosasco; Andrea Caponnetto; Ernesto D. Vito; Francesca Odone; Umberto D. Giovannini", "abstract": "Many works have shown that strong connections relate learning from ex- amples to regularization techniques for ill-posed inverse problems. Nev- ertheless by now there was no formal evidence neither that learning from examples could be seen as an inverse problem nor that theoretical results in learning theory could be independently derived using tools from reg- ularization theory. In this paper we provide a positive answer to both questions. 
Indeed, considering the square loss, we translate the learning problem in the language of regularization theory and show that consis- tency results and optimal regularization parameter choice can be derived by the discretization of the corresponding inverse problem.", "bibtex": "@inproceedings{NIPS2004_33267e5d,\n author = {Rosasco, Lorenzo and Caponnetto, Andrea and Vito, Ernesto and Odone, Francesca and Giovannini, Umberto},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Learning, Regularization and Ill-Posed Inverse Problems},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/33267e5dc58fad346e92471c43fcccdc-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/33267e5dc58fad346e92471c43fcccdc-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/33267e5dc58fad346e92471c43fcccdc-Metadata.json", "review": "", "metareview": "", "pdf_size": 83615, "gs_citation": 20, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15217319064400072288&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 16, "aff": "DISI, Universit\u00e0 di Genova; DISI, Universit\u00e0 di Genova; Dipartimento di Matematica, Universit\u00e0 di Modena + INFN, Sezione di Genova; DISI, Universit\u00e0 di Genova; DISI, Universit\u00e0 di Genova", "aff_domain": "disi.unige.it;disi.unige.it;unimo.it;fastwebnet.it;disi.unige.it", "email": "disi.unige.it;disi.unige.it;unimo.it;fastwebnet.it;disi.unige.it", "github": "", "project": "", "author_num": 5, "track": "main", "status": "Poster", "aff_unique_index": "0;0;1+2;0;0", "aff_unique_norm": "Universit\u00e0 di Genova;Universit\u00e0 di Modena;Istituto Nazionale di Fisica Nucleare", "aff_unique_dep": "DISI;Dipartimento di Matematica;Sezione di Genova", "aff_unique_url": "https://www.unige.it;https://www.unimore.it;https://www.infn.it", "aff_unique_abbr": ";;INFN", "aff_campus_unique_index": "1", "aff_campus_unique": ";Genova", "aff_country_unique_index": "0;0;0+0;0;0", "aff_country_unique": "Italy" }, { "id": "d2be1c3ddd", "title": "Limits of Spectral Clustering", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/d790c9e6c0b5e02c87b375e782ac01bc-Abstract.html", "author": "Ulrike V. Luxburg; Olivier Bousquet; Mikhail Belkin", "abstract": "An important aspect of clustering algorithms is whether the partitions constructed on finite samples converge to a useful clustering of the whole data space as the sample size increases. This paper investigates this question for normalized and unnormalized versions of the popular spec- tral clustering algorithm. Surprisingly, the convergence of unnormalized spectral clustering is more difficult to handle than the normalized case. Even though recently some first results on the convergence of normal- ized spectral clustering have been obtained, for the unnormalized case we have to develop a completely new approach combining tools from numerical integration, spectral and perturbation theory, and probability. It turns out that while in the normalized case, spectral clustering usually converges to a nice partition of the data space, in the unnormalized case the same only holds under strong additional assumptions which are not always satisfied. We conclude that our analysis gives strong evidence for the superiority of normalized spectral clustering. 
It also provides a basis for future exploration of other Laplacian-based methods.", "bibtex": "@inproceedings{NIPS2004_d790c9e6,\n author = {Luxburg, Ulrike and Bousquet, Olivier and Belkin, Mikhail},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Limits of Spectral Clustering},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/d790c9e6c0b5e02c87b375e782ac01bc-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/d790c9e6c0b5e02c87b375e782ac01bc-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/d790c9e6c0b5e02c87b375e782ac01bc-Metadata.json", "review": "", "metareview": "", "pdf_size": 69033, "gs_citation": 162, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5662428200772281331&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 17, "aff": "Max Planck Institute for Biological Cybernetics; Max Planck Institute for Biological Cybernetics; The University of Chicago, Department of Computer Science", "aff_domain": "tuebingen.mpg.de;tuebingen.mpg.de;cs.uchicago.edu", "email": "tuebingen.mpg.de;tuebingen.mpg.de;cs.uchicago.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;1", "aff_unique_norm": "Max Planck Institute for Biological Cybernetics;University of Chicago", "aff_unique_dep": "Biological Cybernetics;Department of Computer Science", "aff_unique_url": "https://www.biocybernetics.mpg.de;https://www.uchicago.edu", "aff_unique_abbr": "MPIBC;UChicago", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;1", "aff_country_unique": "Germany;United States" }, { "id": "647512f690", "title": "Linear Multilayer Independent Component Analysis for Large Natural Scenes", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/dbd22ba3bd0df8f385bdac3e9f8be207-Abstract.html", "author": "Yoshitatsu Matsuda; Kazunori Yamaguchi", "abstract": "In this paper, linear multilayer ICA (LMICA) is proposed for extracting independent components from quite high-dimensional observed signals such as large-size natural scenes. There are two phases in each layer of LMICA. One is the mapping phase, where a one-dimensional mapping is formed by a stochastic gradient algorithm which makes more highly- correlated (non-independent) signals be nearer incrementally. Another is the local-ICA phase, where each neighbor (namely, highly-correlated) pair of signals in the mapping is separated by the MaxKurt algorithm. Because LMICA separates only the highly-correlated pairs instead of all ones, it can extract independent components quite efficiently from ap- propriate observed signals. In addition, it is proved that LMICA always converges. Some numerical experiments verify that LMICA is quite ef- ficient and effective in large-size natural image processing.", "bibtex": "@inproceedings{NIPS2004_dbd22ba3,\n author = {Matsuda, Yoshitatsu and Yamaguchi, Kazunori},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Linear Multilayer Independent Component Analysis for Large Natural Scenes},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/dbd22ba3bd0df8f385bdac3e9f8be207-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/dbd22ba3bd0df8f385bdac3e9f8be207-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/dbd22ba3bd0df8f385bdac3e9f8be207-Metadata.json", "review": "", "metareview": "", "pdf_size": 177288, "gs_citation": 4, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3672577271430061662&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Department of General Systems Studies, Graduate School of Arts and Sciences, The University of Tokyo; Department of General Systems Studies, Graduate School of Arts and Sciences, The University of Tokyo", "aff_domain": "graco.c.u-tokyo.ac.jp;graco.c.u-tokyo.ac.jp", "email": "graco.c.u-tokyo.ac.jp;graco.c.u-tokyo.ac.jp", "github": "", "project": "http://www.graco.c.u-tokyo.ac.jp/~matsuda", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of Tokyo", "aff_unique_dep": "Department of General Systems Studies, Graduate School of Arts and Sciences", "aff_unique_url": "https://www.u-tokyo.ac.jp", "aff_unique_abbr": "UTokyo", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "Japan" }, { "id": "e440282e82", "title": "Log-concavity Results on Gaussian Process Methods for Supervised and Unsupervised Learning", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/7876acb66640bad41f1e1371ef30c180-Abstract.html", "author": "Liam Paninski", "abstract": "Log-concavity is an important property in the context of optimization, Laplace approximation, and sampling; Bayesian methods based on Gaus- sian process priors have become quite popular recently for classification, regression, density estimation, and point process intensity estimation. Here we prove that the predictive densities corresponding to each of these applications are log-concave, given any observed data. We also prove that the likelihood is log-concave in the hyperparameters controlling the mean function of the Gaussian prior in the density and point process in- tensity estimation cases, and the mean, covariance, and observation noise parameters in the classification and regression cases; this result leads to a useful parameterization of these hyperparameters, indicating a suitably large class of priors for which the corresponding maximum a posteriori problem is log-concave.", "bibtex": "@inproceedings{NIPS2004_7876acb6,\n author = {Paninski, Liam},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Log-concavity Results on Gaussian Process Methods for Supervised and Unsupervised Learning},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/7876acb66640bad41f1e1371ef30c180-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/7876acb66640bad41f1e1371ef30c180-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/7876acb66640bad41f1e1371ef30c180-Metadata.json", "review": "", "metareview": "", "pdf_size": 62180, "gs_citation": 31, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17132801432458220639&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "", "aff_domain": "", "email": "", "github": "", "project": "http://www.gatsby.ucl.ac.uk/~liam", "author_num": 1, "track": "main", "status": "Poster" }, { "id": "fffa53fb95", "title": "Machine Learning Applied to Perception: Decision Images for Gender Classification", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/1b113258af3968aaf3969ca67e744ff8-Abstract.html", "author": "Felix A. Wichmann; Arnulf B. Graf; Heinrich H. B\u00fclthoff; Eero P. Simoncelli; Bernhard Sch\u00f6lkopf", "abstract": "We study gender discrimination of human faces using a combination of psychophysical classi\ufb01cation and discrimination experiments together with methods from machine learning. We reduce the dimensionality of a set of face images using principal component analysis, and then train a set of linear classi\ufb01ers on this reduced representation (linear support vec- tor machines (SVMs), relevance vector machines (RVMs), Fisher linear discriminant (FLD), and prototype (prot) classi\ufb01ers) using human clas- si\ufb01cation data. Because we combine a linear preprocessor with linear classi\ufb01ers, the entire system acts as a linear classi\ufb01er, allowing us to visu- alise the decision-image corresponding to the normal vector of the separ- ating hyperplanes (SH) of each classi\ufb01er. We predict that the female-to- maleness transition along the normal vector for classi\ufb01ers closely mim- icking human classi\ufb01cation (SVM and RVM [1]) should be faster than the transition along any other direction. A psychophysical discrimina- tion experiment using the decision images as stimuli is consistent with this prediction.", "bibtex": "@inproceedings{NIPS2004_1b113258,\n author = {Wichmann, Felix A. and Graf, Arnulf and B\\\"{u}lthoff, Heinrich and Simoncelli, Eero and Sch\\\"{o}lkopf, Bernhard},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Machine Learning Applied to Perception: Decision Images for Gender Classification},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/1b113258af3968aaf3969ca67e744ff8-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/1b113258af3968aaf3969ca67e744ff8-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/1b113258af3968aaf3969ca67e744ff8-Metadata.json", "review": "", "metareview": "", "pdf_size": 122727, "gs_citation": 38, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6411966707966626262&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 15, "aff": "Max Planck Institute for Biological Cybernetics, T\u00fcbingen, Germany; Max Planck Institute for Biological Cybernetics, T\u00fcbingen, Germany; Howard Hughes Medical Institute, Center for Neural Science, New York University, USA; Max Planck Institute for Biological Cybernetics, T\u00fcbingen, Germany; Max Planck Institute for Biological Cybernetics, T\u00fcbingen, Germany", "aff_domain": "tuebingen.mpg.de; ;nyu.edu; ; ", "email": "tuebingen.mpg.de; ;nyu.edu; ; ", "github": "", "project": "", "author_num": 5, "track": "main", "status": "Poster", "aff_unique_index": "0;0;1;0;0", "aff_unique_norm": "Max Planck Institute for Biological Cybernetics;New York University", "aff_unique_dep": ";Center for Neural Science", "aff_unique_url": "https://www.biocybernetics.mpg.de;https://www.nyu.edu", "aff_unique_abbr": "MPIBC;NYU", "aff_campus_unique_index": "0;0;1;0;0", "aff_campus_unique": "T\u00fcbingen;New York", "aff_country_unique_index": "0;0;1;0;0", "aff_country_unique": "Germany;United States" }, { "id": "8faa6e54df", "title": "Making Latin Manuscripts Searchable using gHMM's", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/2e74c2cf88f68a68c84e9509abc7ea56-Abstract.html", "author": "Jaety Edwards; Yee W. Teh; Roger Bock; Michael Maire; Grace Vesom; David A. Forsyth", "abstract": "We describe a method that can make a scanned, handwritten mediaeval latin manuscript accessible to full text search. A generalized HMM is fitted, using transcribed latin to obtain a transition model and one exam- ple each of 22 letters to obtain an emission model. We show results for unigram, bigram and trigram models. Our method transcribes 25 pages of a manuscript of Terence with fair accuracy (75% of letters correctly transcribed). Search results are very strong; we use examples of vari- ant spellings to demonstrate that the search respects the ink of the doc- ument. Furthermore, our model produces fair searches on a document from which we obtained no training data.", "bibtex": "@inproceedings{NIPS2004_2e74c2cf,\n author = {Edwards, Jaety and Teh, Yee and Bock, Roger and Maire, Michael and Vesom, Grace and Forsyth, David},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Making Latin Manuscripts Searchable using gHMM\\textquotesingle s},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/2e74c2cf88f68a68c84e9509abc7ea56-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/2e74c2cf88f68a68c84e9509abc7ea56-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/2e74c2cf88f68a68c84e9509abc7ea56-Metadata.json", "review": "", "metareview": "", "pdf_size": 171967, "gs_citation": 94, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2470045249075545064&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 16, "aff": "Department of Computer Science, UC Berkeley, Berkeley, CA 94720; Department of Computer Science, UC Berkeley, Berkeley, CA 94720; Department of Computer Science, UC Berkeley, Berkeley, CA 94720; Department of Computer Science, UC Berkeley, Berkeley, CA 94720; Department of Computer Science, UC Berkeley, Berkeley, CA 94720; Department of Computer Science, UC Berkeley, Berkeley, CA 94720", "aff_domain": "cs.berkeley.edu;cs.berkeley.edu;cs.berkeley.edu;cs.berkeley.edu;cs.berkeley.edu;cs.berkeley.edu", "email": "cs.berkeley.edu;cs.berkeley.edu;cs.berkeley.edu;cs.berkeley.edu;cs.berkeley.edu;cs.berkeley.edu", "github": "", "project": "", "author_num": 6, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;0;0;0", "aff_unique_norm": "University of California, Berkeley", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.berkeley.edu", "aff_unique_abbr": "UC Berkeley", "aff_campus_unique_index": "0;0;0;0;0;0", "aff_campus_unique": "Berkeley", "aff_country_unique_index": "0;0;0;0;0;0", "aff_country_unique": "United States" }, { "id": "00faaf306d", "title": "Markov Networks for Detecting Overlapping Elements in Sequence Data", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/92a0e7a415d64ebafcb16a8ca817cde4-Abstract.html", "author": "Mark Craven; Joseph Bockhorst", "abstract": "Many sequential prediction tasks involve locating instances of pat- terns in sequences. Generative probabilistic language models, such as hidden Markov models (HMMs), have been successfully applied to many of these tasks. A limitation of these models however, is that they cannot naturally handle cases in which pattern instances overlap in arbitrary ways. We present an alternative approach, based on conditional Markov networks, that can naturally repre- sent arbitrarily overlapping elements. We show how to efficiently train and perform inference with these models. Experimental re- sults from a genomics domain show that our models are more accu- rate at locating instances of overlapping patterns than are baseline models based on HMMs.", "bibtex": "@inproceedings{NIPS2004_92a0e7a4,\n author = {Craven, Mark and Bockhorst, Joseph},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Markov Networks for Detecting Overlapping Elements in Sequence Data},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/92a0e7a415d64ebafcb16a8ca817cde4-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/92a0e7a415d64ebafcb16a8ca817cde4-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/92a0e7a415d64ebafcb16a8ca817cde4-Metadata.json", "review": "", "metareview": "", "pdf_size": 133266, "gs_citation": 80, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10153109029594630977&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "Dept. of Computer Sciences, University of Wisconsin; Dept. of Biostatistics and Medical Informatics, University of Wisconsin", "aff_domain": "cs.wisc.edu;biostat.wisc.edu", "email": "cs.wisc.edu;biostat.wisc.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of Wisconsin", "aff_unique_dep": "Dept. of Computer Sciences", "aff_unique_url": "https://www.wisc.edu", "aff_unique_abbr": "UW", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "b2b5a97368", "title": "Mass Meta-analysis in Talairach Space", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/5df07ecf4cea616e3eb384a9be3511bb-Abstract.html", "author": "Finn \u00c5. Nielsen", "abstract": "We provide a method for mass meta-analysis in a neuroinformatics database containing stereotaxic Talairach coordinates from neu- roimaging experiments. Database labels are used to group the in- dividual experiments, e.g., according to cognitive function, and the consistent pattern of the experiments within the groups are de- termined. The method voxelizes each group of experiments via a kernel density estimation, forming probability density volumes. The values in the probability density volumes are compared to null-hypothesis distributions generated by resamplings from the entire unlabeled set of experiments, and the distances to the null- hypotheses are used to sort the voxels across groups of experi- ments. This allows for mass meta-analysis, with the construction of a list with the most prominent associations between brain ar- eas and group labels. Furthermore, the method can be used for functional labeling of voxels.", "bibtex": "@inproceedings{NIPS2004_5df07ecf,\n author = {Nielsen, Finn},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Mass Meta-analysis in Talairach Space},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/5df07ecf4cea616e3eb384a9be3511bb-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/5df07ecf4cea616e3eb384a9be3511bb-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/5df07ecf4cea616e3eb384a9be3511bb-Metadata.json", "review": "", "metareview": "", "pdf_size": 156921, "gs_citation": 23, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10716912640571637519&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 19, "aff": "", "aff_domain": "", "email": "", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster" }, { "id": "d32eeafe15", "title": "Matrix Exponential Gradient Updates for On-line Learning and Bregman Projection", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/bd70364a8fcba02366697df66f50b4d4-Abstract.html", "author": "Koji Tsuda; Gunnar R\u00e4tsch; Manfred K. Warmuth", "abstract": "We address the problem of learning a symmetric positive definite matrix. The central issue is to design parameter updates that preserve positive definiteness. Our updates are motivated with the von Neumann diver- gence. Rather than treating the most general case, we focus on two key applications that exemplify our methods: On-line learning with a simple square loss and finding a symmetric positive definite matrix subject to symmetric linear constraints. The updates generalize the Exponentiated Gradient (EG) update and AdaBoost, respectively: the parameter is now a symmetric positive definite matrix of trace one instead of a probability vector (which in this context is a diagonal positive definite matrix with trace one). The generalized updates use matrix logarithms and exponen- tials to preserve positive definiteness. Most importantly, we show how the analysis of each algorithm generalizes to the non-diagonal case. We apply both new algorithms, called the Matrix Exponentiated Gradient (MEG) update and DefiniteBoost, to learn a kernel matrix from distance measurements.", "bibtex": "@inproceedings{NIPS2004_bd70364a,\n author = {Tsuda, Koji and R\\\"{a}tsch, Gunnar and Warmuth, Manfred K. K},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Matrix Exponential Gradient Updates for On-line Learning and Bregman Projection},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/bd70364a8fcba02366697df66f50b4d4-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/bd70364a8fcba02366697df66f50b4d4-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/bd70364a8fcba02366697df66f50b4d4-Metadata.json", "review": "", "metareview": "", "pdf_size": 97662, "gs_citation": 13, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6963283717456344978&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "Max Planck Institute for Biological Cybernetics; AIST CBRC + Fraunhofer FIRST; University of California at Santa Cruz", "aff_domain": "tuebingen.mpg.de;tuebingen.mpg.de;cse.ucsc.edu", "email": "tuebingen.mpg.de;tuebingen.mpg.de;cse.ucsc.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1+2;3", "aff_unique_norm": "Max Planck Institute for Biological Cybernetics;AIST;Fraunhofer Institute for Software and Systems Engineering;University of California, Santa Cruz", "aff_unique_dep": "Biological Cybernetics;CBRC;;", "aff_unique_url": "https://www.biocybernetics.mpg.de;https://www.aist.go.jp;https://www.first.fraunhofer.de/;https://www.ucsc.edu", "aff_unique_abbr": "MPIBC;AIST;Fraunhofer FIRST;UCSC", "aff_campus_unique_index": ";1", "aff_campus_unique": ";Santa Cruz", "aff_country_unique_index": "0;1+0;2", "aff_country_unique": "Germany;Japan;United States" }, { "id": "c45ac6de9a", "title": "Maximal Margin Labeling for Multi-Topic Text Categorization", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/0bed45bd5774ffddc95ffe500024f628-Abstract.html", "author": "Hideto Kazawa; Tomonori Izumitani; Hirotoshi Taira; Eisaku Maeda", "abstract": "In this paper, we address the problem of statistical learning for multi- topic text categorization (MTC), whose goal is to choose all relevant top- ics (a label) from a given set of topics. The proposed algorithm, Max- imal Margin Labeling (MML), treats all possible labels as independent classes and learns a multi-class classi\ufb01er on the induced multi-class cate- gorization problem. To cope with the data sparseness caused by the huge number of possible labels, MML combines some prior knowledge about label prototypes and a maximal margin criterion in a novel way. Experi- ments with multi-topic Web pages show that MML outperforms existing learning algorithms including Support Vector Machines.", "bibtex": "@inproceedings{NIPS2004_0bed45bd,\n author = {Kazawa, Hideto and Izumitani, Tomonori and Taira, Hirotoshi and Maeda, Eisaku},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Maximal Margin Labeling for Multi-Topic Text Categorization},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/0bed45bd5774ffddc95ffe500024f628-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/0bed45bd5774ffddc95ffe500024f628-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/0bed45bd5774ffddc95ffe500024f628-Metadata.json", "review": "", "metareview": "", "pdf_size": 130847, "gs_citation": 188, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4043474968672118496&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "NTT Communication Science Laboratories; NTT Communication Science Laboratories; NTT Communication Science Laboratories; NTT Communication Science Laboratories", "aff_domain": "cslab.kecl.ntt.co.jp;cslab.kecl.ntt.co.jp;cslab.kecl.ntt.co.jp;cslab.kecl.ntt.co.jp", "email": "cslab.kecl.ntt.co.jp;cslab.kecl.ntt.co.jp;cslab.kecl.ntt.co.jp;cslab.kecl.ntt.co.jp", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "NTT Communication Science Laboratories", "aff_unique_dep": "", "aff_unique_url": "https://www.ntt-csl.com", "aff_unique_abbr": "NTT CSL", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "Japan" }, { "id": "b645ff05ed", "title": "Maximising Sensitivity in a Spiking Network", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/a8345c3bb9e3896ea538ce77ffaf2c20-Abstract.html", "author": "Anthony J. Bell; Lucas C. Parra", "abstract": "We use unsupervised probabilistic machine learning ideas to try to ex- plain the kinds of learning observed in real neurons, the goal being to connect abstract principles of self-organisation to known biophysi- cal processes. For example, we would like to explain Spike Timing- Dependent Plasticity (see [5,6] and Figure 3A), in terms of information theory. Starting out, we explore the optimisation of a network sensitiv- ity measure related to maximising the mutual information between input spike timings and output spike timings. Our derivations are analogous to those in ICA, except that the sensitivity of output timings to input tim- ings is maximised, rather than the sensitivity of output \u2018\ufb01ring rates\u2019 to inputs. ICA and related approaches have been successful in explaining the learning of many properties of early visual receptive \ufb01elds in rate cod- ing models, and we are hoping for similar gains in understanding of spike coding in networks, and how this is supported, in principled probabilistic ways, by cellular biophysical processes. For now, in our initial simula- tions, we show that our derived rule can learn synaptic weights which can unmix, or demultiplex, mixed spike trains. That is, it can recover inde- pendent point processes embedded in distributed correlated input spike trains, using an adaptive single-layer feedforward spiking network.", "bibtex": "@inproceedings{NIPS2004_a8345c3b,\n author = {Bell, Anthony and Parra, Lucas},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Maximising Sensitivity in a Spiking Network},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/a8345c3bb9e3896ea538ce77ffaf2c20-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/a8345c3bb9e3896ea538ce77ffaf2c20-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/a8345c3bb9e3896ea538ce77ffaf2c20-Metadata.json", "review": "", "metareview": "", "pdf_size": 90493, "gs_citation": 26, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1108818257489445401&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "Redwood Neuroscience Institute; Biomedical Engineering Department, City College of New York", "aff_domain": "rni.org;ccny.cuny.edu", "email": "rni.org;ccny.cuny.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "Redwood Neuroscience Institute;City College of New York", "aff_unique_dep": "Neuroscience;Biomedical Engineering Department", "aff_unique_url": "http://www.redwoodneuro.org/;https://www.ccny.cuny.edu", "aff_unique_abbr": "RNI;CCNY", "aff_campus_unique_index": "1", "aff_campus_unique": ";New York", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "5d29606512", "title": "Maximum Likelihood Estimation of Intrinsic Dimension", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/74934548253bcab8490ebd74afed7031-Abstract.html", "author": "Elizaveta Levina; Peter J. Bickel", "abstract": "We propose a new method for estimating intrinsic dimension of a dataset derived by applying the principle of maximum likelihood to the distances between close neighbors. We derive the estimator by a Poisson process approximation, assess its bias and variance theo- retically and by simulations, and apply it to a number of simulated and real datasets. We also show it has the best overall performance compared with two other intrinsic dimension estimators.", "bibtex": "@inproceedings{NIPS2004_74934548,\n author = {Levina, Elizaveta and Bickel, Peter},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Maximum Likelihood Estimation of Intrinsic Dimension},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/74934548253bcab8490ebd74afed7031-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/74934548253bcab8490ebd74afed7031-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/74934548253bcab8490ebd74afed7031-Metadata.json", "review": "", "metareview": "", "pdf_size": 135780, "gs_citation": 1139, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5428801473201484119&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 13, "aff": "Department of Statistics, University of Michigan; Department of Statistics, University of California", "aff_domain": "umich.edu;stat.berkeley.edu", "email": "umich.edu;stat.berkeley.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "University of Michigan;University of California", "aff_unique_dep": "Department of Statistics;Department of Statistics", "aff_unique_url": "https://www.umich.edu;https://www.universityofcalifornia.edu", "aff_unique_abbr": "UM;UC", "aff_campus_unique_index": "0", "aff_campus_unique": "Ann Arbor;", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "db413219ec", "title": "Maximum Margin Clustering", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/6403675579f6114559c90de0014cd3d6-Abstract.html", "author": "Linli Xu; James Neufeld; Bryce Larson; Dale Schuurmans", "abstract": "We propose a new method for clustering based on finding maximum mar- gin hyperplanes through data. By reformulating the problem in terms of the implied equivalence relation matrix, we can pose the problem as a convex integer program. Although this still yields a difficult com- putational problem, the hard-clustering constraints can be relaxed to a soft-clustering formulation which can be feasibly solved with a semidef- inite program. Since our clustering technique only depends on the data through the kernel matrix, we can easily achieve nonlinear clusterings in the same manner as spectral clustering. Experimental results show that our maximum margin clustering technique often obtains more accurate results than conventional clustering methods. The real benefit of our ap- proach, however, is that it leads naturally to a semi-supervised training method for support vector machines. By maximizing the margin simul- taneously on labeled and unlabeled training data, we achieve state of the art performance by using a single, integrated learning principle.", "bibtex": "@inproceedings{NIPS2004_64036755,\n author = {Xu, Linli and Neufeld, James and Larson, Bryce and Schuurmans, Dale},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Maximum Margin Clustering},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/6403675579f6114559c90de0014cd3d6-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/6403675579f6114559c90de0014cd3d6-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/6403675579f6114559c90de0014cd3d6-Metadata.json", "review": "", "metareview": "", "pdf_size": 119724, "gs_citation": 647, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10889621952489392458&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": ";;;", "aff_domain": ";;;", "email": ";;;", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster" }, { "id": "55647fcd89", "title": "Maximum-Margin Matrix Factorization", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/e0688d13958a19e087e123148555e4b4-Abstract.html", "author": "Nathan Srebro; Jason Rennie; Tommi S. Jaakkola", "abstract": "We present a novel approach to collaborative prediction, using low-norm instead of low-rank factorizations. The approach is inspired by, and has strong connections to, large-margin linear discrimination. We show how to learn low-norm factorizations by solving a semi-de\ufb01nite program, and discuss generalization error bounds for them.", "bibtex": "@inproceedings{NIPS2004_e0688d13,\n author = {Srebro, Nathan and Rennie, Jason and Jaakkola, Tommi},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Maximum-Margin Matrix Factorization},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/e0688d13958a19e087e123148555e4b4-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/e0688d13958a19e087e123148555e4b4-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/e0688d13958a19e087e123148555e4b4-Metadata.json", "review": "", "metareview": "", "pdf_size": 139685, "gs_citation": 1442, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17783398024066920055&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 24, "aff": "Dept. of Computer Science, University of Toronto, Toronto, ON, CANADA; Computer Science and Artificial Intelligence Lab, Massachusetts Institute of Technology, Cambridge, MA, USA; Computer Science and Artificial Intelligence Lab, Massachusetts Institute of Technology, Cambridge, MA, USA", "aff_domain": "cs.toronto.edu;csail.mit.edu;csail.mit.edu", "email": "cs.toronto.edu;csail.mit.edu;csail.mit.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;1", "aff_unique_norm": "University of Toronto;Massachusetts Institute of Technology", "aff_unique_dep": "Dept. of Computer Science;Computer Science and Artificial Intelligence Lab", "aff_unique_url": "https://www.utoronto.ca;https://www.mit.edu", "aff_unique_abbr": "U of T;MIT", "aff_campus_unique_index": "0;1;1", "aff_campus_unique": "Toronto;Cambridge", "aff_country_unique_index": "0;1;1", "aff_country_unique": "Canada;United States" }, { "id": "a29d1c385d", "title": "Message Errors in Belief Propagation", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/9813b270ed0288e7c0388f0fd4ec68f5-Abstract.html", "author": "Alexander T. Ihler; John W. Fisher; Alan S. 
Willsky", "abstract": "Belief propagation (BP) is an increasingly popular method of perform- ing approximate inference on arbitrary graphical models. At times, even further approximations are required, whether from quantization or other simplified message representations or from stochastic approxima- tion methods. Introducing such errors into the BP message computations has the potential to adversely affect the solution obtained. We analyze this effect with respect to a particular measure of message error, and show bounds on the accumulation of errors in the system. This leads both to convergence conditions and error bounds in traditional and approximate BP message passing.", "bibtex": "@inproceedings{NIPS2004_9813b270,\n author = {Ihler, Alexander and Fisher, John and Willsky, Alan},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Message Errors in Belief Propagation},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/9813b270ed0288e7c0388f0fd4ec68f5-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/9813b270ed0288e7c0388f0fd4ec68f5-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/9813b270ed0288e7c0388f0fd4ec68f5-Metadata.json", "review": "", "metareview": "", "pdf_size": 107009, "gs_citation": 55, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=365808736679892623&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 16, "aff": "Department of Electrical Engineering and Computer Science; Department of Electrical Engineering and Computer Science; Department of Electrical Engineering and Computer Science", "aff_domain": "mit.edu;csail.mit.edu;mit.edu", "email": "mit.edu;csail.mit.edu;mit.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "Massachusetts Institute of Technology", "aff_unique_dep": "Department of Electrical Engineering and Computer Science", "aff_unique_url": "https://web.mit.edu", "aff_unique_abbr": "MIT", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Cambridge", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "f3e66bfb73", "title": "Methods Towards Invasive Human Brain Computer Interfaces", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/98b418276d571e623651fc1d471c7811-Abstract.html", "author": "Thomas N. Lal; Thilo Hinterberger; Guido Widman; Michael Schr\u00f6der; N. J. Hill; Wolfgang Rosenstiel; Christian E. Elger; Niels Birbaumer; Bernhard Sch\u00f6lkopf", "abstract": "During the last ten years there has been growing interest in the develop- ment of Brain Computer Interfaces (BCIs). The field has mainly been driven by the needs of completely paralyzed patients to communicate. With a few exceptions, most human BCIs are based on extracranial elec- troencephalography (EEG). However, reported bit rates are still low. One reason for this is the low signal-to-noise ratio of the EEG [16]. We are currently investigating if BCIs based on electrocorticography (ECoG) are a viable alternative. In this paper we present the method and examples of intracranial EEG recordings of three epilepsy patients with electrode grids placed on the motor cortex. The patients were asked to repeat- edly imagine movements of two kinds, e.g., tongue or finger movements. 
We analyze the classifiability of the data using Support Vector Machines (SVMs) [18, 21] and Recursive Channel Elimination (RCE) [11].", "bibtex": "@inproceedings{NIPS2004_98b41827,\n author = {Lal, Thomas and Hinterberger, Thilo and Widman, Guido and Schr\\\"{o}der, Michael and Hill, N. and Rosenstiel, Wolfgang and Elger, Christian and Birbaumer, Niels and Sch\\\"{o}lkopf, Bernhard},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Methods Towards Invasive Human Brain Computer Interfaces},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/98b418276d571e623651fc1d471c7811-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/98b418276d571e623651fc1d471c7811-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/98b418276d571e623651fc1d471c7811-Metadata.json", "review": "", "metareview": "", "pdf_size": 224023, "gs_citation": 284, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9128741955838240270&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "Max-Planck-Institute for Biological Cybernetics, T\u00fcbingen, Germany; Eberhard Karls University, Dept. of Medical Psychology and Behavioral Neurobiology, T\u00fcbingen, Germany; University of Bonn, Department of Epileptology, Bonn, Germany; Eberhard Karls University, Dept. of Computer Engineering, T\u00fcbingen, Germany; Max-Planck-Institute for Biological Cybernetics, T\u00fcbingen, Germany; Eberhard Karls University, Dept. of Computer Engineering, T\u00fcbingen, Germany; University of Bonn, Department of Epileptology, Bonn, Germany; Max-Planck-Institute for Biological Cybernetics, T\u00fcbingen, Germany; Eberhard Karls University, Dept. of Medical Psychology and Behavioral Neurobiology, T\u00fcbingen, Germany+Center for Cognitive Neuroscience, University of Trento, Italy", "aff_domain": "tuebingen.mpg.de;tuebingen.mpg.de;tuebingen.mpg.de;uni-tuebingen.de;uni-tuebingen.de;ukb.uni-bonn.de;ukb.uni-bonn.de;informatik.uni-tuebingen.de;informatik.uni-tuebingen.de", "email": "tuebingen.mpg.de;tuebingen.mpg.de;tuebingen.mpg.de;uni-tuebingen.de;uni-tuebingen.de;ukb.uni-bonn.de;ukb.uni-bonn.de;informatik.uni-tuebingen.de;informatik.uni-tuebingen.de", "github": "", "project": "", "author_num": 9, "track": "main", "status": "Poster", "aff_unique_index": "0;1;2;1;0;1;2;0;1+3", "aff_unique_norm": "Max-Planck-Institute for Biological Cybernetics;Eberhard Karls University;University of Bonn;University of Trento", "aff_unique_dep": ";Dept. of Medical Psychology and Behavioral Neurobiology;Department of Epileptology;Center for Cognitive Neuroscience", "aff_unique_url": "https://www.biocybernetics.mpg.de;https://www.uni-tuebingen.de;https://www.uni-bonn.de;https://www.unitn.it", "aff_unique_abbr": "MPIBC;;UBonn;", "aff_campus_unique_index": "0;0;1;0;0;0;1;0;0", "aff_campus_unique": "T\u00fcbingen;Bonn;", "aff_country_unique_index": "0;0;0;0;0;0;0;0;0+1", "aff_country_unique": "Germany;Italy" }, { "id": "e2a31bf182", "title": "Methods for Estimating the Computational Power and Generalization Capability of Neural Microcircuits", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/9ff7c9eb9d37f434db778f59178012da-Abstract.html", "author": "Wolfgang Maass; Robert A. Legenstein; Nils Bertschinger", "abstract": "What makes a neural microcircuit computationally powerful? 
Or more precisely, which measurable quantities could explain why one microcir- cuit C is better suited for a particular family of computational tasks than another microcircuit C ? We propose in this article quantitative measures for evaluating the computational power and generalization capability of a neural microcircuit, and apply them to generic neural microcircuit mod- els drawn from different distributions. We validate the proposed mea- sures by comparing their prediction with direct evaluations of the com- putational performance of these microcircuit models. This procedure is applied first to microcircuit models that differ with regard to the spatial range of synaptic connections and with regard to the scale of synaptic efficacies in the circuit, and then to microcircuit models that differ with regard to the level of background input currents and the level of noise on the membrane potential of neurons. In this case the proposed method allows us to quantify differences in the computational power and gen- eralization capability of circuits in different dynamic regimes (UP- and DOWN-states) that have been demonstrated through intracellular record- ings in vivo.", "bibtex": "@inproceedings{NIPS2004_9ff7c9eb,\n author = {Maass, Wolfgang and Legenstein, Robert and Bertschinger, Nils},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Methods for Estimating the Computational Power and Generalization Capability of Neural Microcircuits},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/9ff7c9eb9d37f434db778f59178012da-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/9ff7c9eb9d37f434db778f59178012da-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/9ff7c9eb9d37f434db778f59178012da-Metadata.json", "review": "", "metareview": "", "pdf_size": 158947, "gs_citation": 64, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14712280609557453361&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 16, "aff": ";;", "aff_domain": ";;", "email": ";;", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster" }, { "id": "0201bcb037", "title": "Mistake Bounds for Maximum Entropy Discrimination", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/b7f1f29db7c23648f2bb8d6a8ee0469b-Abstract.html", "author": "Philip M. Long; Xinyu Wu", "abstract": "We establish a mistake bound for an ensemble method for classification based on maximizing the entropy of voting weights subject to margin constraints. The bound is the same as a general bound proved for the Weighted Majority Algorithm, and similar to bounds for other variants of Winnow. We prove a more refined bound that leads to a nearly opti- mal algorithm for learning disjunctions, again, based on the maximum entropy principle. We describe a simplification of the on-line maximum entropy method in which, after each iteration, the margin constraints are replaced with a single linear inequality. The simplified algorithm, which takes a similar form to Winnow, achieves the same mistake bounds.", "bibtex": "@inproceedings{NIPS2004_b7f1f29d,\n author = {Long, Philip and Wu, Xinyu},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Mistake Bounds for Maximum Entropy Discrimination},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/b7f1f29db7c23648f2bb8d6a8ee0469b-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/b7f1f29db7c23648f2bb8d6a8ee0469b-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/b7f1f29db7c23648f2bb8d6a8ee0469b-Metadata.json", "review": "", "metareview": "", "pdf_size": 83278, "gs_citation": 15, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14364082395640550325&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "Center for Computational Learning Systems, Columbia University; Department of Computer Science, National University of Singapore", "aff_domain": "cs.columbia.edu;comp.nus.edu.sg", "email": "cs.columbia.edu;comp.nus.edu.sg", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "Columbia University;National University of Singapore", "aff_unique_dep": "Center for Computational Learning Systems;Department of Computer Science", "aff_unique_url": "https://www.columbia.edu;https://www.nus.edu.sg", "aff_unique_abbr": "Columbia;NUS", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1", "aff_country_unique": "United States;Singapore" }, { "id": "85a52db97f", "title": "Modeling Conversational Dynamics as a Mixed-Memory Markov Process", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/f1748d6b0fd9d439f71450117eba2725-Abstract.html", "author": "Tanzeem Choudhury; Sumit Basu", "abstract": "influences", "bibtex": "@inproceedings{NIPS2004_f1748d6b,\n author = {Choudhury, Tanzeem and Basu, Sumit},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Modeling Conversational Dynamics as a Mixed-Memory Markov Process},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/f1748d6b0fd9d439f71450117eba2725-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/f1748d6b0fd9d439f71450117eba2725-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/f1748d6b0fd9d439f71450117eba2725-Metadata.json", "review": "", "metareview": "", "pdf_size": 1406541, "gs_citation": 93, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16092219717104828091&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 15, "aff": "Intel Research; Microsoft Research", "aff_domain": "intel.com;microsoft.com", "email": "intel.com;microsoft.com", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "Intel;Microsoft", "aff_unique_dep": "Intel Research;Microsoft Research", "aff_unique_url": "https://www.intel.com;https://www.microsoft.com/en-us/research", "aff_unique_abbr": "Intel;MSR", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "a94188e17d", "title": "Modeling Nonlinear Dependencies in Natural Images using Mixture of Laplacian Distribution", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/577fd60255d4bb0f466464849ffe6d8e-Abstract.html", "author": "Hyun J. Park; Te W. 
Lee", "abstract": "Capturing dependencies in images in an unsupervised manner is important for many image processing applications. We propose a new method for capturing nonlinear dependencies in images of natural scenes. This method is an extension of the linear Independent Component Analysis (ICA) method by building a hierarchical model based on ICA and mixture of Laplacian distribution. The model parameters are learned via an EM algorithm and it can accurately capture variance correlation and other high order structures in a simple manner. We visualize the learned variance structure and demonstrate applications to image segmentation and denoising.", "bibtex": "@inproceedings{NIPS2004_577fd602,\n author = {Park, Hyun and Lee, Te},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Modeling Nonlinear Dependencies in Natural Images using Mixture of Laplacian Distribution},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/577fd60255d4bb0f466464849ffe6d8e-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/577fd60255d4bb0f466464849ffe6d8e-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/577fd60255d4bb0f466464849ffe6d8e-Metadata.json", "review": "", "metareview": "", "pdf_size": 395145, "gs_citation": 31, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=203885937082793544&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 3, "aff": "Institute for Neural Computation, UCSD; Institute for Neural Computation, UCSD", "aff_domain": "ucsd.edu;ucsd.edu", "email": "ucsd.edu;ucsd.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of California, San Diego", "aff_unique_dep": "Institute for Neural Computation", "aff_unique_url": "https://www.ucsd.edu", "aff_unique_abbr": "UCSD", "aff_campus_unique_index": "0;0", "aff_campus_unique": "San Diego", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "754318d05e", "title": "Modelling Uncertainty in the Game of Go", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/b38e5ff5f816ac6e4169bce9314b2996-Abstract.html", "author": "David H. Stern; Thore Graepel; David MacKay", "abstract": "Go is an ancient oriental game whose complexity has defeated at- tempts to automate it. We suggest using probability in a Bayesian sense to model the uncertainty arising from the vast complexity of the game tree. We present a simple conditional Markov ran- dom field model for predicting the pointwise territory outcome of a game. The topology of the model reflects the spatial structure of the Go board. We describe a version of the Swendsen-Wang pro- cess for sampling from the model during learning and apply loopy belief propagation for rapid inference and prediction. The model is trained on several hundred records of professional games. Our experimental results indicate that the model successfully learns to predict territory despite its simplicity.", "bibtex": "@inproceedings{NIPS2004_b38e5ff5,\n author = {Stern, David and Graepel, Thore and MacKay, David},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Modelling Uncertainty in the Game of Go},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/b38e5ff5f816ac6e4169bce9314b2996-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/b38e5ff5f816ac6e4169bce9314b2996-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/b38e5ff5f816ac6e4169bce9314b2996-Metadata.json", "review": "", "metareview": "", "pdf_size": 269266, "gs_citation": 39, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12160422017516853355&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "Department of Physics, Cambridge University; Microsoft Research, Cambridge, U.K.; Department of Physics, Cambridge University", "aff_domain": "cam.ac.uk;microsoft.com;mrao.cam.ac.uk", "email": "cam.ac.uk;microsoft.com;mrao.cam.ac.uk", "github": "", "project": "www.gobase.org", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;0", "aff_unique_norm": "University of Cambridge;Microsoft", "aff_unique_dep": "Department of Physics;Microsoft Research", "aff_unique_url": "https://www.cam.ac.uk;https://www.microsoft.com/en-us/research", "aff_unique_abbr": "Cambridge;MSR", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Cambridge", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United Kingdom" }, { "id": "de05a1fa43", "title": "Multi-agent Cooperation in Diverse Population Games", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/573eec40e4ef4f2089531dd5cbf629f8-Abstract.html", "author": "K. Wong; S. W. Lim; Z. Gao", "abstract": "We consider multi-agent systems whose agents compete for resources by striving to be in the minority group. The agents adapt to the environment by reinforcement learning of the preferences of the policies they hold. Diversity of preferences of policies is introduced by adding random bi- ases to the initial cumulative payoffs of their policies. We explain and provide evidence that agent cooperation becomes increasingly important when diversity increases. Analyses of these mechanisms yield excellent agreement with simulations over nine decades of data.", "bibtex": "@inproceedings{NIPS2004_573eec40,\n author = {Wong, K. and Lim, S. and Gao, Z.},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Multi-agent Cooperation in Diverse Population Games},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/573eec40e4ef4f2089531dd5cbf629f8-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/573eec40e4ef4f2089531dd5cbf629f8-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/573eec40e4ef4f2089531dd5cbf629f8-Metadata.json", "review": "", "metareview": "", "pdf_size": 113159, "gs_citation": 2, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10662941060393858206&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": ";;", "aff_domain": ";;", "email": ";;", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster" }, { "id": "70efcec995", "title": "Multiple Alignment of Continuous Time Series", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/362387494f6be6613daea643a7706a42-Abstract.html", "author": "Jennifer Listgarten; Radford M. Neal; Sam T. 
Roweis; Andrew Emili", "abstract": "Multiple realizations of continuous-valued time series from a stochastic process often contain systematic variations in rate and amplitude. To leverage the information contained in such noisy replicate sets, we need to align them in an appropriate way (for example, to allow the data to be properly combined by adaptive averaging). We present the Continuous Profile Model (CPM), a generative model in which each observed time series is a non-uniformly subsampled version of a single latent trace, to which local rescaling and additive noise are applied. After unsupervised training, the learned trace represents a canonical, high resolution fusion of all the replicates. As well, an alignment in time and scale of each observation to this trace can be found by inference in the model. We apply CPM to successfully align speech signals from multiple speakers and sets of Liquid Chromatography-Mass Spectrometry proteomic data.", "bibtex": "@inproceedings{NIPS2004_36238749,\n author = {Listgarten, Jennifer and Neal, Radford and Roweis, Sam and Emili, Andrew},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Multiple Alignment of Continuous Time Series},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/362387494f6be6613daea643a7706a42-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/362387494f6be6613daea643a7706a42-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/362387494f6be6613daea643a7706a42-Metadata.json", "review": "", "metareview": "", "pdf_size": 671247, "gs_citation": 178, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13861230934250449019&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 28, "aff": "Department of Computer Science; Department of Computer Science; Department of Computer Science; Banting and Best Department of Medical Research and Program in Proteomics and Bioinformatics", "aff_domain": "cs.toronto.edu;cs.toronto.edu;cs.toronto.edu;utoronto.ca", "email": "cs.toronto.edu;cs.toronto.edu;cs.toronto.edu;utoronto.ca", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;1", "aff_unique_norm": "Unknown Institution;University of Toronto", "aff_unique_dep": "Department of Computer Science;Banting and Best Department of Medical Research", "aff_unique_url": ";https://www.utoronto.ca", "aff_unique_abbr": ";U of T", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "1", "aff_country_unique": ";Canada" }, { "id": "8fb2b5cf4f", "title": "Multiple Relational Embedding", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/148260a1ce4fe4907df4cd475c442e28-Abstract.html", "author": "Roland Memisevic; Geoffrey E. Hinton", "abstract": "We describe a way of using multiple different types of similarity rela- tionship to learn a low-dimensional embedding of a dataset. Our method chooses different, possibly overlapping representations of similarity by individually reweighting the dimensions of a common underlying latent space. When applied to a single similarity relation that is based on Eu- clidean distances between the input data points, the method reduces to simple dimensionality reduction. 
If additional information is available about the dataset or about subsets of it, we can use this information to clean up or otherwise improve the embedding. We demonstrate the po- tential usefulness of this form of semi-supervised dimensionality reduc- tion on some simple examples.", "bibtex": "@inproceedings{NIPS2004_148260a1,\n author = {Memisevic, Roland and Hinton, Geoffrey E},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Multiple Relational Embedding},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/148260a1ce4fe4907df4cd475c442e28-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/148260a1ce4fe4907df4cd475c442e28-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/148260a1ce4fe4907df4cd475c442e28-Metadata.json", "review": "", "metareview": "", "pdf_size": 192751, "gs_citation": 51, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12129612188893611066&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 15, "aff": "Department of Computer Science, University of Toronto; Department of Computer Science, University of Toronto", "aff_domain": "cs.toronto.edu;cs.toronto.edu", "email": "cs.toronto.edu;cs.toronto.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of Toronto", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.utoronto.ca", "aff_unique_abbr": "U of T", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Toronto", "aff_country_unique_index": "0;0", "aff_country_unique": "Canada" }, { "id": "e9f65f5abf", "title": "Nearly Tight Bounds for the Continuum-Armed Bandit Problem", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/b75bd27b5a48a1b48987a18d831f6336-Abstract.html", "author": "Robert D. Kleinberg", "abstract": "In the multi-armed bandit problem, an online algorithm must choose from a set of strategies in a sequence of n trials so as to minimize the total cost of the chosen strategies. While nearly tight upper and lower bounds are known in the case when the strategy set is finite, much less is known when there is an infinite strategy set. Here we consider the case when the set of strategies is a subset of Rd, and the cost functions are continuous. In the d = 1 case, we improve on the best-known upper and lower bounds, closing the gap to a sublogarithmic factor. We also con- sider the case where d > 1 and the cost functions are convex, adapting a recent online convex optimization algorithm of Zinkevich to the sparser feedback model of the multi-armed bandit problem.", "bibtex": "@inproceedings{NIPS2004_b75bd27b,\n author = {Kleinberg, Robert},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Nearly Tight Bounds for the Continuum-Armed Bandit Problem},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/b75bd27b5a48a1b48987a18d831f6336-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/b75bd27b5a48a1b48987a18d831f6336-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/b75bd27b5a48a1b48987a18d831f6336-Metadata.json", "review": "", "metareview": "", "pdf_size": 100740, "gs_citation": 502, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14914905310765903782&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "M.I.T. CSAIL, Cambridge, MA 02139", "aff_domain": "csail.mit.edu", "email": "csail.mit.edu", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "Massachusetts Institute of Technology", "aff_unique_dep": "Computer Science and Artificial Intelligence Laboratory", "aff_unique_url": "https://www.csail.mit.edu", "aff_unique_abbr": "MIT CSAIL", "aff_campus_unique_index": "0", "aff_campus_unique": "Cambridge", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "id": "c08c1e2390", "title": "Neighbourhood Components Analysis", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/42fe880812925e520249e808937738d2-Abstract.html", "author": "Jacob Goldberger; Geoffrey E. Hinton; Sam T. Roweis; Ruslan Salakhutdinov", "abstract": "In this paper we propose a novel method for learning a Mahalanobis distance measure to be used in the KNN classification algorithm. The algorithm directly maximizes a stochastic variant of the leave-one-out KNN score on the training set. It can also learn a low-dimensional lin- ear embedding of labeled data that can be used for data visualization and fast classification. Unlike other methods, our classification model is non-parametric, making no assumptions about the shape of the class distributions or the boundaries between them. The performance of the method is demonstrated on several data sets, both for metric learning and linear dimensionality reduction.", "bibtex": "@inproceedings{NIPS2004_42fe8808,\n author = {Goldberger, Jacob and Hinton, Geoffrey E and Roweis, Sam and Salakhutdinov, Russ R},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Neighbourhood Components Analysis},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/42fe880812925e520249e808937738d2-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/42fe880812925e520249e808937738d2-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/42fe880812925e520249e808937738d2-Metadata.json", "review": "", "metareview": "", "pdf_size": 267467, "gs_citation": 3088, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3340208766465257854&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 23, "aff": "Department of Computer Science, University of Toronto; Department of Computer Science, University of Toronto; Department of Computer Science, University of Toronto; Department of Computer Science, University of Toronto", "aff_domain": "cs.toronto.edu;cs.toronto.edu;cs.toronto.edu;cs.toronto.edu", "email": "cs.toronto.edu;cs.toronto.edu;cs.toronto.edu;cs.toronto.edu", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "University of Toronto", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.utoronto.ca", "aff_unique_abbr": "U of T", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Toronto", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "Canada" }, { "id": "7f80caf842", "title": "Neural Network Computation by In Vitro Transcriptional Circuits", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/65f2a94c8c2d56d5b43a1a3d9d811102-Abstract.html", "author": "Jongmin Kim; John Hopfield; Erik Winfree", "abstract": "The structural similarity of neural networks and genetic regulatory net- works to digital circuits, and hence to each other, was noted from the very beginning of their study [1, 2]. In this work, we propose a simple biochemical system whose architecture mimics that of genetic regula- tion and whose components allow for in vitro implementation of arbi- trary circuits. We use only two enzymes in addition to DNA and RNA molecules: RNA polymerase (RNAP) and ribonuclease (RNase). We develop a rate equation for in vitro transcriptional networks, and de- rive a correspondence with general neural network rate equations [3]. As proof-of-principle demonstrations, an associative memory task and a feedforward network computation are shown by simulation. A difference between the neural network and biochemical models is also highlighted: global coupling of rate equations through enzyme saturation can lead to global feedback regulation, thus allowing a simple network without explicit mutual inhibition to perform the winner-take-all computation. Thus, the full complexity of the cell is not necessary for biochemical computation: a wide range of functional behaviors can be achieved with a small set of biochemical components.", "bibtex": "@inproceedings{NIPS2004_65f2a94c,\n author = {Kim, Jongmin and Hopfield, John and Winfree, Erik},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Neural Network Computation by In Vitro Transcriptional Circuits},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/65f2a94c8c2d56d5b43a1a3d9d811102-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/65f2a94c8c2d56d5b43a1a3d9d811102-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/65f2a94c8c2d56d5b43a1a3d9d811102-Metadata.json", "review": "", "metareview": "", "pdf_size": 261817, "gs_citation": 115, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12956593805451239331&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 12, "aff": "Biology1, CNS and Computer Science2, California Institute of Technology; Molecular Biology3, Princeton University; Biology1, CNS and Computer Science2, California Institute of Technology", "aff_domain": "dna.caltech.edu;dna.caltech.edu;princeton.edu", "email": "dna.caltech.edu;dna.caltech.edu;princeton.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;0", "aff_unique_norm": "California Institute of Technology;Princeton University", "aff_unique_dep": "Biology1, CNS and Computer Science2;Molecular Biology", "aff_unique_url": "https://www.caltech.edu;https://www.princeton.edu", "aff_unique_abbr": "Caltech;Princeton", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Pasadena;", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "f3a05368aa", "title": "New Criteria and a New Algorithm for Learning in Multi-Agent Systems", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/220a7f49d42406598587a66f02584ac3-Abstract.html", "author": "Rob Powers; Yoav Shoham", "abstract": "We propose a new set of criteria for learning algorithms in multi-agent systems, one that is more stringent and (we argue) better justified than previous proposed criteria. Our criteria, which apply most straightfor- wardly in repeated games with average rewards, consist of three require- ments: (a) against a specified class of opponents (this class is a parameter of the criterion) the algorithm yield a payoff that approaches the payoff of the best response, (b) against other opponents the algorithm's payoff at least approach (and possibly exceed) the security level payoff (or max- imin value), and (c) subject to these requirements, the algorithm achieve a close to optimal payoff in self-play. We furthermore require that these average payoffs be achieved quickly. We then present a novel algorithm, and show that it meets these new criteria for a particular parameter class, the class of stationary opponents. Finally, we show that the algorithm is effective not only in theory, but also empirically. Using a recently introduced comprehensive game theoretic test suite, we show that the algorithm almost universally outperforms previous learning algorithms.", "bibtex": "@inproceedings{NIPS2004_220a7f49,\n author = {Powers, Rob and Shoham, Yoav},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {New Criteria and a New Algorithm for Learning in Multi-Agent Systems},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/220a7f49d42406598587a66f02584ac3-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/220a7f49d42406598587a66f02584ac3-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/220a7f49d42406598587a66f02584ac3-Metadata.json", "review": "", "metareview": "", "pdf_size": 78740, "gs_citation": 166, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11037632454053108242&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "Computer Science Department, Stanford University; Computer Science Department, Stanford University", "aff_domain": "cs.stanford.edu;cs.stanford.edu", "email": "cs.stanford.edu;cs.stanford.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Stanford University", "aff_unique_dep": "Computer Science Department", "aff_unique_url": "https://www.stanford.edu", "aff_unique_abbr": "Stanford", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Stanford", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "e828b01317", "title": "Newscast EM", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/d8bf84be3800d12f74d8b05e9b89836f-Abstract.html", "author": "Wojtek Kowalczyk; Nikos Vlassis", "abstract": "We propose a gossip-based distributed algorithm for Gaussian mixture learning, Newscast EM. The algorithm operates on network topologies where each node observes a local quantity and can communicate with other nodes in an arbitrary point-to-point fashion. The main difference between Newscast EM and the standard EM algorithm is that the M-step in our case is implemented in a decentralized manner: (random) pairs of nodes repeatedly exchange their local parameter estimates and com- bine them by (weighted) averaging. We provide theoretical evidence and demonstrate experimentally that, under this protocol, nodes converge ex- ponentially fast to the correct estimates in each M-step of the EM algo- rithm.", "bibtex": "@inproceedings{NIPS2004_d8bf84be,\n author = {Kowalczyk, Wojtek and Vlassis, Nikos},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Newscast EM},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/d8bf84be3800d12f74d8b05e9b89836f-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/d8bf84be3800d12f74d8b05e9b89836f-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/d8bf84be3800d12f74d8b05e9b89836f-Metadata.json", "review": "", "metareview": "", "pdf_size": 85516, "gs_citation": 130, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2468824385151922017&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 13, "aff": "Department of Computer Science, Vrije Universiteit Amsterdam, The Netherlands; Informatics Institute, University of Amsterdam, The Netherlands", "aff_domain": "cs.vu.nl;science.uva.nl", "email": "cs.vu.nl;science.uva.nl", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "Vrije Universiteit Amsterdam;University of Amsterdam", "aff_unique_dep": "Department of Computer Science;Informatics Institute", "aff_unique_url": "https://www.vu.nl;https://www.uva.nl", "aff_unique_abbr": "VU Amsterdam;UvA", "aff_campus_unique_index": "0", "aff_campus_unique": "Amsterdam;", "aff_country_unique_index": "0;0", "aff_country_unique": "Netherlands" }, { "id": "5b611df90e", "title": "Non-Local Manifold Tangent Learning", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/0b7e926154c1274e8b602ff0d7c133d7-Abstract.html", "author": "Yoshua Bengio; Martin Monperrus", "abstract": "We claim and present arguments to the effect that a large class of man- ifold learning algorithms that are essentially local and can be framed as kernel learning algorithms will suffer from the curse of dimensionality, at the dimension of the true underlying manifold. This observation sug- gests to explore non-local manifold learning algorithms which attempt to discover shared structure in the tangent planes at different positions. A criterion for such an algorithm is proposed and experiments estimating a tangent plane prediction function are presented, showing its advantages with respect to local manifold learning algorithms: it is able to general- ize very far from training data (on learning handwritten character image rotations), where a local non-parametric method fails.", "bibtex": "@inproceedings{NIPS2004_0b7e9261,\n author = {Bengio, Yoshua and Monperrus, Martin},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Non-Local Manifold Tangent Learning},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/0b7e926154c1274e8b602ff0d7c133d7-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/0b7e926154c1274e8b602ff0d7c133d7-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/0b7e926154c1274e8b602ff0d7c133d7-Metadata.json", "review": "", "metareview": "", "pdf_size": 91926, "gs_citation": 135, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4214643287677890739&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 14, "aff": ";", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster" }, { "id": "6b93a773c3", "title": "Nonlinear Blind Source Separation by Integrating Independent Component Analysis and Slow Feature Analysis", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/47fd3c87f42f55d4b233417d49c34783-Abstract.html", "author": "Tobias Blaschke; Laurenz Wiskott", "abstract": "In contrast to the equivalence of linear blind source separation and linear independent component analysis it is not possible to recover the origi- nal source signal from some unknown nonlinear transformations of the sources using only the independence assumption. Integrating the ob- jectives of statistical independence and temporal slowness removes this indeterminacy leading to a new method for nonlinear blind source sepa- ration. The principle of temporal slowness is adopted from slow feature analysis, an unsupervised method to extract slowly varying features from a given observed vectorial signal. The performance of the algorithm is demonstrated on nonlinearly mixed speech data.", "bibtex": "@inproceedings{NIPS2004_47fd3c87,\n author = {Blaschke, Tobias and Wiskott, Laurenz},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Nonlinear Blind Source Separation by Integrating Independent Component Analysis and Slow Feature Analysis},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/47fd3c87f42f55d4b233417d49c34783-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/47fd3c87f42f55d4b233417d49c34783-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/47fd3c87f42f55d4b233417d49c34783-Metadata.json", "review": "", "metareview": "", "pdf_size": 1936395, "gs_citation": 4, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1673050675721777454&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 7, "aff": "Institute for Theoretical Biology, Humboldt University Berlin; Institute for Theoretical Biology, Humboldt University Berlin", "aff_domain": "biologie.hu-berlin.de;biologie.hu-berlin.de", "email": "biologie.hu-berlin.de;biologie.hu-berlin.de", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Humboldt University Berlin", "aff_unique_dep": "Institute for Theoretical Biology", "aff_unique_url": "https://www.hu-berlin.de", "aff_unique_abbr": "HU Berlin", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Berlin", "aff_country_unique_index": "0;0", "aff_country_unique": "Germany" }, { "id": "99a74a19a5", "title": "Nonparametric Transforms of Graph Kernels for Semi-Supervised Learning", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/2e7ceec8361275c4e31fee5fe422740b-Abstract.html", "author": "Xiaojin Zhu; Jaz Kandola; Zoubin Ghahramani; John D. Lafferty", "abstract": "We present an algorithm based on convex optimization for constructing kernels for semi-supervised learning. The kernel matrices are derived from the spectral decomposition of graph Laplacians, and combine la- beled and unlabeled data in a systematic fashion. Unlike previous work using diffusion kernels and Gaussian random \ufb01eld kernels, a nonpara- metric kernel approach is presented that incorporates order constraints during optimization. This results in \ufb02exible kernels and avoids the need to choose among different parametric forms. Our approach relies on a quadratically constrained quadratic program (QCQP), and is compu- tationally feasible for large datasets. We evaluate the kernels on real datasets using support vector machines, with encouraging results.", "bibtex": "@inproceedings{NIPS2004_2e7ceec8,\n author = {Zhu, Jerry and Kandola, Jaz and Ghahramani, Zoubin and Lafferty, John},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Nonparametric Transforms of Graph Kernels for Semi-Supervised Learning},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/2e7ceec8361275c4e31fee5fe422740b-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/2e7ceec8361275c4e31fee5fe422740b-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/2e7ceec8361275c4e31fee5fe422740b-Metadata.json", "review": "", "metareview": "", "pdf_size": 112921, "gs_citation": 222, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1262623096725589829&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 18, "aff": ";;;", "aff_domain": ";;;", "email": ";;;", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster" }, { "id": "29bb233c99", "title": "Object Classification from a Single Example Utilizing Class Relevance Metrics", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/ef1e491a766ce3127556063d49bc2f98-Abstract.html", "author": "Michael Fink", "abstract": "We describe a framework for learning an object classifier from a single example. This goal is achieved by emphasizing the relevant dimensions for classification using available examples of related classes. Learning to accurately classify objects from a single training example is often un- feasible due to overfitting effects. However, if the instance representa- tion provides that the distance between each two instances of the same class is smaller than the distance between any two instances from dif- ferent classes, then a nearest neighbor classifier could achieve perfect performance with a single training example. We therefore suggest a two stage strategy. First, learn a metric over the instances that achieves the distance criterion mentioned above, from available examples of other related classes. Then, using the single examples, define a nearest neigh- bor classifier where distance is evaluated by the learned class relevance metric. Finding a metric that emphasizes the relevant dimensions for classification might not be possible when restricted to linear projections. We therefore make use of a kernel based metric learning algorithm. Our setting encodes object instances as sets of locality based descriptors and adopts an appropriate image kernel for the class relevance metric learn- ing. The proposed framework for learning from a single example is demonstrated in a synthetic setting and on a character classification task.", "bibtex": "@inproceedings{NIPS2004_ef1e491a,\n author = {Fink, Michael},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Object Classification from a Single Example Utilizing Class Relevance Metrics},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/ef1e491a766ce3127556063d49bc2f98-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/ef1e491a766ce3127556063d49bc2f98-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/ef1e491a766ce3127556063d49bc2f98-Metadata.json", "review": "", "metareview": "", "pdf_size": 141863, "gs_citation": 383, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2128554957973684218&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 11, "aff": "", "aff_domain": "", "email": "", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster" }, { "id": "799622ea50", "title": "On Semi-Supervised Classification", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/7aee26c309def8c5a2a076eb250b8f36-Abstract.html", "author": "Balaji Krishnapuram; David Williams; Ya Xue; Lawrence Carin; M\u00e1rio Figueiredo; Alexander J. Hartemink", "abstract": "A graph-based prior is proposed for parametric semi-supervised classi- fication. The prior utilizes both labelled and unlabelled data; it also in- tegrates features from multiple views of a given sample (e.g., multiple sensors), thus implementing a Bayesian form of co-training. An EM algorithm for training the classifier automatically adjusts the tradeoff be- tween the contributions of: (a) the labelled data; (b) the unlabelled data; and (c) the co-training information. Active label query selection is per- formed using a mutual information based criterion that explicitly uses the unlabelled data and the co-training information. Encouraging results are presented on public benchmarks and on measured data from single and multiple sensors.", "bibtex": "@inproceedings{NIPS2004_7aee26c3,\n author = {Krishnapuram, Balaji and Williams, David and Xue, Ya and Carin, Lawrence and Figueiredo, M\\'{a}rio and Hartemink, Alexander},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {On Semi-Supervised Classification},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/7aee26c309def8c5a2a076eb250b8f36-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/7aee26c309def8c5a2a076eb250b8f36-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/7aee26c309def8c5a2a076eb250b8f36-Metadata.json", "review": "", "metareview": "", "pdf_size": 237384, "gs_citation": 165, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9860593143669137931&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 17, "aff": "Duke University, USA; Duke University, USA; Duke University, USA; Duke University, USA; Duke University, USA; Instituto de Telecomunica\u00e7\u00f5es, Instituto Superior T\u00e9cnico, Portugal", "aff_domain": ";;;;;", "email": ";;;;;", "github": "", "project": "", "author_num": 6, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;0;0;1", "aff_unique_norm": "Duke University;Instituto Superior T\u00e9cnico", "aff_unique_dep": ";Instituto de Telecomunica\u00e7\u00f5es", "aff_unique_url": "https://www.duke.edu;https://www.ist.utl.pt", "aff_unique_abbr": "Duke;IST", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0;1", "aff_country_unique": "United States;Portugal" }, { "id": "755b3acc3a", "title": "On the Adaptive Properties of Decision Trees", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/6412fef87392ae8c987b0ecc79da1902-Abstract.html", "author": "Clayton Scott; Robert Nowak", "abstract": "Decision trees are surprisingly adaptive in three important respects: They automatically (1) adapt to favorable conditions near the Bayes decision boundary; (2) focus on data distributed on lower dimensional manifolds; (3) reject irrelevant features. In this paper we examine a decision tree based on dyadic splits that adapts to each of these conditions to achieve minimax optimal rates of convergence. The proposed classifier is the first known to achieve these optimal rates while being practical and im- plementable.", "bibtex": "@inproceedings{NIPS2004_6412fef8,\n author = {Scott, Clayton and Nowak, Robert},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {On the Adaptive Properties of Decision Trees},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/6412fef87392ae8c987b0ecc79da1902-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/6412fef87392ae8c987b0ecc79da1902-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/6412fef87392ae8c987b0ecc79da1902-Metadata.json", "review": "", "metareview": "", "pdf_size": 82092, "gs_citation": 11, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1179927216627448137&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "Statistics Department, Rice University; Electrical and Computer Engineering, University of Wisconsin", "aff_domain": "rice.edu;engr.wisc.edu", "email": "rice.edu;engr.wisc.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "Rice University;University of Wisconsin", "aff_unique_dep": "Statistics Department;Electrical and Computer Engineering", "aff_unique_url": "https://www.rice.edu;https://www.wisc.edu", "aff_unique_abbr": "Rice;UW", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "957b01ea65", "title": "On-Chip Compensation of Device-Mismatch Effects in Analog VLSI Neural Networks", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/f6185f0ef02dcaec414a3171cd01c697-Abstract.html", "author": "Miguel Figueroa; Seth Bridges; Chris Diorio", "abstract": "Device mismatch in VLSI degrades the accuracy of analog arithmetic circuits and lowers the learning performance of large-scale neural net- works implemented in this technology. We show compact, low-power on-chip calibration techniques that compensate for device mismatch. Our techniques enable large-scale analog VLSI neural networks with learn- ing performance on the order of 10 bits. We demonstrate our techniques on a 64-synapse linear perceptron learning with the Least-Mean-Squares (LMS) algorithm, and fabricated in a 0.35m CMOS process.", "bibtex": "@inproceedings{NIPS2004_f6185f0e,\n author = {Figueroa, Miguel and Bridges, Seth and Diorio, Chris},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {On-Chip Compensation of Device-Mismatch Effects in Analog VLSI Neural Networks},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/f6185f0ef02dcaec414a3171cd01c697-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/f6185f0ef02dcaec414a3171cd01c697-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/f6185f0ef02dcaec414a3171cd01c697-Metadata.json", "review": "", "metareview": "", "pdf_size": 447677, "gs_citation": 23, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12374611247478777115&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "Department of Electrical Engineering, Universidad de Concepci \u00b4on; Computer Science & Engineering, University of Washington; Computer Science & Engineering, University of Washington", "aff_domain": "die.udec.cl;cs.washington.edu;cs.washington.edu", "email": "die.udec.cl;cs.washington.edu;cs.washington.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;1", "aff_unique_norm": "Universidad de Concepci\u00f3n;University of Washington", "aff_unique_dep": "Department of Electrical Engineering;Computer Science & Engineering", "aff_unique_url": "https://www.udec.cl;https://www.washington.edu", "aff_unique_abbr": ";UW", "aff_campus_unique_index": "1;1", "aff_campus_unique": ";Seattle", "aff_country_unique_index": "0;1;1", "aff_country_unique": "Chile;United States" }, { "id": "a8ab972b48", "title": "Online Bounds for Bayesian Algorithms", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/c60d870eaad6a3946ab3e8734466e532-Abstract.html", "author": "Sham M. Kakade; Andrew Y. Ng", "abstract": "We present a competitive analysis of Bayesian learning algorithms in the online learning setting and show that many simple Bayesian algorithms (such as Gaussian linear regression and Bayesian logistic regression) per- form favorably when compared, in retrospect, to the single best model in the model class. The analysis does not assume that the Bayesian algo- rithms\u2019 modeling assumptions are \u201ccorrect,\u201d and our bounds hold even if the data is adversarially chosen. For Gaussian linear regression (us- ing logloss), our error bounds are comparable to the best bounds in the online learning literature, and we also provide a lower bound showing that Gaussian linear regression is optimal in a certain worst case sense. We also give bounds for some widely used maximum a posteriori (MAP) estimation algorithms, including regularized logistic regression.", "bibtex": "@inproceedings{NIPS2004_c60d870e,\n author = {Kakade, Sham M and Ng, Andrew},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Online Bounds for Bayesian Algorithms},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/c60d870eaad6a3946ab3e8734466e532-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/c60d870eaad6a3946ab3e8734466e532-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/c60d870eaad6a3946ab3e8734466e532-Metadata.json", "review": "", "metareview": "", "pdf_size": 114096, "gs_citation": 73, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18206866392676448822&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 20, "aff": "Computer and Information Science Department, University of Pennsylvania; Computer Science Department, Stanford University", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "University of Pennsylvania;Stanford University", "aff_unique_dep": "Computer and Information Science Department;Computer Science Department", "aff_unique_url": "https://www.upenn.edu;https://www.stanford.edu", "aff_unique_abbr": "UPenn;Stanford", "aff_campus_unique_index": "1", "aff_campus_unique": ";Stanford", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "2252ca6879", "title": "Optimal Aggregation of Classifiers and Boosting Maps in Functional Magnetic Resonance Imaging", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/0415740eaa4d9decbc8da001d3fd805f-Abstract.html", "author": "Vladimir Koltchinskii; Manel Mart\u00ednez-ram\u00f3n; Stefan Posse", "abstract": "We study a method of optimal data-driven aggregation of classifiers in a convex combination and establish tight upper bounds on its excess risk with respect to a convex loss function under the assumption that the so- lution of optimal aggregation problem is sparse. We use a boosting type algorithm of optimal aggregation to develop aggregate classifiers of ac- tivation patterns in fMRI based on locally trained SVM classifiers. The aggregation coefficients are then used to design a \"boosting map\" of the brain needed to identify the regions with most significant impact on clas- sification.", "bibtex": "@inproceedings{NIPS2004_0415740e,\n author = {Koltchinskii, Vladimir and Mart\\'{\\i}nez-ram\\'{o}n, Manel and Posse, Stefan},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Optimal Aggregation of Classifiers and Boosting Maps in Functional Magnetic Resonance Imaging},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/0415740eaa4d9decbc8da001d3fd805f-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/0415740eaa4d9decbc8da001d3fd805f-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/0415740eaa4d9decbc8da001d3fd805f-Metadata.json", "review": "", "metareview": "", "pdf_size": 118653, "gs_citation": 11, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12448938500639054328&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": ";;", "aff_domain": ";;", "email": ";;", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster" }, { "id": "086e9b7e99", "title": "Optimal Information Decoding from Neuronal Populations with Specific Stimulus Selectivity", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/30f8f6b940d1073d8b6a5eebc46dd6e5-Abstract.html", "author": "Marcelo A. Montemurro; Stefano Panzeri", "abstract": "A typical neuron in visual cortex receives most inputs from other cortical neurons with a roughly similar stimulus preference. Does this arrange- ment of inputs allow efficient readout of sensory information by the tar- get cortical neuron? We address this issue by using simple modelling of neuronal population activity and information theoretic tools. We find that efficient synaptic information transmission requires that the tuning curve of the afferent neurons is approximately as wide as the spread of stim- ulus preferences of the afferent neurons reaching the target neuron. By meta analysis of neurophysiological data we found that this is the case for cortico-cortical inputs to neurons in visual cortex. We suggest that the organization of V1 cortico-cortical synaptic inputs allows optimal in- formation transmission.", "bibtex": "@inproceedings{NIPS2004_30f8f6b9,\n author = {Montemurro, Marcelo and Panzeri, Stefano},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Optimal Information Decoding from Neuronal Populations with Specific Stimulus Selectivity},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/30f8f6b940d1073d8b6a5eebc46dd6e5-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/30f8f6b940d1073d8b6a5eebc46dd6e5-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/30f8f6b940d1073d8b6a5eebc46dd6e5-Metadata.json", "review": "", "metareview": "", "pdf_size": 142205, "gs_citation": 4, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6153647009202566208&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "The University of Manchester Faculty of Life Sciences Moffat Building PO Box 88, Manchester M60 1QD, UK; The University of Manchester Faculty of Life Sciences Moffat Building PO Box 88, Manchester M60 1QD, UK", "aff_domain": "manchester.ac.uk;manchester.ac.uk", "email": "manchester.ac.uk;manchester.ac.uk", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of Manchester", "aff_unique_dep": "Faculty of Life Sciences", "aff_unique_url": "https://www.manchester.ac.uk", "aff_unique_abbr": "UoM", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Manchester", "aff_country_unique_index": "0;0", "aff_country_unique": "United Kingdom" }, { "id": "ad9a8aa564", "title": "Optimal sub-graphical models", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/f15eda31a2da646eea513b0f81a5414d-Abstract.html", "author": "Mukund Narasimhan; Jeff A. Bilmes", "abstract": "We investigate the problem of reducing the complexity of a graphical model (G, PG) by finding a subgraph H of G, chosen from a class of subgraphs H, such that H is optimal with respect to KL-divergence. We do this by first defining a decomposition tree representation for G, which is closely related to the junction-tree representation for G. We then give an algorithm which uses this representation to compute the optimal H H. Gavril [2] and Tarjan [3] have used graph separation properties to solve several combinatorial optimization problems when the size of the minimal separators in the graph is bounded. We present an extension of this technique which applies to some important choices of H even when the size of the minimal separators of G are arbitrarily large. In particular, this applies to problems such as finding an optimal subgraphical model over a (k - 1)-tree of a graphical model over a k-tree (for arbitrary k) and selecting an optimal subgraphical model with (a constant) d fewer edges with respect to KL-divergence can be solved in time polynomial in |V (G)| using this formulation. 1 Introduction and Preliminaries", "bibtex": "@inproceedings{NIPS2004_f15eda31,\n author = {Narasimhan, Mukund and Bilmes, Jeff A},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Optimal sub-graphical models},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/f15eda31a2da646eea513b0f81a5414d-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/f15eda31a2da646eea513b0f81a5414d-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/f15eda31a2da646eea513b0f81a5414d-Metadata.json", "review": "", "metareview": "", "pdf_size": 78343, "gs_citation": 5, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8869510778805529400&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": ";", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster" }, { "id": "f50ccf0ade", "title": "Outlier Detection with One-class Kernel Fisher Discriminants", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/1680e9fa7b4dd5d62ece800239bb53bd-Abstract.html", "author": "Volker Roth", "abstract": "The problem of detecting \"atypical objects\" or \"outliers\" is one of the classical topics in (robust) statistics. Recently, it has been proposed to address this problem by means of one-class SVM classifiers. The main conceptual shortcoming of most one-class approaches, however, is that in a strict sense they are unable to detect outliers, since the expected fraction of outliers has to be specified in advance. The method presented in this paper overcomes this problem by relating kernelized one-class classifica- tion to Gaussian density estimation in the induced feature space. Having established this relation, it is possible to identify \"atypical objects\" by quantifying their deviations from the Gaussian model. For RBF kernels it is shown that the Gaussian model is \"rich enough\" in the sense that it asymptotically provides an unbiased estimator for the true density. In or- der to overcome the inherent model selection problem, a cross-validated likelihood criterion for selecting all free model parameters is applied.", "bibtex": "@inproceedings{NIPS2004_1680e9fa,\n author = {Roth, Volker},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Outlier Detection with One-class Kernel Fisher Discriminants},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/1680e9fa7b4dd5d62ece800239bb53bd-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/1680e9fa7b4dd5d62ece800239bb53bd-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/1680e9fa7b4dd5d62ece800239bb53bd-Metadata.json", "review": "", "metareview": "", "pdf_size": 154677, "gs_citation": 87, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13458962717376912350&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "ETH Zurich, Institute of Computational Science", "aff_domain": "inf.ethz.ch", "email": "inf.ethz.ch", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "ETH Zurich", "aff_unique_dep": "Institute of Computational Science", "aff_unique_url": "https://www.ethz.ch", "aff_unique_abbr": "ETHZ", "aff_country_unique_index": "0", "aff_country_unique": "Switzerland" }, { "id": "734bdd9a23", "title": "PAC-Bayes Learning of Conjunctions and Classification of Gene-Expression Data", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/026a39ae63343c68b5223a95f3e17616-Abstract.html", "author": "Mario Marchand; Mohak Shah", "abstract": "We propose a \u201csoft greedy\u201d learning algorithm for building small conjunctions of simple threshold functions, called rays, de\ufb01ned on single real-valued attributes. We also propose a PAC-Bayes risk bound which is minimized for classi\ufb01ers achieving a non-trivial tradeo\ufb00 between sparsity (the number of rays used) and the mag- nitude of the separating margin of each ray. Finally, we test the soft greedy algorithm on four DNA micro-array data sets.", "bibtex": "@inproceedings{NIPS2004_026a39ae,\n author = {Marchand, Mario and Shah, Mohak},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {PAC-Bayes Learning of Conjunctions and Classification of Gene-Expression Data},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/026a39ae63343c68b5223a95f3e17616-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/026a39ae63343c68b5223a95f3e17616-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/026a39ae63343c68b5223a95f3e17616-Metadata.json", "review": "", "metareview": "", "pdf_size": 186526, "gs_citation": 17, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=975803813352603302&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "IFT-GLO, Universit\u00e9 Laval; SITE, University of Ottawa", "aff_domain": "ift.ulaval.ca;site.uottawa.ca", "email": "ift.ulaval.ca;site.uottawa.ca", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "Universit\u00e9 Laval;University of Ottawa", "aff_unique_dep": "IFT-GLO;SITE", "aff_unique_url": "https://www.ulaval.ca;https://www.uottawa.ca", "aff_unique_abbr": "UL;U Ottawa", "aff_campus_unique_index": "0", "aff_campus_unique": "Quebec;", "aff_country_unique_index": "0;0", "aff_country_unique": "Canada" }, { "id": "d78645af4c", "title": "Parallel Support Vector Machines: The Cascade SVM", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/d756d3d2b9dac72449a6a6926534558a-Abstract.html", "author": "Hans P. Graf; Eric Cosatto; L\u00e9on Bottou; Igor Dourdanovic; Vladimir Vapnik", "abstract": "We describe an algorithm for support vector machines (SVM) that can be parallelized efficiently and scales to very large problems with hundreds of thousands of training vectors. Instead of analyzing the whole training set in one optimization step, the data are split into subsets and optimized separately with multiple SVMs. The partial results are combined and filtered again in a \u2018Cascade\u2019 of SVMs, until the global optimum is reached. The Cascade SVM can be spread over multiple processors with minimal communication overhead and requires far less memory, since the kernel matrices are much smaller than for a regular SVM. Convergence to the global optimum is guaranteed with multiple passes through the Cascade, but already a single pass provides good generalization. A single pass is 5x \u2013 10x faster than a regular SVM for problems of 100,000 vectors when implemented on a single processor. Parallel implementations on a cluster of 16 processors were tested with over 1 million vectors (2-class problems), converging in a day or two, while a regular SVM never converged in over a week.", "bibtex": "@inproceedings{NIPS2004_d756d3d2,\n author = {Graf, Hans and Cosatto, Eric and Bottou, L\\'{e}on and Dourdanovic, Igor and Vapnik, Vladimir},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Parallel Support Vector Machines: The Cascade SVM},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/d756d3d2b9dac72449a6a6926534558a-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/d756d3d2b9dac72449a6a6926534558a-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/d756d3d2b9dac72449a6a6926534558a-Metadata.json", "review": "", "metareview": "", "pdf_size": 120151, "gs_citation": 642, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14779313686537086368&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 13, "aff": ";;;;", "aff_domain": ";;;;", "email": ";;;;", "github": "", "project": "", "author_num": 5, "track": "main", "status": "Poster" }, { "id": "5822839467", "title": "Parametric Embedding for Class Visualization", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/1d94108e907bb8311d8802b48fd54b4a-Abstract.html", "author": "Tomoharu Iwata; Kazumi Saito; Naonori Ueda; Sean Stromsten; Thomas L. Griffiths; Joshua B. Tenenbaum", "abstract": "In this paper, we propose a new method, Parametric Embedding (PE), for visualizing the posteriors estimated over a mixture model. PE simultane- ously embeds both objects and their classes in a low-dimensional space. PE takes as input a set of class posterior vectors for given data points, and tries to preserve the posterior structure in an embedding space by minimizing a sum of Kullback-Leibler divergences, under the assump- tion that samples are generated by a Gaussian mixture with equal covari- ances in the embedding space. PE has many potential uses depending on the source of the input data, providing insight into the classi\ufb01er\u2019s be- havior in supervised, semi-supervised and unsupervised settings. The PE algorithm has a computational advantage over conventional embedding methods based on pairwise object relations since its complexity scales with the product of the number of objects and the number of classes. We demonstrate PE by visualizing supervised categorization of web pages, semi-supervised categorization of digits, and the relations of words and latent topics found by an unsupervised algorithm, Latent Dirichlet Allo- cation.", "bibtex": "@inproceedings{NIPS2004_1d94108e,\n author = {Iwata, Tomoharu and Saito, Kazumi and Ueda, Naonori and Stromsten, Sean and Griffiths, Thomas and Tenenbaum, Joshua},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Parametric Embedding for Class Visualization},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/1d94108e907bb8311d8802b48fd54b4a-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/1d94108e907bb8311d8802b48fd54b4a-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/1d94108e907bb8311d8802b48fd54b4a-Metadata.json", "review": "", "metareview": "", "pdf_size": 545145, "gs_citation": 117, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17818108039355293900&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 28, "aff": "NTT Communication Science Laboratories, NTT Corporation; NTT Communication Science Laboratories, NTT Corporation; NTT Communication Science Laboratories, NTT Corporation; Department of Brain and Cognitive Sciences, Massachusetts Institute of Technology; Department of Brain and Cognitive Sciences, Massachusetts Institute of Technology; Department of Brain and Cognitive Sciences, Massachusetts Institute of Technology", "aff_domain": "cslab.kecl.ntt.co.jp;cslab.kecl.ntt.co.jp;cslab.kecl.ntt.co.jp;mit.edu;mit.edu;mit.edu", "email": "cslab.kecl.ntt.co.jp;cslab.kecl.ntt.co.jp;cslab.kecl.ntt.co.jp;mit.edu;mit.edu;mit.edu", "github": "", "project": "", "author_num": 6, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;1;1;1", "aff_unique_norm": "NTT Corporation;Massachusetts Institute of Technology", "aff_unique_dep": "Communication Science Laboratories;Department of Brain and Cognitive Sciences", "aff_unique_url": "https://www.ntt.co.jp;https://web.mit.edu", "aff_unique_abbr": "NTT;MIT", "aff_campus_unique_index": "1;1;1", "aff_campus_unique": ";Cambridge", "aff_country_unique_index": "0;0;0;1;1;1", "aff_country_unique": "Japan;United States" }, { "id": "c5987701de", "title": "Pictorial Structures for Molecular Modeling: Interpreting Density Maps", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/2288f691b58edecadcc9a8691762b4fd-Abstract.html", "author": "Frank Dimaio; George Phillips; Jude W. Shavlik", "abstract": "X-ray crystallography is currently the most common way protein structures are elucidated. One of the most time-consuming steps in the crystallographic process is interpretation of the electron density map, a task that involves finding patterns in a three-dimensional picture of a protein. This paper describes DEFT (DEFormable Template), an algorithm using pictorial structures to build a flexible protein model from the protein's amino-acid sequence. Matching this pictorial structure into the density map is a way of automating density-map interpretation. Also described are several extensions to the pictorial structure matching algorithm necessary for this automated interpretation. DEFT is tested on a set of density maps ranging from 2 to 4\u00c5 resolution, producing root- mean-squared errors ranging from 1.38 to 1.84\u00c5.", "bibtex": "@inproceedings{NIPS2004_2288f691,\n author = {Dimaio, Frank and Phillips, George and Shavlik, Jude},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Pictorial Structures for Molecular Modeling: Interpreting Density Maps},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/2288f691b58edecadcc9a8691762b4fd-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/2288f691b58edecadcc9a8691762b4fd-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/2288f691b58edecadcc9a8691762b4fd-Metadata.json", "review": "", "metareview": "", "pdf_size": 384011, "gs_citation": 3, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13986750347830193833&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 13, "aff": "Department of Computer Sciences; Department of Computer Sciences; Department of Biochemistry", "aff_domain": "cs.wisc.edu;cs.wisc.edu;biochem.wisc.edu", "email": "cs.wisc.edu;cs.wisc.edu;biochem.wisc.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;1", "aff_unique_norm": "University of Wisconsin-Madison;Institution not specified", "aff_unique_dep": "Department of Computer Sciences;Department of Biochemistry", "aff_unique_url": "https://www.cs.wisc.edu;", "aff_unique_abbr": "UW-Madison;", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States;" }, { "id": "fc2770143a", "title": "Planning for Markov Decision Processes with Sparse Stochasticity", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/23fc4cba066f390a8cc729c7592b6ee8-Abstract.html", "author": "Maxim Likhachev; Sebastian Thrun; Geoffrey J. Gordon", "abstract": "Planning algorithms designed for deterministic worlds, such as A* search, usually run much faster than algorithms designed for worlds with uncertain action outcomes, such as value iteration. Real-world planning problems often exhibit uncertainty, which forces us to use the slower algorithms to solve them. Many real-world planning problems exhibit sparse uncertainty: there are long sequences of deterministic actions which accomplish tasks like moving sensor platforms into place, inter- spersed with a small number of sensing actions which have uncertain out- comes. In this paper we describe a new planning algorithm, called MCP (short for MDP Compression Planning), which combines A* search with value iteration for solving Stochastic Shortest Path problem in MDPs with sparse stochasticity. We present experiments which show that MCP can run substantially faster than competing planners in domains with sparse uncertainty; these experiments are based on a simulation of a ground robot cooperating with a helicopter to fill in a partial map and move to a goal location.", "bibtex": "@inproceedings{NIPS2004_23fc4cba,\n author = {Likhachev, Maxim and Thrun, Sebastian and Gordon, Geoffrey J},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Planning for Markov Decision Processes with Sparse Stochasticity},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/23fc4cba066f390a8cc729c7592b6ee8-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/23fc4cba066f390a8cc729c7592b6ee8-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/23fc4cba066f390a8cc729c7592b6ee8-Metadata.json", "review": "", "metareview": "", "pdf_size": 258615, "gs_citation": 31, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=445580218708508389&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "School of Computer Science, Carnegie Mellon University; School of Computer Science, Carnegie Mellon University; Dept. of Computer Science, Stanford University", "aff_domain": "cs.cmu.edu;cs.cmu.edu;stanford.edu", "email": "cs.cmu.edu;cs.cmu.edu;stanford.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;1", "aff_unique_norm": "Carnegie Mellon University;Stanford University", "aff_unique_dep": "School of Computer Science;Department of Computer Science", "aff_unique_url": "https://www.cmu.edu;https://www.stanford.edu", "aff_unique_abbr": "CMU;Stanford", "aff_campus_unique_index": "0;0;1", "aff_campus_unique": "Pittsburgh;Stanford", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "70e4650f60", "title": "Probabilistic Computation in Spiking Populations", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/a8aa681aaa4588a8dbd3b42b26d59a1a-Abstract.html", "author": "Richard S. Zemel; Rama Natarajan; Peter Dayan; Quentin J. Huys", "abstract": "As animals interact with their environments, they must constantly update estimates about their states. Bayesian models combine prior probabil- ities, a dynamical model and sensory evidence to update estimates op- timally. These models are consistent with the results of many diverse psychophysical studies. However, little is known about the neural rep- resentation and manipulation of such Bayesian information, particularly in populations of spiking neurons. We consider this issue, suggesting a model based on standard neural architecture and activations. We illus- trate the approach on a simple random walk example, and apply it to a sensorimotor integration task that provides a particularly compelling example of dynamic probabilistic computation.", "bibtex": "@inproceedings{NIPS2004_a8aa681a,\n author = {Zemel, Richard and Natarajan, Rama and Dayan, Peter and Huys, Quentin},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Probabilistic Computation in Spiking Populations},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/a8aa681aaa4588a8dbd3b42b26d59a1a-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/a8aa681aaa4588a8dbd3b42b26d59a1a-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/a8aa681aaa4588a8dbd3b42b26d59a1a-Metadata.json", "review": "", "metareview": "", "pdf_size": 217754, "gs_citation": 43, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16850423763429060138&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 17, "aff": ";;;", "aff_domain": ";;;", "email": ";;;", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster" }, { "id": "168f0be141", "title": "Probabilistic Inference of Alternative Splicing Events in Microarray Data", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/94aef38441efa3380a3bed3faf1f9d5d-Abstract.html", "author": "Ofer Shai; Brendan J. Frey; Quaid D. Morris; Qun Pan; Christine Misquitta; Benjamin J. Blencowe", "abstract": "Alternative splicing (AS) is an important and frequent step in mammalian gene expression that allows a single gene to specify multiple products, and is crucial for the regulation of fundamental biological processes. The extent of AS regulation, and the mechanisms involved, are not well un- derstood. We have developed a custom DNA microarray platform for surveying AS levels on a large scale. We present here a generative model for the AS Array Platform (GenASAP) and demonstrate its utility for quantifying AS levels in different mouse tissues. Learning is performed using a variational expectation maximization algorithm, and the parame- ters are shown to correctly capture expected AS trends. A comparison of the results obtained with a well-established but low through-put experi- mental method demonstrate that AS levels obtained from GenASAP are highly predictive of AS levels in mammalian tissues.", "bibtex": "@inproceedings{NIPS2004_94aef384,\n author = {Shai, Ofer and Frey, Brendan J and Morris, Quaid and Pan, Qun and Misquitta, Christine and Blencowe, Benjamin},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Probabilistic Inference of Alternative Splicing Events in Microarray Data},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/94aef38441efa3380a3bed3faf1f9d5d-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/94aef38441efa3380a3bed3faf1f9d5d-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/94aef38441efa3380a3bed3faf1f9d5d-Metadata.json", "review": "", "metareview": "", "pdf_size": 133706, "gs_citation": 2, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3377902935780096234&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": ";;;;;", "aff_domain": ";;;;;", "email": ";;;;;", "github": "", "project": "", "author_num": 6, "track": "main", "status": "Poster" }, { "id": "d9a225d11a", "title": "Proximity Graphs for Clustering and Manifold Learning", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/dcda54e29207294d8e7e1b537338b1c0-Abstract.html", "author": "Richard S. Zemel; Miguel \u00c1. 
Carreira-Perpi\u00f1\u00e1n", "abstract": "Many machine learning algorithms for clustering or dimensionality re- duction take as input a cloud of points in Euclidean space, and construct a graph with the input data points as vertices. This graph is then parti- tioned (clustering) or used to rede\ufb01ne metric information (dimensional- ity reduction). There has been much recent work on new methods for graph-based clustering and dimensionality reduction, but not much on constructing the graph itself. Graphs typically used include the fully- connected graph, a local \ufb01xed-grid graph (for image segmentation) or a nearest-neighbor graph. We suggest that the graph should adapt locally to the structure of the data. This can be achieved by a graph ensemble that combines multiple minimum spanning trees, each \ufb01t to a perturbed version of the data set. We show that such a graph ensemble usually pro- duces a better representation of the data manifold than standard methods; and that it provides robustness to a subsequent clustering or dimension- ality reduction algorithm based on the graph.", "bibtex": "@inproceedings{NIPS2004_dcda54e2,\n author = {Zemel, Richard and Carreira-Perpi\\~{n}\\'{a}n, Miguel},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Proximity Graphs for Clustering and Manifold Learning},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/dcda54e29207294d8e7e1b537338b1c0-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/dcda54e29207294d8e7e1b537338b1c0-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/dcda54e29207294d8e7e1b537338b1c0-Metadata.json", "review": "", "metareview": "", "pdf_size": 239902, "gs_citation": 186, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4539431203588850132&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 13, "aff": "Dept. of Computer Science, University of Toronto; Dept. of Computer Science, University of Toronto", "aff_domain": "cs.toronto.edu;cs.toronto.edu", "email": "cs.toronto.edu;cs.toronto.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of Toronto", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.utoronto.ca", "aff_unique_abbr": "U of T", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Toronto", "aff_country_unique_index": "0;0", "aff_country_unique": "Canada" }, { "id": "af1fdd627e", "title": "Rate- and Phase-coded Autoassociative Memory", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/8420d359404024567b5aefda1231af24-Abstract.html", "author": "M\u00e1t\u00e9 Lengyel; Peter Dayan", "abstract": "Areas of the brain involved in various forms of memory exhibit patterns of neural activity quite unlike those in canonical computational models. We show how to use well-founded Bayesian probabilistic autoassociative recall to derive biologically reasonable neuronal dynamics in recurrently coupled models, together with appropriate values for parameters such as the membrane time constant and inhibition. We explicitly treat two cases. One arises from a standard Hebbian learning rule, and involves activity patterns that are coded by graded firing rates. 
The other arises from a spike timing dependent learning rule, and involves patterns coded by the phase of spike times relative to a coherent local field potential oscillation. Our model offers a new and more complete understanding of how neural dynamics may support autoassociation.", "bibtex": "@inproceedings{NIPS2004_8420d359,\n author = {Lengyel, M\\'{a}t\\'{e} and Dayan, Peter},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Rate- and Phase-coded Autoassociative Memory},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/8420d359404024567b5aefda1231af24-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/8420d359404024567b5aefda1231af24-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/8420d359404024567b5aefda1231af24-Metadata.json", "review": "", "metareview": "", "pdf_size": 150166, "gs_citation": 10, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17229311439802734076&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": ";", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster" }, { "id": "2948071780", "title": "Real-Time Pitch Determination of One or More Voices by Nonnegative Matrix Factorization", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/e113bb92c69391dd39e2488f9f588382-Abstract.html", "author": "Fei Sha; Lawrence K. Saul", "abstract": "An auditory \"scene\", composed of overlapping acoustic sources, can be viewed as a complex object whose constituent parts are the individual sources. Pitch is known to be an important cue for auditory scene analy- sis. In this paper, with the goal of building agents that operate in human environments, we describe a real-time system to identify the presence of one or more voices and compute their pitch. The signal processing in the front end is based on instantaneous frequency estimation, a method for tracking the partials of voiced speech, while the pattern-matching in the back end is based on nonnegative matrix factorization, an unsupervised algorithm for learning the parts of complex objects. While supporting a framework to analyze complicated auditory scenes, our system maintains real-time operability and state-of-the-art performance in clean speech.", "bibtex": "@inproceedings{NIPS2004_e113bb92,\n author = {Sha, Fei and Saul, Lawrence},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Real-Time Pitch Determination of One or More Voices by Nonnegative Matrix Factorization},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/e113bb92c69391dd39e2488f9f588382-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/e113bb92c69391dd39e2488f9f588382-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/e113bb92c69391dd39e2488f9f588382-Metadata.json", "review": "", "metareview": "", "pdf_size": 108676, "gs_citation": 97, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4460891329528399350&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "Dept. of Computer and Information Science, University of Pennsylvania, Philadelphia, PA 19104; Dept. 
of Computer and Information Science, University of Pennsylvania, Philadelphia, PA 19104", "aff_domain": "cis.upenn.edu;cis.upenn.edu", "email": "cis.upenn.edu;cis.upenn.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of Pennsylvania", "aff_unique_dep": "Dept. of Computer and Information Science", "aff_unique_url": "https://www.upenn.edu", "aff_unique_abbr": "UPenn", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Philadelphia", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "31f5e5e71f", "title": "Reducing Spike Train Variability: A Computational Theory Of Spike-Timing Dependent Plasticity", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/743c41a921516b04afde48bb48e28ce6-Abstract.html", "author": "Sander M. Bohte; Michael Mozer", "abstract": "Experimental studies have observed synaptic potentiation when a presynaptic neuron fires shortly before a postsynaptic neuron, and synaptic depression when the presynaptic neuron fires shortly af- ter. The dependence of synaptic modulation on the precise tim- ing of the two action potentials is known as spike-timing depen- dent plasticity or STDP. We derive STDP from a simple compu- tational principle: synapses adapt so as to minimize the postsy- naptic neuron's variability to a given presynaptic input, causing the neuron's output to become more reliable in the face of noise. Using an entropy-minimization objective function and the biophys- ically realistic spike-response model of Gerstner (2001), we simu- late neurophysiological experiments and obtain the characteristic STDP curve along with other phenomena including the reduction in synaptic plasticity as synaptic efficacy increases. We compare our account to other efforts to derive STDP from computational princi- ples, and argue that our account provides the most comprehensive coverage of the phenomena. Thus, reliability of neural response in the face of noise may be a key goal of cortical adaptation.", "bibtex": "@inproceedings{NIPS2004_743c41a9,\n author = {Bohte, Sander and Mozer, Michael C},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Reducing Spike Train Variability: A Computational Theory Of Spike-Timing Dependent Plasticity},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/743c41a921516b04afde48bb48e28ce6-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/743c41a921516b04afde48bb48e28ce6-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/743c41a921516b04afde48bb48e28ce6-Metadata.json", "review": "", "metareview": "", "pdf_size": 238166, "gs_citation": 40, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=54517219010644929&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 14, "aff": "Dept. Software Engineering CWI, Amsterdam, The Netherlands+Dept. of Computer Science University of Colorado, Boulder, USA; Dept. of Computer Science University of Colorado, Boulder, USA", "aff_domain": "cwi.nl;cs.colorado.edu", "email": "cwi.nl;cs.colorado.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0+1;1", "aff_unique_norm": "Centrum Wiskunde & Informatica;University of Colorado Boulder", "aff_unique_dep": "Dept. 
Software Engineering;Department of Computer Science", "aff_unique_url": "https://www.cwi.nl;https://www.colorado.edu", "aff_unique_abbr": "CWI;CU Boulder", "aff_campus_unique_index": "0+1;1", "aff_campus_unique": "Amsterdam;Boulder", "aff_country_unique_index": "0+1;1", "aff_country_unique": "Netherlands;United States" }, { "id": "cd32f86c87", "title": "Resolving Perceptual Aliasing In The Presence Of Noisy Sensors", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/c315f0320b7cd4ec85756fac52d78076-Abstract.html", "author": "Guy Shani; Ronen I. Brafman", "abstract": "Agents learning to act in a partially observable domain may need to overcome the problem of perceptual aliasing i.e., different states that appear similar but require different responses. This problem is exacer- bated when the agent's sensors are noisy, i.e., sensors may produce dif- ferent observations in the same state. We show that many well-known reinforcement learning methods designed to deal with perceptual alias- ing, such as Utile Suffix Memory, finite size history windows, eligibility traces, and memory bits, do not handle noisy sensors well. We suggest a new algorithm, Noisy Utile Suffix Memory (NUSM), based on USM, that uses a weighted classification of observed trajectories. We compare NUSM to the above methods and show it to be more robust to noise.", "bibtex": "@inproceedings{NIPS2004_c315f032,\n author = {Shani, Guy and Brafman, Ronen},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Resolving Perceptual Aliasing In The Presence Of Noisy Sensors},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/c315f0320b7cd4ec85756fac52d78076-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/c315f0320b7cd4ec85756fac52d78076-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/c315f0320b7cd4ec85756fac52d78076-Metadata.json", "review": "", "metareview": "", "pdf_size": 163394, "gs_citation": 31, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17810261335723318849&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "Department of Computer Science, Ben-Gurion University; Department of Computer Science, Ben-Gurion University", "aff_domain": "cs.bgu.ac.il;cs.bgu.ac.il", "email": "cs.bgu.ac.il;cs.bgu.ac.il", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Ben-Gurion University", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.bgu.ac.il", "aff_unique_abbr": "BGU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "Israel" }, { "id": "01188d0038", "title": "Responding to Modalities with Different Latencies", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/868b7df964b1af24c8c0a9e43a330c6a-Abstract.html", "author": "Fredrik Bissmarck; Hiroyuki Nakahara; Kenji Doya; Okihide Hikosaka", "abstract": "Motor control depends on sensory feedback in multiple modalities with different latencies. In this paper we consider within the framework of re- inforcement learning how different sensory modalities can be combined and selected for real-time, optimal movement control. We propose an actor-critic architecture with multiple modules, whose output are com- bined using a softmax function. 
We tested our architecture in a simu- lation of a sequential reaching task. Reaching was initially guided by visual feedback with a long latency. Our learning scheme allowed the agent to utilize the somatosensory feedback with shorter latency when the hand is near the experienced trajectory. In simulations with different latencies for visual and somatosensory feedback, we found that the agent depended more on feedback with shorter latency.", "bibtex": "@inproceedings{NIPS2004_868b7df9,\n author = {Bissmarck, Fredrik and Nakahara, Hiroyuki and Doya, Kenji and Hikosaka, Okihide},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Responding to Modalities with Different Latencies},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/868b7df964b1af24c8c0a9e43a330c6a-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/868b7df964b1af24c8c0a9e43a330c6a-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/868b7df964b1af24c8c0a9e43a330c6a-Metadata.json", "review": "", "metareview": "", "pdf_size": 194034, "gs_citation": 4, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3172802661786063691&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Computational Neuroscience Labs, ATR International, Hikari-dai 2-2-2, Seika, Soraku, Kyoto 619-0288 JAPAN; Laboratory for Mathematical Neuroscience, RIKEN Brain Science Institute, Hirosawa 2-1-1, Wako, Saitama 351-0198 JAPAN; Initial Research Project, Okinawa Institute of Science and Technology, 12-22 Suzaki, Gushikawa, Okinawa 904-2234 JAPAN; Laboratory of Sensorimotor Research, National Eye Institute, NIH, Building 49, Room 2A50, Bethesda, MD 20892", "aff_domain": "atr.jp;brain.riken.jp;irp.oist.jp;lsr.nei.nih.gov", "email": "atr.jp;brain.riken.jp;irp.oist.jp;lsr.nei.nih.gov", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;1;2;3", "aff_unique_norm": "ATR International;RIKEN Brain Science Institute;Okinawa Institute of Science and Technology;National Eye Institute", "aff_unique_dep": "Computational Neuroscience Labs;Laboratory for Mathematical Neuroscience;Initial Research Project;Laboratory of Sensorimotor Research", "aff_unique_url": ";https://bSI.riken.jp;https://www.oist.jp;https://www.nei.nih.gov", "aff_unique_abbr": ";RIKEN BSI;OIST;NEI", "aff_campus_unique_index": "0;1;2;3", "aff_campus_unique": "Kyoto;Wako;Okinawa;Bethesda", "aff_country_unique_index": "0;0;0;1", "aff_country_unique": "Japan;United States" }, { "id": "a223bf0c64", "title": "Result Analysis of the NIPS 2003 Feature Selection Challenge", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/5e751896e527c862bf67251a474b3819-Abstract.html", "author": "Isabelle Guyon; Steve Gunn; Asa Ben-Hur; Gideon Dror", "abstract": "The NIPS 2003 workshops included a feature selection competi- tion organized by the authors. We provided participants with five datasets from different application domains and called for classifica- tion results using a minimal number of features. The competition took place over a period of 13 weeks and attracted 78 research groups. 
Participants were asked to make on-line submissions on the validation and test sets, with performance on the validation set being presented immediately to the participant and performance on the test set presented to the participants at the workshop. In total 1863 entries were made on the validation sets during the development period and 135 entries on all test sets for the final competition. The winners used a combination of Bayesian neu- ral networks with ARD priors and Dirichlet diffusion trees. Other top entries used a variety of methods for feature selection, which combined filters and/or wrapper or embedded methods using Ran- dom Forests, kernel methods, or neural networks as a classification engine. The results of the benchmark (including the predictions made by the participants and the features they selected) and the scoring software are publicly available. The benchmark is available at www.nipsfsc.ecs.soton.ac.uk for post-challenge submissions to stimulate further research.", "bibtex": "@inproceedings{NIPS2004_5e751896,\n author = {Guyon, Isabelle and Gunn, Steve and Ben-Hur, Asa and Dror, Gideon},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Result Analysis of the NIPS 2003 Feature Selection Challenge},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/5e751896e527c862bf67251a474b3819-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/5e751896e527c862bf67251a474b3819-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/5e751896e527c862bf67251a474b3819-Metadata.json", "review": "", "metareview": "", "pdf_size": 154294, "gs_citation": 880, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2139987056304114695&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "ClopiNet, Berkeley, CA 94708, USA; School of Electronics and Computer Science, University of Southampton, U.K.; Department of Genome Sciences, University of Washington, USA; Department of Computer Science, Academic College of Tel-Aviv-Ya\ufb00o, Israel", "aff_domain": "clopinet.com;ecs.soton.ac.uk;gs.washington.edu;mta.ac.il", "email": "clopinet.com;ecs.soton.ac.uk;gs.washington.edu;mta.ac.il", "github": "", "project": "www.nipsfsc.ecs.soton.ac.uk", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;1;2;3", "aff_unique_norm": "University of California, Berkeley;University of Southampton;University of Washington;Academic College of Tel-Aviv-Ya\ufb00o", "aff_unique_dep": ";School of Electronics and Computer Science;Department of Genome Sciences;Department of Computer Science", "aff_unique_url": "https://www.berkeley.edu;https://www.southampton.ac.uk;https://www.washington.edu;https://www.acultyaffo.ac.il", "aff_unique_abbr": "UC Berkeley;Southampton;UW;", "aff_campus_unique_index": "0;2", "aff_campus_unique": "Berkeley;;Seattle", "aff_country_unique_index": "0;1;0;2", "aff_country_unique": "United States;United Kingdom;Israel" }, { "id": "a5efcdeb0f", "title": "Saliency-Driven Image Acuity Modulation on a Reconfigurable Array of Spiking Silicon Neurons", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/cf9b2d0406020c56599f9a93708832b5-Abstract.html", "author": "R. J. 
Vogelstein; Udayan Mallik; Eugenio Culurciello; Gert Cauwenberghs; Ralph Etienne-Cummings", "abstract": "We have constructed a system that uses an array of 9,600 spiking sili- con neurons, a fast microcontroller, and digital memory, to implement a recon\ufb01gurable network of integrate-and-\ufb01re neurons. The system is designed for rapid prototyping of spiking neural networks that require high-throughput communication with external address-event hardware. Arbitrary network topologies can be implemented by selectively rout- ing address-events to speci\ufb01c internal or external targets according to a memory-based projective \ufb01eld mapping. The utility and versatility of the system is demonstrated by con\ufb01guring it as a three-stage network that accepts input from an address-event imager, detects salient regions of the image, and performs spatial acuity modulation around a high-resolution fovea that is centered on the location of highest salience.", "bibtex": "@inproceedings{NIPS2004_cf9b2d04,\n author = {Vogelstein, R. and Mallik, Udayan and Culurciello, Eugenio and Cauwenberghs, Gert and Etienne-Cummings, Ralph},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Saliency-Driven Image Acuity Modulation on a Reconfigurable Array of Spiking Silicon Neurons},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/cf9b2d0406020c56599f9a93708832b5-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/cf9b2d0406020c56599f9a93708832b5-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/cf9b2d0406020c56599f9a93708832b5-Metadata.json", "review": "", "metareview": "", "pdf_size": 205841, "gs_citation": 40, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11913047735469009689&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "Dept. of Biomedical Engineering, Johns Hopkins University, Baltimore, MD; Dept. of Electrical & Computer Engineering, Johns Hopkins University, Baltimore, MD; Dept. of Electrical Engineering, Yale University, New Haven, CT; Dept. of Electrical & Computer Engineering, Johns Hopkins University, Baltimore, MD; Dept. of Electrical & Computer Engineering, Johns Hopkins University, Baltimore, MD", "aff_domain": "jhu.edu;jhu.edu;yale.edu;jhu.edu;jhu.edu", "email": "jhu.edu;jhu.edu;yale.edu;jhu.edu;jhu.edu", "github": "", "project": "", "author_num": 5, "track": "main", "status": "Poster", "aff_unique_index": "0;0;1;0;0", "aff_unique_norm": "Johns Hopkins University;Yale University", "aff_unique_dep": "Dept. of Biomedical Engineering;Dept. of Electrical Engineering", "aff_unique_url": "https://www.jhu.edu;https://www.yale.edu", "aff_unique_abbr": "JHU;Yale", "aff_campus_unique_index": "0;0;1;0;0", "aff_campus_unique": "Baltimore;New Haven", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "id": "8f5a51b64b", "title": "Sampling Methods for Unsupervised Learning", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/d7619beb6eb189509885fbc192d2874b-Abstract.html", "author": "Rob Fergus; Andrew Zisserman; Pietro Perona", "abstract": "We present an algorithm to overcome the local maxima problem in es-\n timating the parameters of mixture models. It combines existing ap-\n proaches from both EM and a robust fitting algorithm, RANSAC, to give\n a data-driven stochastic learning scheme. 
Minimal subsets of data points,\n sufficient to constrain the parameters of the model, are drawn from pro-\n posal densities to discover new regions of high likelihood. The proposal\n densities are learnt using EM and bias the sampling toward promising\n solutions. The algorithm is computationally efficient, as well as effective\n at escaping from local maxima. We compare it with alternative methods,\n including EM and RANSAC, on both challenging synthetic data and the\n computer vision problem of alpha-matting.", "bibtex": "@inproceedings{NIPS2004_d7619beb,\n author = {Fergus, Rob and Zisserman, Andrew and Perona, Pietro},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Sampling Methods for Unsupervised Learning},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/d7619beb6eb189509885fbc192d2874b-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/d7619beb6eb189509885fbc192d2874b-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/d7619beb6eb189509885fbc192d2874b-Metadata.json", "review": "", "metareview": "", "pdf_size": 342169, "gs_citation": 5, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7272786852059422866&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 14, "aff": "Dept. of Engineering Science, University of Oxford; Dept. of Engineering Science, University of Oxford; Dept. Electrical Engineering, California Institute of Technology", "aff_domain": "robots.ox.ac.uk;robots.ox.ac.uk;vision.caltech.edu", "email": "robots.ox.ac.uk;robots.ox.ac.uk;vision.caltech.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;1", "aff_unique_norm": "University of Oxford;California Institute of Technology", "aff_unique_dep": "Dept. of Engineering Science;Electrical Engineering", "aff_unique_url": "https://www.ox.ac.uk;https://www.caltech.edu", "aff_unique_abbr": "Oxford;Caltech", "aff_campus_unique_index": "0;0;1", "aff_campus_unique": "Oxford;Pasadena", "aff_country_unique_index": "0;0;1", "aff_country_unique": "United Kingdom;United States" }, { "id": "5cc1e063ed", "title": "Schema Learning: Experience-Based Construction of Predictive Action Models", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/18bb68e2b38e4a8ce7cf4f6b2625768c-Abstract.html", "author": "Michael P. Holmes; Charles Jr.", "abstract": "Schema learning is a way to discover probabilistic, constructivist, pre- dictive action models (schemas) from experience. It includes meth- ods for \ufb01nding and using hidden state to make predictions more accu- rate. We extend the original schema mechanism [1] to handle arbitrary discrete-valued sensors, improve the original learning criteria to handle POMDP domains, and better maintain hidden state by using schema pre- dictions. These extensions show large improvement over the original schema mechanism in several rewardless POMDPs, and achieve very low prediction error in a dif\ufb01cult speech modeling task. Further, we compare extended schema learning to the recently introduced predictive state rep- resentations [2], and \ufb01nd their predictions of next-step action effects to be approximately equal in accuracy. 
This work lays the foundation for a schema-based system of integrated learning and planning.", "bibtex": "@inproceedings{NIPS2004_18bb68e2,\n author = {Holmes, Michael and Jr., Charles},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Schema Learning: Experience-Based Construction of Predictive Action Models},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/18bb68e2b38e4a8ce7cf4f6b2625768c-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/18bb68e2b38e4a8ce7cf4f6b2625768c-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/18bb68e2b38e4a8ce7cf4f6b2625768c-Metadata.json", "review": "", "metareview": "", "pdf_size": 82431, "gs_citation": 58, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14079387210740184016&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "College of Computing, Georgia Institute of Technology; College of Computing, Georgia Institute of Technology", "aff_domain": "cc.gatech.edu;cc.gatech.edu", "email": "cc.gatech.edu;cc.gatech.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Georgia Institute of Technology", "aff_unique_dep": "College of Computing", "aff_unique_url": "https://www.gatech.edu", "aff_unique_abbr": "Georgia Tech", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Atlanta", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "ff19d9bbfb", "title": "Seeing through water", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/17f98ddf040204eda0af36a108cbdea4-Abstract.html", "author": "Alexei Efros; Volkan Isler; Jianbo Shi; Mirk\u00f3 Visontai", "abstract": "We consider the problem of recovering an underwater image distorted by surface waves. A large amount of video data of the distorted image is acquired. The problem is posed in terms of finding an undistorted im- age patch at each spatial location. This challenging reconstruction task can be formulated as a manifold learning problem, such that the center of the manifold is the image of the undistorted patch. To compute the center, we present a new technique to estimate global distances on the manifold. Our technique achieves robustness through convex flow com- putations and solves the \"leakage\" problem inherent in recent manifold embedding techniques.", "bibtex": "@inproceedings{NIPS2004_17f98ddf,\n author = {Efros, Alexei and Isler, Volkan and Shi, Jianbo and Visontai, Mirk\\'{o}},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Seeing through water},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/17f98ddf040204eda0af36a108cbdea4-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/17f98ddf040204eda0af36a108cbdea4-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/17f98ddf040204eda0af36a108cbdea4-Metadata.json", "review": "", "metareview": "", "pdf_size": 407853, "gs_citation": 99, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2898728713325605146&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 17, "aff": "School of Computer Science, Carnegie Mellon University; Dept. 
of Computer and Information Science, University of Pennsylvania; Dept. of Computer and Information Science, University of Pennsylvania; Dept. of Computer and Information Science, University of Pennsylvania", "aff_domain": "cs.cmu.edu;cis.upenn.edu;cis.upenn.edu;cis.upenn.edu", "email": "cs.cmu.edu;cis.upenn.edu;cis.upenn.edu;cis.upenn.edu", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;1;1;1", "aff_unique_norm": "Carnegie Mellon University;University of Pennsylvania", "aff_unique_dep": "School of Computer Science;Dept. of Computer and Information Science", "aff_unique_url": "https://www.cmu.edu;https://www.upenn.edu", "aff_unique_abbr": "CMU;UPenn", "aff_campus_unique_index": "0", "aff_campus_unique": "Pittsburgh;", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "id": "86c83c76e4", "title": "Self-Tuning Spectral Clustering", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/40173ea48d9567f1f393b20c855bb40b-Abstract.html", "author": "Lihi Zelnik-manor; Pietro Perona", "abstract": "We study a number of open issues in spectral clustering: (i) Selecting the appropriate scale of analysis, (ii) Handling multi-scale data, (iii) Cluster- ing with irregular background clutter, and, (iv) Finding automatically the number of groups. We \ufb01rst propose that a \u2018local\u2019 scale should be used to compute the af\ufb01nity between each pair of points. This local scaling leads to better clustering especially when the data includes multiple scales and when the clusters are placed within a cluttered background. We further suggest exploiting the structure of the eigenvectors to infer automatically the number of groups. This leads to a new algorithm in which the \ufb01nal randomly initialized k-means stage is eliminated.", "bibtex": "@inproceedings{NIPS2004_40173ea4,\n author = {Zelnik-manor, Lihi and Perona, Pietro},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Self-Tuning Spectral Clustering},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/40173ea48d9567f1f393b20c855bb40b-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/40173ea48d9567f1f393b20c855bb40b-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/40173ea48d9567f1f393b20c855bb40b-Metadata.json", "review": "", "metareview": "", "pdf_size": 307529, "gs_citation": 2974, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16362313194106973609&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 14, "aff": "Department of Electrical Engineering, California Institute of Technology; Department of Electrical Engineering, California Institute of Technology", "aff_domain": "vision.caltech.edu;vision.caltech.edu", "email": "vision.caltech.edu;vision.caltech.edu", "github": "", "project": "http://www.vision.caltech.edu/lihi/Demos/SelfTuningClustering.html", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "California Institute of Technology", "aff_unique_dep": "Department of Electrical Engineering", "aff_unique_url": "https://www.caltech.edu", "aff_unique_abbr": "Caltech", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Pasadena", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "c2eeae4279", "title": "Semi-Markov Conditional Random Fields for Information Extraction", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/eb06b9db06012a7a4179b8f3cb5384d3-Abstract.html", "author": "Sunita Sarawagi; William W. Cohen", "abstract": "We describe semi-Markov conditional random \ufb01elds (semi-CRFs), a con- ditionally trained version of semi-Markov chains. Intuitively, a semi- CRF on an input sequence x outputs a \u201csegmentation\u201d of x, in which labels are assigned to segments (i.e., subsequences) of x rather than to individual elements xi of x. Importantly, features for semi-CRFs can measure properties of segments, and transitions within a segment can be non-Markovian. In spite of this additional power, exact learning and inference algorithms for semi-CRFs are polynomial-time\u2014often only a small constant factor slower than conventional CRFs. In experiments on \ufb01ve named entity recognition problems, semi-CRFs generally outper- form conventional CRFs.", "bibtex": "@inproceedings{NIPS2004_eb06b9db,\n author = {Sarawagi, Sunita and Cohen, William W},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Semi-Markov Conditional Random Fields for Information Extraction},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/eb06b9db06012a7a4179b8f3cb5384d3-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/eb06b9db06012a7a4179b8f3cb5384d3-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/eb06b9db06012a7a4179b8f3cb5384d3-Metadata.json", "review": "", "metareview": "", "pdf_size": 87343, "gs_citation": 908, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8397365025010857125&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 23, "aff": "Indian Institute of Technology Bombay, India; Center for Automated Learning & Discovery Carnegie Mellon University", "aff_domain": "iitb.ac.in;cs.cmu.edu", "email": "iitb.ac.in;cs.cmu.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "Indian Institute of Technology Bombay;Carnegie Mellon University", "aff_unique_dep": ";Center for Automated Learning & Discovery", "aff_unique_url": "https://www.iitb.ac.in;https://www.cmu.edu", "aff_unique_abbr": "IIT Bombay;CMU", "aff_campus_unique_index": "0", "aff_campus_unique": "Bombay;", "aff_country_unique_index": "0;1", "aff_country_unique": "India;United States" }, { "id": "90917c7f65", "title": "Semi-parametric Exponential Family PCA", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/3bd4017318837e92a66298c7855f4427-Abstract.html", "author": "Sajama Sajama; Alon Orlitsky", "abstract": "We present a semi-parametric latent variable model based technique for density modelling, dimensionality reduction and visualization. Unlike previous methods, we estimate the latent distribution non-parametrically which enables us to model data generated by an underlying low dimen- sional, multimodal distribution. In addition, we allow the components of latent variable models to be drawn from the exponential family which makes the method suitable for special data types, for example binary or count data. Simulations on real valued, binary and count data show fa- vorable comparison to other related schemes both in terms of separating different populations and generalization to unseen samples.", "bibtex": "@inproceedings{NIPS2004_3bd40173,\n author = {Sajama, Sajama and Orlitsky, Alon},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Semi-parametric Exponential Family PCA},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/3bd4017318837e92a66298c7855f4427-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/3bd4017318837e92a66298c7855f4427-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/3bd4017318837e92a66298c7855f4427-Metadata.json", "review": "", "metareview": "", "pdf_size": 145784, "gs_citation": 23, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15273610602006314344&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Department of Electrical and Computer Engineering, University of California at San Diego, La Jolla, CA 92093; Department of Electrical and Computer Engineering, University of California at San Diego, La Jolla, CA 92093", "aff_domain": "ucsd.edu;ece.ucsd.edu", "email": "ucsd.edu;ece.ucsd.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of California, San Diego", "aff_unique_dep": "Department of Electrical and Computer Engineering", "aff_unique_url": "https://www.ucsd.edu", "aff_unique_abbr": "UCSD", "aff_campus_unique_index": "0;0", "aff_campus_unique": "La Jolla", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "d592a606b1", "title": "Semi-supervised Learning by Entropy Minimization", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/96f2b50b5d3613adf9c27049b2a888c7-Abstract.html", "author": "Yves Grandvalet; Yoshua Bengio", "abstract": "We consider the semi-supervised learning problem, where a decision rule is to be learned from labeled and unlabeled data. In this framework, we motivate minimum entropy regularization, which enables to incorporate unlabeled data in the standard supervised learning. Our approach in- cludes other approaches to the semi-supervised problem as particular or limiting cases. A series of experiments illustrates that the proposed solu- tion benefits from unlabeled data. The method challenges mixture mod- els when the data are sampled from the distribution class spanned by the generative model. The performances are definitely in favor of minimum entropy regularization when generative models are misspecified, and the weighting of unlabeled data provides robustness to the violation of the \"cluster assumption\". Finally, we also illustrate that the method can also be far superior to manifold learning in high dimension spaces.", "bibtex": "@inproceedings{NIPS2004_96f2b50b,\n author = {Grandvalet, Yves and Bengio, Yoshua},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Semi-supervised Learning by Entropy Minimization},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/96f2b50b5d3613adf9c27049b2a888c7-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/96f2b50b5d3613adf9c27049b2a888c7-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/96f2b50b5d3613adf9c27049b2a888c7-Metadata.json", "review": "", "metareview": "", "pdf_size": 73831, "gs_citation": 2810, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4164500255359381635&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 20, "aff": "Heudiasyc, CNRS/UTC; Dept. IRO, Universit\u00e9 de Montr\u00e9al", "aff_domain": "utc.fr;iro.umontreal.ca", "email": "utc.fr;iro.umontreal.ca", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "CNRS/UTC;Universit\u00e9 de Montr\u00e9al", "aff_unique_dep": "Heudiasyc;D\u00e9partement d'informatique et de recherche op\u00e9rationnelle", "aff_unique_url": "https://www.utc.fr;https://www.umontreal.ca", "aff_unique_abbr": ";UdeM", "aff_campus_unique_index": "1", "aff_campus_unique": ";Montr\u00e9al", "aff_country_unique_index": "0;1", "aff_country_unique": "France;Canada" }, { "id": "8f46b6cd42", "title": "Semi-supervised Learning on Directed Graphs", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/ad47a008a2f806aa6eb1b53852cd8b37-Abstract.html", "author": "Dengyong Zhou; Thomas Hofmann; Bernhard Sch\u00f6lkopf", "abstract": "Given a directed graph in which some of the nodes are labeled, we investigate the question of how to exploit the link structure of the graph to infer the labels of the remaining unlabeled nodes. To that extent we propose a regularization framework for functions defined over nodes of a directed graph that forces the classification function to change slowly on densely linked subgraphs. A powerful, yet computationally simple classification algorithm is derived within the proposed framework. The experimental evaluation on real-world Web classification problems demonstrates encouraging results that validate our approach.", "bibtex": "@inproceedings{NIPS2004_ad47a008,\n author = {Zhou, Dengyong and Hofmann, Thomas and Sch\\\"{o}lkopf, Bernhard},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Semi-supervised Learning on Directed Graphs},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/ad47a008a2f806aa6eb1b53852cd8b37-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/ad47a008a2f806aa6eb1b53852cd8b37-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/ad47a008a2f806aa6eb1b53852cd8b37-Metadata.json", "review": "", "metareview": "", "pdf_size": 196300, "gs_citation": 275, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15055596397234271104&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 15, "aff": "Max Planck Institute for Biological Cybernetics; Max Planck Institute for Biological Cybernetics; Department of Computer Science, Brown University", "aff_domain": "tuebingen.mpg.de;tuebingen.mpg.de;cs.brown.edu", "email": "tuebingen.mpg.de;tuebingen.mpg.de;cs.brown.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;1", "aff_unique_norm": "Max Planck Institute for Biological Cybernetics;Brown University", "aff_unique_dep": "Biological Cybernetics;Department of Computer Science", "aff_unique_url": "https://www.biocybernetics.mpg.de;https://www.brown.edu", "aff_unique_abbr": "MPIBC;Brown", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;1", "aff_country_unique": "Germany;United States" }, { "id": "958f34e552", "title": "Semi-supervised Learning via Gaussian Processes", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/d3fad7d3634dbfb61018813546edbccb-Abstract.html", "author": "Neil D. Lawrence; Michael I. Jordan", "abstract": "We present a probabilistic approach to learning a Gaussian Process classifier in the presence of unlabeled data. Our approach involves a \"null category noise model\" (NCNM) inspired by ordered cate- gorical noise models. The noise model reflects an assumption that the data density is lower between the class-conditional densities. We illustrate our approach on a toy problem and present compar- ative results for the semi-supervised classification of handwritten digits.", "bibtex": "@inproceedings{NIPS2004_d3fad7d3,\n author = {Lawrence, Neil and Jordan, Michael},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Semi-supervised Learning via Gaussian Processes},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/d3fad7d3634dbfb61018813546edbccb-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/d3fad7d3634dbfb61018813546edbccb-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/d3fad7d3634dbfb61018813546edbccb-Metadata.json", "review": "", "metareview": "", "pdf_size": 167765, "gs_citation": 225, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14917195577824552667&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "Department of Computer Science, University of Sheffield, Sheffield, S1 4DP, U.K.; Computer Science and Statistics, University of California, Berkeley, CA 94720, U.S.A.", "aff_domain": "dcs.shef.ac.uk;cs.berkeley.edu", "email": "dcs.shef.ac.uk;cs.berkeley.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "University of Sheffield;University of California, Berkeley", "aff_unique_dep": "Department of Computer Science;Computer Science and Statistics", "aff_unique_url": "https://www.sheffield.ac.uk;https://www.berkeley.edu", "aff_unique_abbr": "Sheffield;UC Berkeley", "aff_campus_unique_index": "0;1", "aff_campus_unique": "Sheffield;Berkeley", "aff_country_unique_index": "0;1", "aff_country_unique": "United Kingdom;United States" }, { "id": "569d4c786e", "title": "Semi-supervised Learning with Penalized Probabilistic Clustering", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/8dc5983b8c4ef1d8fcd5f325f9a65511-Abstract.html", "author": "Zhengdong Lu; Todd K. Leen", "abstract": "While clustering is usually an unsupervised operation, there are circum- stances in which we believe (with varying degrees of certainty) that items A and B should be assigned to the same cluster, while items A and C should not. We would like such pairwise relations to influence cluster assignments of out-of-sample data in a manner consistent with the prior knowledge expressed in the training set. Our starting point is proba- bilistic clustering based on Gaussian mixture models (GMM) of the data distribution. We express clustering preferences in the prior distribution over assignments of data points to clusters. This prior penalizes cluster assignments according to the degree with which they violate the prefer- ences. We fit the model parameters with EM. Experiments on a variety of data sets show that PPC can consistently improve clustering results.", "bibtex": "@inproceedings{NIPS2004_8dc5983b,\n author = {Lu, Zhengdong and Leen, Todd},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Semi-supervised Learning with Penalized Probabilistic Clustering},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/8dc5983b8c4ef1d8fcd5f325f9a65511-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/8dc5983b8c4ef1d8fcd5f325f9a65511-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/8dc5983b8c4ef1d8fcd5f325f9a65511-Metadata.json", "review": "", "metareview": "", "pdf_size": 168561, "gs_citation": 156, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8869587444756482076&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "Department of Computer Science and Engineering, OGI School of Science and Engineering, OHSU; Department of Computer Science and Engineering, OGI School of Science and Engineering, OHSU", "aff_domain": "cse.ogi.edu;cse.ogi.edu", "email": "cse.ogi.edu;cse.ogi.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Oregon Health & Science University", "aff_unique_dep": "Department of Computer Science and Engineering", "aff_unique_url": "https://www.ohsu.edu", "aff_unique_abbr": "OHSU", "aff_campus_unique_index": "0;0", "aff_campus_unique": "OGI School of Science and Engineering", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "798066f3a8", "title": "Semigroup Kernels on Finite Sets", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/5a99158e0c52f9e7d290906c9d08268d-Abstract.html", "author": "Marco Cuturi; Jean-philippe Vert", "abstract": "Complex objects can often be conveniently represented by finite sets of simpler components, such as images by sets of patches or texts by bags of words. We study the class of positive definite (p.d.) kernels for two such objects that can be expressed as a function of the merger of their respective sets of components. We prove a general integral representa- tion of such kernels and present two particular examples. One of them leads to a kernel for sets of points living in a space endowed itself with a positive definite kernel. We provide experimental results on a benchmark experiment of handwritten digits image classification which illustrate the validity of the approach.", "bibtex": "@inproceedings{NIPS2004_5a99158e,\n author = {Cuturi, Marco and Vert, Jean-philippe},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Semigroup Kernels on Finite Sets},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/5a99158e0c52f9e7d290906c9d08268d-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/5a99158e0c52f9e7d290906c9d08268d-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/5a99158e0c52f9e7d290906c9d08268d-Metadata.json", "review": "", "metareview": "", "pdf_size": 131446, "gs_citation": 37, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11559338775761198883&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 14, "aff": "Computational Biology Group, Ecole des Mines de Paris; Computational Biology Group, Ecole des Mines de Paris", "aff_domain": "ensmp.fr;ensmp.fr", "email": "ensmp.fr;ensmp.fr", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Ecole des Mines de Paris", "aff_unique_dep": "Computational Biology Group", "aff_unique_url": "https://www.mines-paris.psl.eu", "aff_unique_abbr": "Mines ParisTech", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "France" }, { "id": "a1e3be68d1", "title": "Sharing Clusters among Related Groups: Hierarchical Dirichlet Processes", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/fb4ab556bc42d6f0ee0f9e24ec4d1af0-Abstract.html", "author": "Yee W. Teh; Michael I. Jordan; Matthew J. Beal; David M. Blei", "abstract": "We propose the hierarchical Dirichlet process (HDP), a nonparametric Bayesian model for clustering problems involving multiple groups of data. Each group of data is modeled with a mixture, with the number of components being open-ended and inferred automatically by the model. Further, components can be shared across groups, allowing dependencies across groups to be modeled effectively as well as conferring generaliza- tion to new groups. Such grouped clustering problems occur often in practice, e.g. in the problem of topic discovery in document corpora. We report experimental results on three text corpora showing the effective and superior performance of the HDP over previous models.", "bibtex": "@inproceedings{NIPS2004_fb4ab556,\n author = {Teh, Yee and Jordan, Michael and Beal, Matthew and Blei, David},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Sharing Clusters among Related Groups: Hierarchical Dirichlet Processes},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/fb4ab556bc42d6f0ee0f9e24ec4d1af0-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/fb4ab556bc42d6f0ee0f9e24ec4d1af0-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/fb4ab556bc42d6f0ee0f9e24ec4d1af0-Metadata.json", "review": "", "metareview": "", "pdf_size": 112459, "gs_citation": 5575, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3790421305912736096&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 68, "aff": "Computer Science Div. + Dept. of Statistics, University of California at Berkeley; Computer Science Div. + Dept. of Statistics, University of California at Berkeley; Dept. of Computer Science, University of Toronto; Computer Science Div. + Dept. 
of Statistics, University of California at Berkeley", "aff_domain": "cs.berkeley.edu;cs.berkeley.edu;cs.toronto.edu;cs.berkeley.edu", "email": "cs.berkeley.edu;cs.berkeley.edu;cs.toronto.edu;cs.berkeley.edu", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0+1;0+1;2;0+1", "aff_unique_norm": "Computer Science Division;University of California, Berkeley;University of Toronto", "aff_unique_dep": "Computer Science;Department of Statistics;Department of Computer Science", "aff_unique_url": ";https://www.berkeley.edu;https://www.utoronto.ca", "aff_unique_abbr": ";UC Berkeley;U of T", "aff_campus_unique_index": "1;1;2;1", "aff_campus_unique": ";Berkeley;Toronto", "aff_country_unique_index": "1;1;2;1", "aff_country_unique": ";United States;Canada" }, { "id": "17891711b6", "title": "Similarity and Discrimination in Classical Conditioning: A Latent Variable Account", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/65fc9fb4897a89789352e211ca2d398f-Abstract.html", "author": "Aaron C. Courville; Nathaniel D. Daw; David S. Touretzky", "abstract": "We propose a probabilistic, generative account of configural learning phenomena in classical conditioning. Configural learning experiments probe how animals discriminate and generalize between patterns of si- multaneously presented stimuli (such as tones and lights) that are dif- ferentially predictive of reinforcement. Previous models of these issues have been successful more on a phenomenological than an explanatory level: they reproduce experimental findings but, lacking formal founda- tions, provide scant basis for understanding why animals behave as they do. We present a theory that clarifies seemingly arbitrary aspects of pre- vious models while also capturing a broader set of data. Key patterns of data, e.g. concerning animals' readiness to distinguish patterns with varying degrees of overlap, are shown to follow from statistical inference.", "bibtex": "@inproceedings{NIPS2004_65fc9fb4,\n author = {Courville, Aaron C and Daw, Nathaniel and Touretzky, David},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Similarity and Discrimination in Classical Conditioning: A Latent Variable Account},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/65fc9fb4897a89789352e211ca2d398f-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/65fc9fb4897a89789352e211ca2d398f-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/65fc9fb4897a89789352e211ca2d398f-Metadata.json", "review": "", "metareview": "", "pdf_size": 84184, "gs_citation": 67, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3043673233963939874&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 13, "aff": "Robotics Institute+Center for the Neural Basis of Cognition; Gatsby Computational Neuroscience Unit; Computer Science Department+Center for the Neural Basis of Cognition", "aff_domain": "cs.cmu.edu;gatsby.ucl.ac.uk;cs.cmu.edu", "email": "cs.cmu.edu;gatsby.ucl.ac.uk;cs.cmu.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0+1;2;3+1", "aff_unique_norm": "Robotics Institute;Center for the Neural Basis of Cognition;University College London;Computer Science Department", "aff_unique_dep": ";;Gatsby Computational Neuroscience Unit;Computer Science", "aff_unique_url": ";;https://www.ucl.ac.uk;", "aff_unique_abbr": ";;UCL;", "aff_campus_unique_index": ";", "aff_campus_unique": "", "aff_country_unique_index": "1;2;1", "aff_country_unique": ";United States;United Kingdom" }, { "id": "a9b2a64ffb", "title": "Solitaire: Man Versus Machine", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/48c3ec5c3a93a9e294a8a6392ccedeb4-Abstract.html", "author": "Xiang Yan; Persi Diaconis; Paat Rusmevichientong; Benjamin V. Roy", "abstract": "In this paper, we use the rollout method for policy improvement to an- alyze a version of Klondike solitaire. This version, sometimes called thoughtful solitaire, has all cards revealed to the player, but then follows the usual Klondike rules. A strategy that we establish, using iterated roll- outs, wins about twice as many games on average as an expert human player does.", "bibtex": "@inproceedings{NIPS2004_48c3ec5c,\n author = {Yan, Xiang and Diaconis, Persi and Rusmevichientong, Paat and Roy, Benjamin},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Solitaire: Man Versus Machine},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/48c3ec5c3a93a9e294a8a6392ccedeb4-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/48c3ec5c3a93a9e294a8a6392ccedeb4-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/48c3ec5c3a93a9e294a8a6392ccedeb4-Metadata.json", "review": "", "metareview": "", "pdf_size": 102448, "gs_citation": 76, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9085875737726766346&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "Stanford University; Stanford University; Cornell University; Stanford University", "aff_domain": "stanford.edu;stanford.edu;orie.cornell.edu;stanford.edu", "email": "stanford.edu;stanford.edu;orie.cornell.edu;stanford.edu", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;0;1;0", "aff_unique_norm": "Stanford University;Cornell University", "aff_unique_dep": ";", "aff_unique_url": "https://www.stanford.edu;https://www.cornell.edu", "aff_unique_abbr": "Stanford;Cornell", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Stanford;", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "id": "d3a99f93a9", "title": "Sparse Coding of Natural Images Using an Overcomplete Set of Limited Capacity Units", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/309a8e73b2cdb95fc1affa8845504e87-Abstract.html", "author": "Eizaburo Doi; Michael S. Lewicki", "abstract": "It has been suggested that the primary goal of the sensory system is to represent input in such a way as to reduce the high degree of redun- dancy. Given a noisy neural representation, however, solely reducing redundancy is not desirable, since redundancy is the only clue to reduce the effects of noise. Here we propose a model that best balances redun- dancy reduction and redundant representation. Like previous models, our model accounts for the localized and oriented structure of simple cells, but it also predicts a different organization for the population. With noisy, limited-capacity units, the optimal representation becomes an overcom- plete, multi-scale representation, which, compared to previous models, is in closer agreement with physiological data. These results offer a new perspective on the expansion of the number of neurons from retina to V1 and provide a theoretical model of incorporating useful redundancy into efficient neural representations.", "bibtex": "@inproceedings{NIPS2004_309a8e73,\n author = {Doi, Eizaburo and Lewicki, Michael},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Sparse Coding of Natural Images Using an Overcomplete Set of Limited Capacity Units},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/309a8e73b2cdb95fc1affa8845504e87-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/309a8e73b2cdb95fc1affa8845504e87-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/309a8e73b2cdb95fc1affa8845504e87-Metadata.json", "review": "", "metareview": "", "pdf_size": 651139, "gs_citation": 40, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14556861497245086560&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "Center for the Neural Basis of Cognition, Carnegie Mellon University; Center for the Neural Basis of Cognition, Computer Science Department, Carnegie Mellon University", "aff_domain": "cnbc.cmu.edu;cnbc.cmu.edu", "email": "cnbc.cmu.edu;cnbc.cmu.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Carnegie Mellon University", "aff_unique_dep": "Center for the Neural Basis of Cognition", "aff_unique_url": "https://www.cmu.edu", "aff_unique_abbr": "CMU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "89ef1a4e40", "title": "Spike Sorting: Bayesian Clustering of Non-Stationary Data", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/b5baa9c23ac3e015ad287b17a3d4afa3-Abstract.html", "author": "Aharon Bar-hillel; Adam Spiro; Eran Stark", "abstract": "Spike sorting involves clustering spike trains recorded by a micro- electrode according to the source neuron. It is a complicated problem, which requires a lot of human labor, partly due to the non-stationary na- ture of the data. We propose an automated technique for the clustering of non-stationary Gaussian sources in a Bayesian framework. At a first search stage, data is divided into short time frames and candidate descrip- tions of the data as a mixture of Gaussians are computed for each frame. At a second stage transition probabilities between candidate mixtures are computed, and a globally optimal clustering is found as the MAP so- lution of the resulting probabilistic model. Transition probabilities are computed using local stationarity assumptions and are based on a Gaus- sian version of the Jensen-Shannon divergence. The method was applied to several recordings. The performance appeared almost indistinguish- able from humans in a wide range of scenarios, including movement, merges, and splits of clusters.", "bibtex": "@inproceedings{NIPS2004_b5baa9c2,\n author = {Bar-hillel, Aharon and Spiro, Adam and Stark, Eran},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Spike Sorting: Bayesian Clustering of Non-Stationary Data},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/b5baa9c23ac3e015ad287b17a3d4afa3-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/b5baa9c23ac3e015ad287b17a3d4afa3-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/b5baa9c23ac3e015ad287b17a3d4afa3-Metadata.json", "review": "", "metareview": "", "pdf_size": 256287, "gs_citation": 100, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3459786211549712389&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 17, "aff": "Neural Computation Center, The Hebrew University of Jerusalem; School of Computer Science and Engineering, The Hebrew University of Jerusalem; Department of Physiology, The Hebrew University of Jerusalem", "aff_domain": "cs.huji.ac.il;cs.huji.ac.il;md.huji.ac.il", "email": "cs.huji.ac.il;cs.huji.ac.il;md.huji.ac.il", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "Hebrew University of Jerusalem", "aff_unique_dep": "Neural Computation Center", "aff_unique_url": "http://www.huji.ac.il", "aff_unique_abbr": "HUJI", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Jerusalem", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Israel" }, { "id": "b2d98907e7", "title": "Spike-timing Dependent Plasticity and Mutual Information Maximization for a Spiking Neuron Model", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/fb89fd138b104dcf8e2077ad2a23954d-Abstract.html", "author": "Taro Toyoizumi; Jean-pascal Pfister; Kazuyuki Aihara; Wulfram Gerstner", "abstract": "We derive an optimal learning rule in the sense of mutual information maximization for a spiking neuron model. Under the assumption of small \ufb02uctuations of the input, we \ufb01nd a spike-timing dependent plas- ticity (STDP) function which depends on the time course of excitatory postsynaptic potentials (EPSPs) and the autocorrelation function of the postsynaptic neuron. We show that the STDP function has both positive and negative phases. The positive phase is related to the shape of the EPSP while the negative phase is controlled by neuronal refractoriness.", "bibtex": "@inproceedings{NIPS2004_fb89fd13,\n author = {Toyoizumi, Taro and Pfister, Jean-pascal and Aihara, Kazuyuki and Gerstner, Wulfram},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Spike-timing Dependent Plasticity and Mutual Information Maximization for a Spiking Neuron Model},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/fb89fd138b104dcf8e2077ad2a23954d-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/fb89fd138b104dcf8e2077ad2a23954d-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/fb89fd138b104dcf8e2077ad2a23954d-Metadata.json", "review": "", "metareview": "", "pdf_size": 105382, "gs_citation": 46, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9428194652917735751&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 17, "aff": "Department of Complexity Science and Engineering, The University of Tokyo, 153-8505 Tokyo, Japan; Ecole Polytechnique F\u00e9d\u00e9rale de Lausanne (EPFL), School of Computer and Communication Sciences and Brain-Mind Institute, 1015 Lausanne, Switzerland; Graduate School of Information Science and Technology, The University of Tokyo, 153-8505 Tokyo, Japan + ERATO Aihara Complexity Modeling Project, JST, 45-18 Oyama, Shibuya-ku, 151-0065 Tokyo, Japan; Ecole Polytechnique F\u00e9d\u00e9rale de Lausanne (EPFL), School of Computer and Communication Sciences and Brain-Mind Institute, 1015 Lausanne, Switzerland", "aff_domain": "sat.t.u-tokyo.ac.jp;epfl.ch;sat.t.u-tokyo.ac.jp;epfl.ch", "email": "sat.t.u-tokyo.ac.jp;epfl.ch;sat.t.u-tokyo.ac.jp;epfl.ch", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;1;0+2;1", "aff_unique_norm": "University of Tokyo;EPFL;Japan Science and Technology Agency (JST)", "aff_unique_dep": "Department of Complexity Science and Engineering;School of Computer and Communication Sciences and Brain-Mind Institute;ERATO Aihara Complexity Modeling Project", "aff_unique_url": "https://www.u-tokyo.ac.jp;https://www.epfl.ch;https://www.jst.go.jp", "aff_unique_abbr": "UTokyo;EPFL;JST", "aff_campus_unique_index": "0;1;0+0;1", "aff_campus_unique": "Tokyo;Lausanne", "aff_country_unique_index": "0;1;0+0;1", "aff_country_unique": "Japan;Switzerland" }, { "id": "1702605228", "title": "Stable adaptive control with online learning", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/8e68c3c7bf14ad0bcaba52babfa470bd-Abstract.html", "author": "H. J. Kim; Andrew Y. Ng", "abstract": "Learning algorithms have enjoyed numerous successes in robotic control tasks. In problems with time-varying dynamics, online learning methods have also proved to be a powerful tool for automatically tracking and/or adapting to the changing circumstances. However, for safety-critical ap- plications such as airplane flight, the adoption of these algorithms has been significantly hampered by their lack of safety, such as \"stability,\" guarantees. Rather than trying to show difficult, a priori, stability guar- antees for specific learning methods, in this paper we propose a method for \"monitoring\" the controllers suggested by the learning algorithm on- line, and rejecting controllers leading to instability. We prove that even if an arbitrary online learning method is used with our algorithm to control a linear dynamical system, the resulting system is stable.", "bibtex": "@inproceedings{NIPS2004_8e68c3c7,\n author = {Kim, H. and Ng, Andrew},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Stable adaptive control with online learning},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/8e68c3c7bf14ad0bcaba52babfa470bd-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/8e68c3c7bf14ad0bcaba52babfa470bd-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/8e68c3c7bf14ad0bcaba52babfa470bd-Metadata.json", "review": "", "metareview": "", "pdf_size": 280319, "gs_citation": 38, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11089008873039101547&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 12, "aff": "Stanford University; Seoul National University", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "Stanford University;Seoul National University", "aff_unique_dep": ";", "aff_unique_url": "https://www.stanford.edu;https://www.snu.ac.kr", "aff_unique_abbr": "Stanford;SNU", "aff_campus_unique_index": "0", "aff_campus_unique": "Stanford;", "aff_country_unique_index": "0;1", "aff_country_unique": "United States;South Korea" }, { "id": "018bff3279", "title": "Sub-Microwatt Analog VLSI Support Vector Machine for Pattern Classification and Sequence Estimation", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/60519c3dd22587d6de04d5f1e28bd41d-Abstract.html", "author": "Shantanu Chakrabartty; Gert Cauwenberghs", "abstract": "An analog system-on-chip for kernel-based pattern classification and se- quence estimation is presented. State transition probabilities conditioned on input data are generated by an integrated support vector machine. Dot product based kernels and support vector coefficients are implemented in analog programmable floating gate translinear circuits, and probabil- ities are propagated and normalized using sub-threshold current-mode circuits. A 14-input, 24-state, and 720-support vector forward decod- ing kernel machine is integrated on a 3mm3mm chip in 0.5m CMOS technology. Experiments with the processor trained for speaker verifica- tion and phoneme sequence estimation demonstrate real-time recognition accuracy at par with floating-point software, at sub-microwatt power.", "bibtex": "@inproceedings{NIPS2004_60519c3d,\n author = {Chakrabartty, Shantanu and Cauwenberghs, Gert},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Sub-Microwatt Analog VLSI Support Vector Machine for Pattern Classification and Sequence Estimation},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/60519c3dd22587d6de04d5f1e28bd41d-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/60519c3dd22587d6de04d5f1e28bd41d-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/60519c3dd22587d6de04d5f1e28bd41d-Metadata.json", "review": "", "metareview": "", "pdf_size": 192371, "gs_citation": 35, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5862224551470544278&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 16, "aff": "Department of Electrical and Computer Engineering, Johns Hopkins University, Baltimore, MD 21218; Department of Electrical and Computer Engineering, Johns Hopkins University, Baltimore, MD 21218", "aff_domain": "jhu.edu;jhu.edu", "email": "jhu.edu;jhu.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Johns Hopkins University", "aff_unique_dep": "Department of Electrical and Computer Engineering", "aff_unique_url": "https://www.jhu.edu", "aff_unique_abbr": "JHU", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Baltimore", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "f7c176eadd", "title": "Supervised Graph Inference", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/a4613e8d72a61b3b69b32d040f89ad81-Abstract.html", "author": "Jean-philippe Vert; Yoshihiro Yamanishi", "abstract": "We formulate the problem of graph inference where part of the graph is known as a supervised learning problem, and propose an algorithm to solve it. The method involves the learning of a mapping of the vertices to a Euclidean space where the graph is easy to infer, and can be formu- lated as an optimization problem in a reproducing kernel Hilbert space. We report encouraging results on the problem of metabolic network re- construction from genomic data.", "bibtex": "@inproceedings{NIPS2004_a4613e8d,\n author = {Vert, Jean-philippe and Yamanishi, Yoshihiro},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Supervised Graph Inference},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/a4613e8d72a61b3b69b32d040f89ad81-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/a4613e8d72a61b3b69b32d040f89ad81-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/a4613e8d72a61b3b69b32d040f89ad81-Metadata.json", "review": "", "metareview": "", "pdf_size": 112777, "gs_citation": 124, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1613579358821824493&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 19, "aff": "Centre de G\u00b4eostatistique Ecole des Mines de Paris; Bioinformatics Center Institute for Chemical Research Kyoto University", "aff_domain": "mines.org;kuicr.kyoto-u.ac.jp", "email": "mines.org;kuicr.kyoto-u.ac.jp", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "Ecole des Mines de Paris;Kyoto University", "aff_unique_dep": "Centre de G\u00e9ostatistique;Bioinformatics Center Institute for Chemical Research", "aff_unique_url": "https://www.mines-paris.psl.eu;https://www.kyoto-u.ac.jp", "aff_unique_abbr": "Mines ParisTech;Kyoto U", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1", "aff_country_unique": "France;Japan" }, { "id": "5b608707e8", "title": "Support Vector Classification with Input Data Uncertainty", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/22b1f2e0983160db6f7bb9f62f4dbb39-Abstract.html", "author": "Jinbo Bi; Tong Zhang", "abstract": "This paper investigates a new learning model in which the input data is corrupted with noise. We present a general statistical framework to tackle this problem. Based on the statistical reasoning, we propose a novel formulation of support vector classi\ufb01cation, which allows uncer- tainty in input data. We derive an intuitive geometric interpretation of the proposed formulation, and develop algorithms to ef\ufb01ciently solve it. Empirical results are included to show that the newly formed method is superior to the standard SVM for problems with noisy input.", "bibtex": "@inproceedings{NIPS2004_22b1f2e0,\n author = {Bi, Jinbo and Zhang, Tong},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Support Vector Classification with Input Data Uncertainty},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/22b1f2e0983160db6f7bb9f62f4dbb39-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/22b1f2e0983160db6f7bb9f62f4dbb39-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/22b1f2e0983160db6f7bb9f62f4dbb39-Metadata.json", "review": "", "metareview": "", "pdf_size": 89801, "gs_citation": 324, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6647430670768045822&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 14, "aff": "Computer-Aided Diagnosis & Therapy Group, Siemens Medical Solutions, Inc., Malvern, PA 19355; IBM T. J. 
Watson Research Center, Yorktown Heights, NY 10598", "aff_domain": "siemens.com;watson.ibm.com", "email": "siemens.com;watson.ibm.com", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "Siemens Medical Solutions, Inc.;IBM", "aff_unique_dep": "Computer-Aided Diagnosis & Therapy Group;IBM T. J. Watson Research Center", "aff_unique_url": "https://www.siemens-healthineers.com;https://www.ibm.com/research/watson", "aff_unique_abbr": "Siemens Med;IBM Watson", "aff_campus_unique_index": "0;1", "aff_campus_unique": "Malvern;Yorktown Heights", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "4b63024c48", "title": "Surface Reconstruction using Learned Shape Models", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/7180cffd6a8e829dacfc2a31b3f72ece-Abstract.html", "author": "Jan E. Solem; Fredrik Kahl", "abstract": "We consider the problem of geometrical surface reconstruction from one or several images using learned shape models. While humans can effort- lessly retrieve 3D shape information, this inverse problem has turned out to be difficult to perform automatically. We introduce a framework based on level set surface reconstruction and shape models for achieving this goal. Through this merging, we obtain an efficient and robust method for reconstructing surfaces of an object category of interest. The shape model includes surface cues such as point, curve and silhou- ette features. Based on ideas from Active Shape Models, we show how both the geometry and the appearance of these features can be modelled consistently in a multi-view context. The complete surface is obtained by evolving a level set driven by a PDE, which tries to fit the surface to the inferred 3D features. In addition, an a priori 3D surface model is used to regularize the solution, in particular, where surface features are sparse. Experiments are demonstrated on a database of real face images.", "bibtex": "@inproceedings{NIPS2004_7180cffd,\n author = {Solem, Jan and Kahl, Fredrik},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Surface Reconstruction using Learned Shape Models},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/7180cffd6a8e829dacfc2a31b3f72ece-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/7180cffd6a8e829dacfc2a31b3f72ece-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/7180cffd6a8e829dacfc2a31b3f72ece-Metadata.json", "review": "", "metareview": "", "pdf_size": 238999, "gs_citation": 14, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13030139490631908823&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "School of Technology and Society, Malm\u00f6 University, Sweden; RSISE, Australian National University, ACT 0200, Australia", "aff_domain": "ts.mah.se;maths.lth.se", "email": "ts.mah.se;maths.lth.se", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "Malm\u00f6 University;Australian National University", "aff_unique_dep": "School of Technology and Society;RSISE", "aff_unique_url": "https://www.mau.se;https://www.anu.edu.au", "aff_unique_abbr": ";ANU", "aff_campus_unique_index": "0;1", "aff_campus_unique": "Malm\u00f6;ACT", "aff_country_unique_index": "0;1", "aff_country_unique": "Sweden;Australia" }, { "id": "edffeed76f", "title": "Synchronization of neural networks by mutual learning and its application to cryptography", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/5a45828dead8c065099cb653a2185df1-Abstract.html", "author": "Einat Klein; Rachel Mislovaty; Ido Kanter; Andreas Ruttor; Wolfgang Kinzel", "abstract": "Two neural networks that are trained on their mutual output synchronize to an identical time dependant weight vector. This novel phenomenon can be used for creation of a secure cryptographic secret-key using a public channel. Several models for this cryptographic system have been suggested, and have been tested for their security under different sophis- ticated attack strategies. The most promising models are networks that involve chaos synchronization. The synchronization process of mutual learning is described analytically using statistical physics methods.", "bibtex": "@inproceedings{NIPS2004_5a45828d,\n author = {Klein, Einat and Mislovaty, Rachel and Kanter, Ido and Ruttor, Andreas and Kinzel, Wolfgang},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Synchronization of neural networks by mutual learning and its application to cryptography},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/5a45828dead8c065099cb653a2185df1-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/5a45828dead8c065099cb653a2185df1-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/5a45828dead8c065099cb653a2185df1-Metadata.json", "review": "", "metareview": "", "pdf_size": 145016, "gs_citation": 77, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10299510818478819469&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Department of Physics, Bar-Ilan University, Ramat-Gan, 52900 Israel; Department of Physics, Bar-Ilan University, Ramat-Gan, 52900 Israel; Department of Physics, Bar-Ilan University, Ramat-Gan, 52900 Israel; Institut f\u00a8ur Theoretische Physik, Universit\u00a8at W\u00a8urzburg, Am Hubland 97074 W\u00a8urzburg, Germany; Institut f\u00a8ur Theoretische Physik, Universit\u00a8at W\u00a8urzburg, Am Hubland 97074 W\u00a8urzburg, Germany", "aff_domain": ";;;;", "email": ";;;;", "github": "", "project": "", "author_num": 5, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;1;1", "aff_unique_norm": "Bar-Ilan University;Universit\u00a8at W\u00a8urzburg", "aff_unique_dep": "Department of Physics;Institut f\u00a8ur Theoretische Physik", "aff_unique_url": "https://www.biu.ac.il;https://www.uni-wuerzburg.de", "aff_unique_abbr": "BIU;", "aff_campus_unique_index": "0;0;0;1;1", "aff_campus_unique": "Ramat-Gan;W\u00a8urzburg", "aff_country_unique_index": "0;0;0;1;1", "aff_country_unique": "Israel;Germany" }, { "id": "e7f75952ba", "title": "Synergies between Intrinsic and Synaptic Plasticity in Individual Model Neurons", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/08f38e0434442128fab5ead6217ca759-Abstract.html", "author": "Jochen Triesch", "abstract": "This paper explores the computational consequences of simultaneous in- trinsic and synaptic plasticity in individual model neurons. It proposes a new intrinsic plasticity mechanism for a continuous activation model neuron based on low order moments of the neuron's firing rate distribu- tion. The goal of the intrinsic plasticity mechanism is to enforce a sparse distribution of the neuron's activity level. In conjunction with Hebbian learning at the neuron's synapses, the neuron is shown to discover sparse directions in the input.", "bibtex": "@inproceedings{NIPS2004_08f38e04,\n author = {Triesch, Jochen},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Synergies between Intrinsic and Synaptic Plasticity in Individual Model Neurons},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/08f38e0434442128fab5ead6217ca759-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/08f38e0434442128fab5ead6217ca759-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/08f38e0434442128fab5ead6217ca759-Metadata.json", "review": "", "metareview": "", "pdf_size": 195244, "gs_citation": 225, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14092509881890926941&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 20, "aff": "Dept. 
of Cognitive Science, UC San Diego, La Jolla, CA, 92093-0515, USA + Frankfurt Institute for Advanced Studies, Frankfurt am Main, Germany", "aff_domain": "ucsd.edu", "email": "ucsd.edu", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0+1", "aff_unique_norm": "University of California, San Diego;Frankfurt Institute for Advanced Studies", "aff_unique_dep": "Department of Cognitive Science;", "aff_unique_url": "https://www.ucsd.edu;https://www.fias.uni-frankfurt.de/", "aff_unique_abbr": "UCSD;FIAS", "aff_campus_unique_index": "0+1", "aff_campus_unique": "La Jolla;Frankfurt am Main", "aff_country_unique_index": "0+1", "aff_country_unique": "United States;Germany" }, { "id": "6613925e8d", "title": "Synergistic Face Detection and Pose Estimation with Energy-Based Models", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/06c284d3f757b15c02f47f3ff06dc275-Abstract.html", "author": "Margarita Osadchy; Matthew L. Miller; Yann L. Cun", "abstract": "We describe a novel method for real-time, simultaneous multi-view face detection and facial pose estimation. The method employs a convolu- tional network to map face images to points on a manifold, parametrized by pose, and non-face images to points far from that manifold. This network is trained by optimizing a loss function of three variables: im- age, pose, and face/non-face label. We test the resulting system, in a single configuration, on three standard data sets one for frontal pose, one for rotated faces, and one for profiles and find that its performance on each set is comparable to previous multi-view face detectors that can only handle one form of pose variation. We also show experimentally that the system's accuracy on both face detection and pose estimation is improved by training for the two tasks together.", "bibtex": "@inproceedings{NIPS2004_06c284d3,\n author = {Osadchy, Margarita and Miller, Matthew and Cun, Yann},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Synergistic Face Detection and Pose Estimation with Energy-Based Models},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/06c284d3f757b15c02f47f3ff06dc275-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/06c284d3f757b15c02f47f3ff06dc275-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/06c284d3f757b15c02f47f3ff06dc275-Metadata.json", "review": "", "metareview": "", "pdf_size": 197285, "gs_citation": 533, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12128070097362252213&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 19, "aff": "NEC Labs America, Princeton NJ 08540; NEC Labs America, Princeton NJ 08540; The Courant Institute, New York University", "aff_domain": "osadchy.net;nec-labs.com;cs.nyu.edu", "email": "osadchy.net;nec-labs.com;cs.nyu.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;1", "aff_unique_norm": "NEC Labs America;New York University", "aff_unique_dep": ";The Courant Institute", "aff_unique_url": "https://www.nec-labs.com;https://www.courant.nyu.edu", "aff_unique_abbr": "NEC LA;NYU", "aff_campus_unique_index": "0;0;1", "aff_campus_unique": "Princeton;New York", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "dc105f60b5", "title": "Temporal-Difference Networks", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/9d28de8ff9bb6a3fa41fddfdc28f3bc1-Abstract.html", "author": "Richard S. Sutton; Brian Tanner", "abstract": "We introduce a generalization of temporal-difference (TD) learning to networks of interrelated predictions. Rather than relating a single pre- diction to itself at a later time, as in conventional TD methods, a TD network relates each prediction in a set of predictions to other predic- tions in the set at a later time. TD networks can represent and apply TD learning to a much wider class of predictions than has previously been possible. Using a random-walk example, we show that these networks can be used to learn to predict by a fixed interval, which is not possi- ble with conventional TD methods. Secondly, we show that if the inter- predictive relationships are made conditional on action, then the usual learning-efficiency advantage of TD methods over Monte Carlo (super- vised learning) methods becomes particularly pronounced. Thirdly, we demonstrate that TD networks can learn predictive state representations that enable exact solution of a non-Markov problem. A very broad range of inter-predictive temporal relationships can be expressed in these net- works. Overall we argue that TD networks represent a substantial ex- tension of the abilities of TD methods and bring us closer to the goal of representing world knowledge in entirely predictive, grounded terms.", "bibtex": "@inproceedings{NIPS2004_9d28de8f,\n author = {Sutton, Richard S and Tanner, Brian},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Temporal-Difference Networks},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/9d28de8ff9bb6a3fa41fddfdc28f3bc1-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/9d28de8ff9bb6a3fa41fddfdc28f3bc1-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/9d28de8ff9bb6a3fa41fddfdc28f3bc1-Metadata.json", "review": "", "metareview": "", "pdf_size": 138543, "gs_citation": 159, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4911012497015837967&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 25, "aff": "Department of Computing Science, University of Alberta; Department of Computing Science, University of Alberta", "aff_domain": "cs.ualberta.ca;cs.ualberta.ca", "email": "cs.ualberta.ca;cs.ualberta.ca", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of Alberta", "aff_unique_dep": "Department of Computing Science", "aff_unique_url": "https://www.ualberta.ca", "aff_unique_abbr": "UAlberta", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "Canada" }, { "id": "e877b83a34", "title": "The Cerebellum Chip: an Analog VLSI Implementation of a Cerebellar Model of Classical Conditioning", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/220c77af02f8ad8561b150d93000ddff-Abstract.html", "author": "Constanze Hofstoetter; Manuel Gil; Kynan Eng; Giacomo Indiveri; Matti Mintz; J\u00f6rg Kramer; Paul F. Verschure", "abstract": "We present a biophysically constrained cerebellar model of classical conditioning, implemented using a neuromorphic analog VLSI (aVLSI) chip. Like its biological counterpart, our cerebellar model is able to control adaptive behavior by predicting the precise timing of events. Here we describe the functionality of the chip and present its learning performance, as evaluated in simulated conditioning experiments at the circuit level and in behavioral experiments using a mobile robot. We show that this aVLSI model supports the acquisition and extinction of adaptively timed conditioned responses under real-world conditions with ultra-low power consumption.", "bibtex": "@inproceedings{NIPS2004_220c77af,\n author = {Hofstoetter, Constanze and Gil, Manuel and Eng, Kynan and Indiveri, Giacomo and Mintz, Matti and Kramer, J\\\"{o}rg and Verschure, Paul},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {The Cerebellum Chip: an Analog VLSI Implementation of a Cerebellar Model of Classical Conditioning},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/220c77af02f8ad8561b150d93000ddff-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/220c77af02f8ad8561b150d93000ddff-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/220c77af02f8ad8561b150d93000ddff-Metadata.json", "review": "", "metareview": "", "pdf_size": 289842, "gs_citation": 21, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15853474024165525611&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 11, "aff": "Institute of Neuroinformatics University/ETH Zurich CH-8057 Zurich, Switzerland; Institute of Neuroinformatics University/ETH Zurich CH-8057 Zurich, Switzerland; Institute of Neuroinformatics University/ETH Zurich CH-8057 Zurich, Switzerland; Institute of Neuroinformatics University/ETH Zurich CH-8057 Zurich, Switzerland; Institute of Neuroinformatics University/ETH Zurich CH-8057 Zurich, Switzerland; Institute of Neuroinformatics University/ETH Zurich CH-8057 Zurich, Switzerland; Institute of Neuroinformatics University/ETH Zurich CH-8057 Zurich, Switzerland", "aff_domain": "ini.phys.ethz.ch; ; ; ; ; ;ini.phys.ethz.ch", "email": "ini.phys.ethz.ch; ; ; ; ; ;ini.phys.ethz.ch", "github": "", "project": "", "author_num": 7, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;0;0;0;0", "aff_unique_norm": "ETH Zurich", "aff_unique_dep": "Institute of Neuroinformatics", "aff_unique_url": "https://www.ethz.ch", "aff_unique_abbr": "ETHZ", "aff_campus_unique_index": "0;0;0;0;0;0;0", "aff_campus_unique": "Zurich", "aff_country_unique_index": "0;0;0;0;0;0;0", "aff_country_unique": "Switzerland" }, { "id": "28502743d9", "title": "The Convergence of Contrastive Divergences", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/75e33da9b103b7b91dcd8da0abe1354b-Abstract.html", "author": "Alan L. Yuille", "abstract": "This paper analyses the Contrastive Divergence algorithm for learning statistical parameters. We relate the algorithm to the stochastic approxi- mation literature. This enables us to specify conditions under which the algorithm is guaranteed to converge to the optimal solution (with proba- bility 1). This includes necessary and sufficient conditions for the solu- tion to be unbiased.", "bibtex": "@inproceedings{NIPS2004_75e33da9,\n author = {Yuille, Alan L},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {The Convergence of Contrastive Divergences},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/75e33da9b103b7b91dcd8da0abe1354b-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/75e33da9b103b7b91dcd8da0abe1354b-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/75e33da9b103b7b91dcd8da0abe1354b-Metadata.json", "review": "", "metareview": "", "pdf_size": 78417, "gs_citation": 147, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4117480007593520119&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "Department of Statistics, University of California at Los Angeles", "aff_domain": "stat.ucla.edu", "email": "stat.ucla.edu", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "University of California, Los Angeles", "aff_unique_dep": "Department of Statistics", "aff_unique_url": "https://www.ucla.edu", "aff_unique_abbr": "UCLA", "aff_campus_unique_index": "0", "aff_campus_unique": "Los Angeles", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "id": "78f65eca5e", "title": "The Correlated Correspondence Algorithm for Unsupervised Registration of Nonrigid Surfaces", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/e02e27e04fdff967ba7d76fb24b8069d-Abstract.html", "author": "Dragomir Anguelov; Praveen Srinivasan; Hoi-cheung Pang; Daphne Koller; Sebastian Thrun; James Davis", "abstract": "We present an unsupervised algorithm for registering 3D surface scans of an object undergoing significant deformations. Our algorithm does not need markers, nor does it assume prior knowledge about object shape, the dynamics of its deformation, or scan alignment. The algorithm registers two meshes by optimizing a joint probabilistic model over all point-to- point correspondences between them. This model enforces preservation of local mesh geometry, as well as more global constraints that capture the preservation of geodesic distance between corresponding point pairs. The algorithm applies even when one of the meshes is an incomplete range scan; thus, it can be used to automatically fill in the remaining sur- faces for this partial scan, even if those surfaces were previously only seen in a different configuration. We evaluate the algorithm on several real-world datasets, where we demonstrate good results in the presence of significant movement of articulated parts and non-rigid surface defor- mation. Finally, we show that the output of the algorithm can be used for compelling computer graphics tasks such as interpolation between two scans of a non-rigid object and automatic recovery of articulated object models.", "bibtex": "@inproceedings{NIPS2004_e02e27e0,\n author = {Anguelov, Dragomir and Srinivasan, Praveen and Pang, Hoi-cheung and Koller, Daphne and Thrun, Sebastian and Davis, James},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {The Correlated Correspondence Algorithm for Unsupervised Registration of Nonrigid Surfaces},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/e02e27e04fdff967ba7d76fb24b8069d-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/e02e27e04fdff967ba7d76fb24b8069d-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/e02e27e04fdff967ba7d76fb24b8069d-Metadata.json", "review": "", "metareview": "", "pdf_size": 253591, "gs_citation": 310, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2050659949660009746&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 22, "aff": "Stanford University; Stanford University; Stanford University; Stanford University; Stanford University; University of California, Santa Cruz", "aff_domain": "cs.stanford.edu;cs.stanford.edu;cs.stanford.edu;cs.stanford.edu;cs.stanford.edu;cs.stanford.edu", "email": "cs.stanford.edu;cs.stanford.edu;cs.stanford.edu;cs.stanford.edu;cs.stanford.edu;cs.stanford.edu", "github": "", "project": "http://robotics.stanford.edu/~drago/cc/video.mp4", "author_num": 6, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;0;0;1", "aff_unique_norm": "Stanford University;University of California, Santa Cruz", "aff_unique_dep": ";", "aff_unique_url": "https://www.stanford.edu;https://www.ucsc.edu", "aff_unique_abbr": "Stanford;UCSC", "aff_campus_unique_index": "0;0;0;0;0;1", "aff_campus_unique": "Stanford;Santa Cruz", "aff_country_unique_index": "0;0;0;0;0;0", "aff_country_unique": "United States" }, { "id": "d778cfde22", "title": "The Entire Regularization Path for the Support Vector Machine", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/403ea2e851b9ab04a996beab4a480a30-Abstract.html", "author": "Saharon Rosset; Robert Tibshirani; Ji Zhu; Trevor J. Hastie", "abstract": "In this paper we argue that the choice of the SVM cost parameter can be critical. We then derive an algorithm that can fit the entire path of SVM solutions for every value of the cost parameter, with essentially the same computational cost as fitting one SVM model.", "bibtex": "@inproceedings{NIPS2004_403ea2e8,\n author = {Rosset, Saharon and Tibshirani, Robert and Zhu, Ji and Hastie, Trevor},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {The Entire Regularization Path for the Support Vector Machine},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/403ea2e851b9ab04a996beab4a480a30-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/403ea2e851b9ab04a996beab4a480a30-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/403ea2e851b9ab04a996beab4a480a30-Metadata.json", "review": "", "metareview": "", "pdf_size": 162580, "gs_citation": 972, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1505526605412672813&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 21, "aff": "Department of Statistics, Stanford University; IBM Watson Research Center; Department of Statistics, Stanford University; Department of Statistics, University of Michigan", "aff_domain": "stanford.edu;us.ibm.com;stanford.edu;umich.edu", "email": "stanford.edu;us.ibm.com;stanford.edu;umich.edu", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;1;0;2", "aff_unique_norm": "Stanford University;IBM;University of Michigan", "aff_unique_dep": "Department of Statistics;Watson Research Center;Department of Statistics", "aff_unique_url": "https://www.stanford.edu;https://www.ibm.com/watson;https://www.umich.edu", "aff_unique_abbr": "Stanford;IBM Watson;UM", "aff_campus_unique_index": "0;1;0;2", "aff_campus_unique": "Stanford;Yorktown Heights;Ann Arbor", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "id": "4cabfd21c3", "title": "The Laplacian PDF Distance: A Cost Function for Clustering in a Kernel Feature Space", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/41ab1b1d6bf108f388dfb5cd282fb76c-Abstract.html", "author": "Robert Jenssen; Deniz Erdogmus; Jose Principe; Torbj\u00f8rn Eltoft", "abstract": "A new distance measure between probability density functions (pdfs) is introduced, which we refer to as the Laplacian pdf dis- tance. The Laplacian pdf distance exhibits a remarkable connec- tion to Mercer kernel based learning theory via the Parzen window technique for density estimation. In a kernel feature space defined by the eigenspectrum of the Laplacian data matrix, this pdf dis- tance is shown to measure the cosine of the angle between cluster mean vectors. The Laplacian data matrix, and hence its eigenspec- trum, can be obtained automatically based on the data at hand, by optimal Parzen window selection. We show that the Laplacian pdf distance has an interesting interpretation as a risk function connected to the probability of error.", "bibtex": "@inproceedings{NIPS2004_41ab1b1d,\n author = {Jenssen, Robert and Erdogmus, Deniz and Principe, Jose and Eltoft, Torbj\\o rn},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {The Laplacian PDF Distance: A Cost Function for Clustering in a Kernel Feature Space},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/41ab1b1d6bf108f388dfb5cd282fb76c-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/41ab1b1d6bf108f388dfb5cd282fb76c-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/41ab1b1d6bf108f388dfb5cd282fb76c-Metadata.json", "review": "", "metareview": "", "pdf_size": 149555, "gs_citation": 57, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3618280079105715846&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 11, "aff": "Department of Physics, University of Troms\u00f8, Norway; Computational NeuroEngineering Laboratory, University of Florida, USA; Computational NeuroEngineering Laboratory, University of Florida, USA; Department of Physics, University of Troms\u00f8, Norway", "aff_domain": "phys.uit.no; ; ; ", "email": "phys.uit.no; ; ; ", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;1;1;0", "aff_unique_norm": "University of Troms\u00f8;University of Florida", "aff_unique_dep": "Department of Physics;Computational NeuroEngineering Laboratory", "aff_unique_url": "https://uit.no;https://www.ufl.edu", "aff_unique_abbr": ";UF", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1;1;0", "aff_country_unique": "Norway;United States" }, { "id": "6051417328", "title": "The Power of Selective Memory: Self-Bounded Learning of Prediction Suffix Trees", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/b21f9f98829dea9a48fd8aaddc1f159d-Abstract.html", "author": "Ofer Dekel; Shai Shalev-shwartz; Yoram Singer", "abstract": "Prediction suffix trees (PST) provide a popular and effective tool for tasks such as compression, classification, and language modeling. In this pa- per we take a decision theoretic view of PSTs for the task of sequence prediction. Generalizing the notion of margin to PSTs, we present an on- line PST learning algorithm and derive a loss bound for it. The depth of the PST generated by this algorithm scales linearly with the length of the input. We then describe a self-bounded enhancement of our learning al- gorithm which automatically grows a bounded-depth PST. We also prove an analogous mistake-bound for the self-bounded algorithm. The result is an efficient algorithm that neither relies on a-priori assumptions on the shape or maximal depth of the target PST nor does it require any param- eters. To our knowledge, this is the first provably-correct PST learning algorithm which generates a bounded-depth PST while being competi- tive with any fixed PST determined in hindsight.", "bibtex": "@inproceedings{NIPS2004_b21f9f98,\n author = {Dekel, Ofer and Shalev-shwartz, Shai and Singer, Yoram},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {The Power of Selective Memory: Self-Bounded Learning of Prediction Suffix Trees},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/b21f9f98829dea9a48fd8aaddc1f159d-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/b21f9f98829dea9a48fd8aaddc1f159d-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/b21f9f98829dea9a48fd8aaddc1f159d-Metadata.json", "review": "", "metareview": "", "pdf_size": 95516, "gs_citation": 25, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10935818624074522551&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "School of Computer Science & Engineering, The Hebrew University, Jerusalem 91904, Israel; School of Computer Science & Engineering, The Hebrew University, Jerusalem 91904, Israel; School of Computer Science & Engineering, The Hebrew University, Jerusalem 91904, Israel", "aff_domain": "cs.huji.ac.il;cs.huji.ac.il;cs.huji.ac.il", "email": "cs.huji.ac.il;cs.huji.ac.il;cs.huji.ac.il", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "Hebrew University", "aff_unique_dep": "School of Computer Science & Engineering", "aff_unique_url": "http://www.huji.ac.il", "aff_unique_abbr": "HUJI", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Jerusalem", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Israel" }, { "id": "4b5912d9f1", "title": "The Rescorla-Wagner Algorithm and Maximum Likelihood Estimation of Causal Parameters", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/8f125da0b3432ed853c0b6f7ee5aaa6b-Abstract.html", "author": "Alan L. Yuille", "abstract": "This paper analyzes generalization of the classic Rescorla-Wagner (R- W) learning algorithm and studies their relationship to Maximum Like- lihood estimation of causal parameters. We prove that the parameters of two popular causal models, P and P C, can be learnt by the same generalized linear Rescorla-Wagner (GLRW) algorithm provided gener- icity conditions apply. We characterize the fixed points of these GLRW algorithms and calculate the fluctuations about them, assuming that the input is a set of i.i.d. samples from a fixed (unknown) distribution. We describe how to determine convergence conditions and calculate conver- gence rates for the GLRW algorithms under these conditions.", "bibtex": "@inproceedings{NIPS2004_8f125da0,\n author = {Yuille, Alan L},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {The Rescorla-Wagner Algorithm and Maximum Likelihood Estimation of Causal Parameters},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/8f125da0b3432ed853c0b6f7ee5aaa6b-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/8f125da0b3432ed853c0b6f7ee5aaa6b-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/8f125da0b3432ed853c0b6f7ee5aaa6b-Metadata.json", "review": "", "metareview": "", "pdf_size": 94536, "gs_citation": 11, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12776122113613343141&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "Department of Statistics, University of California at Los Angeles", "aff_domain": "stat.ucla.edu", "email": "stat.ucla.edu", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "University of California, Los Angeles", "aff_unique_dep": "Department of Statistics", "aff_unique_url": "https://www.ucla.edu", "aff_unique_abbr": "UCLA", "aff_campus_unique_index": "0", "aff_campus_unique": "Los Angeles", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "id": "4a4961a620", "title": "The Variational Ising Classifier (VIC) Algorithm for Coherently Contaminated Data", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/a3d06db1f8c85b2837b4603a51834425-Abstract.html", "author": "Oliver Williams; Andrew Blake; Roberto Cipolla", "abstract": "There has been substantial progress in the past decade in the development of object classifiers for images, for example of faces, humans and vehi- cles. Here we address the problem of contaminations (e.g. occlusion, shadows) in test images which have not explicitly been encountered in training data. The Variational Ising Classifier (VIC) algorithm models contamination as a mask (a field of binary variables) with a strong spa- tial coherence prior. Variational inference is used to marginalize over contamination and obtain robust classification. In this way the VIC ap- proach can turn a kernel classifier for clean data into one that can tolerate contamination, without any specific training on contaminated positives.", "bibtex": "@inproceedings{NIPS2004_a3d06db1,\n author = {Williams, Oliver and Blake, Andrew and Cipolla, Roberto},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {The Variational Ising Classifier (VIC) Algorithm for Coherently Contaminated Data},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/a3d06db1f8c85b2837b4603a51834425-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/a3d06db1f8c85b2837b4603a51834425-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/a3d06db1f8c85b2837b4603a51834425-Metadata.json", "review": "", "metareview": "", "pdf_size": 102451, "gs_citation": 14, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8938057567353857243&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "Dept. of Engineering, University of Cambridge; Microsoft Research Ltd., Cambridge, UK; Dept. 
of Engineering, University of Cambridge", "aff_domain": "cam.ac.uk; ; ", "email": "cam.ac.uk; ; ", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;0", "aff_unique_norm": "University of Cambridge;Microsoft", "aff_unique_dep": "Dept. of Engineering;Microsoft Research", "aff_unique_url": "https://www.cam.ac.uk;https://www.microsoft.com/en-us/research", "aff_unique_abbr": "Cambridge;MSR", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Cambridge", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United Kingdom" }, { "id": "af35b44451", "title": "The power of feature clustering: An application to object detection", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/06fe1c234519f6812fc4c1baae25d6af-Abstract.html", "author": "Shai Avidan; Moshe Butman", "abstract": "We give a fast rejection scheme that is based on image segments and demonstrate it on the canonical example of face detection. However, in- stead of focusing on the detection step we focus on the rejection step and show that our method is simple and fast to be learned, thus making it an excellent pre-processing step to accelerate standard machine learning classifiers, such as neural-networks, Bayes classifiers or SVM. We de- compose a collection of face images into regions of pixels with similar behavior over the image set. The relationships between the mean and variance of image segments are used to form a cascade of rejectors that can reject over 99.8% of image patches, thus only a small fraction of the image patches must be passed to a full-scale classifier. Moreover, the training time for our method is much less than an hour, on a standard PC. The shape of the features (i.e. image segments) we use is data-driven, they are very cheap to compute and they form a very low dimensional feature space in which exhaustive search for the best features is tractable.", "bibtex": "@inproceedings{NIPS2004_06fe1c23,\n author = {Avidan, Shai and Butman, Moshe},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {The power of feature clustering: An application to object detection},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/06fe1c234519f6812fc4c1baae25d6af-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/06fe1c234519f6812fc4c1baae25d6af-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/06fe1c234519f6812fc4c1baae25d6af-Metadata.json", "review": "", "metareview": "", "pdf_size": 78746, "gs_citation": 10, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9395873508306500830&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "Mitsibishi Electric Research Labs; Adyoron Intelligent Systems LTD.", "aff_domain": "merl.com;adyoron.com", "email": "merl.com;adyoron.com", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "Mitsubishi Electric Research Laboratories;Adyoron Intelligent Systems", "aff_unique_dep": ";", "aff_unique_url": "https://www.merl.com;", "aff_unique_abbr": "MERL;", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1", "aff_country_unique": "United States;Unknown" }, { "id": "39d61526bd", "title": "Theories of Access Consciousness", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/ff2cc3b8c7caeaa068f2abbc234583f5-Abstract.html", "author": "Michael D. Colagrosso; Michael Mozer", "abstract": "Theories of access consciousness address how it is that some mental states but not others are available for evaluation, choice behavior, and verbal report. Farah, O'Reilly, and Vecera (1994) argue that quality of representation is critical; De- haene, Sergent, and Changeux (2003) argue that the ability to communicate rep- resentations is critical. We present a probabilistic information transmission or PIT model that suggests both of these conditions are essential for access con- sciousness. Having successfully modeled data from the repetition priming litera- ture in the past, we use the PIT model to account for data from two experiments on subliminal priming, showing that the model produces priming even in the ab- sence of accessibility and reportability of internal states. The model provides a mechanistic basis for understanding the dissociation of priming and awareness.", "bibtex": "@inproceedings{NIPS2004_ff2cc3b8,\n author = {Colagrosso, Michael and Mozer, Michael C},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Theories of Access Consciousness},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/ff2cc3b8c7caeaa068f2abbc234583f5-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/ff2cc3b8c7caeaa068f2abbc234583f5-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/ff2cc3b8c7caeaa068f2abbc234583f5-Metadata.json", "review": "", "metareview": "", "pdf_size": 665035, "gs_citation": 13, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12952038524016948009&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "Department of Computer Science, Colorado School of Mines, Golden, CO 80401 USA; Institute of Cognitive Science, University of Colorado, Boulder, CO 80309 USA", "aff_domain": "mines.edu;colorado.edu", "email": "mines.edu;colorado.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "Colorado School of Mines;University of Colorado Boulder", "aff_unique_dep": "Department of Computer Science;Institute of Cognitive Science", "aff_unique_url": "https://www.mines.edu;https://www.colorado.edu", "aff_unique_abbr": "CSM;CU Boulder", "aff_campus_unique_index": "0;1", "aff_campus_unique": "Golden;Boulder", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "29be59dd29", "title": "Theory of localized synfire chain: characteristic propagation speed of stable spike pattern", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/ab452534c5ce28c4fbb0e102d4a4fb2e-Abstract.html", "author": "Kosuke Hamaguchi; Masato Okada; Kazuyuki Aihara", "abstract": "Repeated spike patterns have often been taken as evidence for the synfire chain, a phenomenon that a stable spike synchrony propagates through a feedforward network. Inter-spike intervals which represent a repeated spike pattern are influenced by the propagation speed of a spike packet. However, the relation between the propagation speed and network struc- ture is not well understood. While it is apparent that the propagation speed depends on the excitatory synapse strength, it might also be related to spike patterns. We analyze a feedforward network with Mexican-Hat- type connectivity (FMH) using the Fokker-Planck equation. We show that both a uniform and a localized spike packet are stable in the FMH in a certain parameter region. We also demonstrate that the propagation speed depends on the distinct firing patterns in the same network.", "bibtex": "@inproceedings{NIPS2004_ab452534,\n author = {Hamaguchi, Kosuke and Okada, Masato and Aihara, Kazuyuki},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Theory of localized synfire chain: characteristic propagation speed of stable spike pattern},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/ab452534c5ce28c4fbb0e102d4a4fb2e-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/ab452534c5ce28c4fbb0e102d4a4fb2e-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/ab452534c5ce28c4fbb0e102d4a4fb2e-Metadata.json", "review": "", "metareview": "", "pdf_size": 182404, "gs_citation": 4, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12814771555762430405&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "RIKEN Brain Science Institute, Wako, Saitama 351-0198, JAPAN; Dept. of Complexity Science and Engineering, University of Tokyo, Kashiwa, Chiba, 277-8561, JAPAN; Institute of Industrial Science, University of Tokyo & ERATO Aihara Complexity Modeling Project JST, Meguro, Tokyo 153-8505, JAPAN", "aff_domain": "brain.riken.jp;brain.riken.jp;sat.t.u-tokyo.ac.jp", "email": "brain.riken.jp;brain.riken.jp;sat.t.u-tokyo.ac.jp", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;1", "aff_unique_norm": "RIKEN Brain Science Institute;University of Tokyo", "aff_unique_dep": "Brain Science Institute;Dept. of Complexity Science and Engineering", "aff_unique_url": "https://briken.org;https://www.u-tokyo.ac.jp", "aff_unique_abbr": "RIKEN BSI;UTokyo", "aff_campus_unique_index": "0;1;2", "aff_campus_unique": "Wako;Kashiwa;Tokyo", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Japan" }, { "id": "e3411824c2", "title": "Trait Selection for Assessing Beef Meat Quality Using Non-linear SVM", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/fa2e8c4385712f9a1d24c363a2cbe5b8-Abstract.html", "author": "Juan Coz; Gustavo F. Bay\u00f3n; Jorge D\u00edez; Oscar Luaces; Antonio Bahamonde; Carlos Sa\u00f1udo", "abstract": "In this paper we show that it is possible to model sensory impressions of consumers about beef meat. This is not a straightforward task; the reason is that when we are aiming to induce a function that maps object descriptions into ratings, we must consider that consumers' ratings are just a way to express their preferences about the products presented in the same testing session. Therefore, we had to use a special purpose SVM polynomial kernel. The training data set used collects the ratings of panels of experts and consumers; the meat was provided by 103 bovines of 7 Spanish breeds with different carcass weights and aging periods. Additionally, to gain insight into consumer preferences, we used feature subset selection tools. The result is that aging is the most important trait for improving consumers' appreciation of beef meat.", "bibtex": "@inproceedings{NIPS2004_fa2e8c43,\n author = {Coz, Juan and Bay\\'{o}n, Gustavo and D\\'{\\i}ez, Jorge and Luaces, Oscar and Bahamonde, Antonio and Sa\\~{n}udo, Carlos},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Trait Selection for Assessing Beef Meat Quality Using Non-linear SVM},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/fa2e8c4385712f9a1d24c363a2cbe5b8-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/fa2e8c4385712f9a1d24c363a2cbe5b8-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/fa2e8c4385712f9a1d24c363a2cbe5b8-Metadata.json", "review": "", "metareview": "", "pdf_size": 99672, "gs_citation": 32, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16129175225049320938&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 12, "aff": "Artificial Intelligence Center, University of Oviedo at Gij\u00f3n; Artificial Intelligence Center, University of Oviedo at Gij\u00f3n; Artificial Intelligence Center, University of Oviedo at Gij\u00f3n; Artificial Intelligence Center, University of Oviedo at Gij\u00f3n; Artificial Intelligence Center, University of Oviedo at Gij\u00f3n; Facultad de Veterinaria, University of Zaragoza", "aff_domain": "aic.uniovi.es; ; ; ; ;posta.unizar.es", "email": "aic.uniovi.es; ; ; ; ;posta.unizar.es", "github": "", "project": "", "author_num": 6, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;0;0;1", "aff_unique_norm": "University of Oviedo;University of Zaragoza", "aff_unique_dep": "Artificial Intelligence Center;Facultad de Veterinaria", "aff_unique_url": "https://www.uniovi.es;https://www.unizar.es", "aff_unique_abbr": ";UniZar", "aff_campus_unique_index": "0;0;0;0;0", "aff_campus_unique": "Gij\u00f3n;", "aff_country_unique_index": "0;0;0;0;0;0", "aff_country_unique": "Spain" }, { "id": "d3eb058c2b", "title": "Triangle Fixing Algorithms for the Metric Nearness Problem", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/4ebccfb3e317c7789f04f7a558df4537-Abstract.html", "author": "Suvrit Sra; Joel Tropp; Inderjit S. Dhillon", "abstract": "Various problems in machine learning, databases, and statistics involve pairwise distances among a set of objects. It is often desirable for these distances to satisfy the properties of a metric, especially the triangle in- equality. Applications where metric data is useful include clustering, classification, metric-based indexing, and approximation algorithms for various graph problems. This paper presents the Metric Nearness Prob- lem: Given a dissimilarity matrix, find the \"nearest\" matrix of distances that satisfy the triangle inequalities. For p nearness measures, this pa- per develops efficient triangle fixing algorithms that compute globally optimal solutions by exploiting the inherent structure of the problem. Empirically, the algorithms have time and storage costs that are linear in the number of triangle constraints. The methods can also be easily parallelized for additional speed.", "bibtex": "@inproceedings{NIPS2004_4ebccfb3,\n author = {Sra, Suvrit and Tropp, Joel and Dhillon, Inderjit},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Triangle Fixing Algorithms for the Metric Nearness Problem},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/4ebccfb3e317c7789f04f7a558df4537-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/4ebccfb3e317c7789f04f7a558df4537-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/4ebccfb3e317c7789f04f7a558df4537-Metadata.json", "review": "", "metareview": "", "pdf_size": 80126, "gs_citation": 31, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18126282438142881658&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 13, "aff": "Dept. of Computer Sciences, The Univ. of Texas at Austin; Dept. of Computer Sciences, The Univ. of Texas at Austin; Dept. of Mathematics, The Univ. of Michigan at Ann Arbor", "aff_domain": "cs.utexas.edu;cs.utexas.edu;umich.edu", "email": "cs.utexas.edu;cs.utexas.edu;umich.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;1", "aff_unique_norm": "University of Texas at Austin;University of Michigan", "aff_unique_dep": "Department of Computer Sciences;Department of Mathematics", "aff_unique_url": "https://www.utexas.edu;https://www.umich.edu", "aff_unique_abbr": "UT Austin;UM", "aff_campus_unique_index": "0;0;1", "aff_campus_unique": "Austin;Ann Arbor", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "5df2fe4f84", "title": "Two-Dimensional Linear Discriminant Analysis", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/86ecfcbc1e9f1ae5ee2d71910877da36-Abstract.html", "author": "Jieping Ye; Ravi Janardan; Qi Li", "abstract": "Linear Discriminant Analysis (LDA) is a well-known scheme for feature extraction and dimension reduction. It has been used widely in many applications involving high-dimensional data, such as face recognition and image retrieval. An intrinsic limitation of classical LDA is the so-called singularity problem, that is, it fails when all scatter matrices are singular. A well-known approach to deal with the singularity problem is to apply an intermediate dimension reduction stage using Principal Component Analysis (PCA) before LDA. The algorithm, called PCA+LDA, is used widely in face recognition. However, PCA+LDA has high costs in time and space, due to the need for an eigen-decomposition involving the scatter matrices. In this paper, we propose a novel LDA algorithm, namely 2DLDA, which stands for 2-Dimensional Linear Discriminant Analysis. 2DLDA overcomes the singularity problem implicitly, while achieving efficiency. The key difference between 2DLDA and classical LDA lies in the model for data representation. Classical LDA works with vectorized representations of data, while the 2DLDA algorithm works with data in matrix representation. To further reduce the dimension by 2DLDA, the combination of 2DLDA and classical LDA, namely 2DLDA+LDA, is studied, where LDA is preceded by 2DLDA. The proposed algorithms are applied on face recognition and compared with PCA+LDA. Experiments show that 2DLDA and 2DLDA+LDA achieve competitive recognition accuracy, while being much more efficient.", "bibtex": "@inproceedings{NIPS2004_86ecfcbc,\n author = {Ye, Jieping and Janardan, Ravi and Li, Qi},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Two-Dimensional Linear Discriminant Analysis},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/86ecfcbc1e9f1ae5ee2d71910877da36-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/86ecfcbc1e9f1ae5ee2d71910877da36-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/86ecfcbc1e9f1ae5ee2d71910877da36-Metadata.json", "review": "", "metareview": "", "pdf_size": 83520, "gs_citation": 910, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18291101942268589458&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 13, "aff": "Department of CSE, University of Minnesota; Department of CSE, University of Minnesota; Department of CIS, University of Delaware", "aff_domain": "cs.umn.edu;cs.umn.edu;cis.udel.edu", "email": "cs.umn.edu;cs.umn.edu;cis.udel.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;1", "aff_unique_norm": "University of Minnesota;University of Delaware", "aff_unique_dep": "Department of Computer Science and Engineering;Department of CIS", "aff_unique_url": "https://www.umn.edu;https://www.udel.edu", "aff_unique_abbr": "UMN;UD", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Minneapolis;", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "dd214449f2", "title": "Unsupervised Variational Bayesian Learning of Nonlinear Models", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/8169e05e2a0debcb15458f2cc1eff0ea-Abstract.html", "author": "Antti Honkela; Harri Valpola", "abstract": "In this paper we present a framework for using multi-layer perceptron (MLP) networks in nonlinear generative models trained by variational Bayesian learning. The nonlinearity is handled by linearizing it using a Gauss\u2013Hermite quadrature at the hidden neurons. This yields an accurate approximation for cases of large posterior variance. The method can be used to derive nonlinear counterparts for linear algorithms such as factor analysis, independent component/factor analysis and state-space models. This is demonstrated with a nonlinear factor analysis experiment in which even 20 sources can be estimated from a real world speech data set.", "bibtex": "@inproceedings{NIPS2004_8169e05e,\n author = {Honkela, Antti and Valpola, Harri},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Unsupervised Variational Bayesian Learning of Nonlinear Models},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/8169e05e2a0debcb15458f2cc1eff0ea-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/8169e05e2a0debcb15458f2cc1eff0ea-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/8169e05e2a0debcb15458f2cc1eff0ea-Metadata.json", "review": "", "metareview": "", "pdf_size": 100581, "gs_citation": 74, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1693227737982293584&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 17, "aff": ";", "aff_domain": ";", "email": ";", "github": "", "project": "http://www.cis.hut.fi/projects/bayes/", "author_num": 2, "track": "main", "status": "Poster" }, { "id": "7147acb58b", "title": "Using Machine Learning to Break Visual Human Interaction Proofs (HIPs)", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/283085d30e10513624c8cece7993f4de-Abstract.html", "author": "Kumar Chellapilla; Patrice Y. Simard", "abstract": "Machine learning is often used to automatically solve human tasks. In this paper, we look for tasks where machine learning algorithms are not as good as humans with the hope of gaining insight into their current limitations. We studied various Human Interactive Proofs (HIPs) on the market, because they are systems designed to tell computers and humans apart by posing challenges presumably too hard for computers. We found that most HIPs are pure recognition tasks which can easily be broken using machine learning. The harder HIPs use a combination of segmentation and recognition tasks. From this observation, we found that building segmentation tasks is the most effective way to confuse machine learning algorithms. This has enabled us to build effective HIPs (which we deployed in MSN Passport), as well as design challenging segmentation tasks for machine learning algorithms.", "bibtex": "@inproceedings{NIPS2004_283085d3,\n author = {Chellapilla, Kumar and Simard, Patrice},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Using Machine Learning to Break Visual Human Interaction Proofs (HIPs)},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/283085d30e10513624c8cece7993f4de-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/283085d30e10513624c8cece7993f4de-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/283085d30e10513624c8cece7993f4de-Metadata.json", "review": "", "metareview": "", "pdf_size": 365006, "gs_citation": 412, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16844487243595945688&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "Microsoft Research; Microsoft Research", "aff_domain": "microsoft.com;microsoft.com", "email": "microsoft.com;microsoft.com", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Microsoft", "aff_unique_dep": "Microsoft Research", "aff_unique_url": "https://www.microsoft.com/en-us/research", "aff_unique_abbr": "MSR", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "ca7d47d131", "title": "Using Random Forests in the Structured Language Model", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/7edccc661418aeb5761dbcdc06ad490c-Abstract.html", "author": "Peng Xu; Frederick Jelinek", "abstract": "In this paper, we explore the use of Random Forests (RFs) in the structured language model (SLM), which uses rich syntactic information in predicting the next word based on words already seen. The goal in this work is to construct RFs by randomly growing Decision Trees (DTs) using syntactic information and investigate the performance of the SLM modeled by the RFs in automatic speech recognition. RFs, which were originally developed as classifiers, are a combination of decision tree classifiers. Each tree is grown based on random training data sampled independently and with the same distribution for all trees in the forest, and a random selection of possible questions at each node of the decision tree. Our approach extends the original idea of RFs to deal with the data sparseness problem encountered in language modeling. RFs have been studied in the context of n-gram language modeling and have been shown to generalize well to unseen data. We show in this paper that RFs using syntactic information can also achieve better performance in both perplexity (PPL) and word error rate (WER) in a large vocabulary speech recognition system, compared to a baseline that uses Kneser-Ney smoothing.", "bibtex": "@inproceedings{NIPS2004_7edccc66,\n author = {Xu, Peng and Jelinek, Frederick},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Using Random Forests in the Structured Language Model},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/7edccc661418aeb5761dbcdc06ad490c-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/7edccc661418aeb5761dbcdc06ad490c-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/7edccc661418aeb5761dbcdc06ad490c-Metadata.json", "review": "", "metareview": "", "pdf_size": 83776, "gs_citation": 5, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7184353539562162674&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Center for Language and Speech Processing, Department of Electrical and Computer Engineering, The Johns Hopkins University; Center for Language and Speech Processing, Department of Electrical and Computer Engineering, The Johns Hopkins University", "aff_domain": "jhu.edu;jhu.edu", "email": "jhu.edu;jhu.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Johns Hopkins University", "aff_unique_dep": "Department of Electrical and Computer Engineering", "aff_unique_url": "https://www.jhu.edu", "aff_unique_abbr": "JHU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "7e23056e19", "title": "Using the Equivalent Kernel to Understand Gaussian Process Regression", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/d89a66c7c80a29b1bdbab0f2a1a94af8-Abstract.html", "author": "Peter Sollich; Christopher Williams", "abstract": "The equivalent kernel [1] is a way of understanding how Gaussian process regression works for large sample sizes based on a continuum limit. In this paper we show (1) how to approximate the equivalent kernel of the widely-used squared exponential (or Gaussian) kernel and related kernels, and (2) how analysis using the equivalent kernel helps to understand the learning curves for Gaussian processes.", "bibtex": "@inproceedings{NIPS2004_d89a66c7,\n author = {Sollich, Peter and Williams, Christopher},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Using the Equivalent Kernel to Understand Gaussian Process Regression},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/d89a66c7c80a29b1bdbab0f2a1a94af8-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/d89a66c7c80a29b1bdbab0f2a1a94af8-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/d89a66c7c80a29b1bdbab0f2a1a94af8-Metadata.json", "review": "", "metareview": "", "pdf_size": 305923, "gs_citation": 57, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15620220803170787114&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 15, "aff": "Dept of Mathematics, King\u2019s College London; School of Informatics, University of Edinburgh", "aff_domain": "kcl.ac.uk;ed.ac.uk", "email": "kcl.ac.uk;ed.ac.uk", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "King\u2019s College London;University of Edinburgh", "aff_unique_dep": "Dept of Mathematics;School of Informatics", "aff_unique_url": "https://www.kcl.ac.uk;https://www.ed.ac.uk", "aff_unique_abbr": "KCL;Edinburgh", "aff_campus_unique_index": "0;1", "aff_campus_unique": "London;Edinburgh", "aff_country_unique_index": "0;0", "aff_country_unique": "United Kingdom" }, { "id": "a2ec43d349", "title": "VDCBPI: an Approximate Scalable Algorithm for Large POMDPs", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/81c2f886f91e18fe16d6f4e865877cb6-Abstract.html", "author": "Pascal Poupart; Craig Boutilier", "abstract": "Existing algorithms for discrete partially observable Markov decision processes can at best solve problems of a few thousand states due to two important sources of intractability: the curse of dimensionality and the policy space complexity. This paper describes a new algorithm (VDCBPI) that mitigates both sources of intractability by combining the Value Directed Compression (VDC) technique [13] with Bounded Policy Iteration (BPI) [14]. The scalability of VDCBPI is demonstrated on synthetic network management problems with up to 33 million states.", "bibtex": "@inproceedings{NIPS2004_81c2f886,\n author = {Poupart, Pascal and Boutilier, Craig},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {VDCBPI: an Approximate Scalable Algorithm for Large POMDPs},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/81c2f886f91e18fe16d6f4e865877cb6-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/81c2f886f91e18fe16d6f4e865877cb6-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/81c2f886f91e18fe16d6f4e865877cb6-Metadata.json", "review": "", "metareview": "", "pdf_size": 441256, "gs_citation": 128, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5850346656123454216&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 16, "aff": "Department of Computer Science, University of Toronto; Department of Computer Science, University of Toronto", "aff_domain": "cs.toronto.edu;cs.toronto.edu", "email": "cs.toronto.edu;cs.toronto.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of Toronto", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.utoronto.ca", "aff_unique_abbr": "U of T", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Toronto", "aff_country_unique_index": "0;0", "aff_country_unique": "Canada" }, { "id": "b0a8e21c1f", "title": "Validity Estimates for Loopy Belief Propagation on Binary Real-world Networks", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/cdcb2f5c7b071143529ef7f2705dfbc4-Abstract.html", "author": "Joris M. Mooij; Hilbert J. Kappen", "abstract": "We introduce a computationally efficient method to estimate the validity of the BP method as a function of graph topology, the connectivity strength, frustration and network size. We present numerical results that demonstrate the correctness of our estimates for the uniform random model and for a real-world network (\"C. Elegans\"). Although the method is restricted to pair-wise interactions, no local evidence (zero \"biases\") and binary variables, we believe that its predictions correctly capture the limitations of BP for inference and MAP estimation on arbitrary graphical models. Using this approach, we find that BP always performs better than MF. Especially for large networks with broad degree distributions (such as scale-free networks) BP turns out to significantly outperform MF.", "bibtex": "@inproceedings{NIPS2004_cdcb2f5c,\n author = {Mooij, Joris M and Kappen, Hilbert},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Validity Estimates for Loopy Belief Propagation on Binary Real-world Networks},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/cdcb2f5c7b071143529ef7f2705dfbc4-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/cdcb2f5c7b071143529ef7f2705dfbc4-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/cdcb2f5c7b071143529ef7f2705dfbc4-Metadata.json", "review": "", "metareview": "", "pdf_size": 106695, "gs_citation": 21, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15773944081162722857&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 13, "aff": "Dept. of Biophysics, Inst. for Neuroscience, Radboud Univ. Nijmegen; Dept. of Biophysics, Inst. for Neuroscience, Radboud Univ. 
Nijmegen", "aff_domain": "science.ru.nl;science.ru.nl", "email": "science.ru.nl;science.ru.nl", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Radboud University Nijmegen", "aff_unique_dep": "Department of Biophysics, Institute for Neuroscience", "aff_unique_url": "https://www.ru.nl", "aff_unique_abbr": "Radboud Univ.", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "Netherlands" }, { "id": "87323aaa3d", "title": "Variational Minimax Estimation of Discrete Distributions under KL Loss", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/c57168a952f5d46724cf35dfc3d48a7f-Abstract.html", "author": "Liam Paninski", "abstract": "We develop a family of upper and lower bounds on the worst-case ex- pected KL loss for estimating a discrete distribution on a finite number m of points, given N i.i.d. samples. Our upper bounds are approximation- theoretic, similar to recent bounds for estimating discrete entropy; the lower bounds are Bayesian, based on averages of the KL loss under Dirichlet distributions. The upper bounds are convex in their parameters and thus can be minimized by descent methods to provide estimators with low worst-case error; the lower bounds are indexed by a one-dimensional parameter and are thus easily maximized. Asymptotic analysis of the bounds demonstrates the uniform KL-consistency of a wide class of es- timators as c = N/m (no matter how slowly), and shows that no estimator is consistent for c bounded (in contrast to entropy estima- tion). Moreover, the bounds are asymptotically tight as c 0 or , and are shown numerically to be tight within a factor of two for all c. Finally, in the sparse-data limit c 0, we find that the Dirichlet-Bayes (add-constant) estimator with parameter scaling like -c log(c) optimizes both the upper and lower bounds, suggesting an optimal choice of the \"add-constant\" parameter in this regime.", "bibtex": "@inproceedings{NIPS2004_c57168a9,\n author = {Paninski, Liam},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Variational Minimax Estimation of Discrete Distributions under KL Loss},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/c57168a952f5d46724cf35dfc3d48a7f-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/c57168a952f5d46724cf35dfc3d48a7f-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/c57168a952f5d46724cf35dfc3d48a7f-Metadata.json", "review": "", "metareview": "", "pdf_size": 80212, "gs_citation": 39, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2899931029967976804&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Gatsby Computational Neuroscience Unit, University College London", "aff_domain": "gatsby.ucl.ac.uk", "email": "gatsby.ucl.ac.uk", "github": "", "project": "http://www.gatsby.ucl.ac.uk/~liam", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "University College London", "aff_unique_dep": "Gatsby Computational Neuroscience Unit", "aff_unique_url": "https://www.ucl.ac.uk", "aff_unique_abbr": "UCL", "aff_campus_unique_index": "0", "aff_campus_unique": "London", "aff_country_unique_index": "0", "aff_country_unique": "United Kingdom" }, { "id": "a2ee4fa486", "title": "Who's In the Picture", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/03fa2f7502f5f6b9169e67d17cbf51bb-Abstract.html", "author": "Tamara L. Berg; Alexander C. Berg; Jaety Edwards; David A. Forsyth", "abstract": "The context in which a name appears in a caption provides powerful cues as to who is depicted in the associated image. We obtain 44,773 face images, using a face detector, from approximately half a million captioned news images and automatically link names, obtained using a named entity recognizer, with these faces. A simple clustering method can produce fair results. We improve these results significantly by combining the clustering process with a model of the probability that an individual is depicted given its context. Once the labeling procedure is over, we have an accurately labeled set of faces, an appearance model for each individual depicted, and a natural language model that can produce accurate results on captions in isolation.", "bibtex": "@inproceedings{NIPS2004_03fa2f75,\n author = {Berg, Tamara and Berg, Alexander and Edwards, Jaety and Forsyth, David},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Who\\textquotesingle s In the Picture},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/03fa2f7502f5f6b9169e67d17cbf51bb-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/03fa2f7502f5f6b9169e67d17cbf51bb-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/03fa2f7502f5f6b9169e67d17cbf51bb-Metadata.json", "review": "", "metareview": "", "pdf_size": 185626, "gs_citation": 243, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3447488730675052876&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 30, "aff": "Computer Science Division, U.C. Berkeley; Computer Science Division, U.C. Berkeley; Computer Science Division, U.C. Berkeley; Computer Science Division, U.C. 
Berkeley", "aff_domain": "cs.berkeley.edu; ; ; ", "email": "cs.berkeley.edu; ; ; ", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "University of California, Berkeley", "aff_unique_dep": "Computer Science Division", "aff_unique_url": "https://www.berkeley.edu", "aff_unique_abbr": "UC Berkeley", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Berkeley", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "id": "247cc931c5", "title": "Worst-Case Analysis of Selective Sampling for Linear-Threshold Algorithms", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/92426b262d11b0ade77387cf8416e153-Abstract.html", "author": "Nicol\u00f2 Cesa-bianchi; Claudio Gentile; Luca Zaniboni", "abstract": "We provide a worst-case analysis of selective sampling algorithms for learning linear threshold functions. The algorithms considered in this paper are Perceptron-like algorithms, i.e., algorithms which can be ef\ufb01- ciently run in any reproducing kernel Hilbert space. Our algorithms ex- ploit a simple margin-based randomized rule to decide whether to query the current label. We obtain selective sampling algorithms achieving on average the same bounds as those proven for their deterministic coun- terparts, but using much fewer labels. We complement our theoretical \ufb01ndings with an empirical comparison on two text categorization tasks. The outcome of these experiments is largely predicted by our theoreti- cal results: Our selective sampling algorithms tend to perform as good as the algorithms receiving the true label after each classi\ufb01cation, while observing in practice substantially fewer labels.", "bibtex": "@inproceedings{NIPS2004_92426b26,\n author = {Cesa-bianchi, Nicol\\`{o} and Gentile, Claudio and Zaniboni, Luca},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {Worst-Case Analysis of Selective Sampling for Linear-Threshold Algorithms},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/92426b262d11b0ade77387cf8416e153-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/92426b262d11b0ade77387cf8416e153-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/92426b262d11b0ade77387cf8416e153-Metadata.json", "review": "", "metareview": "", "pdf_size": 99483, "gs_citation": 37, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2790189233489474963&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "DSI, University of Milan; Universit\u00e0 dell\u2019Insubria; DTI, University of Milan", "aff_domain": "dsi.unimi.it;dsi.unimi.it;dti.unimi.it", "email": "dsi.unimi.it;dsi.unimi.it;dti.unimi.it", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;0", "aff_unique_norm": "University of Milan;Universit\u00e0 dell\u2019Insubria", "aff_unique_dep": "DSI;", "aff_unique_url": "https://www.unimi.it;https://www.uninsubria.it", "aff_unique_abbr": "UniMi;", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Italy" }, { "id": "3048844117", "title": "\u2113\u2080-norm Minimization for Basis Selection", "site": "https://papers.nips.cc/paper_files/paper/2004/hash/b1c00bcd4b5183705c134b3365f8c45e-Abstract.html", "author": "David P. Wipf; Bhaskar D. Rao", "abstract": "Finding the sparsest, or minimum \u21130-norm, representation of a signal given an overcomplete dictionary of basis vectors is an important problem in many application domains. Unfortunately, the required optimization problem is often intractable because there is a combinatorial increase in the number of local minima as the number of candidate basis vectors increases. This deficiency has prompted most researchers to instead minimize surrogate measures, such as the \u21131-norm, that lead to more tractable computational methods. The downside of this procedure is that we have now introduced a mismatch between our ultimate goal and our objective function. In this paper, we demonstrate a sparse Bayesian learning-based method of minimizing the \u21130-norm while reducing the number of troublesome local minima. Moreover, we derive necessary conditions for local minima to occur via this approach and empirically demonstrate that there are typically many fewer for general problems of interest.", "bibtex": "@inproceedings{NIPS2004_b1c00bcd,\n author = {Wipf, David and Rao, Bhaskar},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {L. Saul and Y. Weiss and L. 
Bottou},\n pages = {},\n publisher = {MIT Press},\n title = {\\mathscr{l}\u2080-norm Minimization for Basis Selection},\n url = {https://proceedings.neurips.cc/paper_files/paper/2004/file/b1c00bcd4b5183705c134b3365f8c45e-Paper.pdf},\n volume = {17},\n year = {2004}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2004/file/b1c00bcd4b5183705c134b3365f8c45e-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2004/file/b1c00bcd4b5183705c134b3365f8c45e-Metadata.json", "review": "", "metareview": "", "pdf_size": 82447, "gs_citation": 0, "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:0sqYTxx4WfoJ:scholar.google.com/&scioq=%E2%84%93%E2%82%80-norm+Minimization+for+Basis+Selection&hl=en&as_sdt=0,33", "gs_version_total": 2, "aff": "Department of Electrical and Computer Engineering, University of California, San Diego; Department of Electrical and Computer Engineering, University of California, San Diego", "aff_domain": "ucsd.edu;ece.ucsd.edu", "email": "ucsd.edu;ece.ucsd.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of California, San Diego", "aff_unique_dep": "Department of Electrical and Computer Engineering", "aff_unique_url": "https://www.ucsd.edu", "aff_unique_abbr": "UCSD", "aff_campus_unique_index": "0;0", "aff_campus_unique": "San Diego", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" } ]