[ { "id": "59aa76267c", "title": "A Bayesian Approach to Diffusion Models of Decision-Making and Response Time", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/4b86ca48d90bd5f0978afa3a012503a4-Abstract.html", "author": "Michael D. Lee; Ian G. Fuss; Daniel J. Navarro", "abstract": "We present a computational Bayesian approach for Wiener diffusion models, which are prominent accounts of response time distributions in decision-making. We first develop a general closed-form analytic approximation to the response time distributions for one-dimensional diffusion processes, and derive the required Wiener diffusion as a special case. We use this result to undertake Bayesian modeling of benchmark data, using posterior sampling to draw inferences about the interesting psychological parameters. With the aid of the benchmark data, we show the Bayesian account has several advantages, including dealing naturally with the parameter variation needed to account for some key features of the data, and providing quantitative measures to guide decisions about model construction.", "bibtex": "@inproceedings{NIPS2006_4b86ca48,\n author = {Lee, Michael and Fuss, Ian and Navarro, Daniel},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {A Bayesian Approach to Diffusion Models of Decision-Making and Response Time},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/4b86ca48d90bd5f0978afa3a012503a4-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/4b86ca48d90bd5f0978afa3a012503a4-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/4b86ca48d90bd5f0978afa3a012503a4-Metadata.json", "review": "", "metareview": "", "pdf_size": 202628, "gs_citation": 38, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16666908498473015806&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 13, "aff": "Department of Cognitive Sciences, University of California, Irvine; Defence Science and Technology Organisation; School of Psychology, University of Adelaide", "aff_domain": "uci.edu;dsto.defence.gov.au;adelaide.edu.au", "email": "uci.edu;dsto.defence.gov.au;adelaide.edu.au", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;2", "aff_unique_norm": "University of California, Irvine;Defence Science and Technology Organisation;University of Adelaide", "aff_unique_dep": "Department of Cognitive Sciences;;School of Psychology", "aff_unique_url": "https://www.uci.edu;https://www.dst.defence.gov.au;https://www.adelaide.edu.au", "aff_unique_abbr": "UCI;DSTO;Adelaide", "aff_campus_unique_index": "0", "aff_campus_unique": "Irvine;", "aff_country_unique_index": "0;1;1", "aff_country_unique": "United States;Australia" }, { "id": "af661637b4", "title": "A Collapsed Variational Bayesian Inference Algorithm for Latent Dirichlet Allocation", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/532b7cbe070a3579f424988a040752f2-Abstract.html", "author": "Yee W. 
Teh; David Newman; Max Welling", "abstract": "Latent Dirichlet allocation (LDA) is a Bayesian network that has recently gained much popularity in applications ranging from document modeling to computer vision. Due to the large scale nature of these applications, current inference pro- cedures like variational Bayes and Gibbs sampling have been found lacking. In this paper we propose the collapsed variational Bayesian inference algorithm for LDA, and show that it is computationally ef\ufb01cient, easy to implement and signi\ufb01- cantly more accurate than standard variational Bayesian inference for LDA.", "bibtex": "@inproceedings{NIPS2006_532b7cbe,\n author = {Teh, Yee and Newman, David and Welling, Max},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {A Collapsed Variational Bayesian Inference Algorithm for Latent Dirichlet Allocation},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/532b7cbe070a3579f424988a040752f2-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/532b7cbe070a3579f424988a040752f2-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/532b7cbe070a3579f424988a040752f2-Metadata.json", "review": "", "metareview": "", "pdf_size": 93512, "gs_citation": 850, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11787922141817141068&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 22, "aff": "Gatsby Computational Neuroscience Unit, University College London; Bren School of Information and Computer Science, University of California, Irvine; Bren School of Information and Computer Science, University of California, Irvine", "aff_domain": "gatsby.ucl.ac.uk;ics.uci.edu;ics.uci.edu", "email": "gatsby.ucl.ac.uk;ics.uci.edu;ics.uci.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": 
"Poster", "aff_unique_index": "0;1;1", "aff_unique_norm": "University College London;University of California, Irvine", "aff_unique_dep": "Gatsby Computational Neuroscience Unit;Bren School of Information and Computer Science", "aff_unique_url": "https://www.ucl.ac.uk;https://www.uci.edu", "aff_unique_abbr": "UCL;UCI", "aff_campus_unique_index": "0;1;1", "aff_campus_unique": "London;Irvine", "aff_country_unique_index": "0;1;1", "aff_country_unique": "United Kingdom;United States" }, { "id": "6ca0341428", "title": "A Complexity-Distortion Approach to Joint Pattern Alignment", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/b6e710870acb098e584277457ba89d68-Abstract.html", "author": "Andrea Vedaldi; Stefano Soatto", "abstract": "Image Congealing (IC) is a non-parametric method for the joint alignment of a col- lection of images affected by systematic and unwanted deformations. The method attempts to undo the deformations by minimizing a measure of complexity of the image ensemble, such as the averaged per-pixel entropy. This enables alignment without an explicit model of the aligned dataset as required by other methods (e.g. transformed component analysis). While IC is simple and general, it may intro- duce degenerate solutions when the transformations allow minimizing the com- plexity of the data by collapsing them to a constant. Such solutions need to be explicitly removed by regularization. In this paper we propose an alternative formulation which solves this regulariza- tion issue on a more principled ground. We make the simple observation that alignment should simplify the data while preserving the useful information car- ried by them. Therefore we trade off \ufb01delity and complexity of the aligned en- semble rather than minimizing the complexity alone. This eliminates the need for an explicit regularization of the transformations, and has a number of other useful properties such as noise suppression. 
We show the modeling and computa- tional bene\ufb01ts of the approach to the some of the problems on which IC has been demonstrated.", "bibtex": "@inproceedings{NIPS2006_b6e71087,\n author = {Vedaldi, Andrea and Soatto, Stefano},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {A Complexity-Distortion Approach to Joint Pattern Alignment},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/b6e710870acb098e584277457ba89d68-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/b6e710870acb098e584277457ba89d68-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/b6e710870acb098e584277457ba89d68-Metadata.json", "review": "", "metareview": "", "pdf_size": 498429, "gs_citation": 20, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3633976215936330227&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "Department of Computer Science, University of California at Los Angeles; Department of Computer Science, University of California at Los Angeles", "aff_domain": "cs.ucla.edu;cs.ucla.edu", "email": "cs.ucla.edu;cs.ucla.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of California, Los Angeles", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.ucla.edu", "aff_unique_abbr": "UCLA", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Los Angeles", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "8ab3ed8594", "title": "A Humanlike Predictor of Facial Attractiveness", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/9e6a921fbc428b5638b3986e365d4f21-Abstract.html", "author": "Amit Kagian; Gideon Dror; Tommer Leyvand; Daniel Cohen-or; Eytan 
Ruppin", "abstract": "This work presents a method for estimating human facial attractiveness, based on supervised learning techniques. Numerous facial features that describe facial geometry, color and texture, combined with an average human attractiveness score for each facial image, are used to train various predictors. Facial attractiveness ratings produced by the final predictor are found to be highly correlated with human ratings, markedly improving previous machine learning achievements. Simulated psychophysical experiments with virtually manipulated images reveal preferences in the machine's judgments which are remarkably similar to those of humans. These experiments shed new light on existing theories of facial attractiveness such as the averageness, smoothness and symmetry hypotheses. It is intriguing to find that a machine trained explicitly to capture an operational performance criteria such as attractiveness rating, implicitly captures basic human psychophysical biases characterizing the perception of facial attractiveness in general.", "bibtex": "@inproceedings{NIPS2006_9e6a921f,\n author = {Kagian, Amit and Dror, Gideon and Leyvand, Tommer and Cohen-or, Daniel and Ruppin, Eytan},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {A Humanlike Predictor of Facial Attractiveness},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/9e6a921fbc428b5638b3986e365d4f21-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/9e6a921fbc428b5638b3986e365d4f21-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/9e6a921fbc428b5638b3986e365d4f21-Metadata.json", "review": "", "metareview": "", "pdf_size": 82230, "gs_citation": 114, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=869428001784106973&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 14, "aff": "School of Computer Sciences, Tel-Aviv University, Tel-Aviv, 69978, Israel; The Academic College of Tel-Aviv-Yaffo, Tel-Aviv, 64044, Israel; School of Computer Sciences, Tel-Aviv University, Tel-Aviv, 69978, Israel; School of Computer Sciences, Tel-Aviv University, Tel-Aviv, 69978, Israel; School of Computer Sciences, Tel-Aviv University, Tel-Aviv, 69978, Israel", "aff_domain": "post.tau.ac.il;mta.ac.il;post.tau.ac.il;post.tau.ac.il;post.tau.ac.il", "email": "post.tau.ac.il;mta.ac.il;post.tau.ac.il;post.tau.ac.il;post.tau.ac.il", "github": "", "project": "", "author_num": 5, "track": "main", "status": "Poster", "aff_unique_index": "0;1;0;0;0", "aff_unique_norm": "Tel-Aviv University;Academic College of Tel-Aviv-Yaffo", "aff_unique_dep": "School of Computer Sciences;", "aff_unique_url": "https://www.tau.ac.il;", "aff_unique_abbr": "TAU;", "aff_campus_unique_index": "0;0;0;0;0", "aff_campus_unique": "Tel-Aviv", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "Israel" }, { "id": "575c708cf9", "title": "A Kernel Method for the Two-Sample-Problem", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/e9fb2eda3d9c55a0d89c98d6c54b5b3e-Abstract.html", "author": "Arthur Gretton; Karsten Borgwardt; Malte Rasch; Bernhard Sch\u00f6lkopf; Alex J. 
Smola", "abstract": "We propose two statistical tests to determine if two samples are from different dis- tributions. Our test statistic is in both cases the distance between the means of the two samples mapped into a reproducing kernel Hilbert space (RKHS). The \ufb01rst test is based on a large deviation bound for the test statistic, while the second is based on the asymptotic distribution of this statistic. The test statistic can be com- puted in O(m2) time. We apply our approach to a variety of problems, including attribute matching for databases using the Hungarian marriage method, where our test performs strongly. We also demonstrate excellent performance when compar- ing distributions over graphs, for which no alternative tests currently exist.", "bibtex": "@inproceedings{NIPS2006_e9fb2eda,\n author = {Gretton, Arthur and Borgwardt, Karsten and Rasch, Malte and Sch\\\"{o}lkopf, Bernhard and Smola, Alex},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {A Kernel Method for the Two-Sample-Problem},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/e9fb2eda3d9c55a0d89c98d6c54b5b3e-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/e9fb2eda3d9c55a0d89c98d6c54b5b3e-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/e9fb2eda3d9c55a0d89c98d6c54b5b3e-Metadata.json", "review": "", "metareview": "", "pdf_size": 103300, "gs_citation": 2915, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6158929029454567825&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 32, "aff": "MPI for Biological Cybernetics, T\u00a8ubingen, Germany; Ludwig-Maximilians-Univ., Munich, Germany; Graz Univ. 
of Technology, Graz, Austria; MPI for Biological Cybernetics, T\u00a8ubingen, Germany; NICTA, ANU, Canberra, Australia", "aff_domain": "tuebingen.mpg.de;dbs.ifi.lmu.de;igi.tu-graz.ac.at;tuebingen.mpg.de;anu.edu.au", "email": "tuebingen.mpg.de;dbs.ifi.lmu.de;igi.tu-graz.ac.at;tuebingen.mpg.de;anu.edu.au", "github": "", "project": "", "author_num": 5, "track": "main", "status": "Poster", "aff_unique_index": "0;1;2;0;3", "aff_unique_norm": "Max Planck Institute for Biological Cybernetics;Ludwig-Maximilians-Universit\u00e4t M\u00fcnchen;Graz University of Technology;Australian National University", "aff_unique_dep": "Biological Cybernetics;;;", "aff_unique_url": "https://www.biological-cybernetics.de;https://www.lmu.de;https://www.tugraz.at;https://www.anu.edu.au", "aff_unique_abbr": "MPIBC;LMU;TUGraz;ANU", "aff_campus_unique_index": "0;1;2;0;3", "aff_campus_unique": "T\u00fcbingen;Munich;Graz;Canberra", "aff_country_unique_index": "0;0;1;0;2", "aff_country_unique": "Germany;Austria;Australia" }, { "id": "ceb2737791", "title": "A Kernel Subspace Method by Stochastic Realization for Learning Nonlinear Dynamical Systems", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/96629f1aac6ddb7a7cfa82574b6722d4-Abstract.html", "author": "Yoshinobu Kawahara; Takehisa Yairi; Kazuo Machida", "abstract": "In this paper, we present a subspace method for learning nonlinear dynamical systems based on stochastic realization, in which state vectors are chosen using kernel canonical correlation analysis, and then state-space systems are identified through regression with the state vectors. We construct the theoretical underpinning and derive a concrete algorithm for nonlinear identification. The obtained algorithm needs no iterative optimization procedure and can be implemented on the basis of fast and reliable numerical schemes. 
The simulation result shows that our algorithm can express dynamics with a high degree of accuracy.", "bibtex": "@inproceedings{NIPS2006_96629f1a,\n author = {Kawahara, Yoshinobu and Yairi, Takehisa and Machida, Kazuo},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {A Kernel Subspace Method by Stochastic Realization for Learning Nonlinear Dynamical Systems},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/96629f1aac6ddb7a7cfa82574b6722d4-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/96629f1aac6ddb7a7cfa82574b6722d4-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/96629f1aac6ddb7a7cfa82574b6722d4-Metadata.json", "review": "", "metareview": "", "pdf_size": 125685, "gs_citation": 27, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=117966118060942814&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "Dept. of Aeronautics & Astronautics Research Center for Advanced Science and Technology The University of Tokyo; Dept. of Aeronautics & Astronautics Research Center for Advanced Science and Technology The University of Tokyo; Dept. of Aeronautics & Astronautics Research Center for Advanced Science and Technology The University of Tokyo", "aff_domain": "space.rcast.u-tokyo.ac.jp;space.rcast.u-tokyo.ac.jp;space.rcast.u-tokyo.ac.jp", "email": "space.rcast.u-tokyo.ac.jp;space.rcast.u-tokyo.ac.jp;space.rcast.u-tokyo.ac.jp", "github": "", "project": "www.space.rcast.u-tokyo.ac.jp/kawahara/index e.html", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of Tokyo", "aff_unique_dep": "Dept. 
of Aeronautics & Astronautics", "aff_unique_url": "https://www.u-tokyo.ac.jp", "aff_unique_abbr": "UTokyo", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Japan" }, { "id": "fb30065ba3", "title": "A Local Learning Approach for Clustering", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/366f0bc7bd1d4bf414073cabbadfdfcd-Abstract.html", "author": "Mingrui Wu; Bernhard Sch\u00f6lkopf", "abstract": "We present a local learning approach for clustering. The basic idea is that a good clustering result should have the property that the cluster label of each data point can be well predicted based on its neighboring data and their cluster labels, using current supervised learning methods. An optimization problem is formulated such that its solution has the above property. Relaxation and eigen-decomposition are applied to solve this optimization problem. We also briefly investigate the parameter selection issue and provide a simple parameter selection method for the proposed algorithm. Experimental results are provided to validate the effectiveness of the proposed approach.", "bibtex": "@inproceedings{NIPS2006_366f0bc7,\n author = {Wu, Mingrui and Sch\\\"{o}lkopf, Bernhard},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {A Local Learning Approach for Clustering},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/366f0bc7bd1d4bf414073cabbadfdfcd-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/366f0bc7bd1d4bf414073cabbadfdfcd-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/366f0bc7bd1d4bf414073cabbadfdfcd-Metadata.json", "review": "", "metareview": "", "pdf_size": 127718, "gs_citation": 371, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10445145716456290085&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "Max Planck Institute for Biological Cybernetics; Max Planck Institute for Biological Cybernetics", "aff_domain": "tuebingen.mpg.de;tuebingen.mpg.de", "email": "tuebingen.mpg.de;tuebingen.mpg.de", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Max Planck Institute for Biological Cybernetics", "aff_unique_dep": "Biological Cybernetics", "aff_unique_url": "https://www.biocybernetics.mpg.de", "aff_unique_abbr": "MPIBC", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "Germany" }, { "id": "f9422ebe34", "title": "A Nonparametric Approach to Bottom-Up Visual Saliency", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/a2d10d355cdebc879e4fc6ecc6f63dd7-Abstract.html", "author": "Wolf Kienzle; Felix A. Wichmann; Matthias O. Franz; Bernhard Sch\u00f6lkopf", "abstract": "This paper addresses the bottom-up influence of local image information on human eye movements. Most existing computational models use a set of biologically plausible linear filters, e.g., Gabor or Difference-of-Gaussians filters as a front-end, the outputs of which are nonlinearly combined into a real number that indicates visual saliency. 
Unfortunately, this requires many design parameters such as the number, type, and size of the front-end filters, as well as the choice of nonlinearities, weighting and normalization schemes etc., for which biological plausibility cannot always be justified. As a result, these parameters have to be chosen in a more or less ad hoc way. Here, we propose to learn a visual saliency model directly from human eye movement data. The model is rather simplistic and essentially parameter-free, and therefore contrasts recent developments in the field that usually aim at higher prediction rates at the cost of additional parameters and increasing model complexity. Experimental results show that--despite the lack of any biological prior knowledge--our model performs comparably to existing approaches, and in fact learns image features that resemble findings from several previous studies. In particular, its maximally excitatory stimuli have center-surround structure, similar to receptive fields in the early human visual system.", "bibtex": "@inproceedings{NIPS2006_a2d10d35,\n author = {Kienzle, Wolf and Wichmann, Felix A. and Franz, Matthias and Sch\\\"{o}lkopf, Bernhard},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {A Nonparametric Approach to Bottom-Up Visual Saliency},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/a2d10d355cdebc879e4fc6ecc6f63dd7-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/a2d10d355cdebc879e4fc6ecc6f63dd7-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/a2d10d355cdebc879e4fc6ecc6f63dd7-Metadata.json", "review": "", "metareview": "", "pdf_size": 196717, "gs_citation": 268, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15073766894807363334&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 16, "aff": ";;;", "aff_domain": ";;;", "email": ";;;", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster" }, { "id": "68220de48a", "title": "A Nonparametric Bayesian Method for Inferring Features From Similarity Judgments", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/2ecd2bd94734e5dd392d8678bc64cdab-Abstract.html", "author": "Daniel J. Navarro; Thomas L. Griffiths", "abstract": "The additive clustering model is widely used to infer the features of a set of stimuli from their similarities, on the assumption that similarity is a weighted linear function of common features. This paper develops a fully Bayesian formulation of the additive clustering model, using methods from nonparametric Bayesian statistics to allow the number of features to vary. We use this to explore several approaches to parameter estimation, showing that the nonparametric Bayesian approach provides a straightforward way to obtain estimates of both the number of features used in producing similarity judgments and their importance.", "bibtex": "@inproceedings{NIPS2006_2ecd2bd9,\n author = {Navarro, Daniel and Griffiths, Thomas},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {A Nonparametric Bayesian Method for Inferring Features From Similarity Judgments},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/2ecd2bd94734e5dd392d8678bc64cdab-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/2ecd2bd94734e5dd392d8678bc64cdab-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/2ecd2bd94734e5dd392d8678bc64cdab-Metadata.json", "review": "", "metareview": "", "pdf_size": 150622, "gs_citation": 22, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1579737433751725248&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 16, "aff": "School of Psychology, University of Adelaide; Department of Psychology, UCBerkeley", "aff_domain": "adelaide.edu.au;berkeley.edu", "email": "adelaide.edu.au;berkeley.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "University of Adelaide;University of California, Berkeley", "aff_unique_dep": "School of Psychology;Department of Psychology", "aff_unique_url": "https://www.adelaide.edu.au;https://www.berkeley.edu", "aff_unique_abbr": "Adelaide;UC Berkeley", "aff_campus_unique_index": "1", "aff_campus_unique": ";Berkeley", "aff_country_unique_index": "0;1", "aff_country_unique": "Australia;United States" }, { "id": "9d4c8437e6", "title": "A Novel Gaussian Sum Smoother for Approximate Inference in Switching Linear Dynamical Systems", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/b922ede9c9eb9eabec1c1fecbdecb45d-Abstract.html", "author": "David Barber; Bertrand Mesot", "abstract": "We introduce a method for approximate smoothed inference in a class of switching linear dynamical systems, based on a novel form of Gaussian Sum smoother. 
This class includes the switching Kalman Filter and the more general case of switch transitions dependent on the continuous latent state. The method improves on the standard Kim smoothing approach by dispensing with one of the key approximations, thus making fuller use of the available future information. Whilst the only central assumption required is projection to a mixture of Gaussians, we show that an additional conditional independence assumption results in a simpler but stable and accurate alternative. Unlike the alternative unstable Expectation Propagation procedure, our method consists only of a single forward and backward pass and is reminiscent of the standard smoothing `correction' recursions in the simpler linear dynamical system. The algorithm performs well on both toy experiments and in a large scale application to noise robust speech recognition.", "bibtex": "@inproceedings{NIPS2006_b922ede9,\n author = {Barber, David and Mesot, Bertrand},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {A Novel Gaussian Sum Smoother for Approximate Inference in Switching Linear Dynamical Systems},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/b922ede9c9eb9eabec1c1fecbdecb45d-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/b922ede9c9eb9eabec1c1fecbdecb45d-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/b922ede9c9eb9eabec1c1fecbdecb45d-Metadata.json", "review": "", "metareview": "", "pdf_size": 92843, "gs_citation": 8, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6390636887191559964&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 14, "aff": ";", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster" }, { "id": "f9c12e7ea2", "title": "A PAC-Bayes Risk Bound for General Loss Functions", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/f231f2107df69eab0a3862d50018a9b2-Abstract.html", "author": "Pascal Germain; Alexandre Lacasse; Fran\u00e7ois Laviolette; Mario Marchand", "abstract": "We provide a PAC-Bayesian bound for the expected loss of convex combinations of classi\ufb01ers under a wide class of loss functions (which includes the exponential loss and the logistic loss). Our numerical experiments with Adaboost indicate that the proposed upper bound, computed on the training set, behaves very similarly as the true loss estimated on the testing set.", "bibtex": "@inproceedings{NIPS2006_f231f210,\n author = {Germain, Pascal and Lacasse, Alexandre and Laviolette, Fran\\c{c}ois and Marchand, Mario},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {A PAC-Bayes Risk Bound for General Loss Functions},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/f231f2107df69eab0a3862d50018a9b2-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/f231f2107df69eab0a3862d50018a9b2-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/f231f2107df69eab0a3862d50018a9b2-Metadata.json", "review": "", "metareview": "", "pdf_size": 250737, "gs_citation": 20, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8830706828240294067&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 19, "aff": "D\u00b4epartement IFT-GLO, Universit \u00b4e Laval, Qu\u00b4ebec, Canada; D\u00b4epartement IFT-GLO, Universit \u00b4e Laval, Qu\u00b4ebec, Canada; D\u00b4epartement IFT-GLO, Universit \u00b4e Laval, Qu\u00b4ebec, Canada; D\u00b4epartement IFT-GLO, Universit \u00b4e Laval, Qu\u00b4ebec, Canada", "aff_domain": "ulaval.ca;ift.ulaval.ca;ift.ulaval.ca;ift.ulaval.ca", "email": "ulaval.ca;ift.ulaval.ca;ift.ulaval.ca;ift.ulaval.ca", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Universit\u00e9 Laval", "aff_unique_dep": "D\u00e9partement IFT-GLO", "aff_unique_url": "https://www.ulaval.ca", "aff_unique_abbr": "UL", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Qu\u00e9bec", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "Canada" }, { "id": "4984f632ca", "title": "A Probabilistic Algorithm Integrating Source Localization and Noise Suppression of MEG and EEG data", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/4aa0e93b918848be0b7728b4b1568d8a-Abstract.html", "author": "Johanna M. Zumer; Hagai T. Attias; Kensuke Sekihara; Srikantan S. 
Nagarajan", "abstract": "We have developed a novel algorithm for integrating source localization and noise suppression based on a probabilistic graphical model of stimulus-evoked MEG/EEG data. Our algorithm localizes multiple dipoles while suppressing noise sources with the computational complexity equivalent to a single dipole scan, and is therefore more ef(cid:2)cient than traditional multidipole (cid:2)tting procedures. In simulation, the algorithm can accurately localize and estimate the time course of several simultaneously-active dipoles, with rotating or (cid:2)xed orientation, at noise levels typical for averaged MEG data. Furthermore, the algorithm is superior to beamforming techniques, which we show to be an approximation to our graphical model, in estimation of temporally correlated sources. Success of this algorithm for localizing auditory cortex in a tumor patient and for localizing an epileptic spike source are also demonstrated.", "bibtex": "@inproceedings{NIPS2006_4aa0e93b,\n author = {Zumer, Johanna and Attias, Hagai and Sekihara, Kensuke and Nagarajan, Srikantan},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {A Probabilistic Algorithm Integrating Source Localization and Noise Suppression of MEG and EEG data},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/4aa0e93b918848be0b7728b4b1568d8a-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/4aa0e93b918848be0b7728b4b1568d8a-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/4aa0e93b918848be0b7728b4b1568d8a-Metadata.json", "review": "", "metareview": "", "pdf_size": 1546174, "gs_citation": 82, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4799517750480820868&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 21, "aff": "Biomagnetic Imaging Lab, Department of Radiology, Joint Graduate Group in Bioengineering, University of California, San Francisco; Golden Metallic, Inc.; Dept. of Systems Design and Engineering, Tokyo Metropolitan University; Biomagnetic Imaging Lab, Department of Radiology, Joint Graduate Group in Bioengineering, University of California, San Francisco", "aff_domain": "mrsc.ucsf.edu;goldenmetallic.com;cc.tmit.ac.jp;mrsc.ucsf.edu", "email": "mrsc.ucsf.edu;goldenmetallic.com;cc.tmit.ac.jp;mrsc.ucsf.edu", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;1;2;0", "aff_unique_norm": "University of California, San Francisco;Golden Metallic;Tokyo Metropolitan University", "aff_unique_dep": "Department of Radiology;Golden Metallic, Inc.;Dept. 
of Systems Design and Engineering", "aff_unique_url": "https://www.ucsf.edu;;https://www.tmu.ac.jp", "aff_unique_abbr": "UCSF;;TMU", "aff_campus_unique_index": "0;2;0", "aff_campus_unique": "San Francisco;;Tokyo", "aff_country_unique_index": "0;0;1;0", "aff_country_unique": "United States;Japan" }, { "id": "af2d4664ca", "title": "A Scalable Machine Learning Approach to Go", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/512fc3c5227f637e41437c999a2d3169-Abstract.html", "author": "Lin Wu; Pierre F. Baldi", "abstract": "Go is an ancient board game that poses unique opportunities and challenges for AI and machine learning. Here we develop a machine learning approach to Go, and related board games, focusing primarily on the problem of learning a good evaluation function in a scalable way. Scalability is essential at multiple levels, from the library of local tactical patterns, to the integration of patterns across the board, to the size of the board itself. The system we propose is capable of automatically learning the propensity of local patterns from a library of games. Propensity and other local tactical information are fed into a recursive neural network, derived from a Bayesian network architecture. The network integrates local information across the board and produces local outputs that represent local territory ownership probabilities. The aggregation of these probabilities provides an effective strategic evaluation function that is an estimate of the expected area at the end (or at other stages) of the game. Local area targets for training can be derived from datasets of human games. A system trained using only 9 \u00d7 9 amateur game data performs surprisingly well on a test set derived from 19 \u00d7 19 professional game data. 
Possible directions for further improvements are brie\ufb02y discussed.", "bibtex": "@inproceedings{NIPS2006_512fc3c5,\n author = {Wu, Lin and Baldi, Pierre},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {A Scalable Machine Learning Approach to Go},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/512fc3c5227f637e41437c999a2d3169-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/512fc3c5227f637e41437c999a2d3169-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/512fc3c5227f637e41437c999a2d3169-Metadata.json", "review": "", "metareview": "", "pdf_size": 96677, "gs_citation": 21, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7714427666903688065&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "School of Information and Computer Sciences, University of California, Irvine; School of Information and Computer Sciences, University of California, Irvine", "aff_domain": "ics.uci.edu;ics.uci.edu", "email": "ics.uci.edu;ics.uci.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of California, Irvine", "aff_unique_dep": "School of Information and Computer Sciences", "aff_unique_url": "https://www.uci.edu", "aff_unique_abbr": "UCI", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Irvine", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "4b09fb4893", "title": "A Small World Threshold for Economic Network Formation", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/6917ff2a7b53421ff4066020e2d89eec-Abstract.html", "author": "Eyal Even-dar; Michael Kearns", "abstract": "We introduce a game-theoretic model for network formation inspired by earlier stochastic 
models that mix localized and long-distance connectivity. In this model, players may purchase edges at distance d at a cost of d^\u03b1, and wish to minimize the sum of their edge purchases and their average distance to other players. In this model, we show there is a striking \"small world\" threshold phenomenon: in two dimensions, if \u03b1 < 2 then every Nash equilibrium results in a network of constant diameter (independent of network size), and if \u03b1 > 2 then every Nash equilibrium results in a network whose diameter grows as a root of the network size, and thus is unbounded. We contrast our results with those of Kleinberg [8] in a stochastic model, and empirically investigate the \"navigability\" of equilibrium networks. Our theoretical results all generalize to higher dimensions.", "bibtex": "@inproceedings{NIPS2006_6917ff2a,\n author = {Even-dar, Eyal and Kearns, Michael},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {A Small World Threshold for Economic Network Formation},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/6917ff2a7b53421ff4066020e2d89eec-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/6917ff2a7b53421ff4066020e2d89eec-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/6917ff2a7b53421ff4066020e2d89eec-Metadata.json", "review": "", "metareview": "", "pdf_size": 145370, "gs_citation": 27, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5834230117675133697&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 15, "aff": "Computer and Information Science, University of Pennsylvania; Computer and Information Science, University of Pennsylvania", "aff_domain": "seas.upenn.edu;cis.upenn.edu", "email": "seas.upenn.edu;cis.upenn.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": 
"Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of Pennsylvania", "aff_unique_dep": "Computer and Information Science", "aff_unique_url": "https://www.upenn.edu", "aff_unique_abbr": "UPenn", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "2d7f066935", "title": "A Switched Gaussian Process for Estimating Disparity and Segmentation in Binocular Stereo", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/60ad83801910ec976590f69f638e0d6d-Abstract.html", "author": "Oliver Williams", "abstract": "This paper describes a Gaussian process framework for inferring pixel-wise disparity and bi-layer segmentation of a scene given a stereo pair of images. The Gaussian process covariance is parameterized by a foreground-backgroundocclusion segmentation label to model both smooth regions and discontinuities. As such, we call our model a switched Gaussian process. We propose a greedy incremental algorithm for adding observations from the data and assigning segmentation labels. Two observation schedules are proposed: the first treats scanlines as independent, the second uses an active learning criterion to select a sparse subset of points to measure. We show that this probabilistic framework has comparable performance to the state-of-the-art.", "bibtex": "@inproceedings{NIPS2006_60ad8380,\n author = {Williams, Oliver},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {A Switched Gaussian Process for Estimating Disparity and Segmentation in Binocular Stereo},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/60ad83801910ec976590f69f638e0d6d-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/60ad83801910ec976590f69f638e0d6d-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/60ad83801910ec976590f69f638e0d6d-Metadata.json", "review": "", "metareview": "", "pdf_size": 972418, "gs_citation": 17, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8879279256022810409&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "Microsoft Research Ltd.", "aff_domain": "cam.ac.uk", "email": "cam.ac.uk", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "Microsoft", "aff_unique_dep": "Microsoft Research", "aff_unique_url": "https://www.microsoft.com/en-us/research", "aff_unique_abbr": "MSR", "aff_country_unique_index": "0", "aff_country_unique": "United Kingdom" }, { "id": "c785f1acd6", "title": "A Theory of Retinal Population Coding", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/bbb001ba009ed11717eaec9305b2feb6-Abstract.html", "author": "Eizaburo Doi; Michael S. Lewicki", "abstract": "Efficient coding models predict that the optimal code for natural images is a population of oriented Gabor receptive fields. These results match response properties of neurons in primary visual cortex, but not those in the retina. Does the retina use an optimal code, and if so, what is it optimized for? Previous theories of retinal coding have assumed that the goal is to encode the maximal amount of information about the sensory signal. However, the image sampled by retinal photoreceptors is degraded both by the optics of the eye and by the photoreceptor noise. 
Therefore, de-blurring and de-noising of the retinal signal should be important aspects of retinal coding. Furthermore, the ideal retinal code should be robust to neural noise and make optimal use of all available neurons. Here we present a theoretical framework to derive codes that simultaneously satisfy all of these desiderata. When optimized for natural images, the model yields filters that show strong similarities to retinal ganglion cell (RGC) receptive fields. Importantly, the characteristics of receptive fields vary with retinal eccentricities where the optical blur and the number of RGCs are significantly different. The proposed model provides a unified account of retinal coding, and more generally, it may be viewed as an extension of the Wiener filter with an arbitrary number of noisy units.", "bibtex": "@inproceedings{NIPS2006_bbb001ba,\n author = {Doi, Eizaburo and Lewicki, Michael},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {A Theory of Retinal Population Coding},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/bbb001ba009ed11717eaec9305b2feb6-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/bbb001ba009ed11717eaec9305b2feb6-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/bbb001ba009ed11717eaec9305b2feb6-Metadata.json", "review": "", "metareview": "", "pdf_size": 1336822, "gs_citation": 31, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5323033286735203354&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Center for the Neural Basis of Cognition, Carnegie Mellon University; Center for the Neural Basis of Cognition, Carnegie Mellon University", "aff_domain": "cnbc.cmu.edu;cnbc.cmu.edu", "email": "cnbc.cmu.edu;cnbc.cmu.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Carnegie Mellon University", "aff_unique_dep": "Center for the Neural Basis of Cognition", "aff_unique_url": "https://www.cmu.edu", "aff_unique_abbr": "CMU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "be7b8866c9", "title": "A recipe for optimizing a time-histogram", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/747d3443e319a22747fbb873e8b2f9f2-Abstract.html", "author": "Hideaki Shimazaki; Shigeru Shinomoto", "abstract": "The time-histogram method is a handy tool for capturing the instantaneous rate of spike occurrence. In most of the neurophysiological literature, the bin size that critically determines the goodness of the fit of the time-histogram to the underlying rate has been selected by individual researchers in an unsystematic manner. 
We propose an objective method for selecting the bin size of a time-histogram from the spike data, so that the time-histogram best approximates the unknown underlying rate. The resolution of the histogram increases, or the optimal bin size decreases, with the number of spike sequences sampled. It is notable that the optimal bin size diverges if only a small number of experimental trials are available from a moderately fluctuating rate process. In this case, any attempt to characterize the underlying spike rate will lead to spurious results. Given a paucity of data, our method can also suggest how many more trials are needed until the set of data can be analyzed with the required resolution.", "bibtex": "@inproceedings{NIPS2006_747d3443,\n author = {Shimazaki, Hideaki and Shinomoto, Shigeru},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {A recipe for optimizing a time-histogram},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/747d3443e319a22747fbb873e8b2f9f2-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/747d3443e319a22747fbb873e8b2f9f2-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/747d3443e319a22747fbb873e8b2f9f2-Metadata.json", "review": "", "metareview": "", "pdf_size": 158545, "gs_citation": 36, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9031008924804376324&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 11, "aff": "Department of Physics, Graduate School of Science, Kyoto University; Department of Physics, Graduate School of Science, Kyoto University", "aff_domain": "ton.scphys.kyoto-u.ac.jp;scphys.kyoto-u.ac.jp", "email": "ton.scphys.kyoto-u.ac.jp;scphys.kyoto-u.ac.jp", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", 
"aff_unique_norm": "Kyoto University", "aff_unique_dep": "Department of Physics", "aff_unique_url": "https://www.kyoto-u.ac.jp", "aff_unique_abbr": "Kyoto U", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Kyoto", "aff_country_unique_index": "0;0", "aff_country_unique": "Japan" }, { "id": "e0caa3c45f", "title": "A selective attention multi--chip system with dynamic synapses and spiking neurons", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/7edfd52220e2032e7281061c82401195-Abstract.html", "author": "Chiara Bartolozzi; Giacomo Indiveri", "abstract": "Selective attention is the strategy used by biological sensory systems to solve the problem of limited parallel processing capacity: salient subregions of the input stimuli are serially processed, while nonsalient regions are suppressed. We present an mixed mode analog/digital Very Large Scale Integration implementation of a building block for a multichip neuromorphic hardware model of selective attention. We describe the chip's architecture and its behavior, when its is part of a multichip system with a spiking retina as input, and show how it can be used to implement in real-time flexible models of bottom-up attention.", "bibtex": "@inproceedings{NIPS2006_7edfd522,\n author = {Bartolozzi, Chiara and Indiveri, Giacomo},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {A selective attention multi--chip system with dynamic synapses and spiking neurons},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/7edfd52220e2032e7281061c82401195-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/7edfd52220e2032e7281061c82401195-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/7edfd52220e2032e7281061c82401195-Metadata.json", "review": "", "metareview": "", "pdf_size": 477466, "gs_citation": 15, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3478313349902192759&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 11, "aff": "Institute of neuroinformatics UNI-ETH Zurich; Institute of neuroinformatics UNI-ETH Zurich", "aff_domain": "ini.phys.ethz.ch;ini.phys.ethz.ch", "email": "ini.phys.ethz.ch;ini.phys.ethz.ch", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "ETH Zurich", "aff_unique_dep": "Institute of neuroinformatics", "aff_unique_url": "https://www.ethz.ch", "aff_unique_abbr": "ETHZ", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "Switzerland" }, { "id": "b2b60490a0", "title": "Accelerated Variational Dirichlet Process Mixtures", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/2bd235c31c97855b7ef2dc8b414779af-Abstract.html", "author": "Kenichi Kurihara; Max Welling; Nikos Vlassis", "abstract": "Dirichlet Process (DP) mixture models are promising candidates for clustering applications where the number of clusters is unknown a priori. Due to compu- tational considerations these models are unfortunately unsuitable for large scale data-mining applications. We propose a class of deterministic accelerated DP mixture models that can routinely handle millions of data-cases. 
The speedup is achieved by incorporating kd-trees into a variational Bayesian algorithm for DP mixtures in the stick-breaking representation, similar to that of Blei and Jordan (2005). Our algorithm differs in the use of kd-trees and in the way we handle truncation: we only assume that the variational distributions are \ufb01xed at their pri- ors after a certain level. Experiments show that speedups relative to the standard variational algorithm can be signi\ufb01cant.", "bibtex": "@inproceedings{NIPS2006_2bd235c3,\n author = {Kurihara, Kenichi and Welling, Max and Vlassis, Nikos},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Accelerated Variational Dirichlet Process Mixtures},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/2bd235c31c97855b7ef2dc8b414779af-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/2bd235c31c97855b7ef2dc8b414779af-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/2bd235c31c97855b7ef2dc8b414779af-Metadata.json", "review": "", "metareview": "", "pdf_size": 213137, "gs_citation": 201, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9576631435472497416&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 15, "aff": "Dept. 
of Computer Science, Tokyo Institute of Technology, Tokyo, Japan; Bren School of Information and Computer Science, UC Irvine, Irvine, CA 92697-3425; Informatics Institute, University of Amsterdam, The Netherlands", "aff_domain": "mi.cs.titech.ac.jp;ics.uci.edu;science.uva.nl", "email": "mi.cs.titech.ac.jp;ics.uci.edu;science.uva.nl", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;2", "aff_unique_norm": "Tokyo Institute of Technology;University of California, Irvine;University of Amsterdam", "aff_unique_dep": "Dept. of Computer Science;Bren School of Information and Computer Science;Informatics Institute", "aff_unique_url": "https://www.titech.ac.jp;https://www.uci.edu;https://www.uva.nl", "aff_unique_abbr": "Titech;UCI;UvA", "aff_campus_unique_index": "0;1", "aff_campus_unique": "Tokyo;Irvine;", "aff_country_unique_index": "0;1;2", "aff_country_unique": "Japan;United States;Netherlands" }, { "id": "afe86bb93e", "title": "Active learning for misspecified generalized linear models", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/3e60e09c222f206c725385f53d7e567c-Abstract.html", "author": "Francis R. Bach", "abstract": "Active learning refers to algorithmic frameworks aimed at selecting training data points in order to reduce the number of required training data points and/or im- prove the generalization performance of a learning method. In this paper, we present an asymptotic analysis of active learning for generalized linear models. Our analysis holds under the common practical situation of model misspeci\ufb01ca- tion, and is based on realistic assumptions regarding the nature of the sampling distributions, which are usually neither independent nor identical. 
We derive un- biased estimators of generalization performance, as well as estimators of expected reduction in generalization error after adding a new training data point, that allow us to optimize its sampling distribution through a convex optimization problem. Our analysis naturally leads to an algorithm for sequential active learning which is applicable for all tasks supported by generalized linear models (e.g., binary clas- si\ufb01cation, multi-class classi\ufb01cation, regression) and can be applied in non-linear settings through the use of Mercer kernels.", "bibtex": "@inproceedings{NIPS2006_3e60e09c,\n author = {Bach, Francis},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Active learning for misspecified generalized linear models},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/3e60e09c222f206c725385f53d7e567c-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/3e60e09c222f206c725385f53d7e567c-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/3e60e09c222f206c725385f53d7e567c-Metadata.json", "review": "", "metareview": "", "pdf_size": 164837, "gs_citation": 84, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8019547995674147167&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 15, "aff": "", "aff_domain": "", "email": "", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster" }, { "id": "a76946b45c", "title": "AdaBoost is Consistent", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/b887d8d5e65ac4dec3934028fe23ad72-Abstract.html", "author": "Peter L. Bartlett; Mikhail Traskin", "abstract": "The risk, or probability of error, of the classifier produced by the AdaBoost algorithm is investigated. 
In particular, we consider the stopping strategy to be used in AdaBoost to achieve universal consistency. We show that provided AdaBoost is stopped after n^{1-\u03b5} iterations--for sample size n and \u03b5 < 1--the sequence of risks of the classifiers it produces approaches the Bayes risk if Bayes risk L* > 0.", "bibtex": "@inproceedings{NIPS2006_b887d8d5,\n author = {Bartlett, Peter and Traskin, Mikhail},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {AdaBoost is Consistent},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/b887d8d5e65ac4dec3934028fe23ad72-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/b887d8d5e65ac4dec3934028fe23ad72-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/b887d8d5e65ac4dec3934028fe23ad72-Metadata.json", "review": "", "metareview": "", "pdf_size": 176445, "gs_citation": 241, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12422075237837366571&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 23, "aff": "Department of Statistics and Computer Science Division, University of California, Berkeley; Department of Statistics, University of California, Berkeley", "aff_domain": "stat.berkeley.edu;stat.berkeley.edu", "email": "stat.berkeley.edu;stat.berkeley.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of California, Berkeley", "aff_unique_dep": "Department of Statistics and Computer Science Division", "aff_unique_url": "https://www.berkeley.edu", "aff_unique_abbr": "UC Berkeley", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Berkeley", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "6701a5a454", "title": "Adaptive Spatial Filters with predefined Region 
of Interest for EEG based Brain-Computer-Interfaces", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/7221e5c8ec6b08ef6d3f9ff3ce6eb1d1-Abstract.html", "author": "Moritz Grosse-wentrup; Klaus Gramann; Martin Buss", "abstract": "The performance of EEG-based Brain-Computer-Interfaces (BCIs) critically depends on the extraction of features from the EEG carrying information relevant for the classification of different mental states. For BCIs employing imaginary movements of different limbs, the method of Common Spatial Patterns (CSP) has been shown to achieve excellent classification results. The CSP-algorithm however suffers from a lack of robustness, requiring training data without artifacts for good performance. To overcome this lack of robustness, we propose an adaptive spatial filter that replaces the training data in the CSP approach by a-priori information. More specifically, we design an adaptive spatial filter that maximizes the ratio of the variance of the electric field originating in a predefined region of interest (ROI) and the overall variance of the measured EEG. Since it is known that the component of the EEG used for discriminating imaginary movements originates in the motor cortex, we design two adaptive spatial filters with the ROIs centered in the hand areas of the left and right motor cortex. We then use these to classify EEG data recorded during imaginary movements of the right and left hand of three subjects, and show that the adaptive spatial filters outperform the CSP-algorithm, enabling classification rates of up to 94.7 % without artifact rejection.", "bibtex": "@inproceedings{NIPS2006_7221e5c8,\n author = {Grosse-wentrup, Moritz and Gramann, Klaus and Buss, Martin},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Adaptive Spatial Filters with predefined Region of Interest for EEG based Brain-Computer-Interfaces},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/7221e5c8ec6b08ef6d3f9ff3ce6eb1d1-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/7221e5c8ec6b08ef6d3f9ff3ce6eb1d1-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/7221e5c8ec6b08ef6d3f9ff3ce6eb1d1-Metadata.json", "review": "", "metareview": "", "pdf_size": 128811, "gs_citation": 35, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7642970909718110564&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 15, "aff": "Institute of Automatic Control Engineering, Technische Universit\u00e4t M\u00fcnchen; Department Psychology, Ludwig-Maximilians-Universit\u00e4t M\u00fcnchen; Institute of Automatic Control Engineering, Technische Universit\u00e4t M\u00fcnchen", "aff_domain": "tum.de;psy.uni-muenchen.de;tum.de", "email": "tum.de;psy.uni-muenchen.de;tum.de", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;0", "aff_unique_norm": "Technische Universit\u00e4t M\u00fcnchen;Ludwig-Maximilians-Universit\u00e4t M\u00fcnchen", "aff_unique_dep": "Institute of Automatic Control Engineering;Department of Psychology", "aff_unique_url": "https://www.tum.de;https://www.lmu.de", "aff_unique_abbr": "TUM;LMU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Germany" }, { "id": "8d17594d85", "title": "Adaptor Grammars: A Framework for Specifying Compositional Nonparametric Bayesian Models", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/62f91ce9b820a491ee78c108636db089-Abstract.html", "author": "Mark Johnson; Thomas L. 
Griffiths; Sharon Goldwater", "abstract": "This paper introduces adaptor grammars, a class of probabilistic models of lan- guage that generalize probabilistic context-free grammars (PCFGs). Adaptor grammars augment the probabilistic rules of PCFGs with \u201cadaptors\u201d that can in- duce dependencies among successive uses. With a particular choice of adaptor, based on the Pitman-Yor process, nonparametric Bayesian models of language using Dirichlet processes and hierarchical Dirichlet processes can be written as simple grammars. We present a general-purpose inference algorithm for adaptor grammars, making it easy to de\ufb01ne and use such models, and illustrate how several existing nonparametric Bayesian models can be expressed within this framework.", "bibtex": "@inproceedings{NIPS2006_62f91ce9,\n author = {Johnson, Mark and Griffiths, Thomas and Goldwater, Sharon},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Adaptor Grammars: A Framework for Specifying Compositional Nonparametric Bayesian Models},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/62f91ce9b820a491ee78c108636db089-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/62f91ce9b820a491ee78c108636db089-Paper.pdf", "supp": "https://papers.nips.cc/paper_files/paper/2006/file/62f91ce9b820a491ee78c108636db089-Supplemental.zip", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/62f91ce9b820a491ee78c108636db089-Metadata.json", "review": "", "metareview": "", "pdf_size": 84508, "gs_citation": 356, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13635137736349711803&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 26, "aff": "Microsoft Research / Brown University; University of California, Berkeley; Stanford University", "aff_domain": "Brown.edu;Berkeley.edu;gmail.com", "email": "Brown.edu;Berkeley.edu;gmail.com", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;2", "aff_unique_norm": "Microsoft;University of California, Berkeley;Stanford University", "aff_unique_dep": "Microsoft Research;;", "aff_unique_url": "https://www.microsoft.com/en-us/research;https://www.berkeley.edu;https://www.stanford.edu", "aff_unique_abbr": "MSR;UC Berkeley;Stanford", "aff_campus_unique_index": "1;2", "aff_campus_unique": ";Berkeley;Stanford", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "53aa23b577", "title": "Aggregating Classification Accuracy across Time: Application to Single Trial EEG", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/2b45e8d6abf59038a975faeeb6dc0782-Abstract.html", "author": "Steven Lemm; Christin Sch\u00e4fer; Gabriel Curio", "abstract": "We present a method for binary on-line classification of triggered but temporally blurred events that 
are embedded in noisy time series in the context of on-line discrimination between left and right imaginary hand-movement. In particular the goal of the binary classification problem is to obtain the decision, as fast and as reliably as possible from the recorded EEG single trials. To provide a probabilistic decision at every time-point t the presented method gathers information from two distinct sequences of features across time. In order to incorporate decisions from prior time-points we suggest an appropriate weighting scheme, that emphasizes time instances, providing a higher discriminatory power between the instantaneous class distributions of each feature, where the discriminatory power is quantified in terms of the Bayes error of misclassification. The effectiveness of this procedure is verified by its successful application in the 3rd BCI competition. Disclosure of the data after the competition revealed this approach to be superior with single trial error rates as low as 10.7, 11.5 and 16.7% for the three different subjects under study.", "bibtex": "@inproceedings{NIPS2006_2b45e8d6,\n author = {Lemm, Steven and Sch\\\"{a}fer, Christin and Curio, Gabriel},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Aggregating Classification Accuracy across Time: Application to Single Trial EEG},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/2b45e8d6abf59038a975faeeb6dc0782-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/2b45e8d6abf59038a975faeeb6dc0782-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/2b45e8d6abf59038a975faeeb6dc0782-Metadata.json", "review": "", "metareview": "", "pdf_size": 175312, "gs_citation": 13, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4476594582046077035&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "Intelligent Data Analysis Group, Fraunhofer Institute FIRST, Kekulestr. 7, 12489 Berlin, Germany; Intelligent Data Analysis Group, Fraunhofer Institute FIRST, Kekulestr. 7, 12489 Berlin, Germany; Neurophysics Group, Dept. of Neurology, Campus Benjamin Franklin, Charit\u00e9, University Medicine Berlin, Hindenburgdamm 20, 12200 Berlin, Germany", "aff_domain": "first.fhg.de; ; ", "email": "first.fhg.de; ; ", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;1", "aff_unique_norm": "Fraunhofer Institute FIRST;Charit\u00e9 - University Medicine Berlin", "aff_unique_dep": "Intelligent Data Analysis Group;Dept. 
of Neurology", "aff_unique_url": "https://www.first.fraunhofer.de/;https://www.charite.de", "aff_unique_abbr": "Fraunhofer FIRST;Charit\u00e8", "aff_campus_unique_index": "0;0;1", "aff_campus_unique": "Berlin;Benjamin Franklin", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Germany" }, { "id": "f195ba6eaf", "title": "An Application of Reinforcement Learning to Aerobatic Helicopter Flight", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/98c39996bf1543e974747a2549b3107c-Abstract.html", "author": "Pieter Abbeel; Adam Coates; Morgan Quigley; Andrew Y. Ng", "abstract": "Autonomous helicopter flight is widely regarded to be a highly challenging control problem. This paper presents the first successful autonomous completion on a real RC helicopter of the following four aerobatic maneuvers: forward flip and sideways roll at low speed, tail-in funnel, and nose-in funnel. Our experimental results significantly extend the state of the art in autonomous helicopter flight. We used the following approach: First we had a pilot fly the helicopter to help us find a helicopter dynamics model and a reward (cost) function. Then we used a reinforcement learning (optimal control) algorithm to find a controller that is optimized for the resulting model and reward function. More specifically, we used differential dynamic programming (DDP), an extension of the linear quadratic regulator (LQR).", "bibtex": "@inproceedings{NIPS2006_98c39996,\n author = {Abbeel, Pieter and Coates, Adam and Quigley, Morgan and Ng, Andrew},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {An Application of Reinforcement Learning to Aerobatic Helicopter Flight},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/98c39996bf1543e974747a2549b3107c-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/98c39996bf1543e974747a2549b3107c-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/98c39996bf1543e974747a2549b3107c-Metadata.json", "review": "", "metareview": "", "pdf_size": 159783, "gs_citation": 987, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6664075984555560149&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 22, "aff": ";;;", "aff_domain": ";;;", "email": ";;;", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster" }, { "id": "6a4d1b4e5c", "title": "An Approach to Bounded Rationality", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/bd85282513da4089c441926e1975898c-Abstract.html", "author": "Eli Ben-sasson; Ehud Kalai; Adam Kalai", "abstract": "A central question in game theory and artificial intelligence is how a rational agent should behave in a complex environment, given that it cannot perform unbounded computations. We study strategic aspects of this question by formulating a simple model of a game with additional costs (computational or otherwise) for each strategy. First we connect this to zero-sum games, proving a counter-intuitive generalization of the classic min-max theorem to zero-sum games with the addition of strategy costs. We then show that potential games with strategy costs remain potential games. Both zero-sum and potential games with strategy costs maintain a very appealing property: simple learning dynamics converge to equilibrium.\n\n1\n\nThe Approach and Basic Model\n\nHow should an intelligent agent play a complicated game like chess, given that it does not have unlimited time to think? 
This question reflects one fundamental aspect of \"bounded rationality,\" a term coined by Herbert Simon [1]. However, bounded rationality has proven to be a slippery concept to formalize (prior work has focused largely on finite automata playing simple repeated games such as prisoner's dilemma, e.g. [2, 3, 4, 5]). This paper focuses on the strategic aspects of decision-making in complex multi-agent environments, i.e., on how a player should choose among strategies of varying complexity, given that its opponents are making similar decisions. Our model applies to general strategic games and allows for a variety of complexities that arise in real-world applications. For this reason, it is applicable to one-shot games, to extensive games, and to repeated games, and it generalizes existing models such as repeated games played by finite automata. To easily see that bounded rationality can drastically affect the outcome of a game, consider the following factoring game. Player 1 chooses an n-bit number and sends it to Player 2, who attempts to find its prime factorization. If Player 2 is correct, he is paid 1 by Player 1, otherwise he pays 1 to Player 1. Ignoring complexity costs, the game is a trivial win for Player 2. However, for large n, the game is essentially a win for Player 1, who can easily output a large random number that Player 2 cannot factor (under appropriate complexity assumptions). In general, the outcome of a game (even a zero-sum game like chess) with bounded rationality is not so clear. To concretely model such games, we consider a set of available strategies along with strategy costs. Consider an example of two players preparing to play a computerized chess game for $100K prize. Suppose the players simultaneously choose among two available options: to use a $10K program A or an advanced program B, which costs $50K. 
We refer to the row chooser as white and to the column chooser as black, with the corresponding advantages reflected by the win\n\n\n\nprobabilities of white described in Table 1a. For example, when both players use program A, white wins 55% of the time and black wins 45% of the time (we ignore draws). The players naturally want to choose strategies to maximize their expected net payoffs, i.e., their expected payoff minus their cost. Each cell in Table 1b contains a pair of payoffs in units of thousands of dollars; the first is white's net expected payoff and the second is black's. a) A B A 55% 93% B 13% 51% b) A (-10) B (-50) A (-10) 45, 35 43,-3 B (-50) 3, 37 1,-1\n\nFigure 1: a) Table of first-player winning probabilities based on program choices. b) Table of expected net earnings in thousands of dollars. The unique equilibrium is (A,B) which strongly favors the second player. A surprising property is evident in the above game. Everything about the game seems to favor white. Yet due to the (symmetric) costs, at the unique Nash equilibrium (A,B) of Table 1b, black wins 87% of the time and nets $34K more than white. In fact, it is a dominant strategy for white to play A and for black to play B. To see this, note that playing B increases white's probability of winning by 38%, independent of what black chooses. Since the pot is $100K, this is worth $38K in expectation, but B costs $40K more than A. On the other hand, black enjoys a 42% increase in probability of winning due to B, independent of what white does, and hence is willing to pay the extra $40K. Before formulating the general model, we comment on some important aspects of the chess example. First, traditional game theory states that chess can be solved in \"only\" two rounds of elimination of dominated strategies [10], and the outcome with optimal play should always be the same: either a win for white or a win for black. 
This theoretical prediction fails in practice: in top play, the outcome is very nondeterministic with white winning roughly twice as often as black. The game is too large and complex to be solved by brute force. Second, we have been able to analyze the above chess program selection example exactly because we formulated it as a game with a small number of available strategies per player. Another formulation that would fit into our model would be to include all strategies of chess, with some reasonable computational costs. However, it is beyond our means to analyze such a large game. Third, in the example above we used monetary software cost to illustrate a type of strategy cost. But the same analysis could accommodate many other types of costs that can be measured numerically and subtracted from the payoffs, such as time or effort involved in the development or execution of a strategy, and other resource costs. Additional examples in this paper include the number of states in a finite automaton, the number of gates in a circuit, and the number of turns on a commuter's route. Our analysis is limited, however, to cost functions that depend only on the strategy of the player and not the strategy chosen by its opponent. For example, if our players above were renting computers A or B and paying for the time of actual usage, then the cost of using A would depend on the choice of computer made by the opponent. Generalizing the example above, we consider a normal form game with the addition of strategy costs, a player-dependent cost for playing each available strategy. Our main results regard two important classes of games: constant-sum and potential games. Potential games with strategy costs remain potential games. While two-person constant-sum games are no longer constant, we give a basic structural description of optimal play in these games. 
Lastly, we show that known learning dynamics converge in both classes of games.\n\n2 Definition of strategy costs We first define an N -person normal-form game G = (N , S, p) consisting of finite sets of (available) pure strategies S = (S1 , . . . , SN ) for the N players, and a payoff function p : S1 . . . SN RN . Players simultaneously choose strategies si Si after which player i is rewarded with pi (s1 , . . . , sN ). A randomized or mixed strategy i for player i is a probability distribution over its pure strategies Si , x . x i i = R|Si | : = 1, xj 0 j\n\n\n\nWe extend p to 1 . . . N in the natural way, i.e., pi (1 , . . . , N ) = E[pi (s1 , . . . , sN )] where each si is drawn from i , independently. Denote by s-i = (s1 , s2 , . . . , si-1 , si+1 , . . . , sN ) and similarly for -i . A best response by player i to -i is i i such that pi (i , -i ) = maxi i pi (i , -i ). A (mixed strategy) Nash equilibrium of G is a vector of strategies (1 , . . . , N ) 1 . . . N such that each i is a best response to -i . We now define G-c , the game G with strategy costs c = (c1 , . . . , cN ), where ci : Si R. It is simply an N -person normal-form game G-c = (N , S, p-c ) with the same sets of pure strategies as G, but with a new payoff function p-c : S1 . . . SN RN where, p-c (s1 , . . . , sN ) = pi (s1 , . . . , sN ) - ci (si ), for i = 1, . . . , N . i We similarly extend ci to i in the natural way.\n\n3 Two-person constant-sum games with strategy costs Recall that a game is constant-sum (k -sum for short) if at every combination of individual strategies, the players' payoffs sum to some constant k. Two-person k -sum games have some important properties, not shared by general sum games, which result in more effective game-theoretic analysis. In particular, every k -sum game has a unique value v R. A mixed strategy for player 1 is called optimal if it guarantees payoff v against any strategy of player 2. 
A mixed strategy for player 2 is optimal if it guarantees k - v against any strategy of player 1. The term optimal is used because optimal strategies guarantee as much as possible (v + k - v = k ) and playing anything that is not optimal can result in a lesser payoff, if the opponent responds appropriately. (This fact is easily illustrated in the game rock-paper-scissors randomizing uniformly among the strategies guarantees each player 50% of the pot, while playing anything other than uniformly random enables the opponent to win strictly more often.) The existence of optimal strategies for both players follows from the min-max theorem. An easy corollary is that the Nash equilibria of a k -sum game are exchangeable: they are simply the cross-product of the sets of optimal mixed strategies for both players. Lastly, it is well-known that equilibria in two-person k -sum games can be learned in repeated play by simple dynamics that are guaranteed to converge [17]. With the addition of strategy costs, a k -sum game is no longer k -sum and hence it is not clear, at first, what optimal strategies there are, if any. (Many examples of general-sum games do not have optimal strategies.) We show the following generalization of the above properties for zero-sum games with strategies costs. Theorem 1. Let G be a finite two-person k -sum game and G-c be the game with strategy costs c = (c1 , c2 ). 1. There is a value v R for G-c and nonempty sets OPT1 and OPT2 of optimal mixed strategies for the two players. OPT1 is the set of strategies that guarantee player 1 payoff v - c2 (2 ), against any strategy 2 chosen by player 2. Similarly, OPT2 is the set of strategies that guarantee player 2 payoff k - v - c1 (1 ) against any 1 . 2. The Nash equilibria of G-c are exchangeable: the set of Nash equilibria is OPT1 OPT2 . 3. The set of net payoffs possible at equilibrium is an axis-parallel rectangle in R2 . 
For zero-sum games, the term optimal strategy was natural: the players could guarantee v and k - v , respectively, and this is all that there was to share. Moreover, it is easy to see that only pairs of optimal strategies can have the Nash equilibria property, being best responses to each other. In the case of zero-sum games with strategy costs, the optimal structure is somewhat counterintuitive. First, it is strange that the amount guaranteed by either player depends on the cost of the other player's action, when in reality each player pays the cost of its own action. Second, it is not even clear why we call these optimal strategies. To get a feel for this latter issue, notice that the sum of the net payoffs to the two players is always k - c1 (1 ) - c2 (2 ), which is exactly the total of what optimal strategies guarantee, v - c2 (2 ) + k - v - c1 (1 ). Hence, if both players play what we call optimal strategies, then neither player can improve and they are at Nash equilibrium. On the other hand, suppose player 1 selects a strategy 1 that does not guarantee him payoff at least\n\n\n\nv - c2 (2 ). This means that there is some response 2 by player 2 for which player 1's payoff is < v - c2 (2 ) and hence player 2's payoff is > k - v - c1 (1 ). Thus player 2's best response to 1 must give player 2 payoff > k - v - c1 (1 ) and leave player 1 with < v - c2 (2 ). The proof of the theorem (the above reasoning only implies part 2 from part 1) is based on the following simple observation. Consider the k -sum game H = (N , S, q ) with the following payoffs: q1 (s1 , s2 ) = p1 (s1 , s2 ) - c1 (s1 ) + c2 (s2 ) = p-c (s1 , s2 ) + c2 (s2 ) 1 q2 (s1 , s2 ) = p2 (s1 , s2 ) - c2 (s1 ) + c1 (s1 ) = p-c (s1 , s2 ) + c1 (s1 ) 2 That is to say, Player 1 pays its strategy cost to Player 2 and vice versa. 
It is easy to verify that, 1 , 1 1 , 2 2 q1 (1 , 2 ) - q1 (1 , 2 ) = p-c (1 , 2 ) - p-c (1 , 2 ) 1 1 (1)\n\nThis means that the relative advantage in switching strategies in games G-c and H are the same. In particular, 1 is a best response to 2 in G-c if and only if it is in H . A similar equality holds for player 2's payoffs. Note that these conditions imply that the games G-c and H are strategically equivalent in the sense defined by Moulin and Vial [16]. Proof of Theorem 1. Let v be the value of the game H . For any strategy 1 that guarantees player 1 payoff v in H , 1 guarantees player 1 v - c2 (2 ) in G-c . This follows from the definition of H . Similarly, any strategy 2 that guarantees player 2 payoff k - v in H will guarantee k - v - c1 (1 ) in G-c . Thus the sets OPT1 and OPT2 are non-empty. Since v - c2 (2 ) + k - v - c1 (1 ) = k - c1 (1 ) - c2 (2 ) is the sum of the payoffs in G-c , nothing greater can be guaranteed by either player. Since the best responses of G-c and H are the same, the Nash equilibria of the two games are the same. Since H is a k -sum game, its Nash equilibria are exchangeable, and thus we have part 2. (This holds for any game that is strategically equivalent to k -sum.) Finally, the optimal mixed strategies OPT1 , OPT2 of any k -sum game are convex sets. If we look at the achievable costs of the mixed strategies in OPTi , by the definition of the cost of a mixed strategy, this will be a convex subset of R, i.e., an interval. By parts 1 and 2, the set of achievable net payoffs at equilibria of G-c are therefore the cross-product of intervals. To illustrate Theorem 1 graphically, Figure 2 gives a 4 4 example with costs of 1, 2, 3, and 4, respectively. It illustrates a situation with multiple optimal strategies. Notice that player 1 is completely indifferent between its optimal choices A and B, and player 2 is completely indifferent between C and D. Thus the only question is how kind they would like to be to their opponent. 
The (A,C) equilibrium is perhaps most natural as it yields the highest payoffs for both parties. Note that the proof of the above theorem actually shows that zero-sum games with costs share additional appealing properties of zero-sum games. For example, computing optimal strategies is a polynomial time-computation in an n n game, as it amounts to computing the equilibria of H . We next show that they also have appealing learning properties, though they do not share all properties of zero-sum games.1 3.1 Learning in repeated two-person k -sum games with strategy costs Another desirable property of k -sum games is that, in repeated play, natural learning dynamics converge to the set of Nash equilibria. Before we state the analogous conditions for k -sum games with costs, we briefly give a few definitions. A repeated game is one in which players choose a sequence of strategy vectors s1 , s2 , . . ., where each st = (st , . . . , st ) is a strategy vector of some 1 N fixed stage game G = (N , S, p). Under perfect monitoring, when selecting an action in any period the players know all the previously selected actions. As we shall discuss, it is possible to learn to play without perfect monitoring as well. 1 One property that is violated by the chess example is the \"advantage of an advantage\" property. Say Player 1 has the advantage over Player 2 in a square game if p1 (s1 , s2 ) p2 (s2 , s1 ) for all strategies s1 , s2 . At equilibrium of a k-sum game, a player with the advantage must have a payoff at least as large as its opponent. 
This is no longer the case after incorporating strategy costs, as seen in the chess example, where Player 1 has the advantage (even including strategy costs), yet his equilibrium payoff is smaller than 2's.\n\n\n\na) A B C D b) A (-1) B (-2) C (-3) D (-4)\n\nA 6, 4 7, 3 7.5, 2.5 8.5, 1.5 A (-1) 5, 3 5, 2 4.5, 1.5 4.5, 0.5\n\nB 5, 5 6, 4 6.5, 3.5 7, 3 B (-2) 4, 3 4, 2 3.5, 1.5 3, 1\n\nC 3, 7 4, 6 4.5, 5.5 5.5, 4.5 C (-3) 2, 4 2, 3 1.5, 2.5 1.5, 1.5\n\nD 2, 8 3, 7 3.5, 6.5 4.5, 5.5 D (-4) 1, 4 1, 3 0.5, 2.5 0.5, 1.5\n\nPLAYER 2 NET PAYOFF", "bibtex": "@inproceedings{NIPS2006_bd852825,\n author = {Ben-sasson, Eli and Kalai, Ehud and Kalai, Adam},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {An Approach to Bounded Rationality},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/bd85282513da4089c441926e1975898c-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/bd85282513da4089c441926e1975898c-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/bd85282513da4089c441926e1975898c-Metadata.json", "review": "", "metareview": "", "pdf_size": 119542, "gs_citation": 32, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1943070151390967101&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 25, "aff": ";;", "aff_domain": ";;", "email": ";;", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster" }, { "id": "2c4c2f512a", "title": "An EM Algorithm for Localizing Multiple Sound Sources in Reverberant Environments", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/99ef04eb612baf0e86671a5109e22154-Abstract.html", "author": "Michael I. Mandel; Daniel P. 
Ellis; Tony Jebara", "abstract": "We present a method for localizing and separating sound sources in stereo recordings that is robust to reverberation and does not make any assumptions about the source statistics. The method consists of a probabilistic model of binaural multisource recordings and an expectation maximization algorithm for finding the maximum likelihood parameters of that model. These parameters include distributions over delays and assignments of time-frequency regions to sources. We evaluate this method against two comparable algorithms on simulations of simultaneous speech from two or three sources. Our method outperforms the others in anechoic conditions and performs as well as the better of the two in the presence of reverberation.", "bibtex": "@inproceedings{NIPS2006_99ef04eb,\n author = {Mandel, Michael and Ellis, Daniel and Jebara, Tony},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {An EM Algorithm for Localizing Multiple Sound Sources in Reverberant Environments},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/99ef04eb612baf0e86671a5109e22154-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/99ef04eb612baf0e86671a5109e22154-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/99ef04eb612baf0e86671a5109e22154-Metadata.json", "review": "", "metareview": "", "pdf_size": 264696, "gs_citation": 149, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15767422849284638578&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 18, "aff": "LabROSA, Dept. of Electrical Engineering, Columbia University, New York, NY; LabROSA, Dept. of Electrical Engineering, Columbia University, New York, NY; Dept. 
of Computer Science, Columbia University, New York, NY", "aff_domain": "ee.columbia.edu;ee.columbia.edu;cs.columbia.edu", "email": "ee.columbia.edu;ee.columbia.edu;cs.columbia.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "Columbia University", "aff_unique_dep": "Dept. of Electrical Engineering", "aff_unique_url": "https://www.columbia.edu", "aff_unique_abbr": "Columbia", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "New York", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "f7378fc09e", "title": "An Efficient Method for Gradient-Based Adaptation of Hyperparameters in SVM Models", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/cc431fd7ec4437de061c2577a4603995-Abstract.html", "author": "S. S. Keerthi; Vikas Sindhwani; Olivier Chapelle", "abstract": "We consider the task of tuning hyperparameters in SVM models based on minimizing a smooth performance validation function, e.g., smoothed k-fold crossvalidation error, using non-linear optimization techniques. The key computation in this approach is that of the gradient of the validation function with respect to hyperparameters. We show that for large-scale problems involving a wide choice of kernel-based models and validation functions, this computation can be very efficiently done; often within just a fraction of the training time. Empirical results show that a near-optimal set of hyperparameters can be identified by our approach with very few training rounds and gradient computations. .", "bibtex": "@inproceedings{NIPS2006_cc431fd7,\n author = {Keerthi, S. and Sindhwani, Vikas and Chapelle, Olivier},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {An Efficient Method for Gradient-Based Adaptation of Hyperparameters in SVM Models},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/cc431fd7ec4437de061c2577a4603995-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/cc431fd7ec4437de061c2577a4603995-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/cc431fd7ec4437de061c2577a4603995-Metadata.json", "review": "", "metareview": "", "pdf_size": 194048, "gs_citation": 212, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4568022275217244821&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 19, "aff": "Yahoo! Research; Department of Computer Science, University of Chicago; MPI for Biological Cybernetics", "aff_domain": "yahoo-inc.com;cs.uchicago.edu;tuebingen.mpg.de", "email": "yahoo-inc.com;cs.uchicago.edu;tuebingen.mpg.de", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;2", "aff_unique_norm": "Yahoo!;University of Chicago;Max Planck Institute for Biological Cybernetics", "aff_unique_dep": "Yahoo! Research;Department of Computer Science;Biological Cybernetics", "aff_unique_url": "https://research.yahoo.com;https://www.uchicago.edu;https://www.biological-cybernetics.de", "aff_unique_abbr": "Yahoo!;UChicago;MPIBC", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;1", "aff_country_unique": "United States;Germany" }, { "id": "4f43d336f1", "title": "An Information Theoretic Framework for Eukaryotic Gradient Sensing", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/cb12d7f933e7d102c52231bf62b8a678-Abstract.html", "author": "Joseph M. Kimmel; Richard M. Salter; Peter J. 
Thomas", "abstract": "Chemical reaction networks by which individual cells gather and process information about their chemical environments have been dubbed \"signal transduction\" networks. Despite this suggestive terminology, there have been few attempts to analyze chemical signaling systems with the quantitative tools of information theory. Gradient sensing in the social amoeba Dictyostelium discoideum is a well characterized signal transduction system in which a cell estimates the direction of a source of diffusing chemoattractant molecules based on the spatiotemporal sequence of ligand-receptor binding events at the cell membrane. Using Monte Carlo techniques (MCell) we construct a simulation in which a collection of individual ligand particles undergoing Brownian diffusion in a three-dimensional volume interact with receptors on the surface of a static amoeboid cell. Adapting a method for estimation of spike train entropies described by Victor (originally due to Kozachenko and Leonenko), we estimate lower bounds on the mutual information between the transmitted signal (direction of ligand source) and the received signal (spatiotemporal pattern of receptor binding/unbinding events). Hence we provide a quantitative framework for addressing the question: how much could the cell know, and when could it know it? We show that the time course of the mutual information between the cell's surface receptors and the (unknown) gradient direction is consistent with experimentally measured cellular response times. We find that the acquisition of directional information depends strongly on the time constant at which the intracellular response is filtered.", "bibtex": "@inproceedings{NIPS2006_cb12d7f9,\n author = {Kimmel, Joseph and Salter, Richard and Thomas, Peter},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {An Information Theoretic Framework for Eukaryotic Gradient Sensing},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/cb12d7f933e7d102c52231bf62b8a678-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/cb12d7f933e7d102c52231bf62b8a678-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/cb12d7f933e7d102c52231bf62b8a678-Metadata.json", "review": "", "metareview": "", "pdf_size": 376073, "gs_citation": 22, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14058539175736686156&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 12, "aff": "Computer Science Program, Oberlin College+Computational Neuroscience Graduate Program, The University of Chicago; Oberlin Center for Computation and Modeling, Oberlin College; Departments of Mathematics, Biology and Cognitive Science, Case Western Reserve University+Oberlin College Research Associate", "aff_domain": "uchicago.edu;cs.oberlin.edu;case.edu", "email": "uchicago.edu;cs.oberlin.edu;case.edu", "github": "", "project": "http://occam.oberlin.edu/; http://www.case.edu/artsci/math/thomas/thomas.html", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0+1;0;2+0", "aff_unique_norm": "Oberlin College;University of Chicago;Case Western Reserve University", "aff_unique_dep": "Computer Science Program;Computational Neuroscience Graduate Program;Departments of Mathematics, Biology and Cognitive Science", "aff_unique_url": "https://www.oberlin.edu;https://www.uchicago.edu;https://www.case.edu", "aff_unique_abbr": "Oberlin;UChicago;CWRU", "aff_campus_unique_index": ";", "aff_campus_unique": "", "aff_country_unique_index": "0+0;0;0+0", "aff_country_unique": "United States" }, { "id": "2083c24a07", "title": "An Oracle Inequality for Clipped Regularized Risk Minimizers", "site": 
"https://papers.nips.cc/paper_files/paper/2006/hash/d8ab1a52f058358b947cdf8261b5e1a2-Abstract.html", "author": "Ingo Steinwart; Don Hush; Clint Scovel", "abstract": "We establish a general oracle inequality for clipped approximate minimizers of regularized empirical risks and apply this inequality to support vector machine (SVM) type algorithms. We then show that for SVMs using Gaussian RBF kernels for classification this oracle inequality leads to learning rates that are faster than the ones established in [9]. Finally, we use our oracle inequality to show that a simple parameter selection approach based on a validation set can yield the same fast learning rates without knowing the noise exponents which were required to be known a-priori in [9].", "bibtex": "@inproceedings{NIPS2006_d8ab1a52,\n author = {Steinwart, Ingo and Hush, Don and Scovel, Clint},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {An Oracle Inequality for Clipped Regularized Risk Minimizers},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/d8ab1a52f058358b947cdf8261b5e1a2-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/d8ab1a52f058358b947cdf8261b5e1a2-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/d8ab1a52f058358b947cdf8261b5e1a2-Metadata.json", "review": "", "metareview": "", "pdf_size": 129960, "gs_citation": 25, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14929205482197710741&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "Modelling, Algorithms and Informatics Group, CCS-3; Modelling, Algorithms and Informatics Group, CCS-3; Modelling, Algorithms and Informatics Group, CCS-3", "aff_domain": "lanl.gov;lanl.gov;lanl.gov", "email": "lanl.gov;lanl.gov;lanl.gov", "github": "", "project": "", "author_num": 3, "track": 
"main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "Modelling, Algorithms and Informatics Group", "aff_unique_dep": "CCS-3", "aff_unique_url": "", "aff_unique_abbr": "", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "", "aff_country_unique": "" }, { "id": "20821367db", "title": "Analysis of Contour Motions", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/9023effe3c16b0477df9b93e26d57e2c-Abstract.html", "author": "Ce Liu; William T. Freeman; Edward H. Adelson", "abstract": "A reliable motion estimation algorithm must function under a wide range of con- ditions. One regime, which we consider here, is the case of moving objects with contours but no visible texture. Tracking distinctive features such as corners can disambiguate the motion of contours, but spurious features such as T-junctions can be badly misleading. It is dif\ufb01cult to determine the reliability of motion from local measurements, since a full rank covariance matrix can result from both real and spurious features. We propose a novel approach that avoids these points al- together, and derives global motion estimates by utilizing information from three levels of contour analysis: edgelets, boundary fragments and contours. Boundary fragment are chains of orientated edgelets, for which we derive motion estimates from local evidence. The uncertainties of the local estimates are disambiguated after the boundary fragments are properly grouped into contours. The grouping is done by constructing a graphical model and marginalizing it using importance sampling. We propose two equivalent representations in this graphical model, re- versible switch variables attached to the ends of fragments and fragment chains, to capture both local and global statistics of boundaries. Our system is success- fully applied to both synthetic and real video sequences containing high-contrast boundaries and textureless regions. 
The system produces good motion estimates along with properly grouped and completed contours.", "bibtex": "@inproceedings{NIPS2006_9023effe,\n author = {Liu, Ce and Freeman, William and Adelson, Edward},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Analysis of Contour Motions},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/9023effe3c16b0477df9b93e26d57e2c-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/9023effe3c16b0477df9b93e26d57e2c-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/9023effe3c16b0477df9b93e26d57e2c-Metadata.json", "review": "", "metareview": "", "pdf_size": 581091, "gs_citation": 47, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18108337419370218431&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 16, "aff": ";;", "aff_domain": ";;", "email": ";;", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster" }, { "id": "5bff5348cd", "title": "Analysis of Empirical Bayesian Methods for Neuroelectromagnetic Source Localization", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/ccd2e3eaa5c991ac880991328c8f1463-Abstract.html", "author": "Rey Ram\u00edrez; Jason Palmer; Scott Makeig; Bhaskar D. Rao; David P. Wipf", "abstract": "The ill-posed nature of the MEG/EEG source localization problem requires the incorporation of prior assumptions when choosing an appropriate solution out of an infinite set of candidates. Bayesian methods are useful in this capacity because they allow these assumptions to be explicitly quantified. Recently, a number of empirical Bayesian approaches have been proposed that attempt a form of model selection by using the data to guide the search for an appropriate prior. 
While seemingly quite different in many respects, we apply a unifying framework based on automatic relevance determination (ARD) that elucidates various attributes of these methods and suggests directions for improvement. We also derive theoretical properties of this methodology related to convergence, local minima, and localization bias and explore connections with established algorithms.", "bibtex": "@inproceedings{NIPS2006_ccd2e3ea,\n author = {Ram\\'{\\i}rez, Rey and Palmer, Jason and Makeig, Scott and Rao, Bhaskar and Wipf, David},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Analysis of Empirical Bayesian Methods for Neuroelectromagnetic Source Localization},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/ccd2e3eaa5c991ac880991328c8f1463-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/ccd2e3eaa5c991ac880991328c8f1463-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/ccd2e3eaa5c991ac880991328c8f1463-Metadata.json", "review": "", "metareview": "", "pdf_size": 118423, "gs_citation": 37, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9165911827725678742&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 14, "aff": "Signal Processing and Intelligent Systems Lab; Swartz Center for Computational Neuroscience; Signal Processing and Intelligent Systems Lab+Swartz Center for Computational Neuroscience; Swartz Center for Computational Neuroscience; Signal Processing and Intelligent Systems Lab", "aff_domain": "ucsd.edu;sccn.ucsd.edu;ucsd.edu;sccn.ucsd.edu;ucsd.edu", "email": "ucsd.edu;sccn.ucsd.edu;ucsd.edu;sccn.ucsd.edu;ucsd.edu", "github": "", "project": "", "author_num": 5, "track": "main", "status": "Poster", "aff_unique_index": "0;1;0+1;1;0", "aff_unique_norm": "Signal Processing and Intelligent 
Systems Lab;Swartz Center for Computational Neuroscience", "aff_unique_dep": "Signal Processing and Intelligent Systems;Computational Neuroscience", "aff_unique_url": ";", "aff_unique_abbr": ";", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "1;1;1", "aff_country_unique": ";United States" }, { "id": "5ab531f8c6", "title": "Analysis of Representations for Domain Adaptation", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/b1b0432ceafb0ce714426e9114852ac7-Abstract.html", "author": "Shai Ben-David; John Blitzer; Koby Crammer; Fernando Pereira", "abstract": "Discriminative learning methods for classification perform well when training and test data are drawn from the same distribution. In many situations, though, we have labeled training data for a source domain, and we wish to learn a classifier which performs well on a target domain with a different distribution. Under what conditions can we adapt a classifier trained on the source domain for use in the target domain? Intuitively, a good feature representation is a crucial factor in the success of domain adaptation. We formalize this intuition theoretically with a generalization bound for domain adaption. Our theory illustrates the tradeoffs inherent in designing a representation for domain adaptation and gives a new justification for a recently proposed model. It also points toward a promising new model for domain adaptation: one which explicitly minimizes the difference between the source and target domains, while at the same time maximizing the margin of the training set.", "bibtex": "@inproceedings{NIPS2006_b1b0432c,\n author = {Ben-David, Shai and Blitzer, John and Crammer, Koby and Pereira, Fernando},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Analysis of Representations for Domain Adaptation},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/b1b0432ceafb0ce714426e9114852ac7-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/b1b0432ceafb0ce714426e9114852ac7-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/b1b0432ceafb0ce714426e9114852ac7-Metadata.json", "review": "", "metareview": "", "pdf_size": 73446, "gs_citation": 2911, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3218895349072063266&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 20, "aff": "School of Computer Science, University of Waterloo; Department of Computer and Information Science, University of Pennsylvania; Department of Computer and Information Science, University of Pennsylvania; Department of Computer and Information Science, University of Pennsylvania", "aff_domain": "cs.uwaterloo.ca;cis.upenn.edu;cis.upenn.edu;cis.upenn.edu", "email": "cs.uwaterloo.ca;cis.upenn.edu;cis.upenn.edu;cis.upenn.edu", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;1;1;1", "aff_unique_norm": "University of Waterloo;University of Pennsylvania", "aff_unique_dep": "School of Computer Science;Department of Computer and Information Science", "aff_unique_url": "https://uwaterloo.ca;https://www.upenn.edu", "aff_unique_abbr": "UWaterloo;UPenn", "aff_campus_unique_index": "0", "aff_campus_unique": "Waterloo;", "aff_country_unique_index": "0;1;1;1", "aff_country_unique": "Canada;United States" }, { "id": "fb3af2e150", "title": "Approximate Correspondences in High Dimensions", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/2d95666e2649fcfc6e3af75e09f5adb9-Abstract.html", "author": "Kristen Grauman; Trevor Darrell", "abstract": "Pyramid intersection is an ef\ufb01cient method for computing an approximate 
partial matching between two sets of feature vectors. We introduce a novel pyramid em- bedding based on a hierarchy of non-uniformly shaped bins that takes advantage of the underlying structure of the feature space and remains accurate even for sets with high-dimensional feature vectors. The matching similarity is computed in linear time and forms a Mercer kernel. Whereas previous matching approxima- tion algorithms suffer from distortion factors that increase linearly with the fea- ture dimension, we demonstrate that our approach can maintain constant accuracy even as the feature dimension increases. When used as a kernel in a discrimina- tive classi\ufb01er, our approach achieves improved object recognition results over a state-of-the-art set kernel.", "bibtex": "@inproceedings{NIPS2006_2d95666e,\n author = {Grauman, Kristen and Darrell, Trevor},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Approximate Correspondences in High Dimensions},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/2d95666e2649fcfc6e3af75e09f5adb9-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/2d95666e2649fcfc6e3af75e09f5adb9-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/2d95666e2649fcfc6e3af75e09f5adb9-Metadata.json", "review": "", "metareview": "", "pdf_size": 1211299, "gs_citation": 161, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7857256043037373427&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 14, "aff": "Department of Computer Sciences, University of Texas at Austin; CS and AI Laboratory, Massachusetts Institute of Technology", "aff_domain": "cs.utexas.edu;csail.mit.edu", "email": "cs.utexas.edu;csail.mit.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", 
"aff_unique_index": "0;1", "aff_unique_norm": "University of Texas at Austin;Massachusetts Institute of Technology", "aff_unique_dep": "Department of Computer Sciences;CS and AI Laboratory", "aff_unique_url": "https://www.utexas.edu;https://www.mit.edu", "aff_unique_abbr": "UT Austin;MIT", "aff_campus_unique_index": "0;1", "aff_campus_unique": "Austin;Cambridge", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "0983cea40c", "title": "Approximate inference using planar graph decomposition", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/975e6107778ce7a40b9878bfb96a16a7-Abstract.html", "author": "Amir Globerson; Tommi S. Jaakkola", "abstract": "A number of exact and approximate methods are available for inference calculations in graphical models. Many recent approximate methods for graphs with cycles are based on tractable algorithms for tree structured graphs. Here we base the approximation on a different tractable model, planar graphs with binary variables and pure interaction potentials (no external field). The partition function for such models can be calculated exactly using an algorithm introduced by Fisher and Kasteleyn in the 1960s. We show how such tractable planar models can be used in a decomposition to derive upper bounds on the partition function of non-planar models. The resulting algorithm also allows for the estimation of marginals. We compare our planar decomposition to the tree decomposition method of Wainwright et. al., showing that it results in a much tighter bound on the partition function, improved pairwise marginals, and comparable singleton marginals. Graphical models are a powerful tool for modeling multivariate distributions, and have been successfully applied in various fields such as coding theory and image processing. Applications of graphical models typically involve calculating two types of quantities, namely marginal distributions, and MAP assignments. 
The evaluation of the model partition function is closely related to calculating marginals [12]. These three problems can rarely be solved exactly in polynomial time, and are provably computationally hard in the general case [1]. When the model conforms to a tree structure, however, all these problems can be solved in polynomial time. This has prompted extensive research into tree based methods. For example, the junction tree method [6] converts a graphical model into a tree by clustering nodes into cliques, such that the graph over cliques is a tree. The resulting maximal clique size (cf. tree width) may nevertheless be prohibitively large. Wainwright et. al. [9, 11] proposed an approximate method based on trees known as tree reweighting (TRW). The TRW approach decomposes the potential vector of a graphical model into a mixture over spanning trees of the model, and then uses convexity arguments to bound various quantities, such as the partition function. One key advantage of this approach is that it provides bounds on partition function value, a property which is not shared by approximations based on Bethe free energies [13]. In this paper we focus on a different class of tractable models: planar graphs. A graph is called planar if it can be drawn in the plane without crossing edges. Works in the 1960s by physicists Fisher [5] and Kasteleyn [7], among others, have shown that the partition function for planar graphs may be calculated in polynomial time. This, however, is true under two key restrictions. One is that the variables xi are binary. The other is that the interaction potential depends only on xi xj (where xi \u2208 {\u00b11}), and not on their individual values (i.e., the zero external field case). Here we show how the above method can be used to obtain upper bounds on the partition function for non-planar graphs. 
As in TRW, we decompose the potential of a non-planar graph into a sum", "bibtex": "@inproceedings{NIPS2006_975e6107,\n author = {Globerson, Amir and Jaakkola, Tommi},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Approximate inference using planar graph decomposition},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/975e6107778ce7a40b9878bfb96a16a7-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/975e6107778ce7a40b9878bfb96a16a7-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/975e6107778ce7a40b9878bfb96a16a7-Metadata.json", "review": "", "metareview": "", "pdf_size": 286989, "gs_citation": 65, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11078164535915557412&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 14, "aff": ";", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster" }, { "id": "b0844bc16f", "title": "Attentional Processing on a Spike-Based VLSI Neural Network", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/4aec1b3435c52abbdf8334ea0e7141e0-Abstract.html", "author": "Yingxue Wang; Rodney J. Douglas; Shih-Chii Liu", "abstract": "The neurons of the neocortex communicate by asynchronous events called action potentials (or 'spikes'). However, for simplicity of simulation, most models of processing by cortical neural networks have assumed that the activations of their neurons can be approximated by event rates rather than taking account of individual spikes. 
The obstacle to exploring the more detailed spike processing of these networks has been reduced considerably in recent years by the development of hybrid analog-digital Very-Large Scale Integrated (hVLSI) neural networks composed of spiking neurons that are able to operate in real-time. In this paper we describe such a hVLSI neural network that performs an interesting task of selective attentional processing that was previously described for a simulated 'pointer-map' rate model by Hahnloser and colleagues. We found that most of the computational features of their rate model can be reproduced in the spiking implementation; but, that spike-based processing requires a modification of the original network architecture in order to memorize a previously attended target.", "bibtex": "@inproceedings{NIPS2006_4aec1b34,\n author = {Wang, Yingxue and Douglas, Rodney and Liu, Shih-Chii},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Attentional Processing on a Spike-Based VLSI Neural Network},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/4aec1b3435c52abbdf8334ea0e7141e0-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/4aec1b3435c52abbdf8334ea0e7141e0-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/4aec1b3435c52abbdf8334ea0e7141e0-Metadata.json", "review": "", "metareview": "", "pdf_size": 508298, "gs_citation": 2, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4808917743584926649&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "Institute of Neuroinformatics, University of Zurich and ETH Zurich; Institute of Neuroinformatics, University of Zurich and ETH Zurich; Institute of Neuroinformatics, University of Zurich and ETH Zurich", "aff_domain": "ini.phys.ethz.ch;ini.phys.ethz.ch;ini.phys.ethz.ch", "email": "ini.phys.ethz.ch;ini.phys.ethz.ch;ini.phys.ethz.ch", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of Zurich", "aff_unique_dep": "Institute of Neuroinformatics", "aff_unique_url": "https://www.neuro.ethz.ch/", "aff_unique_abbr": "UZH", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Switzerland" }, { "id": "b6eb4fd47a", "title": "Attribute-efficient learning of decision lists and linear threshold functions under unconcentrated distributions", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/a724b9124acc7b5058ed75a31a9c2919-Abstract.html", "author": "Philip M. Long; Rocco Servedio", "abstract": "We consider the well-studied problem of learning decision lists using few examples when many irrelevant features are present. 
We show that smooth boosting algorithms such as MadaBoost can efficiently learn decision lists of length k over n boolean variables using poly(k , log n) many examples provided that the marginal distribution over the relevant variables is \"not too concentrated\" in an L 2 -norm sense. Using a recent result of Hastad, we extend the analysis to obtain a similar (though quantitatively weaker) result for learning arbitrary linear threshold functions with k nonzero coefficients. Experimental results indicate that the use of a smooth boosting algorithm, which plays a crucial role in our analysis, has an impact on the actual performance of the algorithm.", "bibtex": "@inproceedings{NIPS2006_a724b912,\n author = {Long, Philip and Servedio, Rocco},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Attribute-efficient learning of decision lists and linear threshold functions under unconcentrated distributions},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/a724b9124acc7b5058ed75a31a9c2919-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/a724b9124acc7b5058ed75a31a9c2919-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/a724b9124acc7b5058ed75a31a9c2919-Metadata.json", "review": "", "metareview": "", "pdf_size": 115948, "gs_citation": 18, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14558937228629272238&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 24, "aff": "Google Mountain View, CA; Department of Computer Science Columbia University New York, NY", "aff_domain": "google.com;cs.columbia.edu", "email": "google.com;cs.columbia.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "Google;Columbia University", "aff_unique_dep": 
"Google;Department of Computer Science", "aff_unique_url": "https://www.google.com;https://www.columbia.edu", "aff_unique_abbr": "Google;Columbia", "aff_campus_unique_index": "0;1", "aff_campus_unique": "Mountain View;New York", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "3bfc560e2e", "title": "Automated Hierarchy Discovery for Planning in Partially Observable Environments", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/62da8c91ce7b10846231921795d6059e-Abstract.html", "author": "Laurent Charlin; Pascal Poupart; Romy Shioda", "abstract": "Planning in partially observable domains is a notoriously dif\ufb01cult problem. How- ever, in many real-world scenarios, planning can be simpli\ufb01ed by decomposing the task into a hierarchy of smaller planning problems. Several approaches have been proposed to optimize a policy that decomposes according to a hierarchy speci\ufb01ed a priori. In this paper, we investigate the problem of automatically discovering the hierarchy. More precisely, we frame the optimization of a hierarchical policy as a non-convex optimization problem that can be solved with general non-linear solvers, a mixed-integer non-linear approximation or a form of bounded hierar- chical policy iteration. By encoding the hierarchical structure as variables of the optimization problem, we can automatically discover a hierarchy. Our method is \ufb02exible enough to allow any parts of the hierarchy to be speci\ufb01ed based on prior knowledge while letting the optimization discover the unknown parts. 
It can also discover hierarchical policies, including recursive policies, that are more compact (potentially in\ufb01nitely fewer parameters) and often easier to understand given the decomposition induced by the hierarchy.", "bibtex": "@inproceedings{NIPS2006_62da8c91,\n author = {Charlin, Laurent and Poupart, Pascal and Shioda, Romy},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Automated Hierarchy Discovery for Planning in Partially Observable Environments},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/62da8c91ce7b10846231921795d6059e-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/62da8c91ce7b10846231921795d6059e-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/62da8c91ce7b10846231921795d6059e-Metadata.json", "review": "", "metareview": "", "pdf_size": 84135, "gs_citation": 39, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16404812437105284074&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 19, "aff": "David R. Cheriton School of Computer Science, Faculty of Mathematics, University of Waterloo, Waterloo, Ontario; David R. Cheriton School of Computer Science, Faculty of Mathematics, University of Waterloo, Waterloo, Ontario; Dept of Combinatorics and Optimization, Faculty of Mathematics, University of Waterloo, Waterloo, Ontario", "aff_domain": "cs.uwaterloo.ca;cs.uwaterloo.ca;math.uwaterloo.ca", "email": "cs.uwaterloo.ca;cs.uwaterloo.ca;math.uwaterloo.ca", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of Waterloo", "aff_unique_dep": "David R. 
Cheriton School of Computer Science", "aff_unique_url": "https://uwaterloo.ca", "aff_unique_abbr": "UWaterloo", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Waterloo", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Canada" }, { "id": "df406b1614", "title": "Balanced Graph Matching", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/d1c373ab1570cfb9a7dbb53c186b37a2-Abstract.html", "author": "Timothee Cour; Praveen Srinivasan; Jianbo Shi", "abstract": "Graph matching is a fundamental problem in Computer Vision and Machine Learning. We present two contributions. First, we give a new spectral relaxation technique for approximate solutions to matching problems, that naturally incorporates one-to-one or one-to-many constraints within the relaxation scheme. The second is a normalization procedure for existing graph matching scoring functions that can dramatically improve the matching accuracy. It is based on a reinterpretation of the graph matching compatibility matrix as a bipartite graph on edges for which we seek a bistochastic normalization. We evaluate our two contributions on a comprehensive test set of random graph matching problems, as well as on image correspondence problem. Our normalization procedure can be used to improve the performance of many existing graph matching algorithms, including spectral matching, graduated assignment and semidefinite programming.", "bibtex": "@inproceedings{NIPS2006_d1c373ab,\n author = {Cour, Timothee and Srinivasan, Praveen and Shi, Jianbo},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Balanced Graph Matching},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/d1c373ab1570cfb9a7dbb53c186b37a2-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/d1c373ab1570cfb9a7dbb53c186b37a2-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/d1c373ab1570cfb9a7dbb53c186b37a2-Metadata.json", "review": "", "metareview": "", "pdf_size": 228816, "gs_citation": 613, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2529439063411197984&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 21, "aff": "Department of Computer and Information Science, University of Pennsylvania; Department of Computer and Information Science, University of Pennsylvania; Department of Computer and Information Science, University of Pennsylvania", "aff_domain": "seas.upenn.edu;seas.upenn.edu;seas.upenn.edu", "email": "seas.upenn.edu;seas.upenn.edu;seas.upenn.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of Pennsylvania", "aff_unique_dep": "Department of Computer and Information Science", "aff_unique_url": "https://www.upenn.edu", "aff_unique_abbr": "UPenn", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "3a076a2dd3", "title": "Bayesian Detection of Infrequent Differences in Sets of Time Series with Shared Structure", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/248024541dbda1d3fd75fe49d1a4df4d-Abstract.html", "author": "Jennifer Listgarten; Radford M. Neal; Sam T. Roweis; Rachel Puckrin; Sean Cutler", "abstract": "We present a hierarchical Bayesian model for sets of related, but different, classes of time series data. 
Our model performs alignment simultaneously across all classes, while detecting and characterizing class-specific differences. During inference the model produces, for each class, a distribution over a canonical representation of the class. These class-specific canonical representations are automatically aligned to one another -- preserving common sub-structures, and highlighting differences. We apply our model to compare and contrast solenoid valve current data, and also, liquid-chromatography-ultraviolet-diode array data from a study of the plant Arabidopsis thaliana.", "bibtex": "@inproceedings{NIPS2006_24802454,\n author = {Listgarten, Jennifer and Neal, Radford and Roweis, Sam and Puckrin, Rachel and Cutler, Sean},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Bayesian Detection of Infrequent Differences in Sets of Time Series with Shared Structure},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/248024541dbda1d3fd75fe49d1a4df4d-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/248024541dbda1d3fd75fe49d1a4df4d-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/248024541dbda1d3fd75fe49d1a4df4d-Metadata.json", "review": "", "metareview": "", "pdf_size": 250840, "gs_citation": 24, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13256773400300959084&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "Department of Computer Science; Department of Computer Science; Department of Computer Science; Department of Botany; Department of Botany", "aff_domain": "cs.toronto.edu;cs.toronto.edu;cs.toronto.edu;hotmail.com;botany.utoronto.ca", "email": "cs.toronto.edu;cs.toronto.edu;cs.toronto.edu;hotmail.com;botany.utoronto.ca", "github": "", "project": "", "author_num": 5, "track": "main", "status": 
"Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "Unknown Institution;", "aff_unique_dep": "Department of Computer Science;", "aff_unique_url": ";", "aff_unique_abbr": ";", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "", "aff_country_unique": "" }, { "id": "649ba292fd", "title": "Bayesian Ensemble Learning", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/1706f191d760c78dfcec5012e43b6714-Abstract.html", "author": "Hugh A. Chipman; Edward I. George; Robert E. Mcculloch", "abstract": "We develop a Bayesian \"sum-of-trees\" model, named BART, where each tree is constrained by a prior to be a weak learner. Fitting and inference are accomplished via an iterative backfitting MCMC algorithm. This model is motivated by ensemble methods in general, and boosting algorithms in particular. Like boosting, each weak learner (i.e., each weak tree) contributes a small amount to the overall model. However, our procedure is defined by a statistical model: a prior and a likelihood, while boosting is defined by an algorithm. This model-based approach enables a full and accurate assessment of uncertainty in model predictions, while remaining highly competitive in terms of predictive accuracy.", "bibtex": "@inproceedings{NIPS2006_1706f191,\n author = {Chipman, Hugh and George, Edward and Mcculloch, Robert},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Bayesian Ensemble Learning},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/1706f191d760c78dfcec5012e43b6714-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/1706f191d760c78dfcec5012e43b6714-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/1706f191d760c78dfcec5012e43b6714-Metadata.json", "review": "", "metareview": "", "pdf_size": 113962, "gs_citation": 169, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3253511192610333566&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 18, "aff": ";;", "aff_domain": ";;", "email": ";;", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster" }, { "id": "bd21ca95f8", "title": "Bayesian Image Super-resolution, Continued", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/10ce03a1ed01077e3e289f3e53c72813-Abstract.html", "author": "Lyndsey C. Pickup; David P. Capel; Stephen J. Roberts; Andrew Zisserman", "abstract": "This paper develops a multi-frame image super-resolution approach from a Bayesian view-point by marginalizing over the unknown registration parameters relating the set of input low-resolution views. In Tipping and Bishop\u2019s Bayesian image super-resolution approach [16], the marginalization was over the super- resolution image, necessitating the use of an unfavorable image prior. By inte- grating over the registration parameters rather than the high-resolution image, our method allows for more realistic prior distributions, and also reduces the dimen- sion of the integral considerably, removing the main computational bottleneck of the other algorithm. In addition to the motion model used by Tipping and Bishop, illumination components are introduced into the generative model, allowing us to handle changes in lighting as well as motion. 
We show results on real and synthetic datasets to illustrate the efficacy of this approach.", "bibtex": "@inproceedings{NIPS2006_10ce03a1,\n author = {Pickup, Lyndsey and Capel, David and Roberts, Stephen J and Zisserman, Andrew},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Bayesian Image Super-resolution, Continued},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/10ce03a1ed01077e3e289f3e53c72813-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/10ce03a1ed01077e3e289f3e53c72813-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/10ce03a1ed01077e3e289f3e53c72813-Metadata.json", "review": "", "metareview": "", "pdf_size": 99509, "gs_citation": 108, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13331826128923785240&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 18, "aff": "Information Engineering Building, Dept. of Eng. Science, Parks Road, Oxford, OX1 3PJ, UK; 2D3; Information Engineering Building, Dept. of Eng. Science, Parks Road, Oxford, OX1 3PJ, UK; Information Engineering Building, Dept. of Eng. 
Science, Parks Road, Oxford, OX1 3PJ, UK", "aff_domain": "robots.ox.ac.uk;2d3.com;robots.ox.ac.uk;robots.ox.ac.uk", "email": "robots.ox.ac.uk;2d3.com;robots.ox.ac.uk;robots.ox.ac.uk", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of Oxford;", "aff_unique_dep": "Department of Engineering Science;", "aff_unique_url": "https://www.ox.ac.uk;", "aff_unique_abbr": "Oxford;", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Oxford;", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United Kingdom;" }, { "id": "8773c9823c", "title": "Bayesian Model Scoring in Markov Random Fields", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/32b3ee0272954b956a7d1f86f76afa21-Abstract.html", "author": "Sridevi Parise; Max Welling", "abstract": "Scoring structures of undirected graphical models by means of evaluating the marginal likelihood is very hard. The main reason is the presence of the partition function which is intractable to evaluate, let alone integrate over. We propose to approximate the marginal likelihood by employing two levels of approximation: we assume normality of the posterior (the Laplace approximation) and approximate all remaining intractable quantities using belief propagation and the linear response approximation. This results in a fast procedure for model scoring. Empirically, we find that our procedure has about two orders of magnitude better accuracy than standard BIC methods for small datasets, but deteriorates when the size of the dataset grows.", "bibtex": "@inproceedings{NIPS2006_32b3ee02,\n author = {Parise, Sridevi and Welling, Max},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Bayesian Model Scoring in Markov Random Fields},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/32b3ee0272954b956a7d1f86f76afa21-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/32b3ee0272954b956a7d1f86f76afa21-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/32b3ee0272954b956a7d1f86f76afa21-Metadata.json", "review": "", "metareview": "", "pdf_size": 232310, "gs_citation": 12, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17868241606897161651&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "Bren School of Information and Computer Science, UC Irvine, Irvine, CA 92697-3425; Bren School of Information and Computer Science, UC Irvine, Irvine, CA 92697-3425", "aff_domain": "ics.uci.edu;ics.uci.edu", "email": "ics.uci.edu;ics.uci.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of California, Irvine", "aff_unique_dep": "Bren School of Information and Computer Science", "aff_unique_url": "https://www.uci.edu", "aff_unique_abbr": "UCI", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Irvine", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "ec5621b2f0", "title": "Bayesian Policy Gradient Algorithms", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/58aaee7ae94b52697ad3b9275d46ec7f-Abstract.html", "author": "Mohammad Ghavamzadeh; Yaakov Engel", "abstract": "Policy gradient methods are reinforcement learning algorithms that adapt a param- eterized policy by following a performance gradient estimate. Conventional pol- icy gradient methods use Monte-Carlo techniques to estimate this gradient. Since Monte Carlo methods tend to have high variance, a large number of samples is required, resulting in slow convergence. 
In this paper, we propose a Bayesian framework that models the policy gradient as a Gaussian process. This reduces the number of samples needed to obtain accurate gradient estimates. Moreover, estimates of the natural gradient as well as a measure of the uncertainty in the gradient estimates are provided at little extra cost.", "bibtex": "@inproceedings{NIPS2006_58aaee7a,\n author = {Ghavamzadeh, Mohammad and Engel, Yaakov},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Bayesian Policy Gradient Algorithms},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/58aaee7ae94b52697ad3b9275d46ec7f-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/58aaee7ae94b52697ad3b9275d46ec7f-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/58aaee7ae94b52697ad3b9275d46ec7f-Metadata.json", "review": "", "metareview": "", "pdf_size": 180652, "gs_citation": 120, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=691039453892660210&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 15, "aff": ";", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster" }, { "id": "cc78724a54", "title": "Blind Motion Deblurring Using Image Statistics", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/2bb0502c80b7432eee4c5847a5fd077b-Abstract.html", "author": "Anat Levin", "abstract": "We address the problem of blind motion deblurring from a single image, caused by a few moving objects. In such situations only part of the image may be blurred, and the scene consists of layers blurred in different degrees. Most of of existing blind deconvolution research concentrates at recovering a single blurring kernel for the entire image. 
However, in the case of different motions, the blur cannot be modeled with a single kernel, and trying to deconvolve the entire image with the same kernel will cause serious artifacts. Thus, the task of deblurring needs to involve segmentation of the image into regions with different blurs. Our approach relies on the observation that the statistics of derivative filters in images are significantly changed by blur. Assuming the blur results from a constant velocity motion, we can limit the search to one dimensional box filter blurs. This enables us to model the expected derivatives distributions as a function of the width of the blur kernel. Those distributions are surprisingly powerful in discriminating regions with different blurs. The approach produces convincing deconvolution results on real world images with rich texture.", "bibtex": "@inproceedings{NIPS2006_2bb0502c,\n author = {Levin, Anat},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Blind Motion Deblurring Using Image Statistics},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/2bb0502c80b7432eee4c5847a5fd077b-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/2bb0502c80b7432eee4c5847a5fd077b-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/2bb0502c80b7432eee4c5847a5fd077b-Metadata.json", "review": "", "metareview": "", "pdf_size": 341491, "gs_citation": 615, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7001565659202915109&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 15, "aff": "School of Computer Science and Engineering, The Hebrew University of Jerusalem + MIT CSAIL", "aff_domain": "csail.mit.edu", "email": "csail.mit.edu", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0+1", "aff_unique_norm": "Hebrew University of Jerusalem;Massachusetts Institute of Technology", "aff_unique_dep": "School of Computer Science and Engineering;Computer Science and Artificial Intelligence Laboratory", "aff_unique_url": "https://www.huji.ac.il;https://www.csail.mit.edu", "aff_unique_abbr": "HUJI;MIT CSAIL", "aff_campus_unique_index": "0+1", "aff_campus_unique": "Jerusalem;Cambridge", "aff_country_unique_index": "0+1", "aff_country_unique": "Israel;United States" }, { "id": "5f6a133c05", "title": "Blind source separation for over-determined delayed mixtures", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/0cdf61037d7053ca59347ab230818335-Abstract.html", "author": "Lars Omlor; Martin Giese", "abstract": "Blind source separation, i.e. the extraction of unknown sources from a set of given signals, is relevant for many applications. A special case of this problem is dimension reduction, where the goal is to approximate a given set of signals by superpositions of a minimal number of sources. 
Since in this case the signals outnumber the sources the problem is over-determined. Most popular approaches for addressing this problem are based on purely linear mixing models. However, many applications like the modeling of acoustic signals, EMG signals, or movement trajectories, require temporal shift-invariance of the extracted components. This case has only rarely been treated in the computational literature, and specifically for the case of dimension reduction almost no algorithms have been proposed. We present a new algorithm for the solution of this problem, which is based on a timefrequency transformation (Wigner-Ville distribution) of the generative model. We show that this algorithm outperforms classical source separation algorithms for linear mixtures, and also a related method for mixtures with delays. In addition, applying the new algorithm to trajectories of human gaits, we demonstrate that it is suitable for the extraction of spatio-temporal components that are easier to interpret than components extracted with other classical algorithms.", "bibtex": "@inproceedings{NIPS2006_0cdf6103,\n author = {Omlor, Lars and Giese, Martin},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Blind source separation for over-determined delayed mixtures},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/0cdf61037d7053ca59347ab230818335-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/0cdf61037d7053ca59347ab230818335-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/0cdf61037d7053ca59347ab230818335-Metadata.json", "review": "", "metareview": "", "pdf_size": 220349, "gs_citation": 38, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4286536196733389292&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "Laboratory for Action Representation and Learning, Department of Cognitive Neurology, Hertie Institute for Clinical Brain Research, University of T\u00fcbingen, Germany; Laboratory for Action Representation and Learning, Department of Cognitive Neurology, Hertie Institute for Clinical Brain Research, University of T\u00fcbingen, Germany", "aff_domain": "; ", "email": "; ", "github": "", "project": "http://www.uni-tuebingen.de/uni/knv/arl/index.html", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of T\u00fcbingen", "aff_unique_dep": "Department of Cognitive Neurology", "aff_unique_url": "https://www.uni-tuebingen.de", "aff_unique_abbr": "Uni T\u00fcbingen", "aff_campus_unique_index": "0;0", "aff_campus_unique": "T\u00fcbingen", "aff_country_unique_index": "0;0", "aff_country_unique": "Germany" }, { "id": "fc3ac7c169", "title": "Boosting Structured Prediction for Imitation Learning", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/fdbd31f2027f20378b1a80125fc862db-Abstract.html", "author": "J. A. Bagnell; Joel Chestnutt; David M. Bradley; Nathan D. 
Ratliff", "abstract": "The Maximum Margin Planning (MMP) (Ratliff et al., 2006) algorithm solves imitation learning problems by learning linear mappings from features to cost functions in a planning domain. The learned policy is the result of minimum-cost planning using these cost functions. These mappings are chosen so that example policies (or trajectories) given by a teacher appear to be lower cost (with a lossscaled margin) than any other policy for a given planning domain. We provide a novel approach, M M P B O O S T , based on the functional gradient descent view of boosting (Mason et al., 1999; Friedman, 1999a) that extends MMP by \"boosting\" in new features. This approach uses simple binary classification or regression to improve performance of MMP imitation learning, and naturally extends to the class of structured maximum margin prediction problems. (Taskar et al., 2005) Our technique is applied to navigation and planning problems for outdoor mobile robots and robotic legged locomotion.", "bibtex": "@inproceedings{NIPS2006_fdbd31f2,\n author = {Bagnell, J. and Chestnutt, Joel and Bradley, David and Ratliff, Nathan},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Boosting Structured Prediction for Imitation Learning},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/fdbd31f2027f20378b1a80125fc862db-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/fdbd31f2027f20378b1a80125fc862db-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/fdbd31f2027f20378b1a80125fc862db-Metadata.json", "review": "", "metareview": "", "pdf_size": 239198, "gs_citation": 218, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4513265936993669035&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 11, "aff": "Robotics Institute, Carnegie Mellon University; Robotics Institute, Carnegie Mellon University; Robotics Institute, Carnegie Mellon University; Robotics Institute, Carnegie Mellon University", "aff_domain": "ri.cmu.edu;ri.cmu.edu;ri.cmu.edu;ri.cmu.edu", "email": "ri.cmu.edu;ri.cmu.edu;ri.cmu.edu;ri.cmu.edu", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Carnegie Mellon University", "aff_unique_dep": "Robotics Institute", "aff_unique_url": "https://www.cmu.edu", "aff_unique_abbr": "CMU", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Pittsburgh", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "id": "cc1e772733", "title": "Branch and Bound for Semi-Supervised Support Vector Machines", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/8b8388180314a337c9aa3c5aa8e2f37a-Abstract.html", "author": "Olivier Chapelle; Vikas Sindhwani; S. S. Keerthi", "abstract": "Semi-supervised SVMs (S3 VM) attempt to learn low-density separators by maximizing the margin over labeled and unlabeled examples. The associated optimization problem is non-convex. 
To examine the full potential of S3 VMs modulo local minima problems in current implementations, we apply branch and bound techniques for obtaining exact, globally optimal solutions. Empirical evidence suggests that the globally optimal solution can return excellent generalization performance in situations where other implementations fail completely. While our current implementation is only applicable to small datasets, we discuss variants that can potentially lead to practically useful algorithms.", "bibtex": "@inproceedings{NIPS2006_8b838818,\n author = {Chapelle, Olivier and Sindhwani, Vikas and Keerthi, S.},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Branch and Bound for Semi-Supervised Support Vector Machines},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/8b8388180314a337c9aa3c5aa8e2f37a-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/8b8388180314a337c9aa3c5aa8e2f37a-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/8b8388180314a337c9aa3c5aa8e2f37a-Metadata.json", "review": "", "metareview": "", "pdf_size": 186098, "gs_citation": 190, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3719641277890800729&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 16, "aff": "Max Planck Institute T\u00fcbingen, Germany + Yahoo! Research; University of Chicago Chicago, USA; Yahoo! Research Santa Clara, USA", "aff_domain": "tuebingen.mpg.de;cs.uchicago.edu;yahoo-inc.com", "email": "tuebingen.mpg.de;cs.uchicago.edu;yahoo-inc.com", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0+1;2;3", "aff_unique_norm": "Max Planck Institute;Yahoo!;University of Chicago;Yahoo! Research", "aff_unique_dep": ";Yahoo! 
Research;;", "aff_unique_url": "https://www.mpg.de;https://research.yahoo.com;https://www.uchicago.edu;https://research.yahoo.com", "aff_unique_abbr": "MPI;Yahoo!;UChicago;Yahoo! Res.", "aff_campus_unique_index": "0;2;3", "aff_campus_unique": "T\u00fcbingen;;Chicago;Santa Clara", "aff_country_unique_index": "0+1;1;1", "aff_country_unique": "Germany;United States" }, { "id": "97682d7800", "title": "Causal inference in sensorimotor integration", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/92a08bf918f44ccd961477be30023da1-Abstract.html", "author": "Konrad P. K\u00f6rding; Joshua B. Tenenbaum", "abstract": "Many recent studies analyze how data from different modalities can be combined. Often this is modeled as a system that optimally combines several sources of information about the same variable. However, it has long been realized that this information combining depends on the interpretation of the data. Two cues that are perceived by different modalities can have different causal relationships: (1) They can both have the same cause, in this case we should fully integrate both cues into a joint estimate. (2) They can have distinct causes, in which case information should be processed independently. In many cases we will not know if there is one joint cause or two independent causes that are responsible for the cues. Here we model this situation as a Bayesian estimation problem. We are thus able to explain some experiments on visual auditory cue combination as well as some experiments on visual proprioceptive cue integration. Our analysis shows that the problem solved by people when they combine cues to produce a movement is much more complicated than is usually assumed, because they need to infer the causal structure that is underlying their sensory experience.", "bibtex": "@inproceedings{NIPS2006_92a08bf9,\n author = {K\\\"{o}rding, Konrad and Tenenbaum, Joshua},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. 
Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Causal inference in sensorimotor integration},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/92a08bf918f44ccd961477be30023da1-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/92a08bf918f44ccd961477be30023da1-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/92a08bf918f44ccd961477be30023da1-Metadata.json", "review": "", "metareview": "", "pdf_size": 152458, "gs_citation": 35, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12586928244027804442&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 21, "aff": "Department of Physiology and PM&R, Northwestern University, Chicago, IL 60611; Massachusetts Institute of Technology, Cambridge, MA 02139", "aff_domain": "koerding.com;mit.edu", "email": "koerding.com;mit.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "Northwestern University;Massachusetts Institute of Technology", "aff_unique_dep": "Department of Physiology and PM&R;", "aff_unique_url": "https://www.northwestern.edu;https://www.mit.edu", "aff_unique_abbr": "NU;MIT", "aff_campus_unique_index": "0;1", "aff_campus_unique": "Chicago;Cambridge", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "ca54a6d098", "title": "Chained Boosting", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/9ff0525c64bf3d4c9957a1d4397f1b40-Abstract.html", "author": "Christian R. Shelton; Wesley Huie; Kin F. Kan", "abstract": "We describe a method to learn to make sequential stopping decisions, such as those made along a processing pipeline. We envision a scenario in which a series of decisions must be made as to whether to continue to process. Further processing costs time and resources, but may add value. 
Our goal is to create, based on historic data, a series of decision rules (one at each stage in the pipeline) that decide, based on information gathered up to that point, whether to continue processing the part. We demonstrate how our framework encompasses problems from manufacturing to vision processing. We derive a quadratic (in the number of decisions) bound on testing performance and provide empirical results on object detection.", "bibtex": "@inproceedings{NIPS2006_9ff0525c,\n author = {Shelton, Christian and Huie, Wesley and Kan, Kin},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Chained Boosting},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/9ff0525c64bf3d4c9957a1d4397f1b40-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/9ff0525c64bf3d4c9957a1d4397f1b40-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/9ff0525c64bf3d4c9957a1d4397f1b40-Metadata.json", "review": "", "metareview": "", "pdf_size": 82339, "gs_citation": 3, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15746330658657381169&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 14, "aff": "University of California, Riverside CA 92521; University of California, Riverside CA 92521; University of California, Riverside CA 92521", "aff_domain": "cs.ucr.edu;cs.ucr.edu;cs.ucr.edu", "email": "cs.ucr.edu;cs.ucr.edu;cs.ucr.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of California, Riverside", "aff_unique_dep": "", "aff_unique_url": "https://www.ucr.edu", "aff_unique_abbr": "UCR", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Riverside", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": 
"56058f1f10", "title": "Clustering Under Prior Knowledge with Application to Image Segmentation", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/c02f9de3c2f3040751818aacc7f60b74-Abstract.html", "author": "Dong S. Cheng; Vittorio Murino; M\u00e1rio Figueiredo", "abstract": "This paper proposes a new approach to model-based clustering under prior knowledge. The proposed formulation can be interpreted from two different angles: as penalized logistic regression, where the class labels are only indirectly observed (via the probability density of each class); as finite mixture learning under a grouping prior. To estimate the parameters of the proposed model, we derive a (generalized) EM algorithm with a closed-form E-step, in contrast with other recent approaches to semi-supervised probabilistic clustering which require Gibbs sampling or suboptimal shortcuts. We show that our approach is ideally suited for image segmentation: it avoids the combinatorial nature of Markov random field priors, and opens the door to more sophisticated spatial priors (e.g., wavelet-based) in a simple and computationally efficient way. Finally, we extend our formulation to work in unsupervised, semi-supervised, or discriminative modes.", "bibtex": "@inproceedings{NIPS2006_c02f9de3,\n author = {Cheng, Dong and Murino, Vittorio and Figueiredo, M\\'{a}rio},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Clustering Under Prior Knowledge with Application to Image Segmentation},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/c02f9de3c2f3040751818aacc7f60b74-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/c02f9de3c2f3040751818aacc7f60b74-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/c02f9de3c2f3040751818aacc7f60b74-Metadata.json", "review": "", "metareview": "", "pdf_size": 253101, "gs_citation": 33, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7330842428385905361&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 15, "aff": "Instituto de Telecomunica\u00e7\u00f5es, Instituto Superior T\u00e9cnico, Technical University of Lisbon, Portugal; Vision, Image Processing, and Sound Laboratory, Dipartimento di Informatica, University of Verona, Italy; Vision, Image Processing, and Sound Laboratory, Dipartimento di Informatica, University of Verona, Italy", "aff_domain": "lx.it.pt;sci.univr.it;univr.it", "email": "lx.it.pt;sci.univr.it;univr.it", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;1", "aff_unique_norm": "Technical University of Lisbon;University of Verona", "aff_unique_dep": "Instituto de Telecomunica\u00e7\u00f5es, Instituto Superior T\u00e9cnico;Dipartimento di Informatica", "aff_unique_url": "https://www.tecnico.ulisboa.pt;https://www.univr.it", "aff_unique_abbr": "UTL;", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1;1", "aff_country_unique": "Portugal;Italy" }, { "id": "b71481b595", "title": "Clustering appearance and shape by learning jigsaws", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/d9b1655c134b831076d6c45620a78c33-Abstract.html", "author": "Anitha Kannan; John Winn; Carsten Rother", "abstract": "Patch-based appearance models are used in 
a wide range of computer vision applications. To learn such models it has previously been necessary to specify a suitable set of patch sizes and shapes by hand. In the jigsaw model presented here, the shape, size and appearance of patches are learned automatically from the repeated structures in a set of training images. By learning such irregularly shaped \u2018jigsaw pieces\u2019, we are able to discover both the shape and the appearance of object parts without supervision. When applied to face images, for example, the learned jigsaw pieces are surprisingly strongly associated with face parts of different shapes and scales such as eyes, noses, eyebrows and cheeks, to name a few. We conclude that learning the shape of the patch not only improves the accuracy of appearance-based part detection but also allows for shape-based part detection. This enables parts of similar appearance but different shapes to be distinguished; for example, while foreheads and cheeks are both skin colored, they have markedly different shapes.", "bibtex": "@inproceedings{NIPS2006_d9b1655c,\n author = {Kannan, Anitha and Winn, John and Rother, Carsten},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Clustering appearance and shape by learning jigsaws},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/d9b1655c134b831076d6c45620a78c33-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/d9b1655c134b831076d6c45620a78c33-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/d9b1655c134b831076d6c45620a78c33-Metadata.json", "review": "", "metareview": "", "pdf_size": 338470, "gs_citation": 60, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6361407100433189316&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 20, "aff": "Microsoft Research Cambridge; Microsoft Research Cambridge; Microsoft Research Cambridge", "aff_domain": "microsoft.com;microsoft.com;microsoft.com", "email": "microsoft.com;microsoft.com;microsoft.com", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "Microsoft", "aff_unique_dep": "Microsoft Research", "aff_unique_url": "https://www.microsoft.com/en-us/research/group/microsoft-research-cambridge", "aff_unique_abbr": "MSR Cambridge", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Cambridge", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United Kingdom" }, { "id": "0735e15875", "title": "Combining causal and similarity-based reasoning", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/1aa057313c28fa4a40c5bc084b11d276-Abstract.html", "author": "Charles Kemp; Patrick Shafto; Allison Berke; Joshua B. Tenenbaum", "abstract": "Everyday inductive reasoning draws on many kinds of knowledge, including knowledge about relationships between properties and knowledge about relationships between objects. 
Previous accounts of inductive reasoning generally focus on just one kind of knowledge: models of causal reasoning often focus on relationships between properties, and models of similarity-based reasoning often focus on similarity relationships between objects. We present a Bayesian model of inductive reasoning that incorporates both kinds of knowledge, and show that it accounts well for human inferences about the properties of biological species.", "bibtex": "@inproceedings{NIPS2006_1aa05731,\n author = {Kemp, Charles and Shafto, Patrick and Berke, Allison and Tenenbaum, Joshua},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Combining causal and similarity-based reasoning},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/1aa057313c28fa4a40c5bc084b11d276-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/1aa057313c28fa4a40c5bc084b11d276-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/1aa057313c28fa4a40c5bc084b11d276-Metadata.json", "review": "", "metareview": "", "pdf_size": 270179, "gs_citation": 25, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3779766641324896113&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 20, "aff": ";;;", "aff_domain": ";;;", "email": ";;;", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster" }, { "id": "54d6d91415", "title": "Comparative Gene Prediction using Conditional Random Fields", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/65a31da7ede4dc9b03fb5bbf8f442ce9-Abstract.html", "author": "Jade P. Vinson; David Decaprio; Matthew D. Pearson; Stacey Luoma; James E. 
Galagan", "abstract": "Computational gene prediction using generative models has reached a plateau, with several groups converging to a generalized hidden Markov model (GHMM) incorporating phylogenetic models of nucleotide sequence evolution. Further improvements in gene calling accuracy are likely to come through new methods that incorporate additional data, both comparative and species specific. Conditional Random Fields (CRFs), which directly model the conditional probability P(y|x) of a vector of hidden states conditioned on a set of observations, provide a unified framework for combining probabilistic and non-probabilistic information and have been shown to outperform HMMs on sequence labeling tasks in natural language processing. We describe the use of CRFs for comparative gene prediction. We implement a model that encapsulates both a phylogenetic-GHMM (our baseline comparative model) and additional non-probabilistic features. We tested our model on the genome sequence of the fungal human pathogen Cryptococcus neoformans. Our baseline comparative model displays accuracy comparable to the best available gene prediction tool for this organism. Moreover, we show that discriminative training and the incorporation of non-probabilistic evidence significantly improve performance. Our software implementation, Conrad, is freely available with an open source license at http://www.broad.mit.edu/annotation/conrad/.", "bibtex": "@inproceedings{NIPS2006_65a31da7,\n author = {Vinson, Jade and Decaprio, David and Pearson, Matthew and Luoma, Stacey and Galagan, James},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Comparative Gene Prediction using Conditional Random Fields},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/65a31da7ede4dc9b03fb5bbf8f442ce9-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/65a31da7ede4dc9b03fb5bbf8f442ce9-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/65a31da7ede4dc9b03fb5bbf8f442ce9-Metadata.json", "review": "", "metareview": "", "pdf_size": 418857, "gs_citation": 19, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15369542814465479628&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "The Broad Institute of MIT and Harvard; The Broad Institute of MIT and Harvard; The Broad Institute of MIT and Harvard; The Broad Institute of MIT and Harvard; The Broad Institute of MIT and Harvard", "aff_domain": "rentec.com;broad.mit.edu;broad.mit.edu;broad.mit.edu;broad.mit.edu", "email": "rentec.com;broad.mit.edu;broad.mit.edu;broad.mit.edu;broad.mit.edu", "github": "", "project": "http://www.broad.mit.edu/annotation/conrad/", "author_num": 5, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;0;0", "aff_unique_norm": "Broad Institute", "aff_unique_dep": "", "aff_unique_url": "https://www.broadinstitute.org", "aff_unique_abbr": "Broad Institute", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "id": "bafe146b2a", "title": "Computation of Similarity Measures for Sequential Data using Generalized Suffix Trees", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/a36e841c5230a79c2102036d2e259848-Abstract.html", "author": "Konrad Rieck; Pavel Laskov; S\u00f6ren Sonnenburg", "abstract": "We propose a generic algorithm for computation of similarity measures for sequential data. 
The algorithm uses generalized suffix trees for efficient calculation of various kernel, distance and non-metric similarity functions. Its worst-case run-time is linear in the length of sequences and independent of the underlying embedding language, which can cover words, k-grams or all contained subsequences. Experiments with network intrusion detection, DNA analysis and text processing applications demonstrate the utility of distances and similarity coefficients for sequences as alternatives to classical kernel functions.", "bibtex": "@inproceedings{NIPS2006_a36e841c,\n author = {Rieck, Konrad and Laskov, Pavel and Sonnenburg, S\\\"{o}ren},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Computation of Similarity Measures for Sequential Data using Generalized Suffix Trees},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/a36e841c5230a79c2102036d2e259848-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/a36e841c5230a79c2102036d2e259848-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/a36e841c5230a79c2102036d2e259848-Metadata.json", "review": "", "metareview": "", "pdf_size": 89288, "gs_citation": 27, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10806053468195078858&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 15, "aff": "Fraunhofer FIRST.IDA; Fraunhofer FIRST.IDA; Fraunhofer FIRST.IDA", "aff_domain": "first.fhg.de;first.fhg.de;first.fhg.de", "email": "first.fhg.de;first.fhg.de;first.fhg.de", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "Fraunhofer Institute for Software and Systems Engineering", "aff_unique_dep": "FIRST.IDA", "aff_unique_url": "https://www.first.ida.fraunhofer.de/", 
"aff_unique_abbr": "Fraunhofer FIRST.IDA", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Germany" }, { "id": "b162406600", "title": "Conditional Random Sampling: A Sketch-based Sampling Technique for Sparse Data", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/aa6b7ad9d68bf3443c35d23de844463b-Abstract.html", "author": "Ping Li; Kenneth W. Church; Trevor J. Hastie", "abstract": "We develop Conditional Random Sampling (CRS), a technique particularly suitable for sparse data. In large-scale applications, the data are often highly sparse. CRS combines sketching and sampling in that it converts sketches of the data into conditional random samples online in the estimation stage, with the sample size determined retrospectively. This paper focuses on approximating pairwise l2 and l1 distances and comparing CRS with random projections. For boolean (0/1) data, CRS is provably better than random projections. We show using real-world data that CRS often outperforms random projections. This technique can be applied in learning, data mining, information retrieval, and database query optimizations.", "bibtex": "@inproceedings{NIPS2006_aa6b7ad9,\n author = {Li, Ping and Church, Kenneth and Hastie, Trevor},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Conditional Random Sampling: A Sketch-based Sampling Technique for Sparse Data},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/aa6b7ad9d68bf3443c35d23de844463b-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/aa6b7ad9d68bf3443c35d23de844463b-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/aa6b7ad9d68bf3443c35d23de844463b-Metadata.json", "review": "", "metareview": "", "pdf_size": 993295, "gs_citation": 71, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6505027940361229641&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "Department of Statistics, Stanford University; Microsoft Research; Department of Statistics, Stanford University", "aff_domain": "stat.stanford.edu;microsoft.com;stanford.edu", "email": "stat.stanford.edu;microsoft.com;stanford.edu", "github": "", "project": "www.stanford.edu/~pingli98/publications/CRS_tr.pdf", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;0", "aff_unique_norm": "Stanford University;Microsoft", "aff_unique_dep": "Department of Statistics;Microsoft Research", "aff_unique_url": "https://www.stanford.edu;https://www.microsoft.com/en-us/research", "aff_unique_abbr": "Stanford;MSR", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Stanford;", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "41e799c7c8", "title": "Conditional mean field", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/f0031c7a91d74015a9addfbc589f3fe5-Abstract.html", "author": "Peter Carbonetto; Nando D. Freitas", "abstract": "Despite all the attention paid to variational methods based on sum-product message passing (loopy belief propagation, tree-reweighted sum-product), these methods are still bound to inference on a small set of probabilistic models. 
Mean field approximations have been applied to a broader set of problems, but the solutions are often poor. We propose a new class of conditionally-specified variational approximations based on mean field theory. While not usable on their own, combined with sequential Monte Carlo they produce guaranteed improvements over conventional mean field. Moreover, experiments on a well-studied problem-- inferring the stable configurations of the Ising spin glass--show that the solutions can be significantly better than those obtained using sum-product-based methods.", "bibtex": "@inproceedings{NIPS2006_f0031c7a,\n author = {Carbonetto, Peter and Freitas, Nando},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Conditional mean field},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/f0031c7a91d74015a9addfbc589f3fe5-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/f0031c7a91d74015a9addfbc589f3fe5-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/f0031c7a91d74015a9addfbc589f3fe5-Metadata.json", "review": "", "metareview": "", "pdf_size": 224564, "gs_citation": 15, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15516772403604322545&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 14, "aff": "Department of Computer Science, University of British Columbia, Vancouver, BC, Canada V6T 1Z4; Department of Computer Science, University of British Columbia, Vancouver, BC, Canada V6T 1Z4", "aff_domain": "cs.ubc.ca;cs.ubc.ca", "email": "cs.ubc.ca;cs.ubc.ca", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of British Columbia", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.ubc.ca", "aff_unique_abbr": 
"UBC", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Vancouver", "aff_country_unique_index": "0;0", "aff_country_unique": "Canada" }, { "id": "3c31c50a62", "title": "Context Effects in Category Learning: An Investigation of Four Probabilistic Models", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/9808ae38758804501ca3fc0697050e03-Abstract.html", "author": "Michael Mozer; Michael Shettel; Michael P. Holmes", "abstract": "Categorization is a central activity of human cognition. When an individual is asked to categorize a sequence of items, context effects arise: categorization of one item influences category decisions for subsequent items. Specifically, when experimental subjects are shown an exemplar of some target category, the category prototype appears to be pulled toward the exemplar, and the prototypes of all nontarget categories appear to be pushed away. These push and pull effects diminish with experience, and likely reflect long-term learning of category boundaries. We propose and evaluate four principled probabilistic (Bayesian) accounts of context effects in categorization. In all four accounts, the probability of an exemplar given a category is encoded as a Gaussian density in feature space, and categorization involves computing category posteriors given an exemplar. The models differ in how the uncertainty distribution of category prototypes is represented (localist or distributed), and how it is updated following each experience (using a maximum likelihood gradient ascent, or a Kalman filter update). We find that the distributed maximum-likelihood model can explain the key experimental phenomena. Further, the model predicts other phenomena that were confirmed via reanalysis of the experimental data.", "bibtex": "@inproceedings{NIPS2006_9808ae38,\n author = {Mozer, Michael C and Shettel, Michael and Holmes, Michael},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. 
Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Context Effects in Category Learning: An Investigation of Four Probabilistic Models},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/9808ae38758804501ca3fc0697050e03-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/9808ae38758804501ca3fc0697050e03-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/9808ae38758804501ca3fc0697050e03-Metadata.json", "review": "", "metareview": "", "pdf_size": 490388, "gs_citation": 2, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15016086421241283070&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 13, "aff": "Dept. of Computer Science+Institute of Cognitive Science; Dept. of Psychology+Institute of Cognitive Science; Dept. of Computer Science", "aff_domain": "colorado.edu;colorado.edu;colorado.edu", "email": "colorado.edu;colorado.edu;colorado.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0+1;0+1;0", "aff_unique_norm": "University Affiliation Not Specified;Institute of Cognitive Science", "aff_unique_dep": "Department of Computer Science;Cognitive Science", "aff_unique_url": ";", "aff_unique_abbr": ";", "aff_campus_unique_index": ";", "aff_campus_unique": "", "aff_country_unique_index": ";", "aff_country_unique": "" }, { "id": "03e12bbe11", "title": "Context dependent amplification of both rate and event-correlation in a VLSI network of spiking neurons", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/769675d7c11f336ae6573e7e533570ec-Abstract.html", "author": "Elisabetta Chicca; Giacomo Indiveri; Rodney J. Douglas", "abstract": "Cooperative competitive networks are believed to play a central role in cortical processing and have been shown to exhibit a wide set of useful computational properties. 
We propose a VLSI implementation of a spiking cooperative competitive network and show how it can perform context dependent computation both in the mean firing rate domain and in spike timing correlation space. In the mean rate case the network amplifies the activity of neurons belonging to the selected stimulus and suppresses the activity of neurons receiving weaker stimuli. In the event correlation case, the recurrent network amplifies with a higher gain the correlation between neurons which receive highly correlated inputs while leaving the mean firing rate unaltered. We describe the network architecture and present experimental data demonstrating its context dependent computation capabilities.", "bibtex": "@inproceedings{NIPS2006_769675d7,\n author = {Chicca, Elisabetta and Indiveri, Giacomo and Douglas, Rodney},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Context dependent amplification of both rate and event-correlation in a VLSI network of spiking neurons},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/769675d7c11f336ae6573e7e533570ec-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/769675d7c11f336ae6573e7e533570ec-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/769675d7c11f336ae6573e7e533570ec-Metadata.json", "review": "", "metareview": "", "pdf_size": 391938, "gs_citation": 13, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5906257312902349504&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 14, "aff": "Institute of Neuroinformatics University - ETH Zurich; Institute of Neuroinformatics University - ETH Zurich; Institute of Neuroinformatics University - ETH Zurich", "aff_domain": "ini.phys.ethz.ch;ini.phys.ethz.ch;ini.phys.ethz.ch", "email": 
"ini.phys.ethz.ch;ini.phys.ethz.ch;ini.phys.ethz.ch", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "ETH Zurich", "aff_unique_dep": "Institute of Neuroinformatics", "aff_unique_url": "https://www.ethz.ch", "aff_unique_abbr": "ETHZ", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Switzerland" }, { "id": "bd20b20acf", "title": "Convergence of Laplacian Eigenmaps", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/5848ad959570f87753a60ce8be1567f3-Abstract.html", "author": "Mikhail Belkin; Partha Niyogi", "abstract": "Geometrically based methods for various tasks of machine learning have attracted considerable attention over the last few years. In this paper we show convergence of eigenvectors of the point cloud Laplacian to the eigenfunctions of the Laplace-Beltrami operator on the underlying manifold, thus establishing the first convergence results for a spectral dimensionality reduction algorithm in the manifold setting.", "bibtex": "@inproceedings{NIPS2006_5848ad95,\n author = {Belkin, Mikhail and Niyogi, Partha},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Convergence of Laplacian Eigenmaps},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/5848ad959570f87753a60ce8be1567f3-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/5848ad959570f87753a60ce8be1567f3-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/5848ad959570f87753a60ce8be1567f3-Metadata.json", "review": "", "metareview": "", "pdf_size": 104552, "gs_citation": 404, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13342929784207250433&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 13, "aff": "Department of Computer Science, Ohio State University; Department of Computer Science, The University of Chicago", "aff_domain": "cse.ohio-state.edu;cs.uchicago.edu", "email": "cse.ohio-state.edu;cs.uchicago.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "Ohio State University;University of Chicago", "aff_unique_dep": "Department of Computer Science;Department of Computer Science", "aff_unique_url": "https://www.osu.edu;https://www.uchicago.edu", "aff_unique_abbr": "OSU;UChicago", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "6342fbd61d", "title": "Convex Repeated Games and Fenchel Duality", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/1cfead9959b76ce44a847c850b61c587-Abstract.html", "author": "Shai Shalev-shwartz; Yoram Singer", "abstract": "We describe an algorithmic framework for an abstract game which we term a convex repeated game. We show that various online learning and boosting algorithms can be all derived as special cases of our algorithmic framework. 
This unified view explains the properties of existing algorithms and also enables us to derive several new interesting algorithms. Our algorithmic framework stems from a connection that we build between the notions of regret in game theory and weak duality in convex optimization.", "bibtex": "@inproceedings{NIPS2006_1cfead99,\n author = {Shalev-shwartz, Shai and Singer, Yoram},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Convex Repeated Games and Fenchel Duality},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/1cfead9959b76ce44a847c850b61c587-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/1cfead9959b76ce44a847c850b61c587-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/1cfead9959b76ce44a847c850b61c587-Metadata.json", "review": "", "metareview": "", "pdf_size": 171766, "gs_citation": 147, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11705421553506192197&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 18, "aff": ";", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster" }, { "id": "80a2d11540", "title": "Correcting Sample Selection Bias by Unlabeled Data", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/a2186aa7c086b46ad4e8bf81e2a3a19b-Abstract.html", "author": "Jiayuan Huang; Arthur Gretton; Karsten Borgwardt; Bernhard Sch\u00f6lkopf; Alex J. Smola", "abstract": "We consider the scenario where training and test data are drawn from different distributions, commonly referred to as sample selection bias. Most algorithms for this setting try to first recover sampling distributions and then make appropriate corrections based on the distribution estimate. 
We present a nonparametric method which directly produces resampling weights without distribution estimation. Our method works by matching distributions between training and testing sets in feature space. Experimental results demonstrate that our method works well in practice.", "bibtex": "@inproceedings{NIPS2006_a2186aa7,\n author = {Huang, Jiayuan and Gretton, Arthur and Borgwardt, Karsten and Sch\\\"{o}lkopf, Bernhard and Smola, Alex},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Correcting Sample Selection Bias by Unlabeled Data},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/a2186aa7c086b46ad4e8bf81e2a3a19b-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/a2186aa7c086b46ad4e8bf81e2a3a19b-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/a2186aa7c086b46ad4e8bf81e2a3a19b-Metadata.json", "review": "", "metareview": "", "pdf_size": 124219, "gs_citation": 2324, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2008444391610768434&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 24, "aff": "School of Computer Science, Univ. 
of Waterloo, Canada; NICTA, ANU, Canberra, Australia; MPI for Biological Cybernetics, T\u00fcbingen, Germany; Ludwig-Maximilians-University, Munich, Germany; MPI for Biological Cybernetics, T\u00fcbingen, Germany", "aff_domain": "cs.uwaterloo.ca;anu.edu.au;tuebingen.mpg.de;dbs.ifi.lmu.de;tuebingen.mpg.de", "email": "cs.uwaterloo.ca;anu.edu.au;tuebingen.mpg.de;dbs.ifi.lmu.de;tuebingen.mpg.de", "github": "", "project": "", "author_num": 5, "track": "main", "status": "Poster", "aff_unique_index": "0;1;2;3;2", "aff_unique_norm": "University of Waterloo;Australian National University;Max Planck Institute for Biological Cybernetics;Ludwig-Maximilians-University", "aff_unique_dep": "School of Computer Science;;Biological Cybernetics;", "aff_unique_url": "https://uwaterloo.ca;https://www.anu.edu.au;https://www.biological-cybernetics.de;https://www.lmu.de", "aff_unique_abbr": "UW;ANU;MPIBC;LMU", "aff_campus_unique_index": "1;2;3;2", "aff_campus_unique": ";Canberra;T\u00fcbingen;Munich", "aff_country_unique_index": "0;1;2;2;2", "aff_country_unique": "Canada;Australia;Germany" }, { "id": "88fbd2c8e9", "title": "Cross-Validation Optimization for Large Scale Hierarchical Classification Kernel Methods", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/b8af7d0fbf094517781e0382102d7b27-Abstract.html", "author": "Matthias Seeger", "abstract": "We propose a highly efficient framework for kernel multi-class models with a large and structured set of classes. Kernel parameters are learned automatically by maximizing the cross-validation log likelihood, and predictive probabilities are estimated. We demonstrate our approach on large scale text classification tasks with hierarchical class structure, achieving state-of-the-art results in an order of magnitude less time than previous work.", "bibtex": "@inproceedings{NIPS2006_b8af7d0f,\n author = {Seeger, Matthias},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. 
Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Cross-Validation Optimization for Large Scale Hierarchical Classification Kernel Methods},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/b8af7d0fbf094517781e0382102d7b27-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/b8af7d0fbf094517781e0382102d7b27-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/b8af7d0fbf094517781e0382102d7b27-Metadata.json", "review": "", "metareview": "", "pdf_size": 99235, "gs_citation": 23, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3194675120540634857&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 13, "aff": "Max Planck Institute for Biological Cybernetics", "aff_domain": "tuebingen.mpg.de", "email": "tuebingen.mpg.de", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "Max Planck Institute for Biological Cybernetics", "aff_unique_dep": "Biological Cybernetics", "aff_unique_url": "https://www.biocybernetics.mpg.de", "aff_unique_abbr": "MPIBC", "aff_country_unique_index": "0", "aff_country_unique": "Germany" }, { "id": "db18bc3f47", "title": "Data Integration for Classification Problems Employing Gaussian Process Priors", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/6fec24eac8f18ed793f5eaad3dd7977c-Abstract.html", "author": "Mark Girolami; Mingjun Zhong", "abstract": "By adopting Gaussian process priors a fully Bayesian solution to the problem of integrating possibly heterogeneous data sets within a classification setting is presented. Approximate inference schemes employing Variational & Expectation Propagation based methods are developed and rigorously assessed. 
We demonstrate our approach to integrating multiple data sets on a large scale protein fold prediction problem where we infer the optimal combinations of covariance functions and achieve state-of-the-art performance without resorting to any ad hoc parameter tuning and classifier combination.", "bibtex": "@inproceedings{NIPS2006_6fec24ea,\n author = {Girolami, Mark and Zhong, Mingjun},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Data Integration for Classification Problems Employing Gaussian Process Priors},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/6fec24eac8f18ed793f5eaad3dd7977c-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/6fec24eac8f18ed793f5eaad3dd7977c-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/6fec24eac8f18ed793f5eaad3dd7977c-Metadata.json", "review": "", "metareview": "", "pdf_size": 148835, "gs_citation": 64, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3450782482619256133&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "Department of Computing Science, University of Glasgow, Scotland, UK; IRISA, Campus de Beaulieu, F-35042 Rennes Cedex, France", "aff_domain": "dcs.gla.ac.uk;irisa.fr", "email": "dcs.gla.ac.uk;irisa.fr", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "University of Glasgow;IRISA", "aff_unique_dep": "Department of Computing Science;", "aff_unique_url": "https://www.gla.ac.uk;https://www.irisa.fr", "aff_unique_abbr": "UofG;", "aff_campus_unique_index": "0;1", "aff_campus_unique": "Glasgow;Beaulieu", "aff_country_unique_index": "0;1", "aff_country_unique": "United Kingdom;France" }, { "id": "00e437ce44", "title": "Denoising and Dimension Reduction in Feature Space", 
"site": "https://papers.nips.cc/paper_files/paper/2006/hash/fedf67d6f3d7341c1c1e8a54774987d3-Abstract.html", "author": "Mikio L. Braun; Klaus-Robert M\u00fcller; Joachim M. Buhmann", "abstract": "We show that the relevant information about a classification problem in feature space is contained up to negligible error in a finite number of leading kernel PCA components if the kernel matches the underlying learning problem. Thus, kernels not only transform data sets such that good generalization can be achieved even by linear discriminant functions, but this transformation is also performed in a manner which makes economic use of feature space dimensions. In the best case, kernels provide efficient implicit representations of the data to perform classification. Practically, we propose an algorithm which enables us to recover the subspace and dimensionality relevant for good classification. Our algorithm can therefore be applied (1) to analyze the interplay of data set and kernel in a geometric fashion, (2) to help in model selection, and to (3) de-noise in feature space in order to yield better classification results.", "bibtex": "@inproceedings{NIPS2006_fedf67d6,\n author = {Braun, Mikio and M\\\"{u}ller, Klaus-Robert and Buhmann, Joachim},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Denoising and Dimension Reduction in Feature Space},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/fedf67d6f3d7341c1c1e8a54774987d3-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/fedf67d6f3d7341c1c1e8a54774987d3-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/fedf67d6f3d7341c1c1e8a54774987d3-Metadata.json", "review": "", "metareview": "", "pdf_size": 241715, "gs_citation": 17, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12785743341146803734&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "Fraunhofer Institute; Inst. of Computational Science, ETH Zurich; Technical University of Berlin + Fraunhofer Institute", "aff_domain": "first.fhg.de;inf.ethz.ch;cs.tu-berlin.de", "email": "first.fhg.de;inf.ethz.ch;cs.tu-berlin.de", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;2+0", "aff_unique_norm": "Fraunhofer Institute;ETH Zurich;Technical University of Berlin", "aff_unique_dep": ";Institute of Computational Science;", "aff_unique_url": "https://www.fraunhofer.de;https://www.ethz.ch;https://www.tu-berlin.de", "aff_unique_abbr": "Fraunhofer;ETHZ;TU Berlin", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1;0+0", "aff_country_unique": "Germany;Switzerland" }, { "id": "48b1ae7542", "title": "Detecting Humans via Their Pose", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/1bd4b29a8e0afccd9923fe29cecb4b29-Abstract.html", "author": "Alessandro Bissacco; Ming-Hsuan Yang; Stefano Soatto", "abstract": "We consider the problem of detecting humans and classifying their pose from a single image. Specifically, our goal is to devise a statistical model that simultaneously answers two questions: 1) is there a human in the image? 
and, if so, 2) what is a low-dimensional representation of her pose? We investigate models that can be learned in an unsupervised manner on unlabeled images of human poses, and provide information that can be used to match the pose of a new image to the ones present in the training set. Starting from a set of descriptors recently proposed for human detection, we apply the Latent Dirichlet Allocation framework to model the statistics of these features, and use the resulting model to answer the above questions. We show how our model can efficiently describe the space of images of humans with their pose, by providing an effective representation of poses for tasks such as classification and matching, while performing remarkably well in human/non human decision problems, thus enabling its use for human detection. We validate the model with extensive quantitative experiments and comparisons with other approaches on human detection and pose matching.", "bibtex": "@inproceedings{NIPS2006_1bd4b29a,\n author = {Bissacco, Alessandro and Yang, Ming-Hsuan and Soatto, Stefano},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Detecting Humans via Their Pose},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/1bd4b29a8e0afccd9923fe29cecb4b29-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/1bd4b29a8e0afccd9923fe29cecb4b29-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/1bd4b29a8e0afccd9923fe29cecb4b29-Metadata.json", "review": "", "metareview": "", "pdf_size": 316764, "gs_citation": 71, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=78899484544333483&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 15, "aff": "Computer Science Department, University of California, Los Angeles; Honda Research Institute; Computer Science Department, University of California, Los Angeles", "aff_domain": "cs.ucla.edu;ieee.org;cs.ucla.edu", "email": "cs.ucla.edu;ieee.org;cs.ucla.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;0", "aff_unique_norm": "University of California, Los Angeles;Honda Research Institute", "aff_unique_dep": "Computer Science Department;", "aff_unique_url": "https://www.ucla.edu;https://www.honda-ri.com", "aff_unique_abbr": "UCLA;HRI", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Los Angeles;", "aff_country_unique_index": "0;1;0", "aff_country_unique": "United States;Japan" }, { "id": "ea076907de", "title": "Differential Entropic Clustering of Multivariate Gaussians", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/8c9a14ffebb7677d033ffce847991293-Abstract.html", "author": "Jason V. Davis; Inderjit S. Dhillon", "abstract": "Gaussian data is pervasive and many learning algorithms (e.g., k -means) model their inputs as a single sample drawn from a multivariate Gaussian. However, in many real-life settings, each input object is best described by multiple samples drawn from a multivariate Gaussian. 
Such data can arise, for example, in a movie review database where each movie is rated by several users, or in time-series domains such as sensor networks. Here, each input can be naturally described by both a mean vector and covariance matrix which parameterize the Gaussian distribution. In this paper, we consider the problem of clustering such input objects, each represented as a multivariate Gaussian. We formulate the problem using an information theoretic approach and draw several interesting theoretical connections to Bregman divergences and also Bregman matrix divergences. We evaluate our method across several domains, including synthetic data, sensor network data, and a statistical debugging application.", "bibtex": "@inproceedings{NIPS2006_8c9a14ff,\n author = {Davis, Jason and Dhillon, Inderjit},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Differential Entropic Clustering of Multivariate Gaussians},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/8c9a14ffebb7677d033ffce847991293-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/8c9a14ffebb7677d033ffce847991293-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/8c9a14ffebb7677d033ffce847991293-Metadata.json", "review": "", "metareview": "", "pdf_size": 121963, "gs_citation": 162, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7534545265928453464&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 14, "aff": ";", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster" }, { "id": "5967ef70f7", "title": "Dirichlet-Enhanced Spam Filtering based on Biased Samples", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/55d99a37b2e1badba7c8df4ccd506a88-Abstract.html", "author": "Steffen 
Bickel; Tobias Scheffer", "abstract": "We study a setting that is motivated by the problem of filtering spam messages for many users. Each user receives messages according to an individual, unknown distribution, reflected only in the unlabeled inbox. The spam filter for a user is required to perform well with respect to this distribution. Labeled messages from publicly available sources can be utilized, but they are governed by a distinct distribution, not adequately representing most inboxes. We devise a method that minimizes a loss function with respect to a user's personal distribution based on the available biased sample. A nonparametric hierarchical Bayesian model furthermore generalizes across users by learning a common prior which is imposed on new email accounts. Empirically, we observe that bias-corrected learning outperforms naive reliance on the assumption of independent and identically distributed data; Dirichlet-enhanced generalization across users outperforms a single (\"one size fits all\") filter as well as independent filters for all users.", "bibtex": "@inproceedings{NIPS2006_55d99a37,\n author = {Bickel, Steffen and Scheffer, Tobias},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Dirichlet-Enhanced Spam Filtering based on Biased Samples},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/55d99a37b2e1badba7c8df4ccd506a88-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/55d99a37b2e1badba7c8df4ccd506a88-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/55d99a37b2e1badba7c8df4ccd506a88-Metadata.json", "review": "", "metareview": "", "pdf_size": 134831, "gs_citation": 121, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3178005385530790733&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 16, "aff": ";", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster" }, { "id": "0478002f66", "title": "Distributed Inference in Dynamical Systems", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/f40438b554cc0e3d96ee6064c5798f55-Abstract.html", "author": "Stanislav Funiak; Carlos Guestrin; Rahul Sukthankar; Mark A. Paskin", "abstract": "We present a robust distributed algorithm for approximate probabilistic inference in dynamical systems, such as sensor networks and teams of mobile robots. Using assumed density filtering, the network nodes maintain a tractable representation of the belief state in a distributed fashion. At each time step, the nodes coordinate to condition this distribution on the observations made throughout the network, and to advance this estimate to the next time step. In addition, we identify a significant challenge for probabilistic inference in dynamical systems: message losses or network partitions can cause nodes to have inconsistent beliefs about the current state of the system. We address this problem by developing distributed algorithms that guarantee that nodes will reach an informative consistent distribution when communication is re-established. 
We present a suite of experimental results on real-world sensor data for two real sensor network deployments: one with 25 cameras and another with 54 temperature sensors.", "bibtex": "@inproceedings{NIPS2006_f40438b5,\n author = {Funiak, Stanislav and Guestrin, Carlos and Sukthankar, Rahul and Paskin, Mark},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Distributed Inference in Dynamical Systems},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/f40438b554cc0e3d96ee6064c5798f55-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/f40438b554cc0e3d96ee6064c5798f55-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/f40438b554cc0e3d96ee6064c5798f55-Metadata.json", "review": "", "metareview": "", "pdf_size": 213190, "gs_citation": 27, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8544830859135755583&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 15, "aff": ";;;", "aff_domain": ";;;", "email": ";;;", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster" }, { "id": "767a3717d6", "title": "Doubly Stochastic Normalization for Spectral Clustering", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/4fa177df22864518b2d7818d4db5db2d-Abstract.html", "author": "Ron Zass; Amnon Shashua", "abstract": "In this paper we focus on the issue of normalization of the affinity matrix in spectral clustering. We show that the difference between N-cuts and Ratio-cuts is in the error measure being used (relative-entropy versus L1 norm) in finding the closest doubly-stochastic matrix to the input affinity matrix. We then develop a scheme for finding the optimal, under Frobenius norm, doubly-stochastic approximation using Von-Neumann's successive projections lemma. 
The new normalization scheme is simple and efficient and provides superior clustering performance over many of the standardized tests.", "bibtex": "@inproceedings{NIPS2006_4fa177df,\n author = {Zass, Ron and Shashua, Amnon},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Doubly Stochastic Normalization for Spectral Clustering},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/4fa177df22864518b2d7818d4db5db2d-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/4fa177df22864518b2d7818d4db5db2d-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/4fa177df22864518b2d7818d4db5db2d-Metadata.json", "review": "", "metareview": "", "pdf_size": 223792, "gs_citation": 150, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14128061972863872549&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 15, "aff": "School of Engineering and Computer Science, Hebrew University of Jerusalem, Jerusalem 91904, Israel; School of Engineering and Computer Science, Hebrew University of Jerusalem, Jerusalem 91904, Israel", "aff_domain": "; ", "email": "; ", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Hebrew University of Jerusalem", "aff_unique_dep": "School of Engineering and Computer Science", "aff_unique_url": "https://www.huji.ac.il", "aff_unique_abbr": "HUJI", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Jerusalem", "aff_country_unique_index": "0;0", "aff_country_unique": "Israel" }, { "id": "296e7a4919", "title": "Dynamic Foreground/Background Extraction from Images and Videos using Random Patches", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/53a1320cb5d2f56130ad5222f93da374-Abstract.html", "author": "Le Lu; Gregory D. 
Hager", "abstract": "In this paper, we propose a novel exemplar-based approach to extract dynamic foreground regions from a changing background within a collection of images or a video sequence. By using image segmentation as a pre-processing step, we convert this traditional pixel-wise labeling problem into a lower-dimensional supervised, binary labeling procedure on image segments. Our approach consists of three steps. First, a set of random image patches are spatially and adaptively sampled within each segment. Second, these sets of extracted samples are formed into two \"bags of patches\" to model the foreground/background appearance, respectively. We perform a novel bidirectional consistency check between new patches from incoming frames and current \"bags of patches\" to reject outliers, control model rigidity and make the model adaptive to new observations. Within each bag, image patches are further partitioned and resampled to create an evolving appearance model. Finally, the foreground/background decision over segments in an image is formulated using an aggregation function defined on the similarity measurements of sampled patches relative to the foreground and background models. The essence of the algorithm is conceptually simple and can be easily implemented within a few hundred lines of Matlab code. We evaluate and validate the proposed approach by extensive real examples of the object-level image mapping and tracking within a variety of challenging environments. We also show that it is straightforward to apply our problem formulation on non-rigid object tracking with difficult surveillance videos.", "bibtex": "@inproceedings{NIPS2006_53a1320c,\n author = {Lu, Le and Hager, Gregory},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Dynamic Foreground/Background Extraction from Images and Videos using Random Patches},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/53a1320cb5d2f56130ad5222f93da374-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/53a1320cb5d2f56130ad5222f93da374-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/53a1320cb5d2f56130ad5222f93da374-Metadata.json", "review": "", "metareview": "", "pdf_size": 205056, "gs_citation": 23, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5535959914366701312&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 16, "aff": ";", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster" }, { "id": "7aa86cade2", "title": "Effects of Stress and Genotype on Meta-parameter Dynamics in Reinforcement Learning", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/b91b1facf3b3a7890177f02ac188f14c-Abstract.html", "author": "Gediminas Luk\u0161ys; J\u00e9r\u00e9mie Kn\u00fcsel; Denis Sheynikhovich; Carmen Sandi; Wulfram Gerstner", "abstract": "Stress and genetic background regulate different aspects of behavioral learning through the action of stress hormones and neuromodulators. In reinforcement learning (RL) models, meta-parameters such as learning rate, future reward discount factor, and exploitation-exploration factor, control learning dynamics and performance. They are hypothesized to be related to neuromodulatory levels in the brain. We found that many aspects of animal learning and performance can be described by simple RL models using dynamic control of the meta-parameters. To study the effects of stress and genotype, we carried out 5-hole-box light conditioning and Morris water maze experiments with C57BL/6 and DBA/2 mouse strains. 
The animals were exposed to different kinds of stress to evaluate its effects on immediate performance as well as on long-term memory. Then, we used RL models to simulate their behavior. For each experimental session, we estimated a set of model meta-parameters that produced the best fit between the model and the animal performance. The dynamics of several estimated meta-parameters were qualitatively similar for the two simulated experiments, and with statistically significant differences between different genetic strains and stress conditions.", "bibtex": "@inproceedings{NIPS2006_b91b1fac,\n author = {Luk\\v{s}ys, Gediminas and Kn\\\"{u}sel, J\\'{e}r\\'{e}mie and Sheynikhovich, Denis and Sandi, Carmen and Gerstner, Wulfram},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Effects of Stress and Genotype on Meta-parameter Dynamics in Reinforcement Learning},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/b91b1facf3b3a7890177f02ac188f14c-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/b91b1facf3b3a7890177f02ac188f14c-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/b91b1facf3b3a7890177f02ac188f14c-Metadata.json", "review": "", "metareview": "", "pdf_size": 108730, "gs_citation": 1, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13849057692478376077&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 15, "aff": "Laboratory of Computational Neuroscience+Laboratory of Behavioral Genetics; Laboratory of Computational Neuroscience; Laboratory of Computational Neuroscience; Laboratory of Behavioral Genetics; Laboratory of Computational Neuroscience", "aff_domain": "epfl.ch;epfl.ch;epfl.ch;epfl.ch;epfl.ch", "email": "epfl.ch;epfl.ch;epfl.ch;epfl.ch;epfl.ch", "github": "", "project": "", 
"author_num": 5, "track": "main", "status": "Poster", "aff_unique_index": "0+1;0;0;1;0", "aff_unique_norm": "Laboratory of Computational Neuroscience;Laboratory of Behavioral Genetics", "aff_unique_dep": "Computational Neuroscience;Behavioral Genetics", "aff_unique_url": ";", "aff_unique_abbr": ";", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "", "aff_country_unique": "" }, { "id": "7d0d522498", "title": "Efficient Learning of Sparse Representations with an Energy-Based Model", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/87f4d79e36d68c3031ccf6c55e9bbd39-Abstract.html", "author": "Marc'aurelio Ranzato; Christopher Poultney; Sumit Chopra; Yann L. Cun", "abstract": "We describe a novel unsupervised method for learning sparse, overcomplete features. The model uses a linear encoder, and a linear decoder preceded by a sparsifying non-linearity that turns a code vector into a quasi-binary sparse code vector. Given an input, the optimal code minimizes the distance between the output of the decoder and the input patch while being as similar as possible to the encoder output. Learning proceeds in a two-phase EM-like fashion: (1) compute the minimum-energy code vector, (2) adjust the parameters of the encoder and decoder so as to decrease the energy. The model produces \"stroke detectors\" when trained on handwritten numerals, and Gabor-like filters when trained on natural image patches. Inference and learning are very fast, requiring no preprocessing, and no expensive sampling. Using the proposed unsupervised method to initialize the first layer of a convolutional network, we achieved an error rate slightly lower than the best reported result on the MNIST dataset. 
Finally, an extension of the method is described to learn topographical filter maps.", "bibtex": "@inproceedings{NIPS2006_87f4d79e,\n author = {Ranzato, Marc\\textquotesingle aurelio and Poultney, Christopher and Chopra, Sumit and Cun, Yann},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Efficient Learning of Sparse Representations with an Energy-Based Model},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/87f4d79e36d68c3031ccf6c55e9bbd39-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/87f4d79e36d68c3031ccf6c55e9bbd39-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/87f4d79e36d68c3031ccf6c55e9bbd39-Metadata.json", "review": "", "metareview": "", "pdf_size": 139800, "gs_citation": 1941, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4717209966147864699&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 28, "aff": "Courant Institute of Mathematical Sciences, New York University, New York, NY 10003; Courant Institute of Mathematical Sciences, New York University, New York, NY 10003; Courant Institute of Mathematical Sciences, New York University, New York, NY 10003; Courant Institute of Mathematical Sciences, New York University, New York, NY 10003", "aff_domain": "cs.nyu.edu;cs.nyu.edu;cs.nyu.edu;cs.nyu.edu", "email": "cs.nyu.edu;cs.nyu.edu;cs.nyu.edu;cs.nyu.edu", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "New York University", "aff_unique_dep": "Courant Institute of Mathematical Sciences", "aff_unique_url": "https://www.nyu.edu", "aff_unique_abbr": "NYU", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "New York", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "id": 
"e6d5cad18f", "title": "Efficient Methods for Privacy Preserving Face Detection", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/ce60ff163cab97029cc727e20e0fc3a7-Abstract.html", "author": "Shai Avidan; Moshe Butman", "abstract": "Bob offers a face-detection web service where clients can submit their images for analysis. Alice would very much like to use the service, but is reluctant to reveal the content of her images to Bob. Bob, for his part, is reluctant to release his face detector, as he spent a lot of time, energy and money constructing it. Secure Multi-Party computations use cryptographic tools to solve this problem without leaking any information. Unfortunately, these methods are slow to compute and we introduce a couple of machine learning techniques that allow the parties to solve the problem while leaking a controlled amount of information. The first method is an information-bottleneck variant of AdaBoost that lets Bob find a subset of features that are enough for classifying an image patch, but not enough to actually reconstruct it. The second machine learning technique is active learning that allows Alice to construct an online classifier, based on a small number of calls to Bob\u2019s face detector. She can then use her online classifier as a fast rejector before using a cryptographically secure classifier on the remaining image patches.", "bibtex": "@inproceedings{NIPS2006_ce60ff16,\n author = {Avidan, Shai and Butman, Moshe},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Efficient Methods for Privacy Preserving Face Detection},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/ce60ff163cab97029cc727e20e0fc3a7-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/ce60ff163cab97029cc727e20e0fc3a7-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/ce60ff163cab97029cc727e20e0fc3a7-Metadata.json", "review": "", "metareview": "", "pdf_size": 271624, "gs_citation": 43, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10345342987565202042&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "Mitsubishi Electric Research Labs; Department of Computer Science, Bar Ilan University", "aff_domain": "merl.com;cs.biu.edu", "email": "merl.com;cs.biu.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "Mitsubishi Electric Research Laboratories;Bar-Ilan University", "aff_unique_dep": ";Department of Computer Science", "aff_unique_url": "https://www.merl.com;https://www.biu.ac.il", "aff_unique_abbr": "MERL;BIU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1", "aff_country_unique": "United States;Israel" }, { "id": "1eaeb09172", "title": "Efficient Structure Learning of Markov Networks using $L_1$-Regularization", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/a4380923dd651c195b1631af7c829187-Abstract.html", "author": "Su-in Lee; Varun Ganapathi; Daphne Koller", "abstract": "Markov networks are commonly used in a wide variety of applications, ranging from computer vision, to natural language, to computational biology. 
In most current applications, even those that rely heavily on learned models, the structure of the Markov network is constructed by hand, due to the lack of effective algorithms for learning Markov network structure from data. In this paper, we provide a computationally efficient method for learning Markov network structure from data. Our method is based on the use of L1 regularization on the weights of the log-linear model, which has the effect of biasing the model towards solutions where many of the parameters are zero. This formulation converts the Markov network learning problem into a convex optimization problem in a continuous space, which can be solved using efficient gradient methods. A key issue in this setting is the (unavoidable) use of approximate inference, which can lead to errors in the gradient computation when the network structure is dense. Thus, we explore the use of different feature introduction schemes and compare their performance. We provide results for our method on synthetic data, and on two real world data sets: pixel values in the MNIST data, and genetic sequence variations in the human HapMap data. We show that our L1 -based method achieves considerably higher generalization performance than the more standard L2 -based method (a Gaussian parameter prior) or pure maximum-likelihood learning. We also show that we can learn MRF network structure at a computational cost that is not much greater than learning parameters alone, demonstrating the existence of a feasible method for this important problem.", "bibtex": "@inproceedings{NIPS2006_a4380923,\n author = {Lee, Su-in and Ganapathi, Varun and Koller, Daphne},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Efficient Structure Learning of Markov Networks using L\\_1-Regularization},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/a4380923dd651c195b1631af7c829187-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/a4380923dd651c195b1631af7c829187-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/a4380923dd651c195b1631af7c829187-Metadata.json", "review": "", "metareview": "", "pdf_size": 233356, "gs_citation": 314, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14731059400971917779&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Department of Computer Science, Stanford University; Department of Computer Science, Stanford University; Department of Computer Science, Stanford University", "aff_domain": "cs.stanford.edu;cs.stanford.edu;cs.stanford.edu", "email": "cs.stanford.edu;cs.stanford.edu;cs.stanford.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "Stanford University", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.stanford.edu", "aff_unique_abbr": "Stanford", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Stanford", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "49a068cf4b", "title": "Efficient sparse coding algorithms", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/2d71b2ae158c7c5912cc0bbde2bb9d95-Abstract.html", "author": "Honglak Lee; Alexis Battle; Rajat Raina; Andrew Y. Ng", "abstract": "Sparse coding provides a class of algorithms for finding succinct representations of stimuli; given only unlabeled input data, it discovers basis functions that capture higher-level features in the data. However, finding sparse codes remains a very difficult computational problem. 
In this paper, we present efficient sparse coding algorithms that are based on iteratively solving two convex optimization problems: an L1 -regularized least squares problem and an L2 -constrained least squares problem. We propose novel algorithms to solve both of these optimization problems. Our algorithms result in a significant speedup for sparse coding, allowing us to learn larger sparse codes than possible with previously described algorithms. We apply these algorithms to natural images and demonstrate that the inferred sparse codes exhibit end-stopping and non-classical receptive field surround suppression and, therefore, may provide a partial explanation for these two phenomena in V1 neurons.", "bibtex": "@inproceedings{NIPS2006_2d71b2ae,\n author = {Lee, Honglak and Battle, Alexis and Raina, Rajat and Ng, Andrew},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Efficient sparse coding algorithms},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/2d71b2ae158c7c5912cc0bbde2bb9d95-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/2d71b2ae158c7c5912cc0bbde2bb9d95-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/2d71b2ae158c7c5912cc0bbde2bb9d95-Metadata.json", "review": "", "metareview": "", "pdf_size": 352283, "gs_citation": 3573, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8084672276816579611&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 20, "aff": ";;;", "aff_domain": ";;;", "email": ";;;", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster" }, { "id": "6c1bb2b378", "title": "Emergence of conjunctive visual features by quadratic independent component analysis", "site": 
"https://papers.nips.cc/paper_files/paper/2006/hash/ee16fa83c0f151ef85e617f5aa3867a6-Abstract.html", "author": "J.t. Lindgren; Aapo Hyv\u00e4rinen", "abstract": "In previous studies, quadratic modelling of natural images has resulted in cell models that react strongly to edges and bars. Here we apply quadratic Independent Component Analysis to natural image patches, and show that up to a small approximation error, the estimated components are computing conjunctions of two linear features. These conjunctive features appear to represent not only edges and bars, but also inherently two-dimensional stimuli, such as corners. In addition, we show that for many of the components, the underlying linear features have essentially V1 simple cell receptive field characteristics. Our results indicate that the development of the V2 cells preferring angles and corners may be partly explainable by the principle of unsupervised sparse coding of natural images.", "bibtex": "@inproceedings{NIPS2006_ee16fa83,\n author = {Lindgren, J.t. and Hyv\\\"{a}rinen, Aapo},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Emergence of conjunctive visual features by quadratic independent component analysis},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/ee16fa83c0f151ef85e617f5aa3867a6-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/ee16fa83c0f151ef85e617f5aa3867a6-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/ee16fa83c0f151ef85e617f5aa3867a6-Metadata.json", "review": "", "metareview": "", "pdf_size": 223397, "gs_citation": 10, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4925248990892422866&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 14, "aff": "Department of Computer Science, University of Helsinki, Finland; HIIT Basic Research Unit, University of Helsinki, Finland", "aff_domain": "cs.helsinki.fi;cs.helsinki.fi", "email": "cs.helsinki.fi;cs.helsinki.fi", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of Helsinki", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.helsinki.fi", "aff_unique_abbr": "UH", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "Finland" }, { "id": "62a0ee6fb0", "title": "Fast Computation of Graph Kernels", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/e37b08dd3015330dcbb5d6663667b8b8-Abstract.html", "author": "Karsten Borgwardt; Nicol N. Schraudolph; S.v.n. Vishwanathan", "abstract": "Using extensions of linear algebra concepts to Reproducing Kernel Hilbert Spaces (RKHS), we define a unifying framework for random walk kernels on graphs. Reduction to a Sylvester equation allows us to compute many of these kernels in O(n3) worst-case time. 
This includes kernels whose previous worst-case time complexity was O(n6), such as the geometric kernels of G\u00e4rtner et al. [1] and the marginal graph kernels of Kashima et al. [2]. Our algebra in RKHS allow us to exploit sparsity in directed and undirected graphs more effectively than previous methods, yielding sub-cubic computational complexity when combined with conjugate gradient solvers or fixed-point iterations. Experiments on graphs from bioinformatics and other application domains show that our algorithms are often more than 1000 times faster than existing approaches.", "bibtex": "@inproceedings{NIPS2006_e37b08dd,\n author = {Borgwardt, Karsten and Schraudolph, Nicol and Vishwanathan, S.v.n.},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Fast Computation of Graph Kernels},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/e37b08dd3015330dcbb5d6663667b8b8-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/e37b08dd3015330dcbb5d6663667b8b8-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/e37b08dd3015330dcbb5d6663667b8b8-Metadata.json", "review": "", "metareview": "", "pdf_size": 213403, "gs_citation": 192, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11238202270213238647&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 25, "aff": "Statistical Machine Learning, National ICT Australia, Locked Bag 8001, Canberra ACT 2601, Australia+Research School of Information Sciences & Engineering, Australian National University, Canberra ACT 0200, Australia; Institute for Computer Science, Ludwig-Maximilians-University Munich, Oettingenstr. 
67, 80538 Munich, Germany; Statistical Machine Learning, National ICT Australia, Locked Bag 8001, Canberra ACT 2601, Australia+Research School of Information Sciences & Engineering, Australian National University, Canberra ACT 0200, Australia", "aff_domain": "nicta.com.au;dbs.ifi.lmu.de;nicta.com.au", "email": "nicta.com.au;dbs.ifi.lmu.de;nicta.com.au", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0+1;2;0+1", "aff_unique_norm": "National ICT Australia;Australian National University;Ludwig-Maximilians-University Munich", "aff_unique_dep": "Statistical Machine Learning;Research School of Information Sciences & Engineering;Institute for Computer Science", "aff_unique_url": "https://www.nicta.com.au;https://www.anu.edu.au;https://www.lmu.de", "aff_unique_abbr": "NICTA;ANU;LMU Munich", "aff_campus_unique_index": "0+0;1;0+0", "aff_campus_unique": "Canberra;Munich", "aff_country_unique_index": "0+0;1;0+0", "aff_country_unique": "Australia;Germany" }, { "id": "2327d31246", "title": "Fast Discriminative Visual Codebooks using Randomized Clustering Forests", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/d3157f2f0212a80a5d042c127522a2d5-Abstract.html", "author": "Frank Moosmann; Bill Triggs; Frederic Jurie", "abstract": "Some of the most effective recent methods for content-based image classification work by extracting dense or sparse local image descriptors, quantizing them according to a coding rule such as k-means vector quantization, accumulating histograms of the resulting \"visual word\" codes over the image, and classifying these with a conventional classifier such as an SVM. Large numbers of descriptors and large codebooks are needed for good results and this becomes slow using k-means. 
We introduce Extremely Randomized Clustering Forests ensembles of randomly created clustering trees and show that these provide more accurate results, much faster training and testing and good resistance to background clutter in several state-of-the-art image classification tasks.", "bibtex": "@inproceedings{NIPS2006_d3157f2f,\n author = {Moosmann, Frank and Triggs, Bill and Jurie, Frederic},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Fast Discriminative Visual Codebooks using Randomized Clustering Forests},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/d3157f2f0212a80a5d042c127522a2d5-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/d3157f2f0212a80a5d042c127522a2d5-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/d3157f2f0212a80a5d042c127522a2d5-Metadata.json", "review": "", "metareview": "", "pdf_size": 356533, "gs_citation": 628, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4371679638689049320&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 24, "aff": "GRA VIR-CNRS-INRIA, 655 avenue de l\u2019Europe, Montbonnot 38330, France+Institute of Measurement and Control, University of Karlsruhe, Germany; GRA VIR-CNRS-INRIA, 655 avenue de l\u2019Europe, Montbonnot 38330, France; GRA VIR-CNRS-INRIA, 655 avenue de l\u2019Europe, Montbonnot 38330, France", "aff_domain": "inrialpes.fr;inrialpes.fr;inrialpes.fr", "email": "inrialpes.fr;inrialpes.fr;inrialpes.fr", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0+1;0;0", "aff_unique_norm": "INRIA;University of Karlsruhe", "aff_unique_dep": "GRA VIR-CNRS;Institute of Measurement and Control", "aff_unique_url": "https://www.inria.fr;https://www.kit.edu", "aff_unique_abbr": "INRIA;KIT", 
"aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0+1;0;0", "aff_country_unique": "France;Germany" }, { "id": "6552f00535", "title": "Fast Iterative Kernel PCA", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/c5c1cb0bebd56ae38817b251ad72bedb-Abstract.html", "author": "Nicol N. Schraudolph; Simon G\u00fcnter; S.v.n. Vishwanathan", "abstract": "We introduce two methods to improve convergence of the Kernel Hebbian Algorithm (KHA) for iterative kernel PCA. KHA has a scalar gain parameter which is either held constant or decreased as 1/t, leading to slow convergence. Our KHA/et algorithm accelerates KHA by incorporating the reciprocal of the current estimated eigenvalues as a gain vector. We then derive and apply Stochastic MetaDescent (SMD) to KHA/et; this further speeds convergence by performing gain adaptation in RKHS. Experimental results for kernel PCA and spectral clustering of USPS digits as well as motion capture and image de-noising problems confirm that our methods converge substantially faster than conventional KHA.", "bibtex": "@inproceedings{NIPS2006_c5c1cb0b,\n author = {Schraudolph, Nicol and G\\\"{u}nter, Simon and Vishwanathan, S.v.n.},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Fast Iterative Kernel PCA},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/c5c1cb0bebd56ae38817b251ad72bedb-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/c5c1cb0bebd56ae38817b251ad72bedb-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/c5c1cb0bebd56ae38817b251ad72bedb-Metadata.json", "review": "", "metareview": "", "pdf_size": 370977, "gs_citation": 29, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17649678450001147429&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 15, "aff": "Statistical Machine Learning, National ICT Australia; Statistical Machine Learning, National ICT Australia; Statistical Machine Learning, National ICT Australia", "aff_domain": "nicta.com.au;nicta.com.au;nicta.com.au", "email": "nicta.com.au;nicta.com.au;nicta.com.au", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "National ICT Australia", "aff_unique_dep": "Statistical Machine Learning", "aff_unique_url": "https://www.nicta.com.au", "aff_unique_abbr": "NICTA", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Australia" }, { "id": "468041086f", "title": "Fundamental Limitations of Spectral Clustering", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/bdb6920adcd0457aa17b53b22963dad9-Abstract.html", "author": "Boaz Nadler; Meirav Galun", "abstract": "Spectral clustering methods are common graph-based approaches to clustering of data. Spectral clustering algorithms typically start from local information encoded in a weighted graph on the data and cluster according to the global eigenvectors of the corresponding (normalized) similarity matrix. 
One contribution of this paper is to present fundamental limitations of this general local to global approach. We show that based only on local information, the normalized cut functional is not a suitable measure for the quality of clustering. Further, even with a suitable similarity measure, we show that the first few eigenvectors of such adjacency matrices cannot successfully cluster datasets that contain structures at different scales of size and density. Based on these findings, a second contribution of this paper is a novel diffusion based measure to evaluate the coherence of individual clusters. Our measure can be used in conjunction with any bottom-up graph-based clustering method, it is scale-free and can determine coherent clusters at all scales. We present both synthetic examples and real image segmentation problems where various spectral clustering algorithms fail. In contrast, using this coherence measure finds the expected clusters at all scales. Keywords: Clustering, kernels, learning theory.", "bibtex": "@inproceedings{NIPS2006_bdb6920a,\n author = {Nadler, Boaz and Galun, Meirav},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Fundamental Limitations of Spectral Clustering},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/bdb6920adcd0457aa17b53b22963dad9-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/bdb6920adcd0457aa17b53b22963dad9-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/bdb6920adcd0457aa17b53b22963dad9-Metadata.json", "review": "", "metareview": "", "pdf_size": 398440, "gs_citation": 212, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2539058587414266587&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 15, "aff": "Department of Applied Mathematics and Computer Science, Weizmann Institute of Science, Rehovot, Israel 76100; Department of Applied Mathematics and Computer Science, Weizmann Institute of Science, Rehovot, Israel 76100", "aff_domain": "weizmann.ac.il;weizmann.ac.il", "email": "weizmann.ac.il;weizmann.ac.il", "github": "", "project": "www.wisdom.weizmann.ac.il/~nadler", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Weizmann Institute of Science", "aff_unique_dep": "Department of Applied Mathematics and Computer Science", "aff_unique_url": "https://www.weizmann.ac.il", "aff_unique_abbr": "Weizmann", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Rehovot", "aff_country_unique_index": "0;0", "aff_country_unique": "Israel" }, { "id": "94e3c02311", "title": "Game Theoretic Algorithms for Protein-DNA binding", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/05b8caaf6ba6f4bdb68675ab8b893bda-Abstract.html", "author": "Luis P\u00e9rez-breva; Luis E. Ortiz; Chen-hsiang Yeang; Tommi S. Jaakkola", "abstract": "We develop and analyze game-theoretic algorithms for predicting coordinate binding of multiple DNA binding regulators. 
The allocation of proteins to local neighborhoods and to sites is carried out with resource constraints while explicating competing and coordinate binding relations among proteins with affinity to the site or region. The focus of this paper is on mathematical foundations of the approach. We also briefly demonstrate the approach in the context of the \u03bb-phage switch.", "bibtex": "@inproceedings{NIPS2006_05b8caaf,\n author = {P\\'{e}rez-breva, Luis and Ortiz, Luis E and Yeang, Chen-hsiang and Jaakkola, Tommi},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Game Theoretic Algorithms for Protein-DNA binding},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/05b8caaf6ba6f4bdb68675ab8b893bda-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/05b8caaf6ba6f4bdb68675ab8b893bda-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/05b8caaf6ba6f4bdb68675ab8b893bda-Metadata.json", "review": "", "metareview": "", "pdf_size": 125676, "gs_citation": 6, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16335941716998857780&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 15, "aff": "CSAIL-MIT; CSAIL - MIT; UCSC; CSAIL - MIT", "aff_domain": "csail.mit.edu;csail.mit.edu;soe.ucsc.edu;csail.mit.edu", "email": "csail.mit.edu;csail.mit.edu;soe.ucsc.edu;csail.mit.edu", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;0;1;0", "aff_unique_norm": "Massachusetts Institute of Technology;University of California, Santa Cruz", "aff_unique_dep": "Computer Science and Artificial Intelligence Laboratory;", "aff_unique_url": "https://www.csail.mit.edu;https://www.ucsc.edu", "aff_unique_abbr": "MIT;UCSC", "aff_campus_unique_index": "0;0;1;0", "aff_campus_unique": "Cambridge;Santa Cruz", 
"aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "id": "4d996728b3", "title": "Gaussian and Wishart Hyperkernels", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/b23f52202479e957b9bada847c1175d7-Abstract.html", "author": "Risi Kondor; Tony Jebara", "abstract": "We propose a new method for constructing hyperkenels and define two promising special cases that can be computed in closed form. These we call the Gaussian and Wishart hyperkernels. The former is especially attractive in that it has an interpretable regularization scheme reminiscent of that of the Gaussian RBF kernel. We discuss how kernel learning can be used not just for improving the performance of classification and regression methods, but also as a stand-alone algorithm for dimensionality reduction and relational or metric learning.", "bibtex": "@inproceedings{NIPS2006_b23f5220,\n author = {Kondor, Risi and Jebara, Tony},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Gaussian and Wishart Hyperkernels},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/b23f52202479e957b9bada847c1175d7-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/b23f52202479e957b9bada847c1175d7-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/b23f52202479e957b9bada847c1175d7-Metadata.json", "review": "", "metareview": "", "pdf_size": 193627, "gs_citation": 17, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7054143895072514126&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 16, "aff": ";", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster" }, { "id": "393e866ebc", "title": "Generalized Maximum Margin Clustering and Unsupervised Kernel Learning", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/be767243ca8f574c740fb4c26cc6dceb-Abstract.html", "author": "Hamed Valizadegan; Rong Jin", "abstract": "Maximum margin clustering was proposed lately and has shown promising performance in recent studies [1, 2]. It extends the theory of support vector machine to unsupervised learning. Despite its good performance, there are three ma jor problems with maximum margin clustering that question its efficiency for real-world applications. First, it is computationally expensive and difficult to scale to large-scale datasets because the number of parameters in maximum margin clustering is quadratic in the number of examples. Second, it requires data preprocessing to ensure that any clustering boundary will pass through the origins, which makes it unsuitable for clustering unbalanced dataset. Third, it is sensitive to the choice of kernel functions, and requires external procedure to determine the appropriate values for the parameters of kernel functions. 
In this paper, we propose \"generalized maximum margin clustering\" framework that addresses the above three problems simultaneously. The new framework generalizes the maximum margin clustering algorithm by allowing any clustering boundaries including those not passing through the origins. It significantly improves the computational efficiency by reducing the number of parameters. Furthermore, the new framework is able to automatically determine the appropriate kernel matrix without any labeled data. Finally, we show a formal connection between maximum margin clustering and spectral clustering. We demonstrate the efficiency of the generalized maximum margin clustering algorithm using both synthetic datasets and real datasets from the UCI repository.", "bibtex": "@inproceedings{NIPS2006_be767243,\n author = {Valizadegan, Hamed and Jin, Rong},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Generalized Maximum Margin Clustering and Unsupervised Kernel Learning},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/be767243ca8f574c740fb4c26cc6dceb-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/be767243ca8f574c740fb4c26cc6dceb-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/be767243ca8f574c740fb4c26cc6dceb-Metadata.json", "review": "", "metareview": "", "pdf_size": 185964, "gs_citation": 151, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11338266267676654338&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 13, "aff": "Computer Science and Engineering, Michigan State University, East Lansing, MI 48824; Computer Science and Engineering, Michigan State University, East Lansing, MI 48824", "aff_domain": "msu.edu;cse.msu.edu", "email": "msu.edu;cse.msu.edu", "github": "", "project": "", "author_num": 2, "track": 
"main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Michigan State University", "aff_unique_dep": "Computer Science and Engineering", "aff_unique_url": "https://www.msu.edu", "aff_unique_abbr": "MSU", "aff_campus_unique_index": "0;0", "aff_campus_unique": "East Lansing", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "41201bbfe3", "title": "Generalized Regularized Least-Squares Learning with Predefined Features in a Hilbert Space", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/77edbe5f897a5dbcde49d31bec1537b8-Abstract.html", "author": "Wenye Li; Kin-hong Lee; Kwong-sak Leung", "abstract": "Kernel-based regularized learning seeks a model in a hypothesis space by minimizing the empirical error and the model's complexity. Based on the representer theorem, the solution consists of a linear combination of translates of a kernel. This paper investigates a generalized form of representer theorem for kernel-based learning. After mapping predefined features and translates of a kernel simultaneously onto a hypothesis space by a specific way of constructing kernels, we proposed a new algorithm by utilizing a generalized regularizer which leaves part of the space unregularized. Using a squared-loss function in calculating the empirical error, a simple convex solution is obtained which combines predefined features with translates of the kernel. Empirical evaluations have confirmed the effectiveness of the algorithm for supervised learning tasks.", "bibtex": "@inproceedings{NIPS2006_77edbe5f,\n author = {Li, Wenye and Lee, Kin-hong and Leung, Kwong-sak},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Generalized Regularized Least-Squares Learning with Predefined Features in a Hilbert Space},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/77edbe5f897a5dbcde49d31bec1537b8-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/77edbe5f897a5dbcde49d31bec1537b8-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/77edbe5f897a5dbcde49d31bec1537b8-Metadata.json", "review": "", "metareview": "", "pdf_size": 176566, "gs_citation": 17, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7237079163066923474&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 11, "aff": "Department of Computer Science and Engineering, The Chinese University of Hong Kong; Department of Computer Science and Engineering, The Chinese University of Hong Kong; Department of Computer Science and Engineering, The Chinese University of Hong Kong", "aff_domain": "cse.cuhk.edu.hk;cse.cuhk.edu.hk;cse.cuhk.edu.hk", "email": "cse.cuhk.edu.hk;cse.cuhk.edu.hk;cse.cuhk.edu.hk", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "Chinese University of Hong Kong", "aff_unique_dep": "Department of Computer Science and Engineering", "aff_unique_url": "https://www.cuhk.edu.hk", "aff_unique_abbr": "CUHK", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Hong Kong SAR", "aff_country_unique_index": "0;0;0", "aff_country_unique": "China" }, { "id": "9bfe054294", "title": "Geometric entropy minimization (GEM) for anomaly detection and localization", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/1160453108d3e537255e9f7b931f4e90-Abstract.html", "author": "Alfred O. 
Hero", "abstract": "We introduce a novel adaptive non-parametric anomaly detection approach, called GEM, that is based on the minimal covering properties of K-point entropic graphs when constructed on N training samples from a nominal probability distribution. Such graphs have the property that as N \u2192 \u221e their span recovers the entropy minimizing set that supports at least \u03c1 = K/N (100)% of the mass of the Lebesgue part of the distribution. When a test sample falls outside of the entropy minimizing set an anomaly can be declared at a statistical level of significance \u03b1 = 1 - \u03c1. A method for implementing this non-parametric anomaly detector is proposed that approximates this minimum entropy set by the influence region of a K-point entropic graph built on the training data. By implementing an incremental leave-one-out k-nearest neighbor graph on resampled subsets of the training data GEM can efficiently detect outliers at a given level of significance and compute their empirical p-values. We illustrate GEM for several simulated and real data sets in high dimensional feature spaces.", "bibtex": "@inproceedings{NIPS2006_11604531,\n author = {Hero, Alfred},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Geometric entropy minimization (GEM) for anomaly detection and localization},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/1160453108d3e537255e9f7b931f4e90-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/1160453108d3e537255e9f7b931f4e90-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/1160453108d3e537255e9f7b931f4e90-Metadata.json", "review": "", "metareview": "", "pdf_size": 294641, "gs_citation": 109, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12616513090648922926&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 14, "aff": "University of Michigan", "aff_domain": "umich.edu", "email": "umich.edu", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "University of Michigan", "aff_unique_dep": "", "aff_unique_url": "https://www.umich.edu", "aff_unique_abbr": "UM", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "id": "bde7dfa74d", "title": "Graph Laplacian Regularization for Large-Scale Semidefinite Programming", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/0a5c79b1eaf15445da252ada718857e9-Abstract.html", "author": "Kilian Q. Weinberger; Fei Sha; Qihui Zhu; Lawrence K. Saul", "abstract": "In many areas of science and engineering, the problem arises how to discover low dimensional representations of high dimensional data. Recently, a number of researchers have converged on common solutions to this problem using methods from convex optimization. In particular, many results have been obtained by constructing semidefinite programs (SDPs) with low rank solutions. 
While the rank of matrix variables in SDPs cannot be directly constrained, it has been observed that low rank solutions emerge naturally by computing high variance or maximal trace solutions that respect local distance constraints. In this paper, we show how to solve very large problems of this type by a matrix factorization that leads to much smaller SDPs than those previously studied. The matrix factorization is derived by expanding the solution of the original problem in terms of the bottom eigenvectors of a graph Laplacian. The smaller SDPs obtained from this matrix factorization yield very good approximations to solutions of the original problem. Moreover, these approximations can be further refined by conjugate gradient descent. We illustrate the approach on localization in large scale sensor networks, where optimizations involving tens of thousands of nodes can be solved in just a few minutes.", "bibtex": "@inproceedings{NIPS2006_0a5c79b1,\n author = {Weinberger, Kilian Q and Sha, Fei and Zhu, Qihui and Saul, Lawrence},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Graph Laplacian Regularization for Large-Scale Semidefinite Programming},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/0a5c79b1eaf15445da252ada718857e9-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/0a5c79b1eaf15445da252ada718857e9-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/0a5c79b1eaf15445da252ada718857e9-Metadata.json", "review": "", "metareview": "", "pdf_size": 895104, "gs_citation": 182, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11881223138451940190&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 15, "aff": "Dept of Computer and Information Science, U of Pennsylvania, Philadelphia; Computer Science Division, U of California, Berkeley; Dept of Computer and Information Science, U of Pennsylvania, Philadelphia; Dept of Computer Science and Engineering, U of California, San Diego", "aff_domain": "seas.upenn.edu;cs.berkeley.edu;seas.upenn.edu;cs.ucsd.edu", "email": "seas.upenn.edu;cs.berkeley.edu;seas.upenn.edu;cs.ucsd.edu", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;1;0;2", "aff_unique_norm": "University of Pennsylvania;University of California, Berkeley;University of California, San Diego", "aff_unique_dep": "Dept of Computer and Information Science;Computer Science Division;Department of Computer Science and Engineering", "aff_unique_url": "https://www.upenn.edu;https://www.berkeley.edu;https://www.ucsd.edu", "aff_unique_abbr": "UPenn;UC Berkeley;UCSD", "aff_campus_unique_index": "0;1;0;2", "aff_campus_unique": "Philadelphia;Berkeley;San Diego", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "id": "10c7c0601f", "title": "Graph-Based Visual Saliency", "site": 
"https://papers.nips.cc/paper_files/paper/2006/hash/4db0f8b0fc895da263fd77fc8aecabe4-Abstract.html", "author": "Jonathan Harel; Christof Koch; Pietro Perona", "abstract": "A new bottom-up visual saliency model, Graph-Based Visual Saliency (GBVS), is proposed. It consists of two steps: first forming activation maps on certain feature channels, and then normalizing them in a way which highlights conspicuity and admits combination with other maps. The model is simple, and biologically plausible insofar as it is naturally parallelized. This model powerfully predicts human fixations on 749 variations of 108 natural images, achieving 98% of the ROC area of a human-based control, whereas the classical algorithms of Itti & Koch ([2], [3], [4]) achieve only 84%.", "bibtex": "@inproceedings{NIPS2006_4db0f8b0,\n author = {Harel, Jonathan and Koch, Christof and Perona, Pietro},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Graph-Based Visual Saliency},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/4db0f8b0fc895da263fd77fc8aecabe4-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/4db0f8b0fc895da263fd77fc8aecabe4-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/4db0f8b0fc895da263fd77fc8aecabe4-Metadata.json", "review": "", "metareview": "", "pdf_size": 232327, "gs_citation": 4852, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16889412420847908145&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 19, "aff": "California Institute of Technology; California Institute of Technology; California Institute of Technology", "aff_domain": "klab.caltech.edu;klab.caltech.edu;vision.caltech.edu", "email": "klab.caltech.edu;klab.caltech.edu;vision.caltech.edu", "github": "", "project": "", "author_num": 3, "track": "main", 
"status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "California Institute of Technology", "aff_unique_dep": "", "aff_unique_url": "https://www.caltech.edu", "aff_unique_abbr": "Caltech", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Pasadena", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "40df51b8fb", "title": "Greedy Layer-Wise Training of Deep Networks", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/5da713a690c067105aeb2fae32403405-Abstract.html", "author": "Yoshua Bengio; Pascal Lamblin; Dan Popovici; Hugo Larochelle", "abstract": "Recent analyses (Bengio, Delalleau, & Le Roux, 2006; Bengio & Le Cun, 2007) of modern nonparametric machine learning algorithms that are kernel machines, such as Support Vector Machines (SVMs), graph-based manifold and semi-supervised learning algorithms suggest fundamental limitations of some learning algorithms. The problem is clear in kernel-based approaches when the kernel is \"local\" (e.g., the Gaussian kernel), i.e., K (x, y ) converges to a constant when ||x - y || increases. These analyses point to the difficulty of learning \"highly-varying functions\", i.e., functions that have a large number of \"variations\" in the domain of interest, e.g., they would require a large number of pieces to be well represented by a piecewise-linear approximation. Since the number of pieces can be made to grow exponentially with the number of factors of variations in the input, this is connected with the well-known curse of dimensionality for classical non-parametric learning algorithms (for regression, classification and density estimation). If the shapes of all these pieces are unrelated, one needs enough examples for each piece in order to generalize properly. However, if these shapes are related and can be predicted from each other, \"non-local\" learning algorithms have the potential to generalize to pieces not covered by the training set. 
Such ability would seem necessary for learning in complex domains such as Artificial Intelligence tasks (e.g., related to vision, language, speech, robotics). Kernel machines (not only those with a local kernel) have a shallow architecture, i.e., only two levels of data-dependent computational elements. This is also true of feedforward neural networks with a single hidden layer (which can become SVMs when the number of hidden units becomes large (Bengio, Le Roux, Vincent, Delalleau, & Marcotte, 2006)). A serious problem with shallow architectures is that they can be very inefficient in terms of the number of computational units (e.g., bases, hidden units), and thus in terms of required examples (Bengio & Le Cun, 2007). One way to represent a highly-varying function compactly (with few parameters) is through the composition of many non-linearities, i.e., with a deep architecture. For example, the parity function with d inputs requires O(2^d) examples and parameters to be represented by a Gaussian SVM (Bengio et al., 2006), O(d^2) parameters for a one-hidden-layer neural network, O(d) parameters and units for a multi-layer network with O(log_2 d) layers, and O(1) parameters with a recurrent neural network. More generally,", "bibtex": "@inproceedings{NIPS2006_5da713a6,\n author = {Bengio, Yoshua and Lamblin, Pascal and Popovici, Dan and Larochelle, Hugo},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Greedy Layer-Wise Training of Deep Networks},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/5da713a690c067105aeb2fae32403405-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/5da713a690c067105aeb2fae32403405-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/5da713a690c067105aeb2fae32403405-Metadata.json", "review": "", "metareview": "", "pdf_size": 187377, "gs_citation": 7432, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15455167514840141912&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 36, "aff": ";;;", "aff_domain": ";;;", "email": ";;;", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster" }, { "id": "66cd94034e", "title": "Handling Advertisements of Unknown Quality in Search Advertising", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/5a01f0597ac4bdf35c24846734ee9a76-Abstract.html", "author": "Sandeep Pandey; Christopher Olston", "abstract": "We consider how a search engine should select advertisements to display with search results, in order to maximize its revenue. Under the standard \"pay-per-click\" arrangement, revenue depends on how well the displayed advertisements appeal to users. The main difficulty stems from new advertisements whose degree of appeal has yet to be determined. Often the only reliable way of determining appeal is exploration via display to users, which detracts from exploitation of other advertisements known to have high appeal. Budget constraints and finite advertisement lifetimes make it necessary to explore as well as exploit. In this paper we study the tradeoff between exploration and exploitation, modeling advertisement placement as a multi-armed bandit problem. 
We extend traditional bandit formulations to account for budget constraints that occur in search engine advertising markets, and derive theoretical bounds on the performance of a family of algorithms. We measure empirical performance via extensive experiments over real-world data.", "bibtex": "@inproceedings{NIPS2006_5a01f059,\n author = {Pandey, Sandeep and Olston, Christopher},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Handling Advertisements of Unknown Quality in Search Advertising},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/5a01f0597ac4bdf35c24846734ee9a76-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/5a01f0597ac4bdf35c24846734ee9a76-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/5a01f0597ac4bdf35c24846734ee9a76-Metadata.json", "review": "", "metareview": "", "pdf_size": 174393, "gs_citation": 116, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1499311886290687192&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 18, "aff": "Carnegie Mellon University; Yahoo! Research", "aff_domain": "cs.cmu.edu;yahoo-inc.com", "email": "cs.cmu.edu;yahoo-inc.com", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "Carnegie Mellon University;Yahoo!", "aff_unique_dep": ";Yahoo! 
Research", "aff_unique_url": "https://www.cmu.edu;https://research.yahoo.com", "aff_unique_abbr": "CMU;Yahoo!", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "a43583b94f", "title": "Hidden Markov Dirichlet Process: Modeling Genetic Recombination in Open Ancestral Space", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/1c303b0eed3133200cf715285011b4e4-Abstract.html", "author": "Kyung-ah Sohn; Eric P. Xing", "abstract": "We present a new statistical framework called hidden Markov Dirichlet process (HMDP) to jointly model the genetic recombinations among possibly infinite number of founders and the coalescence-with-mutation events in the resulting genealogies. The HMDP posits that a haplotype of genetic markers is generated by a sequence of recombination events that select an ancestor for each locus from an unbounded set of founders according to a 1st-order Markov transition process. Conjoining this process with a mutation model, our method accommodates both between-lineage recombination and within-lineage sequence variations, and leads to a compact and natural interpretation of the population structure and inheritance process underlying haplotype data. We have developed an efficient sampling algorithm for HMDP based on a two-level nested Polya urn scheme. On both simulated and real SNP haplotype data, our method performs competitively or significantly better than extant methods in uncovering the recombination hotspots along chromosomal loci; and in addition it also infers the ancestral genetic patterns and offers a highly accurate map of ancestral compositions of modern populations.", "bibtex": "@inproceedings{NIPS2006_1c303b0e,\n author = {Sohn, Kyung-ah and Xing, Eric},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Hidden Markov Dirichlet Process: Modeling Genetic Recombination in Open Ancestral Space},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/1c303b0eed3133200cf715285011b4e4-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/1c303b0eed3133200cf715285011b4e4-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/1c303b0eed3133200cf715285011b4e4-Metadata.json", "review": "", "metareview": "", "pdf_size": 298351, "gs_citation": 12, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3809263483456965442&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "School of Computer Science, Carnegie Mellon University; School of Computer Science, Carnegie Mellon University", "aff_domain": "cs.cmu.edu;cs.cmu.edu", "email": "cs.cmu.edu;cs.cmu.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Carnegie Mellon University", "aff_unique_dep": "School of Computer Science", "aff_unique_url": "https://www.cmu.edu", "aff_unique_abbr": "CMU", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Pittsburgh", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "cda72f4f79", "title": "Hierarchical Dirichlet Processes with Random Effects", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/2ef3e50fd7c1091dda165f25be7f64fd-Abstract.html", "author": "Seyoung Kim; Padhraic Smyth", "abstract": "Data sets involving multiple groups with shared characteristics frequently arise in practice. In this paper we extend hierarchical Dirichlet processes to model such data. Each group is assumed to be generated from a template mixture model with group level variability in both the mixing proportions and the component parameters. 
Variabilities in mixing proportions across groups are handled using hierarchical Dirichlet processes, also allowing for automatic determination of the number of components. In addition, each group is allowed to have its own component parameters coming from a prior described by a template mixture model. This group-level variability in the component parameters is handled using a random effects model. We present a Markov Chain Monte Carlo (MCMC) sampling algorithm to estimate model parameters and demonstrate the method by applying it to the problem of modeling spatial brain activation patterns across multiple images collected via functional magnetic resonance imaging (fMRI).", "bibtex": "@inproceedings{NIPS2006_2ef3e50f,\n author = {Kim, Seyoung and Smyth, Padhraic},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Hierarchical Dirichlet Processes with Random Effects},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/2ef3e50fd7c1091dda165f25be7f64fd-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/2ef3e50fd7c1091dda165f25be7f64fd-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/2ef3e50fd7c1091dda165f25be7f64fd-Metadata.json", "review": "", "metareview": "", "pdf_size": 135794, "gs_citation": 46, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13638946797113264392&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 14, "aff": "Department of Computer Science, University of California, Irvine; Department of Computer Science, University of California, Irvine", "aff_domain": "ics.uci.edu;ics.uci.edu", "email": "ics.uci.edu;ics.uci.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of California, Irvine", 
"aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.uci.edu", "aff_unique_abbr": "UCI", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Irvine", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "024eaa0bf8", "title": "High-Dimensional Graphical Model Selection Using $\\ell_1$-Regularized Logistic Regression", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/86b20716fbd5b253d27cec43127089bc-Abstract.html", "author": "Martin J. Wainwright; John D. Lafferty; Pradeep K. Ravikumar", "abstract": "We focus on the problem of estimating the graph structure associated with a discrete Markov random \ufb01eld. We describe a method based on", "bibtex": "@inproceedings{NIPS2006_86b20716,\n author = {Wainwright, Martin J and Lafferty, John and Ravikumar, Pradeep},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {High-Dimensional Graphical Model Selection Using \\textbackslash ell\\_1-Regularized Logistic Regression},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/86b20716fbd5b253d27cec43127089bc-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/86b20716fbd5b253d27cec43127089bc-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/86b20716fbd5b253d27cec43127089bc-Metadata.json", "review": "", "metareview": "", "pdf_size": 124582, "gs_citation": 281, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16365945551782938813&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": ";;", "aff_domain": ";;", "email": ";;", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster" }, { "id": "cc333c144e", "title": "Hyperparameter Learning for Graph Based Semi-supervised Learning Algorithms", "site": 
"https://papers.nips.cc/paper_files/paper/2006/hash/fc325d4b598aaede18b53dca4ecfcb9c-Abstract.html", "author": "Xinhua Zhang; Wee S. Lee", "abstract": "Semi-supervised learning algorithms have been successfully applied in many applications with scarce labeled data, by utilizing the unlabeled data. One important category is graph based semi-supervised learning algorithms, for which the performance depends considerably on the quality of the graph, or its hyperparameters. In this paper, we deal with the less explored problem of learning the graphs. We propose a graph learning method for the harmonic energy minimization method; this is done by minimizing the leave-one-out prediction error on labeled data points. We use a gradient based method and designed an efficient algorithm which significantly accelerates the calculation of the gradient by applying the matrix inversion lemma and using careful pre-computation. Experimental results show that the graph learning method is effective in improving the performance of the classification algorithm.", "bibtex": "@inproceedings{NIPS2006_fc325d4b,\n author = {Zhang, Xinhua and Lee, Wee},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Hyperparameter Learning for Graph Based Semi-supervised Learning Algorithms},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/fc325d4b598aaede18b53dca4ecfcb9c-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/fc325d4b598aaede18b53dca4ecfcb9c-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/fc325d4b598aaede18b53dca4ecfcb9c-Metadata.json", "review": "", "metareview": "", "pdf_size": 195250, "gs_citation": 80, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13830403041033731401&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 20, "aff": "Statistical Machine Learning Program, National ICT Australia, Canberra, Australia + CSL, RSISE, ANU, Canberra, Australia; Department of Computer Science, National University of Singapore", "aff_domain": "nicta.com.au;comp.nus.edu.sg", "email": "nicta.com.au;comp.nus.edu.sg", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0+1;2", "aff_unique_norm": "National ICT Australia;Australian National University;National University of Singapore", "aff_unique_dep": "Statistical Machine Learning Program;Research School of Information Sciences and Engineering;Department of Computer Science", "aff_unique_url": "https://www.nicta.com.au;https://www.anu.edu.au;https://www.nus.edu.sg", "aff_unique_abbr": "NICTA;ANU;NUS", "aff_campus_unique_index": "0+0", "aff_campus_unique": "Canberra;", "aff_country_unique_index": "0+0;1", "aff_country_unique": "Australia;Singapore" }, { "id": "6b6bbeb25f", "title": "Image Retrieval and Classification Using Local Distance Functions", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/9f8684e630c4c30cad7b1f0935cd62ab-Abstract.html", "author": "Andrea Frome; Yoram Singer; Jitendra Malik", "abstract": "In this paper we introduce and experiment with a framework for 
learning local perceptual distance functions for visual recognition. We learn a distance function for each training image as a combination of elementary distances between patch-based visual features. We apply these combined local distance functions to the tasks of image retrieval and classification of novel images. On the Caltech 101 object recognition benchmark, we achieve 60.3% mean recognition across classes using 15 training images per class, which is better than the best published performance by Zhang, et al.", "bibtex": "@inproceedings{NIPS2006_9f8684e6,\n author = {Frome, Andrea and Singer, Yoram and Malik, Jitendra},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Image Retrieval and Classification Using Local Distance Functions},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/9f8684e630c4c30cad7b1f0935cd62ab-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/9f8684e630c4c30cad7b1f0935cd62ab-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/9f8684e630c4c30cad7b1f0935cd62ab-Metadata.json", "review": "", "metareview": "", "pdf_size": 187366, "gs_citation": 304, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15421426746263923162&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 18, "aff": "Department of Computer Science, UC Berkeley; Google, Inc.; Department of Computer Science, UC Berkeley", "aff_domain": "gmail.com;google.com;cs.berkeley.edu", "email": "gmail.com;google.com;cs.berkeley.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;0", "aff_unique_norm": "University of California, Berkeley;Google", "aff_unique_dep": "Department of Computer Science;Google", "aff_unique_url": "https://www.berkeley.edu;https://www.google.com", 
"aff_unique_abbr": "UC Berkeley;Google", "aff_campus_unique_index": "0;1;0", "aff_campus_unique": "Berkeley;Mountain View", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "1b1f60a58d", "title": "Implicit Surfaces with Globally Regularised and Compactly Supported Basis Functions", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/7b66e8931c93da8c88a0a8b6dec62f9e-Abstract.html", "author": "Christian Walder; Olivier Chapelle; Bernhard Sch\u00f6lkopf", "abstract": "We consider the problem of constructing a function whose zero set is to represent a surface, given sample points with surface normal vectors. The contributions include a novel means of regularising multi-scale compactly supported basis functions that leads to the desirable properties previously only associated with fully supported bases, and show equivalence to a Gaussian process with modified covariance function. We also provide a regularisation framework for simpler and more direct treatment of surface normals, along with a corresponding generalisation of the representer theorem. We demonstrate the techniques on 3D problems of up to 14 million data points, as well as 4D time series data.", "bibtex": "@inproceedings{NIPS2006_7b66e893,\n author = {Walder, Christian and Chapelle, Olivier and Sch\\\"{o}lkopf, Bernhard},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Implicit Surfaces with Globally Regularised and Compactly Supported Basis Functions},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/7b66e8931c93da8c88a0a8b6dec62f9e-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/7b66e8931c93da8c88a0a8b6dec62f9e-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/7b66e8931c93da8c88a0a8b6dec62f9e-Metadata.json", "review": "", "metareview": "", "pdf_size": 359717, "gs_citation": 13, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12619557468150617876&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 13, "aff": "Max Planck Institute for Biological Cybernetics; Max Planck Institute for Biological Cybernetics; Max Planck Institute for Biological Cybernetics + The University of Queensland", "aff_domain": "tuebingen.mpg.de; ; ", "email": "tuebingen.mpg.de; ; ", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0+1", "aff_unique_norm": "Max Planck Institute for Biological Cybernetics;University of Queensland", "aff_unique_dep": "Biological Cybernetics;", "aff_unique_url": "https://www.biocybernetics.mpg.de;https://www.uq.edu.au", "aff_unique_abbr": "MPIBC;UQ", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0+1", "aff_country_unique": "Germany;Australia" }, { "id": "f371a1b04e", "title": "In-Network PCA and Anomaly Detection", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/2227d753dc18505031869d44673728e2-Abstract.html", "author": "Ling Huang; Xuanlong Nguyen; Minos Garofalakis; Michael I. Jordan; Anthony Joseph; Nina Taft", "abstract": "We consider the problem of network anomaly detection in large distributed systems. 
In this setting, Principal Component Analysis (PCA) has been proposed as a method for discovering anomalies by continuously tracking the projection of the data onto a residual subspace. This method was shown to work well empirically in highly aggregated networks, that is, those with a limited number of large nodes and at coarse time scales. This approach, however, has scalability limitations. To overcome these limitations, we develop a PCA-based anomaly detector in which adaptive local data filters send to a coordinator just enough data to enable accurate global detection. Our method is based on a stochastic matrix perturbation analysis that characterizes the tradeoff between the accuracy of anomaly detection and the amount of data communicated over the network.", "bibtex": "@inproceedings{NIPS2006_2227d753,\n author = {Huang, Ling and Nguyen, XuanLong and Garofalakis, Minos and Jordan, Michael and Joseph, Anthony and Taft, Nina},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {In-Network PCA and Anomaly Detection},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/2227d753dc18505031869d44673728e2-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/2227d753dc18505031869d44673728e2-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/2227d753dc18505031869d44673728e2-Metadata.json", "review": "", "metareview": "", "pdf_size": 192427, "gs_citation": 311, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1304158205669163026&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 28, "aff": "University of California, Berkeley, CA 94720; University of California, Berkeley, CA 94720; Intel Research, Berkeley, CA 94704; University of California, Berkeley, CA 94720; University of California, Berkeley, CA 94720; Intel Research, Berkeley, CA 94704", "aff_domain": "cs.berkeley.edu;cs.berkeley.edu;intel.com;cs.berkeley.edu;cs.berkeley.edu;intel.com", "email": "cs.berkeley.edu;cs.berkeley.edu;intel.com;cs.berkeley.edu;cs.berkeley.edu;intel.com", "github": "", "project": "", "author_num": 6, "track": "main", "status": "Poster", "aff_unique_index": "0;0;1;0;0;1", "aff_unique_norm": "University of California, Berkeley;Intel", "aff_unique_dep": ";Intel Research", "aff_unique_url": "https://www.berkeley.edu;https://www.intel.com", "aff_unique_abbr": "UC Berkeley;Intel", "aff_campus_unique_index": "0;0;0;0;0;0", "aff_campus_unique": "Berkeley", "aff_country_unique_index": "0;0;0;0;0;0", "aff_country_unique": "United States" }, { "id": "c86bf94c93", "title": "Inducing Metric Violations in Human Similarity Judgements", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/748d6b6ed8e13f857ceaa6cfbdca14b8-Abstract.html", "author": "Julian Laub; Klaus-Robert M\u00fcller; Felix A. Wichmann; Jakob H. 
Macke", "abstract": "Attempting to model human categorization and similarity judgements is both a very interesting but also an exceedingly difficult challenge. Some of the difficulty arises because of conflicting evidence whether human categorization and similarity judgements should or should not be modelled as to operate on a mental representation that is essentially metric. Intuitively, this has a strong appeal as it would allow (dis)similarity to be represented geometrically as distance in some internal space. Here we show how a single stimulus, carefully constructed in a psychophysical experiment, introduces l2 violations in what used to be an internal similarity space that could be adequately modelled as Euclidean. We term this one influential data point a conflictual judgement. We present an algorithm of how to analyse such data and how to identify the crucial point. Thus there may not be a strict dichotomy between either a metric or a non-metric internal space but rather degrees to which potentially large subsets of stimuli are represented metrically with a small subset causing a global violation of metricity.", "bibtex": "@inproceedings{NIPS2006_748d6b6e,\n author = {Laub, Julian and M\\\"{u}ller, Klaus-Robert and Wichmann, Felix A. and Macke, Jakob H},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Inducing Metric Violations in Human Similarity Judgements},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/748d6b6ed8e13f857ceaa6cfbdca14b8-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/748d6b6ed8e13f857ceaa6cfbdca14b8-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/748d6b6ed8e13f857ceaa6cfbdca14b8-Metadata.json", "review": "", "metareview": "", "pdf_size": 219750, "gs_citation": 16, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2424297830054718796&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 14, "aff": "Fraunhofer FIRST.IDA, Kekulestr. 7, 12489 Berlin, Germany; Max Planck Institut for Biological Cybernetics, Spemannstr. 38, 72076 T\u00fcbingen, Germany; Fraunhofer FIRST.IDA, Kekulestr. 7, 12489 Berlin, Germany + University of Potsdam, Department of Computer Science, August-Bebel-Strasse 89, 14482 Potsdam, Germany; Max Planck Institut for Biological Cybernetics, Spemannstr. 
38, 72076 T\u00fcbingen, Germany", "aff_domain": "first.fhg.de;tuebingen.mpg.de;first.fhg.de;tuebingen.mpg.de", "email": "first.fhg.de;tuebingen.mpg.de;first.fhg.de;tuebingen.mpg.de", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;1;0+2;1", "aff_unique_norm": "Fraunhofer Institute for Software and Systems Engineering;Max Planck Institute for Biological Cybernetics;University of Potsdam", "aff_unique_dep": "FIRST.IDA;Biological Cybernetics;Department of Computer Science", "aff_unique_url": "https://www.first.ida.fraunhofer.de/;https://www.biological-cybernetics.de;https://www.uni-potsdam.de", "aff_unique_abbr": "Fraunhofer FIRST.IDA;MPIBC;", "aff_campus_unique_index": "1;2;1", "aff_campus_unique": ";T\u00fcbingen;Potsdam", "aff_country_unique_index": "0;0;0+0;0", "aff_country_unique": "Germany" }, { "id": "0e208ab63e", "title": "Inferring Network Structure from Co-Occurrences", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/c1502ae5a4d514baec129f72948c266e-Abstract.html", "author": "Michael G. Rabbat; M\u00e1rio Figueiredo; Robert Nowak", "abstract": "We consider the problem of inferring the structure of a network from cooccurrence data: observations that indicate which nodes occur in a signaling pathway but do not directly reveal node order within the pathway. This problem is motivated by network inference problems arising in computational biology and communication systems, in which it is difficult or impossible to obtain precise time ordering information. Without order information, every permutation of the activated nodes leads to a different feasible solution, resulting in combinatorial explosion of the feasible set. However, physical principles underlying most networked systems suggest that not all feasible solutions are equally likely. Intuitively, nodes that co-occur more frequently are probably more closely connected. 
Building on this intuition, we model path co-occurrences as randomly shuffled samples of a random walk on the network. We derive a computationally efficient network inference algorithm and, via novel concentration inequalities for importance sampling estimators, prove that a polynomial complexity Monte Carlo version of the algorithm converges with high probability.", "bibtex": "@inproceedings{NIPS2006_c1502ae5,\n author = {Rabbat, Michael and Figueiredo, M\\'{a}rio and Nowak, Robert},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Inferring Network Structure from Co-Occurrences},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/c1502ae5a4d514baec129f72948c266e-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/c1502ae5a4d514baec129f72948c266e-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/c1502ae5a4d514baec129f72948c266e-Metadata.json", "review": "", "metareview": "", "pdf_size": 167146, "gs_citation": 6, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17551450493244621818&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "Electrical and Computer Eng., University of Wisconsin, Madison, WI 53706; Instituto de Telecomunica\u00e7\u00f5es, Instituto Superior T\u00e9cnico, Lisboa, Portugal; Electrical and Computer Eng., University of Wisconsin, Madison, WI 53706", "aff_domain": "cae.wisc.edu;lx.it.pt;ece.wisc.edu", "email": "cae.wisc.edu;lx.it.pt;ece.wisc.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;0", "aff_unique_norm": "University of Wisconsin-Madison;Instituto Superior T\u00e9cnico", "aff_unique_dep": "Electrical and Computer Engineering;Instituto de Telecomunica\u00e7\u00f5es", "aff_unique_url": 
"https://www.wisc.edu;https://www.ist.utl.pt", "aff_unique_abbr": "UW-Madison;IST", "aff_campus_unique_index": "0;1;0", "aff_campus_unique": "Madison;Lisboa", "aff_country_unique_index": "0;1;0", "aff_country_unique": "United States;Portugal" }, { "id": "e77c3e4422", "title": "Information Bottleneck Optimization and Independent Component Extraction with Spiking Neurons", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/bb073f2855d769be5bf191f6378f7150-Abstract.html", "author": "Stefan Klampfl; Wolfgang Maass; Robert A. Legenstein", "abstract": "The extraction of statistically independent components from high-dimensional multi-sensory input streams is assumed to be an essential component of sensory processing in the brain. Such independent component analysis (or blind source separation) could provide a less redundant representation of information about the external world. Another powerful processing strategy is to extract preferentially those components from high-dimensional input streams that are related to other information sources, such as internal predictions or proprioceptive feedback. This strategy allows the optimization of internal representation according to the information bottleneck method. However, concrete learning rules that implement these general unsupervised learning principles for spiking neurons are still missing. We show how both information bottleneck optimization and the extraction of independent components can in principle be implemented with stochastically spiking neurons with refractoriness. The new learning rule that achieves this is derived from abstract information optimization principles.", "bibtex": "@inproceedings{NIPS2006_bb073f28,\n author = {Klampfl, Stefan and Maass, Wolfgang and Legenstein, Robert},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Information Bottleneck Optimization and Independent Component Extraction with Spiking Neurons},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/bb073f2855d769be5bf191f6378f7150-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/bb073f2855d769be5bf191f6378f7150-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/bb073f2855d769be5bf191f6378f7150-Metadata.json", "review": "", "metareview": "", "pdf_size": 577626, "gs_citation": 12, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13266523106206886711&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 16, "aff": ";;", "aff_domain": ";;", "email": ";;", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster" }, { "id": "979fad7d5d", "title": "Information Bottleneck for Non Co-Occurrence Data", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/494ba9ff03bdad881378a6fd4092a6c7-Abstract.html", "author": "Yevgeny Seldin; Noam Slonim; Naftali Tishby", "abstract": "We present a general model-independent approach to the analysis of data in cases when these data do not appear in the form of co-occurrence of two variables X, Y , but rather as a sample of values of an unknown (stochastic) function Z (X, Y ). For example, in gene expression data, the expression level Z is a function of gene X and condition Y ; or in movie ratings data the rating Z is a function of viewer X and movie Y . The approach represents a consistent extension of the Information Bottleneck method that has previously relied on the availability of co-occurrence statistics. By altering the relevance variable we eliminate the need in the sample of joint distribution of all input variables. This new formulation also enables simple MDL-like model complexity control and prediction of missing values of Z . 
The approach is analyzed and shown to be on a par with the best known clustering algorithms for a wide range of domains. For the prediction of missing values (collaborative filtering) it improves the currently best known results.", "bibtex": "@inproceedings{NIPS2006_494ba9ff,\n author = {Seldin, Yevgeny and Slonim, Noam and Tishby, Naftali},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Information Bottleneck for Non Co-Occurrence Data},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/494ba9ff03bdad881378a6fd4092a6c7-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/494ba9ff03bdad881378a6fd4092a6c7-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/494ba9ff03bdad881378a6fd4092a6c7-Metadata.json", "review": "", "metareview": "", "pdf_size": 73742, "gs_citation": 20, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13895021985097849136&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 13, "aff": "School of Computer Science and Engineering + Interdisciplinary Center for Neural Computation; The Lewis-Sigler Institute for Integrative Genomics; School of Computer Science and Engineering + Interdisciplinary Center for Neural Computation", "aff_domain": "cs.huji.ac.il;princeton.edu;cs.huji.ac.il", "email": "cs.huji.ac.il;princeton.edu;cs.huji.ac.il", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0+1;2;0+1", "aff_unique_norm": "University Affiliation Not Specified;Interdisciplinary Center for Neural Computation;Lewis-Sigler Institute for Integrative Genomics", "aff_unique_dep": "School of Computer Science and Engineering;Neural Computation;Institute for Integrative Genomics", "aff_unique_url": ";;http://lsi.princeton.edu", "aff_unique_abbr": ";;LSI", 
"aff_campus_unique_index": ";", "aff_campus_unique": "", "aff_country_unique_index": ";1;", "aff_country_unique": ";United States" }, { "id": "c2a657cb14", "title": "Isotonic Conditional Random Fields and Local Sentiment Flow", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/32cbf687880eb1674a07bf717761dd3a-Abstract.html", "author": "Yi Mao; Guy Lebanon", "abstract": "We examine the problem of predicting local sentiment flow in documents, and its application to several areas of text analysis. Formally, the problem is stated as predicting an ordinal sequence based on a sequence of word sets. In the spirit of isotonic regression, we develop a variant of conditional random fields that is well suited to handle this problem. Using the Mobius transform, we express the model as a simple convex optimization problem. Experiments demonstrate the model and its applications to sentiment prediction, style analysis, and text summarization.", "bibtex": "@inproceedings{NIPS2006_32cbf687,\n author = {Mao, Yi and Lebanon, Guy},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Isotonic Conditional Random Fields and Local Sentiment Flow},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/32cbf687880eb1674a07bf717761dd3a-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/32cbf687880eb1674a07bf717761dd3a-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/32cbf687880eb1674a07bf717761dd3a-Metadata.json", "review": "", "metareview": "", "pdf_size": 205831, "gs_citation": 171, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16266370778784302056&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "School of Elec. 
and Computer Engineering, Purdue University - West Lafayette, IN; Department of Statistics, Purdue University - West Lafayette, IN + School of Elec. and Computer Engineering, Purdue University - West Lafayette, IN", "aff_domain": "ecn.purdue.edu;stat.purdue.edu", "email": "ecn.purdue.edu;stat.purdue.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0+0", "aff_unique_norm": "Purdue University", "aff_unique_dep": "School of Electrical and Computer Engineering", "aff_unique_url": "https://www.purdue.edu", "aff_unique_abbr": "Purdue", "aff_campus_unique_index": "0;0+0", "aff_campus_unique": "West Lafayette", "aff_country_unique_index": "0;0+0", "aff_country_unique": "United States" }, { "id": "6d78917552", "title": "Kernel Maximum Entropy Data Transformation and an Enhanced Spectral Clustering Algorithm", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/097e26b2ffb0339458b55da17425a71f-Abstract.html", "author": "Robert Jenssen; Torbj\u00f8rn Eltoft; Mark Girolami; Deniz Erdogmus", "abstract": "We propose a new kernel-based data transformation technique. It is founded on the principle of maximum entropy (MaxEnt) preservation, hence named kernel MaxEnt. The key measure is Renyi's entropy estimated via Parzen windowing. We show that kernel MaxEnt is based on eigenvectors, and is in that sense similar to kernel PCA, but may produce strikingly different transformed data sets. An enhanced spectral clustering algorithm is proposed, by replacing kernel PCA by kernel MaxEnt as an intermediate step. This has a major impact on performance.", "bibtex": "@inproceedings{NIPS2006_097e26b2,\n author = {Jenssen, Robert and Eltoft, Torbj\\o rn and Girolami, Mark and Erdogmus, Deniz},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Kernel Maximum Entropy Data Transformation and an Enhanced Spectral Clustering Algorithm},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/097e26b2ffb0339458b55da17425a71f-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/097e26b2ffb0339458b55da17425a71f-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/097e26b2ffb0339458b55da17425a71f-Metadata.json", "review": "", "metareview": "", "pdf_size": 234739, "gs_citation": 47, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3996244016378564514&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "Department of Physics and Technology, University of Troms\u00f8, Norway; Department of Physics and Technology, University of Troms\u00f8, Norway; Department of Computing Science, University of Glasgow, Scotland; Department of Computer Science and Engineering, Oregon Health and Science University, USA", "aff_domain": "phys.uit.no; ; ; ", "email": "phys.uit.no; ; ; ", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;0;1;2", "aff_unique_norm": "University of Troms\u00f8;University of Glasgow;Oregon Health and Science University", "aff_unique_dep": "Department of Physics and Technology;Department of Computing Science;Department of Computer Science and Engineering", "aff_unique_url": "https://uit.no;https://www.gla.ac.uk;https://www.ohsu.edu", "aff_unique_abbr": ";UofG;OHSU", "aff_campus_unique_index": "1", "aff_campus_unique": ";Glasgow", "aff_country_unique_index": "0;0;1;2", "aff_country_unique": "Norway;United Kingdom;United States" }, { "id": "50e278018d", "title": "Kernels on Structured Objects Through Nested Histograms", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/dc6e224a8d74ce03bf301152d6e33e97-Abstract.html", "author": "Marco Cuturi; Kenji Fukumizu", 
"abstract": "We propose a family of kernels for structured objects which is based on the bag-of-components paradigm. However, rather than decomposing each complex object into the single histogram of its components, we use for each object a family of nested histograms, where each histogram in this hierarchy describes the object seen from an increasingly granular perspective. We use this hierarchy of histograms to define elementary kernels which can detect coarse and fine similarities between the objects. We compute through an efficient averaging trick a mixture of such specific kernels, to propose a final kernel value which weights efficiently local and global matches. We propose experimental results on an image retrieval experiment which show that this mixture is an effective template procedure to be used with kernels on histograms.", "bibtex": "@inproceedings{NIPS2006_dc6e224a,\n author = {Cuturi, Marco and Fukumizu, Kenji},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Kernels on Structured Objects Through Nested Histograms},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/dc6e224a8d74ce03bf301152d6e33e97-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/dc6e224a8d74ce03bf301152d6e33e97-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/dc6e224a8d74ce03bf301152d6e33e97-Metadata.json", "review": "", "metareview": "", "pdf_size": 105663, "gs_citation": 16, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3960624305878358274&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "Institute of Statistical Mathematics; Institute of Statistical Mathematics", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Institute of Statistical Mathematics", "aff_unique_dep": "", "aff_unique_url": "https://www.ism.ac.jp", "aff_unique_abbr": "ISM", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "Japan" }, { "id": "b95a6cdc6a", "title": "Large Margin Component Analysis", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/dc6a7e655d7e5840e66733e9ee67cc69-Abstract.html", "author": "Lorenzo Torresani; Kuang-chih Lee", "abstract": "Metric learning has been shown to signi\ufb01cantly improve the accuracy of k-nearest neighbor (kNN) classi\ufb01cation. In problems involving thousands of features, dis- tance learning algorithms cannot be used due to over\ufb01tting and high computa- tional complexity. In such cases, previous work has relied on a two-step solution: \ufb01rst apply dimensionality reduction methods to the data, and then learn a met- ric in the resulting low-dimensional subspace. 
In this paper we show that better classi\ufb01cation performance can be achieved by unifying the objectives of dimen- sionality reduction and metric learning. We propose a method that solves for the low-dimensional projection of the inputs, which minimizes a metric objective aimed at separating points in different classes by a large margin. This projection is de\ufb01ned by a signi\ufb01cantly smaller number of parameters than metrics learned in input space, and thus our optimization reduces the risks of over\ufb01tting. Theory and results are presented for both a linear as well as a kernelized version of the algorithm. Overall, we achieve classi\ufb01cation rates similar, and in several cases superior, to those of support vector machines.", "bibtex": "@inproceedings{NIPS2006_dc6a7e65,\n author = {Torresani, Lorenzo and Lee, Kuang-chih},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Large Margin Component Analysis},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/dc6a7e655d7e5840e66733e9ee67cc69-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/dc6a7e655d7e5840e66733e9ee67cc69-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/dc6a7e655d7e5840e66733e9ee67cc69-Metadata.json", "review": "", "metareview": "", "pdf_size": 89136, "gs_citation": 296, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18118190686199561159&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 17, "aff": "Riya, Inc.; Riya, Inc.", "aff_domain": "riya.com;riya.com", "email": "riya.com;riya.com", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Riya, Inc.", "aff_unique_dep": "", "aff_unique_url": "", "aff_unique_abbr": "", "aff_campus_unique_index": "", 
"aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "f3e49ea7d4", "title": "Large Margin Hidden Markov Models for Automatic Speech Recognition", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/f0b1d5879866f2c2eba77f39993d1184-Abstract.html", "author": "Fei Sha; Lawrence K. Saul", "abstract": "We study the problem of parameter estimation in continuous density hidden Markov models (CD-HMMs) for automatic speech recognition (ASR). As in support vector machines, we propose a learning algorithm based on the goal of margin maximization. Unlike earlier work on max-margin Markov networks, our approach is specifically geared to the modeling of real-valued observations (such as acoustic feature vectors) using Gaussian mixture models. Unlike previous discriminative frameworks for ASR, such as maximum mutual information and minimum classification error, our framework leads to a convex optimization, without any spurious local minima. The objective function for large margin training of CD-HMMs is defined over a parameter space of positive semidefinite matrices. Its optimization can be performed efficiently with simple gradient-based methods that scale well to large problems. We obtain competitive results for phonetic recognition on the TIMIT speech corpus.", "bibtex": "@inproceedings{NIPS2006_f0b1d587,\n author = {Sha, Fei and Saul, Lawrence},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Large Margin Hidden Markov Models for Automatic Speech Recognition},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/f0b1d5879866f2c2eba77f39993d1184-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/f0b1d5879866f2c2eba77f39993d1184-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/f0b1d5879866f2c2eba77f39993d1184-Metadata.json", "review": "", "metareview": "", "pdf_size": 113747, "gs_citation": 247, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4053223596570337209&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 12, "aff": "Computer Science Division, University of California, Berkeley, CA 94720-1776; Department of Computer Science and Engineering, University of California (San Diego), La Jolla, CA 92093-0404", "aff_domain": "cs.berkeley.edu;cs.ucsd.edu", "email": "cs.berkeley.edu;cs.ucsd.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "University of California, Berkeley;University of California, San Diego", "aff_unique_dep": "Computer Science Division;Department of Computer Science and Engineering", "aff_unique_url": "https://www.berkeley.edu;https://www.ucsd.edu", "aff_unique_abbr": "UC Berkeley;UCSD", "aff_campus_unique_index": "0;1", "aff_campus_unique": "Berkeley;La Jolla", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "e686a044ac", "title": "Large Margin Multi-channel Analog-to-Digital Conversion with Applications to Neural Prosthesis", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/63f44623dd8686aba388944c8810087f-Abstract.html", "author": "Amit Gore; Shantanu Chakrabartty", "abstract": "A key challenge in designing analog-to-digital converters for cortically implanted prosthesis is to sense and process high-dimensional neural 
signals recorded by the micro-electrode arrays. In this paper, we describe a novel architecture for analog-to-digital (A/D) conversion that combines conversion with spatial de-correlation within a single module. The architecture called multiple-input multiple-output (MIMO) is based on a min-max gradient descent optimization of a regularized linear cost function that naturally lends to an A/D formulation. Using an online formulation, the architecture can adapt to slow variations in cross-channel correlations, observed due to relative motion of the microelectrodes with respect to the signal sources. Experimental results with real recorded multi-channel neural data demonstrate the effectiveness of the proposed algorithm in alleviating cross-channel redundancy across electrodes and performing data-compression directly at the A/D converter.", "bibtex": "@inproceedings{NIPS2006_63f44623,\n author = {Gore, Amit and Chakrabartty, Shantanu},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Large Margin Multi-channel Analog-to-Digital Conversion with Applications to Neural Prosthesis},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/63f44623dd8686aba388944c8810087f-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/63f44623dd8686aba388944c8810087f-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/63f44623dd8686aba388944c8810087f-Metadata.json", "review": "", "metareview": "", "pdf_size": 351152, "gs_citation": 6, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11133532349669833699&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": ";", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster" }, { "id": "49436c34d1", "title": "Large Scale Hidden Semi-Markov SVMs", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/faa453efde4ac6a36849ba381feb9e87-Abstract.html", "author": "Gunnar R\u00e4tsch; S\u00f6ren Sonnenburg", "abstract": "We describe Hidden Semi-Markov Support Vector Machines (SHM SVMs), an extension of HM SVMs to semi-Markov chains. This allows us to predict segmentations of sequences based on segment-based features measuring properties such as the length of the segment. We propose a novel technique to partition the problem into sub-problems. The independently obtained partial solutions can then be recombined in an efficient way, which allows us to solve label sequence learning problems with several thousands of labeled sequences. We have tested our algorithm for predicting gene structures, an important problem in computational biology. 
Results on a well-known model organism illustrate the great potential of SHM SVMs in computational biology.", "bibtex": "@inproceedings{NIPS2006_faa453ef,\n author = {R\\\"{a}tsch, Gunnar and Sonnenburg, S\\\"{o}ren},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Large Scale Hidden Semi-Markov SVMs},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/faa453efde4ac6a36849ba381feb9e87-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/faa453efde4ac6a36849ba381feb9e87-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/faa453efde4ac6a36849ba381feb9e87-Metadata.json", "review": "", "metareview": "", "pdf_size": 251579, "gs_citation": 42, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2141711187828493504&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 14, "aff": "Friedrich Miescher Laboratory, Max Planck Society; Fraunhofer FIRST.IDA", "aff_domain": "tuebingen.mpg.de;first.fhg.de", "email": "tuebingen.mpg.de;first.fhg.de", "github": "", "project": "http://www.fml.mpg.de/raetsch", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "Max Planck Society;Fraunhofer Institute for Software and Systems Engineering", "aff_unique_dep": "Friedrich Miescher Laboratory;FIRST.IDA", "aff_unique_url": "https://www.mpg.de;https://www.first.ida.fraunhofer.de/", "aff_unique_abbr": "MPG;Fraunhofer FIRST.IDA", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "Germany" }, { "id": "01f861afc9", "title": "Large-Scale Sparsified Manifold Regularization", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/3a61ed715ee66c48bacf237fa7bb5289-Abstract.html", "author": "Ivor W. 
Kwok", "abstract": "Semi-supervised learning is more powerful than supervised learning by using both labeled and unlabeled data. In particular, the manifold regularization framework, together with kernel methods, leads to the Laplacian SVM (LapSVM) that has demonstrated state-of-the-art performance. However, the LapSVM solution typically involves kernel expansions of all the labeled and unlabeled examples, and is slow on testing. Moreover, existing semi-supervised learning methods, including the LapSVM, can only handle a small number of unlabeled examples. In this paper, we integrate manifold regularization with the core vector machine, which has been used for large-scale supervised and unsupervised learning. By using a sparsified manifold regularizer and formulating as a center-constrained minimum enclosing ball problem, the proposed method produces sparse solutions with low time and space complexities. Experimental results show that it is much faster than the LapSVM, and can handle a million unlabeled examples on a standard PC; while the LapSVM can only handle several thousand patterns.", "bibtex": "@inproceedings{NIPS2006_3a61ed71,\n author = {Tsang, Ivor and Kwok, James},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Large-Scale Sparsified Manifold Regularization},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/3a61ed715ee66c48bacf237fa7bb5289-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/3a61ed715ee66c48bacf237fa7bb5289-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/3a61ed715ee66c48bacf237fa7bb5289-Metadata.json", "review": "", "metareview": "", "pdf_size": 327716, "gs_citation": 104, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18022328862609097627&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 16, "aff": "Department of Computer Science and Engineering, The Hong Kong University of Science and Technology; Department of Computer Science and Engineering, The Hong Kong University of Science and Technology", "aff_domain": "cse.ust.hk;cse.ust.hk", "email": "cse.ust.hk;cse.ust.hk", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Hong Kong University of Science and Technology", "aff_unique_dep": "Department of Computer Science and Engineering", "aff_unique_url": "https://www.ust.hk", "aff_unique_abbr": "HKUST", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Hong Kong SAR", "aff_country_unique_index": "0;0", "aff_country_unique": "China" }, { "id": "fb0cf89a3d", "title": "Learnability and the doubling dimension", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/f5496252609c43eb8a3d147ab9b9c006-Abstract.html", "author": "Yi Li; Philip M. Long", "abstract": "metric dimension).", "bibtex": "@inproceedings{NIPS2006_f5496252,\n author = {Li, Yi and Long, Philip},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Learnability and the doubling dimension},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/f5496252609c43eb8a3d147ab9b9c006-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/f5496252609c43eb8a3d147ab9b9c006-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/f5496252609c43eb8a3d147ab9b9c006-Metadata.json", "review": "", "metareview": "", "pdf_size": 129751, "gs_citation": 16, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14500143854003004775&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 14, "aff": "Genome Institute of Singapore; Google", "aff_domain": "gis.a-star.edu.sg;google.com", "email": "gis.a-star.edu.sg;google.com", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "Genome Institute of Singapore;Google", "aff_unique_dep": ";Google", "aff_unique_url": "https://www.genome-institute-of-singapore.org;https://www.google.com", "aff_unique_abbr": "GIS;Google", "aff_campus_unique_index": "1", "aff_campus_unique": ";Mountain View", "aff_country_unique_index": "0;1", "aff_country_unique": "Singapore;United States" }, { "id": "dda22c1971", "title": "Learning Dense 3D Correspondence", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/e22cb9d6bbb4c290a94e4fff4d68a831-Abstract.html", "author": "Florian Steinke; Volker Blanz; Bernhard Sch\u00f6lkopf", "abstract": "Establishing correspondence between distinct objects is an important and nontrivial task: correctness of the correspondence hinges on properties which are difficult to capture in an a priori criterion. 
While previous work has used a priori criteria which in some cases led to very good results, the present paper explores whether it is possible to learn a combination of features that, for a given training set of aligned human heads, characterizes the notion of correct correspondence. By optimizing this criterion, we are then able to compute correspondence and morphs for novel heads.", "bibtex": "@inproceedings{NIPS2006_e22cb9d6,\n author = {Steinke, Florian and Blanz, Volker and Sch\\\"{o}lkopf, Bernhard},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Learning Dense 3D Correspondence},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/e22cb9d6bbb4c290a94e4fff4d68a831-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/e22cb9d6bbb4c290a94e4fff4d68a831-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/e22cb9d6bbb4c290a94e4fff4d68a831-Metadata.json", "review": "", "metareview": "", "pdf_size": 189208, "gs_citation": 24, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9019059787790694271&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "Max Planck Institute for Biological Cybernetics; Max Planck Institute for Biological Cybernetics; Universit\u00e4t Siegen", "aff_domain": "tuebingen.mpg.de;tuebingen.mpg.de;mpi-sb.mpg.de", "email": "tuebingen.mpg.de;tuebingen.mpg.de;mpi-sb.mpg.de", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;1", "aff_unique_norm": "Max Planck Institute for Biological Cybernetics;University of Siegen", "aff_unique_dep": "Biological Cybernetics;", "aff_unique_url": "https://www.biocybernetics.mpg.de;https://www.uni-siegen.de", "aff_unique_abbr": "MPIBC;Uni Siegen", "aff_campus_unique_index": "", "aff_campus_unique": "", 
"aff_country_unique_index": "0;0;0", "aff_country_unique": "Germany" }, { "id": "835e0b16cb", "title": "Learning Motion Style Synthesis from Perceptual Observations", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/d4a897919a124958e699170b2b1dc8f2-Abstract.html", "author": "Lorenzo Torresani; Peggy Hackney; Christoph Bregler", "abstract": "This paper presents an algorithm for synthesis of human motion in specified styles. We use a theory of movement observation (Laban Movement Analysis) to describe movement styles as points in a multi-dimensional perceptual space. We cast the task of learning to synthesize desired movement styles as a regression problem: sequences generated via space-time interpolation of motion capture data are used to learn a nonlinear mapping between animation parameters and movement styles in perceptual space. We demonstrate that the learned model can apply a variety of motion styles to pre-recorded motion sequences and it can extrapolate styles not originally included in the training data.", "bibtex": "@inproceedings{NIPS2006_d4a89791,\n author = {Torresani, Lorenzo and Hackney, Peggy and Bregler, Christoph},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Learning Motion Style Synthesis from Perceptual Observations},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/d4a897919a124958e699170b2b1dc8f2-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/d4a897919a124958e699170b2b1dc8f2-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/d4a897919a124958e699170b2b1dc8f2-Metadata.json", "review": "", "metareview": "", "pdf_size": 115567, "gs_citation": 104, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4664866173043411621&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 16, "aff": "Riya, Inc.; Integrated Movement Studies; New York University", "aff_domain": "riya.com;aol.com;nyu.edu", "email": "riya.com;aol.com;nyu.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;2", "aff_unique_norm": "Riya, Inc.;Integrated Movement Studies;New York University", "aff_unique_dep": ";;", "aff_unique_url": ";;https://www.nyu.edu", "aff_unique_abbr": ";;NYU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States;" }, { "id": "3b97d7fa7d", "title": "Learning Nonparametric Models for Probabilistic Imitation", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/3b5020bb891119b9f5130f1fea9bd773-Abstract.html", "author": "David B. Grimes; Daniel R. Rashid; Rajesh P. Rao", "abstract": "Learning by imitation represents an important mechanism for rapid acquisition of new behaviors in humans and robots. A critical requirement for learning by imitation is the ability to handle uncertainty arising from the observation process as well as the imitator's own dynamics and interactions with the environment. 
In this paper, we present a new probabilistic method for inferring imitative actions that takes into account both the observations of the teacher as well as the imitator's dynamics. Our key contribution is a nonparametric learning method which generalizes to systems with very different dynamics. Rather than relying on a known forward model of the dynamics, our approach learns a nonparametric forward model via exploration. Leveraging advances in approximate inference in graphical models, we show how the learned forward model can be directly used to plan an imitating sequence. We provide experimental results for two systems: a biomechanical model of the human arm and a 25-degrees-of-freedom humanoid robot. We demonstrate that the proposed method can be used to learn appropriate motor inputs to the model arm which imitates the desired movements. A second set of results demonstrates dynamically stable full-body imitation of a human teacher by the humanoid robot.", "bibtex": "@inproceedings{NIPS2006_3b5020bb,\n author = {Grimes, David and Rashid, Daniel and Rao, Rajesh PN},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Learning Nonparametric Models for Probabilistic Imitation},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/3b5020bb891119b9f5130f1fea9bd773-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/3b5020bb891119b9f5130f1fea9bd773-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/3b5020bb891119b9f5130f1fea9bd773-Metadata.json", "review": "", "metareview": "", "pdf_size": 595492, "gs_citation": 52, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14936782973316150139&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 14, "aff": ";;", "aff_domain": ";;", "email": ";;", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster" }, { "id": "d1bb346353", "title": "Learning Structural Equation Models for fMRI", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/c8862fc1a32725712838863fb1a260b9-Abstract.html", "author": "Enrico Simonotto; Heather Whalley; Stephen Lawrie; Lawrence Murray; David Mcgonigle; Amos J. Storkey", "abstract": "David McGonigle", "bibtex": "@inproceedings{NIPS2006_c8862fc1,\n author = {Simonotto, Enrico and Whalley, Heather and Lawrie, Stephen and Murray, Lawrence and Mcgonigle, David and Storkey, Amos J},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Learning Structural Equation Models for fMRI},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/c8862fc1a32725712838863fb1a260b9-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/c8862fc1a32725712838863fb1a260b9-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/c8862fc1a32725712838863fb1a260b9-Metadata.json", "review": "", "metareview": "", "pdf_size": 277626, "gs_citation": 20, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4660537819739573098&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 18, "aff": ";;;;;", "aff_domain": ";;;;;", "email": ";;;;;", "github": "", "project": "", "author_num": 6, "track": "main", "status": "Poster" }, { "id": "f5f5390141", "title": "Learning Time-Intensity Profiles of Human Activity using Non-Parametric Bayesian Models", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/8a56257ea05c74018291954fc56fc448-Abstract.html", "author": "Alexander T. Ihler; Padhraic Smyth", "abstract": "Data sets that characterize human activity over time through collections of timestamped events or counts are of increasing interest in application areas as humancomputer interaction, video surveillance, and Web data analysis. We propose a non-parametric Bayesian framework for modeling collections of such data. In particular, we use a Dirichlet process framework for learning a set of intensity functions corresponding to different categories, which form a basis set for representing individual time-periods (e.g., several days) depending on which categories the time-periods are assigned to. 
This allows the model to learn in a data-driven fashion what \"factors\" are generating the observations on a particular day, including (for example) weekday versus weekend effects or day-specific effects corresponding to unique (single-day) occurrences of unusual behavior, sharing information where appropriate to obtain improved estimates of the behavior associated with each category. Applications to real-world data sets of count data involving both vehicles and people are used to illustrate the technique.", "bibtex": "@inproceedings{NIPS2006_8a56257e,\n author = {Ihler, Alexander and Smyth, Padhraic},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Learning Time-Intensity Profiles of Human Activity using Non-Parametric Bayesian Models},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/8a56257ea05c74018291954fc56fc448-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/8a56257ea05c74018291954fc56fc448-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/8a56257ea05c74018291954fc56fc448-Metadata.json", "review": "", "metareview": "", "pdf_size": 213542, "gs_citation": 28, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13876774720528237006&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 18, "aff": "Donald Bren School of Information and Computer Science, U.C. Irvine; Donald Bren School of Information and Computer Science, U.C. 
Irvine", "aff_domain": "ics.uci.edu;ics.uci.edu", "email": "ics.uci.edu;ics.uci.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of California, Irvine", "aff_unique_dep": "Donald Bren School of Information and Computer Science", "aff_unique_url": "https://www.uci.edu", "aff_unique_abbr": "UCI", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Irvine", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "b407321e18", "title": "Learning annotated hierarchies from relational data", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/663fd3c5144fd10bd5ca6611a9a5b92d-Abstract.html", "author": "Daniel M. Roy; Charles Kemp; Vikash K. Mansinghka; Joshua B. Tenenbaum", "abstract": "The objects in many real-world domains can be organized into hierarchies, where each internal node picks out a category of objects. Given a collection of fea- tures and relations de\ufb01ned over a set of objects, an annotated hierarchy includes a speci\ufb01cation of the categories that are most useful for describing each individual feature and relation. We de\ufb01ne a generative model for annotated hierarchies and the features and relations that they describe, and develop a Markov chain Monte Carlo scheme for learning annotated hierarchies. We show that our model discov- ers interpretable structure in several real-world data sets.", "bibtex": "@inproceedings{NIPS2006_663fd3c5,\n author = {Roy, Daniel M and Kemp, Charles and Mansinghka, Vikash and Tenenbaum, Joshua},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Learning annotated hierarchies from relational data},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/663fd3c5144fd10bd5ca6611a9a5b92d-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/663fd3c5144fd10bd5ca6611a9a5b92d-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/663fd3c5144fd10bd5ca6611a9a5b92d-Metadata.json", "review": "", "metareview": "", "pdf_size": 99578, "gs_citation": 79, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7300448632414998042&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 24, "aff": "CSAIL, Dept. of Brain & Cognitive Sciences, MIT, Cambridge, MA 02139; CSAIL, Dept. of Brain & Cognitive Sciences, MIT, Cambridge, MA 02139; CSAIL, Dept. of Brain & Cognitive Sciences, MIT, Cambridge, MA 02139; CSAIL, Dept. of Brain & Cognitive Sciences, MIT, Cambridge, MA 02139", "aff_domain": "mit.edu;mit.edu;mit.edu;mit.edu", "email": "mit.edu;mit.edu;mit.edu;mit.edu", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Massachusetts Institute of Technology", "aff_unique_dep": "Department of Brain & Cognitive Sciences", "aff_unique_url": "https://www.mit.edu", "aff_unique_abbr": "MIT", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Cambridge", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "id": "69153171ea", "title": "Learning from Multiple Sources", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/0f21f0349462cacdc5796990d37760ae-Abstract.html", "author": "Koby Crammer; Michael Kearns; Jennifer Wortman", "abstract": "We consider the problem of learning accurate models from multiple sources of \"nearby\" data. 
Given distinct samples from multiple data sources and estimates of the dissimilarities between these sources, we provide a general theory of which samples should be used to learn models for each source. This theory is applicable in a broad decision-theoretic learning framework, and yields results for classification and regression generally, and for density estimation within the exponential family. A key component of our approach is the development of approximate triangle inequalities for expected loss, which may be of independent interest.", "bibtex": "@inproceedings{NIPS2006_0f21f034,\n author = {Crammer, Koby and Kearns, Michael and Wortman, Jennifer},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Learning from Multiple Sources},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/0f21f0349462cacdc5796990d37760ae-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/0f21f0349462cacdc5796990d37760ae-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/0f21f0349462cacdc5796990d37760ae-Metadata.json", "review": "", "metareview": "", "pdf_size": 322962, "gs_citation": 352, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4961023066923606421&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 18, "aff": ";;", "aff_domain": ";;", "email": ";;", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster" }, { "id": "5ee93ef07f", "title": "Learning on Graph with Laplacian Regularization", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/d87c68a56bc8eb803b44f25abb627786-Abstract.html", "author": "Rie K. 
Ando; Tong Zhang", "abstract": "We consider a general form of transductive learning on graphs with Laplacian regularization, and derive margin-based generalization bounds using appropriate geometric properties of the graph. We use this analysis to obtain a better understanding of the role of normalization of the graph Laplacian matrix as well as the effect of dimension reduction. The results suggest a limitation of the standard degree-based normalization. We propose a remedy from our analysis and demonstrate empirically that the remedy leads to improved classification performance.", "bibtex": "@inproceedings{NIPS2006_d87c68a5,\n author = {Ando, Rie and Zhang, Tong},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Learning on Graph with Laplacian Regularization},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/d87c68a56bc8eb803b44f25abb627786-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/d87c68a56bc8eb803b44f25abb627786-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/d87c68a56bc8eb803b44f25abb627786-Metadata.json", "review": "", "metareview": "", "pdf_size": 138505, "gs_citation": 208, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8306247845429622757&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 16, "aff": "IBM T.J. Watson Research Center, Hawthorne, NY 10532, U.S.A.; Yahoo! Inc., New York City, NY 10011, U.S.A.", "aff_domain": "us.ibm.com;yahoo-inc.com", "email": "us.ibm.com;yahoo-inc.com", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "IBM;Yahoo! Inc.", "aff_unique_dep": "IBM T.J. 
Watson Research Center;", "aff_unique_url": "https://www.ibm.com/research/watson;https://www.yahoo.com", "aff_unique_abbr": "IBM Watson;Yahoo!", "aff_campus_unique_index": "0;1", "aff_campus_unique": "Hawthorne;New York City", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "8f1eef48fe", "title": "Learning to Model Spatial Dependency: Semi-Supervised Discriminative Random Fields", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/a0ba2648acd23dc7a5829968ce531a7d-Abstract.html", "author": "Chi-hoon Lee; Shaojun Wang; Feng Jiao; Dale Schuurmans; Russell Greiner", "abstract": "We present a novel, semi-supervised approach to training discriminative random fields (DRFs) that efficiently exploits labeled and unlabeled training data to achieve improved accuracy in a variety of image processing tasks. We formulate DRF training as a form of MAP estimation that combines conditional loglikelihood on labeled data, given a data-dependent prior, with a conditional entropy regularizer defined on unlabeled data. Although the training objective is no longer concave, we develop an efficient local optimization procedure that produces classifiers that are more accurate than ones based on standard supervised DRF training. We then apply our semi-supervised approach to train DRFs to segment both synthetic and real data sets, and demonstrate significant improvements over supervised DRFs in each case.", "bibtex": "@inproceedings{NIPS2006_a0ba2648,\n author = {Lee, Chi-hoon and Wang, Shaojun and Jiao, Feng and Schuurmans, Dale and Greiner, Russell},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Learning to Model Spatial Dependency: Semi-Supervised Discriminative Random Fields},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/a0ba2648acd23dc7a5829968ce531a7d-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/a0ba2648acd23dc7a5829968ce531a7d-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/a0ba2648acd23dc7a5829968ce531a7d-Metadata.json", "review": "", "metareview": "", "pdf_size": 220477, "gs_citation": 48, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18336856032909842531&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 13, "aff": "Department of Computing Science, University of Alberta; Department of Computer Science and Engineering, Wright State University; Department of Computing Science, University of Waterloo; Department of Computing Science, University of Alberta; Department of Computing Science, University of Alberta", "aff_domain": "cs.ualberta.ca;wright.edu;cs.uwaterloo.ca;cs.ualberta.ca;cs.ualberta.ca", "email": "cs.ualberta.ca;wright.edu;cs.uwaterloo.ca;cs.ualberta.ca;cs.ualberta.ca", "github": "", "project": "", "author_num": 5, "track": "main", "status": "Poster", "aff_unique_index": "0;1;2;0;0", "aff_unique_norm": "University of Alberta;Wright State University;University of Waterloo", "aff_unique_dep": "Department of Computing Science;Department of Computer Science and Engineering;Department of Computing Science", "aff_unique_url": "https://www.ualberta.ca;https://www.wright.edu;https://uwaterloo.ca", "aff_unique_abbr": "UAlberta;WSU;UW", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1;0;0;0", "aff_country_unique": "Canada;United States" }, { "id": "e0138133d6", "title": "Learning to Rank with Nonsmooth Cost Functions", "site": 
"https://papers.nips.cc/paper_files/paper/2006/hash/af44c4c56f385c43f2529f9b1b018f6a-Abstract.html", "author": "Christopher J. Burges; Robert Ragno; Quoc V. Le", "abstract": "The quality measures used in information retrieval are particularly dif\ufb01cult to op- timize directly, since they depend on the model scores only through the sorted order of the documents returned for a given query. Thus, the derivatives of the cost with respect to the model parameters are either zero, or are unde\ufb01ned. In this paper, we propose a class of simple, \ufb02exible algorithms, called LambdaRank, which avoids these dif\ufb01culties by working with implicit cost functions. We de- scribe LambdaRank using neural network models, although the idea applies to any differentiable function class. We give necessary and suf\ufb01cient conditions for the resulting implicit cost function to be convex, and we show that the general method has a simple mechanical interpretation. We demonstrate signi\ufb01cantly im- proved accuracy, over a state-of-the-art ranking algorithm, on several datasets. We also show that LambdaRank provides a method for signi\ufb01cantly speeding up the training phase of that ranking algorithm. Although this paper is directed towards ranking, the proposed method can be extended to any non-smooth and multivariate cost functions.", "bibtex": "@inproceedings{NIPS2006_af44c4c5,\n author = {Burges, Christopher and Ragno, Robert and Le, Quoc},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Learning to Rank with Nonsmooth Cost Functions},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/af44c4c56f385c43f2529f9b1b018f6a-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/af44c4c56f385c43f2529f9b1b018f6a-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/af44c4c56f385c43f2529f9b1b018f6a-Metadata.json", "review": "", "metareview": "", "pdf_size": 145595, "gs_citation": 1147, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3135755332709536274&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 17, "aff": "Microsoft Research; Microsoft Research; Statistical Machine Learning Program, NICTA", "aff_domain": "microsoft.com;microsoft.com;anu.edu.au", "email": "microsoft.com;microsoft.com;anu.edu.au", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;1", "aff_unique_norm": "Microsoft;NICTA", "aff_unique_dep": "Microsoft Research;Statistical Machine Learning Program", "aff_unique_url": "https://www.microsoft.com/en-us/research;https://nicta.com.au", "aff_unique_abbr": "MSR;NICTA", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;1", "aff_country_unique": "United States;Australia" }, { "id": "43352abde9", "title": "Learning to Traverse Image Manifolds", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/03bfc1d4783966c69cc6aef8247e0103-Abstract.html", "author": "Piotr Doll\u00e1r; Vincent Rabaud; Serge J. Belongie", "abstract": "We present a new algorithm, Locally Smooth Manifold Learning (LSML), that learns a warping function from a point on an manifold to its neighbors. Important characteristics of LSML include the ability to recover the structure of the manifold in sparsely populated regions and beyond the support of the provided data. 
Applications of our proposed technique include embedding with a natural out-of-sample extension and tasks such as tangent distance estimation, frame rate up-conversion, video compression and motion transfer.", "bibtex": "@inproceedings{NIPS2006_03bfc1d4,\n author = {Doll\\'{a}r, Piotr and Rabaud, Vincent and Belongie, Serge},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Learning to Traverse Image Manifolds},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/03bfc1d4783966c69cc6aef8247e0103-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/03bfc1d4783966c69cc6aef8247e0103-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/03bfc1d4783966c69cc6aef8247e0103-Metadata.json", "review": "", "metareview": "", "pdf_size": 818333, "gs_citation": 70, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=398817736168938197&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 19, "aff": "University of California, San Diego; University of California, San Diego; University of California, San Diego", "aff_domain": "cs.ucsd.edu;cs.ucsd.edu;cs.ucsd.edu", "email": "cs.ucsd.edu;cs.ucsd.edu;cs.ucsd.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of California, San Diego", "aff_unique_dep": "", "aff_unique_url": "https://www.ucsd.edu", "aff_unique_abbr": "UCSD", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "San Diego", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "be1093904e", "title": "Learning to be Bayesian without Supervision", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/908c9a564a86426585b29f5335b619bc-Abstract.html", "author": "Martin Raphan; Eero P. 
Simoncelli", "abstract": "Abstract Unavailable", "bibtex": "@inproceedings{NIPS2006_908c9a56,\n author = {Raphan, Martin and Simoncelli, Eero},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Learning to be Bayesian without Supervision},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/908c9a564a86426585b29f5335b619bc-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/908c9a564a86426585b29f5335b619bc-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/908c9a564a86426585b29f5335b619bc-Metadata.json", "review": "", "metareview": "", "pdf_size": 166113, "gs_citation": 82, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1424779422344095510&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 16, "aff": "Courant Inst. of Mathematical Sciences, New York University; Center for Neural Science + Courant Inst. of Mathematical Sciences, New York University", "aff_domain": "cims.nyu.edu;nyu.edu", "email": "cims.nyu.edu;nyu.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0+0", "aff_unique_norm": "New York University", "aff_unique_dep": "Courant Institute of Mathematical Sciences", "aff_unique_url": "https://www.courant.nyu.edu", "aff_unique_abbr": "NYU", "aff_campus_unique_index": "0;0", "aff_campus_unique": "New York;", "aff_country_unique_index": "0;0+0", "aff_country_unique": "United States" }, { "id": "7e5f0ac394", "title": "Learning to parse images of articulated bodies", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/a209ca7b50dcaab2db7c2d4d1223d4d5-Abstract.html", "author": "Deva Ramanan", "abstract": "We consider the machine vision task of pose estimation from static images, specifically for the case of articulated objects. 
This problem is hard because of the large number of degrees of freedom to be estimated. Following a established line of research, pose estimation is framed as inference in a probabilistic model. In our experience however, the success of many approaches often lie in the power of the features. Our primary contribution is a novel casting of visual inference as an iterative parsing process, where one sequentially learns better and better features tuned to a particular image. We show quantitative results for human pose estimation on a database of over 300 images that suggest our algorithm is competitive with or surpasses the state-of-the-art. Since our procedure is quite general (it does not rely on face or skin detection), we also use it to estimate the poses of horses in the Weizmann database.", "bibtex": "@inproceedings{NIPS2006_a209ca7b,\n author = {Ramanan, Deva},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Learning to parse images of articulated bodies},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/a209ca7b50dcaab2db7c2d4d1223d4d5-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/a209ca7b50dcaab2db7c2d4d1223d4d5-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/a209ca7b50dcaab2db7c2d4d1223d4d5-Metadata.json", "review": "", "metareview": "", "pdf_size": 765413, "gs_citation": 597, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6221018783515981041&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 16, "aff": "Toyota Technological Institute at Chicago", "aff_domain": "tti-c.org", "email": "tti-c.org", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "Toyota Technological Institute at Chicago", "aff_unique_dep": "", 
"aff_unique_url": "https://www.tti-chicago.org", "aff_unique_abbr": "TTI Chicago", "aff_campus_unique_index": "0", "aff_campus_unique": "Chicago", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "id": "c7c4f433c9", "title": "Learning with Hypergraphs: Clustering, Classification, and Embedding", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/dff8e9c2ac33381546d96deea9922999-Abstract.html", "author": "Dengyong Zhou; Jiayuan Huang; Bernhard Sch\u00f6lkopf", "abstract": "We usually endow the investigated objects with pairwise relationships, which can be illustrated as graphs. In many real-world problems, however, relationships among the objects of our interest are more complex than pair- wise. Naively squeezing the complex relationships into pairwise ones will inevitably lead to loss of information which can be expected valuable for our learning tasks however. Therefore we consider using hypergraphs in- stead to completely represent complex relationships among the objects of our interest, and thus the problem of learning with hypergraphs arises. Our main contribution in this paper is to generalize the powerful methodology of spectral clustering which originally operates on undirected graphs to hy- pergraphs, and further develop algorithms for hypergraph embedding and transductive classi\ufb01cation on the basis of the spectral hypergraph cluster- ing approach. Our experiments on a number of benchmarks showed the advantages of hypergraphs over usual graphs.", "bibtex": "@inproceedings{NIPS2006_dff8e9c2,\n author = {Zhou, Dengyong and Huang, Jiayuan and Sch\\\"{o}lkopf, Bernhard},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Learning with Hypergraphs: Clustering, Classification, and Embedding},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/dff8e9c2ac33381546d96deea9922999-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/dff8e9c2ac33381546d96deea9922999-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/dff8e9c2ac33381546d96deea9922999-Metadata.json", "review": "", "metareview": "", "pdf_size": 231127, "gs_citation": 1820, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16827060469393133178&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 14, "aff": "NEC Laboratories America, Inc.; School of Computer Science, University of Waterloo; Max Planck Institute for Biological Cybernetics", "aff_domain": "tuebingen.mpg.de;tuebingen.mpg.de;tuebingen.mpg.de", "email": "tuebingen.mpg.de;tuebingen.mpg.de;tuebingen.mpg.de", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;2", "aff_unique_norm": "NEC Laboratories America;University of Waterloo;Max Planck Institute for Biological Cybernetics", "aff_unique_dep": ";School of Computer Science;Biological Cybernetics", "aff_unique_url": "https://www.nec-labs.com;https://uwaterloo.ca;https://www.biocybernetics.mpg.de", "aff_unique_abbr": "NEC Labs America;UWaterloo;MPIBC", "aff_campus_unique_index": "1", "aff_campus_unique": ";Waterloo", "aff_country_unique_index": "0;1;2", "aff_country_unique": "United States;Canada;Germany" }, { "id": "1bd1497264", "title": "Linearly-solvable Markov decision problems", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/d806ca13ca3449af72a1ea5aedbed26a-Abstract.html", "author": "Emanuel Todorov", "abstract": "We introduce a class of MPDs which greatly simplify Reinforcement Learning. They have discrete state spaces and continuous control spaces. 
The controls have the effect of rescaling the transition probabilities of an underlying Markov chain. A control cost penalizing KL divergence between controlled and uncontrolled transition probabilities makes the minimization problem convex, and allows analytical computation of the optimal controls given the optimal value function. An exponential transformation of the optimal value function makes the minimized Bellman equation linear. Apart from their theoretical signi cance, the new MDPs enable ef cient approximations to traditional MDPs. Shortest path problems are approximated to arbitrary precision with largest eigenvalue problems, yielding an O (n) algorithm. Accurate approximations to generic MDPs are obtained via continuous embedding reminiscent of LP relaxation in integer programming. Offpolicy learning of the optimal value function is possible without need for stateaction values; the new algorithm (Z-learning) outperforms Q-learning. This work was supported by NSF grant ECS0524761.", "bibtex": "@inproceedings{NIPS2006_d806ca13,\n author = {Todorov, Emanuel},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Linearly-solvable Markov decision problems},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/d806ca13ca3449af72a1ea5aedbed26a-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/d806ca13ca3449af72a1ea5aedbed26a-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/d806ca13ca3449af72a1ea5aedbed26a-Metadata.json", "review": "", "metareview": "", "pdf_size": 230892, "gs_citation": 566, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4842534586720177019&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 16, "aff": "Department of Cognitive Science, University of California San Diego", "aff_domain": "cogsci.ucsd.edu", "email": "cogsci.ucsd.edu", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "University of California, San Diego", "aff_unique_dep": "Department of Cognitive Science", "aff_unique_url": "https://ucsd.edu", "aff_unique_abbr": "UCSD", "aff_campus_unique_index": "0", "aff_campus_unique": "San Diego", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "id": "79c4161604", "title": "Logarithmic Online Regret Bounds for Undiscounted Reinforcement Learning", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/c1b70d965ca504aa751ddb62ad69c63f-Abstract.html", "author": "Peter Auer; Ronald Ortner", "abstract": "We present a learning algorithm for undiscounted reinforcement learning. Our interest lies in bounds for the algorithm's online performance after some finite number of steps. 
In the spirit of similar methods already successfully applied for the exploration-exploitation tradeoff in multi-armed bandit problems, we use upper confidence bounds to show that our UCRL algorithm achieves logarithmic online regret in the number of steps taken with respect to an optimal policy.", "bibtex": "@inproceedings{NIPS2006_c1b70d96,\n author = {Auer, Peter and Ortner, Ronald},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Logarithmic Online Regret Bounds for Undiscounted Reinforcement Learning},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/c1b70d965ca504aa751ddb62ad69c63f-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/c1b70d965ca504aa751ddb62ad69c63f-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/c1b70d965ca504aa751ddb62ad69c63f-Metadata.json", "review": "", "metareview": "", "pdf_size": 123046, "gs_citation": 326, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5119541865623619984&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 13, "aff": "University of Leoben, Franz-Josef-Strasse 18, 8700 Leoben, Austria; University of Leoben, Franz-Josef-Strasse 18, 8700 Leoben, Austria", "aff_domain": "unileoben.ac.at;unileoben.ac.at", "email": "unileoben.ac.at;unileoben.ac.at", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of Leoben", "aff_unique_dep": "", "aff_unique_url": "https://www.unileoben.ac.at", "aff_unique_abbr": "Uni Leoben", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "Austria" }, { "id": "2bf55d665e", "title": "Logistic Regression for Single Trial EEG Classification", "site": 
"https://papers.nips.cc/paper_files/paper/2006/hash/35937e34256cf4e5b2f7da08871d2a0b-Abstract.html", "author": "Ryota Tomioka; Kazuyuki Aihara; Klaus-Robert M\u00fcller", "abstract": "We propose a novel framework for the classification of single trial ElectroEncephaloGraphy (EEG), based on regularized logistic regression. Framed in this robust statistical framework no prior feature extraction or outlier removal is required. We present two variations of parameterizing the regression function: (a) with a full rank symmetric matrix coefficient and (b) as a difference of two rank=1 matrices. In the first case, the problem is convex and the logistic regression is optimal under a generative model. The latter case is shown to be related to the Common Spatial Pattern (CSP) algorithm, which is a popular technique in Brain Computer Interfacing. The regression coefficients can also be topographically mapped onto the scalp similarly to CSP pro jections, which allows neuro-physiological interpretation. Simulations on 162 BCI datasets demonstrate that classification accuracy and robustness compares favorably against conventional CSP based classifiers.", "bibtex": "@inproceedings{NIPS2006_35937e34,\n author = {Tomioka, Ryota and Aihara, Kazuyuki and M\\\"{u}ller, Klaus-Robert},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Logistic Regression for Single Trial EEG Classification},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/35937e34256cf4e5b2f7da08871d2a0b-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/35937e34256cf4e5b2f7da08871d2a0b-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/35937e34256cf4e5b2f7da08871d2a0b-Metadata.json", "review": "", "metareview": "", "pdf_size": 648909, "gs_citation": 139, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8272288933842577588&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 17, "aff": "Dept. of Mathematical Informatics, IST, The University of Tokyo, 113-8656 Tokyo, Japan + Fraunhofer FIRST.IDA, Kekul\u00b4estr. 7, 12489 Berlin, Germany; Dept. of Mathematical Informatics, IST, The University of Tokyo, 113-8656 Tokyo, Japan + ERATO Aihara Complexity Modeling Project, JST, 153-8505 Tokyo, Japan; Dept. of Computer Science, Technical University of Berlin, Franklinstr. 28/29, 10587 Berlin, Germany + Fraunhofer FIRST.IDA, Kekul\u00b4estr. 7, 12489 Berlin, Germany", "aff_domain": "first.fhg.de;sat.t.u-tokyo.ac.jp;first.fhg.de", "email": "first.fhg.de;sat.t.u-tokyo.ac.jp;first.fhg.de", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0+1;0+2;3+1", "aff_unique_norm": "University of Tokyo;Fraunhofer Institute for Software and Systems Engineering;ERATO Aihara Complexity Modeling Project;Technical University of Berlin", "aff_unique_dep": "Dept. of Mathematical Informatics;FIRST.IDA;JST;Dept. 
of Computer Science", "aff_unique_url": "https://www.u-tokyo.ac.jp;https://www.first.fraunhofer.de/;;https://www.tu-berlin.de", "aff_unique_abbr": "UTokyo;Fraunhofer FIRST;;TU Berlin", "aff_campus_unique_index": "0;0;2", "aff_campus_unique": "Tokyo;;Berlin", "aff_country_unique_index": "0+1;0+0;1+1", "aff_country_unique": "Japan;Germany" }, { "id": "8103e9c924", "title": "MLLE: Modified Locally Linear Embedding Using Multiple Weights", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/fb2606a5068901da92473666256e6e5b-Abstract.html", "author": "Zhenyue Zhang; Jing Wang", "abstract": "The locally linear embedding (LLE) is improved by introducing multiple linearly independent local weight vectors for each neighborhood. We characterize the reconstruction weights and show the existence of the linearly independent weight vectors at each neighborhood. The modi\ufb01ed locally linear embedding (MLLE) proposed in this paper is much stable. It can retrieve the ideal embedding if MLLE is applied on data points sampled from an isometric manifold. MLLE is also compared with the local tangent space alignment (LTSA). Numerical examples are given that show the improvement and ef\ufb01ciency of MLLE.", "bibtex": "@inproceedings{NIPS2006_fb2606a5,\n author = {Zhang, Zhenyue and Wang, Jing},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {MLLE: Modified Locally Linear Embedding Using Multiple Weights},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/fb2606a5068901da92473666256e6e5b-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/fb2606a5068901da92473666256e6e5b-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/fb2606a5068901da92473666256e6e5b-Metadata.json", "review": "", "metareview": "", "pdf_size": 601252, "gs_citation": 272, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10964761170198163801&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "Department of Mathematics, Zhejiang University; College of Information Science and Engineering, Huaqiao University + Dep. of Mathematics, Zhejiang University", "aff_domain": "zju.edu.cn;yahoo.com.cn", "email": "zju.edu.cn;yahoo.com.cn", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1+0", "aff_unique_norm": "Zhejiang University;Huaqiao University", "aff_unique_dep": "Department of Mathematics;College of Information Science and Engineering", "aff_unique_url": "http://www.zju.edu.cn;https://www.hqu.edu.cn", "aff_unique_abbr": "ZJU;", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0+0", "aff_country_unique": "China" }, { "id": "9c551f883d", "title": "Manifold Denoising", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/a0b83c02d720415dada82e08bc09e9f3-Abstract.html", "author": "Matthias Hein; Markus Maier", "abstract": "We consider the problem of denoising a noisily sampled submanifold M in Rd, where the submanifold M is a priori unknown and we are only given a noisy point sample. The presented denoising algorithm is based on a graph-based diffusion process of the point sample. 
We analyze this diffusion process using recent re- sults about the convergence of graph Laplacians. In the experiments we show that our method is capable of dealing with non-trivial high-dimensional noise. More- over using the denoising algorithm as pre-processing method we can improve the results of a semi-supervised learning algorithm.", "bibtex": "@inproceedings{NIPS2006_a0b83c02,\n author = {Hein, Matthias and Maier, Markus},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Manifold Denoising},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/a0b83c02d720415dada82e08bc09e9f3-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/a0b83c02d720415dada82e08bc09e9f3-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/a0b83c02d720415dada82e08bc09e9f3-Metadata.json", "review": "", "metareview": "", "pdf_size": 174952, "gs_citation": 254, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3842549634141898041&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "Max Planck Institute for Biological Cybernetics; Max Planck Institute for Biological Cybernetics", "aff_domain": "tuebingen.mpg.de;tuebingen.mpg.de", "email": "tuebingen.mpg.de;tuebingen.mpg.de", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Max Planck Institute for Biological Cybernetics", "aff_unique_dep": "Biological Cybernetics", "aff_unique_url": "https://www.biocybernetics.mpg.de", "aff_unique_abbr": "MPIBC", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "Germany" }, { "id": "c0224b859e", "title": "Map-Reduce for Machine Learning on Multicore", "site": 
"https://papers.nips.cc/paper_files/paper/2006/hash/77ee3bc58ce560b86c2b59363281e914-Abstract.html", "author": "Cheng-tao Chu; Sang K. Kim; Yi-an Lin; Yuanyuan Yu; Gary Bradski; Kunle Olukotun; Andrew Y. Ng", "abstract": "We are at the beginning of the multicore era. Computers will have increasingly many cores (processors), but there is still no good programming framework for these architectures, and thus no simple and unified way for machine learning to take advantage of the potential speed up. In this paper, we develop a broadly applicable parallel programming method, one that is easily applied to many different learning algorithms. Our work is in distinct contrast to the tradition in machine learning of designing (often ingenious) ways to speed up a single algorithm at a time. Specifically, we show that algorithms that fit the Statistical Query model [15] can be written in a certain \"summation form,\" which allows them to be easily parallelized on multicore computers. We adapt Google's map-reduce [7] paradigm to demonstrate this parallel speed up technique on a variety of learning algorithms including locally weighted linear regression (LWLR), k-means, logistic regression (LR), naive Bayes (NB), SVM, ICA, PCA, gaussian discriminant analysis (GDA), EM, and backpropagation (NN). Our experimental results show basically linear speedup with an increasing number of processors.", "bibtex": "@inproceedings{NIPS2006_77ee3bc5,\n author = {Chu, Cheng-tao and Kim, Sang and Lin, Yi-an and Yu, Yuanyuan and Bradski, Gary and Olukotun, Kunle and Ng, Andrew},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Map-Reduce for Machine Learning on Multicore},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/77ee3bc58ce560b86c2b59363281e914-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/77ee3bc58ce560b86c2b59363281e914-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/77ee3bc58ce560b86c2b59363281e914-Metadata.json", "review": "", "metareview": "", "pdf_size": 175253, "gs_citation": 1934, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11078327679366920595&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 32, "aff": "CS. Department, Stanford University; CS. Department, Stanford University; CS. Department, Stanford University; CS. Department, Stanford University; CS. Department, Stanford University + RexeeInc.; CS. Department, Stanford University; CS. Department, Stanford University", "aff_domain": "stanford.edu;stanford.edu;stanford.edu;stanford.edu;gmail;cs.stanford.edu;cs.stanford.edu", "email": "stanford.edu;stanford.edu;stanford.edu;stanford.edu;gmail;cs.stanford.edu;cs.stanford.edu", "github": "", "project": "", "author_num": 7, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;0;0+1;0;0", "aff_unique_norm": "Stanford University;Rexee Inc.", "aff_unique_dep": "Department of Computer Science;", "aff_unique_url": "https://www.stanford.edu;", "aff_unique_abbr": "Stanford;", "aff_campus_unique_index": "0;0;0;0;0;0;0", "aff_campus_unique": "Stanford;", "aff_country_unique_index": "0;0;0;0;0;0;0", "aff_country_unique": "United States;" }, { "id": "d626d55ce2", "title": "Max-margin classification of incomplete data", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/e5522f7ac7cd76f19b396595c9c25b40-Abstract.html", "author": "Gal Chechik; Geremy Heitz; Gal Elidan; Pieter Abbeel; Daphne Koller", "abstract": "We consider the problem of learning classifiers for 
structurally incomplete data, where some ob jects have a subset of features inherently absent due to complex relationships between the features. The common approach for handling missing features is to begin with a preprocessing phase that completes the missing features, and then use a standard classification procedure. In this paper we show how incomplete data can be classified directly without any completion of the missing features using a max-margin learning framework. We formulate this task using a geometrically-inspired ob jective function, and discuss two optimization approaches: The linearly separable case is written as a set of convex feasibility problems, and the non-separable case has a non-convex ob jective that we optimize iteratively. By avoiding the pre-processing phase in which the data is completed, these approaches offer considerable computational savings. More importantly, we show that by elegantly handling complex patterns of missing values, our approach is both competitive with other methods when the values are missing at random and outperforms them when the missing values have non-trivial structure. We demonstrate our results on two real-world problems: edge prediction in metabolic pathways, and automobile detection in natural images.", "bibtex": "@inproceedings{NIPS2006_e5522f7a,\n author = {Chechik, Gal and Heitz, Geremy and Elidan, Gal and Abbeel, Pieter and Koller, Daphne},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Max-margin classification of incomplete data},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/e5522f7ac7cd76f19b396595c9c25b40-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/e5522f7ac7cd76f19b396595c9c25b40-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/e5522f7ac7cd76f19b396595c9c25b40-Metadata.json", "review": "", "metareview": "", "pdf_size": 256968, "gs_citation": 40, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1567870103537313551&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 20, "aff": "Department of Computer Science, Stanford University, Stanford CA, 94305; Department of Electrical Engineering, Stanford University, Stanford CA, 94305; Department of Computer Science, Stanford University, Stanford CA, 94305; Department of Computer Science, Stanford University, Stanford CA, 94305; Department of Computer Science, Stanford University, Stanford CA, 94305", "aff_domain": "ai.stanford.edu; ; ; ; ", "email": "ai.stanford.edu; ; ; ; ", "github": "", "project": "", "author_num": 5, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;0;0", "aff_unique_norm": "Stanford University", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.stanford.edu", "aff_unique_abbr": "Stanford", "aff_campus_unique_index": "0;0;0;0;0", "aff_campus_unique": "Stanford", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "id": "9fb7c263ed", "title": "Mixture Regression for Covariate Shift", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/a74c3bae3e13616104c1b25f9da1f11f-Abstract.html", "author": "Masashi Sugiyama; Amos J. Storkey", "abstract": "In supervised learning there is a typical presumption that the training and test points are taken from the same distribution. 
In practice this assumption is commonly violated. The situations where the training and test data are from different distributions is called covariate shift. Recent work has examined techniques for dealing with covariate shift in terms of minimisation of generalisation error. As yet the literature lacks a Bayesian generative perspective on this problem. This paper tackles this issue for regression models. Recent work on covariate shift can be understood in terms of mixture regression. Using this view, we obtain a general approach to regression under covariate shift, which reproduces previous work as a special case. The main advantages of this new formulation over previous models for covariate shift are that we no longer need to presume the test and training densities are known, the regression and density estimation are combined into a single procedure, and previous methods are reproduced as special cases of this procedure, shedding light on the implicit assumptions the methods are making.", "bibtex": "@inproceedings{NIPS2006_a74c3bae,\n author = {Sugiyama, Masashi and Storkey, Amos J},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Mixture Regression for Covariate Shift},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/a74c3bae3e13616104c1b25f9da1f11f-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/a74c3bae3e13616104c1b25f9da1f11f-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/a74c3bae3e13616104c1b25f9da1f11f-Metadata.json", "review": "", "metareview": "", "pdf_size": 280539, "gs_citation": 152, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11631436626570977612&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 20, "aff": "Institute of Adaptive and Neural Computation, School of Informatics, University of Edinburgh; Department of Computer Science, Tokyo Institute of Technology", "aff_domain": "ed.ac.uk;cs.titech.ac.jp", "email": "ed.ac.uk;cs.titech.ac.jp", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "University of Edinburgh;Tokyo Institute of Technology", "aff_unique_dep": "School of Informatics;Department of Computer Science", "aff_unique_url": "https://www.ed.ac.uk;https://www.titech.ac.jp", "aff_unique_abbr": "Edinburgh;Titech", "aff_campus_unique_index": "0;1", "aff_campus_unique": "Edinburgh;Tokyo", "aff_country_unique_index": "0;1", "aff_country_unique": "United Kingdom;Japan" }, { "id": "00923f8dfe", "title": "Modeling Dyadic Data with Binary Latent Factors", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/272e11700558e27be60f7489d2d782e7-Abstract.html", "author": "Edward Meeds; Zoubin Ghahramani; Radford M. Neal; Sam T. Roweis", "abstract": "We introduce binary matrix factorization, a novel model for unsupervised ma- trix decomposition. The decomposition is learned by \ufb01tting a non-parametric Bayesian probabilistic model with binary latent variables to a matrix of dyadic data. 
Unlike bi-clustering models, which assign each row or column to a single cluster based on a categorical hidden feature, our binary feature model re\ufb02ects the prior belief that items and attributes can be associated with more than one latent cluster at a time. We provide simple learning and inference rules for this new model and show how to extend it to an in\ufb01nite model in which the number of features is not a priori \ufb01xed but is allowed to grow with the size of the data.", "bibtex": "@inproceedings{NIPS2006_272e1170,\n author = {Meeds, Edward and Ghahramani, Zoubin and Neal, Radford and Roweis, Sam},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Modeling Dyadic Data with Binary Latent Factors},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/272e11700558e27be60f7489d2d782e7-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/272e11700558e27be60f7489d2d782e7-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/272e11700558e27be60f7489d2d782e7-Metadata.json", "review": "", "metareview": "", "pdf_size": 165046, "gs_citation": 234, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4169550296191180725&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 13, "aff": "Department of Computer Science, University of Toronto; Department of Engineering, Cambridge University; Department of Computer Science, University of Toronto; Department of Computer Science, University of Toronto", "aff_domain": "cs.toronto.edu;eng.cam.ac.uk;cs.toronto.edu;cs.toronto.edu", "email": "cs.toronto.edu;eng.cam.ac.uk;cs.toronto.edu;cs.toronto.edu", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;1;0;0", "aff_unique_norm": "University of Toronto;University of Cambridge", 
"aff_unique_dep": "Department of Computer Science;Department of Engineering", "aff_unique_url": "https://www.utoronto.ca;https://www.cam.ac.uk", "aff_unique_abbr": "U of T;Cambridge", "aff_campus_unique_index": "0;1;0;0", "aff_campus_unique": "Toronto;Cambridge", "aff_country_unique_index": "0;1;0;0", "aff_country_unique": "Canada;United Kingdom" }, { "id": "67cd50f3f1", "title": "Modeling General and Specific Aspects of Documents with a Probabilistic Topic Model", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/ec47a5de1ebd60f559fee4afd739d59b-Abstract.html", "author": "Chaitanya Chemudugunta; Padhraic Smyth; Mark Steyvers", "abstract": "Techniques such as probabilistic topic models and latent-semantic indexing have been shown to be broadly useful at automatically extracting the topical or seman- tic content of documents, or more generally for dimension-reduction of sparse count data. These types of models and algorithms can be viewed as generating an abstraction from the words in a document to a lower-dimensional latent variable representation that captures what the document is generally about beyond the spe- ci\ufb01c words it contains. In this paper we propose a new probabilistic model that tempers this approach by representing each document as a combination of (a) a background distribution over common words, (b) a mixture distribution over gen- eral topics, and (c) a distribution over words that are treated as being speci\ufb01c to that document. 
We illustrate how this model can be used for information retrieval by matching documents both at a general topic level and at a speci\ufb01c word level, providing an advantage over techniques that only match documents at a general level (such as topic models or latent-sematic indexing) or that only match docu- ments at the speci\ufb01c word level (such as TF-IDF).", "bibtex": "@inproceedings{NIPS2006_ec47a5de,\n author = {Chemudugunta, Chaitanya and Smyth, Padhraic and Steyvers, Mark},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Modeling General and Specific Aspects of Documents with a Probabilistic Topic Model},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/ec47a5de1ebd60f559fee4afd739d59b-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/ec47a5de1ebd60f559fee4afd739d59b-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/ec47a5de1ebd60f559fee4afd739d59b-Metadata.json", "review": "", "metareview": "", "pdf_size": 84383, "gs_citation": 286, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16724394917690913599&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 21, "aff": "Department of Computer Science, University of California, Irvine; Department of Computer Science, University of California, Irvine; Department of Cognitive Sciences, University of California, Irvine", "aff_domain": "ics.uci.edu;ics.uci.edu;uci.edu", "email": "ics.uci.edu;ics.uci.edu;uci.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of California, Irvine", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.uci.edu", "aff_unique_abbr": "UCI", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": 
"Irvine", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "f7b3ed4561", "title": "Modeling Human Motion Using Binary Latent Variables", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/1091660f3dff84fd648efe31391c5524-Abstract.html", "author": "Graham W. Taylor; Geoffrey E. Hinton; Sam T. Roweis", "abstract": "We propose a non-linear generative model for human motion data that uses an undirected model with binary latent variables and real-valued \"visible\" variables that represent joint angles. The latent and visible variables at each time step receive directed connections from the visible variables at the last few time-steps. Such an architecture makes on-line inference efficient and allows us to use a simple approximate learning procedure. After training, the model finds a single set of parameters that simultaneously capture several different kinds of motion. We demonstrate the power of our approach by synthesizing various motion sequences and by performing on-line filling in of data lost during motion capture. Website: http://www.cs.toronto.edu/gwtaylor/publications/nips2006mhmublv/", "bibtex": "@inproceedings{NIPS2006_1091660f,\n author = {Taylor, Graham W and Hinton, Geoffrey E and Roweis, Sam},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Modeling Human Motion Using Binary Latent Variables},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/1091660f3dff84fd648efe31391c5524-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/1091660f3dff84fd648efe31391c5524-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/1091660f3dff84fd648efe31391c5524-Metadata.json", "review": "", "metareview": "", "pdf_size": 222927, "gs_citation": 1060, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16491311537166390878&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 16, "aff": "Dept. of Computer Science, University of Toronto; Dept. of Computer Science, University of Toronto; Dept. of Computer Science, University of Toronto", "aff_domain": "cs.toronto.edu;cs.toronto.edu;cs.toronto.edu", "email": "cs.toronto.edu;cs.toronto.edu;cs.toronto.edu", "github": "", "project": "http://www.cs.toronto.edu/~gwtaylor/publications/nips2006mhmublv/", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of Toronto", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.utoronto.ca", "aff_unique_abbr": "U of T", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Toronto", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Canada" }, { "id": "bd369a32a0", "title": "Modelling transcriptional regulation using Gaussian Processes", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/f42c7f9c8aeab0fc412031e192e2119d-Abstract.html", "author": "Neil D. Lawrence; Guido Sanguinetti; Magnus Rattray", "abstract": "Modelling the dynamics of transcriptional processes in the cell requires the knowledge of a number of key biological quantities. 
While some of them are relatively easy to measure, such as mRNA decay rates and mRNA abundance levels, it is still very hard to measure the active concentration levels of the transcription factor proteins that drive the process and the sensitivity of target genes to these concentrations. In this paper we show how these quantities for a given transcription factor can be inferred from gene expression levels of a set of known target genes. We treat the protein concentration as a latent function with a Gaussian process prior, and include the sensitivities, mRNA decay rates and baseline expression levels as hyperparameters. We apply this procedure to a human leukemia dataset, focusing on the tumour repressor p53 and obtaining results in good accordance with recent biological studies.", "bibtex": "@inproceedings{NIPS2006_f42c7f9c,\n author = {Lawrence, Neil and Sanguinetti, Guido and Rattray, Magnus},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Modelling transcriptional regulation using Gaussian Processes},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/f42c7f9c8aeab0fc412031e192e2119d-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/f42c7f9c8aeab0fc412031e192e2119d-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/f42c7f9c8aeab0fc412031e192e2119d-Metadata.json", "review": "", "metareview": "", "pdf_size": 177738, "gs_citation": 143, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10796734888621817989&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 17, "aff": "School of Computer Science, University of Manchester, U.K.; Department of Computer Science, University of Sheffield, U.K.; School of Computer Science, University of Manchester, U.K.", "aff_domain": "cs.man.ac.uk;dcs.shef.ac.uk;cs.man.ac.uk", "email": "cs.man.ac.uk;dcs.shef.ac.uk;cs.man.ac.uk", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;0", "aff_unique_norm": "University of Manchester;University of Sheffield", "aff_unique_dep": "School of Computer Science;Department of Computer Science", "aff_unique_url": "https://www.manchester.ac.uk;https://www.sheffield.ac.uk", "aff_unique_abbr": "UoM;Sheffield", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Manchester;", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United Kingdom" }, { "id": "d07021b21c", "title": "Multi-Instance Multi-Label Learning with Application to Scene Classification", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/8e489b4966fe8f703b5be647f1cbae63-Abstract.html", "author": "Zhi-Li Zhang; Min-ling Zhang", "abstract": "In this paper, we formalize multi-instance multi-label learning, where each training example is associated with not only multiple instances but also multiple class labels. 
Such a problem can occur in many real-world tasks, e.g. an image usually contains multiple patches each of which can be described by a feature vector, and the image can belong to multiple categories since its semantics can be recognized in different ways. We analyze the relationship between multi-instance multi-label learning and the learning frameworks of traditional supervised learning, multi-instance learning and multi-label learning. Then, we propose the MIMLBOOST and MIMLSVM algorithms which achieve good performance in an application to scene classification.", "bibtex": "@inproceedings{NIPS2006_8e489b49,\n author = {Zhang, Zhi-Li and Zhang, Min-ling},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Multi-Instance Multi-Label Learning with Application to Scene Classification},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/8e489b4966fe8f703b5be647f1cbae63-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/8e489b4966fe8f703b5be647f1cbae63-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/8e489b4966fe8f703b5be647f1cbae63-Metadata.json", "review": "", "metareview": "", "pdf_size": 121746, "gs_citation": 652, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4206292260723238574&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 16, "aff": "National Laboratory for Novel Software Technology, Nanjing University, Nanjing 210093, China; National Laboratory for Novel Software Technology, Nanjing University, Nanjing 210093, China", "aff_domain": "lamda.nju.edu.cn;lamda.nju.edu.cn", "email": "lamda.nju.edu.cn;lamda.nju.edu.cn", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Nanjing University", "aff_unique_dep": "National 
Laboratory for Novel Software Technology", "aff_unique_url": "http://www.nju.edu.cn", "aff_unique_abbr": "Nanjing U", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Nanjing", "aff_country_unique_index": "0;0", "aff_country_unique": "China" }, { "id": "6d84994a60", "title": "Multi-Robot Negotiation: Approximating the Set of Subgame Perfect Equilibria in General-Sum Stochastic Games", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/ada5e0b63ef60e2239fa8abdd4aa2f8e-Abstract.html", "author": "Chris Murray; Geoffrey J. Gordon", "abstract": "In real-world planning problems, we must reason not only about our own goals, but about the goals of other agents with which we may interact. Often these agents' goals are neither completely aligned with our own nor directly opposed to them. Instead there are opportunities for cooperation: by joining forces, the agents can all achieve higher utility than they could separately. But, in order to cooperate, the agents must negotiate a mutually acceptable plan from among the many possible ones, and each agent must trust that the others will follow their parts of the deal. Research in multi-agent planning has often avoided the problem of making sure that all agents have an incentive to follow a proposed joint plan. On the other hand, while game theoretic algorithms handle incentives correctly, they often don't scale to large planning problems. In this paper we attempt to bridge the gap between these two lines of research: we present an efficient game-theoretic approximate planning algorithm, along with a negotiation protocol which encourages agents to compute and agree on joint plans that are fair and optimal in a sense defined below. We demonstrate our algorithm and protocol on two simple robotic planning problems.1", "bibtex": "@inproceedings{NIPS2006_ada5e0b6,\n author = {Murray, Chris and Gordon, Geoffrey J},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. 
Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Multi-Robot Negotiation: Approximating the Set of Subgame Perfect Equilibria in General-Sum Stochastic Games},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/ada5e0b63ef60e2239fa8abdd4aa2f8e-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/ada5e0b63ef60e2239fa8abdd4aa2f8e-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/ada5e0b63ef60e2239fa8abdd4aa2f8e-Metadata.json", "review": "", "metareview": "", "pdf_size": 169129, "gs_citation": 25, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14361884477969297344&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 18, "aff": "Carnegie Mellon University; Carnegie Mellon University", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Carnegie Mellon University", "aff_unique_dep": "", "aff_unique_url": "https://www.cmu.edu", "aff_unique_abbr": "CMU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "4840c486ca", "title": "Multi-Task Feature Learning", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/0afa92fc0f8a9cf051bf2961b06ac56b-Abstract.html", "author": "Andreas Argyriou; Theodoros Evgeniou; Massimiliano Pontil", "abstract": "We present a method for learning a low-dimensional representation which is shared across a set of multiple related tasks. The method builds upon the well- known 1-norm regularization problem using a new regularizer which controls the number of learned features common for all the tasks. We show that this problem is equivalent to a convex optimization problem and develop an iterative algorithm for solving it. 
The algorithm has a simple interpretation: it alternately performs a supervised and an unsupervised step, where in the latter step we learn common-across-tasks representations and in the former step we learn task-specific functions using these representations. We report experiments on a simulated and a real data set which demonstrate that the proposed method dramatically improves the performance relative to learning each task independently. Our algorithm can also be used, as a special case, to simply select \u2013 not learn \u2013 a few common features across the tasks.", "bibtex": "@inproceedings{NIPS2006_0afa92fc,\n author = {Argyriou, Andreas and Evgeniou, Theodoros and Pontil, Massimiliano},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Multi-Task Feature Learning},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/0afa92fc0f8a9cf051bf2961b06ac56b-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/0afa92fc0f8a9cf051bf2961b06ac56b-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/0afa92fc0f8a9cf051bf2961b06ac56b-Metadata.json", "review": "", "metareview": "", "pdf_size": 92152, "gs_citation": 1903, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2974480211453267567&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 18, "aff": "Department of Computer Science, University College London; Technology Management and Decision Sciences, INSEAD; Department of Computer Science, University College London", "aff_domain": "cs.ucl.ac.uk;insead.edu;cs.ucl.ac.uk", "email": "cs.ucl.ac.uk;insead.edu;cs.ucl.ac.uk", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;0", "aff_unique_norm": "University College London;INSEAD", "aff_unique_dep": "Department of 
Computer Science;Technology Management and Decision Sciences", "aff_unique_url": "https://www.ucl.ac.uk;https://www.insead.edu", "aff_unique_abbr": "UCL;INSEAD", "aff_campus_unique_index": "0;0", "aff_campus_unique": "London;", "aff_country_unique_index": "0;1;0", "aff_country_unique": "United Kingdom;France" }, { "id": "5394153b62", "title": "Multi-dynamic Bayesian Networks", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/e4a93f0332b2519177ed55741ea4e5e7-Abstract.html", "author": "Karim Filali; Jeff A. Bilmes", "abstract": "We present a generalization of dynamic Bayesian networks to concisely describe complex probability distributions such as in problems with multiple interacting variable-length streams of random variables. Our framewor k incorporates recent graphical model constructs to account for existence uncert ainty, value-specific independence, aggregation relationships, and local and global constraints, while still retaining a Bayesian network interpretation and effic ient inference and learning techniques. We introduce one such general technique, which is an extension of Value Elimination, a backtracking search inference algo rithm. Multi-dynamic Bayesian networks are motivated by our work on Statistical Machine Translation (MT). We present results on MT word alignment in support of our claim that MDBNs are a promising framework for the rapid prototyping of new MT systems.", "bibtex": "@inproceedings{NIPS2006_e4a93f03,\n author = {Filali, Karim and Bilmes, Jeff A},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Multi-dynamic Bayesian Networks},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/e4a93f0332b2519177ed55741ea4e5e7-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/e4a93f0332b2519177ed55741ea4e5e7-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/e4a93f0332b2519177ed55741ea4e5e7-Metadata.json", "review": "", "metareview": "", "pdf_size": 112796, "gs_citation": 7, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16713305807684730927&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": ";", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster" }, { "id": "21f5d4f0f1", "title": "Multiple Instance Learning for Computer Aided Diagnosis", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/7c78335a8924215ea5c22fda1aac7b75-Abstract.html", "author": "Murat Dundar; Balaji Krishnapuram; R. B. Rao; Glenn M. Fung", "abstract": "Many computer aided diagnosis (CAD) problems can be best modelled as a multiple-instance learning (MIL) problem with unbalanced data: i.e. , the training data typically consists of a few positive bags, and a very large number of negative instances. Existing MIL algorithms are much too computationally expensive for these datasets. We describe CH, a framework for learning a Convex Hull representation of multiple instances that is significantly faster than existing MIL algorithms. Our CH framework applies to any standard hyperplane-based learning algorithm, and for some algorithms, is guaranteed to find the global optimal solution. Experimental studies on two different CAD applications further demonstrate that the proposed algorithm significantly improves diagnostic accuracy when compared to both MIL and traditional classifiers. 
Although not designed for standard MIL problems (which have both positive and negative bags and relatively balanced datasets), comparisons against other MIL methods on benchmark problems also indicate that the proposed method is competitive with the state-of-the-art.", "bibtex": "@inproceedings{NIPS2006_7c78335a,\n author = {Dundar, Murat and Krishnapuram, Balaji and Rao, R. and Fung, Glenn},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Multiple Instance Learning for Computer Aided Diagnosis},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/7c78335a8924215ea5c22fda1aac7b75-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/7c78335a8924215ea5c22fda1aac7b75-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/7c78335a8924215ea5c22fda1aac7b75-Metadata.json", "review": "", "metareview": "", "pdf_size": 81657, "gs_citation": 147, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4926676189150168449&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 16, "aff": "CAD & Knowledge Solutions, Siemens Medical Solutions USA, Malvern, PA 19355; CAD & Knowledge Solutions, Siemens Medical Solutions USA, Malvern, PA 19355; CAD & Knowledge Solutions, Siemens Medical Solutions USA, Malvern, PA 19355; CAD & Knowledge Solutions, Siemens Medical Solutions USA, Malvern, PA 19355", "aff_domain": "siemens.com;siemens.com;siemens.com;siemens.com", "email": "siemens.com;siemens.com;siemens.com;siemens.com", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Siemens Medical Solutions USA", "aff_unique_dep": "CAD & Knowledge Solutions", "aff_unique_url": "https://www.siemens-healthineers.com", "aff_unique_abbr": "SMS", "aff_campus_unique_index": "0;0;0;0", 
"aff_campus_unique": "Malvern", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "id": "d93cb5ec9f", "title": "Multiple timescales and uncertainty in motor adaptation", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/0ebf197205c00fc6e0aac7261a8c1bdc-Abstract.html", "author": "Konrad P. K\u00f6rding; Joshua B. Tenenbaum; Reza Shadmehr", "abstract": "Our motor system changes due to causes that span multiple timescales. For example, muscle response can change because of fatigue, a condition where the disturbance has a fast timescale or because of disease where the disturbance is much slower. Here we hypothesize that the nervous system adapts in a way that reflects the temporal properties of such potential disturbances. According to a Bayesian formulation of this idea, movement error results in a credit assignment problem: what timescale is responsible for this disturbance? The adaptation schedule influences the behavior of the optimal learner, changing estimates at different timescales as well as the uncertainty. A system that adapts in this way predicts many properties observed in saccadic gain adaptation. It well predicts the timecourses of motor adaptation in cases of partial sensory deprivation and reversals of the adaptation direction.", "bibtex": "@inproceedings{NIPS2006_0ebf1972,\n author = {K\\\"{o}rding, Konrad and Tenenbaum, Joshua and Shadmehr, Reza},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Multiple timescales and uncertainty in motor adaptation},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/0ebf197205c00fc6e0aac7261a8c1bdc-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/0ebf197205c00fc6e0aac7261a8c1bdc-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/0ebf197205c00fc6e0aac7261a8c1bdc-Metadata.json", "review": "", "metareview": "", "pdf_size": 1493361, "gs_citation": 12, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11062372977979269491&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 14, "aff": "Rehabilitation Institute of Chicago + Northwestern University, Dept. PM&R; Massachusetts Institute of Technology; Johns Hopkins University", "aff_domain": "koerding.com;mit.edu;bme.jhu.edu", "email": "koerding.com;mit.edu;bme.jhu.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0+1;2;3", "aff_unique_norm": "Rehabilitation Institute of Chicago;Northwestern University;Massachusetts Institute of Technology;Johns Hopkins University", "aff_unique_dep": ";Dept. PM&R;;", "aff_unique_url": "https://www.ric.org/;https://www.northwestern.edu;https://web.mit.edu;https://www.jhu.edu", "aff_unique_abbr": ";NU;MIT;JHU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0+0;0;0", "aff_country_unique": "United States" }, { "id": "2e7ad9a774", "title": "Mutagenetic tree Fisher kernel improves prediction of HIV drug resistance from viral genotype", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/9edcc1391c208ba0b503fe9a22574251-Abstract.html", "author": "Tobias Sing; Niko Beerenwinkel", "abstract": "Starting with the work of Jaakkola and Haussler, a variety of approaches have been proposed for coupling domain-specific generative models with statistical learning methods. 
The link is established by a kernel function which provides a similarity measure based inherently on the underlying model. In computational biology, the full promise of this framework has rarely ever been exploited, as most kernels are derived from very generic models, such as sequence profiles or hidden Markov models. Here, we introduce the MTreeMix kernel, which is based on a generative model tailored to the underlying biological mechanism. Specifically, the kernel quantifies the similarity of evolutionary escape from antiviral drug pressure between two viral sequence samples. We compare this novel kernel to a standard, evolution-agnostic amino acid encoding in the prediction of HIV drug resistance from genotype, using support vector regression. The results show significant improvements in predictive performance across 17 anti-HIV drugs. Thus, in our study, the generative-discriminative paradigm is key to bridging the gap between population genetic modeling and clinical decision making.", "bibtex": "@inproceedings{NIPS2006_9edcc139,\n author = {Sing, Tobias and Beerenwinkel, Niko},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Mutagenetic tree Fisher kernel improves prediction of HIV drug resistance from viral genotype},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/9edcc1391c208ba0b503fe9a22574251-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/9edcc1391c208ba0b503fe9a22574251-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/9edcc1391c208ba0b503fe9a22574251-Metadata.json", "review": "", "metareview": "", "pdf_size": 88496, "gs_citation": 11, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12722604893558535160&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "Department of Computational Biology, Max Planck Institute for Informatics, Saarbr\u00fccken, Germany; Department of Mathematics, University of California, Berkeley, CA 94720 + Program for Evolutionary Dynamics, Harvard University, Cambridge, MA 02138", "aff_domain": "mpi-sb.mpg.de;fas.harvard.edu", "email": "mpi-sb.mpg.de;fas.harvard.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1+2", "aff_unique_norm": "Max Planck Institute for Informatics;University of California, Berkeley;Harvard University", "aff_unique_dep": "Department of Computational Biology;Department of Mathematics;Program for Evolutionary Dynamics", "aff_unique_url": "https://mpi-inf.mpg.de;https://www.berkeley.edu;https://www.harvard.edu", "aff_unique_abbr": "MPII;UC Berkeley;Harvard", "aff_campus_unique_index": "0;1+2", "aff_campus_unique": "Saarbr\u00fccken;Berkeley;Cambridge", "aff_country_unique_index": "0;1+1", "aff_country_unique": "Germany;United States" }, { "id": "9eee317c62", "title": "Natural Actor-Critic for Road Traffic Optimisation", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/78bc62d08a9a0b9b0b9c0ad339ef82d3-Abstract.html", "author": "Silvia Richter; Douglas Aberdeen; 
Jin Yu", "abstract": "Current road-traffic optimisation practice around the world is a combination of hand tuned policies with a small degree of automatic adaption. Even state-ofthe-art research controllers need good models of the road traffic, which cannot be obtained directly from existing sensors. We use a policy-gradient reinforcement learning approach to directly optimise the traffic signals, mapping currently deployed sensor observations to control signals. Our trained controllers are (theoretically) compatible with the traffic system used in Sydney and many other cities around the world. We apply two policy-gradient methods: (1) the recent natural actor-critic algorithm, and (2) a vanilla policy-gradient algorithm for comparison. Along the way we extend natural-actor critic approaches to work for distributed and online infinite-horizon problems.", "bibtex": "@inproceedings{NIPS2006_78bc62d0,\n author = {Richter, Silvia and Aberdeen, Douglas and Yu, Jin},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Natural Actor-Critic for Road Traffic Optimisation},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/78bc62d08a9a0b9b0b9c0ad339ef82d3-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/78bc62d08a9a0b9b0b9c0ad339ef82d3-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/78bc62d08a9a0b9b0b9c0ad339ef82d3-Metadata.json", "review": "", "metareview": "", "pdf_size": 233586, "gs_citation": 112, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11399584739596254577&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 14, "aff": "Albert-Ludwigs-Universit\u00e4t, Freiburg, Germany; National ICT Australia, Canberra, Australia; National ICT Australia, Canberra, Australia.", "aff_domain": "web.de;anu.edu.au;anu.edu.au", "email": "web.de;anu.edu.au;anu.edu.au", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;1", "aff_unique_norm": "Albert-Ludwigs-Universit\u00e4t Freiburg;National ICT Australia", "aff_unique_dep": ";", "aff_unique_url": "https://www.uni-freiburg.de;https://www.nicta.com.au", "aff_unique_abbr": "ALU Freiburg;NICTA", "aff_campus_unique_index": "0;1;1", "aff_campus_unique": "Freiburg;Canberra", "aff_country_unique_index": "0;1;1", "aff_country_unique": "Germany;Australia" }, { "id": "2a9799075f", "title": "Near-Uniform Sampling of Combinatorial Spaces Using XOR Constraints", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/4110a1994471c595f7583ef1b74ba4cb-Abstract.html", "author": "Carla P. Gomes; Ashish Sabharwal; Bart Selman", "abstract": "We propose a new technique for sampling the solutions of combinatorial problems in a near-uniform manner. We focus on problems specified as a Boolean formula, i.e., on SAT instances. 
Sampling for SAT problems has been shown to have interesting connections with probabilistic reasoning, making practical sampling algorithms for SAT highly desirable. The best current approaches are based on Markov Chain Monte Carlo methods, which have some practical limitations. Our approach exploits combinatorial properties of random parity (X O R) constraints to prune away solutions near-uniformly. The final sample is identified amongst the remaining ones using a state-of-the-art SAT solver. The resulting sampling distribution is provably arbitrarily close to uniform. Our experiments show that our technique achieves a significantly better sampling quality than the best alternative.", "bibtex": "@inproceedings{NIPS2006_4110a199,\n author = {Gomes, Carla P and Sabharwal, Ashish and Selman, Bart},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Near-Uniform Sampling of Combinatorial Spaces Using XOR Constraints},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/4110a1994471c595f7583ef1b74ba4cb-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/4110a1994471c595f7583ef1b74ba4cb-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/4110a1994471c595f7583ef1b74ba4cb-Metadata.json", "review": "", "metareview": "", "pdf_size": 186248, "gs_citation": 157, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7016007152449145268&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 17, "aff": "Department of Computer Science, Cornell University, Ithaca NY 14853-7501, USA; Department of Computer Science, Cornell University, Ithaca NY 14853-7501, USA; Department of Computer Science, Cornell University, Ithaca NY 14853-7501, USA", "aff_domain": "cs.cornell.edu;cs.cornell.edu;cs.cornell.edu", "email": 
"cs.cornell.edu;cs.cornell.edu;cs.cornell.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "Cornell University", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.cornell.edu", "aff_unique_abbr": "Cornell", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Ithaca", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "0853b8b5a2", "title": "Neurophysiological Evidence of Cooperative Mechanisms for Stereo Computation", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/b1f62fa99de9f27a048344d55c5ef7a6-Abstract.html", "author": "Jason M. Samonds; Brian R. Potetz; Tai S. Lee", "abstract": "Although there has been substantial progress in understanding the neuro-\nphysiological mechanisms of stereopsis, how neurons interact in a network \nduring stereo computation remains unclear. Computational models on \nstereopsis suggest local competition and long-range cooperation are impor-\ntant for resolving ambiguity during stereo matching. To test these predic-\ntions, we simultaneously recorded from multiple neurons in V1 of awake, \nbehaving macaques while presenting surfaces of different depths rendered \nin dynamic random dot stereograms. We found that the interaction between \npairs of neurons was a function of similarity in receptive fields, as well as \nof the input stimulus. Neurons coding the same depth experienced common \ninhibition early in their responses for stimuli presented at their non-\npreferred disparities. They experienced mutual facilitation later in their re-\nsponses for stimulation at their preferred disparity. 
These findings are con-\nsistent with a local competition mechanism that first removes gross mis-\nmatches, and a global cooperative mechanism that further refines depth es-\ntimates.", "bibtex": "@inproceedings{NIPS2006_b1f62fa9,\n author = {Samonds, Jason and Potetz, Brian and Lee, Tai},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Neurophysiological Evidence of Cooperative Mechanisms for Stereo Computation},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/b1f62fa99de9f27a048344d55c5ef7a6-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/b1f62fa99de9f27a048344d55c5ef7a6-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/b1f62fa99de9f27a048344d55c5ef7a6-Metadata.json", "review": "", "metareview": "", "pdf_size": 248300, "gs_citation": 2, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11948932130607610876&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 16, "aff": "Center for the Neural Basis of Cognition (CNBC) + CNBC and Computer Science Department; CNBC and Computer Science Department; CNBC and Computer Science Department", "aff_domain": "cnbc.cmu.edu;cs.cmu.edu;cnbc.cmu.edu", "email": "cnbc.cmu.edu;cs.cmu.edu;cnbc.cmu.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0+1;1;1", "aff_unique_norm": "Center for the Neural Basis of Cognition;CNBC", "aff_unique_dep": "Neural Basis of Cognition;Computer Science Department", "aff_unique_url": ";", "aff_unique_abbr": "CNBC;", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0+1;1;1", "aff_country_unique": "United States;China" }, { "id": "1355a587c1", "title": "No-regret Algorithms for Online Convex Programs", "site": 
"https://papers.nips.cc/paper_files/paper/2006/hash/f14bc21be7eaeed046fed206a492e652-Abstract.html", "author": "Geoffrey J. Gordon", "abstract": "Online convex programming has recently emerged as a powerful primitive for designing machine learning algorithms. For example, OCP can be used for learning a linear classifier, dynamically rebalancing a binary search tree, finding the shortest path in a graph with unknown edge lengths, solving a structured classification problem, or finding a good strategy in an extensive-form game. Several researchers have designed no-regret algorithms for OCP. But, compared to algorithms for special cases of OCP such as learning from expert advice, these algorithms are not very numerous or flexible. In learning from expert advice, one tool which has proved particularly valuable is the correspondence between no-regret algorithms and convex potential functions: by reasoning about these potential functions, researchers have designed algorithms with a wide variety of useful guarantees such as good performance when the target hypothesis is sparse. Until now, there has been no such recipe for the more general OCP problem, and therefore no ability to tune OCP algorithms to take advantage of properties of the problem or data. In this paper we derive a new class of no-regret learning algorithms for OCP. These Lagrangian Hedging algorithms are based on a general class of potential functions, and are a direct generalization of known learning rules like weighted majority and external-regret matching. In addition to proving regret bounds, we demonstrate our algorithms learning to play one-card poker.", "bibtex": "@inproceedings{NIPS2006_f14bc21b,\n author = {Gordon, Geoffrey J},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {No-regret Algorithms for Online Convex Programs},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/f14bc21be7eaeed046fed206a492e652-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/f14bc21be7eaeed046fed206a492e652-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/f14bc21be7eaeed046fed206a492e652-Metadata.json", "review": "", "metareview": "", "pdf_size": 83480, "gs_citation": 64, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10092866509574202500&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "Department of Machine Learning, Carnegie Mellon University", "aff_domain": "cs.cmu.edu", "email": "cs.cmu.edu", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "Carnegie Mellon University", "aff_unique_dep": "Department of Machine Learning", "aff_unique_url": "https://www.cmu.edu", "aff_unique_abbr": "CMU", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "id": "4ca04b0365", "title": "Non-rigid point set registration: Coherent Point Drift", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/3b2d8f129ae2f408f2153cd9ce663043-Abstract.html", "author": "Andriy Myronenko; Xubo Song; Miguel \u00c1. Carreira-Perpi\u00f1\u00e1n", "abstract": "We introduce Coherent Point Drift (CPD), a novel probabilistic method for nonrigid registration of point sets. The registration is treated as a Maximum Likelihood (ML) estimation problem with motion coherence constraint over the velocity field such that one point set moves coherently to align with the second set. We formulate the motion coherence constraint and derive a solution of regularized ML estimation through the variational approach, which leads to an elegant kernel form. 
We also derive the EM algorithm for the penalized ML optimization with deterministic annealing. The CPD method simultaneously finds both the non-rigid transformation and the correspondence between two point sets without making any prior assumption of the transformation model except that of motion coherence. This method can estimate complex non-linear non-rigid transformations, and is shown to be accurate on 2D and 3D examples and robust in the presence of outliers and missing points.", "bibtex": "@inproceedings{NIPS2006_3b2d8f12,\n author = {Myronenko, Andriy and Song, Xubo and Carreira-Perpi\\~{n}\\'{a}n, Miguel},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Non-rigid point set registration: Coherent Point Drift},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/3b2d8f129ae2f408f2153cd9ce663043-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/3b2d8f129ae2f408f2153cd9ce663043-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/3b2d8f129ae2f408f2153cd9ce663043-Metadata.json", "review": "", "metareview": "", "pdf_size": 357046, "gs_citation": 489, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16142974431381304630&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 21, "aff": "Department of Computer Science and Electrical Engineering, OGI School of Science and Engineering, Oregon Health and Science University; Department of Computer Science and Electrical Engineering, OGI School of Science and Engineering, Oregon Health and Science University; Department of Computer Science and Electrical Engineering, OGI School of Science and Engineering, Oregon Health and Science University", "aff_domain": "csee.ogi.edu;csee.ogi.edu;csee.ogi.edu", "email": "csee.ogi.edu;csee.ogi.edu;csee.ogi.edu", "github": "", "project": 
"", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "Oregon Health and Science University", "aff_unique_dep": "Department of Computer Science and Electrical Engineering", "aff_unique_url": "https://www.ohsu.edu", "aff_unique_abbr": "OHSU", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "OGI School of Science and Engineering", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "920534c915", "title": "Nonlinear physically-based models for decoding motor-cortical population activity", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/ef72d53990bc4805684c9b61fa64a102-Abstract.html", "author": "Gregory Shakhnarovich; Sung-phil Kim; Michael J. Black", "abstract": "Neural motor prostheses (NMPs) require the accurate decoding of motor cortical population activity for the control of an arti\ufb01cial motor system. Previous work on cortical decoding for NMPs has focused on the recovery of hand kinematics. Human NMPs however may require the control of computer cursors or robotic devices with very different physical and dynamical properties. Here we show that the \ufb01ring rates of cells in the primary motor cortex of non-human primates can be used to control the parameters of an arti\ufb01cial physical system exhibiting realistic dynamics. The model represents 2D hand motion in terms of a point mass connected to a system of idealized springs. The nonlinear spring coef\ufb01cients are estimated from the \ufb01ring rates of neurons in the motor cortex. We evaluate linear and a nonlinear decoding algorithms using neural recordings from two monkeys performing two different tasks. We found that the decoded spring coef\ufb01cients produced accurate hand trajectories compared with state-of-the-art methods for direct decoding of hand kinematics. 
Furthermore, using a physically-based system produced decoded movements that were more \u201cnatural\u201d in that their frequency spectrum more closely matched that of natural hand movements.", "bibtex": "@inproceedings{NIPS2006_ef72d539,\n author = {Shakhnarovich, Gregory and Kim, Sung-phil and Black, Michael},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Nonlinear physically-based models for decoding motor-cortical population activity},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/ef72d53990bc4805684c9b61fa64a102-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/ef72d53990bc4805684c9b61fa64a102-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/ef72d53990bc4805684c9b61fa64a102-Metadata.json", "review": "", "metareview": "", "pdf_size": 223364, "gs_citation": 7, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18172775593286915786&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "Department of Computer Science, Brown University; Department of Computer Science, Brown University; Department of Computer Science, Brown University", "aff_domain": "cs.brown.edu;cs.brown.edu;cs.brown.edu", "email": "cs.brown.edu;cs.brown.edu;cs.brown.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "Brown University", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.brown.edu", "aff_unique_abbr": "Brown", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "c82930a4fd", "title": "Nonnegative Sparse PCA", "site": 
"https://papers.nips.cc/paper_files/paper/2006/hash/75b9b6dc7fe44437c6e0a69fd863dbab-Abstract.html", "author": "Ron Zass; Amnon Shashua", "abstract": "We describe a nonnegative variant of the \"Sparse PCA\" problem. The goal is to create a low dimensional representation from a collection of points which on the one hand maximizes the variance of the projected points and on the other uses only parts of the original coordinates, and thereby creating a sparse representation. What distinguishes our problem from other Sparse PCA formulations is that the projection involves only nonnegative weights of the original coordinates -- a desired quality in various fields, including economics, bioinformatics and computer vision. Adding nonnegativity contributes to sparseness, where it enforces a partitioning of the original coordinates among the new axes. We describe a simple yet efficient iterative coordinate-descent type of scheme which converges to a local optimum of our optimization criteria, giving good results on large real world datasets.", "bibtex": "@inproceedings{NIPS2006_75b9b6dc,\n author = {Zass, Ron and Shashua, Amnon},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Nonnegative Sparse PCA},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/75b9b6dc7fe44437c6e0a69fd863dbab-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/75b9b6dc7fe44437c6e0a69fd863dbab-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/75b9b6dc7fe44437c6e0a69fd863dbab-Metadata.json", "review": "", "metareview": "", "pdf_size": 214123, "gs_citation": 262, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3876830211379583647&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "School of Engineering and Computer Science, Hebrew University of Jerusalem, Jerusalem 91904, Israel; School of Engineering and Computer Science, Hebrew University of Jerusalem, Jerusalem 91904, Israel", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Hebrew University of Jerusalem", "aff_unique_dep": "School of Engineering and Computer Science", "aff_unique_url": "https://www.huji.ac.il", "aff_unique_abbr": "HUJI", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Jerusalem", "aff_country_unique_index": "0;0", "aff_country_unique": "Israel" }, { "id": "27c6b11898", "title": "On Transductive Regression", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/639d79cc857a6c76c2723b7e014fccb0-Abstract.html", "author": "Corinna Cortes; Mehryar Mohri", "abstract": "In many modern large-scale learning applications, the amount of unlabeled data far exceeds that of labeled data. A common instance of this problem is the transductive setting where the unlabeled test points are known to the learning algorithm. This paper presents a study of regression problems in that setting. 
It presents explicit VC-dimension error bounds for transductive regression that hold for all bounded loss functions and coincide with the tight classification bounds of Vapnik when applied to classification. It also presents a new transductive regression algorithm inspired by our bound that admits a primal and kernelized closed-form solution and deals efficiently with large amounts of unlabeled data. The algorithm exploits the position of unlabeled points to locally estimate their labels and then uses a global optimization to ensure robust predictions. Our study also includes the results of experiments with several publicly available regression data sets with up to 20,000 unlabeled examples. The comparison with other transductive regression algorithms shows that it performs well and that it can scale to large data sets.", "bibtex": "@inproceedings{NIPS2006_639d79cc,\n author = {Cortes, Corinna and Mohri, Mehryar},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {On Transductive Regression},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/639d79cc857a6c76c2723b7e014fccb0-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/639d79cc857a6c76c2723b7e014fccb0-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/639d79cc857a6c76c2723b7e014fccb0-Metadata.json", "review": "", "metareview": "", "pdf_size": 97078, "gs_citation": 163, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2494424815645865467&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 14, "aff": "Google Research; Courant Institute of Mathematical Sciences and Google Research", "aff_domain": "google.com;cs.nyu.edu", "email": "google.com;cs.nyu.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "Google;Courant Institute of Mathematical Sciences", "aff_unique_dep": "Google Research;Mathematical Sciences", "aff_unique_url": "https://research.google;https://courant.nyu.edu", "aff_unique_abbr": "Google Research;Courant", "aff_campus_unique_index": "0", "aff_campus_unique": "Mountain View;", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "0805700453", "title": "On the Relation Between Low Density Separation, Spectral Clustering and Graph Cuts", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/e93028bdc1aacdfb3687181f2031765d-Abstract.html", "author": "Hariharan Narayanan; Mikhail Belkin; Partha Niyogi", "abstract": "One of the intuitions underlying many graph-based methods for clustering and semi-supervised learning, is that class or cluster boundaries pass through areas of low probability density. In this paper we provide some formal analysis of that notion for a probability distribution. 
We introduce a notion of weighted boundary volume, which measures the length of the class/cluster boundary weighted by the density of the underlying probability distribution. We show that sizes of the cuts of certain commonly used data adjacency graphs converge to this continuous weighted volume of the boundary.", "bibtex": "@inproceedings{NIPS2006_e93028bd,\n author = {Narayanan, Hariharan and Belkin, Mikhail and Niyogi, Partha},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {On the Relation Between Low Density Separation, Spectral Clustering and Graph Cuts},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/e93028bdc1aacdfb3687181f2031765d-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/e93028bdc1aacdfb3687181f2031765d-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/e93028bdc1aacdfb3687181f2031765d-Metadata.json", "review": "", "metareview": "", "pdf_size": 233549, "gs_citation": 74, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15289101399458091237&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 13, "aff": "Department of Computer Science, University of Chicago; Department of Computer Science and Engineering, The Ohio State University; Department of Computer Science, University of Chicago", "aff_domain": "cs.uchicago.edu;cse.ohio-state.edu;cs.uchicago.edu", "email": "cs.uchicago.edu;cse.ohio-state.edu;cs.uchicago.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;0", "aff_unique_norm": "University of Chicago;Ohio State University", "aff_unique_dep": "Department of Computer Science;Department of Computer Science and Engineering", "aff_unique_url": "https://www.uchicago.edu;https://www.osu.edu", "aff_unique_abbr": "UChicago;OSU", 
"aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "cabccc8492", "title": "Online Classification for Complex Problems Using Simultaneous Projections", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/1c67df9e0a5cfefa030b853983324004-Abstract.html", "author": "Yonatan Amit; Shai Shalev-shwartz; Yoram Singer", "abstract": "We describe and analyze an algorithmic framework for online classi\ufb01cation where each online trial consists of multiple prediction tasks that are tied together. We tackle the problem of updating the online hypothesis by de\ufb01ning a projection problem in which each prediction task corresponds to a single linear constraint. These constraints are tied together through a single slack parameter. We then introduce a general method for approximately solving the problem by projecting simultaneously and independently on each constraint which corresponds to a prediction sub-problem, and then averaging the individual solutions. We show that this approach constitutes a feasible, albeit not necessarily optimal, solution for the original projection problem. We derive concrete simultaneous projection schemes and analyze them in the mistake bound model. We demonstrate the power of the proposed algorithm in experiments with online multiclass text categorization. Our experiments indicate that a combination of class-dependent features with the simultaneous projection method outperforms previously studied algorithms.", "bibtex": "@inproceedings{NIPS2006_1c67df9e,\n author = {Amit, Yonatan and Shalev-shwartz, Shai and Singer, Yoram},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Online Classification for Complex Problems Using Simultaneous Projections},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/1c67df9e0a5cfefa030b853983324004-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/1c67df9e0a5cfefa030b853983324004-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/1c67df9e0a5cfefa030b853983324004-Metadata.json", "review": "", "metareview": "", "pdf_size": 258126, "gs_citation": 6, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4329714428527735068&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 16, "aff": "School of Computer Sci. & Eng., The Hebrew University, Jerusalem 91904, Israel + Google Inc. 1600 Amphitheatre Pkwy, Mountain View, CA 94043, USA; School of Computer Sci. & Eng., The Hebrew University, Jerusalem 91904, Israel; School of Computer Sci. & Eng., The Hebrew University, Jerusalem 91904, Israel + Google Inc. 
1600 Amphitheatre Pkwy, Mountain View, CA 94043, USA", "aff_domain": "cs.huji.ac.il;cs.huji.ac.il;cs.huji.ac.il", "email": "cs.huji.ac.il;cs.huji.ac.il;cs.huji.ac.il", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0+1;0;0+1", "aff_unique_norm": "Hebrew University;Google", "aff_unique_dep": "School of Computer Science & Engineering;Google", "aff_unique_url": "http://www.huji.ac.il;https://www.google.com", "aff_unique_abbr": "HUJI;Google", "aff_campus_unique_index": "0+1;0;0+1", "aff_campus_unique": "Jerusalem;Mountain View", "aff_country_unique_index": "0+1;0;0+1", "aff_country_unique": "Israel;United States" }, { "id": "dc471484aa", "title": "Online Clustering of Moving Hyperplanes", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/2bd2e3373dce441c6c3bfadd1daa953e-Abstract.html", "author": "Ren\u00e9 Vidal", "abstract": "We propose a recursive algorithm for clustering trajectories lying in multiple moving hyperplanes. Starting from a given or random initial condition, we use normalized gradient descent to update the coefficients of a time varying polynomial whose degree is the number of hyperplanes and whose derivatives at a trajectory give an estimate of the vector normal to the hyperplane containing that trajectory. As time proceeds, the estimates of the hyperplane normals are shown to track their true values in a stable fashion. The segmentation of the trajectories is then obtained by clustering their associated normal vectors. The final result is a simple recursive algorithm for segmenting a variable number of moving hyperplanes. We test our algorithm on the segmentation of dynamic scenes containing rigid motions and dynamic textures, e.g., a bird floating on water. 
Our method not only segments the bird motion from the surrounding water motion, but also determines patterns of motion in the scene (e.g., periodic motion) directly from the temporal evolution of the estimated polynomial coefficients. Our experiments also show that our method can deal with appearing and disappearing motions in the scene.", "bibtex": "@inproceedings{NIPS2006_2bd2e337,\n author = {Vidal, Ren\\'{e}},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Online Clustering of Moving Hyperplanes},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/2bd2e3373dce441c6c3bfadd1daa953e-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/2bd2e3373dce441c6c3bfadd1daa953e-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/2bd2e3373dce441c6c3bfadd1daa953e-Metadata.json", "review": "", "metareview": "", "pdf_size": 239420, "gs_citation": 24, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14204356898259346921&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "Center for Imaging Science, Department of Biomedical Engineering, Johns Hopkins University", "aff_domain": "cis.jhu.edu", "email": "cis.jhu.edu", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "Johns Hopkins University", "aff_unique_dep": "Department of Biomedical Engineering", "aff_unique_url": "https://www.jhu.edu", "aff_unique_abbr": "JHU", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "id": "24713b5a69", "title": "Optimal Change-Detection and Spiking Neurons", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/5f268dfb0fbef44de0f668a022707b86-Abstract.html", "author": "Angela J. 
Yu", "abstract": "Survival in a non-stationary, potentially adversarial environment requires animals to detect sensory changes rapidly yet accurately, two oft competing desiderata. Neurons subserving such detections are faced with the corresponding challenge to discern \"real\" changes in inputs as quickly as possible, while ignoring noisy fluctuations. Mathematically, this is an example of a change-detection problem that is actively researched in the controlled stochastic processes community. In this paper, we utilize sophisticated tools developed in that community to formalize an instantiation of the problem faced by the nervous system, and characterize the Bayes-optimal decision policy under certain assumptions. We will derive from this optimal strategy an information accumulation and decision process that remarkably resembles the dynamics of a leaky integrate-and-fire neuron. This correspondence suggests that neurons are optimized for tracking input changes, and sheds new light on the computational import of intracellular properties such as resting membrane potential, voltage-dependent conductance, and post-spike reset voltage. We also explore the influence that factors such as timing, uncertainty, neuromodulation, and reward should and do have on neuronal dynamics and sensitivity, as the optimal decision strategy depends critically on these factors.", "bibtex": "@inproceedings{NIPS2006_5f268dfb,\n author = {Yu, Angela J},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Optimal Change-Detection and Spiking Neurons},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/5f268dfb0fbef44de0f668a022707b86-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/5f268dfb0fbef44de0f668a022707b86-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/5f268dfb0fbef44de0f668a022707b86-Metadata.json", "review": "", "metareview": "", "pdf_size": 112507, "gs_citation": 28, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6612025453686884570&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 11, "aff": "SBMB, Princeton University", "aff_domain": "princeton.edu", "email": "princeton.edu", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "Princeton University", "aff_unique_dep": "SBMB", "aff_unique_url": "https://www.princeton.edu", "aff_unique_abbr": "Princeton", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "id": "0f075b8f13", "title": "Optimal Single-Class Classification Strategies", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/ae1d2c2d957a01dcb3f3b39685cdb4fa-Abstract.html", "author": "Ran El-Yaniv; Mordechai Nisenson", "abstract": "We consider single-class classification (SCC) as a two-person game between the learner and an adversary. In this game the target distribution is completely known to the learner and the learner's goal is to construct a classifier capable of guaranteeing a given tolerance for the false-positive error while minimizing the false negative error. We identify both \"hard\" and \"soft\" optimal classification strategies for different types of games and demonstrate that soft classification can provide a significant advantage. 
Our optimal strategies and bounds provide worst-case lower bounds for standard, finite-sample SCC and also motivate new approaches to solving SCC.", "bibtex": "@inproceedings{NIPS2006_ae1d2c2d,\n author = {El-Yaniv, Ran and Nisenson, Mordechai},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Optimal Single-Class Classification Strategies},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/ae1d2c2d957a01dcb3f3b39685cdb4fa-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/ae1d2c2d957a01dcb3f3b39685cdb4fa-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/ae1d2c2d957a01dcb3f3b39685cdb4fa-Metadata.json", "review": "", "metareview": "", "pdf_size": 117038, "gs_citation": 67, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3240763912355070511&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "Department of Computer Science, Technion-Israel Institute of Technology, Technion, Israel 32000; Department of Computer Science, Technion-Israel Institute of Technology, Technion, Israel 32000", "aff_domain": "cs.technion.ac.il;cs.technion.ac.il", "email": "cs.technion.ac.il;cs.technion.ac.il", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Technion-Israel Institute of Technology", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.technion.ac.il", "aff_unique_abbr": "Technion", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Technion", "aff_country_unique_index": "0;0", "aff_country_unique": "Israel" }, { "id": "e5618e5930", "title": "Ordinal Regression by Extended Binary Classification", "site": 
"https://papers.nips.cc/paper_files/paper/2006/hash/019f8b946a256d9357eadc5ace2c8678-Abstract.html", "author": "Ling Li; Hsuan-tien Lin", "abstract": "We present a reduction framework from ordinal regression to binary classification based on extended examples. The framework consists of three steps: extracting extended examples from the original examples, learning a binary classifier on the extended examples with any binary classification algorithm, and constructing a ranking rule from the binary classifier. A weighted 0/1 loss of the binary classifier would then bound the mislabeling cost of the ranking rule. Our framework allows not only to design good ordinal regression algorithms based on well-tuned binary classification approaches, but also to derive new generalization bounds for ordinal regression from known bounds for binary classification. In addition, our framework unifies many existing ordinal regression algorithms, such as perceptron ranking and support vector ordinal regression. When compared empirically on benchmark data sets, some of our newly designed algorithms enjoy advantages in terms of both training speed and generalization performance over existing algorithms, which demonstrates the usefulness of our framework.", "bibtex": "@inproceedings{NIPS2006_019f8b94,\n author = {Li, Ling and Lin, Hsuan-tien},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Ordinal Regression by Extended Binary Classification},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/019f8b946a256d9357eadc5ace2c8678-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/019f8b946a256d9357eadc5ace2c8678-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/019f8b946a256d9357eadc5ace2c8678-Metadata.json", "review": "", "metareview": "", "pdf_size": 172354, "gs_citation": 345, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6759128906310439596&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 20, "aff": "Learning Systems Group, California Institute of Technology; Learning Systems Group, California Institute of Technology", "aff_domain": "caltech.edu;caltech.edu", "email": "caltech.edu;caltech.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "California Institute of Technology", "aff_unique_dep": "Learning Systems Group", "aff_unique_url": "https://www.caltech.edu", "aff_unique_abbr": "Caltech", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Pasadena", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "7f57a1d02f", "title": "PAC-Bayes Bounds for the Risk of the Majority Vote and the Variance of the Gibbs Classifier", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/779efbd24d5a7e37ce8dc93e7c04d572-Abstract.html", "author": "Alexandre Lacasse; Fran\u00e7ois Laviolette; Mario Marchand; Pascal Germain; Nicolas Usunier", "abstract": "We propose new PAC-Bayes bounds for the risk of the weighted majority vote that depend on the mean and variance of the error of its associated Gibbs classi\ufb01er. 
We show that these bounds can be smaller than the risk of the Gibbs classi\ufb01er and can be arbitrarily close to zero even if the risk of the Gibbs classi\ufb01er is close to 1/2. Moreover, we show that these bounds can be uniformly estimated on the training data for all possible posteriors Q. Moreover, they can be improved by using a large sample of unlabelled data.", "bibtex": "@inproceedings{NIPS2006_779efbd2,\n author = {Lacasse, Alexandre and Laviolette, Fran\\c{c}ois and Marchand, Mario and Germain, Pascal and Usunier, Nicolas},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {PAC-Bayes Bounds for the Risk of the Majority Vote and the Variance of the Gibbs Classifier},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/779efbd24d5a7e37ce8dc93e7c04d572-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/779efbd24d5a7e37ce8dc93e7c04d572-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/779efbd24d5a7e37ce8dc93e7c04d572-Metadata.json", "review": "", "metareview": "", "pdf_size": 339059, "gs_citation": 135, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16094687976334523086&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 22, "aff": "D\u00e9partement IFT-GLO, Universit\u00e9 Laval, Qu\u00e9bec, Canada; D\u00e9partement IFT-GLO, Universit\u00e9 Laval, Qu\u00e9bec, Canada; D\u00e9partement IFT-GLO, Universit\u00e9 Laval, Qu\u00e9bec, Canada; D\u00e9partement IFT-GLO, Universit\u00e9 Laval, Qu\u00e9bec, Canada; Laboratoire d\u2019informatique de Paris 6, Universit\u00e9 Pierre et Marie Curie, Paris, France", "aff_domain": "ift.ulaval.ca;ift.ulaval.ca;ift.ulaval.ca;ulaval.ca;lip6.fr", "email": "ift.ulaval.ca;ift.ulaval.ca;ift.ulaval.ca;ulaval.ca;lip6.fr", "github": "", "project": "", "author_num": 
5, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;0;1", "aff_unique_norm": "Universit\u00e9 Laval;Universit\u00b4e Pierre et Marie Curie", "aff_unique_dep": "D\u00e9partement IFT-GLO;Laboratoire d\u2019informatique de Paris 6", "aff_unique_url": "https://www.ulaval.ca;https://www.upmc.fr", "aff_unique_abbr": "UL;UPMC", "aff_campus_unique_index": "0;0;0;0;1", "aff_campus_unique": "Qu\u00e9bec;Paris", "aff_country_unique_index": "0;0;0;0;1", "aff_country_unique": "Canada;France" }, { "id": "24fd004b90", "title": "PG-means: learning the number of clusters in data", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/a9986cb066812f440bc2bb6e3c13696c-Abstract.html", "author": "Yu Feng; Greg Hamerly", "abstract": "We present a novel algorithm called PG-means which is able to learn the number of clusters in a classical Gaussian mixture model. Our method is robust and efficient; it uses statistical hypothesis tests on one-dimensional projections of the data and model to determine if the examples are well represented by the model. In so doing, we are applying a statistical test for the entire model at once, not just on a per-cluster basis. We show that our method works well in difficult cases such as non-Gaussian data, overlapping clusters, eccentric clusters, high dimension, and many true clusters. Further, our new method provides a much more stable estimate of the number of clusters than existing methods.", "bibtex": "@inproceedings{NIPS2006_a9986cb0,\n author = {Feng, Yu and Hamerly, Greg},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {PG-means: learning the number of clusters in data},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/a9986cb066812f440bc2bb6e3c13696c-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/a9986cb066812f440bc2bb6e3c13696c-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/a9986cb066812f440bc2bb6e3c13696c-Metadata.json", "review": "", "metareview": "", "pdf_size": 222139, "gs_citation": 140, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15845272130534888601&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 17, "aff": "Computer Science Department, Baylor University; Computer Science Department, Baylor University", "aff_domain": "baylor.edu;baylor.edu", "email": "baylor.edu;baylor.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Baylor University", "aff_unique_dep": "Computer Science Department", "aff_unique_url": "https://www.baylor.edu", "aff_unique_abbr": "Baylor", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "e76f7714a4", "title": "Parameter Expanded Variational Bayesian Methods", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/9922f5774d88b203c4ec0fdd26616899-Abstract.html", "author": "Tommi S. Jaakkola; Yuan Qi", "abstract": "Bayesian inference has become increasingly important in statistical machine learning. Exact Bayesian calculations are often not feasible in practice, however. A number of approximate Bayesian methods have been proposed to make such calculations practical, among them the variational Bayesian (VB) approach. The VB approach, while useful, can nevertheless suffer from slow convergence to the approximate solution. 
To address this problem, we propose Parameter-eXpanded Variational Bayesian (PX-VB) methods to speed up VB. The new algorithm is inspired by parameter-expanded expectation maximization (PX-EM) and parameter-expanded data augmentation (PX-DA). Similar to PX-EM and -DA, PX-VB expands a model with auxiliary variables to reduce the coupling between variables in the original model. We analyze the convergence rates of VB and PX-VB and demonstrate the superior convergence rates of PX-VB in variational probit regression and automatic relevance determination.", "bibtex": "@inproceedings{NIPS2006_9922f577,\n author = {Jaakkola, Tommi and Qi, Yuan},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Parameter Expanded Variational Bayesian Methods},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/9922f5774d88b203c4ec0fdd26616899-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/9922f5774d88b203c4ec0fdd26616899-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/9922f5774d88b203c4ec0fdd26616899-Metadata.json", "review": "", "metareview": "", "pdf_size": 217783, "gs_citation": 51, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18369298895334898012&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "MIT CSAIL; MIT CSAIL", "aff_domain": "csail.mit.edu;csail.mit.edu", "email": "csail.mit.edu;csail.mit.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Massachusetts Institute of Technology", "aff_unique_dep": "Computer Science and Artificial Intelligence Laboratory", "aff_unique_url": "https://www.csail.mit.edu", "aff_unique_abbr": "MIT CSAIL", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Cambridge", 
"aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "6b4192ef33", "title": "Part-based Probabilistic Point Matching using Equivalence Constraints", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/5d4ae76f053f8f2516ad12961ef7fe97-Abstract.html", "author": "Graham Mcneill; Sethu Vijayakumar", "abstract": "Correspondence algorithms typically struggle with shapes that display part-based variation. We present a probabilistic approach that matches shapes using independent part transformations, where the parts themselves are learnt during matching. Ideas from semi-supervised learning are used to bias the algorithm towards finding `perceptually valid' part structures. Shapes are represented by unlabeled point sets of arbitrary size and a background component is used to handle occlusion, local dissimilarity and clutter. Thus, unlike many shape matching techniques, our approach can be applied to shapes extracted from real images. Model parameters are estimated using an EM algorithm that alternates between finding a soft correspondence and computing the optimal part transformations using Procrustes analysis.", "bibtex": "@inproceedings{NIPS2006_5d4ae76f,\n author = {Mcneill, Graham and Vijayakumar, Sethu},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Part-based Probabilistic Point Matching using Equivalence Constraints},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/5d4ae76f053f8f2516ad12961ef7fe97-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/5d4ae76f053f8f2516ad12961ef7fe97-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/5d4ae76f053f8f2516ad12961ef7fe97-Metadata.json", "review": "", "metareview": "", "pdf_size": 1051780, "gs_citation": 20, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8622151304002555125&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 22, "aff": "Institute of Perception, Action and Behavior, School of Informatics, University of Edinburgh, Edinburgh, UK. EH9 3JZ; Institute of Perception, Action and Behavior, School of Informatics, University of Edinburgh, Edinburgh, UK. EH9 3JZ", "aff_domain": "ed.ac.uk;ed.ac.uk", "email": "ed.ac.uk;ed.ac.uk", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of Edinburgh", "aff_unique_dep": "School of Informatics", "aff_unique_url": "https://www.ed.ac.uk", "aff_unique_abbr": "Edinburgh", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Edinburgh", "aff_country_unique_index": "0;0", "aff_country_unique": "United Kingdom" }, { "id": "41fbe5c729", "title": "Particle Filtering for Nonparametric Bayesian Matrix Factorization", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/38ed162a0dbef7b3fe0f628aa08b90e7-Abstract.html", "author": "Frank Wood; Thomas L. Griffiths", "abstract": "Many unsupervised learning problems can be expressed as a form of matrix factorization, reconstructing an observed data matrix as the product of two matrices of latent variables. 
A standard challenge in solving these problems is determining the dimensionality of the latent matrices. Nonparametric Bayesian matrix factorization is one way of dealing with this challenge, yielding a posterior distribution over possible factorizations of unbounded dimensionality. A drawback to this approach is that posterior estimation is typically done using Gibbs sampling, which can be slow for large problems and when conjugate priors cannot be used. As an alternative, we present a particle filter for posterior estimation in nonparametric Bayesian matrix factorization models. We illustrate this approach with two matrix factorization models and show favorable performance relative to Gibbs sampling.", "bibtex": "@inproceedings{NIPS2006_38ed162a,\n author = {Wood, Frank and Griffiths, Thomas},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Particle Filtering for Nonparametric Bayesian Matrix Factorization},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/38ed162a0dbef7b3fe0f628aa08b90e7-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/38ed162a0dbef7b3fe0f628aa08b90e7-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/38ed162a0dbef7b3fe0f628aa08b90e7-Metadata.json", "review": "", "metareview": "", "pdf_size": 114831, "gs_citation": 60, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12258277742216267753&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 18, "aff": "Department of Computer Science, Brown University; Department of Psychology, University of California, Berkeley", "aff_domain": "cs.brown.edu;berkeley.edu", "email": "cs.brown.edu;berkeley.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "Brown 
University;University of California, Berkeley", "aff_unique_dep": "Department of Computer Science;Department of Psychology", "aff_unique_url": "https://www.brown.edu;https://www.berkeley.edu", "aff_unique_abbr": "Brown;UC Berkeley", "aff_campus_unique_index": "1", "aff_campus_unique": ";Berkeley", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "220760d7c2", "title": "Predicting spike times from subthreshold dynamics of a neuron", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/0cd60efb5578cd967c3c23894f305800-Abstract.html", "author": "Ryota Kobayashi; Shigeru Shinomoto", "abstract": "It has been established that a neuron reproduces highly precise spike response to identical fluctuating input currents. We wish to accurately predict the firing times of a given neuron for any input current. For this purpose we adopt a model that mimics the dynamics of the membrane potential, and then take a cue from its dynamics for predicting the spike occurrence for a novel input current. It is found that the prediction is significantly improved by observing the state space of the membrane potential and its time derivative(s) in advance of a possible spike, in comparison to simply thresholding an instantaneous value of the estimated potential.", "bibtex": "@inproceedings{NIPS2006_0cd60efb,\n author = {Kobayashi, Ryota and Shinomoto, Shigeru},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Predicting spike times from subthreshold dynamics of a neuron},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/0cd60efb5578cd967c3c23894f305800-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/0cd60efb5578cd967c3c23894f305800-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/0cd60efb5578cd967c3c23894f305800-Metadata.json", "review": "", "metareview": "", "pdf_size": 230216, "gs_citation": 3, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17064439496045995685&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "Department of Physics, Kyoto University; Department of Physics, Kyoto University", "aff_domain": "ton.scphys.kyoto-u.ac.jp;scphys.kyoto-u.ac.jp", "email": "ton.scphys.kyoto-u.ac.jp;scphys.kyoto-u.ac.jp", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Kyoto University", "aff_unique_dep": "Department of Physics", "aff_unique_url": "https://www.kyoto-u.ac.jp", "aff_unique_abbr": "Kyoto U", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Kyoto", "aff_country_unique_index": "0;0", "aff_country_unique": "Japan" }, { "id": "067b69d931", "title": "Prediction on a Graph with a Perceptron", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/cc3d69ed781b16bce06687822ae56e6d-Abstract.html", "author": "Mark Herbster; Massimiliano Pontil", "abstract": "We study the problem of online prediction of a noisy labeling of a graph with the perceptron. We address both label noise and concept noise. Graph learning is framed as an instance of prediction on a finite set. 
To treat label noise we show that the hinge loss bounds derived by Gentile [1] for online perceptron learning can be transformed to relative mistake bounds with an optimal leading constant when applied to prediction on a finite set. These bounds depend crucially on the norm of the learned concept. Often the norm of a concept can vary dramatically with only small perturbations in a labeling. We analyze a simple transformation that stabilizes the norm under perturbations. We derive an upper bound that depends only on natural properties of the graph \u2013 the graph diameter and the cut size of a partitioning of the graph \u2013 which are only indirectly dependent on the size of the graph. The impossibility of such bounds for the graph geodesic nearest neighbors algorithm will be demonstrated.", "bibtex": "@inproceedings{NIPS2006_cc3d69ed,\n author = {Herbster, Mark and Pontil, Massimiliano},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Prediction on a Graph with a Perceptron},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/cc3d69ed781b16bce06687822ae56e6d-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/cc3d69ed781b16bce06687822ae56e6d-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/cc3d69ed781b16bce06687822ae56e6d-Metadata.json", "review": "", "metareview": "", "pdf_size": 236028, "gs_citation": 102, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15820037930263805481&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 14, "aff": ";", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster" }, { "id": "c3d0ee71be", "title": "Randomized PCA Algorithms with Regret Bounds that are Logarithmic in the Dimension", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/a41b3bb3e6b050b6c9067c67f663b915-Abstract.html", "author": "Manfred K. Warmuth; Dima Kuzmin", "abstract": "We design an on-line algorithm for Principal Component Analysis. In each trial the current instance is projected onto a probabilistically chosen low dimensional subspace. The total expected quadratic approximation error equals the total quadratic approximation error of the best subspace chosen in hindsight plus some additional term that grows linearly in dimension of the subspace but logarithmically in the dimension of the instances.", "bibtex": "@inproceedings{NIPS2006_a41b3bb3,\n author = {Warmuth, Manfred K. K and Kuzmin, Dima},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Randomized PCA Algorithms with Regret Bounds that are Logarithmic in the Dimension},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/a41b3bb3e6b050b6c9067c67f663b915-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/a41b3bb3e6b050b6c9067c67f663b915-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/a41b3bb3e6b050b6c9067c67f663b915-Metadata.json", "review": "", "metareview": "", "pdf_size": 234333, "gs_citation": 76, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1821970403914198799&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "Computer Science Department, University of California - Santa Cruz; Computer Science Department, University of California - Santa Cruz", "aff_domain": "cse.ucsc.edu;cse.ucsc.edu", "email": "cse.ucsc.edu;cse.ucsc.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of California, Santa Cruz", "aff_unique_dep": "Computer Science Department", "aff_unique_url": "https://www.ucsc.edu", "aff_unique_abbr": "UCSC", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Santa Cruz", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "94b62c34b3", "title": "Real-time adaptive information-theoretic optimization of neurophysiology experiments", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/8004d637b6236202217be3dfcdd8ce59-Abstract.html", "author": "Jeremy Lewi; Robert Butera; Liam Paninski", "abstract": "Adaptively optimizing experiments can significantly reduce the number of trials needed to characterize neural responses using parametric statistical models. 
However, the potential for these methods has been limited to date by severe computational challenges: choosing the stimulus which will provide the most information about the (typically high-dimensional) model parameters requires evaluating a high-dimensional integration and optimization in near-real time. Here we present a fast algorithm for choosing the optimal (most informative) stimulus based on a Fisher approximation of the Shannon information and specialized numerical linear algebra techniques. This algorithm requires only low-rank matrix manipulations and a one-dimensional linesearch to choose the stimulus and is therefore efficient even for high-dimensional stimulus and parameter spaces; for example, we require just 15 milliseconds on a desktop computer to optimize a 100-dimensional stimulus. Our algorithm therefore makes real-time adaptive experimental design feasible. Simulation results show that model parameters can be estimated much more efficiently using these adaptive techniques than by using random (nonadaptive) stimuli. Finally, we generalize the algorithm to efficiently handle both fast adaptation due to spike-history effects and slow, non-systematic drifts in the model parameters. Maximizing the efficiency of data collection is important in any experimental setting. In neurophysiology experiments, minimizing the number of trials needed to characterize a neural system is essential for maintaining the viability of a preparation and ensuring robust results. As a result, various approaches have been developed to optimize neurophysiology experiments online in order to choose the \"best\" stimuli given prior knowledge of the system and the observed history of the cell's responses. The \"best\" stimulus can be defined a number of different ways depending on the experimental objectives. One reasonable choice, if we are interested in finding a neuron's \"preferred stimulus,\" is the stimulus which maximizes the firing rate of the neuron [1, 2, 3, 4]. 
Alternatively, when investigating the coding properties of sensory cells it makes sense to define the optimal stimulus in terms of the mutual information between the stimulus and response [5]. Here we take a system identification approach: we define the optimal stimulus as the one which tells us the most about how a neural system responds to its inputs [6, 7]. We consider neural systems in", "bibtex": "@inproceedings{NIPS2006_8004d637,\n author = {Lewi, Jeremy and Butera, Robert and Paninski, Liam},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Real-time adaptive information-theoretic optimization of neurophysiology experiments},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/8004d637b6236202217be3dfcdd8ce59-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/8004d637b6236202217be3dfcdd8ce59-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/8004d637b6236202217be3dfcdd8ce59-Metadata.json", "review": "", "metareview": "", "pdf_size": 409393, "gs_citation": 39, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16996849683656618684&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "School of Bioengineering, Georgia Institute of Technology; School of Electrical and Computer Engineering, Georgia Institute of Technology; Department of Statistics, Columbia University", "aff_domain": "gatech.edu;ece.gatech.edu;stat.columbia.edu", "email": "gatech.edu;ece.gatech.edu;stat.columbia.edu", "github": "", "project": "http://www.prism.gatech.edu/~gtg120z; http://www.stat.columbia.edu/~liam", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;1", "aff_unique_norm": "Georgia Institute of Technology;Columbia University", "aff_unique_dep": "School of Bioengineering;Department of 
Statistics", "aff_unique_url": "https://www.gatech.edu;https://www.columbia.edu", "aff_unique_abbr": "Georgia Tech;Columbia", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Atlanta;", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "31f37af023", "title": "Recursive Attribute Factoring", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/466473650870501e3600d9a1b4ee5d44-Abstract.html", "author": "David Cohn; Deepak Verma; Karl Pfleger", "abstract": "Clustering, or factoring of a document collection attempts to \u201cexplain\u201d each ob- served document in terms of one or a small number of inferred prototypes. Prior work demonstrated that when links exist between documents in the corpus (as is the case with a collection of web pages or scienti\ufb01c papers), building a joint model of document contents and connections produces a better model than that built from contents or connections alone. Many problems arise when trying to apply these joint models to corpus at the scale of the World Wide Web, however; one of these is that the sheer overhead of representing a feature space on the order of billions of dimensions becomes impractical. We address this problem with a simple representational shift inspired by proba- bilistic relational models: instead of representing document linkage in terms of the identities of linking documents, we represent it by the explicit and inferred at- tributes of the linking documents. Several surprising results come with this shift: in addition to being computationally more tractable, the new model produces fac- tors that more cleanly decompose the document collection. We discuss several variations on this model and show how some can be seen as exact generalizations of the PageRank algorithm.", "bibtex": "@inproceedings{NIPS2006_46647365,\n author = {Cohn, David and Verma, Deepak and Pfleger, Karl},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. 
Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Recursive Attribute Factoring},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/466473650870501e3600d9a1b4ee5d44-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/466473650870501e3600d9a1b4ee5d44-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/466473650870501e3600d9a1b4ee5d44-Metadata.json", "review": "", "metareview": "", "pdf_size": 145231, "gs_citation": 9, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6381521630207333546&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 14, "aff": "Google Inc., 1600 Amphitheatre Parkway, Mountain View, CA 94043; Dept. of CSE, Univ. of Washington, Seattle WA- 98195-2350; Google Inc., 1600 Amphitheatre Parkway, Mountain View, CA 94043", "aff_domain": "google.com;cs.washington.edu;google.com", "email": "google.com;cs.washington.edu;google.com", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;0", "aff_unique_norm": "Google;University of Washington", "aff_unique_dep": "Google Inc.;Department of Computer Science and Engineering", "aff_unique_url": "https://www.google.com;https://www.washington.edu", "aff_unique_abbr": "Google;UW", "aff_campus_unique_index": "0;1;0", "aff_campus_unique": "Mountain View;Seattle", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "447f5add62", "title": "Recursive ICA", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/9a96a2c73c0d477ff2a6da3bf538f4f4-Abstract.html", "author": "Honghao Shan; Lingyun Zhang; Garrison W. Cottrell", "abstract": "Independent Component Analysis (ICA) is a popular method for extracting independent features from visual data. However, as a fundamentally linear technique, there is always nonlinear residual redundancy that is not captured by ICA. 
Hence there have been many attempts to try to create a hierarchical version of ICA, but so far none of the approaches have a natural way to apply them more than once. Here we show that there is a relatively simple technique that transforms the absolute values of the outputs of a previous application of ICA into a normal distribution, to which ICA may be applied again. This results in a recursive ICA algorithm that may be applied any number of times in order to extract higher order structure from previous layers.", "bibtex": "@inproceedings{NIPS2006_9a96a2c7,\n author = {Shan, Honghao and Zhang, Lingyun and Cottrell, Garrison},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Recursive ICA},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/9a96a2c73c0d477ff2a6da3bf538f4f4-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/9a96a2c73c0d477ff2a6da3bf538f4f4-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/9a96a2c73c0d477ff2a6da3bf538f4f4-Metadata.json", "review": "", "metareview": "", "pdf_size": 432408, "gs_citation": 35, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5410147343370681458&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 13, "aff": "Department of Computer Science and Engineering, University of California, San Diego; Department of Computer Science and Engineering, University of California, San Diego; Department of Computer Science and Engineering, University of California, San Diego", "aff_domain": "cs.ucsd.edu;cs.ucsd.edu;cs.ucsd.edu", "email": "cs.ucsd.edu;cs.ucsd.edu;cs.ucsd.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of California, San Diego", "aff_unique_dep": "Department of Computer 
Science and Engineering", "aff_unique_url": "https://www.ucsd.edu", "aff_unique_abbr": "UCSD", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "San Diego", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "41738c79fc", "title": "Reducing Calibration Time For Brain-Computer Interfaces: A Clustering Approach", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/275d7fb2fd45098ad5c3ece2ed4a2824-Abstract.html", "author": "Matthias Krauledat; Michael Schr\u00f6der; Benjamin Blankertz; Klaus-Robert M\u00fcller", "abstract": "Up to now even subjects that are experts in the use of machine learning based BCI systems still have to undergo a calibration session of about 20-30 min. From this data their (movement) intentions are so far infered. We now propose a new paradigm that allows to completely omit such calibration and instead transfer knowledge from prior sessions. To achieve this goal we first define normalized CSP features and distances in-between. Second, we derive prototypical features across sessions: (a) by clustering or (b) by feature concatenation methods. Finally, we construct a classifier based on these individualized prototypes and show that, indeed, classifiers can be successfully transferred to a new session for a number of subjects.", "bibtex": "@inproceedings{NIPS2006_275d7fb2,\n author = {Krauledat, Matthias and Schr\\\"{o}der, Michael and Blankertz, Benjamin and M\\\"{u}ller, Klaus-Robert},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Reducing Calibration Time For Brain-Computer Interfaces: A Clustering Approach},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/275d7fb2fd45098ad5c3ece2ed4a2824-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/275d7fb2fd45098ad5c3ece2ed4a2824-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/275d7fb2fd45098ad5c3ece2ed4a2824-Metadata.json", "review": "", "metareview": "", "pdf_size": 115348, "gs_citation": 93, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5861189415294060010&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 14, "aff": "Technical University Berlin; Fraunhofer FIRST.IDA; Fraunhofer FIRST.IDA; Technical University Berlin + Fraunhofer FIRST.IDA", "aff_domain": "first.fhg.de;first.fhg.de;first.fhg.de;first.fhg.de", "email": "first.fhg.de;first.fhg.de;first.fhg.de;first.fhg.de", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;1;1;0+1", "aff_unique_norm": "Technical University Berlin;Fraunhofer Institute for Software and Systems Engineering", "aff_unique_dep": ";FIRST.IDA", "aff_unique_url": "https://www.tu-berlin.de;https://www.first.ida.fraunhofer.de/", "aff_unique_abbr": "TU Berlin;Fraunhofer FIRST.IDA", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Berlin;", "aff_country_unique_index": "0;0;0;0+0", "aff_country_unique": "Germany" }, { "id": "55ee779ebb", "title": "Relational Learning with Gaussian Processes", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/ffbd6cbb019a1413183c8d08f2929307-Abstract.html", "author": "Wei Chu; Vikas Sindhwani; Zoubin Ghahramani; S. S. Keerthi", "abstract": "Correlation between instances is often modelled via a kernel function using in- put attributes of the instances. 
Relational knowledge can further reveal additional pairwise correlations between variables of interest. In this paper, we develop a class of models which incorporates both reciprocal relational information and in- put attributes using Gaussian process techniques. This approach provides a novel non-parametric Bayesian framework with a data-dependent covariance function for supervised learning tasks. We also apply this framework to semi-supervised learning. Experimental results on several real world data sets verify the usefulness of this algorithm.", "bibtex": "@inproceedings{NIPS2006_ffbd6cbb,\n author = {Chu, Wei and Sindhwani, Vikas and Ghahramani, Zoubin and Keerthi, S.},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Relational Learning with Gaussian Processes},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/ffbd6cbb019a1413183c8d08f2929307-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/ffbd6cbb019a1413183c8d08f2929307-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/ffbd6cbb019a1413183c8d08f2929307-Metadata.json", "review": "", "metareview": "", "pdf_size": 134218, "gs_citation": 125, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4025017922340704565&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 19, "aff": ";;;", "aff_domain": ";;;", "email": ";;;", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster" }, { "id": "ba88b8a049", "title": "Robotic Grasping of Novel Objects", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/22722a343513ed45f14905eb07621686-Abstract.html", "author": "Ashutosh Saxena; Justin Driemeyer; Justin Kearns; Andrew Y. 
Ng", "abstract": "We consider the problem of grasping novel objects, specifically ones that are being seen for the first time through vision. We present a learning algorithm that neither requires, nor tries to build, a 3-d model of the object. Instead it predicts, directly as a function of the images, a point at which to grasp the object. Our algorithm is trained via supervised learning, using synthetic images for the training set. We demonstrate on a robotic manipulation platform that this approach successfully grasps a wide variety of objects, such as wine glasses, duct tape, markers, a translucent box, jugs, knife-cutters, cellphones, keys, screwdrivers, staplers, toothbrushes, a thick coil of wire, a strangely shaped power horn, and others, none of which were seen in the training set.", "bibtex": "@inproceedings{NIPS2006_22722a34,\n author = {Saxena, Ashutosh and Driemeyer, Justin and Kearns, Justin and Ng, Andrew},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Robotic Grasping of Novel Objects},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/22722a343513ed45f14905eb07621686-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/22722a343513ed45f14905eb07621686-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/22722a343513ed45f14905eb07621686-Metadata.json", "review": "", "metareview": "", "pdf_size": 284120, "gs_citation": 226, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15772902512919332390&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Computer Science Department, Stanford University, Stanford, CA 94305; Computer Science Department, Stanford University, Stanford, CA 94305; Computer Science Department, Stanford University, Stanford, CA 94305; Computer Science Department, Stanford University, Stanford, CA 94305", "aff_domain": "cs.stanford.edu;cs.stanford.edu;cs.stanford.edu;cs.stanford.edu", "email": "cs.stanford.edu;cs.stanford.edu;cs.stanford.edu;cs.stanford.edu", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Stanford University", "aff_unique_dep": "Computer Science Department", "aff_unique_url": "https://www.stanford.edu", "aff_unique_abbr": "Stanford", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Stanford", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "id": "bbabc29a8b", "title": "Sample Complexity of Policy Search with Known Dynamics", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/f6b5f8c32c65fee991049a55dc97d1ce-Abstract.html", "author": "Peter L. Bartlett; Ambuj Tewari", "abstract": "We consider methods that try to find a good policy for a Markov decision process by choosing one from a given class. 
The policy is chosen based on its empirical performance in simulations. We are interested in conditions on the complexity of the policy class that ensure the success of such simulation based policy search methods. We show that under bounds on the amount of computation involved in computing policies, transition dynamics and rewards, uniform convergence of empirical estimates to true value functions occurs. Previously, such results were derived by assuming boundedness of pseudodimension and Lipschitz continuity. These assumptions and ours are both stronger than the usual combinatorial complexity measures. We show, via minimax inequalities, that this is essential: boundedness of pseudodimension or fat-shattering dimension alone is not sufficient.", "bibtex": "@inproceedings{NIPS2006_f6b5f8c3,\n author = {Bartlett, Peter and Tewari, Ambuj},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Sample Complexity of Policy Search with Known Dynamics},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/f6b5f8c32c65fee991049a55dc97d1ce-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/f6b5f8c32c65fee991049a55dc97d1ce-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/f6b5f8c32c65fee991049a55dc97d1ce-Metadata.json", "review": "", "metareview": "", "pdf_size": 96908, "gs_citation": 11, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13419670480307312683&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 16, "aff": "Division of Computer Science and Department of Statistics, University of California, Berkeley; Division of Computer Science, University of California, Berkeley", "aff_domain": "cs.berkeley.edu;cs.berkeley.edu", "email": "cs.berkeley.edu;cs.berkeley.edu", "github": "", "project": "", "author_num": 2, "track": "main", 
"status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of California, Berkeley", "aff_unique_dep": "Division of Computer Science and Department of Statistics", "aff_unique_url": "https://www.berkeley.edu", "aff_unique_abbr": "UC Berkeley", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Berkeley", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "203e96f1db", "title": "Scalable Discriminative Learning for Natural Language Parsing and Translation", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/e8bf0f27d70d480d3ab793bb7619aaa5-Abstract.html", "author": "Joseph Turian; Benjamin Wellington; I. D. Melamed", "abstract": "Parsing and translating natural languages can be viewed as problems of predicting tree structures. For machine learning approaches to these predictions, the diversity and high dimensionality of the structures involved mandate very large training sets. This paper presents a purely discriminative learning method that scales up well to problems of this size. Its accuracy was at least as good as other comparable methods on a standard parsing task. To our knowledge, it is the first purely discriminative learning algorithm for translation with treestructured models. Unlike other popular methods, this method does not require a great deal of feature engineering a priori, because it performs feature selection over a compound feature space as it learns. Experiments demonstrate the method's versatility, accuracy, and efficiency. Relevant software is freely available at http://nlp.cs.nyu.edu/parser and http://nlp.cs.nyu.edu/GenPar.", "bibtex": "@inproceedings{NIPS2006_e8bf0f27,\n author = {Turian, Joseph and Wellington, Benjamin and Melamed, I.},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Scalable Discriminative Learning for Natural Language Parsing and Translation},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/e8bf0f27d70d480d3ab793bb7619aaa5-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/e8bf0f27d70d480d3ab793bb7619aaa5-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/e8bf0f27d70d480d3ab793bb7619aaa5-Metadata.json", "review": "", "metareview": "", "pdf_size": 69693, "gs_citation": 38, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15184965370505611997&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 11, "aff": "Computer Science Department, New York University; Computer Science Department, New York University; Computer Science Department, New York University", "aff_domain": "cs.nyu.edu;cs.nyu.edu;cs.nyu.edu", "email": "cs.nyu.edu;cs.nyu.edu;cs.nyu.edu", "github": "", "project": "http://nlp.cs.nyu.edu/parser; http://nlp.cs.nyu.edu/GenPar", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "New York University", "aff_unique_dep": "Computer Science Department", "aff_unique_url": "https://www.nyu.edu", "aff_unique_abbr": "NYU", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "New York", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "ec24c557bf", "title": "Shifting, One-Inclusion Mistake Bounds and Tight Multiclass Expected Risk Bounds", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/a11ce019e96a4c60832eadd755a17a58-Abstract.html", "author": "Benjamin I. Rubinstein; Peter L. Bartlett; J. H. Rubinstein", "abstract": "Under the prediction model of learning, a prediction strategy is presented with an i.i.d. 
sample of n - 1 points in X and corresponding labels from a concept f F , and aims to minimize the worst-case probability of erring on an nth point. By exploiting the structure of F , Haussler et al. achieved a VC(F )/n bound for the natural one-inclusion prediction strategy, improving on bounds implied by PAC-type results by a O(log n) factor. The key data structure in their result is the natural subgraph of the hypercube--the one-inclusion graph; the key step is a d = VC(F ) bound on one-inclusion graph density. The first main result of this s /n -1 paper is a density bound of n d-1 ( d ) < d, which positively resolves a conjecture of Kuzmin & Warmuth relating to their unlabeled Peeling compression scheme and also leads to an improved mistake bound for the randomized (deterministic) one-inclusion strategy for all d (for d (n)). The proof uses a new form of VC-invariant shifting and a group-theoretic symmetrization. Our second main result is a k -class analogue of the d/n mistake bound, replacing the VC-dimension by the Pollard pseudo-dimension and the one-inclusion strategy by its natural hypergraph generalization. This bound on expected risk improves on known PAC-based results by a factor of O(log n) and is shown to be optimal up to a O(log k ) factor. The combinatorial technique of shifting takes a central role in understanding the one-inclusion (hyper)graph and is a running theme throughout.", "bibtex": "@inproceedings{NIPS2006_a11ce019,\n author = {Rubinstein, Benjamin and Bartlett, Peter and Rubinstein, J.},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Shifting, One-Inclusion Mistake Bounds and Tight Multiclass Expected Risk Bounds},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/a11ce019e96a4c60832eadd755a17a58-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/a11ce019e96a4c60832eadd755a17a58-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/a11ce019e96a4c60832eadd755a17a58-Metadata.json", "review": "", "metareview": "", "pdf_size": 117890, "gs_citation": 27, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18065056214580492876&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 15, "aff": "Computer Science Division, University of California, Berkeley; Computer Science Division and Department of Statistics, University of California, Berkeley; Department of Mathematics & Statistics, The University of Melbourne", "aff_domain": "cs.berkeley.edu;cs.berkeley.edu;ms.unimelb.edu", "email": "cs.berkeley.edu;cs.berkeley.edu;ms.unimelb.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;1", "aff_unique_norm": "University of California, Berkeley;University of Melbourne", "aff_unique_dep": "Computer Science Division;Department of Mathematics & Statistics", "aff_unique_url": "https://www.berkeley.edu;https://www.unimelb.edu.au", "aff_unique_abbr": "UC Berkeley;UniMelb", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Berkeley;", "aff_country_unique_index": "0;0;1", "aff_country_unique": "United States;Australia" }, { "id": "fa29448fa3", "title": "Similarity by Composition", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/ca1d3153a1cf0ed998d4879fbb50d9ab-Abstract.html", "author": "Oren Boiman; Michal Irani", "abstract": "We propose a new approach for measuring similarity between two signals, which is applicable to many machine learning tasks, and to many 
signal types. We say that a signal S1 is \u201csimilar\u201d to a signal S2 if it is \u201ceasy\u201d to compose S1 from few large contiguous chunks of S2. Obviously, if we use small enough pieces, then any signal can be composed of any other. Therefore, the larger those pieces are, the more similar S1 is to S2. This induces a local similarity score at every point in the signal, based on the size of its supported surrounding region. These local scores can in turn be accumulated in a principled information-theoretic way into a global similarity score of the entire S1 to S2. \u201cSimilarity by Composition\u201d can be applied between pairs of signals, between groups of signals, and also between dif- ferent portions of the same signal. It can therefore be employed in a wide variety of machine learning problems (clustering, classi\ufb01cation, retrieval, segmentation, attention, saliency, labelling, etc.), and can be applied to a wide range of signal types (images, video, audio, biological data, etc.) We show a few such examples.", "bibtex": "@inproceedings{NIPS2006_ca1d3153,\n author = {Boiman, Oren and Irani, Michal},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Similarity by Composition},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/ca1d3153a1cf0ed998d4879fbb50d9ab-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/ca1d3153a1cf0ed998d4879fbb50d9ab-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/ca1d3153a1cf0ed998d4879fbb50d9ab-Metadata.json", "review": "", "metareview": "", "pdf_size": 249016, "gs_citation": 104, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2839598452884134985&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 14, "aff": ";", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster" }, { "id": "85230506fd", "title": "Simplifying Mixture Models through Function Approximation", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/c8cd63e1bf13c5016881652983fb615a-Abstract.html", "author": "Kai Zhang; James T. Kwok", "abstract": "Finite mixture model is a powerful tool in many statistical learning problems. In this paper, we propose a general, structure-preserving approach to reduce its model complexity, which can bring signi\ufb01cant computational bene\ufb01ts in many applications. The basic idea is to group the original mixture components into compact clusters, and then minimize an upper bound on the approximation error between the original and simpli\ufb01ed models. By adopting the L2 norm as the dis- tance measure between mixture models, we can derive closed-form solutions that are more robust and reliable than using the KL-based distance measure. Moreover, the complexity of our algorithm is only linear in the sample size and dimensional- ity. 
Experiments on density estimation and clustering-based image segmentation demonstrate its outstanding performance in terms of both speed and accuracy.", "bibtex": "@inproceedings{NIPS2006_c8cd63e1,\n author = {Zhang, Kai and Kwok, James},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Simplifying Mixture Models through Function Approximation},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/c8cd63e1bf13c5016881652983fb615a-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/c8cd63e1bf13c5016881652983fb615a-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/c8cd63e1bf13c5016881652983fb615a-Metadata.json", "review": "", "metareview": "", "pdf_size": 191975, "gs_citation": 71, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12915902091018597726&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 16, "aff": ";", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster" }, { "id": "a0164f2040", "title": "Single Channel Speech Separation Using Factorial Dynamics", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/6788076842014c83cedadbe6b0ba0314-Abstract.html", "author": "John R. Hershey; Trausti Kristjansson; Steven Rennie; Peder A. Olsen", "abstract": "Human listeners have the extraordinary ability to hear and recognize speech even when more than one person is talking. Their machine counterparts have historically been unable to compete with this ability, until now. We present a modelbased system that performs on par with humans in the task of separating speech of two talkers from a single-channel recording. Remarkably, the system surpasses human recognition performance in many conditions. 
The models of speech use temporal dynamics to help infer the source speech signals, given mixed speech signals. The estimated source signals are then recognized using a conventional speech recognition system. We demonstrate that the system achieves its best performance when the model of temporal dynamics closely captures the grammatical constraints of the task. One of the hallmarks of human perception is our ability to solve the auditory cocktail party problem: we can direct our attention to a given speaker in the presence of interfering speech, and understand what was said remarkably well. Until now the same could not be said for automatic speech recognition systems. However, we have recently introduced a system which in many conditions performs this task better than humans [1][2]. The model addresses the Pascal Speech Separation Challenge task [3], and outperforms all other published results by more than 10% word error rate (WER). In this model, dynamics are modeled using a layered combination of one or two Markov chains: one for long-term dependencies and another for short-term dependencies. The combination of the two speakers was handled via an iterative Laplace approximation method known as Algonquin [4]. Here we describe experiments that show better performance on the same task with a simpler version of the model. The task we address is provided by the PASCAL Speech Separation Challenge [3], which provides standard training, development, and test data sets of single-channel speech mixtures following an arbitrary but simple grammar. In addition, the challenge organizers have conducted human-listening experiments to provide an interesting baseline for comparison of computational techniques. The overall system we developed is composed of the three components: a speaker identification and gain estimation component, a signal separation component, and a speech recognition system. 
In this paper we focus on the signal separation component, which is composed of the acoustic and grammatical models. The details of the other components are discussed in [2]. Single-channel speech separation has previously been attempted using Gaussian mixture models (GMMs) on individual frames of acoustic features. However such models tend to perform well only when speakers are of different gender or have rather different voices [4]. When speakers have similar voices, speaker-dependent mixture models cannot unambiguously identify the component speakers. In such cases it is helpful to model the temporal dynamics of the speech. Several models in the literature have attempted to do so either for recognition [5, 6] or enhancement [7, 8] of speech. Such", "bibtex": "@inproceedings{NIPS2006_67880768,\n author = {Hershey, John and Kristjansson, Trausti and Rennie, Steven and Olsen, Peder A},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Single Channel Speech Separation Using Factorial Dynamics},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/6788076842014c83cedadbe6b0ba0314-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/6788076842014c83cedadbe6b0ba0314-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/6788076842014c83cedadbe6b0ba0314-Metadata.json", "review": "", "metareview": "", "pdf_size": 124530, "gs_citation": 16, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5226800064257136345&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 12, "aff": ";;;", "aff_domain": ";;;", "email": ";;;", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster" }, { "id": "70d12d73f0", "title": "Sparse Kernel Orthonormalized PLS for feature extraction in large data sets", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/6db291ac9963003618ca6aa15063c4d6-Abstract.html", "author": "Jer\u00f3nimo Arenas-garc\u00eda; Kaare B. Petersen; Lars K. Hansen", "abstract": "In this paper we are presenting a novel multivariate analysis method. Our scheme is based on a novel kernel orthonormalized partial least squares (PLS) variant for feature extraction, imposing sparsity constrains in the solution to improve scalability. The algorithm is tested on a benchmark of UCI data sets, and on the analysis of integrated short-time music features for genre prediction. The upshot is that the method has strong expressive power even with rather few features, is clearly outperforming the ordinary kernel PLS, and therefore is an appealing method for feature extraction of labelled data.", "bibtex": "@inproceedings{NIPS2006_6db291ac,\n author = {Arenas-garc\\'{\\i}a, Jer\\'{o}nimo and Petersen, Kaare and Hansen, Lars K},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. 
Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Sparse Kernel Orthonormalized PLS for feature extraction in large data sets},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/6db291ac9963003618ca6aa15063c4d6-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/6db291ac9963003618ca6aa15063c4d6-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/6db291ac9963003618ca6aa15063c4d6-Metadata.json", "review": "", "metareview": "", "pdf_size": 77841, "gs_citation": 62, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6455048167057919783&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 18, "aff": "Informatics and Mathematical Modelling, Technical University of Denmark; Informatics and Mathematical Modelling, Technical University of Denmark; Informatics and Mathematical Modelling, Technical University of Denmark", "aff_domain": "imm.dtu.dk;imm.dtu.dk;imm.dtu.dk", "email": "imm.dtu.dk;imm.dtu.dk;imm.dtu.dk", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "Technical University of Denmark", "aff_unique_dep": "Informatics and Mathematical Modelling", "aff_unique_url": "https://www.dtu.dk", "aff_unique_abbr": "DTU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Denmark" }, { "id": "d75f3f672a", "title": "Sparse Multinomial Logistic Regression via Bayesian L1 Regularisation", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/b22b257ad0519d4500539da3c8bcf4dd-Abstract.html", "author": "Gavin C. Cawley; Nicola L. Talbot; Mark Girolami", "abstract": "Multinomial logistic regression provides the standard penalised maximum-likelihood solution to multi-class pattern recognition problems. 
More recently, the development of sparse multinomial logistic regression models has found ap- plication in text processing and microarray classi\ufb01cation, where explicit identi\ufb01- cation of the most informative features is of value. In this paper, we propose a sparse multinomial logistic regression method, in which the sparsity arises from the use of a Laplace prior, but where the usual regularisation parameter is inte- grated out analytically. Evaluation over a range of benchmark datasets reveals this approach results in similar generalisation performance to that obtained using cross-validation, but at greatly reduced computational expense.", "bibtex": "@inproceedings{NIPS2006_b22b257a,\n author = {Cawley, Gavin and Talbot, Nicola and Girolami, Mark},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Sparse Multinomial Logistic Regression via Bayesian L1 Regularisation},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/b22b257ad0519d4500539da3c8bcf4dd-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/b22b257ad0519d4500539da3c8bcf4dd-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/b22b257ad0519d4500539da3c8bcf4dd-Metadata.json", "review": "", "metareview": "", "pdf_size": 78570, "gs_citation": 302, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8564908856720680095&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 16, "aff": "School of Computing Sciences, University of East Anglia, Norwich, Norfolk, NR4 7TJ, U.K.; School of Computing Sciences, University of East Anglia, Norwich, Norfolk, NR4 7TJ, U.K.; Department of Computing Science, University of Glasgow, Glasgow, Scotland, G12 8QQ, U.K.", "aff_domain": "cmp.uea.ac.uk;cmp.uea.ac.uk;dcs.gla.ac.uk", "email": 
"cmp.uea.ac.uk;cmp.uea.ac.uk;dcs.gla.ac.uk", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;1", "aff_unique_norm": "University of East Anglia;University of Glasgow", "aff_unique_dep": "School of Computing Sciences;Department of Computing Science", "aff_unique_url": "https://www.uea.ac.uk;https://www.gla.ac.uk", "aff_unique_abbr": "UEA;UofG", "aff_campus_unique_index": "0;0;1", "aff_campus_unique": "Norwich;Glasgow", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United Kingdom" }, { "id": "289729ad17", "title": "Sparse Representation for Signal Classification", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/c922de9e01cba8a4684f6c3471130e4c-Abstract.html", "author": "Ke Huang; Selin Aviyente", "abstract": "In this paper, application of sparse representation (factorization) of signals over an overcomplete basis (dictionary) for signal classi\ufb01cation is discussed. Search- ing for the sparse representation of a signal over an overcomplete dictionary is achieved by optimizing an objective function that includes two terms: one that measures the signal reconstruction error and another that measures the sparsity. This objective function works well in applications where signals need to be recon- structed, like coding and denoising. On the other hand, discriminative methods, such as linear discriminative analysis (LDA), are better suited for classi\ufb01cation tasks. However, discriminative methods are usually sensitive to corruption in sig- nals due to lacking crucial properties for signal reconstruction. In this paper, we present a theoretical framework for signal classi\ufb01cation with sparse representa- tion. The approach combines the discrimination power of the discriminative meth- ods with the reconstruction property and the sparsity of the sparse representation that enables one to deal with signal corruptions: noise, missing data and outliers. 
The proposed approach is therefore capable of robust classi\ufb01cation with a sparse representation of signals. The theoretical results are demonstrated with signal classi\ufb01cation tasks, showing that the proposed approach outperforms the standard discriminative methods and the standard sparse representation in the case of cor- rupted signals.", "bibtex": "@inproceedings{NIPS2006_c922de9e,\n author = {Huang, Ke and Aviyente, Selin},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Sparse Representation for Signal Classification},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/c922de9e01cba8a4684f6c3471130e4c-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/c922de9e01cba8a4684f6c3471130e4c-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/c922de9e01cba8a4684f6c3471130e4c-Metadata.json", "review": "", "metareview": "", "pdf_size": 81373, "gs_citation": 904, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2270797646854256454&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 15, "aff": "Department of Electrical and Computer Engineering, Michigan State University, East Lansing, MI48824; Department of Electrical and Computer Engineering, Michigan State University, East Lansing, MI48824", "aff_domain": "egr.msu.edu;egr.msu.edu", "email": "egr.msu.edu;egr.msu.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Michigan State University", "aff_unique_dep": "Department of Electrical and Computer Engineering", "aff_unique_url": "https://www.msu.edu", "aff_unique_abbr": "MSU", "aff_campus_unique_index": "0;0", "aff_campus_unique": "East Lansing", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": 
"9752f0afcf", "title": "Speakers optimize information density through syntactic reduction", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/c6a01432c8138d46ba39957a8250e027-Abstract.html", "author": "T. F. Jaeger; Roger P. Levy", "abstract": "If language users are rational, they might choose to structure their utterances so as to optimize communicative properties. In particular, information-theoretic and psycholinguistic considerations suggest that this may include maximizing the uniformity of information density in an utterance. We investigate this possibility in the context of syntactic reduction, where the speaker has the option of either marking a higher-order unit (a phrase) with an extra word, or leaving it unmarked. We demonstrate that speakers are more likely to reduce less information-dense phrases. In a second step, we combine a stochastic model of structured utterance production with a logistic-regression model of syntactic reduction to study which types of cues speakers employ when estimating the predictability of upcoming elements. We demonstrate that the trend toward predictability-sensitive syntactic reduction (Jaeger, 2006) is robust in the face of a wide variety of control variables, and present evidence that speakers use both surface and structural cues for predictability estimation.", "bibtex": "@inproceedings{NIPS2006_c6a01432,\n author = {Jaeger, T. and Levy, Roger},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Speakers optimize information density through syntactic reduction},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/c6a01432c8138d46ba39957a8250e027-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/c6a01432c8138d46ba39957a8250e027-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/c6a01432c8138d46ba39957a8250e027-Metadata.json", "review": "", "metareview": "", "pdf_size": 69570, "gs_citation": 730, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4339994027250863058&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 15, "aff": "Department of Linguistics, UCSanDiego; Department of Linguistics & Department of Psychology, Stanford University & UCSan Diego", "aff_domain": "ling.ucsd.edu;csli.stanford.edu", "email": "ling.ucsd.edu;csli.stanford.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "University of California, San Diego;Stanford University", "aff_unique_dep": "Department of Linguistics;Department of Linguistics", "aff_unique_url": "https://www.ucsd.edu;https://www.stanford.edu", "aff_unique_abbr": "UCSD;Stanford", "aff_campus_unique_index": "0;1", "aff_campus_unique": "San Diego;Stanford", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "fadd63bcb3", "title": "Stability of $K$-Means Clustering", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/58191d2a914c6dae66371c9dcdc91b41-Abstract.html", "author": "Alexander Rakhlin; Andrea Caponnetto", "abstract": "We phrase K -means clustering as an empirical risk minimization procedure over a class HK and explicitly calculate the covering number for this class. Next, we show that stability of K -means clustering is characterized by the geometry of HK with respect to the underlying distribution. 
We prove that in the case of a unique global minimizer, the clustering solution is stable with respect to complete changes of the data, while for the case of multiple minimizers, the change of (n1/2 ) samples defines the transition between stability and instability. While for a finite number of minimizers this result follows from multinomial distribution estimates, the case of infinite minimizers requires more refined tools. We conclude by proving that stability of the functions in HK implies stability of the actual centers of the clusters. Since stability is often used for selecting the number of clusters in practice, we hope that our analysis serves as a starting point for finding theoretically grounded recipes for the choice of K .", "bibtex": "@inproceedings{NIPS2006_58191d2a,\n author = {Rakhlin, Alexander and Caponnetto, Andrea},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Stability of K-Means Clustering},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/58191d2a914c6dae66371c9dcdc91b41-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/58191d2a914c6dae66371c9dcdc91b41-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/58191d2a914c6dae66371c9dcdc91b41-Metadata.json", "review": "", "metareview": "", "pdf_size": 89161, "gs_citation": 158, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17304263621031184017&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 13, "aff": "Department of Computer Science, UC Berkeley; Department of Computer Science, University of Chicago + D.I.S.I., Universit\u00e0 di Genova, Italy", "aff_domain": "cs.berkeley.edu;uchicago.edu", "email": "cs.berkeley.edu;uchicago.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": 
"0;1+2", "aff_unique_norm": "University of California, Berkeley;University of Chicago;Universit\u00e0 di Genova", "aff_unique_dep": "Department of Computer Science;Department of Computer Science;D.I.S.I.", "aff_unique_url": "https://www.berkeley.edu;https://www.uchicago.edu;https://www.unige.it", "aff_unique_abbr": "UC Berkeley;UChicago;", "aff_campus_unique_index": "0;", "aff_campus_unique": "Berkeley;", "aff_country_unique_index": "0;0+1", "aff_country_unique": "United States;Italy" }, { "id": "421429abf0", "title": "Statistical Modeling of Images with Fields of Gaussian Scale Mixtures", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/3fd60983292458bf7dee75f12d5e9e05-Abstract.html", "author": "Siwei Lyu; Eero P. Simoncelli", "abstract": "The local statistical properties of photographic images, when represented in a multi-scale basis, have been described using Gaussian scale mixtures (GSMs). Here, we use this local description to construct a global field of Gaussian scale mixtures (FoGSM). Specifically, we model subbands of wavelet coefficients as a product of an exponentiated homogeneous Gaussian Markov random field (hGMRF) and a second independent hGMRF. We show that parameter estimation for FoGSM is feasible, and that samples drawn from an estimated FoGSM model have marginal and joint statistics similar to wavelet coefficients of photographic images. We develop an algorithm for image denoising based on the FoGSM model, and demonstrate substantial improvements over current state-of-the-art denoising method based on the local GSM model. Many successful methods in image processing and computer vision rely on statistical models for images, and it is thus of continuing interest to develop improved models, both in terms of their ability to precisely capture image structures, and in terms of their tractability when used in applications. Constructing such a model is difficult, primarily because of the intrinsic high dimensionality of the space of images. 
Two simplifying assumptions are usually made to reduce model complexity. The first is Markovianity: the density of a pixel conditioned on a small neighborhood, is assumed to be independent from the rest of the image. The second assumption is homogeneity: the local density is assumed to be independent of its absolute position within the image. The set of models satisfying both of these assumptions constitute the class of homogeneous Markov random fields (hMRFs). Over the past two decades, studies of photographic images represented with multi-scale multiorientation image decompositions (loosely referred to as \"wavelets\") have revealed striking nonGaussian regularities and inter and intra-subband dependencies. For instance, wavelet coefficients generally have highly kurtotic marginal distributions [1, 2], and their amplitudes exhibit strong correlations with the amplitudes of nearby coefficients [3, 4]. One model that can capture the nonGaussian marginal behaviors is a product of non-Gaussian scalar variables [5]. A number of authors have developed non-Gaussian MRF models based on this sort of local description [6, 7, 8], among which the recently developed fields of experts model [7] has demonstrated impressive performance in denoising (albeit at an extremely high computational cost in learning model parameters). An alternative model that can capture non-Gaussian local structure is a scale mixture model [9, 10, 11]. An important special case is Gaussian scale mixtures (GSM), which consists of a Gaussian random vector whose amplitude is modulated by a hidden scaling variable. The GSM model provides a particularly good description of local image statistics, and the Gaussian substructure of the model leads to efficient algorithms for parameter estimation and inference. Local GSM-based methods represent the current state-of-the-art in image denoising [12]. 
The power of GSM models should be substantially improved when extended to describe more than a small neighborhood of wavelet coefficients. To this end, several authors have embedded local Gaussian mixtures into tree-structured", "bibtex": "@inproceedings{NIPS2006_3fd60983,\n author = {Lyu, Siwei and Simoncelli, Eero},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Statistical Modeling of Images with Fields of Gaussian Scale Mixtures},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/3fd60983292458bf7dee75f12d5e9e05-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/3fd60983292458bf7dee75f12d5e9e05-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/3fd60983292458bf7dee75f12d5e9e05-Metadata.json", "review": "", "metareview": "", "pdf_size": 253252, "gs_citation": 78, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2005301767552472334&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 14, "aff": "Howard Hughes Medical Institute + Center for Neural Science + Courant Institute of Mathematical Sciences, New York University; Howard Hughes Medical Institute + Center for Neural Science + Courant Institute of Mathematical Sciences, New York University", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0+1+1;0+1+1", "aff_unique_norm": "Howard Hughes Medical Institute;New York University", "aff_unique_dep": ";Center for Neural Science", "aff_unique_url": "https://www.hhmi.org;https://www.cns.nyu.edu", "aff_unique_abbr": "HHMI;CNS", "aff_campus_unique_index": "1;1", "aff_campus_unique": ";New York", "aff_country_unique_index": "0+0+0;0+0+0", "aff_country_unique": "United States" }, { "id": "3fea22b5f4", "title": "Stochastic Relational 
Models for Discriminative Link Prediction", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/71887f62f073a78511cbac56f8cab53f-Abstract.html", "author": "Kai Yu; Wei Chu; Shipeng Yu; Volker Tresp; Zhao Xu", "abstract": "We introduce a Gaussian process (GP) framework, stochastic relational models (SRM), for learning social, physical, and other relational phenomena where interactions between entities are observed. The key idea is to model the stochastic structure of entity relationships (i.e., links) via a tensor interaction of multiple GPs, each defined on one type of entities. These models in fact define a set of nonparametric priors on infinite dimensional tensor matrices, where each element represents a relationship between a tuple of entities. By maximizing the marginalized likelihood, information is exchanged between the participating GPs through the entire relational network, so that the dependency structure of links is messaged to the dependency of entities, reflected by the adapted GP kernels. The framework offers a discriminative approach to link prediction, namely, predicting the existences, strengths, or types of relationships based on the partially observed linkage network as well as the attributes of entities (if given). We discuss properties and variants of SRM and derive an efficient learning algorithm. Very encouraging experimental results are achieved on a toy problem and a user-movie preference link prediction task. In the end we discuss extensions of SRM to general relational learning tasks.", "bibtex": "@inproceedings{NIPS2006_71887f62,\n author = {Yu, Kai and Chu, Wei and Yu, Shipeng and Tresp, Volker and Xu, Zhao},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Stochastic Relational Models for Discriminative Link Prediction},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/71887f62f073a78511cbac56f8cab53f-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/71887f62f073a78511cbac56f8cab53f-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/71887f62f073a78511cbac56f8cab53f-Metadata.json", "review": "", "metareview": "", "pdf_size": 112245, "gs_citation": 267, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13279548103846992324&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 19, "aff": "NEC Laboratories America, Cupertino, CA 95014; CCLS, Columbia University, New York, NY 10115; Siemens AG, Corporate Research & Technology, 81739 Munich, Germany; Siemens AG, Corporate Research & Technology, 81739 Munich, Germany; Siemens AG, Corporate Research & Technology, 81739 Munich, Germany", "aff_domain": ";;;;", "email": ";;;;", "github": "", "project": "", "author_num": 5, "track": "main", "status": "Poster", "aff_unique_index": "0;1;2;2;2", "aff_unique_norm": "NEC Laboratories America;Columbia University;Siemens AG", "aff_unique_dep": ";CCLS;Corporate Research & Technology", "aff_unique_url": "https://www.nec-labs.com;https://www.columbia.edu;https://www.siemens.com", "aff_unique_abbr": "NEC Labs;Columbia;Siemens", "aff_campus_unique_index": "0;1;2;2;2", "aff_campus_unique": "Cupertino;New York;Munich", "aff_country_unique_index": "0;0;1;1;1", "aff_country_unique": "United States;Germany" }, { "id": "8b04336ddd", "title": "Stratification Learning: Detecting Mixed Density and Dimensionality in High Dimensional Point Clouds", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/51be2fed6c55f5aa0c16ff14c140b187-Abstract.html", "author": "Gloria Haro; Gregory Randall; Guillermo Sapiro", "abstract": "The study of point cloud data sampled from 
a stratification, a collection of manifolds with possible different dimensions, is pursued in this paper. We present a technique for simultaneously soft clustering and estimating the mixed dimensionality and density of such structures. The framework is based on a maximum likelihood estimation of a Poisson mixture model. The presentation of the approach is completed with artificial and real examples demonstrating the importance of extending manifold learning to stratification learning.", "bibtex": "@inproceedings{NIPS2006_51be2fed,\n author = {Haro, Gloria and Randall, Gregory and Sapiro, Guillermo},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Stratification Learning: Detecting Mixed Density and Dimensionality in High Dimensional Point Clouds},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/51be2fed6c55f5aa0c16ff14c140b187-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/51be2fed6c55f5aa0c16ff14c140b187-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/51be2fed6c55f5aa0c16ff14c140b187-Metadata.json", "review": "", "metareview": "", "pdf_size": 267082, "gs_citation": 63, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2920281844349411991&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 17, "aff": "IMA and Electrical and Computer Engineering, University of Minnesota, Minneapolis, MN 55455; IMA and Electrical and Computer Engineering, University of Minnesota, Minneapolis, MN 55455; IMA and Electrical and Computer Engineering, University of Minnesota, Minneapolis, MN 55455", "aff_domain": "ima.umn.edu;fing.edu.uy;umn.edu", "email": "ima.umn.edu;fing.edu.uy;umn.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": 
"University of Minnesota", "aff_unique_dep": "Electrical and Computer Engineering", "aff_unique_url": "https://www.umn.edu", "aff_unique_abbr": "UMN", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Minneapolis", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "76c409500d", "title": "Subordinate class recognition using relational object models", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/54fda78aa8a09b4d77b5aaec57b75028-Abstract.html", "author": "Aharon B. Hillel; Daphna Weinshall", "abstract": "We address the problem of sub-ordinate class recognition, like the distinction between different types of motorcycles. Our approach is motivated by observations from cognitive psychology, which identify parts as the defining component of basic level categories (like motorcycles), while sub-ordinate categories are more often defined by part properties (like 'jagged wheels'). Accordingly, we suggest a two-stage algorithm: First, a relational part based object model is learnt using unsegmented object images from the inclusive class (e.g., motorcycles in general). The model is then used to build a class-specific vector representation for images, where each entry corresponds to a model's part. In the second stage we train a standard discriminative classifier to classify subclass instances (e.g., cross motorcycles) based on the class-specific vector representation. We describe extensive experimental results with several subclasses. The proposed algorithm typically gives better results than a competing one-step algorithm, or a two stage algorithm where classification is based on a model of the sub-ordinate class.", "bibtex": "@inproceedings{NIPS2006_54fda78a,\n author = {Hillel, Aharon and Weinshall, Daphna},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Subordinate class recognition using relational object models},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/54fda78aa8a09b4d77b5aaec57b75028-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/54fda78aa8a09b4d77b5aaec57b75028-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/54fda78aa8a09b4d77b5aaec57b75028-Metadata.json", "review": "", "metareview": "", "pdf_size": 184670, "gs_citation": 59, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12571467550055412673&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "Department of Computer Science, The Hebrew university of Jerusalem; Department of Computer Science, The Hebrew university of Jerusalem", "aff_domain": "cs.huji.ac.il;cs.huji.ac.il", "email": "cs.huji.ac.il;cs.huji.ac.il", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Hebrew University of Jerusalem", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.huji.ac.il", "aff_unique_abbr": "HUJI", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Jerusalem", "aff_country_unique_index": "0;0", "aff_country_unique": "Israel" }, { "id": "d583fe0be9", "title": "Support Vector Machines on a Budget", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/a081cab429ff7a3b96e0a07319f1049e-Abstract.html", "author": "Ofer Dekel; Yoram Singer", "abstract": "The standard Support Vector Machine formulation does not provide its user with the ability to explicitly control the number of support vectors used to de\ufb01ne the generated classi\ufb01er. 
We present a modi\ufb01ed version of SVM that allows the user to set a budget parameter B and focuses on minimizing the loss attained by the B worst-classi\ufb01ed examples while ignoring the remaining examples. This idea can be used to derive sparse versions of both L1-SVM and L2-SVM. Technically, we obtain these new SVM variants by replacing the 1-norm in the standard SVM for- mulation with various interpolation-norms. We also adapt the SMO optimization algorithm to our setting and report on some preliminary experimental results.", "bibtex": "@inproceedings{NIPS2006_a081cab4,\n author = {Dekel, Ofer and Singer, Yoram},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Support Vector Machines on a Budget},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/a081cab429ff7a3b96e0a07319f1049e-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/a081cab429ff7a3b96e0a07319f1049e-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/a081cab429ff7a3b96e0a07319f1049e-Metadata.json", "review": "", "metareview": "", "pdf_size": 112756, "gs_citation": 61, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1691619484297796372&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "School of Computer Science and Engineering, The Hebrew University, Jerusalem 91904, Israel; School of Computer Science and Engineering, The Hebrew University, Jerusalem 91904, Israel", "aff_domain": "cs.huji.ac.il;cs.huji.ac.il", "email": "cs.huji.ac.il;cs.huji.ac.il", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Hebrew University", "aff_unique_dep": "School of Computer Science and Engineering", "aff_unique_url": "http://www.huji.ac.il", "aff_unique_abbr": "HUJI", 
"aff_campus_unique_index": "0;0", "aff_campus_unique": "Jerusalem", "aff_country_unique_index": "0;0", "aff_country_unique": "Israel" }, { "id": "98b6e94881", "title": "Temporal Coding using the Response Properties of Spiking Neurons", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/2e0aca891f2a8aedf265edf533a6d9a8-Abstract.html", "author": "Thomas Voegtlin", "abstract": "In biological neurons, the timing of a spike depends on the timing of synaptic currents, in a way that is classically described by the Phase Response Curve. This has implications for temporal coding: an action potential that arrives on a synapse has an implicit meaning, that depends on the position of the postsynaptic neuron on the firing cycle. Here we show that this implicit code can be used to perform computations. Using theta neurons, we derive a spike-timing dependent learning rule from an error criterion. We demonstrate how to train an auto-encoder neural network using this rule.", "bibtex": "@inproceedings{NIPS2006_2e0aca89,\n author = {Voegtlin, Thomas},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Temporal Coding using the Response Properties of Spiking Neurons},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/2e0aca891f2a8aedf265edf533a6d9a8-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/2e0aca891f2a8aedf265edf533a6d9a8-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/2e0aca891f2a8aedf265edf533a6d9a8-Metadata.json", "review": "", "metareview": "", "pdf_size": 336102, "gs_citation": 18, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17492598016273134057&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 10, "aff": "INRIA - Campus Scientifique, B.P. 
239 F-54506 Vandoeuvre-Les-Nancy Cedex, FRANCE", "aff_domain": "loria.fr", "email": "loria.fr", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "INRIA", "aff_unique_dep": "", "aff_unique_url": "https://www.inria.fr", "aff_unique_abbr": "INRIA", "aff_campus_unique_index": "0", "aff_campus_unique": "Vandoeuvre-Les-Nancy", "aff_country_unique_index": "0", "aff_country_unique": "France" }, { "id": "7d4b1056bd", "title": "Temporal and Cross-Subject Probabilistic Models for fMRI Prediction Tasks", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/37e7897f62e8d91b1ce60515829ca282-Abstract.html", "author": "Alexis Battle; Gal Chechik; Daphne Koller", "abstract": "We present a probabilistic model applied to the fMRI video rating prediction task of the Pittsburgh Brain Activity Interpretation Competition (PBAIC) [2]. Our goal is to predict a time series of subjective, semantic ratings of a movie given functional MRI data acquired during viewing by three subjects. Our method uses conditionally trained Gaussian Markov random fields, which model both the relationships between the subjects' fMRI voxel measurements and the ratings, as well as the dependencies of the ratings across time steps and between subjects. We also employed non-traditional methods for feature selection and regularization that exploit the spatial structure of voxel activity in the brain. The model displayed good performance in predicting the scored ratings for the three subjects in test data sets, and a variant of this model was the third place entrant to the 2006 PBAIC.", "bibtex": "@inproceedings{NIPS2006_37e7897f,\n author = {Battle, Alexis and Chechik, Gal and Koller, Daphne},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Temporal and Cross-Subject Probabilistic Models for fMRI Prediction Tasks},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/37e7897f62e8d91b1ce60515829ca282-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/37e7897f62e8d91b1ce60515829ca282-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/37e7897f62e8d91b1ce60515829ca282-Metadata.json", "review": "", "metareview": "", "pdf_size": 101972, "gs_citation": 21, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8192624146606106852&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 14, "aff": "Department of Computer Science, Stanford University; Department of Computer Science, Stanford University; Department of Computer Science, Stanford University", "aff_domain": "cs.stanford.edu;cs.stanford.edu;cs.stanford.edu", "email": "cs.stanford.edu;cs.stanford.edu;cs.stanford.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0", "aff_unique_norm": "Stanford University", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.stanford.edu", "aff_unique_abbr": "Stanford", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Stanford", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "id": "31e0861216", "title": "Temporal dynamics of information content carried by neurons in the primary visual cortex", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/60792d855cd8a912a97711f91a1f155c-Abstract.html", "author": "Danko Nikoli\u0107; Stefan Haeusler; Wolf Singer; Wolfgang Maass", "abstract": "We use multi-electrode recordings from cat primary visual cortex and investigate \nwhether a simple linear classifier can extract information about the presented stimuli. 
We find that information is extractable and that it even lasts for several hundred milliseconds after the stimulus has been removed. In a fast sequence of stimulus presentation, information about both new and old stimuli is present simultaneously and nonlinear relations between these stimuli can be extracted. These \nresults suggest nonlinear properties of cortical representations. The important implications of these properties for the nonlinear brain theory are discussed.", "bibtex": "@inproceedings{NIPS2006_60792d85,\n author = {Nikoli\\'{c}, Danko and Haeusler, Stefan and Singer, Wolf and Maass, Wolfgang},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Temporal dynamics of information content carried by neurons in the primary visual cortex},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/60792d855cd8a912a97711f91a1f155c-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/60792d855cd8a912a97711f91a1f155c-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/60792d855cd8a912a97711f91a1f155c-Metadata.json", "review": "", "metareview": "", "pdf_size": 1541847, "gs_citation": 82, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3123762269655268398&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 19, "aff": "Max-Planck-Institute for Brain Research, Frankfurt (Main), Germany; Max-Planck-Institute for Brain Research, Frankfurt (Main), Germany; Institute for Theoretical Computer Science, Graz University of Technology, A-8010 Graz, Austria; Institute for Theoretical Computer Science, Graz University of Technology, A-8010 Graz, Austria", "aff_domain": "mpih-frankfurt.mpg.de;mpih-frankfurt.mpg.de;igi.tugraz.at;igi.tugraz.at", "email": 
"mpih-frankfurt.mpg.de;mpih-frankfurt.mpg.de;igi.tugraz.at;igi.tugraz.at", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;0;1;1", "aff_unique_norm": "Max-Planck-Institute for Brain Research;Graz University of Technology", "aff_unique_dep": ";Institute for Theoretical Computer Science", "aff_unique_url": "https://www.brain.mpg.de;https://www.tugraz.at", "aff_unique_abbr": "MPIBR;TU Graz", "aff_campus_unique_index": "0;0;1;1", "aff_campus_unique": "Frankfurt (Main);Graz", "aff_country_unique_index": "0;0;1;1", "aff_country_unique": "Germany;Austria" }, { "id": "b3692261ae", "title": "The Neurodynamics of Belief Propagation on Binary Markov Random Fields", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/9aeade7beada35c83d3b344fbafe43b0-Abstract.html", "author": "Thomas Ott; Ruedi Stoop", "abstract": "We rigorously establish a close relationship between message passing algorithms and models of neurodynamics by showing that the equations of a continuous Hopfield network can be derived from the equations of belief propagation on a binary Markov random field. As Hopfield networks are equipped with a Lyapunov function, convergence is guaranteed. As a consequence, in the limit of many weak connections per neuron, Hopfield networks exactly implement a continuous-time variant of belief propagation starting from message initialisations that prevent from running into convergence problems. Our results lead to a better understanding of the role of message passing algorithms in real biological neural networks.", "bibtex": "@inproceedings{NIPS2006_9aeade7b,\n author = {Ott, Thomas and Stoop, Ruedi},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {The Neurodynamics of Belief Propagation on Binary Markov Random Fields},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/9aeade7beada35c83d3b344fbafe43b0-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/9aeade7beada35c83d3b344fbafe43b0-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/9aeade7beada35c83d3b344fbafe43b0-Metadata.json", "review": "", "metareview": "", "pdf_size": 149610, "gs_citation": 39, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11189133608296211106&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "Institute of Neuroinformatics ETH/UNIZH Zurich Switzerland; Institute of Neuroinformatics ETH/UNIZH Zurich Switzerland", "aff_domain": "ini.phys.ethz.ch;ini.phys.ethz.ch", "email": "ini.phys.ethz.ch;ini.phys.ethz.ch", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "ETH Zurich", "aff_unique_dep": "Institute of Neuroinformatics", "aff_unique_url": "https://www.ethz.ch", "aff_unique_abbr": "ETH", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Zurich", "aff_country_unique_index": "0;0", "aff_country_unique": "Switzerland" }, { "id": "e42f5103f1", "title": "The Robustness-Performance Tradeoff in Markov Decision Processes", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/177540c7bcb8db31697b601642eac8d4-Abstract.html", "author": "Huan Xu; Shie Mannor", "abstract": "Computation of a satisfactory control policy for a Markov decision process when the parameters of the model are not exactly known is a problem encountered in many practical applications. The traditional robust approach is based on a worst-case analysis and may lead to an overly conservative policy. 
In this paper we consider the tradeoff between nominal performance and the worst case performance over all possible models. Based on parametric linear programming, we propose a method that computes the whole set of Pareto efficient policies in the performance-robustness plane when only the reward parameters are subject to uncertainty. In the more general case when the transition probabilities are also subject to error, we show that the strategy with the \"optimal\" tradeoff might be non-Markovian and hence is in general not tractable.", "bibtex": "@inproceedings{NIPS2006_177540c7,\n author = {Xu, Huan and Mannor, Shie},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {The Robustness-Performance Tradeoff in Markov Decision Processes},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/177540c7bcb8db31697b601642eac8d4-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/177540c7bcb8db31697b601642eac8d4-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/177540c7bcb8db31697b601642eac8d4-Metadata.json", "review": "", "metareview": "", "pdf_size": 84232, "gs_citation": 69, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5784074564304173256&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 11, "aff": "Department of Electrical and Computer Engineering, McGill University; Department of Electrical and Computer Engineering, McGill University", "aff_domain": "cim.mcgill.ca;ece.mcgill.ca", "email": "cim.mcgill.ca;ece.mcgill.ca", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "McGill University", "aff_unique_dep": "Department of Electrical and Computer Engineering", "aff_unique_url": "https://www.mcgill.ca", "aff_unique_abbr": "McGill", 
"aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "Canada" }, { "id": "6d5f620839", "title": "Theory and Dynamics of Perceptual Bistability", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/2836be05e71a8f34902a6e6b37350134-Abstract.html", "author": "Paul R. Schrater; Rashmi Sundareswara", "abstract": "Perceptual Bistability refers to the phenomenon of spontaneously switching between two or more interpretations of an image under continuous viewing. Although switching behavior is increasingly well characterized, the origins remain elusive. We propose that perceptual switching naturally arises from the brain's search for best interpretations while performing Bayesian inference. In particular, we propose that the brain explores a posterior distribution over image interpretations at a rapid time scale via a sampling-like process and updates its interpretation when a sampled interpretation is better than the discounted value of its current interpretation. We formalize the theory, explicitly derive switching rate distributions and discuss qualitative properties of the theory including the effect of changes in the posterior distribution on switching rates. Finally, predictions of the theory are shown to be consistent with measured changes in human switching dynamics to Necker cube stimuli induced by context.", "bibtex": "@inproceedings{NIPS2006_2836be05,\n author = {Schrater, Paul R and Sundareswara, Rashmi},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Theory and Dynamics of Perceptual Bistability},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/2836be05e71a8f34902a6e6b37350134-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/2836be05e71a8f34902a6e6b37350134-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/2836be05e71a8f34902a6e6b37350134-Metadata.json", "review": "", "metareview": "", "pdf_size": 258103, "gs_citation": 23, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13005422441687924121&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 15, "aff": "Departments of Psychology and Computer Sci. & Eng., University of Minnesota; Department of Computer Sci. & Eng., University of Minnesota", "aff_domain": "umn.edu;cs.umn.edu", "email": "umn.edu;cs.umn.edu", "github": "", "project": "http://www.schrater.org", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "University of Minnesota", "aff_unique_dep": "Departments of Psychology and Computer Science & Engineering", "aff_unique_url": "https://www.umn.edu", "aff_unique_abbr": "UMN", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "95ec25d147", "title": "Tighter PAC-Bayes Bounds", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/3f5ee243547dee91fbd053c1c4a845aa-Abstract.html", "author": "Amiran Ambroladze; Emilio Parrado-hern\u00e1ndez; John S. Shawe-taylor", "abstract": "This paper proposes a PAC-Bayes bound to measure the performance of Support Vector Machine (SVM) classi\ufb01ers. The bound is based on learning a prior over the distribution of classi\ufb01ers with a part of the training samples. 
Experimental work shows that this bound is tighter than the original PAC-Bayes, resulting in an enhancement of the predictive capabilities of the PAC-Bayes bound. In addition, it is shown that the use of this bound as a means to estimate the hyperparameters of the classi\ufb01er compares favourably with cross validation in terms of accuracy of the model, while saving a lot of computational burden.", "bibtex": "@inproceedings{NIPS2006_3f5ee243,\n author = {Ambroladze, Amiran and Parrado-hern\\'{a}ndez, Emilio and Shawe-taylor, John},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Tighter PAC-Bayes Bounds},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/3f5ee243547dee91fbd053c1c4a845aa-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/3f5ee243547dee91fbd053c1c4a845aa-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/3f5ee243547dee91fbd053c1c4a845aa-Metadata.json", "review": "", "metareview": "", "pdf_size": 164138, "gs_citation": 158, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13548341739849492052&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "Dep. of Mathematics, Lund University/LTH; Dep. of Signal Processing and Communications, University Carlos III of Madrid; Dep. 
of Computer Science, University College London", "aff_domain": "math.lth.se;tsc.uc3m.es;cs.ucl.ac.uk", "email": "math.lth.se;tsc.uc3m.es;cs.ucl.ac.uk", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;2", "aff_unique_norm": "Lund University;University Carlos III of Madrid;University College London", "aff_unique_dep": "Department of Mathematics;Department of Signal Processing and Communications;Department of Computer Science", "aff_unique_url": "https://www.lth.se;https://www.uc3m.es;https://www.ucl.ac.uk", "aff_unique_abbr": "LU;UC3M;UCL", "aff_campus_unique_index": "0;2", "aff_campus_unique": "Lund;;London", "aff_country_unique_index": "0;1;2", "aff_country_unique": "Sweden;Spain;United Kingdom" }, { "id": "9f3214e739", "title": "Towards a general independent subspace analysis", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/20479c788fb27378c2c99eadcf207e7f-Abstract.html", "author": "Fabian J. Theis", "abstract": "The increasingly popular independent component analysis (ICA) may only be applied to data following the generative ICA model in order to guarantee algorithmindependent and theoretically valid results. Subspace ICA models generalize the assumption of component independence to independence between groups of components. They are attractive candidates for dimensionality reduction methods, however are currently limited by the assumption of equal group sizes or less general semi-parametric models. By introducing the concept of irreducible independent subspaces or components, we present a generalization to a parameter-free mixture model. Moreover, we relieve the condition of at-most-one-Gaussian by including previous results on non-Gaussian component analysis. After introducing this general model, we discuss joint block diagonalization with unknown block sizes, on which we base a simple extension of JADE to algorithmically perform the subspace analysis. 
Simulations confirm the feasibility of the algorithm.", "bibtex": "@inproceedings{NIPS2006_20479c78,\n author = {Theis, Fabian},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Towards a general independent subspace analysis},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/20479c788fb27378c2c99eadcf207e7f-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/20479c788fb27378c2c99eadcf207e7f-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/20479c788fb27378c2c99eadcf207e7f-Metadata.json", "review": "", "metareview": "", "pdf_size": 289834, "gs_citation": 128, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10042715623656878192&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "Max Planck Institute for Dynamics and Self-Organisation & Bernstein Center for Computational Neuroscience", "aff_domain": "theis.name", "email": "theis.name", "github": "", "project": "", "author_num": 1, "track": "main", "status": "Poster", "aff_unique_index": "0", "aff_unique_norm": "Max Planck Institute for Dynamics and Self-Organisation", "aff_unique_dep": "Institute for Dynamics and Self-Organisation", "aff_unique_url": "https://www.mpidsys.mpg.de", "aff_unique_abbr": "MPI for Dynamics and Self-Organisation", "aff_country_unique_index": "0", "aff_country_unique": "Germany" }, { "id": "e1def89ecc", "title": "Training Conditional Random Fields for Maximum Labelwise Accuracy", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/24b43fb034a10d78bec71274033b4096-Abstract.html", "author": "Samuel S. Gross; Olga Russakovsky; Chuong B. 
Do; Serafim Batzoglou", "abstract": "We consider the problem of training a conditional random field (CRF) to maximize per-label predictive accuracy on a training set, an approach motivated by the principle of empirical risk minimization. We give a gradient-based procedure for minimizing an arbitrarily accurate approximation of the empirical risk under a Hamming loss function. In experiments with both simulated and real data, our optimization procedure gives significantly better testing performance than several current approaches for CRF training, especially in situations of high label noise.", "bibtex": "@inproceedings{NIPS2006_24b43fb0,\n author = {Gross, Samuel and Russakovsky, Olga and B., Chuong and Batzoglou, Serafim},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Training Conditional Random Fields for Maximum Labelwise Accuracy},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/24b43fb034a10d78bec71274033b4096-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/24b43fb034a10d78bec71274033b4096-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/24b43fb034a10d78bec71274033b4096-Metadata.json", "review": "", "metareview": "", "pdf_size": 110314, "gs_citation": 48, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8239862405108120362&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 15, "aff": "Computer Science Department, Stanford University, Stanford, CA, USA; Computer Science Department, Stanford University, Stanford, CA, USA; Computer Science Department, Stanford University, Stanford, CA, USA; Computer Science Department, Stanford University, Stanford, CA, USA", "aff_domain": "cs.stanford.edu;cs.stanford.edu;cs.stanford.edu;cs.stanford.edu", "email": 
"cs.stanford.edu;cs.stanford.edu;cs.stanford.edu;cs.stanford.edu", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Stanford University", "aff_unique_dep": "Computer Science Department", "aff_unique_url": "https://www.stanford.edu", "aff_unique_abbr": "Stanford", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Stanford", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "id": "7d06da5d6a", "title": "TrueSkill\u2122: A Bayesian Skill Rating System", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/f44ee263952e65b3610b8ba51229d1f9-Abstract.html", "author": "Ralf Herbrich; Tom Minka; Thore Graepel", "abstract": "Abstract Unavailable", "bibtex": "@inproceedings{NIPS2006_f44ee263,\n author = {Herbrich, Ralf and Minka, Tom and Graepel, Thore},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {TrueSkill\\texttrademark : A Bayesian Skill Rating System},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/f44ee263952e65b3610b8ba51229d1f9-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/f44ee263952e65b3610b8ba51229d1f9-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/f44ee263952e65b3610b8ba51229d1f9-Metadata.json", "review": "", "metareview": "", "pdf_size": 446305, "gs_citation": 1180, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2518007382676231842&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": ";;", "aff_domain": ";;", "email": ";;", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster" }, { "id": "4db2f04357", "title": "Uncertainty, phase and oscillatory hippocampal recall", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/c5d9256689c43036581f781c61f26e50-Abstract.html", "author": "M\u00e1t\u00e9 Lengyel; Peter Dayan", "abstract": "Many neural areas, notably, the hippocampus, show structured, dynamical, population behavior such as coordinated oscillations. It has long been observed that such oscillations provide a substrate for representing analog information in the firing phases of neurons relative to the underlying population rhythm. However, it has become increasingly clear that it is essential for neural populations to represent uncertainty about the information they capture, and the substantial recent work on neural codes for uncertainty has omitted any analysis of oscillatory systems. Here, we observe that, since neurons in an oscillatory network need not only fire once in each cycle (or even at all), uncertainty about the analog quantities each neuron represents by its firing phase might naturally be reported through the degree of concentration of the spikes that it fires. 
We apply this theory to memory in a model of oscillatory associative recall in hippocampal area CA3. Although it is not well treated in the literature, representing and manipulating uncertainty is fundamental to competent memory; our theory enables us to view CA3 as an effective uncertainty-aware, retrieval system.", "bibtex": "@inproceedings{NIPS2006_c5d92566,\n author = {Lengyel, M\\'{a}t\\'{e} and Dayan, Peter},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Uncertainty, phase and oscillatory hippocampal recall},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/c5d9256689c43036581f781c61f26e50-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/c5d9256689c43036581f781c61f26e50-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/c5d9256689c43036581f781c61f26e50-Metadata.json", "review": "", "metareview": "", "pdf_size": 537607, "gs_citation": 23, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3195057639786572809&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": ";", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster" }, { "id": "821064a71c", "title": "Unified Inference for Variational Bayesian Linear Gaussian State-Space Models", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/09d37c08f7b129e96277388757530c72-Abstract.html", "author": "David Barber; Silvia Chiappa", "abstract": "Linear Gaussian State-Space Models are widely used and a Bayesian treatment of parameters is therefore of considerable interest. The approximate Variational Bayesian method applied to these models is an attractive approach, used successfully in applications ranging from acoustics to bioinformatics. 
The most challenging aspect of implementing the method is in performing inference on the hidden state sequence of the model. We show how to convert the inference problem so that standard Kalman Filtering/Smoothing recursions from the literature may be applied. This is in contrast to previously published approaches based on Belief Propagation. Our framework both simplifies and unifies the inference problem, so that future applications may be more easily developed. We demonstrate the elegance of the approach on Bayesian temporal ICA, with an application to finding independent dynamical processes underlying noisy EEG signals.", "bibtex": "@inproceedings{NIPS2006_09d37c08,\n author = {Barber, David and Chiappa, Silvia},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Unified Inference for Variational Bayesian Linear Gaussian State-Space Models},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/09d37c08f7b129e96277388757530c72-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/09d37c08f7b129e96277388757530c72-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/09d37c08f7b129e96277388757530c72-Metadata.json", "review": "", "metareview": "", "pdf_size": 208886, "gs_citation": 74, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17181202661029418526&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 24, "aff": "IDIAP Research Institute; IDIAP Research Institute", "aff_domain": "idiap.ch;idiap.ch", "email": "idiap.ch;idiap.ch", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;0", "aff_unique_norm": "Idiap Research Institute", "aff_unique_dep": "", "aff_unique_url": "https://www.idiap.ch", "aff_unique_abbr": "IDIAP", "aff_campus_unique_index": "", 
"aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "Switzerland" }, { "id": "9f9cc07c19", "title": "Unsupervised Learning of a Probabilistic Grammar for Object Detection and Parsing", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/9cb9ed4f35cf7c2f295cc2bc6f732a84-Abstract.html", "author": "Yuanhao Chen; Long Zhu; Alan L. Yuille", "abstract": "We describe an unsupervised method for learning a probabilistic grammar of an object from a set of training examples. Our approach is invariant to the scale and rotation of the objects. We illustrate our approach using thirteen objects from the Caltech 101 database. In addition, we learn the model of a hybrid object class where we do not know the specific object or its position, scale or pose. This is illustrated by learning a hybrid class consisting of faces, motorbikes, and airplanes. The individual objects can be recovered as different aspects of the grammar for the object class. In all cases, we validate our results by learning the probability grammars from training datasets and evaluating them on the test datasets. We compare our method to alternative approaches. The advantages of our approach is the speed of inference (under one second), the parsing of the object, and increased accuracy of performance. Moreover, our approach is very general and can be applied to a large range of objects and structures.", "bibtex": "@inproceedings{NIPS2006_9cb9ed4f,\n author = {Chen, Yuanhao and Zhu, Long and Yuille, Alan L},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Unsupervised Learning of a Probabilistic Grammar for Object Detection and Parsing},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/9cb9ed4f35cf7c2f295cc2bc6f732a84-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/9cb9ed4f35cf7c2f295cc2bc6f732a84-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/9cb9ed4f35cf7c2f295cc2bc6f732a84-Metadata.json", "review": "", "metareview": "", "pdf_size": 495132, "gs_citation": 68, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17362756255860546027&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 16, "aff": "Department of Statistics, University of California at Los Angeles; Department of Automation, University of Science and Technology of China; Department of Statistics, University of California at Los Angeles", "aff_domain": "stat.ucla.edu;ustc.edu;stat.ucla.edu", "email": "stat.ucla.edu;ustc.edu;stat.ucla.edu", "github": "", "project": "", "author_num": 3, "track": "main", "status": "Poster", "aff_unique_index": "0;1;0", "aff_unique_norm": "University of California, Los Angeles;University of Science and Technology of China", "aff_unique_dep": "Department of Statistics;Department of Automation", "aff_unique_url": "https://www.ucla.edu;http://www.ustc.edu.cn", "aff_unique_abbr": "UCLA;USTC", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Los Angeles;", "aff_country_unique_index": "0;1;0", "aff_country_unique": "United States;China" }, { "id": "3211ead77f", "title": "Unsupervised Regression with Applications to Nonlinear System Identification", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/f0204e1d3ee3e4b05de4e2ddbd39e076-Abstract.html", "author": "Ali Rahimi; Ben Recht", "abstract": "We derive a cost functional for estimating the relationship between highdimensional observations and the low-dimensional process that 
generated them with no input-output examples. Limiting our search to invertible observation functions confers numerous benefits, including a compact representation and no suboptimal local minima. Our approximation algorithms for optimizing this cost functional are fast and give diagnostic bounds on the quality of their solution. Our method can be viewed as a manifold learning algorithm that utilizes a prior on the low-dimensional manifold coordinates. The benefits of taking advantage of such priors in manifold learning and searching for the inverse observation functions in system identification are demonstrated empirically by learning to track moving targets from raw measurements in a sensor network setting and in an RFID tracking experiment.", "bibtex": "@inproceedings{NIPS2006_f0204e1d,\n author = {Rahimi, Ali and Recht, Ben},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Unsupervised Regression with Applications to Nonlinear System Identification},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/f0204e1d3ee3e4b05de4e2ddbd39e076-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/f0204e1d3ee3e4b05de4e2ddbd39e076-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/f0204e1d3ee3e4b05de4e2ddbd39e076-Metadata.json", "review": "", "metareview": "", "pdf_size": 1910435, "gs_citation": 17, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16438548839105666249&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 16, "aff": "Intel Research Seattle; California Institute of Technology", "aff_domain": "intel.com;ist.caltech.edu", "email": "intel.com;ist.caltech.edu", "github": "", "project": "", "author_num": 2, "track": "main", "status": "Poster", "aff_unique_index": "0;1", "aff_unique_norm": "Intel;California Institute 
of Technology", "aff_unique_dep": "Intel Research;", "aff_unique_url": "https://www.intel.com;https://www.caltech.edu", "aff_unique_abbr": "Intel;Caltech", "aff_campus_unique_index": "0;1", "aff_campus_unique": "Seattle;Pasadena", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "id": "fa3a64a189", "title": "Using Combinatorial Optimization within Max-Product Belief Propagation", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/8b313cbf30999888de32da1ec83ff503-Abstract.html", "author": "Daniel Tarlow; Gal Elidan; Daphne Koller; John C. Duchi", "abstract": "In general, the problem of computing a maximum a posteriori (MAP) assignment in a Markov random field (MRF) is computationally intractable. However, in certain subclasses of MRF, an optimal or close-to-optimal assignment can be found very efficiently using combinatorial optimization algorithms: certain MRFs with mutual exclusion constraints can be solved using bipartite matching, and MRFs with regular potentials can be solved using minimum cut methods. However, these solutions do not apply to the many MRFs that contain such tractable components as sub-networks, but also other non-complying potentials. In this paper, we present a new method, called C O M P O S E, for exploiting combinatorial optimization for sub-networks within the context of a max-product belief propagation algorithm. C O M P O S E uses combinatorial optimization for computing exact maxmarginals for an entire sub-network; these can then be used for inference in the context of the network as a whole. We describe highly efficient methods for computing max-marginals for subnetworks corresponding both to bipartite matchings and to regular networks. We present results on both synthetic and real networks encoding correspondence problems between images, which involve both matching constraints and pairwise geometric constraints. 
We compare to a range of current methods, showing that the ability of C O M P O S E to transmit information globally across the network leads to improved convergence, decreased running time, and higher-scoring assignments.", "bibtex": "@inproceedings{NIPS2006_8b313cbf,\n author = {Tarlow, Daniel and Elidan, Gal and Koller, Daphne and Duchi, John C},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {Using Combinatorial Optimization within Max-Product Belief Propagation},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/8b313cbf30999888de32da1ec83ff503-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/8b313cbf30999888de32da1ec83ff503-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/8b313cbf30999888de32da1ec83ff503-Metadata.json", "review": "", "metareview": "", "pdf_size": 173159, "gs_citation": 100, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8170992364411608573&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 16, "aff": "Department of Computer Science, Stanford University; Department of Computer Science, Stanford University; Department of Computer Science, Stanford University; Department of Computer Science, Stanford University", "aff_domain": "cs.stanford.edu;cs.stanford.edu;cs.stanford.edu;cs.stanford.edu", "email": "cs.stanford.edu;cs.stanford.edu;cs.stanford.edu;cs.stanford.edu", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Stanford University", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.stanford.edu", "aff_unique_abbr": "Stanford", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Stanford", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United 
States" }, { "id": "1cffb259b0", "title": "iLSTD: Eligibility Traces and Convergence Analysis", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/6ad4174eba19ecb5fed17411a34ff5e6-Abstract.html", "author": "Alborz Geramifard; Michael Bowling; Martin Zinkevich; Richard S. Sutton", "abstract": "We present new theoretical and empirical results with the iLSTD algorithm for policy evaluation in reinforcement learning with linear function approximation. iLSTD is an incremental method for achieving results similar to LSTD, the data-efficient, least-squares version of temporal difference learning, without incurring the full cost of the LSTD computation. LSTD is O(n^2), where n is the number of parameters in the linear function approximator, while iLSTD is O(n). In this paper, we generalize the previous iLSTD algorithm and present three new results: (1) the first convergence proof for an iLSTD algorithm; (2) an extension to incorporate eligibility traces without changing the asymptotic computational complexity; and (3) the first empirical results with an iLSTD algorithm for a problem (mountain car) with feature vectors large enough (n = 10,000) to show substantial computational advantages over LSTD.", "bibtex": "@inproceedings{NIPS2006_6ad4174e,\n author = {Geramifard, Alborz and Bowling, Michael and Zinkevich, Martin and Sutton, Richard S},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. 
Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {iLSTD: Eligibility Traces and Convergence Analysis},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/6ad4174eba19ecb5fed17411a34ff5e6-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/6ad4174eba19ecb5fed17411a34ff5e6-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/6ad4174eba19ecb5fed17411a34ff5e6-Metadata.json", "review": "", "metareview": "", "pdf_size": 1017891, "gs_citation": 66, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=489781131323925239&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 24, "aff": "Department of Computing Science; Department of Computing Science; Department of Computing Science; Department of Computing Science", "aff_domain": "cs.ualberta.ca;cs.ualberta.ca;cs.ualberta.ca;cs.ualberta.ca", "email": "cs.ualberta.ca;cs.ualberta.ca;cs.ualberta.ca;cs.ualberta.ca", "github": "", "project": "", "author_num": 4, "track": "main", "status": "Poster", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "University of Alberta", "aff_unique_dep": "Department of Computing Science", "aff_unique_url": "https://www.ualberta.ca", "aff_unique_abbr": "UAlberta", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "Canada" }, { "id": "633956f815", "title": "implicit Online Learning with Kernels", "site": "https://papers.nips.cc/paper_files/paper/2006/hash/a92c274b8be496fb05d95033552eeddd-Abstract.html", "author": "Li Cheng; Dale Schuurmans; Shaojun Wang; Terry Caelli; S.v.n. Vishwanathan", "abstract": "We present two new algorithms for online learning in reproducing kernel Hilbert spaces. Our first algorithm, ILK (implicit online learning with kernels), employs a new, implicit update technique that can be applied to a wide variety of convex loss functions. 
We then introduce a bounded memory version, SILK (sparse ILK), that maintains a compact representation of the predictor without compromising solution quality, even in non-stationary environments. We prove loss bounds and analyze the convergence rate of both. Experimental evidence shows that our proposed algorithms outperform current methods on synthetic and real data.", "bibtex": "@inproceedings{NIPS2006_a92c274b,\n author = {Cheng, Li and Schuurmans, Dale and Wang, Shaojun and Caelli, Terry and Vishwanathan, S.v.n.},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {B. Sch\\\"{o}lkopf and J. Platt and T. Hoffman},\n pages = {},\n publisher = {MIT Press},\n title = {implicit Online Learning with Kernels},\n url = {https://proceedings.neurips.cc/paper_files/paper/2006/file/a92c274b8be496fb05d95033552eeddd-Paper.pdf},\n volume = {19},\n year = {2006}\n}", "pdf": "https://papers.nips.cc/paper_files/paper/2006/file/a92c274b8be496fb05d95033552eeddd-Paper.pdf", "supp": "", "metadata": "https://papers.nips.cc/paper_files/paper/2006/file/a92c274b8be496fb05d95033552eeddd-Metadata.json", "review": "", "metareview": "", "pdf_size": 389619, "gs_citation": 94, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10871594647354728597&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 23, "aff": "National ICT Australia; National ICT Australia; Department of Computing Science, University of Alberta, Canada; Department of Computer Science and Engineering, Wright State University; National ICT Australia", "aff_domain": "nicta.com.au;nicta.com.au;cs.ualberta.ca;wright.edu;nicta.com.au", "email": "nicta.com.au;nicta.com.au;cs.ualberta.ca;wright.edu;nicta.com.au", "github": "", "project": "", "author_num": 5, "track": "main", "status": "Poster", "aff_unique_index": "0;0;1;2;0", "aff_unique_norm": "National ICT Australia;University of Alberta;Wright State University", "aff_unique_dep": ";Department of Computing Science;Department of Computer 
Science and Engineering", "aff_unique_url": "https://www.nicta.com.au;https://www.ualberta.ca;https://www.wright.edu", "aff_unique_abbr": "NICTA;UAlberta;WSU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;1;2;0", "aff_country_unique": "Australia;Canada;United States" } ]