// Global variables
let papersData = [];
let filteredPapers = [];
let currentPage = 1;
const papersPerPage = 9;

// Initialize the application
function initializeApp() {
    setupEventListeners();
    loadStats();
    loadPapers();
    setupIntersectionObserver();
}

// Set up event listeners
function setupEventListeners() {
    // Search functionality
    const searchInput = document.getElementById('search-input');
    if (searchInput) {
        searchInput.addEventListener('input', debounce(handleSearch, 300));
    }

    // Category filter
    const categoryFilter = document.getElementById('category-filter');
    if (categoryFilter) {
        categoryFilter.addEventListener('change', handleCategoryFilter);
    }

    // Smooth scroll for navigation links
    document.querySelectorAll('a[href^="#"]').forEach(anchor => {
        anchor.addEventListener('click', function (e) {
            const href = this.getAttribute('href');
            if (href === '#') return; // bare "#": querySelector('#') would throw
            e.preventDefault();
            const target = document.querySelector(href);
            if (target) {
                target.scrollIntoView({ behavior: 'smooth', block: 'start' });
            }
        });
    });

    // Mobile menu toggle
    const mobileMenuButton = document.getElementById('mobile-menu-button');
    const mobileMenu = document.getElementById('mobile-menu');
    if (mobileMenuButton && mobileMenu) {
        mobileMenuButton.addEventListener('click', () => {
            mobileMenu.classList.toggle('hidden');
        });
    }
}

// Debounce helper for search: delays `func` until `wait` ms pass without a new call
function debounce(func, wait) {
    let timeout;
    return function executedFunction(...args) {
        const later = () => {
            clearTimeout(timeout);
            func(...args);
        };
        clearTimeout(timeout);
        timeout = setTimeout(later, wait);
    };
}

// Handle search input
function handleSearch(e) {
    const query = e.target.value.toLowerCase().trim();
    filterPapers(query, document.getElementById('category-filter')?.value || '');
}

// Handle category filter changes
function handleCategoryFilter(e) {
    const category = e.target.value;
    const searchQuery = document.getElementById('search-input')?.value.toLowerCase().trim() || '';
    filterPapers(searchQuery, category);
}

// Filter papers by search query and category, then re-render from page 1
function filterPapers(searchQuery, category) {
    filteredPapers = papersData.filter(paper => {
        const matchesSearch = !searchQuery ||
            paper.title.toLowerCase().includes(searchQuery) ||
            paper.authors.some(author => author.toLowerCase().includes(searchQuery)) ||
            paper.abstract.toLowerCase().includes(searchQuery) ||
            paper.keywords.some(keyword => keyword.toLowerCase().includes(searchQuery));

        const matchesCategory = !category || paper.categories.includes(category);

        return matchesSearch && matchesCategory;
    });

    currentPage = 1;
    renderPapers();
}

// Load statistics
function loadStats() {
    // Simulated stats, animated on page load
    const stats = [
        { id: 'papers-count', target: 47 },
        { id: 'citations-count', target: 1234 },
        { id: 'h-index', target: 18 },
        { id: 'years-active', target: 8 }
    ];

    stats.forEach(stat => {
        const element = document.getElementById(stat.id);
        if (element) {
            animateCounter(element, stat.target);
        }
    });
}

// Animate a counter from 0 to `target` in ~100 steps of 20 ms each
function animateCounter(element, target) {
    let current = 0;
    const increment = target / 100;
    const timer = setInterval(() => {
        current += increment;
        if (current >= target) {
            element.textContent = target;
            clearInterval(timer);
        } else {
            element.textContent = Math.floor(current);
        }
    }, 20);
}
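// animateCounter() above drives the count with a 20 ms setInterval. For
// reference, a minimal alternative sketch using requestAnimationFrame, which
// stays smooth under varying frame rates (animateCounterRaf is a hypothetical
// helper -- it is not wired up anywhere in this file):
function animateCounterRaf(element, target, duration = 2000) {
    const start = performance.now();
    function tick(now) {
        const progress = Math.min((now - start) / duration, 1); // clamp to [0, 1]
        element.textContent = progress < 1 ? Math.floor(progress * target) : target;
        if (progress < 1) requestAnimationFrame(tick);
    }
    requestAnimationFrame(tick);
}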
// Load papers from the Semantic Scholar API, falling back to mock data
async function loadPapers() {
    const loadingIndicator = document.getElementById('loading-indicator');
    const papersContainer = document.getElementById('papers-container');

    if (loadingIndicator) loadingIndicator.classList.remove('hidden');
    if (papersContainer) papersContainer.innerHTML = '';

    try {
        // Try to fetch from the Semantic Scholar Graph API first
        const response = await fetch('https://api.semanticscholar.org/graph/v1/author/144632858/papers?fields=title,authors,abstract,year,venue,referenceCount,citationCount,fieldsOfStudy,url,openAccessPdf');

        if (response.ok) {
            const data = await response.json();
            papersData = data.data.map(paper => ({
                id: paper.paperId,
                title: paper.title,
                authors: paper.authors.map(author => author.name),
                abstract: paper.abstract || 'Abstract not available for this paper.',
                year: paper.year,
                venue: paper.venue,
                citations: paper.citationCount,
                references: paper.referenceCount,
                categories: paper.fieldsOfStudy || ['Computer Science'],
                keywords: paper.fieldsOfStudy || [],
                pdfUrl: paper.openAccessPdf?.url || null,
                url: paper.url
            }));
        } else {
            throw new Error('API request failed');
        }
    } catch (error) {
        console.warn('Failed to load from Semantic Scholar API, using mock data:', error);
        // Fall back to mock data if the API request fails
        papersData = generateMockPapers();
    }

    filteredPapers = [...papersData];
    if (loadingIndicator) loadingIndicator.classList.add('hidden');
    renderPapers();
}
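// For reference, the mapping above assumes the Graph API returns a payload
// shaped roughly like the sketch below (shape only, values illustrative;
// paperId is returned by default even though it is not in the `fields` list):
//
// {
//   "data": [
//     {
//       "paperId": "…",
//       "title": "…",
//       "abstract": "…",
//       "year": 2020,
//       "venue": "…",
//       "referenceCount": 42,
//       "citationCount": 100,
//       "fieldsOfStudy": ["Computer Science"],
//       "url": "…",
//       "openAccessPdf": { "url": "…" },
//       "authors": [{ "authorId": "…", "name": "…" }]
//     }
//   ]
// }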
// Generate mock papers data
function generateMockPapers() {
    return [
        {
            id: "attention-is-all-you-need",
            title: "Attention Is All You Need",
            authors: ["Ashish Vaswani", "Noam Shazeer", "Niki Parmar", "Jakob Uszkoreit", "Llion Jones", "Aidan N. Gomez", "Lukasz Kaiser", "Illia Polosukhin"],
            abstract: "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks that include an encoder and a decoder. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely.",
            year: 2017,
            venue: "NeurIPS",
            citations: 38000,
            references: 44,
            categories: ["nlp", "machine-learning"],
            keywords: ["transformer", "attention mechanism", "sequence-to-sequence", "neural networks"],
            pdfUrl: "https://arxiv.org/pdf/1706.03762.pdf",
            url: "https://arxiv.org/abs/1706.03762"
        },
        {
            id: "bert-pretraining",
            title: "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding",
            authors: ["Jacob Devlin", "Ming-Wei Chang", "Kenton Lee", "Kristina Toutanova"],
            abstract: "We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations from Transformers. Unlike recent language representation models, BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers.",
            year: 2019,
            venue: "NAACL",
            citations: 28000,
            references: 73,
            categories: ["nlp", "machine-learning"],
            keywords: ["bert", "pre-training", "language model", "transformer"],
            pdfUrl: "https://arxiv.org/pdf/1810.04805.pdf",
            url: "https://arxiv.org/abs/1810.04805"
        },
        {
            id: "gpt-4",
            title: "GPT-4 Technical Report",
            authors: ["OpenAI"],
            abstract: "We report the development of GPT-4, a large-scale, multimodal model which can accept image and text inputs and produce text outputs. While less capable than humans in many real-world scenarios, GPT-4 exhibits human-level performance on various professional and academic benchmarks, including passing a simulated bar exam with a score around the top 10% of test takers.",
            year: 2023,
            venue: "ArXiv",
            citations: 4500,
            references: 98,
            categories: ["nlp", "machine-learning", "artificial-intelligence"],
            keywords: ["gpt-4", "large language model", "multimodal", "few-shot learning"],
            pdfUrl: "https://arxiv.org/pdf/2303.08774.pdf",
            url: "https://arxiv.org/abs/2303.08774"
        },
        {
            id: "imagenet-classification",
            title: "ImageNet Classification with Deep Convolutional Neural Networks",
            authors: ["Alex Krizhevsky", "Ilya Sutskever", "Geoffrey E. Hinton"],
            abstract: "We trained a large, deep convolutional neural network to classify the 1.2 million high-resolution images in the ImageNet LSVRC-2010 contest into the 1000 different classes. On the test data, we achieved top-1 and top-5 error rates of 37.5% and 17.0% which is considerably better than the previous state-of-the-art.",
            year: 2012,
            venue: "NeurIPS",
            citations: 120000,
            references: 36,
            categories: ["computer-vision", "machine-learning"],
            keywords: ["cnn", "imagenet", "deep learning", "image classification"],
            pdfUrl: "https://papers.nips.cc/paper/2012/file/c399862d3b9d6b76c8436e924a68c45b-Paper.pdf",
            url: "https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks"
        },
        {
            id: "resnet-deep-learning",
            title: "Deep Residual Learning for Image Recognition",
            authors: ["Kaiming He", "Xiangyu Zhang", "Shaoqing Ren", "Jian Sun"],
            abstract: "Deeper neural networks are more difficult to train. We present a residual learning framework to ease the training of networks that are substantially deeper than those used previously. We explicitly reformulate the layers as learning residual functions with reference to the layer inputs, instead of learning unreferenced functions.",
            year: 2016,
            venue: "CVPR",
            citations: 85000,
            references: 88,
            categories: ["computer-vision", "machine-learning"],
            keywords: ["resnet", "residual learning", "deep networks", "image recognition"],
            pdfUrl: "https://openaccess.thecvf.com/content_cvpr_2016/papers/He_Deep_Residual_Learning_CVPR_2016_paper.pdf",
            url: "https://arxiv.org/abs/1512.03385"
        },
        {
            id: "yolov3-realtime",
            title: "YOLOv3: An Incremental Improvement",
            authors: ["Joseph Redmon", "Ali Farhadi"],
            abstract: "We present some updates to YOLO! We made a bunch of little design changes to make it better. We also trained this new network that's pretty swell. It's a little bigger than last time but more accurate. It's still fast though, don't worry. At 320x320 YOLOv3 runs in 22 ms at 28.2 mAP, as accurate as SSD but three times faster.",
            year: 2018,
            venue: "ArXiv",
            citations: 25000,
            references: 24,
            categories: ["computer-vision", "machine-learning"],
            keywords: ["yolov3", "object detection", "real-time", "cnn"],
            pdfUrl: "https://arxiv.org/pdf/1804.02767.pdf",
            url: "https://arxiv.org/abs/1804.02767"
        },
        {
            id: "ddpg-continuous-control",
            title: "Continuous Control with Deep Reinforcement Learning",
            authors: ["Timothy P. Lillicrap", "Jonathan J. Hunt", "Alexander Pritzel", "Nicolas Heess", "Tom Erez", "Yuval Tassa", "David Silver", "Daan Wierstra"],
            abstract: "We adapt the ideas underlying the success of Deep Q-Learning to the continuous action domain. We present an actor-critic, model-free algorithm based on the deterministic policy gradient that can operate over continuous action spaces. Using the same learning algorithm, network architecture and hyper-parameters, our algorithm robustly solves more than 20 simulated physics tasks.",
            year: 2016,
            venue: "ICLR",
            citations: 21000,
            references: 42,
            categories: ["machine-learning", "robotics"],
            keywords: ["ddpg", "reinforcement learning", "continuous control", "actor-critic"],
            pdfUrl: "https://arxiv.org/pdf/1509.02971.pdf",
            url: "https://arxiv.org/abs/1509.02971"
        },
        {
            id: "slam-orb",
            title: "ORB-SLAM: A Versatile and Accurate Monocular SLAM System",
            authors: ["Raul Mur-Artal", "J. M. M. Montiel", "Juan D. Tardos"],
            abstract: "We present a feature-based monocular simultaneous localization and mapping (SLAM) system that achieves robust performance in real-time. Our system uses ORB features for tracking, mapping, loop closing, and relocalization. The system is capable of automatically initializing, tracking, and creating a map of an unknown environment from a monocular camera.",
            year: 2015,
            venue: "T-RO",
            citations: 18000,
            references: 67,
            categories: ["robotics", "computer-vision"],
            keywords: ["slam", "orb-slam", "monocular vision", "real-time"],
            pdfUrl: "https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7219438",
            url: "https://ieeexplore.ieee.org/document/7219438"
        },
        {
            id: "gan-generative-models",
            title: "Generative Adversarial Networks",
            authors: ["Ian J. Goodfellow", "Jean Pouget-Abadie", "Mehdi Mirza", "Bing Xu", "David Warde-Farley", "Sherjil Ozair", "Aaron Courville", "Yoshua Bengio"],
            abstract: "We propose a new framework for estimating generative models via an adversarial process, in which we simultaneously train two models: a generative model G that captures the data distribution, and a discriminative model D that estimates the probability that a sample came from the training data rather than G.",
            year: 2014,
            venue: "NeurIPS",
            citations: 105000,
            references: 28,
            categories: ["machine-learning", "computer-vision"],
            keywords: ["gan", "generative models", "adversarial training", "deep learning"],
            pdfUrl: "https://papers.nips.cc/paper/2014/file/5ca3e9b122f61f8f06494c97b1afccf3-Paper.pdf",
            url: "https://arxiv.org/abs/1406.2661"
        },
        {
            id: "lstm-neural-networks",
            title: "Long Short-Term Memory",
            authors: ["Sepp Hochreiter", "Jürgen Schmidhuber"],
            abstract: "Learning to store information over extended time intervals by recurrent backpropagation takes a very long time, mostly because of insufficient, decaying error back flow. We briefly review Hochreiter's 1991 analysis of this problem, then address it by introducing a novel, efficient, gradient-based method called Long Short-Term Memory (LSTM).",
            year: 1997,
            venue: "Neural Computation",
            citations: 45000,
            references: 41,
            categories: ["machine-learning", "nlp"],
            keywords: ["lstm", "recurrent neural networks", "long-term dependencies", "gradient flow"],
            pdfUrl: "https://www.bioinf.jku.at/publications/older/2604.pdf",
            url: "https://ieeexplore.ieee.org/document/6795963"
        },
        {
            id: "transformer-vision",
            title: "An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale",
            authors: ["Alexey Dosovitskiy", "Lucas Beyer", "Alexander Kolesnikov", "Dirk Weissenborn", "Xiaohua Zhai", "Thomas Unterthiner", "Mostafa Dehghani", "Matthias Minderer", "Georg Heigold", "Sylvain Gelly", "Jakob Uszkoreit", "Neil Houlsby"],
            abstract: "While the Transformer architecture has become the de-facto standard for natural language processing tasks, its applications to computer vision remain limited. In vision, attention is either applied in conjunction with convolutional networks, or used to replace certain components of convolutional networks while keeping their overall structure in place.",
            year: 2021,
            venue: "ICLR",
            citations: 35000,
            references: 56,
            categories: ["computer-vision", "machine-learning", "nlp"],
            keywords: ["vision transformer", "vit", "image classification", "transformer"],
            pdfUrl: "https://openreview.net/pdf?id=YicbFdNTTy",
            url: "https://arxiv.org/abs/2010.11929"
        },
        {
            id: "diffusion-models",
            title: "Denoising Diffusion Probabilistic Models",
            authors: ["Jonathan Ho", "Ajay Jain", "Pieter Abbeel"],
            abstract: "We present high quality image synthesis results using diffusion probabilistic models, a class of latent variable models inspired by considerations from nonequilibrium thermodynamics. Our best results are obtained by training on a weighted variational bound designed according to a novel connection between diffusion probabilistic models and denoising score matching.",
            year: 2020,
            venue: "NeurIPS",
            citations: 22000,
            references: 52,
            categories: ["machine-learning", "computer-vision"],
            keywords: ["diffusion models", "generative models", "image synthesis", "denoising"],
            pdfUrl: "https://proceedings.neurips.cc/paper/2020/file/4c5bcfec8584af0d967f1ab10179ca4b-Paper.pdf",
            url: "https://arxiv.org/abs/2006.11239"
        },
        {
            id: "ppo-policy-optimization",
            title: "Proximal Policy Optimization Algorithms",
            authors: ["John Schulman", "Filip Wolski", "Prafulla Dhariwal", "Alec Radford", "Oleg Klimov"],
            abstract: "We propose a new family of policy gradient methods for reinforcement learning, which alternate between sampling data through interaction with the environment, and optimizing a surrogate objective function using stochastic gradient ascent. Our methods have the stability and reliability of trust-region methods but are much simpler to implement, requiring only a few lines of code change from vanilla policy gradients.",
            year: 2017,
            venue: "ArXiv",
            citations: 38000,
            references: 35,
            categories: ["machine-learning", "robotics"],
            keywords: ["ppo", "reinforcement learning", "policy optimization", "trust region"],
            pdfUrl: "https://arxiv.org/pdf/1707.06347.pdf",
            url: "https://arxiv.org/abs/1707.06347"
        },
        {
            id: "clip-contrastive-learning",
            title: "Learning Transferable Visual Models From Natural Language Supervision",
            authors: ["Alec Radford", "Jong Wook Kim", "Chris Hallacy", "Aditya Ramesh", "Gabriel Goh", "Sandhini Agarwal", "Girish Sastry", "Amanda Askell", "Pamela Mishkin", "Jack Clark", "Gretchen Krueger", "Ilya Sutskever"],
            abstract: "State-of-the-art computer vision systems are trained to predict a fixed set of predetermined object categories. This restrictive form of supervision limits their generality and usability since additional labeled data is needed to specify any other visual concept. Learning directly from raw text about images is a promising alternative.",
            year: 2021,
            venue: "ICML",
            citations: 25000,
            references: 71,
            categories: ["computer-vision", "machine-learning", "nlp"],
            keywords: ["clip", "contrastive learning", "vision-language", "zero-shot"],
            pdfUrl: "https://proceedings.mlr.press/v139/radford21a/radford21a.pdf",
            url: "https://arxiv.org/abs/2103.00020"
        },
Brown", "Benjamin Chess", "Rewon Child", "Scott Gray", "Alec Radford", "Jeffrey Wu", "Dario Amodei"], abstract: "We study empirical scaling laws for language model performance on the cross-entropy loss. We find that model size, dataset size, and the amount of compute for training follow simple functional relationships that are independent of model architecture and training details. These relationships allow us to determine the optimal allocation of a fixed compute budget.", year: 2020, venue: "ArXiv", citations: 8500, references: 44, categories: ["machine-learning", "nlp"], keywords: ["scaling laws", "language models", "compute optimal", "transformer"], pdfUrl: "https://arxiv.org/pdf/2001.08361.pdf", url: "https://arxiv.org/abs/2001.08361" }, { id: "rag-retrieval", title: "Retrieval-Augmented Generation for Knowledge-Intensive NLP Tasks", authors: ["Patrick Lewis", "Ethan Perez", "Aleksandra Piktus", "Fabio Petroni", "Vladimir Karpukhin", "Naman Goyal", "Heinrich Küttler", "Mike Lewis", "Wen-tau Yih", "Tim Rocktäschel", "Sebastian Riedel", "Douwe Kiela"], abstract: "Large pre-trained language models have been shown to store factual knowledge in their parameters, and achieve state-of-the-art results when fine-tuned on downstream natural language processing tasks. However, their ability to access and precisely manipulate knowledge is still limited, and hence on knowledge-intensive tasks, their performance lags behind task-specific architectures.", year: 2020, venue: "NeurIPS", citations: 12000, references: 89, categories: ["nlp", "machine-learning"], keywords: ["rag", "retrieval-augmented generation", "knowledge-intensive tasks", "qa"], pdfUrl: "https://proceedings.neurips.cc/paper/2020/file/6b493230205f780e1bc26945df7481e5-Paper.pdf", url: "https://arxiv.org/abs/2005.11401" }, { id: "dqn-deep-q", title: "Playing Atari with Deep Reinforcement Learning", authors: ["Volodymyr Mnih", "Koray Kavukcuoglu", "David Silver", "Alex Graves", "Ioannis Antonoglou", "Daan Wierstra", "Martin Riedmiller"], abstract: "We present the first deep learning model to successfully learn control policies directly from high-dimensional sensory input using reinforcement learning. The model is a convolutional neural network, trained with a variant of Q-learning, whose input is raw pixels and whose output is a value function estimating future rewards.", year: 2013, venue: "ArXiv", citations: 42000, references: 31, categories: ["machine-learning", "robotics"], keywords: ["dqn", "reinforcement learning", "atari", "deep q-networks"], pdfUrl: "https://arxiv.org/pdf/1312.5602.pdf", url: "https://arxiv.org/abs/1312.5602" }, { id: "swin-transformer", title: "Swin Transformer: Hierarchical Vision Transformer using Shifted Windows", authors: ["Ze Liu", "Yutong Lin", "Yue Cao", "Han Hu", "Yixuan Wei", "Zheng Zhang", "Stephen Lin", "Baining Guo"], abstract: "This paper presents a new vision Transformer, called Swin Transformer, that computes hierarchical feature representations by shifting the computation across non-overlapping windows of the image. 
// Render the current slice of filtered papers
function renderPapers() {
    const papersContainer = document.getElementById('papers-container');
    if (!papersContainer) return;

    // "Load more" pagination: always render from the start through the current page
    const startIndex = 0;
    const endIndex = currentPage * papersPerPage;
    const papersToShow = filteredPapers.slice(startIndex, endIndex);

    if (papersToShow.length === 0) {
        papersContainer.innerHTML = `
            Try adjusting your search terms or filters