{"payload":{"pageCount":41,"repositories":[{"type":"Public","name":"wult","owner":"intel","isFork":false,"description":"Wult stands for \"Wake Up Latency Tracer\", and this is a project that provides tools for measuring C-state latency in Linux.","allTopics":["latency","idle","c-states","aspm","linux"],"primaryLanguage":{"name":"C","color":"#555555"},"pullRequestCount":0,"issueCount":0,"starsCount":19,"forksCount":7,"license":"Other","participation":[27,28,4,4,0,7,3,10,7,11,9,2,25,9,3,7,3,5,3,11,6,0,1,3,15,5,2,6,0,1,0,1,9,0,1,4,5,5,1,3,1,1,0,0,1,13,9,1,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-05T08:52:46.412Z"}},{"type":"Public","name":"neural-speed","owner":"intel","isFork":false,"description":"An innovative library for efficient LLM inference via low-bit quantization","allTopics":["sparsity","cpu","gpu","int8","low-bit","int1","int4","fp8","llamacpp","llm-inference","gaudi2","nf4","fp4","mxformat","llm-fine-tuning","int3","int2","int5","int6","int7"],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":4,"issueCount":14,"starsCount":282,"forksCount":31,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,1,15,20,7,15,2,2,13,11,5,9,8,9,12,11,4,8,8,3,9,11,25,5,13,0,5,8,10,6,10,9,6,5,4,5,8,1,8,3,7,10],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-05T08:51:47.371Z"}},{"type":"Public","name":"graph-compiler","owner":"intel","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":20,"issueCount":25,"starsCount":12,"forksCount":10,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,5,10,12,8,12,10],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-05T08:51:41.313Z"}},{"type":"Public","name":"stats-collect","owner":"intel","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":1,"starsCount":0,"forksCount":2,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-05T08:49:59.449Z"}},{"type":"Public","name":"torch-xpu-ops","owner":"intel","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":31,"issueCount":68,"starsCount":11,"forksCount":6,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,10,6,25,5,16,16,7,12,4,11,4,15,17,19,14],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-05T08:45:29.156Z"}},{"type":"Public","name":"xFasterTransformer","owner":"intel","isFork":false,"description":"","allTopics":["intel","inference","transformer","xeon","llama","model-serving","llm","chatglm","qwen"],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":10,"issueCount":9,"starsCount":247,"forksCount":47,"license":"Apache License 
2.0","participation":[0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,2,2,0,0,6,12,5,10,9,16,9,12,7,10,0,14,4,11,12,8,0,10,12,9,4,2,8,10,11,16,11,10,2,7,12,3,10],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-05T08:43:08.567Z"}},{"type":"Public","name":"ipu6-drivers","owner":"intel","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"C","color":"#555555"},"pullRequestCount":17,"issueCount":52,"starsCount":147,"forksCount":47,"license":null,"participation":[0,1,0,0,0,0,0,0,2,3,0,2,2,1,0,2,2,0,7,2,0,1,2,1,2,1,0,0,0,0,0,0,0,0,0,0,0,2,0,2,0,0,5,1,5,1,0,1,2,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-05T08:41:50.203Z"}},{"type":"Public","name":"neural-compressor","owner":"intel","isFork":false,"description":"SOTA low-bit LLM quantization (INT8/FP8/INT4/FP4/NF4) & sparsity; leading model compression techniques on TensorFlow, PyTorch, and ONNX Runtime","allTopics":["sparsity","pruning","quantization","knowledge-distillation","auto-tuning","int8","low-precision","quantization-aware-training","post-training-quantization","awq","int4","large-language-models","gptq","smoothquant","sparsegpt","fp4","mxformat"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":23,"issueCount":25,"starsCount":2024,"forksCount":243,"license":"Apache License 2.0","participation":[23,27,10,20,10,15,14,9,19,8,18,19,20,27,25,13,10,8,11,11,14,6,5,17,23,11,14,15,13,11,6,11,23,15,14,5,1,20,13,14,12,13,10,6,7,8,17,3,7,19,11,8],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-05T08:41:37.438Z"}},{"type":"Public","name":"llvm","owner":"intel","isFork":false,"description":"Intel staging area for llvm.org contribution. Home for Intel LLVM-based projects.","allTopics":["sycl","oneapi","llvm","intel"],"primaryLanguage":null,"pullRequestCount":354,"issueCount":478,"starsCount":1182,"forksCount":700,"license":"Other","participation":[811,857,875,788,869,945,1022,898,751,905,849,856,654,859,770,832,717,608,605,890,823,862,747,743,654,907,827,785,458,285,720,772,998,834,842,793,664,717,824,845,865,845,654,769,792,796,818,704,749,757,223,67],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-05T08:32:31.008Z"}},{"type":"Public","name":"userspace-cni-network-plugin","owner":"intel","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Go","color":"#00ADD8"},"pullRequestCount":18,"issueCount":22,"starsCount":234,"forksCount":68,"license":"Apache License 2.0","participation":[4,1,1,0,21,11,0,3,0,1,1,0,0,1,6,1,4,3,0,0,0,12,3,8,5,5,1,9,0,3,17,0,1,1,0,2,3,0,0,0,0,0,2,3,0,0,0,0,8,4,0,6],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-05T08:30:37.768Z"}},{"type":"Public","name":"intel-graphics-compiler","owner":"intel","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":1,"issueCount":21,"starsCount":572,"forksCount":151,"license":"Other","participation":[69,55,69,51,52,56,64,49,68,39,53,42,38,48,64,45,34,42,51,38,37,41,31,31,53,80,62,48,36,23,48,43,49,52,47,52,31,44,50,47,43,53,38,45,53,52,48,33,78,40,32,19],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-05T08:19:13.508Z"}},{"type":"Public","name":"ai-containers","owner":"intel","isFork":false,"description":"This repository contains Dockerfiles, scripts, yaml files, Helm charts, etc. used to scale out AI containers with versions of TensorFlow and PyTorch that have been optimized for Intel platforms. 
Scaling is done with python, Docker, kubernetes, kubeflow, cnvrg.io, Helm, and other container orchestration frameworks for use in the cloud and on-premise","allTopics":["docker","kubernetes","docker-compose","tensorflow","scikit-learn","intel","pytorch"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":7,"issueCount":0,"starsCount":14,"forksCount":11,"license":"Apache License 2.0","participation":[1,0,2,4,0,2,1,6,4,8,8,16,1,0,3,2,0,5,0,2,2,1,2,5,1,4,2,2,1,0,7,2,0,0,3,7,4,10,3,4,2,2,3,7,4,0,0,16,17,15,21,43],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-05T08:18:57.953Z"}},{"type":"Public","name":"scikit-learn-intelex","owner":"intel","isFork":false,"description":"Intel(R) Extension for Scikit-learn is a seamless way to speed up your Scikit-learn application","allTopics":["big-data","analytics","gpu","machine-learning-algorithms","intel","data-analysis","ai-training","oneapi","ai-inference","swrepo","ai-machine-learning","python","machine-learning","scikit-learn"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":31,"issueCount":44,"starsCount":1170,"forksCount":169,"license":"Apache License 2.0","participation":[3,2,6,5,3,6,6,9,3,6,5,12,7,21,7,12,12,3,5,5,13,3,5,5,5,7,7,2,6,0,6,6,8,15,7,9,13,11,5,6,8,5,6,3,12,10,10,7,3,3,5,4],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-05T08:18:29.656Z"}},{"type":"Public","name":"intel-extension-for-transformers","owner":"intel","isFork":false,"description":"⚡ Build your chatbot within minutes on your favorite device; offer SOTA compression techniques for LLMs; run LLMs efficiently on Intel Platforms⚡","allTopics":["retrieval","chatbot","rag","habana","large-language-model","chatpdf","llm-inference","4-bits","speculative-decoding","llm-cpu","streamingllm","intel-optimized-llamacpp","neural-chat","neural-chat-7b","autoround","gaudi3"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":27,"issueCount":32,"starsCount":2000,"forksCount":190,"license":"Apache License 2.0","participation":[23,19,8,32,35,26,15,28,12,19,12,10,44,60,39,60,14,11,42,49,39,43,43,37,64,61,43,37,81,9,16,16,36,31,45,7,14,25,18,20,24,25,20,11,12,21,10,5,14,6,16,4],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-05T08:04:46.353Z"}},{"type":"Public","name":"intel-cmt-cat","owner":"intel","isFork":false,"description":"User space software for Intel(R) Resource Director Technology","allTopics":["cat","cache","snmp","perl","swig","rdt","pqos","cmt","mbm","llc","mba","rdtset","c"],"primaryLanguage":{"name":"C","color":"#555555"},"pullRequestCount":3,"issueCount":17,"starsCount":678,"forksCount":179,"license":"Other","participation":[3,1,12,3,3,1,17,14,17,4,1,0,1,1,0,4,0,4,2,3,0,1,1,0,0,2,0,0,0,0,1,0,32,1,2,1,1,0,2,0,0,3,1,1,4,0,1,1,8,1,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-05T08:04:33.886Z"}},{"type":"Public","name":"idxd-config","owner":"intel","isFork":false,"description":"Accel-config / libaccel-config","allTopics":[],"primaryLanguage":{"name":"C","color":"#555555"},"pullRequestCount":2,"issueCount":10,"starsCount":54,"forksCount":34,"license":"Other","participation":[0,0,0,0,0,0,0,7,0,0,6,3,6,0,0,0,0,1,0,2,6,0,1,0,0,0,8,0,0,0,0,0,1,2,0,0,4,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-05T08:01:32.766Z"}},{"type":"Public","name":"intel-extension-for-tensorflow","owner":"intel","isFork":false,"description":"Intel® Extension for 
TensorFlow*","allTopics":["machine-learning","extension","cpu","ai","deep-learning","tensorflow","gpu"],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":0,"issueCount":5,"starsCount":306,"forksCount":35,"license":"Other","participation":[24,17,15,8,21,16,18,19,14,10,4,7,9,8,14,7,1,7,10,14,17,14,7,10,4,17,4,7,7,5,6,3,5,4,5,2,3,14,11,9,4,8,3,4,9,4,5,2,4,5,10,3],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-05T08:00:54.250Z"}},{"type":"Public","name":"intel-extension-for-openxla","owner":"intel","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":3,"issueCount":3,"starsCount":29,"forksCount":6,"license":"Apache License 2.0","participation":[0,0,3,2,2,5,2,7,3,6,4,5,5,5,4,1,3,5,14,8,5,8,2,2,4,5,5,3,3,3,4,3,4,6,7,0,4,7,4,4,6,2,4,8,5,5,14,3,5,3,4,3],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-05T07:47:17.638Z"}},{"type":"Public","name":"intel-xpu-backend-for-triton","owner":"intel","isFork":false,"description":"OpenAI Triton backend for Intel® GPUs","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":20,"issueCount":144,"starsCount":85,"forksCount":26,"license":"MIT License","participation":[11,13,21,21,35,20,16,6,20,28,32,30,21,23,32,30,21,22,15,19,28,18,18,14,10,28,17,21,7,7,42,53,59,70,54,41,60,60,89,73,66,65,72,96,58,84,61,61,74,57,64,59],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-05T07:44:36.597Z"}},{"type":"Public","name":"llm-on-ray","owner":"intel","isFork":false,"description":"Pretrain, finetune and serve LLMs on Intel platforms with Ray","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":14,"issueCount":32,"starsCount":49,"forksCount":26,"license":"Apache License 2.0","participation":[21,3,0,3,6,3,1,4,4,0,2,1,3,4,2,4,1,1,0,3,5,7,6,10,9,7,11,1,5,5,12,9,7,0,9,1,0,8,4,7,8,5,7,6,11,7,7,0,10,3,4,6],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-05T07:37:07.860Z"}},{"type":"Public","name":"pepc","owner":"intel","isFork":false,"description":"Pepc - Power, Energy, and Performance Configurator","allTopics":["cpu","intel","power","pm","c-state","uncore","aspm","p-state","cpu-online","cpu-hotplug","linux"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":24,"forksCount":8,"license":"Other","participation":[23,12,7,8,16,27,29,11,9,3,12,0,12,12,7,4,34,15,48,23,41,27,33,57,45,12,51,24,87,96,39,21,27,64,31,59,52,30,78,36,34,5,2,9,11,6,18,15,0,2,1,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-05T07:30:55.819Z"}},{"type":"Public","name":"media-driver","owner":"intel","isFork":false,"description":"Intel Graphics Media Driver to support hardware decode, encode and video processing.","allTopics":[],"primaryLanguage":{"name":"C","color":"#555555"},"pullRequestCount":85,"issueCount":91,"starsCount":937,"forksCount":337,"license":"Other","participation":[16,12,15,10,17,7,24,11,19,15,17,18,21,30,25,15,7,7,13,21,18,17,26,10,10,18,14,14,12,27,11,12,7,17,10,0,8,10,19,8,11,13,9,9,9,14,9,3,7,11,17,4],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-05T07:19:51.793Z"}},{"type":"Public","name":"auto-round","owner":"intel","isFork":false,"description":"SOTA Weight-only Quantization Algorithm for LLMs. 
This is official implementation of \"Optimize Weight Rounding via Signed Gradient Descent for the Quantization of LLMs\"","allTopics":["rounding","quantization","awq","int4","gptq","neural-compressor","weight-only"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":7,"issueCount":10,"starsCount":85,"forksCount":11,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,21,13,35,10,5,5,11,16,4,12,9,8,3,3,4,9,3,1,5,12,6],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-05T06:58:27.120Z"}},{"type":"Public","name":"intel-sgx-ssl","owner":"intel","isFork":false,"description":"Intel® Software Guard Extensions SSL","allTopics":[],"primaryLanguage":{"name":"Assembly","color":"#6E4C13"},"pullRequestCount":3,"issueCount":11,"starsCount":220,"forksCount":74,"license":"Other","participation":[0,0,0,0,0,1,1,0,1,1,3,1,0,1,1,0,0,0,2,1,3,0,4,0,2,2,0,1,0,0,0,0,2,0,0,0,0,0,1,0,0,3,1,1,10,5,2,2,3,2,0,1],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-05T06:41:12.210Z"}},{"type":"Public","name":"intel-extension-for-pytorch","owner":"intel","isFork":false,"description":"A Python package for extending the official PyTorch that can easily obtain performance on Intel platform","allTopics":["machine-learning","neural-network","intel","quantization","deep-learning","pytorch"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":17,"issueCount":200,"starsCount":1414,"forksCount":212,"license":"Apache License 2.0","participation":[9,6,2,3,8,9,5,6,11,6,5,6,4,24,12,13,6,5,2,8,13,9,14,9,4,5,14,15,13,10,13,16,15,26,18,6,4,7,18,15,14,13,10,4,9,12,12,3,19,9,17,12],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-05T06:38:03.777Z"}},{"type":"Public","name":"intel-inb-manageability","owner":"intel","isFork":false,"description":"The Intel® In-Band Manageability Framework enables an administrator to perform critical Device Management operations over-the-air remotely from the cloud. It also facilitates the publishing of telemetry and critical events and logs from an IoT device to the cloud enabling the administrator to take corrective actions if, and when necessary. The f…","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":7,"issueCount":1,"starsCount":21,"forksCount":20,"license":"Other","participation":[7,1,13,7,0,1,1,3,1,4,3,5,5,1,0,1,0,3,2,3,2,3,9,4,5,6,5,1,1,1,2,4,9,2,9,6,1,1,2,1,0,2,0,2,0,0,5,1,2,0,1,3],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-05T06:37:05.088Z"}},{"type":"Public","name":"dffml","owner":"intel","isFork":false,"description":"The easiest way to use Machine Learning. Mix and match underlying ML libraries and data set sources. 
Generate new datasets or modify existing ones with ease.","allTopics":["analytics","libraries","models","pipelines","data-flow","asyncio","flow-based-programming","datasets","dag","frameworks","event-based","dataflows","ai-training","hyperautomation","dffml","ai-inference","swrepo","ai-machine-learning","python","machine-learning"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":29,"issueCount":381,"starsCount":243,"forksCount":136,"license":"MIT License","participation":[6,5,1,4,2,1,3,2,2,0,1,0,7,0,0,0,0,0,1,0,0,1,2,2,3,5,1,16,5,20,22,1,0,1,0,0,1,0,0,1,0,11,1,5,9,0,1,8,0,3,0,1],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-05T06:21:30.142Z"}},{"type":"Public","name":"opencl-clang","owner":"intel","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":7,"issueCount":17,"starsCount":132,"forksCount":59,"license":"Other","participation":[1,0,0,1,0,0,0,1,5,0,5,0,1,0,2,1,0,3,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,1,1,0,0,7],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-05T06:06:57.945Z"}},{"type":"Public","name":"ACON","owner":"intel","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Rust","color":"#dea584"},"pullRequestCount":1,"issueCount":10,"starsCount":54,"forksCount":15,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,2,0,1,3,5,4,2,1,4,4,3,9,3,3,6,6,6,10,3,0,0,0,2,1,2,3,1,0,0,0,2,1,0,1,2,2,1,0,1,1,0,1,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-05T06:03:51.097Z"}},{"type":"Public","name":"Predictive-Assets-Maintenance","owner":"intel","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Jupyter Notebook","color":"#DA5B0B"},"pullRequestCount":0,"issueCount":0,"starsCount":8,"forksCount":6,"license":"Apache License 2.0","participation":[0,3,0,0,0,1,2,0,0,0,0,0,0,0,0,0,1,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-05T06:01:14.215Z"}}],"repositoryCount":1210,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"Repositories"}
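
To make the scikit-learn-intelex entry above more concrete, here is a minimal usage sketch. It assumes the package is installed as scikit-learn-intelex, is importable as sklearnex, and exposes patch_sklearn() as the patching entry point; the dataset and estimator choice are illustrative, so verify the exact names against that repository's README.

# Minimal sketch (assumed API): accelerate an existing scikit-learn workload
# by patching scikit-learn before importing any estimators.
# Assumes `pip install scikit-learn-intelex scikit-learn` and that the package
# exposes sklearnex.patch_sklearn(); check the repository's README to confirm.
import numpy as np
from sklearnex import patch_sklearn

patch_sklearn()  # swap in Intel-optimized implementations where available

# Import estimators only after patching so the optimized versions are picked up.
from sklearn.cluster import KMeans

X = np.random.rand(10_000, 16).astype(np.float32)   # synthetic data for illustration
labels = KMeans(n_clusters=8, n_init=10, random_state=0).fit_predict(X)
print(labels[:10])

The rest of the script stays ordinary scikit-learn code, which is the point of the extension: the patch call is the only change to an existing application.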
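Similarly, for the intel-extension-for-pytorch entry, a minimal CPU-inference sketch, under the assumption that the package is importable as intel_extension_for_pytorch and provides an optimize() helper for eval-mode models; the toy model, bfloat16 choice, and shapes are illustrative, so check that repository's documentation for the current API and supported PyTorch versions.

# Minimal sketch (assumed API): apply intel-extension-for-pytorch to an
# eager-mode model for CPU inference.
# Assumes `pip install torch intel-extension-for-pytorch` and that the package
# exposes ipex.optimize(); confirm against the repository's documentation.
import torch
import intel_extension_for_pytorch as ipex

model = torch.nn.Sequential(
    torch.nn.Linear(128, 256),
    torch.nn.ReLU(),
    torch.nn.Linear(256, 10),
).eval()

# Weight-layout and kernel optimizations for inference; dtype is optional.
model = ipex.optimize(model, dtype=torch.bfloat16)

x = torch.randn(32, 128)
with torch.no_grad(), torch.cpu.amp.autocast(dtype=torch.bfloat16):
    out = model(x)
print(out.shape)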