{"payload":{"pageCount":1,"repositories":[{"type":"Public","name":"3d-photo-inpainting","owner":"vt-vl-lab","isFork":false,"description":"[CVPR 2020] 3D Photography using Context-aware Layered Depth Inpainting","allTopics":["3d-photo","novel-view-synthesis"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":13,"issueCount":93,"starsCount":6914,"forksCount":1114,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-08-30T23:48:56.391Z"}},{"type":"Public","name":"SDN","owner":"vt-vl-lab","isFork":false,"description":"[NeurIPS 2019] Why Can't I Dance in the Mall? Learning to Mitigate Scene Bias in Action Recognition","allTopics":["activity-recognition","representation-learning","video-understanding","debiasisng","action-recognition"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":2,"starsCount":82,"forksCount":13,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-03-20T00:54:34.659Z"}},{"type":"Public","name":"Guided-pix2pix","owner":"vt-vl-lab","isFork":false,"description":"[ICCV 2019] Guided Image-to-Image Translation with Bi-Directional Feature Transformation","allTopics":["pix2pix","deep-learning","pytorch","gan"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":1,"starsCount":194,"forksCount":30,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-03-10T02:48:55.858Z"}},{"type":"Public","name":"pwc-net.pytorch","owner":"vt-vl-lab","isFork":false,"description":"Off-the-shelf PWC-Net module in PyTorch-1.0+","allTopics":["optical-flow"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":5,"issueCount":3,"starsCount":31,"forksCount":6,"license":"GNU Lesser General Public License v3.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-12-08T05:15:21.562Z"}},{"type":"Public","name":"cluster","owner":"vt-vl-lab","isFork":false,"description":"Instructions for using clusters at Virginia Tech","allTopics":[],"primaryLanguage":null,"pullRequestCount":0,"issueCount":0,"starsCount":15,"forksCount":6,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-03-20T04:29:35.117Z"}},{"type":"Public","name":"DRG","owner":"vt-vl-lab","isFork":false,"description":"[ECCV 2020] DRG: Dual Relation Graph for Human-Object Interaction Detection","allTopics":["human-object-interaction","graph-network"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":1,"starsCount":64,"forksCount":19,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-03-11T16:50:53.401Z"}},{"type":"Public","name":"FGVC","owner":"vt-vl-lab","isFork":false,"description":"[ECCV 2020] Flow-edge Guided Video Completion ","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":14,"starsCount":1551,"forksCount":263,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2021-12-14T05:49:53.874Z"}},{"type":"Public","name":"video-data-aug","owner":"vt-vl-lab","isFork":false,"description":"Learning Representational Invariances for Data-Efficient Action Recognition","allTopics":["semi-supervised-learning","data-augmentation","action-recognition"],"primaryLanguage":{"name":"Jupyter Notebook","color":"#DA5B0B"},"pullRequestCount":1,"issueCount":1,"starsCount":32,"forksCount":5,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2021-10-26T13:49:53.201Z"}},{"type":"Public","name":"reading_group","owner":"vt-vl-lab","isFork":false,"description":"Virginia Tech Vision and Learning Reading Group","allTopics":["machine-learning","computer-vision","deep-learning"],"primaryLanguage":null,"pullRequestCount":0,"issueCount":0,"starsCount":125,"forksCount":15,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2021-04-28T01:35:33.223Z"}},{"type":"Public","name":"code_snippet","owner":"vt-vl-lab","isFork":false,"description":"Some code snippets.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":4,"forksCount":2,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2020-11-06T01:58:35.868Z"}},{"type":"Public","name":"iCAN","owner":"vt-vl-lab","isFork":false,"description":"[BMVC 2018] iCAN: Instance-Centric Attention Network for Human-Object Interaction Detection","allTopics":["action-recognition","human-object-interaction","visual-relationship-detection"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":12,"starsCount":259,"forksCount":60,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2020-08-24T06:42:33.901Z"}},{"type":"Public","name":"footskate_reducer","owner":"vt-vl-lab","isFork":false,"description":"[WACV 2020] Reducing Footskate in Human Motion Reconstruction with Ground Contact Constraints","allTopics":["human-sensing"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":43,"forksCount":9,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2020-02-27T03:47:46.671Z"}},{"type":"Public","name":"paper-gestalt","owner":"vt-vl-lab","isFork":false,"description":"Deep Paper Gestalt","allTopics":[],"primaryLanguage":null,"pullRequestCount":0,"issueCount":5,"starsCount":442,"forksCount":38,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2018-12-23T05:22:28.120Z"}},{"type":"Public","name":"DF-Net","owner":"vt-vl-lab","isFork":false,"description":"[ECCV 2018] DF-Net: Unsupervised Joint Learning of Depth and Flow using Cross-Task Consistency","allTopics":["depth","optical-flow","self-supervised-learning"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":3,"starsCount":210,"forksCount":30,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2018-09-11T09:58:18.983Z"}},{"type":"Public","name":"flownet2.tf","owner":"vt-vl-lab","isFork":false,"description":"Off-the-shelf FlowNet module in TensorFlow-1.2.0","allTopics":["optical-flow"],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":0,"issueCount":4,"starsCount":37,"forksCount":12,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2018-07-03T07:20:36.694Z"}},{"type":"Public","name":"flownet2.pytorch","owner":"vt-vl-lab","isFork":false,"description":"Off-the-shelf FlowNet module in PyTorch-0.3.0","allTopics":["optical-flow"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":6,"starsCount":118,"forksCount":37,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2018-04-03T19:10:55.370Z"}}],"repositoryCount":16,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"vt-vl-lab repositories"}