{"payload":{"header_redesign_enabled":false,"results":[{"id":"726349745","archived":false,"color":"#3572A5","followers":207,"has_funding_file":false,"hl_name":"WisconsinAIVision/ViP-LLaVA","hl_trunc_description":"[CVPR2024] ViP-LLaVA: Making Large Multimodal Models Understand Arbitrary Visual Prompts","language":"Python","mirror":false,"owned_by_organization":true,"public":true,"repo":{"repository":{"id":726349745,"name":"ViP-LLaVA","owner_id":166878028,"owner_login":"WisconsinAIVision","updated_at":"2024-06-12T23:14:02.307Z","has_issues":true}},"sponsorable":false,"topics":["chatbot","llama","multi-modal","clip","vision-language","gpt-4","foundation-models","visual-prompting","llava","llama2","cvpr2024","gpt-4-vision"],"type":"Public","help_wanted_issues_count":0,"good_first_issue_issues_count":0,"starred_by_current_user":false}],"type":"repositories","page":1,"page_count":1,"elapsed_millis":64,"errors":[],"result_count":1,"facets":[],"protected_org_logins":[],"topics":null,"query_id":"","logged_in":false,"sign_up_path":"/signup?source=code_search_results","sign_in_path":"/login?return_to=https%3A%2F%2Fgithub.com%2Fsearch%3Fq%3Drepo%253AWisconsinAIVision%252FViP-LLaVA%2B%2Blanguage%253APython","metadata":null,"csrf_tokens":{"/WisconsinAIVision/ViP-LLaVA/star":{"post":"HINhQAqwGbArQMnIU1YSbipaoZ4BMUaIug2UXS6wAUPQK2fWnQ7FIJElhTHUkSub6F4F937DOn5kS0kPdfaD1g"},"/WisconsinAIVision/ViP-LLaVA/unstar":{"post":"A-NPtc9B-wl5cyKCk_iji5W6-VtHthorXMrV1-LhJXY9z0-6bgfV6VWBmqpys7rAGradBT-Ek4ym3TW7v7rAjw"},"/sponsors/batch_deferred_sponsor_buttons":{"post":"q2v0aW_QkT9pkgVMPljvqUV8cz6-j5P85nNktI8xZIaEhLKV51MrsDb44m3sWHZdFfswg2uuJv7NBxTowrHcsA"}}},"title":"Repository search results"}