{"payload":{"header_redesign_enabled":false,"results":[{"id":"697550408","archived":false,"color":"#3572A5","followers":62,"has_funding_file":false,"hl_name":"codefuse-ai/codefuse-evaluation","hl_trunc_description":"Industrial-level evaluation benchmarks for Coding LLMs in the full life-cycle of AI native software developing.企业级代码大模型评测体系,持续开放中","language":"Python","mirror":false,"owned_by_organization":true,"public":true,"repo":{"repository":{"id":697550408,"name":"codefuse-evaluation","owner_id":143480819,"owner_login":"codefuse-ai","updated_at":"2024-01-19T02:41:43.500Z","has_issues":true}},"sponsorable":false,"topics":["code-evaluation","lcc","evaluation-framework","repository-eval","codetranseval","codecommenteval","codefuse"],"type":"Public","help_wanted_issues_count":0,"good_first_issue_issues_count":0,"starred_by_current_user":false}],"type":"repositories","page":1,"page_count":1,"elapsed_millis":89,"errors":[],"result_count":1,"facets":[],"protected_org_logins":[],"topics":null,"query_id":"","logged_in":false,"sign_up_path":"/signup?source=code_search_results","sign_in_path":"/login?return_to=https%3A%2F%2Fgithub.com%2Fsearch%3Fq%3Drepo%253Acodefuse-ai%252Fcodefuse-evaluation%2B%2Blanguage%253APython","metadata":null,"csrf_tokens":{"/codefuse-ai/codefuse-evaluation/star":{"post":"SqfsFy31Rq5CBHFtbm2_e2pcuSaGVTTHHlh8rqqSyLFqrvJwiBPzl_P7Rcx71GbuG7bpuo3QPOW9uHsVBnni9A"},"/codefuse-ai/codefuse-evaluation/unstar":{"post":"ICWP5F9g1fs-NDHR7jqiMdiGKfM5gJnVRWn7jbwkM1gM62DTpZ8QVS_ClNb0u2sVqWRHWQsMjqv4MaM39aCzfA"},"/sponsors/batch_deferred_sponsor_buttons":{"post":"ytuen-w4kENUesQpshvMt3-nA921-2Bf47w_jTWEVrqlyBcxMzpIcuCjtY2arDDeC0CeUMd0MXa1B5ir6bBa3Q"}}},"title":"Repository search results"}