Skip to content

Commit

Permalink
Merge pull request #98 from rjmacarthy/feature/lm-studio-support
Browse files Browse the repository at this point in the history
Feature LM Studio support
  • Loading branch information
rjmacarthy authored Feb 8, 2024
2 parents 8ae2bed + bb9c7c9 commit da6c15d
Show file tree
Hide file tree
Showing 20 changed files with 1,073 additions and 264 deletions.
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ You can find the settings inside the extension sidebar by clicking the gear icon

The main values which need to be updated to switch between Ollama and llama.cpp are:

- `apiUrl` - The url to your Ollama or llama.cpp server (default: localhost)
- `apiHostname` - The hostname of your Ollama or llama.cpp server (default: localhost)
- `apiPath` - The API path which defaults to `/api/generate` for Ollama and `/completion` for llama.cpp (See llama.cpp docs or Ollama docs).
- `apiPort` - The port of your Ollama (default 11434) or llama.cpp server (default 8080)

Expand Down
599 changes: 599 additions & 0 deletions assets/codicon.css

Large diffs are not rendered by default.

Binary file added assets/codicon.ttf
Binary file not shown.
7 changes: 6 additions & 1 deletion package-lock.json

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

59 changes: 35 additions & 24 deletions package.json
Original file line number Diff line number Diff line change
Expand Up @@ -225,55 +225,66 @@
"default": true,
"description": "Activates or deactivates the Twinny extension."
},
"twinny.apiUrl": {
"twinny.apiHostname": {
"order": 1,
"type": "string",
"default": "localhost",
"description": "URL for the completion API.",
"description": "Hostname for the completion API.",
"required": true
},
"twinny.chatApiPath": {
"twinny.apiProvider": {
"order": 2,
"type": "string",
"enum": [
"ollama",
"llamacpp",
"lmstudio"
],
"default": "ollama",
"description": "The API provider to use (sets the paths and port automatically to defaults)."
},
"twinny.chatApiPath": {
"order": 3,
"type": "string",
"default": "/api/generate",
"description": "Endpoint path for chat completions. Defaults to '/api/generate' for Ollama and '/completion' for llama.cpp.",
"required": true
},
"twinny.chatApiPort": {
"order": 3,
"order": 4,
"type": "number",
"default": 11434,
"description": "The API port usually `11434` for Ollama and `8080` for llama.cpp (May differ depending on API configuration)",
"required": true
},
"twinny.fimApiPort": {
"order": 4,
"order": 5,
"type": "number",
"default": 11434,
"description": "The API port usually `11434` for Ollama and `8080` for llama.cpp (May differ depending on API configuration)",
"required": true
},
"twinny.fimApiPath": {
"order": 5,
"order": 6,
"type": "string",
"default": "/api/generate",
"description": "Endpoint path for FIM completions. Defaults to '/api/generate' for Ollama and '/completion' for llama.cpp.",
"required": true
},
"twinny.chatModelName": {
"order": 6,
"order": 7,
"type": "string",
"default": "codellama:7b-instruct",
"description": "Model identifier for chat completions. Applicable only for Ollama API."
},
"twinny.fimModelName": {
"order": 7,
"order": 8,
"type": "string",
"default": "codellama:7b-code",
"description": "Model identifier for FIM completions. Applicable only for Ollama API."
},
"twinny.fimTemplateFormat": {
"order": 8,
"order": 9,
"type": "string",
"enum": [
"stable-code",
Expand All @@ -284,34 +295,34 @@
"description": "The prompt format to be used for FIM completions."
},
"twinny.disableAutoSuggest": {
"order": 9,
"order": 10,
"type": "boolean",
"default": false,
"description": "Disables automatic suggestions, manual trigger (default shortcut Alt+\\)."
},
"twinny.contextLength": {
"order": 10,
"order": 11,
"type": "number",
"default": 30,
"description": "Defines the number of lines before and after the current line to include in FIM prompts.",
"required": true
},
"twinny.debounceWait": {
"order": 11,
"order": 12,
"type": "number",
"default": 300,
"description": "Delay in milliseconds before triggering the next completion.",
"required": true
},
"twinny.temperature": {
"order": 12,
"order": 13,
"type": "number",
"default": 0.2,
"description": "Sets the model's creativity level (temperature) for generating completions.",
"required": true
},
"twinny.useMultiLineCompletions": {
"order": 13,
"order": 14,
"type": "boolean",
"default": false,
"description": "Use multiline completions, can be inaccurate in some cases."
Expand All @@ -320,51 +331,51 @@
"dependencies": {
"twinny.useMultiLineCompletions": true
},
"order": 14,
"order": 15,
"type": "number",
"default": 20,
"description": "Maximum number of lines to use for multi line completions. Applicable only when useMultiLineCompletions is enabled."
},
"twinny.useFileContext": {
"order": 15,
"order": 16,
"type": "boolean",
"default": false,
"description": "Enables scanning of neighbouring documents to enhance completion prompts. (Experimental)"
},
"twinny.enableCompletionCache": {
"order": 16,
"order": 17,
"type": "boolean",
"default": false,
"description": "Caches FIM completions for identical prompts to enhance performance."
},
"twinny.numPredictChat": {
"order": 17,
"order": 18,
"type": "number",
"default": 512,
"description": "Maximum token limit for chat completions.",
"required": true
},
"twinny.numPredictFim": {
"order": 18,
"order": 19,
"type": "number",
"default": -1,
"description": "Maximum token limit for FIM completions. Set to -1 for no limit. Twinny stops at logical line breaks.",
"required": true
},
"twinny.disableServerChecks": {
"order": 19,
"order": 20,
"type": "boolean",
"default": false,
"description": "Disables automatic LLM server checks at startup, preventing auto-downloads and prompts."
},
"twinny.useTls": {
"order": 20,
"type": "boolean",
        "order": 21,
        "type": "boolean",
        "default": false,
"description": "Enables TLS encryption for API connections."
},
"twinny.apiBearerToken": {
"order": 21,
"order": 22,
"type": "string",
"default": "",
"description": "Bearer token for secure API authentication."
Expand Down Expand Up @@ -404,8 +415,8 @@
"webpack-cli": "^4.10.0"
},
"dependencies": {
"@microsoft/fast-react-wrapper": "^0.3.22",
"@types/react": "^18.2.46",
"@vscode/codicons": "^0.0.35",
"@vscode/webview-ui-toolkit": "^1.4.0",
"classnames": "^2.5.1",
"handlebars": "^4.7.8",
Expand Down
Loading

0 comments on commit da6c15d

Please sign in to comment.