Skip to content

Commit

Permalink
Ts/update stratify beaker context (#3584)
Browse files Browse the repository at this point in the history
  • Loading branch information
Tom-Szendrey authored May 9, 2024
1 parent 121c6c8 commit 30088dc
Show file tree
Hide file tree
Showing 8 changed files with 153 additions and 30 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,7 @@ const props = defineProps<{
contextLanguage: string;
}>();
const emit = defineEmits(['llm-output', 'llm-thought-output']);
const emit = defineEmits(['question-asked', 'llm-output', 'llm-thought-output']);
const questionString = ref('');
const kernelStatus = ref<string>('');
Expand All @@ -73,6 +73,7 @@ const submitQuestion = () => {
const message = props.kernelManager.sendMessage('llm_request', {
request: questionString.value
});
emit('question-asked');
// May prefer to use a manual status rather than following this. TBD. Both options work for now
message.register('status', (data) => {
kernelStatus.value = data.content.execution_state;
Expand All @@ -84,6 +85,10 @@ const submitQuestion = () => {
thoughts.value = data;
emit('llm-thought-output', data);
});
message.register('llm_response', (data) => {
thoughts.value = data;
emit('llm-thought-output', data);
});
};
</script>

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -15,14 +15,22 @@ import { ref, computed } from 'vue';
import Button from 'primevue/button';
const props = defineProps<{
llmThought?: any;
llmThoughts: any[];
}>();
const showThoughts = ref(false);
const thought = computed(() => props?.llmThought?.content?.thought ?? '');
const thought = computed(() => {
let aString = '';
props.llmThoughts.forEach((ele) => {
const llmResponse = ele.content?.thought ?? ele.content?.text ?? '';
aString = aString.concat(llmResponse, '\n \n');
});
return aString;
});
</script>

<style scoped>
.thought-bubble {
white-space: pre-line;
border: 1px solid var(--surface-border-light);
border-radius: var(--border-radius);
padding: var(--gap-small);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -31,10 +31,11 @@
:defaultOptions="sampleAgentQuestions"
:context-language="contextLanguage"
@llm-output="(data: any) => appendCode(data, 'code')"
@llm-thought-output="(data: any) => (llmThought = data)"
@llm-thought-output="(data: any) => llmThoughts.push(data)"
@question-asked="llmThoughts = []"
/>
</Suspense>
<tera-notebook-jupyter-thought-output :llm-thought="llmThought" />
<tera-notebook-jupyter-thought-output :llm-thoughts="llmThoughts" />
<v-ace-editor
v-model:value="codeText"
@init="initializeEditor"
Expand Down Expand Up @@ -118,7 +119,7 @@ const initializeEditor = (editorInstance: any) => {
editor = editorInstance;
};
const codeText = ref();
const llmThought = ref();
const llmThoughts = ref<any[]>([]);
const buildJupyterContext = () => ({
context: 'decapodes',
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -86,10 +86,11 @@
:kernelManager="kernelManager"
:defaultOptions="sampleAgentQuestions"
@llm-output="appendCode"
@llm-thought-output="(data: any) => (llmThought = data)"
@llm-thought-output="(data: any) => llmThoughts.push(data)"
@question-asked="llmThoughts = []"
:context-language="contextLanguage"
/>
<tera-notebook-jupyter-thought-output :llm-thought="llmThought" />
<tera-notebook-jupyter-thought-output :llm-thoughts="llmThoughts" />
</div>
<v-ace-editor
v-model:value="code"
Expand Down Expand Up @@ -191,7 +192,7 @@ const isLoadingStructuralComparisons = ref(false);
const structuralComparisons = ref<string[]>([]);
const llmAnswer = ref('');
const code = ref(props.node.state.notebookHistory?.[0]?.code ?? '');
const llmThought = ref();
const llmThoughts = ref<any[]>([]);
const isKernelReady = ref(false);
const modelsToCompare = ref<Model[]>([]);
const contextLanguage = ref<string>('python3');
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -240,10 +240,11 @@
:defaultOptions="sampleAgentQuestions"
:context-language="contextLanguage"
@llm-output="(data: any) => appendCode(data, 'code')"
@llm-thought-output="(data: any) => (llmThought = data)"
@llm-thought-output="(data: any) => llmThoughts.push(data)"
@question-asked="llmThoughts = []"
/>
</Suspense>
<tera-notebook-jupyter-thought-output :llm-thought="llmThought" />
<tera-notebook-jupyter-thought-output :llm-thoughts="llmThoughts" />
</div>
<v-ace-editor
v-model:value="codeText"
Expand Down Expand Up @@ -442,7 +443,7 @@ const buildJupyterContext = () => {
const codeText = ref(
'# This environment contains the variable "model_config" to be read and updated'
);
const llmThought = ref();
const llmThoughts = ref<any[]>([]);
const notebookResponse = ref();
const executeResponse = ref({
status: OperatorStatus.DEFAULT,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -33,10 +33,11 @@
:default-options="sampleAgentQuestions"
:context-language="contextLanguage"
@llm-output="(data: any) => appendCode(data, 'code')"
@llm-thought-output="(data: any) => (llmThought = data)"
@llm-thought-output="(data: any) => llmThoughts.push(data)"
@question-asked="llmThoughts = []"
/>
</Suspense>
<tera-notebook-jupyter-thought-output :llm-thought="llmThought" />
<tera-notebook-jupyter-thought-output :llm-thoughts="llmThoughts" />
</div>
<v-ace-editor
v-model:value="codeText"
Expand Down Expand Up @@ -165,15 +166,16 @@ const sampleAgentQuestions = [
'Add a new transition from S (to nowhere) with a rate constant of v with unit Days. The Rate depends on R',
'Add an observable titled sample with the expression A * B * p.',
'Rename the state S to Susceptible in the infection transition.',
'Rename the transition infection to inf.'
'Rename the transition infection to inf.',
'Change rate law of inf to S * I * z.'
];
const contextLanguage = ref<string>('python3');
const defaultCodeText =
'# This environment contains the variable "model" \n# which is displayed on the right';
const codeText = ref(defaultCodeText);
const llmThought = ref();
const llmThoughts = ref<any[]>([]);
const executeResponse = ref({
status: OperatorStatus.DEFAULT,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -45,12 +45,13 @@
<div class="toolbar">
<tera-notebook-jupyter-input
:kernel-manager="kernelManager"
:default-options="[]"
:default-options="sampleAgentQuestions"
:context-language="'python3'"
@llm-output="(data: any) => processLLMOutput(data)"
@llm-thought-output="(data: any) => (llmThought = data)"
@llm-thought-output="(data: any) => llmThoughts.push(data)"
@question-asked="llmThoughts = []"
/>
<tera-notebook-jupyter-thought-output :llm-thought="llmThought" />
<tera-notebook-jupyter-thought-output :llm-thoughts="llmThoughts" />
</div>
<v-ace-editor
v-model:value="codeText"
Expand Down Expand Up @@ -200,7 +201,13 @@ const kernelManager = new KernelSessionManager();
let editor: VAceEditorInstance['_editor'] | null;
const codeText = ref('');
const llmThought = ref();
const llmThoughts = ref<any[]>([]);
const sampleAgentQuestions = [
'Stratify my model by the ages young and old',
'Stratify my model by the locations Toronto and Montreal where Toronto and Montreal cannot interact',
'What is cartesian_control in stratify?'
];
const updateStratifyGroupForm = (config: StratifyGroup) => {
const state = _.cloneDeep(props.node.state);
Expand Down Expand Up @@ -253,16 +260,13 @@ const stratifyRequest = () => {
});
const messageContent = {
stratify_args: {
key: strataOption.name,
strata: strataOption.groupLabels.split(',').map((d) => d.trim()),
concepts_to_stratify: conceptsToStratify,
params_to_stratify: parametersToStratify,
cartesian_control: strataOption.cartesianProduct,
structure: strataOption.useStructure === true ? null : []
}
key: strataOption.name,
strata: strataOption.groupLabels.split(',').map((d) => d.trim()),
concepts_to_stratify: conceptsToStratify,
params_to_stratify: parametersToStratify,
cartesian_control: strataOption.cartesianProduct,
structure: strataOption.useStructure === true ? null : []
};
kernelManager.sendMessage('reset_request', {}).register('reset_response', () => {
kernelManager
.sendMessage('stratify_request', messageContent)
Expand Down Expand Up @@ -311,7 +315,7 @@ const buildJupyterContext = () => {
}
return {
context: 'mira_model',
context: 'mira_model_edit',
language: 'python3',
context_info: {
id: amr.value.id
Expand Down
101 changes: 101 additions & 0 deletions testing/Stratify-LLM-Tests.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,101 @@
## Stratify LLM Tests
Please go through __every__ step of the test scenario.\
When blocked, an error, or a UI/UX anomaly occurs, please report which scenario and step to [\#askem-testing](https://unchartedsoftware.slack.com/archives/C06FGLXB2CE).

### 1. Begin test
1. Login to https://app.staging.terarium.ai using the test account
```
email: [email protected]
password: askem-quality-assurance
```
2. Create, or open, a project named `Q&A [Your Name] [YYMMDD]`

### 2. Test Stratify LLM Responses
1. Make sure you have a model in your project.
2. Create a workflow:
    - Drop your model into the workflow
    - Create a Stratify node
    - Connect the model to the Stratify node
3. Drill down into the Stratify node and go to the notebook section.

Ask the following questions one at a time. Wait for the response and check that it matches the expected answer,
then reset the code block and move on to the next question.

Q) "Stratify my model by the ages young and old",
A)
model = stratify(
template_model=model,
key= "Age",
strata=['young', 'old'],
structure= [],
directed=False,
cartesian_control=False,
modify_names=True
)

Q) Stratify my model by the ages young and old where young can transition to old
A)
model = stratify(
template_model=model,
key= "Age",
strata=['young', 'old'],
structure= [['young', 'old']],
directed=True,
cartesian_control=False,
modify_names=True
)

Q) Stratify my model by the ages young and old where young and old can become old, but old cannot become young
A)
model = stratify(
template_model=model,
key= "Age",
strata=['young', 'old'],
structure= [['young', 'old']],
directed=False,
cartesian_control=False,
modify_names=True
)

Q) Stratify my model by the locations Toronto and Montreal where Toronto and Montreal cannot interact
A)
model = stratify(
template_model=model,
key= "Location",
strata=['Toronto', 'Montreal'],
structure= [],
directed=False,
cartesian_control=False,
modify_names=True
)

Q) Stratify my model by the locations Toronto and Montreal where Toronto and Montreal can interact
A)
model = stratify(
template_model=model,
key= "Location",
strata=['Toronto', 'Montreal'],
structure= [['Toronto', 'Montreal'], ['Montreal', 'Toronto']],
directed=False,
cartesian_control=True,
modify_names=True
)

OR
model = stratify(
template_model=model,
key= "Location",
strata=['Toronto', 'Montreal'],
structure= [['Toronto', 'Montreal']],
directed=True,
cartesian_control=True,
modify_names=True
)

Q) What is cartesian_control in stratify?
A)
No code response, instead just a message in the thought section.


### 3. End test
1. Log out of the application.

0 comments on commit 30088dc

Please sign in to comment.