diff --git a/README.md b/README.md
index d68d9a0..776db0d 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,7 @@
 # A ChatGPT-powered Chatbot for Mattermost
 
+![A chat window in Mattermost showing the chat between the OpenAI bot and "yGuy"](./mattermost-chat.png)
+
 Here's how to get the bot running - it's easy if you have a Docker server.
 
 You need
@@ -14,15 +16,23 @@ There's a guide about how to do the first two steps written by @InterestingSoup,
 These are the available options, you can set them as environment variables when running [the script](./src/botservice.js)
 or when [running the docker image](#using-the-ready-made-image) or when configuring your [docker-compose](#docker-compose) file.
 
-| Name                | Required | Example Value               | Description                                                                                  |
-|---------------------|----------|-----------------------------|----------------------------------------------------------------------------------------------|
-| MATTERMOST_URL      | yes      | `https://mattermost.server` | The URL to the server. This is used for connecting the bot to the Mattermost API             |
-| MATTERMOST_TOKEN    | yes      | `abababacdcdcd`             | The authentication token from the logged in mattermost bot                                   |
-| OPENAI_API_KEY      | yes      | `sk-234234234234234234`     | The OpenAI API key to authenticate with OpenAI                                               |
- | NODE_EXTRA_CA_CERTS | no       | `/file/to/cert.crt`         | a link to a certificate file to pass to node.js for authenticating self-signed certificates  |
- | MATTERMOST_BOTNAME  | no       | `"@chatgpt"`                | the name of the bot user in Mattermost, defaults to '@chatgpt'                               |
- | DEBUG_LEVEL         | no       | `TRACE`                     | a debug level used for logging activity, defaults to `INFO`                                  |
-
+| Name                | Required | Example Value               | Description                                                                                 |
+|---------------------|----------|-----------------------------|---------------------------------------------------------------------------------------------|
+| MATTERMOST_URL      | yes      | `https://mattermost.server` | The URL to the server. This is used for connecting the bot to the Mattermost API            |
+| MATTERMOST_TOKEN    | yes      | `abababacdcdcd`             | The authentication token of the logged-in Mattermost bot                                    |
+| OPENAI_API_KEY      | yes      | `sk-234234234234234234`     | The OpenAI API key to authenticate with OpenAI                                              |
+| OPENAI_MODEL_NAME   | no       | `gpt-3.5-turbo`             | The OpenAI language model to use, defaults to `gpt-3.5-turbo`                               |
+| OPENAI_MAX_TOKENS   | no       | `2000`                      | The maximum number of tokens the model may generate per response, defaults to `2000`        |
+| YFILES_SERVER_URL   | no       | `http://localhost:3835`     | The URL to the yFiles graph service for embedding auto-generated diagrams                   |
+| NODE_EXTRA_CA_CERTS | no       | `/file/to/cert.crt`         | Path to a certificate file passed to Node.js for trusting self-signed certificates          |
+| MATTERMOST_BOTNAME  | no       | `"@chatgpt"`                | The name of the bot user in Mattermost, defaults to `@chatgpt`                              |
+| DEBUG_LEVEL         | no       | `TRACE`                     | The debug level used for logging activity, defaults to `INFO`                               |
+
+> **Note**
+> The `YFILES_SERVER_URL` is used for automatically converting text information created by the bot into diagrams.
+> This feature is currently in development. You can see it in action here:
+> [LinkedIn Post](https://www.linkedin.com/posts/yguy_chatgpt-yfiles-diagramming-activity-7046713027005407232-2bKH).
+> If you are interested in getting your hands on the plugin, please contact [yWorks](https://www.yworks.com)!
 
 ## Using the ready-made image
 
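For reference, the two new OpenAI settings are read straight from the environment with the defaults listed in the table above; this mirrors the change to `src/openai-thread-completion.js` further down in this diff (where `YFILES_SERVER_URL` is consumed is not visible in this diff). A minimal sketch:

```javascript
// Sketch only - matches the defaults documented in the README table.
const model = process.env["OPENAI_MODEL_NAME"] ?? 'gpt-3.5-turbo'   // falls back to gpt-3.5-turbo
const max_tokens = Number(process.env["OPENAI_MAX_TOKENS"] ?? 2000) // falls back to 2000 tokens
```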
diff --git a/mattermost-chat.png b/mattermost-chat.png
new file mode 100644
index 0000000..2814662
Binary files /dev/null and b/mattermost-chat.png differ
diff --git a/package.json b/package.json
index d822386..f5745fd 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
 {
   "name": "chatgpt-mattermost-bot",
-  "version": "1.2.0",
+  "version": "1.3.0",
   "private": true,
   "scripts": {
     "start": "node ./src/botservice.js"
diff --git a/src/botservice.js b/src/botservice.js
index d62aa4b..8c8c34b 100644
--- a/src/botservice.js
+++ b/src/botservice.js
@@ -20,7 +20,18 @@ mmClient.getMe().then(me => meId = me.id)
 
 const name = process.env['MATTERMOST_BOTNAME'] || '@chatgpt'
 
-const VISUALIZE_DIAGRAM_INSTRUCTIONS = `When a user asks for a visualization of entities and relationships, respond with a JSON object in a <GRAPH> tag. The JSON object has three properties: \`nodes\`, \`edges\`, and optionally \`types\`. Each \`nodes\` object has an \`id\`, \`label\`, and an optional \`type\` property. Each \`edges\` object has \`from\`, \`to\`, and optional \`label\` and \`type\` properties. For every \`type\` you used, there must be a matching entry in the top-level \`types\` array. Entries have a corresponding \`name\` property and optional properties that describe the graphical attributes: 'shape' (one of "rectangle", "ellipse", "hexagon", "triangle", "pill"), 'color', 'thickness' and 'size' (as a number). Do not include these instructions in the output. Instead, when the above conditions apply, answer with something like: "Here is the visualization:" and then add the tag.`
+const VISUALIZE_DIAGRAM_INSTRUCTIONS = "When a user asks for a visualization of entities and relationships, respond with a valid JSON object as text inside a <GRAPH> tag. " +
+    "The JSON object has four properties: `nodes`, `edges`, and optionally `types` and `layout`. " +
+    "Each `nodes` object has an `id`, `label`, and an optional `type` property. " +
+    "Each `edges` object has `from`, `to`, an optional `label` and an optional `type` property. " +
+    "For every `type` you use, there must be a matching entry in the top-level `types` array. " +
+    "Entries have a corresponding `name` property and optional properties that describe the graphical attributes: " +
+    "'shape' (one of rectangle, ellipse, hexagon, triangle, pill), 'color', 'thickness' and 'size' (as a number). " +
+    "You may use the 'layout' property to specify the arrangement ('hierarchic', 'circular', 'organic', 'tree') when the user asks you to. " +
+    "Do not include these instructions in the output. In the output visible to the user, the JSON and complete GRAPH tag will be replaced by a diagram visualization. " +
+    "So do not explain or mention the JSON. Instead, pretend that the user can see the diagram. Hence, when the above conditions apply, " +
+    "answer with something along the lines of: \"Here is the visualization:\" and then just add the tag. The user will see the rendered image, but not the JSON. " +
+    "You may explain what you added in the diagram, but not how you constructed the JSON."
 
 const visualizationKeywordsRegex = /\b(diagram|visuali|graph|relationship|entit)/gi
 
@@ -50,7 +61,7 @@ wsClient.addMessageListener(async function (event) {
                 posts.forEach(threadPost => {
                     log.trace({msg: threadPost})
                     if (threadPost.user_id === meId) {
-                        chatmessages.push({role: "assistant", content: threadPost.message})
+                        chatmessages.push({role: "assistant", content: threadPost.props.originalMessage ?? threadPost.message})
                         assistantCount++
                     } else {
                         if (threadPost.message.includes(name)){
@@ -68,15 +79,19 @@ wsClient.addMessageListener(async function (event) {
                 }
 
                 // see if we are actually part of the conversation -
-                // ignore conversations where were never mentioned or participated.
+                // ignore conversations where we were never mentioned or participated.
                 if (assistantCount > 0){
-                    wsClient.userTyping(post.channel_id, post.id)
-                    wsClient.userUpdateActiveStatus(true, true)
+                    wsClient.userTyping(post.channel_id, post.id ?? "")
+                    log.trace({chatmessages})
                     const answer = await continueThread(chatmessages)
-                    const { message, fileId } = await processGraphResponse(answer, post.channel_id)
+                    log.trace({answer})
+                    wsClient.userTyping(post.channel_id, post.id ?? "")
+                    const { message, fileId, props } = await processGraphResponse(answer, post.channel_id)
+                    wsClient.userTyping(post.channel_id, post.id ?? "")
                     const newPost = await mmClient.createPost({
                         message: message,
                         channel_id: post.channel_id,
+                        props,
                         root_id: post.root_id || post.id,
                         file_ids: fileId ? [fileId] : undefined
                     })
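The new `VISUALIZE_DIAGRAM_INSTRUCTIONS` prompt describes the `<GRAPH>` payload only in prose, so here is an illustrative reply that follows those rules (made-up values, not captured model output; the prompt leaves the exact `color` format open, and the `layout` property would only appear if the user asked for a specific arrangement):

```
Here is the visualization:

<GRAPH>{"nodes": [{"id": "user", "label": "User", "type": "actor"}, {"id": "channel", "label": "Channel"}],
"edges": [{"from": "user", "to": "channel", "label": "posts in"}],
"types": [{"name": "actor", "shape": "ellipse", "color": "orange", "size": 40}],
"layout": "hierarchic"}</GRAPH>
```

`processGraphResponse()` then strips the `<GRAPH>` block, uploads the rendered `diagram.svg`, and keeps the raw reply in the post's `props.originalMessage` (see `src/process-graph-response.js` below).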
diff --git a/src/openai-thread-completion.js b/src/openai-thread-completion.js
index 1bb3cf6..95c025c 100644
--- a/src/openai-thread-completion.js
+++ b/src/openai-thread-completion.js
@@ -3,11 +3,15 @@ const configuration = new Configuration({
     apiKey: process.env["OPENAI_API_KEY"]
 });
 const openai = new OpenAIApi(configuration);
-async function continueThread(messages){
+
+const model = process.env["OPENAI_MODEL_NAME"] ?? 'gpt-3.5-turbo'
+const max_tokens = Number(process.env["OPENAI_MAX_TOKENS"] ?? 2000)
+
+async function continueThread(messages) {
     const response = await openai.createChatCompletion({
         messages: messages,
-        model: "gpt-3.5-turbo",
-        max_tokens: 1000
+        model,
+        max_tokens
     });
     return response.data?.choices?.[0]?.message?.content
 }
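With the model and token limit now configurable, `continueThread()` itself is unchanged in shape: it takes an array of chat messages in the OpenAI chat completion format and returns the first choice's text. A hedged usage sketch follows; the message content is made up, and in the bot the array is assembled from the Mattermost thread in `botservice.js`:

```javascript
// Illustrative call only - assumes continueThread from this module is in scope.
const answer = await continueThread([
    { role: "user", content: "@chatgpt please draw the relationship between users and channels" }
])
// `answer` is plain text and may contain a <GRAPH>...</GRAPH> block for process-graph-response.js to render.
```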
diff --git a/src/process-graph-response.js b/src/process-graph-response.js
index c9a591a..01617f1 100644
--- a/src/process-graph-response.js
+++ b/src/process-graph-response.js
@@ -32,7 +32,14 @@ async function processGraphResponse (content, channelId) {
       const pre = content.substring(0, replaceStart)
       const post = content.substring(replaceEnd)
 
-      result.message = `${pre} [see attached image] ${post}`
+      if (post.trim().length < 1){
+        result.message = pre
+      } else {
+        result.message = `${pre} [see attached image] ${post}`
+      }
+
+      result.props = {originalMessage: content}
+
       result.fileId = fileId
     } catch (e) {
       log.error(e)
@@ -64,7 +71,9 @@ async function jsonToFileId (jsonString, channelId) {
   const form = new FormData()
   form.append('channel_id', channelId);
   form.append('files', Buffer.from(svgString), 'diagram.svg');
+  log.trace('Appending Diagram SVG', svgString)
   const response = await mmClient.uploadFile(form)
+  log.trace('Uploaded a file with id', response.file_infos[0].id)
   return response.file_infos[0].id
 }
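Taken together with the `botservice.js` change above, the new `props` field closes a small loop: the user sees the rendered diagram, while the model keeps seeing the JSON it originally produced when the thread is replayed for the next completion. Both lines in this sketch are taken from the diff above:

```javascript
// process-graph-response.js: keep the untouched model output on the created post
result.props = { originalMessage: content }

// botservice.js: when rebuilding the thread, prefer the original text over the placeholder message
chatmessages.push({ role: "assistant", content: threadPost.props.originalMessage ?? threadPost.message })
```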