diff --git a/README.md b/README.md
index 0d16fcc..877690f 100644
--- a/README.md
+++ b/README.md
@@ -16,7 +16,7 @@ Join the [Slack Developer Program](https://api.slack.com/developer-program) for
Add this app to your workspace using either the Slack CLI or other development tooling, then read ahead to configuring LLM responses in the **[Providers](#providers)** section.
-### Using Slack CLI
+### Using Slack CLI
Install the latest version of the Slack CLI for your operating system:
@@ -46,7 +46,11 @@ slack install
After the Slack app has been created you're all set to configure the LLM provider!
-### Using Terminal
+
+
+### Using Terminal
+
+#### Create Your Slack App
1. Open [https://api.slack.com/apps/new](https://api.slack.com/apps/new) and choose "From an app manifest"
2. Choose the workspace you want to install the application to
@@ -91,6 +95,8 @@ source .venv/bin/activate # for Windows OS, .\.venv\Scripts\Activate instead sh
pip install -r requirements.txt
```
+
+
## Providers
### OpenAI Setup
diff --git a/agent/tools/dice.py b/agent/tools/dice.py
index 7ce5080..61553a3 100644
--- a/agent/tools/dice.py
+++ b/agent/tools/dice.py
@@ -1,5 +1,4 @@
import random
-import time
from openai.types.responses import FunctionToolParam
@@ -23,9 +22,6 @@ def roll_dice(sides: int = 6, count: int = 1) -> dict:
rolls = [random.randint(1, sides) for _ in range(count)]
total = sum(rolls)
- # Add a pause between rolls to demonstrate loading states
- time.sleep(2)
-
return {
"rolls": rolls,
"total": total,
diff --git a/listeners/assistant/assistant_thread_started.py b/listeners/assistant/assistant_thread_started.py
index a5fa3f1..0957f0d 100644
--- a/listeners/assistant/assistant_thread_started.py
+++ b/listeners/assistant/assistant_thread_started.py
@@ -26,7 +26,7 @@ def assistant_thread_started(
},
{
"title": "Roll dice for a random number",
- "message": "Roll two 12-sided dice and three 6-sided dice for a psuedo-random score.",
+ "message": "Roll two 12-sided dice and three 6-sided dice for a pseudo-random score.",
},
]
)
diff --git a/listeners/assistant/message.py b/listeners/assistant/message.py
index 9967020..3fece10 100644
--- a/listeners/assistant/message.py
+++ b/listeners/assistant/message.py
@@ -40,9 +40,22 @@ def message(
thread_ts = payload["thread_ts"]
user_id = context.user_id
- # The first example shows detailed thinking steps similar to tool calls
- # displayed as plan.
+ # The first example shows a message with thinking steps that has different
+ # chunks to construct and update a plan alongside text outputs.
if message["text"] == "Wonder a few deep thoughts.":
+ set_status(
+ status="thinking...",
+ loading_messages=[
+ "Teaching the hamsters to type faster…",
+ "Untangling the internet cables…",
+ "Consulting the office goldfish…",
+ "Polishing up the response just for you…",
+ "Convincing the AI to stop overthinking…",
+ ],
+ )
+
+ time.sleep(4)
+
streamer = client.chat_stream(
channel=channel_id,
recipient_team_id=team_id,
@@ -89,12 +102,12 @@ def message(
id="002",
title="Performing acrobatics...",
status="in_progress",
- details="- Jumping atop ropes\n- Juggling bowling pins\n- Riding a single wheel too",
),
],
)
time.sleep(4)
+ feedback_block = create_feedback_block()
streamer.stop(
chunks=[
PlanUpdateChunk(
@@ -104,11 +117,13 @@ def message(
id="002",
title="Performing acrobatics...",
status="complete",
+ details="- Jumped atop ropes\n- Juggled bowling pins\n- Rode a single wheel too",
),
MarkdownTextChunk(
text="The crowd appears to be astounded and applauds :popcorn:"
),
],
+ blocks=feedback_block,
)
# This second example shows a generated text response for a provided prompt