diff --git a/.env.sample b/.env.sample
index ce5d7e4..07ee27b 100644
--- a/.env.sample
+++ b/.env.sample
@@ -1,5 +1,9 @@
-SLACK_APP_TOKEN=YOUR_SLACK_APP_TOKEN
-SLACK_BOT_TOKEN=YOUR_SLACK_BOT_TOKEN
+# Optional: uncomment and set these when running without the Slack CLI (python3 app.py).
+# SLACK_APP_TOKEN=YOUR_SLACK_APP_TOKEN
+# SLACK_BOT_TOKEN=YOUR_SLACK_BOT_TOKEN
+
+# Optional: uncomment and set when using a custom Slack instance.
# SLACK_API_URL=YOUR_SLACK_API_URL
-# This template uses OpenAI, but you can use any other provider!
+
+# Required: set your OpenAI API key.
OPENAI_API_KEY=YOUR_OPENAI_API_KEY
diff --git a/README.md b/README.md
index 4216662..877690f 100644
--- a/README.md
+++ b/README.md
@@ -16,7 +16,7 @@ Join the [Slack Developer Program](https://api.slack.com/developer-program) for
Add this app to your workspace using either the Slack CLI or other development tooling, then read ahead to configuring LLM responses in the **[Providers](#providers)** section.
-### Using Slack CLI
+<details><summary>Using Slack CLI</summary>
Install the latest version of the Slack CLI for your operating system:
@@ -46,7 +46,11 @@ slack install
After the Slack app has been created, you're all set to configure the LLM provider!
-### Using Terminal
+</details>
+
+<details><summary>Using Terminal</summary>
+
+#### Create Your Slack App
1. Open [https://api.slack.com/apps/new](https://api.slack.com/apps/new) and choose "From an app manifest"
2. Choose the workspace you want to install the application to
@@ -91,6 +95,8 @@ source .venv/bin/activate # for Windows OS, .\.venv\Scripts\Activate instead sh
pip install -r requirements.txt
```
+</details>
+
## Providers
### OpenAI Setup
@@ -150,9 +156,11 @@ Configures the new Slack Assistant features, providing a dedicated side panel UI
- The `assistant_thread_started.py` file, which responds to new app threads with a list of suggested prompts.
- The `message.py` file, which responds to user messages sent to app threads or from the **Chat** and **History** tab with an LLM generated response.
-### `/ai`
+### `/agent`
+
+The `llm_caller.py` file calls the OpenAI API and streams the generated response into a Slack conversation.
-The `llm_caller.py` file, which handles OpenAI API integration and message formatting. It includes the `call_llm()` function that sends conversation threads to OpenAI's models.
+The `tools` directory contains app-specific functions for the LLM to call.
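+
+A rough sketch of the flow from a listener, mirroring `listeners/events/app_mentioned.py` (the `client`, `channel_id`, `team_id`, `user_id`, and `thread_ts` values come from the Bolt listener arguments and event payload):
+
+```python
+from agent.llm_caller import call_llm
+from listeners.views.feedback_block import create_feedback_block
+
+streamer = client.chat_stream(
+    channel=channel_id,
+    recipient_team_id=team_id,
+    recipient_user_id=user_id,
+    thread_ts=thread_ts,
+)
+call_llm(streamer, [{"role": "user", "content": "Roll two d6"}])
+streamer.stop(blocks=create_feedback_block())
+```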
## App Distribution / OAuth
diff --git a/agent/llm_caller.py b/agent/llm_caller.py
new file mode 100644
index 0000000..4bc08ee
--- /dev/null
+++ b/agent/llm_caller.py
@@ -0,0 +1,101 @@
+import json
+import os
+
+import openai
+from openai.types.responses import ResponseInputParam
+from slack_sdk.models.messages.chunk import TaskUpdateChunk
+from slack_sdk.web.chat_stream import ChatStream
+
+from agent.tools.dice import roll_dice, roll_dice_definition
+
+
+def call_llm(
+ streamer: ChatStream,
+ prompts: ResponseInputParam,
+):
+ """
+    Stream an LLM response to prompts with an example dice-rolling function
+
+ https://docs.slack.dev/tools/python-slack-sdk/web#sending-streaming-messages
+ https://platform.openai.com/docs/guides/text
+ https://platform.openai.com/docs/guides/streaming-responses
+ https://platform.openai.com/docs/guides/function-calling
+ """
+ llm = openai.OpenAI(
+ api_key=os.getenv("OPENAI_API_KEY"),
+ )
+ tool_calls = []
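+    # Request a streamed response, advertising the dice tool so the model can call it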
+ response = llm.responses.create(
+ model="gpt-4o-mini",
+ input=prompts,
+ tools=[
+ roll_dice_definition,
+ ],
+ stream=True,
+ )
+ for event in response:
+ # Markdown text from the LLM response is streamed in chat as it arrives
+ if event.type == "response.output_text.delta":
+ streamer.append(markdown_text=f"{event.delta}")
+
+        # Function calls are saved for later computation and a new task is shown
+ if event.type == "response.output_item.done":
+ if event.item.type == "function_call":
+ tool_calls.append(event.item)
+ if event.item.name == "roll_dice":
+ args = json.loads(event.item.arguments)
+ streamer.append(
+ chunks=[
+ TaskUpdateChunk(
+ id=f"{event.item.call_id}",
+ title=f"Rolling a {args['count']}d{args['sides']}...",
+ status="in_progress",
+ ),
+ ],
+ )
+
+ # Tool calls are performed and tasks are marked as completed in Slack
+ if tool_calls:
+ for call in tool_calls:
+ if call.name == "roll_dice":
+ args = json.loads(call.arguments)
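+                # Echo the model's function call back into the prompt history so the
+                # follow-up request can pair it with the function_call_output below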
+ prompts.append(
+ {
+ "id": call.id,
+ "call_id": call.call_id,
+ "type": "function_call",
+ "name": "roll_dice",
+ "arguments": call.arguments,
+ }
+ )
+ result = roll_dice(**args)
+ prompts.append(
+ {
+ "type": "function_call_output",
+ "call_id": call.call_id,
+ "output": json.dumps(result),
+ }
+ )
+ if result.get("error") is not None:
+ streamer.append(
+ chunks=[
+ TaskUpdateChunk(
+ id=f"{call.call_id}",
+ title=f"{result['error']}",
+ status="error",
+ ),
+ ],
+ )
+ else:
+ streamer.append(
+ chunks=[
+ TaskUpdateChunk(
+ id=f"{call.call_id}",
+ title=f"{result['description']}",
+ status="complete",
+ ),
+ ],
+ )
+
+        # Complete the LLM response after making tool calls
+ call_llm(streamer, prompts)
diff --git a/agent/tools/__init__.py b/agent/tools/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/agent/tools/dice.py b/agent/tools/dice.py
new file mode 100644
index 0000000..61553a3
--- /dev/null
+++ b/agent/tools/dice.py
@@ -0,0 +1,56 @@
+import random
+
+from openai.types.responses import FunctionToolParam
+
+
+def roll_dice(sides: int = 6, count: int = 1) -> dict:
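+    """Roll `count` dice with `sides` sides each and return the rolls and their total."""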
+ if sides < 2:
+ return {
+ "error": "A die must have at least 2 sides",
+ "rolls": [],
+ "total": 0,
+ }
+
+ if count < 1:
+ return {
+ "error": "Must roll at least 1 die",
+ "rolls": [],
+ "total": 0,
+ }
+
+ # Roll the dice and calculate the total
+ rolls = [random.randint(1, sides) for _ in range(count)]
+ total = sum(rolls)
+
+ return {
+ "rolls": rolls,
+ "total": total,
+ "description": f"Rolled a {count}d{sides} to total {total}",
+ }
+
+
+# Tool definition for OpenAI API
+#
+# https://platform.openai.com/docs/guides/function-calling
+roll_dice_definition: FunctionToolParam = {
+ "type": "function",
+ "name": "roll_dice",
+ "description": "Roll one or more dice with a specified number of sides. Use this when the user wants to roll dice or generate random numbers within a range.",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "sides": {
+ "type": "integer",
+ "description": "The number of sides on the die (e.g., 6 for a standard die, 20 for a d20)",
+ "default": 6,
+ },
+ "count": {
+ "type": "integer",
+ "description": "The number of dice to roll",
+ "default": 1,
+ },
+ },
+ "required": ["sides", "count"],
+ },
+ "strict": False,
+}
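+
+
+# Example (illustrative, actual rolls are random):
+#   roll_dice(sides=20, count=2)
+#   -> {"rolls": [7, 18], "total": 25, "description": "Rolled a 2d20 to total 25"}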
diff --git a/ai/llm_caller.py b/ai/llm_caller.py
deleted file mode 100644
index d0a0591..0000000
--- a/ai/llm_caller.py
+++ /dev/null
@@ -1,27 +0,0 @@
-import os
-from typing import Dict, List
-
-import openai
-from openai import Stream
-from openai.types.responses import ResponseStreamEvent
-
-DEFAULT_SYSTEM_CONTENT = """
-You're an assistant in a Slack workspace.
-Users in the workspace will ask you to help them write something or to think better about a specific topic.
-You'll respond to those questions in a professional way.
-When you include markdown text, convert them to Slack compatible ones.
-When a prompt has Slack's special syntax like <@USER_ID> or <#CHANNEL_ID>, you must keep them as-is in your response.
-"""
-
-
-def call_llm(
- messages_in_thread: List[Dict[str, str]],
- system_content: str = DEFAULT_SYSTEM_CONTENT,
-) -> Stream[ResponseStreamEvent]:
- openai_client = openai.OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
- messages = [{"role": "system", "content": system_content}]
- messages.extend(messages_in_thread)
- response = openai_client.responses.create(
- model="gpt-4o-mini", input=messages, stream=True
- )
- return response
diff --git a/app.py b/app.py
index 44ae23b..fbdac66 100644
--- a/app.py
+++ b/app.py
@@ -2,7 +2,6 @@
import os
from dotenv import load_dotenv
-
from slack_bolt import App
from slack_bolt.adapter.socket_mode import SocketModeHandler
from slack_sdk import WebClient
@@ -22,6 +21,7 @@
token=os.environ.get("SLACK_BOT_TOKEN"),
),
)
+
# Register Listeners
register_listeners(app)
diff --git a/listeners/assistant/assistant_thread_started.py b/listeners/assistant/assistant_thread_started.py
index 9d2990f..0957f0d 100644
--- a/listeners/assistant/assistant_thread_started.py
+++ b/listeners/assistant/assistant_thread_started.py
@@ -1,5 +1,4 @@
from logging import Logger
-from typing import Dict, List
from slack_bolt import Say, SetSuggestedPrompts
@@ -18,24 +17,19 @@ def assistant_thread_started(
logger: Logger instance for error tracking
"""
try:
- say("How can I help you?")
-
- prompts: List[Dict[str, str]] = [
- {
- "title": "What does Slack stand for?",
- "message": "Slack, a business communication service, was named after an acronym. Can you guess what it stands for?",
- },
- {
- "title": "Write a draft announcement",
- "message": "Can you write a draft announcement about a new feature my team just released? It must include how impactful it is.",
- },
- {
- "title": "Suggest names for my Slack app",
- "message": "Can you suggest a few names for my Slack app? The app helps my teammates better organize information and plan priorities and action items.",
- },
- ]
-
- set_suggested_prompts(prompts=prompts)
+ say("What would you like to do today?")
+ set_suggested_prompts(
+ prompts=[
+ {
+ "title": "Prompt a task with thinking steps",
+ "message": "Wonder a few deep thoughts.",
+ },
+ {
+ "title": "Roll dice for a random number",
+ "message": "Roll two 12-sided dice and three 6-sided dice for a pseudo-random score.",
+ },
+ ]
+ )
except Exception as e:
logger.exception(f"Failed to handle an assistant_thread_started event: {e}", e)
say(f":warning: Something went wrong! ({e})")
diff --git a/listeners/assistant/message.py b/listeners/assistant/message.py
index b61de90..3fece10 100644
--- a/listeners/assistant/message.py
+++ b/listeners/assistant/message.py
@@ -1,18 +1,24 @@
+import time
from logging import Logger
-from typing import Dict, List
+from openai.types.responses import ResponseInputParam
from slack_bolt import BoltContext, Say, SetStatus
from slack_sdk import WebClient
+from slack_sdk.models.messages.chunk import (
+ MarkdownTextChunk,
+ PlanUpdateChunk,
+ TaskUpdateChunk,
+)
-from ai.llm_caller import call_llm
-
-from ..views.feedback_block import create_feedback_block
+from agent.llm_caller import call_llm
+from listeners.views.feedback_block import create_feedback_block
def message(
client: WebClient,
context: BoltContext,
logger: Logger,
+ message: dict,
payload: dict,
say: Say,
set_status: SetStatus,
@@ -34,47 +40,125 @@ def message(
thread_ts = payload["thread_ts"]
user_id = context.user_id
- set_status(
- status="thinking...",
- loading_messages=[
- "Teaching the hamsters to type faster…",
- "Untangling the internet cables…",
- "Consulting the office goldfish…",
- "Polishing up the response just for you…",
- "Convincing the AI to stop overthinking…",
- ],
- )
+    # The first example streams a message with thinking steps, using different
+    # chunk types to construct and update a plan alongside text output.
+ if message["text"] == "Wonder a few deep thoughts.":
+ set_status(
+ status="thinking...",
+ loading_messages=[
+ "Teaching the hamsters to type faster…",
+ "Untangling the internet cables…",
+ "Consulting the office goldfish…",
+ "Polishing up the response just for you…",
+ "Convincing the AI to stop overthinking…",
+ ],
+ )
+
+ time.sleep(4)
+
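+        # The "plan" task display mode presents task chunks as steps of an overall plan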
+ streamer = client.chat_stream(
+ channel=channel_id,
+ recipient_team_id=team_id,
+ recipient_user_id=user_id,
+ thread_ts=thread_ts,
+ task_display_mode="plan",
+ )
+ streamer.append(
+ chunks=[
+ MarkdownTextChunk(
+ text="Hello.\nI have received the task. ",
+ ),
+ MarkdownTextChunk(
+ text="This task appears manageable.\nThat is good.",
+ ),
+ TaskUpdateChunk(
+ id="001",
+ title="Understanding the task...",
+ status="in_progress",
+ details="- Identifying the goal\n- Identifying constraints",
+ ),
+ TaskUpdateChunk(
+ id="002",
+ title="Performing acrobatics...",
+ status="pending",
+ ),
+ ],
+ )
+ time.sleep(4)
- replies = client.conversations_replies(
- channel=context.channel_id,
- ts=context.thread_ts,
- oldest=context.thread_ts,
- limit=10,
- )
- messages_in_thread: List[Dict[str, str]] = []
- for message in replies["messages"]:
- role = "user" if message.get("bot_id") is None else "assistant"
- messages_in_thread.append({"role": role, "content": message["text"]})
+ streamer.append(
+ chunks=[
+ PlanUpdateChunk(
+ title="Adding the final pieces...",
+ ),
+ TaskUpdateChunk(
+ id="001",
+ title="Understanding the task...",
+ status="complete",
+ details="\n- Pretending this was obvious",
+ output="We'll continue to ramble now",
+ ),
+ TaskUpdateChunk(
+ id="002",
+ title="Performing acrobatics...",
+ status="in_progress",
+ ),
+ ],
+ )
+ time.sleep(4)
- returned_message = call_llm(messages_in_thread)
+ feedback_block = create_feedback_block()
+ streamer.stop(
+ chunks=[
+ PlanUpdateChunk(
+ title="Decided to put on a show",
+ ),
+ TaskUpdateChunk(
+ id="002",
+ title="Performing acrobatics...",
+ status="complete",
+ details="- Jumped atop ropes\n- Juggled bowling pins\n- Rode a single wheel too",
+ ),
+ MarkdownTextChunk(
+ text="The crowd appears to be astounded and applauds :popcorn:"
+ ),
+ ],
+ blocks=feedback_block,
+ )
- streamer = client.chat_stream(
- channel=channel_id,
- recipient_team_id=team_id,
- recipient_user_id=user_id,
- thread_ts=thread_ts,
- )
+    # The second example streams a generated text response for the provided
+    # prompt, displayed as a timeline.
+ else:
+ set_status(
+ status="thinking...",
+ loading_messages=[
+ "Teaching the hamsters to type faster…",
+ "Untangling the internet cables…",
+ "Consulting the office goldfish…",
+ "Polishing up the response just for you…",
+ "Convincing the AI to stop overthinking…",
+ ],
+ )
- # Loop over OpenAI response stream
- # https://platform.openai.com/docs/api-reference/responses/create
- for event in returned_message:
- if event.type == "response.output_text.delta":
- streamer.append(markdown_text=f"{event.delta}")
- else:
- continue
+ streamer = client.chat_stream(
+ channel=channel_id,
+ recipient_team_id=team_id,
+ recipient_user_id=user_id,
+ thread_ts=thread_ts,
+ task_display_mode="timeline",
+ )
+ prompts: ResponseInputParam = [
+ {
+ "role": "user",
+ "content": message["text"],
+ },
+ ]
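+        # Hand the prompt history and streamer to the agent, which streams text and tool updates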
+ call_llm(streamer, prompts)
- feedback_block = create_feedback_block()
- streamer.stop(blocks=feedback_block)
+ feedback_block = create_feedback_block()
+ streamer.stop(
+ blocks=feedback_block,
+ )
except Exception as e:
logger.exception(f"Failed to handle a user message event: {e}")
diff --git a/listeners/events/app_mentioned.py b/listeners/events/app_mentioned.py
index 6ec8d25..89001b7 100644
--- a/listeners/events/app_mentioned.py
+++ b/listeners/events/app_mentioned.py
@@ -1,10 +1,11 @@
from logging import Logger
+from openai.types.responses import ResponseInputParam
from slack_bolt import Say
from slack_sdk import WebClient
-from ai.llm_caller import call_llm
-from ..views.feedback_block import create_feedback_block
+from agent.llm_caller import call_llm
+from listeners.views.feedback_block import create_feedback_block
def app_mentioned_callback(client: WebClient, event: dict, logger: Logger, say: Say):
@@ -38,25 +39,24 @@ def app_mentioned_callback(client: WebClient, event: dict, logger: Logger, say:
],
)
- returned_message = call_llm([{"role": "user", "content": text}])
-
streamer = client.chat_stream(
channel=channel_id,
recipient_team_id=team_id,
recipient_user_id=user_id,
thread_ts=thread_ts,
)
-
- # Loop over OpenAI response stream
- # https://platform.openai.com/docs/api-reference/responses/create
- for event in returned_message:
- if event.type == "response.output_text.delta":
- streamer.append(markdown_text=f"{event.delta}")
- else:
- continue
+ prompts: ResponseInputParam = [
+ {
+ "role": "user",
+ "content": text,
+ },
+ ]
+ call_llm(streamer, prompts)
feedback_block = create_feedback_block()
- streamer.stop(blocks=feedback_block)
+ streamer.stop(
+ blocks=feedback_block,
+ )
except Exception as e:
logger.exception(f"Failed to handle a user message event: {e}")
say(f":warning: Something went wrong! ({e})")
diff --git a/requirements.txt b/requirements.txt
index 2a18225..00a0ced 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,4 @@
-slack-sdk==3.39.0
+slack-sdk==3.40.0.dev0
slack-bolt==1.27.0
# If you use a different LLM vendor, replace this dependency