Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
34 commits
Select commit Hold shift + click to select a range
f093e1a
Python: Provider-leading client design & OpenAI package extraction
eavanvalkenburg Mar 20, 2026
af17b5c
fix: missing Agent imports in samples, .model_id → .model in foundry_…
eavanvalkenburg Mar 20, 2026
80ac333
fix: CI failures — mypy errors, coverage targets, sample imports
eavanvalkenburg Mar 20, 2026
fa50017
fix: populate openai .pyi stub, fix broken README links, coverage tar…
eavanvalkenburg Mar 20, 2026
ce10fa7
fixes
eavanvalkenburg Mar 20, 2026
6b72bcc
updated observability
eavanvalkenburg Mar 20, 2026
7ea540c
reset azure init.pyi
eavanvalkenburg Mar 20, 2026
c09530a
fix errors
eavanvalkenburg Mar 20, 2026
81581a9
updated adr number
eavanvalkenburg Mar 20, 2026
3493bab
fix foundry local
eavanvalkenburg Mar 21, 2026
75db7bc
fixed not renamed docstrings and comments, and added deprecated marke…
eavanvalkenburg Mar 23, 2026
c38ee9c
fix tests and pyprojects
eavanvalkenburg Mar 23, 2026
ef77a51
fix test vars
eavanvalkenburg Mar 23, 2026
d37dd27
updated function tests
eavanvalkenburg Mar 23, 2026
b1699e9
update durable
eavanvalkenburg Mar 23, 2026
af21656
updated test setup for functions
eavanvalkenburg Mar 24, 2026
82b5760
Fix Foundry auth in workflow samples
eavanvalkenburg Mar 24, 2026
2d62a69
Stabilize Python integration workflows
eavanvalkenburg Mar 24, 2026
45bb396
Update hosting samples for Foundry
eavanvalkenburg Mar 24, 2026
e12476a
Trigger full CI rerun
eavanvalkenburg Mar 24, 2026
71dc540
Trigger CI rerun again
eavanvalkenburg Mar 24, 2026
733b961
trigger rerun
eavanvalkenburg Mar 24, 2026
1d18d21
trigger rerun
eavanvalkenburg Mar 24, 2026
348d4af
fix for litellm
eavanvalkenburg Mar 24, 2026
0c8be84
undo durabletask changes
eavanvalkenburg Mar 24, 2026
7075758
Move Foundry APIs into foundry namespace
eavanvalkenburg Mar 24, 2026
5062748
Fix Foundry pyproject formatting
eavanvalkenburg Mar 24, 2026
7cddfad
Split provider samples by Foundry surface
eavanvalkenburg Mar 24, 2026
da8c38b
Restore hosting sample requirements
eavanvalkenburg Mar 24, 2026
0b9d994
updated tests
eavanvalkenburg Mar 24, 2026
9594f00
updated foundry integration tests
eavanvalkenburg Mar 24, 2026
1a2637f
removed dist from azurefunctions tests
eavanvalkenburg Mar 24, 2026
e305de7
Use separate Foundry clients for concurrent agents
eavanvalkenburg Mar 24, 2026
791b483
fix client setup in azfunc and durable
eavanvalkenburg Mar 24, 2026
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
The table of contents is too big for display.
Diff view
Diff view
  •  
  •  
  •  
166 changes: 166 additions & 0 deletions .github/actions/setup-local-mcp-server/action.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,166 @@
# Composite action: boot a throwaway streamable-HTTP MCP server on the runner,
# smoke-test it end to end, and publish either its URL or the caller-supplied
# fallback so downstream integration-test steps always have a usable URL.
name: Setup Local MCP Server
description: Start and validate a local streamable HTTP MCP server for integration tests

inputs:
  fallback_url:
    description: Existing LOCAL_MCP_URL value to keep as a fallback if local startup fails
    required: false
    default: ''
  host:
    description: Host interface to bind the local MCP server
    required: false
    default: '127.0.0.1'
  port:
    description: Port to bind the local MCP server
    required: false
    default: '8011'
  mount_path:
    description: Mount path for the local streamable HTTP MCP endpoint
    required: false
    default: '/mcp'

outputs:
  effective_url:
    description: Local MCP URL when startup succeeds, otherwise the provided fallback URL
    value: ${{ steps.start.outputs.effective_url }}
  local_url:
    description: URL of the local MCP server
    value: ${{ steps.start.outputs.local_url }}
  started:
    description: Whether the local MCP server started and passed validation
    value: ${{ steps.start.outputs.started }}
  pid:
    description: PID of the local MCP server process when startup succeeded
    value: ${{ steps.start.outputs.pid }}

runs:
  using: composite
  steps:
    - name: Start and validate local MCP server
      id: start
      shell: bash
      run: |
        set -euo pipefail
        # NOTE(review): inputs are interpolated directly into the script. That is
        # fine for these workflow-controlled values, but routing them through env
        # vars would be more robust against special characters.
        host="${{ inputs.host }}"
        port="${{ inputs.port }}"
        mount_path="${{ inputs.mount_path }}"
        fallback_url="${{ inputs.fallback_url }}"
        # Normalize the mount path so it always starts with a slash.
        if [[ ! "$mount_path" =~ ^/ ]]; then
          mount_path="/$mount_path"
        fi
        local_url="http://${host}:${port}${mount_path}"
        health_url="http://${host}:${port}/healthz"
        log_file="$RUNNER_TEMP/local-mcp-server.log"
        pid_file="$RUNNER_TEMP/local-mcp-server.pid"
        rm -f "$log_file" "$pid_file"
        # Launch the server through a Python shim so it runs in a new session
        # (its own process group, signalable as a unit) with stdout/stderr
        # captured to the log file; the shim prints the server PID.
        server_pid="$(
        python3 - "$GITHUB_WORKSPACE/python" "$log_file" "$host" "$port" "$mount_path" <<'PY'
        from __future__ import annotations
        import subprocess
        import sys
        # argv: workspace dir, log path, host, port, mount path (all strings).
        workspace, log_file, host, port, mount_path = sys.argv[1:]
        with open(log_file, "w", encoding="utf-8") as log:
            process = subprocess.Popen(
                [
                    "uv",
                    "run",
                    "python",
                    "scripts/local_mcp_streamable_http_server.py",
                    "--host",
                    host,
                    "--port",
                    port,
                    "--mount-path",
                    mount_path,
                ],
                cwd=workspace,
                stdout=log,
                stderr=subprocess.STDOUT,
                # Detach into a new session so the server outlives this shim and
                # the whole process group can be killed on teardown.
                start_new_session=True,
            )
        # Hand the PID back to the shell via stdout.
        print(process.pid)
        PY
        )"
        echo "$server_pid" > "$pid_file"
        # Poll the health endpoint for up to ~30s; stop early if the server
        # process dies before ever becoming healthy.
        started=false
        for _ in $(seq 1 30); do
          if curl --silent --fail "$health_url" >/dev/null; then
            started=true
            break
          fi
          if ! kill -0 "$server_pid" 2>/dev/null; then
            break
          fi
          sleep 1
        done
        # Health check passed: run an end-to-end smoke test with the real MCP
        # client — connect, assert tools are exposed, invoke the first tool,
        # and assert a non-empty textual response.
        if [[ "$started" == "true" ]]; then
          if ! (
          cd "$GITHUB_WORKSPACE/python"
          LOCAL_MCP_URL="$local_url" uv run python - <<'PY'
        from __future__ import annotations
        import asyncio
        import os
        from agent_framework import Content, MCPStreamableHTTPTool
        def result_to_text(result: str | list[Content]) -> str:
            # Flatten a tool result into plain text, joining text-type contents.
            if isinstance(result, str):
                return result
            return "\n".join(content.text for content in result if content.type == "text" and content.text)
        async def main() -> None:
            tool = MCPStreamableHTTPTool(
                name="local_ci_mcp",
                url=os.environ["LOCAL_MCP_URL"],
                approval_mode="never_require",
            )
            async with tool:
                assert tool.functions, "Local MCP server did not expose any tools."
                result = result_to_text(await tool.functions[0].invoke(query="What is Agent Framework?"))
                assert result, "Local MCP server returned an empty response."
        asyncio.run(main())
        PY
          ); then
            started=false
          fi
        fi
        # Decide final outputs: prefer the local server; on any failure fall
        # back to the caller-provided URL and tear down a half-started server.
        effective_url="$local_url"
        pid="$server_pid"
        if [[ "$started" != "true" ]]; then
          effective_url="$fallback_url"
          pid=""
          if kill -0 "$server_pid" 2>/dev/null; then
            # Signal the whole process group first ("-PID"); fall back to the
            # single PID if group signalling is unavailable.
            kill -TERM -- "-$server_pid" 2>/dev/null || kill -TERM "$server_pid" || true
            sleep 1
            kill -KILL -- "-$server_pid" 2>/dev/null || kill -KILL "$server_pid" || true
          fi
          echo "Local MCP server was unavailable; continuing with fallback LOCAL_MCP_URL."
          if [[ -f "$log_file" ]]; then
            # Surface the tail of the server log to aid debugging the failure.
            tail -n 100 "$log_file" || true
          fi
        else
          echo "Using local MCP server at $local_url"
        fi
        echo "started=$started" >> "$GITHUB_OUTPUT"
        echo "local_url=$local_url" >> "$GITHUB_OUTPUT"
        echo "effective_url=$effective_url" >> "$GITHUB_OUTPUT"
        echo "pid=$pid" >> "$GITHUB_OUTPUT"
3 changes: 1 addition & 2 deletions .github/workflows/python-check-coverage.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,8 +41,7 @@
"packages.purview.agent_framework_purview",
"packages.anthropic.agent_framework_anthropic",
"packages.azure-ai-search.agent_framework_azure_ai_search",
"packages.core.agent_framework.azure",
"packages.core.agent_framework.openai",
"packages.openai.agent_framework_openai",
# Individual files (if you want to enforce specific files instead of whole packages)
"packages/core/agent_framework/observability.py",
# Add more targets here as coverage improves
Expand Down
50 changes: 42 additions & 8 deletions .github/workflows/python-integration-tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -63,6 +63,8 @@ jobs:
OPENAI_CHAT_MODEL_ID: ${{ vars.OPENAI__CHATMODELID }}
OPENAI_RESPONSES_MODEL_ID: ${{ vars.OPENAI__RESPONSESMODELID }}
OPENAI_EMBEDDINGS_MODEL_ID: ${{ vars.OPENAI_EMBEDDING_MODEL_ID }}
OPENAI_MODEL: ${{ vars.OPENAI__RESPONSESMODELID }}
OPENAI_EMBEDDING_MODEL: ${{ vars.OPENAI_EMBEDDING_MODEL_ID }}
OPENAI_API_KEY: ${{ secrets.OPENAI__APIKEY }}
defaults:
run:
Expand All @@ -81,7 +83,7 @@ jobs:
- name: Test with pytest (OpenAI integration)
run: >
uv run pytest --import-mode=importlib
packages/core/tests/openai
packages/openai/tests
-m integration
-n logical --dist worksteal
--timeout=120 --session-timeout=900 --timeout_method thread
Expand Down Expand Up @@ -121,7 +123,7 @@ jobs:
- name: Test with pytest (Azure OpenAI integration)
run: >
uv run pytest --import-mode=importlib
packages/core/tests/azure
packages/azure-ai/tests/azure_openai
-m integration
-n logical --dist worksteal
--timeout=120 --session-timeout=900 --timeout_method thread
Expand Down Expand Up @@ -151,6 +153,13 @@ jobs:
with:
python-version: ${{ env.UV_PYTHON }}
os: ${{ runner.os }}
- name: Start local MCP server
id: local-mcp
uses: ./.github/actions/setup-local-mcp-server
with:
fallback_url: ${{ env.LOCAL_MCP_URL }}
- name: Prefer local MCP URL when available
run: echo "LOCAL_MCP_URL=${{ steps.local-mcp.outputs.effective_url }}" >> "$GITHUB_ENV"
- name: Test with pytest (Anthropic, Ollama, MCP integration)
run: >
uv run pytest --import-mode=importlib
Expand All @@ -161,6 +170,26 @@ jobs:
-n logical --dist worksteal
--timeout=120 --session-timeout=900 --timeout_method thread
--retries 2 --retry-delay 5
- name: Stop local MCP server
if: always()
shell: bash
run: |
set -euo pipefail
server_pid="${{ steps.local-mcp.outputs.pid }}"
if [[ -z "$server_pid" ]]; then
exit 0
fi
if ! kill -0 "$server_pid" 2>/dev/null; then
exit 0
fi
kill -TERM -- "-$server_pid" 2>/dev/null || kill -TERM "$server_pid" 2>/dev/null || true
for _ in $(seq 1 10); do
if ! kill -0 "$server_pid" 2>/dev/null; then
exit 0
fi
sleep 1
done
kill -KILL -- "-$server_pid" 2>/dev/null || kill -KILL "$server_pid" 2>/dev/null || true

# Azure Functions + Durable Task integration tests
python-tests-functions:
Expand All @@ -172,10 +201,11 @@ jobs:
UV_PYTHON: "3.11"
OPENAI_CHAT_MODEL_ID: ${{ vars.OPENAI__CHATMODELID }}
OPENAI_RESPONSES_MODEL_ID: ${{ vars.OPENAI__RESPONSESMODELID }}
OPENAI_MODEL: ${{ vars.OPENAI__RESPONSESMODELID }}
OPENAI_API_KEY: ${{ secrets.OPENAI__APIKEY }}
AZURE_OPENAI_CHAT_DEPLOYMENT_NAME: ${{ vars.AZUREOPENAI__CHATDEPLOYMENTNAME }}
AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME: ${{ vars.AZUREOPENAI__RESPONSESDEPLOYMENTNAME }}
AZURE_OPENAI_ENDPOINT: ${{ vars.AZUREOPENAI__ENDPOINT }}
OPENAI_EMBEDDING_MODEL: ${{ vars.OPENAI_EMBEDDING_MODEL_ID }}
FOUNDRY_MODEL: ${{ vars.AZUREAI__DEPLOYMENTNAME }}
FOUNDRY_PROJECT_ENDPOINT: ${{ secrets.AZUREAI__ENDPOINT }}
FUNCTIONS_WORKER_RUNTIME: "python"
DURABLE_TASK_SCHEDULER_CONNECTION_STRING: "Endpoint=http://localhost:8080;TaskHub=default;Authentication=None"
AzureWebJobsStorage: "UseDevelopmentStorage=true"
Expand Down Expand Up @@ -208,8 +238,8 @@ jobs:
packages/azurefunctions/tests/integration_tests
packages/durabletask/tests/integration_tests
-m integration
-n logical --dist worksteal
--timeout=120 --session-timeout=900 --timeout_method thread
-x
--timeout=360 --session-timeout=900 --timeout_method thread
--retries 2 --retry-delay 5

# Azure AI integration tests
Expand All @@ -221,6 +251,8 @@ jobs:
env:
AZURE_AI_PROJECT_ENDPOINT: ${{ secrets.AZUREAI__ENDPOINT }}
AZURE_AI_MODEL_DEPLOYMENT_NAME: ${{ vars.AZUREAI__DEPLOYMENTNAME }}
FOUNDRY_PROJECT_ENDPOINT: ${{ secrets.AZUREAI__ENDPOINT }}
FOUNDRY_MODEL: ${{ vars.AZUREAI__DEPLOYMENTNAME }}
LOCAL_MCP_URL: ${{ vars.LOCAL_MCP__URL }}
defaults:
run:
Expand All @@ -244,7 +276,9 @@ jobs:
subscription-id: ${{ secrets.AZURE_SUBSCRIPTION_ID }}
- name: Test with pytest
timeout-minutes: 15
run: uv run --directory packages/azure-ai poe integration-tests -n logical --dist worksteal --timeout=120 --session-timeout=900 --timeout_method thread --retries 2 --retry-delay 5
run: |
uv run --directory packages/azure-ai poe integration-tests -n logical --dist worksteal --timeout=120 --session-timeout=900 --timeout_method thread --retries 2 --retry-delay 5
uv run --directory packages/foundry poe integration-tests -n logical --dist worksteal --timeout=120 --session-timeout=900 --timeout_method thread --retries 2 --retry-delay 5

# Azure Cosmos integration tests
python-tests-cosmos:
Expand Down
Loading
Loading