diff --git a/.github/workflows/pull-requests.yaml b/.github/workflows/pull-requests.yaml index 0520dc3..75a1265 100644 --- a/.github/workflows/pull-requests.yaml +++ b/.github/workflows/pull-requests.yaml @@ -19,7 +19,7 @@ jobs: timeout-minutes: 10 strategy: matrix: - container: [ "python:3.8", "python:3.9", "python:3.10", "python:3.11" ] + container: [ "python:3.9", "python:3.10", "python:3.11" ] container: image: ${{ matrix.container }} @@ -64,13 +64,13 @@ jobs: . - name: Run black formatter check - run: black --check confidence + run: black --check confidence --exclude="telemetry_pb2.py|_version.py" - name: Run flake8 formatter check - run: flake8 confidence - + run: flake8 confidence --exclude=telemetry_pb2.py,_version.py + - name: Run type linter check - run: mypy confidence + run: mypy confidence --follow-imports=skip --exclude=telemetry_pb2.py - name: Run tests with pytest run: pytest diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 675f4b9..66e028e 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -2,10 +2,14 @@ We ask you to write well covered unit tests with your changes and please make sure you use `black` and `flake8` to lint your code before making a PR. There are CI checks that will fail otherwise. -Linting and tests will run on python [3.8, 3.9. 3.10 and 3.11](https://github.com/spotify/confidence-sdk-python/blob/nicklasl-patch-1/.github/workflows/pull-requests.yaml#L22). +Linting and tests will run on python [3.9. 3.10 and 3.11](https://github.com/spotify/confidence-sdk-python/blob/nicklasl-patch-1/.github/workflows/pull-requests.yaml#L22). We require pull request titles to follow the [Conventional Commits specification](https://www.conventionalcommits.org/en/v1.0.0/) and we also encourage individual commits to adher to that. We use "squash merge" and any merge PR title will show up in the changelog based on the title. 
+Run the following if you need to regenerate the telemetry protobuf code: +``` +./generate_proto.py +``` \ No newline at end of file diff --git a/README.md b/README.md index 0ad068a..8b6c17f 100644 --- a/README.md +++ b/README.md @@ -59,6 +59,18 @@ confidence.track("event_name", { }) ``` +## Telemetry + +The SDK includes telemetry functionality that helps monitor SDK performance and usage. By default, telemetry is enabled and collects metrics (anonymously) such as resolve latency and request status. This data is used by the Confidence team to improve the product, and in certain cases it is also available to the SDK adopters. + +You can disable telemetry by setting `disable_telemetry=True` when initializing the Confidence client: + +```python +confidence = Confidence("CLIENT_TOKEN", + disable_telemetry=True +) +``` + ## OpenFeature The library includes a `Provider` for diff --git a/confidence/confidence.py b/confidence/confidence.py index ebbdc55..1a9fd55 100644 --- a/confidence/confidence.py +++ b/confidence/confidence.py @@ -19,6 +19,7 @@ import requests import httpx from typing_extensions import TypeGuard +import time from confidence import __version__ from confidence.errors import ( @@ -30,6 +31,7 @@ ) from .flag_types import FlagResolutionDetails, Reason, ErrorCode from .names import FlagName, VariantName +from .telemetry import Telemetry, ProtoTraceId, ProtoStatus EU_RESOLVE_API_ENDPOINT = "https://resolver.eu.confidence.dev" US_RESOLVE_API_ENDPOINT = "https://resolver.us.confidence.dev" @@ -101,6 +103,7 @@ def __init__( timeout_ms: Optional[int] = DEFAULT_TIMEOUT_MS, logger: logging.Logger = logging.getLogger("confidence_logger"), async_client: httpx.AsyncClient = httpx.AsyncClient(), + disable_telemetry: bool = False, ): self._client_secret = client_secret self._region = region @@ -111,6 +114,17 @@ def __init__( self.async_client = async_client self._setup_logger(logger) self._custom_resolve_base_url = custom_resolve_base_url + self._telemetry = 
Telemetry(__version__, disabled=disable_telemetry) + + def _get_resolve_headers(self) -> Dict[str, str]: + headers = { + "Content-Type": "application/json", + "Accept": "application/json", + } + telemetry_header = self._telemetry.get_monitoring_header() + if telemetry_header: + headers["X-CONFIDENCE-TELEMETRY"] = telemetry_header + return headers def resolve_boolean_details( self, flag_key: str, default_value: bool @@ -367,7 +381,6 @@ def _send_event_internal(self, event_name: str, data: Dict[str, FieldType]) -> N ) if response.status_code == 200: json = response.json() - json_errors = json.get("errors") if json_errors: self.logger.warning("events emitted with errors:") @@ -407,6 +420,7 @@ def _handle_resolve_response( def _resolve( self, flag_name: FlagName, context: Dict[str, FieldType] ) -> ResolveResult: + start_time = time.perf_counter() request_body = { "clientSecret": self._client_secret, "evaluationContext": context, @@ -420,24 +434,49 @@ def _resolve( resolve_url = f"{base_url}/v1/flags:resolve" timeout_sec = None if self._timeout_ms is None else self._timeout_ms / 1000.0 + try: response = requests.post( - resolve_url, json=request_body, timeout=timeout_sec + resolve_url, + json=request_body, + headers=self._get_resolve_headers(), + timeout=timeout_sec, + ) + + result = self._handle_resolve_response(response, flag_name) + duration_ms = int((time.perf_counter() - start_time) * 1000) + self._telemetry.add_trace( + ProtoTraceId.PROTO_TRACE_ID_RESOLVE_LATENCY, + duration_ms, + ProtoStatus.PROTO_STATUS_SUCCESS, ) - return self._handle_resolve_response(response, flag_name) + return result except requests.exceptions.Timeout: + duration_ms = int((time.perf_counter() - start_time) * 1000) + self._telemetry.add_trace( + ProtoTraceId.PROTO_TRACE_ID_RESOLVE_LATENCY, + duration_ms, + ProtoStatus.PROTO_STATUS_TIMEOUT, + ) self.logger.warning( f"Request timed out after {timeout_sec}s" f" when resolving flag {flag_name}" ) raise TimeoutError() except 
requests.exceptions.RequestException as e: + duration_ms = int((time.perf_counter() - start_time) * 1000) + self._telemetry.add_trace( + ProtoTraceId.PROTO_TRACE_ID_RESOLVE_LATENCY, + duration_ms, + ProtoStatus.PROTO_STATUS_ERROR, + ) self.logger.warning(f"Error resolving flag {flag_name}: {str(e)}") raise GeneralError(str(e)) async def _resolve_async( self, flag_name: FlagName, context: Dict[str, FieldType] ) -> ResolveResult: + start_time = time.perf_counter() request_body = { "clientSecret": self._client_secret, "evaluationContext": context, @@ -453,16 +492,38 @@ async def _resolve_async( timeout_sec = None if self._timeout_ms is None else self._timeout_ms / 1000.0 try: response = await self.async_client.post( - resolve_url, json=request_body, timeout=timeout_sec + resolve_url, + json=request_body, + headers=self._get_resolve_headers(), + timeout=timeout_sec, + ) + result = self._handle_resolve_response(response, flag_name) + duration_ms = int((time.perf_counter() - start_time) * 1000) + self._telemetry.add_trace( + ProtoTraceId.PROTO_TRACE_ID_RESOLVE_LATENCY, + duration_ms, + ProtoStatus.PROTO_STATUS_SUCCESS, ) - return self._handle_resolve_response(response, flag_name) + return result except httpx.TimeoutException: + duration_ms = int((time.perf_counter() - start_time) * 1000) + self._telemetry.add_trace( + ProtoTraceId.PROTO_TRACE_ID_RESOLVE_LATENCY, + duration_ms, + ProtoStatus.PROTO_STATUS_TIMEOUT, + ) self.logger.warning( f"Request timed out after {timeout_sec}s" f" when resolving flag {flag_name}" ) raise TimeoutError() except httpx.HTTPError as e: + duration_ms = int((time.perf_counter() - start_time) * 1000) + self._telemetry.add_trace( + ProtoTraceId.PROTO_TRACE_ID_RESOLVE_LATENCY, + duration_ms, + ProtoStatus.PROTO_STATUS_ERROR, + ) self.logger.warning(f"Error resolving flag {flag_name}: {str(e)}") raise GeneralError(str(e)) diff --git a/confidence/openfeature_provider.py b/confidence/openfeature_provider.py index 6857ffb..2f7daae 100644 --- 
a/confidence/openfeature_provider.py +++ b/confidence/openfeature_provider.py @@ -91,7 +91,7 @@ def _to_openfeature_error_code( return openfeature.exception.ErrorCode.PROVIDER_NOT_READY -class ConfidenceOpenFeatureProvider(AbstractProvider): +class ConfidenceOpenFeatureProvider(AbstractProvider): # type: ignore[misc] def __init__(self, confidence_sdk: confidence.confidence.Confidence): self.confidence_sdk = confidence_sdk diff --git a/confidence/telemetry.proto b/confidence/telemetry.proto new file mode 100644 index 0000000..a1c6462 --- /dev/null +++ b/confidence/telemetry.proto @@ -0,0 +1,64 @@ +syntax = "proto3"; + +package confidence.telemetry.v1; + +enum ProtoPlatform { + PROTO_PLATFORM_UNSPECIFIED = 0; + PROTO_PLATFORM_JS_WEB = 4; + PROTO_PLATFORM_JS_SERVER = 5; + PROTO_PLATFORM_PYTHON = 6; + PROTO_PLATFORM_GO = 7; +} + +message ProtoMonitoring { + repeated ProtoLibraryTraces library_traces = 1; + ProtoPlatform platform = 2; +} + +message ProtoLibraryTraces { + ProtoLibrary library = 1; + string library_version = 2; + repeated ProtoTrace traces = 3; + + message ProtoTrace { + ProtoTraceId id = 1; + + // DEPRECATED + optional uint64 millisecond_duration = 2; + + oneof trace { + ProtoRequestTrace request_trace = 3; + ProtoCountTrace count_trace = 4; + } + + message ProtoCountTrace {} + + message ProtoRequestTrace { + uint64 millisecond_duration = 1; + ProtoStatus status = 2; + + enum ProtoStatus { + PROTO_STATUS_UNSPECIFIED = 0; + PROTO_STATUS_SUCCESS = 1; + PROTO_STATUS_ERROR = 2; + PROTO_STATUS_TIMEOUT = 3; + PROTO_STATUS_CACHED = 4; + } + } + } + + enum ProtoLibrary { + PROTO_LIBRARY_UNSPECIFIED = 0; + PROTO_LIBRARY_CONFIDENCE = 1; + PROTO_LIBRARY_OPEN_FEATURE = 2; + PROTO_LIBRARY_REACT = 3; + } + + enum ProtoTraceId { + PROTO_TRACE_ID_UNSPECIFIED = 0; + PROTO_TRACE_ID_RESOLVE_LATENCY = 1; + PROTO_TRACE_ID_STALE_FLAG = 2; + PROTO_TRACE_ID_FLAG_TYPE_MISMATCH = 3; + PROTO_TRACE_ID_WITH_CONTEXT = 4; + } +} \ No newline at end of file diff --git 
a/confidence/telemetry.py b/confidence/telemetry.py new file mode 100644 index 0000000..0cffcc0 --- /dev/null +++ b/confidence/telemetry.py @@ -0,0 +1,71 @@ +import base64 +from queue import Queue +from typing import Optional +from typing_extensions import TypeAlias + +from confidence.telemetry_pb2 import ( + ProtoMonitoring, + ProtoLibraryTraces, + ProtoPlatform, +) + +# Define type aliases for the protobuf classes +ProtoTrace: TypeAlias = ProtoLibraryTraces.ProtoTrace +ProtoLibrary: TypeAlias = ProtoLibraryTraces.ProtoLibrary +ProtoTraceId: TypeAlias = ProtoLibraryTraces.ProtoTraceId +ProtoStatus: TypeAlias = ProtoLibraryTraces.ProtoTrace.ProtoRequestTrace.ProtoStatus + + +class Telemetry: + _instance: Optional["Telemetry"] = None + _initialized: bool = False + version: str + _traces_queue: Queue[ProtoTrace] + _disabled: bool + + def __new__(cls, version: str, disabled: bool = False) -> "Telemetry": + if cls._instance is None: + cls._instance = super(Telemetry, cls).__new__(cls) + cls._initialized = False + cls._disabled = disabled + return cls._instance + + def __init__(self, version: str, disabled: bool = False) -> None: + if not self._initialized: + self.version = version + self._traces_queue = Queue() + self._disabled = disabled + self._initialized = True + + def add_trace( + self, trace_id: ProtoTraceId, duration_ms: int, status: ProtoStatus + ) -> None: + if self._disabled: + return + trace = ProtoTrace() + trace.id = trace_id + request_trace = ProtoTrace.ProtoRequestTrace() + request_trace.millisecond_duration = duration_ms + request_trace.status = status + trace.request_trace.CopyFrom(request_trace) + self._traces_queue.put(trace) + + def get_monitoring_header(self) -> str: + if self._disabled: + return "" + current_traces = [] + while not self._traces_queue.empty(): + try: + current_traces.append(self._traces_queue.get_nowait()) + except Exception: + break + + monitoring = ProtoMonitoring() + library_traces = monitoring.library_traces.add() + 
library_traces.library = ProtoLibrary.PROTO_LIBRARY_CONFIDENCE + library_traces.library_version = self.version + library_traces.traces.extend(current_traces) + monitoring.platform = ProtoPlatform.PROTO_PLATFORM_PYTHON + serialized = monitoring.SerializeToString() + encoded = base64.b64encode(serialized).decode() + return encoded diff --git a/confidence/telemetry_pb2.py b/confidence/telemetry_pb2.py new file mode 100644 index 0000000..5550a34 --- /dev/null +++ b/confidence/telemetry_pb2.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# NO CHECKED-IN PROTOBUF GENCODE +# source: telemetry.proto +# Protobuf Python Version: 5.29.3 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import runtime_version as _runtime_version +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +_runtime_version.ValidateProtobufRuntimeVersion( + _runtime_version.Domain.PUBLIC, + 5, + 29, + 3, + '', + 'telemetry.proto' +) +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0ftelemetry.proto\x12\x17\x63onfidence.telemetry.v1\"\x90\x01\n\x0fProtoMonitoring\x12\x43\n\x0elibrary_traces\x18\x01 \x03(\x0b\x32+.confidence.telemetry.v1.ProtoLibraryTraces\x12\x38\n\x08platform\x18\x02 \x01(\x0e\x32&.confidence.telemetry.v1.ProtoPlatform\"\x9f\t\n\x12ProtoLibraryTraces\x12I\n\x07library\x18\x01 \x01(\x0e\x32\x38.confidence.telemetry.v1.ProtoLibraryTraces.ProtoLibrary\x12\x17\n\x0flibrary_version\x18\x02 \x01(\t\x12\x46\n\x06traces\x18\x03 \x03(\x0b\x32\x36.confidence.telemetry.v1.ProtoLibraryTraces.ProtoTrace\x1a\x99\x05\n\nProtoTrace\x12\x44\n\x02id\x18\x01 
\x01(\x0e\x32\x38.confidence.telemetry.v1.ProtoLibraryTraces.ProtoTraceId\x12!\n\x14millisecond_duration\x18\x02 \x01(\x04H\x01\x88\x01\x01\x12\x61\n\rrequest_trace\x18\x03 \x01(\x0b\x32H.confidence.telemetry.v1.ProtoLibraryTraces.ProtoTrace.ProtoRequestTraceH\x00\x12]\n\x0b\x63ount_trace\x18\x04 \x01(\x0b\x32\x46.confidence.telemetry.v1.ProtoLibraryTraces.ProtoTrace.ProtoCountTraceH\x00\x1a\x11\n\x0fProtoCountTrace\x1a\xaa\x02\n\x11ProtoRequestTrace\x12\x1c\n\x14millisecond_duration\x18\x01 \x01(\x04\x12\x64\n\x06status\x18\x02 \x01(\x0e\x32T.confidence.telemetry.v1.ProtoLibraryTraces.ProtoTrace.ProtoRequestTrace.ProtoStatus\"\x90\x01\n\x0bProtoStatus\x12\x1c\n\x18PROTO_STATUS_UNSPECIFIED\x10\x00\x12\x18\n\x14PROTO_STATUS_SUCCESS\x10\x01\x12\x16\n\x12PROTO_STATUS_ERROR\x10\x02\x12\x18\n\x14PROTO_STATUS_TIMEOUT\x10\x03\x12\x17\n\x13PROTO_STATUS_CACHED\x10\x04\x42\x07\n\x05traceB\x17\n\x15_millisecond_duration\"\x84\x01\n\x0cProtoLibrary\x12\x1d\n\x19PROTO_LIBRARY_UNSPECIFIED\x10\x00\x12\x1c\n\x18PROTO_LIBRARY_CONFIDENCE\x10\x01\x12\x1e\n\x1aPROTO_LIBRARY_OPEN_FEATURE\x10\x02\x12\x17\n\x13PROTO_LIBRARY_REACT\x10\x03\"\xb9\x01\n\x0cProtoTraceId\x12\x1e\n\x1aPROTO_TRACE_ID_UNSPECIFIED\x10\x00\x12\"\n\x1ePROTO_TRACE_ID_RESOLVE_LATENCY\x10\x01\x12\x1d\n\x19PROTO_TRACE_ID_STALE_FLAG\x10\x02\x12%\n!PROTO_TRACE_ID_FLAG_TYPE_MISMATCH\x10\x03\x12\x1f\n\x1bPROTO_TRACE_ID_WITH_CONTEXT\x10\x04*\x9a\x01\n\rProtoPlatform\x12\x1e\n\x1aPROTO_PLATFORM_UNSPECIFIED\x10\x00\x12\x19\n\x15PROTO_PLATFORM_JS_WEB\x10\x04\x12\x1c\n\x18PROTO_PLATFORM_JS_SERVER\x10\x05\x12\x19\n\x15PROTO_PLATFORM_PYTHON\x10\x06\x12\x15\n\x11PROTO_PLATFORM_GO\x10\x07\x62\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'telemetry_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + DESCRIPTOR._loaded_options = None + _globals['_PROTOPLATFORM']._serialized_start=1378 + 
_globals['_PROTOPLATFORM']._serialized_end=1532 + _globals['_PROTOMONITORING']._serialized_start=45 + _globals['_PROTOMONITORING']._serialized_end=189 + _globals['_PROTOLIBRARYTRACES']._serialized_start=192 + _globals['_PROTOLIBRARYTRACES']._serialized_end=1375 + _globals['_PROTOLIBRARYTRACES_PROTOTRACE']._serialized_start=387 + _globals['_PROTOLIBRARYTRACES_PROTOTRACE']._serialized_end=1052 + _globals['_PROTOLIBRARYTRACES_PROTOTRACE_PROTOCOUNTTRACE']._serialized_start=700 + _globals['_PROTOLIBRARYTRACES_PROTOTRACE_PROTOCOUNTTRACE']._serialized_end=717 + _globals['_PROTOLIBRARYTRACES_PROTOTRACE_PROTOREQUESTTRACE']._serialized_start=720 + _globals['_PROTOLIBRARYTRACES_PROTOTRACE_PROTOREQUESTTRACE']._serialized_end=1018 + _globals['_PROTOLIBRARYTRACES_PROTOTRACE_PROTOREQUESTTRACE_PROTOSTATUS']._serialized_start=874 + _globals['_PROTOLIBRARYTRACES_PROTOTRACE_PROTOREQUESTTRACE_PROTOSTATUS']._serialized_end=1018 + _globals['_PROTOLIBRARYTRACES_PROTOLIBRARY']._serialized_start=1055 + _globals['_PROTOLIBRARYTRACES_PROTOLIBRARY']._serialized_end=1187 + _globals['_PROTOLIBRARYTRACES_PROTOTRACEID']._serialized_start=1190 + _globals['_PROTOLIBRARYTRACES_PROTOTRACEID']._serialized_end=1375 +# @@protoc_insertion_point(module_scope) diff --git a/demo.py b/demo.py index 7eddc97..f1700a7 100644 --- a/demo.py +++ b/demo.py @@ -9,8 +9,8 @@ async def get_flag(): random_uuid = uuid.uuid4() uuid_string = str(random_uuid) confidence = root.with_context({"targeting_key": uuid_string}) - #confidence.with_context({"app": "python"}).track("navigate", {}) - #print("Tracked navigate event") + # confidence.with_context({"app": "python"}).track("navigate", {}) + # print("Tracked navigate event") details = confidence.resolve_string_details("hawkflag.color", "default") print(f"Flag value: {details.value}") diff --git a/generate_proto.py b/generate_proto.py new file mode 100755 index 0000000..4382179 --- /dev/null +++ b/generate_proto.py @@ -0,0 +1,47 @@ +#!/usr/bin/env python3 +import os +import 
subprocess +import sys + + +def generate_proto(): + proto_file = "confidence/telemetry.proto" + output_dir = "confidence" + + # Check if protoc is installed + try: + version = subprocess.check_output(["protoc", "--version"]).decode().strip() + print(f"Found protoc version: {version}") + except FileNotFoundError: + print("Error: protoc compiler not found. Please install it first.") + print("You can install it via:") + print(" - macOS: brew install protobuf") + print(" - Linux: apt-get install protobuf-compiler") + print( + " - Windows: Download from " + "https://github.com/protocolbuffers/protobuf/releases" + ) + sys.exit(1) + + # Generate Python code + cmd = [ + "protoc", + f"--python_out={output_dir}", + f"--proto_path={os.path.dirname(proto_file)}", + proto_file, + ] + + print(f"Generating Python code from {proto_file}...") + try: + subprocess.check_call(cmd) + output_file = os.path.join( + output_dir, os.path.basename(os.path.splitext(proto_file)[0]) + "_pb2.py" + ) + print(f"Successfully generated {output_file}") + except subprocess.CalledProcessError as e: + print(f"Error generating proto code: {e}") + sys.exit(1) + + +if __name__ == "__main__": + generate_proto() diff --git a/pyproject.toml b/pyproject.toml index c190408..1989e73 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -29,9 +29,10 @@ dependencies = [ "requests==2.32.3", "openfeature-sdk==0.4.2", "typing_extensions==4.9.0", - "httpx==0.27.2" + "httpx==0.27.2", + "protobuf==5.29.3" ] -requires-python = ">=3.8" +requires-python = ">=3.9" [project.optional-dependencies] dev = [ diff --git a/tests/test_telemetry.py b/tests/test_telemetry.py new file mode 100644 index 0000000..c66246d --- /dev/null +++ b/tests/test_telemetry.py @@ -0,0 +1,259 @@ +import unittest +import base64 +import time +from unittest.mock import patch, MagicMock +from confidence.telemetry_pb2 import ProtoMonitoring, ProtoLibraryTraces, ProtoPlatform +from confidence.telemetry import Telemetry +from confidence.confidence import 
Confidence, Region +import requests + +# Get the nested classes from ProtoLibraryTraces +ProtoTrace = ProtoLibraryTraces.ProtoTrace +ProtoRequestTrace = ProtoTrace.ProtoRequestTrace +ProtoStatus = ProtoRequestTrace.ProtoStatus +ProtoLibrary = ProtoLibraryTraces.ProtoLibrary +ProtoTraceId = ProtoLibraryTraces.ProtoTraceId + + +class TestTelemetry(unittest.TestCase): + def setUp(self): + # Reset singleton state before each test + Telemetry._instance = None + Telemetry._initialized = False + + def test_add_trace(self): + telemetry = Telemetry("1.0.0") + telemetry.add_trace( + ProtoTraceId.PROTO_TRACE_ID_RESOLVE_LATENCY, + 100, + ProtoStatus.PROTO_STATUS_SUCCESS, + ) + + header = telemetry.get_monitoring_header() + monitoring = ProtoMonitoring() + monitoring.ParseFromString(base64.b64decode(header)) + + self.assertEqual(monitoring.platform, ProtoPlatform.PROTO_PLATFORM_PYTHON) + self.assertEqual(len(monitoring.library_traces), 1) + + library_trace = monitoring.library_traces[0] + self.assertEqual(library_trace.library, ProtoLibrary.PROTO_LIBRARY_CONFIDENCE) + self.assertEqual(library_trace.library_version, "1.0.0") + + self.assertEqual(len(library_trace.traces), 1) + trace = library_trace.traces[0] + self.assertEqual(trace.id, ProtoTraceId.PROTO_TRACE_ID_RESOLVE_LATENCY) + self.assertEqual(trace.request_trace.millisecond_duration, 100) + self.assertEqual(trace.request_trace.status, ProtoStatus.PROTO_STATUS_SUCCESS) + + def test_traces_are_consumed(self): + telemetry = Telemetry("1.0.0") + telemetry.add_trace( + ProtoTraceId.PROTO_TRACE_ID_RESOLVE_LATENCY, + 100, + ProtoStatus.PROTO_STATUS_SUCCESS, + ) + telemetry.add_trace( + ProtoTraceId.PROTO_TRACE_ID_RESOLVE_LATENCY, + 200, + ProtoStatus.PROTO_STATUS_ERROR, + ) + + header1 = telemetry.get_monitoring_header() + monitoring1 = ProtoMonitoring() + monitoring1.ParseFromString(base64.b64decode(header1)) + self.assertEqual(len(monitoring1.library_traces[0].traces), 2) + + header2 = telemetry.get_monitoring_header() + 
monitoring2 = ProtoMonitoring() + monitoring2.ParseFromString(base64.b64decode(header2)) + self.assertEqual(len(monitoring2.library_traces[0].traces), 0) + + def test_multiple_traces(self): + telemetry = Telemetry("1.0.0") + telemetry.add_trace( + ProtoTraceId.PROTO_TRACE_ID_RESOLVE_LATENCY, + 100, + ProtoStatus.PROTO_STATUS_SUCCESS, + ) + telemetry.add_trace( + ProtoTraceId.PROTO_TRACE_ID_RESOLVE_LATENCY, + 200, + ProtoStatus.PROTO_STATUS_ERROR, + ) + telemetry.add_trace( + ProtoTraceId.PROTO_TRACE_ID_RESOLVE_LATENCY, + 300, + ProtoStatus.PROTO_STATUS_TIMEOUT, + ) + + header = telemetry.get_monitoring_header() + monitoring = ProtoMonitoring() + monitoring.ParseFromString(base64.b64decode(header)) + traces = monitoring.library_traces[0].traces + + self.assertEqual(len(traces), 3) + self.assertEqual(traces[0].request_trace.millisecond_duration, 100) + self.assertEqual( + traces[0].request_trace.status, ProtoStatus.PROTO_STATUS_SUCCESS + ) + self.assertEqual(traces[1].request_trace.millisecond_duration, 200) + self.assertEqual(traces[1].request_trace.status, ProtoStatus.PROTO_STATUS_ERROR) + self.assertEqual(traces[2].request_trace.millisecond_duration, 300) + self.assertEqual( + traces[2].request_trace.status, ProtoStatus.PROTO_STATUS_TIMEOUT + ) + + def test_singleton_behavior(self): + telemetry1 = Telemetry("1.0.0") + telemetry2 = Telemetry("2.0.0") + + telemetry1.add_trace( + ProtoTraceId.PROTO_TRACE_ID_RESOLVE_LATENCY, + 100, + ProtoStatus.PROTO_STATUS_SUCCESS, + ) + + header = telemetry2.get_monitoring_header() + monitoring = ProtoMonitoring() + monitoring.ParseFromString(base64.b64decode(header)) + self.assertEqual(len(monitoring.library_traces[0].traces), 1) + + self.assertEqual(monitoring.library_traces[0].library_version, "1.0.0") + + @patch("requests.post") + def test_telemetry_during_resolve(self, mock_post): + mock_response = MagicMock() + mock_response.status_code = 200 + mock_response.json.return_value = { + "resolvedFlags": [{"value": True, "variant": 
"on"}], + "resolveToken": "test-token", + } + mock_response.raise_for_status.return_value = None + + def delayed_response(*args, **kwargs): + time.sleep(0.01) + return mock_response + + mock_post.side_effect = delayed_response + + confidence = Confidence(client_secret="test-secret", region=Region.GLOBAL) + + confidence.resolve_boolean_details("test-flag", False) + + final_header = confidence._telemetry.get_monitoring_header() + monitoring = ProtoMonitoring() + monitoring.ParseFromString(base64.b64decode(final_header)) + final_traces = monitoring.library_traces[0].traces + self.assertEqual(len(final_traces), 1) + trace = final_traces[0] + self.assertEqual(trace.id, ProtoTraceId.PROTO_TRACE_ID_RESOLVE_LATENCY) + self.assertEqual(trace.request_trace.status, ProtoStatus.PROTO_STATUS_SUCCESS) + self.assertGreaterEqual(trace.request_trace.millisecond_duration, 10) + + @patch("requests.post") + def test_telemetry_during_resolve_error(self, mock_post): + mock_response = MagicMock() + mock_response.status_code = 500 + mock_response.raise_for_status.side_effect = ( + requests.exceptions.RequestException("Test error") + ) + mock_response.json.side_effect = requests.exceptions.RequestException( + "Test error" + ) + + def delayed_error(*args, **kwargs): + time.sleep(0.01) + return mock_response + + mock_post.side_effect = delayed_error + + confidence = Confidence(client_secret="test-secret", region=Region.GLOBAL) + + confidence.resolve_boolean_details("test-flag", False) + + final_header = confidence._telemetry.get_monitoring_header() + monitoring = ProtoMonitoring() + monitoring.ParseFromString(base64.b64decode(final_header)) + final_traces = monitoring.library_traces[0].traces + self.assertEqual(len(final_traces), 1) + trace = final_traces[0] + self.assertEqual(trace.id, ProtoTraceId.PROTO_TRACE_ID_RESOLVE_LATENCY) + self.assertEqual(trace.request_trace.status, ProtoStatus.PROTO_STATUS_ERROR) + self.assertGreaterEqual(trace.request_trace.millisecond_duration, 10) + + 
@patch("requests.post") + def test_disabled_telemetry(self, mock_post): + # Create a confidence instance with telemetry disabled + mock_response = MagicMock() + mock_response.status_code = 200 + mock_response.json.return_value = { + "resolvedFlags": [{"value": True, "variant": "on"}], + "resolveToken": "test-token", + } + mock_response.raise_for_status.return_value = None + mock_post.return_value = mock_response + + confidence = Confidence(client_secret="test-secret", region=Region.GLOBAL, disable_telemetry=True) + + # Add a trace and verify it's not added + confidence._telemetry.add_trace( + ProtoTraceId.PROTO_TRACE_ID_RESOLVE_LATENCY, + 100, + ProtoStatus.PROTO_STATUS_SUCCESS, + ) + + # Get the header and verify it's empty + header = confidence._telemetry.get_monitoring_header() + self.assertEqual(header, "") + + # Make a resolve call and verify no telemetry header is sent + confidence.resolve_boolean_details("test-flag", False) + headers = mock_post.call_args[1]["headers"] + self.assertNotIn("X-CONFIDENCE-TELEMETRY", headers) + + @patch("requests.post") + def test_telemetry_shared_across_confidence_instances(self, mock_post): + mock_response = MagicMock() + mock_response.status_code = 200 + mock_response.json.return_value = { + "resolvedFlags": [{"value": True, "variant": "on"}], + "resolveToken": "test-token", + } + mock_response.raise_for_status.return_value = None + mock_post.return_value = mock_response + + # Create first confidence instance and resolve a flag + confidence1 = Confidence(client_secret="test-secret", region=Region.GLOBAL) + confidence1.resolve_boolean_details("test-flag", False) + + # Create second confidence instance using with_context and resolve another flag + confidence2 = confidence1.with_context({"user_id": "test-user"}) + confidence2.resolve_boolean_details("test-flag", False) + + # Verify both instances share the same telemetry instance + self.assertIs(confidence1._telemetry, confidence2._telemetry) + + 
self.assertEqual(mock_post.call_count, 2) + + # First request should have no trace + headers1 = mock_post.call_args_list[0][1]["headers"] + self.assertIn("X-CONFIDENCE-TELEMETRY", headers1) + monitoring1 = ProtoMonitoring() + print(f"Decoding telemetry header: {headers1['X-CONFIDENCE-TELEMETRY']}") + monitoring1.ParseFromString(base64.b64decode(headers1["X-CONFIDENCE-TELEMETRY"])) + traces1 = monitoring1.library_traces[0].traces + print(f"First request traces: {traces1}") + self.assertEqual(len(traces1), 0) + + # Second request should have the first traces + headers2 = mock_post.call_args_list[1][1]["headers"] + self.assertIn("X-CONFIDENCE-TELEMETRY", headers2) + monitoring2 = ProtoMonitoring() + print(f"Decoding telemetry header: {headers2['X-CONFIDENCE-TELEMETRY']}") + monitoring2.ParseFromString(base64.b64decode(headers2["X-CONFIDENCE-TELEMETRY"])) + traces2 = monitoring2.library_traces[0].traces + self.assertEqual(len(traces2), 1) + +if __name__ == "__main__": + unittest.main()