Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .github/workflows/pytest.yml
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ jobs:
strategy:
max-parallel: 4
matrix:
python-version: ['3.9', '3.10', '3.11', '3.12']
python-version: ['3.10', '3.11', '3.12']
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

While we're at it, would you like to add 3.13 and 3.14 to the matrix?


steps:
- name: Cloning repo
Expand Down
49 changes: 49 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -60,6 +60,55 @@ provider = FlagsmithProvider(
The provider can then be used with the OpenFeature client as per
[the documentation](https://openfeature.dev/docs/reference/concepts/evaluation-api#setting-a-provider).

### Tracking

The provider supports the [OpenFeature tracking API](https://openfeature.dev/specification/sections/tracking/), which lets you associate user actions with feature flag evaluations for experimentation.

Tracking requires pipeline analytics to be enabled on the **Flagsmith client** (available from `flagsmith` version 5.2.0). The provider acts as a thin delegate — all buffering and flushing is managed by the client.

```python
from flagsmith import Flagsmith, PipelineAnalyticsConfig
from openfeature import api
from openfeature.evaluation_context import EvaluationContext
from openfeature.track import TrackingEventDetails
from openfeature_flagsmith.provider import FlagsmithProvider

# Enable pipeline analytics on the Flagsmith client
client = Flagsmith(
environment_key="your-environment-key",
pipeline_analytics_config=PipelineAnalyticsConfig(
analytics_server_url="https://analytics-collector.flagsmith.com/",
max_buffer=1000, # optional, default 1000
flush_interval_seconds=10, # optional, default 10s
),
)

api.set_provider(FlagsmithProvider(client=client))
of_client = api.get_client()

# Flag evaluations are tracked automatically — no extra code needed
variant = of_client.get_string_value(
"checkout-variant",
"control",
evaluation_context=EvaluationContext(targeting_key="user-123"),
)

# Track a custom event explicitly
of_client.track(
"purchase",
evaluation_context=EvaluationContext(
targeting_key="user-123",
attributes={"plan": "premium"},
),
tracking_event_details=TrackingEventDetails(
value=99.77,
attributes={"currency": "USD"},
),
)
```

If `pipeline_analytics_config` is not set on the Flagsmith client, calls to `track()` are silently ignored.

### Evaluation Context

The evaluation context supports traits in two ways:
Expand Down
46 changes: 45 additions & 1 deletion openfeature_flagsmith/provider.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,8 @@
TypeMismatchError,
)
from openfeature.flag_evaluation import FlagResolutionDetails, FlagType
from openfeature.provider import Metadata, AbstractProvider
from openfeature.provider import AbstractProvider, Metadata
from openfeature.track import TrackingEventDetails

from openfeature_flagsmith.exceptions import FlagsmithProviderError

Expand All @@ -37,6 +38,38 @@ def __init__(
self.use_flagsmith_defaults = use_flagsmith_defaults
self.use_boolean_config_value = use_boolean_config_value

def track(
    self,
    tracking_event_name: str,
    evaluation_context: typing.Optional[EvaluationContext] = None,
    tracking_event_details: typing.Optional[TrackingEventDetails] = None,
) -> None:
    """Delegate an OpenFeature tracking event to the Flagsmith client.

    Tracking is best-effort: nothing is buffered here, and failures are
    swallowed rather than surfaced to callers of the OpenFeature API.

    :param tracking_event_name: name of the event to record.
    :param evaluation_context: optional context; its targeting key becomes
        the identity and its attributes become traits.
    :param tracking_event_details: optional value/attributes payload,
        forwarded as the event metadata.
    """
    client = self._client
    # Older flagsmith releases (or duck-typed stand-ins) have no
    # track_event at all; tracking is then a silent no-op.
    if not hasattr(client, "track_event"):
        return

    identity = evaluation_context.targeting_key if evaluation_context else None
    traits = self._extract_traits(evaluation_context)

    metadata: typing.Optional[typing.Dict[str, typing.Any]] = None
    if tracking_event_details is not None:
        combined = dict(tracking_event_details.attributes)
        if tracking_event_details.value is not None:
            # The explicit .value field wins over any "value" attribute.
            combined["value"] = tracking_event_details.value
        # An empty payload is sent as None rather than {}.
        metadata = combined or None

    try:
        client.track_event(
            tracking_event_name,
            identity_identifier=identity,
            traits=traits,
            metadata=metadata,
        )
    except ValueError:
        # The client raises ValueError when pipeline analytics is not
        # configured; per the documented contract this is ignored.
        return

def get_metadata(self) -> Metadata:
    """Identify this provider to the OpenFeature SDK."""
    provider_name = "FlagsmithProvider"
    return Metadata(name=provider_name)

Expand Down Expand Up @@ -132,6 +165,17 @@ def _resolve(
% (flag_key, flag_type.value)
)

@staticmethod
def _extract_traits(
    evaluation_context: typing.Optional[EvaluationContext],
) -> typing.Optional[typing.Dict[str, typing.Any]]:
    """Collect traits from an evaluation context.

    Flat attributes and entries nested under the "traits" key are merged,
    with the nested entries taking precedence on key collisions (the same
    precedence rule `_get_flags` applies). Returns None when there is
    nothing to send.
    """
    if not evaluation_context or not evaluation_context.attributes:
        return None
    attributes = evaluation_context.attributes
    merged: typing.Dict[str, typing.Any] = {
        key: value for key, value in attributes.items() if key != "traits"
    }
    merged.update(attributes.get("traits", {}))
    return merged if merged else None

def _get_flags(self, evaluation_context: EvaluationContext = EvaluationContext()):
if targeting_key := evaluation_context.targeting_key:
nested_traits = evaluation_context.attributes.pop("traits", {})
Expand Down
6 changes: 3 additions & 3 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -6,10 +6,10 @@ authors = [
{ name = "Matthew Elwell", email = "matthew.elwell@flagsmith.com" }
]
readme = "README.md"
requires-python = ">=3.9,<4.0"
requires-python = ">=3.10,<4.0"
dependencies = [
"flagsmith (>=3.6.0,<6.0.0)",
"openfeature-sdk (>=0.6.0,<0.9.0)",
"flagsmith (>=5.2.0)",
"openfeature-sdk (>=0.9.0,<0.10.0)",
]

[tool.poetry]
Expand Down
141 changes: 141 additions & 0 deletions tests/test_provider.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@
ParseError,
FlagNotFoundError,
)
from openfeature.track import TrackingEventDetails

from openfeature_flagsmith.exceptions import FlagsmithProviderError
from openfeature_flagsmith.provider import FlagsmithProvider
Expand Down Expand Up @@ -450,3 +451,143 @@ def test_resolve_boolean_details_uses_enabled_when_use_boolean_config_value_is_f
assert result.value is True
assert result.error_code is None
assert result.reason is None


# ---------------------------------------------------------------------------
# Tracking
# ---------------------------------------------------------------------------


def test_track_is_noop_without_track_event_on_client() -> None:
    """A client lacking track_event (e.g. an older flagsmith) is tolerated."""
    # Given a client that exposes no attributes at all
    bare_client = MagicMock(spec=[])
    provider = FlagsmithProvider(bare_client)

    # When / Then - track() returns without raising
    provider.track("purchase")


def test_track_is_noop_when_pipeline_analytics_not_configured(
    mock_flagsmith_client: MagicMock,
) -> None:
    """The ValueError raised for missing analytics config is swallowed."""
    # Given track_event exists but rejects the call
    mock_flagsmith_client.track_event = MagicMock(
        side_effect=ValueError("Pipeline analytics is not configured")
    )
    provider = FlagsmithProvider(mock_flagsmith_client)

    # When / Then - no exception escapes track()
    provider.track("purchase")


def test_track_delegates_to_client(mock_flagsmith_client: MagicMock) -> None:
    """Identity, traits and metadata are all forwarded to the client."""
    # Given a client that supports pipeline analytics
    mock_flagsmith_client.track_event = MagicMock()
    provider = FlagsmithProvider(mock_flagsmith_client)

    context = EvaluationContext(
        targeting_key="user-123",
        attributes={"plan": "premium"},
    )
    details = TrackingEventDetails(
        value=99.77,
        attributes={"currency": "USD"},
    )

    # When tracking with a full set of arguments
    provider.track(
        "purchase",
        evaluation_context=context,
        tracking_event_details=details,
    )

    # Then everything arrives in the client's vocabulary
    mock_flagsmith_client.track_event.assert_called_once_with(
        "purchase",
        identity_identifier="user-123",
        traits={"plan": "premium"},
        metadata={"value": 99.77, "currency": "USD"},
    )


def test_track_with_minimal_args(mock_flagsmith_client: MagicMock) -> None:
    """With only an event name, every optional client argument is None."""
    # Given
    mock_flagsmith_client.track_event = MagicMock()
    provider = FlagsmithProvider(mock_flagsmith_client)

    # When tracking an event by name alone
    provider.track("signup")

    # Then the delegate call carries explicit None placeholders
    mock_flagsmith_client.track_event.assert_called_once_with(
        "signup", identity_identifier=None, traits=None, metadata=None
    )


def test_track_value_takes_precedence_over_attributes_value(
    mock_flagsmith_client: MagicMock,
) -> None:
    """The explicit .value field beats a "value" key inside attributes."""
    # Given a client with tracking enabled
    mock_flagsmith_client.track_event = MagicMock()
    provider = FlagsmithProvider(mock_flagsmith_client)

    # When the details carry both .value and attributes["value"]
    details = TrackingEventDetails(
        value=99.77,
        attributes={"value": "should_be_overwritten", "other": "kept"},
    )
    provider.track("checkout", tracking_event_details=details)

    # Then the explicit value overwrites the attribute, others pass through
    mock_flagsmith_client.track_event.assert_called_once_with(
        "checkout",
        identity_identifier=None,
        traits=None,
        metadata={"value": 99.77, "other": "kept"},
    )


def test_track_with_details_value_only(mock_flagsmith_client: MagicMock) -> None:
    """A bare numeric value is wrapped as {"value": ...} metadata."""
    # Given
    mock_flagsmith_client.track_event = MagicMock()
    provider = FlagsmithProvider(mock_flagsmith_client)

    # When only a value is supplied on the details
    provider.track(
        "checkout", tracking_event_details=TrackingEventDetails(value=99.77)
    )

    # Then the value is the sole metadata entry
    mock_flagsmith_client.track_event.assert_called_once_with(
        "checkout",
        identity_identifier=None,
        traits=None,
        metadata={"value": 99.77},
    )


def test_track_extracts_traits_from_context(mock_flagsmith_client: MagicMock) -> None:
    """Nested "traits" entries override flat attributes (same rule as _get_flags)."""
    # Given
    mock_flagsmith_client.track_event = MagicMock()
    provider = FlagsmithProvider(mock_flagsmith_client)

    context = EvaluationContext(
        targeting_key="user-123",
        attributes={
            "shared_key": "flat_value",
            "other": "kept",
            "traits": {"shared_key": "nested_value"},
        },
    )

    # When a context with both flat and nested traits is tracked
    provider.track("page_view", evaluation_context=context)

    # Then the nested value wins on collision and the rest merge through
    mock_flagsmith_client.track_event.assert_called_once_with(
        "page_view",
        identity_identifier="user-123",
        traits={"shared_key": "nested_value", "other": "kept"},
        metadata=None,
    )
Loading