diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 9c4beaa4..0b57e9e3 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -21,7 +21,7 @@ jobs:
runs-on: ${{ github.repository == 'stainless-sdks/browserbase-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }}
if: (github.event_name == 'push' || github.event.pull_request.head.repo.fork) && (github.event_name != 'push' || github.event.head_commit.message != 'codegen metadata')
steps:
- - uses: actions/checkout@v6
+ - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Install Rye
run: |
@@ -46,7 +46,7 @@ jobs:
id-token: write
runs-on: ${{ github.repository == 'stainless-sdks/browserbase-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }}
steps:
- - uses: actions/checkout@v6
+ - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Install Rye
run: |
@@ -67,7 +67,7 @@ jobs:
github.repository == 'stainless-sdks/browserbase-python' &&
!startsWith(github.ref, 'refs/heads/stl/')
id: github-oidc
- uses: actions/github-script@v8
+ uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0
with:
script: core.setOutput('github_token', await core.getIDToken());
@@ -87,7 +87,7 @@ jobs:
runs-on: ${{ github.repository == 'stainless-sdks/browserbase-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }}
if: github.event_name == 'push' || github.event.pull_request.head.repo.fork
steps:
- - uses: actions/checkout@v6
+ - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Install Rye
run: |
diff --git a/.github/workflows/publish-pypi.yml b/.github/workflows/publish-pypi.yml
index 7fb6d449..ebe645f8 100644
--- a/.github/workflows/publish-pypi.yml
+++ b/.github/workflows/publish-pypi.yml
@@ -14,7 +14,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v6
+ - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Install Rye
run: |
diff --git a/.github/workflows/release-doctor.yml b/.github/workflows/release-doctor.yml
index 5beedb0d..1760864d 100644
--- a/.github/workflows/release-doctor.yml
+++ b/.github/workflows/release-doctor.yml
@@ -12,7 +12,7 @@ jobs:
if: github.repository == 'browserbase/sdk-python' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch' || startsWith(github.head_ref, 'release-please') || github.head_ref == 'next')
steps:
- - uses: actions/checkout@v6
+ - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Check release environment
run: |
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index c523ce19..c3c95522 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "1.8.0"
+ ".": "1.9.0"
}
\ No newline at end of file
diff --git a/.stats.yml b/.stats.yml
index bf216680..4e2d03e1 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
-configured_endpoints: 21
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/browserbase%2Fbrowserbase-921d3c61c7aa06269f74bee63cee993597944f913429caa2aa2e00dd51fab60f.yml
-openapi_spec_hash: d35b9613c41bf172fa2b28aceef10b39
-config_hash: cf04ecfb8dad5fbd8b85be25d6e9ec55
+configured_endpoints: 23
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/browserbase/browserbase-2118fd938d408dda6ed82d06c48b0785fad91fd54b5397acc3421a49a386c791.yml
+openapi_spec_hash: 8e48a39a55a11b128028b47747aea775
+config_hash: 40fbac80e24faaa0dc19e93368bcd821
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3a2dbc4d..b08802a8 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,37 @@
# Changelog
+## 1.9.0 (2026-05-13)
+
+Full Changelog: [v1.8.0...v1.9.0](https://github.com/browserbase/sdk-python/compare/v1.8.0...v1.9.0)
+
+### Features
+
+* [CORE-1928][apps/api] Add `PENDING` as a valid session state ([4f1248d](https://github.com/browserbase/sdk-python/commit/4f1248dfb1bf79194f65854a6fcd6a0d53433ba1))
+* [CORE-1979] [apps/api] Regenerate OpenAPI spec to match current routes ([0e366a6](https://github.com/browserbase/sdk-python/commit/0e366a6a0a01e2ce8e661d73f9fa312e56d1a582))
+* **api:** add replays ([58e18df](https://github.com/browserbase/sdk-python/commit/58e18df7d7b0376234a322a197584a7163eba4b4))
+* **internal/types:** support eagerly validating pydantic iterators ([9d56949](https://github.com/browserbase/sdk-python/commit/9d569494a2050437404866315561e341f8e38a92))
+* support setting headers via env ([308d35e](https://github.com/browserbase/sdk-python/commit/308d35edb58454e542fcf58f379061fb742bd83b))
+
+
+### Bug Fixes
+
+* **client:** add missing f-string prefix in file type error message ([e01f048](https://github.com/browserbase/sdk-python/commit/e01f0484313315bb9d99338b8486054d53f2b46b))
+* **client:** preserve hardcoded query params when merging with user params ([953fd3e](https://github.com/browserbase/sdk-python/commit/953fd3ecd54a7ffc2ee390eac67f2063df89b8e9))
+* ensure file data are only sent as 1 parameter ([a837357](https://github.com/browserbase/sdk-python/commit/a83735708037eec6cb4807e4220ca7452b5b6503))
+* use correct field name format for multipart file arrays ([9488fb3](https://github.com/browserbase/sdk-python/commit/9488fb39a656b0aaaf740ccec0a9dce57b996b02))
+
+
+### Performance Improvements
+
+* **client:** optimize file structure copying in multipart requests ([4146f22](https://github.com/browserbase/sdk-python/commit/4146f22bb6c054e7491e4c10021dff3e9e2c8824))
+
+
+### Chores
+
+* **internal:** more robust bootstrap script ([83d1f68](https://github.com/browserbase/sdk-python/commit/83d1f686936e4593c3017c719056573089bbd1e0))
+* **internal:** reformat pyproject.toml ([979436a](https://github.com/browserbase/sdk-python/commit/979436a2cb86944f8bf400a2e623e946188d87c1))
+* **tests:** bump steady to v0.22.1 ([bafb680](https://github.com/browserbase/sdk-python/commit/bafb68055a4f36ef6b7d6a308606db9e8d33257b))
+
## 1.8.0 (2026-04-06)
Full Changelog: [v1.7.0...v1.8.0](https://github.com/browserbase/sdk-python/compare/v1.7.0...v1.8.0)
diff --git a/api.md b/api.md
index b6066cb8..581574a3 100644
--- a/api.md
+++ b/api.md
@@ -128,3 +128,16 @@ from browserbase.types.sessions import UploadCreateResponse
Methods:
- client.sessions.uploads.create(id, \*\*params) -> UploadCreateResponse
+
+## Replays
+
+Types:
+
+```python
+from browserbase.types.sessions import ReplayRetrieveResponse
+```
+
+Methods:
+
+- client.sessions.replays.retrieve(id) -> ReplayRetrieveResponse
+- client.sessions.replays.retrieve_page(page_id, \*, id) -> BinaryAPIResponse
diff --git a/pyproject.toml b/pyproject.toml
index e39927c3..8f62bf0c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "browserbase"
-version = "1.8.0"
+version = "1.9.0"
description = "The official Python library for the Browserbase API"
dynamic = ["readme"]
license = "Apache-2.0"
@@ -177,7 +177,7 @@ show_error_codes = true
#
# We also exclude our `tests` as mypy doesn't always infer
# types correctly and Pyright will still catch any type errors.
-exclude = ['src/browserbase/_files.py', '_dev/.*.py', 'tests/.*']
+exclude = ["src/browserbase/_files.py", "_dev/.*.py", "tests/.*"]
strict_equality = true
implicit_reexport = true
diff --git a/scripts/bootstrap b/scripts/bootstrap
index b430fee3..fe8451e4 100755
--- a/scripts/bootstrap
+++ b/scripts/bootstrap
@@ -4,7 +4,7 @@ set -e
cd "$(dirname "$0")/.."
-if [ -f "Brewfile" ] && [ "$(uname -s)" = "Darwin" ] && [ "$SKIP_BREW" != "1" ] && [ -t 0 ]; then
+if [ -f "Brewfile" ] && [ "$(uname -s)" = "Darwin" ] && [ "${SKIP_BREW:-}" != "1" ] && [ -t 0 ]; then
brew bundle check >/dev/null 2>&1 || {
echo -n "==> Install Homebrew dependencies? (y/N): "
read -r response
diff --git a/scripts/mock b/scripts/mock
index 5cd7c157..feebe5ed 100755
--- a/scripts/mock
+++ b/scripts/mock
@@ -22,9 +22,9 @@ echo "==> Starting mock server with URL ${URL}"
# Run steady mock on the given spec
if [ "$1" == "--daemon" ]; then
# Pre-install the package so the download doesn't eat into the startup timeout
- npm exec --package=@stdy/cli@0.20.2 -- steady --version
+ npm exec --package=@stdy/cli@0.22.1 -- steady --version
- npm exec --package=@stdy/cli@0.20.2 -- steady --host 127.0.0.1 -p 4010 --validator-query-array-format=comma --validator-form-array-format=comma --validator-query-object-format=brackets --validator-form-object-format=brackets "$URL" &> .stdy.log &
+ npm exec --package=@stdy/cli@0.22.1 -- steady --host 127.0.0.1 -p 4010 --validator-query-array-format=comma --validator-form-array-format=comma --validator-query-object-format=brackets --validator-form-object-format=brackets "$URL" &> .stdy.log &
# Wait for server to come online via health endpoint (max 30s)
echo -n "Waiting for server"
@@ -48,5 +48,5 @@ if [ "$1" == "--daemon" ]; then
echo
else
- npm exec --package=@stdy/cli@0.20.2 -- steady --host 127.0.0.1 -p 4010 --validator-query-array-format=comma --validator-form-array-format=comma --validator-query-object-format=brackets --validator-form-object-format=brackets "$URL"
+ npm exec --package=@stdy/cli@0.22.1 -- steady --host 127.0.0.1 -p 4010 --validator-query-array-format=comma --validator-form-array-format=comma --validator-query-object-format=brackets --validator-form-object-format=brackets "$URL"
fi
diff --git a/scripts/test b/scripts/test
index b8143aa3..19acc916 100755
--- a/scripts/test
+++ b/scripts/test
@@ -43,7 +43,7 @@ elif ! steady_is_running ; then
echo -e "To run the server, pass in the path or url of your OpenAPI"
echo -e "spec to the steady command:"
echo
- echo -e " \$ ${YELLOW}npm exec --package=@stdy/cli@0.20.2 -- steady path/to/your.openapi.yml --host 127.0.0.1 -p 4010 --validator-query-array-format=comma --validator-form-array-format=comma --validator-query-object-format=brackets --validator-form-object-format=brackets${NC}"
+ echo -e " \$ ${YELLOW}npm exec --package=@stdy/cli@0.22.1 -- steady path/to/your.openapi.yml --host 127.0.0.1 -p 4010 --validator-query-array-format=comma --validator-form-array-format=comma --validator-query-object-format=brackets --validator-form-object-format=brackets${NC}"
echo
exit 1
diff --git a/src/browserbase/_base_client.py b/src/browserbase/_base_client.py
index 5bc9823d..bd88f594 100644
--- a/src/browserbase/_base_client.py
+++ b/src/browserbase/_base_client.py
@@ -540,6 +540,10 @@ def _build_request(
files = cast(HttpxRequestFiles, ForceMultipartDict())
prepared_url = self._prepare_url(options.url)
+ # preserve hard-coded query params from the url
+ if params and prepared_url.query:
+ params = {**dict(prepared_url.params.items()), **params}
+ prepared_url = prepared_url.copy_with(raw_path=prepared_url.raw_path.split(b"?", 1)[0])
if "_" in prepared_url.host:
# work around https://github.com/encode/httpx/discussions/2880
kwargs["extensions"] = {"sni_hostname": prepared_url.host.replace("_", "-")}
diff --git a/src/browserbase/_client.py b/src/browserbase/_client.py
index d642273a..b64caef8 100644
--- a/src/browserbase/_client.py
+++ b/src/browserbase/_client.py
@@ -19,7 +19,11 @@
RequestOptions,
not_given,
)
-from ._utils import is_given, get_async_library
+from ._utils import (
+ is_given,
+ is_mapping_t,
+ get_async_library,
+)
from ._compat import cached_property
from ._version import __version__
from ._streaming import Stream as Stream, AsyncStream as AsyncStream
@@ -95,6 +99,15 @@ def __init__(
if base_url is None:
base_url = f"https://api.browserbase.com"
+ custom_headers_env = os.environ.get("BROWSERBASE_CUSTOM_HEADERS")
+ if custom_headers_env is not None:
+ parsed: dict[str, str] = {}
+ for line in custom_headers_env.split("\n"):
+ colon = line.find(":")
+ if colon >= 0:
+ parsed[line[:colon].strip()] = line[colon + 1 :].strip()
+ default_headers = {**parsed, **(default_headers if is_mapping_t(default_headers) else {})}
+
super().__init__(
version=__version__,
base_url=base_url,
@@ -299,6 +312,15 @@ def __init__(
if base_url is None:
base_url = f"https://api.browserbase.com"
+ custom_headers_env = os.environ.get("BROWSERBASE_CUSTOM_HEADERS")
+ if custom_headers_env is not None:
+ parsed: dict[str, str] = {}
+ for line in custom_headers_env.split("\n"):
+ colon = line.find(":")
+ if colon >= 0:
+ parsed[line[:colon].strip()] = line[colon + 1 :].strip()
+ default_headers = {**parsed, **(default_headers if is_mapping_t(default_headers) else {})}
+
super().__init__(
version=__version__,
base_url=base_url,
diff --git a/src/browserbase/_files.py b/src/browserbase/_files.py
index ff951be7..8042111f 100644
--- a/src/browserbase/_files.py
+++ b/src/browserbase/_files.py
@@ -3,8 +3,8 @@
import io
import os
import pathlib
-from typing import overload
-from typing_extensions import TypeGuard
+from typing import Sequence, cast, overload
+from typing_extensions import TypeVar, TypeGuard
import anyio
@@ -17,7 +17,9 @@
HttpxFileContent,
HttpxRequestFiles,
)
-from ._utils import is_tuple_t, is_mapping_t, is_sequence_t
+from ._utils import is_list, is_mapping, is_tuple_t, is_mapping_t, is_sequence_t
+
+_T = TypeVar("_T")
def is_base64_file_input(obj: object) -> TypeGuard[Base64FileInput]:
@@ -97,7 +99,7 @@ async def async_to_httpx_files(files: RequestFiles | None) -> HttpxRequestFiles
elif is_sequence_t(files):
files = [(key, await _async_transform_file(file)) for key, file in files]
else:
- raise TypeError("Unexpected file type input {type(files)}, expected mapping or sequence")
+ raise TypeError(f"Unexpected file type input {type(files)}, expected mapping or sequence")
return files
@@ -121,3 +123,51 @@ async def async_read_file_content(file: FileContent) -> HttpxFileContent:
return await anyio.Path(file).read_bytes()
return file
+
+
+def deepcopy_with_paths(item: _T, paths: Sequence[Sequence[str]]) -> _T:
+ """Copy only the containers along the given paths.
+
+ Used to guard against mutation by extract_files without copying the entire structure.
+ Only dicts and lists that lie on a path are copied; everything else
+ is returned by reference.
+
+ For example, given paths=[["foo", "files", "file"]] and the structure:
+ {
+ "foo": {
+ "bar": {"baz": {}},
+ "files": {"file": <file object>}
+ }
+ }
+ The root dict, "foo", and "files" are copied (they lie on the path).
+ "bar" and "baz" are returned by reference (off the path).
+ """
+ return _deepcopy_with_paths(item, paths, 0)
+
+
+def _deepcopy_with_paths(item: _T, paths: Sequence[Sequence[str]], index: int) -> _T:
+ if not paths:
+ return item
+ if is_mapping(item):
+ key_to_paths: dict[str, list[Sequence[str]]] = {}
+ for path in paths:
+ if index < len(path):
+ key_to_paths.setdefault(path[index], []).append(path)
+
+ # if no path continues through this mapping, it won't be mutated and copying it is redundant
+ if not key_to_paths:
+ return item
+
+ result = dict(item)
+ for key, subpaths in key_to_paths.items():
+ if key in result:
+ result[key] = _deepcopy_with_paths(result[key], subpaths, index + 1)
+ return cast(_T, result)
+ if is_list(item):
+ array_paths = [path for path in paths if index < len(path) and path[index] == "<array>"]
+
+ # if no path expects a list here, nothing will be mutated inside it - return by reference
+ if not array_paths:
+ return cast(_T, item)
+ return cast(_T, [_deepcopy_with_paths(entry, array_paths, index + 1) for entry in item])
+ return item
diff --git a/src/browserbase/_models.py b/src/browserbase/_models.py
index 29070e05..8c5ab260 100644
--- a/src/browserbase/_models.py
+++ b/src/browserbase/_models.py
@@ -25,7 +25,9 @@
ClassVar,
Protocol,
Required,
+ Annotated,
ParamSpec,
+ TypeAlias,
TypedDict,
TypeGuard,
final,
@@ -79,7 +81,15 @@
from ._constants import RAW_RESPONSE_HEADER
if TYPE_CHECKING:
+ from pydantic import GetCoreSchemaHandler, ValidatorFunctionWrapHandler
+ from pydantic_core import CoreSchema, core_schema
from pydantic_core.core_schema import ModelField, ModelSchema, LiteralSchema, ModelFieldsSchema
+else:
+ try:
+ from pydantic_core import CoreSchema, core_schema
+ except ImportError:
+ CoreSchema = None
+ core_schema = None
__all__ = ["BaseModel", "GenericModel"]
@@ -396,6 +406,76 @@ def model_dump_json(
)
+class _EagerIterable(list[_T], Generic[_T]):
+ """
+ Accepts any Iterable[T] input (including generators), consumes it
+ eagerly, and validates all items upfront.
+
+ Validation preserves the original container type where possible
+ (e.g. a set[T] stays a set[T]). Serialization (model_dump / JSON)
+ always emits a list — round-tripping through model_dump() will not
+ restore the original container type.
+ """
+
+ @classmethod
+ def __get_pydantic_core_schema__(
+ cls,
+ source_type: Any,
+ handler: GetCoreSchemaHandler,
+ ) -> CoreSchema:
+ (item_type,) = get_args(source_type) or (Any,)
+ item_schema: CoreSchema = handler.generate_schema(item_type)
+ list_of_items_schema: CoreSchema = core_schema.list_schema(item_schema)
+
+ return core_schema.no_info_wrap_validator_function(
+ cls._validate,
+ list_of_items_schema,
+ serialization=core_schema.plain_serializer_function_ser_schema(
+ cls._serialize,
+ info_arg=False,
+ ),
+ )
+
+ @staticmethod
+ def _validate(v: Iterable[_T], handler: "ValidatorFunctionWrapHandler") -> Any:
+ original_type: type[Any] = type(v)
+
+ # Normalize to list so list_schema can validate each item
+ if isinstance(v, list):
+ items: list[_T] = v
+ else:
+ try:
+ items = list(v)
+ except TypeError as e:
+ raise TypeError("Value is not iterable") from e
+
+ # Validate items against the inner schema
+ validated: list[_T] = handler(items)
+
+ # Reconstruct original container type
+ if original_type is list:
+ return validated
+ # str(list) produces the list's repr, not a string built from items,
+ # so skip reconstruction for str and its subclasses.
+ if issubclass(original_type, str):
+ return validated
+ try:
+ return original_type(validated)
+ except (TypeError, ValueError):
+ # If the type cannot be reconstructed, just return the validated list
+ return validated
+
+ @staticmethod
+ def _serialize(v: Iterable[_T]) -> list[_T]:
+ """Always serialize as a list so Pydantic's JSON encoder is happy."""
+ if isinstance(v, list):
+ return v
+ return list(v)
+
+
+EagerIterable: TypeAlias = Annotated[Iterable[_T], _EagerIterable]
+
+
def _construct_field(value: object, field: FieldInfo, key: str) -> object:
if value is None:
return field_get_default(field)
diff --git a/src/browserbase/_qs.py b/src/browserbase/_qs.py
index de8c99bc..4127c19c 100644
--- a/src/browserbase/_qs.py
+++ b/src/browserbase/_qs.py
@@ -2,17 +2,13 @@
from typing import Any, List, Tuple, Union, Mapping, TypeVar
from urllib.parse import parse_qs, urlencode
-from typing_extensions import Literal, get_args
+from typing_extensions import get_args
-from ._types import NotGiven, not_given
+from ._types import NotGiven, ArrayFormat, NestedFormat, not_given
from ._utils import flatten
_T = TypeVar("_T")
-
-ArrayFormat = Literal["comma", "repeat", "indices", "brackets"]
-NestedFormat = Literal["dots", "brackets"]
-
PrimitiveData = Union[str, int, float, bool, None]
# this should be Data = Union[PrimitiveData, "List[Data]", "Tuple[Data]", "Mapping[str, Data]"]
# https://github.com/microsoft/pyright/issues/3555
diff --git a/src/browserbase/_types.py b/src/browserbase/_types.py
index abefae08..b2ac81be 100644
--- a/src/browserbase/_types.py
+++ b/src/browserbase/_types.py
@@ -47,6 +47,9 @@
ModelT = TypeVar("ModelT", bound=pydantic.BaseModel)
_T = TypeVar("_T")
+ArrayFormat = Literal["comma", "repeat", "indices", "brackets"]
+NestedFormat = Literal["dots", "brackets"]
+
# Approximates httpx internal ProxiesTypes and RequestFiles types
# while adding support for `PathLike` instances
diff --git a/src/browserbase/_utils/__init__.py b/src/browserbase/_utils/__init__.py
index 10cb66d2..1c090e51 100644
--- a/src/browserbase/_utils/__init__.py
+++ b/src/browserbase/_utils/__init__.py
@@ -24,7 +24,6 @@
coerce_integer as coerce_integer,
file_from_path as file_from_path,
strip_not_given as strip_not_given,
- deepcopy_minimal as deepcopy_minimal,
get_async_library as get_async_library,
maybe_coerce_float as maybe_coerce_float,
get_required_header as get_required_header,
diff --git a/src/browserbase/_utils/_utils.py b/src/browserbase/_utils/_utils.py
index eec7f4a1..199cd231 100644
--- a/src/browserbase/_utils/_utils.py
+++ b/src/browserbase/_utils/_utils.py
@@ -17,11 +17,11 @@
)
from pathlib import Path
from datetime import date, datetime
-from typing_extensions import TypeGuard
+from typing_extensions import TypeGuard, get_args
import sniffio
-from .._types import Omit, NotGiven, FileTypes, HeadersLike
+from .._types import Omit, NotGiven, FileTypes, ArrayFormat, HeadersLike
_T = TypeVar("_T")
_TupleT = TypeVar("_TupleT", bound=Tuple[object, ...])
@@ -40,25 +40,45 @@ def extract_files(
query: Mapping[str, object],
*,
paths: Sequence[Sequence[str]],
+ array_format: ArrayFormat = "brackets",
) -> list[tuple[str, FileTypes]]:
"""Recursively extract files from the given dictionary based on specified paths.
A path may look like this ['foo', 'files', '<array>', 'data'].
+ ``array_format`` controls how ``<array>`` segments contribute to the emitted
+ field name. Supported values: ``"brackets"`` (``foo[]``), ``"repeat"`` and
+ ``"comma"`` (``foo``), ``"indices"`` (``foo[0]``, ``foo[1]``).
+
Note: this mutates the given dictionary.
"""
files: list[tuple[str, FileTypes]] = []
for path in paths:
- files.extend(_extract_items(query, path, index=0, flattened_key=None))
+ files.extend(_extract_items(query, path, index=0, flattened_key=None, array_format=array_format))
return files
+def _array_suffix(array_format: ArrayFormat, array_index: int) -> str:
+ if array_format == "brackets":
+ return "[]"
+ if array_format == "indices":
+ return f"[{array_index}]"
+ if array_format == "repeat" or array_format == "comma":
+ # Both repeat the bare field name for each file part; there is no
+ # meaningful way to comma-join binary parts.
+ return ""
+ raise NotImplementedError(
+ f"Unknown array_format value: {array_format}, choose from {', '.join(get_args(ArrayFormat))}"
+ )
+
+
def _extract_items(
obj: object,
path: Sequence[str],
*,
index: int,
flattened_key: str | None,
+ array_format: ArrayFormat,
) -> list[tuple[str, FileTypes]]:
try:
key = path[index]
@@ -75,9 +95,11 @@ def _extract_items(
if is_list(obj):
files: list[tuple[str, FileTypes]] = []
- for entry in obj:
- assert_is_file_content(entry, key=flattened_key + "[]" if flattened_key else "")
- files.append((flattened_key + "[]", cast(FileTypes, entry)))
+ for array_index, entry in enumerate(obj):
+ suffix = _array_suffix(array_format, array_index)
+ emitted_key = (flattened_key + suffix) if flattened_key else suffix
+ assert_is_file_content(entry, key=emitted_key)
+ files.append((emitted_key, cast(FileTypes, entry)))
return files
assert_is_file_content(obj, key=flattened_key)
@@ -86,8 +108,9 @@ def _extract_items(
index += 1
if is_dict(obj):
try:
- # We are at the last entry in the path so we must remove the field
- if (len(path)) == index:
+ # Remove the field if there are no more dict keys in the path,
+ # only "<array>" traversal markers or end.
+ if all(p == "<array>" for p in path[index:]):
item = obj.pop(key)
else:
item = obj[key]
@@ -105,6 +128,7 @@ def _extract_items(
path,
index=index,
flattened_key=flattened_key,
+ array_format=array_format,
)
elif is_list(obj):
if key != "<array>":
@@ -116,9 +140,12 @@ def _extract_items(
item,
path,
index=index,
- flattened_key=flattened_key + "[]" if flattened_key is not None else "[]",
+ flattened_key=(
+ (flattened_key if flattened_key is not None else "") + _array_suffix(array_format, array_index)
+ ),
+ array_format=array_format,
)
- for item in obj
+ for array_index, item in enumerate(obj)
]
)
@@ -176,21 +203,6 @@ def is_iterable(obj: object) -> TypeGuard[Iterable[object]]:
return isinstance(obj, Iterable)
-def deepcopy_minimal(item: _T) -> _T:
- """Minimal reimplementation of copy.deepcopy() that will only copy certain object types:
-
- - mappings, e.g. `dict`
- - list
-
- This is done for performance reasons.
- """
- if is_mapping(item):
- return cast(_T, {k: deepcopy_minimal(v) for k, v in item.items()})
- if is_list(item):
- return cast(_T, [deepcopy_minimal(entry) for entry in item])
- return item
-
-
# copied from https://github.com/Rapptz/RoboDanny
def human_join(seq: Sequence[str], *, delim: str = ", ", final: str = "or") -> str:
size = len(seq)
diff --git a/src/browserbase/_version.py b/src/browserbase/_version.py
index 10594de4..eaa01ae1 100644
--- a/src/browserbase/_version.py
+++ b/src/browserbase/_version.py
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
__title__ = "browserbase"
-__version__ = "1.8.0" # x-release-please-version
+__version__ = "1.9.0" # x-release-please-version
diff --git a/src/browserbase/resources/extensions.py b/src/browserbase/resources/extensions.py
index 2d6fb1b0..9325d6b6 100644
--- a/src/browserbase/resources/extensions.py
+++ b/src/browserbase/resources/extensions.py
@@ -7,8 +7,9 @@
import httpx
from ..types import extension_create_params
+from .._files import deepcopy_with_paths
from .._types import Body, Query, Headers, NoneType, NotGiven, FileTypes, not_given
-from .._utils import extract_files, path_template, maybe_transform, deepcopy_minimal, async_maybe_transform
+from .._utils import extract_files, path_template, maybe_transform, async_maybe_transform
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import (
@@ -66,7 +67,7 @@ def create(
timeout: Override the client-level default timeout for this request, in seconds
"""
- body = deepcopy_minimal({"file": file})
+ body = deepcopy_with_paths({"file": file}, [["file"]])
files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
# It should be noted that the actual Content-Type header that will be
# sent to the server will contain a `boundary` parameter, e.g.
@@ -193,7 +194,7 @@ async def create(
timeout: Override the client-level default timeout for this request, in seconds
"""
- body = deepcopy_minimal({"file": file})
+ body = deepcopy_with_paths({"file": file}, [["file"]])
files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
# It should be noted that the actual Content-Type header that will be
# sent to the server will contain a `boundary` parameter, e.g.
diff --git a/src/browserbase/resources/sessions/__init__.py b/src/browserbase/resources/sessions/__init__.py
index b3877e12..e66ee0ce 100644
--- a/src/browserbase/resources/sessions/__init__.py
+++ b/src/browserbase/resources/sessions/__init__.py
@@ -8,6 +8,14 @@
LogsResourceWithStreamingResponse,
AsyncLogsResourceWithStreamingResponse,
)
+from .replays import (
+ ReplaysResource,
+ AsyncReplaysResource,
+ ReplaysResourceWithRawResponse,
+ AsyncReplaysResourceWithRawResponse,
+ ReplaysResourceWithStreamingResponse,
+ AsyncReplaysResourceWithStreamingResponse,
+)
from .uploads import (
UploadsResource,
AsyncUploadsResource,
@@ -66,6 +74,12 @@
"AsyncUploadsResourceWithRawResponse",
"UploadsResourceWithStreamingResponse",
"AsyncUploadsResourceWithStreamingResponse",
+ "ReplaysResource",
+ "AsyncReplaysResource",
+ "ReplaysResourceWithRawResponse",
+ "AsyncReplaysResourceWithRawResponse",
+ "ReplaysResourceWithStreamingResponse",
+ "AsyncReplaysResourceWithStreamingResponse",
"SessionsResource",
"AsyncSessionsResource",
"SessionsResourceWithRawResponse",
diff --git a/src/browserbase/resources/sessions/replays.py b/src/browserbase/resources/sessions/replays.py
new file mode 100644
index 00000000..c9240356
--- /dev/null
+++ b/src/browserbase/resources/sessions/replays.py
@@ -0,0 +1,266 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import httpx
+
+from ..._types import Body, Query, Headers, NotGiven, not_given
+from ..._utils import path_template
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import (
+ BinaryAPIResponse,
+ AsyncBinaryAPIResponse,
+ StreamedBinaryAPIResponse,
+ AsyncStreamedBinaryAPIResponse,
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ to_custom_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+ to_custom_streamed_response_wrapper,
+ async_to_custom_raw_response_wrapper,
+ async_to_custom_streamed_response_wrapper,
+)
+from ..._base_client import make_request_options
+from ...types.sessions.replay_retrieve_response import ReplayRetrieveResponse
+
+__all__ = ["ReplaysResource", "AsyncReplaysResource"]
+
+
+class ReplaysResource(SyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> ReplaysResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/browserbase/sdk-python#accessing-raw-response-data-eg-headers
+ """
+ return ReplaysResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> ReplaysResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/browserbase/sdk-python#with_streaming_response
+ """
+ return ReplaysResourceWithStreamingResponse(self)
+
+ def retrieve(
+ self,
+ id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ReplayRetrieveResponse:
+ """
+ Returns page metadata for a session replay, including timing information and the
+ URL of each page's HLS playlist.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not id:
+ raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
+ return self._get(
+ path_template("/v1/sessions/{id}/replays", id=id),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ReplayRetrieveResponse,
+ )
+
+ def retrieve_page(
+ self,
+ page_id: str,
+ *,
+ id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> BinaryAPIResponse:
+ """
+ Returns an HLS VOD media playlist (.m3u8) for a specific page of a session
+ replay.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not id:
+ raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
+ if not page_id:
+ raise ValueError(f"Expected a non-empty value for `page_id` but received {page_id!r}")
+ extra_headers = {"Accept": "application/vnd.apple.mpegurl", **(extra_headers or {})}
+ return self._get(
+ path_template("/v1/sessions/{id}/replays/{page_id}", id=id, page_id=page_id),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=BinaryAPIResponse,
+ )
+
+
+class AsyncReplaysResource(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncReplaysResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/browserbase/sdk-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncReplaysResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncReplaysResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/browserbase/sdk-python#with_streaming_response
+ """
+ return AsyncReplaysResourceWithStreamingResponse(self)
+
+ async def retrieve(
+ self,
+ id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ReplayRetrieveResponse:
+ """
+ Returns page metadata for a session replay, including timing information and the
+ URL of each page's HLS playlist.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not id:
+ raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
+ return await self._get(
+ path_template("/v1/sessions/{id}/replays", id=id),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ReplayRetrieveResponse,
+ )
+
+ async def retrieve_page(
+ self,
+ page_id: str,
+ *,
+ id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AsyncBinaryAPIResponse:
+ """
+ Returns an HLS VOD media playlist (.m3u8) for a specific page of a session
+ replay.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not id:
+ raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
+ if not page_id:
+ raise ValueError(f"Expected a non-empty value for `page_id` but received {page_id!r}")
+ extra_headers = {"Accept": "application/vnd.apple.mpegurl", **(extra_headers or {})}
+ return await self._get(
+ path_template("/v1/sessions/{id}/replays/{page_id}", id=id, page_id=page_id),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=AsyncBinaryAPIResponse,
+ )
+
+
+class ReplaysResourceWithRawResponse:
+ def __init__(self, replays: ReplaysResource) -> None:
+ self._replays = replays
+
+ self.retrieve = to_raw_response_wrapper(
+ replays.retrieve,
+ )
+ self.retrieve_page = to_custom_raw_response_wrapper(
+ replays.retrieve_page,
+ BinaryAPIResponse,
+ )
+
+
+class AsyncReplaysResourceWithRawResponse:
+ def __init__(self, replays: AsyncReplaysResource) -> None:
+ self._replays = replays
+
+ self.retrieve = async_to_raw_response_wrapper(
+ replays.retrieve,
+ )
+ self.retrieve_page = async_to_custom_raw_response_wrapper(
+ replays.retrieve_page,
+ AsyncBinaryAPIResponse,
+ )
+
+
+class ReplaysResourceWithStreamingResponse:
+ def __init__(self, replays: ReplaysResource) -> None:
+ self._replays = replays
+
+ self.retrieve = to_streamed_response_wrapper(
+ replays.retrieve,
+ )
+ self.retrieve_page = to_custom_streamed_response_wrapper(
+ replays.retrieve_page,
+ StreamedBinaryAPIResponse,
+ )
+
+
+class AsyncReplaysResourceWithStreamingResponse:
+ def __init__(self, replays: AsyncReplaysResource) -> None:
+ self._replays = replays
+
+ self.retrieve = async_to_streamed_response_wrapper(
+ replays.retrieve,
+ )
+ self.retrieve_page = async_to_custom_streamed_response_wrapper(
+ replays.retrieve_page,
+ AsyncStreamedBinaryAPIResponse,
+ )
diff --git a/src/browserbase/resources/sessions/sessions.py b/src/browserbase/resources/sessions/sessions.py
index 18bb0a73..a54d7a72 100644
--- a/src/browserbase/resources/sessions/sessions.py
+++ b/src/browserbase/resources/sessions/sessions.py
@@ -16,6 +16,14 @@
AsyncLogsResourceWithStreamingResponse,
)
from ...types import session_list_params, session_create_params, session_update_params
+from .replays import (
+ ReplaysResource,
+ AsyncReplaysResource,
+ ReplaysResourceWithRawResponse,
+ AsyncReplaysResourceWithRawResponse,
+ ReplaysResourceWithStreamingResponse,
+ AsyncReplaysResourceWithStreamingResponse,
+)
from .uploads import (
UploadsResource,
AsyncUploadsResource,
@@ -77,6 +85,10 @@ def recording(self) -> RecordingResource:
def uploads(self) -> UploadsResource:
return UploadsResource(self._client)
+ @cached_property
+ def replays(self) -> ReplaysResource:
+ return ReplaysResource(self._client)
+
@cached_property
def with_raw_response(self) -> SessionsResourceWithRawResponse:
"""
@@ -255,7 +267,7 @@ def list(
self,
*,
q: str | Omit = omit,
- status: Literal["RUNNING", "ERROR", "TIMED_OUT", "COMPLETED"] | Omit = omit,
+ status: Literal["PENDING", "RUNNING", "ERROR", "TIMED_OUT", "COMPLETED"] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -349,6 +361,10 @@ def recording(self) -> AsyncRecordingResource:
def uploads(self) -> AsyncUploadsResource:
return AsyncUploadsResource(self._client)
+ @cached_property
+ def replays(self) -> AsyncReplaysResource:
+ return AsyncReplaysResource(self._client)
+
@cached_property
def with_raw_response(self) -> AsyncSessionsResourceWithRawResponse:
"""
@@ -527,7 +543,7 @@ async def list(
self,
*,
q: str | Omit = omit,
- status: Literal["RUNNING", "ERROR", "TIMED_OUT", "COMPLETED"] | Omit = omit,
+ status: Literal["PENDING", "RUNNING", "ERROR", "TIMED_OUT", "COMPLETED"] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -640,6 +656,10 @@ def recording(self) -> RecordingResourceWithRawResponse:
def uploads(self) -> UploadsResourceWithRawResponse:
return UploadsResourceWithRawResponse(self._sessions.uploads)
+ @cached_property
+ def replays(self) -> ReplaysResourceWithRawResponse:
+ return ReplaysResourceWithRawResponse(self._sessions.replays)
+
class AsyncSessionsResourceWithRawResponse:
def __init__(self, sessions: AsyncSessionsResource) -> None:
@@ -677,6 +697,10 @@ def recording(self) -> AsyncRecordingResourceWithRawResponse:
def uploads(self) -> AsyncUploadsResourceWithRawResponse:
return AsyncUploadsResourceWithRawResponse(self._sessions.uploads)
+ @cached_property
+ def replays(self) -> AsyncReplaysResourceWithRawResponse:
+ return AsyncReplaysResourceWithRawResponse(self._sessions.replays)
+
class SessionsResourceWithStreamingResponse:
def __init__(self, sessions: SessionsResource) -> None:
@@ -714,6 +738,10 @@ def recording(self) -> RecordingResourceWithStreamingResponse:
def uploads(self) -> UploadsResourceWithStreamingResponse:
return UploadsResourceWithStreamingResponse(self._sessions.uploads)
+ @cached_property
+ def replays(self) -> ReplaysResourceWithStreamingResponse:
+ return ReplaysResourceWithStreamingResponse(self._sessions.replays)
+
class AsyncSessionsResourceWithStreamingResponse:
def __init__(self, sessions: AsyncSessionsResource) -> None:
@@ -750,3 +778,7 @@ def recording(self) -> AsyncRecordingResourceWithStreamingResponse:
@cached_property
def uploads(self) -> AsyncUploadsResourceWithStreamingResponse:
return AsyncUploadsResourceWithStreamingResponse(self._sessions.uploads)
+
+ @cached_property
+ def replays(self) -> AsyncReplaysResourceWithStreamingResponse:
+ return AsyncReplaysResourceWithStreamingResponse(self._sessions.replays)
diff --git a/src/browserbase/resources/sessions/uploads.py b/src/browserbase/resources/sessions/uploads.py
index 7c776029..f5d22d96 100644
--- a/src/browserbase/resources/sessions/uploads.py
+++ b/src/browserbase/resources/sessions/uploads.py
@@ -6,8 +6,9 @@
import httpx
+from ..._files import deepcopy_with_paths
from ..._types import Body, Query, Headers, NotGiven, FileTypes, not_given
-from ..._utils import extract_files, path_template, maybe_transform, deepcopy_minimal, async_maybe_transform
+from ..._utils import extract_files, path_template, maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import (
@@ -69,7 +70,7 @@ def create(
"""
if not id:
raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
- body = deepcopy_minimal({"file": file})
+ body = deepcopy_with_paths({"file": file}, [["file"]])
files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
# It should be noted that the actual Content-Type header that will be
# sent to the server will contain a `boundary` parameter, e.g.
@@ -132,7 +133,7 @@ async def create(
"""
if not id:
raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
- body = deepcopy_minimal({"file": file})
+ body = deepcopy_with_paths({"file": file}, [["file"]])
files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
# It should be noted that the actual Content-Type header that will be
# sent to the server will contain a `boundary` parameter, e.g.
diff --git a/src/browserbase/types/session.py b/src/browserbase/types/session.py
index e983baaa..eac6a815 100644
--- a/src/browserbase/types/session.py
+++ b/src/browserbase/types/session.py
@@ -32,7 +32,7 @@ class Session(BaseModel):
started_at: datetime = FieldInfo(alias="startedAt")
- status: Literal["RUNNING", "ERROR", "TIMED_OUT", "COMPLETED"]
+ status: Literal["PENDING", "RUNNING", "ERROR", "TIMED_OUT", "COMPLETED"]
updated_at: datetime = FieldInfo(alias="updatedAt")
diff --git a/src/browserbase/types/session_create_params.py b/src/browserbase/types/session_create_params.py
index c7180636..3ff1d058 100644
--- a/src/browserbase/types/session_create_params.py
+++ b/src/browserbase/types/session_create_params.py
@@ -108,6 +108,12 @@ class BrowserSettings(TypedDict, total=False):
See [Upload Extension](/reference/api/upload-an-extension).
"""
+ ignore_certificate_errors: Annotated[bool, PropertyInfo(alias="ignoreCertificateErrors")]
+ """Enable or disable ignoring of certificate errors in the browser.
+
+ Defaults to `true`.
+ """
+
log_session: Annotated[bool, PropertyInfo(alias="logSession")]
"""Enable or disable session logging. Defaults to `true`."""
diff --git a/src/browserbase/types/session_list_params.py b/src/browserbase/types/session_list_params.py
index 54b0a05c..c21b98e1 100644
--- a/src/browserbase/types/session_list_params.py
+++ b/src/browserbase/types/session_list_params.py
@@ -16,4 +16,4 @@ class SessionListParams(TypedDict, total=False):
for the schema of this query.
"""
- status: Literal["RUNNING", "ERROR", "TIMED_OUT", "COMPLETED"]
+ status: Literal["PENDING", "RUNNING", "ERROR", "TIMED_OUT", "COMPLETED"]
diff --git a/src/browserbase/types/sessions/__init__.py b/src/browserbase/types/sessions/__init__.py
index 0cef6b19..c7ea4671 100644
--- a/src/browserbase/types/sessions/__init__.py
+++ b/src/browserbase/types/sessions/__init__.py
@@ -7,4 +7,5 @@
from .session_recording import SessionRecording as SessionRecording
from .upload_create_params import UploadCreateParams as UploadCreateParams
from .upload_create_response import UploadCreateResponse as UploadCreateResponse
+from .replay_retrieve_response import ReplayRetrieveResponse as ReplayRetrieveResponse
from .recording_retrieve_response import RecordingRetrieveResponse as RecordingRetrieveResponse
diff --git a/src/browserbase/types/sessions/replay_retrieve_response.py b/src/browserbase/types/sessions/replay_retrieve_response.py
new file mode 100644
index 00000000..7f0b02c0
--- /dev/null
+++ b/src/browserbase/types/sessions/replay_retrieve_response.py
@@ -0,0 +1,25 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List
+
+from pydantic import Field as FieldInfo
+
+from ..._models import BaseModel
+
+__all__ = ["ReplayRetrieveResponse", "Page"]
+
+
+class Page(BaseModel):
+ end_time_ms: float = FieldInfo(alias="endTimeMs")
+
+ page_id: str = FieldInfo(alias="pageId")
+
+ start_time_ms: float = FieldInfo(alias="startTimeMs")
+
+ url: str
+
+
+class ReplayRetrieveResponse(BaseModel):
+ page_count: int = FieldInfo(alias="pageCount")
+
+ pages: List[Page]
diff --git a/tests/api_resources/sessions/test_replays.py b/tests/api_resources/sessions/test_replays.py
new file mode 100644
index 00000000..a82c7880
--- /dev/null
+++ b/tests/api_resources/sessions/test_replays.py
@@ -0,0 +1,242 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import httpx
+import pytest
+from respx import MockRouter
+
+from browserbase import Browserbase, AsyncBrowserbase
+from tests.utils import assert_matches_type
+from browserbase._response import (
+ BinaryAPIResponse,
+ AsyncBinaryAPIResponse,
+ StreamedBinaryAPIResponse,
+ AsyncStreamedBinaryAPIResponse,
+)
+from browserbase.types.sessions import ReplayRetrieveResponse
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestReplays:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ def test_method_retrieve(self, client: Browserbase) -> None:
+ replay = client.sessions.replays.retrieve(
+ "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+ assert_matches_type(ReplayRetrieveResponse, replay, path=["response"])
+
+ @parametrize
+ def test_raw_response_retrieve(self, client: Browserbase) -> None:
+ response = client.sessions.replays.with_raw_response.retrieve(
+ "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ replay = response.parse()
+ assert_matches_type(ReplayRetrieveResponse, replay, path=["response"])
+
+ @parametrize
+ def test_streaming_response_retrieve(self, client: Browserbase) -> None:
+ with client.sessions.replays.with_streaming_response.retrieve(
+ "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ replay = response.parse()
+ assert_matches_type(ReplayRetrieveResponse, replay, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_retrieve(self, client: Browserbase) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"):
+ client.sessions.replays.with_raw_response.retrieve(
+ "",
+ )
+
+ @parametrize
+ @pytest.mark.respx(base_url=base_url)
+ def test_method_retrieve_page(self, client: Browserbase, respx_mock: MockRouter) -> None:
+ respx_mock.get("/v1/sessions/182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e/replays/090").mock(
+ return_value=httpx.Response(200, json={"foo": "bar"})
+ )
+ replay = client.sessions.replays.retrieve_page(
+ page_id="090",
+ id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+ assert replay.is_closed
+ assert replay.json() == {"foo": "bar"}
+ assert cast(Any, replay.is_closed) is True
+ assert isinstance(replay, BinaryAPIResponse)
+
+ @parametrize
+ @pytest.mark.respx(base_url=base_url)
+ def test_raw_response_retrieve_page(self, client: Browserbase, respx_mock: MockRouter) -> None:
+ respx_mock.get("/v1/sessions/182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e/replays/090").mock(
+ return_value=httpx.Response(200, json={"foo": "bar"})
+ )
+
+ replay = client.sessions.replays.with_raw_response.retrieve_page(
+ page_id="090",
+ id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+
+ assert replay.is_closed is True
+ assert replay.http_request.headers.get("X-Stainless-Lang") == "python"
+ assert replay.json() == {"foo": "bar"}
+ assert isinstance(replay, BinaryAPIResponse)
+
+ @parametrize
+ @pytest.mark.respx(base_url=base_url)
+ def test_streaming_response_retrieve_page(self, client: Browserbase, respx_mock: MockRouter) -> None:
+ respx_mock.get("/v1/sessions/182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e/replays/090").mock(
+ return_value=httpx.Response(200, json={"foo": "bar"})
+ )
+ with client.sessions.replays.with_streaming_response.retrieve_page(
+ page_id="090",
+ id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ ) as replay:
+ assert not replay.is_closed
+ assert replay.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ assert replay.json() == {"foo": "bar"}
+ assert cast(Any, replay.is_closed) is True
+ assert isinstance(replay, StreamedBinaryAPIResponse)
+
+ assert cast(Any, replay.is_closed) is True
+
+ @parametrize
+ @pytest.mark.respx(base_url=base_url)
+ def test_path_params_retrieve_page(self, client: Browserbase) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"):
+ client.sessions.replays.with_raw_response.retrieve_page(
+ page_id="090",
+ id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `page_id` but received ''"):
+ client.sessions.replays.with_raw_response.retrieve_page(
+ page_id="",
+ id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+
+
+class TestAsyncReplays:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncBrowserbase) -> None:
+ replay = await async_client.sessions.replays.retrieve(
+ "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+ assert_matches_type(ReplayRetrieveResponse, replay, path=["response"])
+
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncBrowserbase) -> None:
+ response = await async_client.sessions.replays.with_raw_response.retrieve(
+ "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ replay = await response.parse()
+ assert_matches_type(ReplayRetrieveResponse, replay, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncBrowserbase) -> None:
+ async with async_client.sessions.replays.with_streaming_response.retrieve(
+ "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ replay = await response.parse()
+ assert_matches_type(ReplayRetrieveResponse, replay, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncBrowserbase) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"):
+ await async_client.sessions.replays.with_raw_response.retrieve(
+ "",
+ )
+
+ @parametrize
+ @pytest.mark.respx(base_url=base_url)
+ async def test_method_retrieve_page(self, async_client: AsyncBrowserbase, respx_mock: MockRouter) -> None:
+ respx_mock.get("/v1/sessions/182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e/replays/090").mock(
+ return_value=httpx.Response(200, json={"foo": "bar"})
+ )
+ replay = await async_client.sessions.replays.retrieve_page(
+ page_id="090",
+ id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+ assert replay.is_closed
+ assert await replay.json() == {"foo": "bar"}
+ assert cast(Any, replay.is_closed) is True
+ assert isinstance(replay, AsyncBinaryAPIResponse)
+
+ @parametrize
+ @pytest.mark.respx(base_url=base_url)
+ async def test_raw_response_retrieve_page(self, async_client: AsyncBrowserbase, respx_mock: MockRouter) -> None:
+ respx_mock.get("/v1/sessions/182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e/replays/090").mock(
+ return_value=httpx.Response(200, json={"foo": "bar"})
+ )
+
+ replay = await async_client.sessions.replays.with_raw_response.retrieve_page(
+ page_id="090",
+ id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+
+ assert replay.is_closed is True
+ assert replay.http_request.headers.get("X-Stainless-Lang") == "python"
+ assert await replay.json() == {"foo": "bar"}
+ assert isinstance(replay, AsyncBinaryAPIResponse)
+
+ @parametrize
+ @pytest.mark.respx(base_url=base_url)
+ async def test_streaming_response_retrieve_page(
+ self, async_client: AsyncBrowserbase, respx_mock: MockRouter
+ ) -> None:
+ respx_mock.get("/v1/sessions/182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e/replays/090").mock(
+ return_value=httpx.Response(200, json={"foo": "bar"})
+ )
+ async with async_client.sessions.replays.with_streaming_response.retrieve_page(
+ page_id="090",
+ id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ ) as replay:
+ assert not replay.is_closed
+ assert replay.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ assert await replay.json() == {"foo": "bar"}
+ assert cast(Any, replay.is_closed) is True
+ assert isinstance(replay, AsyncStreamedBinaryAPIResponse)
+
+ assert cast(Any, replay.is_closed) is True
+
+ @parametrize
+ @pytest.mark.respx(base_url=base_url)
+ async def test_path_params_retrieve_page(self, async_client: AsyncBrowserbase) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"):
+ await async_client.sessions.replays.with_raw_response.retrieve_page(
+ page_id="090",
+ id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `page_id` but received ''"):
+ await async_client.sessions.replays.with_raw_response.retrieve_page(
+ page_id="",
+ id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
diff --git a/tests/api_resources/test_sessions.py b/tests/api_resources/test_sessions.py
index eb07d3fc..41f2a0bd 100644
--- a/tests/api_resources/test_sessions.py
+++ b/tests/api_resources/test_sessions.py
@@ -41,6 +41,7 @@ def test_method_create_with_all_params(self, client: Browserbase) -> None:
"persist": True,
},
"extension_id": "extensionId",
+ "ignore_certificate_errors": True,
"log_session": True,
"os": "windows",
"record_session": True,
@@ -189,7 +190,7 @@ def test_method_list(self, client: Browserbase) -> None:
def test_method_list_with_all_params(self, client: Browserbase) -> None:
session = client.sessions.list(
q="q",
- status="RUNNING",
+ status="PENDING",
)
assert_matches_type(SessionListResponse, session, path=["response"])
@@ -275,6 +276,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncBrowserbas
"persist": True,
},
"extension_id": "extensionId",
+ "ignore_certificate_errors": True,
"log_session": True,
"os": "windows",
"record_session": True,
@@ -423,7 +425,7 @@ async def test_method_list(self, async_client: AsyncBrowserbase) -> None:
async def test_method_list_with_all_params(self, async_client: AsyncBrowserbase) -> None:
session = await async_client.sessions.list(
q="q",
- status="RUNNING",
+ status="PENDING",
)
assert_matches_type(SessionListResponse, session, path=["response"])
diff --git a/tests/test_client.py b/tests/test_client.py
index 1d0d68b3..95ae8e03 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -429,6 +429,30 @@ def test_default_query_option(self) -> None:
client.close()
+ def test_hardcoded_query_params_in_url(self, client: Browserbase) -> None:
+ request = client._build_request(FinalRequestOptions(method="get", url="/foo?beta=true"))
+ url = httpx.URL(request.url)
+ assert dict(url.params) == {"beta": "true"}
+
+ request = client._build_request(
+ FinalRequestOptions(
+ method="get",
+ url="/foo?beta=true",
+ params={"limit": "10", "page": "abc"},
+ )
+ )
+ url = httpx.URL(request.url)
+ assert dict(url.params) == {"beta": "true", "limit": "10", "page": "abc"}
+
+ request = client._build_request(
+ FinalRequestOptions(
+ method="get",
+ url="/files/a%2Fb?beta=true",
+ params={"limit": "10"},
+ )
+ )
+ assert request.url.raw_path == b"/files/a%2Fb?beta=true&limit=10"
+
def test_request_extra_json(self, client: Browserbase) -> None:
request = client._build_request(
FinalRequestOptions(
@@ -1330,6 +1354,30 @@ async def test_default_query_option(self) -> None:
await client.close()
+ async def test_hardcoded_query_params_in_url(self, async_client: AsyncBrowserbase) -> None:
+ request = async_client._build_request(FinalRequestOptions(method="get", url="/foo?beta=true"))
+ url = httpx.URL(request.url)
+ assert dict(url.params) == {"beta": "true"}
+
+ request = async_client._build_request(
+ FinalRequestOptions(
+ method="get",
+ url="/foo?beta=true",
+ params={"limit": "10", "page": "abc"},
+ )
+ )
+ url = httpx.URL(request.url)
+ assert dict(url.params) == {"beta": "true", "limit": "10", "page": "abc"}
+
+ request = async_client._build_request(
+ FinalRequestOptions(
+ method="get",
+ url="/files/a%2Fb?beta=true",
+ params={"limit": "10"},
+ )
+ )
+ assert request.url.raw_path == b"/files/a%2Fb?beta=true&limit=10"
+
def test_request_extra_json(self, client: Browserbase) -> None:
request = client._build_request(
FinalRequestOptions(
diff --git a/tests/test_deepcopy.py b/tests/test_deepcopy.py
deleted file mode 100644
index a2a29e38..00000000
--- a/tests/test_deepcopy.py
+++ /dev/null
@@ -1,58 +0,0 @@
-from browserbase._utils import deepcopy_minimal
-
-
-def assert_different_identities(obj1: object, obj2: object) -> None:
- assert obj1 == obj2
- assert id(obj1) != id(obj2)
-
-
-def test_simple_dict() -> None:
- obj1 = {"foo": "bar"}
- obj2 = deepcopy_minimal(obj1)
- assert_different_identities(obj1, obj2)
-
-
-def test_nested_dict() -> None:
- obj1 = {"foo": {"bar": True}}
- obj2 = deepcopy_minimal(obj1)
- assert_different_identities(obj1, obj2)
- assert_different_identities(obj1["foo"], obj2["foo"])
-
-
-def test_complex_nested_dict() -> None:
- obj1 = {"foo": {"bar": [{"hello": "world"}]}}
- obj2 = deepcopy_minimal(obj1)
- assert_different_identities(obj1, obj2)
- assert_different_identities(obj1["foo"], obj2["foo"])
- assert_different_identities(obj1["foo"]["bar"], obj2["foo"]["bar"])
- assert_different_identities(obj1["foo"]["bar"][0], obj2["foo"]["bar"][0])
-
-
-def test_simple_list() -> None:
- obj1 = ["a", "b", "c"]
- obj2 = deepcopy_minimal(obj1)
- assert_different_identities(obj1, obj2)
-
-
-def test_nested_list() -> None:
- obj1 = ["a", [1, 2, 3]]
- obj2 = deepcopy_minimal(obj1)
- assert_different_identities(obj1, obj2)
- assert_different_identities(obj1[1], obj2[1])
-
-
-class MyObject: ...
-
-
-def test_ignores_other_types() -> None:
- # custom classes
- my_obj = MyObject()
- obj1 = {"foo": my_obj}
- obj2 = deepcopy_minimal(obj1)
- assert_different_identities(obj1, obj2)
- assert obj1["foo"] is my_obj
-
- # tuples
- obj3 = ("a", "b")
- obj4 = deepcopy_minimal(obj3)
- assert obj3 is obj4
diff --git a/tests/test_extract_files.py b/tests/test_extract_files.py
index 3c0fcb36..07deeb6d 100644
--- a/tests/test_extract_files.py
+++ b/tests/test_extract_files.py
@@ -4,7 +4,7 @@
import pytest
-from browserbase._types import FileTypes
+from browserbase._types import FileTypes, ArrayFormat
from browserbase._utils import extract_files
@@ -35,6 +35,12 @@ def test_multiple_files() -> None:
assert query == {"documents": [{}, {}]}
+def test_top_level_file_array() -> None:
+ query = {"files": [b"file one", b"file two"], "title": "hello"}
+ assert extract_files(query, paths=[["files", ""]]) == [("files[]", b"file one"), ("files[]", b"file two")]
+ assert query == {"title": "hello"}
+
+
@pytest.mark.parametrize(
"query,paths,expected",
[
@@ -62,3 +68,24 @@ def test_ignores_incorrect_paths(
expected: list[tuple[str, FileTypes]],
) -> None:
assert extract_files(query, paths=paths) == expected
+
+
+@pytest.mark.parametrize(
+ "array_format,expected_top_level,expected_nested",
+ [
+ ("brackets", [("files[]", b"a"), ("files[]", b"b")], [("items[][file]", b"a"), ("items[][file]", b"b")]),
+ ("repeat", [("files", b"a"), ("files", b"b")], [("items[file]", b"a"), ("items[file]", b"b")]),
+ ("comma", [("files", b"a"), ("files", b"b")], [("items[file]", b"a"), ("items[file]", b"b")]),
+ ("indices", [("files[0]", b"a"), ("files[1]", b"b")], [("items[0][file]", b"a"), ("items[1][file]", b"b")]),
+ ],
+)
+def test_array_format_controls_file_field_names(
+ array_format: ArrayFormat,
+ expected_top_level: list[tuple[str, FileTypes]],
+ expected_nested: list[tuple[str, FileTypes]],
+) -> None:
+ top_level = {"files": [b"a", b"b"]}
+ assert extract_files(top_level, paths=[["files", ""]], array_format=array_format) == expected_top_level
+
+ nested = {"items": [{"file": b"a"}, {"file": b"b"}]}
+ assert extract_files(nested, paths=[["items", "", "file"]], array_format=array_format) == expected_nested
diff --git a/tests/test_files.py b/tests/test_files.py
index d8842d61..713c5994 100644
--- a/tests/test_files.py
+++ b/tests/test_files.py
@@ -4,7 +4,8 @@
import pytest
from dirty_equals import IsDict, IsList, IsBytes, IsTuple
-from browserbase._files import to_httpx_files, async_to_httpx_files
+from browserbase._files import to_httpx_files, deepcopy_with_paths, async_to_httpx_files
+from browserbase._utils import extract_files
readme_path = Path(__file__).parent.parent.joinpath("README.md")
@@ -49,3 +50,99 @@ def test_string_not_allowed() -> None:
"file": "foo", # type: ignore
}
)
+
+
+def assert_different_identities(obj1: object, obj2: object) -> None:
+ assert obj1 == obj2
+ assert obj1 is not obj2
+
+
+class TestDeepcopyWithPaths:
+ def test_copies_top_level_dict(self) -> None:
+ original = {"file": b"data", "other": "value"}
+ result = deepcopy_with_paths(original, [["file"]])
+ assert_different_identities(result, original)
+
+ def test_file_value_is_same_reference(self) -> None:
+ file_bytes = b"contents"
+ original = {"file": file_bytes}
+ result = deepcopy_with_paths(original, [["file"]])
+ assert_different_identities(result, original)
+ assert result["file"] is file_bytes
+
+ def test_list_popped_wholesale(self) -> None:
+ files = [b"f1", b"f2"]
+ original = {"files": files, "title": "t"}
+ result = deepcopy_with_paths(original, [["files", ""]])
+ assert_different_identities(result, original)
+ result_files = result["files"]
+ assert isinstance(result_files, list)
+ assert_different_identities(result_files, files)
+
+ def test_nested_array_path_copies_list_and_elements(self) -> None:
+ elem1 = {"file": b"f1", "extra": 1}
+ elem2 = {"file": b"f2", "extra": 2}
+ original = {"items": [elem1, elem2]}
+ result = deepcopy_with_paths(original, [["items", "", "file"]])
+ assert_different_identities(result, original)
+ result_items = result["items"]
+ assert isinstance(result_items, list)
+ assert_different_identities(result_items, original["items"])
+ assert_different_identities(result_items[0], elem1)
+ assert_different_identities(result_items[1], elem2)
+
+ def test_empty_paths_returns_same_object(self) -> None:
+ original = {"foo": "bar"}
+ result = deepcopy_with_paths(original, [])
+ assert result is original
+
+ def test_multiple_paths(self) -> None:
+ f1 = b"file1"
+ f2 = b"file2"
+ original = {"a": f1, "b": f2, "c": "unchanged"}
+ result = deepcopy_with_paths(original, [["a"], ["b"]])
+ assert_different_identities(result, original)
+ assert result["a"] is f1
+ assert result["b"] is f2
+ assert result["c"] is original["c"]
+
+ def test_extract_files_does_not_mutate_original_top_level(self) -> None:
+ file_bytes = b"contents"
+ original = {"file": file_bytes, "other": "value"}
+
+ copied = deepcopy_with_paths(original, [["file"]])
+ extracted = extract_files(copied, paths=[["file"]])
+
+ assert extracted == [("file", file_bytes)]
+ assert original == {"file": file_bytes, "other": "value"}
+ assert copied == {"other": "value"}
+
+ def test_extract_files_does_not_mutate_original_nested_array_path(self) -> None:
+ file1 = b"f1"
+ file2 = b"f2"
+ original = {
+ "items": [
+ {"file": file1, "extra": 1},
+ {"file": file2, "extra": 2},
+ ],
+ "title": "example",
+ }
+
+ copied = deepcopy_with_paths(original, [["items", "", "file"]])
+ extracted = extract_files(copied, paths=[["items", "", "file"]])
+
+ assert [entry for _, entry in extracted] == [file1, file2]
+ assert original == {
+ "items": [
+ {"file": file1, "extra": 1},
+ {"file": file2, "extra": 2},
+ ],
+ "title": "example",
+ }
+ assert copied == {
+ "items": [
+ {"extra": 1},
+ {"extra": 2},
+ ],
+ "title": "example",
+ }
diff --git a/tests/test_models.py b/tests/test_models.py
index 1ecdeecf..d65d819a 100644
--- a/tests/test_models.py
+++ b/tests/test_models.py
@@ -1,7 +1,8 @@
import json
-from typing import TYPE_CHECKING, Any, Dict, List, Union, Optional, cast
+from typing import TYPE_CHECKING, Any, Dict, List, Union, Iterable, Optional, cast
from datetime import datetime, timezone
-from typing_extensions import Literal, Annotated, TypeAliasType
+from collections import deque
+from typing_extensions import Literal, Annotated, TypedDict, TypeAliasType
import pytest
import pydantic
@@ -9,7 +10,7 @@
from browserbase._utils import PropertyInfo
from browserbase._compat import PYDANTIC_V1, parse_obj, model_dump, model_json
-from browserbase._models import DISCRIMINATOR_CACHE, BaseModel, construct_type
+from browserbase._models import DISCRIMINATOR_CACHE, BaseModel, EagerIterable, construct_type
class BasicModel(BaseModel):
@@ -961,3 +962,56 @@ def __getattr__(self, attr: str) -> Item: ...
assert model.a.prop == 1
assert isinstance(model.a, Item)
assert model.other == "foo"
+
+
+# NOTE: Workaround for Pydantic Iterable behavior.
+# Iterable fields are replaced with a ValidatorIterator and may be consumed
+# during serialization, which can cause subsequent dumps to return empty data.
+# See: https://github.com/pydantic/pydantic/issues/9541
+@pytest.mark.parametrize(
+ "data, expected_validated",
+ [
+ ([1, 2, 3], [1, 2, 3]),
+ ((1, 2, 3), (1, 2, 3)),
+ (set([1, 2, 3]), set([1, 2, 3])),
+ (iter([1, 2, 3]), [1, 2, 3]),
+ ([], []),
+ ((x for x in [1, 2, 3]), [1, 2, 3]),
+ (map(lambda x: x, [1, 2, 3]), [1, 2, 3]),
+ (frozenset([1, 2, 3]), frozenset([1, 2, 3])),
+ (deque([1, 2, 3]), deque([1, 2, 3])),
+ ],
+ ids=["list", "tuple", "set", "iterator", "empty", "generator", "map", "frozenset", "deque"],
+)
+@pytest.mark.skipif(PYDANTIC_V1, reason="this is only supported in pydantic v2")
+def test_iterable_construction(data: Iterable[int], expected_validated: Iterable[int]) -> None:
+ class TypeWithIterable(TypedDict):
+ items: EagerIterable[int]
+
+ class Model(BaseModel):
+ data: TypeWithIterable
+
+ m = Model.model_validate({"data": {"items": data}})
+ assert m.data["items"] == expected_validated
+
+ # Verify repeated dumps don't lose data (the original bug)
+ assert m.model_dump()["data"]["items"] == list(expected_validated)
+ assert m.model_dump()["data"]["items"] == list(expected_validated)
+
+
+@pytest.mark.skipif(PYDANTIC_V1, reason="this is only supported in pydantic v2")
+def test_iterable_construction_str_falls_back_to_list() -> None:
+ # str is iterable (over chars), but str(list_of_chars) produces the list's repr
+ # rather than reconstructing a string from items. We special-case str to fall
+ # back to list instead of attempting reconstruction.
+ class TypeWithIterable(TypedDict):
+ items: EagerIterable[str]
+
+ class Model(BaseModel):
+ data: TypeWithIterable
+
+ m = Model.model_validate({"data": {"items": "hello"}})
+
+ # falls back to list of chars rather than calling str(["h", "e", "l", "l", "o"])
+ assert m.data["items"] == ["h", "e", "l", "l", "o"]
+ assert m.model_dump()["data"]["items"] == ["h", "e", "l", "l", "o"]