diff --git a/.github/workflows/create-releases.yml b/.github/workflows/create-releases.yml
index f819d07310..f39b4c3e2c 100644
--- a/.github/workflows/create-releases.yml
+++ b/.github/workflows/create-releases.yml
@@ -16,7 +16,7 @@ jobs:
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- - uses: stainless-api/trigger-release-please@bb6677c5a04578eec1ccfd9e1913b5b78ed64c61 # v1
+ - uses: stainless-api/trigger-release-please@bb6677c5a04578eec1ccfd9e1913b5b78ed64c61 # v1.4.0
id: release
with:
repo: ${{ github.event.repository.full_name }}
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 7ef8288ed5..7a1c4674e4 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "2.33.0"
+ ".": "2.34.0"
}
\ No newline at end of file
diff --git a/.stats.yml b/.stats.yml
index 9a106e0f6d..723cea85fe 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
-configured_endpoints: 152
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-64c6ba619ccbf87e56b4f464230d04401fd78ad924d2606176309d19ca281af5.yml
-openapi_spec_hash: 5e4f2073040a12c26ce58e86a72fe47e
-config_hash: 50c98d8869a8cfdee2ab7dc664c4b6fe
+configured_endpoints: 233
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai/openai-21ecab7aeb61612b9da5e52ea4c0cb75a33d443d975022934b9305e97d1a7d62.yml
+openapi_spec_hash: cfc868a0bb3567183510c9b5629c510f
+config_hash: dd484e2cc01206d26516338d0f4596b0
diff --git a/CHANGELOG.md b/CHANGELOG.md
index effb1cc263..237a8c00a4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,51 @@
# Changelog
+## 2.34.0 (2026-05-04)
+
+Full Changelog: [v2.33.0...v2.34.0](https://github.com/openai/openai-python/compare/v2.33.0...v2.34.0)
+
+### Features
+
+* **api:** add external_key_id to projects, email/metadata params to users, update types ([2d232ee](https://github.com/openai/openai-python/commit/2d232eebb2fe021bb21f2576b17d1d588f81a608))
+* **api:** add support for Admin API Keys per endpoint ([b8b176a](https://github.com/openai/openai-python/commit/b8b176af84172f27d2fde8dca062ca4c41f94bf7))
+* **api:** admin API updates ([4ae1138](https://github.com/openai/openai-python/commit/4ae1138ae1f76e81a2267e4deb45b435c10774d5))
+* **api:** manual updates ([c1870f1](https://github.com/openai/openai-python/commit/c1870f1b881bb914e4e62a6c8b08d4c2b9a6fd54))
+* **api:** manual updates ([f6bb9c7](https://github.com/openai/openai-python/commit/f6bb9c7d7bdcc45425d37722358bed097e83d493))
+* support setting headers via env ([1e89d8b](https://github.com/openai/openai-python/commit/1e89d8b56aba12f99a8ef2b1b78fdee84751275a))
+
+
+### Bug Fixes
+
+* allow explicit Azure auth headers ([a0626ba](https://github.com/openai/openai-python/commit/a0626babf0548fb03cf3c2d054da116dd6466701))
+* **api:** correct prompt_cache_retention enum value from in-memory to in_memory ([d47d9f0](https://github.com/openai/openai-python/commit/d47d9f0f79c612c4d14005a0a3cf44e1968c9bff))
+* **api:** preserve python api key attribute type ([62607f6](https://github.com/openai/openai-python/commit/62607f61c542ed559ef114849e31307c0c290286))
+* **api:** resolve python auth type checks ([42a31a7](https://github.com/openai/openai-python/commit/42a31a7efb6784633108c1a73e1779ed79ab8bed))
+* **api:** support admin api key auth ([f029eb9](https://github.com/openai/openai-python/commit/f029eb937f976110c1a67b9342525a38a214072e))
+* avoid bearer fallback for admin auth ([22e01a8](https://github.com/openai/openai-python/commit/22e01a8cf791a143ecc576f46de50eee9b3c2147))
+* preserve selected auth credentials ([0d27f9d](https://github.com/openai/openai-python/commit/0d27f9dbd3b2ae82b2e8c2eeb9e7e78f3edecdf1))
+* require bearer auth for stream helpers ([d055539](https://github.com/openai/openai-python/commit/d0555390bcf4a704c10d318c7de2fe006750c3d0))
+* **types:** correct created_at and completed_at to float in Response ([7da4b88](https://github.com/openai/openai-python/commit/7da4b88c1985028f7ee9a98b919e71f863f979f0))
+* **types:** correct timestamp types to int in Response model ([e55631c](https://github.com/openai/openai-python/commit/e55631c868b1d0b720fda0abdbc342787cd95e2c))
+* use correct field name format for multipart file arrays ([9ee4825](https://github.com/openai/openai-python/commit/9ee482576c2bd6b33b6cf7458c37ab2e7d5bc725))
+
+
+### Performance Improvements
+
+* **client:** optimize file structure copying in multipart requests ([dca474e](https://github.com/openai/openai-python/commit/dca474e5beac7cc8e05855f042c3227843030c1b))
+
+
+### Chores
+
+* **internal:** more robust bootstrap script ([9ec1600](https://github.com/openai/openai-python/commit/9ec1600d48fda10abb144b2a62d07c5abd7e9ab1))
+* **internal:** reformat pyproject.toml ([12ad57b](https://github.com/openai/openai-python/commit/12ad57b8da5b5c0615641af273d4bbf2981d6bf7))
+* **tests:** bump steady to v0.22.1 ([486dfed](https://github.com/openai/openai-python/commit/486dfedfec8484bb00318b0ea798c2260f7a720c))
+
+
+### Documentation
+
+* **api:** add rate limit and vector store info to files create ([4f776df](https://github.com/openai/openai-python/commit/4f776df78d757fdbf25662c4be98b5c98183aaaf))
+* **api:** update files rate limit documentation ([b141a20](https://github.com/openai/openai-python/commit/b141a20e948b5af3b8fbe4261798c191d2857b4a))
+
## 2.33.0 (2026-04-28)
Full Changelog: [v2.32.0...v2.33.0](https://github.com/openai/openai-python/compare/v2.32.0...v2.33.0)
diff --git a/api.md b/api.md
index decf4e0129..e04636937c 100644
--- a/api.md
+++ b/api.md
@@ -707,6 +707,370 @@ Methods:
- client.uploads.parts.create(upload_id, \*\*params) -> UploadPart
+# Admin
+
+## Organization
+
+### AuditLogs
+
+Types:
+
+```python
+from openai.types.admin.organization import AuditLogListResponse
+```
+
+Methods:
+
+- client.admin.organization.audit_logs.list(\*\*params) -> SyncConversationCursorPage[AuditLogListResponse]
+
+### AdminAPIKeys
+
+Types:
+
+```python
+from openai.types.admin.organization import (
+ AdminAPIKey,
+ AdminAPIKeyCreateResponse,
+ AdminAPIKeyDeleteResponse,
+)
+```
+
+Methods:
+
+- client.admin.organization.admin_api_keys.create(\*\*params) -> AdminAPIKeyCreateResponse
+- client.admin.organization.admin_api_keys.retrieve(key_id) -> AdminAPIKey
+- client.admin.organization.admin_api_keys.list(\*\*params) -> SyncCursorPage[AdminAPIKey]
+- client.admin.organization.admin_api_keys.delete(key_id) -> AdminAPIKeyDeleteResponse
+
+### Usage
+
+Types:
+
+```python
+from openai.types.admin.organization import (
+ UsageAudioSpeechesResponse,
+ UsageAudioTranscriptionsResponse,
+ UsageCodeInterpreterSessionsResponse,
+ UsageCompletionsResponse,
+ UsageCostsResponse,
+ UsageEmbeddingsResponse,
+ UsageImagesResponse,
+ UsageModerationsResponse,
+ UsageVectorStoresResponse,
+)
+```
+
+Methods:
+
+- client.admin.organization.usage.audio_speeches(\*\*params) -> UsageAudioSpeechesResponse
+- client.admin.organization.usage.audio_transcriptions(\*\*params) -> UsageAudioTranscriptionsResponse
+- client.admin.organization.usage.code_interpreter_sessions(\*\*params) -> UsageCodeInterpreterSessionsResponse
+- client.admin.organization.usage.completions(\*\*params) -> UsageCompletionsResponse
+- client.admin.organization.usage.costs(\*\*params) -> UsageCostsResponse
+- client.admin.organization.usage.embeddings(\*\*params) -> UsageEmbeddingsResponse
+- client.admin.organization.usage.images(\*\*params) -> UsageImagesResponse
+- client.admin.organization.usage.moderations(\*\*params) -> UsageModerationsResponse
+- client.admin.organization.usage.vector_stores(\*\*params) -> UsageVectorStoresResponse
+
+### Invites
+
+Types:
+
+```python
+from openai.types.admin.organization import Invite, InviteDeleteResponse
+```
+
+Methods:
+
+- client.admin.organization.invites.create(\*\*params) -> Invite
+- client.admin.organization.invites.retrieve(invite_id) -> Invite
+- client.admin.organization.invites.list(\*\*params) -> SyncConversationCursorPage[Invite]
+- client.admin.organization.invites.delete(invite_id) -> InviteDeleteResponse
+
+### Users
+
+Types:
+
+```python
+from openai.types.admin.organization import OrganizationUser, UserDeleteResponse
+```
+
+Methods:
+
+- client.admin.organization.users.retrieve(user_id) -> OrganizationUser
+- client.admin.organization.users.update(user_id, \*\*params) -> OrganizationUser
+- client.admin.organization.users.list(\*\*params) -> SyncConversationCursorPage[OrganizationUser]
+- client.admin.organization.users.delete(user_id) -> UserDeleteResponse
+
+#### Roles
+
+Types:
+
+```python
+from openai.types.admin.organization.users import (
+ RoleCreateResponse,
+ RoleListResponse,
+ RoleDeleteResponse,
+)
+```
+
+Methods:
+
+- client.admin.organization.users.roles.create(user_id, \*\*params) -> RoleCreateResponse
+- client.admin.organization.users.roles.list(user_id, \*\*params) -> SyncNextCursorPage[RoleListResponse]
+- client.admin.organization.users.roles.delete(role_id, \*, user_id) -> RoleDeleteResponse
+
+### Groups
+
+Types:
+
+```python
+from openai.types.admin.organization import Group, GroupUpdateResponse, GroupDeleteResponse
+```
+
+Methods:
+
+- client.admin.organization.groups.create(\*\*params) -> Group
+- client.admin.organization.groups.update(group_id, \*\*params) -> GroupUpdateResponse
+- client.admin.organization.groups.list(\*\*params) -> SyncNextCursorPage[Group]
+- client.admin.organization.groups.delete(group_id) -> GroupDeleteResponse
+
+#### Users
+
+Types:
+
+```python
+from openai.types.admin.organization.groups import (
+ OrganizationGroupUser,
+ UserCreateResponse,
+ UserDeleteResponse,
+)
+```
+
+Methods:
+
+- client.admin.organization.groups.users.create(group_id, \*\*params) -> UserCreateResponse
+- client.admin.organization.groups.users.list(group_id, \*\*params) -> SyncNextCursorPage[OrganizationGroupUser]
+- client.admin.organization.groups.users.delete(user_id, \*, group_id) -> UserDeleteResponse
+
+#### Roles
+
+Types:
+
+```python
+from openai.types.admin.organization.groups import (
+ RoleCreateResponse,
+ RoleListResponse,
+ RoleDeleteResponse,
+)
+```
+
+Methods:
+
+- client.admin.organization.groups.roles.create(group_id, \*\*params) -> RoleCreateResponse
+- client.admin.organization.groups.roles.list(group_id, \*\*params) -> SyncNextCursorPage[RoleListResponse]
+- client.admin.organization.groups.roles.delete(role_id, \*, group_id) -> RoleDeleteResponse
+
+### Roles
+
+Types:
+
+```python
+from openai.types.admin.organization import Role, RoleDeleteResponse
+```
+
+Methods:
+
+- client.admin.organization.roles.create(\*\*params) -> Role
+- client.admin.organization.roles.update(role_id, \*\*params) -> Role
+- client.admin.organization.roles.list(\*\*params) -> SyncNextCursorPage[Role]
+- client.admin.organization.roles.delete(role_id) -> RoleDeleteResponse
+
+### Certificates
+
+Types:
+
+```python
+from openai.types.admin.organization import (
+ Certificate,
+ CertificateListResponse,
+ CertificateDeleteResponse,
+ CertificateActivateResponse,
+ CertificateDeactivateResponse,
+)
+```
+
+Methods:
+
+- client.admin.organization.certificates.create(\*\*params) -> Certificate
+- client.admin.organization.certificates.retrieve(certificate_id, \*\*params) -> Certificate
+- client.admin.organization.certificates.update(certificate_id, \*\*params) -> Certificate
+- client.admin.organization.certificates.list(\*\*params) -> SyncConversationCursorPage[CertificateListResponse]
+- client.admin.organization.certificates.delete(certificate_id) -> CertificateDeleteResponse
+- client.admin.organization.certificates.activate(\*\*params) -> SyncPage[CertificateActivateResponse]
+- client.admin.organization.certificates.deactivate(\*\*params) -> SyncPage[CertificateDeactivateResponse]
+
+### Projects
+
+Types:
+
+```python
+from openai.types.admin.organization import Project
+```
+
+Methods:
+
+- client.admin.organization.projects.create(\*\*params) -> Project
+- client.admin.organization.projects.retrieve(project_id) -> Project
+- client.admin.organization.projects.update(project_id, \*\*params) -> Project
+- client.admin.organization.projects.list(\*\*params) -> SyncConversationCursorPage[Project]
+- client.admin.organization.projects.archive(project_id) -> Project
+
+#### Users
+
+Types:
+
+```python
+from openai.types.admin.organization.projects import ProjectUser, UserDeleteResponse
+```
+
+Methods:
+
+- client.admin.organization.projects.users.create(project_id, \*\*params) -> ProjectUser
+- client.admin.organization.projects.users.retrieve(user_id, \*, project_id) -> ProjectUser
+- client.admin.organization.projects.users.update(user_id, \*, project_id, \*\*params) -> ProjectUser
+- client.admin.organization.projects.users.list(project_id, \*\*params) -> SyncConversationCursorPage[ProjectUser]
+- client.admin.organization.projects.users.delete(user_id, \*, project_id) -> UserDeleteResponse
+
+##### Roles
+
+Types:
+
+```python
+from openai.types.admin.organization.projects.users import (
+ RoleCreateResponse,
+ RoleListResponse,
+ RoleDeleteResponse,
+)
+```
+
+Methods:
+
+- client.admin.organization.projects.users.roles.create(user_id, \*, project_id, \*\*params) -> RoleCreateResponse
+- client.admin.organization.projects.users.roles.list(user_id, \*, project_id, \*\*params) -> SyncNextCursorPage[RoleListResponse]
+- client.admin.organization.projects.users.roles.delete(role_id, \*, project_id, user_id) -> RoleDeleteResponse
+
+#### ServiceAccounts
+
+Types:
+
+```python
+from openai.types.admin.organization.projects import (
+ ProjectServiceAccount,
+ ServiceAccountCreateResponse,
+ ServiceAccountDeleteResponse,
+)
+```
+
+Methods:
+
+- client.admin.organization.projects.service_accounts.create(project_id, \*\*params) -> ServiceAccountCreateResponse
+- client.admin.organization.projects.service_accounts.retrieve(service_account_id, \*, project_id) -> ProjectServiceAccount
+- client.admin.organization.projects.service_accounts.list(project_id, \*\*params) -> SyncConversationCursorPage[ProjectServiceAccount]
+- client.admin.organization.projects.service_accounts.delete(service_account_id, \*, project_id) -> ServiceAccountDeleteResponse
+
+#### APIKeys
+
+Types:
+
+```python
+from openai.types.admin.organization.projects import ProjectAPIKey, APIKeyDeleteResponse
+```
+
+Methods:
+
+- client.admin.organization.projects.api_keys.retrieve(api_key_id, \*, project_id) -> ProjectAPIKey
+- client.admin.organization.projects.api_keys.list(project_id, \*\*params) -> SyncConversationCursorPage[ProjectAPIKey]
+- client.admin.organization.projects.api_keys.delete(api_key_id, \*, project_id) -> APIKeyDeleteResponse
+
+#### RateLimits
+
+Types:
+
+```python
+from openai.types.admin.organization.projects import ProjectRateLimit
+```
+
+Methods:
+
+- client.admin.organization.projects.rate_limits.list_rate_limits(project_id, \*\*params) -> SyncConversationCursorPage[ProjectRateLimit]
+- client.admin.organization.projects.rate_limits.update_rate_limit(rate_limit_id, \*, project_id, \*\*params) -> ProjectRateLimit
+
+#### Groups
+
+Types:
+
+```python
+from openai.types.admin.organization.projects import ProjectGroup, GroupDeleteResponse
+```
+
+Methods:
+
+- client.admin.organization.projects.groups.create(project_id, \*\*params) -> ProjectGroup
+- client.admin.organization.projects.groups.list(project_id, \*\*params) -> SyncNextCursorPage[ProjectGroup]
+- client.admin.organization.projects.groups.delete(group_id, \*, project_id) -> GroupDeleteResponse
+
+##### Roles
+
+Types:
+
+```python
+from openai.types.admin.organization.projects.groups import (
+ RoleCreateResponse,
+ RoleListResponse,
+ RoleDeleteResponse,
+)
+```
+
+Methods:
+
+- client.admin.organization.projects.groups.roles.create(group_id, \*, project_id, \*\*params) -> RoleCreateResponse
+- client.admin.organization.projects.groups.roles.list(group_id, \*, project_id, \*\*params) -> SyncNextCursorPage[RoleListResponse]
+- client.admin.organization.projects.groups.roles.delete(role_id, \*, project_id, group_id) -> RoleDeleteResponse
+
+#### Roles
+
+Types:
+
+```python
+from openai.types.admin.organization.projects import RoleDeleteResponse
+```
+
+Methods:
+
+- client.admin.organization.projects.roles.create(project_id, \*\*params) -> Role
+- client.admin.organization.projects.roles.update(role_id, \*, project_id, \*\*params) -> Role
+- client.admin.organization.projects.roles.list(project_id, \*\*params) -> SyncNextCursorPage[Role]
+- client.admin.organization.projects.roles.delete(role_id, \*, project_id) -> RoleDeleteResponse
+
+#### Certificates
+
+Types:
+
+```python
+from openai.types.admin.organization.projects import (
+ CertificateListResponse,
+ CertificateActivateResponse,
+ CertificateDeactivateResponse,
+)
+```
+
+Methods:
+
+- client.admin.organization.projects.certificates.list(project_id, \*\*params) -> SyncConversationCursorPage[CertificateListResponse]
+- client.admin.organization.projects.certificates.activate(project_id, \*\*params) -> SyncPage[CertificateActivateResponse]
+- client.admin.organization.projects.certificates.deactivate(project_id, \*\*params) -> SyncPage[CertificateDeactivateResponse]
+
# [Responses](src/openai/resources/responses/api.md)
# [Realtime](src/openai/resources/realtime/api.md)
diff --git a/pyproject.toml b/pyproject.toml
index b2f4dd11cb..7fbf3bd49b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "openai"
-version = "2.33.0"
+version = "2.34.0"
description = "The official Python library for the openai API"
dynamic = ["readme"]
license = "Apache-2.0"
diff --git a/scripts/bootstrap b/scripts/bootstrap
index 953993addb..3e25ebec99 100755
--- a/scripts/bootstrap
+++ b/scripts/bootstrap
@@ -4,7 +4,7 @@ set -e
cd "$(dirname "$0")/.."
-if [ -f "Brewfile" ] && [ "$(uname -s)" = "Darwin" ] && [ "$SKIP_BREW" != "1" ] && [ -t 0 ]; then
+if [ -f "Brewfile" ] && [ "$(uname -s)" = "Darwin" ] && [ "${SKIP_BREW:-}" != "1" ] && [ -t 0 ]; then
brew bundle check >/dev/null 2>&1 || {
echo -n "==> Install Homebrew dependencies? (y/N): "
read -r response
diff --git a/scripts/mock b/scripts/mock
index 886f2ffc14..04d29019fc 100755
--- a/scripts/mock
+++ b/scripts/mock
@@ -22,9 +22,9 @@ echo "==> Starting mock server with URL ${URL}"
# Run steady mock on the given spec
if [ "$1" == "--daemon" ]; then
# Pre-install the package so the download doesn't eat into the startup timeout
- npm exec --package=@stdy/cli@0.20.2 -- steady --version
+ npm exec --package=@stdy/cli@0.22.1 -- steady --version
- npm exec --package=@stdy/cli@0.20.2 -- steady --host 127.0.0.1 -p 4010 --validator-query-array-format=brackets --validator-form-array-format=brackets --validator-query-object-format=brackets --validator-form-object-format=brackets "$URL" &> .stdy.log &
+ npm exec --package=@stdy/cli@0.22.1 -- steady --host 127.0.0.1 -p 4010 --validator-query-array-format=brackets --validator-form-array-format=brackets --validator-query-object-format=brackets --validator-form-object-format=brackets "$URL" &> .stdy.log &
# Wait for server to come online via health endpoint (max 30s)
echo -n "Waiting for server"
@@ -48,5 +48,5 @@ if [ "$1" == "--daemon" ]; then
echo
else
- npm exec --package=@stdy/cli@0.20.2 -- steady --host 127.0.0.1 -p 4010 --validator-query-array-format=brackets --validator-form-array-format=brackets --validator-query-object-format=brackets --validator-form-object-format=brackets "$URL"
+ npm exec --package=@stdy/cli@0.22.1 -- steady --host 127.0.0.1 -p 4010 --validator-query-array-format=brackets --validator-form-array-format=brackets --validator-query-object-format=brackets --validator-form-object-format=brackets "$URL"
fi
diff --git a/scripts/test b/scripts/test
index 57cabda6ae..7b05e44fd9 100755
--- a/scripts/test
+++ b/scripts/test
@@ -43,7 +43,7 @@ elif ! steady_is_running ; then
echo -e "To run the server, pass in the path or url of your OpenAPI"
echo -e "spec to the steady command:"
echo
- echo -e " \$ ${YELLOW}npm exec --package=@stdy/cli@0.20.2 -- steady path/to/your.openapi.yml --host 127.0.0.1 -p 4010 --validator-query-array-format=brackets --validator-form-array-format=brackets --validator-query-object-format=brackets --validator-form-object-format=brackets${NC}"
+ echo -e " \$ ${YELLOW}npm exec --package=@stdy/cli@0.22.1 -- steady path/to/your.openapi.yml --host 127.0.0.1 -p 4010 --validator-query-array-format=brackets --validator-form-array-format=brackets --validator-query-object-format=brackets --validator-form-object-format=brackets${NC}"
echo
exit 1
diff --git a/src/openai/__init__.py b/src/openai/__init__.py
index fc9675a8b5..cbaef0615f 100644
--- a/src/openai/__init__.py
+++ b/src/openai/__init__.py
@@ -130,6 +130,8 @@
api_key: str | None = None
+admin_api_key: str | None = None
+
organization: str | None = None
project: str | None = None
@@ -176,6 +178,17 @@ def api_key(self, value: str | None) -> None: # type: ignore
api_key = value
+ @property # type: ignore
+ @override
+ def admin_api_key(self) -> str | None:
+ return admin_api_key
+
+ @admin_api_key.setter # type: ignore
+ def admin_api_key(self, value: str | None) -> None: # type: ignore
+ global admin_api_key
+
+ admin_api_key = value
+
@property # type: ignore
@override
def organization(self) -> str | None:
@@ -359,6 +372,7 @@ def _load_client() -> OpenAI: # type: ignore[reportUnusedFunction]
_client = _ModuleClient(
api_key=api_key,
+ admin_api_key=admin_api_key,
organization=organization,
project=project,
webhook_secret=webhook_secret,
@@ -368,6 +382,7 @@ def _load_client() -> OpenAI: # type: ignore[reportUnusedFunction]
default_headers=default_headers,
default_query=default_query,
http_client=http_client,
+ _enforce_credentials=False,
)
return _client
@@ -383,6 +398,7 @@ def _reset_client() -> None: # type: ignore[reportUnusedFunction]
from ._module_client import (
beta as beta,
chat as chat,
+ admin as admin,
audio as audio,
evals as evals,
files as files,
diff --git a/src/openai/_base_client.py b/src/openai/_base_client.py
index a1d0960700..216b36aabd 100644
--- a/src/openai/_base_client.py
+++ b/src/openai/_base_client.py
@@ -63,7 +63,7 @@
)
from ._utils import SensitiveHeadersFilter, is_dict, is_list, asyncify, is_given, lru_cache, is_mapping
from ._compat import PYDANTIC_V1, model_copy, model_dump
-from ._models import GenericModel, FinalRequestOptions, validate_type, construct_type
+from ._models import GenericModel, SecurityOptions, FinalRequestOptions, validate_type, construct_type
from ._response import (
APIResponse,
BaseAPIResponse,
@@ -435,9 +435,27 @@ def _make_status_error(
) -> _exceptions.APIStatusError:
raise NotImplementedError()
+ def _auth_headers(
+ self,
+ security: SecurityOptions, # noqa: ARG002
+ ) -> dict[str, str]:
+ return {}
+
+ def _auth_query(
+ self,
+ security: SecurityOptions, # noqa: ARG002
+ ) -> dict[str, str]:
+ return {}
+
+ def _custom_auth(
+ self,
+ security: SecurityOptions, # noqa: ARG002
+ ) -> httpx.Auth | None:
+ return None
+
def _build_headers(self, options: FinalRequestOptions, *, retries_taken: int = 0) -> httpx.Headers:
custom_headers = options.headers or {}
- headers_dict = _merge_mappings(self.default_headers, custom_headers)
+ headers_dict = _merge_mappings({**self._auth_headers(options.security), **self.default_headers}, custom_headers)
self._validate_headers(headers_dict, custom_headers)
# headers are case-insensitive while dictionaries are not.
@@ -509,7 +527,7 @@ def _build_request(
raise RuntimeError(f"Unexpected JSON data type, {type(json_data)}, cannot merge with `extra_body`")
headers = self._build_headers(options, retries_taken=retries_taken)
- params = _merge_mappings(self.default_query, options.params)
+ params = _merge_mappings({**self._auth_query(options.security), **self.default_query}, options.params)
content_type = headers.get("Content-Type")
files = options.files
@@ -678,7 +696,6 @@ def default_headers(self) -> dict[str, str | Omit]:
"Content-Type": "application/json",
"User-Agent": self.user_agent,
**self.platform_headers(),
- **self.auth_headers,
**self._custom_headers,
}
@@ -1006,8 +1023,9 @@ def request(
self._prepare_request(request)
kwargs: HttpxSendArgs = {}
- if self.custom_auth is not None:
- kwargs["auth"] = self.custom_auth
+ custom_auth = self._custom_auth(options.security)
+ if custom_auth is not None:
+ kwargs["auth"] = custom_auth
if options.follow_redirects is not None:
kwargs["follow_redirects"] = options.follow_redirects
@@ -2013,6 +2031,7 @@ def make_request_options(
idempotency_key: str | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
post_parser: PostParser | NotGiven = not_given,
+ security: SecurityOptions | None = None,
synthesize_event_and_data: bool | None = None,
) -> RequestOptions:
"""Create a dict of type RequestOptions without keys of NotGiven values."""
@@ -2039,6 +2058,9 @@ def make_request_options(
# internal
options["post_parser"] = post_parser # type: ignore
+ if security is not None:
+ options["security"] = security
+
if synthesize_event_and_data is not None:
options["synthesize_event_and_data"] = synthesize_event_and_data
diff --git a/src/openai/_client.py b/src/openai/_client.py
index 434f957e19..499a62dfe5 100644
--- a/src/openai/_client.py
+++ b/src/openai/_client.py
@@ -13,6 +13,7 @@
from .auth import WorkloadIdentity, WorkloadIdentityAuth
from ._types import (
Omit,
+ Headers,
Timeout,
NotGiven,
Transport,
@@ -24,10 +25,11 @@
from ._utils import (
is_given,
is_mapping,
+ is_mapping_t,
get_async_library,
)
from ._compat import cached_property
-from ._models import FinalRequestOptions
+from ._models import SecurityOptions, FinalRequestOptions
from ._version import __version__
from ._streaming import Stream as Stream, AsyncStream as AsyncStream
from ._exceptions import OpenAIError, APIStatusError
@@ -41,6 +43,7 @@
from .resources import (
beta,
chat,
+ admin,
audio,
evals,
files,
@@ -68,6 +71,7 @@
from .resources.beta.beta import Beta, AsyncBeta
from .resources.chat.chat import Chat, AsyncChat
from .resources.embeddings import Embeddings, AsyncEmbeddings
+ from .resources.admin.admin import Admin, AsyncAdmin
from .resources.audio.audio import Audio, AsyncAudio
from .resources.completions import Completions, AsyncCompletions
from .resources.evals.evals import Evals, AsyncEvals
@@ -87,9 +91,20 @@
WORKLOAD_IDENTITY_API_KEY_PLACEHOLDER = "workload-identity-auth"
+def _has_header(headers: Headers, header: str) -> bool:
+ header = header.lower()
+ return any(key.lower() == header for key in headers)
+
+
+def _has_omitted_header(headers: Headers, header: str) -> bool:
+ header = header.lower()
+ return any(key.lower() == header and isinstance(value, Omit) for key, value in headers.items())
+
+
class OpenAI(SyncAPIClient):
# client options
api_key: str
+ admin_api_key: str | None
workload_identity: WorkloadIdentity | None
organization: str | None
project: str | None
@@ -107,7 +122,8 @@ class OpenAI(SyncAPIClient):
def __init__(
self,
*,
- api_key: str | None | Callable[[], str] = None,
+ api_key: str | Callable[[], str] | None = None,
+ admin_api_key: str | None = None,
workload_identity: WorkloadIdentity | None = None,
organization: str | None = None,
project: str | None = None,
@@ -131,11 +147,13 @@ def __init__(
# outlining your use-case to help us decide if it should be
# part of our public interface in the future.
_strict_response_validation: bool = False,
+ _enforce_credentials: bool = True,
) -> None:
"""Construct a new synchronous OpenAI client instance.
This automatically infers the following arguments from their corresponding environment variables if they are not provided:
- `api_key` from `OPENAI_API_KEY`
+ - `admin_api_key` from `OPENAI_ADMIN_KEY`
- `organization` from `OPENAI_ORG_ID`
- `project` from `OPENAI_PROJECT_ID`
- `webhook_secret` from `OPENAI_WEBHOOK_SECRET`
@@ -154,18 +172,29 @@ def __init__(
else:
if api_key is None:
api_key = os.environ.get("OPENAI_API_KEY")
- if api_key is None:
- raise OpenAIError(
- "The api_key client option must be set either by passing api_key to the client or by setting the OPENAI_API_KEY environment variable"
- )
if callable(api_key):
self.api_key = ""
self._api_key_provider: Callable[[], str] | None = api_key # type: ignore[no-redef]
else:
- self.api_key = api_key
+ self.api_key = api_key or ""
self._api_key_provider = None
self._workload_identity_auth = None
+ if admin_api_key is None:
+ admin_api_key = os.environ.get("OPENAI_ADMIN_KEY")
+ self.admin_api_key = admin_api_key
+
+ if (
+ _enforce_credentials
+ and not self.api_key
+ and self._api_key_provider is None
+ and workload_identity is None
+ and self.admin_api_key is None
+ ):
+ raise OpenAIError(
+ "Missing credentials. Please pass an `api_key`, `workload_identity`, `admin_api_key`, or set the `OPENAI_API_KEY` or `OPENAI_ADMIN_KEY` environment variable."
+ )
+
if organization is None:
organization = os.environ.get("OPENAI_ORG_ID")
self.organization = organization
@@ -185,6 +214,15 @@ def __init__(
if base_url is None:
base_url = f"https://api.openai.com/v1"
+ custom_headers_env = os.environ.get("OPENAI_CUSTOM_HEADERS")
+ if custom_headers_env is not None:
+ parsed: dict[str, str] = {}
+ for line in custom_headers_env.split("\n"):
+ colon = line.find(":")
+ if colon >= 0:
+ parsed[line[:colon].strip()] = line[colon + 1 :].strip()
+ default_headers = {**parsed, **(default_headers if is_mapping_t(default_headers) else {})}
+
super().__init__(
version=__version__,
base_url=base_url,
@@ -298,6 +336,12 @@ def uploads(self) -> Uploads:
return Uploads(self)
+ @cached_property
+ def admin(self) -> Admin:
+ from .resources.admin import Admin
+
+ return Admin(self)
+
@cached_property
def responses(self) -> Responses:
from .resources.responses import Responses
@@ -355,15 +399,6 @@ def with_streaming_response(self) -> OpenAIWithStreamedResponse:
def qs(self) -> Querystring:
return Querystring(array_format="brackets")
- def _refresh_api_key(self) -> None:
- if self._api_key_provider:
- self.api_key = self._api_key_provider()
-
- @override
- def _prepare_options(self, options: FinalRequestOptions) -> FinalRequestOptions:
- self._refresh_api_key()
- return super()._prepare_options(options)
-
def _send_with_auth_retry(
self,
request: httpx.Request,
@@ -372,19 +407,24 @@ def _send_with_auth_retry(
retried: bool = False,
**kwargs: Unpack[HttpxSendArgs],
) -> httpx.Response:
- if self._workload_identity_auth:
- request.headers["Authorization"] = f"Bearer {self._workload_identity_auth.get_token()}"
+ used_workload_identity_auth = False
- response = super()._send_request(request, stream=stream, **kwargs)
+ if self._workload_identity_auth is not None:
+ authorization = request.headers.get("Authorization")
+ if authorization == f"Bearer {WORKLOAD_IDENTITY_API_KEY_PLACEHOLDER}":
+ request.headers["Authorization"] = f"Bearer {self._workload_identity_auth.get_token()}"
+ used_workload_identity_auth = True
- if not retried and response.status_code == 401 and self._workload_identity_auth:
+ response = super()._send_request(request, stream=stream, **kwargs)
+ if (
+ response.status_code == 401
+ and self._workload_identity_auth is not None
+ and used_workload_identity_auth
+ and not retried
+ ):
response.close()
-
self._workload_identity_auth.invalidate_token()
- fresh_token = self._workload_identity_auth.get_token()
-
- request.headers["Authorization"] = f"Bearer {fresh_token}"
-
+ request.headers["Authorization"] = f"Bearer {self._workload_identity_auth.get_token()}"
return self._send_with_auth_retry(request, stream=stream, retried=True, **kwargs)
return response
@@ -399,15 +439,40 @@ def _send_request(
) -> httpx.Response:
return self._send_with_auth_retry(request, stream=stream, **kwargs)
+ @override
+ def _auth_headers(self, security: SecurityOptions) -> dict[str, str]:
+ if security.get("bearer_auth", False):
+ headers = self._bearer_auth
+ if headers:
+ return headers
+
+ if security.get("admin_api_key_auth", False):
+ return self._admin_api_key_auth
+
+ return {}
+
+ @property
+ def _bearer_auth(self) -> dict[str, str]:
+ api_key = self.api_key
+ if not api_key:
+ return {}
+ return {"Authorization": f"Bearer {api_key}"}
+
@property
@override
def auth_headers(self) -> dict[str, str]:
api_key = self.api_key
if not api_key or api_key == WORKLOAD_IDENTITY_API_KEY_PLACEHOLDER:
- # if the api key is an empty string, encoding the header will fail
return {}
return {"Authorization": f"Bearer {api_key}"}
+ @property
+ def _admin_api_key_auth(self) -> dict[str, str]:
+ admin_api_key = self.admin_api_key
+ if admin_api_key is None:
+ return {}
+ return {"Authorization": f"Bearer {admin_api_key}"}
+
@property
@override
def default_headers(self) -> dict[str, str | Omit]:
@@ -419,10 +484,33 @@ def default_headers(self) -> dict[str, str | Omit]:
**self._custom_headers,
}
+ @override
+ def _validate_headers(self, headers: Headers, custom_headers: Headers) -> None:
+ if _has_header(headers, "Authorization") or _has_omitted_header(custom_headers, "Authorization"):
+ return
+
+ raise TypeError(
+ '"Could not resolve authentication method. Expected either api_key or admin_api_key to be set. Or for the `Authorization` header to be explicitly omitted"'
+ )
+
+ @override
+ def _prepare_options(self, options: FinalRequestOptions) -> FinalRequestOptions:
+ if self._api_key_provider is not None and options.security.get("bearer_auth", False):
+ self._refresh_api_key()
+
+ return super()._prepare_options(options)
+
+ def _refresh_api_key(self) -> str:
+ if self._api_key_provider is not None:
+ self.api_key = self._api_key_provider()
+
+ return self.api_key
+
def copy(
self,
*,
api_key: str | Callable[[], str] | None = None,
+ admin_api_key: str | None = None,
workload_identity: WorkloadIdentity | None = None,
organization: str | None = None,
project: str | None = None,
@@ -436,6 +524,7 @@ def copy(
set_default_headers: Mapping[str, str] | None = None,
default_query: Mapping[str, object] | None = None,
set_default_query: Mapping[str, object] | None = None,
+ _enforce_credentials: bool | None = None,
_extra_kwargs: Mapping[str, Any] = {},
) -> Self:
"""
@@ -463,6 +552,7 @@ def copy(
return self.__class__(
api_key=api_key or self._api_key_provider or self.api_key,
+ admin_api_key=admin_api_key or self.admin_api_key,
workload_identity=workload_identity or self.workload_identity,
organization=organization or self.organization,
project=project or self.project,
@@ -474,6 +564,7 @@ def copy(
max_retries=max_retries if is_given(max_retries) else self.max_retries,
default_headers=headers,
default_query=params,
+ _enforce_credentials=True if _enforce_credentials is None else _enforce_credentials,
**_extra_kwargs,
)
@@ -519,6 +610,7 @@ def _make_status_error(
class AsyncOpenAI(AsyncAPIClient):
# client options
api_key: str
+ admin_api_key: str | None
workload_identity: WorkloadIdentity | None
organization: str | None
project: str | None
@@ -536,7 +628,8 @@ class AsyncOpenAI(AsyncAPIClient):
def __init__(
self,
*,
- api_key: str | None | Callable[[], Awaitable[str]] = None,
+ api_key: str | Callable[[], Awaitable[str]] | None = None,
+ admin_api_key: str | None = None,
workload_identity: WorkloadIdentity | None = None,
organization: str | None = None,
project: str | None = None,
@@ -560,11 +653,13 @@ def __init__(
# outlining your use-case to help us decide if it should be
# part of our public interface in the future.
_strict_response_validation: bool = False,
+ _enforce_credentials: bool = True,
) -> None:
"""Construct a new async AsyncOpenAI client instance.
This automatically infers the following arguments from their corresponding environment variables if they are not provided:
- `api_key` from `OPENAI_API_KEY`
+ - `admin_api_key` from `OPENAI_ADMIN_KEY`
- `organization` from `OPENAI_ORG_ID`
- `project` from `OPENAI_PROJECT_ID`
- `webhook_secret` from `OPENAI_WEBHOOK_SECRET`
@@ -583,18 +678,29 @@ def __init__(
else:
if api_key is None:
api_key = os.environ.get("OPENAI_API_KEY")
- if api_key is None:
- raise OpenAIError(
- "The api_key client option must be set either by passing api_key to the client or by setting the OPENAI_API_KEY environment variable"
- )
if callable(api_key):
self.api_key = ""
self._api_key_provider: Callable[[], Awaitable[str]] | None = api_key # type: ignore[no-redef]
else:
- self.api_key = api_key
+ self.api_key = api_key or ""
self._api_key_provider = None
self._workload_identity_auth = None
+ if admin_api_key is None:
+ admin_api_key = os.environ.get("OPENAI_ADMIN_KEY")
+ self.admin_api_key = admin_api_key
+
+ if (
+ _enforce_credentials
+ and not self.api_key
+ and self._api_key_provider is None
+ and workload_identity is None
+ and self.admin_api_key is None
+ ):
+ raise OpenAIError(
+ "Missing credentials. Please pass an `api_key`, `workload_identity`, `admin_api_key`, or set the `OPENAI_API_KEY` or `OPENAI_ADMIN_KEY` environment variable."
+ )
+
if organization is None:
organization = os.environ.get("OPENAI_ORG_ID")
self.organization = organization
@@ -614,6 +720,15 @@ def __init__(
if base_url is None:
base_url = f"https://api.openai.com/v1"
+ custom_headers_env = os.environ.get("OPENAI_CUSTOM_HEADERS")
+ if custom_headers_env is not None:
+ parsed: dict[str, str] = {}
+ for line in custom_headers_env.split("\n"):
+ colon = line.find(":")
+ if colon >= 0:
+ parsed[line[:colon].strip()] = line[colon + 1 :].strip()
+ default_headers = {**parsed, **(default_headers if is_mapping_t(default_headers) else {})}
+
super().__init__(
version=__version__,
base_url=base_url,
@@ -727,6 +842,12 @@ def uploads(self) -> AsyncUploads:
return AsyncUploads(self)
+ @cached_property
+ def admin(self) -> AsyncAdmin:
+ from .resources.admin import AsyncAdmin
+
+ return AsyncAdmin(self)
+
@cached_property
def responses(self) -> AsyncResponses:
from .resources.responses import AsyncResponses
@@ -784,15 +905,6 @@ def with_streaming_response(self) -> AsyncOpenAIWithStreamedResponse:
def qs(self) -> Querystring:
return Querystring(array_format="brackets")
- async def _refresh_api_key(self) -> None:
- if self._api_key_provider:
- self.api_key = await self._api_key_provider()
-
- @override
- async def _prepare_options(self, options: FinalRequestOptions) -> FinalRequestOptions:
- await self._refresh_api_key()
- return await super()._prepare_options(options)
-
async def _send_with_auth_retry(
self,
request: httpx.Request,
@@ -801,19 +913,24 @@ async def _send_with_auth_retry(
retried: bool = False,
**kwargs: Unpack[HttpxSendArgs],
) -> httpx.Response:
- if self._workload_identity_auth:
- request.headers["Authorization"] = f"Bearer {await self._workload_identity_auth.get_token_async()}"
+ used_workload_identity_auth = False
- response = await super()._send_request(request, stream=stream, **kwargs)
+ if self._workload_identity_auth is not None:
+ authorization = request.headers.get("Authorization")
+ if authorization == f"Bearer {WORKLOAD_IDENTITY_API_KEY_PLACEHOLDER}":
+ request.headers["Authorization"] = f"Bearer {await self._workload_identity_auth.get_token_async()}"
+ used_workload_identity_auth = True
- if not retried and response.status_code == 401 and self._workload_identity_auth:
+ response = await super()._send_request(request, stream=stream, **kwargs)
+ if (
+ response.status_code == 401
+ and self._workload_identity_auth is not None
+ and used_workload_identity_auth
+ and not retried
+ ):
await response.aclose()
-
self._workload_identity_auth.invalidate_token()
- fresh_token = await self._workload_identity_auth.get_token_async()
-
- request.headers["Authorization"] = f"Bearer {fresh_token}"
-
+ request.headers["Authorization"] = f"Bearer {await self._workload_identity_auth.get_token_async()}"
return await self._send_with_auth_retry(request, stream=stream, retried=True, **kwargs)
return response
@@ -828,15 +945,40 @@ async def _send_request(
) -> httpx.Response:
return await self._send_with_auth_retry(request, stream=stream, **kwargs)
+ @override
+ def _auth_headers(self, security: SecurityOptions) -> dict[str, str]:
+ if security.get("bearer_auth", False):
+ headers = self._bearer_auth
+ if headers:
+ return headers
+
+ if security.get("admin_api_key_auth", False):
+ return self._admin_api_key_auth
+
+ return {}
+
+ @property
+ def _bearer_auth(self) -> dict[str, str]:
+ api_key = self.api_key
+ if not api_key:
+ return {}
+ return {"Authorization": f"Bearer {api_key}"}
+
@property
@override
def auth_headers(self) -> dict[str, str]:
api_key = self.api_key
if not api_key or api_key == WORKLOAD_IDENTITY_API_KEY_PLACEHOLDER:
- # if the api key is an empty string, encoding the header will fail
return {}
return {"Authorization": f"Bearer {api_key}"}
+ @property
+ def _admin_api_key_auth(self) -> dict[str, str]:
+ admin_api_key = self.admin_api_key
+ if admin_api_key is None:
+ return {}
+ return {"Authorization": f"Bearer {admin_api_key}"}
+
@property
@override
def default_headers(self) -> dict[str, str | Omit]:
@@ -848,10 +990,33 @@ def default_headers(self) -> dict[str, str | Omit]:
**self._custom_headers,
}
+ @override
+ def _validate_headers(self, headers: Headers, custom_headers: Headers) -> None:
+ if _has_header(headers, "Authorization") or _has_omitted_header(custom_headers, "Authorization"):
+ return
+
+ raise TypeError(
+ '"Could not resolve authentication method. Expected either api_key or admin_api_key to be set. Or for the `Authorization` header to be explicitly omitted"'
+ )
+
+ @override
+ async def _prepare_options(self, options: FinalRequestOptions) -> FinalRequestOptions:
+ if self._api_key_provider is not None and options.security.get("bearer_auth", False):
+ await self._refresh_api_key()
+
+ return await super()._prepare_options(options)
+
+ async def _refresh_api_key(self) -> str:
+ if self._api_key_provider is not None:
+ self.api_key = await self._api_key_provider()
+
+ return self.api_key
+
def copy(
self,
*,
api_key: str | Callable[[], Awaitable[str]] | None = None,
+ admin_api_key: str | None = None,
workload_identity: WorkloadIdentity | None = None,
organization: str | None = None,
project: str | None = None,
@@ -865,6 +1030,7 @@ def copy(
set_default_headers: Mapping[str, str] | None = None,
default_query: Mapping[str, object] | None = None,
set_default_query: Mapping[str, object] | None = None,
+ _enforce_credentials: bool | None = None,
_extra_kwargs: Mapping[str, Any] = {},
) -> Self:
"""
@@ -891,6 +1057,7 @@ def copy(
http_client = http_client or self._client
return self.__class__(
api_key=api_key or self._api_key_provider or self.api_key,
+ admin_api_key=admin_api_key or self.admin_api_key,
workload_identity=workload_identity or self.workload_identity,
organization=organization or self.organization,
project=project or self.project,
@@ -902,6 +1069,7 @@ def copy(
max_retries=max_retries if is_given(max_retries) else self.max_retries,
default_headers=headers,
default_query=params,
+ _enforce_credentials=True if _enforce_credentials is None else _enforce_credentials,
**_extra_kwargs,
)
@@ -1044,6 +1212,12 @@ def uploads(self) -> uploads.UploadsWithRawResponse:
return UploadsWithRawResponse(self._client.uploads)
+ @cached_property
+ def admin(self) -> admin.AdminWithRawResponse:
+ from .resources.admin import AdminWithRawResponse
+
+ return AdminWithRawResponse(self._client.admin)
+
@cached_property
def responses(self) -> responses.ResponsesWithRawResponse:
from .resources.responses import ResponsesWithRawResponse
@@ -1189,6 +1363,12 @@ def uploads(self) -> uploads.AsyncUploadsWithRawResponse:
return AsyncUploadsWithRawResponse(self._client.uploads)
+ @cached_property
+ def admin(self) -> admin.AsyncAdminWithRawResponse:
+ from .resources.admin import AsyncAdminWithRawResponse
+
+ return AsyncAdminWithRawResponse(self._client.admin)
+
@cached_property
def responses(self) -> responses.AsyncResponsesWithRawResponse:
from .resources.responses import AsyncResponsesWithRawResponse
@@ -1334,6 +1514,12 @@ def uploads(self) -> uploads.UploadsWithStreamingResponse:
return UploadsWithStreamingResponse(self._client.uploads)
+ @cached_property
+ def admin(self) -> admin.AdminWithStreamingResponse:
+ from .resources.admin import AdminWithStreamingResponse
+
+ return AdminWithStreamingResponse(self._client.admin)
+
@cached_property
def responses(self) -> responses.ResponsesWithStreamingResponse:
from .resources.responses import ResponsesWithStreamingResponse
@@ -1479,6 +1665,12 @@ def uploads(self) -> uploads.AsyncUploadsWithStreamingResponse:
return AsyncUploadsWithStreamingResponse(self._client.uploads)
+ @cached_property
+ def admin(self) -> admin.AsyncAdminWithStreamingResponse:
+ from .resources.admin import AsyncAdminWithStreamingResponse
+
+ return AsyncAdminWithStreamingResponse(self._client.admin)
+
@cached_property
def responses(self) -> responses.AsyncResponsesWithStreamingResponse:
from .resources.responses import AsyncResponsesWithStreamingResponse
diff --git a/src/openai/_files.py b/src/openai/_files.py
index 7b23ca084a..4cc4f35d8f 100644
--- a/src/openai/_files.py
+++ b/src/openai/_files.py
@@ -3,8 +3,8 @@
import io
import os
import pathlib
-from typing import overload
-from typing_extensions import TypeGuard
+from typing import Sequence, cast, overload
+from typing_extensions import TypeVar, TypeGuard
import anyio
@@ -17,7 +17,9 @@
HttpxFileContent,
HttpxRequestFiles,
)
-from ._utils import is_tuple_t, is_mapping_t, is_sequence_t
+from ._utils import is_list, is_mapping, is_tuple_t, is_mapping_t, is_sequence_t
+
+_T = TypeVar("_T")
def is_base64_file_input(obj: object) -> TypeGuard[Base64FileInput]:
@@ -121,3 +123,51 @@ async def async_read_file_content(file: FileContent) -> HttpxFileContent:
return await anyio.Path(file).read_bytes()
return file
+
+
+def deepcopy_with_paths(item: _T, paths: Sequence[Sequence[str]]) -> _T:
+ """Copy only the containers along the given paths.
+
+ Used to guard against mutation by extract_files without copying the entire structure.
+ Only dicts and lists that lie on a path are copied; everything else
+ is returned by reference.
+
+ For example, given paths=[["foo", "files", "file"]] and the structure:
+ {
+ "foo": {
+ "bar": {"baz": {}},
+ "files": {"file": <file object>}
+ }
+ }
+ The root dict, "foo", and "files" are copied (they lie on the path).
+ "bar" and "baz" are returned by reference (off the path).
+ """
+ return _deepcopy_with_paths(item, paths, 0)
+
+
+def _deepcopy_with_paths(item: _T, paths: Sequence[Sequence[str]], index: int) -> _T:
+ if not paths:
+ return item
+ if is_mapping(item):
+ key_to_paths: dict[str, list[Sequence[str]]] = {}
+ for path in paths:
+ if index < len(path):
+ key_to_paths.setdefault(path[index], []).append(path)
+
+ # if no path continues through this mapping, it won't be mutated and copying it is redundant
+ if not key_to_paths:
+ return item
+
+ result = dict(item)
+ for key, subpaths in key_to_paths.items():
+ if key in result:
+ result[key] = _deepcopy_with_paths(result[key], subpaths, index + 1)
+ return cast(_T, result)
+ if is_list(item):
+ array_paths = [path for path in paths if index < len(path) and path[index] == "<array>"]
+
+ # if no path expects a list here, nothing will be mutated inside it - return by reference
+ if not array_paths:
+ return cast(_T, item)
+ return cast(_T, [_deepcopy_with_paths(entry, array_paths, index + 1) for entry in item])
+ return item
diff --git a/src/openai/_models.py b/src/openai/_models.py
index 810e49dfc5..5f12232437 100644
--- a/src/openai/_models.py
+++ b/src/openai/_models.py
@@ -832,6 +832,11 @@ def _create_pydantic_model(type_: _T) -> Type[RootModel[_T]]:
return RootModel[type_] # type: ignore
+class SecurityOptions(TypedDict, total=False):
+ bearer_auth: bool
+ admin_api_key_auth: bool
+
+
class FinalRequestOptionsInput(TypedDict, total=False):
method: Required[str]
url: Required[str]
@@ -845,6 +850,7 @@ class FinalRequestOptionsInput(TypedDict, total=False):
json_data: Body
extra_json: AnyMapping
follow_redirects: bool
+ security: SecurityOptions
synthesize_event_and_data: bool
@@ -860,6 +866,10 @@ class FinalRequestOptions(pydantic.BaseModel):
idempotency_key: Union[str, None] = None
post_parser: Union[Callable[[Any], Any], NotGiven] = NotGiven()
follow_redirects: Union[bool, None] = None
+ security: SecurityOptions = {
+ "bearer_auth": True,
+ "admin_api_key_auth": True,
+ }
synthesize_event_and_data: Optional[bool] = None
content: Union[bytes, bytearray, IO[bytes], Iterable[bytes], AsyncIterable[bytes], None] = None
diff --git a/src/openai/_module_client.py b/src/openai/_module_client.py
index 98901c0446..3554e11e50 100644
--- a/src/openai/_module_client.py
+++ b/src/openai/_module_client.py
@@ -14,6 +14,7 @@
from .resources.beta.beta import Beta
from .resources.chat.chat import Chat
from .resources.embeddings import Embeddings
+ from .resources.admin.admin import Admin
from .resources.audio.audio import Audio
from .resources.completions import Completions
from .resources.evals.evals import Evals
@@ -56,6 +57,12 @@ def __load__(self) -> Audio:
return _load_client().audio
+class AdminProxy(LazyProxy["Admin"]):
+ @override
+ def __load__(self) -> Admin:
+ return _load_client().admin
+
+
class EvalsProxy(LazyProxy["Evals"]):
@override
def __load__(self) -> Evals:
@@ -162,6 +169,7 @@ def __load__(self) -> Conversations:
beta: Beta = BetaProxy().__as_proxied__()
files: Files = FilesProxy().__as_proxied__()
audio: Audio = AudioProxy().__as_proxied__()
+admin: Admin = AdminProxy().__as_proxied__()
evals: Evals = EvalsProxy().__as_proxied__()
images: Images = ImagesProxy().__as_proxied__()
models: Models = ModelsProxy().__as_proxied__()
diff --git a/src/openai/_qs.py b/src/openai/_qs.py
index de8c99bc63..4127c19c62 100644
--- a/src/openai/_qs.py
+++ b/src/openai/_qs.py
@@ -2,17 +2,13 @@
from typing import Any, List, Tuple, Union, Mapping, TypeVar
from urllib.parse import parse_qs, urlencode
-from typing_extensions import Literal, get_args
+from typing_extensions import get_args
-from ._types import NotGiven, not_given
+from ._types import NotGiven, ArrayFormat, NestedFormat, not_given
from ._utils import flatten
_T = TypeVar("_T")
-
-ArrayFormat = Literal["comma", "repeat", "indices", "brackets"]
-NestedFormat = Literal["dots", "brackets"]
-
PrimitiveData = Union[str, int, float, bool, None]
# this should be Data = Union[PrimitiveData, "List[Data]", "Tuple[Data]", "Mapping[str, Data]"]
# https://github.com/microsoft/pyright/issues/3555
diff --git a/src/openai/_types.py b/src/openai/_types.py
index c55c6f808d..9936b00f73 100644
--- a/src/openai/_types.py
+++ b/src/openai/_types.py
@@ -36,7 +36,7 @@
from httpx import URL, Proxy, Timeout, Response, BaseTransport, AsyncBaseTransport
if TYPE_CHECKING:
- from ._models import BaseModel
+ from ._models import BaseModel, SecurityOptions
from ._response import APIResponse, AsyncAPIResponse
from ._legacy_response import HttpxBinaryResponseContent
@@ -48,6 +48,9 @@
ModelT = TypeVar("ModelT", bound=pydantic.BaseModel)
_T = TypeVar("_T")
+ArrayFormat = Literal["comma", "repeat", "indices", "brackets"]
+NestedFormat = Literal["dots", "brackets"]
+
# Approximates httpx internal ProxiesTypes and RequestFiles types
# while adding support for `PathLike` instances
@@ -122,6 +125,7 @@ class RequestOptions(TypedDict, total=False):
extra_json: AnyMapping
idempotency_key: str
follow_redirects: bool
+ security: SecurityOptions
synthesize_event_and_data: bool
diff --git a/src/openai/_utils/__init__.py b/src/openai/_utils/__init__.py
index 52853aaf03..bbd79691fa 100644
--- a/src/openai/_utils/__init__.py
+++ b/src/openai/_utils/__init__.py
@@ -26,7 +26,6 @@
file_from_path as file_from_path,
is_azure_client as is_azure_client,
strip_not_given as strip_not_given,
- deepcopy_minimal as deepcopy_minimal,
get_async_library as get_async_library,
maybe_coerce_float as maybe_coerce_float,
get_required_header as get_required_header,
diff --git a/src/openai/_utils/_utils.py b/src/openai/_utils/_utils.py
index b1e8e0d041..9f7401ca83 100644
--- a/src/openai/_utils/_utils.py
+++ b/src/openai/_utils/_utils.py
@@ -18,11 +18,11 @@
)
from pathlib import Path
from datetime import date, datetime
-from typing_extensions import TypeGuard
+from typing_extensions import TypeGuard, get_args
import sniffio
-from .._types import Omit, NotGiven, FileTypes, HeadersLike
+from .._types import Omit, NotGiven, FileTypes, ArrayFormat, HeadersLike
_T = TypeVar("_T")
_TupleT = TypeVar("_TupleT", bound=Tuple[object, ...])
@@ -44,25 +44,45 @@ def extract_files(
query: Mapping[str, object],
*,
paths: Sequence[Sequence[str]],
+ array_format: ArrayFormat = "brackets",
) -> list[tuple[str, FileTypes]]:
"""Recursively extract files from the given dictionary based on specified paths.
A path may look like this ['foo', 'files', '<array>', 'data'].
+ ``array_format`` controls how ``<array>`` segments contribute to the emitted
+ field name. Supported values: ``"brackets"`` (``foo[]``), ``"repeat"`` and
+ ``"comma"`` (``foo``), ``"indices"`` (``foo[0]``, ``foo[1]``).
+
Note: this mutates the given dictionary.
"""
files: list[tuple[str, FileTypes]] = []
for path in paths:
- files.extend(_extract_items(query, path, index=0, flattened_key=None))
+ files.extend(_extract_items(query, path, index=0, flattened_key=None, array_format=array_format))
return files
+def _array_suffix(array_format: ArrayFormat, array_index: int) -> str:
+ if array_format == "brackets":
+ return "[]"
+ if array_format == "indices":
+ return f"[{array_index}]"
+ if array_format == "repeat" or array_format == "comma":
+ # Both repeat the bare field name for each file part; there is no
+ # meaningful way to comma-join binary parts.
+ return ""
+ raise NotImplementedError(
+ f"Unknown array_format value: {array_format}, choose from {', '.join(get_args(ArrayFormat))}"
+ )
+
+
def _extract_items(
obj: object,
path: Sequence[str],
*,
index: int,
flattened_key: str | None,
+ array_format: ArrayFormat,
) -> list[tuple[str, FileTypes]]:
try:
key = path[index]
@@ -79,9 +99,11 @@ def _extract_items(
if is_list(obj):
files: list[tuple[str, FileTypes]] = []
- for entry in obj:
- assert_is_file_content(entry, key=flattened_key + "[]" if flattened_key else "")
- files.append((flattened_key + "[]", cast(FileTypes, entry)))
+ for array_index, entry in enumerate(obj):
+ suffix = _array_suffix(array_format, array_index)
+ emitted_key = (flattened_key + suffix) if flattened_key else suffix
+ assert_is_file_content(entry, key=emitted_key)
+ files.append((emitted_key, cast(FileTypes, entry)))
return files
assert_is_file_content(obj, key=flattened_key)
@@ -110,6 +132,7 @@ def _extract_items(
path,
index=index,
flattened_key=flattened_key,
+ array_format=array_format,
)
elif is_list(obj):
if key != "<array>":
@@ -121,9 +144,12 @@ def _extract_items(
item,
path,
index=index,
- flattened_key=flattened_key + "[]" if flattened_key is not None else "[]",
+ flattened_key=(
+ (flattened_key if flattened_key is not None else "") + _array_suffix(array_format, array_index)
+ ),
+ array_format=array_format,
)
- for item in obj
+ for array_index, item in enumerate(obj)
]
)
@@ -181,21 +207,6 @@ def is_iterable(obj: object) -> TypeGuard[Iterable[object]]:
return isinstance(obj, Iterable)
-def deepcopy_minimal(item: _T) -> _T:
- """Minimal reimplementation of copy.deepcopy() that will only copy certain object types:
-
- - mappings, e.g. `dict`
- - list
-
- This is done for performance reasons.
- """
- if is_mapping(item):
- return cast(_T, {k: deepcopy_minimal(v) for k, v in item.items()})
- if is_list(item):
- return cast(_T, [deepcopy_minimal(entry) for entry in item])
- return item
-
-
# copied from https://github.com/Rapptz/RoboDanny
def human_join(seq: Sequence[str], *, delim: str = ", ", final: str = "or") -> str:
size = len(seq)
diff --git a/src/openai/_version.py b/src/openai/_version.py
index b73f7aa7bd..857aeb7dff 100644
--- a/src/openai/_version.py
+++ b/src/openai/_version.py
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
__title__ = "openai"
-__version__ = "2.33.0" # x-release-please-version
+__version__ = "2.34.0" # x-release-please-version
diff --git a/src/openai/lib/azure.py b/src/openai/lib/azure.py
index 09fdd9507e..4fcae24788 100644
--- a/src/openai/lib/azure.py
+++ b/src/openai/lib/azure.py
@@ -8,11 +8,11 @@
import httpx
from ..auth import WorkloadIdentity
-from .._types import NOT_GIVEN, Omit, Query, Timeout, NotGiven
+from .._types import NOT_GIVEN, Omit, Query, Headers, Timeout, NotGiven
from .._utils import is_given, is_mapping
from .._client import OpenAI, AsyncOpenAI
from .._compat import model_copy
-from .._models import FinalRequestOptions
+from .._models import SecurityOptions, FinalRequestOptions
from .._streaming import Stream, AsyncStream
from .._exceptions import OpenAIError
from .._base_client import DEFAULT_MAX_RETRIES, BaseClient
@@ -43,6 +43,15 @@
API_KEY_SENTINEL = "".join(["<", "missing API key", ">"])
+def _has_header(headers: Headers, header: str) -> bool:
+ header = header.lower()
+ return any(key.lower() == header for key in headers)
+
+
+def _has_auth_header(headers: Headers) -> bool:
+ return _has_header(headers, "Authorization") or _has_header(headers, "api-key")
+
+
class MutuallyExclusiveAuthError(OpenAIError):
def __init__(self) -> None:
super().__init__(
@@ -96,6 +105,7 @@ def __init__(
azure_deployment: str | None = None,
api_version: str | None = None,
api_key: str | Callable[[], str] | None = None,
+ admin_api_key: str | None = None,
azure_ad_token: str | None = None,
azure_ad_token_provider: AzureADTokenProvider | None = None,
organization: str | None = None,
@@ -107,6 +117,7 @@ def __init__(
default_query: Mapping[str, object] | None = None,
http_client: httpx.Client | None = None,
_strict_response_validation: bool = False,
+ _enforce_credentials: bool = True,
) -> None: ...
@overload
@@ -116,6 +127,7 @@ def __init__(
azure_deployment: str | None = None,
api_version: str | None = None,
api_key: str | Callable[[], str] | None = None,
+ admin_api_key: str | None = None,
azure_ad_token: str | None = None,
azure_ad_token_provider: AzureADTokenProvider | None = None,
organization: str | None = None,
@@ -127,6 +139,7 @@ def __init__(
default_query: Mapping[str, object] | None = None,
http_client: httpx.Client | None = None,
_strict_response_validation: bool = False,
+ _enforce_credentials: bool = True,
) -> None: ...
@overload
@@ -136,6 +149,7 @@ def __init__(
base_url: str,
api_version: str | None = None,
api_key: str | Callable[[], str] | None = None,
+ admin_api_key: str | None = None,
azure_ad_token: str | None = None,
azure_ad_token_provider: AzureADTokenProvider | None = None,
organization: str | None = None,
@@ -147,6 +161,7 @@ def __init__(
default_query: Mapping[str, object] | None = None,
http_client: httpx.Client | None = None,
_strict_response_validation: bool = False,
+ _enforce_credentials: bool = True,
) -> None: ...
def __init__(
@@ -156,6 +171,7 @@ def __init__(
azure_endpoint: str | None = None,
azure_deployment: str | None = None,
api_key: str | Callable[[], str] | None = None,
+ admin_api_key: str | None = None,
# workload_identity is not functional in the Azure client
workload_identity: WorkloadIdentity | None = None, # noqa: ARG002
azure_ad_token: str | None = None,
@@ -171,6 +187,7 @@ def __init__(
default_query: Mapping[str, object] | None = None,
http_client: httpx.Client | None = None,
_strict_response_validation: bool = False,
+ _enforce_credentials: bool = True,
) -> None:
"""Construct a new synchronous azure openai client instance.
@@ -198,7 +215,7 @@ def __init__(
if azure_ad_token is None:
azure_ad_token = os.environ.get("AZURE_OPENAI_AD_TOKEN")
- if api_key is None and azure_ad_token is None and azure_ad_token_provider is None:
+ if _enforce_credentials and api_key is None and azure_ad_token is None and azure_ad_token_provider is None:
raise OpenAIError(
"Missing credentials. Please pass one of `api_key`, `azure_ad_token`, `azure_ad_token_provider`, or the `AZURE_OPENAI_API_KEY` or `AZURE_OPENAI_AD_TOKEN` environment variables."
)
@@ -239,6 +256,7 @@ def __init__(
super().__init__(
api_key=api_key,
+ admin_api_key=admin_api_key,
organization=organization,
project=project,
webhook_secret=webhook_secret,
@@ -250,6 +268,7 @@ def __init__(
http_client=http_client,
websocket_base_url=websocket_base_url,
_strict_response_validation=_strict_response_validation,
+ _enforce_credentials=_enforce_credentials,
)
self._api_version = api_version
self._azure_ad_token = azure_ad_token
@@ -262,6 +281,7 @@ def copy(
self,
*,
api_key: str | Callable[[], str] | None = None,
+ admin_api_key: str | None = None,
workload_identity: WorkloadIdentity | None = None,
organization: str | None = None,
project: str | None = None,
@@ -278,6 +298,7 @@ def copy(
set_default_headers: Mapping[str, str] | None = None,
default_query: Mapping[str, object] | None = None,
set_default_query: Mapping[str, object] | None = None,
+ _enforce_credentials: bool | None = None,
_extra_kwargs: Mapping[str, Any] = {},
) -> Self:
"""
@@ -285,6 +306,7 @@ def copy(
"""
return super().copy(
api_key=api_key,
+ admin_api_key=admin_api_key,
workload_identity=workload_identity,
organization=organization,
project=project,
@@ -298,6 +320,7 @@ def copy(
set_default_headers=set_default_headers,
default_query=default_query,
set_default_query=set_default_query,
+ _enforce_credentials=_enforce_credentials,
_extra_kwargs={
"api_version": api_version or self._api_version,
"azure_ad_token": azure_ad_token or self._azure_ad_token,
@@ -323,6 +346,25 @@ def _get_azure_ad_token(self) -> str | None:
return None
+ @override
+ def _auth_headers(self, security: SecurityOptions) -> dict[str, str]: # noqa: ARG002
+ if self._azure_ad_token is not None:
+ return {"Authorization": f"Bearer {self._azure_ad_token}"}
+
+ if self.api_key and self.api_key != API_KEY_SENTINEL:
+ return {"api-key": self.api_key}
+
+ return {}
+
+ @override
+ def _validate_headers(self, headers: Headers, custom_headers: Headers) -> None:
+ if _has_auth_header(headers) or _has_auth_header(custom_headers):
+ return
+
+ raise TypeError(
+ '"Could not resolve authentication method. Expected either api_key, azure_ad_token or azure_ad_token_provider to be set. Or for one of the `Authorization` or `api-key` headers to be explicitly supplied or omitted"'
+ )
+
@override
def _prepare_options(self, options: FinalRequestOptions) -> FinalRequestOptions:
headers: dict[str, str | Omit] = {**options.headers} if is_given(options.headers) else {}
@@ -332,11 +374,13 @@ def _prepare_options(self, options: FinalRequestOptions) -> FinalRequestOptions:
azure_ad_token = self._get_azure_ad_token()
if azure_ad_token is not None:
- if headers.get("Authorization") is None:
+ if not _has_header(headers, "Authorization"):
headers["Authorization"] = f"Bearer {azure_ad_token}"
- elif self.api_key is not API_KEY_SENTINEL:
- if headers.get("api-key") is None:
+ elif self.api_key and self.api_key != API_KEY_SENTINEL:
+ if not _has_header(headers, "api-key"):
headers["api-key"] = self.api_key
+ elif _has_auth_header(headers) or _has_auth_header(self.default_headers):
+ pass
else:
# should never be hit
raise ValueError("Unable to handle auth")
@@ -378,6 +422,7 @@ def __init__(
azure_deployment: str | None = None,
api_version: str | None = None,
api_key: str | Callable[[], Awaitable[str]] | None = None,
+ admin_api_key: str | None = None,
azure_ad_token: str | None = None,
azure_ad_token_provider: AsyncAzureADTokenProvider | None = None,
organization: str | None = None,
@@ -390,6 +435,7 @@ def __init__(
default_query: Mapping[str, object] | None = None,
http_client: httpx.AsyncClient | None = None,
_strict_response_validation: bool = False,
+ _enforce_credentials: bool = True,
) -> None: ...
@overload
@@ -399,6 +445,7 @@ def __init__(
azure_deployment: str | None = None,
api_version: str | None = None,
api_key: str | Callable[[], Awaitable[str]] | None = None,
+ admin_api_key: str | None = None,
azure_ad_token: str | None = None,
azure_ad_token_provider: AsyncAzureADTokenProvider | None = None,
organization: str | None = None,
@@ -411,6 +458,7 @@ def __init__(
default_query: Mapping[str, object] | None = None,
http_client: httpx.AsyncClient | None = None,
_strict_response_validation: bool = False,
+ _enforce_credentials: bool = True,
) -> None: ...
@overload
@@ -420,6 +468,7 @@ def __init__(
base_url: str,
api_version: str | None = None,
api_key: str | Callable[[], Awaitable[str]] | None = None,
+ admin_api_key: str | None = None,
azure_ad_token: str | None = None,
azure_ad_token_provider: AsyncAzureADTokenProvider | None = None,
organization: str | None = None,
@@ -432,6 +481,7 @@ def __init__(
default_query: Mapping[str, object] | None = None,
http_client: httpx.AsyncClient | None = None,
_strict_response_validation: bool = False,
+ _enforce_credentials: bool = True,
) -> None: ...
def __init__(
@@ -441,6 +491,7 @@ def __init__(
azure_deployment: str | None = None,
api_version: str | None = None,
api_key: str | Callable[[], Awaitable[str]] | None = None,
+ admin_api_key: str | None = None,
# workload_identity is not functional in the Azure client
workload_identity: WorkloadIdentity | None = None, # noqa: ARG002
azure_ad_token: str | None = None,
@@ -456,6 +507,7 @@ def __init__(
default_query: Mapping[str, object] | None = None,
http_client: httpx.AsyncClient | None = None,
_strict_response_validation: bool = False,
+ _enforce_credentials: bool = True,
) -> None:
"""Construct a new asynchronous azure openai client instance.
@@ -483,7 +535,7 @@ def __init__(
if azure_ad_token is None:
azure_ad_token = os.environ.get("AZURE_OPENAI_AD_TOKEN")
- if api_key is None and azure_ad_token is None and azure_ad_token_provider is None:
+ if _enforce_credentials and api_key is None and azure_ad_token is None and azure_ad_token_provider is None:
raise OpenAIError(
"Missing credentials. Please pass one of `api_key`, `azure_ad_token`, `azure_ad_token_provider`, or the `AZURE_OPENAI_API_KEY` or `AZURE_OPENAI_AD_TOKEN` environment variables."
)
@@ -524,6 +576,7 @@ def __init__(
super().__init__(
api_key=api_key,
+ admin_api_key=admin_api_key,
organization=organization,
project=project,
webhook_secret=webhook_secret,
@@ -535,6 +588,7 @@ def __init__(
http_client=http_client,
websocket_base_url=websocket_base_url,
_strict_response_validation=_strict_response_validation,
+ _enforce_credentials=_enforce_credentials,
)
self._api_version = api_version
self._azure_ad_token = azure_ad_token
@@ -547,6 +601,7 @@ def copy(
self,
*,
api_key: str | Callable[[], Awaitable[str]] | None = None,
+ admin_api_key: str | None = None,
workload_identity: WorkloadIdentity | None = None,
organization: str | None = None,
project: str | None = None,
@@ -563,6 +618,7 @@ def copy(
set_default_headers: Mapping[str, str] | None = None,
default_query: Mapping[str, object] | None = None,
set_default_query: Mapping[str, object] | None = None,
+ _enforce_credentials: bool | None = None,
_extra_kwargs: Mapping[str, Any] = {},
) -> Self:
"""
@@ -570,6 +626,7 @@ def copy(
"""
return super().copy(
api_key=api_key,
+ admin_api_key=admin_api_key,
workload_identity=workload_identity,
organization=organization,
project=project,
@@ -583,6 +640,7 @@ def copy(
set_default_headers=set_default_headers,
default_query=default_query,
set_default_query=set_default_query,
+ _enforce_credentials=_enforce_credentials,
_extra_kwargs={
"api_version": api_version or self._api_version,
"azure_ad_token": azure_ad_token or self._azure_ad_token,
@@ -610,6 +668,25 @@ async def _get_azure_ad_token(self) -> str | None:
return None
+ @override
+ def _auth_headers(self, security: SecurityOptions) -> dict[str, str]: # noqa: ARG002
+ if self._azure_ad_token is not None:
+ return {"Authorization": f"Bearer {self._azure_ad_token}"}
+
+ if self.api_key and self.api_key != API_KEY_SENTINEL:
+ return {"api-key": self.api_key}
+
+ return {}
+
+ @override
+ def _validate_headers(self, headers: Headers, custom_headers: Headers) -> None:
+ if _has_auth_header(headers) or _has_auth_header(custom_headers):
+ return
+
+ raise TypeError(
+ '"Could not resolve authentication method. Expected either api_key, azure_ad_token or azure_ad_token_provider to be set. Or for one of the `Authorization` or `api-key` headers to be explicitly supplied or omitted"'
+ )
+
@override
async def _prepare_options(self, options: FinalRequestOptions) -> FinalRequestOptions:
headers: dict[str, str | Omit] = {**options.headers} if is_given(options.headers) else {}
@@ -619,11 +696,13 @@ async def _prepare_options(self, options: FinalRequestOptions) -> FinalRequestOp
azure_ad_token = await self._get_azure_ad_token()
if azure_ad_token is not None:
- if headers.get("Authorization") is None:
+ if not _has_header(headers, "Authorization"):
headers["Authorization"] = f"Bearer {azure_ad_token}"
- elif self.api_key is not API_KEY_SENTINEL:
- if headers.get("api-key") is None:
+ elif self.api_key and self.api_key != API_KEY_SENTINEL:
+ if not _has_header(headers, "api-key"):
headers["api-key"] = self.api_key
+ elif _has_auth_header(headers) or _has_auth_header(self.default_headers):
+ pass
else:
# should never be hit
raise ValueError("Unable to handle auth")
diff --git a/src/openai/pagination.py b/src/openai/pagination.py
index 4dd3788aa3..6cf0c8e7f5 100644
--- a/src/openai/pagination.py
+++ b/src/openai/pagination.py
@@ -12,6 +12,8 @@
"AsyncCursorPage",
"SyncConversationCursorPage",
"AsyncConversationCursorPage",
+ "SyncNextCursorPage",
+ "AsyncNextCursorPage",
]
_T = TypeVar("_T")
@@ -188,3 +190,61 @@ def next_page_info(self) -> Optional[PageInfo]:
return None
return PageInfo(params={"after": last_id})
+
+
+class SyncNextCursorPage(BaseSyncPage[_T], BasePage[_T], Generic[_T]):
+ data: List[_T]
+ has_more: Optional[bool] = None
+ next: Optional[str] = None
+
+ @override
+ def _get_page_items(self) -> List[_T]:
+ data = self.data
+ if not data:
+ return []
+ return data
+
+ @override
+ def has_next_page(self) -> bool:
+ has_more = self.has_more
+ if has_more is not None and has_more is False:
+ return False
+
+ return super().has_next_page()
+
+ @override
+ def next_page_info(self) -> Optional[PageInfo]:
+ next = self.next
+ if not next:
+ return None
+
+ return PageInfo(params={"after": next})
+
+
+class AsyncNextCursorPage(BaseAsyncPage[_T], BasePage[_T], Generic[_T]):
+ data: List[_T]
+ has_more: Optional[bool] = None
+ next: Optional[str] = None
+
+ @override
+ def _get_page_items(self) -> List[_T]:
+ data = self.data
+ if not data:
+ return []
+ return data
+
+ @override
+ def has_next_page(self) -> bool:
+ has_more = self.has_more
+ if has_more is not None and has_more is False:
+ return False
+
+ return super().has_next_page()
+
+ @override
+ def next_page_info(self) -> Optional[PageInfo]:
+ next = self.next
+ if not next:
+ return None
+
+ return PageInfo(params={"after": next})
diff --git a/src/openai/resources/__init__.py b/src/openai/resources/__init__.py
index ed030f7188..e4905152c0 100644
--- a/src/openai/resources/__init__.py
+++ b/src/openai/resources/__init__.py
@@ -16,6 +16,14 @@
ChatWithStreamingResponse,
AsyncChatWithStreamingResponse,
)
+from .admin import (
+ Admin,
+ AsyncAdmin,
+ AdminWithRawResponse,
+ AsyncAdminWithRawResponse,
+ AdminWithStreamingResponse,
+ AsyncAdminWithStreamingResponse,
+)
from .audio import (
Audio,
AsyncAudio,
@@ -216,6 +224,12 @@
"AsyncUploadsWithRawResponse",
"UploadsWithStreamingResponse",
"AsyncUploadsWithStreamingResponse",
+ "Admin",
+ "AsyncAdmin",
+ "AdminWithRawResponse",
+ "AsyncAdminWithRawResponse",
+ "AdminWithStreamingResponse",
+ "AsyncAdminWithStreamingResponse",
"Evals",
"AsyncEvals",
"EvalsWithRawResponse",
diff --git a/src/openai/resources/admin/__init__.py b/src/openai/resources/admin/__init__.py
new file mode 100644
index 0000000000..730c20540c
--- /dev/null
+++ b/src/openai/resources/admin/__init__.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .admin import (
+ Admin,
+ AsyncAdmin,
+ AdminWithRawResponse,
+ AsyncAdminWithRawResponse,
+ AdminWithStreamingResponse,
+ AsyncAdminWithStreamingResponse,
+)
+from .organization import (
+ Organization,
+ AsyncOrganization,
+ OrganizationWithRawResponse,
+ AsyncOrganizationWithRawResponse,
+ OrganizationWithStreamingResponse,
+ AsyncOrganizationWithStreamingResponse,
+)
+
+__all__ = [
+ "Organization",
+ "AsyncOrganization",
+ "OrganizationWithRawResponse",
+ "AsyncOrganizationWithRawResponse",
+ "OrganizationWithStreamingResponse",
+ "AsyncOrganizationWithStreamingResponse",
+ "Admin",
+ "AsyncAdmin",
+ "AdminWithRawResponse",
+ "AsyncAdminWithRawResponse",
+ "AdminWithStreamingResponse",
+ "AsyncAdminWithStreamingResponse",
+]
diff --git a/src/openai/resources/admin/admin.py b/src/openai/resources/admin/admin.py
new file mode 100644
index 0000000000..3a3e288c6a
--- /dev/null
+++ b/src/openai/resources/admin/admin.py
@@ -0,0 +1,102 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from .organization.organization import (
+ Organization,
+ AsyncOrganization,
+ OrganizationWithRawResponse,
+ AsyncOrganizationWithRawResponse,
+ OrganizationWithStreamingResponse,
+ AsyncOrganizationWithStreamingResponse,
+)
+
+__all__ = ["Admin", "AsyncAdmin"]
+
+
+class Admin(SyncAPIResource):
+ @cached_property
+ def organization(self) -> Organization:
+ return Organization(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> AdminWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return AdminWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AdminWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return AdminWithStreamingResponse(self)
+
+
+class AsyncAdmin(AsyncAPIResource):
+ @cached_property
+ def organization(self) -> AsyncOrganization:
+ return AsyncOrganization(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> AsyncAdminWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncAdminWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncAdminWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return AsyncAdminWithStreamingResponse(self)
+
+
+class AdminWithRawResponse:
+ def __init__(self, admin: Admin) -> None:
+ self._admin = admin
+
+ @cached_property
+ def organization(self) -> OrganizationWithRawResponse:
+ return OrganizationWithRawResponse(self._admin.organization)
+
+
+class AsyncAdminWithRawResponse:
+ def __init__(self, admin: AsyncAdmin) -> None:
+ self._admin = admin
+
+ @cached_property
+ def organization(self) -> AsyncOrganizationWithRawResponse:
+ return AsyncOrganizationWithRawResponse(self._admin.organization)
+
+
+class AdminWithStreamingResponse:
+ def __init__(self, admin: Admin) -> None:
+ self._admin = admin
+
+ @cached_property
+ def organization(self) -> OrganizationWithStreamingResponse:
+ return OrganizationWithStreamingResponse(self._admin.organization)
+
+
+class AsyncAdminWithStreamingResponse:
+ def __init__(self, admin: AsyncAdmin) -> None:
+ self._admin = admin
+
+ @cached_property
+ def organization(self) -> AsyncOrganizationWithStreamingResponse:
+ return AsyncOrganizationWithStreamingResponse(self._admin.organization)
diff --git a/src/openai/resources/admin/organization/__init__.py b/src/openai/resources/admin/organization/__init__.py
new file mode 100644
index 0000000000..50641088bb
--- /dev/null
+++ b/src/openai/resources/admin/organization/__init__.py
@@ -0,0 +1,145 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .roles import (
+ Roles,
+ AsyncRoles,
+ RolesWithRawResponse,
+ AsyncRolesWithRawResponse,
+ RolesWithStreamingResponse,
+ AsyncRolesWithStreamingResponse,
+)
+from .usage import (
+ Usage,
+ AsyncUsage,
+ UsageWithRawResponse,
+ AsyncUsageWithRawResponse,
+ UsageWithStreamingResponse,
+ AsyncUsageWithStreamingResponse,
+)
+from .users import (
+ Users,
+ AsyncUsers,
+ UsersWithRawResponse,
+ AsyncUsersWithRawResponse,
+ UsersWithStreamingResponse,
+ AsyncUsersWithStreamingResponse,
+)
+from .groups import (
+ Groups,
+ AsyncGroups,
+ GroupsWithRawResponse,
+ AsyncGroupsWithRawResponse,
+ GroupsWithStreamingResponse,
+ AsyncGroupsWithStreamingResponse,
+)
+from .invites import (
+ Invites,
+ AsyncInvites,
+ InvitesWithRawResponse,
+ AsyncInvitesWithRawResponse,
+ InvitesWithStreamingResponse,
+ AsyncInvitesWithStreamingResponse,
+)
+from .projects import (
+ Projects,
+ AsyncProjects,
+ ProjectsWithRawResponse,
+ AsyncProjectsWithRawResponse,
+ ProjectsWithStreamingResponse,
+ AsyncProjectsWithStreamingResponse,
+)
+from .audit_logs import (
+ AuditLogs,
+ AsyncAuditLogs,
+ AuditLogsWithRawResponse,
+ AsyncAuditLogsWithRawResponse,
+ AuditLogsWithStreamingResponse,
+ AsyncAuditLogsWithStreamingResponse,
+)
+from .certificates import (
+ Certificates,
+ AsyncCertificates,
+ CertificatesWithRawResponse,
+ AsyncCertificatesWithRawResponse,
+ CertificatesWithStreamingResponse,
+ AsyncCertificatesWithStreamingResponse,
+)
+from .organization import (
+ Organization,
+ AsyncOrganization,
+ OrganizationWithRawResponse,
+ AsyncOrganizationWithRawResponse,
+ OrganizationWithStreamingResponse,
+ AsyncOrganizationWithStreamingResponse,
+)
+from .admin_api_keys import (
+ AdminAPIKeys,
+ AsyncAdminAPIKeys,
+ AdminAPIKeysWithRawResponse,
+ AsyncAdminAPIKeysWithRawResponse,
+ AdminAPIKeysWithStreamingResponse,
+ AsyncAdminAPIKeysWithStreamingResponse,
+)
+
+__all__ = [
+ "AuditLogs",
+ "AsyncAuditLogs",
+ "AuditLogsWithRawResponse",
+ "AsyncAuditLogsWithRawResponse",
+ "AuditLogsWithStreamingResponse",
+ "AsyncAuditLogsWithStreamingResponse",
+ "AdminAPIKeys",
+ "AsyncAdminAPIKeys",
+ "AdminAPIKeysWithRawResponse",
+ "AsyncAdminAPIKeysWithRawResponse",
+ "AdminAPIKeysWithStreamingResponse",
+ "AsyncAdminAPIKeysWithStreamingResponse",
+ "Usage",
+ "AsyncUsage",
+ "UsageWithRawResponse",
+ "AsyncUsageWithRawResponse",
+ "UsageWithStreamingResponse",
+ "AsyncUsageWithStreamingResponse",
+ "Invites",
+ "AsyncInvites",
+ "InvitesWithRawResponse",
+ "AsyncInvitesWithRawResponse",
+ "InvitesWithStreamingResponse",
+ "AsyncInvitesWithStreamingResponse",
+ "Users",
+ "AsyncUsers",
+ "UsersWithRawResponse",
+ "AsyncUsersWithRawResponse",
+ "UsersWithStreamingResponse",
+ "AsyncUsersWithStreamingResponse",
+ "Groups",
+ "AsyncGroups",
+ "GroupsWithRawResponse",
+ "AsyncGroupsWithRawResponse",
+ "GroupsWithStreamingResponse",
+ "AsyncGroupsWithStreamingResponse",
+ "Roles",
+ "AsyncRoles",
+ "RolesWithRawResponse",
+ "AsyncRolesWithRawResponse",
+ "RolesWithStreamingResponse",
+ "AsyncRolesWithStreamingResponse",
+ "Certificates",
+ "AsyncCertificates",
+ "CertificatesWithRawResponse",
+ "AsyncCertificatesWithRawResponse",
+ "CertificatesWithStreamingResponse",
+ "AsyncCertificatesWithStreamingResponse",
+ "Projects",
+ "AsyncProjects",
+ "ProjectsWithRawResponse",
+ "AsyncProjectsWithRawResponse",
+ "ProjectsWithStreamingResponse",
+ "AsyncProjectsWithStreamingResponse",
+ "Organization",
+ "AsyncOrganization",
+ "OrganizationWithRawResponse",
+ "AsyncOrganizationWithRawResponse",
+ "OrganizationWithStreamingResponse",
+ "AsyncOrganizationWithStreamingResponse",
+]
diff --git a/src/openai/resources/admin/organization/admin_api_keys.py b/src/openai/resources/admin/organization/admin_api_keys.py
new file mode 100644
index 0000000000..80ddc39b49
--- /dev/null
+++ b/src/openai/resources/admin/organization/admin_api_keys.py
@@ -0,0 +1,469 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Literal
+
+import httpx
+
+from .... import _legacy_response
+from ...._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
+from ...._utils import path_template, maybe_transform, async_maybe_transform
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ....pagination import SyncCursorPage, AsyncCursorPage
+from ...._base_client import AsyncPaginator, make_request_options
+from ....types.admin.organization import admin_api_key_list_params, admin_api_key_create_params
+from ....types.admin.organization.admin_api_key import AdminAPIKey
+from ....types.admin.organization.admin_api_key_create_response import AdminAPIKeyCreateResponse
+from ....types.admin.organization.admin_api_key_delete_response import AdminAPIKeyDeleteResponse
+
+__all__ = ["AdminAPIKeys", "AsyncAdminAPIKeys"]
+
+
+class AdminAPIKeys(SyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AdminAPIKeysWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return AdminAPIKeysWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AdminAPIKeysWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return AdminAPIKeysWithStreamingResponse(self)
+
+ def create(
+ self,
+ *,
+ name: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AdminAPIKeyCreateResponse:
+ """
+ Create an organization admin API key
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._post(
+ "/organization/admin_api_keys",
+ body=maybe_transform({"name": name}, admin_api_key_create_params.AdminAPIKeyCreateParams),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=AdminAPIKeyCreateResponse,
+ )
+
+ def retrieve(
+ self,
+ key_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AdminAPIKey:
+ """
+ Retrieve a single organization API key
+
+ Args:
+ key_id: The ID of the API key.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not key_id:
+ raise ValueError(f"Expected a non-empty value for `key_id` but received {key_id!r}")
+ return self._get(
+ path_template("/organization/admin_api_keys/{key_id}", key_id=key_id),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=AdminAPIKey,
+ )
+
+ def list(
+ self,
+ *,
+ after: Optional[str] | Omit = omit,
+ limit: int | Omit = omit,
+ order: Literal["asc", "desc"] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> SyncCursorPage[AdminAPIKey]:
+ """
+ List organization API keys
+
+ Args:
+ after: Return keys with IDs that come after this ID in the pagination order.
+
+ limit: Maximum number of keys to return.
+
+ order: Order results by creation time, ascending or descending.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get_api_list(
+ "/organization/admin_api_keys",
+ page=SyncCursorPage[AdminAPIKey],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "limit": limit,
+ "order": order,
+ },
+ admin_api_key_list_params.AdminAPIKeyListParams,
+ ),
+ security={"admin_api_key_auth": True},
+ ),
+ model=AdminAPIKey,
+ )
+
+ def delete(
+ self,
+ key_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AdminAPIKeyDeleteResponse:
+ """
+ Delete an organization admin API key
+
+ Args:
+ key_id: The ID of the API key to be deleted.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not key_id:
+ raise ValueError(f"Expected a non-empty value for `key_id` but received {key_id!r}")
+ return self._delete(
+ path_template("/organization/admin_api_keys/{key_id}", key_id=key_id),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=AdminAPIKeyDeleteResponse,
+ )
+
+
+class AsyncAdminAPIKeys(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncAdminAPIKeysWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncAdminAPIKeysWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncAdminAPIKeysWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return AsyncAdminAPIKeysWithStreamingResponse(self)
+
+ async def create(
+ self,
+ *,
+ name: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AdminAPIKeyCreateResponse:
+ """
+ Create an organization admin API key
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._post(
+ "/organization/admin_api_keys",
+ body=await async_maybe_transform({"name": name}, admin_api_key_create_params.AdminAPIKeyCreateParams),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=AdminAPIKeyCreateResponse,
+ )
+
+ async def retrieve(
+ self,
+ key_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AdminAPIKey:
+ """
+ Retrieve a single organization API key
+
+ Args:
+ key_id: The ID of the API key.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not key_id:
+ raise ValueError(f"Expected a non-empty value for `key_id` but received {key_id!r}")
+ return await self._get(
+ path_template("/organization/admin_api_keys/{key_id}", key_id=key_id),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=AdminAPIKey,
+ )
+
+ def list(
+ self,
+ *,
+ after: Optional[str] | Omit = omit,
+ limit: int | Omit = omit,
+ order: Literal["asc", "desc"] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AsyncPaginator[AdminAPIKey, AsyncCursorPage[AdminAPIKey]]:
+ """
+ List organization API keys
+
+ Args:
+ after: Return keys with IDs that come after this ID in the pagination order.
+
+ limit: Maximum number of keys to return.
+
+ order: Order results by creation time, ascending or descending.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get_api_list(
+ "/organization/admin_api_keys",
+ page=AsyncCursorPage[AdminAPIKey],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "limit": limit,
+ "order": order,
+ },
+ admin_api_key_list_params.AdminAPIKeyListParams,
+ ),
+ security={"admin_api_key_auth": True},
+ ),
+ model=AdminAPIKey,
+ )
+
+ async def delete(
+ self,
+ key_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AdminAPIKeyDeleteResponse:
+ """
+ Delete an organization admin API key
+
+ Args:
+ key_id: The ID of the API key to be deleted.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not key_id:
+ raise ValueError(f"Expected a non-empty value for `key_id` but received {key_id!r}")
+ return await self._delete(
+ path_template("/organization/admin_api_keys/{key_id}", key_id=key_id),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=AdminAPIKeyDeleteResponse,
+ )
+
+
+class AdminAPIKeysWithRawResponse:
+ def __init__(self, admin_api_keys: AdminAPIKeys) -> None:
+ self._admin_api_keys = admin_api_keys
+
+ self.create = _legacy_response.to_raw_response_wrapper(
+ admin_api_keys.create,
+ )
+ self.retrieve = _legacy_response.to_raw_response_wrapper(
+ admin_api_keys.retrieve,
+ )
+ self.list = _legacy_response.to_raw_response_wrapper(
+ admin_api_keys.list,
+ )
+ self.delete = _legacy_response.to_raw_response_wrapper(
+ admin_api_keys.delete,
+ )
+
+
+class AsyncAdminAPIKeysWithRawResponse:
+ def __init__(self, admin_api_keys: AsyncAdminAPIKeys) -> None:
+ self._admin_api_keys = admin_api_keys
+
+ self.create = _legacy_response.async_to_raw_response_wrapper(
+ admin_api_keys.create,
+ )
+ self.retrieve = _legacy_response.async_to_raw_response_wrapper(
+ admin_api_keys.retrieve,
+ )
+ self.list = _legacy_response.async_to_raw_response_wrapper(
+ admin_api_keys.list,
+ )
+ self.delete = _legacy_response.async_to_raw_response_wrapper(
+ admin_api_keys.delete,
+ )
+
+
+class AdminAPIKeysWithStreamingResponse:
+ def __init__(self, admin_api_keys: AdminAPIKeys) -> None:
+ self._admin_api_keys = admin_api_keys
+
+ self.create = to_streamed_response_wrapper(
+ admin_api_keys.create,
+ )
+ self.retrieve = to_streamed_response_wrapper(
+ admin_api_keys.retrieve,
+ )
+ self.list = to_streamed_response_wrapper(
+ admin_api_keys.list,
+ )
+ self.delete = to_streamed_response_wrapper(
+ admin_api_keys.delete,
+ )
+
+
+class AsyncAdminAPIKeysWithStreamingResponse:
+ def __init__(self, admin_api_keys: AsyncAdminAPIKeys) -> None:
+ self._admin_api_keys = admin_api_keys
+
+ self.create = async_to_streamed_response_wrapper(
+ admin_api_keys.create,
+ )
+ self.retrieve = async_to_streamed_response_wrapper(
+ admin_api_keys.retrieve,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ admin_api_keys.list,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ admin_api_keys.delete,
+ )
diff --git a/src/openai/resources/admin/organization/audit_logs.py b/src/openai/resources/admin/organization/audit_logs.py
new file mode 100644
index 0000000000..3cf3127631
--- /dev/null
+++ b/src/openai/resources/admin/organization/audit_logs.py
@@ -0,0 +1,387 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+from typing_extensions import Literal
+
+import httpx
+
+from .... import _legacy_response
+from ...._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
+from ...._utils import maybe_transform
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ....pagination import SyncConversationCursorPage, AsyncConversationCursorPage
+from ...._base_client import AsyncPaginator, make_request_options
+from ....types.admin.organization import audit_log_list_params
+from ....types.admin.organization.audit_log_list_response import AuditLogListResponse
+
+__all__ = ["AuditLogs", "AsyncAuditLogs"]
+
+
+class AuditLogs(SyncAPIResource):
+ """List user actions and configuration changes within this organization."""
+
+ @cached_property
+ def with_raw_response(self) -> AuditLogsWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return AuditLogsWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AuditLogsWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return AuditLogsWithStreamingResponse(self)
+
+ def list(
+ self,
+ *,
+ actor_emails: SequenceNotStr[str] | Omit = omit,
+ actor_ids: SequenceNotStr[str] | Omit = omit,
+ after: str | Omit = omit,
+ before: str | Omit = omit,
+ effective_at: audit_log_list_params.EffectiveAt | Omit = omit,
+ event_types: List[
+ Literal[
+ "api_key.created",
+ "api_key.updated",
+ "api_key.deleted",
+ "certificate.created",
+ "certificate.updated",
+ "certificate.deleted",
+ "certificates.activated",
+ "certificates.deactivated",
+ "checkpoint.permission.created",
+ "checkpoint.permission.deleted",
+ "external_key.registered",
+ "external_key.removed",
+ "group.created",
+ "group.updated",
+ "group.deleted",
+ "invite.sent",
+ "invite.accepted",
+ "invite.deleted",
+ "ip_allowlist.created",
+ "ip_allowlist.updated",
+ "ip_allowlist.deleted",
+ "ip_allowlist.config.activated",
+ "ip_allowlist.config.deactivated",
+ "login.succeeded",
+ "login.failed",
+ "logout.succeeded",
+ "logout.failed",
+ "organization.updated",
+ "project.created",
+ "project.updated",
+ "project.archived",
+ "project.deleted",
+ "rate_limit.updated",
+ "rate_limit.deleted",
+ "resource.deleted",
+ "tunnel.created",
+ "tunnel.updated",
+ "tunnel.deleted",
+ "role.created",
+ "role.updated",
+ "role.deleted",
+ "role.assignment.created",
+ "role.assignment.deleted",
+ "scim.enabled",
+ "scim.disabled",
+ "service_account.created",
+ "service_account.updated",
+ "service_account.deleted",
+ "user.added",
+ "user.updated",
+ "user.deleted",
+ ]
+ ]
+ | Omit = omit,
+ limit: int | Omit = omit,
+ project_ids: SequenceNotStr[str] | Omit = omit,
+ resource_ids: SequenceNotStr[str] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> SyncConversationCursorPage[AuditLogListResponse]:
+ """
+ List user actions and configuration changes within this organization.
+
+ Args:
+ actor_emails: Return only events performed by users with these emails.
+
+ actor_ids: Return only events performed by these actors. Can be a user ID, a service
+ account ID, or an api key tracking ID.
+
+ after: A cursor for use in pagination. `after` is an object ID that defines your place
+ in the list. For instance, if you make a list request and receive 100 objects,
+ ending with obj_foo, your subsequent call can include after=obj_foo in order to
+ fetch the next page of the list.
+
+ before: A cursor for use in pagination. `before` is an object ID that defines your place
+ in the list. For instance, if you make a list request and receive 100 objects,
+ starting with obj_foo, your subsequent call can include before=obj_foo in order
+ to fetch the previous page of the list.
+
+ effective_at: Return only events whose `effective_at` (Unix seconds) is in this range.
+
+ event_types: Return only events with a `type` in one of these values. For example,
+ `project.created`. For all options, see the documentation for the
+ [audit log object](https://platform.openai.com/docs/api-reference/audit-logs/object).
+
+ limit: A limit on the number of objects to be returned. Limit can range between 1 and
+ 100, and the default is 20.
+
+ project_ids: Return only events for these projects.
+
+ resource_ids: Return only events performed on these targets. For example, a project ID
+ updated.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get_api_list(
+ "/organization/audit_logs",
+ page=SyncConversationCursorPage[AuditLogListResponse],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "actor_emails": actor_emails,
+ "actor_ids": actor_ids,
+ "after": after,
+ "before": before,
+ "effective_at": effective_at,
+ "event_types": event_types,
+ "limit": limit,
+ "project_ids": project_ids,
+ "resource_ids": resource_ids,
+ },
+ audit_log_list_params.AuditLogListParams,
+ ),
+ security={"admin_api_key_auth": True},
+ ),
+ model=AuditLogListResponse,
+ )
+
+
+class AsyncAuditLogs(AsyncAPIResource):
+ """List user actions and configuration changes within this organization."""
+
+ @cached_property
+ def with_raw_response(self) -> AsyncAuditLogsWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncAuditLogsWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncAuditLogsWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return AsyncAuditLogsWithStreamingResponse(self)
+
+ def list(
+ self,
+ *,
+ actor_emails: SequenceNotStr[str] | Omit = omit,
+ actor_ids: SequenceNotStr[str] | Omit = omit,
+ after: str | Omit = omit,
+ before: str | Omit = omit,
+ effective_at: audit_log_list_params.EffectiveAt | Omit = omit,
+ event_types: List[
+ Literal[
+ "api_key.created",
+ "api_key.updated",
+ "api_key.deleted",
+ "certificate.created",
+ "certificate.updated",
+ "certificate.deleted",
+ "certificates.activated",
+ "certificates.deactivated",
+ "checkpoint.permission.created",
+ "checkpoint.permission.deleted",
+ "external_key.registered",
+ "external_key.removed",
+ "group.created",
+ "group.updated",
+ "group.deleted",
+ "invite.sent",
+ "invite.accepted",
+ "invite.deleted",
+ "ip_allowlist.created",
+ "ip_allowlist.updated",
+ "ip_allowlist.deleted",
+ "ip_allowlist.config.activated",
+ "ip_allowlist.config.deactivated",
+ "login.succeeded",
+ "login.failed",
+ "logout.succeeded",
+ "logout.failed",
+ "organization.updated",
+ "project.created",
+ "project.updated",
+ "project.archived",
+ "project.deleted",
+ "rate_limit.updated",
+ "rate_limit.deleted",
+ "resource.deleted",
+ "tunnel.created",
+ "tunnel.updated",
+ "tunnel.deleted",
+ "role.created",
+ "role.updated",
+ "role.deleted",
+ "role.assignment.created",
+ "role.assignment.deleted",
+ "scim.enabled",
+ "scim.disabled",
+ "service_account.created",
+ "service_account.updated",
+ "service_account.deleted",
+ "user.added",
+ "user.updated",
+ "user.deleted",
+ ]
+ ]
+ | Omit = omit,
+ limit: int | Omit = omit,
+ project_ids: SequenceNotStr[str] | Omit = omit,
+ resource_ids: SequenceNotStr[str] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AsyncPaginator[AuditLogListResponse, AsyncConversationCursorPage[AuditLogListResponse]]:
+ """
+ List user actions and configuration changes within this organization.
+
+ Args:
+ actor_emails: Return only events performed by users with these emails.
+
+ actor_ids: Return only events performed by these actors. Can be a user ID, a service
+ account ID, or an api key tracking ID.
+
+ after: A cursor for use in pagination. `after` is an object ID that defines your place
+ in the list. For instance, if you make a list request and receive 100 objects,
+ ending with obj_foo, your subsequent call can include after=obj_foo in order to
+ fetch the next page of the list.
+
+ before: A cursor for use in pagination. `before` is an object ID that defines your place
+ in the list. For instance, if you make a list request and receive 100 objects,
+ starting with obj_foo, your subsequent call can include before=obj_foo in order
+ to fetch the previous page of the list.
+
+ effective_at: Return only events whose `effective_at` (Unix seconds) is in this range.
+
+ event_types: Return only events with a `type` in one of these values. For example,
+ `project.created`. For all options, see the documentation for the
+ [audit log object](https://platform.openai.com/docs/api-reference/audit-logs/object).
+
+ limit: A limit on the number of objects to be returned. Limit can range between 1 and
+ 100, and the default is 20.
+
+ project_ids: Return only events for these projects.
+
+ resource_ids: Return only events performed on these targets. For example, a project ID
+ updated.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get_api_list(
+ "/organization/audit_logs",
+ page=AsyncConversationCursorPage[AuditLogListResponse],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "actor_emails": actor_emails,
+ "actor_ids": actor_ids,
+ "after": after,
+ "before": before,
+ "effective_at": effective_at,
+ "event_types": event_types,
+ "limit": limit,
+ "project_ids": project_ids,
+ "resource_ids": resource_ids,
+ },
+ audit_log_list_params.AuditLogListParams,
+ ),
+ security={"admin_api_key_auth": True},
+ ),
+ model=AuditLogListResponse,
+ )
+
+
+class AuditLogsWithRawResponse:
+ def __init__(self, audit_logs: AuditLogs) -> None:
+ self._audit_logs = audit_logs
+
+ self.list = _legacy_response.to_raw_response_wrapper(
+ audit_logs.list,
+ )
+
+
+class AsyncAuditLogsWithRawResponse:
+ def __init__(self, audit_logs: AsyncAuditLogs) -> None:
+ self._audit_logs = audit_logs
+
+ self.list = _legacy_response.async_to_raw_response_wrapper(
+ audit_logs.list,
+ )
+
+
+class AuditLogsWithStreamingResponse:
+ def __init__(self, audit_logs: AuditLogs) -> None:
+ self._audit_logs = audit_logs
+
+ self.list = to_streamed_response_wrapper(
+ audit_logs.list,
+ )
+
+
+class AsyncAuditLogsWithStreamingResponse:
+ def __init__(self, audit_logs: AsyncAuditLogs) -> None:
+ self._audit_logs = audit_logs
+
+ self.list = async_to_streamed_response_wrapper(
+ audit_logs.list,
+ )
diff --git a/src/openai/resources/admin/organization/certificates.py b/src/openai/resources/admin/organization/certificates.py
new file mode 100644
index 0000000000..aa7826794d
--- /dev/null
+++ b/src/openai/resources/admin/organization/certificates.py
@@ -0,0 +1,818 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+from typing_extensions import Literal
+
+import httpx
+
+from .... import _legacy_response
+from ...._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
+from ...._utils import path_template, maybe_transform, async_maybe_transform
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ....pagination import SyncPage, AsyncPage, SyncConversationCursorPage, AsyncConversationCursorPage
+from ...._base_client import AsyncPaginator, make_request_options
+from ....types.admin.organization import (
+ certificate_list_params,
+ certificate_create_params,
+ certificate_update_params,
+ certificate_activate_params,
+ certificate_retrieve_params,
+ certificate_deactivate_params,
+)
+from ....types.admin.organization.certificate import Certificate
+from ....types.admin.organization.certificate_list_response import CertificateListResponse
+from ....types.admin.organization.certificate_delete_response import CertificateDeleteResponse
+from ....types.admin.organization.certificate_activate_response import CertificateActivateResponse
+from ....types.admin.organization.certificate_deactivate_response import CertificateDeactivateResponse
+
+__all__ = ["Certificates", "AsyncCertificates"]
+
+
+class Certificates(SyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> CertificatesWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return CertificatesWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> CertificatesWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return CertificatesWithStreamingResponse(self)
+
+ def create(
+ self,
+ *,
+ certificate: str,
+ name: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> Certificate:
+ """Upload a certificate to the organization.
+
+ This does **not** automatically
+ activate the certificate.
+
+ Organizations can upload up to 50 certificates.
+
+ Args:
+ certificate: The certificate content in PEM format
+
+ name: An optional name for the certificate
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._post(
+ "/organization/certificates",
+ body=maybe_transform(
+ {
+ "certificate": certificate,
+ "name": name,
+ },
+ certificate_create_params.CertificateCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=Certificate,
+ )
+
+ def retrieve(
+ self,
+ certificate_id: str,
+ *,
+ include: List[Literal["content"]] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> Certificate:
+ """
+ Get a certificate that has been uploaded to the organization.
+
+ You can get a certificate regardless of whether it is active or not.
+
+ Args:
+ include: A list of additional fields to include in the response. Currently the only
+ supported value is `content` to fetch the PEM content of the certificate.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not certificate_id:
+ raise ValueError(f"Expected a non-empty value for `certificate_id` but received {certificate_id!r}")
+ return self._get(
+ path_template("/organization/certificates/{certificate_id}", certificate_id=certificate_id),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform({"include": include}, certificate_retrieve_params.CertificateRetrieveParams),
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=Certificate,
+ )
+
+ def update(
+ self,
+ certificate_id: str,
+ *,
+ name: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> Certificate:
+ """Modify a certificate.
+
+ Note that only the name can be modified.
+
+ Args:
+ name: The updated name for the certificate
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not certificate_id:
+ raise ValueError(f"Expected a non-empty value for `certificate_id` but received {certificate_id!r}")
+ return self._post(
+ path_template("/organization/certificates/{certificate_id}", certificate_id=certificate_id),
+ body=maybe_transform({"name": name}, certificate_update_params.CertificateUpdateParams),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=Certificate,
+ )
+
+ def list(
+ self,
+ *,
+ after: str | Omit = omit,
+ limit: int | Omit = omit,
+ order: Literal["asc", "desc"] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> SyncConversationCursorPage[CertificateListResponse]:
+ """
+ List uploaded certificates for this organization.
+
+ Args:
+ after: A cursor for use in pagination. `after` is an object ID that defines your place
+ in the list. For instance, if you make a list request and receive 100 objects,
+ ending with obj_foo, your subsequent call can include after=obj_foo in order to
+ fetch the next page of the list.
+
+ limit: A limit on the number of objects to be returned. Limit can range between 1 and
+ 100, and the default is 20.
+
+ order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+ order and `desc` for descending order.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get_api_list(
+ "/organization/certificates",
+ page=SyncConversationCursorPage[CertificateListResponse],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "limit": limit,
+ "order": order,
+ },
+ certificate_list_params.CertificateListParams,
+ ),
+ security={"admin_api_key_auth": True},
+ ),
+ model=CertificateListResponse,
+ )
+
+ def delete(
+ self,
+ certificate_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> CertificateDeleteResponse:
+ """
+ Delete a certificate from the organization.
+
+ The certificate must be inactive for the organization and all projects.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not certificate_id:
+ raise ValueError(f"Expected a non-empty value for `certificate_id` but received {certificate_id!r}")
+ return self._delete(
+ path_template("/organization/certificates/{certificate_id}", certificate_id=certificate_id),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=CertificateDeleteResponse,
+ )
+
+ def activate(
+ self,
+ *,
+ certificate_ids: SequenceNotStr[str],
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> SyncPage[CertificateActivateResponse]:
+ """
+ Activate certificates at the organization level.
+
+ You can atomically and idempotently activate up to 10 certificates at a time.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get_api_list(
+ "/organization/certificates/activate",
+ page=SyncPage[CertificateActivateResponse],
+ body=maybe_transform(
+ {"certificate_ids": certificate_ids}, certificate_activate_params.CertificateActivateParams
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ model=CertificateActivateResponse,
+ method="post",
+ )
+
+ def deactivate(
+ self,
+ *,
+ certificate_ids: SequenceNotStr[str],
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> SyncPage[CertificateDeactivateResponse]:
+ """
+ Deactivate certificates at the organization level.
+
+ You can atomically and idempotently deactivate up to 10 certificates at a time.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get_api_list(
+ "/organization/certificates/deactivate",
+ page=SyncPage[CertificateDeactivateResponse],
+ body=maybe_transform(
+ {"certificate_ids": certificate_ids}, certificate_deactivate_params.CertificateDeactivateParams
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ model=CertificateDeactivateResponse,
+ method="post",
+ )
+
+
+class AsyncCertificates(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncCertificatesWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncCertificatesWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncCertificatesWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return AsyncCertificatesWithStreamingResponse(self)
+
+ async def create(
+ self,
+ *,
+ certificate: str,
+ name: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> Certificate:
+ """Upload a certificate to the organization.
+
+ This does **not** automatically
+ activate the certificate.
+
+ Organizations can upload up to 50 certificates.
+
+ Args:
+ certificate: The certificate content in PEM format
+
+ name: An optional name for the certificate
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._post(
+ "/organization/certificates",
+ body=await async_maybe_transform(
+ {
+ "certificate": certificate,
+ "name": name,
+ },
+ certificate_create_params.CertificateCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=Certificate,
+ )
+
+ async def retrieve(
+ self,
+ certificate_id: str,
+ *,
+ include: List[Literal["content"]] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> Certificate:
+ """
+ Get a certificate that has been uploaded to the organization.
+
+ You can get a certificate regardless of whether it is active or not.
+
+ Args:
+ include: A list of additional fields to include in the response. Currently the only
+ supported value is `content` to fetch the PEM content of the certificate.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not certificate_id:
+ raise ValueError(f"Expected a non-empty value for `certificate_id` but received {certificate_id!r}")
+ return await self._get(
+ path_template("/organization/certificates/{certificate_id}", certificate_id=certificate_id),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {"include": include}, certificate_retrieve_params.CertificateRetrieveParams
+ ),
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=Certificate,
+ )
+
+ async def update(
+ self,
+ certificate_id: str,
+ *,
+ name: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> Certificate:
+ """Modify a certificate.
+
+ Note that only the name can be modified.
+
+ Args:
+ name: The updated name for the certificate
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not certificate_id:
+ raise ValueError(f"Expected a non-empty value for `certificate_id` but received {certificate_id!r}")
+ return await self._post(
+ path_template("/organization/certificates/{certificate_id}", certificate_id=certificate_id),
+ body=await async_maybe_transform({"name": name}, certificate_update_params.CertificateUpdateParams),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=Certificate,
+ )
+
+ def list(
+ self,
+ *,
+ after: str | Omit = omit,
+ limit: int | Omit = omit,
+ order: Literal["asc", "desc"] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AsyncPaginator[CertificateListResponse, AsyncConversationCursorPage[CertificateListResponse]]:
+ """
+ List uploaded certificates for this organization.
+
+ Args:
+ after: A cursor for use in pagination. `after` is an object ID that defines your place
+ in the list. For instance, if you make a list request and receive 100 objects,
+ ending with obj_foo, your subsequent call can include after=obj_foo in order to
+ fetch the next page of the list.
+
+ limit: A limit on the number of objects to be returned. Limit can range between 1 and
+ 100, and the default is 20.
+
+ order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+ order and `desc` for descending order.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get_api_list(
+ "/organization/certificates",
+ page=AsyncConversationCursorPage[CertificateListResponse],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "limit": limit,
+ "order": order,
+ },
+ certificate_list_params.CertificateListParams,
+ ),
+ security={"admin_api_key_auth": True},
+ ),
+ model=CertificateListResponse,
+ )
+
+ async def delete(
+ self,
+ certificate_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> CertificateDeleteResponse:
+ """
+ Delete a certificate from the organization.
+
+ The certificate must be inactive for the organization and all projects.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not certificate_id:
+ raise ValueError(f"Expected a non-empty value for `certificate_id` but received {certificate_id!r}")
+ return await self._delete(
+ path_template("/organization/certificates/{certificate_id}", certificate_id=certificate_id),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=CertificateDeleteResponse,
+ )
+
+ def activate(
+ self,
+ *,
+ certificate_ids: SequenceNotStr[str],
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AsyncPaginator[CertificateActivateResponse, AsyncPage[CertificateActivateResponse]]:
+ """
+ Activate certificates at the organization level.
+
+ You can atomically and idempotently activate up to 10 certificates at a time.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get_api_list(
+ "/organization/certificates/activate",
+ page=AsyncPage[CertificateActivateResponse],
+ body=maybe_transform(
+ {"certificate_ids": certificate_ids}, certificate_activate_params.CertificateActivateParams
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ model=CertificateActivateResponse,
+ method="post",
+ )
+
+ def deactivate(
+ self,
+ *,
+ certificate_ids: SequenceNotStr[str],
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AsyncPaginator[CertificateDeactivateResponse, AsyncPage[CertificateDeactivateResponse]]:
+ """
+ Deactivate certificates at the organization level.
+
+ You can atomically and idempotently deactivate up to 10 certificates at a time.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get_api_list(
+ "/organization/certificates/deactivate",
+ page=AsyncPage[CertificateDeactivateResponse],
+ body=maybe_transform(
+ {"certificate_ids": certificate_ids}, certificate_deactivate_params.CertificateDeactivateParams
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ model=CertificateDeactivateResponse,
+ method="post",
+ )
+
+
+class CertificatesWithRawResponse:
+ def __init__(self, certificates: Certificates) -> None:
+ self._certificates = certificates
+
+ self.create = _legacy_response.to_raw_response_wrapper(
+ certificates.create,
+ )
+ self.retrieve = _legacy_response.to_raw_response_wrapper(
+ certificates.retrieve,
+ )
+ self.update = _legacy_response.to_raw_response_wrapper(
+ certificates.update,
+ )
+ self.list = _legacy_response.to_raw_response_wrapper(
+ certificates.list,
+ )
+ self.delete = _legacy_response.to_raw_response_wrapper(
+ certificates.delete,
+ )
+ self.activate = _legacy_response.to_raw_response_wrapper(
+ certificates.activate,
+ )
+ self.deactivate = _legacy_response.to_raw_response_wrapper(
+ certificates.deactivate,
+ )
+
+
+class AsyncCertificatesWithRawResponse:
+ def __init__(self, certificates: AsyncCertificates) -> None:
+ self._certificates = certificates
+
+ self.create = _legacy_response.async_to_raw_response_wrapper(
+ certificates.create,
+ )
+ self.retrieve = _legacy_response.async_to_raw_response_wrapper(
+ certificates.retrieve,
+ )
+ self.update = _legacy_response.async_to_raw_response_wrapper(
+ certificates.update,
+ )
+ self.list = _legacy_response.async_to_raw_response_wrapper(
+ certificates.list,
+ )
+ self.delete = _legacy_response.async_to_raw_response_wrapper(
+ certificates.delete,
+ )
+ self.activate = _legacy_response.async_to_raw_response_wrapper(
+ certificates.activate,
+ )
+ self.deactivate = _legacy_response.async_to_raw_response_wrapper(
+ certificates.deactivate,
+ )
+
+
+class CertificatesWithStreamingResponse:
+ def __init__(self, certificates: Certificates) -> None:
+ self._certificates = certificates
+
+ self.create = to_streamed_response_wrapper(
+ certificates.create,
+ )
+ self.retrieve = to_streamed_response_wrapper(
+ certificates.retrieve,
+ )
+ self.update = to_streamed_response_wrapper(
+ certificates.update,
+ )
+ self.list = to_streamed_response_wrapper(
+ certificates.list,
+ )
+ self.delete = to_streamed_response_wrapper(
+ certificates.delete,
+ )
+ self.activate = to_streamed_response_wrapper(
+ certificates.activate,
+ )
+ self.deactivate = to_streamed_response_wrapper(
+ certificates.deactivate,
+ )
+
+
+class AsyncCertificatesWithStreamingResponse:
+ def __init__(self, certificates: AsyncCertificates) -> None:
+ self._certificates = certificates
+
+ self.create = async_to_streamed_response_wrapper(
+ certificates.create,
+ )
+ self.retrieve = async_to_streamed_response_wrapper(
+ certificates.retrieve,
+ )
+ self.update = async_to_streamed_response_wrapper(
+ certificates.update,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ certificates.list,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ certificates.delete,
+ )
+ self.activate = async_to_streamed_response_wrapper(
+ certificates.activate,
+ )
+ self.deactivate = async_to_streamed_response_wrapper(
+ certificates.deactivate,
+ )
diff --git a/src/openai/resources/admin/organization/groups/__init__.py b/src/openai/resources/admin/organization/groups/__init__.py
new file mode 100644
index 0000000000..ffeb8b60ec
--- /dev/null
+++ b/src/openai/resources/admin/organization/groups/__init__.py
@@ -0,0 +1,47 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .roles import (
+ Roles,
+ AsyncRoles,
+ RolesWithRawResponse,
+ AsyncRolesWithRawResponse,
+ RolesWithStreamingResponse,
+ AsyncRolesWithStreamingResponse,
+)
+from .users import (
+ Users,
+ AsyncUsers,
+ UsersWithRawResponse,
+ AsyncUsersWithRawResponse,
+ UsersWithStreamingResponse,
+ AsyncUsersWithStreamingResponse,
+)
+from .groups import (
+ Groups,
+ AsyncGroups,
+ GroupsWithRawResponse,
+ AsyncGroupsWithRawResponse,
+ GroupsWithStreamingResponse,
+ AsyncGroupsWithStreamingResponse,
+)
+
+__all__ = [
+ "Users",
+ "AsyncUsers",
+ "UsersWithRawResponse",
+ "AsyncUsersWithRawResponse",
+ "UsersWithStreamingResponse",
+ "AsyncUsersWithStreamingResponse",
+ "Roles",
+ "AsyncRoles",
+ "RolesWithRawResponse",
+ "AsyncRolesWithRawResponse",
+ "RolesWithStreamingResponse",
+ "AsyncRolesWithStreamingResponse",
+ "Groups",
+ "AsyncGroups",
+ "GroupsWithRawResponse",
+ "AsyncGroupsWithRawResponse",
+ "GroupsWithStreamingResponse",
+ "AsyncGroupsWithStreamingResponse",
+]
diff --git a/src/openai/resources/admin/organization/groups/groups.py b/src/openai/resources/admin/organization/groups/groups.py
new file mode 100644
index 0000000000..80baa38926
--- /dev/null
+++ b/src/openai/resources/admin/organization/groups/groups.py
@@ -0,0 +1,544 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal
+
+import httpx
+
+from ..... import _legacy_response
+from .roles import (
+ Roles,
+ AsyncRoles,
+ RolesWithRawResponse,
+ AsyncRolesWithRawResponse,
+ RolesWithStreamingResponse,
+ AsyncRolesWithStreamingResponse,
+)
+from .users import (
+ Users,
+ AsyncUsers,
+ UsersWithRawResponse,
+ AsyncUsersWithRawResponse,
+ UsersWithStreamingResponse,
+ AsyncUsersWithStreamingResponse,
+)
+from ....._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
+from ....._utils import path_template, maybe_transform, async_maybe_transform
+from ....._compat import cached_property
+from ....._resource import SyncAPIResource, AsyncAPIResource
+from ....._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from .....pagination import SyncNextCursorPage, AsyncNextCursorPage
+from ....._base_client import AsyncPaginator, make_request_options
+from .....types.admin.organization import group_list_params, group_create_params, group_update_params
+from .....types.admin.organization.group import Group
+from .....types.admin.organization.group_delete_response import GroupDeleteResponse
+from .....types.admin.organization.group_update_response import GroupUpdateResponse
+
+__all__ = ["Groups", "AsyncGroups"]
+
+
+class Groups(SyncAPIResource):
+ @cached_property
+ def users(self) -> Users:
+ return Users(self._client)
+
+ @cached_property
+ def roles(self) -> Roles:
+ return Roles(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> GroupsWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return GroupsWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> GroupsWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return GroupsWithStreamingResponse(self)
+
+ def create(
+ self,
+ *,
+ name: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> Group:
+ """
+ Creates a new group in the organization.
+
+ Args:
+ name: Human readable name for the group.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._post(
+ "/organization/groups",
+ body=maybe_transform({"name": name}, group_create_params.GroupCreateParams),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=Group,
+ )
+
+ def update(
+ self,
+ group_id: str,
+ *,
+ name: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> GroupUpdateResponse:
+ """
+ Updates a group's information.
+
+ Args:
+ name: New display name for the group.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not group_id:
+ raise ValueError(f"Expected a non-empty value for `group_id` but received {group_id!r}")
+ return self._post(
+ path_template("/organization/groups/{group_id}", group_id=group_id),
+ body=maybe_transform({"name": name}, group_update_params.GroupUpdateParams),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=GroupUpdateResponse,
+ )
+
+ def list(
+ self,
+ *,
+ after: str | Omit = omit,
+ limit: int | Omit = omit,
+ order: Literal["asc", "desc"] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> SyncNextCursorPage[Group]:
+ """
+ Lists all groups in the organization.
+
+ Args:
+ after: A cursor for use in pagination. `after` is a group ID that defines your place in
+ the list. For instance, if you make a list request and receive 100 objects,
+ ending with group_abc, your subsequent call can include `after=group_abc` in
+ order to fetch the next page of the list.
+
+ limit: A limit on the number of groups to be returned. Limit can range between 0 and
+ 1000, and the default is 100.
+
+ order: Specifies the sort order of the returned groups.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get_api_list(
+ "/organization/groups",
+ page=SyncNextCursorPage[Group],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "limit": limit,
+ "order": order,
+ },
+ group_list_params.GroupListParams,
+ ),
+ security={"admin_api_key_auth": True},
+ ),
+ model=Group,
+ )
+
+ def delete(
+ self,
+ group_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> GroupDeleteResponse:
+ """
+ Deletes a group from the organization.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not group_id:
+ raise ValueError(f"Expected a non-empty value for `group_id` but received {group_id!r}")
+ return self._delete(
+ path_template("/organization/groups/{group_id}", group_id=group_id),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=GroupDeleteResponse,
+ )
+
+
+class AsyncGroups(AsyncAPIResource):
+ @cached_property
+ def users(self) -> AsyncUsers:
+ return AsyncUsers(self._client)
+
+ @cached_property
+ def roles(self) -> AsyncRoles:
+ return AsyncRoles(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> AsyncGroupsWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncGroupsWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncGroupsWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return AsyncGroupsWithStreamingResponse(self)
+
+ async def create(
+ self,
+ *,
+ name: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> Group:
+ """
+ Creates a new group in the organization.
+
+ Args:
+ name: Human readable name for the group.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._post(
+ "/organization/groups",
+ body=await async_maybe_transform({"name": name}, group_create_params.GroupCreateParams),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=Group,
+ )
+
+ async def update(
+ self,
+ group_id: str,
+ *,
+ name: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> GroupUpdateResponse:
+ """
+ Updates a group's information.
+
+ Args:
+ name: New display name for the group.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not group_id:
+ raise ValueError(f"Expected a non-empty value for `group_id` but received {group_id!r}")
+ return await self._post(
+ path_template("/organization/groups/{group_id}", group_id=group_id),
+ body=await async_maybe_transform({"name": name}, group_update_params.GroupUpdateParams),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=GroupUpdateResponse,
+ )
+
+ def list(
+ self,
+ *,
+ after: str | Omit = omit,
+ limit: int | Omit = omit,
+ order: Literal["asc", "desc"] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AsyncPaginator[Group, AsyncNextCursorPage[Group]]:
+ """
+ Lists all groups in the organization.
+
+ Args:
+ after: A cursor for use in pagination. `after` is a group ID that defines your place in
+ the list. For instance, if you make a list request and receive 100 objects,
+ ending with group_abc, your subsequent call can include `after=group_abc` in
+ order to fetch the next page of the list.
+
+ limit: A limit on the number of groups to be returned. Limit can range between 0 and
+ 1000, and the default is 100.
+
+ order: Specifies the sort order of the returned groups.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get_api_list(
+ "/organization/groups",
+ page=AsyncNextCursorPage[Group],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "limit": limit,
+ "order": order,
+ },
+ group_list_params.GroupListParams,
+ ),
+ security={"admin_api_key_auth": True},
+ ),
+ model=Group,
+ )
+
+ async def delete(
+ self,
+ group_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> GroupDeleteResponse:
+ """
+ Deletes a group from the organization.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not group_id:
+ raise ValueError(f"Expected a non-empty value for `group_id` but received {group_id!r}")
+ return await self._delete(
+ path_template("/organization/groups/{group_id}", group_id=group_id),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=GroupDeleteResponse,
+ )
+
+
+class GroupsWithRawResponse:
+ def __init__(self, groups: Groups) -> None:
+ self._groups = groups
+
+ self.create = _legacy_response.to_raw_response_wrapper(
+ groups.create,
+ )
+ self.update = _legacy_response.to_raw_response_wrapper(
+ groups.update,
+ )
+ self.list = _legacy_response.to_raw_response_wrapper(
+ groups.list,
+ )
+ self.delete = _legacy_response.to_raw_response_wrapper(
+ groups.delete,
+ )
+
+ @cached_property
+ def users(self) -> UsersWithRawResponse:
+ return UsersWithRawResponse(self._groups.users)
+
+ @cached_property
+ def roles(self) -> RolesWithRawResponse:
+ return RolesWithRawResponse(self._groups.roles)
+
+
+class AsyncGroupsWithRawResponse:
+ def __init__(self, groups: AsyncGroups) -> None:
+ self._groups = groups
+
+ self.create = _legacy_response.async_to_raw_response_wrapper(
+ groups.create,
+ )
+ self.update = _legacy_response.async_to_raw_response_wrapper(
+ groups.update,
+ )
+ self.list = _legacy_response.async_to_raw_response_wrapper(
+ groups.list,
+ )
+ self.delete = _legacy_response.async_to_raw_response_wrapper(
+ groups.delete,
+ )
+
+ @cached_property
+ def users(self) -> AsyncUsersWithRawResponse:
+ return AsyncUsersWithRawResponse(self._groups.users)
+
+ @cached_property
+ def roles(self) -> AsyncRolesWithRawResponse:
+ return AsyncRolesWithRawResponse(self._groups.roles)
+
+
+class GroupsWithStreamingResponse:
+ def __init__(self, groups: Groups) -> None:
+ self._groups = groups
+
+ self.create = to_streamed_response_wrapper(
+ groups.create,
+ )
+ self.update = to_streamed_response_wrapper(
+ groups.update,
+ )
+ self.list = to_streamed_response_wrapper(
+ groups.list,
+ )
+ self.delete = to_streamed_response_wrapper(
+ groups.delete,
+ )
+
+ @cached_property
+ def users(self) -> UsersWithStreamingResponse:
+ return UsersWithStreamingResponse(self._groups.users)
+
+ @cached_property
+ def roles(self) -> RolesWithStreamingResponse:
+ return RolesWithStreamingResponse(self._groups.roles)
+
+
+class AsyncGroupsWithStreamingResponse:
+ def __init__(self, groups: AsyncGroups) -> None:
+ self._groups = groups
+
+ self.create = async_to_streamed_response_wrapper(
+ groups.create,
+ )
+ self.update = async_to_streamed_response_wrapper(
+ groups.update,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ groups.list,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ groups.delete,
+ )
+
+ @cached_property
+ def users(self) -> AsyncUsersWithStreamingResponse:
+ return AsyncUsersWithStreamingResponse(self._groups.users)
+
+ @cached_property
+ def roles(self) -> AsyncRolesWithStreamingResponse:
+ return AsyncRolesWithStreamingResponse(self._groups.roles)
diff --git a/src/openai/resources/admin/organization/groups/roles.py b/src/openai/resources/admin/organization/groups/roles.py
new file mode 100644
index 0000000000..c85efccfef
--- /dev/null
+++ b/src/openai/resources/admin/organization/groups/roles.py
@@ -0,0 +1,398 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal
+
+import httpx
+
+from ..... import _legacy_response
+from ....._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
+from ....._utils import path_template, maybe_transform, async_maybe_transform
+from ....._compat import cached_property
+from ....._resource import SyncAPIResource, AsyncAPIResource
+from ....._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from .....pagination import SyncNextCursorPage, AsyncNextCursorPage
+from ....._base_client import AsyncPaginator, make_request_options
+from .....types.admin.organization.groups import role_list_params, role_create_params
+from .....types.admin.organization.groups.role_list_response import RoleListResponse
+from .....types.admin.organization.groups.role_create_response import RoleCreateResponse
+from .....types.admin.organization.groups.role_delete_response import RoleDeleteResponse
+
+__all__ = ["Roles", "AsyncRoles"]
+
+
+class Roles(SyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> RolesWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return RolesWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> RolesWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return RolesWithStreamingResponse(self)
+
+ def create(
+ self,
+ group_id: str,
+ *,
+ role_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> RoleCreateResponse:
+ """
+ Assigns an organization role to a group within the organization.
+
+ Args:
+ role_id: Identifier of the role to assign.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not group_id:
+ raise ValueError(f"Expected a non-empty value for `group_id` but received {group_id!r}")
+ return self._post(
+ path_template("/organization/groups/{group_id}/roles", group_id=group_id),
+ body=maybe_transform({"role_id": role_id}, role_create_params.RoleCreateParams),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=RoleCreateResponse,
+ )
+
+ def list(
+ self,
+ group_id: str,
+ *,
+ after: str | Omit = omit,
+ limit: int | Omit = omit,
+ order: Literal["asc", "desc"] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> SyncNextCursorPage[RoleListResponse]:
+ """
+ Lists the organization roles assigned to a group within the organization.
+
+ Args:
+ after: Cursor for pagination. Provide the value from the previous response's `next`
+ field to continue listing organization roles.
+
+ limit: A limit on the number of organization role assignments to return.
+
+ order: Sort order for the returned organization roles.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not group_id:
+ raise ValueError(f"Expected a non-empty value for `group_id` but received {group_id!r}")
+ return self._get_api_list(
+ path_template("/organization/groups/{group_id}/roles", group_id=group_id),
+ page=SyncNextCursorPage[RoleListResponse],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "limit": limit,
+ "order": order,
+ },
+ role_list_params.RoleListParams,
+ ),
+ security={"admin_api_key_auth": True},
+ ),
+ model=RoleListResponse,
+ )
+
+ def delete(
+ self,
+ role_id: str,
+ *,
+ group_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> RoleDeleteResponse:
+ """
+ Unassigns an organization role from a group within the organization.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not group_id:
+ raise ValueError(f"Expected a non-empty value for `group_id` but received {group_id!r}")
+ if not role_id:
+ raise ValueError(f"Expected a non-empty value for `role_id` but received {role_id!r}")
+ return self._delete(
+ path_template("/organization/groups/{group_id}/roles/{role_id}", group_id=group_id, role_id=role_id),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=RoleDeleteResponse,
+ )
+
+
+class AsyncRoles(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncRolesWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncRolesWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncRolesWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return AsyncRolesWithStreamingResponse(self)
+
+ async def create(
+ self,
+ group_id: str,
+ *,
+ role_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> RoleCreateResponse:
+ """
+ Assigns an organization role to a group within the organization.
+
+ Args:
+ role_id: Identifier of the role to assign.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not group_id:
+ raise ValueError(f"Expected a non-empty value for `group_id` but received {group_id!r}")
+ return await self._post(
+ path_template("/organization/groups/{group_id}/roles", group_id=group_id),
+ body=await async_maybe_transform({"role_id": role_id}, role_create_params.RoleCreateParams),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=RoleCreateResponse,
+ )
+
+ def list(
+ self,
+ group_id: str,
+ *,
+ after: str | Omit = omit,
+ limit: int | Omit = omit,
+ order: Literal["asc", "desc"] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AsyncPaginator[RoleListResponse, AsyncNextCursorPage[RoleListResponse]]:
+ """
+ Lists the organization roles assigned to a group within the organization.
+
+ Args:
+ after: Cursor for pagination. Provide the value from the previous response's `next`
+ field to continue listing organization roles.
+
+ limit: A limit on the number of organization role assignments to return.
+
+ order: Sort order for the returned organization roles.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not group_id:
+ raise ValueError(f"Expected a non-empty value for `group_id` but received {group_id!r}")
+ return self._get_api_list(
+ path_template("/organization/groups/{group_id}/roles", group_id=group_id),
+ page=AsyncNextCursorPage[RoleListResponse],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "limit": limit,
+ "order": order,
+ },
+ role_list_params.RoleListParams,
+ ),
+ security={"admin_api_key_auth": True},
+ ),
+ model=RoleListResponse,
+ )
+
+ async def delete(
+ self,
+ role_id: str,
+ *,
+ group_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> RoleDeleteResponse:
+ """
+ Unassigns an organization role from a group within the organization.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not group_id:
+ raise ValueError(f"Expected a non-empty value for `group_id` but received {group_id!r}")
+ if not role_id:
+ raise ValueError(f"Expected a non-empty value for `role_id` but received {role_id!r}")
+ return await self._delete(
+ path_template("/organization/groups/{group_id}/roles/{role_id}", group_id=group_id, role_id=role_id),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=RoleDeleteResponse,
+ )
+
+
+class RolesWithRawResponse:
+ def __init__(self, roles: Roles) -> None:
+ self._roles = roles
+
+ self.create = _legacy_response.to_raw_response_wrapper(
+ roles.create,
+ )
+ self.list = _legacy_response.to_raw_response_wrapper(
+ roles.list,
+ )
+ self.delete = _legacy_response.to_raw_response_wrapper(
+ roles.delete,
+ )
+
+
+class AsyncRolesWithRawResponse:
+ def __init__(self, roles: AsyncRoles) -> None:
+ self._roles = roles
+
+ self.create = _legacy_response.async_to_raw_response_wrapper(
+ roles.create,
+ )
+ self.list = _legacy_response.async_to_raw_response_wrapper(
+ roles.list,
+ )
+ self.delete = _legacy_response.async_to_raw_response_wrapper(
+ roles.delete,
+ )
+
+
+class RolesWithStreamingResponse:
+ def __init__(self, roles: Roles) -> None:
+ self._roles = roles
+
+ self.create = to_streamed_response_wrapper(
+ roles.create,
+ )
+ self.list = to_streamed_response_wrapper(
+ roles.list,
+ )
+ self.delete = to_streamed_response_wrapper(
+ roles.delete,
+ )
+
+
+class AsyncRolesWithStreamingResponse:
+ def __init__(self, roles: AsyncRoles) -> None:
+ self._roles = roles
+
+ self.create = async_to_streamed_response_wrapper(
+ roles.create,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ roles.list,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ roles.delete,
+ )
diff --git a/src/openai/resources/admin/organization/groups/users.py b/src/openai/resources/admin/organization/groups/users.py
new file mode 100644
index 0000000000..89a2996e13
--- /dev/null
+++ b/src/openai/resources/admin/organization/groups/users.py
@@ -0,0 +1,400 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal
+
+import httpx
+
+from ..... import _legacy_response
+from ....._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
+from ....._utils import path_template, maybe_transform, async_maybe_transform
+from ....._compat import cached_property
+from ....._resource import SyncAPIResource, AsyncAPIResource
+from ....._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from .....pagination import SyncNextCursorPage, AsyncNextCursorPage
+from ....._base_client import AsyncPaginator, make_request_options
+from .....types.admin.organization.groups import user_list_params, user_create_params
+from .....types.admin.organization.groups.user_create_response import UserCreateResponse
+from .....types.admin.organization.groups.user_delete_response import UserDeleteResponse
+from .....types.admin.organization.groups.organization_group_user import OrganizationGroupUser
+
+__all__ = ["Users", "AsyncUsers"]
+
+
+class Users(SyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> UsersWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return UsersWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> UsersWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return UsersWithStreamingResponse(self)
+
+ def create(
+ self,
+ group_id: str,
+ *,
+ user_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> UserCreateResponse:
+ """
+ Adds a user to a group.
+
+ Args:
+ user_id: Identifier of the user to add to the group.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not group_id:
+ raise ValueError(f"Expected a non-empty value for `group_id` but received {group_id!r}")
+ return self._post(
+ path_template("/organization/groups/{group_id}/users", group_id=group_id),
+ body=maybe_transform({"user_id": user_id}, user_create_params.UserCreateParams),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=UserCreateResponse,
+ )
+
+ def list(
+ self,
+ group_id: str,
+ *,
+ after: str | Omit = omit,
+ limit: int | Omit = omit,
+ order: Literal["asc", "desc"] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> SyncNextCursorPage[OrganizationGroupUser]:
+ """
+ Lists the users assigned to a group.
+
+ Args:
+ after: A cursor for use in pagination. Provide the ID of the last user from the
+ previous list response to retrieve the next page.
+
+ limit: A limit on the number of users to be returned. Limit can range between 0 and
+ 1000, and the default is 100.
+
+ order: Specifies the sort order of users in the list.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not group_id:
+ raise ValueError(f"Expected a non-empty value for `group_id` but received {group_id!r}")
+ return self._get_api_list(
+ path_template("/organization/groups/{group_id}/users", group_id=group_id),
+ page=SyncNextCursorPage[OrganizationGroupUser],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "limit": limit,
+ "order": order,
+ },
+ user_list_params.UserListParams,
+ ),
+ security={"admin_api_key_auth": True},
+ ),
+ model=OrganizationGroupUser,
+ )
+
+ def delete(
+ self,
+ user_id: str,
+ *,
+ group_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> UserDeleteResponse:
+ """
+ Removes a user from a group.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not group_id:
+ raise ValueError(f"Expected a non-empty value for `group_id` but received {group_id!r}")
+ if not user_id:
+ raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}")
+ return self._delete(
+ path_template("/organization/groups/{group_id}/users/{user_id}", group_id=group_id, user_id=user_id),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=UserDeleteResponse,
+ )
+
+
+class AsyncUsers(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncUsersWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncUsersWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncUsersWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return AsyncUsersWithStreamingResponse(self)
+
+ async def create(
+ self,
+ group_id: str,
+ *,
+ user_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> UserCreateResponse:
+ """
+ Adds a user to a group.
+
+ Args:
+ user_id: Identifier of the user to add to the group.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not group_id:
+ raise ValueError(f"Expected a non-empty value for `group_id` but received {group_id!r}")
+ return await self._post(
+ path_template("/organization/groups/{group_id}/users", group_id=group_id),
+ body=await async_maybe_transform({"user_id": user_id}, user_create_params.UserCreateParams),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=UserCreateResponse,
+ )
+
+ def list(
+ self,
+ group_id: str,
+ *,
+ after: str | Omit = omit,
+ limit: int | Omit = omit,
+ order: Literal["asc", "desc"] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AsyncPaginator[OrganizationGroupUser, AsyncNextCursorPage[OrganizationGroupUser]]:
+ """
+ Lists the users assigned to a group.
+
+ Args:
+ after: A cursor for use in pagination. Provide the ID of the last user from the
+ previous list response to retrieve the next page.
+
+ limit: A limit on the number of users to be returned. Limit can range between 0 and
+ 1000, and the default is 100.
+
+ order: Specifies the sort order of users in the list.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not group_id:
+ raise ValueError(f"Expected a non-empty value for `group_id` but received {group_id!r}")
+ return self._get_api_list(
+ path_template("/organization/groups/{group_id}/users", group_id=group_id),
+ page=AsyncNextCursorPage[OrganizationGroupUser],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "limit": limit,
+ "order": order,
+ },
+ user_list_params.UserListParams,
+ ),
+ security={"admin_api_key_auth": True},
+ ),
+ model=OrganizationGroupUser,
+ )
+
+ async def delete(
+ self,
+ user_id: str,
+ *,
+ group_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> UserDeleteResponse:
+ """
+ Removes a user from a group.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not group_id:
+ raise ValueError(f"Expected a non-empty value for `group_id` but received {group_id!r}")
+ if not user_id:
+ raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}")
+ return await self._delete(
+ path_template("/organization/groups/{group_id}/users/{user_id}", group_id=group_id, user_id=user_id),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=UserDeleteResponse,
+ )
+
+
+class UsersWithRawResponse:
+ def __init__(self, users: Users) -> None:
+ self._users = users
+
+ self.create = _legacy_response.to_raw_response_wrapper(
+ users.create,
+ )
+ self.list = _legacy_response.to_raw_response_wrapper(
+ users.list,
+ )
+ self.delete = _legacy_response.to_raw_response_wrapper(
+ users.delete,
+ )
+
+
+class AsyncUsersWithRawResponse:
+ def __init__(self, users: AsyncUsers) -> None:
+ self._users = users
+
+ self.create = _legacy_response.async_to_raw_response_wrapper(
+ users.create,
+ )
+ self.list = _legacy_response.async_to_raw_response_wrapper(
+ users.list,
+ )
+ self.delete = _legacy_response.async_to_raw_response_wrapper(
+ users.delete,
+ )
+
+
+class UsersWithStreamingResponse:
+ def __init__(self, users: Users) -> None:
+ self._users = users
+
+ self.create = to_streamed_response_wrapper(
+ users.create,
+ )
+ self.list = to_streamed_response_wrapper(
+ users.list,
+ )
+ self.delete = to_streamed_response_wrapper(
+ users.delete,
+ )
+
+
+class AsyncUsersWithStreamingResponse:
+ def __init__(self, users: AsyncUsers) -> None:
+ self._users = users
+
+ self.create = async_to_streamed_response_wrapper(
+ users.create,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ users.list,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ users.delete,
+ )
diff --git a/src/openai/resources/admin/organization/invites.py b/src/openai/resources/admin/organization/invites.py
new file mode 100644
index 0000000000..23b83ccb18
--- /dev/null
+++ b/src/openai/resources/admin/organization/invites.py
@@ -0,0 +1,500 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Iterable
+from typing_extensions import Literal
+
+import httpx
+
+from .... import _legacy_response
+from ...._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
+from ...._utils import path_template, maybe_transform, async_maybe_transform
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ....pagination import SyncConversationCursorPage, AsyncConversationCursorPage
+from ...._base_client import AsyncPaginator, make_request_options
+from ....types.admin.organization import invite_list_params, invite_create_params
+from ....types.admin.organization.invite import Invite
+from ....types.admin.organization.invite_delete_response import InviteDeleteResponse
+
+__all__ = ["Invites", "AsyncInvites"]
+
+
+class Invites(SyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> InvitesWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return InvitesWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> InvitesWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return InvitesWithStreamingResponse(self)
+
+ def create(
+ self,
+ *,
+ email: str,
+ role: Literal["reader", "owner"],
+ projects: Iterable[invite_create_params.Project] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> Invite:
+ """Create an invite for a user to the organization.
+
+ The invite must be accepted by
+ the user before they have access to the organization.
+
+ Args:
+ email: Send an email to this address
+
+ role: `owner` or `reader`
+
+ projects: An array of projects to which membership is granted at the same time the org
+ invite is accepted. If omitted, the user will be invited to the default project
+ for compatibility with legacy behavior.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._post(
+ "/organization/invites",
+ body=maybe_transform(
+ {
+ "email": email,
+ "role": role,
+ "projects": projects,
+ },
+ invite_create_params.InviteCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=Invite,
+ )
+
+ def retrieve(
+ self,
+ invite_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> Invite:
+ """
+ Retrieves an invite.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not invite_id:
+ raise ValueError(f"Expected a non-empty value for `invite_id` but received {invite_id!r}")
+ return self._get(
+ path_template("/organization/invites/{invite_id}", invite_id=invite_id),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=Invite,
+ )
+
+ def list(
+ self,
+ *,
+ after: str | Omit = omit,
+ limit: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> SyncConversationCursorPage[Invite]:
+ """
+ Returns a list of invites in the organization.
+
+ Args:
+ after: A cursor for use in pagination. `after` is an object ID that defines your place
+ in the list. For instance, if you make a list request and receive 100 objects,
+ ending with obj_foo, your subsequent call can include after=obj_foo in order to
+ fetch the next page of the list.
+
+ limit: A limit on the number of objects to be returned. Limit can range between 1 and
+ 100, and the default is 20.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get_api_list(
+ "/organization/invites",
+ page=SyncConversationCursorPage[Invite],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "limit": limit,
+ },
+ invite_list_params.InviteListParams,
+ ),
+ security={"admin_api_key_auth": True},
+ ),
+ model=Invite,
+ )
+
+ def delete(
+ self,
+ invite_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> InviteDeleteResponse:
+ """Delete an invite.
+
+ If the invite has already been accepted, it cannot be deleted.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not invite_id:
+ raise ValueError(f"Expected a non-empty value for `invite_id` but received {invite_id!r}")
+ return self._delete(
+ path_template("/organization/invites/{invite_id}", invite_id=invite_id),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=InviteDeleteResponse,
+ )
+
+
+class AsyncInvites(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncInvitesWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncInvitesWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncInvitesWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return AsyncInvitesWithStreamingResponse(self)
+
+ async def create(
+ self,
+ *,
+ email: str,
+ role: Literal["reader", "owner"],
+ projects: Iterable[invite_create_params.Project] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> Invite:
+ """Create an invite for a user to the organization.
+
+ The invite must be accepted by
+ the user before they have access to the organization.
+
+ Args:
+ email: Send an email to this address
+
+ role: `owner` or `reader`
+
+ projects: An array of projects to which membership is granted at the same time the org
+ invite is accepted. If omitted, the user will be invited to the default project
+ for compatibility with legacy behavior.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._post(
+ "/organization/invites",
+ body=await async_maybe_transform(
+ {
+ "email": email,
+ "role": role,
+ "projects": projects,
+ },
+ invite_create_params.InviteCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=Invite,
+ )
+
+ async def retrieve(
+ self,
+ invite_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> Invite:
+ """
+ Retrieves an invite.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not invite_id:
+ raise ValueError(f"Expected a non-empty value for `invite_id` but received {invite_id!r}")
+ return await self._get(
+ path_template("/organization/invites/{invite_id}", invite_id=invite_id),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=Invite,
+ )
+
+ def list(
+ self,
+ *,
+ after: str | Omit = omit,
+ limit: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AsyncPaginator[Invite, AsyncConversationCursorPage[Invite]]:
+ """
+ Returns a list of invites in the organization.
+
+ Args:
+ after: A cursor for use in pagination. `after` is an object ID that defines your place
+ in the list. For instance, if you make a list request and receive 100 objects,
+ ending with obj_foo, your subsequent call can include after=obj_foo in order to
+ fetch the next page of the list.
+
+ limit: A limit on the number of objects to be returned. Limit can range between 1 and
+ 100, and the default is 20.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get_api_list(
+ "/organization/invites",
+ page=AsyncConversationCursorPage[Invite],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "limit": limit,
+ },
+ invite_list_params.InviteListParams,
+ ),
+ security={"admin_api_key_auth": True},
+ ),
+ model=Invite,
+ )
+
+ async def delete(
+ self,
+ invite_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> InviteDeleteResponse:
+ """Delete an invite.
+
+ If the invite has already been accepted, it cannot be deleted.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not invite_id:
+ raise ValueError(f"Expected a non-empty value for `invite_id` but received {invite_id!r}")
+ return await self._delete(
+ path_template("/organization/invites/{invite_id}", invite_id=invite_id),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=InviteDeleteResponse,
+ )
+
+
+class InvitesWithRawResponse:
+ def __init__(self, invites: Invites) -> None:
+ self._invites = invites
+
+ self.create = _legacy_response.to_raw_response_wrapper(
+ invites.create,
+ )
+ self.retrieve = _legacy_response.to_raw_response_wrapper(
+ invites.retrieve,
+ )
+ self.list = _legacy_response.to_raw_response_wrapper(
+ invites.list,
+ )
+ self.delete = _legacy_response.to_raw_response_wrapper(
+ invites.delete,
+ )
+
+
+class AsyncInvitesWithRawResponse:
+ def __init__(self, invites: AsyncInvites) -> None:
+ self._invites = invites
+
+ self.create = _legacy_response.async_to_raw_response_wrapper(
+ invites.create,
+ )
+ self.retrieve = _legacy_response.async_to_raw_response_wrapper(
+ invites.retrieve,
+ )
+ self.list = _legacy_response.async_to_raw_response_wrapper(
+ invites.list,
+ )
+ self.delete = _legacy_response.async_to_raw_response_wrapper(
+ invites.delete,
+ )
+
+
+class InvitesWithStreamingResponse:
+ def __init__(self, invites: Invites) -> None:
+ self._invites = invites
+
+ self.create = to_streamed_response_wrapper(
+ invites.create,
+ )
+ self.retrieve = to_streamed_response_wrapper(
+ invites.retrieve,
+ )
+ self.list = to_streamed_response_wrapper(
+ invites.list,
+ )
+ self.delete = to_streamed_response_wrapper(
+ invites.delete,
+ )
+
+
+class AsyncInvitesWithStreamingResponse:
+ def __init__(self, invites: AsyncInvites) -> None:
+ self._invites = invites
+
+ self.create = async_to_streamed_response_wrapper(
+ invites.create,
+ )
+ self.retrieve = async_to_streamed_response_wrapper(
+ invites.retrieve,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ invites.list,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ invites.delete,
+ )
diff --git a/src/openai/resources/admin/organization/organization.py b/src/openai/resources/admin/organization/organization.py
new file mode 100644
index 0000000000..abbbadf575
--- /dev/null
+++ b/src/openai/resources/admin/organization/organization.py
@@ -0,0 +1,364 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .roles import (
+ Roles,
+ AsyncRoles,
+ RolesWithRawResponse,
+ AsyncRolesWithRawResponse,
+ RolesWithStreamingResponse,
+ AsyncRolesWithStreamingResponse,
+)
+from .usage import (
+ Usage,
+ AsyncUsage,
+ UsageWithRawResponse,
+ AsyncUsageWithRawResponse,
+ UsageWithStreamingResponse,
+ AsyncUsageWithStreamingResponse,
+)
+from .invites import (
+ Invites,
+ AsyncInvites,
+ InvitesWithRawResponse,
+ AsyncInvitesWithRawResponse,
+ InvitesWithStreamingResponse,
+ AsyncInvitesWithStreamingResponse,
+)
+from ...._compat import cached_property
+from .audit_logs import (
+ AuditLogs,
+ AsyncAuditLogs,
+ AuditLogsWithRawResponse,
+ AsyncAuditLogsWithRawResponse,
+ AuditLogsWithStreamingResponse,
+ AsyncAuditLogsWithStreamingResponse,
+)
+from .users.users import (
+ Users,
+ AsyncUsers,
+ UsersWithRawResponse,
+ AsyncUsersWithRawResponse,
+ UsersWithStreamingResponse,
+ AsyncUsersWithStreamingResponse,
+)
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from .certificates import (
+ Certificates,
+ AsyncCertificates,
+ CertificatesWithRawResponse,
+ AsyncCertificatesWithRawResponse,
+ CertificatesWithStreamingResponse,
+ AsyncCertificatesWithStreamingResponse,
+)
+from .groups.groups import (
+ Groups,
+ AsyncGroups,
+ GroupsWithRawResponse,
+ AsyncGroupsWithRawResponse,
+ GroupsWithStreamingResponse,
+ AsyncGroupsWithStreamingResponse,
+)
+from .admin_api_keys import (
+ AdminAPIKeys,
+ AsyncAdminAPIKeys,
+ AdminAPIKeysWithRawResponse,
+ AsyncAdminAPIKeysWithRawResponse,
+ AdminAPIKeysWithStreamingResponse,
+ AsyncAdminAPIKeysWithStreamingResponse,
+)
+from .projects.projects import (
+ Projects,
+ AsyncProjects,
+ ProjectsWithRawResponse,
+ AsyncProjectsWithRawResponse,
+ ProjectsWithStreamingResponse,
+ AsyncProjectsWithStreamingResponse,
+)
+
+__all__ = ["Organization", "AsyncOrganization"]
+
+
+class Organization(SyncAPIResource):
+ @cached_property
+ def audit_logs(self) -> AuditLogs:
+ """List user actions and configuration changes within this organization."""
+ return AuditLogs(self._client)
+
+ @cached_property
+ def admin_api_keys(self) -> AdminAPIKeys:
+ return AdminAPIKeys(self._client)
+
+ @cached_property
+ def usage(self) -> Usage:
+ return Usage(self._client)
+
+ @cached_property
+ def invites(self) -> Invites:
+ return Invites(self._client)
+
+ @cached_property
+ def users(self) -> Users:
+ return Users(self._client)
+
+ @cached_property
+ def groups(self) -> Groups:
+ return Groups(self._client)
+
+ @cached_property
+ def roles(self) -> Roles:
+ return Roles(self._client)
+
+ @cached_property
+ def certificates(self) -> Certificates:
+ return Certificates(self._client)
+
+ @cached_property
+ def projects(self) -> Projects:
+ return Projects(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> OrganizationWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return OrganizationWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> OrganizationWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return OrganizationWithStreamingResponse(self)
+
+
+class AsyncOrganization(AsyncAPIResource):
+ @cached_property
+ def audit_logs(self) -> AsyncAuditLogs:
+ """List user actions and configuration changes within this organization."""
+ return AsyncAuditLogs(self._client)
+
+ @cached_property
+ def admin_api_keys(self) -> AsyncAdminAPIKeys:
+ return AsyncAdminAPIKeys(self._client)
+
+ @cached_property
+ def usage(self) -> AsyncUsage:
+ return AsyncUsage(self._client)
+
+ @cached_property
+ def invites(self) -> AsyncInvites:
+ return AsyncInvites(self._client)
+
+ @cached_property
+ def users(self) -> AsyncUsers:
+ return AsyncUsers(self._client)
+
+ @cached_property
+ def groups(self) -> AsyncGroups:
+ return AsyncGroups(self._client)
+
+ @cached_property
+ def roles(self) -> AsyncRoles:
+ return AsyncRoles(self._client)
+
+ @cached_property
+ def certificates(self) -> AsyncCertificates:
+ return AsyncCertificates(self._client)
+
+ @cached_property
+ def projects(self) -> AsyncProjects:
+ return AsyncProjects(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> AsyncOrganizationWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncOrganizationWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncOrganizationWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return AsyncOrganizationWithStreamingResponse(self)
+
+
+class OrganizationWithRawResponse:
+ def __init__(self, organization: Organization) -> None:
+ self._organization = organization
+
+ @cached_property
+ def audit_logs(self) -> AuditLogsWithRawResponse:
+ """List user actions and configuration changes within this organization."""
+ return AuditLogsWithRawResponse(self._organization.audit_logs)
+
+ @cached_property
+ def admin_api_keys(self) -> AdminAPIKeysWithRawResponse:
+ return AdminAPIKeysWithRawResponse(self._organization.admin_api_keys)
+
+ @cached_property
+ def usage(self) -> UsageWithRawResponse:
+ return UsageWithRawResponse(self._organization.usage)
+
+ @cached_property
+ def invites(self) -> InvitesWithRawResponse:
+ return InvitesWithRawResponse(self._organization.invites)
+
+ @cached_property
+ def users(self) -> UsersWithRawResponse:
+ return UsersWithRawResponse(self._organization.users)
+
+ @cached_property
+ def groups(self) -> GroupsWithRawResponse:
+ return GroupsWithRawResponse(self._organization.groups)
+
+ @cached_property
+ def roles(self) -> RolesWithRawResponse:
+ return RolesWithRawResponse(self._organization.roles)
+
+ @cached_property
+ def certificates(self) -> CertificatesWithRawResponse:
+ return CertificatesWithRawResponse(self._organization.certificates)
+
+ @cached_property
+ def projects(self) -> ProjectsWithRawResponse:
+ return ProjectsWithRawResponse(self._organization.projects)
+
+
+class AsyncOrganizationWithRawResponse:
+ def __init__(self, organization: AsyncOrganization) -> None:
+ self._organization = organization
+
+ @cached_property
+ def audit_logs(self) -> AsyncAuditLogsWithRawResponse:
+ """List user actions and configuration changes within this organization."""
+ return AsyncAuditLogsWithRawResponse(self._organization.audit_logs)
+
+ @cached_property
+ def admin_api_keys(self) -> AsyncAdminAPIKeysWithRawResponse:
+ return AsyncAdminAPIKeysWithRawResponse(self._organization.admin_api_keys)
+
+ @cached_property
+ def usage(self) -> AsyncUsageWithRawResponse:
+ return AsyncUsageWithRawResponse(self._organization.usage)
+
+ @cached_property
+ def invites(self) -> AsyncInvitesWithRawResponse:
+ return AsyncInvitesWithRawResponse(self._organization.invites)
+
+ @cached_property
+ def users(self) -> AsyncUsersWithRawResponse:
+ return AsyncUsersWithRawResponse(self._organization.users)
+
+ @cached_property
+ def groups(self) -> AsyncGroupsWithRawResponse:
+ return AsyncGroupsWithRawResponse(self._organization.groups)
+
+ @cached_property
+ def roles(self) -> AsyncRolesWithRawResponse:
+ return AsyncRolesWithRawResponse(self._organization.roles)
+
+ @cached_property
+ def certificates(self) -> AsyncCertificatesWithRawResponse:
+ return AsyncCertificatesWithRawResponse(self._organization.certificates)
+
+ @cached_property
+ def projects(self) -> AsyncProjectsWithRawResponse:
+ return AsyncProjectsWithRawResponse(self._organization.projects)
+
+
+class OrganizationWithStreamingResponse:
+ def __init__(self, organization: Organization) -> None:
+ self._organization = organization
+
+ @cached_property
+ def audit_logs(self) -> AuditLogsWithStreamingResponse:
+ """List user actions and configuration changes within this organization."""
+ return AuditLogsWithStreamingResponse(self._organization.audit_logs)
+
+ @cached_property
+ def admin_api_keys(self) -> AdminAPIKeysWithStreamingResponse:
+ return AdminAPIKeysWithStreamingResponse(self._organization.admin_api_keys)
+
+ @cached_property
+ def usage(self) -> UsageWithStreamingResponse:
+ return UsageWithStreamingResponse(self._organization.usage)
+
+ @cached_property
+ def invites(self) -> InvitesWithStreamingResponse:
+ return InvitesWithStreamingResponse(self._organization.invites)
+
+ @cached_property
+ def users(self) -> UsersWithStreamingResponse:
+ return UsersWithStreamingResponse(self._organization.users)
+
+ @cached_property
+ def groups(self) -> GroupsWithStreamingResponse:
+ return GroupsWithStreamingResponse(self._organization.groups)
+
+ @cached_property
+ def roles(self) -> RolesWithStreamingResponse:
+ return RolesWithStreamingResponse(self._organization.roles)
+
+ @cached_property
+ def certificates(self) -> CertificatesWithStreamingResponse:
+ return CertificatesWithStreamingResponse(self._organization.certificates)
+
+ @cached_property
+ def projects(self) -> ProjectsWithStreamingResponse:
+ return ProjectsWithStreamingResponse(self._organization.projects)
+
+
+class AsyncOrganizationWithStreamingResponse:
+ def __init__(self, organization: AsyncOrganization) -> None:
+ self._organization = organization
+
+ @cached_property
+ def audit_logs(self) -> AsyncAuditLogsWithStreamingResponse:
+ """List user actions and configuration changes within this organization."""
+ return AsyncAuditLogsWithStreamingResponse(self._organization.audit_logs)
+
+ @cached_property
+ def admin_api_keys(self) -> AsyncAdminAPIKeysWithStreamingResponse:
+ return AsyncAdminAPIKeysWithStreamingResponse(self._organization.admin_api_keys)
+
+ @cached_property
+ def usage(self) -> AsyncUsageWithStreamingResponse:
+ return AsyncUsageWithStreamingResponse(self._organization.usage)
+
+ @cached_property
+ def invites(self) -> AsyncInvitesWithStreamingResponse:
+ return AsyncInvitesWithStreamingResponse(self._organization.invites)
+
+ @cached_property
+ def users(self) -> AsyncUsersWithStreamingResponse:
+ return AsyncUsersWithStreamingResponse(self._organization.users)
+
+ @cached_property
+ def groups(self) -> AsyncGroupsWithStreamingResponse:
+ return AsyncGroupsWithStreamingResponse(self._organization.groups)
+
+ @cached_property
+ def roles(self) -> AsyncRolesWithStreamingResponse:
+ return AsyncRolesWithStreamingResponse(self._organization.roles)
+
+ @cached_property
+ def certificates(self) -> AsyncCertificatesWithStreamingResponse:
+ return AsyncCertificatesWithStreamingResponse(self._organization.certificates)
+
+ @cached_property
+ def projects(self) -> AsyncProjectsWithStreamingResponse:
+ return AsyncProjectsWithStreamingResponse(self._organization.projects)
diff --git a/src/openai/resources/admin/organization/projects/__init__.py b/src/openai/resources/admin/organization/projects/__init__.py
new file mode 100644
index 0000000000..a64326bfa9
--- /dev/null
+++ b/src/openai/resources/admin/organization/projects/__init__.py
@@ -0,0 +1,117 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .roles import (
+ Roles,
+ AsyncRoles,
+ RolesWithRawResponse,
+ AsyncRolesWithRawResponse,
+ RolesWithStreamingResponse,
+ AsyncRolesWithStreamingResponse,
+)
+from .users import (
+ Users,
+ AsyncUsers,
+ UsersWithRawResponse,
+ AsyncUsersWithRawResponse,
+ UsersWithStreamingResponse,
+ AsyncUsersWithStreamingResponse,
+)
+from .groups import (
+ Groups,
+ AsyncGroups,
+ GroupsWithRawResponse,
+ AsyncGroupsWithRawResponse,
+ GroupsWithStreamingResponse,
+ AsyncGroupsWithStreamingResponse,
+)
+from .api_keys import (
+ APIKeys,
+ AsyncAPIKeys,
+ APIKeysWithRawResponse,
+ AsyncAPIKeysWithRawResponse,
+ APIKeysWithStreamingResponse,
+ AsyncAPIKeysWithStreamingResponse,
+)
+from .projects import (
+ Projects,
+ AsyncProjects,
+ ProjectsWithRawResponse,
+ AsyncProjectsWithRawResponse,
+ ProjectsWithStreamingResponse,
+ AsyncProjectsWithStreamingResponse,
+)
+from .rate_limits import (
+ RateLimits,
+ AsyncRateLimits,
+ RateLimitsWithRawResponse,
+ AsyncRateLimitsWithRawResponse,
+ RateLimitsWithStreamingResponse,
+ AsyncRateLimitsWithStreamingResponse,
+)
+from .certificates import (
+ Certificates,
+ AsyncCertificates,
+ CertificatesWithRawResponse,
+ AsyncCertificatesWithRawResponse,
+ CertificatesWithStreamingResponse,
+ AsyncCertificatesWithStreamingResponse,
+)
+from .service_accounts import (
+ ServiceAccounts,
+ AsyncServiceAccounts,
+ ServiceAccountsWithRawResponse,
+ AsyncServiceAccountsWithRawResponse,
+ ServiceAccountsWithStreamingResponse,
+ AsyncServiceAccountsWithStreamingResponse,
+)
+
+__all__ = [
+ "Users",
+ "AsyncUsers",
+ "UsersWithRawResponse",
+ "AsyncUsersWithRawResponse",
+ "UsersWithStreamingResponse",
+ "AsyncUsersWithStreamingResponse",
+ "ServiceAccounts",
+ "AsyncServiceAccounts",
+ "ServiceAccountsWithRawResponse",
+ "AsyncServiceAccountsWithRawResponse",
+ "ServiceAccountsWithStreamingResponse",
+ "AsyncServiceAccountsWithStreamingResponse",
+ "APIKeys",
+ "AsyncAPIKeys",
+ "APIKeysWithRawResponse",
+ "AsyncAPIKeysWithRawResponse",
+ "APIKeysWithStreamingResponse",
+ "AsyncAPIKeysWithStreamingResponse",
+ "RateLimits",
+ "AsyncRateLimits",
+ "RateLimitsWithRawResponse",
+ "AsyncRateLimitsWithRawResponse",
+ "RateLimitsWithStreamingResponse",
+ "AsyncRateLimitsWithStreamingResponse",
+ "Groups",
+ "AsyncGroups",
+ "GroupsWithRawResponse",
+ "AsyncGroupsWithRawResponse",
+ "GroupsWithStreamingResponse",
+ "AsyncGroupsWithStreamingResponse",
+ "Roles",
+ "AsyncRoles",
+ "RolesWithRawResponse",
+ "AsyncRolesWithRawResponse",
+ "RolesWithStreamingResponse",
+ "AsyncRolesWithStreamingResponse",
+ "Certificates",
+ "AsyncCertificates",
+ "CertificatesWithRawResponse",
+ "AsyncCertificatesWithRawResponse",
+ "CertificatesWithStreamingResponse",
+ "AsyncCertificatesWithStreamingResponse",
+ "Projects",
+ "AsyncProjects",
+ "ProjectsWithRawResponse",
+ "AsyncProjectsWithRawResponse",
+ "ProjectsWithStreamingResponse",
+ "AsyncProjectsWithStreamingResponse",
+]
diff --git a/src/openai/resources/admin/organization/projects/api_keys.py b/src/openai/resources/admin/organization/projects/api_keys.py
new file mode 100644
index 0000000000..1517d213e0
--- /dev/null
+++ b/src/openai/resources/admin/organization/projects/api_keys.py
@@ -0,0 +1,413 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import httpx
+
+from ..... import _legacy_response
+from ....._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
+from ....._utils import path_template, maybe_transform
+from ....._compat import cached_property
+from ....._resource import SyncAPIResource, AsyncAPIResource
+from ....._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from .....pagination import SyncConversationCursorPage, AsyncConversationCursorPage
+from ....._base_client import AsyncPaginator, make_request_options
+from .....types.admin.organization.projects import api_key_list_params
+from .....types.admin.organization.projects.project_api_key import ProjectAPIKey
+from .....types.admin.organization.projects.api_key_delete_response import APIKeyDeleteResponse
+
+__all__ = ["APIKeys", "AsyncAPIKeys"]
+
+
+class APIKeys(SyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> APIKeysWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return APIKeysWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> APIKeysWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return APIKeysWithStreamingResponse(self)
+
+ def retrieve(
+ self,
+ api_key_id: str,
+ *,
+ project_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ProjectAPIKey:
+ """
+ Retrieves an API key in the project.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ if not api_key_id:
+ raise ValueError(f"Expected a non-empty value for `api_key_id` but received {api_key_id!r}")
+ return self._get(
+ path_template(
+ "/organization/projects/{project_id}/api_keys/{api_key_id}",
+ project_id=project_id,
+ api_key_id=api_key_id,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=ProjectAPIKey,
+ )
+
+ def list(
+ self,
+ project_id: str,
+ *,
+ after: str | Omit = omit,
+ limit: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> SyncConversationCursorPage[ProjectAPIKey]:
+ """
+ Returns a list of API keys in the project.
+
+ Args:
+ after: A cursor for use in pagination. `after` is an object ID that defines your place
+ in the list. For instance, if you make a list request and receive 100 objects,
+ ending with obj_foo, your subsequent call can include after=obj_foo in order to
+ fetch the next page of the list.
+
+ limit: A limit on the number of objects to be returned. Limit can range between 1 and
+ 100, and the default is 20.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ return self._get_api_list(
+ path_template("/organization/projects/{project_id}/api_keys", project_id=project_id),
+ page=SyncConversationCursorPage[ProjectAPIKey],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "limit": limit,
+ },
+ api_key_list_params.APIKeyListParams,
+ ),
+ security={"admin_api_key_auth": True},
+ ),
+ model=ProjectAPIKey,
+ )
+
+ def delete(
+ self,
+ api_key_id: str,
+ *,
+ project_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> APIKeyDeleteResponse:
+ """
+ Deletes an API key from the project.
+
+ Returns confirmation of the key deletion, or an error if the key belonged to a
+ service account.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ if not api_key_id:
+ raise ValueError(f"Expected a non-empty value for `api_key_id` but received {api_key_id!r}")
+ return self._delete(
+ path_template(
+ "/organization/projects/{project_id}/api_keys/{api_key_id}",
+ project_id=project_id,
+ api_key_id=api_key_id,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=APIKeyDeleteResponse,
+ )
+
+
+class AsyncAPIKeys(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncAPIKeysWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncAPIKeysWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncAPIKeysWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return AsyncAPIKeysWithStreamingResponse(self)
+
+ async def retrieve(
+ self,
+ api_key_id: str,
+ *,
+ project_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ProjectAPIKey:
+ """
+ Retrieves an API key in the project.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ if not api_key_id:
+ raise ValueError(f"Expected a non-empty value for `api_key_id` but received {api_key_id!r}")
+ return await self._get(
+ path_template(
+ "/organization/projects/{project_id}/api_keys/{api_key_id}",
+ project_id=project_id,
+ api_key_id=api_key_id,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=ProjectAPIKey,
+ )
+
+ def list(
+ self,
+ project_id: str,
+ *,
+ after: str | Omit = omit,
+ limit: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AsyncPaginator[ProjectAPIKey, AsyncConversationCursorPage[ProjectAPIKey]]:
+ """
+ Returns a list of API keys in the project.
+
+ Args:
+ after: A cursor for use in pagination. `after` is an object ID that defines your place
+ in the list. For instance, if you make a list request and receive 100 objects,
+ ending with obj_foo, your subsequent call can include after=obj_foo in order to
+ fetch the next page of the list.
+
+ limit: A limit on the number of objects to be returned. Limit can range between 1 and
+ 100, and the default is 20.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ return self._get_api_list(
+ path_template("/organization/projects/{project_id}/api_keys", project_id=project_id),
+ page=AsyncConversationCursorPage[ProjectAPIKey],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "limit": limit,
+ },
+ api_key_list_params.APIKeyListParams,
+ ),
+ security={"admin_api_key_auth": True},
+ ),
+ model=ProjectAPIKey,
+ )
+
+ async def delete(
+ self,
+ api_key_id: str,
+ *,
+ project_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> APIKeyDeleteResponse:
+ """
+ Deletes an API key from the project.
+
+ Returns confirmation of the key deletion, or an error if the key belonged to a
+ service account.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ if not api_key_id:
+ raise ValueError(f"Expected a non-empty value for `api_key_id` but received {api_key_id!r}")
+ return await self._delete(
+ path_template(
+ "/organization/projects/{project_id}/api_keys/{api_key_id}",
+ project_id=project_id,
+ api_key_id=api_key_id,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=APIKeyDeleteResponse,
+ )
+
+
+class APIKeysWithRawResponse:
+ def __init__(self, api_keys: APIKeys) -> None:
+ self._api_keys = api_keys
+
+ self.retrieve = _legacy_response.to_raw_response_wrapper(
+ api_keys.retrieve,
+ )
+ self.list = _legacy_response.to_raw_response_wrapper(
+ api_keys.list,
+ )
+ self.delete = _legacy_response.to_raw_response_wrapper(
+ api_keys.delete,
+ )
+
+
+class AsyncAPIKeysWithRawResponse:
+ def __init__(self, api_keys: AsyncAPIKeys) -> None:
+ self._api_keys = api_keys
+
+ self.retrieve = _legacy_response.async_to_raw_response_wrapper(
+ api_keys.retrieve,
+ )
+ self.list = _legacy_response.async_to_raw_response_wrapper(
+ api_keys.list,
+ )
+ self.delete = _legacy_response.async_to_raw_response_wrapper(
+ api_keys.delete,
+ )
+
+
+class APIKeysWithStreamingResponse:
+ def __init__(self, api_keys: APIKeys) -> None:
+ self._api_keys = api_keys
+
+ self.retrieve = to_streamed_response_wrapper(
+ api_keys.retrieve,
+ )
+ self.list = to_streamed_response_wrapper(
+ api_keys.list,
+ )
+ self.delete = to_streamed_response_wrapper(
+ api_keys.delete,
+ )
+
+
+class AsyncAPIKeysWithStreamingResponse:
+ def __init__(self, api_keys: AsyncAPIKeys) -> None:
+ self._api_keys = api_keys
+
+ self.retrieve = async_to_streamed_response_wrapper(
+ api_keys.retrieve,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ api_keys.list,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ api_keys.delete,
+ )
diff --git a/src/openai/resources/admin/organization/projects/certificates.py b/src/openai/resources/admin/organization/projects/certificates.py
new file mode 100644
index 0000000000..ec449d570a
--- /dev/null
+++ b/src/openai/resources/admin/organization/projects/certificates.py
@@ -0,0 +1,428 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal
+
+import httpx
+
+from ..... import _legacy_response
+from ....._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
+from ....._utils import path_template, maybe_transform
+from ....._compat import cached_property
+from ....._resource import SyncAPIResource, AsyncAPIResource
+from ....._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from .....pagination import SyncPage, AsyncPage, SyncConversationCursorPage, AsyncConversationCursorPage
+from ....._base_client import AsyncPaginator, make_request_options
+from .....types.admin.organization.projects import (
+ certificate_list_params,
+ certificate_activate_params,
+ certificate_deactivate_params,
+)
+from .....types.admin.organization.projects.certificate_list_response import CertificateListResponse
+from .....types.admin.organization.projects.certificate_activate_response import CertificateActivateResponse
+from .....types.admin.organization.projects.certificate_deactivate_response import CertificateDeactivateResponse
+
+__all__ = ["Certificates", "AsyncCertificates"]
+
+
+class Certificates(SyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> CertificatesWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return CertificatesWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> CertificatesWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return CertificatesWithStreamingResponse(self)
+
+ def list(
+ self,
+ project_id: str,
+ *,
+ after: str | Omit = omit,
+ limit: int | Omit = omit,
+ order: Literal["asc", "desc"] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> SyncConversationCursorPage[CertificateListResponse]:
+ """
+ List certificates for this project.
+
+ Args:
+ after: A cursor for use in pagination. `after` is an object ID that defines your place
+ in the list. For instance, if you make a list request and receive 100 objects,
+ ending with obj_foo, your subsequent call can include after=obj_foo in order to
+ fetch the next page of the list.
+
+ limit: A limit on the number of objects to be returned. Limit can range between 1 and
+ 100, and the default is 20.
+
+ order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+ order and `desc` for descending order.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ return self._get_api_list(
+ path_template("/organization/projects/{project_id}/certificates", project_id=project_id),
+ page=SyncConversationCursorPage[CertificateListResponse],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "limit": limit,
+ "order": order,
+ },
+ certificate_list_params.CertificateListParams,
+ ),
+ security={"admin_api_key_auth": True},
+ ),
+ model=CertificateListResponse,
+ )
+
+ def activate(
+ self,
+ project_id: str,
+ *,
+ certificate_ids: SequenceNotStr[str],
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> SyncPage[CertificateActivateResponse]:
+ """
+ Activate certificates at the project level.
+
+ You can atomically and idempotently activate up to 10 certificates at a time.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ return self._get_api_list(
+ path_template("/organization/projects/{project_id}/certificates/activate", project_id=project_id),
+ page=SyncPage[CertificateActivateResponse],
+ body=maybe_transform(
+ {"certificate_ids": certificate_ids}, certificate_activate_params.CertificateActivateParams
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ model=CertificateActivateResponse,
+ method="post",
+ )
+
+ def deactivate(
+ self,
+ project_id: str,
+ *,
+ certificate_ids: SequenceNotStr[str],
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> SyncPage[CertificateDeactivateResponse]:
+ """Deactivate certificates at the project level.
+
+ You can atomically and
+ idempotently deactivate up to 10 certificates at a time.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ return self._get_api_list(
+ path_template("/organization/projects/{project_id}/certificates/deactivate", project_id=project_id),
+ page=SyncPage[CertificateDeactivateResponse],
+ body=maybe_transform(
+ {"certificate_ids": certificate_ids}, certificate_deactivate_params.CertificateDeactivateParams
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ model=CertificateDeactivateResponse,
+ method="post",
+ )
+
+
+class AsyncCertificates(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncCertificatesWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncCertificatesWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncCertificatesWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return AsyncCertificatesWithStreamingResponse(self)
+
+ def list(
+ self,
+ project_id: str,
+ *,
+ after: str | Omit = omit,
+ limit: int | Omit = omit,
+ order: Literal["asc", "desc"] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AsyncPaginator[CertificateListResponse, AsyncConversationCursorPage[CertificateListResponse]]:
+ """
+ List certificates for this project.
+
+ Args:
+ after: A cursor for use in pagination. `after` is an object ID that defines your place
+ in the list. For instance, if you make a list request and receive 100 objects,
+ ending with obj_foo, your subsequent call can include after=obj_foo in order to
+ fetch the next page of the list.
+
+ limit: A limit on the number of objects to be returned. Limit can range between 1 and
+ 100, and the default is 20.
+
+ order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+ order and `desc` for descending order.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ return self._get_api_list(
+ path_template("/organization/projects/{project_id}/certificates", project_id=project_id),
+ page=AsyncConversationCursorPage[CertificateListResponse],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "limit": limit,
+ "order": order,
+ },
+ certificate_list_params.CertificateListParams,
+ ),
+ security={"admin_api_key_auth": True},
+ ),
+ model=CertificateListResponse,
+ )
+
+ def activate(
+ self,
+ project_id: str,
+ *,
+ certificate_ids: SequenceNotStr[str],
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AsyncPaginator[CertificateActivateResponse, AsyncPage[CertificateActivateResponse]]:
+ """
+ Activate certificates at the project level.
+
+ You can atomically and idempotently activate up to 10 certificates at a time.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ return self._get_api_list(
+ path_template("/organization/projects/{project_id}/certificates/activate", project_id=project_id),
+ page=AsyncPage[CertificateActivateResponse],
+ body=maybe_transform(
+ {"certificate_ids": certificate_ids}, certificate_activate_params.CertificateActivateParams
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ model=CertificateActivateResponse,
+ method="post",
+ )
+
+ def deactivate(
+ self,
+ project_id: str,
+ *,
+ certificate_ids: SequenceNotStr[str],
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AsyncPaginator[CertificateDeactivateResponse, AsyncPage[CertificateDeactivateResponse]]:
+ """Deactivate certificates at the project level.
+
+ You can atomically and
+ idempotently deactivate up to 10 certificates at a time.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ return self._get_api_list(
+ path_template("/organization/projects/{project_id}/certificates/deactivate", project_id=project_id),
+ page=AsyncPage[CertificateDeactivateResponse],
+ body=maybe_transform(
+ {"certificate_ids": certificate_ids}, certificate_deactivate_params.CertificateDeactivateParams
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ model=CertificateDeactivateResponse,
+ method="post",
+ )
+
+
+class CertificatesWithRawResponse:
+ def __init__(self, certificates: Certificates) -> None:
+ self._certificates = certificates
+
+ self.list = _legacy_response.to_raw_response_wrapper(
+ certificates.list,
+ )
+ self.activate = _legacy_response.to_raw_response_wrapper(
+ certificates.activate,
+ )
+ self.deactivate = _legacy_response.to_raw_response_wrapper(
+ certificates.deactivate,
+ )
+
+
+class AsyncCertificatesWithRawResponse:
+ def __init__(self, certificates: AsyncCertificates) -> None:
+ self._certificates = certificates
+
+ self.list = _legacy_response.async_to_raw_response_wrapper(
+ certificates.list,
+ )
+ self.activate = _legacy_response.async_to_raw_response_wrapper(
+ certificates.activate,
+ )
+ self.deactivate = _legacy_response.async_to_raw_response_wrapper(
+ certificates.deactivate,
+ )
+
+
+class CertificatesWithStreamingResponse:
+ def __init__(self, certificates: Certificates) -> None:
+ self._certificates = certificates
+
+ self.list = to_streamed_response_wrapper(
+ certificates.list,
+ )
+ self.activate = to_streamed_response_wrapper(
+ certificates.activate,
+ )
+ self.deactivate = to_streamed_response_wrapper(
+ certificates.deactivate,
+ )
+
+
+class AsyncCertificatesWithStreamingResponse:
+ def __init__(self, certificates: AsyncCertificates) -> None:
+ self._certificates = certificates
+
+ self.list = async_to_streamed_response_wrapper(
+ certificates.list,
+ )
+ self.activate = async_to_streamed_response_wrapper(
+ certificates.activate,
+ )
+ self.deactivate = async_to_streamed_response_wrapper(
+ certificates.deactivate,
+ )
diff --git a/src/openai/resources/admin/organization/projects/groups/__init__.py b/src/openai/resources/admin/organization/projects/groups/__init__.py
new file mode 100644
index 0000000000..4feb66239f
--- /dev/null
+++ b/src/openai/resources/admin/organization/projects/groups/__init__.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .roles import (
+ Roles,
+ AsyncRoles,
+ RolesWithRawResponse,
+ AsyncRolesWithRawResponse,
+ RolesWithStreamingResponse,
+ AsyncRolesWithStreamingResponse,
+)
+from .groups import (
+ Groups,
+ AsyncGroups,
+ GroupsWithRawResponse,
+ AsyncGroupsWithRawResponse,
+ GroupsWithStreamingResponse,
+ AsyncGroupsWithStreamingResponse,
+)
+
+__all__ = [
+ "Roles",
+ "AsyncRoles",
+ "RolesWithRawResponse",
+ "AsyncRolesWithRawResponse",
+ "RolesWithStreamingResponse",
+ "AsyncRolesWithStreamingResponse",
+ "Groups",
+ "AsyncGroups",
+ "GroupsWithRawResponse",
+ "AsyncGroupsWithRawResponse",
+ "GroupsWithStreamingResponse",
+ "AsyncGroupsWithStreamingResponse",
+]
diff --git a/src/openai/resources/admin/organization/projects/groups/groups.py b/src/openai/resources/admin/organization/projects/groups/groups.py
new file mode 100644
index 0000000000..aad9bfd6ec
--- /dev/null
+++ b/src/openai/resources/admin/organization/projects/groups/groups.py
@@ -0,0 +1,451 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal
+
+import httpx
+
+from ...... import _legacy_response
+from .roles import (
+ Roles,
+ AsyncRoles,
+ RolesWithRawResponse,
+ AsyncRolesWithRawResponse,
+ RolesWithStreamingResponse,
+ AsyncRolesWithStreamingResponse,
+)
+from ......_types import Body, Omit, Query, Headers, NotGiven, omit, not_given
+from ......_utils import path_template, maybe_transform, async_maybe_transform
+from ......_compat import cached_property
+from ......_resource import SyncAPIResource, AsyncAPIResource
+from ......_response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ......pagination import SyncNextCursorPage, AsyncNextCursorPage
+from ......_base_client import AsyncPaginator, make_request_options
+from ......types.admin.organization.projects import group_list_params, group_create_params
+from ......types.admin.organization.projects.project_group import ProjectGroup
+from ......types.admin.organization.projects.group_delete_response import GroupDeleteResponse
+
+__all__ = ["Groups", "AsyncGroups"]
+
+
+class Groups(SyncAPIResource):
+ @cached_property
+ def roles(self) -> Roles:
+ return Roles(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> GroupsWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return GroupsWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> GroupsWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return GroupsWithStreamingResponse(self)
+
+ def create(
+ self,
+ project_id: str,
+ *,
+ group_id: str,
+ role: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ProjectGroup:
+ """
+ Grants a group access to a project.
+
+ Args:
+ group_id: Identifier of the group to add to the project.
+
+ role: Identifier of the project role to grant to the group.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ return self._post(
+ path_template("/organization/projects/{project_id}/groups", project_id=project_id),
+ body=maybe_transform(
+ {
+ "group_id": group_id,
+ "role": role,
+ },
+ group_create_params.GroupCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=ProjectGroup,
+ )
+
+ def list(
+ self,
+ project_id: str,
+ *,
+ after: str | Omit = omit,
+ limit: int | Omit = omit,
+ order: Literal["asc", "desc"] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> SyncNextCursorPage[ProjectGroup]:
+ """
+ Lists the groups that have access to a project.
+
+ Args:
+ after: Cursor for pagination. Provide the ID of the last group from the previous
+ response to fetch the next page.
+
+ limit: A limit on the number of project groups to return. Defaults to 20.
+
+ order: Sort order for the returned groups.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ return self._get_api_list(
+ path_template("/organization/projects/{project_id}/groups", project_id=project_id),
+ page=SyncNextCursorPage[ProjectGroup],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "limit": limit,
+ "order": order,
+ },
+ group_list_params.GroupListParams,
+ ),
+ security={"admin_api_key_auth": True},
+ ),
+ model=ProjectGroup,
+ )
+
+ def delete(
+ self,
+ group_id: str,
+ *,
+ project_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> GroupDeleteResponse:
+ """
+ Revokes a group's access to a project.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ if not group_id:
+ raise ValueError(f"Expected a non-empty value for `group_id` but received {group_id!r}")
+ return self._delete(
+ path_template(
+ "/organization/projects/{project_id}/groups/{group_id}", project_id=project_id, group_id=group_id
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=GroupDeleteResponse,
+ )
+
+
+class AsyncGroups(AsyncAPIResource):
+ @cached_property
+ def roles(self) -> AsyncRoles:
+ return AsyncRoles(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> AsyncGroupsWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncGroupsWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncGroupsWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return AsyncGroupsWithStreamingResponse(self)
+
+ async def create(
+ self,
+ project_id: str,
+ *,
+ group_id: str,
+ role: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ProjectGroup:
+ """
+ Grants a group access to a project.
+
+ Args:
+ group_id: Identifier of the group to add to the project.
+
+ role: Identifier of the project role to grant to the group.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ return await self._post(
+ path_template("/organization/projects/{project_id}/groups", project_id=project_id),
+ body=await async_maybe_transform(
+ {
+ "group_id": group_id,
+ "role": role,
+ },
+ group_create_params.GroupCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=ProjectGroup,
+ )
+
+ def list(
+ self,
+ project_id: str,
+ *,
+ after: str | Omit = omit,
+ limit: int | Omit = omit,
+ order: Literal["asc", "desc"] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AsyncPaginator[ProjectGroup, AsyncNextCursorPage[ProjectGroup]]:
+ """
+ Lists the groups that have access to a project.
+
+ Args:
+ after: Cursor for pagination. Provide the ID of the last group from the previous
+ response to fetch the next page.
+
+ limit: A limit on the number of project groups to return. Defaults to 20.
+
+ order: Sort order for the returned groups.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ return self._get_api_list(
+ path_template("/organization/projects/{project_id}/groups", project_id=project_id),
+ page=AsyncNextCursorPage[ProjectGroup],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "limit": limit,
+ "order": order,
+ },
+ group_list_params.GroupListParams,
+ ),
+ security={"admin_api_key_auth": True},
+ ),
+ model=ProjectGroup,
+ )
+
+ async def delete(
+ self,
+ group_id: str,
+ *,
+ project_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> GroupDeleteResponse:
+ """
+ Revokes a group's access to a project.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ if not group_id:
+ raise ValueError(f"Expected a non-empty value for `group_id` but received {group_id!r}")
+ return await self._delete(
+ path_template(
+ "/organization/projects/{project_id}/groups/{group_id}", project_id=project_id, group_id=group_id
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=GroupDeleteResponse,
+ )
+
+
+class GroupsWithRawResponse:
+ def __init__(self, groups: Groups) -> None:
+ self._groups = groups
+
+ self.create = _legacy_response.to_raw_response_wrapper(
+ groups.create,
+ )
+ self.list = _legacy_response.to_raw_response_wrapper(
+ groups.list,
+ )
+ self.delete = _legacy_response.to_raw_response_wrapper(
+ groups.delete,
+ )
+
+ @cached_property
+ def roles(self) -> RolesWithRawResponse:
+ return RolesWithRawResponse(self._groups.roles)
+
+
+class AsyncGroupsWithRawResponse:
+ def __init__(self, groups: AsyncGroups) -> None:
+ self._groups = groups
+
+ self.create = _legacy_response.async_to_raw_response_wrapper(
+ groups.create,
+ )
+ self.list = _legacy_response.async_to_raw_response_wrapper(
+ groups.list,
+ )
+ self.delete = _legacy_response.async_to_raw_response_wrapper(
+ groups.delete,
+ )
+
+ @cached_property
+ def roles(self) -> AsyncRolesWithRawResponse:
+ return AsyncRolesWithRawResponse(self._groups.roles)
+
+
+class GroupsWithStreamingResponse:
+ def __init__(self, groups: Groups) -> None:
+ self._groups = groups
+
+ self.create = to_streamed_response_wrapper(
+ groups.create,
+ )
+ self.list = to_streamed_response_wrapper(
+ groups.list,
+ )
+ self.delete = to_streamed_response_wrapper(
+ groups.delete,
+ )
+
+ @cached_property
+ def roles(self) -> RolesWithStreamingResponse:
+ return RolesWithStreamingResponse(self._groups.roles)
+
+
+class AsyncGroupsWithStreamingResponse:
+ def __init__(self, groups: AsyncGroups) -> None:
+ self._groups = groups
+
+ self.create = async_to_streamed_response_wrapper(
+ groups.create,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ groups.list,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ groups.delete,
+ )
+
+ @cached_property
+ def roles(self) -> AsyncRolesWithStreamingResponse:
+ return AsyncRolesWithStreamingResponse(self._groups.roles)
diff --git a/src/openai/resources/admin/organization/projects/groups/roles.py b/src/openai/resources/admin/organization/projects/groups/roles.py
new file mode 100644
index 0000000000..e3fe3b54fe
--- /dev/null
+++ b/src/openai/resources/admin/organization/projects/groups/roles.py
@@ -0,0 +1,426 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal
+
+import httpx
+
+from ...... import _legacy_response
+from ......_types import Body, Omit, Query, Headers, NotGiven, omit, not_given
+from ......_utils import path_template, maybe_transform, async_maybe_transform
+from ......_compat import cached_property
+from ......_resource import SyncAPIResource, AsyncAPIResource
+from ......_response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ......pagination import SyncNextCursorPage, AsyncNextCursorPage
+from ......_base_client import AsyncPaginator, make_request_options
+from ......types.admin.organization.projects.groups import role_list_params, role_create_params
+from ......types.admin.organization.projects.groups.role_list_response import RoleListResponse
+from ......types.admin.organization.projects.groups.role_create_response import RoleCreateResponse
+from ......types.admin.organization.projects.groups.role_delete_response import RoleDeleteResponse
+
+__all__ = ["Roles", "AsyncRoles"]
+
+
+class Roles(SyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> RolesWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return RolesWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> RolesWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return RolesWithStreamingResponse(self)
+
+ def create(
+ self,
+ group_id: str,
+ *,
+ project_id: str,
+ role_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> RoleCreateResponse:
+ """
+ Assigns a project role to a group within a project.
+
+ Args:
+ role_id: Identifier of the role to assign.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ if not group_id:
+ raise ValueError(f"Expected a non-empty value for `group_id` but received {group_id!r}")
+ return self._post(
+ path_template("/projects/{project_id}/groups/{group_id}/roles", project_id=project_id, group_id=group_id),
+ body=maybe_transform({"role_id": role_id}, role_create_params.RoleCreateParams),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=RoleCreateResponse,
+ )
+
+ def list(
+ self,
+ group_id: str,
+ *,
+ project_id: str,
+ after: str | Omit = omit,
+ limit: int | Omit = omit,
+ order: Literal["asc", "desc"] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> SyncNextCursorPage[RoleListResponse]:
+ """
+ Lists the project roles assigned to a group within a project.
+
+ Args:
+ after: Cursor for pagination. Provide the value from the previous response's `next`
+ field to continue listing project roles.
+
+ limit: A limit on the number of project role assignments to return.
+
+ order: Sort order for the returned project roles.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ if not group_id:
+ raise ValueError(f"Expected a non-empty value for `group_id` but received {group_id!r}")
+ return self._get_api_list(
+ path_template("/projects/{project_id}/groups/{group_id}/roles", project_id=project_id, group_id=group_id),
+ page=SyncNextCursorPage[RoleListResponse],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "limit": limit,
+ "order": order,
+ },
+ role_list_params.RoleListParams,
+ ),
+ security={"admin_api_key_auth": True},
+ ),
+ model=RoleListResponse,
+ )
+
+ def delete(
+ self,
+ role_id: str,
+ *,
+ project_id: str,
+ group_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> RoleDeleteResponse:
+ """
+ Unassigns a project role from a group within a project.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ if not group_id:
+ raise ValueError(f"Expected a non-empty value for `group_id` but received {group_id!r}")
+ if not role_id:
+ raise ValueError(f"Expected a non-empty value for `role_id` but received {role_id!r}")
+ return self._delete(
+ path_template(
+ "/projects/{project_id}/groups/{group_id}/roles/{role_id}",
+ project_id=project_id,
+ group_id=group_id,
+ role_id=role_id,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=RoleDeleteResponse,
+ )
+
+
+class AsyncRoles(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncRolesWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncRolesWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncRolesWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return AsyncRolesWithStreamingResponse(self)
+
+ async def create(
+ self,
+ group_id: str,
+ *,
+ project_id: str,
+ role_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> RoleCreateResponse:
+ """
+ Assigns a project role to a group within a project.
+
+ Args:
+ role_id: Identifier of the role to assign.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ if not group_id:
+ raise ValueError(f"Expected a non-empty value for `group_id` but received {group_id!r}")
+ return await self._post(
+ path_template("/projects/{project_id}/groups/{group_id}/roles", project_id=project_id, group_id=group_id),
+ body=await async_maybe_transform({"role_id": role_id}, role_create_params.RoleCreateParams),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=RoleCreateResponse,
+ )
+
+ def list(
+ self,
+ group_id: str,
+ *,
+ project_id: str,
+ after: str | Omit = omit,
+ limit: int | Omit = omit,
+ order: Literal["asc", "desc"] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AsyncPaginator[RoleListResponse, AsyncNextCursorPage[RoleListResponse]]:
+ """
+ Lists the project roles assigned to a group within a project.
+
+ Args:
+ after: Cursor for pagination. Provide the value from the previous response's `next`
+ field to continue listing project roles.
+
+ limit: A limit on the number of project role assignments to return.
+
+ order: Sort order for the returned project roles.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ if not group_id:
+ raise ValueError(f"Expected a non-empty value for `group_id` but received {group_id!r}")
+ return self._get_api_list(
+ path_template("/projects/{project_id}/groups/{group_id}/roles", project_id=project_id, group_id=group_id),
+ page=AsyncNextCursorPage[RoleListResponse],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "limit": limit,
+ "order": order,
+ },
+ role_list_params.RoleListParams,
+ ),
+ security={"admin_api_key_auth": True},
+ ),
+ model=RoleListResponse,
+ )
+
+ async def delete(
+ self,
+ role_id: str,
+ *,
+ project_id: str,
+ group_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> RoleDeleteResponse:
+ """
+ Unassigns a project role from a group within a project.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ if not group_id:
+ raise ValueError(f"Expected a non-empty value for `group_id` but received {group_id!r}")
+ if not role_id:
+ raise ValueError(f"Expected a non-empty value for `role_id` but received {role_id!r}")
+ return await self._delete(
+ path_template(
+ "/projects/{project_id}/groups/{group_id}/roles/{role_id}",
+ project_id=project_id,
+ group_id=group_id,
+ role_id=role_id,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=RoleDeleteResponse,
+ )
+
+
+class RolesWithRawResponse:
+ def __init__(self, roles: Roles) -> None:
+ self._roles = roles
+
+ self.create = _legacy_response.to_raw_response_wrapper(
+ roles.create,
+ )
+ self.list = _legacy_response.to_raw_response_wrapper(
+ roles.list,
+ )
+ self.delete = _legacy_response.to_raw_response_wrapper(
+ roles.delete,
+ )
+
+
+class AsyncRolesWithRawResponse:
+ def __init__(self, roles: AsyncRoles) -> None:
+ self._roles = roles
+
+ self.create = _legacy_response.async_to_raw_response_wrapper(
+ roles.create,
+ )
+ self.list = _legacy_response.async_to_raw_response_wrapper(
+ roles.list,
+ )
+ self.delete = _legacy_response.async_to_raw_response_wrapper(
+ roles.delete,
+ )
+
+
+class RolesWithStreamingResponse:
+ def __init__(self, roles: Roles) -> None:
+ self._roles = roles
+
+ self.create = to_streamed_response_wrapper(
+ roles.create,
+ )
+ self.list = to_streamed_response_wrapper(
+ roles.list,
+ )
+ self.delete = to_streamed_response_wrapper(
+ roles.delete,
+ )
+
+
+class AsyncRolesWithStreamingResponse:
+ def __init__(self, roles: AsyncRoles) -> None:
+ self._roles = roles
+
+ self.create = async_to_streamed_response_wrapper(
+ roles.create,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ roles.list,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ roles.delete,
+ )
diff --git a/src/openai/resources/admin/organization/projects/projects.py b/src/openai/resources/admin/organization/projects/projects.py
new file mode 100644
index 0000000000..60d252cd46
--- /dev/null
+++ b/src/openai/resources/admin/organization/projects/projects.py
@@ -0,0 +1,858 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+
+import httpx
+
+from ..... import _legacy_response
+from .roles import (
+ Roles,
+ AsyncRoles,
+ RolesWithRawResponse,
+ AsyncRolesWithRawResponse,
+ RolesWithStreamingResponse,
+ AsyncRolesWithStreamingResponse,
+)
+from .api_keys import (
+ APIKeys,
+ AsyncAPIKeys,
+ APIKeysWithRawResponse,
+ AsyncAPIKeysWithRawResponse,
+ APIKeysWithStreamingResponse,
+ AsyncAPIKeysWithStreamingResponse,
+)
+from ....._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
+from ....._utils import path_template, maybe_transform, async_maybe_transform
+from ....._compat import cached_property
+from .rate_limits import (
+ RateLimits,
+ AsyncRateLimits,
+ RateLimitsWithRawResponse,
+ AsyncRateLimitsWithRawResponse,
+ RateLimitsWithStreamingResponse,
+ AsyncRateLimitsWithStreamingResponse,
+)
+from .users.users import (
+ Users,
+ AsyncUsers,
+ UsersWithRawResponse,
+ AsyncUsersWithRawResponse,
+ UsersWithStreamingResponse,
+ AsyncUsersWithStreamingResponse,
+)
+from .certificates import (
+ Certificates,
+ AsyncCertificates,
+ CertificatesWithRawResponse,
+ AsyncCertificatesWithRawResponse,
+ CertificatesWithStreamingResponse,
+ AsyncCertificatesWithStreamingResponse,
+)
+from ....._resource import SyncAPIResource, AsyncAPIResource
+from ....._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from .groups.groups import (
+ Groups,
+ AsyncGroups,
+ GroupsWithRawResponse,
+ AsyncGroupsWithRawResponse,
+ GroupsWithStreamingResponse,
+ AsyncGroupsWithStreamingResponse,
+)
+from .....pagination import SyncConversationCursorPage, AsyncConversationCursorPage
+from ....._base_client import AsyncPaginator, make_request_options
+from .service_accounts import (
+ ServiceAccounts,
+ AsyncServiceAccounts,
+ ServiceAccountsWithRawResponse,
+ AsyncServiceAccountsWithRawResponse,
+ ServiceAccountsWithStreamingResponse,
+ AsyncServiceAccountsWithStreamingResponse,
+)
+from .....types.admin.organization import project_list_params, project_create_params, project_update_params
+from .....types.admin.organization.project import Project
+
+__all__ = ["Projects", "AsyncProjects"]
+
+
+class Projects(SyncAPIResource):
+    """Synchronous resource for the `/organization/projects` admin endpoints.
+
+    Exposes project lifecycle operations (create, retrieve, update, list,
+    archive) and per-project sub-resources (users, service_accounts, api_keys,
+    rate_limits, groups, roles, certificates). Every request is sent with
+    ``security={"admin_api_key_auth": True}``, i.e. authenticated with the
+    organization admin API key.
+    """
+
+    @cached_property
+    def users(self) -> Users:
+        return Users(self._client)
+
+    @cached_property
+    def service_accounts(self) -> ServiceAccounts:
+        return ServiceAccounts(self._client)
+
+    @cached_property
+    def api_keys(self) -> APIKeys:
+        return APIKeys(self._client)
+
+    @cached_property
+    def rate_limits(self) -> RateLimits:
+        return RateLimits(self._client)
+
+    @cached_property
+    def groups(self) -> Groups:
+        return Groups(self._client)
+
+    @cached_property
+    def roles(self) -> Roles:
+        return Roles(self._client)
+
+    @cached_property
+    def certificates(self) -> Certificates:
+        return Certificates(self._client)
+
+    @cached_property
+    def with_raw_response(self) -> ProjectsWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return ProjectsWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> ProjectsWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return ProjectsWithStreamingResponse(self)
+
+    def create(
+        self,
+        *,
+        name: str,
+        external_key_id: Optional[str] | Omit = omit,
+        geography: Optional[str] | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> Project:
+        """Create a new project in the organization.
+
+        Projects can be created and archived,
+        but cannot be deleted.
+
+        Args:
+          name: The friendly name of the project, this name appears in reports.
+
+          external_key_id: External key ID to associate with the project.
+
+          geography: Create the project with the specified data residency region. Your organization
+              must have access to Data residency functionality in order to use. See
+              [data residency controls](https://platform.openai.com/docs/guides/your-data#data-residency-controls)
+              to review the functionality and limitations of setting this field.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._post(
+            "/organization/projects",
+            body=maybe_transform(
+                {
+                    "name": name,
+                    "external_key_id": external_key_id,
+                    "geography": geography,
+                },
+                project_create_params.ProjectCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                security={"admin_api_key_auth": True},
+            ),
+            cast_to=Project,
+        )
+
+    def retrieve(
+        self,
+        project_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> Project:
+        """
+        Retrieves a project.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not project_id:
+            raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+        return self._get(
+            path_template("/organization/projects/{project_id}", project_id=project_id),
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                security={"admin_api_key_auth": True},
+            ),
+            cast_to=Project,
+        )
+
+    def update(
+        self,
+        project_id: str,
+        *,
+        external_key_id: Optional[str] | Omit = omit,
+        geography: Optional[str] | Omit = omit,
+        name: Optional[str] | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> Project:
+        """
+        Modifies a project in the organization.
+
+        Args:
+          external_key_id: External key ID to associate with the project.
+
+          geography: Geography for the project.
+
+          name: The updated name of the project, this name appears in reports.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not project_id:
+            raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+        return self._post(
+            path_template("/organization/projects/{project_id}", project_id=project_id),
+            body=maybe_transform(
+                {
+                    "external_key_id": external_key_id,
+                    "geography": geography,
+                    "name": name,
+                },
+                project_update_params.ProjectUpdateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                security={"admin_api_key_auth": True},
+            ),
+            cast_to=Project,
+        )
+
+    def list(
+        self,
+        *,
+        after: str | Omit = omit,
+        include_archived: bool | Omit = omit,
+        limit: int | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> SyncConversationCursorPage[Project]:
+        """Returns a list of projects.
+
+        Args:
+          after: A cursor for use in pagination.
+
+              `after` is an object ID that defines your place
+              in the list. For instance, if you make a list request and receive 100 objects,
+              ending with obj_foo, your subsequent call can include after=obj_foo in order to
+              fetch the next page of the list.
+
+          include_archived: If `true` returns all projects including those that have been `archived`.
+              Archived projects are not included by default.
+
+          limit: A limit on the number of objects to be returned. Limit can range between 1 and
+              100, and the default is 20.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._get_api_list(
+            "/organization/projects",
+            page=SyncConversationCursorPage[Project],
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "after": after,
+                        "include_archived": include_archived,
+                        "limit": limit,
+                    },
+                    project_list_params.ProjectListParams,
+                ),
+                security={"admin_api_key_auth": True},
+            ),
+            model=Project,
+        )
+
+    def archive(
+        self,
+        project_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> Project:
+        """Archives a project in the organization.
+
+        Archived projects cannot be used or
+        updated.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not project_id:
+            raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+        return self._post(
+            path_template("/organization/projects/{project_id}/archive", project_id=project_id),
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                security={"admin_api_key_auth": True},
+            ),
+            cast_to=Project,
+        )
+
+
+class AsyncProjects(AsyncAPIResource):
+    """Asynchronous resource for the `/organization/projects` admin endpoints.
+
+    Async counterpart of ``Projects``: the same lifecycle operations (create,
+    retrieve, update, list, archive) and sub-resources, awaited via the async
+    client. Every request is sent with ``security={"admin_api_key_auth": True}``,
+    i.e. authenticated with the organization admin API key.
+    """
+
+    @cached_property
+    def users(self) -> AsyncUsers:
+        return AsyncUsers(self._client)
+
+    @cached_property
+    def service_accounts(self) -> AsyncServiceAccounts:
+        return AsyncServiceAccounts(self._client)
+
+    @cached_property
+    def api_keys(self) -> AsyncAPIKeys:
+        return AsyncAPIKeys(self._client)
+
+    @cached_property
+    def rate_limits(self) -> AsyncRateLimits:
+        return AsyncRateLimits(self._client)
+
+    @cached_property
+    def groups(self) -> AsyncGroups:
+        return AsyncGroups(self._client)
+
+    @cached_property
+    def roles(self) -> AsyncRoles:
+        return AsyncRoles(self._client)
+
+    @cached_property
+    def certificates(self) -> AsyncCertificates:
+        return AsyncCertificates(self._client)
+
+    @cached_property
+    def with_raw_response(self) -> AsyncProjectsWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncProjectsWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncProjectsWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return AsyncProjectsWithStreamingResponse(self)
+
+    async def create(
+        self,
+        *,
+        name: str,
+        external_key_id: Optional[str] | Omit = omit,
+        geography: Optional[str] | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> Project:
+        """Create a new project in the organization.
+
+        Projects can be created and archived,
+        but cannot be deleted.
+
+        Args:
+          name: The friendly name of the project, this name appears in reports.
+
+          external_key_id: External key ID to associate with the project.
+
+          geography: Create the project with the specified data residency region. Your organization
+              must have access to Data residency functionality in order to use. See
+              [data residency controls](https://platform.openai.com/docs/guides/your-data#data-residency-controls)
+              to review the functionality and limitations of setting this field.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return await self._post(
+            "/organization/projects",
+            body=await async_maybe_transform(
+                {
+                    "name": name,
+                    "external_key_id": external_key_id,
+                    "geography": geography,
+                },
+                project_create_params.ProjectCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                security={"admin_api_key_auth": True},
+            ),
+            cast_to=Project,
+        )
+
+    async def retrieve(
+        self,
+        project_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> Project:
+        """
+        Retrieves a project.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not project_id:
+            raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+        return await self._get(
+            path_template("/organization/projects/{project_id}", project_id=project_id),
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                security={"admin_api_key_auth": True},
+            ),
+            cast_to=Project,
+        )
+
+    async def update(
+        self,
+        project_id: str,
+        *,
+        external_key_id: Optional[str] | Omit = omit,
+        geography: Optional[str] | Omit = omit,
+        name: Optional[str] | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> Project:
+        """
+        Modifies a project in the organization.
+
+        Args:
+          external_key_id: External key ID to associate with the project.
+
+          geography: Geography for the project.
+
+          name: The updated name of the project, this name appears in reports.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not project_id:
+            raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+        return await self._post(
+            path_template("/organization/projects/{project_id}", project_id=project_id),
+            body=await async_maybe_transform(
+                {
+                    "external_key_id": external_key_id,
+                    "geography": geography,
+                    "name": name,
+                },
+                project_update_params.ProjectUpdateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                security={"admin_api_key_auth": True},
+            ),
+            cast_to=Project,
+        )
+
+    def list(
+        self,
+        *,
+        after: str | Omit = omit,
+        include_archived: bool | Omit = omit,
+        limit: int | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> AsyncPaginator[Project, AsyncConversationCursorPage[Project]]:
+        """Returns a list of projects.
+
+        Args:
+          after: A cursor for use in pagination.
+
+              `after` is an object ID that defines your place
+              in the list. For instance, if you make a list request and receive 100 objects,
+              ending with obj_foo, your subsequent call can include after=obj_foo in order to
+              fetch the next page of the list.
+
+          include_archived: If `true` returns all projects including those that have been `archived`.
+              Archived projects are not included by default.
+
+          limit: A limit on the number of objects to be returned. Limit can range between 1 and
+              100, and the default is 20.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._get_api_list(
+            "/organization/projects",
+            page=AsyncConversationCursorPage[Project],
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "after": after,
+                        "include_archived": include_archived,
+                        "limit": limit,
+                    },
+                    project_list_params.ProjectListParams,
+                ),
+                security={"admin_api_key_auth": True},
+            ),
+            model=Project,
+        )
+
+    async def archive(
+        self,
+        project_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> Project:
+        """Archives a project in the organization.
+
+        Archived projects cannot be used or
+        updated.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not project_id:
+            raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+        return await self._post(
+            path_template("/organization/projects/{project_id}/archive", project_id=project_id),
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                security={"admin_api_key_auth": True},
+            ),
+            cast_to=Project,
+        )
+
+
+class ProjectsWithRawResponse:
+    """Raw-response wrapper around :class:`Projects`.
+
+    Wrapped methods return the raw HTTP response object instead of the parsed
+    content; sub-resources are exposed through their own raw-response wrappers.
+    """
+
+    def __init__(self, projects: Projects) -> None:
+        self._projects = projects
+
+        self.create = _legacy_response.to_raw_response_wrapper(
+            projects.create,
+        )
+        self.retrieve = _legacy_response.to_raw_response_wrapper(
+            projects.retrieve,
+        )
+        self.update = _legacy_response.to_raw_response_wrapper(
+            projects.update,
+        )
+        self.list = _legacy_response.to_raw_response_wrapper(
+            projects.list,
+        )
+        self.archive = _legacy_response.to_raw_response_wrapper(
+            projects.archive,
+        )
+
+    @cached_property
+    def users(self) -> UsersWithRawResponse:
+        return UsersWithRawResponse(self._projects.users)
+
+    @cached_property
+    def service_accounts(self) -> ServiceAccountsWithRawResponse:
+        return ServiceAccountsWithRawResponse(self._projects.service_accounts)
+
+    @cached_property
+    def api_keys(self) -> APIKeysWithRawResponse:
+        return APIKeysWithRawResponse(self._projects.api_keys)
+
+    @cached_property
+    def rate_limits(self) -> RateLimitsWithRawResponse:
+        return RateLimitsWithRawResponse(self._projects.rate_limits)
+
+    @cached_property
+    def groups(self) -> GroupsWithRawResponse:
+        return GroupsWithRawResponse(self._projects.groups)
+
+    @cached_property
+    def roles(self) -> RolesWithRawResponse:
+        return RolesWithRawResponse(self._projects.roles)
+
+    @cached_property
+    def certificates(self) -> CertificatesWithRawResponse:
+        return CertificatesWithRawResponse(self._projects.certificates)
+
+
+class AsyncProjectsWithRawResponse:
+    """Raw-response wrapper around :class:`AsyncProjects`.
+
+    Wrapped methods return the raw HTTP response object instead of the parsed
+    content; sub-resources are exposed through their own raw-response wrappers.
+    """
+
+    def __init__(self, projects: AsyncProjects) -> None:
+        self._projects = projects
+
+        self.create = _legacy_response.async_to_raw_response_wrapper(
+            projects.create,
+        )
+        self.retrieve = _legacy_response.async_to_raw_response_wrapper(
+            projects.retrieve,
+        )
+        self.update = _legacy_response.async_to_raw_response_wrapper(
+            projects.update,
+        )
+        self.list = _legacy_response.async_to_raw_response_wrapper(
+            projects.list,
+        )
+        self.archive = _legacy_response.async_to_raw_response_wrapper(
+            projects.archive,
+        )
+
+    @cached_property
+    def users(self) -> AsyncUsersWithRawResponse:
+        return AsyncUsersWithRawResponse(self._projects.users)
+
+    @cached_property
+    def service_accounts(self) -> AsyncServiceAccountsWithRawResponse:
+        return AsyncServiceAccountsWithRawResponse(self._projects.service_accounts)
+
+    @cached_property
+    def api_keys(self) -> AsyncAPIKeysWithRawResponse:
+        return AsyncAPIKeysWithRawResponse(self._projects.api_keys)
+
+    @cached_property
+    def rate_limits(self) -> AsyncRateLimitsWithRawResponse:
+        return AsyncRateLimitsWithRawResponse(self._projects.rate_limits)
+
+    @cached_property
+    def groups(self) -> AsyncGroupsWithRawResponse:
+        return AsyncGroupsWithRawResponse(self._projects.groups)
+
+    @cached_property
+    def roles(self) -> AsyncRolesWithRawResponse:
+        return AsyncRolesWithRawResponse(self._projects.roles)
+
+    @cached_property
+    def certificates(self) -> AsyncCertificatesWithRawResponse:
+        return AsyncCertificatesWithRawResponse(self._projects.certificates)
+
+
+class ProjectsWithStreamingResponse:
+    """Streaming-response wrapper around :class:`Projects`.
+
+    Like the raw-response wrapper, but does not eagerly read the response body;
+    sub-resources are exposed through their own streaming-response wrappers.
+    """
+
+    def __init__(self, projects: Projects) -> None:
+        self._projects = projects
+
+        self.create = to_streamed_response_wrapper(
+            projects.create,
+        )
+        self.retrieve = to_streamed_response_wrapper(
+            projects.retrieve,
+        )
+        self.update = to_streamed_response_wrapper(
+            projects.update,
+        )
+        self.list = to_streamed_response_wrapper(
+            projects.list,
+        )
+        self.archive = to_streamed_response_wrapper(
+            projects.archive,
+        )
+
+    @cached_property
+    def users(self) -> UsersWithStreamingResponse:
+        return UsersWithStreamingResponse(self._projects.users)
+
+    @cached_property
+    def service_accounts(self) -> ServiceAccountsWithStreamingResponse:
+        return ServiceAccountsWithStreamingResponse(self._projects.service_accounts)
+
+    @cached_property
+    def api_keys(self) -> APIKeysWithStreamingResponse:
+        return APIKeysWithStreamingResponse(self._projects.api_keys)
+
+    @cached_property
+    def rate_limits(self) -> RateLimitsWithStreamingResponse:
+        return RateLimitsWithStreamingResponse(self._projects.rate_limits)
+
+    @cached_property
+    def groups(self) -> GroupsWithStreamingResponse:
+        return GroupsWithStreamingResponse(self._projects.groups)
+
+    @cached_property
+    def roles(self) -> RolesWithStreamingResponse:
+        return RolesWithStreamingResponse(self._projects.roles)
+
+    @cached_property
+    def certificates(self) -> CertificatesWithStreamingResponse:
+        return CertificatesWithStreamingResponse(self._projects.certificates)
+
+
+class AsyncProjectsWithStreamingResponse:
+    """Streaming-response wrapper around :class:`AsyncProjects`.
+
+    Like the raw-response wrapper, but does not eagerly read the response body;
+    sub-resources are exposed through their own streaming-response wrappers.
+    """
+
+    def __init__(self, projects: AsyncProjects) -> None:
+        self._projects = projects
+
+        self.create = async_to_streamed_response_wrapper(
+            projects.create,
+        )
+        self.retrieve = async_to_streamed_response_wrapper(
+            projects.retrieve,
+        )
+        self.update = async_to_streamed_response_wrapper(
+            projects.update,
+        )
+        self.list = async_to_streamed_response_wrapper(
+            projects.list,
+        )
+        self.archive = async_to_streamed_response_wrapper(
+            projects.archive,
+        )
+
+    @cached_property
+    def users(self) -> AsyncUsersWithStreamingResponse:
+        return AsyncUsersWithStreamingResponse(self._projects.users)
+
+    @cached_property
+    def service_accounts(self) -> AsyncServiceAccountsWithStreamingResponse:
+        return AsyncServiceAccountsWithStreamingResponse(self._projects.service_accounts)
+
+    @cached_property
+    def api_keys(self) -> AsyncAPIKeysWithStreamingResponse:
+        return AsyncAPIKeysWithStreamingResponse(self._projects.api_keys)
+
+    @cached_property
+    def rate_limits(self) -> AsyncRateLimitsWithStreamingResponse:
+        return AsyncRateLimitsWithStreamingResponse(self._projects.rate_limits)
+
+    @cached_property
+    def groups(self) -> AsyncGroupsWithStreamingResponse:
+        return AsyncGroupsWithStreamingResponse(self._projects.groups)
+
+    @cached_property
+    def roles(self) -> AsyncRolesWithStreamingResponse:
+        return AsyncRolesWithStreamingResponse(self._projects.roles)
+
+    @cached_property
+    def certificates(self) -> AsyncCertificatesWithStreamingResponse:
+        return AsyncCertificatesWithStreamingResponse(self._projects.certificates)
diff --git a/src/openai/resources/admin/organization/projects/rate_limits.py b/src/openai/resources/admin/organization/projects/rate_limits.py
new file mode 100644
index 0000000000..9fe20f572e
--- /dev/null
+++ b/src/openai/resources/admin/organization/projects/rate_limits.py
@@ -0,0 +1,379 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import httpx
+
+from ..... import _legacy_response
+from ....._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
+from ....._utils import path_template, maybe_transform, async_maybe_transform
+from ....._compat import cached_property
+from ....._resource import SyncAPIResource, AsyncAPIResource
+from ....._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from .....pagination import SyncConversationCursorPage, AsyncConversationCursorPage
+from ....._base_client import AsyncPaginator, make_request_options
+from .....types.admin.organization.projects import (
+ rate_limit_list_rate_limits_params,
+ rate_limit_update_rate_limit_params,
+)
+from .....types.admin.organization.projects.project_rate_limit import ProjectRateLimit
+
+__all__ = ["RateLimits", "AsyncRateLimits"]
+
+
+class RateLimits(SyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> RateLimitsWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return RateLimitsWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> RateLimitsWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return RateLimitsWithStreamingResponse(self)
+
+ def list_rate_limits(
+ self,
+ project_id: str,
+ *,
+ after: str | Omit = omit,
+ before: str | Omit = omit,
+ limit: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> SyncConversationCursorPage[ProjectRateLimit]:
+ """
+ Returns the rate limits per model for a project.
+
+ Args:
+ after: A cursor for use in pagination. `after` is an object ID that defines your place
+ in the list. For instance, if you make a list request and receive 100 objects,
+ ending with obj_foo, your subsequent call can include after=obj_foo in order to
+ fetch the next page of the list.
+
+ before: A cursor for use in pagination. `before` is an object ID that defines your place
+ in the list. For instance, if you make a list request and receive 100 objects,
+ beginning with obj_foo, your subsequent call can include before=obj_foo in order
+ to fetch the previous page of the list.
+
+ limit: A limit on the number of objects to be returned. The default is 100.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ return self._get_api_list(
+ path_template("/organization/projects/{project_id}/rate_limits", project_id=project_id),
+ page=SyncConversationCursorPage[ProjectRateLimit],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "before": before,
+ "limit": limit,
+ },
+ rate_limit_list_rate_limits_params.RateLimitListRateLimitsParams,
+ ),
+ security={"admin_api_key_auth": True},
+ ),
+ model=ProjectRateLimit,
+ )
+
+ def update_rate_limit(
+ self,
+ rate_limit_id: str,
+ *,
+ project_id: str,
+ batch_1_day_max_input_tokens: int | Omit = omit,
+ max_audio_megabytes_per_1_minute: int | Omit = omit,
+ max_images_per_1_minute: int | Omit = omit,
+ max_requests_per_1_day: int | Omit = omit,
+ max_requests_per_1_minute: int | Omit = omit,
+ max_tokens_per_1_minute: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ProjectRateLimit:
+ """
+ Updates a project rate limit.
+
+ Args:
+ batch_1_day_max_input_tokens: The maximum batch input tokens per day. Only relevant for certain models.
+
+ max_audio_megabytes_per_1_minute: The maximum audio megabytes per minute. Only relevant for certain models.
+
+ max_images_per_1_minute: The maximum images per minute. Only relevant for certain models.
+
+ max_requests_per_1_day: The maximum requests per day. Only relevant for certain models.
+
+ max_requests_per_1_minute: The maximum requests per minute.
+
+ max_tokens_per_1_minute: The maximum tokens per minute.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ if not rate_limit_id:
+ raise ValueError(f"Expected a non-empty value for `rate_limit_id` but received {rate_limit_id!r}")
+ return self._post(
+ path_template(
+ "/organization/projects/{project_id}/rate_limits/{rate_limit_id}",
+ project_id=project_id,
+ rate_limit_id=rate_limit_id,
+ ),
+ body=maybe_transform(
+ {
+ "batch_1_day_max_input_tokens": batch_1_day_max_input_tokens,
+ "max_audio_megabytes_per_1_minute": max_audio_megabytes_per_1_minute,
+ "max_images_per_1_minute": max_images_per_1_minute,
+ "max_requests_per_1_day": max_requests_per_1_day,
+ "max_requests_per_1_minute": max_requests_per_1_minute,
+ "max_tokens_per_1_minute": max_tokens_per_1_minute,
+ },
+ rate_limit_update_rate_limit_params.RateLimitUpdateRateLimitParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=ProjectRateLimit,
+ )
+
+
+class AsyncRateLimits(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncRateLimitsWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncRateLimitsWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncRateLimitsWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return AsyncRateLimitsWithStreamingResponse(self)
+
+ def list_rate_limits(
+ self,
+ project_id: str,
+ *,
+ after: str | Omit = omit,
+ before: str | Omit = omit,
+ limit: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AsyncPaginator[ProjectRateLimit, AsyncConversationCursorPage[ProjectRateLimit]]:
+ """
+ Returns the rate limits per model for a project.
+
+ Args:
+ after: A cursor for use in pagination. `after` is an object ID that defines your place
+ in the list. For instance, if you make a list request and receive 100 objects,
+ ending with obj_foo, your subsequent call can include after=obj_foo in order to
+ fetch the next page of the list.
+
+ before: A cursor for use in pagination. `before` is an object ID that defines your place
+ in the list. For instance, if you make a list request and receive 100 objects,
+ beginning with obj_foo, your subsequent call can include before=obj_foo in order
+ to fetch the previous page of the list.
+
+ limit: A limit on the number of objects to be returned. The default is 100.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ return self._get_api_list(
+ path_template("/organization/projects/{project_id}/rate_limits", project_id=project_id),
+ page=AsyncConversationCursorPage[ProjectRateLimit],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "before": before,
+ "limit": limit,
+ },
+ rate_limit_list_rate_limits_params.RateLimitListRateLimitsParams,
+ ),
+ security={"admin_api_key_auth": True},
+ ),
+ model=ProjectRateLimit,
+ )
+
+ async def update_rate_limit(
+ self,
+ rate_limit_id: str,
+ *,
+ project_id: str,
+ batch_1_day_max_input_tokens: int | Omit = omit,
+ max_audio_megabytes_per_1_minute: int | Omit = omit,
+ max_images_per_1_minute: int | Omit = omit,
+ max_requests_per_1_day: int | Omit = omit,
+ max_requests_per_1_minute: int | Omit = omit,
+ max_tokens_per_1_minute: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ProjectRateLimit:
+ """
+ Updates a project rate limit.
+
+ Args:
+ batch_1_day_max_input_tokens: The maximum batch input tokens per day. Only relevant for certain models.
+
+ max_audio_megabytes_per_1_minute: The maximum audio megabytes per minute. Only relevant for certain models.
+
+ max_images_per_1_minute: The maximum images per minute. Only relevant for certain models.
+
+ max_requests_per_1_day: The maximum requests per day. Only relevant for certain models.
+
+ max_requests_per_1_minute: The maximum requests per minute.
+
+ max_tokens_per_1_minute: The maximum tokens per minute.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ if not rate_limit_id:
+ raise ValueError(f"Expected a non-empty value for `rate_limit_id` but received {rate_limit_id!r}")
+ return await self._post(
+ path_template(
+ "/organization/projects/{project_id}/rate_limits/{rate_limit_id}",
+ project_id=project_id,
+ rate_limit_id=rate_limit_id,
+ ),
+ body=await async_maybe_transform(
+ {
+ "batch_1_day_max_input_tokens": batch_1_day_max_input_tokens,
+ "max_audio_megabytes_per_1_minute": max_audio_megabytes_per_1_minute,
+ "max_images_per_1_minute": max_images_per_1_minute,
+ "max_requests_per_1_day": max_requests_per_1_day,
+ "max_requests_per_1_minute": max_requests_per_1_minute,
+ "max_tokens_per_1_minute": max_tokens_per_1_minute,
+ },
+ rate_limit_update_rate_limit_params.RateLimitUpdateRateLimitParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=ProjectRateLimit,
+ )
+
+
+class RateLimitsWithRawResponse:
+ def __init__(self, rate_limits: RateLimits) -> None:
+ self._rate_limits = rate_limits
+
+ self.list_rate_limits = _legacy_response.to_raw_response_wrapper(
+ rate_limits.list_rate_limits,
+ )
+ self.update_rate_limit = _legacy_response.to_raw_response_wrapper(
+ rate_limits.update_rate_limit,
+ )
+
+
+class AsyncRateLimitsWithRawResponse:
+ def __init__(self, rate_limits: AsyncRateLimits) -> None:
+ self._rate_limits = rate_limits
+
+ self.list_rate_limits = _legacy_response.async_to_raw_response_wrapper(
+ rate_limits.list_rate_limits,
+ )
+ self.update_rate_limit = _legacy_response.async_to_raw_response_wrapper(
+ rate_limits.update_rate_limit,
+ )
+
+
+class RateLimitsWithStreamingResponse:
+ def __init__(self, rate_limits: RateLimits) -> None:
+ self._rate_limits = rate_limits
+
+ self.list_rate_limits = to_streamed_response_wrapper(
+ rate_limits.list_rate_limits,
+ )
+ self.update_rate_limit = to_streamed_response_wrapper(
+ rate_limits.update_rate_limit,
+ )
+
+
+class AsyncRateLimitsWithStreamingResponse:
+ def __init__(self, rate_limits: AsyncRateLimits) -> None:
+ self._rate_limits = rate_limits
+
+ self.list_rate_limits = async_to_streamed_response_wrapper(
+ rate_limits.list_rate_limits,
+ )
+ self.update_rate_limit = async_to_streamed_response_wrapper(
+ rate_limits.update_rate_limit,
+ )
diff --git a/src/openai/resources/admin/organization/projects/roles.py b/src/openai/resources/admin/organization/projects/roles.py
new file mode 100644
index 0000000000..c958b037bb
--- /dev/null
+++ b/src/openai/resources/admin/organization/projects/roles.py
@@ -0,0 +1,552 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Literal
+
+import httpx
+
+from ..... import _legacy_response
+from ....._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
+from ....._utils import path_template, maybe_transform, async_maybe_transform
+from ....._compat import cached_property
+from ....._resource import SyncAPIResource, AsyncAPIResource
+from ....._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from .....pagination import SyncNextCursorPage, AsyncNextCursorPage
+from ....._base_client import AsyncPaginator, make_request_options
+from .....types.admin.organization.role import Role
+from .....types.admin.organization.projects import role_list_params, role_create_params, role_update_params
+from .....types.admin.organization.projects.role_delete_response import RoleDeleteResponse
+
+__all__ = ["Roles", "AsyncRoles"]
+
+
+class Roles(SyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> RolesWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return RolesWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> RolesWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return RolesWithStreamingResponse(self)
+
+ def create(
+ self,
+ project_id: str,
+ *,
+ permissions: SequenceNotStr[str],
+ role_name: str,
+ description: Optional[str] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> Role:
+ """
+ Creates a custom role for a project.
+
+ Args:
+ permissions: Permissions to grant to the role.
+
+ role_name: Unique name for the role.
+
+ description: Optional description of the role.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ return self._post(
+ path_template("/projects/{project_id}/roles", project_id=project_id),
+ body=maybe_transform(
+ {
+ "permissions": permissions,
+ "role_name": role_name,
+ "description": description,
+ },
+ role_create_params.RoleCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=Role,
+ )
+
+ def update(
+ self,
+ role_id: str,
+ *,
+ project_id: str,
+ description: Optional[str] | Omit = omit,
+ permissions: Optional[SequenceNotStr[str]] | Omit = omit,
+ role_name: Optional[str] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> Role:
+ """
+ Updates an existing project role.
+
+ Args:
+ description: New description for the role.
+
+ permissions: Updated set of permissions for the role.
+
+ role_name: New name for the role.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ if not role_id:
+ raise ValueError(f"Expected a non-empty value for `role_id` but received {role_id!r}")
+ return self._post(
+ path_template("/projects/{project_id}/roles/{role_id}", project_id=project_id, role_id=role_id),
+ body=maybe_transform(
+ {
+ "description": description,
+ "permissions": permissions,
+ "role_name": role_name,
+ },
+ role_update_params.RoleUpdateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=Role,
+ )
+
+ def list(
+ self,
+ project_id: str,
+ *,
+ after: str | Omit = omit,
+ limit: int | Omit = omit,
+ order: Literal["asc", "desc"] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> SyncNextCursorPage[Role]:
+ """Lists the roles configured for a project.
+
+ Args:
+ after: Cursor for pagination.
+
+ Provide the value from the previous response's `next`
+ field to continue listing roles.
+
+ limit: A limit on the number of roles to return. Defaults to 1000.
+
+ order: Sort order for the returned roles.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ return self._get_api_list(
+ path_template("/projects/{project_id}/roles", project_id=project_id),
+ page=SyncNextCursorPage[Role],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "limit": limit,
+ "order": order,
+ },
+ role_list_params.RoleListParams,
+ ),
+ security={"admin_api_key_auth": True},
+ ),
+ model=Role,
+ )
+
+ def delete(
+ self,
+ role_id: str,
+ *,
+ project_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> RoleDeleteResponse:
+ """
+ Deletes a custom role from a project.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ if not role_id:
+ raise ValueError(f"Expected a non-empty value for `role_id` but received {role_id!r}")
+ return self._delete(
+ path_template("/projects/{project_id}/roles/{role_id}", project_id=project_id, role_id=role_id),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=RoleDeleteResponse,
+ )
+
+
+class AsyncRoles(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncRolesWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncRolesWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncRolesWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return AsyncRolesWithStreamingResponse(self)
+
+ async def create(
+ self,
+ project_id: str,
+ *,
+ permissions: SequenceNotStr[str],
+ role_name: str,
+ description: Optional[str] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> Role:
+ """
+ Creates a custom role for a project.
+
+ Args:
+ permissions: Permissions to grant to the role.
+
+ role_name: Unique name for the role.
+
+ description: Optional description of the role.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ return await self._post(
+ path_template("/projects/{project_id}/roles", project_id=project_id),
+ body=await async_maybe_transform(
+ {
+ "permissions": permissions,
+ "role_name": role_name,
+ "description": description,
+ },
+ role_create_params.RoleCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=Role,
+ )
+
+ async def update(
+ self,
+ role_id: str,
+ *,
+ project_id: str,
+ description: Optional[str] | Omit = omit,
+ permissions: Optional[SequenceNotStr[str]] | Omit = omit,
+ role_name: Optional[str] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> Role:
+ """
+ Updates an existing project role.
+
+ Args:
+ description: New description for the role.
+
+ permissions: Updated set of permissions for the role.
+
+ role_name: New name for the role.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ if not role_id:
+ raise ValueError(f"Expected a non-empty value for `role_id` but received {role_id!r}")
+ return await self._post(
+ path_template("/projects/{project_id}/roles/{role_id}", project_id=project_id, role_id=role_id),
+ body=await async_maybe_transform(
+ {
+ "description": description,
+ "permissions": permissions,
+ "role_name": role_name,
+ },
+ role_update_params.RoleUpdateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=Role,
+ )
+
+ def list(
+ self,
+ project_id: str,
+ *,
+ after: str | Omit = omit,
+ limit: int | Omit = omit,
+ order: Literal["asc", "desc"] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AsyncPaginator[Role, AsyncNextCursorPage[Role]]:
+ """Lists the roles configured for a project.
+
+ Args:
+ after: Cursor for pagination.
+
+ Provide the value from the previous response's `next`
+ field to continue listing roles.
+
+ limit: A limit on the number of roles to return. Defaults to 1000.
+
+ order: Sort order for the returned roles.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ return self._get_api_list(
+ path_template("/projects/{project_id}/roles", project_id=project_id),
+ page=AsyncNextCursorPage[Role],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "limit": limit,
+ "order": order,
+ },
+ role_list_params.RoleListParams,
+ ),
+ security={"admin_api_key_auth": True},
+ ),
+ model=Role,
+ )
+
+ async def delete(
+ self,
+ role_id: str,
+ *,
+ project_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> RoleDeleteResponse:
+ """
+ Deletes a custom role from a project.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ if not role_id:
+ raise ValueError(f"Expected a non-empty value for `role_id` but received {role_id!r}")
+ return await self._delete(
+ path_template("/projects/{project_id}/roles/{role_id}", project_id=project_id, role_id=role_id),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=RoleDeleteResponse,
+ )
+
+
+class RolesWithRawResponse:
+ def __init__(self, roles: Roles) -> None:
+ self._roles = roles
+
+ self.create = _legacy_response.to_raw_response_wrapper(
+ roles.create,
+ )
+ self.update = _legacy_response.to_raw_response_wrapper(
+ roles.update,
+ )
+ self.list = _legacy_response.to_raw_response_wrapper(
+ roles.list,
+ )
+ self.delete = _legacy_response.to_raw_response_wrapper(
+ roles.delete,
+ )
+
+
+class AsyncRolesWithRawResponse:
+ def __init__(self, roles: AsyncRoles) -> None:
+ self._roles = roles
+
+ self.create = _legacy_response.async_to_raw_response_wrapper(
+ roles.create,
+ )
+ self.update = _legacy_response.async_to_raw_response_wrapper(
+ roles.update,
+ )
+ self.list = _legacy_response.async_to_raw_response_wrapper(
+ roles.list,
+ )
+ self.delete = _legacy_response.async_to_raw_response_wrapper(
+ roles.delete,
+ )
+
+
+class RolesWithStreamingResponse:
+ def __init__(self, roles: Roles) -> None:
+ self._roles = roles
+
+ self.create = to_streamed_response_wrapper(
+ roles.create,
+ )
+ self.update = to_streamed_response_wrapper(
+ roles.update,
+ )
+ self.list = to_streamed_response_wrapper(
+ roles.list,
+ )
+ self.delete = to_streamed_response_wrapper(
+ roles.delete,
+ )
+
+
+class AsyncRolesWithStreamingResponse:
+ def __init__(self, roles: AsyncRoles) -> None:
+ self._roles = roles
+
+ self.create = async_to_streamed_response_wrapper(
+ roles.create,
+ )
+ self.update = async_to_streamed_response_wrapper(
+ roles.update,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ roles.list,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ roles.delete,
+ )
diff --git a/src/openai/resources/admin/organization/projects/service_accounts.py b/src/openai/resources/admin/organization/projects/service_accounts.py
new file mode 100644
index 0000000000..9c265fd766
--- /dev/null
+++ b/src/openai/resources/admin/organization/projects/service_accounts.py
@@ -0,0 +1,512 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import httpx
+
+from ..... import _legacy_response
+from ....._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
+from ....._utils import path_template, maybe_transform, async_maybe_transform
+from ....._compat import cached_property
+from ....._resource import SyncAPIResource, AsyncAPIResource
+from ....._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from .....pagination import SyncConversationCursorPage, AsyncConversationCursorPage
+from ....._base_client import AsyncPaginator, make_request_options
+from .....types.admin.organization.projects import service_account_list_params, service_account_create_params
+from .....types.admin.organization.projects.project_service_account import ProjectServiceAccount
+from .....types.admin.organization.projects.service_account_create_response import ServiceAccountCreateResponse
+from .....types.admin.organization.projects.service_account_delete_response import ServiceAccountDeleteResponse
+
+__all__ = ["ServiceAccounts", "AsyncServiceAccounts"]
+
+
+class ServiceAccounts(SyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> ServiceAccountsWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return ServiceAccountsWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> ServiceAccountsWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return ServiceAccountsWithStreamingResponse(self)
+
+ def create(
+ self,
+ project_id: str,
+ *,
+ name: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ServiceAccountCreateResponse:
+ """Creates a new service account in the project.
+
+ This also returns an unredacted
+ API key for the service account.
+
+ Args:
+ name: The name of the service account being created.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ return self._post(
+ path_template("/organization/projects/{project_id}/service_accounts", project_id=project_id),
+ body=maybe_transform({"name": name}, service_account_create_params.ServiceAccountCreateParams),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=ServiceAccountCreateResponse,
+ )
+
+ def retrieve(
+ self,
+ service_account_id: str,
+ *,
+ project_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ProjectServiceAccount:
+ """
+ Retrieves a service account in the project.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ if not service_account_id:
+ raise ValueError(f"Expected a non-empty value for `service_account_id` but received {service_account_id!r}")
+ return self._get(
+ path_template(
+ "/organization/projects/{project_id}/service_accounts/{service_account_id}",
+ project_id=project_id,
+ service_account_id=service_account_id,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=ProjectServiceAccount,
+ )
+
+ def list(
+ self,
+ project_id: str,
+ *,
+ after: str | Omit = omit,
+ limit: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> SyncConversationCursorPage[ProjectServiceAccount]:
+ """
+ Returns a list of service accounts in the project.
+
+ Args:
+ after: A cursor for use in pagination. `after` is an object ID that defines your place
+ in the list. For instance, if you make a list request and receive 100 objects,
+ ending with obj_foo, your subsequent call can include after=obj_foo in order to
+ fetch the next page of the list.
+
+ limit: A limit on the number of objects to be returned. Limit can range between 1 and
+ 100, and the default is 20.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ return self._get_api_list(
+ path_template("/organization/projects/{project_id}/service_accounts", project_id=project_id),
+ page=SyncConversationCursorPage[ProjectServiceAccount],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "limit": limit,
+ },
+ service_account_list_params.ServiceAccountListParams,
+ ),
+ security={"admin_api_key_auth": True},
+ ),
+ model=ProjectServiceAccount,
+ )
+
+ def delete(
+ self,
+ service_account_id: str,
+ *,
+ project_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ServiceAccountDeleteResponse:
+ """
+ Deletes a service account from the project.
+
+ Returns confirmation of service account deletion, or an error if the project is
+ archived (archived projects have no service accounts).
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ if not service_account_id:
+ raise ValueError(f"Expected a non-empty value for `service_account_id` but received {service_account_id!r}")
+ return self._delete(
+ path_template(
+ "/organization/projects/{project_id}/service_accounts/{service_account_id}",
+ project_id=project_id,
+ service_account_id=service_account_id,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=ServiceAccountDeleteResponse,
+ )
+
+
+class AsyncServiceAccounts(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncServiceAccountsWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncServiceAccountsWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncServiceAccountsWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return AsyncServiceAccountsWithStreamingResponse(self)
+
+ async def create(
+ self,
+ project_id: str,
+ *,
+ name: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ServiceAccountCreateResponse:
+ """Creates a new service account in the project.
+
+ This also returns an unredacted
+ API key for the service account.
+
+ Args:
+ name: The name of the service account being created.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ return await self._post(
+ path_template("/organization/projects/{project_id}/service_accounts", project_id=project_id),
+ body=await async_maybe_transform({"name": name}, service_account_create_params.ServiceAccountCreateParams),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=ServiceAccountCreateResponse,
+ )
+
+ async def retrieve(
+ self,
+ service_account_id: str,
+ *,
+ project_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ProjectServiceAccount:
+ """
+ Retrieves a service account in the project.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ if not service_account_id:
+ raise ValueError(f"Expected a non-empty value for `service_account_id` but received {service_account_id!r}")
+ return await self._get(
+ path_template(
+ "/organization/projects/{project_id}/service_accounts/{service_account_id}",
+ project_id=project_id,
+ service_account_id=service_account_id,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=ProjectServiceAccount,
+ )
+
+ def list(
+ self,
+ project_id: str,
+ *,
+ after: str | Omit = omit,
+ limit: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AsyncPaginator[ProjectServiceAccount, AsyncConversationCursorPage[ProjectServiceAccount]]:
+ """
+ Returns a list of service accounts in the project.
+
+ Args:
+ after: A cursor for use in pagination. `after` is an object ID that defines your place
+ in the list. For instance, if you make a list request and receive 100 objects,
+ ending with obj_foo, your subsequent call can include after=obj_foo in order to
+ fetch the next page of the list.
+
+ limit: A limit on the number of objects to be returned. Limit can range between 1 and
+ 100, and the default is 20.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ return self._get_api_list(
+ path_template("/organization/projects/{project_id}/service_accounts", project_id=project_id),
+ page=AsyncConversationCursorPage[ProjectServiceAccount],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "limit": limit,
+ },
+ service_account_list_params.ServiceAccountListParams,
+ ),
+ security={"admin_api_key_auth": True},
+ ),
+ model=ProjectServiceAccount,
+ )
+
+ async def delete(
+ self,
+ service_account_id: str,
+ *,
+ project_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ServiceAccountDeleteResponse:
+ """
+ Deletes a service account from the project.
+
+ Returns confirmation of service account deletion, or an error if the project is
+ archived (archived projects have no service accounts).
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ if not service_account_id:
+ raise ValueError(f"Expected a non-empty value for `service_account_id` but received {service_account_id!r}")
+ return await self._delete(
+ path_template(
+ "/organization/projects/{project_id}/service_accounts/{service_account_id}",
+ project_id=project_id,
+ service_account_id=service_account_id,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=ServiceAccountDeleteResponse,
+ )
+
+
+class ServiceAccountsWithRawResponse:
+ def __init__(self, service_accounts: ServiceAccounts) -> None:
+ self._service_accounts = service_accounts
+
+ self.create = _legacy_response.to_raw_response_wrapper(
+ service_accounts.create,
+ )
+ self.retrieve = _legacy_response.to_raw_response_wrapper(
+ service_accounts.retrieve,
+ )
+ self.list = _legacy_response.to_raw_response_wrapper(
+ service_accounts.list,
+ )
+ self.delete = _legacy_response.to_raw_response_wrapper(
+ service_accounts.delete,
+ )
+
+
+class AsyncServiceAccountsWithRawResponse:
+ def __init__(self, service_accounts: AsyncServiceAccounts) -> None:
+ self._service_accounts = service_accounts
+
+ self.create = _legacy_response.async_to_raw_response_wrapper(
+ service_accounts.create,
+ )
+ self.retrieve = _legacy_response.async_to_raw_response_wrapper(
+ service_accounts.retrieve,
+ )
+ self.list = _legacy_response.async_to_raw_response_wrapper(
+ service_accounts.list,
+ )
+ self.delete = _legacy_response.async_to_raw_response_wrapper(
+ service_accounts.delete,
+ )
+
+
+class ServiceAccountsWithStreamingResponse:
+ def __init__(self, service_accounts: ServiceAccounts) -> None:
+ self._service_accounts = service_accounts
+
+ self.create = to_streamed_response_wrapper(
+ service_accounts.create,
+ )
+ self.retrieve = to_streamed_response_wrapper(
+ service_accounts.retrieve,
+ )
+ self.list = to_streamed_response_wrapper(
+ service_accounts.list,
+ )
+ self.delete = to_streamed_response_wrapper(
+ service_accounts.delete,
+ )
+
+
+class AsyncServiceAccountsWithStreamingResponse:
+ def __init__(self, service_accounts: AsyncServiceAccounts) -> None:
+ self._service_accounts = service_accounts
+
+ self.create = async_to_streamed_response_wrapper(
+ service_accounts.create,
+ )
+ self.retrieve = async_to_streamed_response_wrapper(
+ service_accounts.retrieve,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ service_accounts.list,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ service_accounts.delete,
+ )
diff --git a/src/openai/resources/admin/organization/projects/users/__init__.py b/src/openai/resources/admin/organization/projects/users/__init__.py
new file mode 100644
index 0000000000..d230cb8f34
--- /dev/null
+++ b/src/openai/resources/admin/organization/projects/users/__init__.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .roles import (
+ Roles,
+ AsyncRoles,
+ RolesWithRawResponse,
+ AsyncRolesWithRawResponse,
+ RolesWithStreamingResponse,
+ AsyncRolesWithStreamingResponse,
+)
+from .users import (
+ Users,
+ AsyncUsers,
+ UsersWithRawResponse,
+ AsyncUsersWithRawResponse,
+ UsersWithStreamingResponse,
+ AsyncUsersWithStreamingResponse,
+)
+
+__all__ = [
+ "Roles",
+ "AsyncRoles",
+ "RolesWithRawResponse",
+ "AsyncRolesWithRawResponse",
+ "RolesWithStreamingResponse",
+ "AsyncRolesWithStreamingResponse",
+ "Users",
+ "AsyncUsers",
+ "UsersWithRawResponse",
+ "AsyncUsersWithRawResponse",
+ "UsersWithStreamingResponse",
+ "AsyncUsersWithStreamingResponse",
+]
diff --git a/src/openai/resources/admin/organization/projects/users/roles.py b/src/openai/resources/admin/organization/projects/users/roles.py
new file mode 100644
index 0000000000..6a3233e275
--- /dev/null
+++ b/src/openai/resources/admin/organization/projects/users/roles.py
@@ -0,0 +1,426 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal
+
+import httpx
+
+from ...... import _legacy_response
+from ......_types import Body, Omit, Query, Headers, NotGiven, omit, not_given
+from ......_utils import path_template, maybe_transform, async_maybe_transform
+from ......_compat import cached_property
+from ......_resource import SyncAPIResource, AsyncAPIResource
+from ......_response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ......pagination import SyncNextCursorPage, AsyncNextCursorPage
+from ......_base_client import AsyncPaginator, make_request_options
+from ......types.admin.organization.projects.users import role_list_params, role_create_params
+from ......types.admin.organization.projects.users.role_list_response import RoleListResponse
+from ......types.admin.organization.projects.users.role_create_response import RoleCreateResponse
+from ......types.admin.organization.projects.users.role_delete_response import RoleDeleteResponse
+
+__all__ = ["Roles", "AsyncRoles"]
+
+
+class Roles(SyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> RolesWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return RolesWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> RolesWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return RolesWithStreamingResponse(self)
+
+ def create(
+ self,
+ user_id: str,
+ *,
+ project_id: str,
+ role_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> RoleCreateResponse:
+ """
+ Assigns a project role to a user within a project.
+
+ Args:
+ role_id: Identifier of the role to assign.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ if not user_id:
+ raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}")
+ return self._post(
+ path_template("/projects/{project_id}/users/{user_id}/roles", project_id=project_id, user_id=user_id),
+ body=maybe_transform({"role_id": role_id}, role_create_params.RoleCreateParams),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=RoleCreateResponse,
+ )
+
+ def list(
+ self,
+ user_id: str,
+ *,
+ project_id: str,
+ after: str | Omit = omit,
+ limit: int | Omit = omit,
+ order: Literal["asc", "desc"] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> SyncNextCursorPage[RoleListResponse]:
+ """
+ Lists the project roles assigned to a user within a project.
+
+ Args:
+ after: Cursor for pagination. Provide the value from the previous response's `next`
+ field to continue listing project roles.
+
+ limit: A limit on the number of project role assignments to return.
+
+ order: Sort order for the returned project roles.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ if not user_id:
+ raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}")
+ return self._get_api_list(
+ path_template("/projects/{project_id}/users/{user_id}/roles", project_id=project_id, user_id=user_id),
+ page=SyncNextCursorPage[RoleListResponse],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "limit": limit,
+ "order": order,
+ },
+ role_list_params.RoleListParams,
+ ),
+ security={"admin_api_key_auth": True},
+ ),
+ model=RoleListResponse,
+ )
+
+ def delete(
+ self,
+ role_id: str,
+ *,
+ project_id: str,
+ user_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> RoleDeleteResponse:
+ """
+ Unassigns a project role from a user within a project.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ if not user_id:
+ raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}")
+ if not role_id:
+ raise ValueError(f"Expected a non-empty value for `role_id` but received {role_id!r}")
+ return self._delete(
+ path_template(
+ "/projects/{project_id}/users/{user_id}/roles/{role_id}",
+ project_id=project_id,
+ user_id=user_id,
+ role_id=role_id,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=RoleDeleteResponse,
+ )
+
+
+class AsyncRoles(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncRolesWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncRolesWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncRolesWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return AsyncRolesWithStreamingResponse(self)
+
+ async def create(
+ self,
+ user_id: str,
+ *,
+ project_id: str,
+ role_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> RoleCreateResponse:
+ """
+ Assigns a project role to a user within a project.
+
+ Args:
+ role_id: Identifier of the role to assign.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ if not user_id:
+ raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}")
+ return await self._post(
+ path_template("/projects/{project_id}/users/{user_id}/roles", project_id=project_id, user_id=user_id),
+ body=await async_maybe_transform({"role_id": role_id}, role_create_params.RoleCreateParams),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=RoleCreateResponse,
+ )
+
+ def list(
+ self,
+ user_id: str,
+ *,
+ project_id: str,
+ after: str | Omit = omit,
+ limit: int | Omit = omit,
+ order: Literal["asc", "desc"] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AsyncPaginator[RoleListResponse, AsyncNextCursorPage[RoleListResponse]]:
+ """
+ Lists the project roles assigned to a user within a project.
+
+ Args:
+ after: Cursor for pagination. Provide the value from the previous response's `next`
+ field to continue listing project roles.
+
+ limit: A limit on the number of project role assignments to return.
+
+ order: Sort order for the returned project roles.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ if not user_id:
+ raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}")
+ return self._get_api_list(
+ path_template("/projects/{project_id}/users/{user_id}/roles", project_id=project_id, user_id=user_id),
+ page=AsyncNextCursorPage[RoleListResponse],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "limit": limit,
+ "order": order,
+ },
+ role_list_params.RoleListParams,
+ ),
+ security={"admin_api_key_auth": True},
+ ),
+ model=RoleListResponse,
+ )
+
+ async def delete(
+ self,
+ role_id: str,
+ *,
+ project_id: str,
+ user_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> RoleDeleteResponse:
+ """
+ Unassigns a project role from a user within a project.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ if not user_id:
+ raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}")
+ if not role_id:
+ raise ValueError(f"Expected a non-empty value for `role_id` but received {role_id!r}")
+ return await self._delete(
+ path_template(
+ "/projects/{project_id}/users/{user_id}/roles/{role_id}",
+ project_id=project_id,
+ user_id=user_id,
+ role_id=role_id,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=RoleDeleteResponse,
+ )
+
+
+class RolesWithRawResponse:
+ def __init__(self, roles: Roles) -> None:
+ self._roles = roles
+
+ self.create = _legacy_response.to_raw_response_wrapper(
+ roles.create,
+ )
+ self.list = _legacy_response.to_raw_response_wrapper(
+ roles.list,
+ )
+ self.delete = _legacy_response.to_raw_response_wrapper(
+ roles.delete,
+ )
+
+
+class AsyncRolesWithRawResponse:
+ def __init__(self, roles: AsyncRoles) -> None:
+ self._roles = roles
+
+ self.create = _legacy_response.async_to_raw_response_wrapper(
+ roles.create,
+ )
+ self.list = _legacy_response.async_to_raw_response_wrapper(
+ roles.list,
+ )
+ self.delete = _legacy_response.async_to_raw_response_wrapper(
+ roles.delete,
+ )
+
+
+class RolesWithStreamingResponse:
+ def __init__(self, roles: Roles) -> None:
+ self._roles = roles
+
+ self.create = to_streamed_response_wrapper(
+ roles.create,
+ )
+ self.list = to_streamed_response_wrapper(
+ roles.list,
+ )
+ self.delete = to_streamed_response_wrapper(
+ roles.delete,
+ )
+
+
+class AsyncRolesWithStreamingResponse:
+ def __init__(self, roles: AsyncRoles) -> None:
+ self._roles = roles
+
+ self.create = async_to_streamed_response_wrapper(
+ roles.create,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ roles.list,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ roles.delete,
+ )
diff --git a/src/openai/resources/admin/organization/projects/users/users.py b/src/openai/resources/admin/organization/projects/users/users.py
new file mode 100644
index 0000000000..3852bfb8c0
--- /dev/null
+++ b/src/openai/resources/admin/organization/projects/users/users.py
@@ -0,0 +1,667 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+
+import httpx
+
+from ...... import _legacy_response
+from .roles import (
+ Roles,
+ AsyncRoles,
+ RolesWithRawResponse,
+ AsyncRolesWithRawResponse,
+ RolesWithStreamingResponse,
+ AsyncRolesWithStreamingResponse,
+)
+from ......_types import Body, Omit, Query, Headers, NotGiven, omit, not_given
+from ......_utils import path_template, maybe_transform, async_maybe_transform
+from ......_compat import cached_property
+from ......_resource import SyncAPIResource, AsyncAPIResource
+from ......_response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ......pagination import SyncConversationCursorPage, AsyncConversationCursorPage
+from ......_base_client import AsyncPaginator, make_request_options
+from ......types.admin.organization.projects import user_list_params, user_create_params, user_update_params
+from ......types.admin.organization.projects.project_user import ProjectUser
+from ......types.admin.organization.projects.user_delete_response import UserDeleteResponse
+
+__all__ = ["Users", "AsyncUsers"]
+
+
+class Users(SyncAPIResource):
+ @cached_property
+ def roles(self) -> Roles:
+ return Roles(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> UsersWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return UsersWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> UsersWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return UsersWithStreamingResponse(self)
+
+ def create(
+ self,
+ project_id: str,
+ *,
+ role: str,
+ email: Optional[str] | Omit = omit,
+ user_id: Optional[str] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ProjectUser:
+ """Adds a user to the project.
+
+ Users must already be members of the organization to
+ be added to a project.
+
+ Args:
+ role: `owner` or `member`
+
+ email: Email of the user to add.
+
+ user_id: The ID of the user.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ return self._post(
+ path_template("/organization/projects/{project_id}/users", project_id=project_id),
+ body=maybe_transform(
+ {
+ "role": role,
+ "email": email,
+ "user_id": user_id,
+ },
+ user_create_params.UserCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=ProjectUser,
+ )
+
+ def retrieve(
+ self,
+ user_id: str,
+ *,
+ project_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ProjectUser:
+ """
+ Retrieves a user in the project.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ if not user_id:
+ raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}")
+ return self._get(
+ path_template(
+ "/organization/projects/{project_id}/users/{user_id}", project_id=project_id, user_id=user_id
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=ProjectUser,
+ )
+
+ def update(
+ self,
+ user_id: str,
+ *,
+ project_id: str,
+ role: Optional[str] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ProjectUser:
+ """
+ Modifies a user's role in the project.
+
+ Args:
+ role: `owner` or `member`
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ if not user_id:
+ raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}")
+ return self._post(
+ path_template(
+ "/organization/projects/{project_id}/users/{user_id}", project_id=project_id, user_id=user_id
+ ),
+ body=maybe_transform({"role": role}, user_update_params.UserUpdateParams),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=ProjectUser,
+ )
+
+ def list(
+ self,
+ project_id: str,
+ *,
+ after: str | Omit = omit,
+ limit: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> SyncConversationCursorPage[ProjectUser]:
+ """
+ Returns a list of users in the project.
+
+ Args:
+ after: A cursor for use in pagination. `after` is an object ID that defines your place
+ in the list. For instance, if you make a list request and receive 100 objects,
+ ending with obj_foo, your subsequent call can include after=obj_foo in order to
+ fetch the next page of the list.
+
+ limit: A limit on the number of objects to be returned. Limit can range between 1 and
+ 100, and the default is 20.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ return self._get_api_list(
+ path_template("/organization/projects/{project_id}/users", project_id=project_id),
+ page=SyncConversationCursorPage[ProjectUser],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "limit": limit,
+ },
+ user_list_params.UserListParams,
+ ),
+ security={"admin_api_key_auth": True},
+ ),
+ model=ProjectUser,
+ )
+
+ def delete(
+ self,
+ user_id: str,
+ *,
+ project_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> UserDeleteResponse:
+ """
+ Deletes a user from the project.
+
+ Returns confirmation of project user deletion, or an error if the project is
+ archived (archived projects have no users).
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ if not user_id:
+ raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}")
+ return self._delete(
+ path_template(
+ "/organization/projects/{project_id}/users/{user_id}", project_id=project_id, user_id=user_id
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=UserDeleteResponse,
+ )
+
+
+class AsyncUsers(AsyncAPIResource):
+ @cached_property
+ def roles(self) -> AsyncRoles:
+ return AsyncRoles(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> AsyncUsersWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncUsersWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncUsersWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return AsyncUsersWithStreamingResponse(self)
+
+ async def create(
+ self,
+ project_id: str,
+ *,
+ role: str,
+ email: Optional[str] | Omit = omit,
+ user_id: Optional[str] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ProjectUser:
+ """Adds a user to the project.
+
+ Users must already be members of the organization to
+ be added to a project.
+
+ Args:
+ role: `owner` or `member`
+
+ email: Email of the user to add.
+
+ user_id: The ID of the user.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ return await self._post(
+ path_template("/organization/projects/{project_id}/users", project_id=project_id),
+ body=await async_maybe_transform(
+ {
+ "role": role,
+ "email": email,
+ "user_id": user_id,
+ },
+ user_create_params.UserCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=ProjectUser,
+ )
+
+ async def retrieve(
+ self,
+ user_id: str,
+ *,
+ project_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ProjectUser:
+ """
+ Retrieves a user in the project.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ if not user_id:
+ raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}")
+ return await self._get(
+ path_template(
+ "/organization/projects/{project_id}/users/{user_id}", project_id=project_id, user_id=user_id
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=ProjectUser,
+ )
+
+ async def update(
+ self,
+ user_id: str,
+ *,
+ project_id: str,
+ role: Optional[str] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ProjectUser:
+ """
+ Modifies a user's role in the project.
+
+ Args:
+ role: `owner` or `member`
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ if not user_id:
+ raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}")
+ return await self._post(
+ path_template(
+ "/organization/projects/{project_id}/users/{user_id}", project_id=project_id, user_id=user_id
+ ),
+ body=await async_maybe_transform({"role": role}, user_update_params.UserUpdateParams),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=ProjectUser,
+ )
+
+ def list(
+ self,
+ project_id: str,
+ *,
+ after: str | Omit = omit,
+ limit: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AsyncPaginator[ProjectUser, AsyncConversationCursorPage[ProjectUser]]:
+ """
+ Returns a list of users in the project.
+
+ Args:
+ after: A cursor for use in pagination. `after` is an object ID that defines your place
+ in the list. For instance, if you make a list request and receive 100 objects,
+ ending with obj_foo, your subsequent call can include after=obj_foo in order to
+ fetch the next page of the list.
+
+ limit: A limit on the number of objects to be returned. Limit can range between 1 and
+ 100, and the default is 20.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ return self._get_api_list(
+ path_template("/organization/projects/{project_id}/users", project_id=project_id),
+ page=AsyncConversationCursorPage[ProjectUser],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "limit": limit,
+ },
+ user_list_params.UserListParams,
+ ),
+ security={"admin_api_key_auth": True},
+ ),
+ model=ProjectUser,
+ )
+
+ async def delete(
+ self,
+ user_id: str,
+ *,
+ project_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> UserDeleteResponse:
+ """
+ Deletes a user from the project.
+
+ Returns confirmation of project user deletion, or an error if the project is
+ archived (archived projects have no users).
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ if not user_id:
+ raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}")
+ return await self._delete(
+ path_template(
+ "/organization/projects/{project_id}/users/{user_id}", project_id=project_id, user_id=user_id
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=UserDeleteResponse,
+ )
+
+
+class UsersWithRawResponse:
+ def __init__(self, users: Users) -> None:
+ self._users = users
+
+ self.create = _legacy_response.to_raw_response_wrapper(
+ users.create,
+ )
+ self.retrieve = _legacy_response.to_raw_response_wrapper(
+ users.retrieve,
+ )
+ self.update = _legacy_response.to_raw_response_wrapper(
+ users.update,
+ )
+ self.list = _legacy_response.to_raw_response_wrapper(
+ users.list,
+ )
+ self.delete = _legacy_response.to_raw_response_wrapper(
+ users.delete,
+ )
+
+ @cached_property
+ def roles(self) -> RolesWithRawResponse:
+ return RolesWithRawResponse(self._users.roles)
+
+
+class AsyncUsersWithRawResponse:
+ def __init__(self, users: AsyncUsers) -> None:
+ self._users = users
+
+ self.create = _legacy_response.async_to_raw_response_wrapper(
+ users.create,
+ )
+ self.retrieve = _legacy_response.async_to_raw_response_wrapper(
+ users.retrieve,
+ )
+ self.update = _legacy_response.async_to_raw_response_wrapper(
+ users.update,
+ )
+ self.list = _legacy_response.async_to_raw_response_wrapper(
+ users.list,
+ )
+ self.delete = _legacy_response.async_to_raw_response_wrapper(
+ users.delete,
+ )
+
+ @cached_property
+ def roles(self) -> AsyncRolesWithRawResponse:
+ return AsyncRolesWithRawResponse(self._users.roles)
+
+
+class UsersWithStreamingResponse:
+ def __init__(self, users: Users) -> None:
+ self._users = users
+
+ self.create = to_streamed_response_wrapper(
+ users.create,
+ )
+ self.retrieve = to_streamed_response_wrapper(
+ users.retrieve,
+ )
+ self.update = to_streamed_response_wrapper(
+ users.update,
+ )
+ self.list = to_streamed_response_wrapper(
+ users.list,
+ )
+ self.delete = to_streamed_response_wrapper(
+ users.delete,
+ )
+
+ @cached_property
+ def roles(self) -> RolesWithStreamingResponse:
+ return RolesWithStreamingResponse(self._users.roles)
+
+
+class AsyncUsersWithStreamingResponse:
+ def __init__(self, users: AsyncUsers) -> None:
+ self._users = users
+
+ self.create = async_to_streamed_response_wrapper(
+ users.create,
+ )
+ self.retrieve = async_to_streamed_response_wrapper(
+ users.retrieve,
+ )
+ self.update = async_to_streamed_response_wrapper(
+ users.update,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ users.list,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ users.delete,
+ )
+
+ @cached_property
+ def roles(self) -> AsyncRolesWithStreamingResponse:
+ return AsyncRolesWithStreamingResponse(self._users.roles)
diff --git a/src/openai/resources/admin/organization/roles.py b/src/openai/resources/admin/organization/roles.py
new file mode 100644
index 0000000000..b25bbfb21e
--- /dev/null
+++ b/src/openai/resources/admin/organization/roles.py
@@ -0,0 +1,526 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Literal
+
+import httpx
+
+from .... import _legacy_response
+from ...._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
+from ...._utils import path_template, maybe_transform, async_maybe_transform
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ....pagination import SyncNextCursorPage, AsyncNextCursorPage
+from ...._base_client import AsyncPaginator, make_request_options
+from ....types.admin.organization import role_list_params, role_create_params, role_update_params
+from ....types.admin.organization.role import Role
+from ....types.admin.organization.role_delete_response import RoleDeleteResponse
+
+__all__ = ["Roles", "AsyncRoles"]
+
+
+class Roles(SyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> RolesWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return RolesWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> RolesWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return RolesWithStreamingResponse(self)
+
+ def create(
+ self,
+ *,
+ permissions: SequenceNotStr[str],
+ role_name: str,
+ description: Optional[str] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> Role:
+ """
+ Creates a custom role for the organization.
+
+ Args:
+ permissions: Permissions to grant to the role.
+
+ role_name: Unique name for the role.
+
+ description: Optional description of the role.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._post(
+ "/organization/roles",
+ body=maybe_transform(
+ {
+ "permissions": permissions,
+ "role_name": role_name,
+ "description": description,
+ },
+ role_create_params.RoleCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=Role,
+ )
+
+ def update(
+ self,
+ role_id: str,
+ *,
+ description: Optional[str] | Omit = omit,
+ permissions: Optional[SequenceNotStr[str]] | Omit = omit,
+ role_name: Optional[str] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> Role:
+ """
+ Updates an existing organization role.
+
+ Args:
+ description: New description for the role.
+
+ permissions: Updated set of permissions for the role.
+
+ role_name: New name for the role.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not role_id:
+ raise ValueError(f"Expected a non-empty value for `role_id` but received {role_id!r}")
+ return self._post(
+ path_template("/organization/roles/{role_id}", role_id=role_id),
+ body=maybe_transform(
+ {
+ "description": description,
+ "permissions": permissions,
+ "role_name": role_name,
+ },
+ role_update_params.RoleUpdateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=Role,
+ )
+
+ def list(
+ self,
+ *,
+ after: str | Omit = omit,
+ limit: int | Omit = omit,
+ order: Literal["asc", "desc"] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> SyncNextCursorPage[Role]:
+ """
+ Lists the roles configured for the organization.
+
+ Args:
+ after: Cursor for pagination. Provide the value from the previous response's `next`
+ field to continue listing roles.
+
+ limit: A limit on the number of roles to return. Defaults to 1000.
+
+ order: Sort order for the returned roles.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get_api_list(
+ "/organization/roles",
+ page=SyncNextCursorPage[Role],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "limit": limit,
+ "order": order,
+ },
+ role_list_params.RoleListParams,
+ ),
+ security={"admin_api_key_auth": True},
+ ),
+ model=Role,
+ )
+
+ def delete(
+ self,
+ role_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> RoleDeleteResponse:
+ """
+ Deletes a custom role from the organization.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not role_id:
+ raise ValueError(f"Expected a non-empty value for `role_id` but received {role_id!r}")
+ return self._delete(
+ path_template("/organization/roles/{role_id}", role_id=role_id),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=RoleDeleteResponse,
+ )
+
+
+class AsyncRoles(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncRolesWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncRolesWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncRolesWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return AsyncRolesWithStreamingResponse(self)
+
+ async def create(
+ self,
+ *,
+ permissions: SequenceNotStr[str],
+ role_name: str,
+ description: Optional[str] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> Role:
+ """
+ Creates a custom role for the organization.
+
+ Args:
+ permissions: Permissions to grant to the role.
+
+ role_name: Unique name for the role.
+
+ description: Optional description of the role.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._post(
+ "/organization/roles",
+ body=await async_maybe_transform(
+ {
+ "permissions": permissions,
+ "role_name": role_name,
+ "description": description,
+ },
+ role_create_params.RoleCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=Role,
+ )
+
+    async def update(
+        self,
+        role_id: str,
+        *,
+        description: Optional[str] | Omit = omit,
+        permissions: Optional[SequenceNotStr[str]] | Omit = omit,
+        role_name: Optional[str] | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> Role:
+        """
+        Updates an existing organization role.
+
+        Args:
+          description: New description for the role.
+
+          permissions: Updated set of permissions for the role.
+
+          role_name: New name for the role.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not role_id:  # fail fast on an empty path parameter before building the URL
+            raise ValueError(f"Expected a non-empty value for `role_id` but received {role_id!r}")
+        return await self._post(  # note: role updates are sent as POST requests in this API
+            path_template("/organization/roles/{role_id}", role_id=role_id),
+            body=await async_maybe_transform(
+                {
+                    "description": description,
+                    "permissions": permissions,
+                    "role_name": role_name,
+                },
+                role_update_params.RoleUpdateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                security={"admin_api_key_auth": True},  # endpoint requires an admin API key
+            ),
+            cast_to=Role,
+        )
+
+    def list(
+        self,
+        *,
+        after: str | Omit = omit,
+        limit: int | Omit = omit,
+        order: Literal["asc", "desc"] | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> AsyncPaginator[Role, AsyncNextCursorPage[Role]]:
+        """
+        Lists the roles configured for the organization.
+
+        Args:
+          after: Cursor for pagination. Provide the value from the previous response's `next`
+              field to continue listing roles.
+
+          limit: A limit on the number of roles to return. Defaults to 1000.
+
+          order: Sort order for the returned roles.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._get_api_list(  # not awaited here: returns an AsyncPaginator (hence `def`, not `async def`)
+            "/organization/roles",
+            page=AsyncNextCursorPage[Role],
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "after": after,
+                        "limit": limit,
+                        "order": order,
+                    },
+                    role_list_params.RoleListParams,
+                ),
+                security={"admin_api_key_auth": True},  # endpoint requires an admin API key
+            ),
+            model=Role,
+        )
+
+    async def delete(
+        self,
+        role_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> RoleDeleteResponse:
+        """
+        Deletes a custom role from the organization.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not role_id:  # fail fast on an empty path parameter before building the URL
+            raise ValueError(f"Expected a non-empty value for `role_id` but received {role_id!r}")
+        return await self._delete(
+            path_template("/organization/roles/{role_id}", role_id=role_id),
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                security={"admin_api_key_auth": True},  # endpoint requires an admin API key
+            ),
+            cast_to=RoleDeleteResponse,
+        )
+
+
+class RolesWithRawResponse:  # wraps each sync Roles method so calls return the raw response
+    def __init__(self, roles: Roles) -> None:
+        self._roles = roles  # keep a handle on the wrapped resource
+
+        self.create = _legacy_response.to_raw_response_wrapper(
+            roles.create,
+        )
+        self.update = _legacy_response.to_raw_response_wrapper(
+            roles.update,
+        )
+        self.list = _legacy_response.to_raw_response_wrapper(
+            roles.list,
+        )
+        self.delete = _legacy_response.to_raw_response_wrapper(
+            roles.delete,
+        )
+
+
+class AsyncRolesWithRawResponse:  # wraps each async Roles method so calls return the raw response
+    def __init__(self, roles: AsyncRoles) -> None:
+        self._roles = roles  # keep a handle on the wrapped resource
+
+        self.create = _legacy_response.async_to_raw_response_wrapper(
+            roles.create,
+        )
+        self.update = _legacy_response.async_to_raw_response_wrapper(
+            roles.update,
+        )
+        self.list = _legacy_response.async_to_raw_response_wrapper(
+            roles.list,
+        )
+        self.delete = _legacy_response.async_to_raw_response_wrapper(
+            roles.delete,
+        )
+
+
+class RolesWithStreamingResponse:  # wraps each sync Roles method with a streamed-response wrapper
+    def __init__(self, roles: Roles) -> None:
+        self._roles = roles  # keep a handle on the wrapped resource
+
+        self.create = to_streamed_response_wrapper(
+            roles.create,
+        )
+        self.update = to_streamed_response_wrapper(
+            roles.update,
+        )
+        self.list = to_streamed_response_wrapper(
+            roles.list,
+        )
+        self.delete = to_streamed_response_wrapper(
+            roles.delete,
+        )
+
+
+class AsyncRolesWithStreamingResponse:  # wraps each async Roles method with a streamed-response wrapper
+    def __init__(self, roles: AsyncRoles) -> None:
+        self._roles = roles  # keep a handle on the wrapped resource
+
+        self.create = async_to_streamed_response_wrapper(
+            roles.create,
+        )
+        self.update = async_to_streamed_response_wrapper(
+            roles.update,
+        )
+        self.list = async_to_streamed_response_wrapper(
+            roles.list,
+        )
+        self.delete = async_to_streamed_response_wrapper(
+            roles.delete,
+        )
diff --git a/src/openai/resources/admin/organization/usage.py b/src/openai/resources/admin/organization/usage.py
new file mode 100644
index 0000000000..2725d5e884
--- /dev/null
+++ b/src/openai/resources/admin/organization/usage.py
@@ -0,0 +1,1724 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+from typing_extensions import Literal
+
+import httpx
+
+from .... import _legacy_response
+from ...._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
+from ...._utils import maybe_transform, async_maybe_transform
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ...._base_client import make_request_options
+from ....types.admin.organization import (
+ usage_costs_params,
+ usage_images_params,
+ usage_embeddings_params,
+ usage_completions_params,
+ usage_moderations_params,
+ usage_vector_stores_params,
+ usage_audio_speeches_params,
+ usage_audio_transcriptions_params,
+ usage_code_interpreter_sessions_params,
+)
+from ....types.admin.organization.usage_costs_response import UsageCostsResponse
+from ....types.admin.organization.usage_images_response import UsageImagesResponse
+from ....types.admin.organization.usage_embeddings_response import UsageEmbeddingsResponse
+from ....types.admin.organization.usage_completions_response import UsageCompletionsResponse
+from ....types.admin.organization.usage_moderations_response import UsageModerationsResponse
+from ....types.admin.organization.usage_vector_stores_response import UsageVectorStoresResponse
+from ....types.admin.organization.usage_audio_speeches_response import UsageAudioSpeechesResponse
+from ....types.admin.organization.usage_audio_transcriptions_response import UsageAudioTranscriptionsResponse
+from ....types.admin.organization.usage_code_interpreter_sessions_response import UsageCodeInterpreterSessionsResponse
+
+__all__ = ["Usage", "AsyncUsage"]
+
+
+class Usage(SyncAPIResource):  # NOTE(review): every endpoint below is a read-only GET guarded by the admin API key
+    @cached_property
+    def with_raw_response(self) -> UsageWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return UsageWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> UsageWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return UsageWithStreamingResponse(self)
+
+    def audio_speeches(
+        self,
+        *,
+        start_time: int,
+        api_key_ids: SequenceNotStr[str] | Omit = omit,
+        bucket_width: Literal["1m", "1h", "1d"] | Omit = omit,
+        end_time: int | Omit = omit,
+        group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]] | Omit = omit,
+        limit: int | Omit = omit,
+        models: SequenceNotStr[str] | Omit = omit,
+        page: str | Omit = omit,
+        project_ids: SequenceNotStr[str] | Omit = omit,
+        user_ids: SequenceNotStr[str] | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> UsageAudioSpeechesResponse:
+        """
+        Get audio speeches usage details for the organization.
+
+        Args:
+          start_time: Start time (Unix seconds) of the query time range, inclusive.
+
+          api_key_ids: Return only usage for these API keys.
+
+          bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are
+              supported, default to `1d`.
+
+          end_time: End time (Unix seconds) of the query time range, exclusive.
+
+          group_by: Group the usage data by the specified fields. Support fields include
+              `project_id`, `user_id`, `api_key_id`, `model` or any combination of them.
+
+          limit: Specifies the number of buckets to return.
+
+              - `bucket_width=1d`: default: 7, max: 31
+              - `bucket_width=1h`: default: 24, max: 168
+              - `bucket_width=1m`: default: 60, max: 1440
+
+          models: Return only usage for these models.
+
+          page: A cursor for use in pagination. Corresponding to the `next_page` field from the
+              previous response.
+
+          project_ids: Return only usage for these projects.
+
+          user_ids: Return only usage for these users.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._get(  # all filters are sent as query-string parameters; there is no request body
+            "/organization/usage/audio_speeches",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "start_time": start_time,
+                        "api_key_ids": api_key_ids,
+                        "bucket_width": bucket_width,
+                        "end_time": end_time,
+                        "group_by": group_by,
+                        "limit": limit,
+                        "models": models,
+                        "page": page,
+                        "project_ids": project_ids,
+                        "user_ids": user_ids,
+                    },
+                    usage_audio_speeches_params.UsageAudioSpeechesParams,
+                ),
+                security={"admin_api_key_auth": True},
+            ),
+            cast_to=UsageAudioSpeechesResponse,
+        )
+
+    def audio_transcriptions(
+        self,
+        *,
+        start_time: int,
+        api_key_ids: SequenceNotStr[str] | Omit = omit,
+        bucket_width: Literal["1m", "1h", "1d"] | Omit = omit,
+        end_time: int | Omit = omit,
+        group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]] | Omit = omit,
+        limit: int | Omit = omit,
+        models: SequenceNotStr[str] | Omit = omit,
+        page: str | Omit = omit,
+        project_ids: SequenceNotStr[str] | Omit = omit,
+        user_ids: SequenceNotStr[str] | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> UsageAudioTranscriptionsResponse:
+        """
+        Get audio transcriptions usage details for the organization.
+
+        Args:
+          start_time: Start time (Unix seconds) of the query time range, inclusive.
+
+          api_key_ids: Return only usage for these API keys.
+
+          bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are
+              supported, default to `1d`.
+
+          end_time: End time (Unix seconds) of the query time range, exclusive.
+
+          group_by: Group the usage data by the specified fields. Support fields include
+              `project_id`, `user_id`, `api_key_id`, `model` or any combination of them.
+
+          limit: Specifies the number of buckets to return.
+
+              - `bucket_width=1d`: default: 7, max: 31
+              - `bucket_width=1h`: default: 24, max: 168
+              - `bucket_width=1m`: default: 60, max: 1440
+
+          models: Return only usage for these models.
+
+          page: A cursor for use in pagination. Corresponding to the `next_page` field from the
+              previous response.
+
+          project_ids: Return only usage for these projects.
+
+          user_ids: Return only usage for these users.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._get(
+            "/organization/usage/audio_transcriptions",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "start_time": start_time,
+                        "api_key_ids": api_key_ids,
+                        "bucket_width": bucket_width,
+                        "end_time": end_time,
+                        "group_by": group_by,
+                        "limit": limit,
+                        "models": models,
+                        "page": page,
+                        "project_ids": project_ids,
+                        "user_ids": user_ids,
+                    },
+                    usage_audio_transcriptions_params.UsageAudioTranscriptionsParams,
+                ),
+                security={"admin_api_key_auth": True},
+            ),
+            cast_to=UsageAudioTranscriptionsResponse,
+        )
+
+    def code_interpreter_sessions(
+        self,
+        *,
+        start_time: int,
+        bucket_width: Literal["1m", "1h", "1d"] | Omit = omit,
+        end_time: int | Omit = omit,
+        group_by: List[Literal["project_id"]] | Omit = omit,
+        limit: int | Omit = omit,
+        page: str | Omit = omit,
+        project_ids: SequenceNotStr[str] | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> UsageCodeInterpreterSessionsResponse:
+        """
+        Get code interpreter sessions usage details for the organization.
+
+        Args:
+          start_time: Start time (Unix seconds) of the query time range, inclusive.
+
+          bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are
+              supported, default to `1d`.
+
+          end_time: End time (Unix seconds) of the query time range, exclusive.
+
+          group_by: Group the usage data by the specified fields. Support fields include
+              `project_id`.
+
+          limit: Specifies the number of buckets to return.
+
+              - `bucket_width=1d`: default: 7, max: 31
+              - `bucket_width=1h`: default: 24, max: 168
+              - `bucket_width=1m`: default: 60, max: 1440
+
+          page: A cursor for use in pagination. Corresponding to the `next_page` field from the
+              previous response.
+
+          project_ids: Return only usage for these projects.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._get(
+            "/organization/usage/code_interpreter_sessions",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "start_time": start_time,
+                        "bucket_width": bucket_width,
+                        "end_time": end_time,
+                        "group_by": group_by,
+                        "limit": limit,
+                        "page": page,
+                        "project_ids": project_ids,
+                    },
+                    usage_code_interpreter_sessions_params.UsageCodeInterpreterSessionsParams,
+                ),
+                security={"admin_api_key_auth": True},
+            ),
+            cast_to=UsageCodeInterpreterSessionsResponse,
+        )
+
+    def completions(
+        self,
+        *,
+        start_time: int,
+        api_key_ids: SequenceNotStr[str] | Omit = omit,
+        batch: bool | Omit = omit,
+        bucket_width: Literal["1m", "1h", "1d"] | Omit = omit,
+        end_time: int | Omit = omit,
+        group_by: List[Literal["project_id", "user_id", "api_key_id", "model", "batch", "service_tier"]] | Omit = omit,
+        limit: int | Omit = omit,
+        models: SequenceNotStr[str] | Omit = omit,
+        page: str | Omit = omit,
+        project_ids: SequenceNotStr[str] | Omit = omit,
+        user_ids: SequenceNotStr[str] | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> UsageCompletionsResponse:
+        """
+        Get completions usage details for the organization.
+
+        Args:
+          start_time: Start time (Unix seconds) of the query time range, inclusive.
+
+          api_key_ids: Return only usage for these API keys.
+
+          batch: If `true`, return batch jobs only. If `false`, return non-batch jobs only. By
+              default, return both.
+
+          bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are
+              supported, default to `1d`.
+
+          end_time: End time (Unix seconds) of the query time range, exclusive.
+
+          group_by: Group the usage data by the specified fields. Support fields include
+              `project_id`, `user_id`, `api_key_id`, `model`, `batch`, `service_tier` or any
+              combination of them.
+
+          limit: Specifies the number of buckets to return.
+
+              - `bucket_width=1d`: default: 7, max: 31
+              - `bucket_width=1h`: default: 24, max: 168
+              - `bucket_width=1m`: default: 60, max: 1440
+
+          models: Return only usage for these models.
+
+          page: A cursor for use in pagination. Corresponding to the `next_page` field from the
+              previous response.
+
+          project_ids: Return only usage for these projects.
+
+          user_ids: Return only usage for these users.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._get(
+            "/organization/usage/completions",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "start_time": start_time,
+                        "api_key_ids": api_key_ids,
+                        "batch": batch,
+                        "bucket_width": bucket_width,
+                        "end_time": end_time,
+                        "group_by": group_by,
+                        "limit": limit,
+                        "models": models,
+                        "page": page,
+                        "project_ids": project_ids,
+                        "user_ids": user_ids,
+                    },
+                    usage_completions_params.UsageCompletionsParams,
+                ),
+                security={"admin_api_key_auth": True},
+            ),
+            cast_to=UsageCompletionsResponse,
+        )
+
+    def costs(
+        self,
+        *,
+        start_time: int,
+        api_key_ids: SequenceNotStr[str] | Omit = omit,
+        bucket_width: Literal["1d"] | Omit = omit,
+        end_time: int | Omit = omit,
+        group_by: List[Literal["project_id", "line_item", "api_key_id"]] | Omit = omit,
+        limit: int | Omit = omit,
+        page: str | Omit = omit,
+        project_ids: SequenceNotStr[str] | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> UsageCostsResponse:
+        """
+        Get costs details for the organization.
+
+        Args:
+          start_time: Start time (Unix seconds) of the query time range, inclusive.
+
+          api_key_ids: Return only costs for these API keys.
+
+          bucket_width: Width of each time bucket in response. Currently only `1d` is supported, default
+              to `1d`.
+
+          end_time: End time (Unix seconds) of the query time range, exclusive.
+
+          group_by: Group the costs by the specified fields. Support fields include `project_id`,
+              `line_item`, `api_key_id` and any combination of them.
+
+          limit: A limit on the number of buckets to be returned. Limit can range between 1 and
+              180, and the default is 7.
+
+          page: A cursor for use in pagination. Corresponding to the `next_page` field from the
+              previous response.
+
+          project_ids: Return only costs for these projects.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._get(
+            "/organization/costs",  # NOTE(review): unlike the sibling methods, costs is not under the /organization/usage/ prefix
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "start_time": start_time,
+                        "api_key_ids": api_key_ids,
+                        "bucket_width": bucket_width,
+                        "end_time": end_time,
+                        "group_by": group_by,
+                        "limit": limit,
+                        "page": page,
+                        "project_ids": project_ids,
+                    },
+                    usage_costs_params.UsageCostsParams,
+                ),
+                security={"admin_api_key_auth": True},
+            ),
+            cast_to=UsageCostsResponse,
+        )
+
+    def embeddings(
+        self,
+        *,
+        start_time: int,
+        api_key_ids: SequenceNotStr[str] | Omit = omit,
+        bucket_width: Literal["1m", "1h", "1d"] | Omit = omit,
+        end_time: int | Omit = omit,
+        group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]] | Omit = omit,
+        limit: int | Omit = omit,
+        models: SequenceNotStr[str] | Omit = omit,
+        page: str | Omit = omit,
+        project_ids: SequenceNotStr[str] | Omit = omit,
+        user_ids: SequenceNotStr[str] | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> UsageEmbeddingsResponse:
+        """
+        Get embeddings usage details for the organization.
+
+        Args:
+          start_time: Start time (Unix seconds) of the query time range, inclusive.
+
+          api_key_ids: Return only usage for these API keys.
+
+          bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are
+              supported, default to `1d`.
+
+          end_time: End time (Unix seconds) of the query time range, exclusive.
+
+          group_by: Group the usage data by the specified fields. Support fields include
+              `project_id`, `user_id`, `api_key_id`, `model` or any combination of them.
+
+          limit: Specifies the number of buckets to return.
+
+              - `bucket_width=1d`: default: 7, max: 31
+              - `bucket_width=1h`: default: 24, max: 168
+              - `bucket_width=1m`: default: 60, max: 1440
+
+          models: Return only usage for these models.
+
+          page: A cursor for use in pagination. Corresponding to the `next_page` field from the
+              previous response.
+
+          project_ids: Return only usage for these projects.
+
+          user_ids: Return only usage for these users.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._get(
+            "/organization/usage/embeddings",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "start_time": start_time,
+                        "api_key_ids": api_key_ids,
+                        "bucket_width": bucket_width,
+                        "end_time": end_time,
+                        "group_by": group_by,
+                        "limit": limit,
+                        "models": models,
+                        "page": page,
+                        "project_ids": project_ids,
+                        "user_ids": user_ids,
+                    },
+                    usage_embeddings_params.UsageEmbeddingsParams,
+                ),
+                security={"admin_api_key_auth": True},
+            ),
+            cast_to=UsageEmbeddingsResponse,
+        )
+
+    def images(
+        self,
+        *,
+        start_time: int,
+        api_key_ids: SequenceNotStr[str] | Omit = omit,
+        bucket_width: Literal["1m", "1h", "1d"] | Omit = omit,
+        end_time: int | Omit = omit,
+        group_by: List[Literal["project_id", "user_id", "api_key_id", "model", "size", "source"]] | Omit = omit,
+        limit: int | Omit = omit,
+        models: SequenceNotStr[str] | Omit = omit,
+        page: str | Omit = omit,
+        project_ids: SequenceNotStr[str] | Omit = omit,
+        sizes: List[Literal["256x256", "512x512", "1024x1024", "1792x1792", "1024x1792"]] | Omit = omit,
+        sources: List[Literal["image.generation", "image.edit", "image.variation"]] | Omit = omit,
+        user_ids: SequenceNotStr[str] | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> UsageImagesResponse:
+        """
+        Get images usage details for the organization.
+
+        Args:
+          start_time: Start time (Unix seconds) of the query time range, inclusive.
+
+          api_key_ids: Return only usage for these API keys.
+
+          bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are
+              supported, default to `1d`.
+
+          end_time: End time (Unix seconds) of the query time range, exclusive.
+
+          group_by: Group the usage data by the specified fields. Support fields include
+              `project_id`, `user_id`, `api_key_id`, `model`, `size`, `source` or any
+              combination of them.
+
+          limit: Specifies the number of buckets to return.
+
+              - `bucket_width=1d`: default: 7, max: 31
+              - `bucket_width=1h`: default: 24, max: 168
+              - `bucket_width=1m`: default: 60, max: 1440
+
+          models: Return only usage for these models.
+
+          page: A cursor for use in pagination. Corresponding to the `next_page` field from the
+              previous response.
+
+          project_ids: Return only usage for these projects.
+
+          sizes: Return only usages for these image sizes. Possible values are `256x256`,
+              `512x512`, `1024x1024`, `1792x1792`, `1024x1792` or any combination of them.
+
+          sources: Return only usages for these sources. Possible values are `image.generation`,
+              `image.edit`, `image.variation` or any combination of them.
+
+          user_ids: Return only usage for these users.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._get(
+            "/organization/usage/images",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "start_time": start_time,
+                        "api_key_ids": api_key_ids,
+                        "bucket_width": bucket_width,
+                        "end_time": end_time,
+                        "group_by": group_by,
+                        "limit": limit,
+                        "models": models,
+                        "page": page,
+                        "project_ids": project_ids,
+                        "sizes": sizes,
+                        "sources": sources,
+                        "user_ids": user_ids,
+                    },
+                    usage_images_params.UsageImagesParams,
+                ),
+                security={"admin_api_key_auth": True},
+            ),
+            cast_to=UsageImagesResponse,
+        )
+
+    def moderations(
+        self,
+        *,
+        start_time: int,
+        api_key_ids: SequenceNotStr[str] | Omit = omit,
+        bucket_width: Literal["1m", "1h", "1d"] | Omit = omit,
+        end_time: int | Omit = omit,
+        group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]] | Omit = omit,
+        limit: int | Omit = omit,
+        models: SequenceNotStr[str] | Omit = omit,
+        page: str | Omit = omit,
+        project_ids: SequenceNotStr[str] | Omit = omit,
+        user_ids: SequenceNotStr[str] | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> UsageModerationsResponse:
+        """
+        Get moderations usage details for the organization.
+
+        Args:
+          start_time: Start time (Unix seconds) of the query time range, inclusive.
+
+          api_key_ids: Return only usage for these API keys.
+
+          bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are
+              supported, default to `1d`.
+
+          end_time: End time (Unix seconds) of the query time range, exclusive.
+
+          group_by: Group the usage data by the specified fields. Support fields include
+              `project_id`, `user_id`, `api_key_id`, `model` or any combination of them.
+
+          limit: Specifies the number of buckets to return.
+
+              - `bucket_width=1d`: default: 7, max: 31
+              - `bucket_width=1h`: default: 24, max: 168
+              - `bucket_width=1m`: default: 60, max: 1440
+
+          models: Return only usage for these models.
+
+          page: A cursor for use in pagination. Corresponding to the `next_page` field from the
+              previous response.
+
+          project_ids: Return only usage for these projects.
+
+          user_ids: Return only usage for these users.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._get(
+            "/organization/usage/moderations",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "start_time": start_time,
+                        "api_key_ids": api_key_ids,
+                        "bucket_width": bucket_width,
+                        "end_time": end_time,
+                        "group_by": group_by,
+                        "limit": limit,
+                        "models": models,
+                        "page": page,
+                        "project_ids": project_ids,
+                        "user_ids": user_ids,
+                    },
+                    usage_moderations_params.UsageModerationsParams,
+                ),
+                security={"admin_api_key_auth": True},
+            ),
+            cast_to=UsageModerationsResponse,
+        )
+
+    def vector_stores(
+        self,
+        *,
+        start_time: int,
+        bucket_width: Literal["1m", "1h", "1d"] | Omit = omit,
+        end_time: int | Omit = omit,
+        group_by: List[Literal["project_id"]] | Omit = omit,
+        limit: int | Omit = omit,
+        page: str | Omit = omit,
+        project_ids: SequenceNotStr[str] | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> UsageVectorStoresResponse:
+        """
+        Get vector stores usage details for the organization.
+
+        Args:
+          start_time: Start time (Unix seconds) of the query time range, inclusive.
+
+          bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are
+              supported, default to `1d`.
+
+          end_time: End time (Unix seconds) of the query time range, exclusive.
+
+          group_by: Group the usage data by the specified fields. Support fields include
+              `project_id`.
+
+          limit: Specifies the number of buckets to return.
+
+              - `bucket_width=1d`: default: 7, max: 31
+              - `bucket_width=1h`: default: 24, max: 168
+              - `bucket_width=1m`: default: 60, max: 1440
+
+          page: A cursor for use in pagination. Corresponding to the `next_page` field from the
+              previous response.
+
+          project_ids: Return only usage for these projects.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._get(
+            "/organization/usage/vector_stores",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "start_time": start_time,
+                        "bucket_width": bucket_width,
+                        "end_time": end_time,
+                        "group_by": group_by,
+                        "limit": limit,
+                        "page": page,
+                        "project_ids": project_ids,
+                    },
+                    usage_vector_stores_params.UsageVectorStoresParams,
+                ),
+                security={"admin_api_key_auth": True},
+            ),
+            cast_to=UsageVectorStoresResponse,
+        )
+
+
+class AsyncUsage(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncUsageWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncUsageWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncUsageWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return AsyncUsageWithStreamingResponse(self)
+
+ async def audio_speeches(
+ self,
+ *,
+ start_time: int,
+ api_key_ids: SequenceNotStr[str] | Omit = omit,
+ bucket_width: Literal["1m", "1h", "1d"] | Omit = omit,
+ end_time: int | Omit = omit,
+ group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]] | Omit = omit,
+ limit: int | Omit = omit,
+ models: SequenceNotStr[str] | Omit = omit,
+ page: str | Omit = omit,
+ project_ids: SequenceNotStr[str] | Omit = omit,
+ user_ids: SequenceNotStr[str] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> UsageAudioSpeechesResponse:
+ """
+ Get audio speeches usage details for the organization.
+
+ Args:
+ start_time: Start time (Unix seconds) of the query time range, inclusive.
+
+ api_key_ids: Return only usage for these API keys.
+
+ bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are
+ supported, default to `1d`.
+
+ end_time: End time (Unix seconds) of the query time range, exclusive.
+
+ group_by: Group the usage data by the specified fields. Support fields include
+ `project_id`, `user_id`, `api_key_id`, `model` or any combination of them.
+
+ limit: Specifies the number of buckets to return.
+
+ - `bucket_width=1d`: default: 7, max: 31
+ - `bucket_width=1h`: default: 24, max: 168
+ - `bucket_width=1m`: default: 60, max: 1440
+
+ models: Return only usage for these models.
+
+ page: A cursor for use in pagination. Corresponding to the `next_page` field from the
+ previous response.
+
+ project_ids: Return only usage for these projects.
+
+ user_ids: Return only usage for these users.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._get(
+ "/organization/usage/audio_speeches",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "start_time": start_time,
+ "api_key_ids": api_key_ids,
+ "bucket_width": bucket_width,
+ "end_time": end_time,
+ "group_by": group_by,
+ "limit": limit,
+ "models": models,
+ "page": page,
+ "project_ids": project_ids,
+ "user_ids": user_ids,
+ },
+ usage_audio_speeches_params.UsageAudioSpeechesParams,
+ ),
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=UsageAudioSpeechesResponse,
+ )
+
+ async def audio_transcriptions(
+ self,
+ *,
+ start_time: int,
+ api_key_ids: SequenceNotStr[str] | Omit = omit,
+ bucket_width: Literal["1m", "1h", "1d"] | Omit = omit,
+ end_time: int | Omit = omit,
+ group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]] | Omit = omit,
+ limit: int | Omit = omit,
+ models: SequenceNotStr[str] | Omit = omit,
+ page: str | Omit = omit,
+ project_ids: SequenceNotStr[str] | Omit = omit,
+ user_ids: SequenceNotStr[str] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> UsageAudioTranscriptionsResponse:
+ """
+ Get audio transcriptions usage details for the organization.
+
+ Args:
+ start_time: Start time (Unix seconds) of the query time range, inclusive.
+
+ api_key_ids: Return only usage for these API keys.
+
+ bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are
+ supported, default to `1d`.
+
+ end_time: End time (Unix seconds) of the query time range, exclusive.
+
+ group_by: Group the usage data by the specified fields. Support fields include
+ `project_id`, `user_id`, `api_key_id`, `model` or any combination of them.
+
+ limit: Specifies the number of buckets to return.
+
+ - `bucket_width=1d`: default: 7, max: 31
+ - `bucket_width=1h`: default: 24, max: 168
+ - `bucket_width=1m`: default: 60, max: 1440
+
+ models: Return only usage for these models.
+
+ page: A cursor for use in pagination. Corresponding to the `next_page` field from the
+ previous response.
+
+ project_ids: Return only usage for these projects.
+
+ user_ids: Return only usage for these users.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._get(
+ "/organization/usage/audio_transcriptions",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "start_time": start_time,
+ "api_key_ids": api_key_ids,
+ "bucket_width": bucket_width,
+ "end_time": end_time,
+ "group_by": group_by,
+ "limit": limit,
+ "models": models,
+ "page": page,
+ "project_ids": project_ids,
+ "user_ids": user_ids,
+ },
+ usage_audio_transcriptions_params.UsageAudioTranscriptionsParams,
+ ),
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=UsageAudioTranscriptionsResponse,
+ )
+
+ async def code_interpreter_sessions(
+ self,
+ *,
+ start_time: int,
+ bucket_width: Literal["1m", "1h", "1d"] | Omit = omit,
+ end_time: int | Omit = omit,
+ group_by: List[Literal["project_id"]] | Omit = omit,
+ limit: int | Omit = omit,
+ page: str | Omit = omit,
+ project_ids: SequenceNotStr[str] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> UsageCodeInterpreterSessionsResponse:
+ """
+ Get code interpreter sessions usage details for the organization.
+
+ Args:
+ start_time: Start time (Unix seconds) of the query time range, inclusive.
+
+ bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are
+ supported, default to `1d`.
+
+ end_time: End time (Unix seconds) of the query time range, exclusive.
+
+ group_by: Group the usage data by the specified fields. Support fields include
+ `project_id`.
+
+ limit: Specifies the number of buckets to return.
+
+ - `bucket_width=1d`: default: 7, max: 31
+ - `bucket_width=1h`: default: 24, max: 168
+ - `bucket_width=1m`: default: 60, max: 1440
+
+ page: A cursor for use in pagination. Corresponding to the `next_page` field from the
+ previous response.
+
+ project_ids: Return only usage for these projects.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._get(
+ "/organization/usage/code_interpreter_sessions",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "start_time": start_time,
+ "bucket_width": bucket_width,
+ "end_time": end_time,
+ "group_by": group_by,
+ "limit": limit,
+ "page": page,
+ "project_ids": project_ids,
+ },
+ usage_code_interpreter_sessions_params.UsageCodeInterpreterSessionsParams,
+ ),
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=UsageCodeInterpreterSessionsResponse,
+ )
+
+ async def completions(
+ self,
+ *,
+ start_time: int,
+ api_key_ids: SequenceNotStr[str] | Omit = omit,
+ batch: bool | Omit = omit,
+ bucket_width: Literal["1m", "1h", "1d"] | Omit = omit,
+ end_time: int | Omit = omit,
+ group_by: List[Literal["project_id", "user_id", "api_key_id", "model", "batch", "service_tier"]] | Omit = omit,
+ limit: int | Omit = omit,
+ models: SequenceNotStr[str] | Omit = omit,
+ page: str | Omit = omit,
+ project_ids: SequenceNotStr[str] | Omit = omit,
+ user_ids: SequenceNotStr[str] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> UsageCompletionsResponse:
+ """
+ Get completions usage details for the organization.
+
+ Args:
+ start_time: Start time (Unix seconds) of the query time range, inclusive.
+
+ api_key_ids: Return only usage for these API keys.
+
+ batch: If `true`, return batch jobs only. If `false`, return non-batch jobs only. By
+ default, return both.
+
+ bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are
+ supported, default to `1d`.
+
+ end_time: End time (Unix seconds) of the query time range, exclusive.
+
+ group_by: Group the usage data by the specified fields. Support fields include
+ `project_id`, `user_id`, `api_key_id`, `model`, `batch`, `service_tier` or any
+ combination of them.
+
+ limit: Specifies the number of buckets to return.
+
+ - `bucket_width=1d`: default: 7, max: 31
+ - `bucket_width=1h`: default: 24, max: 168
+ - `bucket_width=1m`: default: 60, max: 1440
+
+ models: Return only usage for these models.
+
+ page: A cursor for use in pagination. Corresponding to the `next_page` field from the
+ previous response.
+
+ project_ids: Return only usage for these projects.
+
+ user_ids: Return only usage for these users.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._get(
+ "/organization/usage/completions",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "start_time": start_time,
+ "api_key_ids": api_key_ids,
+ "batch": batch,
+ "bucket_width": bucket_width,
+ "end_time": end_time,
+ "group_by": group_by,
+ "limit": limit,
+ "models": models,
+ "page": page,
+ "project_ids": project_ids,
+ "user_ids": user_ids,
+ },
+ usage_completions_params.UsageCompletionsParams,
+ ),
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=UsageCompletionsResponse,
+ )
+
+ async def costs(
+ self,
+ *,
+ start_time: int,
+ api_key_ids: SequenceNotStr[str] | Omit = omit,
+ bucket_width: Literal["1d"] | Omit = omit,
+ end_time: int | Omit = omit,
+ group_by: List[Literal["project_id", "line_item", "api_key_id"]] | Omit = omit,
+ limit: int | Omit = omit,
+ page: str | Omit = omit,
+ project_ids: SequenceNotStr[str] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> UsageCostsResponse:
+ """
+ Get costs details for the organization.
+
+ Args:
+ start_time: Start time (Unix seconds) of the query time range, inclusive.
+
+ api_key_ids: Return only costs for these API keys.
+
+ bucket_width: Width of each time bucket in response. Currently only `1d` is supported, default
+ to `1d`.
+
+ end_time: End time (Unix seconds) of the query time range, exclusive.
+
+ group_by: Group the costs by the specified fields. Support fields include `project_id`,
+ `line_item`, `api_key_id` and any combination of them.
+
+ limit: A limit on the number of buckets to be returned. Limit can range between 1 and
+ 180, and the default is 7.
+
+ page: A cursor for use in pagination. Corresponding to the `next_page` field from the
+ previous response.
+
+ project_ids: Return only costs for these projects.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._get(
+ "/organization/costs",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "start_time": start_time,
+ "api_key_ids": api_key_ids,
+ "bucket_width": bucket_width,
+ "end_time": end_time,
+ "group_by": group_by,
+ "limit": limit,
+ "page": page,
+ "project_ids": project_ids,
+ },
+ usage_costs_params.UsageCostsParams,
+ ),
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=UsageCostsResponse,
+ )
+
+ async def embeddings(
+ self,
+ *,
+ start_time: int,
+ api_key_ids: SequenceNotStr[str] | Omit = omit,
+ bucket_width: Literal["1m", "1h", "1d"] | Omit = omit,
+ end_time: int | Omit = omit,
+ group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]] | Omit = omit,
+ limit: int | Omit = omit,
+ models: SequenceNotStr[str] | Omit = omit,
+ page: str | Omit = omit,
+ project_ids: SequenceNotStr[str] | Omit = omit,
+ user_ids: SequenceNotStr[str] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> UsageEmbeddingsResponse:
+ """
+ Get embeddings usage details for the organization.
+
+ Args:
+ start_time: Start time (Unix seconds) of the query time range, inclusive.
+
+ api_key_ids: Return only usage for these API keys.
+
+ bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are
+ supported, default to `1d`.
+
+ end_time: End time (Unix seconds) of the query time range, exclusive.
+
+ group_by: Group the usage data by the specified fields. Support fields include
+ `project_id`, `user_id`, `api_key_id`, `model` or any combination of them.
+
+ limit: Specifies the number of buckets to return.
+
+ - `bucket_width=1d`: default: 7, max: 31
+ - `bucket_width=1h`: default: 24, max: 168
+ - `bucket_width=1m`: default: 60, max: 1440
+
+ models: Return only usage for these models.
+
+ page: A cursor for use in pagination. Corresponding to the `next_page` field from the
+ previous response.
+
+ project_ids: Return only usage for these projects.
+
+ user_ids: Return only usage for these users.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._get(
+ "/organization/usage/embeddings",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "start_time": start_time,
+ "api_key_ids": api_key_ids,
+ "bucket_width": bucket_width,
+ "end_time": end_time,
+ "group_by": group_by,
+ "limit": limit,
+ "models": models,
+ "page": page,
+ "project_ids": project_ids,
+ "user_ids": user_ids,
+ },
+ usage_embeddings_params.UsageEmbeddingsParams,
+ ),
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=UsageEmbeddingsResponse,
+ )
+
+ async def images(
+ self,
+ *,
+ start_time: int,
+ api_key_ids: SequenceNotStr[str] | Omit = omit,
+ bucket_width: Literal["1m", "1h", "1d"] | Omit = omit,
+ end_time: int | Omit = omit,
+ group_by: List[Literal["project_id", "user_id", "api_key_id", "model", "size", "source"]] | Omit = omit,
+ limit: int | Omit = omit,
+ models: SequenceNotStr[str] | Omit = omit,
+ page: str | Omit = omit,
+ project_ids: SequenceNotStr[str] | Omit = omit,
+ sizes: List[Literal["256x256", "512x512", "1024x1024", "1792x1792", "1024x1792"]] | Omit = omit,
+ sources: List[Literal["image.generation", "image.edit", "image.variation"]] | Omit = omit,
+ user_ids: SequenceNotStr[str] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> UsageImagesResponse:
+ """
+ Get images usage details for the organization.
+
+ Args:
+ start_time: Start time (Unix seconds) of the query time range, inclusive.
+
+ api_key_ids: Return only usage for these API keys.
+
+ bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are
+ supported, default to `1d`.
+
+ end_time: End time (Unix seconds) of the query time range, exclusive.
+
+ group_by: Group the usage data by the specified fields. Support fields include
+ `project_id`, `user_id`, `api_key_id`, `model`, `size`, `source` or any
+ combination of them.
+
+ limit: Specifies the number of buckets to return.
+
+ - `bucket_width=1d`: default: 7, max: 31
+ - `bucket_width=1h`: default: 24, max: 168
+ - `bucket_width=1m`: default: 60, max: 1440
+
+ models: Return only usage for these models.
+
+ page: A cursor for use in pagination. Corresponding to the `next_page` field from the
+ previous response.
+
+ project_ids: Return only usage for these projects.
+
+ sizes: Return only usages for these image sizes. Possible values are `256x256`,
+ `512x512`, `1024x1024`, `1792x1792`, `1024x1792` or any combination of them.
+
+ sources: Return only usages for these sources. Possible values are `image.generation`,
+ `image.edit`, `image.variation` or any combination of them.
+
+ user_ids: Return only usage for these users.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._get(
+ "/organization/usage/images",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "start_time": start_time,
+ "api_key_ids": api_key_ids,
+ "bucket_width": bucket_width,
+ "end_time": end_time,
+ "group_by": group_by,
+ "limit": limit,
+ "models": models,
+ "page": page,
+ "project_ids": project_ids,
+ "sizes": sizes,
+ "sources": sources,
+ "user_ids": user_ids,
+ },
+ usage_images_params.UsageImagesParams,
+ ),
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=UsageImagesResponse,
+ )
+
+ async def moderations(
+ self,
+ *,
+ start_time: int,
+ api_key_ids: SequenceNotStr[str] | Omit = omit,
+ bucket_width: Literal["1m", "1h", "1d"] | Omit = omit,
+ end_time: int | Omit = omit,
+ group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]] | Omit = omit,
+ limit: int | Omit = omit,
+ models: SequenceNotStr[str] | Omit = omit,
+ page: str | Omit = omit,
+ project_ids: SequenceNotStr[str] | Omit = omit,
+ user_ids: SequenceNotStr[str] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> UsageModerationsResponse:
+ """
+ Get moderations usage details for the organization.
+
+ Args:
+ start_time: Start time (Unix seconds) of the query time range, inclusive.
+
+ api_key_ids: Return only usage for these API keys.
+
+ bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are
+ supported, default to `1d`.
+
+ end_time: End time (Unix seconds) of the query time range, exclusive.
+
+ group_by: Group the usage data by the specified fields. Support fields include
+ `project_id`, `user_id`, `api_key_id`, `model` or any combination of them.
+
+ limit: Specifies the number of buckets to return.
+
+ - `bucket_width=1d`: default: 7, max: 31
+ - `bucket_width=1h`: default: 24, max: 168
+ - `bucket_width=1m`: default: 60, max: 1440
+
+ models: Return only usage for these models.
+
+ page: A cursor for use in pagination. Corresponding to the `next_page` field from the
+ previous response.
+
+ project_ids: Return only usage for these projects.
+
+ user_ids: Return only usage for these users.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._get(
+ "/organization/usage/moderations",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "start_time": start_time,
+ "api_key_ids": api_key_ids,
+ "bucket_width": bucket_width,
+ "end_time": end_time,
+ "group_by": group_by,
+ "limit": limit,
+ "models": models,
+ "page": page,
+ "project_ids": project_ids,
+ "user_ids": user_ids,
+ },
+ usage_moderations_params.UsageModerationsParams,
+ ),
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=UsageModerationsResponse,
+ )
+
+ async def vector_stores(
+ self,
+ *,
+ start_time: int,
+ bucket_width: Literal["1m", "1h", "1d"] | Omit = omit,
+ end_time: int | Omit = omit,
+ group_by: List[Literal["project_id"]] | Omit = omit,
+ limit: int | Omit = omit,
+ page: str | Omit = omit,
+ project_ids: SequenceNotStr[str] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> UsageVectorStoresResponse:
+ """
+ Get vector stores usage details for the organization.
+
+ Args:
+ start_time: Start time (Unix seconds) of the query time range, inclusive.
+
+ bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are
+ supported, default to `1d`.
+
+ end_time: End time (Unix seconds) of the query time range, exclusive.
+
+ group_by: Group the usage data by the specified fields. Support fields include
+ `project_id`.
+
+ limit: Specifies the number of buckets to return.
+
+ - `bucket_width=1d`: default: 7, max: 31
+ - `bucket_width=1h`: default: 24, max: 168
+ - `bucket_width=1m`: default: 60, max: 1440
+
+ page: A cursor for use in pagination. Corresponding to the `next_page` field from the
+ previous response.
+
+ project_ids: Return only usage for these projects.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._get(
+ "/organization/usage/vector_stores",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "start_time": start_time,
+ "bucket_width": bucket_width,
+ "end_time": end_time,
+ "group_by": group_by,
+ "limit": limit,
+ "page": page,
+ "project_ids": project_ids,
+ },
+ usage_vector_stores_params.UsageVectorStoresParams,
+ ),
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=UsageVectorStoresResponse,
+ )
+
+
+class UsageWithRawResponse:
+ def __init__(self, usage: Usage) -> None:
+ self._usage = usage
+
+ self.audio_speeches = _legacy_response.to_raw_response_wrapper(
+ usage.audio_speeches,
+ )
+ self.audio_transcriptions = _legacy_response.to_raw_response_wrapper(
+ usage.audio_transcriptions,
+ )
+ self.code_interpreter_sessions = _legacy_response.to_raw_response_wrapper(
+ usage.code_interpreter_sessions,
+ )
+ self.completions = _legacy_response.to_raw_response_wrapper(
+ usage.completions,
+ )
+ self.costs = _legacy_response.to_raw_response_wrapper(
+ usage.costs,
+ )
+ self.embeddings = _legacy_response.to_raw_response_wrapper(
+ usage.embeddings,
+ )
+ self.images = _legacy_response.to_raw_response_wrapper(
+ usage.images,
+ )
+ self.moderations = _legacy_response.to_raw_response_wrapper(
+ usage.moderations,
+ )
+ self.vector_stores = _legacy_response.to_raw_response_wrapper(
+ usage.vector_stores,
+ )
+
+
+class AsyncUsageWithRawResponse:
+ def __init__(self, usage: AsyncUsage) -> None:
+ self._usage = usage
+
+ self.audio_speeches = _legacy_response.async_to_raw_response_wrapper(
+ usage.audio_speeches,
+ )
+ self.audio_transcriptions = _legacy_response.async_to_raw_response_wrapper(
+ usage.audio_transcriptions,
+ )
+ self.code_interpreter_sessions = _legacy_response.async_to_raw_response_wrapper(
+ usage.code_interpreter_sessions,
+ )
+ self.completions = _legacy_response.async_to_raw_response_wrapper(
+ usage.completions,
+ )
+ self.costs = _legacy_response.async_to_raw_response_wrapper(
+ usage.costs,
+ )
+ self.embeddings = _legacy_response.async_to_raw_response_wrapper(
+ usage.embeddings,
+ )
+ self.images = _legacy_response.async_to_raw_response_wrapper(
+ usage.images,
+ )
+ self.moderations = _legacy_response.async_to_raw_response_wrapper(
+ usage.moderations,
+ )
+ self.vector_stores = _legacy_response.async_to_raw_response_wrapper(
+ usage.vector_stores,
+ )
+
+
+class UsageWithStreamingResponse:
+ def __init__(self, usage: Usage) -> None:
+ self._usage = usage
+
+ self.audio_speeches = to_streamed_response_wrapper(
+ usage.audio_speeches,
+ )
+ self.audio_transcriptions = to_streamed_response_wrapper(
+ usage.audio_transcriptions,
+ )
+ self.code_interpreter_sessions = to_streamed_response_wrapper(
+ usage.code_interpreter_sessions,
+ )
+ self.completions = to_streamed_response_wrapper(
+ usage.completions,
+ )
+ self.costs = to_streamed_response_wrapper(
+ usage.costs,
+ )
+ self.embeddings = to_streamed_response_wrapper(
+ usage.embeddings,
+ )
+ self.images = to_streamed_response_wrapper(
+ usage.images,
+ )
+ self.moderations = to_streamed_response_wrapper(
+ usage.moderations,
+ )
+ self.vector_stores = to_streamed_response_wrapper(
+ usage.vector_stores,
+ )
+
+
+class AsyncUsageWithStreamingResponse:
+ def __init__(self, usage: AsyncUsage) -> None:
+ self._usage = usage
+
+ self.audio_speeches = async_to_streamed_response_wrapper(
+ usage.audio_speeches,
+ )
+ self.audio_transcriptions = async_to_streamed_response_wrapper(
+ usage.audio_transcriptions,
+ )
+ self.code_interpreter_sessions = async_to_streamed_response_wrapper(
+ usage.code_interpreter_sessions,
+ )
+ self.completions = async_to_streamed_response_wrapper(
+ usage.completions,
+ )
+ self.costs = async_to_streamed_response_wrapper(
+ usage.costs,
+ )
+ self.embeddings = async_to_streamed_response_wrapper(
+ usage.embeddings,
+ )
+ self.images = async_to_streamed_response_wrapper(
+ usage.images,
+ )
+ self.moderations = async_to_streamed_response_wrapper(
+ usage.moderations,
+ )
+ self.vector_stores = async_to_streamed_response_wrapper(
+ usage.vector_stores,
+ )
diff --git a/src/openai/resources/admin/organization/users/__init__.py b/src/openai/resources/admin/organization/users/__init__.py
new file mode 100644
index 0000000000..d230cb8f34
--- /dev/null
+++ b/src/openai/resources/admin/organization/users/__init__.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .roles import (
+ Roles,
+ AsyncRoles,
+ RolesWithRawResponse,
+ AsyncRolesWithRawResponse,
+ RolesWithStreamingResponse,
+ AsyncRolesWithStreamingResponse,
+)
+from .users import (
+ Users,
+ AsyncUsers,
+ UsersWithRawResponse,
+ AsyncUsersWithRawResponse,
+ UsersWithStreamingResponse,
+ AsyncUsersWithStreamingResponse,
+)
+
+__all__ = [
+ "Roles",
+ "AsyncRoles",
+ "RolesWithRawResponse",
+ "AsyncRolesWithRawResponse",
+ "RolesWithStreamingResponse",
+ "AsyncRolesWithStreamingResponse",
+ "Users",
+ "AsyncUsers",
+ "UsersWithRawResponse",
+ "AsyncUsersWithRawResponse",
+ "UsersWithStreamingResponse",
+ "AsyncUsersWithStreamingResponse",
+]
diff --git a/src/openai/resources/admin/organization/users/roles.py b/src/openai/resources/admin/organization/users/roles.py
new file mode 100644
index 0000000000..01ea5f4844
--- /dev/null
+++ b/src/openai/resources/admin/organization/users/roles.py
@@ -0,0 +1,398 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal
+
+import httpx
+
+from ..... import _legacy_response
+from ....._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
+from ....._utils import path_template, maybe_transform, async_maybe_transform
+from ....._compat import cached_property
+from ....._resource import SyncAPIResource, AsyncAPIResource
+from ....._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from .....pagination import SyncNextCursorPage, AsyncNextCursorPage
+from ....._base_client import AsyncPaginator, make_request_options
+from .....types.admin.organization.users import role_list_params, role_create_params
+from .....types.admin.organization.users.role_list_response import RoleListResponse
+from .....types.admin.organization.users.role_create_response import RoleCreateResponse
+from .....types.admin.organization.users.role_delete_response import RoleDeleteResponse
+
+__all__ = ["Roles", "AsyncRoles"]
+
+
+class Roles(SyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> RolesWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return RolesWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> RolesWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return RolesWithStreamingResponse(self)
+
+ def create(
+ self,
+ user_id: str,
+ *,
+ role_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> RoleCreateResponse:
+ """
+ Assigns an organization role to a user within the organization.
+
+ Args:
+ role_id: Identifier of the role to assign.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not user_id:
+ raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}")
+ return self._post(
+ path_template("/organization/users/{user_id}/roles", user_id=user_id),
+ body=maybe_transform({"role_id": role_id}, role_create_params.RoleCreateParams),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=RoleCreateResponse,
+ )
+
+ def list(
+ self,
+ user_id: str,
+ *,
+ after: str | Omit = omit,
+ limit: int | Omit = omit,
+ order: Literal["asc", "desc"] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> SyncNextCursorPage[RoleListResponse]:
+ """
+ Lists the organization roles assigned to a user within the organization.
+
+ Args:
+ after: Cursor for pagination. Provide the value from the previous response's `next`
+ field to continue listing organization roles.
+
+ limit: A limit on the number of organization role assignments to return.
+
+ order: Sort order for the returned organization roles.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not user_id:
+ raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}")
+ return self._get_api_list(
+ path_template("/organization/users/{user_id}/roles", user_id=user_id),
+ page=SyncNextCursorPage[RoleListResponse],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "limit": limit,
+ "order": order,
+ },
+ role_list_params.RoleListParams,
+ ),
+ security={"admin_api_key_auth": True},
+ ),
+ model=RoleListResponse,
+ )
+
+ def delete(
+ self,
+ role_id: str,
+ *,
+ user_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> RoleDeleteResponse:
+ """
+ Unassigns an organization role from a user within the organization.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not user_id:
+ raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}")
+ if not role_id:
+ raise ValueError(f"Expected a non-empty value for `role_id` but received {role_id!r}")
+ return self._delete(
+ path_template("/organization/users/{user_id}/roles/{role_id}", user_id=user_id, role_id=role_id),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=RoleDeleteResponse,
+ )
+
+
+class AsyncRoles(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncRolesWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncRolesWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncRolesWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return AsyncRolesWithStreamingResponse(self)
+
+ async def create(
+ self,
+ user_id: str,
+ *,
+ role_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> RoleCreateResponse:
+ """
+ Assigns an organization role to a user within the organization.
+
+ Args:
+ role_id: Identifier of the role to assign.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not user_id:
+ raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}")
+ return await self._post(
+ path_template("/organization/users/{user_id}/roles", user_id=user_id),
+ body=await async_maybe_transform({"role_id": role_id}, role_create_params.RoleCreateParams),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=RoleCreateResponse,
+ )
+
+ def list(
+ self,
+ user_id: str,
+ *,
+ after: str | Omit = omit,
+ limit: int | Omit = omit,
+ order: Literal["asc", "desc"] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AsyncPaginator[RoleListResponse, AsyncNextCursorPage[RoleListResponse]]:
+ """
+ Lists the organization roles assigned to a user within the organization.
+
+ Args:
+ after: Cursor for pagination. Provide the value from the previous response's `next`
+ field to continue listing organization roles.
+
+ limit: A limit on the number of organization role assignments to return.
+
+ order: Sort order for the returned organization roles.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not user_id:
+ raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}")
+ return self._get_api_list(
+ path_template("/organization/users/{user_id}/roles", user_id=user_id),
+ page=AsyncNextCursorPage[RoleListResponse],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "limit": limit,
+ "order": order,
+ },
+ role_list_params.RoleListParams,
+ ),
+ security={"admin_api_key_auth": True},
+ ),
+ model=RoleListResponse,
+ )
+
+ async def delete(
+ self,
+ role_id: str,
+ *,
+ user_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> RoleDeleteResponse:
+ """
+ Unassigns an organization role from a user within the organization.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not user_id:
+ raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}")
+ if not role_id:
+ raise ValueError(f"Expected a non-empty value for `role_id` but received {role_id!r}")
+ return await self._delete(
+ path_template("/organization/users/{user_id}/roles/{role_id}", user_id=user_id, role_id=role_id),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=RoleDeleteResponse,
+ )
+
+
+class RolesWithRawResponse:
+ def __init__(self, roles: Roles) -> None:
+ self._roles = roles
+
+ self.create = _legacy_response.to_raw_response_wrapper(
+ roles.create,
+ )
+ self.list = _legacy_response.to_raw_response_wrapper(
+ roles.list,
+ )
+ self.delete = _legacy_response.to_raw_response_wrapper(
+ roles.delete,
+ )
+
+
+class AsyncRolesWithRawResponse:
+ def __init__(self, roles: AsyncRoles) -> None:
+ self._roles = roles
+
+ self.create = _legacy_response.async_to_raw_response_wrapper(
+ roles.create,
+ )
+ self.list = _legacy_response.async_to_raw_response_wrapper(
+ roles.list,
+ )
+ self.delete = _legacy_response.async_to_raw_response_wrapper(
+ roles.delete,
+ )
+
+
+class RolesWithStreamingResponse:
+ def __init__(self, roles: Roles) -> None:
+ self._roles = roles
+
+ self.create = to_streamed_response_wrapper(
+ roles.create,
+ )
+ self.list = to_streamed_response_wrapper(
+ roles.list,
+ )
+ self.delete = to_streamed_response_wrapper(
+ roles.delete,
+ )
+
+
+class AsyncRolesWithStreamingResponse:
+ def __init__(self, roles: AsyncRoles) -> None:
+ self._roles = roles
+
+ self.create = async_to_streamed_response_wrapper(
+ roles.create,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ roles.list,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ roles.delete,
+ )
diff --git a/src/openai/resources/admin/organization/users/users.py b/src/openai/resources/admin/organization/users/users.py
new file mode 100644
index 0000000000..94eab69e83
--- /dev/null
+++ b/src/openai/resources/admin/organization/users/users.py
@@ -0,0 +1,543 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+
+import httpx
+
+from ..... import _legacy_response
+from .roles import (
+ Roles,
+ AsyncRoles,
+ RolesWithRawResponse,
+ AsyncRolesWithRawResponse,
+ RolesWithStreamingResponse,
+ AsyncRolesWithStreamingResponse,
+)
+from ....._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
+from ....._utils import path_template, maybe_transform, async_maybe_transform
+from ....._compat import cached_property
+from ....._resource import SyncAPIResource, AsyncAPIResource
+from ....._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from .....pagination import SyncConversationCursorPage, AsyncConversationCursorPage
+from ....._base_client import AsyncPaginator, make_request_options
+from .....types.admin.organization import user_list_params, user_update_params
+from .....types.admin.organization.organization_user import OrganizationUser
+from .....types.admin.organization.user_delete_response import UserDeleteResponse
+
+__all__ = ["Users", "AsyncUsers"]
+
+
+class Users(SyncAPIResource):
+ @cached_property
+ def roles(self) -> Roles:
+ return Roles(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> UsersWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return UsersWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> UsersWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return UsersWithStreamingResponse(self)
+
+ def retrieve(
+ self,
+ user_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> OrganizationUser:
+ """
+ Retrieves a user by their identifier.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not user_id:
+ raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}")
+ return self._get(
+ path_template("/organization/users/{user_id}", user_id=user_id),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=OrganizationUser,
+ )
+
+ def update(
+ self,
+ user_id: str,
+ *,
+ developer_persona: Optional[str] | Omit = omit,
+ role: Optional[str] | Omit = omit,
+ role_id: Optional[str] | Omit = omit,
+ technical_level: Optional[str] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> OrganizationUser:
+ """
+ Modifies a user's role in the organization.
+
+ Args:
+ developer_persona: Developer persona metadata.
+
+ role: `owner` or `reader`
+
+ role_id: Role ID to assign to the user.
+
+ technical_level: Technical level metadata.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not user_id:
+ raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}")
+ return self._post(
+ path_template("/organization/users/{user_id}", user_id=user_id),
+ body=maybe_transform(
+ {
+ "developer_persona": developer_persona,
+ "role": role,
+ "role_id": role_id,
+ "technical_level": technical_level,
+ },
+ user_update_params.UserUpdateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=OrganizationUser,
+ )
+
+ def list(
+ self,
+ *,
+ after: str | Omit = omit,
+ emails: SequenceNotStr[str] | Omit = omit,
+ limit: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> SyncConversationCursorPage[OrganizationUser]:
+ """
+ Lists all of the users in the organization.
+
+ Args:
+ after: A cursor for use in pagination. `after` is an object ID that defines your place
+ in the list. For instance, if you make a list request and receive 100 objects,
+ ending with obj_foo, your subsequent call can include after=obj_foo in order to
+ fetch the next page of the list.
+
+ emails: Filter by the email address of users.
+
+ limit: A limit on the number of objects to be returned. Limit can range between 1 and
+ 100, and the default is 20.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get_api_list(
+ "/organization/users",
+ page=SyncConversationCursorPage[OrganizationUser],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "emails": emails,
+ "limit": limit,
+ },
+ user_list_params.UserListParams,
+ ),
+ security={"admin_api_key_auth": True},
+ ),
+ model=OrganizationUser,
+ )
+
+ def delete(
+ self,
+ user_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> UserDeleteResponse:
+ """
+ Deletes a user from the organization.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not user_id:
+ raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}")
+ return self._delete(
+ path_template("/organization/users/{user_id}", user_id=user_id),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=UserDeleteResponse,
+ )
+
+
+class AsyncUsers(AsyncAPIResource):
+ @cached_property
+ def roles(self) -> AsyncRoles:
+ return AsyncRoles(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> AsyncUsersWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncUsersWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncUsersWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return AsyncUsersWithStreamingResponse(self)
+
+ async def retrieve(
+ self,
+ user_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> OrganizationUser:
+ """
+ Retrieves a user by their identifier.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not user_id:
+ raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}")
+ return await self._get(
+ path_template("/organization/users/{user_id}", user_id=user_id),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=OrganizationUser,
+ )
+
+ async def update(
+ self,
+ user_id: str,
+ *,
+ developer_persona: Optional[str] | Omit = omit,
+ role: Optional[str] | Omit = omit,
+ role_id: Optional[str] | Omit = omit,
+ technical_level: Optional[str] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> OrganizationUser:
+ """
+ Modifies a user's role in the organization.
+
+ Args:
+ developer_persona: Developer persona metadata.
+
+ role: `owner` or `reader`
+
+ role_id: Role ID to assign to the user.
+
+ technical_level: Technical level metadata.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not user_id:
+ raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}")
+ return await self._post(
+ path_template("/organization/users/{user_id}", user_id=user_id),
+ body=await async_maybe_transform(
+ {
+ "developer_persona": developer_persona,
+ "role": role,
+ "role_id": role_id,
+ "technical_level": technical_level,
+ },
+ user_update_params.UserUpdateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=OrganizationUser,
+ )
+
+ def list(
+ self,
+ *,
+ after: str | Omit = omit,
+ emails: SequenceNotStr[str] | Omit = omit,
+ limit: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AsyncPaginator[OrganizationUser, AsyncConversationCursorPage[OrganizationUser]]:
+ """
+ Lists all of the users in the organization.
+
+ Args:
+ after: A cursor for use in pagination. `after` is an object ID that defines your place
+ in the list. For instance, if you make a list request and receive 100 objects,
+ ending with obj_foo, your subsequent call can include after=obj_foo in order to
+ fetch the next page of the list.
+
+ emails: Filter by the email address of users.
+
+ limit: A limit on the number of objects to be returned. Limit can range between 1 and
+ 100, and the default is 20.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get_api_list(
+ "/organization/users",
+ page=AsyncConversationCursorPage[OrganizationUser],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "emails": emails,
+ "limit": limit,
+ },
+ user_list_params.UserListParams,
+ ),
+ security={"admin_api_key_auth": True},
+ ),
+ model=OrganizationUser,
+ )
+
+ async def delete(
+ self,
+ user_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> UserDeleteResponse:
+ """
+ Deletes a user from the organization.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not user_id:
+ raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}")
+ return await self._delete(
+ path_template("/organization/users/{user_id}", user_id=user_id),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"admin_api_key_auth": True},
+ ),
+ cast_to=UserDeleteResponse,
+ )
+
+
+class UsersWithRawResponse:
+ def __init__(self, users: Users) -> None:
+ self._users = users
+
+ self.retrieve = _legacy_response.to_raw_response_wrapper(
+ users.retrieve,
+ )
+ self.update = _legacy_response.to_raw_response_wrapper(
+ users.update,
+ )
+ self.list = _legacy_response.to_raw_response_wrapper(
+ users.list,
+ )
+ self.delete = _legacy_response.to_raw_response_wrapper(
+ users.delete,
+ )
+
+ @cached_property
+ def roles(self) -> RolesWithRawResponse:
+ return RolesWithRawResponse(self._users.roles)
+
+
+class AsyncUsersWithRawResponse:
+ def __init__(self, users: AsyncUsers) -> None:
+ self._users = users
+
+ self.retrieve = _legacy_response.async_to_raw_response_wrapper(
+ users.retrieve,
+ )
+ self.update = _legacy_response.async_to_raw_response_wrapper(
+ users.update,
+ )
+ self.list = _legacy_response.async_to_raw_response_wrapper(
+ users.list,
+ )
+ self.delete = _legacy_response.async_to_raw_response_wrapper(
+ users.delete,
+ )
+
+ @cached_property
+ def roles(self) -> AsyncRolesWithRawResponse:
+ return AsyncRolesWithRawResponse(self._users.roles)
+
+
+class UsersWithStreamingResponse:
+ def __init__(self, users: Users) -> None:
+ self._users = users
+
+ self.retrieve = to_streamed_response_wrapper(
+ users.retrieve,
+ )
+ self.update = to_streamed_response_wrapper(
+ users.update,
+ )
+ self.list = to_streamed_response_wrapper(
+ users.list,
+ )
+ self.delete = to_streamed_response_wrapper(
+ users.delete,
+ )
+
+ @cached_property
+ def roles(self) -> RolesWithStreamingResponse:
+ return RolesWithStreamingResponse(self._users.roles)
+
+
+class AsyncUsersWithStreamingResponse:
+ def __init__(self, users: AsyncUsers) -> None:
+ self._users = users
+
+ self.retrieve = async_to_streamed_response_wrapper(
+ users.retrieve,
+ )
+ self.update = async_to_streamed_response_wrapper(
+ users.update,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ users.list,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ users.delete,
+ )
+
+ @cached_property
+ def roles(self) -> AsyncRolesWithStreamingResponse:
+ return AsyncRolesWithStreamingResponse(self._users.roles)
diff --git a/src/openai/resources/audio/speech.py b/src/openai/resources/audio/speech.py
index 80dbb44077..3c05d690dd 100644
--- a/src/openai/resources/audio/speech.py
+++ b/src/openai/resources/audio/speech.py
@@ -119,7 +119,11 @@ def create(
speech_create_params.SpeechCreateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=_legacy_response.HttpxBinaryResponseContent,
)
@@ -219,7 +223,11 @@ async def create(
speech_create_params.SpeechCreateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=_legacy_response.HttpxBinaryResponseContent,
)
diff --git a/src/openai/resources/audio/transcriptions.py b/src/openai/resources/audio/transcriptions.py
index 25e6e0cb5e..ab2480ef11 100644
--- a/src/openai/resources/audio/transcriptions.py
+++ b/src/openai/resources/audio/transcriptions.py
@@ -9,6 +9,7 @@
import httpx
from ... import _legacy_response
+from ..._files import deepcopy_with_paths
from ..._types import (
Body,
Omit,
@@ -20,7 +21,7 @@
omit,
not_given,
)
-from ..._utils import extract_files, required_args, maybe_transform, deepcopy_minimal, async_maybe_transform
+from ..._utils import extract_files, required_args, maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
@@ -459,7 +460,7 @@ def create(
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> str | Transcription | TranscriptionDiarized | TranscriptionVerbose | Stream[TranscriptionStreamEvent]:
- body = deepcopy_minimal(
+ body = deepcopy_with_paths(
{
"file": file,
"model": model,
@@ -473,7 +474,8 @@ def create(
"stream": stream,
"temperature": temperature,
"timestamp_granularities": timestamp_granularities,
- }
+ },
+ [["file"]],
)
files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
# It should be noted that the actual Content-Type header that will be
@@ -490,7 +492,11 @@ def create(
),
files=files,
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=_get_response_format_type(response_format),
stream=stream or False,
@@ -913,7 +919,7 @@ async def create(
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Transcription | TranscriptionVerbose | TranscriptionDiarized | str | AsyncStream[TranscriptionStreamEvent]:
- body = deepcopy_minimal(
+ body = deepcopy_with_paths(
{
"file": file,
"model": model,
@@ -927,7 +933,8 @@ async def create(
"stream": stream,
"temperature": temperature,
"timestamp_granularities": timestamp_granularities,
- }
+ },
+ [["file"]],
)
files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
# It should be noted that the actual Content-Type header that will be
@@ -944,7 +951,11 @@ async def create(
),
files=files,
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=_get_response_format_type(response_format),
stream=stream or False,
diff --git a/src/openai/resources/audio/translations.py b/src/openai/resources/audio/translations.py
index 0751a65586..d0b5738045 100644
--- a/src/openai/resources/audio/translations.py
+++ b/src/openai/resources/audio/translations.py
@@ -9,8 +9,9 @@
import httpx
from ... import _legacy_response
+from ..._files import deepcopy_with_paths
from ..._types import Body, Omit, Query, Headers, NotGiven, FileTypes, omit, not_given
-from ..._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform
+from ..._utils import extract_files, maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
@@ -146,14 +147,15 @@ def create(
timeout: Override the client-level default timeout for this request, in seconds
"""
- body = deepcopy_minimal(
+ body = deepcopy_with_paths(
{
"file": file,
"model": model,
"prompt": prompt,
"response_format": response_format,
"temperature": temperature,
- }
+ },
+ [["file"]],
)
files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
# It should be noted that the actual Content-Type header that will be
@@ -165,7 +167,11 @@ def create(
body=maybe_transform(body, translation_create_params.TranslationCreateParams),
files=files,
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=_get_response_format_type(response_format),
)
@@ -291,14 +297,15 @@ async def create(
timeout: Override the client-level default timeout for this request, in seconds
"""
- body = deepcopy_minimal(
+ body = deepcopy_with_paths(
{
"file": file,
"model": model,
"prompt": prompt,
"response_format": response_format,
"temperature": temperature,
- }
+ },
+ [["file"]],
)
files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
# It should be noted that the actual Content-Type header that will be
@@ -310,7 +317,11 @@ async def create(
body=await async_maybe_transform(body, translation_create_params.TranslationCreateParams),
files=files,
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=_get_response_format_type(response_format),
)
diff --git a/src/openai/resources/batches.py b/src/openai/resources/batches.py
index 6cdb50c280..e7146ef35f 100644
--- a/src/openai/resources/batches.py
+++ b/src/openai/resources/batches.py
@@ -123,7 +123,11 @@ def create(
batch_create_params.BatchCreateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Batch,
)
@@ -156,7 +160,11 @@ def retrieve(
return self._get(
path_template("/batches/{batch_id}", batch_id=batch_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Batch,
)
@@ -209,6 +217,7 @@ def list(
},
batch_list_params.BatchListParams,
),
+ security={"bearer_auth": True},
),
model=Batch,
)
@@ -244,7 +253,11 @@ def cancel(
return self._post(
path_template("/batches/{batch_id}/cancel", batch_id=batch_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Batch,
)
@@ -351,7 +364,11 @@ async def create(
batch_create_params.BatchCreateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Batch,
)
@@ -384,7 +401,11 @@ async def retrieve(
return await self._get(
path_template("/batches/{batch_id}", batch_id=batch_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Batch,
)
@@ -437,6 +458,7 @@ def list(
},
batch_list_params.BatchListParams,
),
+ security={"bearer_auth": True},
),
model=Batch,
)
@@ -472,7 +494,11 @@ async def cancel(
return await self._post(
path_template("/batches/{batch_id}/cancel", batch_id=batch_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Batch,
)
diff --git a/src/openai/resources/beta/assistants.py b/src/openai/resources/beta/assistants.py
index 7ea8a918ca..6301082fea 100644
--- a/src/openai/resources/beta/assistants.py
+++ b/src/openai/resources/beta/assistants.py
@@ -182,7 +182,11 @@ def create(
assistant_create_params.AssistantCreateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Assistant,
)
@@ -217,7 +221,11 @@ def retrieve(
return self._get(
path_template("/assistants/{assistant_id}", assistant_id=assistant_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Assistant,
)
@@ -401,7 +409,11 @@ def update(
assistant_update_params.AssistantUpdateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Assistant,
)
@@ -468,6 +480,7 @@ def list(
},
assistant_list_params.AssistantListParams,
),
+ security={"bearer_auth": True},
),
model=Assistant,
)
@@ -502,7 +515,11 @@ def delete(
return self._delete(
path_template("/assistants/{assistant_id}", assistant_id=assistant_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=AssistantDeleted,
)
@@ -658,7 +675,11 @@ async def create(
assistant_create_params.AssistantCreateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Assistant,
)
@@ -693,7 +714,11 @@ async def retrieve(
return await self._get(
path_template("/assistants/{assistant_id}", assistant_id=assistant_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Assistant,
)
@@ -877,7 +902,11 @@ async def update(
assistant_update_params.AssistantUpdateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Assistant,
)
@@ -944,6 +973,7 @@ def list(
},
assistant_list_params.AssistantListParams,
),
+ security={"bearer_auth": True},
),
model=Assistant,
)
@@ -978,7 +1008,11 @@ async def delete(
return await self._delete(
path_template("/assistants/{assistant_id}", assistant_id=assistant_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=AssistantDeleted,
)
diff --git a/src/openai/resources/beta/chatkit/sessions.py b/src/openai/resources/beta/chatkit/sessions.py
index 6e95fd65fb..9049b06a40 100644
--- a/src/openai/resources/beta/chatkit/sessions.py
+++ b/src/openai/resources/beta/chatkit/sessions.py
@@ -100,7 +100,11 @@ def create(
session_create_params.SessionCreateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=ChatSession,
)
@@ -136,7 +140,11 @@ def cancel(
return self._post(
path_template("/chatkit/sessions/{session_id}/cancel", session_id=session_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=ChatSession,
)
@@ -215,7 +223,11 @@ async def create(
session_create_params.SessionCreateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=ChatSession,
)
@@ -251,7 +263,11 @@ async def cancel(
return await self._post(
path_template("/chatkit/sessions/{session_id}/cancel", session_id=session_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=ChatSession,
)
diff --git a/src/openai/resources/beta/chatkit/threads.py b/src/openai/resources/beta/chatkit/threads.py
index 16e0e11a0a..05ebf0fb87 100644
--- a/src/openai/resources/beta/chatkit/threads.py
+++ b/src/openai/resources/beta/chatkit/threads.py
@@ -72,7 +72,11 @@ def retrieve(
return self._get(
path_template("/chatkit/threads/{thread_id}", thread_id=thread_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=ChatKitThread,
)
@@ -136,6 +140,7 @@ def list(
},
thread_list_params.ThreadListParams,
),
+ security={"bearer_auth": True},
),
model=ChatKitThread,
)
@@ -169,7 +174,11 @@ def delete(
return self._delete(
path_template("/chatkit/threads/{thread_id}", thread_id=thread_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=ThreadDeleteResponse,
)
@@ -231,6 +240,7 @@ def list_items(
},
thread_list_items_params.ThreadListItemsParams,
),
+ security={"bearer_auth": True},
),
model=cast(Any, Data), # Union types cannot be passed in as arguments in the type system
)
@@ -285,7 +295,11 @@ async def retrieve(
return await self._get(
path_template("/chatkit/threads/{thread_id}", thread_id=thread_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=ChatKitThread,
)
@@ -349,6 +363,7 @@ def list(
},
thread_list_params.ThreadListParams,
),
+ security={"bearer_auth": True},
),
model=ChatKitThread,
)
@@ -382,7 +397,11 @@ async def delete(
return await self._delete(
path_template("/chatkit/threads/{thread_id}", thread_id=thread_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=ThreadDeleteResponse,
)
@@ -444,6 +463,7 @@ def list_items(
},
thread_list_items_params.ThreadListItemsParams,
),
+ security={"bearer_auth": True},
),
model=cast(Any, Data), # Union types cannot be passed in as arguments in the type system
)
diff --git a/src/openai/resources/beta/threads/messages.py b/src/openai/resources/beta/threads/messages.py
index 95b750d4e4..e57e783577 100644
--- a/src/openai/resources/beta/threads/messages.py
+++ b/src/openai/resources/beta/threads/messages.py
@@ -112,7 +112,11 @@ def create(
message_create_params.MessageCreateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Message,
)
@@ -150,7 +154,11 @@ def retrieve(
return self._get(
path_template("/threads/{thread_id}/messages/{message_id}", thread_id=thread_id, message_id=message_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Message,
)
@@ -197,7 +205,11 @@ def update(
path_template("/threads/{thread_id}/messages/{message_id}", thread_id=thread_id, message_id=message_id),
body=maybe_transform({"metadata": metadata}, message_update_params.MessageUpdateParams),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Message,
)
@@ -270,6 +282,7 @@ def list(
},
message_list_params.MessageListParams,
),
+ security={"bearer_auth": True},
),
model=Message,
)
@@ -307,7 +320,11 @@ def delete(
return self._delete(
path_template("/threads/{thread_id}/messages/{message_id}", thread_id=thread_id, message_id=message_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=MessageDeleted,
)
@@ -397,7 +414,11 @@ async def create(
message_create_params.MessageCreateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Message,
)
@@ -435,7 +456,11 @@ async def retrieve(
return await self._get(
path_template("/threads/{thread_id}/messages/{message_id}", thread_id=thread_id, message_id=message_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Message,
)
@@ -482,7 +507,11 @@ async def update(
path_template("/threads/{thread_id}/messages/{message_id}", thread_id=thread_id, message_id=message_id),
body=await async_maybe_transform({"metadata": metadata}, message_update_params.MessageUpdateParams),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Message,
)
@@ -555,6 +584,7 @@ def list(
},
message_list_params.MessageListParams,
),
+ security={"bearer_auth": True},
),
model=Message,
)
@@ -592,7 +622,11 @@ async def delete(
return await self._delete(
path_template("/threads/{thread_id}/messages/{message_id}", thread_id=thread_id, message_id=message_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=MessageDeleted,
)
diff --git a/src/openai/resources/beta/threads/runs/runs.py b/src/openai/resources/beta/threads/runs/runs.py
index 882e88dfa6..385d4c680c 100644
--- a/src/openai/resources/beta/threads/runs/runs.py
+++ b/src/openai/resources/beta/threads/runs/runs.py
@@ -624,6 +624,7 @@ def create(
extra_body=extra_body,
timeout=timeout,
query=maybe_transform({"include": include}, run_create_params.RunCreateParams),
+ security={"bearer_auth": True},
synthesize_event_and_data=True,
),
cast_to=Run,
@@ -664,7 +665,11 @@ def retrieve(
return self._get(
path_template("/threads/{thread_id}/runs/{run_id}", thread_id=thread_id, run_id=run_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Run,
)
@@ -711,7 +716,11 @@ def update(
path_template("/threads/{thread_id}/runs/{run_id}", thread_id=thread_id, run_id=run_id),
body=maybe_transform({"metadata": metadata}, run_update_params.RunUpdateParams),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Run,
)
@@ -780,6 +789,7 @@ def list(
},
run_list_params.RunListParams,
),
+ security={"bearer_auth": True},
),
model=Run,
)
@@ -817,7 +827,11 @@ def cancel(
return self._post(
path_template("/threads/{thread_id}/runs/{run_id}/cancel", thread_id=thread_id, run_id=run_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Run,
)
@@ -1023,7 +1037,11 @@ def create_and_stream(
run_create_params.RunCreateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Run,
stream=True,
@@ -1215,6 +1233,7 @@ def stream(
extra_body=extra_body,
timeout=timeout,
query=maybe_transform({"include": include}, run_create_params.RunCreateParams),
+ security={"bearer_auth": True},
),
cast_to=Run,
stream=True,
@@ -1377,6 +1396,7 @@ def submit_tool_outputs(
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
+ security={"bearer_auth": True},
synthesize_event_and_data=True,
),
cast_to=Run,
@@ -1512,7 +1532,11 @@ def submit_tool_outputs_stream(
run_submit_tool_outputs_params.RunSubmitToolOutputsParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Run,
stream=True,
@@ -2087,6 +2111,7 @@ async def create(
extra_body=extra_body,
timeout=timeout,
query=await async_maybe_transform({"include": include}, run_create_params.RunCreateParams),
+ security={"bearer_auth": True},
synthesize_event_and_data=True,
),
cast_to=Run,
@@ -2127,7 +2152,11 @@ async def retrieve(
return await self._get(
path_template("/threads/{thread_id}/runs/{run_id}", thread_id=thread_id, run_id=run_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Run,
)
@@ -2174,7 +2203,11 @@ async def update(
path_template("/threads/{thread_id}/runs/{run_id}", thread_id=thread_id, run_id=run_id),
body=await async_maybe_transform({"metadata": metadata}, run_update_params.RunUpdateParams),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Run,
)
@@ -2243,6 +2276,7 @@ def list(
},
run_list_params.RunListParams,
),
+ security={"bearer_auth": True},
),
model=Run,
)
@@ -2280,7 +2314,11 @@ async def cancel(
return await self._post(
path_template("/threads/{thread_id}/runs/{run_id}/cancel", thread_id=thread_id, run_id=run_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Run,
)
@@ -2484,7 +2522,11 @@ def create_and_stream(
run_create_params.RunCreateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Run,
stream=True,
@@ -2677,6 +2719,7 @@ def stream(
extra_body=extra_body,
timeout=timeout,
query=maybe_transform({"include": include}, run_create_params.RunCreateParams),
+ security={"bearer_auth": True},
),
cast_to=Run,
stream=True,
@@ -2839,6 +2882,7 @@ async def submit_tool_outputs(
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
+ security={"bearer_auth": True},
synthesize_event_and_data=True,
),
cast_to=Run,
@@ -2976,7 +3020,11 @@ def submit_tool_outputs_stream(
run_submit_tool_outputs_params.RunSubmitToolOutputsParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Run,
stream=True,
diff --git a/src/openai/resources/beta/threads/runs/steps.py b/src/openai/resources/beta/threads/runs/steps.py
index 9a6402b263..a251d27bf5 100644
--- a/src/openai/resources/beta/threads/runs/steps.py
+++ b/src/openai/resources/beta/threads/runs/steps.py
@@ -100,6 +100,7 @@ def retrieve(
extra_body=extra_body,
timeout=timeout,
query=maybe_transform({"include": include}, step_retrieve_params.StepRetrieveParams),
+ security={"bearer_auth": True},
),
cast_to=RunStep,
)
@@ -181,6 +182,7 @@ def list(
},
step_list_params.StepListParams,
),
+ security={"bearer_auth": True},
),
model=RunStep,
)
@@ -263,6 +265,7 @@ async def retrieve(
extra_body=extra_body,
timeout=timeout,
query=await async_maybe_transform({"include": include}, step_retrieve_params.StepRetrieveParams),
+ security={"bearer_auth": True},
),
cast_to=RunStep,
)
@@ -344,6 +347,7 @@ def list(
},
step_list_params.StepListParams,
),
+ security={"bearer_auth": True},
),
model=RunStep,
)
diff --git a/src/openai/resources/beta/threads/threads.py b/src/openai/resources/beta/threads/threads.py
index 4b0f18fe47..d9425c64bd 100644
--- a/src/openai/resources/beta/threads/threads.py
+++ b/src/openai/resources/beta/threads/threads.py
@@ -144,7 +144,11 @@ def create(
thread_create_params.ThreadCreateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Thread,
)
@@ -179,7 +183,11 @@ def retrieve(
return self._get(
path_template("/threads/{thread_id}", thread_id=thread_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Thread,
)
@@ -235,7 +243,11 @@ def update(
thread_update_params.ThreadUpdateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Thread,
)
@@ -270,7 +282,11 @@ def delete(
return self._delete(
path_template("/threads/{thread_id}", thread_id=thread_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=ThreadDeleted,
)
@@ -737,6 +753,7 @@ def create_and_run(
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
+ security={"bearer_auth": True},
synthesize_event_and_data=True,
),
cast_to=Run,
@@ -916,7 +933,11 @@ def create_and_run_stream(
thread_create_and_run_params.ThreadCreateAndRunParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Run,
stream=True,
@@ -1010,7 +1031,11 @@ async def create(
thread_create_params.ThreadCreateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Thread,
)
@@ -1045,7 +1070,11 @@ async def retrieve(
return await self._get(
path_template("/threads/{thread_id}", thread_id=thread_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Thread,
)
@@ -1101,7 +1130,11 @@ async def update(
thread_update_params.ThreadUpdateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Thread,
)
@@ -1136,7 +1169,11 @@ async def delete(
return await self._delete(
path_template("/threads/{thread_id}", thread_id=thread_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=ThreadDeleted,
)
@@ -1603,6 +1640,7 @@ async def create_and_run(
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
+ security={"bearer_auth": True},
synthesize_event_and_data=True,
),
cast_to=Run,
@@ -1786,7 +1824,11 @@ def create_and_run_stream(
thread_create_and_run_params.ThreadCreateAndRunParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Run,
stream=True,
diff --git a/src/openai/resources/chat/completions/completions.py b/src/openai/resources/chat/completions/completions.py
index 8b4fc12ae9..7a551e2459 100644
--- a/src/openai/resources/chat/completions/completions.py
+++ b/src/openai/resources/chat/completions/completions.py
@@ -236,6 +236,7 @@ def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseForma
extra_body=extra_body,
timeout=timeout,
post_parser=parser,
+ security={"bearer_auth": True},
),
# we turn the `ChatCompletion` instance into a `ParsedChatCompletion`
# in the `parser` function above
@@ -1253,7 +1254,11 @@ def create(
else completion_create_params.CompletionCreateParamsNonStreaming,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=ChatCompletion,
stream=stream or False,
@@ -1290,7 +1295,11 @@ def retrieve(
return self._get(
path_template("/chat/completions/{completion_id}", completion_id=completion_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=ChatCompletion,
)
@@ -1335,7 +1344,11 @@ def update(
path_template("/chat/completions/{completion_id}", completion_id=completion_id),
body=maybe_transform({"metadata": metadata}, completion_update_params.CompletionUpdateParams),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=ChatCompletion,
)
@@ -1401,6 +1414,7 @@ def list(
},
completion_list_params.CompletionListParams,
),
+ security={"bearer_auth": True},
),
model=ChatCompletion,
)
@@ -1435,7 +1449,11 @@ def delete(
return self._delete(
path_template("/chat/completions/{completion_id}", completion_id=completion_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=ChatCompletionDeleted,
)
@@ -1739,6 +1757,7 @@ def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseForma
extra_body=extra_body,
timeout=timeout,
post_parser=parser,
+ security={"bearer_auth": True},
),
# we turn the `ChatCompletion` instance into a `ParsedChatCompletion`
# in the `parser` function above
@@ -2756,7 +2775,11 @@ async def create(
else completion_create_params.CompletionCreateParamsNonStreaming,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=ChatCompletion,
stream=stream or False,
@@ -2793,7 +2816,11 @@ async def retrieve(
return await self._get(
path_template("/chat/completions/{completion_id}", completion_id=completion_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=ChatCompletion,
)
@@ -2838,7 +2865,11 @@ async def update(
path_template("/chat/completions/{completion_id}", completion_id=completion_id),
body=await async_maybe_transform({"metadata": metadata}, completion_update_params.CompletionUpdateParams),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=ChatCompletion,
)
@@ -2904,6 +2935,7 @@ def list(
},
completion_list_params.CompletionListParams,
),
+ security={"bearer_auth": True},
),
model=ChatCompletion,
)
@@ -2938,7 +2970,11 @@ async def delete(
return await self._delete(
path_template("/chat/completions/{completion_id}", completion_id=completion_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=ChatCompletionDeleted,
)
diff --git a/src/openai/resources/chat/completions/messages.py b/src/openai/resources/chat/completions/messages.py
index ffbff566db..05dd23735a 100644
--- a/src/openai/resources/chat/completions/messages.py
+++ b/src/openai/resources/chat/completions/messages.py
@@ -97,6 +97,7 @@ def list(
},
message_list_params.MessageListParams,
),
+ security={"bearer_auth": True},
),
model=ChatCompletionStoreMessage,
)
@@ -179,6 +180,7 @@ def list(
},
message_list_params.MessageListParams,
),
+ security={"bearer_auth": True},
),
model=ChatCompletionStoreMessage,
)
diff --git a/src/openai/resources/completions.py b/src/openai/resources/completions.py
index 4c9e266787..d2eb646a66 100644
--- a/src/openai/resources/completions.py
+++ b/src/openai/resources/completions.py
@@ -579,7 +579,11 @@ def create(
else completion_create_params.CompletionCreateParamsNonStreaming,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Completion,
stream=stream or False,
@@ -1142,7 +1146,11 @@ async def create(
else completion_create_params.CompletionCreateParamsNonStreaming,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Completion,
stream=stream or False,
diff --git a/src/openai/resources/containers/containers.py b/src/openai/resources/containers/containers.py
index f6b8c33c75..7588ad7240 100644
--- a/src/openai/resources/containers/containers.py
+++ b/src/openai/resources/containers/containers.py
@@ -109,7 +109,11 @@ def create(
container_create_params.ContainerCreateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=ContainerCreateResponse,
)
@@ -142,7 +146,11 @@ def retrieve(
return self._get(
path_template("/containers/{container_id}", container_id=container_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=ContainerRetrieveResponse,
)
@@ -204,6 +212,7 @@ def list(
},
container_list_params.ContainerListParams,
),
+ security={"bearer_auth": True},
),
model=ContainerListResponse,
)
@@ -237,7 +246,11 @@ def delete(
return self._delete(
path_template("/containers/{container_id}", container_id=container_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=NoneType,
)
@@ -321,7 +334,11 @@ async def create(
container_create_params.ContainerCreateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=ContainerCreateResponse,
)
@@ -354,7 +371,11 @@ async def retrieve(
return await self._get(
path_template("/containers/{container_id}", container_id=container_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=ContainerRetrieveResponse,
)
@@ -416,6 +437,7 @@ def list(
},
container_list_params.ContainerListParams,
),
+ security={"bearer_auth": True},
),
model=ContainerListResponse,
)
@@ -449,7 +471,11 @@ async def delete(
return await self._delete(
path_template("/containers/{container_id}", container_id=container_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=NoneType,
)
diff --git a/src/openai/resources/containers/files/content.py b/src/openai/resources/containers/files/content.py
index eb915b9c13..7df5e4cf2d 100644
--- a/src/openai/resources/containers/files/content.py
+++ b/src/openai/resources/containers/files/content.py
@@ -74,7 +74,11 @@ def retrieve(
"/containers/{container_id}/files/{file_id}/content", container_id=container_id, file_id=file_id
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=_legacy_response.HttpxBinaryResponseContent,
)
@@ -134,7 +138,11 @@ async def retrieve(
"/containers/{container_id}/files/{file_id}/content", container_id=container_id, file_id=file_id
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=_legacy_response.HttpxBinaryResponseContent,
)
diff --git a/src/openai/resources/containers/files/files.py b/src/openai/resources/containers/files/files.py
index f48adf3a2a..736b1fb75b 100644
--- a/src/openai/resources/containers/files/files.py
+++ b/src/openai/resources/containers/files/files.py
@@ -16,8 +16,9 @@
ContentWithStreamingResponse,
AsyncContentWithStreamingResponse,
)
+from ...._files import deepcopy_with_paths
from ...._types import Body, Omit, Query, Headers, NoneType, NotGiven, FileTypes, omit, not_given
-from ...._utils import extract_files, path_template, maybe_transform, deepcopy_minimal, async_maybe_transform
+from ...._utils import extract_files, path_template, maybe_transform, async_maybe_transform
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
@@ -89,11 +90,12 @@ def create(
"""
if not container_id:
raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
- body = deepcopy_minimal(
+ body = deepcopy_with_paths(
{
"file": file,
"file_id": file_id,
- }
+ },
+ [["file"]],
)
files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
if files:
@@ -106,7 +108,11 @@ def create(
body=maybe_transform(body, file_create_params.FileCreateParams),
files=files,
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=FileCreateResponse,
)
@@ -142,7 +148,11 @@ def retrieve(
return self._get(
path_template("/containers/{container_id}/files/{file_id}", container_id=container_id, file_id=file_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=FileRetrieveResponse,
)
@@ -203,6 +213,7 @@ def list(
},
file_list_params.FileListParams,
),
+ security={"bearer_auth": True},
),
model=FileListResponse,
)
@@ -239,7 +250,11 @@ def delete(
return self._delete(
path_template("/containers/{container_id}/files/{file_id}", container_id=container_id, file_id=file_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=NoneType,
)
@@ -303,11 +318,12 @@ async def create(
"""
if not container_id:
raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
- body = deepcopy_minimal(
+ body = deepcopy_with_paths(
{
"file": file,
"file_id": file_id,
- }
+ },
+ [["file"]],
)
files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
if files:
@@ -320,7 +336,11 @@ async def create(
body=await async_maybe_transform(body, file_create_params.FileCreateParams),
files=files,
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=FileCreateResponse,
)
@@ -356,7 +376,11 @@ async def retrieve(
return await self._get(
path_template("/containers/{container_id}/files/{file_id}", container_id=container_id, file_id=file_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=FileRetrieveResponse,
)
@@ -417,6 +441,7 @@ def list(
},
file_list_params.FileListParams,
),
+ security={"bearer_auth": True},
),
model=FileListResponse,
)
@@ -453,7 +478,11 @@ async def delete(
return await self._delete(
path_template("/containers/{container_id}/files/{file_id}", container_id=container_id, file_id=file_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=NoneType,
)
diff --git a/src/openai/resources/conversations/conversations.py b/src/openai/resources/conversations/conversations.py
index d349f38546..bdc8e1d637 100644
--- a/src/openai/resources/conversations/conversations.py
+++ b/src/openai/resources/conversations/conversations.py
@@ -101,7 +101,11 @@ def create(
conversation_create_params.ConversationCreateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Conversation,
)
@@ -134,7 +138,11 @@ def retrieve(
return self._get(
path_template("/conversations/{conversation_id}", conversation_id=conversation_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Conversation,
)
@@ -176,7 +184,11 @@ def update(
path_template("/conversations/{conversation_id}", conversation_id=conversation_id),
body=maybe_transform({"metadata": metadata}, conversation_update_params.ConversationUpdateParams),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Conversation,
)
@@ -210,7 +222,11 @@ def delete(
return self._delete(
path_template("/conversations/{conversation_id}", conversation_id=conversation_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=ConversationDeletedResource,
)
@@ -287,7 +303,11 @@ async def create(
conversation_create_params.ConversationCreateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Conversation,
)
@@ -320,7 +340,11 @@ async def retrieve(
return await self._get(
path_template("/conversations/{conversation_id}", conversation_id=conversation_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Conversation,
)
@@ -364,7 +388,11 @@ async def update(
{"metadata": metadata}, conversation_update_params.ConversationUpdateParams
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Conversation,
)
@@ -398,7 +426,11 @@ async def delete(
return await self._delete(
path_template("/conversations/{conversation_id}", conversation_id=conversation_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=ConversationDeletedResource,
)
diff --git a/src/openai/resources/conversations/items.py b/src/openai/resources/conversations/items.py
index 7d7c9a4aba..01cdfe0804 100644
--- a/src/openai/resources/conversations/items.py
+++ b/src/openai/resources/conversations/items.py
@@ -89,6 +89,7 @@ def create(
extra_body=extra_body,
timeout=timeout,
query=maybe_transform({"include": include}, item_create_params.ItemCreateParams),
+ security={"bearer_auth": True},
),
cast_to=ConversationItemList,
)
@@ -138,6 +139,7 @@ def retrieve(
extra_body=extra_body,
timeout=timeout,
query=maybe_transform({"include": include}, item_retrieve_params.ItemRetrieveParams),
+ security={"bearer_auth": True},
),
cast_to=cast(Any, ConversationItem), # Union types cannot be passed in as arguments in the type system
),
@@ -218,6 +220,7 @@ def list(
},
item_list_params.ItemListParams,
),
+ security={"bearer_auth": True},
),
model=cast(Any, ConversationItem), # Union types cannot be passed in as arguments in the type system
)
@@ -255,7 +258,11 @@ def delete(
"/conversations/{conversation_id}/items/{item_id}", conversation_id=conversation_id, item_id=item_id
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Conversation,
)
@@ -325,6 +332,7 @@ async def create(
extra_body=extra_body,
timeout=timeout,
query=await async_maybe_transform({"include": include}, item_create_params.ItemCreateParams),
+ security={"bearer_auth": True},
),
cast_to=ConversationItemList,
)
@@ -374,6 +382,7 @@ async def retrieve(
extra_body=extra_body,
timeout=timeout,
query=await async_maybe_transform({"include": include}, item_retrieve_params.ItemRetrieveParams),
+ security={"bearer_auth": True},
),
cast_to=cast(Any, ConversationItem), # Union types cannot be passed in as arguments in the type system
),
@@ -454,6 +463,7 @@ def list(
},
item_list_params.ItemListParams,
),
+ security={"bearer_auth": True},
),
model=cast(Any, ConversationItem), # Union types cannot be passed in as arguments in the type system
)
@@ -491,7 +501,11 @@ async def delete(
"/conversations/{conversation_id}/items/{item_id}", conversation_id=conversation_id, item_id=item_id
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Conversation,
)
diff --git a/src/openai/resources/embeddings.py b/src/openai/resources/embeddings.py
index 86eb949a40..a51936d809 100644
--- a/src/openai/resources/embeddings.py
+++ b/src/openai/resources/embeddings.py
@@ -142,6 +142,7 @@ def parser(obj: CreateEmbeddingResponse) -> CreateEmbeddingResponse:
extra_body=extra_body,
timeout=timeout,
post_parser=parser,
+ security={"bearer_auth": True},
),
cast_to=CreateEmbeddingResponse,
)
@@ -265,6 +266,7 @@ def parser(obj: CreateEmbeddingResponse) -> CreateEmbeddingResponse:
extra_body=extra_body,
timeout=timeout,
post_parser=parser,
+ security={"bearer_auth": True},
),
cast_to=CreateEmbeddingResponse,
)
diff --git a/src/openai/resources/evals/evals.py b/src/openai/resources/evals/evals.py
index 6acd669a2c..a23c9cdef2 100644
--- a/src/openai/resources/evals/evals.py
+++ b/src/openai/resources/evals/evals.py
@@ -121,7 +121,11 @@ def create(
eval_create_params.EvalCreateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=EvalCreateResponse,
)
@@ -154,7 +158,11 @@ def retrieve(
return self._get(
path_template("/evals/{eval_id}", eval_id=eval_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=EvalRetrieveResponse,
)
@@ -205,7 +213,11 @@ def update(
eval_update_params.EvalUpdateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=EvalUpdateResponse,
)
@@ -263,6 +275,7 @@ def list(
},
eval_list_params.EvalListParams,
),
+ security={"bearer_auth": True},
),
model=EvalListResponse,
)
@@ -295,7 +308,11 @@ def delete(
return self._delete(
path_template("/evals/{eval_id}", eval_id=eval_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=EvalDeleteResponse,
)
@@ -388,7 +405,11 @@ async def create(
eval_create_params.EvalCreateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=EvalCreateResponse,
)
@@ -421,7 +442,11 @@ async def retrieve(
return await self._get(
path_template("/evals/{eval_id}", eval_id=eval_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=EvalRetrieveResponse,
)
@@ -472,7 +497,11 @@ async def update(
eval_update_params.EvalUpdateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=EvalUpdateResponse,
)
@@ -530,6 +559,7 @@ def list(
},
eval_list_params.EvalListParams,
),
+ security={"bearer_auth": True},
),
model=EvalListResponse,
)
@@ -562,7 +592,11 @@ async def delete(
return await self._delete(
path_template("/evals/{eval_id}", eval_id=eval_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=EvalDeleteResponse,
)
diff --git a/src/openai/resources/evals/runs/output_items.py b/src/openai/resources/evals/runs/output_items.py
index 7a498a7ebf..2f884dd876 100644
--- a/src/openai/resources/evals/runs/output_items.py
+++ b/src/openai/resources/evals/runs/output_items.py
@@ -82,7 +82,11 @@ def retrieve(
output_item_id=output_item_id,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=OutputItemRetrieveResponse,
)
@@ -146,6 +150,7 @@ def list(
},
output_item_list_params.OutputItemListParams,
),
+ security={"bearer_auth": True},
),
model=OutputItemListResponse,
)
@@ -212,7 +217,11 @@ async def retrieve(
output_item_id=output_item_id,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=OutputItemRetrieveResponse,
)
@@ -276,6 +285,7 @@ def list(
},
output_item_list_params.OutputItemListParams,
),
+ security={"bearer_auth": True},
),
model=OutputItemListResponse,
)
diff --git a/src/openai/resources/evals/runs/runs.py b/src/openai/resources/evals/runs/runs.py
index 152ce9cb77..ffba38db2e 100644
--- a/src/openai/resources/evals/runs/runs.py
+++ b/src/openai/resources/evals/runs/runs.py
@@ -113,7 +113,11 @@ def create(
run_create_params.RunCreateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=RunCreateResponse,
)
@@ -149,7 +153,11 @@ def retrieve(
return self._get(
path_template("/evals/{eval_id}/runs/{run_id}", eval_id=eval_id, run_id=run_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=RunRetrieveResponse,
)
@@ -210,6 +218,7 @@ def list(
},
run_list_params.RunListParams,
),
+ security={"bearer_auth": True},
),
model=RunListResponse,
)
@@ -245,7 +254,11 @@ def delete(
return self._delete(
path_template("/evals/{eval_id}/runs/{run_id}", eval_id=eval_id, run_id=run_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=RunDeleteResponse,
)
@@ -281,7 +294,11 @@ def cancel(
return self._post(
path_template("/evals/{eval_id}/runs/{run_id}", eval_id=eval_id, run_id=run_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=RunCancelResponse,
)
@@ -366,7 +383,11 @@ async def create(
run_create_params.RunCreateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=RunCreateResponse,
)
@@ -402,7 +423,11 @@ async def retrieve(
return await self._get(
path_template("/evals/{eval_id}/runs/{run_id}", eval_id=eval_id, run_id=run_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=RunRetrieveResponse,
)
@@ -463,6 +488,7 @@ def list(
},
run_list_params.RunListParams,
),
+ security={"bearer_auth": True},
),
model=RunListResponse,
)
@@ -498,7 +524,11 @@ async def delete(
return await self._delete(
path_template("/evals/{eval_id}/runs/{run_id}", eval_id=eval_id, run_id=run_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=RunDeleteResponse,
)
@@ -534,7 +564,11 @@ async def cancel(
return await self._post(
path_template("/evals/{eval_id}/runs/{run_id}", eval_id=eval_id, run_id=run_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=RunCancelResponse,
)
diff --git a/src/openai/resources/files.py b/src/openai/resources/files.py
index b03f11b06a..868742f0f0 100644
--- a/src/openai/resources/files.py
+++ b/src/openai/resources/files.py
@@ -11,8 +11,9 @@
from .. import _legacy_response
from ..types import FilePurpose, file_list_params, file_create_params
+from .._files import deepcopy_with_paths
from .._types import Body, Omit, Query, Headers, NotGiven, FileTypes, omit, not_given
-from .._utils import extract_files, path_template, maybe_transform, deepcopy_minimal, async_maybe_transform
+from .._utils import extract_files, path_template, maybe_transform, async_maybe_transform
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import (
@@ -73,7 +74,8 @@ def create(
Individual files can be
up to 512 MB, and each project can store up to 2.5 TB of files in total. There
- is no organization-wide storage limit.
+ is no organization-wide storage limit. Uploads to this endpoint are rate-limited
+ to 1,000 requests per minute per authenticated user.
- The Assistants API supports files up to 2 million tokens and of specific file
types. See the
@@ -88,6 +90,12 @@ def create(
- The Batch API only supports `.jsonl` files up to 200 MB in size. The input
also has a specific required
[format](https://platform.openai.com/docs/api-reference/batch/request-input).
+ - For Retrieval or `file_search` ingestion, upload files here first. If you need
+ to attach multiple uploaded files to the same vector store, use
+ [`/vector_stores/{vector_store_id}/file_batches`](https://platform.openai.com/docs/api-reference/vector-stores-file-batches/createBatch)
+ instead of attaching them one by one. Vector store attachment has separate
+ limits from file upload, including 2,000 attached files per minute per
+ organization.
Please [contact us](https://help.openai.com/) if you need to increase these
storage limits.
@@ -116,12 +124,13 @@ def create(
timeout: Override the client-level default timeout for this request, in seconds
"""
- body = deepcopy_minimal(
+ body = deepcopy_with_paths(
{
"file": file,
"purpose": purpose,
"expires_after": expires_after,
- }
+ },
+ [["file"]],
)
files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
# It should be noted that the actual Content-Type header that will be
@@ -133,7 +142,11 @@ def create(
body=maybe_transform(body, file_create_params.FileCreateParams),
files=files,
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=FileObject,
)
@@ -166,7 +179,11 @@ def retrieve(
return self._get(
path_template("/files/{file_id}", file_id=file_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=FileObject,
)
@@ -228,6 +245,7 @@ def list(
},
file_list_params.FileListParams,
),
+ security={"bearer_auth": True},
),
model=FileObject,
)
@@ -260,7 +278,11 @@ def delete(
return self._delete(
path_template("/files/{file_id}", file_id=file_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=FileDeleted,
)
@@ -294,7 +316,11 @@ def content(
return self._get(
path_template("/files/{file_id}/content", file_id=file_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=_legacy_response.HttpxBinaryResponseContent,
)
@@ -328,7 +354,11 @@ def retrieve_content(
return self._get(
path_template("/files/{file_id}/content", file_id=file_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=str,
)
@@ -398,7 +428,8 @@ async def create(
Individual files can be
up to 512 MB, and each project can store up to 2.5 TB of files in total. There
- is no organization-wide storage limit.
+ is no organization-wide storage limit. Uploads to this endpoint are rate-limited
+ to 1,000 requests per minute per authenticated user.
- The Assistants API supports files up to 2 million tokens and of specific file
types. See the
@@ -413,6 +444,12 @@ async def create(
- The Batch API only supports `.jsonl` files up to 200 MB in size. The input
also has a specific required
[format](https://platform.openai.com/docs/api-reference/batch/request-input).
+ - For Retrieval or `file_search` ingestion, upload files here first. If you need
+ to attach multiple uploaded files to the same vector store, use
+ [`/vector_stores/{vector_store_id}/file_batches`](https://platform.openai.com/docs/api-reference/vector-stores-file-batches/createBatch)
+ instead of attaching them one by one. Vector store attachment has separate
+ limits from file upload, including 2,000 attached files per minute per
+ organization.
Please [contact us](https://help.openai.com/) if you need to increase these
storage limits.
@@ -441,12 +478,13 @@ async def create(
timeout: Override the client-level default timeout for this request, in seconds
"""
- body = deepcopy_minimal(
+ body = deepcopy_with_paths(
{
"file": file,
"purpose": purpose,
"expires_after": expires_after,
- }
+ },
+ [["file"]],
)
files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
# It should be noted that the actual Content-Type header that will be
@@ -458,7 +496,11 @@ async def create(
body=await async_maybe_transform(body, file_create_params.FileCreateParams),
files=files,
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=FileObject,
)
@@ -491,7 +533,11 @@ async def retrieve(
return await self._get(
path_template("/files/{file_id}", file_id=file_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=FileObject,
)
@@ -553,6 +599,7 @@ def list(
},
file_list_params.FileListParams,
),
+ security={"bearer_auth": True},
),
model=FileObject,
)
@@ -585,7 +632,11 @@ async def delete(
return await self._delete(
path_template("/files/{file_id}", file_id=file_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=FileDeleted,
)
@@ -619,7 +670,11 @@ async def content(
return await self._get(
path_template("/files/{file_id}/content", file_id=file_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=_legacy_response.HttpxBinaryResponseContent,
)
@@ -653,7 +708,11 @@ async def retrieve_content(
return await self._get(
path_template("/files/{file_id}/content", file_id=file_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=str,
)
diff --git a/src/openai/resources/fine_tuning/alpha/graders.py b/src/openai/resources/fine_tuning/alpha/graders.py
index e5d5dea5de..51c491b91f 100644
--- a/src/openai/resources/fine_tuning/alpha/graders.py
+++ b/src/openai/resources/fine_tuning/alpha/graders.py
@@ -88,7 +88,11 @@ def run(
grader_run_params.GraderRunParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=GraderRunResponse,
)
@@ -122,7 +126,11 @@ def validate(
"/fine_tuning/alpha/graders/validate",
body=maybe_transform({"grader": grader}, grader_validate_params.GraderValidateParams),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=GraderValidateResponse,
)
@@ -198,7 +206,11 @@ async def run(
grader_run_params.GraderRunParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=GraderRunResponse,
)
@@ -232,7 +244,11 @@ async def validate(
"/fine_tuning/alpha/graders/validate",
body=await async_maybe_transform({"grader": grader}, grader_validate_params.GraderValidateParams),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=GraderValidateResponse,
)
diff --git a/src/openai/resources/fine_tuning/checkpoints/permissions.py b/src/openai/resources/fine_tuning/checkpoints/permissions.py
index 15184e130b..49687c15cd 100644
--- a/src/openai/resources/fine_tuning/checkpoints/permissions.py
+++ b/src/openai/resources/fine_tuning/checkpoints/permissions.py
@@ -91,7 +91,11 @@ def create(
page=SyncPage[PermissionCreateResponse],
body=maybe_transform({"project_ids": project_ids}, permission_create_params.PermissionCreateParams),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
model=PermissionCreateResponse,
method="post",
@@ -159,6 +163,7 @@ def retrieve(
},
permission_retrieve_params.PermissionRetrieveParams,
),
+ security={"bearer_auth": True},
),
cast_to=PermissionRetrieveResponse,
)
@@ -225,6 +230,7 @@ def list(
},
permission_list_params.PermissionListParams,
),
+ security={"bearer_auth": True},
),
model=PermissionListResponse,
)
@@ -269,7 +275,11 @@ def delete(
permission_id=permission_id,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=PermissionDeleteResponse,
)
@@ -338,7 +348,11 @@ def create(
page=AsyncPage[PermissionCreateResponse],
body=maybe_transform({"project_ids": project_ids}, permission_create_params.PermissionCreateParams),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
model=PermissionCreateResponse,
method="post",
@@ -406,6 +420,7 @@ async def retrieve(
},
permission_retrieve_params.PermissionRetrieveParams,
),
+ security={"bearer_auth": True},
),
cast_to=PermissionRetrieveResponse,
)
@@ -472,6 +487,7 @@ def list(
},
permission_list_params.PermissionListParams,
),
+ security={"bearer_auth": True},
),
model=PermissionListResponse,
)
@@ -516,7 +532,11 @@ async def delete(
permission_id=permission_id,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=PermissionDeleteResponse,
)
diff --git a/src/openai/resources/fine_tuning/jobs/checkpoints.py b/src/openai/resources/fine_tuning/jobs/checkpoints.py
index 0f91a6218a..b679372386 100644
--- a/src/openai/resources/fine_tuning/jobs/checkpoints.py
+++ b/src/openai/resources/fine_tuning/jobs/checkpoints.py
@@ -89,6 +89,7 @@ def list(
},
checkpoint_list_params.CheckpointListParams,
),
+ security={"bearer_auth": True},
),
model=FineTuningJobCheckpoint,
)
@@ -162,6 +163,7 @@ def list(
},
checkpoint_list_params.CheckpointListParams,
),
+ security={"bearer_auth": True},
),
model=FineTuningJobCheckpoint,
)
diff --git a/src/openai/resources/fine_tuning/jobs/jobs.py b/src/openai/resources/fine_tuning/jobs/jobs.py
index a948b10349..e7e8903a84 100644
--- a/src/openai/resources/fine_tuning/jobs/jobs.py
+++ b/src/openai/resources/fine_tuning/jobs/jobs.py
@@ -175,7 +175,11 @@ def create(
job_create_params.JobCreateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=FineTuningJob,
)
@@ -210,7 +214,11 @@ def retrieve(
return self._get(
path_template("/fine_tuning/jobs/{fine_tuning_job_id}", fine_tuning_job_id=fine_tuning_job_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=FineTuningJob,
)
@@ -263,6 +271,7 @@ def list(
},
job_list_params.JobListParams,
),
+ security={"bearer_auth": True},
),
model=FineTuningJob,
)
@@ -295,7 +304,11 @@ def cancel(
return self._post(
path_template("/fine_tuning/jobs/{fine_tuning_job_id}/cancel", fine_tuning_job_id=fine_tuning_job_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=FineTuningJob,
)
@@ -346,6 +359,7 @@ def list_events(
},
job_list_events_params.JobListEventsParams,
),
+ security={"bearer_auth": True},
),
model=FineTuningJobEvent,
)
@@ -378,7 +392,11 @@ def pause(
return self._post(
path_template("/fine_tuning/jobs/{fine_tuning_job_id}/pause", fine_tuning_job_id=fine_tuning_job_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=FineTuningJob,
)
@@ -411,7 +429,11 @@ def resume(
return self._post(
path_template("/fine_tuning/jobs/{fine_tuning_job_id}/resume", fine_tuning_job_id=fine_tuning_job_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=FineTuningJob,
)
@@ -558,7 +580,11 @@ async def create(
job_create_params.JobCreateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=FineTuningJob,
)
@@ -593,7 +619,11 @@ async def retrieve(
return await self._get(
path_template("/fine_tuning/jobs/{fine_tuning_job_id}", fine_tuning_job_id=fine_tuning_job_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=FineTuningJob,
)
@@ -646,6 +676,7 @@ def list(
},
job_list_params.JobListParams,
),
+ security={"bearer_auth": True},
),
model=FineTuningJob,
)
@@ -678,7 +709,11 @@ async def cancel(
return await self._post(
path_template("/fine_tuning/jobs/{fine_tuning_job_id}/cancel", fine_tuning_job_id=fine_tuning_job_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=FineTuningJob,
)
@@ -729,6 +764,7 @@ def list_events(
},
job_list_events_params.JobListEventsParams,
),
+ security={"bearer_auth": True},
),
model=FineTuningJobEvent,
)
@@ -761,7 +797,11 @@ async def pause(
return await self._post(
path_template("/fine_tuning/jobs/{fine_tuning_job_id}/pause", fine_tuning_job_id=fine_tuning_job_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=FineTuningJob,
)
@@ -794,7 +834,11 @@ async def resume(
return await self._post(
path_template("/fine_tuning/jobs/{fine_tuning_job_id}/resume", fine_tuning_job_id=fine_tuning_job_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=FineTuningJob,
)
diff --git a/src/openai/resources/images.py b/src/openai/resources/images.py
index 6959c2aeff..ec4ca23aa2 100644
--- a/src/openai/resources/images.py
+++ b/src/openai/resources/images.py
@@ -9,8 +9,9 @@
from .. import _legacy_response
from ..types import image_edit_params, image_generate_params, image_create_variation_params
+from .._files import deepcopy_with_paths
from .._types import Body, Omit, Query, Headers, NotGiven, FileTypes, SequenceNotStr, omit, not_given
-from .._utils import extract_files, required_args, maybe_transform, deepcopy_minimal, async_maybe_transform
+from .._utils import extract_files, required_args, maybe_transform, async_maybe_transform
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
@@ -94,7 +95,7 @@ def create_variation(
timeout: Override the client-level default timeout for this request, in seconds
"""
- body = deepcopy_minimal(
+ body = deepcopy_with_paths(
{
"image": image,
"model": model,
@@ -102,7 +103,8 @@ def create_variation(
"response_format": response_format,
"size": size,
"user": user,
- }
+ },
+ [["image"]],
)
files = extract_files(cast(Mapping[str, object], body), paths=[["image"]])
# It should be noted that the actual Content-Type header that will be
@@ -114,7 +116,11 @@ def create_variation(
body=maybe_transform(body, image_create_variation_params.ImageCreateVariationParams),
files=files,
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=ImagesResponse,
)
@@ -484,7 +490,7 @@ def edit(
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ImagesResponse | Stream[ImageEditStreamEvent]:
- body = deepcopy_minimal(
+ body = deepcopy_with_paths(
{
"image": image,
"prompt": prompt,
@@ -501,7 +507,8 @@ def edit(
"size": size,
"stream": stream,
"user": user,
- }
+ },
+ [["image"], ["image", ""], ["mask"]],
)
files = extract_files(cast(Mapping[str, object], body), paths=[["image"], ["image", ""], ["mask"]])
# It should be noted that the actual Content-Type header that will be
@@ -516,7 +523,11 @@ def edit(
),
files=files,
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=ImagesResponse,
stream=stream or False,
@@ -908,7 +919,11 @@ def generate(
else image_generate_params.ImageGenerateParamsNonStreaming,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=ImagesResponse,
stream=stream or False,
@@ -986,7 +1001,7 @@ async def create_variation(
timeout: Override the client-level default timeout for this request, in seconds
"""
- body = deepcopy_minimal(
+ body = deepcopy_with_paths(
{
"image": image,
"model": model,
@@ -994,7 +1009,8 @@ async def create_variation(
"response_format": response_format,
"size": size,
"user": user,
- }
+ },
+ [["image"]],
)
files = extract_files(cast(Mapping[str, object], body), paths=[["image"]])
# It should be noted that the actual Content-Type header that will be
@@ -1006,7 +1022,11 @@ async def create_variation(
body=await async_maybe_transform(body, image_create_variation_params.ImageCreateVariationParams),
files=files,
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=ImagesResponse,
)
@@ -1376,7 +1396,7 @@ async def edit(
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ImagesResponse | AsyncStream[ImageEditStreamEvent]:
- body = deepcopy_minimal(
+ body = deepcopy_with_paths(
{
"image": image,
"prompt": prompt,
@@ -1393,7 +1413,8 @@ async def edit(
"size": size,
"stream": stream,
"user": user,
- }
+ },
+ [["image"], ["image", ""], ["mask"]],
)
files = extract_files(cast(Mapping[str, object], body), paths=[["image"], ["image", ""], ["mask"]])
# It should be noted that the actual Content-Type header that will be
@@ -1408,7 +1429,11 @@ async def edit(
),
files=files,
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=ImagesResponse,
stream=stream or False,
@@ -1800,7 +1825,11 @@ async def generate(
else image_generate_params.ImageGenerateParamsNonStreaming,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=ImagesResponse,
stream=stream or False,
diff --git a/src/openai/resources/models.py b/src/openai/resources/models.py
index a1fe0d395e..a68fd83360 100644
--- a/src/openai/resources/models.py
+++ b/src/openai/resources/models.py
@@ -72,7 +72,11 @@ def retrieve(
return self._get(
path_template("/models/{model}", model=model),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Model,
)
@@ -95,7 +99,11 @@ def list(
"/models",
page=SyncPage[Model],
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
model=Model,
)
@@ -130,7 +138,11 @@ def delete(
return self._delete(
path_template("/models/{model}", model=model),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=ModelDeleted,
)
@@ -187,7 +199,11 @@ async def retrieve(
return await self._get(
path_template("/models/{model}", model=model),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Model,
)
@@ -210,7 +226,11 @@ def list(
"/models",
page=AsyncPage[Model],
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
model=Model,
)
@@ -245,7 +265,11 @@ async def delete(
return await self._delete(
path_template("/models/{model}", model=model),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=ModelDeleted,
)
diff --git a/src/openai/resources/moderations.py b/src/openai/resources/moderations.py
index 0b9a2d23c7..5ef4efeaa5 100644
--- a/src/openai/resources/moderations.py
+++ b/src/openai/resources/moderations.py
@@ -89,7 +89,11 @@ def create(
moderation_create_params.ModerationCreateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=ModerationCreateResponse,
)
@@ -163,7 +167,11 @@ async def create(
moderation_create_params.ModerationCreateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=ModerationCreateResponse,
)
diff --git a/src/openai/resources/realtime/calls.py b/src/openai/resources/realtime/calls.py
index 8fa2569a96..7a4fcc0110 100644
--- a/src/openai/resources/realtime/calls.py
+++ b/src/openai/resources/realtime/calls.py
@@ -98,7 +98,11 @@ def create(
call_create_params.CallCreateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=_legacy_response.HttpxBinaryResponseContent,
)
@@ -250,7 +254,11 @@ def accept(
call_accept_params.CallAcceptParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=NoneType,
)
@@ -284,7 +292,11 @@ def hangup(
return self._post(
path_template("/realtime/calls/{call_id}/hangup", call_id=call_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=NoneType,
)
@@ -323,7 +335,11 @@ def refer(
path_template("/realtime/calls/{call_id}/refer", call_id=call_id),
body=maybe_transform({"target_uri": target_uri}, call_refer_params.CallReferParams),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=NoneType,
)
@@ -362,7 +378,11 @@ def reject(
path_template("/realtime/calls/{call_id}/reject", call_id=call_id),
body=maybe_transform({"status_code": status_code}, call_reject_params.CallRejectParams),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=NoneType,
)
@@ -428,7 +448,11 @@ async def create(
call_create_params.CallCreateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=_legacy_response.HttpxBinaryResponseContent,
)
@@ -580,7 +604,11 @@ async def accept(
call_accept_params.CallAcceptParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=NoneType,
)
@@ -614,7 +642,11 @@ async def hangup(
return await self._post(
path_template("/realtime/calls/{call_id}/hangup", call_id=call_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=NoneType,
)
@@ -653,7 +685,11 @@ async def refer(
path_template("/realtime/calls/{call_id}/refer", call_id=call_id),
body=await async_maybe_transform({"target_uri": target_uri}, call_refer_params.CallReferParams),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=NoneType,
)
@@ -692,7 +728,11 @@ async def reject(
path_template("/realtime/calls/{call_id}/reject", call_id=call_id),
body=await async_maybe_transform({"status_code": status_code}, call_reject_params.CallRejectParams),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=NoneType,
)
diff --git a/src/openai/resources/realtime/client_secrets.py b/src/openai/resources/realtime/client_secrets.py
index d9947dd7e8..7478e35e27 100644
--- a/src/openai/resources/realtime/client_secrets.py
+++ b/src/openai/resources/realtime/client_secrets.py
@@ -93,7 +93,11 @@ def create(
client_secret_create_params.ClientSecretCreateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=ClientSecretCreateResponse,
)
@@ -175,7 +179,11 @@ async def create(
client_secret_create_params.ClientSecretCreateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=ClientSecretCreateResponse,
)
diff --git a/src/openai/resources/responses/input_items.py b/src/openai/resources/responses/input_items.py
index b9ae5eeeae..1d3ed62543 100644
--- a/src/openai/resources/responses/input_items.py
+++ b/src/openai/resources/responses/input_items.py
@@ -101,6 +101,7 @@ def list(
},
input_item_list_params.InputItemListParams,
),
+ security={"bearer_auth": True},
),
model=cast(Any, ResponseItem), # Union types cannot be passed in as arguments in the type system
)
@@ -185,6 +186,7 @@ def list(
},
input_item_list_params.InputItemListParams,
),
+ security={"bearer_auth": True},
),
model=cast(Any, ResponseItem), # Union types cannot be passed in as arguments in the type system
)
diff --git a/src/openai/resources/responses/input_tokens.py b/src/openai/resources/responses/input_tokens.py
index 0056727fa0..fae71fd59c 100644
--- a/src/openai/resources/responses/input_tokens.py
+++ b/src/openai/resources/responses/input_tokens.py
@@ -143,7 +143,11 @@ def count(
input_token_count_params.InputTokenCountParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=InputTokenCountResponse,
)
@@ -269,7 +273,11 @@ async def count(
input_token_count_params.InputTokenCountParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=InputTokenCountResponse,
)
diff --git a/src/openai/resources/responses/responses.py b/src/openai/resources/responses/responses.py
index 48705098cc..c0f9855bcf 100644
--- a/src/openai/resources/responses/responses.py
+++ b/src/openai/resources/responses/responses.py
@@ -953,7 +953,11 @@ def create(
else response_create_params.ResponseCreateParamsNonStreaming,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Response,
stream=stream or False,
@@ -1271,6 +1275,7 @@ def parser(raw_response: Response) -> ParsedResponse[TextFormatT]:
extra_body=extra_body,
timeout=timeout,
post_parser=parser,
+ security={"bearer_auth": True},
),
# we turn the `Response` instance into a `ParsedResponse`
# in the `parser` function above
@@ -1505,6 +1510,7 @@ def retrieve(
},
response_retrieve_params.ResponseRetrieveParams,
),
+ security={"bearer_auth": True},
),
cast_to=Response,
stream=stream or False,
@@ -1540,7 +1546,11 @@ def delete(
return self._delete(
path_template("/responses/{response_id}", response_id=response_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=NoneType,
)
@@ -1576,7 +1586,11 @@ def cancel(
return self._post(
path_template("/responses/{response_id}/cancel", response_id=response_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Response,
)
@@ -1748,7 +1762,11 @@ def compact(
response_compact_params.ResponseCompactParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=CompactedResponse,
)
@@ -2634,7 +2652,11 @@ async def create(
else response_create_params.ResponseCreateParamsNonStreaming,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Response,
stream=stream or False,
@@ -2956,6 +2978,7 @@ def parser(raw_response: Response) -> ParsedResponse[TextFormatT]:
extra_body=extra_body,
timeout=timeout,
post_parser=parser,
+ security={"bearer_auth": True},
),
# we turn the `Response` instance into a `ParsedResponse`
# in the `parser` function above
@@ -3190,6 +3213,7 @@ async def retrieve(
},
response_retrieve_params.ResponseRetrieveParams,
),
+ security={"bearer_auth": True},
),
cast_to=Response,
stream=stream or False,
@@ -3225,7 +3249,11 @@ async def delete(
return await self._delete(
path_template("/responses/{response_id}", response_id=response_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=NoneType,
)
@@ -3261,7 +3289,11 @@ async def cancel(
return await self._post(
path_template("/responses/{response_id}/cancel", response_id=response_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Response,
)
@@ -3433,7 +3465,11 @@ async def compact(
response_compact_params.ResponseCompactParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=CompactedResponse,
)
diff --git a/src/openai/resources/skills/content.py b/src/openai/resources/skills/content.py
index 96b237177e..d0639a5a52 100644
--- a/src/openai/resources/skills/content.py
+++ b/src/openai/resources/skills/content.py
@@ -69,7 +69,11 @@ def retrieve(
return self._get(
path_template("/skills/{skill_id}/content", skill_id=skill_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=_legacy_response.HttpxBinaryResponseContent,
)
@@ -124,7 +128,11 @@ async def retrieve(
return await self._get(
path_template("/skills/{skill_id}/content", skill_id=skill_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=_legacy_response.HttpxBinaryResponseContent,
)
diff --git a/src/openai/resources/skills/skills.py b/src/openai/resources/skills/skills.py
index f44fb24607..aeea43ca3e 100644
--- a/src/openai/resources/skills/skills.py
+++ b/src/openai/resources/skills/skills.py
@@ -17,6 +17,7 @@
ContentWithStreamingResponse,
AsyncContentWithStreamingResponse,
)
+from ..._files import deepcopy_with_paths
from ..._types import (
Body,
Omit,
@@ -28,7 +29,7 @@
omit,
not_given,
)
-from ..._utils import extract_files, path_template, maybe_transform, deepcopy_minimal, async_maybe_transform
+from ..._utils import extract_files, path_template, maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
@@ -101,7 +102,7 @@ def create(
timeout: Override the client-level default timeout for this request, in seconds
"""
- body = deepcopy_minimal({"files": files})
+ body = deepcopy_with_paths({"files": files}, [["files", ""], ["files"]])
extracted_files = extract_files(cast(Mapping[str, object], body), paths=[["files", ""], ["files"]])
if extracted_files:
# It should be noted that the actual Content-Type header that will be
@@ -113,7 +114,11 @@ def create(
body=maybe_transform(body, skill_create_params.SkillCreateParams),
files=extracted_files,
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Skill,
)
@@ -146,7 +151,11 @@ def retrieve(
return self._get(
path_template("/skills/{skill_id}", skill_id=skill_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Skill,
)
@@ -183,7 +192,11 @@ def update(
path_template("/skills/{skill_id}", skill_id=skill_id),
body=maybe_transform({"default_version": default_version}, skill_update_params.SkillUpdateParams),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Skill,
)
@@ -236,6 +249,7 @@ def list(
},
skill_list_params.SkillListParams,
),
+ security={"bearer_auth": True},
),
model=Skill,
)
@@ -268,7 +282,11 @@ def delete(
return self._delete(
path_template("/skills/{skill_id}", skill_id=skill_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=DeletedSkill,
)
@@ -327,7 +345,7 @@ async def create(
timeout: Override the client-level default timeout for this request, in seconds
"""
- body = deepcopy_minimal({"files": files})
+ body = deepcopy_with_paths({"files": files}, [["files", ""], ["files"]])
extracted_files = extract_files(cast(Mapping[str, object], body), paths=[["files", ""], ["files"]])
if extracted_files:
# It should be noted that the actual Content-Type header that will be
@@ -339,7 +357,11 @@ async def create(
body=await async_maybe_transform(body, skill_create_params.SkillCreateParams),
files=extracted_files,
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Skill,
)
@@ -372,7 +394,11 @@ async def retrieve(
return await self._get(
path_template("/skills/{skill_id}", skill_id=skill_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Skill,
)
@@ -411,7 +437,11 @@ async def update(
{"default_version": default_version}, skill_update_params.SkillUpdateParams
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Skill,
)
@@ -464,6 +494,7 @@ def list(
},
skill_list_params.SkillListParams,
),
+ security={"bearer_auth": True},
),
model=Skill,
)
@@ -496,7 +527,11 @@ async def delete(
return await self._delete(
path_template("/skills/{skill_id}", skill_id=skill_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=DeletedSkill,
)
diff --git a/src/openai/resources/skills/versions/content.py b/src/openai/resources/skills/versions/content.py
index 2f54586718..173f94e4e1 100644
--- a/src/openai/resources/skills/versions/content.py
+++ b/src/openai/resources/skills/versions/content.py
@@ -74,7 +74,11 @@ def retrieve(
return self._get(
path_template("/skills/{skill_id}/versions/{version}/content", skill_id=skill_id, version=version),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=_legacy_response.HttpxBinaryResponseContent,
)
@@ -134,7 +138,11 @@ async def retrieve(
return await self._get(
path_template("/skills/{skill_id}/versions/{version}/content", skill_id=skill_id, version=version),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=_legacy_response.HttpxBinaryResponseContent,
)
diff --git a/src/openai/resources/skills/versions/versions.py b/src/openai/resources/skills/versions/versions.py
index 8b48075cc3..d688d2917c 100644
--- a/src/openai/resources/skills/versions/versions.py
+++ b/src/openai/resources/skills/versions/versions.py
@@ -16,6 +16,7 @@
ContentWithStreamingResponse,
AsyncContentWithStreamingResponse,
)
+from ...._files import deepcopy_with_paths
from ...._types import (
Body,
Omit,
@@ -27,7 +28,7 @@
omit,
not_given,
)
-from ...._utils import extract_files, path_template, maybe_transform, deepcopy_minimal, async_maybe_transform
+from ...._utils import extract_files, path_template, maybe_transform, async_maybe_transform
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
@@ -95,11 +96,12 @@ def create(
"""
if not skill_id:
raise ValueError(f"Expected a non-empty value for `skill_id` but received {skill_id!r}")
- body = deepcopy_minimal(
+ body = deepcopy_with_paths(
{
"default": default,
"files": files,
- }
+ },
+ [["files", ""], ["files"]],
)
extracted_files = extract_files(cast(Mapping[str, object], body), paths=[["files", ""], ["files"]])
if extracted_files:
@@ -112,7 +114,11 @@ def create(
body=maybe_transform(body, version_create_params.VersionCreateParams),
files=extracted_files,
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=SkillVersion,
)
@@ -150,7 +156,11 @@ def retrieve(
return self._get(
path_template("/skills/{skill_id}/versions/{version}", skill_id=skill_id, version=version),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=SkillVersion,
)
@@ -205,6 +215,7 @@ def list(
},
version_list_params.VersionListParams,
),
+ security={"bearer_auth": True},
),
model=SkillVersion,
)
@@ -242,7 +253,11 @@ def delete(
return self._delete(
path_template("/skills/{skill_id}/versions/{version}", skill_id=skill_id, version=version),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=DeletedSkillVersion,
)
@@ -303,11 +318,12 @@ async def create(
"""
if not skill_id:
raise ValueError(f"Expected a non-empty value for `skill_id` but received {skill_id!r}")
- body = deepcopy_minimal(
+ body = deepcopy_with_paths(
{
"default": default,
"files": files,
- }
+ },
+ [["files", ""], ["files"]],
)
extracted_files = extract_files(cast(Mapping[str, object], body), paths=[["files", ""], ["files"]])
if extracted_files:
@@ -320,7 +336,11 @@ async def create(
body=await async_maybe_transform(body, version_create_params.VersionCreateParams),
files=extracted_files,
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=SkillVersion,
)
@@ -358,7 +378,11 @@ async def retrieve(
return await self._get(
path_template("/skills/{skill_id}/versions/{version}", skill_id=skill_id, version=version),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=SkillVersion,
)
@@ -413,6 +437,7 @@ def list(
},
version_list_params.VersionListParams,
),
+ security={"bearer_auth": True},
),
model=SkillVersion,
)
@@ -450,7 +475,11 @@ async def delete(
return await self._delete(
path_template("/skills/{skill_id}/versions/{version}", skill_id=skill_id, version=version),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=DeletedSkillVersion,
)
diff --git a/src/openai/resources/uploads/parts.py b/src/openai/resources/uploads/parts.py
index cf09eea75e..a0c1e15223 100644
--- a/src/openai/resources/uploads/parts.py
+++ b/src/openai/resources/uploads/parts.py
@@ -7,8 +7,9 @@
import httpx
from ... import _legacy_response
+from ..._files import deepcopy_with_paths
from ..._types import Body, Query, Headers, NotGiven, FileTypes, not_given
-from ..._utils import extract_files, path_template, maybe_transform, deepcopy_minimal, async_maybe_transform
+from ..._utils import extract_files, path_template, maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
@@ -79,7 +80,7 @@ def create(
"""
if not upload_id:
raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}")
- body = deepcopy_minimal({"data": data})
+ body = deepcopy_with_paths({"data": data}, [["data"]])
files = extract_files(cast(Mapping[str, object], body), paths=[["data"]])
# It should be noted that the actual Content-Type header that will be
# sent to the server will contain a `boundary` parameter, e.g.
@@ -90,7 +91,11 @@ def create(
body=maybe_transform(body, part_create_params.PartCreateParams),
files=files,
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=UploadPart,
)
@@ -156,7 +161,7 @@ async def create(
"""
if not upload_id:
raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}")
- body = deepcopy_minimal({"data": data})
+ body = deepcopy_with_paths({"data": data}, [["data"]])
files = extract_files(cast(Mapping[str, object], body), paths=[["data"]])
# It should be noted that the actual Content-Type header that will be
# sent to the server will contain a `boundary` parameter, e.g.
@@ -167,7 +172,11 @@ async def create(
body=await async_maybe_transform(body, part_create_params.PartCreateParams),
files=files,
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=UploadPart,
)
diff --git a/src/openai/resources/uploads/uploads.py b/src/openai/resources/uploads/uploads.py
index 7778e51539..6c8c347f97 100644
--- a/src/openai/resources/uploads/uploads.py
+++ b/src/openai/resources/uploads/uploads.py
@@ -242,7 +242,11 @@ def create(
upload_create_params.UploadCreateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Upload,
)
@@ -278,7 +282,11 @@ def cancel(
return self._post(
path_template("/uploads/{upload_id}/cancel", upload_id=upload_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Upload,
)
@@ -339,7 +347,11 @@ def complete(
upload_complete_params.UploadCompleteParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Upload,
)
@@ -558,7 +570,11 @@ async def create(
upload_create_params.UploadCreateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Upload,
)
@@ -594,7 +610,11 @@ async def cancel(
return await self._post(
path_template("/uploads/{upload_id}/cancel", upload_id=upload_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Upload,
)
@@ -655,7 +675,11 @@ async def complete(
upload_complete_params.UploadCompleteParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Upload,
)
diff --git a/src/openai/resources/vector_stores/file_batches.py b/src/openai/resources/vector_stores/file_batches.py
index 1ffd7642c0..4bde1a4aa6 100644
--- a/src/openai/resources/vector_stores/file_batches.py
+++ b/src/openai/resources/vector_stores/file_batches.py
@@ -113,7 +113,11 @@ def create(
file_batch_create_params.FileBatchCreateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=VectorStoreFileBatch,
)
@@ -154,7 +158,11 @@ def retrieve(
batch_id=batch_id,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=VectorStoreFileBatch,
)
@@ -197,7 +205,11 @@ def cancel(
batch_id=batch_id,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=VectorStoreFileBatch,
)
@@ -311,6 +323,7 @@ def list_files(
},
file_batch_list_files_params.FileBatchListFilesParams,
),
+ security={"bearer_auth": True},
),
model=VectorStoreFile,
)
@@ -488,7 +501,11 @@ async def create(
file_batch_create_params.FileBatchCreateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=VectorStoreFileBatch,
)
@@ -529,7 +546,11 @@ async def retrieve(
batch_id=batch_id,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=VectorStoreFileBatch,
)
@@ -572,7 +593,11 @@ async def cancel(
batch_id=batch_id,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=VectorStoreFileBatch,
)
@@ -686,6 +711,7 @@ def list_files(
},
file_batch_list_files_params.FileBatchListFilesParams,
),
+ security={"bearer_auth": True},
),
model=VectorStoreFile,
)
diff --git a/src/openai/resources/vector_stores/files.py b/src/openai/resources/vector_stores/files.py
index 3ef6137267..b7e1ea9f92 100644
--- a/src/openai/resources/vector_stores/files.py
+++ b/src/openai/resources/vector_stores/files.py
@@ -102,7 +102,11 @@ def create(
file_create_params.FileCreateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=VectorStoreFile,
)
@@ -141,7 +145,11 @@ def retrieve(
"/vector_stores/{vector_store_id}/files/{file_id}", vector_store_id=vector_store_id, file_id=file_id
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=VectorStoreFile,
)
@@ -188,7 +196,11 @@ def update(
),
body=maybe_transform({"attributes": attributes}, file_update_params.FileUpdateParams),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=VectorStoreFile,
)
@@ -260,6 +272,7 @@ def list(
},
file_list_params.FileListParams,
),
+ security={"bearer_auth": True},
),
model=VectorStoreFile,
)
@@ -302,7 +315,11 @@ def delete(
"/vector_stores/{vector_store_id}/files/{file_id}", vector_store_id=vector_store_id, file_id=file_id
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=VectorStoreFileDeleted,
)
@@ -452,7 +469,11 @@ def content(
),
page=SyncPage[FileContentResponse],
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
model=FileContentResponse,
)
@@ -535,7 +556,11 @@ async def create(
file_create_params.FileCreateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=VectorStoreFile,
)
@@ -574,7 +599,11 @@ async def retrieve(
"/vector_stores/{vector_store_id}/files/{file_id}", vector_store_id=vector_store_id, file_id=file_id
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=VectorStoreFile,
)
@@ -621,7 +650,11 @@ async def update(
),
body=await async_maybe_transform({"attributes": attributes}, file_update_params.FileUpdateParams),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=VectorStoreFile,
)
@@ -693,6 +726,7 @@ def list(
},
file_list_params.FileListParams,
),
+ security={"bearer_auth": True},
),
model=VectorStoreFile,
)
@@ -735,7 +769,11 @@ async def delete(
"/vector_stores/{vector_store_id}/files/{file_id}", vector_store_id=vector_store_id, file_id=file_id
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=VectorStoreFileDeleted,
)
@@ -887,7 +925,11 @@ def content(
),
page=AsyncPage[FileContentResponse],
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
model=FileContentResponse,
)
diff --git a/src/openai/resources/vector_stores/vector_stores.py b/src/openai/resources/vector_stores/vector_stores.py
index 7fa2ad5274..e4c5d1440c 100644
--- a/src/openai/resources/vector_stores/vector_stores.py
+++ b/src/openai/resources/vector_stores/vector_stores.py
@@ -139,7 +139,11 @@ def create(
vector_store_create_params.VectorStoreCreateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=VectorStore,
)
@@ -173,7 +177,11 @@ def retrieve(
return self._get(
path_template("/vector_stores/{vector_store_id}", vector_store_id=vector_store_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=VectorStore,
)
@@ -229,7 +237,11 @@ def update(
vector_store_update_params.VectorStoreUpdateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=VectorStore,
)
@@ -295,6 +307,7 @@ def list(
},
vector_store_list_params.VectorStoreListParams,
),
+ security={"bearer_auth": True},
),
model=VectorStore,
)
@@ -328,7 +341,11 @@ def delete(
return self._delete(
path_template("/vector_stores/{vector_store_id}", vector_store_id=vector_store_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=VectorStoreDeleted,
)
@@ -390,7 +407,11 @@ def search(
vector_store_search_params.VectorStoreSearchParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
model=VectorStoreSearchResponse,
method="post",
@@ -489,7 +510,11 @@ async def create(
vector_store_create_params.VectorStoreCreateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=VectorStore,
)
@@ -523,7 +548,11 @@ async def retrieve(
return await self._get(
path_template("/vector_stores/{vector_store_id}", vector_store_id=vector_store_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=VectorStore,
)
@@ -579,7 +608,11 @@ async def update(
vector_store_update_params.VectorStoreUpdateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=VectorStore,
)
@@ -645,6 +678,7 @@ def list(
},
vector_store_list_params.VectorStoreListParams,
),
+ security={"bearer_auth": True},
),
model=VectorStore,
)
@@ -678,7 +712,11 @@ async def delete(
return await self._delete(
path_template("/vector_stores/{vector_store_id}", vector_store_id=vector_store_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=VectorStoreDeleted,
)
@@ -740,7 +778,11 @@ def search(
vector_store_search_params.VectorStoreSearchParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
model=VectorStoreSearchResponse,
method="post",
diff --git a/src/openai/resources/videos.py b/src/openai/resources/videos.py
index a006e64705..e9bad80afe 100644
--- a/src/openai/resources/videos.py
+++ b/src/openai/resources/videos.py
@@ -19,8 +19,9 @@
video_create_character_params,
video_download_content_params,
)
+from .._files import deepcopy_with_paths
from .._types import Body, Omit, Query, Headers, NotGiven, FileTypes, omit, not_given
-from .._utils import extract_files, path_template, maybe_transform, deepcopy_minimal, async_maybe_transform
+from .._utils import extract_files, path_template, maybe_transform, async_maybe_transform
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import (
@@ -104,14 +105,15 @@ def create(
timeout: Override the client-level default timeout for this request, in seconds
"""
- body = deepcopy_minimal(
+ body = deepcopy_with_paths(
{
"prompt": prompt,
"input_reference": input_reference,
"model": model,
"seconds": seconds,
"size": size,
- }
+ },
+ [["input_reference"]],
)
files = extract_files(cast(Mapping[str, object], body), paths=[["input_reference"]])
# It should be noted that the actual Content-Type header that will be
@@ -123,7 +125,11 @@ def create(
body=maybe_transform(body, video_create_params.VideoCreateParams),
files=files,
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Video,
)
@@ -229,7 +235,11 @@ def retrieve(
return self._get(
path_template("/videos/{video_id}", video_id=video_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Video,
)
@@ -282,6 +292,7 @@ def list(
},
video_list_params.VideoListParams,
),
+ security={"bearer_auth": True},
),
model=Video,
)
@@ -314,7 +325,11 @@ def delete(
return self._delete(
path_template("/videos/{video_id}", video_id=video_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=VideoDeleteResponse,
)
@@ -347,11 +362,12 @@ def create_character(
timeout: Override the client-level default timeout for this request, in seconds
"""
- body = deepcopy_minimal(
+ body = deepcopy_with_paths(
{
"name": name,
"video": video,
- }
+ },
+ [["video"]],
)
files = extract_files(cast(Mapping[str, object], body), paths=[["video"]])
# It should be noted that the actual Content-Type header that will be
@@ -363,7 +379,11 @@ def create_character(
body=maybe_transform(body, video_create_character_params.VideoCreateCharacterParams),
files=files,
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=VideoCreateCharacterResponse,
)
@@ -407,6 +427,7 @@ def download_content(
extra_body=extra_body,
timeout=timeout,
query=maybe_transform({"variant": variant}, video_download_content_params.VideoDownloadContentParams),
+ security={"bearer_auth": True},
),
cast_to=_legacy_response.HttpxBinaryResponseContent,
)
@@ -440,11 +461,12 @@ def edit(
timeout: Override the client-level default timeout for this request, in seconds
"""
- body = deepcopy_minimal(
+ body = deepcopy_with_paths(
{
"prompt": prompt,
"video": video,
- }
+ },
+ [["video"]],
)
files = extract_files(cast(Mapping[str, object], body), paths=[["video"]])
# It should be noted that the actual Content-Type header that will be
@@ -456,7 +478,11 @@ def edit(
body=maybe_transform(body, video_edit_params.VideoEditParams),
files=files,
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Video,
)
@@ -493,12 +519,13 @@ def extend(
timeout: Override the client-level default timeout for this request, in seconds
"""
- body = deepcopy_minimal(
+ body = deepcopy_with_paths(
{
"prompt": prompt,
"seconds": seconds,
"video": video,
- }
+ },
+ [["video"]],
)
files = extract_files(cast(Mapping[str, object], body), paths=[["video"]])
# It should be noted that the actual Content-Type header that will be
@@ -510,7 +537,11 @@ def extend(
body=maybe_transform(body, video_extend_params.VideoExtendParams),
files=files,
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Video,
)
@@ -543,7 +574,11 @@ def get_character(
return self._get(
path_template("/videos/characters/{character_id}", character_id=character_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=VideoGetCharacterResponse,
)
@@ -580,7 +615,11 @@ def remix(
path_template("/videos/{video_id}/remix", video_id=video_id),
body=maybe_transform({"prompt": prompt}, video_remix_params.VideoRemixParams),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Video,
)
@@ -645,14 +684,15 @@ async def create(
timeout: Override the client-level default timeout for this request, in seconds
"""
- body = deepcopy_minimal(
+ body = deepcopy_with_paths(
{
"prompt": prompt,
"input_reference": input_reference,
"model": model,
"seconds": seconds,
"size": size,
- }
+ },
+ [["input_reference"]],
)
files = extract_files(cast(Mapping[str, object], body), paths=[["input_reference"]])
# It should be noted that the actual Content-Type header that will be
@@ -664,7 +704,11 @@ async def create(
body=await async_maybe_transform(body, video_create_params.VideoCreateParams),
files=files,
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Video,
)
@@ -770,7 +814,11 @@ async def retrieve(
return await self._get(
path_template("/videos/{video_id}", video_id=video_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Video,
)
@@ -823,6 +871,7 @@ def list(
},
video_list_params.VideoListParams,
),
+ security={"bearer_auth": True},
),
model=Video,
)
@@ -855,7 +904,11 @@ async def delete(
return await self._delete(
path_template("/videos/{video_id}", video_id=video_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=VideoDeleteResponse,
)
@@ -888,11 +941,12 @@ async def create_character(
timeout: Override the client-level default timeout for this request, in seconds
"""
- body = deepcopy_minimal(
+ body = deepcopy_with_paths(
{
"name": name,
"video": video,
- }
+ },
+ [["video"]],
)
files = extract_files(cast(Mapping[str, object], body), paths=[["video"]])
# It should be noted that the actual Content-Type header that will be
@@ -904,7 +958,11 @@ async def create_character(
body=await async_maybe_transform(body, video_create_character_params.VideoCreateCharacterParams),
files=files,
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=VideoCreateCharacterResponse,
)
@@ -950,6 +1008,7 @@ async def download_content(
query=await async_maybe_transform(
{"variant": variant}, video_download_content_params.VideoDownloadContentParams
),
+ security={"bearer_auth": True},
),
cast_to=_legacy_response.HttpxBinaryResponseContent,
)
@@ -983,11 +1042,12 @@ async def edit(
timeout: Override the client-level default timeout for this request, in seconds
"""
- body = deepcopy_minimal(
+ body = deepcopy_with_paths(
{
"prompt": prompt,
"video": video,
- }
+ },
+ [["video"]],
)
files = extract_files(cast(Mapping[str, object], body), paths=[["video"]])
# It should be noted that the actual Content-Type header that will be
@@ -999,7 +1059,11 @@ async def edit(
body=await async_maybe_transform(body, video_edit_params.VideoEditParams),
files=files,
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Video,
)
@@ -1036,12 +1100,13 @@ async def extend(
timeout: Override the client-level default timeout for this request, in seconds
"""
- body = deepcopy_minimal(
+ body = deepcopy_with_paths(
{
"prompt": prompt,
"seconds": seconds,
"video": video,
- }
+ },
+ [["video"]],
)
files = extract_files(cast(Mapping[str, object], body), paths=[["video"]])
# It should be noted that the actual Content-Type header that will be
@@ -1053,7 +1118,11 @@ async def extend(
body=await async_maybe_transform(body, video_extend_params.VideoExtendParams),
files=files,
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Video,
)
@@ -1086,7 +1155,11 @@ async def get_character(
return await self._get(
path_template("/videos/characters/{character_id}", character_id=character_id),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=VideoGetCharacterResponse,
)
@@ -1123,7 +1196,11 @@ async def remix(
path_template("/videos/{video_id}/remix", video_id=video_id),
body=await async_maybe_transform({"prompt": prompt}, video_remix_params.VideoRemixParams),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ security={"bearer_auth": True},
),
cast_to=Video,
)
diff --git a/src/openai/types/admin/__init__.py b/src/openai/types/admin/__init__.py
new file mode 100644
index 0000000000..f8ee8b14b1
--- /dev/null
+++ b/src/openai/types/admin/__init__.py
@@ -0,0 +1,3 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
diff --git a/src/openai/types/admin/organization/__init__.py b/src/openai/types/admin/organization/__init__.py
new file mode 100644
index 0000000000..dbb11795e5
--- /dev/null
+++ b/src/openai/types/admin/organization/__init__.py
@@ -0,0 +1,67 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .role import Role as Role
+from .group import Group as Group
+from .invite import Invite as Invite
+from .project import Project as Project
+from .certificate import Certificate as Certificate
+from .admin_api_key import AdminAPIKey as AdminAPIKey
+from .role_list_params import RoleListParams as RoleListParams
+from .user_list_params import UserListParams as UserListParams
+from .group_list_params import GroupListParams as GroupListParams
+from .organization_user import OrganizationUser as OrganizationUser
+from .invite_list_params import InviteListParams as InviteListParams
+from .role_create_params import RoleCreateParams as RoleCreateParams
+from .role_update_params import RoleUpdateParams as RoleUpdateParams
+from .usage_costs_params import UsageCostsParams as UsageCostsParams
+from .user_update_params import UserUpdateParams as UserUpdateParams
+from .group_create_params import GroupCreateParams as GroupCreateParams
+from .group_update_params import GroupUpdateParams as GroupUpdateParams
+from .project_list_params import ProjectListParams as ProjectListParams
+from .usage_images_params import UsageImagesParams as UsageImagesParams
+from .invite_create_params import InviteCreateParams as InviteCreateParams
+from .role_delete_response import RoleDeleteResponse as RoleDeleteResponse
+from .usage_costs_response import UsageCostsResponse as UsageCostsResponse
+from .user_delete_response import UserDeleteResponse as UserDeleteResponse
+from .audit_log_list_params import AuditLogListParams as AuditLogListParams
+from .group_delete_response import GroupDeleteResponse as GroupDeleteResponse
+from .group_update_response import GroupUpdateResponse as GroupUpdateResponse
+from .project_create_params import ProjectCreateParams as ProjectCreateParams
+from .project_update_params import ProjectUpdateParams as ProjectUpdateParams
+from .usage_images_response import UsageImagesResponse as UsageImagesResponse
+from .invite_delete_response import InviteDeleteResponse as InviteDeleteResponse
+from .audit_log_list_response import AuditLogListResponse as AuditLogListResponse
+from .certificate_list_params import CertificateListParams as CertificateListParams
+from .usage_embeddings_params import UsageEmbeddingsParams as UsageEmbeddingsParams
+from .usage_completions_params import UsageCompletionsParams as UsageCompletionsParams
+from .usage_moderations_params import UsageModerationsParams as UsageModerationsParams
+from .admin_api_key_list_params import AdminAPIKeyListParams as AdminAPIKeyListParams
+from .certificate_create_params import CertificateCreateParams as CertificateCreateParams
+from .certificate_list_response import CertificateListResponse as CertificateListResponse
+from .certificate_update_params import CertificateUpdateParams as CertificateUpdateParams
+from .usage_embeddings_response import UsageEmbeddingsResponse as UsageEmbeddingsResponse
+from .usage_completions_response import UsageCompletionsResponse as UsageCompletionsResponse
+from .usage_moderations_response import UsageModerationsResponse as UsageModerationsResponse
+from .usage_vector_stores_params import UsageVectorStoresParams as UsageVectorStoresParams
+from .admin_api_key_create_params import AdminAPIKeyCreateParams as AdminAPIKeyCreateParams
+from .certificate_activate_params import CertificateActivateParams as CertificateActivateParams
+from .certificate_delete_response import CertificateDeleteResponse as CertificateDeleteResponse
+from .certificate_retrieve_params import CertificateRetrieveParams as CertificateRetrieveParams
+from .usage_audio_speeches_params import UsageAudioSpeechesParams as UsageAudioSpeechesParams
+from .usage_vector_stores_response import UsageVectorStoresResponse as UsageVectorStoresResponse
+from .admin_api_key_create_response import AdminAPIKeyCreateResponse as AdminAPIKeyCreateResponse
+from .admin_api_key_delete_response import AdminAPIKeyDeleteResponse as AdminAPIKeyDeleteResponse
+from .certificate_activate_response import CertificateActivateResponse as CertificateActivateResponse
+from .certificate_deactivate_params import CertificateDeactivateParams as CertificateDeactivateParams
+from .usage_audio_speeches_response import UsageAudioSpeechesResponse as UsageAudioSpeechesResponse
+from .certificate_deactivate_response import CertificateDeactivateResponse as CertificateDeactivateResponse
+from .usage_audio_transcriptions_params import UsageAudioTranscriptionsParams as UsageAudioTranscriptionsParams
+from .usage_audio_transcriptions_response import UsageAudioTranscriptionsResponse as UsageAudioTranscriptionsResponse
+from .usage_code_interpreter_sessions_params import (
+ UsageCodeInterpreterSessionsParams as UsageCodeInterpreterSessionsParams,
+)
+from .usage_code_interpreter_sessions_response import (
+ UsageCodeInterpreterSessionsResponse as UsageCodeInterpreterSessionsResponse,
+)
diff --git a/src/openai/types/admin/organization/admin_api_key.py b/src/openai/types/admin/organization/admin_api_key.py
new file mode 100644
index 0000000000..5d2e5840f9
--- /dev/null
+++ b/src/openai/types/admin/organization/admin_api_key.py
@@ -0,0 +1,52 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["AdminAPIKey", "Owner"]
+
+
+class Owner(BaseModel):
+ id: Optional[str] = None
+ """The identifier, which can be referenced in API endpoints"""
+
+ created_at: Optional[int] = None
+ """The Unix timestamp (in seconds) of when the user was created"""
+
+ name: Optional[str] = None
+ """The name of the user"""
+
+ object: Optional[str] = None
+ """The object type, which is always organization.user"""
+
+ role: Optional[str] = None
+ """Always `owner`"""
+
+ type: Optional[str] = None
+ """Always `user`"""
+
+
+class AdminAPIKey(BaseModel):
+ """Represents an individual Admin API key in an org."""
+
+ id: str
+ """The identifier, which can be referenced in API endpoints"""
+
+ created_at: int
+ """The Unix timestamp (in seconds) of when the API key was created"""
+
+ object: Literal["organization.admin_api_key"]
+ """The object type, which is always `organization.admin_api_key`"""
+
+ owner: Owner
+
+ redacted_value: str
+ """The redacted value of the API key"""
+
+ last_used_at: Optional[int] = None
+ """The Unix timestamp (in seconds) of when the API key was last used"""
+
+ name: Optional[str] = None
+ """The name of the API key"""
diff --git a/src/openai/types/admin/organization/admin_api_key_create_params.py b/src/openai/types/admin/organization/admin_api_key_create_params.py
new file mode 100644
index 0000000000..dccdfb8a75
--- /dev/null
+++ b/src/openai/types/admin/organization/admin_api_key_create_params.py
@@ -0,0 +1,11 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+__all__ = ["AdminAPIKeyCreateParams"]
+
+
+class AdminAPIKeyCreateParams(TypedDict, total=False):
+ name: Required[str]
diff --git a/src/openai/types/admin/organization/admin_api_key_create_response.py b/src/openai/types/admin/organization/admin_api_key_create_response.py
new file mode 100644
index 0000000000..58101a9e0a
--- /dev/null
+++ b/src/openai/types/admin/organization/admin_api_key_create_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .admin_api_key import AdminAPIKey
+
+__all__ = ["AdminAPIKeyCreateResponse"]
+
+
+class AdminAPIKeyCreateResponse(AdminAPIKey):
+ """Represents an individual Admin API key in an org."""
+
+ value: str
+ """The value of the API key. Only shown on create."""
diff --git a/src/openai/types/admin/organization/admin_api_key_delete_response.py b/src/openai/types/admin/organization/admin_api_key_delete_response.py
new file mode 100644
index 0000000000..df8c5171d4
--- /dev/null
+++ b/src/openai/types/admin/organization/admin_api_key_delete_response.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["AdminAPIKeyDeleteResponse"]
+
+
+class AdminAPIKeyDeleteResponse(BaseModel):
+ id: str
+
+ deleted: bool
+
+ object: Literal["organization.admin_api_key.deleted"]
diff --git a/src/openai/types/admin/organization/admin_api_key_list_params.py b/src/openai/types/admin/organization/admin_api_key_list_params.py
new file mode 100644
index 0000000000..c3b3f51008
--- /dev/null
+++ b/src/openai/types/admin/organization/admin_api_key_list_params.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["AdminAPIKeyListParams"]
+
+
+class AdminAPIKeyListParams(TypedDict, total=False):
+ after: Optional[str]
+ """Return keys with IDs that come after this ID in the pagination order."""
+
+ limit: int
+ """Maximum number of keys to return."""
+
+ order: Literal["asc", "desc"]
+ """Order results by creation time, ascending or descending."""
diff --git a/src/openai/types/admin/organization/audit_log_list_params.py b/src/openai/types/admin/organization/audit_log_list_params.py
new file mode 100644
index 0000000000..bd3bc6d629
--- /dev/null
+++ b/src/openai/types/admin/organization/audit_log_list_params.py
@@ -0,0 +1,143 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+from typing_extensions import Literal, TypedDict
+
+from ...._types import SequenceNotStr
+
+__all__ = ["AuditLogListParams", "EffectiveAt"]
+
+
+class AuditLogListParams(TypedDict, total=False):
+ actor_emails: SequenceNotStr[str]
+ """Return only events performed by users with these emails."""
+
+ actor_ids: SequenceNotStr[str]
+ """Return only events performed by these actors.
+
+ Can be a user ID, a service account ID, or an api key tracking ID.
+ """
+
+ after: str
+ """A cursor for use in pagination.
+
+ `after` is an object ID that defines your place in the list. For instance, if
+ you make a list request and receive 100 objects, ending with obj_foo, your
+ subsequent call can include after=obj_foo in order to fetch the next page of the
+ list.
+ """
+
+ before: str
+ """A cursor for use in pagination.
+
+ `before` is an object ID that defines your place in the list. For instance, if
+ you make a list request and receive 100 objects, starting with obj_foo, your
+ subsequent call can include before=obj_foo in order to fetch the previous page
+ of the list.
+ """
+
+ effective_at: EffectiveAt
+ """Return only events whose `effective_at` (Unix seconds) is in this range."""
+
+ event_types: List[
+ Literal[
+ "api_key.created",
+ "api_key.updated",
+ "api_key.deleted",
+ "certificate.created",
+ "certificate.updated",
+ "certificate.deleted",
+ "certificates.activated",
+ "certificates.deactivated",
+ "checkpoint.permission.created",
+ "checkpoint.permission.deleted",
+ "external_key.registered",
+ "external_key.removed",
+ "group.created",
+ "group.updated",
+ "group.deleted",
+ "invite.sent",
+ "invite.accepted",
+ "invite.deleted",
+ "ip_allowlist.created",
+ "ip_allowlist.updated",
+ "ip_allowlist.deleted",
+ "ip_allowlist.config.activated",
+ "ip_allowlist.config.deactivated",
+ "login.succeeded",
+ "login.failed",
+ "logout.succeeded",
+ "logout.failed",
+ "organization.updated",
+ "project.created",
+ "project.updated",
+ "project.archived",
+ "project.deleted",
+ "rate_limit.updated",
+ "rate_limit.deleted",
+ "resource.deleted",
+ "tunnel.created",
+ "tunnel.updated",
+ "tunnel.deleted",
+ "role.created",
+ "role.updated",
+ "role.deleted",
+ "role.assignment.created",
+ "role.assignment.deleted",
+ "scim.enabled",
+ "scim.disabled",
+ "service_account.created",
+ "service_account.updated",
+ "service_account.deleted",
+ "user.added",
+ "user.updated",
+ "user.deleted",
+ ]
+ ]
+ """Return only events with a `type` in one of these values.
+
+ For example, `project.created`. For all options, see the documentation for the
+ [audit log object](https://platform.openai.com/docs/api-reference/audit-logs/object).
+ """
+
+ limit: int
+ """A limit on the number of objects to be returned.
+
+ Limit can range between 1 and 100, and the default is 20.
+ """
+
+ project_ids: SequenceNotStr[str]
+ """Return only events for these projects."""
+
+ resource_ids: SequenceNotStr[str]
+ """Return only events performed on these targets.
+
+ For example, a project ID updated.
+ """
+
+
+class EffectiveAt(TypedDict, total=False):
+ """Return only events whose `effective_at` (Unix seconds) is in this range."""
+
+ gt: int
+ """
+ Return only events whose `effective_at` (Unix seconds) is greater than this
+ value.
+ """
+
+ gte: int
+ """
+ Return only events whose `effective_at` (Unix seconds) is greater than or equal
+ to this value.
+ """
+
+ lt: int
+ """Return only events whose `effective_at` (Unix seconds) is less than this value."""
+
+ lte: int
+ """
+ Return only events whose `effective_at` (Unix seconds) is less than or equal to
+ this value.
+ """
diff --git a/src/openai/types/admin/organization/audit_log_list_response.py b/src/openai/types/admin/organization/audit_log_list_response.py
new file mode 100644
index 0000000000..ec899758a2
--- /dev/null
+++ b/src/openai/types/admin/organization/audit_log_list_response.py
@@ -0,0 +1,1033 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from pydantic import Field as FieldInfo
+
+from ...._models import BaseModel
+
+__all__ = [
+ "AuditLogListResponse",
+ "Actor",
+ "ActorAPIKey",
+ "ActorAPIKeyServiceAccount",
+ "ActorAPIKeyUser",
+ "ActorSession",
+ "ActorSessionUser",
+ "APIKeyCreated",
+ "APIKeyCreatedData",
+ "APIKeyDeleted",
+ "APIKeyUpdated",
+ "APIKeyUpdatedChangesRequested",
+ "CertificateCreated",
+ "CertificateDeleted",
+ "CertificateUpdated",
+ "CertificatesActivated",
+ "CertificatesActivatedCertificate",
+ "CertificatesDeactivated",
+ "CertificatesDeactivatedCertificate",
+ "CheckpointPermissionCreated",
+ "CheckpointPermissionCreatedData",
+ "CheckpointPermissionDeleted",
+ "ExternalKeyRegistered",
+ "ExternalKeyRemoved",
+ "GroupCreated",
+ "GroupCreatedData",
+ "GroupDeleted",
+ "GroupUpdated",
+ "GroupUpdatedChangesRequested",
+ "InviteAccepted",
+ "InviteDeleted",
+ "InviteSent",
+ "InviteSentData",
+ "IPAllowlistConfigActivated",
+ "IPAllowlistConfigActivatedConfig",
+ "IPAllowlistConfigDeactivated",
+ "IPAllowlistConfigDeactivatedConfig",
+ "IPAllowlistCreated",
+ "IPAllowlistDeleted",
+ "IPAllowlistUpdated",
+ "LoginFailed",
+ "LogoutFailed",
+ "OrganizationUpdated",
+ "OrganizationUpdatedChangesRequested",
+ "Project",
+ "ProjectArchived",
+ "ProjectCreated",
+ "ProjectCreatedData",
+ "ProjectDeleted",
+ "ProjectUpdated",
+ "ProjectUpdatedChangesRequested",
+ "RateLimitDeleted",
+ "RateLimitUpdated",
+ "RateLimitUpdatedChangesRequested",
+ "RoleAssignmentCreated",
+ "RoleAssignmentDeleted",
+ "RoleCreated",
+ "RoleDeleted",
+ "RoleUpdated",
+ "RoleUpdatedChangesRequested",
+ "ScimDisabled",
+ "ScimEnabled",
+ "ServiceAccountCreated",
+ "ServiceAccountCreatedData",
+ "ServiceAccountDeleted",
+ "ServiceAccountUpdated",
+ "ServiceAccountUpdatedChangesRequested",
+ "UserAdded",
+ "UserAddedData",
+ "UserDeleted",
+ "UserUpdated",
+ "UserUpdatedChangesRequested",
+]
+
+
+class ActorAPIKeyServiceAccount(BaseModel):
+ """The service account that performed the audit logged action."""
+
+ id: Optional[str] = None
+ """The service account id."""
+
+
+class ActorAPIKeyUser(BaseModel):
+ """The user who performed the audit logged action."""
+
+ id: Optional[str] = None
+ """The user id."""
+
+ email: Optional[str] = None
+ """The user email."""
+
+
+class ActorAPIKey(BaseModel):
+ """The API Key used to perform the audit logged action."""
+
+ id: Optional[str] = None
+ """The tracking id of the API key."""
+
+ service_account: Optional[ActorAPIKeyServiceAccount] = None
+ """The service account that performed the audit logged action."""
+
+ type: Optional[Literal["user", "service_account"]] = None
+ """The type of API key. Can be either `user` or `service_account`."""
+
+ user: Optional[ActorAPIKeyUser] = None
+ """The user who performed the audit logged action."""
+
+
+class ActorSessionUser(BaseModel):
+ """The user who performed the audit logged action."""
+
+ id: Optional[str] = None
+ """The user id."""
+
+ email: Optional[str] = None
+ """The user email."""
+
+
+class ActorSession(BaseModel):
+ """The session in which the audit logged action was performed."""
+
+ ip_address: Optional[str] = None
+ """The IP address from which the action was performed."""
+
+ user: Optional[ActorSessionUser] = None
+ """The user who performed the audit logged action."""
+
+
+class Actor(BaseModel):
+ """The actor who performed the audit logged action."""
+
+ api_key: Optional[ActorAPIKey] = None
+ """The API Key used to perform the audit logged action."""
+
+ session: Optional[ActorSession] = None
+ """The session in which the audit logged action was performed."""
+
+ type: Optional[Literal["session", "api_key"]] = None
+ """The type of actor. Is either `session` or `api_key`."""
+
+
+class APIKeyCreatedData(BaseModel):
+ """The payload used to create the API key."""
+
+ scopes: Optional[List[str]] = None
+ """A list of scopes allowed for the API key, e.g. `["api.model.request"]`"""
+
+
+class APIKeyCreated(BaseModel):
+ """The details for events with this `type`."""
+
+ id: Optional[str] = None
+ """The tracking ID of the API key."""
+
+ data: Optional[APIKeyCreatedData] = None
+ """The payload used to create the API key."""
+
+
+class APIKeyDeleted(BaseModel):
+ """The details for events with this `type`."""
+
+ id: Optional[str] = None
+ """The tracking ID of the API key."""
+
+
+class APIKeyUpdatedChangesRequested(BaseModel):
+ """The payload used to update the API key."""
+
+ scopes: Optional[List[str]] = None
+ """A list of scopes allowed for the API key, e.g. `["api.model.request"]`"""
+
+
+class APIKeyUpdated(BaseModel):
+ """The details for events with this `type`."""
+
+ id: Optional[str] = None
+ """The tracking ID of the API key."""
+
+ changes_requested: Optional[APIKeyUpdatedChangesRequested] = None
+ """The payload used to update the API key."""
+
+
+class CertificateCreated(BaseModel):
+ """The details for events with this `type`."""
+
+ id: Optional[str] = None
+ """The certificate ID."""
+
+ name: Optional[str] = None
+ """The name of the certificate."""
+
+
+class CertificateDeleted(BaseModel):
+ """The details for events with this `type`."""
+
+ id: Optional[str] = None
+ """The certificate ID."""
+
+ certificate: Optional[str] = None
+ """The certificate content in PEM format."""
+
+ name: Optional[str] = None
+ """The name of the certificate."""
+
+
+class CertificateUpdated(BaseModel):
+ """The details for events with this `type`."""
+
+ id: Optional[str] = None
+ """The certificate ID."""
+
+ name: Optional[str] = None
+ """The name of the certificate."""
+
+
+class CertificatesActivatedCertificate(BaseModel):
+ id: Optional[str] = None
+ """The certificate ID."""
+
+ name: Optional[str] = None
+ """The name of the certificate."""
+
+
+class CertificatesActivated(BaseModel):
+ """The details for events with this `type`."""
+
+ certificates: Optional[List[CertificatesActivatedCertificate]] = None
+
+
+class CertificatesDeactivatedCertificate(BaseModel):
+ id: Optional[str] = None
+ """The certificate ID."""
+
+ name: Optional[str] = None
+ """The name of the certificate."""
+
+
+class CertificatesDeactivated(BaseModel):
+ """The details for events with this `type`."""
+
+ certificates: Optional[List[CertificatesDeactivatedCertificate]] = None
+
+
+class CheckpointPermissionCreatedData(BaseModel):
+ """The payload used to create the checkpoint permission."""
+
+ fine_tuned_model_checkpoint: Optional[str] = None
+ """The ID of the fine-tuned model checkpoint."""
+
+ project_id: Optional[str] = None
+ """The ID of the project that the checkpoint permission was created for."""
+
+
+class CheckpointPermissionCreated(BaseModel):
+ """
+ The project and fine-tuned model checkpoint that the checkpoint permission was created for.
+ """
+
+ id: Optional[str] = None
+ """The ID of the checkpoint permission."""
+
+ data: Optional[CheckpointPermissionCreatedData] = None
+ """The payload used to create the checkpoint permission."""
+
+
+class CheckpointPermissionDeleted(BaseModel):
+ """The details for events with this `type`."""
+
+ id: Optional[str] = None
+ """The ID of the checkpoint permission."""
+
+
+class ExternalKeyRegistered(BaseModel):
+ """The details for events with this `type`."""
+
+ id: Optional[str] = None
+ """The ID of the external key configuration."""
+
+ data: Optional[object] = None
+ """The configuration for the external key."""
+
+
+class ExternalKeyRemoved(BaseModel):
+ """The details for events with this `type`."""
+
+ id: Optional[str] = None
+ """The ID of the external key configuration."""
+
+
+class GroupCreatedData(BaseModel):
+ """Information about the created group."""
+
+ group_name: Optional[str] = None
+ """The group name."""
+
+
+class GroupCreated(BaseModel):
+ """The details for events with this `type`."""
+
+ id: Optional[str] = None
+ """The ID of the group."""
+
+ data: Optional[GroupCreatedData] = None
+ """Information about the created group."""
+
+
+class GroupDeleted(BaseModel):
+ """The details for events with this `type`."""
+
+ id: Optional[str] = None
+ """The ID of the group."""
+
+
+class GroupUpdatedChangesRequested(BaseModel):
+ """The payload used to update the group."""
+
+ group_name: Optional[str] = None
+ """The updated group name."""
+
+
+class GroupUpdated(BaseModel):
+ """The details for events with this `type`."""
+
+ id: Optional[str] = None
+ """The ID of the group."""
+
+ changes_requested: Optional[GroupUpdatedChangesRequested] = None
+ """The payload used to update the group."""
+
+
+class InviteAccepted(BaseModel):
+ """The details for events with this `type`."""
+
+ id: Optional[str] = None
+ """The ID of the invite."""
+
+
+class InviteDeleted(BaseModel):
+ """The details for events with this `type`."""
+
+ id: Optional[str] = None
+ """The ID of the invite."""
+
+
+class InviteSentData(BaseModel):
+ """The payload used to create the invite."""
+
+ email: Optional[str] = None
+ """The email invited to the organization."""
+
+ role: Optional[str] = None
+ """The role the email was invited to be. Is either `owner` or `member`."""
+
+
+class InviteSent(BaseModel):
+ """The details for events with this `type`."""
+
+ id: Optional[str] = None
+ """The ID of the invite."""
+
+ data: Optional[InviteSentData] = None
+ """The payload used to create the invite."""
+
+
+class IPAllowlistConfigActivatedConfig(BaseModel):
+ id: Optional[str] = None
+ """The ID of the IP allowlist configuration."""
+
+ name: Optional[str] = None
+ """The name of the IP allowlist configuration."""
+
+
+class IPAllowlistConfigActivated(BaseModel):
+ """The details for events with this `type`."""
+
+ configs: Optional[List[IPAllowlistConfigActivatedConfig]] = None
+ """The configurations that were activated."""
+
+
+class IPAllowlistConfigDeactivatedConfig(BaseModel):
+ id: Optional[str] = None
+ """The ID of the IP allowlist configuration."""
+
+ name: Optional[str] = None
+ """The name of the IP allowlist configuration."""
+
+
+class IPAllowlistConfigDeactivated(BaseModel):
+ """The details for events with this `type`."""
+
+ configs: Optional[List[IPAllowlistConfigDeactivatedConfig]] = None
+ """The configurations that were deactivated."""
+
+
+class IPAllowlistCreated(BaseModel):
+ """The details for events with this `type`."""
+
+ id: Optional[str] = None
+ """The ID of the IP allowlist configuration."""
+
+ allowed_ips: Optional[List[str]] = None
+ """The IP addresses or CIDR ranges included in the configuration."""
+
+ name: Optional[str] = None
+ """The name of the IP allowlist configuration."""
+
+
+class IPAllowlistDeleted(BaseModel):
+ """The details for events with this `type`."""
+
+ id: Optional[str] = None
+ """The ID of the IP allowlist configuration."""
+
+ allowed_ips: Optional[List[str]] = None
+ """The IP addresses or CIDR ranges that were in the configuration."""
+
+ name: Optional[str] = None
+ """The name of the IP allowlist configuration."""
+
+
+class IPAllowlistUpdated(BaseModel):
+ """The details for events with this `type`."""
+
+ id: Optional[str] = None
+ """The ID of the IP allowlist configuration."""
+
+ allowed_ips: Optional[List[str]] = None
+ """The updated set of IP addresses or CIDR ranges in the configuration."""
+
+
+class LoginFailed(BaseModel):
+ """The details for events with this `type`."""
+
+ error_code: Optional[str] = None
+ """The error code of the failure."""
+
+ error_message: Optional[str] = None
+ """The error message of the failure."""
+
+
+class LogoutFailed(BaseModel):
+ """The details for events with this `type`."""
+
+ error_code: Optional[str] = None
+ """The error code of the failure."""
+
+ error_message: Optional[str] = None
+ """The error message of the failure."""
+
+
+class OrganizationUpdatedChangesRequested(BaseModel):
+ """The payload used to update the organization settings."""
+
+ api_call_logging: Optional[str] = None
+ """How your organization logs data from supported API calls.
+
+ One of `disabled`, `enabled_per_call`, `enabled_for_all_projects`, or
+ `enabled_for_selected_projects`
+ """
+
+ api_call_logging_project_ids: Optional[str] = None
+ """
+ The list of project ids if api_call_logging is set to
+ `enabled_for_selected_projects`
+ """
+
+ description: Optional[str] = None
+ """The organization description."""
+
+ name: Optional[str] = None
+ """The organization name."""
+
+ threads_ui_visibility: Optional[str] = None
+ """
+ Visibility of the threads page which shows messages created with the Assistants
+ API and Playground. One of `ANY_ROLE`, `OWNERS`, or `NONE`.
+ """
+
+ title: Optional[str] = None
+ """The organization title."""
+
+ usage_dashboard_visibility: Optional[str] = None
+ """
+ Visibility of the usage dashboard which shows activity and costs for your
+ organization. One of `ANY_ROLE` or `OWNERS`.
+ """
+
+
+class OrganizationUpdated(BaseModel):
+ """The details for events with this `type`."""
+
+ id: Optional[str] = None
+ """The organization ID."""
+
+ changes_requested: Optional[OrganizationUpdatedChangesRequested] = None
+ """The payload used to update the organization settings."""
+
+
+class Project(BaseModel):
+ """The project that the action was scoped to.
+
+ Absent for actions not scoped to projects. Note that any admin actions taken via Admin API keys are associated with the default project.
+ """
+
+ id: Optional[str] = None
+ """The project ID."""
+
+ name: Optional[str] = None
+ """The project title."""
+
+
+class ProjectArchived(BaseModel):
+ """The details for events with this `type`."""
+
+ id: Optional[str] = None
+ """The project ID."""
+
+
+class ProjectCreatedData(BaseModel):
+ """The payload used to create the project."""
+
+ name: Optional[str] = None
+ """The project name."""
+
+ title: Optional[str] = None
+ """The title of the project as seen on the dashboard."""
+
+
+class ProjectCreated(BaseModel):
+ """The details for events with this `type`."""
+
+ id: Optional[str] = None
+ """The project ID."""
+
+ data: Optional[ProjectCreatedData] = None
+ """The payload used to create the project."""
+
+
+class ProjectDeleted(BaseModel):
+ """The details for events with this `type`."""
+
+ id: Optional[str] = None
+ """The project ID."""
+
+
+class ProjectUpdatedChangesRequested(BaseModel):
+ """The payload used to update the project."""
+
+ title: Optional[str] = None
+ """The title of the project as seen on the dashboard."""
+
+
+class ProjectUpdated(BaseModel):
+ """The details for events with this `type`."""
+
+ id: Optional[str] = None
+ """The project ID."""
+
+ changes_requested: Optional[ProjectUpdatedChangesRequested] = None
+ """The payload used to update the project."""
+
+
+class RateLimitDeleted(BaseModel):
+ """The details for events with this `type`."""
+
+ id: Optional[str] = None
+ """The rate limit ID"""
+
+
+class RateLimitUpdatedChangesRequested(BaseModel):
+ """The payload used to update the rate limits."""
+
+ batch_1_day_max_input_tokens: Optional[int] = None
+ """The maximum batch input tokens per day. Only relevant for certain models."""
+
+ max_audio_megabytes_per_1_minute: Optional[int] = None
+ """The maximum audio megabytes per minute. Only relevant for certain models."""
+
+ max_images_per_1_minute: Optional[int] = None
+ """The maximum images per minute. Only relevant for certain models."""
+
+ max_requests_per_1_day: Optional[int] = None
+ """The maximum requests per day. Only relevant for certain models."""
+
+ max_requests_per_1_minute: Optional[int] = None
+ """The maximum requests per minute."""
+
+ max_tokens_per_1_minute: Optional[int] = None
+ """The maximum tokens per minute."""
+
+
+class RateLimitUpdated(BaseModel):
+ """The details for events with this `type`."""
+
+ id: Optional[str] = None
+ """The rate limit ID"""
+
+ changes_requested: Optional[RateLimitUpdatedChangesRequested] = None
+ """The payload used to update the rate limits."""
+
+
+class RoleAssignmentCreated(BaseModel):
+ """The details for events with this `type`."""
+
+ id: Optional[str] = None
+ """The identifier of the role assignment."""
+
+ principal_id: Optional[str] = None
+ """The principal (user or group) that received the role."""
+
+ principal_type: Optional[str] = None
+ """The type of principal (user or group) that received the role."""
+
+ resource_id: Optional[str] = None
+ """The resource the role assignment is scoped to."""
+
+ resource_type: Optional[str] = None
+ """The type of resource the role assignment is scoped to."""
+
+
+class RoleAssignmentDeleted(BaseModel):
+ """The details for events with this `type`."""
+
+ id: Optional[str] = None
+ """The identifier of the role assignment."""
+
+ principal_id: Optional[str] = None
+ """The principal (user or group) that had the role removed."""
+
+ principal_type: Optional[str] = None
+ """The type of principal (user or group) that had the role removed."""
+
+ resource_id: Optional[str] = None
+ """The resource the role assignment was scoped to."""
+
+ resource_type: Optional[str] = None
+ """The type of resource the role assignment was scoped to."""
+
+
+class RoleCreated(BaseModel):
+ """The details for events with this `type`."""
+
+ id: Optional[str] = None
+ """The role ID."""
+
+ permissions: Optional[List[str]] = None
+ """The permissions granted by the role."""
+
+ resource_id: Optional[str] = None
+ """The resource the role is scoped to."""
+
+ resource_type: Optional[str] = None
+ """The type of resource the role belongs to."""
+
+ role_name: Optional[str] = None
+ """The name of the role."""
+
+
+class RoleDeleted(BaseModel):
+ """The details for events with this `type`."""
+
+ id: Optional[str] = None
+ """The role ID."""
+
+
+class RoleUpdatedChangesRequested(BaseModel):
+ """The payload used to update the role."""
+
+ description: Optional[str] = None
+ """The updated role description, when provided."""
+
+ metadata: Optional[object] = None
+ """Additional metadata stored on the role."""
+
+ permissions_added: Optional[List[str]] = None
+ """The permissions added to the role."""
+
+ permissions_removed: Optional[List[str]] = None
+ """The permissions removed from the role."""
+
+ resource_id: Optional[str] = None
+ """The resource the role is scoped to."""
+
+ resource_type: Optional[str] = None
+ """The type of resource the role belongs to."""
+
+ role_name: Optional[str] = None
+ """The updated role name, when provided."""
+
+
+class RoleUpdated(BaseModel):
+ """The details for events with this `type`."""
+
+ id: Optional[str] = None
+ """The role ID."""
+
+ changes_requested: Optional[RoleUpdatedChangesRequested] = None
+ """The payload used to update the role."""
+
+
+class ScimDisabled(BaseModel):
+ """The details for events with this `type`."""
+
+ id: Optional[str] = None
+ """The ID of the SCIM was disabled for."""
+
+
+class ScimEnabled(BaseModel):
+ """The details for events with this `type`."""
+
+ id: Optional[str] = None
+ """The ID of the SCIM was enabled for."""
+
+
+class ServiceAccountCreatedData(BaseModel):
+ """The payload used to create the service account."""
+
+ role: Optional[str] = None
+ """The role of the service account. Is either `owner` or `member`."""
+
+
+class ServiceAccountCreated(BaseModel):
+ """The details for events with this `type`."""
+
+ id: Optional[str] = None
+ """The service account ID."""
+
+ data: Optional[ServiceAccountCreatedData] = None
+ """The payload used to create the service account."""
+
+
+class ServiceAccountDeleted(BaseModel):
+ """The details for events with this `type`."""
+
+ id: Optional[str] = None
+ """The service account ID."""
+
+
+class ServiceAccountUpdatedChangesRequested(BaseModel):
+ """The payload used to updated the service account."""
+
+ role: Optional[str] = None
+ """The role of the service account. Is either `owner` or `member`."""
+
+
+class ServiceAccountUpdated(BaseModel):
+ """The details for events with this `type`."""
+
+ id: Optional[str] = None
+ """The service account ID."""
+
+ changes_requested: Optional[ServiceAccountUpdatedChangesRequested] = None
+ """The payload used to updated the service account."""
+
+
+class UserAddedData(BaseModel):
+ """The payload used to add the user to the project."""
+
+ role: Optional[str] = None
+ """The role of the user. Is either `owner` or `member`."""
+
+
+class UserAdded(BaseModel):
+ """The details for events with this `type`."""
+
+ id: Optional[str] = None
+ """The user ID."""
+
+ data: Optional[UserAddedData] = None
+ """The payload used to add the user to the project."""
+
+
+class UserDeleted(BaseModel):
+ """The details for events with this `type`."""
+
+ id: Optional[str] = None
+ """The user ID."""
+
+
+class UserUpdatedChangesRequested(BaseModel):
+ """The payload used to update the user."""
+
+ role: Optional[str] = None
+ """The role of the user. Is either `owner` or `member`."""
+
+
+class UserUpdated(BaseModel):
+ """The details for events with this `type`."""
+
+ id: Optional[str] = None
+ """The project ID."""
+
+ changes_requested: Optional[UserUpdatedChangesRequested] = None
+ """The payload used to update the user."""
+
+
+class AuditLogListResponse(BaseModel):
+ """A log of a user action or configuration change within this organization."""
+
+ id: str
+ """The ID of this log."""
+
+ effective_at: int
+ """The Unix timestamp (in seconds) of the event."""
+
+ type: Literal[
+ "api_key.created",
+ "api_key.updated",
+ "api_key.deleted",
+ "certificate.created",
+ "certificate.updated",
+ "certificate.deleted",
+ "certificates.activated",
+ "certificates.deactivated",
+ "checkpoint.permission.created",
+ "checkpoint.permission.deleted",
+ "external_key.registered",
+ "external_key.removed",
+ "group.created",
+ "group.updated",
+ "group.deleted",
+ "invite.sent",
+ "invite.accepted",
+ "invite.deleted",
+ "ip_allowlist.created",
+ "ip_allowlist.updated",
+ "ip_allowlist.deleted",
+ "ip_allowlist.config.activated",
+ "ip_allowlist.config.deactivated",
+ "login.succeeded",
+ "login.failed",
+ "logout.succeeded",
+ "logout.failed",
+ "organization.updated",
+ "project.created",
+ "project.updated",
+ "project.archived",
+ "project.deleted",
+ "rate_limit.updated",
+ "rate_limit.deleted",
+ "resource.deleted",
+ "tunnel.created",
+ "tunnel.updated",
+ "tunnel.deleted",
+ "role.created",
+ "role.updated",
+ "role.deleted",
+ "role.assignment.created",
+ "role.assignment.deleted",
+ "scim.enabled",
+ "scim.disabled",
+ "service_account.created",
+ "service_account.updated",
+ "service_account.deleted",
+ "user.added",
+ "user.updated",
+ "user.deleted",
+ ]
+ """The event type."""
+
+ actor: Optional[Actor] = None
+ """The actor who performed the audit logged action."""
+
+ api_key_created: Optional[APIKeyCreated] = FieldInfo(alias="api_key.created", default=None)
+ """The details for events with this `type`."""
+
+ api_key_deleted: Optional[APIKeyDeleted] = FieldInfo(alias="api_key.deleted", default=None)
+ """The details for events with this `type`."""
+
+ api_key_updated: Optional[APIKeyUpdated] = FieldInfo(alias="api_key.updated", default=None)
+ """The details for events with this `type`."""
+
+ certificate_created: Optional[CertificateCreated] = FieldInfo(alias="certificate.created", default=None)
+ """The details for events with this `type`."""
+
+ certificate_deleted: Optional[CertificateDeleted] = FieldInfo(alias="certificate.deleted", default=None)
+ """The details for events with this `type`."""
+
+ certificate_updated: Optional[CertificateUpdated] = FieldInfo(alias="certificate.updated", default=None)
+ """The details for events with this `type`."""
+
+ certificates_activated: Optional[CertificatesActivated] = FieldInfo(alias="certificates.activated", default=None)
+ """The details for events with this `type`."""
+
+ certificates_deactivated: Optional[CertificatesDeactivated] = FieldInfo(
+ alias="certificates.deactivated", default=None
+ )
+ """The details for events with this `type`."""
+
+ checkpoint_permission_created: Optional[CheckpointPermissionCreated] = FieldInfo(
+ alias="checkpoint.permission.created", default=None
+ )
+ """
+ The project and fine-tuned model checkpoint that the checkpoint permission was
+ created for.
+ """
+
+ checkpoint_permission_deleted: Optional[CheckpointPermissionDeleted] = FieldInfo(
+ alias="checkpoint.permission.deleted", default=None
+ )
+ """The details for events with this `type`."""
+
+ external_key_registered: Optional[ExternalKeyRegistered] = FieldInfo(alias="external_key.registered", default=None)
+ """The details for events with this `type`."""
+
+ external_key_removed: Optional[ExternalKeyRemoved] = FieldInfo(alias="external_key.removed", default=None)
+ """The details for events with this `type`."""
+
+ group_created: Optional[GroupCreated] = FieldInfo(alias="group.created", default=None)
+ """The details for events with this `type`."""
+
+ group_deleted: Optional[GroupDeleted] = FieldInfo(alias="group.deleted", default=None)
+ """The details for events with this `type`."""
+
+ group_updated: Optional[GroupUpdated] = FieldInfo(alias="group.updated", default=None)
+ """The details for events with this `type`."""
+
+ invite_accepted: Optional[InviteAccepted] = FieldInfo(alias="invite.accepted", default=None)
+ """The details for events with this `type`."""
+
+ invite_deleted: Optional[InviteDeleted] = FieldInfo(alias="invite.deleted", default=None)
+ """The details for events with this `type`."""
+
+ invite_sent: Optional[InviteSent] = FieldInfo(alias="invite.sent", default=None)
+ """The details for events with this `type`."""
+
+ ip_allowlist_config_activated: Optional[IPAllowlistConfigActivated] = FieldInfo(
+ alias="ip_allowlist.config.activated", default=None
+ )
+ """The details for events with this `type`."""
+
+ ip_allowlist_config_deactivated: Optional[IPAllowlistConfigDeactivated] = FieldInfo(
+ alias="ip_allowlist.config.deactivated", default=None
+ )
+ """The details for events with this `type`."""
+
+ ip_allowlist_created: Optional[IPAllowlistCreated] = FieldInfo(alias="ip_allowlist.created", default=None)
+ """The details for events with this `type`."""
+
+ ip_allowlist_deleted: Optional[IPAllowlistDeleted] = FieldInfo(alias="ip_allowlist.deleted", default=None)
+ """The details for events with this `type`."""
+
+ ip_allowlist_updated: Optional[IPAllowlistUpdated] = FieldInfo(alias="ip_allowlist.updated", default=None)
+ """The details for events with this `type`."""
+
+ login_failed: Optional[LoginFailed] = FieldInfo(alias="login.failed", default=None)
+ """The details for events with this `type`."""
+
+ login_succeeded: Optional[object] = FieldInfo(alias="login.succeeded", default=None)
+ """This event has no additional fields beyond the standard audit log attributes."""
+
+ logout_failed: Optional[LogoutFailed] = FieldInfo(alias="logout.failed", default=None)
+ """The details for events with this `type`."""
+
+ logout_succeeded: Optional[object] = FieldInfo(alias="logout.succeeded", default=None)
+ """This event has no additional fields beyond the standard audit log attributes."""
+
+ organization_updated: Optional[OrganizationUpdated] = FieldInfo(alias="organization.updated", default=None)
+ """The details for events with this `type`."""
+
+ project: Optional[Project] = None
+ """The project that the action was scoped to.
+
+ Absent for actions not scoped to projects. Note that any admin actions taken via
+ Admin API keys are associated with the default project.
+ """
+
+ project_archived: Optional[ProjectArchived] = FieldInfo(alias="project.archived", default=None)
+ """The details for events with this `type`."""
+
+ project_created: Optional[ProjectCreated] = FieldInfo(alias="project.created", default=None)
+ """The details for events with this `type`."""
+
+ project_deleted: Optional[ProjectDeleted] = FieldInfo(alias="project.deleted", default=None)
+ """The details for events with this `type`."""
+
+ project_updated: Optional[ProjectUpdated] = FieldInfo(alias="project.updated", default=None)
+ """The details for events with this `type`."""
+
+ rate_limit_deleted: Optional[RateLimitDeleted] = FieldInfo(alias="rate_limit.deleted", default=None)
+ """The details for events with this `type`."""
+
+ rate_limit_updated: Optional[RateLimitUpdated] = FieldInfo(alias="rate_limit.updated", default=None)
+ """The details for events with this `type`."""
+
+ role_assignment_created: Optional[RoleAssignmentCreated] = FieldInfo(alias="role.assignment.created", default=None)
+ """The details for events with this `type`."""
+
+ role_assignment_deleted: Optional[RoleAssignmentDeleted] = FieldInfo(alias="role.assignment.deleted", default=None)
+ """The details for events with this `type`."""
+
+ role_created: Optional[RoleCreated] = FieldInfo(alias="role.created", default=None)
+ """The details for events with this `type`."""
+
+ role_deleted: Optional[RoleDeleted] = FieldInfo(alias="role.deleted", default=None)
+ """The details for events with this `type`."""
+
+ role_updated: Optional[RoleUpdated] = FieldInfo(alias="role.updated", default=None)
+ """The details for events with this `type`."""
+
+ scim_disabled: Optional[ScimDisabled] = FieldInfo(alias="scim.disabled", default=None)
+ """The details for events with this `type`."""
+
+ scim_enabled: Optional[ScimEnabled] = FieldInfo(alias="scim.enabled", default=None)
+ """The details for events with this `type`."""
+
+ service_account_created: Optional[ServiceAccountCreated] = FieldInfo(alias="service_account.created", default=None)
+ """The details for events with this `type`."""
+
+ service_account_deleted: Optional[ServiceAccountDeleted] = FieldInfo(alias="service_account.deleted", default=None)
+ """The details for events with this `type`."""
+
+ service_account_updated: Optional[ServiceAccountUpdated] = FieldInfo(alias="service_account.updated", default=None)
+ """The details for events with this `type`."""
+
+ user_added: Optional[UserAdded] = FieldInfo(alias="user.added", default=None)
+ """The details for events with this `type`."""
+
+ user_deleted: Optional[UserDeleted] = FieldInfo(alias="user.deleted", default=None)
+ """The details for events with this `type`."""
+
+ user_updated: Optional[UserUpdated] = FieldInfo(alias="user.updated", default=None)
+ """The details for events with this `type`."""
diff --git a/src/openai/types/admin/organization/certificate.py b/src/openai/types/admin/organization/certificate.py
new file mode 100644
index 0000000000..a8663b4e4a
--- /dev/null
+++ b/src/openai/types/admin/organization/certificate.py
@@ -0,0 +1,51 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["Certificate", "CertificateDetails"]
+
+
+class CertificateDetails(BaseModel):
+ content: Optional[str] = None
+ """The content of the certificate in PEM format."""
+
+ expires_at: Optional[int] = None
+ """The Unix timestamp (in seconds) of when the certificate expires."""
+
+ valid_at: Optional[int] = None
+ """The Unix timestamp (in seconds) of when the certificate becomes valid."""
+
+
+class Certificate(BaseModel):
+ """Represents an individual `certificate` uploaded to the organization."""
+
+ id: str
+ """The identifier, which can be referenced in API endpoints"""
+
+ certificate_details: CertificateDetails
+
+ created_at: int
+ """The Unix timestamp (in seconds) of when the certificate was uploaded."""
+
+ name: Optional[str] = None
+ """The name of the certificate."""
+
+ object: Literal["certificate", "organization.certificate", "organization.project.certificate"]
+ """The object type.
+
+ - If creating, updating, or getting a specific certificate, the object type is
+ `certificate`.
+ - If listing, activating, or deactivating certificates for the organization, the
+ object type is `organization.certificate`.
+ - If listing, activating, or deactivating certificates for a project, the object
+ type is `organization.project.certificate`.
+ """
+
+ active: Optional[bool] = None
+ """Whether the certificate is currently active at the specified scope.
+
+ Not returned when getting details for a specific certificate.
+ """
diff --git a/src/openai/types/admin/organization/certificate_activate_params.py b/src/openai/types/admin/organization/certificate_activate_params.py
new file mode 100644
index 0000000000..0bb6474c7c
--- /dev/null
+++ b/src/openai/types/admin/organization/certificate_activate_params.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+from ...._types import SequenceNotStr
+
+__all__ = ["CertificateActivateParams"]
+
+
+class CertificateActivateParams(TypedDict, total=False):
+ certificate_ids: Required[SequenceNotStr[str]]
diff --git a/src/openai/types/admin/organization/certificate_activate_response.py b/src/openai/types/admin/organization/certificate_activate_response.py
new file mode 100644
index 0000000000..64239c3a93
--- /dev/null
+++ b/src/openai/types/admin/organization/certificate_activate_response.py
@@ -0,0 +1,37 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["CertificateActivateResponse", "CertificateDetails"]
+
+
+class CertificateDetails(BaseModel):
+ expires_at: Optional[int] = None
+ """The Unix timestamp (in seconds) of when the certificate expires."""
+
+ valid_at: Optional[int] = None
+ """The Unix timestamp (in seconds) of when the certificate becomes valid."""
+
+
+class CertificateActivateResponse(BaseModel):
+ """Represents an individual certificate configured at the organization level."""
+
+ id: str
+ """The identifier, which can be referenced in API endpoints"""
+
+ active: bool
+ """Whether the certificate is currently active at the organization level."""
+
+ certificate_details: CertificateDetails
+
+ created_at: int
+ """The Unix timestamp (in seconds) of when the certificate was uploaded."""
+
+ name: Optional[str] = None
+ """The name of the certificate."""
+
+ object: Literal["organization.certificate"]
+ """The object type, which is always `organization.certificate`."""
diff --git a/src/openai/types/admin/organization/certificate_create_params.py b/src/openai/types/admin/organization/certificate_create_params.py
new file mode 100644
index 0000000000..9aeb3bbc94
--- /dev/null
+++ b/src/openai/types/admin/organization/certificate_create_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+__all__ = ["CertificateCreateParams"]
+
+
+class CertificateCreateParams(TypedDict, total=False):
+ certificate: Required[str]
+ """The certificate content in PEM format"""
+
+ name: str
+ """An optional name for the certificate"""
diff --git a/src/openai/types/admin/organization/certificate_deactivate_params.py b/src/openai/types/admin/organization/certificate_deactivate_params.py
new file mode 100644
index 0000000000..827af54d35
--- /dev/null
+++ b/src/openai/types/admin/organization/certificate_deactivate_params.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+from ...._types import SequenceNotStr
+
+__all__ = ["CertificateDeactivateParams"]
+
+
+class CertificateDeactivateParams(TypedDict, total=False):
+ certificate_ids: Required[SequenceNotStr[str]]
diff --git a/src/openai/types/admin/organization/certificate_deactivate_response.py b/src/openai/types/admin/organization/certificate_deactivate_response.py
new file mode 100644
index 0000000000..874251cadc
--- /dev/null
+++ b/src/openai/types/admin/organization/certificate_deactivate_response.py
@@ -0,0 +1,37 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["CertificateDeactivateResponse", "CertificateDetails"]
+
+
+class CertificateDetails(BaseModel):
+ expires_at: Optional[int] = None
+ """The Unix timestamp (in seconds) of when the certificate expires."""
+
+ valid_at: Optional[int] = None
+ """The Unix timestamp (in seconds) of when the certificate becomes valid."""
+
+
+class CertificateDeactivateResponse(BaseModel):
+ """Represents an individual certificate configured at the organization level."""
+
+ id: str
+ """The identifier, which can be referenced in API endpoints"""
+
+ active: bool
+ """Whether the certificate is currently active at the organization level."""
+
+ certificate_details: CertificateDetails
+
+ created_at: int
+ """The Unix timestamp (in seconds) of when the certificate was uploaded."""
+
+ name: Optional[str] = None
+ """The name of the certificate."""
+
+ object: Literal["organization.certificate"]
+ """The object type, which is always `organization.certificate`."""
diff --git a/src/openai/types/admin/organization/certificate_delete_response.py b/src/openai/types/admin/organization/certificate_delete_response.py
new file mode 100644
index 0000000000..b0dc1fa018
--- /dev/null
+++ b/src/openai/types/admin/organization/certificate_delete_response.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["CertificateDeleteResponse"]
+
+
+class CertificateDeleteResponse(BaseModel):
+ id: str
+ """The ID of the certificate that was deleted."""
+
+ object: Literal["certificate.deleted"]
+ """The object type, must be `certificate.deleted`."""
diff --git a/src/openai/types/admin/organization/certificate_list_params.py b/src/openai/types/admin/organization/certificate_list_params.py
new file mode 100644
index 0000000000..1d9aff4b4a
--- /dev/null
+++ b/src/openai/types/admin/organization/certificate_list_params.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["CertificateListParams"]
+
+
+class CertificateListParams(TypedDict, total=False):
+ after: str
+ """A cursor for use in pagination.
+
+ `after` is an object ID that defines your place in the list. For instance, if
+ you make a list request and receive 100 objects, ending with obj_foo, your
+ subsequent call can include after=obj_foo in order to fetch the next page of the
+ list.
+ """
+
+ limit: int
+ """A limit on the number of objects to be returned.
+
+ Limit can range between 1 and 100, and the default is 20.
+ """
+
+ order: Literal["asc", "desc"]
+ """Sort order by the `created_at` timestamp of the objects.
+
+ `asc` for ascending order and `desc` for descending order.
+ """
diff --git a/src/openai/types/admin/organization/certificate_list_response.py b/src/openai/types/admin/organization/certificate_list_response.py
new file mode 100644
index 0000000000..8d9816520f
--- /dev/null
+++ b/src/openai/types/admin/organization/certificate_list_response.py
@@ -0,0 +1,37 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["CertificateListResponse", "CertificateDetails"]
+
+
+class CertificateDetails(BaseModel):
+ expires_at: Optional[int] = None
+ """The Unix timestamp (in seconds) of when the certificate expires."""
+
+ valid_at: Optional[int] = None
+ """The Unix timestamp (in seconds) of when the certificate becomes valid."""
+
+
+class CertificateListResponse(BaseModel):
+ """Represents an individual certificate configured at the organization level."""
+
+ id: str
+ """The identifier, which can be referenced in API endpoints"""
+
+ active: bool
+ """Whether the certificate is currently active at the organization level."""
+
+ certificate_details: CertificateDetails
+
+ created_at: int
+ """The Unix timestamp (in seconds) of when the certificate was uploaded."""
+
+ name: Optional[str] = None
+ """The name of the certificate."""
+
+ object: Literal["organization.certificate"]
+ """The object type, which is always `organization.certificate`."""
diff --git a/src/openai/types/admin/organization/certificate_retrieve_params.py b/src/openai/types/admin/organization/certificate_retrieve_params.py
new file mode 100644
index 0000000000..29bc8dedc5
--- /dev/null
+++ b/src/openai/types/admin/organization/certificate_retrieve_params.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["CertificateRetrieveParams"]
+
+
+class CertificateRetrieveParams(TypedDict, total=False):
+ include: List[Literal["content"]]
+ """A list of additional fields to include in the response.
+
+ Currently the only supported value is `content` to fetch the PEM content of the
+ certificate.
+ """
diff --git a/src/openai/types/admin/organization/certificate_update_params.py b/src/openai/types/admin/organization/certificate_update_params.py
new file mode 100644
index 0000000000..c55b3aceb2
--- /dev/null
+++ b/src/openai/types/admin/organization/certificate_update_params.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["CertificateUpdateParams"]
+
+
+class CertificateUpdateParams(TypedDict, total=False):
+ name: str
+ """The updated name for the certificate"""
diff --git a/src/openai/types/admin/organization/group.py b/src/openai/types/admin/organization/group.py
new file mode 100644
index 0000000000..a5823b1442
--- /dev/null
+++ b/src/openai/types/admin/organization/group.py
@@ -0,0 +1,27 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from ...._models import BaseModel
+
+__all__ = ["Group"]
+
+
+class Group(BaseModel):
+ """Details about an organization group."""
+
+ id: str
+ """Identifier for the group."""
+
+ created_at: int
+ """Unix timestamp (in seconds) when the group was created."""
+
+ group_type: str
+ """The type of the group."""
+
+ is_scim_managed: bool
+ """
+ Whether the group is managed through SCIM and controlled by your identity
+ provider.
+ """
+
+ name: str
+ """Display name of the group."""
diff --git a/src/openai/types/admin/organization/group_create_params.py b/src/openai/types/admin/organization/group_create_params.py
new file mode 100644
index 0000000000..8e27d299d3
--- /dev/null
+++ b/src/openai/types/admin/organization/group_create_params.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+__all__ = ["GroupCreateParams"]
+
+
+class GroupCreateParams(TypedDict, total=False):
+ name: Required[str]
+ """Human readable name for the group."""
diff --git a/src/openai/types/admin/organization/group_delete_response.py b/src/openai/types/admin/organization/group_delete_response.py
new file mode 100644
index 0000000000..6dec56e58d
--- /dev/null
+++ b/src/openai/types/admin/organization/group_delete_response.py
@@ -0,0 +1,20 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["GroupDeleteResponse"]
+
+
+class GroupDeleteResponse(BaseModel):
+ """Confirmation payload returned after deleting a group."""
+
+ id: str
+ """Identifier of the deleted group."""
+
+ deleted: bool
+ """Whether the group was deleted."""
+
+ object: Literal["group.deleted"]
+ """Always `group.deleted`."""
diff --git a/src/openai/types/admin/organization/group_list_params.py b/src/openai/types/admin/organization/group_list_params.py
new file mode 100644
index 0000000000..198478b35a
--- /dev/null
+++ b/src/openai/types/admin/organization/group_list_params.py
@@ -0,0 +1,27 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["GroupListParams"]
+
+
+class GroupListParams(TypedDict, total=False):
+ after: str
+ """A cursor for use in pagination.
+
+ `after` is a group ID that defines your place in the list. For instance, if you
+ make a list request and receive 100 objects, ending with group_abc, your
+ subsequent call can include `after=group_abc` in order to fetch the next page of
+ the list.
+ """
+
+ limit: int
+ """A limit on the number of groups to be returned.
+
+ Limit can range between 0 and 1000, and the default is 100.
+ """
+
+ order: Literal["asc", "desc"]
+ """Specifies the sort order of the returned groups."""
diff --git a/src/openai/types/admin/organization/group_update_params.py b/src/openai/types/admin/organization/group_update_params.py
new file mode 100644
index 0000000000..2bb3a9d8fe
--- /dev/null
+++ b/src/openai/types/admin/organization/group_update_params.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+__all__ = ["GroupUpdateParams"]
+
+
+class GroupUpdateParams(TypedDict, total=False):
+ name: Required[str]
+ """New display name for the group."""
diff --git a/src/openai/types/admin/organization/group_update_response.py b/src/openai/types/admin/organization/group_update_response.py
new file mode 100644
index 0000000000..1ae6f86a64
--- /dev/null
+++ b/src/openai/types/admin/organization/group_update_response.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from ...._models import BaseModel
+
+__all__ = ["GroupUpdateResponse"]
+
+
+class GroupUpdateResponse(BaseModel):
+ """Response returned after updating a group."""
+
+ id: str
+ """Identifier for the group."""
+
+ created_at: int
+ """Unix timestamp (in seconds) when the group was created."""
+
+ is_scim_managed: bool
+ """
+ Whether the group is managed through SCIM and controlled by your identity
+ provider.
+ """
+
+ name: str
+ """Updated display name for the group."""
diff --git a/src/openai/types/admin/organization/groups/__init__.py b/src/openai/types/admin/organization/groups/__init__.py
new file mode 100644
index 0000000000..22189c1085
--- /dev/null
+++ b/src/openai/types/admin/organization/groups/__init__.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .role_list_params import RoleListParams as RoleListParams
+from .user_list_params import UserListParams as UserListParams
+from .role_create_params import RoleCreateParams as RoleCreateParams
+from .role_list_response import RoleListResponse as RoleListResponse
+from .user_create_params import UserCreateParams as UserCreateParams
+from .role_create_response import RoleCreateResponse as RoleCreateResponse
+from .role_delete_response import RoleDeleteResponse as RoleDeleteResponse
+from .user_create_response import UserCreateResponse as UserCreateResponse
+from .user_delete_response import UserDeleteResponse as UserDeleteResponse
+from .organization_group_user import OrganizationGroupUser as OrganizationGroupUser
diff --git a/src/openai/types/admin/organization/groups/organization_group_user.py b/src/openai/types/admin/organization/groups/organization_group_user.py
new file mode 100644
index 0000000000..792022c4a9
--- /dev/null
+++ b/src/openai/types/admin/organization/groups/organization_group_user.py
@@ -0,0 +1,20 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ....._models import BaseModel
+
+__all__ = ["OrganizationGroupUser"]
+
+
+class OrganizationGroupUser(BaseModel):
+ """Represents an individual user returned when inspecting group membership."""
+
+ id: str
+ """The identifier, which can be referenced in API endpoints"""
+
+ email: Optional[str] = None
+ """The email address of the user."""
+
+ name: str
+ """The name of the user."""
diff --git a/src/openai/types/admin/organization/groups/role_create_params.py b/src/openai/types/admin/organization/groups/role_create_params.py
new file mode 100644
index 0000000000..0ebc196eef
--- /dev/null
+++ b/src/openai/types/admin/organization/groups/role_create_params.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+__all__ = ["RoleCreateParams"]
+
+
+class RoleCreateParams(TypedDict, total=False):
+ role_id: Required[str]
+ """Identifier of the role to assign."""
diff --git a/src/openai/types/admin/organization/groups/role_create_response.py b/src/openai/types/admin/organization/groups/role_create_response.py
new file mode 100644
index 0000000000..8f82bfc542
--- /dev/null
+++ b/src/openai/types/admin/organization/groups/role_create_response.py
@@ -0,0 +1,40 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..role import Role
+from ....._models import BaseModel
+
+__all__ = ["RoleCreateResponse", "Group"]
+
+
+class Group(BaseModel):
+ """Summary information about a group returned in role assignment responses."""
+
+ id: str
+ """Identifier for the group."""
+
+ created_at: int
+ """Unix timestamp (in seconds) when the group was created."""
+
+ name: str
+ """Display name of the group."""
+
+ object: Literal["group"]
+ """Always `group`."""
+
+ scim_managed: bool
+ """Whether the group is managed through SCIM."""
+
+
+class RoleCreateResponse(BaseModel):
+ """Role assignment linking a group to a role."""
+
+ group: Group
+ """Summary information about a group returned in role assignment responses."""
+
+ object: Literal["group.role"]
+ """Always `group.role`."""
+
+ role: Role
+ """Details about a role that can be assigned through the public Roles API."""
diff --git a/src/openai/types/admin/organization/groups/role_delete_response.py b/src/openai/types/admin/organization/groups/role_delete_response.py
new file mode 100644
index 0000000000..fb6a111614
--- /dev/null
+++ b/src/openai/types/admin/organization/groups/role_delete_response.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from ....._models import BaseModel
+
+__all__ = ["RoleDeleteResponse"]
+
+
+class RoleDeleteResponse(BaseModel):
+ """Confirmation payload returned after unassigning a role."""
+
+ deleted: bool
+ """Whether the assignment was removed."""
+
+ object: str
+ """
+ Identifier for the deleted assignment, such as `group.role.deleted` or
+ `user.role.deleted`.
+ """
diff --git a/src/openai/types/admin/organization/groups/role_list_params.py b/src/openai/types/admin/organization/groups/role_list_params.py
new file mode 100644
index 0000000000..451a1a2045
--- /dev/null
+++ b/src/openai/types/admin/organization/groups/role_list_params.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["RoleListParams"]
+
+
+class RoleListParams(TypedDict, total=False):
+ after: str
+ """Cursor for pagination.
+
+ Provide the value from the previous response's `next` field to continue listing
+ organization roles.
+ """
+
+ limit: int
+ """A limit on the number of organization role assignments to return."""
+
+ order: Literal["asc", "desc"]
+ """Sort order for the returned organization roles."""
diff --git a/src/openai/types/admin/organization/groups/role_list_response.py b/src/openai/types/admin/organization/groups/role_list_response.py
new file mode 100644
index 0000000000..337d517ba1
--- /dev/null
+++ b/src/openai/types/admin/organization/groups/role_list_response.py
@@ -0,0 +1,46 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, List, Optional
+
+from ....._models import BaseModel
+
+__all__ = ["RoleListResponse"]
+
+
+class RoleListResponse(BaseModel):
+ """
+ Detailed information about a role assignment entry returned when listing assignments.
+ """
+
+ id: str
+ """Identifier for the role."""
+
+ created_at: Optional[int] = None
+ """When the role was created."""
+
+ created_by: Optional[str] = None
+ """Identifier of the actor who created the role."""
+
+ created_by_user_obj: Optional[Dict[str, object]] = None
+ """User details for the actor that created the role, when available."""
+
+ description: Optional[str] = None
+ """Description of the role."""
+
+ metadata: Optional[Dict[str, object]] = None
+ """Arbitrary metadata stored on the role."""
+
+ name: str
+ """Name of the role."""
+
+ permissions: List[str]
+ """Permissions associated with the role."""
+
+ predefined_role: bool
+ """Whether the role is predefined by OpenAI."""
+
+ resource_type: str
+ """Resource type the role applies to."""
+
+ updated_at: Optional[int] = None
+ """When the role was last updated."""
diff --git a/src/openai/types/admin/organization/groups/user_create_params.py b/src/openai/types/admin/organization/groups/user_create_params.py
new file mode 100644
index 0000000000..ec30b46f1c
--- /dev/null
+++ b/src/openai/types/admin/organization/groups/user_create_params.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+__all__ = ["UserCreateParams"]
+
+
+class UserCreateParams(TypedDict, total=False):
+ user_id: Required[str]
+ """Identifier of the user to add to the group."""
diff --git a/src/openai/types/admin/organization/groups/user_create_response.py b/src/openai/types/admin/organization/groups/user_create_response.py
new file mode 100644
index 0000000000..508f51747e
--- /dev/null
+++ b/src/openai/types/admin/organization/groups/user_create_response.py
@@ -0,0 +1,20 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ....._models import BaseModel
+
+__all__ = ["UserCreateResponse"]
+
+
+class UserCreateResponse(BaseModel):
+ """Confirmation payload returned after adding a user to a group."""
+
+ group_id: str
+ """Identifier of the group the user was added to."""
+
+ object: Literal["group.user"]
+ """Always `group.user`."""
+
+ user_id: str
+ """Identifier of the user that was added."""
diff --git a/src/openai/types/admin/organization/groups/user_delete_response.py b/src/openai/types/admin/organization/groups/user_delete_response.py
new file mode 100644
index 0000000000..3b484a9baf
--- /dev/null
+++ b/src/openai/types/admin/organization/groups/user_delete_response.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ....._models import BaseModel
+
+__all__ = ["UserDeleteResponse"]
+
+
+class UserDeleteResponse(BaseModel):
+ """Confirmation payload returned after removing a user from a group."""
+
+ deleted: bool
+ """Whether the group membership was removed."""
+
+ object: Literal["group.user.deleted"]
+ """Always `group.user.deleted`."""
diff --git a/src/openai/types/admin/organization/groups/user_list_params.py b/src/openai/types/admin/organization/groups/user_list_params.py
new file mode 100644
index 0000000000..09bcfb1ba7
--- /dev/null
+++ b/src/openai/types/admin/organization/groups/user_list_params.py
@@ -0,0 +1,25 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["UserListParams"]
+
+
+class UserListParams(TypedDict, total=False):
+ after: str
+ """A cursor for use in pagination.
+
+ Provide the ID of the last user from the previous list response to retrieve the
+ next page.
+ """
+
+ limit: int
+ """A limit on the number of users to be returned.
+
+ Limit can range between 0 and 1000, and the default is 100.
+ """
+
+ order: Literal["asc", "desc"]
+ """Specifies the sort order of users in the list."""
diff --git a/src/openai/types/admin/organization/invite.py b/src/openai/types/admin/organization/invite.py
new file mode 100644
index 0000000000..a3d2c50438
--- /dev/null
+++ b/src/openai/types/admin/organization/invite.py
@@ -0,0 +1,47 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["Invite", "Project"]
+
+
+class Project(BaseModel):
+ id: str
+ """Project's public ID"""
+
+ role: Literal["member", "owner"]
+ """Project membership role"""
+
+
+class Invite(BaseModel):
+ """Represents an individual `invite` to the organization."""
+
+ id: str
+ """The identifier, which can be referenced in API endpoints"""
+
+ created_at: int
+ """The Unix timestamp (in seconds) of when the invite was sent."""
+
+ email: str
+ """The email address of the individual to whom the invite was sent"""
+
+ object: Literal["organization.invite"]
+ """The object type, which is always `organization.invite`"""
+
+ projects: List[Project]
+ """The projects that were granted membership upon acceptance of the invite."""
+
+ role: Literal["owner", "reader"]
+ """`owner` or `reader`"""
+
+ status: Literal["accepted", "expired", "pending"]
+    """`accepted`, `expired`, or `pending`"""
+
+ accepted_at: Optional[int] = None
+ """The Unix timestamp (in seconds) of when the invite was accepted."""
+
+ expires_at: Optional[int] = None
+ """The Unix timestamp (in seconds) of when the invite expires."""
diff --git a/src/openai/types/admin/organization/invite_create_params.py b/src/openai/types/admin/organization/invite_create_params.py
new file mode 100644
index 0000000000..7709003fe3
--- /dev/null
+++ b/src/openai/types/admin/organization/invite_create_params.py
@@ -0,0 +1,31 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Iterable
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["InviteCreateParams", "Project"]
+
+
+class InviteCreateParams(TypedDict, total=False):
+ email: Required[str]
+ """Send an email to this address"""
+
+ role: Required[Literal["reader", "owner"]]
+ """`owner` or `reader`"""
+
+ projects: Iterable[Project]
+ """
+ An array of projects to which membership is granted at the same time the org
+ invite is accepted. If omitted, the user will be invited to the default project
+ for compatibility with legacy behavior.
+ """
+
+
+class Project(TypedDict, total=False):
+ id: Required[str]
+ """Project's public ID"""
+
+ role: Required[Literal["member", "owner"]]
+ """Project membership role"""
diff --git a/src/openai/types/admin/organization/invite_delete_response.py b/src/openai/types/admin/organization/invite_delete_response.py
new file mode 100644
index 0000000000..1a8aa0ce2f
--- /dev/null
+++ b/src/openai/types/admin/organization/invite_delete_response.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["InviteDeleteResponse"]
+
+
+class InviteDeleteResponse(BaseModel):
+ id: str
+
+ deleted: bool
+
+ object: Literal["organization.invite.deleted"]
+ """The object type, which is always `organization.invite.deleted`"""
diff --git a/src/openai/types/admin/organization/invite_list_params.py b/src/openai/types/admin/organization/invite_list_params.py
new file mode 100644
index 0000000000..678510d655
--- /dev/null
+++ b/src/openai/types/admin/organization/invite_list_params.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["InviteListParams"]
+
+
+class InviteListParams(TypedDict, total=False):
+ after: str
+ """A cursor for use in pagination.
+
+ `after` is an object ID that defines your place in the list. For instance, if
+ you make a list request and receive 100 objects, ending with obj_foo, your
+ subsequent call can include after=obj_foo in order to fetch the next page of the
+ list.
+ """
+
+ limit: int
+ """A limit on the number of objects to be returned.
+
+ Limit can range between 1 and 100, and the default is 20.
+ """
diff --git a/src/openai/types/admin/organization/organization_user.py b/src/openai/types/admin/organization/organization_user.py
new file mode 100644
index 0000000000..3d1d43a8b6
--- /dev/null
+++ b/src/openai/types/admin/organization/organization_user.py
@@ -0,0 +1,96 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["OrganizationUser", "Projects", "ProjectsData", "User"]
+
+
+class ProjectsData(BaseModel):
+ id: Optional[str] = None
+
+ name: Optional[str] = None
+
+ role: Optional[str] = None
+
+
+class Projects(BaseModel):
+ """Projects associated with the user, if included."""
+
+ data: List[ProjectsData]
+
+ object: Literal["list"]
+
+
+class User(BaseModel):
+ """Nested user details."""
+
+ id: str
+
+ object: Literal["user"]
+
+ banned: Optional[bool] = None
+
+ banned_at: Optional[int] = None
+
+ email: Optional[str] = None
+
+ enabled: Optional[bool] = None
+
+ name: Optional[str] = None
+
+ picture: Optional[str] = None
+
+
+class OrganizationUser(BaseModel):
+ """Represents an individual `user` within an organization."""
+
+ id: str
+ """The identifier, which can be referenced in API endpoints"""
+
+ added_at: int
+ """The Unix timestamp (in seconds) of when the user was added."""
+
+ object: Literal["organization.user"]
+ """The object type, which is always `organization.user`"""
+
+ api_key_last_used_at: Optional[int] = None
+ """The Unix timestamp (in seconds) of the user's last API key usage."""
+
+ created: Optional[int] = None
+ """The Unix timestamp (in seconds) of when the user was created."""
+
+ developer_persona: Optional[str] = None
+ """The developer persona metadata for the user."""
+
+ email: Optional[str] = None
+ """The email address of the user"""
+
+ is_default: Optional[bool] = None
+ """Whether this is the organization's default user."""
+
+ is_scale_tier_authorized_purchaser: Optional[bool] = None
+ """Whether the user is an authorized purchaser for Scale Tier."""
+
+ is_scim_managed: Optional[bool] = None
+ """Whether the user is managed through SCIM."""
+
+ is_service_account: Optional[bool] = None
+ """Whether the user is a service account."""
+
+ name: Optional[str] = None
+ """The name of the user"""
+
+ projects: Optional[Projects] = None
+ """Projects associated with the user, if included."""
+
+ role: Optional[str] = None
+ """`owner` or `reader`"""
+
+ technical_level: Optional[str] = None
+ """The technical level metadata for the user."""
+
+ user: Optional[User] = None
+ """Nested user details."""
diff --git a/src/openai/types/admin/organization/project.py b/src/openai/types/admin/organization/project.py
new file mode 100644
index 0000000000..982bb1e4b3
--- /dev/null
+++ b/src/openai/types/admin/organization/project.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["Project"]
+
+
+class Project(BaseModel):
+ """Represents an individual project."""
+
+ id: str
+ """The identifier, which can be referenced in API endpoints"""
+
+ created_at: int
+ """The Unix timestamp (in seconds) of when the project was created."""
+
+ object: Literal["organization.project"]
+ """The object type, which is always `organization.project`"""
+
+ archived_at: Optional[int] = None
+ """The Unix timestamp (in seconds) of when the project was archived or `null`."""
+
+ external_key_id: Optional[str] = None
+ """The external key associated with the project."""
+
+ name: Optional[str] = None
+ """The name of the project. This appears in reporting."""
+
+ status: Optional[str] = None
+ """`active` or `archived`"""
diff --git a/src/openai/types/admin/organization/project_create_params.py b/src/openai/types/admin/organization/project_create_params.py
new file mode 100644
index 0000000000..a4b7b2d424
--- /dev/null
+++ b/src/openai/types/admin/organization/project_create_params.py
@@ -0,0 +1,25 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Required, TypedDict
+
+__all__ = ["ProjectCreateParams"]
+
+
+class ProjectCreateParams(TypedDict, total=False):
+ name: Required[str]
+ """The friendly name of the project, this name appears in reports."""
+
+ external_key_id: Optional[str]
+ """External key ID to associate with the project."""
+
+ geography: Optional[str]
+ """Create the project with the specified data residency region.
+
+ Your organization must have access to Data residency functionality in order to
+ use. See
+ [data residency controls](https://platform.openai.com/docs/guides/your-data#data-residency-controls)
+ to review the functionality and limitations of setting this field.
+ """
diff --git a/src/openai/types/admin/organization/project_list_params.py b/src/openai/types/admin/organization/project_list_params.py
new file mode 100644
index 0000000000..f55fb8a392
--- /dev/null
+++ b/src/openai/types/admin/organization/project_list_params.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["ProjectListParams"]
+
+
+class ProjectListParams(TypedDict, total=False):
+ after: str
+ """A cursor for use in pagination.
+
+ `after` is an object ID that defines your place in the list. For instance, if
+ you make a list request and receive 100 objects, ending with obj_foo, your
+ subsequent call can include after=obj_foo in order to fetch the next page of the
+ list.
+ """
+
+ include_archived: bool
+ """If `true` returns all projects including those that have been `archived`.
+
+ Archived projects are not included by default.
+ """
+
+ limit: int
+ """A limit on the number of objects to be returned.
+
+ Limit can range between 1 and 100, and the default is 20.
+ """
diff --git a/src/openai/types/admin/organization/project_update_params.py b/src/openai/types/admin/organization/project_update_params.py
new file mode 100644
index 0000000000..2ebdd09f4a
--- /dev/null
+++ b/src/openai/types/admin/organization/project_update_params.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import TypedDict
+
+__all__ = ["ProjectUpdateParams"]
+
+
+class ProjectUpdateParams(TypedDict, total=False):
+ external_key_id: Optional[str]
+ """External key ID to associate with the project."""
+
+ geography: Optional[str]
+ """Geography for the project."""
+
+ name: Optional[str]
+ """The updated name of the project, this name appears in reports."""
diff --git a/src/openai/types/admin/organization/projects/__init__.py b/src/openai/types/admin/organization/projects/__init__.py
new file mode 100644
index 0000000000..ea627ce7d6
--- /dev/null
+++ b/src/openai/types/admin/organization/projects/__init__.py
@@ -0,0 +1,34 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .project_user import ProjectUser as ProjectUser
+from .project_group import ProjectGroup as ProjectGroup
+from .project_api_key import ProjectAPIKey as ProjectAPIKey
+from .role_list_params import RoleListParams as RoleListParams
+from .user_list_params import UserListParams as UserListParams
+from .group_list_params import GroupListParams as GroupListParams
+from .project_rate_limit import ProjectRateLimit as ProjectRateLimit
+from .role_create_params import RoleCreateParams as RoleCreateParams
+from .role_update_params import RoleUpdateParams as RoleUpdateParams
+from .user_create_params import UserCreateParams as UserCreateParams
+from .user_update_params import UserUpdateParams as UserUpdateParams
+from .api_key_list_params import APIKeyListParams as APIKeyListParams
+from .group_create_params import GroupCreateParams as GroupCreateParams
+from .role_delete_response import RoleDeleteResponse as RoleDeleteResponse
+from .user_delete_response import UserDeleteResponse as UserDeleteResponse
+from .group_delete_response import GroupDeleteResponse as GroupDeleteResponse
+from .api_key_delete_response import APIKeyDeleteResponse as APIKeyDeleteResponse
+from .certificate_list_params import CertificateListParams as CertificateListParams
+from .project_service_account import ProjectServiceAccount as ProjectServiceAccount
+from .certificate_list_response import CertificateListResponse as CertificateListResponse
+from .certificate_activate_params import CertificateActivateParams as CertificateActivateParams
+from .service_account_list_params import ServiceAccountListParams as ServiceAccountListParams
+from .certificate_activate_response import CertificateActivateResponse as CertificateActivateResponse
+from .certificate_deactivate_params import CertificateDeactivateParams as CertificateDeactivateParams
+from .service_account_create_params import ServiceAccountCreateParams as ServiceAccountCreateParams
+from .certificate_deactivate_response import CertificateDeactivateResponse as CertificateDeactivateResponse
+from .service_account_create_response import ServiceAccountCreateResponse as ServiceAccountCreateResponse
+from .service_account_delete_response import ServiceAccountDeleteResponse as ServiceAccountDeleteResponse
+from .rate_limit_list_rate_limits_params import RateLimitListRateLimitsParams as RateLimitListRateLimitsParams
+from .rate_limit_update_rate_limit_params import RateLimitUpdateRateLimitParams as RateLimitUpdateRateLimitParams
diff --git a/src/openai/types/admin/organization/projects/api_key_delete_response.py b/src/openai/types/admin/organization/projects/api_key_delete_response.py
new file mode 100644
index 0000000000..253a6746ba
--- /dev/null
+++ b/src/openai/types/admin/organization/projects/api_key_delete_response.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ....._models import BaseModel
+
+__all__ = ["APIKeyDeleteResponse"]
+
+
+class APIKeyDeleteResponse(BaseModel):
+ id: str
+
+ deleted: bool
+
+ object: Literal["organization.project.api_key.deleted"]
diff --git a/src/openai/types/admin/organization/projects/api_key_list_params.py b/src/openai/types/admin/organization/projects/api_key_list_params.py
new file mode 100644
index 0000000000..422a28518e
--- /dev/null
+++ b/src/openai/types/admin/organization/projects/api_key_list_params.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["APIKeyListParams"]
+
+
+class APIKeyListParams(TypedDict, total=False):
+ after: str
+ """A cursor for use in pagination.
+
+ `after` is an object ID that defines your place in the list. For instance, if
+ you make a list request and receive 100 objects, ending with obj_foo, your
+ subsequent call can include after=obj_foo in order to fetch the next page of the
+ list.
+ """
+
+ limit: int
+ """A limit on the number of objects to be returned.
+
+ Limit can range between 1 and 100, and the default is 20.
+ """
diff --git a/src/openai/types/admin/organization/projects/certificate_activate_params.py b/src/openai/types/admin/organization/projects/certificate_activate_params.py
new file mode 100644
index 0000000000..a0e7cd20e8
--- /dev/null
+++ b/src/openai/types/admin/organization/projects/certificate_activate_params.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+from ....._types import SequenceNotStr
+
+__all__ = ["CertificateActivateParams"]
+
+
+class CertificateActivateParams(TypedDict, total=False):
+ certificate_ids: Required[SequenceNotStr[str]]
diff --git a/src/openai/types/admin/organization/projects/certificate_activate_response.py b/src/openai/types/admin/organization/projects/certificate_activate_response.py
new file mode 100644
index 0000000000..4ee8ae07f4
--- /dev/null
+++ b/src/openai/types/admin/organization/projects/certificate_activate_response.py
@@ -0,0 +1,37 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ....._models import BaseModel
+
+__all__ = ["CertificateActivateResponse", "CertificateDetails"]
+
+
+class CertificateDetails(BaseModel):
+ expires_at: Optional[int] = None
+ """The Unix timestamp (in seconds) of when the certificate expires."""
+
+ valid_at: Optional[int] = None
+ """The Unix timestamp (in seconds) of when the certificate becomes valid."""
+
+
+class CertificateActivateResponse(BaseModel):
+ """Represents an individual certificate configured at the project level."""
+
+ id: str
+ """The identifier, which can be referenced in API endpoints"""
+
+ active: bool
+ """Whether the certificate is currently active at the project level."""
+
+ certificate_details: CertificateDetails
+
+ created_at: int
+ """The Unix timestamp (in seconds) of when the certificate was uploaded."""
+
+ name: Optional[str] = None
+ """The name of the certificate."""
+
+ object: Literal["organization.project.certificate"]
+ """The object type, which is always `organization.project.certificate`."""
diff --git a/src/openai/types/admin/organization/projects/certificate_deactivate_params.py b/src/openai/types/admin/organization/projects/certificate_deactivate_params.py
new file mode 100644
index 0000000000..75c32708ba
--- /dev/null
+++ b/src/openai/types/admin/organization/projects/certificate_deactivate_params.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+from ....._types import SequenceNotStr
+
+__all__ = ["CertificateDeactivateParams"]
+
+
+class CertificateDeactivateParams(TypedDict, total=False):
+ certificate_ids: Required[SequenceNotStr[str]]
diff --git a/src/openai/types/admin/organization/projects/certificate_deactivate_response.py b/src/openai/types/admin/organization/projects/certificate_deactivate_response.py
new file mode 100644
index 0000000000..846c7a2ab7
--- /dev/null
+++ b/src/openai/types/admin/organization/projects/certificate_deactivate_response.py
@@ -0,0 +1,37 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ....._models import BaseModel
+
+__all__ = ["CertificateDeactivateResponse", "CertificateDetails"]
+
+
+class CertificateDetails(BaseModel):
+ expires_at: Optional[int] = None
+ """The Unix timestamp (in seconds) of when the certificate expires."""
+
+ valid_at: Optional[int] = None
+ """The Unix timestamp (in seconds) of when the certificate becomes valid."""
+
+
+class CertificateDeactivateResponse(BaseModel):
+ """Represents an individual certificate configured at the project level."""
+
+ id: str
+ """The identifier, which can be referenced in API endpoints"""
+
+ active: bool
+ """Whether the certificate is currently active at the project level."""
+
+ certificate_details: CertificateDetails
+
+ created_at: int
+ """The Unix timestamp (in seconds) of when the certificate was uploaded."""
+
+ name: Optional[str] = None
+ """The name of the certificate."""
+
+ object: Literal["organization.project.certificate"]
+ """The object type, which is always `organization.project.certificate`."""
diff --git a/src/openai/types/admin/organization/projects/certificate_list_params.py b/src/openai/types/admin/organization/projects/certificate_list_params.py
new file mode 100644
index 0000000000..1d9aff4b4a
--- /dev/null
+++ b/src/openai/types/admin/organization/projects/certificate_list_params.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["CertificateListParams"]
+
+
+class CertificateListParams(TypedDict, total=False):
+ after: str
+ """A cursor for use in pagination.
+
+ `after` is an object ID that defines your place in the list. For instance, if
+ you make a list request and receive 100 objects, ending with obj_foo, your
+ subsequent call can include after=obj_foo in order to fetch the next page of the
+ list.
+ """
+
+ limit: int
+ """A limit on the number of objects to be returned.
+
+ Limit can range between 1 and 100, and the default is 20.
+ """
+
+ order: Literal["asc", "desc"]
+ """Sort order by the `created_at` timestamp of the objects.
+
+ `asc` for ascending order and `desc` for descending order.
+ """
diff --git a/src/openai/types/admin/organization/projects/certificate_list_response.py b/src/openai/types/admin/organization/projects/certificate_list_response.py
new file mode 100644
index 0000000000..d4345b3b8c
--- /dev/null
+++ b/src/openai/types/admin/organization/projects/certificate_list_response.py
@@ -0,0 +1,37 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ....._models import BaseModel
+
+__all__ = ["CertificateListResponse", "CertificateDetails"]
+
+
+class CertificateDetails(BaseModel):
+ expires_at: Optional[int] = None
+ """The Unix timestamp (in seconds) of when the certificate expires."""
+
+ valid_at: Optional[int] = None
+ """The Unix timestamp (in seconds) of when the certificate becomes valid."""
+
+
+class CertificateListResponse(BaseModel):
+ """Represents an individual certificate configured at the project level."""
+
+ id: str
+ """The identifier, which can be referenced in API endpoints"""
+
+ active: bool
+ """Whether the certificate is currently active at the project level."""
+
+ certificate_details: CertificateDetails
+
+ created_at: int
+ """The Unix timestamp (in seconds) of when the certificate was uploaded."""
+
+ name: Optional[str] = None
+ """The name of the certificate."""
+
+ object: Literal["organization.project.certificate"]
+ """The object type, which is always `organization.project.certificate`."""
diff --git a/src/openai/types/admin/organization/projects/group_create_params.py b/src/openai/types/admin/organization/projects/group_create_params.py
new file mode 100644
index 0000000000..b9f4626d74
--- /dev/null
+++ b/src/openai/types/admin/organization/projects/group_create_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+__all__ = ["GroupCreateParams"]
+
+
+class GroupCreateParams(TypedDict, total=False):
+ group_id: Required[str]
+ """Identifier of the group to add to the project."""
+
+ role: Required[str]
+ """Identifier of the project role to grant to the group."""
diff --git a/src/openai/types/admin/organization/projects/group_delete_response.py b/src/openai/types/admin/organization/projects/group_delete_response.py
new file mode 100644
index 0000000000..ef1ce0ddb8
--- /dev/null
+++ b/src/openai/types/admin/organization/projects/group_delete_response.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ....._models import BaseModel
+
+__all__ = ["GroupDeleteResponse"]
+
+
+class GroupDeleteResponse(BaseModel):
+ """Confirmation payload returned after removing a group from a project."""
+
+ deleted: bool
+ """Whether the group membership in the project was removed."""
+
+ object: Literal["project.group.deleted"]
+ """Always `project.group.deleted`."""
diff --git a/src/openai/types/admin/organization/projects/group_list_params.py b/src/openai/types/admin/organization/projects/group_list_params.py
new file mode 100644
index 0000000000..26ab31a88b
--- /dev/null
+++ b/src/openai/types/admin/organization/projects/group_list_params.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["GroupListParams"]
+
+
+class GroupListParams(TypedDict, total=False):
+ after: str
+ """Cursor for pagination.
+
+ Provide the ID of the last group from the previous response to fetch the next
+ page.
+ """
+
+ limit: int
+ """A limit on the number of project groups to return. Defaults to 20."""
+
+ order: Literal["asc", "desc"]
+ """Sort order for the returned groups."""
diff --git a/src/openai/types/admin/organization/projects/groups/__init__.py b/src/openai/types/admin/organization/projects/groups/__init__.py
new file mode 100644
index 0000000000..ed464fde83
--- /dev/null
+++ b/src/openai/types/admin/organization/projects/groups/__init__.py
@@ -0,0 +1,9 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .role_list_params import RoleListParams as RoleListParams
+from .role_create_params import RoleCreateParams as RoleCreateParams
+from .role_list_response import RoleListResponse as RoleListResponse
+from .role_create_response import RoleCreateResponse as RoleCreateResponse
+from .role_delete_response import RoleDeleteResponse as RoleDeleteResponse
diff --git a/src/openai/types/admin/organization/projects/groups/role_create_params.py b/src/openai/types/admin/organization/projects/groups/role_create_params.py
new file mode 100644
index 0000000000..2aba01e8a4
--- /dev/null
+++ b/src/openai/types/admin/organization/projects/groups/role_create_params.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+__all__ = ["RoleCreateParams"]
+
+
+class RoleCreateParams(TypedDict, total=False):
+ project_id: Required[str]
+
+ role_id: Required[str]
+ """Identifier of the role to assign."""
diff --git a/src/openai/types/admin/organization/projects/groups/role_create_response.py b/src/openai/types/admin/organization/projects/groups/role_create_response.py
new file mode 100644
index 0000000000..c6e7a1c048
--- /dev/null
+++ b/src/openai/types/admin/organization/projects/groups/role_create_response.py
@@ -0,0 +1,40 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...role import Role
+from ......_models import BaseModel
+
+__all__ = ["RoleCreateResponse", "Group"]
+
+
+class Group(BaseModel):
+ """Summary information about a group returned in role assignment responses."""
+
+ id: str
+ """Identifier for the group."""
+
+ created_at: int
+ """Unix timestamp (in seconds) when the group was created."""
+
+ name: str
+ """Display name of the group."""
+
+ object: Literal["group"]
+ """Always `group`."""
+
+ scim_managed: bool
+ """Whether the group is managed through SCIM."""
+
+
+class RoleCreateResponse(BaseModel):
+ """Role assignment linking a group to a role."""
+
+ group: Group
+ """Summary information about a group returned in role assignment responses."""
+
+ object: Literal["group.role"]
+ """Always `group.role`."""
+
+ role: Role
+ """Details about a role that can be assigned through the public Roles API."""
diff --git a/src/openai/types/admin/organization/projects/groups/role_delete_response.py b/src/openai/types/admin/organization/projects/groups/role_delete_response.py
new file mode 100644
index 0000000000..704de05117
--- /dev/null
+++ b/src/openai/types/admin/organization/projects/groups/role_delete_response.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from ......_models import BaseModel
+
+__all__ = ["RoleDeleteResponse"]
+
+
+class RoleDeleteResponse(BaseModel):
+ """Confirmation payload returned after unassigning a role."""
+
+ deleted: bool
+ """Whether the assignment was removed."""
+
+ object: str
+ """
+ Identifier for the deleted assignment, such as `group.role.deleted` or
+ `user.role.deleted`.
+ """
diff --git a/src/openai/types/admin/organization/projects/groups/role_list_params.py b/src/openai/types/admin/organization/projects/groups/role_list_params.py
new file mode 100644
index 0000000000..ffdbe210d2
--- /dev/null
+++ b/src/openai/types/admin/organization/projects/groups/role_list_params.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["RoleListParams"]
+
+
+class RoleListParams(TypedDict, total=False):
+ project_id: Required[str]
+
+ after: str
+ """Cursor for pagination.
+
+ Provide the value from the previous response's `next` field to continue listing
+ project roles.
+ """
+
+ limit: int
+ """A limit on the number of project role assignments to return."""
+
+ order: Literal["asc", "desc"]
+ """Sort order for the returned project roles."""
diff --git a/src/openai/types/admin/organization/projects/groups/role_list_response.py b/src/openai/types/admin/organization/projects/groups/role_list_response.py
new file mode 100644
index 0000000000..72934bde13
--- /dev/null
+++ b/src/openai/types/admin/organization/projects/groups/role_list_response.py
@@ -0,0 +1,46 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, List, Optional
+
+from ......_models import BaseModel
+
+__all__ = ["RoleListResponse"]
+
+
+class RoleListResponse(BaseModel):
+ """
+ Detailed information about a role assignment entry returned when listing assignments.
+ """
+
+ id: str
+ """Identifier for the role."""
+
+ created_at: Optional[int] = None
+ """When the role was created."""
+
+ created_by: Optional[str] = None
+ """Identifier of the actor who created the role."""
+
+ created_by_user_obj: Optional[Dict[str, object]] = None
+ """User details for the actor that created the role, when available."""
+
+ description: Optional[str] = None
+ """Description of the role."""
+
+ metadata: Optional[Dict[str, object]] = None
+ """Arbitrary metadata stored on the role."""
+
+ name: str
+ """Name of the role."""
+
+ permissions: List[str]
+ """Permissions associated with the role."""
+
+ predefined_role: bool
+ """Whether the role is predefined by OpenAI."""
+
+ resource_type: str
+ """Resource type the role applies to."""
+
+ updated_at: Optional[int] = None
+ """When the role was last updated."""
diff --git a/src/openai/types/admin/organization/projects/project_api_key.py b/src/openai/types/admin/organization/projects/project_api_key.py
new file mode 100644
index 0000000000..7e5e6949eb
--- /dev/null
+++ b/src/openai/types/admin/organization/projects/project_api_key.py
@@ -0,0 +1,78 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ....._models import BaseModel
+
+__all__ = ["ProjectAPIKey", "Owner", "OwnerServiceAccount", "OwnerUser"]
+
+
+class OwnerServiceAccount(BaseModel):
+ """The service account that owns a project API key."""
+
+ id: str
+ """The identifier, which can be referenced in API endpoints"""
+
+ created_at: int
+ """The Unix timestamp (in seconds) of when the service account was created."""
+
+ name: str
+ """The name of the service account."""
+
+ role: str
+ """The service account's project role."""
+
+
+class OwnerUser(BaseModel):
+ """The user that owns a project API key."""
+
+ id: str
+ """The identifier, which can be referenced in API endpoints"""
+
+ created_at: int
+ """The Unix timestamp (in seconds) of when the user was created."""
+
+ email: str
+ """The email address of the user."""
+
+ name: str
+ """The name of the user."""
+
+ role: str
+ """The user's project role."""
+
+
+class Owner(BaseModel):
+ service_account: Optional[OwnerServiceAccount] = None
+ """The service account that owns a project API key."""
+
+ type: Optional[Literal["user", "service_account"]] = None
+ """`user` or `service_account`"""
+
+ user: Optional[OwnerUser] = None
+ """The user that owns a project API key."""
+
+
+class ProjectAPIKey(BaseModel):
+ """Represents an individual API key in a project."""
+
+ id: str
+ """The identifier, which can be referenced in API endpoints"""
+
+ created_at: int
+ """The Unix timestamp (in seconds) of when the API key was created"""
+
+ last_used_at: Optional[int] = None
+ """The Unix timestamp (in seconds) of when the API key was last used."""
+
+ name: str
+ """The name of the API key"""
+
+ object: Literal["organization.project.api_key"]
+ """The object type, which is always `organization.project.api_key`"""
+
+ owner: Owner
+
+ redacted_value: str
+ """The redacted value of the API key"""
diff --git a/src/openai/types/admin/organization/projects/project_group.py b/src/openai/types/admin/organization/projects/project_group.py
new file mode 100644
index 0000000000..b3da08ca32
--- /dev/null
+++ b/src/openai/types/admin/organization/projects/project_group.py
@@ -0,0 +1,29 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ....._models import BaseModel
+
+__all__ = ["ProjectGroup"]
+
+
+class ProjectGroup(BaseModel):
+ """Details about a group's membership in a project."""
+
+ created_at: int
+ """Unix timestamp (in seconds) when the group was granted project access."""
+
+ group_id: str
+ """Identifier of the group that has access to the project."""
+
+ group_name: str
+ """Display name of the group."""
+
+ group_type: str
+ """The type of the group."""
+
+ object: Literal["project.group"]
+ """Always `project.group`."""
+
+ project_id: str
+ """Identifier of the project."""
diff --git a/src/openai/types/admin/organization/projects/project_rate_limit.py b/src/openai/types/admin/organization/projects/project_rate_limit.py
new file mode 100644
index 0000000000..46ff1ef036
--- /dev/null
+++ b/src/openai/types/admin/organization/projects/project_rate_limit.py
@@ -0,0 +1,39 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ....._models import BaseModel
+
+__all__ = ["ProjectRateLimit"]
+
+
+class ProjectRateLimit(BaseModel):
+ """Represents a project rate limit config."""
+
+ id: str
+ """The identifier, which can be referenced in API endpoints."""
+
+ max_requests_per_1_minute: int
+ """The maximum requests per minute."""
+
+ max_tokens_per_1_minute: int
+ """The maximum tokens per minute."""
+
+ model: str
+ """The model this rate limit applies to."""
+
+ object: Literal["project.rate_limit"]
+ """The object type, which is always `project.rate_limit`"""
+
+ batch_1_day_max_input_tokens: Optional[int] = None
+ """The maximum batch input tokens per day. Only present for relevant models."""
+
+ max_audio_megabytes_per_1_minute: Optional[int] = None
+ """The maximum audio megabytes per minute. Only present for relevant models."""
+
+ max_images_per_1_minute: Optional[int] = None
+ """The maximum images per minute. Only present for relevant models."""
+
+ max_requests_per_1_day: Optional[int] = None
+ """The maximum requests per day. Only present for relevant models."""
diff --git a/src/openai/types/admin/organization/projects/project_service_account.py b/src/openai/types/admin/organization/projects/project_service_account.py
new file mode 100644
index 0000000000..ca7bf0bdae
--- /dev/null
+++ b/src/openai/types/admin/organization/projects/project_service_account.py
@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ....._models import BaseModel
+
+__all__ = ["ProjectServiceAccount"]
+
+
+class ProjectServiceAccount(BaseModel):
+ """Represents an individual service account in a project."""
+
+ id: str
+ """The identifier, which can be referenced in API endpoints"""
+
+ created_at: int
+ """The Unix timestamp (in seconds) of when the service account was created"""
+
+ name: str
+ """The name of the service account"""
+
+ object: Literal["organization.project.service_account"]
+ """The object type, which is always `organization.project.service_account`"""
+
+ role: Literal["owner", "member"]
+ """`owner` or `member`"""
diff --git a/src/openai/types/admin/organization/projects/project_user.py b/src/openai/types/admin/organization/projects/project_user.py
new file mode 100644
index 0000000000..7aaa9fc05b
--- /dev/null
+++ b/src/openai/types/admin/organization/projects/project_user.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ....._models import BaseModel
+
+__all__ = ["ProjectUser"]
+
+
+class ProjectUser(BaseModel):
+ """Represents an individual user in a project."""
+
+ id: str
+ """The identifier, which can be referenced in API endpoints"""
+
+ added_at: int
+ """The Unix timestamp (in seconds) of when the project was added."""
+
+ object: Literal["organization.project.user"]
+ """The object type, which is always `organization.project.user`"""
+
+ role: str
+ """`owner` or `member`"""
+
+ email: Optional[str] = None
+ """The email address of the user"""
+
+ name: Optional[str] = None
+ """The name of the user"""
diff --git a/src/openai/types/admin/organization/projects/rate_limit_list_rate_limits_params.py b/src/openai/types/admin/organization/projects/rate_limit_list_rate_limits_params.py
new file mode 100644
index 0000000000..198fe94f19
--- /dev/null
+++ b/src/openai/types/admin/organization/projects/rate_limit_list_rate_limits_params.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["RateLimitListRateLimitsParams"]
+
+
+class RateLimitListRateLimitsParams(TypedDict, total=False):
+ after: str
+ """A cursor for use in pagination.
+
+ `after` is an object ID that defines your place in the list. For instance, if
+ you make a list request and receive 100 objects, ending with obj_foo, your
+ subsequent call can include after=obj_foo in order to fetch the next page of the
+ list.
+ """
+
+ before: str
+ """A cursor for use in pagination.
+
+ `before` is an object ID that defines your place in the list. For instance, if
+ you make a list request and receive 100 objects, beginning with obj_foo, your
+ subsequent call can include before=obj_foo in order to fetch the previous page
+ of the list.
+ """
+
+ limit: int
+ """A limit on the number of objects to be returned. The default is 100."""
diff --git a/src/openai/types/admin/organization/projects/rate_limit_update_rate_limit_params.py b/src/openai/types/admin/organization/projects/rate_limit_update_rate_limit_params.py
new file mode 100644
index 0000000000..5d5e515515
--- /dev/null
+++ b/src/openai/types/admin/organization/projects/rate_limit_update_rate_limit_params.py
@@ -0,0 +1,29 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+__all__ = ["RateLimitUpdateRateLimitParams"]
+
+
+class RateLimitUpdateRateLimitParams(TypedDict, total=False):
+ project_id: Required[str]
+
+ batch_1_day_max_input_tokens: int
+ """The maximum batch input tokens per day. Only relevant for certain models."""
+
+ max_audio_megabytes_per_1_minute: int
+ """The maximum audio megabytes per minute. Only relevant for certain models."""
+
+ max_images_per_1_minute: int
+ """The maximum images per minute. Only relevant for certain models."""
+
+ max_requests_per_1_day: int
+ """The maximum requests per day. Only relevant for certain models."""
+
+ max_requests_per_1_minute: int
+ """The maximum requests per minute."""
+
+ max_tokens_per_1_minute: int
+ """The maximum tokens per minute."""
diff --git a/src/openai/types/admin/organization/projects/role_create_params.py b/src/openai/types/admin/organization/projects/role_create_params.py
new file mode 100644
index 0000000000..e05c8c8a63
--- /dev/null
+++ b/src/openai/types/admin/organization/projects/role_create_params.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Required, TypedDict
+
+from ....._types import SequenceNotStr
+
+__all__ = ["RoleCreateParams"]
+
+
+class RoleCreateParams(TypedDict, total=False):
+ permissions: Required[SequenceNotStr[str]]
+ """Permissions to grant to the role."""
+
+ role_name: Required[str]
+ """Unique name for the role."""
+
+ description: Optional[str]
+ """Optional description of the role."""
diff --git a/src/openai/types/admin/organization/projects/role_delete_response.py b/src/openai/types/admin/organization/projects/role_delete_response.py
new file mode 100644
index 0000000000..87fa8e6200
--- /dev/null
+++ b/src/openai/types/admin/organization/projects/role_delete_response.py
@@ -0,0 +1,20 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ....._models import BaseModel
+
+__all__ = ["RoleDeleteResponse"]
+
+
+class RoleDeleteResponse(BaseModel):
+ """Confirmation payload returned after deleting a role."""
+
+ id: str
+ """Identifier of the deleted role."""
+
+ deleted: bool
+ """Whether the role was deleted."""
+
+ object: Literal["role.deleted"]
+ """Always `role.deleted`."""
diff --git a/src/openai/types/admin/organization/projects/role_list_params.py b/src/openai/types/admin/organization/projects/role_list_params.py
new file mode 100644
index 0000000000..88e957f886
--- /dev/null
+++ b/src/openai/types/admin/organization/projects/role_list_params.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["RoleListParams"]
+
+
+class RoleListParams(TypedDict, total=False):
+ after: str
+ """Cursor for pagination.
+
+ Provide the value from the previous response's `next` field to continue listing
+ roles.
+ """
+
+ limit: int
+ """A limit on the number of roles to return. Defaults to 1000."""
+
+ order: Literal["asc", "desc"]
+ """Sort order for the returned roles."""
diff --git a/src/openai/types/admin/organization/projects/role_update_params.py b/src/openai/types/admin/organization/projects/role_update_params.py
new file mode 100644
index 0000000000..4d440c87cd
--- /dev/null
+++ b/src/openai/types/admin/organization/projects/role_update_params.py
@@ -0,0 +1,23 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Required, TypedDict
+
+from ....._types import SequenceNotStr
+
+__all__ = ["RoleUpdateParams"]
+
+
+class RoleUpdateParams(TypedDict, total=False):
+ project_id: Required[str]
+
+ description: Optional[str]
+ """New description for the role."""
+
+ permissions: Optional[SequenceNotStr[str]]
+ """Updated set of permissions for the role."""
+
+ role_name: Optional[str]
+ """New name for the role."""
diff --git a/src/openai/types/admin/organization/projects/service_account_create_params.py b/src/openai/types/admin/organization/projects/service_account_create_params.py
new file mode 100644
index 0000000000..409dcba500
--- /dev/null
+++ b/src/openai/types/admin/organization/projects/service_account_create_params.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+__all__ = ["ServiceAccountCreateParams"]
+
+
+class ServiceAccountCreateParams(TypedDict, total=False):
+ name: Required[str]
+ """The name of the service account being created."""
diff --git a/src/openai/types/admin/organization/projects/service_account_create_response.py b/src/openai/types/admin/organization/projects/service_account_create_response.py
new file mode 100644
index 0000000000..430b11f655
--- /dev/null
+++ b/src/openai/types/admin/organization/projects/service_account_create_response.py
@@ -0,0 +1,36 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ....._models import BaseModel
+
+__all__ = ["ServiceAccountCreateResponse", "APIKey"]
+
+
+class APIKey(BaseModel):
+ id: str
+
+ created_at: int
+
+ name: str
+
+ object: Literal["organization.project.service_account.api_key"]
+ """The object type, which is always `organization.project.service_account.api_key`"""
+
+ value: str
+
+
+class ServiceAccountCreateResponse(BaseModel):
+ id: str
+
+ api_key: Optional[APIKey] = None
+
+ created_at: int
+
+ name: str
+
+ object: Literal["organization.project.service_account"]
+
+ role: Literal["member"]
+ """Service accounts can only have one role of type `member`"""
diff --git a/src/openai/types/admin/organization/projects/service_account_delete_response.py b/src/openai/types/admin/organization/projects/service_account_delete_response.py
new file mode 100644
index 0000000000..e67e635aab
--- /dev/null
+++ b/src/openai/types/admin/organization/projects/service_account_delete_response.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ....._models import BaseModel
+
+__all__ = ["ServiceAccountDeleteResponse"]
+
+
+class ServiceAccountDeleteResponse(BaseModel):
+ id: str
+
+ deleted: bool
+
+ object: Literal["organization.project.service_account.deleted"]
diff --git a/src/openai/types/admin/organization/projects/service_account_list_params.py b/src/openai/types/admin/organization/projects/service_account_list_params.py
new file mode 100644
index 0000000000..7f808e285a
--- /dev/null
+++ b/src/openai/types/admin/organization/projects/service_account_list_params.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["ServiceAccountListParams"]
+
+
+class ServiceAccountListParams(TypedDict, total=False):
+ after: str
+ """A cursor for use in pagination.
+
+ `after` is an object ID that defines your place in the list. For instance, if
+ you make a list request and receive 100 objects, ending with obj_foo, your
+ subsequent call can include after=obj_foo in order to fetch the next page of the
+ list.
+ """
+
+ limit: int
+ """A limit on the number of objects to be returned.
+
+ Limit can range between 1 and 100, and the default is 20.
+ """
diff --git a/src/openai/types/admin/organization/projects/user_create_params.py b/src/openai/types/admin/organization/projects/user_create_params.py
new file mode 100644
index 0000000000..4266ed01f9
--- /dev/null
+++ b/src/openai/types/admin/organization/projects/user_create_params.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Required, TypedDict
+
+__all__ = ["UserCreateParams"]
+
+
+class UserCreateParams(TypedDict, total=False):
+ role: Required[str]
+ """`owner` or `member`"""
+
+ email: Optional[str]
+ """Email of the user to add."""
+
+ user_id: Optional[str]
+ """The ID of the user."""
diff --git a/src/openai/types/admin/organization/projects/user_delete_response.py b/src/openai/types/admin/organization/projects/user_delete_response.py
new file mode 100644
index 0000000000..271d3a4126
--- /dev/null
+++ b/src/openai/types/admin/organization/projects/user_delete_response.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ....._models import BaseModel
+
+__all__ = ["UserDeleteResponse"]
+
+
+class UserDeleteResponse(BaseModel):
+ id: str
+
+ deleted: bool
+
+ object: Literal["organization.project.user.deleted"]
diff --git a/src/openai/types/admin/organization/projects/user_list_params.py b/src/openai/types/admin/organization/projects/user_list_params.py
new file mode 100644
index 0000000000..d561e907b1
--- /dev/null
+++ b/src/openai/types/admin/organization/projects/user_list_params.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["UserListParams"]
+
+
+class UserListParams(TypedDict, total=False):
+ after: str
+ """A cursor for use in pagination.
+
+ `after` is an object ID that defines your place in the list. For instance, if
+ you make a list request and receive 100 objects, ending with obj_foo, your
+ subsequent call can include after=obj_foo in order to fetch the next page of the
+ list.
+ """
+
+ limit: int
+ """A limit on the number of objects to be returned.
+
+ Limit can range between 1 and 100, and the default is 20.
+ """
diff --git a/src/openai/types/admin/organization/projects/user_update_params.py b/src/openai/types/admin/organization/projects/user_update_params.py
new file mode 100644
index 0000000000..20a4276567
--- /dev/null
+++ b/src/openai/types/admin/organization/projects/user_update_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Required, TypedDict
+
+__all__ = ["UserUpdateParams"]
+
+
+class UserUpdateParams(TypedDict, total=False):
+ project_id: Required[str]
+
+ role: Optional[str]
+ """`owner` or `member`"""
diff --git a/src/openai/types/admin/organization/projects/users/__init__.py b/src/openai/types/admin/organization/projects/users/__init__.py
new file mode 100644
index 0000000000..ed464fde83
--- /dev/null
+++ b/src/openai/types/admin/organization/projects/users/__init__.py
@@ -0,0 +1,9 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .role_list_params import RoleListParams as RoleListParams
+from .role_create_params import RoleCreateParams as RoleCreateParams
+from .role_list_response import RoleListResponse as RoleListResponse
+from .role_create_response import RoleCreateResponse as RoleCreateResponse
+from .role_delete_response import RoleDeleteResponse as RoleDeleteResponse
diff --git a/src/openai/types/admin/organization/projects/users/role_create_params.py b/src/openai/types/admin/organization/projects/users/role_create_params.py
new file mode 100644
index 0000000000..2aba01e8a4
--- /dev/null
+++ b/src/openai/types/admin/organization/projects/users/role_create_params.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+__all__ = ["RoleCreateParams"]
+
+
+class RoleCreateParams(TypedDict, total=False):
+ project_id: Required[str]
+
+ role_id: Required[str]
+ """Identifier of the role to assign."""
diff --git a/src/openai/types/admin/organization/projects/users/role_create_response.py b/src/openai/types/admin/organization/projects/users/role_create_response.py
new file mode 100644
index 0000000000..533df36500
--- /dev/null
+++ b/src/openai/types/admin/organization/projects/users/role_create_response.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...role import Role
+from ......_models import BaseModel
+from ...organization_user import OrganizationUser
+
+__all__ = ["RoleCreateResponse"]
+
+
+class RoleCreateResponse(BaseModel):
+ """Role assignment linking a user to a role."""
+
+ object: Literal["user.role"]
+ """Always `user.role`."""
+
+ role: Role
+ """Details about a role that can be assigned through the public Roles API."""
+
+ user: OrganizationUser
+ """Represents an individual `user` within an organization."""
diff --git a/src/openai/types/admin/organization/projects/users/role_delete_response.py b/src/openai/types/admin/organization/projects/users/role_delete_response.py
new file mode 100644
index 0000000000..704de05117
--- /dev/null
+++ b/src/openai/types/admin/organization/projects/users/role_delete_response.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from ......_models import BaseModel
+
+__all__ = ["RoleDeleteResponse"]
+
+
+class RoleDeleteResponse(BaseModel):
+ """Confirmation payload returned after unassigning a role."""
+
+ deleted: bool
+ """Whether the assignment was removed."""
+
+ object: str
+ """
+ Identifier for the deleted assignment, such as `group.role.deleted` or
+ `user.role.deleted`.
+ """
diff --git a/src/openai/types/admin/organization/projects/users/role_list_params.py b/src/openai/types/admin/organization/projects/users/role_list_params.py
new file mode 100644
index 0000000000..ffdbe210d2
--- /dev/null
+++ b/src/openai/types/admin/organization/projects/users/role_list_params.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["RoleListParams"]
+
+
+class RoleListParams(TypedDict, total=False):
+ project_id: Required[str]
+
+ after: str
+ """Cursor for pagination.
+
+ Provide the value from the previous response's `next` field to continue listing
+ project roles.
+ """
+
+ limit: int
+ """A limit on the number of project role assignments to return."""
+
+ order: Literal["asc", "desc"]
+ """Sort order for the returned project roles."""
diff --git a/src/openai/types/admin/organization/projects/users/role_list_response.py b/src/openai/types/admin/organization/projects/users/role_list_response.py
new file mode 100644
index 0000000000..72934bde13
--- /dev/null
+++ b/src/openai/types/admin/organization/projects/users/role_list_response.py
@@ -0,0 +1,46 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, List, Optional
+
+from ......_models import BaseModel
+
+__all__ = ["RoleListResponse"]
+
+
+class RoleListResponse(BaseModel):
+ """
+ Detailed information about a role assignment entry returned when listing assignments.
+ """
+
+ id: str
+ """Identifier for the role."""
+
+ created_at: Optional[int] = None
+ """When the role was created."""
+
+ created_by: Optional[str] = None
+ """Identifier of the actor who created the role."""
+
+ created_by_user_obj: Optional[Dict[str, object]] = None
+ """User details for the actor that created the role, when available."""
+
+ description: Optional[str] = None
+ """Description of the role."""
+
+ metadata: Optional[Dict[str, object]] = None
+ """Arbitrary metadata stored on the role."""
+
+ name: str
+ """Name of the role."""
+
+ permissions: List[str]
+ """Permissions associated with the role."""
+
+ predefined_role: bool
+ """Whether the role is predefined by OpenAI."""
+
+ resource_type: str
+ """Resource type the role applies to."""
+
+ updated_at: Optional[int] = None
+ """When the role was last updated."""
diff --git a/src/openai/types/admin/organization/role.py b/src/openai/types/admin/organization/role.py
new file mode 100644
index 0000000000..4795144c68
--- /dev/null
+++ b/src/openai/types/admin/organization/role.py
@@ -0,0 +1,36 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["Role"]
+
+
+class Role(BaseModel):
+ """Details about a role that can be assigned through the public Roles API."""
+
+ id: str
+ """Identifier for the role."""
+
+ description: Optional[str] = None
+ """Optional description of the role."""
+
+ name: str
+ """Unique name for the role."""
+
+ object: Literal["role"]
+ """Always `role`."""
+
+ permissions: List[str]
+ """Permissions granted by the role."""
+
+ predefined_role: bool
+ """Whether the role is predefined and managed by OpenAI."""
+
+ resource_type: str
+ """
+ Resource type the role is bound to (for example `api.organization` or
+ `api.project`).
+ """
diff --git a/src/openai/types/admin/organization/role_create_params.py b/src/openai/types/admin/organization/role_create_params.py
new file mode 100644
index 0000000000..60aaeb7383
--- /dev/null
+++ b/src/openai/types/admin/organization/role_create_params.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Required, TypedDict
+
+from ...._types import SequenceNotStr
+
+__all__ = ["RoleCreateParams"]
+
+
+class RoleCreateParams(TypedDict, total=False):
+ permissions: Required[SequenceNotStr[str]]
+ """Permissions to grant to the role."""
+
+ role_name: Required[str]
+ """Unique name for the role."""
+
+ description: Optional[str]
+ """Optional description of the role."""
diff --git a/src/openai/types/admin/organization/role_delete_response.py b/src/openai/types/admin/organization/role_delete_response.py
new file mode 100644
index 0000000000..934140d363
--- /dev/null
+++ b/src/openai/types/admin/organization/role_delete_response.py
@@ -0,0 +1,20 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["RoleDeleteResponse"]
+
+
+class RoleDeleteResponse(BaseModel):
+ """Confirmation payload returned after deleting a role."""
+
+ id: str
+ """Identifier of the deleted role."""
+
+ deleted: bool
+ """Whether the role was deleted."""
+
+ object: Literal["role.deleted"]
+ """Always `role.deleted`."""
diff --git a/src/openai/types/admin/organization/role_list_params.py b/src/openai/types/admin/organization/role_list_params.py
new file mode 100644
index 0000000000..88e957f886
--- /dev/null
+++ b/src/openai/types/admin/organization/role_list_params.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["RoleListParams"]
+
+
+class RoleListParams(TypedDict, total=False):
+ after: str
+ """Cursor for pagination.
+
+ Provide the value from the previous response's `next` field to continue listing
+ roles.
+ """
+
+ limit: int
+ """A limit on the number of roles to return. Defaults to 1000."""
+
+ order: Literal["asc", "desc"]
+ """Sort order for the returned roles."""
diff --git a/src/openai/types/admin/organization/role_update_params.py b/src/openai/types/admin/organization/role_update_params.py
new file mode 100644
index 0000000000..955ca170a8
--- /dev/null
+++ b/src/openai/types/admin/organization/role_update_params.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import TypedDict
+
+from ...._types import SequenceNotStr
+
+__all__ = ["RoleUpdateParams"]
+
+
+class RoleUpdateParams(TypedDict, total=False):
+ description: Optional[str]
+ """New description for the role."""
+
+ permissions: Optional[SequenceNotStr[str]]
+ """Updated set of permissions for the role."""
+
+ role_name: Optional[str]
+ """New name for the role."""
diff --git a/src/openai/types/admin/organization/usage_audio_speeches_params.py b/src/openai/types/admin/organization/usage_audio_speeches_params.py
new file mode 100644
index 0000000000..5d3eff286c
--- /dev/null
+++ b/src/openai/types/admin/organization/usage_audio_speeches_params.py
@@ -0,0 +1,57 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+from typing_extensions import Literal, Required, TypedDict
+
+from ...._types import SequenceNotStr
+
+__all__ = ["UsageAudioSpeechesParams"]
+
+
+class UsageAudioSpeechesParams(TypedDict, total=False):
+ start_time: Required[int]
+ """Start time (Unix seconds) of the query time range, inclusive."""
+
+ api_key_ids: SequenceNotStr[str]
+ """Return only usage for these API keys."""
+
+ bucket_width: Literal["1m", "1h", "1d"]
+ """Width of each time bucket in response.
+
+ Currently `1m`, `1h` and `1d` are supported, default to `1d`.
+ """
+
+ end_time: int
+ """End time (Unix seconds) of the query time range, exclusive."""
+
+ group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]]
+ """Group the usage data by the specified fields.
+
+ Support fields include `project_id`, `user_id`, `api_key_id`, `model` or any
+ combination of them.
+ """
+
+ limit: int
+ """Specifies the number of buckets to return.
+
+ - `bucket_width=1d`: default: 7, max: 31
+ - `bucket_width=1h`: default: 24, max: 168
+ - `bucket_width=1m`: default: 60, max: 1440
+ """
+
+ models: SequenceNotStr[str]
+ """Return only usage for these models."""
+
+ page: str
+ """A cursor for use in pagination.
+
+ Corresponding to the `next_page` field from the previous response.
+ """
+
+ project_ids: SequenceNotStr[str]
+ """Return only usage for these projects."""
+
+ user_ids: SequenceNotStr[str]
+ """Return only usage for these users."""
diff --git a/src/openai/types/admin/organization/usage_audio_speeches_response.py b/src/openai/types/admin/organization/usage_audio_speeches_response.py
new file mode 100644
index 0000000000..90e17c89b0
--- /dev/null
+++ b/src/openai/types/admin/organization/usage_audio_speeches_response.py
@@ -0,0 +1,390 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union, Optional
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ...._utils import PropertyInfo
+from ...._models import BaseModel
+
+__all__ = [
+ "UsageAudioSpeechesResponse",
+ "Data",
+ "DataResult",
+ "DataResultOrganizationUsageCompletionsResult",
+ "DataResultOrganizationUsageEmbeddingsResult",
+ "DataResultOrganizationUsageModerationsResult",
+ "DataResultOrganizationUsageImagesResult",
+ "DataResultOrganizationUsageAudioSpeechesResult",
+ "DataResultOrganizationUsageAudioTranscriptionsResult",
+ "DataResultOrganizationUsageVectorStoresResult",
+ "DataResultOrganizationUsageCodeInterpreterSessionsResult",
+ "DataResultOrganizationCostsResult",
+ "DataResultOrganizationCostsResultAmount",
+]
+
+
+class DataResultOrganizationUsageCompletionsResult(BaseModel):
+ """The aggregated completions usage details of the specific time bucket."""
+
+ input_tokens: int
+ """The aggregated number of text input tokens used, including cached tokens.
+
+ For customers subscribe to scale tier, this includes scale tier tokens.
+ """
+
+ num_model_requests: int
+ """The count of requests made to the model."""
+
+ object: Literal["organization.usage.completions.result"]
+
+ output_tokens: int
+ """The aggregated number of text output tokens used.
+
+ For customers subscribe to scale tier, this includes scale tier tokens.
+ """
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ usage result.
+ """
+
+ batch: Optional[bool] = None
+ """
+ When `group_by=batch`, this field tells whether the grouped usage result is
+ batch or not.
+ """
+
+ input_audio_tokens: Optional[int] = None
+ """The aggregated number of audio input tokens used, including cached tokens."""
+
+ input_cached_tokens: Optional[int] = None
+ """
+ The aggregated number of text input tokens that has been cached from previous
+ requests. For customers subscribe to scale tier, this includes scale tier
+ tokens.
+ """
+
+ model: Optional[str] = None
+ """
+ When `group_by=model`, this field provides the model name of the grouped usage
+ result.
+ """
+
+ output_audio_tokens: Optional[int] = None
+ """The aggregated number of audio output tokens used."""
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+ service_tier: Optional[str] = None
+ """
+ When `group_by=service_tier`, this field provides the service tier of the
+ grouped usage result.
+ """
+
+ user_id: Optional[str] = None
+ """
+ When `group_by=user_id`, this field provides the user ID of the grouped usage
+ result.
+ """
+
+
+class DataResultOrganizationUsageEmbeddingsResult(BaseModel):
+ """The aggregated embeddings usage details of the specific time bucket."""
+
+ input_tokens: int
+ """The aggregated number of input tokens used."""
+
+ num_model_requests: int
+ """The count of requests made to the model."""
+
+ object: Literal["organization.usage.embeddings.result"]
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ usage result.
+ """
+
+ model: Optional[str] = None
+ """
+ When `group_by=model`, this field provides the model name of the grouped usage
+ result.
+ """
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+ user_id: Optional[str] = None
+ """
+ When `group_by=user_id`, this field provides the user ID of the grouped usage
+ result.
+ """
+
+
+class DataResultOrganizationUsageModerationsResult(BaseModel):
+ """The aggregated moderations usage details of the specific time bucket."""
+
+ input_tokens: int
+ """The aggregated number of input tokens used."""
+
+ num_model_requests: int
+ """The count of requests made to the model."""
+
+ object: Literal["organization.usage.moderations.result"]
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ usage result.
+ """
+
+ model: Optional[str] = None
+ """
+ When `group_by=model`, this field provides the model name of the grouped usage
+ result.
+ """
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+ user_id: Optional[str] = None
+ """
+ When `group_by=user_id`, this field provides the user ID of the grouped usage
+ result.
+ """
+
+
+class DataResultOrganizationUsageImagesResult(BaseModel):
+ """The aggregated images usage details of the specific time bucket."""
+
+ images: int
+ """The number of images processed."""
+
+ num_model_requests: int
+ """The count of requests made to the model."""
+
+ object: Literal["organization.usage.images.result"]
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ usage result.
+ """
+
+ model: Optional[str] = None
+ """
+ When `group_by=model`, this field provides the model name of the grouped usage
+ result.
+ """
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+ size: Optional[str] = None
+ """
+ When `group_by=size`, this field provides the image size of the grouped usage
+ result.
+ """
+
+ source: Optional[str] = None
+ """
+ When `group_by=source`, this field provides the source of the grouped usage
+ result, possible values are `image.generation`, `image.edit`, `image.variation`.
+ """
+
+ user_id: Optional[str] = None
+ """
+ When `group_by=user_id`, this field provides the user ID of the grouped usage
+ result.
+ """
+
+
+class DataResultOrganizationUsageAudioSpeechesResult(BaseModel):
+ """The aggregated audio speeches usage details of the specific time bucket."""
+
+ characters: int
+ """The number of characters processed."""
+
+ num_model_requests: int
+ """The count of requests made to the model."""
+
+ object: Literal["organization.usage.audio_speeches.result"]
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ usage result.
+ """
+
+ model: Optional[str] = None
+ """
+ When `group_by=model`, this field provides the model name of the grouped usage
+ result.
+ """
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+ user_id: Optional[str] = None
+ """
+ When `group_by=user_id`, this field provides the user ID of the grouped usage
+ result.
+ """
+
+
+class DataResultOrganizationUsageAudioTranscriptionsResult(BaseModel):
+ """The aggregated audio transcriptions usage details of the specific time bucket."""
+
+ num_model_requests: int
+ """The count of requests made to the model."""
+
+ object: Literal["organization.usage.audio_transcriptions.result"]
+
+ seconds: int
+ """The number of seconds processed."""
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ usage result.
+ """
+
+ model: Optional[str] = None
+ """
+ When `group_by=model`, this field provides the model name of the grouped usage
+ result.
+ """
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+ user_id: Optional[str] = None
+ """
+ When `group_by=user_id`, this field provides the user ID of the grouped usage
+ result.
+ """
+
+
+class DataResultOrganizationUsageVectorStoresResult(BaseModel):
+ """The aggregated vector stores usage details of the specific time bucket."""
+
+ object: Literal["organization.usage.vector_stores.result"]
+
+ usage_bytes: int
+ """The vector stores usage in bytes."""
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+
+class DataResultOrganizationUsageCodeInterpreterSessionsResult(BaseModel):
+ """
+ The aggregated code interpreter sessions usage details of the specific time bucket.
+ """
+
+ num_sessions: int
+ """The number of code interpreter sessions."""
+
+ object: Literal["organization.usage.code_interpreter_sessions.result"]
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+
+class DataResultOrganizationCostsResultAmount(BaseModel):
+ """The monetary value in its associated currency."""
+
+ currency: Optional[str] = None
+ """Lowercase ISO-4217 currency e.g. "usd" """
+
+ value: Optional[float] = None
+ """The numeric value of the cost."""
+
+
+class DataResultOrganizationCostsResult(BaseModel):
+ """The aggregated costs details of the specific time bucket."""
+
+ object: Literal["organization.costs.result"]
+
+ amount: Optional[DataResultOrganizationCostsResultAmount] = None
+ """The monetary value in its associated currency."""
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API Key ID of the grouped
+ costs result.
+ """
+
+ line_item: Optional[str] = None
+ """
+ When `group_by=line_item`, this field provides the line item of the grouped
+ costs result.
+ """
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ costs result.
+ """
+
+
+DataResult: TypeAlias = Annotated[
+ Union[
+ DataResultOrganizationUsageCompletionsResult,
+ DataResultOrganizationUsageEmbeddingsResult,
+ DataResultOrganizationUsageModerationsResult,
+ DataResultOrganizationUsageImagesResult,
+ DataResultOrganizationUsageAudioSpeechesResult,
+ DataResultOrganizationUsageAudioTranscriptionsResult,
+ DataResultOrganizationUsageVectorStoresResult,
+ DataResultOrganizationUsageCodeInterpreterSessionsResult,
+ DataResultOrganizationCostsResult,
+ ],
+ PropertyInfo(discriminator="object"),
+]
+
+
+class Data(BaseModel):
+ end_time: int
+
+ object: Literal["bucket"]
+
+ results: List[DataResult]
+
+ start_time: int
+
+
+class UsageAudioSpeechesResponse(BaseModel):
+ data: List[Data]
+
+ has_more: bool
+
+ next_page: Optional[str] = None
+
+ object: Literal["page"]
diff --git a/src/openai/types/admin/organization/usage_audio_transcriptions_params.py b/src/openai/types/admin/organization/usage_audio_transcriptions_params.py
new file mode 100644
index 0000000000..ca5cd13c78
--- /dev/null
+++ b/src/openai/types/admin/organization/usage_audio_transcriptions_params.py
@@ -0,0 +1,57 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+from typing_extensions import Literal, Required, TypedDict
+
+from ...._types import SequenceNotStr
+
+__all__ = ["UsageAudioTranscriptionsParams"]
+
+
+class UsageAudioTranscriptionsParams(TypedDict, total=False):
+ start_time: Required[int]
+ """Start time (Unix seconds) of the query time range, inclusive."""
+
+ api_key_ids: SequenceNotStr[str]
+ """Return only usage for these API keys."""
+
+ bucket_width: Literal["1m", "1h", "1d"]
+ """Width of each time bucket in response.
+
+ Currently `1m`, `1h` and `1d` are supported, default to `1d`.
+ """
+
+ end_time: int
+ """End time (Unix seconds) of the query time range, exclusive."""
+
+ group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]]
+ """Group the usage data by the specified fields.
+
+ Support fields include `project_id`, `user_id`, `api_key_id`, `model` or any
+ combination of them.
+ """
+
+ limit: int
+ """Specifies the number of buckets to return.
+
+ - `bucket_width=1d`: default: 7, max: 31
+ - `bucket_width=1h`: default: 24, max: 168
+ - `bucket_width=1m`: default: 60, max: 1440
+ """
+
+ models: SequenceNotStr[str]
+ """Return only usage for these models."""
+
+ page: str
+ """A cursor for use in pagination.
+
+ Corresponding to the `next_page` field from the previous response.
+ """
+
+ project_ids: SequenceNotStr[str]
+ """Return only usage for these projects."""
+
+ user_ids: SequenceNotStr[str]
+ """Return only usage for these users."""
diff --git a/src/openai/types/admin/organization/usage_audio_transcriptions_response.py b/src/openai/types/admin/organization/usage_audio_transcriptions_response.py
new file mode 100644
index 0000000000..abd7c3fb90
--- /dev/null
+++ b/src/openai/types/admin/organization/usage_audio_transcriptions_response.py
@@ -0,0 +1,390 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union, Optional
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ...._utils import PropertyInfo
+from ...._models import BaseModel
+
+__all__ = [
+ "UsageAudioTranscriptionsResponse",
+ "Data",
+ "DataResult",
+ "DataResultOrganizationUsageCompletionsResult",
+ "DataResultOrganizationUsageEmbeddingsResult",
+ "DataResultOrganizationUsageModerationsResult",
+ "DataResultOrganizationUsageImagesResult",
+ "DataResultOrganizationUsageAudioSpeechesResult",
+ "DataResultOrganizationUsageAudioTranscriptionsResult",
+ "DataResultOrganizationUsageVectorStoresResult",
+ "DataResultOrganizationUsageCodeInterpreterSessionsResult",
+ "DataResultOrganizationCostsResult",
+ "DataResultOrganizationCostsResultAmount",
+]
+
+
+class DataResultOrganizationUsageCompletionsResult(BaseModel):
+ """The aggregated completions usage details of the specific time bucket."""
+
+ input_tokens: int
+ """The aggregated number of text input tokens used, including cached tokens.
+
+ For customers subscribe to scale tier, this includes scale tier tokens.
+ """
+
+ num_model_requests: int
+ """The count of requests made to the model."""
+
+ object: Literal["organization.usage.completions.result"]
+
+ output_tokens: int
+ """The aggregated number of text output tokens used.
+
+ For customers subscribe to scale tier, this includes scale tier tokens.
+ """
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ usage result.
+ """
+
+ batch: Optional[bool] = None
+ """
+ When `group_by=batch`, this field tells whether the grouped usage result is
+ batch or not.
+ """
+
+ input_audio_tokens: Optional[int] = None
+ """The aggregated number of audio input tokens used, including cached tokens."""
+
+ input_cached_tokens: Optional[int] = None
+ """
+ The aggregated number of text input tokens that has been cached from previous
+ requests. For customers subscribe to scale tier, this includes scale tier
+ tokens.
+ """
+
+ model: Optional[str] = None
+ """
+ When `group_by=model`, this field provides the model name of the grouped usage
+ result.
+ """
+
+ output_audio_tokens: Optional[int] = None
+ """The aggregated number of audio output tokens used."""
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+ service_tier: Optional[str] = None
+ """
+ When `group_by=service_tier`, this field provides the service tier of the
+ grouped usage result.
+ """
+
+ user_id: Optional[str] = None
+ """
+ When `group_by=user_id`, this field provides the user ID of the grouped usage
+ result.
+ """
+
+
+class DataResultOrganizationUsageEmbeddingsResult(BaseModel):
+ """The aggregated embeddings usage details of the specific time bucket."""
+
+ input_tokens: int
+ """The aggregated number of input tokens used."""
+
+ num_model_requests: int
+ """The count of requests made to the model."""
+
+ object: Literal["organization.usage.embeddings.result"]
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ usage result.
+ """
+
+ model: Optional[str] = None
+ """
+ When `group_by=model`, this field provides the model name of the grouped usage
+ result.
+ """
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+ user_id: Optional[str] = None
+ """
+ When `group_by=user_id`, this field provides the user ID of the grouped usage
+ result.
+ """
+
+
+class DataResultOrganizationUsageModerationsResult(BaseModel):
+ """The aggregated moderations usage details of the specific time bucket."""
+
+ input_tokens: int
+ """The aggregated number of input tokens used."""
+
+ num_model_requests: int
+ """The count of requests made to the model."""
+
+ object: Literal["organization.usage.moderations.result"]
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ usage result.
+ """
+
+ model: Optional[str] = None
+ """
+ When `group_by=model`, this field provides the model name of the grouped usage
+ result.
+ """
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+ user_id: Optional[str] = None
+ """
+ When `group_by=user_id`, this field provides the user ID of the grouped usage
+ result.
+ """
+
+
+class DataResultOrganizationUsageImagesResult(BaseModel):
+ """The aggregated images usage details of the specific time bucket."""
+
+ images: int
+ """The number of images processed."""
+
+ num_model_requests: int
+ """The count of requests made to the model."""
+
+ object: Literal["organization.usage.images.result"]
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ usage result.
+ """
+
+ model: Optional[str] = None
+ """
+ When `group_by=model`, this field provides the model name of the grouped usage
+ result.
+ """
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+ size: Optional[str] = None
+ """
+ When `group_by=size`, this field provides the image size of the grouped usage
+ result.
+ """
+
+ source: Optional[str] = None
+ """
+ When `group_by=source`, this field provides the source of the grouped usage
+ result, possible values are `image.generation`, `image.edit`, `image.variation`.
+ """
+
+ user_id: Optional[str] = None
+ """
+ When `group_by=user_id`, this field provides the user ID of the grouped usage
+ result.
+ """
+
+
+class DataResultOrganizationUsageAudioSpeechesResult(BaseModel):
+ """The aggregated audio speeches usage details of the specific time bucket."""
+
+ characters: int
+ """The number of characters processed."""
+
+ num_model_requests: int
+ """The count of requests made to the model."""
+
+ object: Literal["organization.usage.audio_speeches.result"]
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ usage result.
+ """
+
+ model: Optional[str] = None
+ """
+ When `group_by=model`, this field provides the model name of the grouped usage
+ result.
+ """
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+ user_id: Optional[str] = None
+ """
+ When `group_by=user_id`, this field provides the user ID of the grouped usage
+ result.
+ """
+
+
+class DataResultOrganizationUsageAudioTranscriptionsResult(BaseModel):
+ """The aggregated audio transcriptions usage details of the specific time bucket."""
+
+ num_model_requests: int
+ """The count of requests made to the model."""
+
+ object: Literal["organization.usage.audio_transcriptions.result"]
+
+ seconds: int
+ """The number of seconds processed."""
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ usage result.
+ """
+
+ model: Optional[str] = None
+ """
+ When `group_by=model`, this field provides the model name of the grouped usage
+ result.
+ """
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+ user_id: Optional[str] = None
+ """
+ When `group_by=user_id`, this field provides the user ID of the grouped usage
+ result.
+ """
+
+
+class DataResultOrganizationUsageVectorStoresResult(BaseModel):
+ """The aggregated vector stores usage details of the specific time bucket."""
+
+ object: Literal["organization.usage.vector_stores.result"]
+
+ usage_bytes: int
+ """The vector stores usage in bytes."""
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+
+class DataResultOrganizationUsageCodeInterpreterSessionsResult(BaseModel):
+ """
+ The aggregated code interpreter sessions usage details of the specific time bucket.
+ """
+
+ num_sessions: int
+ """The number of code interpreter sessions."""
+
+ object: Literal["organization.usage.code_interpreter_sessions.result"]
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+
+class DataResultOrganizationCostsResultAmount(BaseModel):
+ """The monetary value in its associated currency."""
+
+ currency: Optional[str] = None
+ """Lowercase ISO-4217 currency e.g. "usd" """
+
+ value: Optional[float] = None
+ """The numeric value of the cost."""
+
+
+class DataResultOrganizationCostsResult(BaseModel):
+ """The aggregated costs details of the specific time bucket."""
+
+ object: Literal["organization.costs.result"]
+
+ amount: Optional[DataResultOrganizationCostsResultAmount] = None
+ """The monetary value in its associated currency."""
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API Key ID of the grouped
+ costs result.
+ """
+
+ line_item: Optional[str] = None
+ """
+ When `group_by=line_item`, this field provides the line item of the grouped
+ costs result.
+ """
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ costs result.
+ """
+
+
+DataResult: TypeAlias = Annotated[
+ Union[
+ DataResultOrganizationUsageCompletionsResult,
+ DataResultOrganizationUsageEmbeddingsResult,
+ DataResultOrganizationUsageModerationsResult,
+ DataResultOrganizationUsageImagesResult,
+ DataResultOrganizationUsageAudioSpeechesResult,
+ DataResultOrganizationUsageAudioTranscriptionsResult,
+ DataResultOrganizationUsageVectorStoresResult,
+ DataResultOrganizationUsageCodeInterpreterSessionsResult,
+ DataResultOrganizationCostsResult,
+ ],
+ PropertyInfo(discriminator="object"),
+]
+
+
+class Data(BaseModel):
+ end_time: int
+
+ object: Literal["bucket"]
+
+ results: List[DataResult]
+
+ start_time: int
+
+
+class UsageAudioTranscriptionsResponse(BaseModel):
+ data: List[Data]
+
+ has_more: bool
+
+ next_page: Optional[str] = None
+
+ object: Literal["page"]
diff --git a/src/openai/types/admin/organization/usage_code_interpreter_sessions_params.py b/src/openai/types/admin/organization/usage_code_interpreter_sessions_params.py
new file mode 100644
index 0000000000..dd4b59c2ab
--- /dev/null
+++ b/src/openai/types/admin/organization/usage_code_interpreter_sessions_params.py
@@ -0,0 +1,47 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+from typing_extensions import Literal, Required, TypedDict
+
+from ...._types import SequenceNotStr
+
+__all__ = ["UsageCodeInterpreterSessionsParams"]
+
+
+class UsageCodeInterpreterSessionsParams(TypedDict, total=False):
+ start_time: Required[int]
+ """Start time (Unix seconds) of the query time range, inclusive."""
+
+ bucket_width: Literal["1m", "1h", "1d"]
+ """Width of each time bucket in response.
+
+ Currently `1m`, `1h` and `1d` are supported, default to `1d`.
+ """
+
+ end_time: int
+ """End time (Unix seconds) of the query time range, exclusive."""
+
+ group_by: List[Literal["project_id"]]
+ """Group the usage data by the specified fields.
+
+ Support fields include `project_id`.
+ """
+
+ limit: int
+ """Specifies the number of buckets to return.
+
+ - `bucket_width=1d`: default: 7, max: 31
+ - `bucket_width=1h`: default: 24, max: 168
+ - `bucket_width=1m`: default: 60, max: 1440
+ """
+
+ page: str
+ """A cursor for use in pagination.
+
+ Corresponding to the `next_page` field from the previous response.
+ """
+
+ project_ids: SequenceNotStr[str]
+ """Return only usage for these projects."""
diff --git a/src/openai/types/admin/organization/usage_code_interpreter_sessions_response.py b/src/openai/types/admin/organization/usage_code_interpreter_sessions_response.py
new file mode 100644
index 0000000000..0cd6c2693c
--- /dev/null
+++ b/src/openai/types/admin/organization/usage_code_interpreter_sessions_response.py
@@ -0,0 +1,390 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union, Optional
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ...._utils import PropertyInfo
+from ...._models import BaseModel
+
+__all__ = [
+ "UsageCodeInterpreterSessionsResponse",
+ "Data",
+ "DataResult",
+ "DataResultOrganizationUsageCompletionsResult",
+ "DataResultOrganizationUsageEmbeddingsResult",
+ "DataResultOrganizationUsageModerationsResult",
+ "DataResultOrganizationUsageImagesResult",
+ "DataResultOrganizationUsageAudioSpeechesResult",
+ "DataResultOrganizationUsageAudioTranscriptionsResult",
+ "DataResultOrganizationUsageVectorStoresResult",
+ "DataResultOrganizationUsageCodeInterpreterSessionsResult",
+ "DataResultOrganizationCostsResult",
+ "DataResultOrganizationCostsResultAmount",
+]
+
+
+class DataResultOrganizationUsageCompletionsResult(BaseModel):
+ """The aggregated completions usage details of the specific time bucket."""
+
+ input_tokens: int
+ """The aggregated number of text input tokens used, including cached tokens.
+
+ For customers subscribe to scale tier, this includes scale tier tokens.
+ """
+
+ num_model_requests: int
+ """The count of requests made to the model."""
+
+ object: Literal["organization.usage.completions.result"]
+
+ output_tokens: int
+ """The aggregated number of text output tokens used.
+
+ For customers subscribe to scale tier, this includes scale tier tokens.
+ """
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ usage result.
+ """
+
+ batch: Optional[bool] = None
+ """
+ When `group_by=batch`, this field tells whether the grouped usage result is
+ batch or not.
+ """
+
+ input_audio_tokens: Optional[int] = None
+ """The aggregated number of audio input tokens used, including cached tokens."""
+
+ input_cached_tokens: Optional[int] = None
+ """
+ The aggregated number of text input tokens that has been cached from previous
+ requests. For customers subscribe to scale tier, this includes scale tier
+ tokens.
+ """
+
+ model: Optional[str] = None
+ """
+ When `group_by=model`, this field provides the model name of the grouped usage
+ result.
+ """
+
+ output_audio_tokens: Optional[int] = None
+ """The aggregated number of audio output tokens used."""
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+ service_tier: Optional[str] = None
+ """
+ When `group_by=service_tier`, this field provides the service tier of the
+ grouped usage result.
+ """
+
+ user_id: Optional[str] = None
+ """
+ When `group_by=user_id`, this field provides the user ID of the grouped usage
+ result.
+ """
+
+
+class DataResultOrganizationUsageEmbeddingsResult(BaseModel):
+ """The aggregated embeddings usage details of the specific time bucket."""
+
+ input_tokens: int
+ """The aggregated number of input tokens used."""
+
+ num_model_requests: int
+ """The count of requests made to the model."""
+
+ object: Literal["organization.usage.embeddings.result"]
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ usage result.
+ """
+
+ model: Optional[str] = None
+ """
+ When `group_by=model`, this field provides the model name of the grouped usage
+ result.
+ """
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+ user_id: Optional[str] = None
+ """
+ When `group_by=user_id`, this field provides the user ID of the grouped usage
+ result.
+ """
+
+
+class DataResultOrganizationUsageModerationsResult(BaseModel):
+ """The aggregated moderations usage details of the specific time bucket."""
+
+ input_tokens: int
+ """The aggregated number of input tokens used."""
+
+ num_model_requests: int
+ """The count of requests made to the model."""
+
+ object: Literal["organization.usage.moderations.result"]
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ usage result.
+ """
+
+ model: Optional[str] = None
+ """
+ When `group_by=model`, this field provides the model name of the grouped usage
+ result.
+ """
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+ user_id: Optional[str] = None
+ """
+ When `group_by=user_id`, this field provides the user ID of the grouped usage
+ result.
+ """
+
+
+class DataResultOrganizationUsageImagesResult(BaseModel):
+ """The aggregated images usage details of the specific time bucket."""
+
+ images: int
+ """The number of images processed."""
+
+ num_model_requests: int
+ """The count of requests made to the model."""
+
+ object: Literal["organization.usage.images.result"]
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ usage result.
+ """
+
+ model: Optional[str] = None
+ """
+ When `group_by=model`, this field provides the model name of the grouped usage
+ result.
+ """
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+ size: Optional[str] = None
+ """
+ When `group_by=size`, this field provides the image size of the grouped usage
+ result.
+ """
+
+ source: Optional[str] = None
+ """
+ When `group_by=source`, this field provides the source of the grouped usage
+ result, possible values are `image.generation`, `image.edit`, `image.variation`.
+ """
+
+ user_id: Optional[str] = None
+ """
+ When `group_by=user_id`, this field provides the user ID of the grouped usage
+ result.
+ """
+
+
+class DataResultOrganizationUsageAudioSpeechesResult(BaseModel):
+ """The aggregated audio speeches usage details of the specific time bucket."""
+
+ characters: int
+ """The number of characters processed."""
+
+ num_model_requests: int
+ """The count of requests made to the model."""
+
+ object: Literal["organization.usage.audio_speeches.result"]
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ usage result.
+ """
+
+ model: Optional[str] = None
+ """
+ When `group_by=model`, this field provides the model name of the grouped usage
+ result.
+ """
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+ user_id: Optional[str] = None
+ """
+ When `group_by=user_id`, this field provides the user ID of the grouped usage
+ result.
+ """
+
+
+class DataResultOrganizationUsageAudioTranscriptionsResult(BaseModel):
+ """The aggregated audio transcriptions usage details of the specific time bucket."""
+
+ num_model_requests: int
+ """The count of requests made to the model."""
+
+ object: Literal["organization.usage.audio_transcriptions.result"]
+
+ seconds: int
+ """The number of seconds processed."""
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ usage result.
+ """
+
+ model: Optional[str] = None
+ """
+ When `group_by=model`, this field provides the model name of the grouped usage
+ result.
+ """
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+ user_id: Optional[str] = None
+ """
+ When `group_by=user_id`, this field provides the user ID of the grouped usage
+ result.
+ """
+
+
+class DataResultOrganizationUsageVectorStoresResult(BaseModel):
+ """The aggregated vector stores usage details of the specific time bucket."""
+
+ object: Literal["organization.usage.vector_stores.result"]
+
+ usage_bytes: int
+ """The vector stores usage in bytes."""
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+
+class DataResultOrganizationUsageCodeInterpreterSessionsResult(BaseModel):
+ """
+ The aggregated code interpreter sessions usage details of the specific time bucket.
+ """
+
+ num_sessions: int
+ """The number of code interpreter sessions."""
+
+ object: Literal["organization.usage.code_interpreter_sessions.result"]
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+
+class DataResultOrganizationCostsResultAmount(BaseModel):
+ """The monetary value in its associated currency."""
+
+ currency: Optional[str] = None
+ """Lowercase ISO-4217 currency e.g. "usd" """
+
+ value: Optional[float] = None
+ """The numeric value of the cost."""
+
+
+class DataResultOrganizationCostsResult(BaseModel):
+ """The aggregated costs details of the specific time bucket."""
+
+ object: Literal["organization.costs.result"]
+
+ amount: Optional[DataResultOrganizationCostsResultAmount] = None
+ """The monetary value in its associated currency."""
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API Key ID of the grouped
+ costs result.
+ """
+
+ line_item: Optional[str] = None
+ """
+ When `group_by=line_item`, this field provides the line item of the grouped
+ costs result.
+ """
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ costs result.
+ """
+
+
+DataResult: TypeAlias = Annotated[
+ Union[
+ DataResultOrganizationUsageCompletionsResult,
+ DataResultOrganizationUsageEmbeddingsResult,
+ DataResultOrganizationUsageModerationsResult,
+ DataResultOrganizationUsageImagesResult,
+ DataResultOrganizationUsageAudioSpeechesResult,
+ DataResultOrganizationUsageAudioTranscriptionsResult,
+ DataResultOrganizationUsageVectorStoresResult,
+ DataResultOrganizationUsageCodeInterpreterSessionsResult,
+ DataResultOrganizationCostsResult,
+ ],
+ PropertyInfo(discriminator="object"),
+]
+
+
+class Data(BaseModel):
+ end_time: int
+
+ object: Literal["bucket"]
+
+ results: List[DataResult]
+
+ start_time: int
+
+
+class UsageCodeInterpreterSessionsResponse(BaseModel):
+ data: List[Data]
+
+ has_more: bool
+
+ next_page: Optional[str] = None
+
+ object: Literal["page"]
diff --git a/src/openai/types/admin/organization/usage_completions_params.py b/src/openai/types/admin/organization/usage_completions_params.py
new file mode 100644
index 0000000000..e7dbe2187e
--- /dev/null
+++ b/src/openai/types/admin/organization/usage_completions_params.py
@@ -0,0 +1,63 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+from typing_extensions import Literal, Required, TypedDict
+
+from ...._types import SequenceNotStr
+
+__all__ = ["UsageCompletionsParams"]
+
+
+class UsageCompletionsParams(TypedDict, total=False):
+ start_time: Required[int]
+ """Start time (Unix seconds) of the query time range, inclusive."""
+
+ api_key_ids: SequenceNotStr[str]
+ """Return only usage for these API keys."""
+
+ batch: bool
+ """If `true`, return batch jobs only.
+
+ If `false`, return non-batch jobs only. By default, return both.
+ """
+
+ bucket_width: Literal["1m", "1h", "1d"]
+ """Width of each time bucket in response.
+
+ Currently `1m`, `1h` and `1d` are supported, default to `1d`.
+ """
+
+ end_time: int
+ """End time (Unix seconds) of the query time range, exclusive."""
+
+ group_by: List[Literal["project_id", "user_id", "api_key_id", "model", "batch", "service_tier"]]
+ """Group the usage data by the specified fields.
+
+ Support fields include `project_id`, `user_id`, `api_key_id`, `model`, `batch`,
+ `service_tier` or any combination of them.
+ """
+
+ limit: int
+ """Specifies the number of buckets to return.
+
+ - `bucket_width=1d`: default: 7, max: 31
+ - `bucket_width=1h`: default: 24, max: 168
+ - `bucket_width=1m`: default: 60, max: 1440
+ """
+
+ models: SequenceNotStr[str]
+ """Return only usage for these models."""
+
+ page: str
+ """A cursor for use in pagination.
+
+ Corresponding to the `next_page` field from the previous response.
+ """
+
+ project_ids: SequenceNotStr[str]
+ """Return only usage for these projects."""
+
+ user_ids: SequenceNotStr[str]
+ """Return only usage for these users."""
diff --git a/src/openai/types/admin/organization/usage_completions_response.py b/src/openai/types/admin/organization/usage_completions_response.py
new file mode 100644
index 0000000000..d37634bca5
--- /dev/null
+++ b/src/openai/types/admin/organization/usage_completions_response.py
@@ -0,0 +1,390 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union, Optional
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ...._utils import PropertyInfo
+from ...._models import BaseModel
+
+__all__ = [
+ "UsageCompletionsResponse",
+ "Data",
+ "DataResult",
+ "DataResultOrganizationUsageCompletionsResult",
+ "DataResultOrganizationUsageEmbeddingsResult",
+ "DataResultOrganizationUsageModerationsResult",
+ "DataResultOrganizationUsageImagesResult",
+ "DataResultOrganizationUsageAudioSpeechesResult",
+ "DataResultOrganizationUsageAudioTranscriptionsResult",
+ "DataResultOrganizationUsageVectorStoresResult",
+ "DataResultOrganizationUsageCodeInterpreterSessionsResult",
+ "DataResultOrganizationCostsResult",
+ "DataResultOrganizationCostsResultAmount",
+]
+
+
+class DataResultOrganizationUsageCompletionsResult(BaseModel):
+ """The aggregated completions usage details of the specific time bucket."""
+
+ input_tokens: int
+ """The aggregated number of text input tokens used, including cached tokens.
+
+ For customers subscribe to scale tier, this includes scale tier tokens.
+ """
+
+ num_model_requests: int
+ """The count of requests made to the model."""
+
+ object: Literal["organization.usage.completions.result"]
+
+ output_tokens: int
+ """The aggregated number of text output tokens used.
+
+ For customers subscribe to scale tier, this includes scale tier tokens.
+ """
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ usage result.
+ """
+
+ batch: Optional[bool] = None
+ """
+ When `group_by=batch`, this field tells whether the grouped usage result is
+ batch or not.
+ """
+
+ input_audio_tokens: Optional[int] = None
+ """The aggregated number of audio input tokens used, including cached tokens."""
+
+ input_cached_tokens: Optional[int] = None
+ """
+ The aggregated number of text input tokens that has been cached from previous
+ requests. For customers subscribe to scale tier, this includes scale tier
+ tokens.
+ """
+
+ model: Optional[str] = None
+ """
+ When `group_by=model`, this field provides the model name of the grouped usage
+ result.
+ """
+
+ output_audio_tokens: Optional[int] = None
+ """The aggregated number of audio output tokens used."""
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+ service_tier: Optional[str] = None
+ """
+ When `group_by=service_tier`, this field provides the service tier of the
+ grouped usage result.
+ """
+
+ user_id: Optional[str] = None
+ """
+ When `group_by=user_id`, this field provides the user ID of the grouped usage
+ result.
+ """
+
+
+class DataResultOrganizationUsageEmbeddingsResult(BaseModel):
+ """The aggregated embeddings usage details of the specific time bucket."""
+
+ input_tokens: int
+ """The aggregated number of input tokens used."""
+
+ num_model_requests: int
+ """The count of requests made to the model."""
+
+ object: Literal["organization.usage.embeddings.result"]
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ usage result.
+ """
+
+ model: Optional[str] = None
+ """
+ When `group_by=model`, this field provides the model name of the grouped usage
+ result.
+ """
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+ user_id: Optional[str] = None
+ """
+ When `group_by=user_id`, this field provides the user ID of the grouped usage
+ result.
+ """
+
+
+class DataResultOrganizationUsageModerationsResult(BaseModel):
+ """The aggregated moderations usage details of the specific time bucket."""
+
+ input_tokens: int
+ """The aggregated number of input tokens used."""
+
+ num_model_requests: int
+ """The count of requests made to the model."""
+
+ object: Literal["organization.usage.moderations.result"]
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ usage result.
+ """
+
+ model: Optional[str] = None
+ """
+ When `group_by=model`, this field provides the model name of the grouped usage
+ result.
+ """
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+ user_id: Optional[str] = None
+ """
+ When `group_by=user_id`, this field provides the user ID of the grouped usage
+ result.
+ """
+
+
+class DataResultOrganizationUsageImagesResult(BaseModel):
+ """The aggregated images usage details of the specific time bucket."""
+
+ images: int
+ """The number of images processed."""
+
+ num_model_requests: int
+ """The count of requests made to the model."""
+
+ object: Literal["organization.usage.images.result"]
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ usage result.
+ """
+
+ model: Optional[str] = None
+ """
+ When `group_by=model`, this field provides the model name of the grouped usage
+ result.
+ """
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+ size: Optional[str] = None
+ """
+ When `group_by=size`, this field provides the image size of the grouped usage
+ result.
+ """
+
+ source: Optional[str] = None
+ """
+ When `group_by=source`, this field provides the source of the grouped usage
+ result, possible values are `image.generation`, `image.edit`, `image.variation`.
+ """
+
+ user_id: Optional[str] = None
+ """
+ When `group_by=user_id`, this field provides the user ID of the grouped usage
+ result.
+ """
+
+
+class DataResultOrganizationUsageAudioSpeechesResult(BaseModel):
+ """The aggregated audio speeches usage details of the specific time bucket."""
+
+ characters: int
+ """The number of characters processed."""
+
+ num_model_requests: int
+ """The count of requests made to the model."""
+
+ object: Literal["organization.usage.audio_speeches.result"]
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ usage result.
+ """
+
+ model: Optional[str] = None
+ """
+ When `group_by=model`, this field provides the model name of the grouped usage
+ result.
+ """
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+ user_id: Optional[str] = None
+ """
+ When `group_by=user_id`, this field provides the user ID of the grouped usage
+ result.
+ """
+
+
+class DataResultOrganizationUsageAudioTranscriptionsResult(BaseModel):
+ """The aggregated audio transcriptions usage details of the specific time bucket."""
+
+ num_model_requests: int
+ """The count of requests made to the model."""
+
+ object: Literal["organization.usage.audio_transcriptions.result"]
+
+ seconds: int
+ """The number of seconds processed."""
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ usage result.
+ """
+
+ model: Optional[str] = None
+ """
+ When `group_by=model`, this field provides the model name of the grouped usage
+ result.
+ """
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+ user_id: Optional[str] = None
+ """
+ When `group_by=user_id`, this field provides the user ID of the grouped usage
+ result.
+ """
+
+
+class DataResultOrganizationUsageVectorStoresResult(BaseModel):
+ """The aggregated vector stores usage details of the specific time bucket."""
+
+ object: Literal["organization.usage.vector_stores.result"]
+
+ usage_bytes: int
+ """The vector stores usage in bytes."""
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+
+class DataResultOrganizationUsageCodeInterpreterSessionsResult(BaseModel):
+ """
+ The aggregated code interpreter sessions usage details of the specific time bucket.
+ """
+
+ num_sessions: int
+ """The number of code interpreter sessions."""
+
+ object: Literal["organization.usage.code_interpreter_sessions.result"]
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+
+class DataResultOrganizationCostsResultAmount(BaseModel):
+ """The monetary value in its associated currency."""
+
+ currency: Optional[str] = None
+ """Lowercase ISO-4217 currency e.g. "usd" """
+
+ value: Optional[float] = None
+ """The numeric value of the cost."""
+
+
+class DataResultOrganizationCostsResult(BaseModel):
+ """The aggregated costs details of the specific time bucket."""
+
+ object: Literal["organization.costs.result"]
+
+ amount: Optional[DataResultOrganizationCostsResultAmount] = None
+ """The monetary value in its associated currency."""
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API Key ID of the grouped
+ costs result.
+ """
+
+ line_item: Optional[str] = None
+ """
+ When `group_by=line_item`, this field provides the line item of the grouped
+ costs result.
+ """
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ costs result.
+ """
+
+
+DataResult: TypeAlias = Annotated[
+ Union[
+ DataResultOrganizationUsageCompletionsResult,
+ DataResultOrganizationUsageEmbeddingsResult,
+ DataResultOrganizationUsageModerationsResult,
+ DataResultOrganizationUsageImagesResult,
+ DataResultOrganizationUsageAudioSpeechesResult,
+ DataResultOrganizationUsageAudioTranscriptionsResult,
+ DataResultOrganizationUsageVectorStoresResult,
+ DataResultOrganizationUsageCodeInterpreterSessionsResult,
+ DataResultOrganizationCostsResult,
+ ],
+ PropertyInfo(discriminator="object"),
+]
+
+
+class Data(BaseModel):
+ end_time: int
+
+ object: Literal["bucket"]
+
+ results: List[DataResult]
+
+ start_time: int
+
+
+class UsageCompletionsResponse(BaseModel):
+ data: List[Data]
+
+ has_more: bool
+
+ next_page: Optional[str] = None
+
+ object: Literal["page"]
diff --git a/src/openai/types/admin/organization/usage_costs_params.py b/src/openai/types/admin/organization/usage_costs_params.py
new file mode 100644
index 0000000000..e848643339
--- /dev/null
+++ b/src/openai/types/admin/organization/usage_costs_params.py
@@ -0,0 +1,49 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+from typing_extensions import Literal, Required, TypedDict
+
+from ...._types import SequenceNotStr
+
+__all__ = ["UsageCostsParams"]
+
+
+class UsageCostsParams(TypedDict, total=False):
+ start_time: Required[int]
+ """Start time (Unix seconds) of the query time range, inclusive."""
+
+ api_key_ids: SequenceNotStr[str]
+ """Return only costs for these API keys."""
+
+ bucket_width: Literal["1d"]
+ """Width of each time bucket in response.
+
+ Currently only `1d` is supported, default to `1d`.
+ """
+
+ end_time: int
+ """End time (Unix seconds) of the query time range, exclusive."""
+
+ group_by: List[Literal["project_id", "line_item", "api_key_id"]]
+ """Group the costs by the specified fields.
+
+ Support fields include `project_id`, `line_item`, `api_key_id` and any
+ combination of them.
+ """
+
+ limit: int
+ """A limit on the number of buckets to be returned.
+
+ Limit can range between 1 and 180, and the default is 7.
+ """
+
+ page: str
+ """A cursor for use in pagination.
+
+ Corresponding to the `next_page` field from the previous response.
+ """
+
+ project_ids: SequenceNotStr[str]
+ """Return only costs for these projects."""
diff --git a/src/openai/types/admin/organization/usage_costs_response.py b/src/openai/types/admin/organization/usage_costs_response.py
new file mode 100644
index 0000000000..68c1f639c1
--- /dev/null
+++ b/src/openai/types/admin/organization/usage_costs_response.py
@@ -0,0 +1,390 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union, Optional
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ...._utils import PropertyInfo
+from ...._models import BaseModel
+
+__all__ = [
+ "UsageCostsResponse",
+ "Data",
+ "DataResult",
+ "DataResultOrganizationUsageCompletionsResult",
+ "DataResultOrganizationUsageEmbeddingsResult",
+ "DataResultOrganizationUsageModerationsResult",
+ "DataResultOrganizationUsageImagesResult",
+ "DataResultOrganizationUsageAudioSpeechesResult",
+ "DataResultOrganizationUsageAudioTranscriptionsResult",
+ "DataResultOrganizationUsageVectorStoresResult",
+ "DataResultOrganizationUsageCodeInterpreterSessionsResult",
+ "DataResultOrganizationCostsResult",
+ "DataResultOrganizationCostsResultAmount",
+]
+
+
+class DataResultOrganizationUsageCompletionsResult(BaseModel):
+ """The aggregated completions usage details of the specific time bucket."""
+
+ input_tokens: int
+ """The aggregated number of text input tokens used, including cached tokens.
+
+ For customers subscribe to scale tier, this includes scale tier tokens.
+ """
+
+ num_model_requests: int
+ """The count of requests made to the model."""
+
+ object: Literal["organization.usage.completions.result"]
+
+ output_tokens: int
+ """The aggregated number of text output tokens used.
+
+ For customers subscribe to scale tier, this includes scale tier tokens.
+ """
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ usage result.
+ """
+
+ batch: Optional[bool] = None
+ """
+ When `group_by=batch`, this field tells whether the grouped usage result is
+ batch or not.
+ """
+
+ input_audio_tokens: Optional[int] = None
+ """The aggregated number of audio input tokens used, including cached tokens."""
+
+ input_cached_tokens: Optional[int] = None
+ """
+ The aggregated number of text input tokens that has been cached from previous
+ requests. For customers subscribe to scale tier, this includes scale tier
+ tokens.
+ """
+
+ model: Optional[str] = None
+ """
+ When `group_by=model`, this field provides the model name of the grouped usage
+ result.
+ """
+
+ output_audio_tokens: Optional[int] = None
+ """The aggregated number of audio output tokens used."""
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+ service_tier: Optional[str] = None
+ """
+ When `group_by=service_tier`, this field provides the service tier of the
+ grouped usage result.
+ """
+
+ user_id: Optional[str] = None
+ """
+ When `group_by=user_id`, this field provides the user ID of the grouped usage
+ result.
+ """
+
+
+class DataResultOrganizationUsageEmbeddingsResult(BaseModel):
+ """The aggregated embeddings usage details of the specific time bucket."""
+
+ input_tokens: int
+ """The aggregated number of input tokens used."""
+
+ num_model_requests: int
+ """The count of requests made to the model."""
+
+ object: Literal["organization.usage.embeddings.result"]
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ usage result.
+ """
+
+ model: Optional[str] = None
+ """
+ When `group_by=model`, this field provides the model name of the grouped usage
+ result.
+ """
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+ user_id: Optional[str] = None
+ """
+ When `group_by=user_id`, this field provides the user ID of the grouped usage
+ result.
+ """
+
+
+class DataResultOrganizationUsageModerationsResult(BaseModel):
+ """The aggregated moderations usage details of the specific time bucket."""
+
+ input_tokens: int
+ """The aggregated number of input tokens used."""
+
+ num_model_requests: int
+ """The count of requests made to the model."""
+
+ object: Literal["organization.usage.moderations.result"]
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ usage result.
+ """
+
+ model: Optional[str] = None
+ """
+ When `group_by=model`, this field provides the model name of the grouped usage
+ result.
+ """
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+ user_id: Optional[str] = None
+ """
+ When `group_by=user_id`, this field provides the user ID of the grouped usage
+ result.
+ """
+
+
+class DataResultOrganizationUsageImagesResult(BaseModel):
+ """The aggregated images usage details of the specific time bucket."""
+
+ images: int
+ """The number of images processed."""
+
+ num_model_requests: int
+ """The count of requests made to the model."""
+
+ object: Literal["organization.usage.images.result"]
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ usage result.
+ """
+
+ model: Optional[str] = None
+ """
+ When `group_by=model`, this field provides the model name of the grouped usage
+ result.
+ """
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+ size: Optional[str] = None
+ """
+ When `group_by=size`, this field provides the image size of the grouped usage
+ result.
+ """
+
+ source: Optional[str] = None
+ """
+ When `group_by=source`, this field provides the source of the grouped usage
+ result, possible values are `image.generation`, `image.edit`, `image.variation`.
+ """
+
+ user_id: Optional[str] = None
+ """
+ When `group_by=user_id`, this field provides the user ID of the grouped usage
+ result.
+ """
+
+
+class DataResultOrganizationUsageAudioSpeechesResult(BaseModel):
+ """The aggregated audio speeches usage details of the specific time bucket."""
+
+ characters: int
+ """The number of characters processed."""
+
+ num_model_requests: int
+ """The count of requests made to the model."""
+
+ object: Literal["organization.usage.audio_speeches.result"]
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ usage result.
+ """
+
+ model: Optional[str] = None
+ """
+ When `group_by=model`, this field provides the model name of the grouped usage
+ result.
+ """
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+ user_id: Optional[str] = None
+ """
+ When `group_by=user_id`, this field provides the user ID of the grouped usage
+ result.
+ """
+
+
+class DataResultOrganizationUsageAudioTranscriptionsResult(BaseModel):
+ """The aggregated audio transcriptions usage details of the specific time bucket."""
+
+ num_model_requests: int
+ """The count of requests made to the model."""
+
+ object: Literal["organization.usage.audio_transcriptions.result"]
+
+ seconds: int
+ """The number of seconds processed."""
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ usage result.
+ """
+
+ model: Optional[str] = None
+ """
+ When `group_by=model`, this field provides the model name of the grouped usage
+ result.
+ """
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+ user_id: Optional[str] = None
+ """
+ When `group_by=user_id`, this field provides the user ID of the grouped usage
+ result.
+ """
+
+
+class DataResultOrganizationUsageVectorStoresResult(BaseModel):
+ """The aggregated vector stores usage details of the specific time bucket."""
+
+ object: Literal["organization.usage.vector_stores.result"]
+
+ usage_bytes: int
+ """The vector stores usage in bytes."""
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+
+class DataResultOrganizationUsageCodeInterpreterSessionsResult(BaseModel):
+ """
+ The aggregated code interpreter sessions usage details of the specific time bucket.
+ """
+
+ num_sessions: int
+ """The number of code interpreter sessions."""
+
+ object: Literal["organization.usage.code_interpreter_sessions.result"]
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+
+class DataResultOrganizationCostsResultAmount(BaseModel):
+ """The monetary value in its associated currency."""
+
+ currency: Optional[str] = None
+ """Lowercase ISO-4217 currency e.g. "usd" """
+
+ value: Optional[float] = None
+ """The numeric value of the cost."""
+
+
+class DataResultOrganizationCostsResult(BaseModel):
+ """The aggregated costs details of the specific time bucket."""
+
+ object: Literal["organization.costs.result"]
+
+ amount: Optional[DataResultOrganizationCostsResultAmount] = None
+ """The monetary value in its associated currency."""
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API Key ID of the grouped
+ costs result.
+ """
+
+ line_item: Optional[str] = None
+ """
+ When `group_by=line_item`, this field provides the line item of the grouped
+ costs result.
+ """
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ costs result.
+ """
+
+
+DataResult: TypeAlias = Annotated[
+ Union[
+ DataResultOrganizationUsageCompletionsResult,
+ DataResultOrganizationUsageEmbeddingsResult,
+ DataResultOrganizationUsageModerationsResult,
+ DataResultOrganizationUsageImagesResult,
+ DataResultOrganizationUsageAudioSpeechesResult,
+ DataResultOrganizationUsageAudioTranscriptionsResult,
+ DataResultOrganizationUsageVectorStoresResult,
+ DataResultOrganizationUsageCodeInterpreterSessionsResult,
+ DataResultOrganizationCostsResult,
+ ],
+ PropertyInfo(discriminator="object"),
+]
+
+
+class Data(BaseModel):
+ end_time: int
+
+ object: Literal["bucket"]
+
+ results: List[DataResult]
+
+ start_time: int
+
+
+class UsageCostsResponse(BaseModel):
+ data: List[Data]
+
+ has_more: bool
+
+ next_page: Optional[str] = None
+
+ object: Literal["page"]
diff --git a/src/openai/types/admin/organization/usage_embeddings_params.py b/src/openai/types/admin/organization/usage_embeddings_params.py
new file mode 100644
index 0000000000..56c9107b51
--- /dev/null
+++ b/src/openai/types/admin/organization/usage_embeddings_params.py
@@ -0,0 +1,57 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+from typing_extensions import Literal, Required, TypedDict
+
+from ...._types import SequenceNotStr
+
+__all__ = ["UsageEmbeddingsParams"]
+
+
+class UsageEmbeddingsParams(TypedDict, total=False):
+ start_time: Required[int]
+ """Start time (Unix seconds) of the query time range, inclusive."""
+
+ api_key_ids: SequenceNotStr[str]
+ """Return only usage for these API keys."""
+
+ bucket_width: Literal["1m", "1h", "1d"]
+ """Width of each time bucket in response.
+
+ Currently `1m`, `1h` and `1d` are supported, default to `1d`.
+ """
+
+ end_time: int
+ """End time (Unix seconds) of the query time range, exclusive."""
+
+ group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]]
+ """Group the usage data by the specified fields.
+
+ Support fields include `project_id`, `user_id`, `api_key_id`, `model` or any
+ combination of them.
+ """
+
+ limit: int
+ """Specifies the number of buckets to return.
+
+ - `bucket_width=1d`: default: 7, max: 31
+ - `bucket_width=1h`: default: 24, max: 168
+ - `bucket_width=1m`: default: 60, max: 1440
+ """
+
+ models: SequenceNotStr[str]
+ """Return only usage for these models."""
+
+ page: str
+ """A cursor for use in pagination.
+
+ Corresponding to the `next_page` field from the previous response.
+ """
+
+ project_ids: SequenceNotStr[str]
+ """Return only usage for these projects."""
+
+ user_ids: SequenceNotStr[str]
+ """Return only usage for these users."""
diff --git a/src/openai/types/admin/organization/usage_embeddings_response.py b/src/openai/types/admin/organization/usage_embeddings_response.py
new file mode 100644
index 0000000000..905c8f5c6e
--- /dev/null
+++ b/src/openai/types/admin/organization/usage_embeddings_response.py
@@ -0,0 +1,390 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union, Optional
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ...._utils import PropertyInfo
+from ...._models import BaseModel
+
+__all__ = [
+ "UsageEmbeddingsResponse",
+ "Data",
+ "DataResult",
+ "DataResultOrganizationUsageCompletionsResult",
+ "DataResultOrganizationUsageEmbeddingsResult",
+ "DataResultOrganizationUsageModerationsResult",
+ "DataResultOrganizationUsageImagesResult",
+ "DataResultOrganizationUsageAudioSpeechesResult",
+ "DataResultOrganizationUsageAudioTranscriptionsResult",
+ "DataResultOrganizationUsageVectorStoresResult",
+ "DataResultOrganizationUsageCodeInterpreterSessionsResult",
+ "DataResultOrganizationCostsResult",
+ "DataResultOrganizationCostsResultAmount",
+]
+
+
+class DataResultOrganizationUsageCompletionsResult(BaseModel):
+ """The aggregated completions usage details of the specific time bucket."""
+
+ input_tokens: int
+ """The aggregated number of text input tokens used, including cached tokens.
+
+ For customers subscribe to scale tier, this includes scale tier tokens.
+ """
+
+ num_model_requests: int
+ """The count of requests made to the model."""
+
+ object: Literal["organization.usage.completions.result"]
+
+ output_tokens: int
+ """The aggregated number of text output tokens used.
+
+ For customers subscribe to scale tier, this includes scale tier tokens.
+ """
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ usage result.
+ """
+
+ batch: Optional[bool] = None
+ """
+ When `group_by=batch`, this field tells whether the grouped usage result is
+ batch or not.
+ """
+
+ input_audio_tokens: Optional[int] = None
+ """The aggregated number of audio input tokens used, including cached tokens."""
+
+ input_cached_tokens: Optional[int] = None
+ """
+ The aggregated number of text input tokens that has been cached from previous
+ requests. For customers subscribe to scale tier, this includes scale tier
+ tokens.
+ """
+
+ model: Optional[str] = None
+ """
+ When `group_by=model`, this field provides the model name of the grouped usage
+ result.
+ """
+
+ output_audio_tokens: Optional[int] = None
+ """The aggregated number of audio output tokens used."""
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+ service_tier: Optional[str] = None
+ """
+ When `group_by=service_tier`, this field provides the service tier of the
+ grouped usage result.
+ """
+
+ user_id: Optional[str] = None
+ """
+ When `group_by=user_id`, this field provides the user ID of the grouped usage
+ result.
+ """
+
+
+class DataResultOrganizationUsageEmbeddingsResult(BaseModel):
+ """The aggregated embeddings usage details of the specific time bucket."""
+
+ input_tokens: int
+ """The aggregated number of input tokens used."""
+
+ num_model_requests: int
+ """The count of requests made to the model."""
+
+ object: Literal["organization.usage.embeddings.result"]
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ usage result.
+ """
+
+ model: Optional[str] = None
+ """
+ When `group_by=model`, this field provides the model name of the grouped usage
+ result.
+ """
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+ user_id: Optional[str] = None
+ """
+ When `group_by=user_id`, this field provides the user ID of the grouped usage
+ result.
+ """
+
+
+class DataResultOrganizationUsageModerationsResult(BaseModel):
+ """The aggregated moderations usage details of the specific time bucket."""
+
+ input_tokens: int
+ """The aggregated number of input tokens used."""
+
+ num_model_requests: int
+ """The count of requests made to the model."""
+
+ object: Literal["organization.usage.moderations.result"]
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ usage result.
+ """
+
+ model: Optional[str] = None
+ """
+ When `group_by=model`, this field provides the model name of the grouped usage
+ result.
+ """
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+ user_id: Optional[str] = None
+ """
+ When `group_by=user_id`, this field provides the user ID of the grouped usage
+ result.
+ """
+
+
+class DataResultOrganizationUsageImagesResult(BaseModel):
+ """The aggregated images usage details of the specific time bucket."""
+
+ images: int
+ """The number of images processed."""
+
+ num_model_requests: int
+ """The count of requests made to the model."""
+
+ object: Literal["organization.usage.images.result"]
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ usage result.
+ """
+
+ model: Optional[str] = None
+ """
+ When `group_by=model`, this field provides the model name of the grouped usage
+ result.
+ """
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+ size: Optional[str] = None
+ """
+ When `group_by=size`, this field provides the image size of the grouped usage
+ result.
+ """
+
+ source: Optional[str] = None
+ """
+ When `group_by=source`, this field provides the source of the grouped usage
+ result, possible values are `image.generation`, `image.edit`, `image.variation`.
+ """
+
+ user_id: Optional[str] = None
+ """
+ When `group_by=user_id`, this field provides the user ID of the grouped usage
+ result.
+ """
+
+
+class DataResultOrganizationUsageAudioSpeechesResult(BaseModel):
+ """The aggregated audio speeches usage details of the specific time bucket."""
+
+ characters: int
+ """The number of characters processed."""
+
+ num_model_requests: int
+ """The count of requests made to the model."""
+
+ object: Literal["organization.usage.audio_speeches.result"]
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ usage result.
+ """
+
+ model: Optional[str] = None
+ """
+ When `group_by=model`, this field provides the model name of the grouped usage
+ result.
+ """
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+ user_id: Optional[str] = None
+ """
+ When `group_by=user_id`, this field provides the user ID of the grouped usage
+ result.
+ """
+
+
+class DataResultOrganizationUsageAudioTranscriptionsResult(BaseModel):
+ """The aggregated audio transcriptions usage details of the specific time bucket."""
+
+ num_model_requests: int
+ """The count of requests made to the model."""
+
+ object: Literal["organization.usage.audio_transcriptions.result"]
+
+ seconds: int
+ """The number of seconds processed."""
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ usage result.
+ """
+
+ model: Optional[str] = None
+ """
+ When `group_by=model`, this field provides the model name of the grouped usage
+ result.
+ """
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+ user_id: Optional[str] = None
+ """
+ When `group_by=user_id`, this field provides the user ID of the grouped usage
+ result.
+ """
+
+
+class DataResultOrganizationUsageVectorStoresResult(BaseModel):
+ """The aggregated vector stores usage details of the specific time bucket."""
+
+ object: Literal["organization.usage.vector_stores.result"]
+
+ usage_bytes: int
+ """The vector stores usage in bytes."""
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+
+class DataResultOrganizationUsageCodeInterpreterSessionsResult(BaseModel):
+ """
+ The aggregated code interpreter sessions usage details of the specific time bucket.
+ """
+
+ num_sessions: int
+ """The number of code interpreter sessions."""
+
+ object: Literal["organization.usage.code_interpreter_sessions.result"]
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+
+class DataResultOrganizationCostsResultAmount(BaseModel):
+ """The monetary value in its associated currency."""
+
+ currency: Optional[str] = None
+ """Lowercase ISO-4217 currency e.g. "usd" """
+
+ value: Optional[float] = None
+ """The numeric value of the cost."""
+
+
+class DataResultOrganizationCostsResult(BaseModel):
+ """The aggregated costs details of the specific time bucket."""
+
+ object: Literal["organization.costs.result"]
+
+ amount: Optional[DataResultOrganizationCostsResultAmount] = None
+ """The monetary value in its associated currency."""
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API Key ID of the grouped
+ costs result.
+ """
+
+ line_item: Optional[str] = None
+ """
+ When `group_by=line_item`, this field provides the line item of the grouped
+ costs result.
+ """
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ costs result.
+ """
+
+
+DataResult: TypeAlias = Annotated[
+ Union[
+ DataResultOrganizationUsageCompletionsResult,
+ DataResultOrganizationUsageEmbeddingsResult,
+ DataResultOrganizationUsageModerationsResult,
+ DataResultOrganizationUsageImagesResult,
+ DataResultOrganizationUsageAudioSpeechesResult,
+ DataResultOrganizationUsageAudioTranscriptionsResult,
+ DataResultOrganizationUsageVectorStoresResult,
+ DataResultOrganizationUsageCodeInterpreterSessionsResult,
+ DataResultOrganizationCostsResult,
+ ],
+ PropertyInfo(discriminator="object"),
+]
+
+
+class Data(BaseModel):
+ end_time: int
+
+ object: Literal["bucket"]
+
+ results: List[DataResult]
+
+ start_time: int
+
+
+class UsageEmbeddingsResponse(BaseModel):
+ data: List[Data]
+
+ has_more: bool
+
+ next_page: Optional[str] = None
+
+ object: Literal["page"]
diff --git a/src/openai/types/admin/organization/usage_images_params.py b/src/openai/types/admin/organization/usage_images_params.py
new file mode 100644
index 0000000000..4cce8c456a
--- /dev/null
+++ b/src/openai/types/admin/organization/usage_images_params.py
@@ -0,0 +1,71 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+from typing_extensions import Literal, Required, TypedDict
+
+from ...._types import SequenceNotStr
+
+__all__ = ["UsageImagesParams"]
+
+
+class UsageImagesParams(TypedDict, total=False):
+ start_time: Required[int]
+ """Start time (Unix seconds) of the query time range, inclusive."""
+
+ api_key_ids: SequenceNotStr[str]
+ """Return only usage for these API keys."""
+
+ bucket_width: Literal["1m", "1h", "1d"]
+ """Width of each time bucket in response.
+
+ Currently `1m`, `1h` and `1d` are supported, defaults to `1d`.
+ """
+
+ end_time: int
+ """End time (Unix seconds) of the query time range, exclusive."""
+
+ group_by: List[Literal["project_id", "user_id", "api_key_id", "model", "size", "source"]]
+ """Group the usage data by the specified fields.
+
+ Supported fields include `project_id`, `user_id`, `api_key_id`, `model`, `size`,
+ `source` or any combination of them.
+ """
+
+ limit: int
+ """Specifies the number of buckets to return.
+
+ - `bucket_width=1d`: default: 7, max: 31
+ - `bucket_width=1h`: default: 24, max: 168
+ - `bucket_width=1m`: default: 60, max: 1440
+ """
+
+ models: SequenceNotStr[str]
+ """Return only usage for these models."""
+
+ page: str
+ """A cursor for use in pagination.
+
+ Corresponding to the `next_page` field from the previous response.
+ """
+
+ project_ids: SequenceNotStr[str]
+ """Return only usage for these projects."""
+
+ sizes: List[Literal["256x256", "512x512", "1024x1024", "1792x1792", "1024x1792"]]
+ """Return only usage for these image sizes.
+
+ Possible values are `256x256`, `512x512`, `1024x1024`, `1792x1792`, `1024x1792`
+ or any combination of them.
+ """
+
+ sources: List[Literal["image.generation", "image.edit", "image.variation"]]
+ """Return only usage for these sources.
+
+ Possible values are `image.generation`, `image.edit`, `image.variation` or any
+ combination of them.
+ """
+
+ user_ids: SequenceNotStr[str]
+ """Return only usage for these users."""
diff --git a/src/openai/types/admin/organization/usage_images_response.py b/src/openai/types/admin/organization/usage_images_response.py
new file mode 100644
index 0000000000..55f8d80096
--- /dev/null
+++ b/src/openai/types/admin/organization/usage_images_response.py
@@ -0,0 +1,390 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union, Optional
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ...._utils import PropertyInfo
+from ...._models import BaseModel
+
+__all__ = [
+ "UsageImagesResponse",
+ "Data",
+ "DataResult",
+ "DataResultOrganizationUsageCompletionsResult",
+ "DataResultOrganizationUsageEmbeddingsResult",
+ "DataResultOrganizationUsageModerationsResult",
+ "DataResultOrganizationUsageImagesResult",
+ "DataResultOrganizationUsageAudioSpeechesResult",
+ "DataResultOrganizationUsageAudioTranscriptionsResult",
+ "DataResultOrganizationUsageVectorStoresResult",
+ "DataResultOrganizationUsageCodeInterpreterSessionsResult",
+ "DataResultOrganizationCostsResult",
+ "DataResultOrganizationCostsResultAmount",
+]
+
+
+class DataResultOrganizationUsageCompletionsResult(BaseModel):
+ """The aggregated completions usage details of the specific time bucket."""
+
+ input_tokens: int
+ """The aggregated number of text input tokens used, including cached tokens.
+
+ For customers subscribed to scale tier, this includes scale tier tokens.
+ """
+
+ num_model_requests: int
+ """The count of requests made to the model."""
+
+ object: Literal["organization.usage.completions.result"]
+
+ output_tokens: int
+ """The aggregated number of text output tokens used.
+
+ For customers subscribed to scale tier, this includes scale tier tokens.
+ """
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ usage result.
+ """
+
+ batch: Optional[bool] = None
+ """
+ When `group_by=batch`, this field tells whether the grouped usage result is
+ batch or not.
+ """
+
+ input_audio_tokens: Optional[int] = None
+ """The aggregated number of audio input tokens used, including cached tokens."""
+
+ input_cached_tokens: Optional[int] = None
+ """
+ The aggregated number of text input tokens that have been cached from previous
+ requests. For customers subscribed to scale tier, this includes scale tier
+ tokens.
+ """
+
+ model: Optional[str] = None
+ """
+ When `group_by=model`, this field provides the model name of the grouped usage
+ result.
+ """
+
+ output_audio_tokens: Optional[int] = None
+ """The aggregated number of audio output tokens used."""
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+ service_tier: Optional[str] = None
+ """
+ When `group_by=service_tier`, this field provides the service tier of the
+ grouped usage result.
+ """
+
+ user_id: Optional[str] = None
+ """
+ When `group_by=user_id`, this field provides the user ID of the grouped usage
+ result.
+ """
+
+
+class DataResultOrganizationUsageEmbeddingsResult(BaseModel):
+ """The aggregated embeddings usage details of the specific time bucket."""
+
+ input_tokens: int
+ """The aggregated number of input tokens used."""
+
+ num_model_requests: int
+ """The count of requests made to the model."""
+
+ object: Literal["organization.usage.embeddings.result"]
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ usage result.
+ """
+
+ model: Optional[str] = None
+ """
+ When `group_by=model`, this field provides the model name of the grouped usage
+ result.
+ """
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+ user_id: Optional[str] = None
+ """
+ When `group_by=user_id`, this field provides the user ID of the grouped usage
+ result.
+ """
+
+
+class DataResultOrganizationUsageModerationsResult(BaseModel):
+ """The aggregated moderations usage details of the specific time bucket."""
+
+ input_tokens: int
+ """The aggregated number of input tokens used."""
+
+ num_model_requests: int
+ """The count of requests made to the model."""
+
+ object: Literal["organization.usage.moderations.result"]
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ usage result.
+ """
+
+ model: Optional[str] = None
+ """
+ When `group_by=model`, this field provides the model name of the grouped usage
+ result.
+ """
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+ user_id: Optional[str] = None
+ """
+ When `group_by=user_id`, this field provides the user ID of the grouped usage
+ result.
+ """
+
+
+class DataResultOrganizationUsageImagesResult(BaseModel):
+ """The aggregated images usage details of the specific time bucket."""
+
+ images: int
+ """The number of images processed."""
+
+ num_model_requests: int
+ """The count of requests made to the model."""
+
+ object: Literal["organization.usage.images.result"]
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ usage result.
+ """
+
+ model: Optional[str] = None
+ """
+ When `group_by=model`, this field provides the model name of the grouped usage
+ result.
+ """
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+ size: Optional[str] = None
+ """
+ When `group_by=size`, this field provides the image size of the grouped usage
+ result.
+ """
+
+ source: Optional[str] = None
+ """
+ When `group_by=source`, this field provides the source of the grouped usage
+ result, possible values are `image.generation`, `image.edit`, `image.variation`.
+ """
+
+ user_id: Optional[str] = None
+ """
+ When `group_by=user_id`, this field provides the user ID of the grouped usage
+ result.
+ """
+
+
+class DataResultOrganizationUsageAudioSpeechesResult(BaseModel):
+ """The aggregated audio speeches usage details of the specific time bucket."""
+
+ characters: int
+ """The number of characters processed."""
+
+ num_model_requests: int
+ """The count of requests made to the model."""
+
+ object: Literal["organization.usage.audio_speeches.result"]
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ usage result.
+ """
+
+ model: Optional[str] = None
+ """
+ When `group_by=model`, this field provides the model name of the grouped usage
+ result.
+ """
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+ user_id: Optional[str] = None
+ """
+ When `group_by=user_id`, this field provides the user ID of the grouped usage
+ result.
+ """
+
+
+class DataResultOrganizationUsageAudioTranscriptionsResult(BaseModel):
+ """The aggregated audio transcriptions usage details of the specific time bucket."""
+
+ num_model_requests: int
+ """The count of requests made to the model."""
+
+ object: Literal["organization.usage.audio_transcriptions.result"]
+
+ seconds: int
+ """The number of seconds processed."""
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ usage result.
+ """
+
+ model: Optional[str] = None
+ """
+ When `group_by=model`, this field provides the model name of the grouped usage
+ result.
+ """
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+ user_id: Optional[str] = None
+ """
+ When `group_by=user_id`, this field provides the user ID of the grouped usage
+ result.
+ """
+
+
+class DataResultOrganizationUsageVectorStoresResult(BaseModel):
+ """The aggregated vector stores usage details of the specific time bucket."""
+
+ object: Literal["organization.usage.vector_stores.result"]
+
+ usage_bytes: int
+ """The vector stores usage in bytes."""
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+
+class DataResultOrganizationUsageCodeInterpreterSessionsResult(BaseModel):
+ """
+ The aggregated code interpreter sessions usage details of the specific time bucket.
+ """
+
+ num_sessions: int
+ """The number of code interpreter sessions."""
+
+ object: Literal["organization.usage.code_interpreter_sessions.result"]
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+
+class DataResultOrganizationCostsResultAmount(BaseModel):
+ """The monetary value in its associated currency."""
+
+ currency: Optional[str] = None
+ """Lowercase ISO-4217 currency, e.g. "usd" """
+
+ value: Optional[float] = None
+ """The numeric value of the cost."""
+
+
+class DataResultOrganizationCostsResult(BaseModel):
+ """The aggregated costs details of the specific time bucket."""
+
+ object: Literal["organization.costs.result"]
+
+ amount: Optional[DataResultOrganizationCostsResultAmount] = None
+ """The monetary value in its associated currency."""
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ costs result.
+ """
+
+ line_item: Optional[str] = None
+ """
+ When `group_by=line_item`, this field provides the line item of the grouped
+ costs result.
+ """
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ costs result.
+ """
+
+
+DataResult: TypeAlias = Annotated[
+ Union[
+ DataResultOrganizationUsageCompletionsResult,
+ DataResultOrganizationUsageEmbeddingsResult,
+ DataResultOrganizationUsageModerationsResult,
+ DataResultOrganizationUsageImagesResult,
+ DataResultOrganizationUsageAudioSpeechesResult,
+ DataResultOrganizationUsageAudioTranscriptionsResult,
+ DataResultOrganizationUsageVectorStoresResult,
+ DataResultOrganizationUsageCodeInterpreterSessionsResult,
+ DataResultOrganizationCostsResult,
+ ],
+ PropertyInfo(discriminator="object"),
+]
+
+
+class Data(BaseModel):
+ end_time: int
+
+ object: Literal["bucket"]
+
+ results: List[DataResult]
+
+ start_time: int
+
+
+class UsageImagesResponse(BaseModel):
+ data: List[Data]
+
+ has_more: bool
+
+ next_page: Optional[str] = None
+
+ object: Literal["page"]
diff --git a/src/openai/types/admin/organization/usage_moderations_params.py b/src/openai/types/admin/organization/usage_moderations_params.py
new file mode 100644
index 0000000000..acb401b9a8
--- /dev/null
+++ b/src/openai/types/admin/organization/usage_moderations_params.py
@@ -0,0 +1,57 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+from typing_extensions import Literal, Required, TypedDict
+
+from ...._types import SequenceNotStr
+
+__all__ = ["UsageModerationsParams"]
+
+
+class UsageModerationsParams(TypedDict, total=False):
+ start_time: Required[int]
+ """Start time (Unix seconds) of the query time range, inclusive."""
+
+ api_key_ids: SequenceNotStr[str]
+ """Return only usage for these API keys."""
+
+ bucket_width: Literal["1m", "1h", "1d"]
+ """Width of each time bucket in response.
+
+ Currently `1m`, `1h` and `1d` are supported, defaults to `1d`.
+ """
+
+ end_time: int
+ """End time (Unix seconds) of the query time range, exclusive."""
+
+ group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]]
+ """Group the usage data by the specified fields.
+
+ Supported fields include `project_id`, `user_id`, `api_key_id`, `model` or any
+ combination of them.
+ """
+
+ limit: int
+ """Specifies the number of buckets to return.
+
+ - `bucket_width=1d`: default: 7, max: 31
+ - `bucket_width=1h`: default: 24, max: 168
+ - `bucket_width=1m`: default: 60, max: 1440
+ """
+
+ models: SequenceNotStr[str]
+ """Return only usage for these models."""
+
+ page: str
+ """A cursor for use in pagination.
+
+ Corresponding to the `next_page` field from the previous response.
+ """
+
+ project_ids: SequenceNotStr[str]
+ """Return only usage for these projects."""
+
+ user_ids: SequenceNotStr[str]
+ """Return only usage for these users."""
diff --git a/src/openai/types/admin/organization/usage_moderations_response.py b/src/openai/types/admin/organization/usage_moderations_response.py
new file mode 100644
index 0000000000..87919b50a9
--- /dev/null
+++ b/src/openai/types/admin/organization/usage_moderations_response.py
@@ -0,0 +1,390 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union, Optional
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ...._utils import PropertyInfo
+from ...._models import BaseModel
+
+__all__ = [
+ "UsageModerationsResponse",
+ "Data",
+ "DataResult",
+ "DataResultOrganizationUsageCompletionsResult",
+ "DataResultOrganizationUsageEmbeddingsResult",
+ "DataResultOrganizationUsageModerationsResult",
+ "DataResultOrganizationUsageImagesResult",
+ "DataResultOrganizationUsageAudioSpeechesResult",
+ "DataResultOrganizationUsageAudioTranscriptionsResult",
+ "DataResultOrganizationUsageVectorStoresResult",
+ "DataResultOrganizationUsageCodeInterpreterSessionsResult",
+ "DataResultOrganizationCostsResult",
+ "DataResultOrganizationCostsResultAmount",
+]
+
+
+class DataResultOrganizationUsageCompletionsResult(BaseModel):
+ """The aggregated completions usage details of the specific time bucket."""
+
+ input_tokens: int
+ """The aggregated number of text input tokens used, including cached tokens.
+
+ For customers subscribed to scale tier, this includes scale tier tokens.
+ """
+
+ num_model_requests: int
+ """The count of requests made to the model."""
+
+ object: Literal["organization.usage.completions.result"]
+
+ output_tokens: int
+ """The aggregated number of text output tokens used.
+
+ For customers subscribed to scale tier, this includes scale tier tokens.
+ """
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ usage result.
+ """
+
+ batch: Optional[bool] = None
+ """
+ When `group_by=batch`, this field tells whether the grouped usage result is
+ batch or not.
+ """
+
+ input_audio_tokens: Optional[int] = None
+ """The aggregated number of audio input tokens used, including cached tokens."""
+
+ input_cached_tokens: Optional[int] = None
+ """
+ The aggregated number of text input tokens that have been cached from previous
+ requests. For customers subscribed to scale tier, this includes scale tier
+ tokens.
+ """
+
+ model: Optional[str] = None
+ """
+ When `group_by=model`, this field provides the model name of the grouped usage
+ result.
+ """
+
+ output_audio_tokens: Optional[int] = None
+ """The aggregated number of audio output tokens used."""
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+ service_tier: Optional[str] = None
+ """
+ When `group_by=service_tier`, this field provides the service tier of the
+ grouped usage result.
+ """
+
+ user_id: Optional[str] = None
+ """
+ When `group_by=user_id`, this field provides the user ID of the grouped usage
+ result.
+ """
+
+
+class DataResultOrganizationUsageEmbeddingsResult(BaseModel):
+ """The aggregated embeddings usage details of the specific time bucket."""
+
+ input_tokens: int
+ """The aggregated number of input tokens used."""
+
+ num_model_requests: int
+ """The count of requests made to the model."""
+
+ object: Literal["organization.usage.embeddings.result"]
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ usage result.
+ """
+
+ model: Optional[str] = None
+ """
+ When `group_by=model`, this field provides the model name of the grouped usage
+ result.
+ """
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+ user_id: Optional[str] = None
+ """
+ When `group_by=user_id`, this field provides the user ID of the grouped usage
+ result.
+ """
+
+
+class DataResultOrganizationUsageModerationsResult(BaseModel):
+ """The aggregated moderations usage details of the specific time bucket."""
+
+ input_tokens: int
+ """The aggregated number of input tokens used."""
+
+ num_model_requests: int
+ """The count of requests made to the model."""
+
+ object: Literal["organization.usage.moderations.result"]
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ usage result.
+ """
+
+ model: Optional[str] = None
+ """
+ When `group_by=model`, this field provides the model name of the grouped usage
+ result.
+ """
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+ user_id: Optional[str] = None
+ """
+ When `group_by=user_id`, this field provides the user ID of the grouped usage
+ result.
+ """
+
+
+class DataResultOrganizationUsageImagesResult(BaseModel):
+ """The aggregated images usage details of the specific time bucket."""
+
+ images: int
+ """The number of images processed."""
+
+ num_model_requests: int
+ """The count of requests made to the model."""
+
+ object: Literal["organization.usage.images.result"]
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ usage result.
+ """
+
+ model: Optional[str] = None
+ """
+ When `group_by=model`, this field provides the model name of the grouped usage
+ result.
+ """
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+ size: Optional[str] = None
+ """
+ When `group_by=size`, this field provides the image size of the grouped usage
+ result.
+ """
+
+ source: Optional[str] = None
+ """
+ When `group_by=source`, this field provides the source of the grouped usage
+ result, possible values are `image.generation`, `image.edit`, `image.variation`.
+ """
+
+ user_id: Optional[str] = None
+ """
+ When `group_by=user_id`, this field provides the user ID of the grouped usage
+ result.
+ """
+
+
+class DataResultOrganizationUsageAudioSpeechesResult(BaseModel):
+ """The aggregated audio speeches usage details of the specific time bucket."""
+
+ characters: int
+ """The number of characters processed."""
+
+ num_model_requests: int
+ """The count of requests made to the model."""
+
+ object: Literal["organization.usage.audio_speeches.result"]
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ usage result.
+ """
+
+ model: Optional[str] = None
+ """
+ When `group_by=model`, this field provides the model name of the grouped usage
+ result.
+ """
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+ user_id: Optional[str] = None
+ """
+ When `group_by=user_id`, this field provides the user ID of the grouped usage
+ result.
+ """
+
+
+class DataResultOrganizationUsageAudioTranscriptionsResult(BaseModel):
+ """The aggregated audio transcriptions usage details of the specific time bucket."""
+
+ num_model_requests: int
+ """The count of requests made to the model."""
+
+ object: Literal["organization.usage.audio_transcriptions.result"]
+
+ seconds: int
+ """The number of seconds processed."""
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ usage result.
+ """
+
+ model: Optional[str] = None
+ """
+ When `group_by=model`, this field provides the model name of the grouped usage
+ result.
+ """
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+ user_id: Optional[str] = None
+ """
+ When `group_by=user_id`, this field provides the user ID of the grouped usage
+ result.
+ """
+
+
+class DataResultOrganizationUsageVectorStoresResult(BaseModel):
+ """The aggregated vector stores usage details of the specific time bucket."""
+
+ object: Literal["organization.usage.vector_stores.result"]
+
+ usage_bytes: int
+ """The vector stores usage in bytes."""
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+
+class DataResultOrganizationUsageCodeInterpreterSessionsResult(BaseModel):
+ """
+ The aggregated code interpreter sessions usage details of the specific time bucket.
+ """
+
+ num_sessions: int
+ """The number of code interpreter sessions."""
+
+ object: Literal["organization.usage.code_interpreter_sessions.result"]
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+
+class DataResultOrganizationCostsResultAmount(BaseModel):
+ """The monetary value in its associated currency."""
+
+ currency: Optional[str] = None
+ """Lowercase ISO-4217 currency, e.g. "usd" """
+
+ value: Optional[float] = None
+ """The numeric value of the cost."""
+
+
+class DataResultOrganizationCostsResult(BaseModel):
+ """The aggregated costs details of the specific time bucket."""
+
+ object: Literal["organization.costs.result"]
+
+ amount: Optional[DataResultOrganizationCostsResultAmount] = None
+ """The monetary value in its associated currency."""
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ costs result.
+ """
+
+ line_item: Optional[str] = None
+ """
+ When `group_by=line_item`, this field provides the line item of the grouped
+ costs result.
+ """
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ costs result.
+ """
+
+
+DataResult: TypeAlias = Annotated[
+ Union[
+ DataResultOrganizationUsageCompletionsResult,
+ DataResultOrganizationUsageEmbeddingsResult,
+ DataResultOrganizationUsageModerationsResult,
+ DataResultOrganizationUsageImagesResult,
+ DataResultOrganizationUsageAudioSpeechesResult,
+ DataResultOrganizationUsageAudioTranscriptionsResult,
+ DataResultOrganizationUsageVectorStoresResult,
+ DataResultOrganizationUsageCodeInterpreterSessionsResult,
+ DataResultOrganizationCostsResult,
+ ],
+ PropertyInfo(discriminator="object"),
+]
+
+
+class Data(BaseModel):
+ end_time: int
+
+ object: Literal["bucket"]
+
+ results: List[DataResult]
+
+ start_time: int
+
+
+class UsageModerationsResponse(BaseModel):
+ data: List[Data]
+
+ has_more: bool
+
+ next_page: Optional[str] = None
+
+ object: Literal["page"]
diff --git a/src/openai/types/admin/organization/usage_vector_stores_params.py b/src/openai/types/admin/organization/usage_vector_stores_params.py
new file mode 100644
index 0000000000..bfb8dcede4
--- /dev/null
+++ b/src/openai/types/admin/organization/usage_vector_stores_params.py
@@ -0,0 +1,47 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+from typing_extensions import Literal, Required, TypedDict
+
+from ...._types import SequenceNotStr
+
+__all__ = ["UsageVectorStoresParams"]
+
+
+class UsageVectorStoresParams(TypedDict, total=False):
+ start_time: Required[int]
+ """Start time (Unix seconds) of the query time range, inclusive."""
+
+ bucket_width: Literal["1m", "1h", "1d"]
+ """Width of each time bucket in response.
+
+ Currently `1m`, `1h` and `1d` are supported, defaults to `1d`.
+ """
+
+ end_time: int
+ """End time (Unix seconds) of the query time range, exclusive."""
+
+ group_by: List[Literal["project_id"]]
+ """Group the usage data by the specified fields.
+
+ Supported fields include `project_id`.
+ """
+
+ limit: int
+ """Specifies the number of buckets to return.
+
+ - `bucket_width=1d`: default: 7, max: 31
+ - `bucket_width=1h`: default: 24, max: 168
+ - `bucket_width=1m`: default: 60, max: 1440
+ """
+
+ page: str
+ """A cursor for use in pagination.
+
+ Corresponding to the `next_page` field from the previous response.
+ """
+
+ project_ids: SequenceNotStr[str]
+ """Return only usage for these projects."""
diff --git a/src/openai/types/admin/organization/usage_vector_stores_response.py b/src/openai/types/admin/organization/usage_vector_stores_response.py
new file mode 100644
index 0000000000..d3cd853653
--- /dev/null
+++ b/src/openai/types/admin/organization/usage_vector_stores_response.py
@@ -0,0 +1,390 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union, Optional
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ...._utils import PropertyInfo
+from ...._models import BaseModel
+
+__all__ = [
+ "UsageVectorStoresResponse",
+ "Data",
+ "DataResult",
+ "DataResultOrganizationUsageCompletionsResult",
+ "DataResultOrganizationUsageEmbeddingsResult",
+ "DataResultOrganizationUsageModerationsResult",
+ "DataResultOrganizationUsageImagesResult",
+ "DataResultOrganizationUsageAudioSpeechesResult",
+ "DataResultOrganizationUsageAudioTranscriptionsResult",
+ "DataResultOrganizationUsageVectorStoresResult",
+ "DataResultOrganizationUsageCodeInterpreterSessionsResult",
+ "DataResultOrganizationCostsResult",
+ "DataResultOrganizationCostsResultAmount",
+]
+
+
+class DataResultOrganizationUsageCompletionsResult(BaseModel):
+ """The aggregated completions usage details of the specific time bucket."""
+
+ input_tokens: int
+ """The aggregated number of text input tokens used, including cached tokens.
+
+    For customers subscribed to scale tier, this includes scale tier tokens.
+ """
+
+ num_model_requests: int
+ """The count of requests made to the model."""
+
+ object: Literal["organization.usage.completions.result"]
+
+ output_tokens: int
+ """The aggregated number of text output tokens used.
+
+    For customers subscribed to scale tier, this includes scale tier tokens.
+ """
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ usage result.
+ """
+
+ batch: Optional[bool] = None
+ """
+ When `group_by=batch`, this field tells whether the grouped usage result is
+ batch or not.
+ """
+
+ input_audio_tokens: Optional[int] = None
+ """The aggregated number of audio input tokens used, including cached tokens."""
+
+ input_cached_tokens: Optional[int] = None
+ """
+    The aggregated number of text input tokens that have been cached from
+    previous requests. For customers subscribed to scale tier, this includes
+    scale tier tokens.
+ """
+
+ model: Optional[str] = None
+ """
+ When `group_by=model`, this field provides the model name of the grouped usage
+ result.
+ """
+
+ output_audio_tokens: Optional[int] = None
+ """The aggregated number of audio output tokens used."""
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+ service_tier: Optional[str] = None
+ """
+ When `group_by=service_tier`, this field provides the service tier of the
+ grouped usage result.
+ """
+
+ user_id: Optional[str] = None
+ """
+ When `group_by=user_id`, this field provides the user ID of the grouped usage
+ result.
+ """
+
+
+class DataResultOrganizationUsageEmbeddingsResult(BaseModel):
+ """The aggregated embeddings usage details of the specific time bucket."""
+
+ input_tokens: int
+ """The aggregated number of input tokens used."""
+
+ num_model_requests: int
+ """The count of requests made to the model."""
+
+ object: Literal["organization.usage.embeddings.result"]
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ usage result.
+ """
+
+ model: Optional[str] = None
+ """
+ When `group_by=model`, this field provides the model name of the grouped usage
+ result.
+ """
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+ user_id: Optional[str] = None
+ """
+ When `group_by=user_id`, this field provides the user ID of the grouped usage
+ result.
+ """
+
+
+class DataResultOrganizationUsageModerationsResult(BaseModel):
+ """The aggregated moderations usage details of the specific time bucket."""
+
+ input_tokens: int
+ """The aggregated number of input tokens used."""
+
+ num_model_requests: int
+ """The count of requests made to the model."""
+
+ object: Literal["organization.usage.moderations.result"]
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ usage result.
+ """
+
+ model: Optional[str] = None
+ """
+ When `group_by=model`, this field provides the model name of the grouped usage
+ result.
+ """
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+ user_id: Optional[str] = None
+ """
+ When `group_by=user_id`, this field provides the user ID of the grouped usage
+ result.
+ """
+
+
+class DataResultOrganizationUsageImagesResult(BaseModel):
+ """The aggregated images usage details of the specific time bucket."""
+
+ images: int
+ """The number of images processed."""
+
+ num_model_requests: int
+ """The count of requests made to the model."""
+
+ object: Literal["organization.usage.images.result"]
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ usage result.
+ """
+
+ model: Optional[str] = None
+ """
+ When `group_by=model`, this field provides the model name of the grouped usage
+ result.
+ """
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+ size: Optional[str] = None
+ """
+ When `group_by=size`, this field provides the image size of the grouped usage
+ result.
+ """
+
+ source: Optional[str] = None
+ """
+ When `group_by=source`, this field provides the source of the grouped usage
+ result, possible values are `image.generation`, `image.edit`, `image.variation`.
+ """
+
+ user_id: Optional[str] = None
+ """
+ When `group_by=user_id`, this field provides the user ID of the grouped usage
+ result.
+ """
+
+
+class DataResultOrganizationUsageAudioSpeechesResult(BaseModel):
+ """The aggregated audio speeches usage details of the specific time bucket."""
+
+ characters: int
+ """The number of characters processed."""
+
+ num_model_requests: int
+ """The count of requests made to the model."""
+
+ object: Literal["organization.usage.audio_speeches.result"]
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ usage result.
+ """
+
+ model: Optional[str] = None
+ """
+ When `group_by=model`, this field provides the model name of the grouped usage
+ result.
+ """
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+ user_id: Optional[str] = None
+ """
+ When `group_by=user_id`, this field provides the user ID of the grouped usage
+ result.
+ """
+
+
+class DataResultOrganizationUsageAudioTranscriptionsResult(BaseModel):
+ """The aggregated audio transcriptions usage details of the specific time bucket."""
+
+ num_model_requests: int
+ """The count of requests made to the model."""
+
+ object: Literal["organization.usage.audio_transcriptions.result"]
+
+ seconds: int
+ """The number of seconds processed."""
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API key ID of the grouped
+ usage result.
+ """
+
+ model: Optional[str] = None
+ """
+ When `group_by=model`, this field provides the model name of the grouped usage
+ result.
+ """
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+ user_id: Optional[str] = None
+ """
+ When `group_by=user_id`, this field provides the user ID of the grouped usage
+ result.
+ """
+
+
+class DataResultOrganizationUsageVectorStoresResult(BaseModel):
+ """The aggregated vector stores usage details of the specific time bucket."""
+
+ object: Literal["organization.usage.vector_stores.result"]
+
+ usage_bytes: int
+ """The vector stores usage in bytes."""
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+
+class DataResultOrganizationUsageCodeInterpreterSessionsResult(BaseModel):
+ """
+ The aggregated code interpreter sessions usage details of the specific time bucket.
+ """
+
+ num_sessions: int
+ """The number of code interpreter sessions."""
+
+ object: Literal["organization.usage.code_interpreter_sessions.result"]
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ usage result.
+ """
+
+
+class DataResultOrganizationCostsResultAmount(BaseModel):
+ """The monetary value in its associated currency."""
+
+ currency: Optional[str] = None
+ """Lowercase ISO-4217 currency e.g. "usd" """
+
+ value: Optional[float] = None
+ """The numeric value of the cost."""
+
+
+class DataResultOrganizationCostsResult(BaseModel):
+ """The aggregated costs details of the specific time bucket."""
+
+ object: Literal["organization.costs.result"]
+
+ amount: Optional[DataResultOrganizationCostsResultAmount] = None
+ """The monetary value in its associated currency."""
+
+ api_key_id: Optional[str] = None
+ """
+ When `group_by=api_key_id`, this field provides the API Key ID of the grouped
+ costs result.
+ """
+
+ line_item: Optional[str] = None
+ """
+ When `group_by=line_item`, this field provides the line item of the grouped
+ costs result.
+ """
+
+ project_id: Optional[str] = None
+ """
+ When `group_by=project_id`, this field provides the project ID of the grouped
+ costs result.
+ """
+
+
+DataResult: TypeAlias = Annotated[
+ Union[
+ DataResultOrganizationUsageCompletionsResult,
+ DataResultOrganizationUsageEmbeddingsResult,
+ DataResultOrganizationUsageModerationsResult,
+ DataResultOrganizationUsageImagesResult,
+ DataResultOrganizationUsageAudioSpeechesResult,
+ DataResultOrganizationUsageAudioTranscriptionsResult,
+ DataResultOrganizationUsageVectorStoresResult,
+ DataResultOrganizationUsageCodeInterpreterSessionsResult,
+ DataResultOrganizationCostsResult,
+ ],
+ PropertyInfo(discriminator="object"),
+]
+
+
+class Data(BaseModel):
+ end_time: int
+
+ object: Literal["bucket"]
+
+ results: List[DataResult]
+
+ start_time: int
+
+
+class UsageVectorStoresResponse(BaseModel):
+ data: List[Data]
+
+ has_more: bool
+
+ next_page: Optional[str] = None
+
+ object: Literal["page"]
diff --git a/src/openai/types/admin/organization/user_delete_response.py b/src/openai/types/admin/organization/user_delete_response.py
new file mode 100644
index 0000000000..b8fc8c994a
--- /dev/null
+++ b/src/openai/types/admin/organization/user_delete_response.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["UserDeleteResponse"]
+
+
+class UserDeleteResponse(BaseModel):
+ id: str
+
+ deleted: bool
+
+ object: Literal["organization.user.deleted"]
diff --git a/src/openai/types/admin/organization/user_list_params.py b/src/openai/types/admin/organization/user_list_params.py
new file mode 100644
index 0000000000..7bcfc78979
--- /dev/null
+++ b/src/openai/types/admin/organization/user_list_params.py
@@ -0,0 +1,29 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+from ...._types import SequenceNotStr
+
+__all__ = ["UserListParams"]
+
+
+class UserListParams(TypedDict, total=False):
+ after: str
+ """A cursor for use in pagination.
+
+ `after` is an object ID that defines your place in the list. For instance, if
+ you make a list request and receive 100 objects, ending with obj_foo, your
+ subsequent call can include after=obj_foo in order to fetch the next page of the
+ list.
+ """
+
+ emails: SequenceNotStr[str]
+ """Filter by the email address of users."""
+
+ limit: int
+ """A limit on the number of objects to be returned.
+
+ Limit can range between 1 and 100, and the default is 20.
+ """
diff --git a/src/openai/types/admin/organization/user_update_params.py b/src/openai/types/admin/organization/user_update_params.py
new file mode 100644
index 0000000000..24181b0649
--- /dev/null
+++ b/src/openai/types/admin/organization/user_update_params.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import TypedDict
+
+__all__ = ["UserUpdateParams"]
+
+
+class UserUpdateParams(TypedDict, total=False):
+ developer_persona: Optional[str]
+ """Developer persona metadata."""
+
+ role: Optional[str]
+ """`owner` or `reader`"""
+
+ role_id: Optional[str]
+ """Role ID to assign to the user."""
+
+ technical_level: Optional[str]
+ """Technical level metadata."""
diff --git a/src/openai/types/admin/organization/users/__init__.py b/src/openai/types/admin/organization/users/__init__.py
new file mode 100644
index 0000000000..ed464fde83
--- /dev/null
+++ b/src/openai/types/admin/organization/users/__init__.py
@@ -0,0 +1,9 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .role_list_params import RoleListParams as RoleListParams
+from .role_create_params import RoleCreateParams as RoleCreateParams
+from .role_list_response import RoleListResponse as RoleListResponse
+from .role_create_response import RoleCreateResponse as RoleCreateResponse
+from .role_delete_response import RoleDeleteResponse as RoleDeleteResponse
diff --git a/src/openai/types/admin/organization/users/role_create_params.py b/src/openai/types/admin/organization/users/role_create_params.py
new file mode 100644
index 0000000000..0ebc196eef
--- /dev/null
+++ b/src/openai/types/admin/organization/users/role_create_params.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+__all__ = ["RoleCreateParams"]
+
+
+class RoleCreateParams(TypedDict, total=False):
+ role_id: Required[str]
+ """Identifier of the role to assign."""
diff --git a/src/openai/types/admin/organization/users/role_create_response.py b/src/openai/types/admin/organization/users/role_create_response.py
new file mode 100644
index 0000000000..0b989ad461
--- /dev/null
+++ b/src/openai/types/admin/organization/users/role_create_response.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..role import Role
+from ....._models import BaseModel
+from ..organization_user import OrganizationUser
+
+__all__ = ["RoleCreateResponse"]
+
+
+class RoleCreateResponse(BaseModel):
+ """Role assignment linking a user to a role."""
+
+ object: Literal["user.role"]
+ """Always `user.role`."""
+
+ role: Role
+ """Details about a role that can be assigned through the public Roles API."""
+
+ user: OrganizationUser
+ """Represents an individual `user` within an organization."""
diff --git a/src/openai/types/admin/organization/users/role_delete_response.py b/src/openai/types/admin/organization/users/role_delete_response.py
new file mode 100644
index 0000000000..fb6a111614
--- /dev/null
+++ b/src/openai/types/admin/organization/users/role_delete_response.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from ....._models import BaseModel
+
+__all__ = ["RoleDeleteResponse"]
+
+
+class RoleDeleteResponse(BaseModel):
+ """Confirmation payload returned after unassigning a role."""
+
+ deleted: bool
+ """Whether the assignment was removed."""
+
+ object: str
+ """
+ Identifier for the deleted assignment, such as `group.role.deleted` or
+ `user.role.deleted`.
+ """
diff --git a/src/openai/types/admin/organization/users/role_list_params.py b/src/openai/types/admin/organization/users/role_list_params.py
new file mode 100644
index 0000000000..451a1a2045
--- /dev/null
+++ b/src/openai/types/admin/organization/users/role_list_params.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["RoleListParams"]
+
+
+class RoleListParams(TypedDict, total=False):
+ after: str
+ """Cursor for pagination.
+
+ Provide the value from the previous response's `next` field to continue listing
+ organization roles.
+ """
+
+ limit: int
+ """A limit on the number of organization role assignments to return."""
+
+ order: Literal["asc", "desc"]
+ """Sort order for the returned organization roles."""
diff --git a/src/openai/types/admin/organization/users/role_list_response.py b/src/openai/types/admin/organization/users/role_list_response.py
new file mode 100644
index 0000000000..337d517ba1
--- /dev/null
+++ b/src/openai/types/admin/organization/users/role_list_response.py
@@ -0,0 +1,46 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, List, Optional
+
+from ....._models import BaseModel
+
+__all__ = ["RoleListResponse"]
+
+
+class RoleListResponse(BaseModel):
+ """
+ Detailed information about a role assignment entry returned when listing assignments.
+ """
+
+ id: str
+ """Identifier for the role."""
+
+ created_at: Optional[int] = None
+ """When the role was created."""
+
+ created_by: Optional[str] = None
+ """Identifier of the actor who created the role."""
+
+ created_by_user_obj: Optional[Dict[str, object]] = None
+ """User details for the actor that created the role, when available."""
+
+ description: Optional[str] = None
+ """Description of the role."""
+
+ metadata: Optional[Dict[str, object]] = None
+ """Arbitrary metadata stored on the role."""
+
+ name: str
+ """Name of the role."""
+
+ permissions: List[str]
+ """Permissions associated with the role."""
+
+ predefined_role: bool
+ """Whether the role is predefined by OpenAI."""
+
+ resource_type: str
+ """Resource type the role applies to."""
+
+ updated_at: Optional[int] = None
+ """When the role was last updated."""
diff --git a/tests/api_resources/admin/__init__.py b/tests/api_resources/admin/__init__.py
new file mode 100644
index 0000000000..fd8019a9a1
--- /dev/null
+++ b/tests/api_resources/admin/__init__.py
@@ -0,0 +1 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/admin/organization/__init__.py b/tests/api_resources/admin/organization/__init__.py
new file mode 100644
index 0000000000..fd8019a9a1
--- /dev/null
+++ b/tests/api_resources/admin/organization/__init__.py
@@ -0,0 +1 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/admin/organization/groups/__init__.py b/tests/api_resources/admin/organization/groups/__init__.py
new file mode 100644
index 0000000000..fd8019a9a1
--- /dev/null
+++ b/tests/api_resources/admin/organization/groups/__init__.py
@@ -0,0 +1 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/admin/organization/groups/test_roles.py b/tests/api_resources/admin/organization/groups/test_roles.py
new file mode 100644
index 0000000000..08702fddd2
--- /dev/null
+++ b/tests/api_resources/admin/organization/groups/test_roles.py
@@ -0,0 +1,305 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from openai import OpenAI, AsyncOpenAI
+from tests.utils import assert_matches_type
+from openai.pagination import SyncNextCursorPage, AsyncNextCursorPage
+from openai.types.admin.organization.groups import (
+ RoleListResponse,
+ RoleCreateResponse,
+ RoleDeleteResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestRoles:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ def test_method_create(self, client: OpenAI) -> None:
+ role = client.admin.organization.groups.roles.create(
+ group_id="group_id",
+ role_id="role_id",
+ )
+ assert_matches_type(RoleCreateResponse, role, path=["response"])
+
+ @parametrize
+ def test_raw_response_create(self, client: OpenAI) -> None:
+ response = client.admin.organization.groups.roles.with_raw_response.create(
+ group_id="group_id",
+ role_id="role_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ role = response.parse()
+ assert_matches_type(RoleCreateResponse, role, path=["response"])
+
+ @parametrize
+ def test_streaming_response_create(self, client: OpenAI) -> None:
+ with client.admin.organization.groups.roles.with_streaming_response.create(
+ group_id="group_id",
+ role_id="role_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ role = response.parse()
+ assert_matches_type(RoleCreateResponse, role, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_create(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `group_id` but received ''"):
+ client.admin.organization.groups.roles.with_raw_response.create(
+ group_id="",
+ role_id="role_id",
+ )
+
+ @parametrize
+ def test_method_list(self, client: OpenAI) -> None:
+ role = client.admin.organization.groups.roles.list(
+ group_id="group_id",
+ )
+ assert_matches_type(SyncNextCursorPage[RoleListResponse], role, path=["response"])
+
+ @parametrize
+ def test_method_list_with_all_params(self, client: OpenAI) -> None:
+ role = client.admin.organization.groups.roles.list(
+ group_id="group_id",
+ after="after",
+ limit=0,
+ order="asc",
+ )
+ assert_matches_type(SyncNextCursorPage[RoleListResponse], role, path=["response"])
+
+ @parametrize
+ def test_raw_response_list(self, client: OpenAI) -> None:
+ response = client.admin.organization.groups.roles.with_raw_response.list(
+ group_id="group_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ role = response.parse()
+ assert_matches_type(SyncNextCursorPage[RoleListResponse], role, path=["response"])
+
+ @parametrize
+ def test_streaming_response_list(self, client: OpenAI) -> None:
+ with client.admin.organization.groups.roles.with_streaming_response.list(
+ group_id="group_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ role = response.parse()
+ assert_matches_type(SyncNextCursorPage[RoleListResponse], role, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_list(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `group_id` but received ''"):
+ client.admin.organization.groups.roles.with_raw_response.list(
+ group_id="",
+ )
+
+ @parametrize
+ def test_method_delete(self, client: OpenAI) -> None:
+ role = client.admin.organization.groups.roles.delete(
+ role_id="role_id",
+ group_id="group_id",
+ )
+ assert_matches_type(RoleDeleteResponse, role, path=["response"])
+
+ @parametrize
+ def test_raw_response_delete(self, client: OpenAI) -> None:
+ response = client.admin.organization.groups.roles.with_raw_response.delete(
+ role_id="role_id",
+ group_id="group_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ role = response.parse()
+ assert_matches_type(RoleDeleteResponse, role, path=["response"])
+
+ @parametrize
+ def test_streaming_response_delete(self, client: OpenAI) -> None:
+ with client.admin.organization.groups.roles.with_streaming_response.delete(
+ role_id="role_id",
+ group_id="group_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ role = response.parse()
+ assert_matches_type(RoleDeleteResponse, role, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_delete(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `group_id` but received ''"):
+ client.admin.organization.groups.roles.with_raw_response.delete(
+ role_id="role_id",
+ group_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `role_id` but received ''"):
+ client.admin.organization.groups.roles.with_raw_response.delete(
+ role_id="",
+ group_id="group_id",
+ )
+
+
+class TestAsyncRoles:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @parametrize
+ async def test_method_create(self, async_client: AsyncOpenAI) -> None:
+ role = await async_client.admin.organization.groups.roles.create(
+ group_id="group_id",
+ role_id="role_id",
+ )
+ assert_matches_type(RoleCreateResponse, role, path=["response"])
+
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.groups.roles.with_raw_response.create(
+ group_id="group_id",
+ role_id="role_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ role = response.parse()
+ assert_matches_type(RoleCreateResponse, role, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.groups.roles.with_streaming_response.create(
+ group_id="group_id",
+ role_id="role_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ role = await response.parse()
+ assert_matches_type(RoleCreateResponse, role, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_create(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `group_id` but received ''"):
+ await async_client.admin.organization.groups.roles.with_raw_response.create(
+ group_id="",
+ role_id="role_id",
+ )
+
+ @parametrize
+ async def test_method_list(self, async_client: AsyncOpenAI) -> None:
+ role = await async_client.admin.organization.groups.roles.list(
+ group_id="group_id",
+ )
+ assert_matches_type(AsyncNextCursorPage[RoleListResponse], role, path=["response"])
+
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ role = await async_client.admin.organization.groups.roles.list(
+ group_id="group_id",
+ after="after",
+ limit=0,
+ order="asc",
+ )
+ assert_matches_type(AsyncNextCursorPage[RoleListResponse], role, path=["response"])
+
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.groups.roles.with_raw_response.list(
+ group_id="group_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ role = response.parse()
+ assert_matches_type(AsyncNextCursorPage[RoleListResponse], role, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.groups.roles.with_streaming_response.list(
+ group_id="group_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ role = await response.parse()
+ assert_matches_type(AsyncNextCursorPage[RoleListResponse], role, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_list(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `group_id` but received ''"):
+ await async_client.admin.organization.groups.roles.with_raw_response.list(
+ group_id="",
+ )
+
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
+ role = await async_client.admin.organization.groups.roles.delete(
+ role_id="role_id",
+ group_id="group_id",
+ )
+ assert_matches_type(RoleDeleteResponse, role, path=["response"])
+
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.groups.roles.with_raw_response.delete(
+ role_id="role_id",
+ group_id="group_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ role = response.parse()
+ assert_matches_type(RoleDeleteResponse, role, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.groups.roles.with_streaming_response.delete(
+ role_id="role_id",
+ group_id="group_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ role = await response.parse()
+ assert_matches_type(RoleDeleteResponse, role, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `group_id` but received ''"):
+ await async_client.admin.organization.groups.roles.with_raw_response.delete(
+ role_id="role_id",
+ group_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `role_id` but received ''"):
+ await async_client.admin.organization.groups.roles.with_raw_response.delete(
+ role_id="",
+ group_id="group_id",
+ )
diff --git a/tests/api_resources/admin/organization/groups/test_users.py b/tests/api_resources/admin/organization/groups/test_users.py
new file mode 100644
index 0000000000..eda6be6bbf
--- /dev/null
+++ b/tests/api_resources/admin/organization/groups/test_users.py
@@ -0,0 +1,305 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from openai import OpenAI, AsyncOpenAI
+from tests.utils import assert_matches_type
+from openai.pagination import SyncNextCursorPage, AsyncNextCursorPage
+from openai.types.admin.organization.groups import (
+ UserCreateResponse,
+ UserDeleteResponse,
+ OrganizationGroupUser,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestUsers:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ def test_method_create(self, client: OpenAI) -> None:
+ user = client.admin.organization.groups.users.create(
+ group_id="group_id",
+ user_id="user_id",
+ )
+ assert_matches_type(UserCreateResponse, user, path=["response"])
+
+ @parametrize
+ def test_raw_response_create(self, client: OpenAI) -> None:
+ response = client.admin.organization.groups.users.with_raw_response.create(
+ group_id="group_id",
+ user_id="user_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ user = response.parse()
+ assert_matches_type(UserCreateResponse, user, path=["response"])
+
+ @parametrize
+ def test_streaming_response_create(self, client: OpenAI) -> None:
+ with client.admin.organization.groups.users.with_streaming_response.create(
+ group_id="group_id",
+ user_id="user_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ user = response.parse()
+ assert_matches_type(UserCreateResponse, user, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_create(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `group_id` but received ''"):
+ client.admin.organization.groups.users.with_raw_response.create(
+ group_id="",
+ user_id="user_id",
+ )
+
+ @parametrize
+ def test_method_list(self, client: OpenAI) -> None:
+ user = client.admin.organization.groups.users.list(
+ group_id="group_id",
+ )
+ assert_matches_type(SyncNextCursorPage[OrganizationGroupUser], user, path=["response"])
+
+ @parametrize
+ def test_method_list_with_all_params(self, client: OpenAI) -> None:
+ user = client.admin.organization.groups.users.list(
+ group_id="group_id",
+ after="after",
+ limit=0,
+ order="asc",
+ )
+ assert_matches_type(SyncNextCursorPage[OrganizationGroupUser], user, path=["response"])
+
+ @parametrize
+ def test_raw_response_list(self, client: OpenAI) -> None:
+ response = client.admin.organization.groups.users.with_raw_response.list(
+ group_id="group_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ user = response.parse()
+ assert_matches_type(SyncNextCursorPage[OrganizationGroupUser], user, path=["response"])
+
+ @parametrize
+ def test_streaming_response_list(self, client: OpenAI) -> None:
+ with client.admin.organization.groups.users.with_streaming_response.list(
+ group_id="group_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ user = response.parse()
+ assert_matches_type(SyncNextCursorPage[OrganizationGroupUser], user, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_list(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `group_id` but received ''"):
+ client.admin.organization.groups.users.with_raw_response.list(
+ group_id="",
+ )
+
+ @parametrize
+ def test_method_delete(self, client: OpenAI) -> None:
+ user = client.admin.organization.groups.users.delete(
+ user_id="user_id",
+ group_id="group_id",
+ )
+ assert_matches_type(UserDeleteResponse, user, path=["response"])
+
+ @parametrize
+ def test_raw_response_delete(self, client: OpenAI) -> None:
+ response = client.admin.organization.groups.users.with_raw_response.delete(
+ user_id="user_id",
+ group_id="group_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ user = response.parse()
+ assert_matches_type(UserDeleteResponse, user, path=["response"])
+
+ @parametrize
+ def test_streaming_response_delete(self, client: OpenAI) -> None:
+ with client.admin.organization.groups.users.with_streaming_response.delete(
+ user_id="user_id",
+ group_id="group_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ user = response.parse()
+ assert_matches_type(UserDeleteResponse, user, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_delete(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `group_id` but received ''"):
+ client.admin.organization.groups.users.with_raw_response.delete(
+ user_id="user_id",
+ group_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
+ client.admin.organization.groups.users.with_raw_response.delete(
+ user_id="",
+ group_id="group_id",
+ )
+
+
+class TestAsyncUsers:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @parametrize
+ async def test_method_create(self, async_client: AsyncOpenAI) -> None:
+ user = await async_client.admin.organization.groups.users.create(
+ group_id="group_id",
+ user_id="user_id",
+ )
+ assert_matches_type(UserCreateResponse, user, path=["response"])
+
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.groups.users.with_raw_response.create(
+ group_id="group_id",
+ user_id="user_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ user = response.parse()
+ assert_matches_type(UserCreateResponse, user, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.groups.users.with_streaming_response.create(
+ group_id="group_id",
+ user_id="user_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ user = await response.parse()
+ assert_matches_type(UserCreateResponse, user, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_create(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `group_id` but received ''"):
+ await async_client.admin.organization.groups.users.with_raw_response.create(
+ group_id="",
+ user_id="user_id",
+ )
+
+ @parametrize
+ async def test_method_list(self, async_client: AsyncOpenAI) -> None:
+ user = await async_client.admin.organization.groups.users.list(
+ group_id="group_id",
+ )
+ assert_matches_type(AsyncNextCursorPage[OrganizationGroupUser], user, path=["response"])
+
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ user = await async_client.admin.organization.groups.users.list(
+ group_id="group_id",
+ after="after",
+ limit=0,
+ order="asc",
+ )
+ assert_matches_type(AsyncNextCursorPage[OrganizationGroupUser], user, path=["response"])
+
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.groups.users.with_raw_response.list(
+ group_id="group_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ user = response.parse()
+ assert_matches_type(AsyncNextCursorPage[OrganizationGroupUser], user, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.groups.users.with_streaming_response.list(
+ group_id="group_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ user = await response.parse()
+ assert_matches_type(AsyncNextCursorPage[OrganizationGroupUser], user, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_list(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `group_id` but received ''"):
+ await async_client.admin.organization.groups.users.with_raw_response.list(
+ group_id="",
+ )
+
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
+ user = await async_client.admin.organization.groups.users.delete(
+ user_id="user_id",
+ group_id="group_id",
+ )
+ assert_matches_type(UserDeleteResponse, user, path=["response"])
+
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.groups.users.with_raw_response.delete(
+ user_id="user_id",
+ group_id="group_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ user = response.parse()
+ assert_matches_type(UserDeleteResponse, user, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.groups.users.with_streaming_response.delete(
+ user_id="user_id",
+ group_id="group_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ user = await response.parse()
+ assert_matches_type(UserDeleteResponse, user, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `group_id` but received ''"):
+ await async_client.admin.organization.groups.users.with_raw_response.delete(
+ user_id="user_id",
+ group_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
+ await async_client.admin.organization.groups.users.with_raw_response.delete(
+ user_id="",
+ group_id="group_id",
+ )
diff --git a/tests/api_resources/admin/organization/projects/__init__.py b/tests/api_resources/admin/organization/projects/__init__.py
new file mode 100644
index 0000000000..fd8019a9a1
--- /dev/null
+++ b/tests/api_resources/admin/organization/projects/__init__.py
@@ -0,0 +1 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/admin/organization/projects/groups/__init__.py b/tests/api_resources/admin/organization/projects/groups/__init__.py
new file mode 100644
index 0000000000..fd8019a9a1
--- /dev/null
+++ b/tests/api_resources/admin/organization/projects/groups/__init__.py
@@ -0,0 +1 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/admin/organization/projects/groups/test_roles.py b/tests/api_resources/admin/organization/projects/groups/test_roles.py
new file mode 100644
index 0000000000..a129142ea9
--- /dev/null
+++ b/tests/api_resources/admin/organization/projects/groups/test_roles.py
@@ -0,0 +1,373 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from openai import OpenAI, AsyncOpenAI
+from tests.utils import assert_matches_type
+from openai.pagination import SyncNextCursorPage, AsyncNextCursorPage
+from openai.types.admin.organization.projects.groups import (
+ RoleListResponse,
+ RoleCreateResponse,
+ RoleDeleteResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestRoles:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ def test_method_create(self, client: OpenAI) -> None:
+ role = client.admin.organization.projects.groups.roles.create(
+ group_id="group_id",
+ project_id="project_id",
+ role_id="role_id",
+ )
+ assert_matches_type(RoleCreateResponse, role, path=["response"])
+
+ @parametrize
+ def test_raw_response_create(self, client: OpenAI) -> None:
+ response = client.admin.organization.projects.groups.roles.with_raw_response.create(
+ group_id="group_id",
+ project_id="project_id",
+ role_id="role_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ role = response.parse()
+ assert_matches_type(RoleCreateResponse, role, path=["response"])
+
+ @parametrize
+ def test_streaming_response_create(self, client: OpenAI) -> None:
+ with client.admin.organization.projects.groups.roles.with_streaming_response.create(
+ group_id="group_id",
+ project_id="project_id",
+ role_id="role_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ role = response.parse()
+ assert_matches_type(RoleCreateResponse, role, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_create(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ client.admin.organization.projects.groups.roles.with_raw_response.create(
+ group_id="group_id",
+ project_id="",
+ role_id="role_id",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `group_id` but received ''"):
+ client.admin.organization.projects.groups.roles.with_raw_response.create(
+ group_id="",
+ project_id="project_id",
+ role_id="role_id",
+ )
+
+ @parametrize
+ def test_method_list(self, client: OpenAI) -> None:
+ role = client.admin.organization.projects.groups.roles.list(
+ group_id="group_id",
+ project_id="project_id",
+ )
+ assert_matches_type(SyncNextCursorPage[RoleListResponse], role, path=["response"])
+
+ @parametrize
+ def test_method_list_with_all_params(self, client: OpenAI) -> None:
+ role = client.admin.organization.projects.groups.roles.list(
+ group_id="group_id",
+ project_id="project_id",
+ after="after",
+ limit=0,
+ order="asc",
+ )
+ assert_matches_type(SyncNextCursorPage[RoleListResponse], role, path=["response"])
+
+ @parametrize
+ def test_raw_response_list(self, client: OpenAI) -> None:
+ response = client.admin.organization.projects.groups.roles.with_raw_response.list(
+ group_id="group_id",
+ project_id="project_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ role = response.parse()
+ assert_matches_type(SyncNextCursorPage[RoleListResponse], role, path=["response"])
+
+ @parametrize
+ def test_streaming_response_list(self, client: OpenAI) -> None:
+ with client.admin.organization.projects.groups.roles.with_streaming_response.list(
+ group_id="group_id",
+ project_id="project_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ role = response.parse()
+ assert_matches_type(SyncNextCursorPage[RoleListResponse], role, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_list(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ client.admin.organization.projects.groups.roles.with_raw_response.list(
+ group_id="group_id",
+ project_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `group_id` but received ''"):
+ client.admin.organization.projects.groups.roles.with_raw_response.list(
+ group_id="",
+ project_id="project_id",
+ )
+
+ @parametrize
+ def test_method_delete(self, client: OpenAI) -> None:
+ role = client.admin.organization.projects.groups.roles.delete(
+ role_id="role_id",
+ project_id="project_id",
+ group_id="group_id",
+ )
+ assert_matches_type(RoleDeleteResponse, role, path=["response"])
+
+ @parametrize
+ def test_raw_response_delete(self, client: OpenAI) -> None:
+ response = client.admin.organization.projects.groups.roles.with_raw_response.delete(
+ role_id="role_id",
+ project_id="project_id",
+ group_id="group_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ role = response.parse()
+ assert_matches_type(RoleDeleteResponse, role, path=["response"])
+
+ @parametrize
+ def test_streaming_response_delete(self, client: OpenAI) -> None:
+ with client.admin.organization.projects.groups.roles.with_streaming_response.delete(
+ role_id="role_id",
+ project_id="project_id",
+ group_id="group_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ role = response.parse()
+ assert_matches_type(RoleDeleteResponse, role, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_delete(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ client.admin.organization.projects.groups.roles.with_raw_response.delete(
+ role_id="role_id",
+ project_id="",
+ group_id="group_id",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `group_id` but received ''"):
+ client.admin.organization.projects.groups.roles.with_raw_response.delete(
+ role_id="role_id",
+ project_id="project_id",
+ group_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `role_id` but received ''"):
+ client.admin.organization.projects.groups.roles.with_raw_response.delete(
+ role_id="",
+ project_id="project_id",
+ group_id="group_id",
+ )
+
+
+class TestAsyncRoles:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @parametrize
+ async def test_method_create(self, async_client: AsyncOpenAI) -> None:
+ role = await async_client.admin.organization.projects.groups.roles.create(
+ group_id="group_id",
+ project_id="project_id",
+ role_id="role_id",
+ )
+ assert_matches_type(RoleCreateResponse, role, path=["response"])
+
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.projects.groups.roles.with_raw_response.create(
+ group_id="group_id",
+ project_id="project_id",
+ role_id="role_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ role = response.parse()
+ assert_matches_type(RoleCreateResponse, role, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.projects.groups.roles.with_streaming_response.create(
+ group_id="group_id",
+ project_id="project_id",
+ role_id="role_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ role = await response.parse()
+ assert_matches_type(RoleCreateResponse, role, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_create(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ await async_client.admin.organization.projects.groups.roles.with_raw_response.create(
+ group_id="group_id",
+ project_id="",
+ role_id="role_id",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `group_id` but received ''"):
+ await async_client.admin.organization.projects.groups.roles.with_raw_response.create(
+ group_id="",
+ project_id="project_id",
+ role_id="role_id",
+ )
+
+ @parametrize
+ async def test_method_list(self, async_client: AsyncOpenAI) -> None:
+ role = await async_client.admin.organization.projects.groups.roles.list(
+ group_id="group_id",
+ project_id="project_id",
+ )
+ assert_matches_type(AsyncNextCursorPage[RoleListResponse], role, path=["response"])
+
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ role = await async_client.admin.organization.projects.groups.roles.list(
+ group_id="group_id",
+ project_id="project_id",
+ after="after",
+ limit=0,
+ order="asc",
+ )
+ assert_matches_type(AsyncNextCursorPage[RoleListResponse], role, path=["response"])
+
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.projects.groups.roles.with_raw_response.list(
+ group_id="group_id",
+ project_id="project_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ role = response.parse()
+ assert_matches_type(AsyncNextCursorPage[RoleListResponse], role, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.projects.groups.roles.with_streaming_response.list(
+ group_id="group_id",
+ project_id="project_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ role = await response.parse()
+ assert_matches_type(AsyncNextCursorPage[RoleListResponse], role, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_list(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ await async_client.admin.organization.projects.groups.roles.with_raw_response.list(
+ group_id="group_id",
+ project_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `group_id` but received ''"):
+ await async_client.admin.organization.projects.groups.roles.with_raw_response.list(
+ group_id="",
+ project_id="project_id",
+ )
+
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
+ role = await async_client.admin.organization.projects.groups.roles.delete(
+ role_id="role_id",
+ project_id="project_id",
+ group_id="group_id",
+ )
+ assert_matches_type(RoleDeleteResponse, role, path=["response"])
+
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.projects.groups.roles.with_raw_response.delete(
+ role_id="role_id",
+ project_id="project_id",
+ group_id="group_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ role = response.parse()
+ assert_matches_type(RoleDeleteResponse, role, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.projects.groups.roles.with_streaming_response.delete(
+ role_id="role_id",
+ project_id="project_id",
+ group_id="group_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ role = await response.parse()
+ assert_matches_type(RoleDeleteResponse, role, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ await async_client.admin.organization.projects.groups.roles.with_raw_response.delete(
+ role_id="role_id",
+ project_id="",
+ group_id="group_id",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `group_id` but received ''"):
+ await async_client.admin.organization.projects.groups.roles.with_raw_response.delete(
+ role_id="role_id",
+ project_id="project_id",
+ group_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `role_id` but received ''"):
+ await async_client.admin.organization.projects.groups.roles.with_raw_response.delete(
+ role_id="",
+ project_id="project_id",
+ group_id="group_id",
+ )
diff --git a/tests/api_resources/admin/organization/projects/test_api_keys.py b/tests/api_resources/admin/organization/projects/test_api_keys.py
new file mode 100644
index 0000000000..f9aef44216
--- /dev/null
+++ b/tests/api_resources/admin/organization/projects/test_api_keys.py
@@ -0,0 +1,311 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from openai import OpenAI, AsyncOpenAI
+from tests.utils import assert_matches_type
+from openai.pagination import SyncConversationCursorPage, AsyncConversationCursorPage
+from openai.types.admin.organization.projects import ProjectAPIKey, APIKeyDeleteResponse
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestAPIKeys:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ def test_method_retrieve(self, client: OpenAI) -> None:
+ api_key = client.admin.organization.projects.api_keys.retrieve(
+ api_key_id="api_key_id",
+ project_id="project_id",
+ )
+ assert_matches_type(ProjectAPIKey, api_key, path=["response"])
+
+ @parametrize
+ def test_raw_response_retrieve(self, client: OpenAI) -> None:
+ response = client.admin.organization.projects.api_keys.with_raw_response.retrieve(
+ api_key_id="api_key_id",
+ project_id="project_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ api_key = response.parse()
+ assert_matches_type(ProjectAPIKey, api_key, path=["response"])
+
+ @parametrize
+ def test_streaming_response_retrieve(self, client: OpenAI) -> None:
+ with client.admin.organization.projects.api_keys.with_streaming_response.retrieve(
+ api_key_id="api_key_id",
+ project_id="project_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ api_key = response.parse()
+ assert_matches_type(ProjectAPIKey, api_key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_retrieve(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ client.admin.organization.projects.api_keys.with_raw_response.retrieve(
+ api_key_id="api_key_id",
+ project_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_id` but received ''"):
+ client.admin.organization.projects.api_keys.with_raw_response.retrieve(
+ api_key_id="",
+ project_id="project_id",
+ )
+
+ @parametrize
+ def test_method_list(self, client: OpenAI) -> None:
+ api_key = client.admin.organization.projects.api_keys.list(
+ project_id="project_id",
+ )
+ assert_matches_type(SyncConversationCursorPage[ProjectAPIKey], api_key, path=["response"])
+
+ @parametrize
+ def test_method_list_with_all_params(self, client: OpenAI) -> None:
+ api_key = client.admin.organization.projects.api_keys.list(
+ project_id="project_id",
+ after="after",
+ limit=0,
+ )
+ assert_matches_type(SyncConversationCursorPage[ProjectAPIKey], api_key, path=["response"])
+
+ @parametrize
+ def test_raw_response_list(self, client: OpenAI) -> None:
+ response = client.admin.organization.projects.api_keys.with_raw_response.list(
+ project_id="project_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ api_key = response.parse()
+ assert_matches_type(SyncConversationCursorPage[ProjectAPIKey], api_key, path=["response"])
+
+ @parametrize
+ def test_streaming_response_list(self, client: OpenAI) -> None:
+ with client.admin.organization.projects.api_keys.with_streaming_response.list(
+ project_id="project_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ api_key = response.parse()
+ assert_matches_type(SyncConversationCursorPage[ProjectAPIKey], api_key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_list(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ client.admin.organization.projects.api_keys.with_raw_response.list(
+ project_id="",
+ )
+
+ @parametrize
+ def test_method_delete(self, client: OpenAI) -> None:
+ api_key = client.admin.organization.projects.api_keys.delete(
+ api_key_id="api_key_id",
+ project_id="project_id",
+ )
+ assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"])
+
+ @parametrize
+ def test_raw_response_delete(self, client: OpenAI) -> None:
+ response = client.admin.organization.projects.api_keys.with_raw_response.delete(
+ api_key_id="api_key_id",
+ project_id="project_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ api_key = response.parse()
+ assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"])
+
+ @parametrize
+ def test_streaming_response_delete(self, client: OpenAI) -> None:
+ with client.admin.organization.projects.api_keys.with_streaming_response.delete(
+ api_key_id="api_key_id",
+ project_id="project_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ api_key = response.parse()
+ assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_delete(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ client.admin.organization.projects.api_keys.with_raw_response.delete(
+ api_key_id="api_key_id",
+ project_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_id` but received ''"):
+ client.admin.organization.projects.api_keys.with_raw_response.delete(
+ api_key_id="",
+ project_id="project_id",
+ )
+
+
+class TestAsyncAPIKeys:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
+ api_key = await async_client.admin.organization.projects.api_keys.retrieve(
+ api_key_id="api_key_id",
+ project_id="project_id",
+ )
+ assert_matches_type(ProjectAPIKey, api_key, path=["response"])
+
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.projects.api_keys.with_raw_response.retrieve(
+ api_key_id="api_key_id",
+ project_id="project_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ api_key = response.parse()
+ assert_matches_type(ProjectAPIKey, api_key, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.projects.api_keys.with_streaming_response.retrieve(
+ api_key_id="api_key_id",
+ project_id="project_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ api_key = await response.parse()
+ assert_matches_type(ProjectAPIKey, api_key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ await async_client.admin.organization.projects.api_keys.with_raw_response.retrieve(
+ api_key_id="api_key_id",
+ project_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_id` but received ''"):
+ await async_client.admin.organization.projects.api_keys.with_raw_response.retrieve(
+ api_key_id="",
+ project_id="project_id",
+ )
+
+ @parametrize
+ async def test_method_list(self, async_client: AsyncOpenAI) -> None:
+ api_key = await async_client.admin.organization.projects.api_keys.list(
+ project_id="project_id",
+ )
+ assert_matches_type(AsyncConversationCursorPage[ProjectAPIKey], api_key, path=["response"])
+
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ api_key = await async_client.admin.organization.projects.api_keys.list(
+ project_id="project_id",
+ after="after",
+ limit=0,
+ )
+ assert_matches_type(AsyncConversationCursorPage[ProjectAPIKey], api_key, path=["response"])
+
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.projects.api_keys.with_raw_response.list(
+ project_id="project_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ api_key = response.parse()
+ assert_matches_type(AsyncConversationCursorPage[ProjectAPIKey], api_key, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.projects.api_keys.with_streaming_response.list(
+ project_id="project_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ api_key = await response.parse()
+ assert_matches_type(AsyncConversationCursorPage[ProjectAPIKey], api_key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_list(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ await async_client.admin.organization.projects.api_keys.with_raw_response.list(
+ project_id="",
+ )
+
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
+ api_key = await async_client.admin.organization.projects.api_keys.delete(
+ api_key_id="api_key_id",
+ project_id="project_id",
+ )
+ assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"])
+
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.projects.api_keys.with_raw_response.delete(
+ api_key_id="api_key_id",
+ project_id="project_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ api_key = response.parse()
+ assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.projects.api_keys.with_streaming_response.delete(
+ api_key_id="api_key_id",
+ project_id="project_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ api_key = await response.parse()
+ assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ await async_client.admin.organization.projects.api_keys.with_raw_response.delete(
+ api_key_id="api_key_id",
+ project_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_id` but received ''"):
+ await async_client.admin.organization.projects.api_keys.with_raw_response.delete(
+ api_key_id="",
+ project_id="project_id",
+ )
diff --git a/tests/api_resources/admin/organization/projects/test_certificates.py b/tests/api_resources/admin/organization/projects/test_certificates.py
new file mode 100644
index 0000000000..e242b7d6d4
--- /dev/null
+++ b/tests/api_resources/admin/organization/projects/test_certificates.py
@@ -0,0 +1,293 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from openai import OpenAI, AsyncOpenAI
+from tests.utils import assert_matches_type
+from openai.pagination import SyncPage, AsyncPage, SyncConversationCursorPage, AsyncConversationCursorPage
+from openai.types.admin.organization.projects import (
+ CertificateListResponse,
+ CertificateActivateResponse,
+ CertificateDeactivateResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestCertificates:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ def test_method_list(self, client: OpenAI) -> None:
+ certificate = client.admin.organization.projects.certificates.list(
+ project_id="project_id",
+ )
+ assert_matches_type(SyncConversationCursorPage[CertificateListResponse], certificate, path=["response"])
+
+ @parametrize
+ def test_method_list_with_all_params(self, client: OpenAI) -> None:
+ certificate = client.admin.organization.projects.certificates.list(
+ project_id="project_id",
+ after="after",
+ limit=0,
+ order="asc",
+ )
+ assert_matches_type(SyncConversationCursorPage[CertificateListResponse], certificate, path=["response"])
+
+ @parametrize
+ def test_raw_response_list(self, client: OpenAI) -> None:
+ response = client.admin.organization.projects.certificates.with_raw_response.list(
+ project_id="project_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ certificate = response.parse()
+ assert_matches_type(SyncConversationCursorPage[CertificateListResponse], certificate, path=["response"])
+
+ @parametrize
+ def test_streaming_response_list(self, client: OpenAI) -> None:
+ with client.admin.organization.projects.certificates.with_streaming_response.list(
+ project_id="project_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ certificate = response.parse()
+ assert_matches_type(SyncConversationCursorPage[CertificateListResponse], certificate, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_list(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ client.admin.organization.projects.certificates.with_raw_response.list(
+ project_id="",
+ )
+
+ @parametrize
+ def test_method_activate(self, client: OpenAI) -> None:
+ certificate = client.admin.organization.projects.certificates.activate(
+ project_id="project_id",
+ certificate_ids=["cert_abc"],
+ )
+ assert_matches_type(SyncPage[CertificateActivateResponse], certificate, path=["response"])
+
+ @parametrize
+ def test_raw_response_activate(self, client: OpenAI) -> None:
+ response = client.admin.organization.projects.certificates.with_raw_response.activate(
+ project_id="project_id",
+ certificate_ids=["cert_abc"],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ certificate = response.parse()
+ assert_matches_type(SyncPage[CertificateActivateResponse], certificate, path=["response"])
+
+ @parametrize
+ def test_streaming_response_activate(self, client: OpenAI) -> None:
+ with client.admin.organization.projects.certificates.with_streaming_response.activate(
+ project_id="project_id",
+ certificate_ids=["cert_abc"],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ certificate = response.parse()
+ assert_matches_type(SyncPage[CertificateActivateResponse], certificate, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_activate(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ client.admin.organization.projects.certificates.with_raw_response.activate(
+ project_id="",
+ certificate_ids=["cert_abc"],
+ )
+
+ @parametrize
+ def test_method_deactivate(self, client: OpenAI) -> None:
+ certificate = client.admin.organization.projects.certificates.deactivate(
+ project_id="project_id",
+ certificate_ids=["cert_abc"],
+ )
+ assert_matches_type(SyncPage[CertificateDeactivateResponse], certificate, path=["response"])
+
+ @parametrize
+ def test_raw_response_deactivate(self, client: OpenAI) -> None:
+ response = client.admin.organization.projects.certificates.with_raw_response.deactivate(
+ project_id="project_id",
+ certificate_ids=["cert_abc"],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ certificate = response.parse()
+ assert_matches_type(SyncPage[CertificateDeactivateResponse], certificate, path=["response"])
+
+ @parametrize
+ def test_streaming_response_deactivate(self, client: OpenAI) -> None:
+ with client.admin.organization.projects.certificates.with_streaming_response.deactivate(
+ project_id="project_id",
+ certificate_ids=["cert_abc"],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ certificate = response.parse()
+ assert_matches_type(SyncPage[CertificateDeactivateResponse], certificate, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_deactivate(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ client.admin.organization.projects.certificates.with_raw_response.deactivate(
+ project_id="",
+ certificate_ids=["cert_abc"],
+ )
+
+
+class TestAsyncCertificates:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @parametrize
+ async def test_method_list(self, async_client: AsyncOpenAI) -> None:
+ certificate = await async_client.admin.organization.projects.certificates.list(
+ project_id="project_id",
+ )
+ assert_matches_type(AsyncConversationCursorPage[CertificateListResponse], certificate, path=["response"])
+
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ certificate = await async_client.admin.organization.projects.certificates.list(
+ project_id="project_id",
+ after="after",
+ limit=0,
+ order="asc",
+ )
+ assert_matches_type(AsyncConversationCursorPage[CertificateListResponse], certificate, path=["response"])
+
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.projects.certificates.with_raw_response.list(
+ project_id="project_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ certificate = response.parse()
+ assert_matches_type(AsyncConversationCursorPage[CertificateListResponse], certificate, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.projects.certificates.with_streaming_response.list(
+ project_id="project_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ certificate = await response.parse()
+ assert_matches_type(AsyncConversationCursorPage[CertificateListResponse], certificate, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_list(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ await async_client.admin.organization.projects.certificates.with_raw_response.list(
+ project_id="",
+ )
+
+ @parametrize
+ async def test_method_activate(self, async_client: AsyncOpenAI) -> None:
+ certificate = await async_client.admin.organization.projects.certificates.activate(
+ project_id="project_id",
+ certificate_ids=["cert_abc"],
+ )
+ assert_matches_type(AsyncPage[CertificateActivateResponse], certificate, path=["response"])
+
+ @parametrize
+ async def test_raw_response_activate(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.projects.certificates.with_raw_response.activate(
+ project_id="project_id",
+ certificate_ids=["cert_abc"],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ certificate = response.parse()
+ assert_matches_type(AsyncPage[CertificateActivateResponse], certificate, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_activate(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.projects.certificates.with_streaming_response.activate(
+ project_id="project_id",
+ certificate_ids=["cert_abc"],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ certificate = await response.parse()
+ assert_matches_type(AsyncPage[CertificateActivateResponse], certificate, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_activate(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ await async_client.admin.organization.projects.certificates.with_raw_response.activate(
+ project_id="",
+ certificate_ids=["cert_abc"],
+ )
+
+ @parametrize
+ async def test_method_deactivate(self, async_client: AsyncOpenAI) -> None:
+ certificate = await async_client.admin.organization.projects.certificates.deactivate(
+ project_id="project_id",
+ certificate_ids=["cert_abc"],
+ )
+ assert_matches_type(AsyncPage[CertificateDeactivateResponse], certificate, path=["response"])
+
+ @parametrize
+ async def test_raw_response_deactivate(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.projects.certificates.with_raw_response.deactivate(
+ project_id="project_id",
+ certificate_ids=["cert_abc"],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ certificate = response.parse()
+ assert_matches_type(AsyncPage[CertificateDeactivateResponse], certificate, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_deactivate(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.projects.certificates.with_streaming_response.deactivate(
+ project_id="project_id",
+ certificate_ids=["cert_abc"],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ certificate = await response.parse()
+ assert_matches_type(AsyncPage[CertificateDeactivateResponse], certificate, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_deactivate(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ await async_client.admin.organization.projects.certificates.with_raw_response.deactivate(
+ project_id="",
+ certificate_ids=["cert_abc"],
+ )
diff --git a/tests/api_resources/admin/organization/projects/test_groups.py b/tests/api_resources/admin/organization/projects/test_groups.py
new file mode 100644
index 0000000000..2db448e9b2
--- /dev/null
+++ b/tests/api_resources/admin/organization/projects/test_groups.py
@@ -0,0 +1,312 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from openai import OpenAI, AsyncOpenAI
+from tests.utils import assert_matches_type
+from openai.pagination import SyncNextCursorPage, AsyncNextCursorPage
+from openai.types.admin.organization.projects import (
+ ProjectGroup,
+ GroupDeleteResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestGroups:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ def test_method_create(self, client: OpenAI) -> None:
+ group = client.admin.organization.projects.groups.create(
+ project_id="project_id",
+ group_id="group_id",
+ role="role",
+ )
+ assert_matches_type(ProjectGroup, group, path=["response"])
+
+ @parametrize
+ def test_raw_response_create(self, client: OpenAI) -> None:
+ response = client.admin.organization.projects.groups.with_raw_response.create(
+ project_id="project_id",
+ group_id="group_id",
+ role="role",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ group = response.parse()
+ assert_matches_type(ProjectGroup, group, path=["response"])
+
+ @parametrize
+ def test_streaming_response_create(self, client: OpenAI) -> None:
+ with client.admin.organization.projects.groups.with_streaming_response.create(
+ project_id="project_id",
+ group_id="group_id",
+ role="role",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ group = response.parse()
+ assert_matches_type(ProjectGroup, group, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_create(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ client.admin.organization.projects.groups.with_raw_response.create(
+ project_id="",
+ group_id="group_id",
+ role="role",
+ )
+
+ @parametrize
+ def test_method_list(self, client: OpenAI) -> None:
+ group = client.admin.organization.projects.groups.list(
+ project_id="project_id",
+ )
+ assert_matches_type(SyncNextCursorPage[ProjectGroup], group, path=["response"])
+
+ @parametrize
+ def test_method_list_with_all_params(self, client: OpenAI) -> None:
+ group = client.admin.organization.projects.groups.list(
+ project_id="project_id",
+ after="after",
+ limit=0,
+ order="asc",
+ )
+ assert_matches_type(SyncNextCursorPage[ProjectGroup], group, path=["response"])
+
+ @parametrize
+ def test_raw_response_list(self, client: OpenAI) -> None:
+ response = client.admin.organization.projects.groups.with_raw_response.list(
+ project_id="project_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ group = response.parse()
+ assert_matches_type(SyncNextCursorPage[ProjectGroup], group, path=["response"])
+
+ @parametrize
+ def test_streaming_response_list(self, client: OpenAI) -> None:
+ with client.admin.organization.projects.groups.with_streaming_response.list(
+ project_id="project_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ group = response.parse()
+ assert_matches_type(SyncNextCursorPage[ProjectGroup], group, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_list(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ client.admin.organization.projects.groups.with_raw_response.list(
+ project_id="",
+ )
+
+ @parametrize
+ def test_method_delete(self, client: OpenAI) -> None:
+ group = client.admin.organization.projects.groups.delete(
+ group_id="group_id",
+ project_id="project_id",
+ )
+ assert_matches_type(GroupDeleteResponse, group, path=["response"])
+
+ @parametrize
+ def test_raw_response_delete(self, client: OpenAI) -> None:
+ response = client.admin.organization.projects.groups.with_raw_response.delete(
+ group_id="group_id",
+ project_id="project_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ group = response.parse()
+ assert_matches_type(GroupDeleteResponse, group, path=["response"])
+
+ @parametrize
+ def test_streaming_response_delete(self, client: OpenAI) -> None:
+ with client.admin.organization.projects.groups.with_streaming_response.delete(
+ group_id="group_id",
+ project_id="project_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ group = response.parse()
+ assert_matches_type(GroupDeleteResponse, group, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_delete(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ client.admin.organization.projects.groups.with_raw_response.delete(
+ group_id="group_id",
+ project_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `group_id` but received ''"):
+ client.admin.organization.projects.groups.with_raw_response.delete(
+ group_id="",
+ project_id="project_id",
+ )
+
+
+class TestAsyncGroups:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @parametrize
+ async def test_method_create(self, async_client: AsyncOpenAI) -> None:
+ group = await async_client.admin.organization.projects.groups.create(
+ project_id="project_id",
+ group_id="group_id",
+ role="role",
+ )
+ assert_matches_type(ProjectGroup, group, path=["response"])
+
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.projects.groups.with_raw_response.create(
+ project_id="project_id",
+ group_id="group_id",
+ role="role",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ group = response.parse()
+ assert_matches_type(ProjectGroup, group, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.projects.groups.with_streaming_response.create(
+ project_id="project_id",
+ group_id="group_id",
+ role="role",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ group = await response.parse()
+ assert_matches_type(ProjectGroup, group, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_create(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ await async_client.admin.organization.projects.groups.with_raw_response.create(
+ project_id="",
+ group_id="group_id",
+ role="role",
+ )
+
+ @parametrize
+ async def test_method_list(self, async_client: AsyncOpenAI) -> None:
+ group = await async_client.admin.organization.projects.groups.list(
+ project_id="project_id",
+ )
+ assert_matches_type(AsyncNextCursorPage[ProjectGroup], group, path=["response"])
+
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ group = await async_client.admin.organization.projects.groups.list(
+ project_id="project_id",
+ after="after",
+ limit=0,
+ order="asc",
+ )
+ assert_matches_type(AsyncNextCursorPage[ProjectGroup], group, path=["response"])
+
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.projects.groups.with_raw_response.list(
+ project_id="project_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ group = response.parse()
+ assert_matches_type(AsyncNextCursorPage[ProjectGroup], group, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.projects.groups.with_streaming_response.list(
+ project_id="project_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ group = await response.parse()
+ assert_matches_type(AsyncNextCursorPage[ProjectGroup], group, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_list(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ await async_client.admin.organization.projects.groups.with_raw_response.list(
+ project_id="",
+ )
+
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
+ group = await async_client.admin.organization.projects.groups.delete(
+ group_id="group_id",
+ project_id="project_id",
+ )
+ assert_matches_type(GroupDeleteResponse, group, path=["response"])
+
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.projects.groups.with_raw_response.delete(
+ group_id="group_id",
+ project_id="project_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ group = response.parse()
+ assert_matches_type(GroupDeleteResponse, group, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.projects.groups.with_streaming_response.delete(
+ group_id="group_id",
+ project_id="project_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ group = await response.parse()
+ assert_matches_type(GroupDeleteResponse, group, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ await async_client.admin.organization.projects.groups.with_raw_response.delete(
+ group_id="group_id",
+ project_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `group_id` but received ''"):
+ await async_client.admin.organization.projects.groups.with_raw_response.delete(
+ group_id="",
+ project_id="project_id",
+ )
diff --git a/tests/api_resources/admin/organization/projects/test_rate_limits.py b/tests/api_resources/admin/organization/projects/test_rate_limits.py
new file mode 100644
index 0000000000..c06077614b
--- /dev/null
+++ b/tests/api_resources/admin/organization/projects/test_rate_limits.py
@@ -0,0 +1,247 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from openai import OpenAI, AsyncOpenAI
+from tests.utils import assert_matches_type
+from openai.pagination import SyncConversationCursorPage, AsyncConversationCursorPage
+from openai.types.admin.organization.projects import (
+ ProjectRateLimit,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestRateLimits:  # Sync-client tests for project rate-limit endpoints (Stainless-generated; runs against the mock at TEST_API_BASE_URL)
+    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])  # loose/strict response validation modes
+
+    @parametrize
+    def test_method_list_rate_limits(self, client: OpenAI) -> None:  # happy path, required args only
+        rate_limit = client.admin.organization.projects.rate_limits.list_rate_limits(
+            project_id="project_id",
+        )
+        assert_matches_type(SyncConversationCursorPage[ProjectRateLimit], rate_limit, path=["response"])
+
+    @parametrize
+    def test_method_list_rate_limits_with_all_params(self, client: OpenAI) -> None:  # exercises every optional query param
+        rate_limit = client.admin.organization.projects.rate_limits.list_rate_limits(
+            project_id="project_id",
+            after="after",
+            before="before",
+            limit=0,
+        )
+        assert_matches_type(SyncConversationCursorPage[ProjectRateLimit], rate_limit, path=["response"])
+
+    @parametrize
+    def test_raw_response_list_rate_limits(self, client: OpenAI) -> None:  # .with_raw_response surface
+        response = client.admin.organization.projects.rate_limits.with_raw_response.list_rate_limits(
+            project_id="project_id",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        rate_limit = response.parse()
+        assert_matches_type(SyncConversationCursorPage[ProjectRateLimit], rate_limit, path=["response"])
+
+    @parametrize
+    def test_streaming_response_list_rate_limits(self, client: OpenAI) -> None:  # .with_streaming_response context-manager surface
+        with client.admin.organization.projects.rate_limits.with_streaming_response.list_rate_limits(
+            project_id="project_id",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            rate_limit = response.parse()
+            assert_matches_type(SyncConversationCursorPage[ProjectRateLimit], rate_limit, path=["response"])
+
+        assert cast(Any, response.is_closed) is True  # stream must be closed on context exit
+
+    @parametrize
+    def test_path_params_list_rate_limits(self, client: OpenAI) -> None:  # empty path param must raise client-side, before any request
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+            client.admin.organization.projects.rate_limits.with_raw_response.list_rate_limits(
+                project_id="",
+            )
+
+    @parametrize
+    def test_method_update_rate_limit(self, client: OpenAI) -> None:  # happy path, required args only
+        rate_limit = client.admin.organization.projects.rate_limits.update_rate_limit(
+            rate_limit_id="rate_limit_id",
+            project_id="project_id",
+        )
+        assert_matches_type(ProjectRateLimit, rate_limit, path=["response"])
+
+    @parametrize
+    def test_method_update_rate_limit_with_all_params(self, client: OpenAI) -> None:  # all optional limit fields
+        rate_limit = client.admin.organization.projects.rate_limits.update_rate_limit(
+            rate_limit_id="rate_limit_id",
+            project_id="project_id",
+            batch_1_day_max_input_tokens=0,
+            max_audio_megabytes_per_1_minute=0,
+            max_images_per_1_minute=0,
+            max_requests_per_1_day=0,
+            max_requests_per_1_minute=0,
+            max_tokens_per_1_minute=0,
+        )
+        assert_matches_type(ProjectRateLimit, rate_limit, path=["response"])
+
+    @parametrize
+    def test_raw_response_update_rate_limit(self, client: OpenAI) -> None:  # .with_raw_response surface
+        response = client.admin.organization.projects.rate_limits.with_raw_response.update_rate_limit(
+            rate_limit_id="rate_limit_id",
+            project_id="project_id",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        rate_limit = response.parse()
+        assert_matches_type(ProjectRateLimit, rate_limit, path=["response"])
+
+    @parametrize
+    def test_streaming_response_update_rate_limit(self, client: OpenAI) -> None:  # .with_streaming_response context-manager surface
+        with client.admin.organization.projects.rate_limits.with_streaming_response.update_rate_limit(
+            rate_limit_id="rate_limit_id",
+            project_id="project_id",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            rate_limit = response.parse()
+            assert_matches_type(ProjectRateLimit, rate_limit, path=["response"])
+
+        assert cast(Any, response.is_closed) is True  # stream must be closed on context exit
+
+    @parametrize
+    def test_path_params_update_rate_limit(self, client: OpenAI) -> None:  # one raises-check per path param
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+            client.admin.organization.projects.rate_limits.with_raw_response.update_rate_limit(
+                rate_limit_id="rate_limit_id",
+                project_id="",
+            )
+
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `rate_limit_id` but received ''"):
+            client.admin.organization.projects.rate_limits.with_raw_response.update_rate_limit(
+                rate_limit_id="",
+                project_id="project_id",
+            )
+
+
+class TestAsyncRateLimits:  # Async-client mirror of TestRateLimits; additionally parametrized over the aiohttp transport
+    parametrize = pytest.mark.parametrize(
+        "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+    )
+
+    @parametrize
+    async def test_method_list_rate_limits(self, async_client: AsyncOpenAI) -> None:  # happy path, required args only
+        rate_limit = await async_client.admin.organization.projects.rate_limits.list_rate_limits(
+            project_id="project_id",
+        )
+        assert_matches_type(AsyncConversationCursorPage[ProjectRateLimit], rate_limit, path=["response"])
+
+    @parametrize
+    async def test_method_list_rate_limits_with_all_params(self, async_client: AsyncOpenAI) -> None:  # exercises every optional query param
+        rate_limit = await async_client.admin.organization.projects.rate_limits.list_rate_limits(
+            project_id="project_id",
+            after="after",
+            before="before",
+            limit=0,
+        )
+        assert_matches_type(AsyncConversationCursorPage[ProjectRateLimit], rate_limit, path=["response"])
+
+    @parametrize
+    async def test_raw_response_list_rate_limits(self, async_client: AsyncOpenAI) -> None:  # .with_raw_response surface
+        response = await async_client.admin.organization.projects.rate_limits.with_raw_response.list_rate_limits(
+            project_id="project_id",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        rate_limit = response.parse()
+        assert_matches_type(AsyncConversationCursorPage[ProjectRateLimit], rate_limit, path=["response"])
+
+    @parametrize
+    async def test_streaming_response_list_rate_limits(self, async_client: AsyncOpenAI) -> None:  # .with_streaming_response context-manager surface
+        async with async_client.admin.organization.projects.rate_limits.with_streaming_response.list_rate_limits(
+            project_id="project_id",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            rate_limit = await response.parse()  # parse() is awaitable on the async client
+            assert_matches_type(AsyncConversationCursorPage[ProjectRateLimit], rate_limit, path=["response"])
+
+        assert cast(Any, response.is_closed) is True  # stream must be closed on context exit
+
+    @parametrize
+    async def test_path_params_list_rate_limits(self, async_client: AsyncOpenAI) -> None:  # empty path param must raise client-side, before any request
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+            await async_client.admin.organization.projects.rate_limits.with_raw_response.list_rate_limits(
+                project_id="",
+            )
+
+    @parametrize
+    async def test_method_update_rate_limit(self, async_client: AsyncOpenAI) -> None:  # happy path, required args only
+        rate_limit = await async_client.admin.organization.projects.rate_limits.update_rate_limit(
+            rate_limit_id="rate_limit_id",
+            project_id="project_id",
+        )
+        assert_matches_type(ProjectRateLimit, rate_limit, path=["response"])
+
+    @parametrize
+    async def test_method_update_rate_limit_with_all_params(self, async_client: AsyncOpenAI) -> None:  # all optional limit fields
+        rate_limit = await async_client.admin.organization.projects.rate_limits.update_rate_limit(
+            rate_limit_id="rate_limit_id",
+            project_id="project_id",
+            batch_1_day_max_input_tokens=0,
+            max_audio_megabytes_per_1_minute=0,
+            max_images_per_1_minute=0,
+            max_requests_per_1_day=0,
+            max_requests_per_1_minute=0,
+            max_tokens_per_1_minute=0,
+        )
+        assert_matches_type(ProjectRateLimit, rate_limit, path=["response"])
+
+    @parametrize
+    async def test_raw_response_update_rate_limit(self, async_client: AsyncOpenAI) -> None:  # .with_raw_response surface
+        response = await async_client.admin.organization.projects.rate_limits.with_raw_response.update_rate_limit(
+            rate_limit_id="rate_limit_id",
+            project_id="project_id",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        rate_limit = response.parse()
+        assert_matches_type(ProjectRateLimit, rate_limit, path=["response"])
+
+    @parametrize
+    async def test_streaming_response_update_rate_limit(self, async_client: AsyncOpenAI) -> None:  # .with_streaming_response context-manager surface
+        async with async_client.admin.organization.projects.rate_limits.with_streaming_response.update_rate_limit(
+            rate_limit_id="rate_limit_id",
+            project_id="project_id",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            rate_limit = await response.parse()
+            assert_matches_type(ProjectRateLimit, rate_limit, path=["response"])
+
+        assert cast(Any, response.is_closed) is True  # stream must be closed on context exit
+
+    @parametrize
+    async def test_path_params_update_rate_limit(self, async_client: AsyncOpenAI) -> None:  # one raises-check per path param
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+            await async_client.admin.organization.projects.rate_limits.with_raw_response.update_rate_limit(
+                rate_limit_id="rate_limit_id",
+                project_id="",
+            )
+
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `rate_limit_id` but received ''"):
+            await async_client.admin.organization.projects.rate_limits.with_raw_response.update_rate_limit(
+                rate_limit_id="",
+                project_id="project_id",
+            )
diff --git a/tests/api_resources/admin/organization/projects/test_roles.py b/tests/api_resources/admin/organization/projects/test_roles.py
new file mode 100644
index 0000000000..8c78ea21b4
--- /dev/null
+++ b/tests/api_resources/admin/organization/projects/test_roles.py
@@ -0,0 +1,450 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from openai import OpenAI, AsyncOpenAI
+from tests.utils import assert_matches_type
+from openai.pagination import SyncNextCursorPage, AsyncNextCursorPage
+from openai.types.admin.organization import Role
+from openai.types.admin.organization.projects import (
+ RoleDeleteResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestRoles:  # Sync-client tests for project role endpoints (Stainless-generated; runs against the mock at TEST_API_BASE_URL)
+    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])  # loose/strict response validation modes
+
+    @parametrize
+    def test_method_create(self, client: OpenAI) -> None:  # happy path, required args only
+        role = client.admin.organization.projects.roles.create(
+            project_id="project_id",
+            permissions=["string"],
+            role_name="role_name",
+        )
+        assert_matches_type(Role, role, path=["response"])
+
+    @parametrize
+    def test_method_create_with_all_params(self, client: OpenAI) -> None:  # adds the optional description field
+        role = client.admin.organization.projects.roles.create(
+            project_id="project_id",
+            permissions=["string"],
+            role_name="role_name",
+            description="description",
+        )
+        assert_matches_type(Role, role, path=["response"])
+
+    @parametrize
+    def test_raw_response_create(self, client: OpenAI) -> None:  # .with_raw_response surface
+        response = client.admin.organization.projects.roles.with_raw_response.create(
+            project_id="project_id",
+            permissions=["string"],
+            role_name="role_name",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        role = response.parse()
+        assert_matches_type(Role, role, path=["response"])
+
+    @parametrize
+    def test_streaming_response_create(self, client: OpenAI) -> None:  # .with_streaming_response context-manager surface
+        with client.admin.organization.projects.roles.with_streaming_response.create(
+            project_id="project_id",
+            permissions=["string"],
+            role_name="role_name",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            role = response.parse()
+            assert_matches_type(Role, role, path=["response"])
+
+        assert cast(Any, response.is_closed) is True  # stream must be closed on context exit
+
+    @parametrize
+    def test_path_params_create(self, client: OpenAI) -> None:  # empty path param must raise client-side, before any request
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+            client.admin.organization.projects.roles.with_raw_response.create(
+                project_id="",
+                permissions=["string"],
+                role_name="role_name",
+            )
+
+    @parametrize
+    def test_method_update(self, client: OpenAI) -> None:  # happy path, required args only
+        role = client.admin.organization.projects.roles.update(
+            role_id="role_id",
+            project_id="project_id",
+        )
+        assert_matches_type(Role, role, path=["response"])
+
+    @parametrize
+    def test_method_update_with_all_params(self, client: OpenAI) -> None:  # all optional update fields
+        role = client.admin.organization.projects.roles.update(
+            role_id="role_id",
+            project_id="project_id",
+            description="description",
+            permissions=["string"],
+            role_name="role_name",
+        )
+        assert_matches_type(Role, role, path=["response"])
+
+    @parametrize
+    def test_raw_response_update(self, client: OpenAI) -> None:  # .with_raw_response surface
+        response = client.admin.organization.projects.roles.with_raw_response.update(
+            role_id="role_id",
+            project_id="project_id",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        role = response.parse()
+        assert_matches_type(Role, role, path=["response"])
+
+    @parametrize
+    def test_streaming_response_update(self, client: OpenAI) -> None:  # .with_streaming_response context-manager surface
+        with client.admin.organization.projects.roles.with_streaming_response.update(
+            role_id="role_id",
+            project_id="project_id",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            role = response.parse()
+            assert_matches_type(Role, role, path=["response"])
+
+        assert cast(Any, response.is_closed) is True  # stream must be closed on context exit
+
+    @parametrize
+    def test_path_params_update(self, client: OpenAI) -> None:  # one raises-check per path param
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+            client.admin.organization.projects.roles.with_raw_response.update(
+                role_id="role_id",
+                project_id="",
+            )
+
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `role_id` but received ''"):
+            client.admin.organization.projects.roles.with_raw_response.update(
+                role_id="",
+                project_id="project_id",
+            )
+
+    @parametrize
+    def test_method_list(self, client: OpenAI) -> None:  # happy path, required args only
+        role = client.admin.organization.projects.roles.list(
+            project_id="project_id",
+        )
+        assert_matches_type(SyncNextCursorPage[Role], role, path=["response"])
+
+    @parametrize
+    def test_method_list_with_all_params(self, client: OpenAI) -> None:  # exercises every optional query param
+        role = client.admin.organization.projects.roles.list(
+            project_id="project_id",
+            after="after",
+            limit=0,
+            order="asc",
+        )
+        assert_matches_type(SyncNextCursorPage[Role], role, path=["response"])
+
+    @parametrize
+    def test_raw_response_list(self, client: OpenAI) -> None:  # .with_raw_response surface
+        response = client.admin.organization.projects.roles.with_raw_response.list(
+            project_id="project_id",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        role = response.parse()
+        assert_matches_type(SyncNextCursorPage[Role], role, path=["response"])
+
+    @parametrize
+    def test_streaming_response_list(self, client: OpenAI) -> None:  # .with_streaming_response context-manager surface
+        with client.admin.organization.projects.roles.with_streaming_response.list(
+            project_id="project_id",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            role = response.parse()
+            assert_matches_type(SyncNextCursorPage[Role], role, path=["response"])
+
+        assert cast(Any, response.is_closed) is True  # stream must be closed on context exit
+
+    @parametrize
+    def test_path_params_list(self, client: OpenAI) -> None:  # empty path param must raise client-side, before any request
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+            client.admin.organization.projects.roles.with_raw_response.list(
+                project_id="",
+            )
+
+    @parametrize
+    def test_method_delete(self, client: OpenAI) -> None:  # happy path, required args only
+        role = client.admin.organization.projects.roles.delete(
+            role_id="role_id",
+            project_id="project_id",
+        )
+        assert_matches_type(RoleDeleteResponse, role, path=["response"])
+
+    @parametrize
+    def test_raw_response_delete(self, client: OpenAI) -> None:  # .with_raw_response surface
+        response = client.admin.organization.projects.roles.with_raw_response.delete(
+            role_id="role_id",
+            project_id="project_id",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        role = response.parse()
+        assert_matches_type(RoleDeleteResponse, role, path=["response"])
+
+    @parametrize
+    def test_streaming_response_delete(self, client: OpenAI) -> None:  # .with_streaming_response context-manager surface
+        with client.admin.organization.projects.roles.with_streaming_response.delete(
+            role_id="role_id",
+            project_id="project_id",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            role = response.parse()
+            assert_matches_type(RoleDeleteResponse, role, path=["response"])
+
+        assert cast(Any, response.is_closed) is True  # stream must be closed on context exit
+
+    @parametrize
+    def test_path_params_delete(self, client: OpenAI) -> None:  # one raises-check per path param
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+            client.admin.organization.projects.roles.with_raw_response.delete(
+                role_id="role_id",
+                project_id="",
+            )
+
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `role_id` but received ''"):
+            client.admin.organization.projects.roles.with_raw_response.delete(
+                role_id="",
+                project_id="project_id",
+            )
+
+
+class TestAsyncRoles:  # Async-client mirror of TestRoles; additionally parametrized over the aiohttp transport
+    parametrize = pytest.mark.parametrize(
+        "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+    )
+
+    @parametrize
+    async def test_method_create(self, async_client: AsyncOpenAI) -> None:  # happy path, required args only
+        role = await async_client.admin.organization.projects.roles.create(
+            project_id="project_id",
+            permissions=["string"],
+            role_name="role_name",
+        )
+        assert_matches_type(Role, role, path=["response"])
+
+    @parametrize
+    async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:  # adds the optional description field
+        role = await async_client.admin.organization.projects.roles.create(
+            project_id="project_id",
+            permissions=["string"],
+            role_name="role_name",
+            description="description",
+        )
+        assert_matches_type(Role, role, path=["response"])
+
+    @parametrize
+    async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:  # .with_raw_response surface
+        response = await async_client.admin.organization.projects.roles.with_raw_response.create(
+            project_id="project_id",
+            permissions=["string"],
+            role_name="role_name",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        role = response.parse()
+        assert_matches_type(Role, role, path=["response"])
+
+    @parametrize
+    async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:  # .with_streaming_response context-manager surface
+        async with async_client.admin.organization.projects.roles.with_streaming_response.create(
+            project_id="project_id",
+            permissions=["string"],
+            role_name="role_name",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            role = await response.parse()  # parse() is awaitable on the async client
+            assert_matches_type(Role, role, path=["response"])
+
+        assert cast(Any, response.is_closed) is True  # stream must be closed on context exit
+
+    @parametrize
+    async def test_path_params_create(self, async_client: AsyncOpenAI) -> None:  # empty path param must raise client-side, before any request
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+            await async_client.admin.organization.projects.roles.with_raw_response.create(
+                project_id="",
+                permissions=["string"],
+                role_name="role_name",
+            )
+
+    @parametrize
+    async def test_method_update(self, async_client: AsyncOpenAI) -> None:  # happy path, required args only
+        role = await async_client.admin.organization.projects.roles.update(
+            role_id="role_id",
+            project_id="project_id",
+        )
+        assert_matches_type(Role, role, path=["response"])
+
+    @parametrize
+    async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) -> None:  # all optional update fields
+        role = await async_client.admin.organization.projects.roles.update(
+            role_id="role_id",
+            project_id="project_id",
+            description="description",
+            permissions=["string"],
+            role_name="role_name",
+        )
+        assert_matches_type(Role, role, path=["response"])
+
+    @parametrize
+    async def test_raw_response_update(self, async_client: AsyncOpenAI) -> None:  # .with_raw_response surface
+        response = await async_client.admin.organization.projects.roles.with_raw_response.update(
+            role_id="role_id",
+            project_id="project_id",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        role = response.parse()
+        assert_matches_type(Role, role, path=["response"])
+
+    @parametrize
+    async def test_streaming_response_update(self, async_client: AsyncOpenAI) -> None:  # .with_streaming_response context-manager surface
+        async with async_client.admin.organization.projects.roles.with_streaming_response.update(
+            role_id="role_id",
+            project_id="project_id",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            role = await response.parse()
+            assert_matches_type(Role, role, path=["response"])
+
+        assert cast(Any, response.is_closed) is True  # stream must be closed on context exit
+
+    @parametrize
+    async def test_path_params_update(self, async_client: AsyncOpenAI) -> None:  # one raises-check per path param
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+            await async_client.admin.organization.projects.roles.with_raw_response.update(
+                role_id="role_id",
+                project_id="",
+            )
+
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `role_id` but received ''"):
+            await async_client.admin.organization.projects.roles.with_raw_response.update(
+                role_id="",
+                project_id="project_id",
+            )
+
+    @parametrize
+    async def test_method_list(self, async_client: AsyncOpenAI) -> None:  # happy path, required args only
+        role = await async_client.admin.organization.projects.roles.list(
+            project_id="project_id",
+        )
+        assert_matches_type(AsyncNextCursorPage[Role], role, path=["response"])
+
+    @parametrize
+    async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:  # exercises every optional query param
+        role = await async_client.admin.organization.projects.roles.list(
+            project_id="project_id",
+            after="after",
+            limit=0,
+            order="asc",
+        )
+        assert_matches_type(AsyncNextCursorPage[Role], role, path=["response"])
+
+    @parametrize
+    async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:  # .with_raw_response surface
+        response = await async_client.admin.organization.projects.roles.with_raw_response.list(
+            project_id="project_id",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        role = response.parse()
+        assert_matches_type(AsyncNextCursorPage[Role], role, path=["response"])
+
+    @parametrize
+    async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:  # .with_streaming_response context-manager surface
+        async with async_client.admin.organization.projects.roles.with_streaming_response.list(
+            project_id="project_id",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            role = await response.parse()
+            assert_matches_type(AsyncNextCursorPage[Role], role, path=["response"])
+
+        assert cast(Any, response.is_closed) is True  # stream must be closed on context exit
+
+    @parametrize
+    async def test_path_params_list(self, async_client: AsyncOpenAI) -> None:  # empty path param must raise client-side, before any request
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+            await async_client.admin.organization.projects.roles.with_raw_response.list(
+                project_id="",
+            )
+
+    @parametrize
+    async def test_method_delete(self, async_client: AsyncOpenAI) -> None:  # happy path, required args only
+        role = await async_client.admin.organization.projects.roles.delete(
+            role_id="role_id",
+            project_id="project_id",
+        )
+        assert_matches_type(RoleDeleteResponse, role, path=["response"])
+
+    @parametrize
+    async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:  # .with_raw_response surface
+        response = await async_client.admin.organization.projects.roles.with_raw_response.delete(
+            role_id="role_id",
+            project_id="project_id",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        role = response.parse()
+        assert_matches_type(RoleDeleteResponse, role, path=["response"])
+
+    @parametrize
+    async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:  # .with_streaming_response context-manager surface
+        async with async_client.admin.organization.projects.roles.with_streaming_response.delete(
+            role_id="role_id",
+            project_id="project_id",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            role = await response.parse()
+            assert_matches_type(RoleDeleteResponse, role, path=["response"])
+
+        assert cast(Any, response.is_closed) is True  # stream must be closed on context exit
+
+    @parametrize
+    async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:  # one raises-check per path param
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+            await async_client.admin.organization.projects.roles.with_raw_response.delete(
+                role_id="role_id",
+                project_id="",
+            )
+
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `role_id` but received ''"):
+            await async_client.admin.organization.projects.roles.with_raw_response.delete(
+                role_id="",
+                project_id="project_id",
+            )
diff --git a/tests/api_resources/admin/organization/projects/test_service_accounts.py b/tests/api_resources/admin/organization/projects/test_service_accounts.py
new file mode 100644
index 0000000000..7c94283323
--- /dev/null
+++ b/tests/api_resources/admin/organization/projects/test_service_accounts.py
@@ -0,0 +1,399 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from openai import OpenAI, AsyncOpenAI
+from tests.utils import assert_matches_type
+from openai.pagination import SyncConversationCursorPage, AsyncConversationCursorPage
+from openai.types.admin.organization.projects import (
+ ProjectServiceAccount,
+ ServiceAccountCreateResponse,
+ ServiceAccountDeleteResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestServiceAccounts:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ def test_method_create(self, client: OpenAI) -> None:
+ service_account = client.admin.organization.projects.service_accounts.create(
+ project_id="project_id",
+ name="name",
+ )
+ assert_matches_type(ServiceAccountCreateResponse, service_account, path=["response"])
+
+ @parametrize
+ def test_raw_response_create(self, client: OpenAI) -> None:
+ response = client.admin.organization.projects.service_accounts.with_raw_response.create(
+ project_id="project_id",
+ name="name",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ service_account = response.parse()
+ assert_matches_type(ServiceAccountCreateResponse, service_account, path=["response"])
+
+ @parametrize
+ def test_streaming_response_create(self, client: OpenAI) -> None:
+ with client.admin.organization.projects.service_accounts.with_streaming_response.create(
+ project_id="project_id",
+ name="name",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ service_account = response.parse()
+ assert_matches_type(ServiceAccountCreateResponse, service_account, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_create(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ client.admin.organization.projects.service_accounts.with_raw_response.create(
+ project_id="",
+ name="name",
+ )
+
+ @parametrize
+ def test_method_retrieve(self, client: OpenAI) -> None:
+ service_account = client.admin.organization.projects.service_accounts.retrieve(
+ service_account_id="service_account_id",
+ project_id="project_id",
+ )
+ assert_matches_type(ProjectServiceAccount, service_account, path=["response"])
+
+ @parametrize
+ def test_raw_response_retrieve(self, client: OpenAI) -> None:
+ response = client.admin.organization.projects.service_accounts.with_raw_response.retrieve(
+ service_account_id="service_account_id",
+ project_id="project_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ service_account = response.parse()
+ assert_matches_type(ProjectServiceAccount, service_account, path=["response"])
+
+ @parametrize
+ def test_streaming_response_retrieve(self, client: OpenAI) -> None:
+ with client.admin.organization.projects.service_accounts.with_streaming_response.retrieve(
+ service_account_id="service_account_id",
+ project_id="project_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ service_account = response.parse()
+ assert_matches_type(ProjectServiceAccount, service_account, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_retrieve(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ client.admin.organization.projects.service_accounts.with_raw_response.retrieve(
+ service_account_id="service_account_id",
+ project_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `service_account_id` but received ''"):
+ client.admin.organization.projects.service_accounts.with_raw_response.retrieve(
+ service_account_id="",
+ project_id="project_id",
+ )
+
+ @parametrize
+ def test_method_list(self, client: OpenAI) -> None:
+ service_account = client.admin.organization.projects.service_accounts.list(
+ project_id="project_id",
+ )
+ assert_matches_type(SyncConversationCursorPage[ProjectServiceAccount], service_account, path=["response"])
+
+ @parametrize
+ def test_method_list_with_all_params(self, client: OpenAI) -> None:
+ service_account = client.admin.organization.projects.service_accounts.list(
+ project_id="project_id",
+ after="after",
+ limit=0,
+ )
+ assert_matches_type(SyncConversationCursorPage[ProjectServiceAccount], service_account, path=["response"])
+
+ @parametrize
+ def test_raw_response_list(self, client: OpenAI) -> None:
+ response = client.admin.organization.projects.service_accounts.with_raw_response.list(
+ project_id="project_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ service_account = response.parse()
+ assert_matches_type(SyncConversationCursorPage[ProjectServiceAccount], service_account, path=["response"])
+
+ @parametrize
+ def test_streaming_response_list(self, client: OpenAI) -> None:
+ with client.admin.organization.projects.service_accounts.with_streaming_response.list(
+ project_id="project_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ service_account = response.parse()
+ assert_matches_type(SyncConversationCursorPage[ProjectServiceAccount], service_account, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_list(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ client.admin.organization.projects.service_accounts.with_raw_response.list(
+ project_id="",
+ )
+
+ @parametrize
+ def test_method_delete(self, client: OpenAI) -> None:
+ service_account = client.admin.organization.projects.service_accounts.delete(
+ service_account_id="service_account_id",
+ project_id="project_id",
+ )
+ assert_matches_type(ServiceAccountDeleteResponse, service_account, path=["response"])
+
+ @parametrize
+ def test_raw_response_delete(self, client: OpenAI) -> None:
+ response = client.admin.organization.projects.service_accounts.with_raw_response.delete(
+ service_account_id="service_account_id",
+ project_id="project_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ service_account = response.parse()
+ assert_matches_type(ServiceAccountDeleteResponse, service_account, path=["response"])
+
+ @parametrize
+ def test_streaming_response_delete(self, client: OpenAI) -> None:
+ with client.admin.organization.projects.service_accounts.with_streaming_response.delete(
+ service_account_id="service_account_id",
+ project_id="project_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ service_account = response.parse()
+ assert_matches_type(ServiceAccountDeleteResponse, service_account, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_delete(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ client.admin.organization.projects.service_accounts.with_raw_response.delete(
+ service_account_id="service_account_id",
+ project_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `service_account_id` but received ''"):
+ client.admin.organization.projects.service_accounts.with_raw_response.delete(
+ service_account_id="",
+ project_id="project_id",
+ )
+
+
+class TestAsyncServiceAccounts:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @parametrize
+ async def test_method_create(self, async_client: AsyncOpenAI) -> None:
+ service_account = await async_client.admin.organization.projects.service_accounts.create(
+ project_id="project_id",
+ name="name",
+ )
+ assert_matches_type(ServiceAccountCreateResponse, service_account, path=["response"])
+
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.projects.service_accounts.with_raw_response.create(
+ project_id="project_id",
+ name="name",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ service_account = response.parse()
+ assert_matches_type(ServiceAccountCreateResponse, service_account, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.projects.service_accounts.with_streaming_response.create(
+ project_id="project_id",
+ name="name",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ service_account = await response.parse()
+ assert_matches_type(ServiceAccountCreateResponse, service_account, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_create(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ await async_client.admin.organization.projects.service_accounts.with_raw_response.create(
+ project_id="",
+ name="name",
+ )
+
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
+ service_account = await async_client.admin.organization.projects.service_accounts.retrieve(
+ service_account_id="service_account_id",
+ project_id="project_id",
+ )
+ assert_matches_type(ProjectServiceAccount, service_account, path=["response"])
+
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.projects.service_accounts.with_raw_response.retrieve(
+ service_account_id="service_account_id",
+ project_id="project_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ service_account = response.parse()
+ assert_matches_type(ProjectServiceAccount, service_account, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.projects.service_accounts.with_streaming_response.retrieve(
+ service_account_id="service_account_id",
+ project_id="project_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ service_account = await response.parse()
+ assert_matches_type(ProjectServiceAccount, service_account, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ await async_client.admin.organization.projects.service_accounts.with_raw_response.retrieve(
+ service_account_id="service_account_id",
+ project_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `service_account_id` but received ''"):
+ await async_client.admin.organization.projects.service_accounts.with_raw_response.retrieve(
+ service_account_id="",
+ project_id="project_id",
+ )
+
+ @parametrize
+ async def test_method_list(self, async_client: AsyncOpenAI) -> None:
+ service_account = await async_client.admin.organization.projects.service_accounts.list(
+ project_id="project_id",
+ )
+ assert_matches_type(AsyncConversationCursorPage[ProjectServiceAccount], service_account, path=["response"])
+
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ service_account = await async_client.admin.organization.projects.service_accounts.list(
+ project_id="project_id",
+ after="after",
+ limit=0,
+ )
+ assert_matches_type(AsyncConversationCursorPage[ProjectServiceAccount], service_account, path=["response"])
+
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.projects.service_accounts.with_raw_response.list(
+ project_id="project_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ service_account = response.parse()
+ assert_matches_type(AsyncConversationCursorPage[ProjectServiceAccount], service_account, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.projects.service_accounts.with_streaming_response.list(
+ project_id="project_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ service_account = await response.parse()
+ assert_matches_type(AsyncConversationCursorPage[ProjectServiceAccount], service_account, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_list(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ await async_client.admin.organization.projects.service_accounts.with_raw_response.list(
+ project_id="",
+ )
+
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
+ service_account = await async_client.admin.organization.projects.service_accounts.delete(
+ service_account_id="service_account_id",
+ project_id="project_id",
+ )
+ assert_matches_type(ServiceAccountDeleteResponse, service_account, path=["response"])
+
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.projects.service_accounts.with_raw_response.delete(
+ service_account_id="service_account_id",
+ project_id="project_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ service_account = response.parse()
+ assert_matches_type(ServiceAccountDeleteResponse, service_account, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.projects.service_accounts.with_streaming_response.delete(
+ service_account_id="service_account_id",
+ project_id="project_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ service_account = await response.parse()
+ assert_matches_type(ServiceAccountDeleteResponse, service_account, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ await async_client.admin.organization.projects.service_accounts.with_raw_response.delete(
+ service_account_id="service_account_id",
+ project_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `service_account_id` but received ''"):
+ await async_client.admin.organization.projects.service_accounts.with_raw_response.delete(
+ service_account_id="",
+ project_id="project_id",
+ )
diff --git a/tests/api_resources/admin/organization/projects/test_users.py b/tests/api_resources/admin/organization/projects/test_users.py
new file mode 100644
index 0000000000..66005bf657
--- /dev/null
+++ b/tests/api_resources/admin/organization/projects/test_users.py
@@ -0,0 +1,532 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from openai import OpenAI, AsyncOpenAI
+from tests.utils import assert_matches_type
+from openai.pagination import SyncConversationCursorPage, AsyncConversationCursorPage
+from openai.types.admin.organization.projects import (
+ ProjectUser,
+ UserDeleteResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestUsers:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ def test_method_create(self, client: OpenAI) -> None:
+ user = client.admin.organization.projects.users.create(
+ project_id="project_id",
+ role="role",
+ )
+ assert_matches_type(ProjectUser, user, path=["response"])
+
+ @parametrize
+ def test_method_create_with_all_params(self, client: OpenAI) -> None:
+ user = client.admin.organization.projects.users.create(
+ project_id="project_id",
+ role="role",
+ email="email",
+ user_id="user_id",
+ )
+ assert_matches_type(ProjectUser, user, path=["response"])
+
+ @parametrize
+ def test_raw_response_create(self, client: OpenAI) -> None:
+ response = client.admin.organization.projects.users.with_raw_response.create(
+ project_id="project_id",
+ role="role",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ user = response.parse()
+ assert_matches_type(ProjectUser, user, path=["response"])
+
+ @parametrize
+ def test_streaming_response_create(self, client: OpenAI) -> None:
+ with client.admin.organization.projects.users.with_streaming_response.create(
+ project_id="project_id",
+ role="role",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ user = response.parse()
+ assert_matches_type(ProjectUser, user, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_create(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ client.admin.organization.projects.users.with_raw_response.create(
+ project_id="",
+ role="role",
+ )
+
+ @parametrize
+ def test_method_retrieve(self, client: OpenAI) -> None:
+ user = client.admin.organization.projects.users.retrieve(
+ user_id="user_id",
+ project_id="project_id",
+ )
+ assert_matches_type(ProjectUser, user, path=["response"])
+
+ @parametrize
+ def test_raw_response_retrieve(self, client: OpenAI) -> None:
+ response = client.admin.organization.projects.users.with_raw_response.retrieve(
+ user_id="user_id",
+ project_id="project_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ user = response.parse()
+ assert_matches_type(ProjectUser, user, path=["response"])
+
+ @parametrize
+ def test_streaming_response_retrieve(self, client: OpenAI) -> None:
+ with client.admin.organization.projects.users.with_streaming_response.retrieve(
+ user_id="user_id",
+ project_id="project_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ user = response.parse()
+ assert_matches_type(ProjectUser, user, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_retrieve(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ client.admin.organization.projects.users.with_raw_response.retrieve(
+ user_id="user_id",
+ project_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
+ client.admin.organization.projects.users.with_raw_response.retrieve(
+ user_id="",
+ project_id="project_id",
+ )
+
+ @parametrize
+ def test_method_update(self, client: OpenAI) -> None:
+ user = client.admin.organization.projects.users.update(
+ user_id="user_id",
+ project_id="project_id",
+ )
+ assert_matches_type(ProjectUser, user, path=["response"])
+
+ @parametrize
+ def test_method_update_with_all_params(self, client: OpenAI) -> None:
+ user = client.admin.organization.projects.users.update(
+ user_id="user_id",
+ project_id="project_id",
+ role="role",
+ )
+ assert_matches_type(ProjectUser, user, path=["response"])
+
+ @parametrize
+ def test_raw_response_update(self, client: OpenAI) -> None:
+ response = client.admin.organization.projects.users.with_raw_response.update(
+ user_id="user_id",
+ project_id="project_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ user = response.parse()
+ assert_matches_type(ProjectUser, user, path=["response"])
+
+ @parametrize
+ def test_streaming_response_update(self, client: OpenAI) -> None:
+ with client.admin.organization.projects.users.with_streaming_response.update(
+ user_id="user_id",
+ project_id="project_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ user = response.parse()
+ assert_matches_type(ProjectUser, user, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_update(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ client.admin.organization.projects.users.with_raw_response.update(
+ user_id="user_id",
+ project_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
+ client.admin.organization.projects.users.with_raw_response.update(
+ user_id="",
+ project_id="project_id",
+ )
+
+ @parametrize
+ def test_method_list(self, client: OpenAI) -> None:
+ user = client.admin.organization.projects.users.list(
+ project_id="project_id",
+ )
+ assert_matches_type(SyncConversationCursorPage[ProjectUser], user, path=["response"])
+
+ @parametrize
+ def test_method_list_with_all_params(self, client: OpenAI) -> None:
+ user = client.admin.organization.projects.users.list(
+ project_id="project_id",
+ after="after",
+ limit=0,
+ )
+ assert_matches_type(SyncConversationCursorPage[ProjectUser], user, path=["response"])
+
+ @parametrize
+ def test_raw_response_list(self, client: OpenAI) -> None:
+ response = client.admin.organization.projects.users.with_raw_response.list(
+ project_id="project_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ user = response.parse()
+ assert_matches_type(SyncConversationCursorPage[ProjectUser], user, path=["response"])
+
+ @parametrize
+ def test_streaming_response_list(self, client: OpenAI) -> None:
+ with client.admin.organization.projects.users.with_streaming_response.list(
+ project_id="project_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ user = response.parse()
+ assert_matches_type(SyncConversationCursorPage[ProjectUser], user, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_list(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ client.admin.organization.projects.users.with_raw_response.list(
+ project_id="",
+ )
+
+ @parametrize
+ def test_method_delete(self, client: OpenAI) -> None:
+ user = client.admin.organization.projects.users.delete(
+ user_id="user_id",
+ project_id="project_id",
+ )
+ assert_matches_type(UserDeleteResponse, user, path=["response"])
+
+ @parametrize
+ def test_raw_response_delete(self, client: OpenAI) -> None:
+ response = client.admin.organization.projects.users.with_raw_response.delete(
+ user_id="user_id",
+ project_id="project_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ user = response.parse()
+ assert_matches_type(UserDeleteResponse, user, path=["response"])
+
+ @parametrize
+ def test_streaming_response_delete(self, client: OpenAI) -> None:
+ with client.admin.organization.projects.users.with_streaming_response.delete(
+ user_id="user_id",
+ project_id="project_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ user = response.parse()
+ assert_matches_type(UserDeleteResponse, user, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_delete(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ client.admin.organization.projects.users.with_raw_response.delete(
+ user_id="user_id",
+ project_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
+ client.admin.organization.projects.users.with_raw_response.delete(
+ user_id="",
+ project_id="project_id",
+ )
+
+
+class TestAsyncUsers:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @parametrize
+ async def test_method_create(self, async_client: AsyncOpenAI) -> None:
+ user = await async_client.admin.organization.projects.users.create(
+ project_id="project_id",
+ role="role",
+ )
+ assert_matches_type(ProjectUser, user, path=["response"])
+
+ @parametrize
+ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ user = await async_client.admin.organization.projects.users.create(
+ project_id="project_id",
+ role="role",
+ email="email",
+ user_id="user_id",
+ )
+ assert_matches_type(ProjectUser, user, path=["response"])
+
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.projects.users.with_raw_response.create(
+ project_id="project_id",
+ role="role",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ user = response.parse()
+ assert_matches_type(ProjectUser, user, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.projects.users.with_streaming_response.create(
+ project_id="project_id",
+ role="role",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ user = await response.parse()
+ assert_matches_type(ProjectUser, user, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_create(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ await async_client.admin.organization.projects.users.with_raw_response.create(
+ project_id="",
+ role="role",
+ )
+
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
+ user = await async_client.admin.organization.projects.users.retrieve(
+ user_id="user_id",
+ project_id="project_id",
+ )
+ assert_matches_type(ProjectUser, user, path=["response"])
+
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.projects.users.with_raw_response.retrieve(
+ user_id="user_id",
+ project_id="project_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ user = response.parse()
+ assert_matches_type(ProjectUser, user, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.projects.users.with_streaming_response.retrieve(
+ user_id="user_id",
+ project_id="project_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ user = await response.parse()
+ assert_matches_type(ProjectUser, user, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ await async_client.admin.organization.projects.users.with_raw_response.retrieve(
+ user_id="user_id",
+ project_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
+ await async_client.admin.organization.projects.users.with_raw_response.retrieve(
+ user_id="",
+ project_id="project_id",
+ )
+
+ @parametrize
+ async def test_method_update(self, async_client: AsyncOpenAI) -> None:
+ user = await async_client.admin.organization.projects.users.update(
+ user_id="user_id",
+ project_id="project_id",
+ )
+ assert_matches_type(ProjectUser, user, path=["response"])
+
+ @parametrize
+ async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ user = await async_client.admin.organization.projects.users.update(
+ user_id="user_id",
+ project_id="project_id",
+ role="role",
+ )
+ assert_matches_type(ProjectUser, user, path=["response"])
+
+ @parametrize
+ async def test_raw_response_update(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.projects.users.with_raw_response.update(
+ user_id="user_id",
+ project_id="project_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ user = response.parse()
+ assert_matches_type(ProjectUser, user, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_update(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.projects.users.with_streaming_response.update(
+ user_id="user_id",
+ project_id="project_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ user = await response.parse()
+ assert_matches_type(ProjectUser, user, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_update(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ await async_client.admin.organization.projects.users.with_raw_response.update(
+ user_id="user_id",
+ project_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
+ await async_client.admin.organization.projects.users.with_raw_response.update(
+ user_id="",
+ project_id="project_id",
+ )
+
+ @parametrize
+ async def test_method_list(self, async_client: AsyncOpenAI) -> None:
+ user = await async_client.admin.organization.projects.users.list(
+ project_id="project_id",
+ )
+ assert_matches_type(AsyncConversationCursorPage[ProjectUser], user, path=["response"])
+
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ user = await async_client.admin.organization.projects.users.list(
+ project_id="project_id",
+ after="after",
+ limit=0,
+ )
+ assert_matches_type(AsyncConversationCursorPage[ProjectUser], user, path=["response"])
+
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.projects.users.with_raw_response.list(
+ project_id="project_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ user = response.parse()
+ assert_matches_type(AsyncConversationCursorPage[ProjectUser], user, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.projects.users.with_streaming_response.list(
+ project_id="project_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ user = await response.parse()
+ assert_matches_type(AsyncConversationCursorPage[ProjectUser], user, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_list(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ await async_client.admin.organization.projects.users.with_raw_response.list(
+ project_id="",
+ )
+
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
+ user = await async_client.admin.organization.projects.users.delete(
+ user_id="user_id",
+ project_id="project_id",
+ )
+ assert_matches_type(UserDeleteResponse, user, path=["response"])
+
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.projects.users.with_raw_response.delete(
+ user_id="user_id",
+ project_id="project_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ user = response.parse()
+ assert_matches_type(UserDeleteResponse, user, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.projects.users.with_streaming_response.delete(
+ user_id="user_id",
+ project_id="project_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ user = await response.parse()
+ assert_matches_type(UserDeleteResponse, user, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ await async_client.admin.organization.projects.users.with_raw_response.delete(
+ user_id="user_id",
+ project_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
+ await async_client.admin.organization.projects.users.with_raw_response.delete(
+ user_id="",
+ project_id="project_id",
+ )
diff --git a/tests/api_resources/admin/organization/projects/users/__init__.py b/tests/api_resources/admin/organization/projects/users/__init__.py
new file mode 100644
index 0000000000..fd8019a9a1
--- /dev/null
+++ b/tests/api_resources/admin/organization/projects/users/__init__.py
@@ -0,0 +1 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/admin/organization/projects/users/test_roles.py b/tests/api_resources/admin/organization/projects/users/test_roles.py
new file mode 100644
index 0000000000..99c52d494c
--- /dev/null
+++ b/tests/api_resources/admin/organization/projects/users/test_roles.py
@@ -0,0 +1,373 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from openai import OpenAI, AsyncOpenAI
+from tests.utils import assert_matches_type
+from openai.pagination import SyncNextCursorPage, AsyncNextCursorPage
+from openai.types.admin.organization.projects.users import (
+ RoleListResponse,
+ RoleCreateResponse,
+ RoleDeleteResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestRoles:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ def test_method_create(self, client: OpenAI) -> None:
+ role = client.admin.organization.projects.users.roles.create(
+ user_id="user_id",
+ project_id="project_id",
+ role_id="role_id",
+ )
+ assert_matches_type(RoleCreateResponse, role, path=["response"])
+
+ @parametrize
+ def test_raw_response_create(self, client: OpenAI) -> None:
+ response = client.admin.organization.projects.users.roles.with_raw_response.create(
+ user_id="user_id",
+ project_id="project_id",
+ role_id="role_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ role = response.parse()
+ assert_matches_type(RoleCreateResponse, role, path=["response"])
+
+ @parametrize
+ def test_streaming_response_create(self, client: OpenAI) -> None:
+ with client.admin.organization.projects.users.roles.with_streaming_response.create(
+ user_id="user_id",
+ project_id="project_id",
+ role_id="role_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ role = response.parse()
+ assert_matches_type(RoleCreateResponse, role, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_create(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ client.admin.organization.projects.users.roles.with_raw_response.create(
+ user_id="user_id",
+ project_id="",
+ role_id="role_id",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
+ client.admin.organization.projects.users.roles.with_raw_response.create(
+ user_id="",
+ project_id="project_id",
+ role_id="role_id",
+ )
+
+ @parametrize
+ def test_method_list(self, client: OpenAI) -> None:
+ role = client.admin.organization.projects.users.roles.list(
+ user_id="user_id",
+ project_id="project_id",
+ )
+ assert_matches_type(SyncNextCursorPage[RoleListResponse], role, path=["response"])
+
+ @parametrize
+ def test_method_list_with_all_params(self, client: OpenAI) -> None:
+ role = client.admin.organization.projects.users.roles.list(
+ user_id="user_id",
+ project_id="project_id",
+ after="after",
+ limit=0,
+ order="asc",
+ )
+ assert_matches_type(SyncNextCursorPage[RoleListResponse], role, path=["response"])
+
+ @parametrize
+ def test_raw_response_list(self, client: OpenAI) -> None:
+ response = client.admin.organization.projects.users.roles.with_raw_response.list(
+ user_id="user_id",
+ project_id="project_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ role = response.parse()
+ assert_matches_type(SyncNextCursorPage[RoleListResponse], role, path=["response"])
+
+ @parametrize
+ def test_streaming_response_list(self, client: OpenAI) -> None:
+ with client.admin.organization.projects.users.roles.with_streaming_response.list(
+ user_id="user_id",
+ project_id="project_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ role = response.parse()
+ assert_matches_type(SyncNextCursorPage[RoleListResponse], role, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_list(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ client.admin.organization.projects.users.roles.with_raw_response.list(
+ user_id="user_id",
+ project_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
+ client.admin.organization.projects.users.roles.with_raw_response.list(
+ user_id="",
+ project_id="project_id",
+ )
+
+ @parametrize
+ def test_method_delete(self, client: OpenAI) -> None:
+ role = client.admin.organization.projects.users.roles.delete(
+ role_id="role_id",
+ project_id="project_id",
+ user_id="user_id",
+ )
+ assert_matches_type(RoleDeleteResponse, role, path=["response"])
+
+ @parametrize
+ def test_raw_response_delete(self, client: OpenAI) -> None:
+ response = client.admin.organization.projects.users.roles.with_raw_response.delete(
+ role_id="role_id",
+ project_id="project_id",
+ user_id="user_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ role = response.parse()
+ assert_matches_type(RoleDeleteResponse, role, path=["response"])
+
+ @parametrize
+ def test_streaming_response_delete(self, client: OpenAI) -> None:
+ with client.admin.organization.projects.users.roles.with_streaming_response.delete(
+ role_id="role_id",
+ project_id="project_id",
+ user_id="user_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ role = response.parse()
+ assert_matches_type(RoleDeleteResponse, role, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_delete(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ client.admin.organization.projects.users.roles.with_raw_response.delete(
+ role_id="role_id",
+ project_id="",
+ user_id="user_id",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
+ client.admin.organization.projects.users.roles.with_raw_response.delete(
+ role_id="role_id",
+ project_id="project_id",
+ user_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `role_id` but received ''"):
+ client.admin.organization.projects.users.roles.with_raw_response.delete(
+ role_id="",
+ project_id="project_id",
+ user_id="user_id",
+ )
+
+
+class TestAsyncRoles:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @parametrize
+ async def test_method_create(self, async_client: AsyncOpenAI) -> None:
+ role = await async_client.admin.organization.projects.users.roles.create(
+ user_id="user_id",
+ project_id="project_id",
+ role_id="role_id",
+ )
+ assert_matches_type(RoleCreateResponse, role, path=["response"])
+
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.projects.users.roles.with_raw_response.create(
+ user_id="user_id",
+ project_id="project_id",
+ role_id="role_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ role = response.parse()
+ assert_matches_type(RoleCreateResponse, role, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.projects.users.roles.with_streaming_response.create(
+ user_id="user_id",
+ project_id="project_id",
+ role_id="role_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ role = await response.parse()
+ assert_matches_type(RoleCreateResponse, role, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_create(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ await async_client.admin.organization.projects.users.roles.with_raw_response.create(
+ user_id="user_id",
+ project_id="",
+ role_id="role_id",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
+ await async_client.admin.organization.projects.users.roles.with_raw_response.create(
+ user_id="",
+ project_id="project_id",
+ role_id="role_id",
+ )
+
+ @parametrize
+ async def test_method_list(self, async_client: AsyncOpenAI) -> None:
+ role = await async_client.admin.organization.projects.users.roles.list(
+ user_id="user_id",
+ project_id="project_id",
+ )
+ assert_matches_type(AsyncNextCursorPage[RoleListResponse], role, path=["response"])
+
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ role = await async_client.admin.organization.projects.users.roles.list(
+ user_id="user_id",
+ project_id="project_id",
+ after="after",
+ limit=0,
+ order="asc",
+ )
+ assert_matches_type(AsyncNextCursorPage[RoleListResponse], role, path=["response"])
+
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.projects.users.roles.with_raw_response.list(
+ user_id="user_id",
+ project_id="project_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ role = response.parse()
+ assert_matches_type(AsyncNextCursorPage[RoleListResponse], role, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.projects.users.roles.with_streaming_response.list(
+ user_id="user_id",
+ project_id="project_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ role = await response.parse()
+ assert_matches_type(AsyncNextCursorPage[RoleListResponse], role, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_list(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ await async_client.admin.organization.projects.users.roles.with_raw_response.list(
+ user_id="user_id",
+ project_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
+ await async_client.admin.organization.projects.users.roles.with_raw_response.list(
+ user_id="",
+ project_id="project_id",
+ )
+
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
+ role = await async_client.admin.organization.projects.users.roles.delete(
+ role_id="role_id",
+ project_id="project_id",
+ user_id="user_id",
+ )
+ assert_matches_type(RoleDeleteResponse, role, path=["response"])
+
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.projects.users.roles.with_raw_response.delete(
+ role_id="role_id",
+ project_id="project_id",
+ user_id="user_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ role = response.parse()
+ assert_matches_type(RoleDeleteResponse, role, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.projects.users.roles.with_streaming_response.delete(
+ role_id="role_id",
+ project_id="project_id",
+ user_id="user_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ role = await response.parse()
+ assert_matches_type(RoleDeleteResponse, role, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ await async_client.admin.organization.projects.users.roles.with_raw_response.delete(
+ role_id="role_id",
+ project_id="",
+ user_id="user_id",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
+ await async_client.admin.organization.projects.users.roles.with_raw_response.delete(
+ role_id="role_id",
+ project_id="project_id",
+ user_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `role_id` but received ''"):
+ await async_client.admin.organization.projects.users.roles.with_raw_response.delete(
+ role_id="",
+ project_id="project_id",
+ user_id="user_id",
+ )
diff --git a/tests/api_resources/admin/organization/test_admin_api_keys.py b/tests/api_resources/admin/organization/test_admin_api_keys.py
new file mode 100644
index 0000000000..59eed910e8
--- /dev/null
+++ b/tests/api_resources/admin/organization/test_admin_api_keys.py
@@ -0,0 +1,311 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from openai import OpenAI, AsyncOpenAI
+from tests.utils import assert_matches_type
+from openai.pagination import SyncCursorPage, AsyncCursorPage
+from openai.types.admin.organization import (
+ AdminAPIKey,
+ AdminAPIKeyCreateResponse,
+ AdminAPIKeyDeleteResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestAdminAPIKeys:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ def test_method_create(self, client: OpenAI) -> None:
+ admin_api_key = client.admin.organization.admin_api_keys.create(
+ name="New Admin Key",
+ )
+ assert_matches_type(AdminAPIKeyCreateResponse, admin_api_key, path=["response"])
+
+ @parametrize
+ def test_raw_response_create(self, client: OpenAI) -> None:
+ response = client.admin.organization.admin_api_keys.with_raw_response.create(
+ name="New Admin Key",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ admin_api_key = response.parse()
+ assert_matches_type(AdminAPIKeyCreateResponse, admin_api_key, path=["response"])
+
+ @parametrize
+ def test_streaming_response_create(self, client: OpenAI) -> None:
+ with client.admin.organization.admin_api_keys.with_streaming_response.create(
+ name="New Admin Key",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ admin_api_key = response.parse()
+ assert_matches_type(AdminAPIKeyCreateResponse, admin_api_key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_method_retrieve(self, client: OpenAI) -> None:
+ admin_api_key = client.admin.organization.admin_api_keys.retrieve(
+ "key_id",
+ )
+ assert_matches_type(AdminAPIKey, admin_api_key, path=["response"])
+
+ @parametrize
+ def test_raw_response_retrieve(self, client: OpenAI) -> None:
+ response = client.admin.organization.admin_api_keys.with_raw_response.retrieve(
+ "key_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ admin_api_key = response.parse()
+ assert_matches_type(AdminAPIKey, admin_api_key, path=["response"])
+
+ @parametrize
+ def test_streaming_response_retrieve(self, client: OpenAI) -> None:
+ with client.admin.organization.admin_api_keys.with_streaming_response.retrieve(
+ "key_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ admin_api_key = response.parse()
+ assert_matches_type(AdminAPIKey, admin_api_key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_retrieve(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `key_id` but received ''"):
+ client.admin.organization.admin_api_keys.with_raw_response.retrieve(
+ "",
+ )
+
+ @parametrize
+ def test_method_list(self, client: OpenAI) -> None:
+ admin_api_key = client.admin.organization.admin_api_keys.list()
+ assert_matches_type(SyncCursorPage[AdminAPIKey], admin_api_key, path=["response"])
+
+ @parametrize
+ def test_method_list_with_all_params(self, client: OpenAI) -> None:
+ admin_api_key = client.admin.organization.admin_api_keys.list(
+ after="after",
+ limit=0,
+ order="asc",
+ )
+ assert_matches_type(SyncCursorPage[AdminAPIKey], admin_api_key, path=["response"])
+
+ @parametrize
+ def test_raw_response_list(self, client: OpenAI) -> None:
+ response = client.admin.organization.admin_api_keys.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ admin_api_key = response.parse()
+ assert_matches_type(SyncCursorPage[AdminAPIKey], admin_api_key, path=["response"])
+
+ @parametrize
+ def test_streaming_response_list(self, client: OpenAI) -> None:
+ with client.admin.organization.admin_api_keys.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ admin_api_key = response.parse()
+ assert_matches_type(SyncCursorPage[AdminAPIKey], admin_api_key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_method_delete(self, client: OpenAI) -> None:
+ admin_api_key = client.admin.organization.admin_api_keys.delete(
+ "key_id",
+ )
+ assert_matches_type(AdminAPIKeyDeleteResponse, admin_api_key, path=["response"])
+
+ @parametrize
+ def test_raw_response_delete(self, client: OpenAI) -> None:
+ response = client.admin.organization.admin_api_keys.with_raw_response.delete(
+ "key_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ admin_api_key = response.parse()
+ assert_matches_type(AdminAPIKeyDeleteResponse, admin_api_key, path=["response"])
+
+ @parametrize
+ def test_streaming_response_delete(self, client: OpenAI) -> None:
+ with client.admin.organization.admin_api_keys.with_streaming_response.delete(
+ "key_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ admin_api_key = response.parse()
+ assert_matches_type(AdminAPIKeyDeleteResponse, admin_api_key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_delete(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `key_id` but received ''"):
+ client.admin.organization.admin_api_keys.with_raw_response.delete(
+ "",
+ )
+
+
+class TestAsyncAdminAPIKeys:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @parametrize
+ async def test_method_create(self, async_client: AsyncOpenAI) -> None:
+ admin_api_key = await async_client.admin.organization.admin_api_keys.create(
+ name="New Admin Key",
+ )
+ assert_matches_type(AdminAPIKeyCreateResponse, admin_api_key, path=["response"])
+
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.admin_api_keys.with_raw_response.create(
+ name="New Admin Key",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ admin_api_key = response.parse()
+ assert_matches_type(AdminAPIKeyCreateResponse, admin_api_key, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.admin_api_keys.with_streaming_response.create(
+ name="New Admin Key",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ admin_api_key = await response.parse()
+ assert_matches_type(AdminAPIKeyCreateResponse, admin_api_key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
+ admin_api_key = await async_client.admin.organization.admin_api_keys.retrieve(
+ "key_id",
+ )
+ assert_matches_type(AdminAPIKey, admin_api_key, path=["response"])
+
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.admin_api_keys.with_raw_response.retrieve(
+ "key_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ admin_api_key = response.parse()
+ assert_matches_type(AdminAPIKey, admin_api_key, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.admin_api_keys.with_streaming_response.retrieve(
+ "key_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ admin_api_key = await response.parse()
+ assert_matches_type(AdminAPIKey, admin_api_key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `key_id` but received ''"):
+ await async_client.admin.organization.admin_api_keys.with_raw_response.retrieve(
+ "",
+ )
+
+ @parametrize
+ async def test_method_list(self, async_client: AsyncOpenAI) -> None:
+ admin_api_key = await async_client.admin.organization.admin_api_keys.list()
+ assert_matches_type(AsyncCursorPage[AdminAPIKey], admin_api_key, path=["response"])
+
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ admin_api_key = await async_client.admin.organization.admin_api_keys.list(
+ after="after",
+ limit=0,
+ order="asc",
+ )
+ assert_matches_type(AsyncCursorPage[AdminAPIKey], admin_api_key, path=["response"])
+
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.admin_api_keys.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ admin_api_key = response.parse()
+ assert_matches_type(AsyncCursorPage[AdminAPIKey], admin_api_key, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.admin_api_keys.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ admin_api_key = await response.parse()
+ assert_matches_type(AsyncCursorPage[AdminAPIKey], admin_api_key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
+ admin_api_key = await async_client.admin.organization.admin_api_keys.delete(
+ "key_id",
+ )
+ assert_matches_type(AdminAPIKeyDeleteResponse, admin_api_key, path=["response"])
+
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.admin_api_keys.with_raw_response.delete(
+ "key_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ admin_api_key = response.parse()
+ assert_matches_type(AdminAPIKeyDeleteResponse, admin_api_key, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.admin_api_keys.with_streaming_response.delete(
+ "key_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ admin_api_key = await response.parse()
+ assert_matches_type(AdminAPIKeyDeleteResponse, admin_api_key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `key_id` but received ''"):
+ await async_client.admin.organization.admin_api_keys.with_raw_response.delete(
+ "",
+ )
diff --git a/tests/api_resources/admin/organization/test_audit_logs.py b/tests/api_resources/admin/organization/test_audit_logs.py
new file mode 100644
index 0000000000..5e696461fa
--- /dev/null
+++ b/tests/api_resources/admin/organization/test_audit_logs.py
@@ -0,0 +1,115 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from openai import OpenAI, AsyncOpenAI
+from tests.utils import assert_matches_type
+from openai.pagination import SyncConversationCursorPage, AsyncConversationCursorPage
+from openai.types.admin.organization import AuditLogListResponse
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestAuditLogs:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ def test_method_list(self, client: OpenAI) -> None:
+ audit_log = client.admin.organization.audit_logs.list()
+ assert_matches_type(SyncConversationCursorPage[AuditLogListResponse], audit_log, path=["response"])
+
+ @parametrize
+ def test_method_list_with_all_params(self, client: OpenAI) -> None:
+ audit_log = client.admin.organization.audit_logs.list(
+ actor_emails=["string"],
+ actor_ids=["string"],
+ after="after",
+ before="before",
+ effective_at={
+ "gt": 0,
+ "gte": 0,
+ "lt": 0,
+ "lte": 0,
+ },
+ event_types=["api_key.created"],
+ limit=0,
+ project_ids=["string"],
+ resource_ids=["string"],
+ )
+ assert_matches_type(SyncConversationCursorPage[AuditLogListResponse], audit_log, path=["response"])
+
+ @parametrize
+ def test_raw_response_list(self, client: OpenAI) -> None:
+ response = client.admin.organization.audit_logs.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ audit_log = response.parse()
+ assert_matches_type(SyncConversationCursorPage[AuditLogListResponse], audit_log, path=["response"])
+
+ @parametrize
+ def test_streaming_response_list(self, client: OpenAI) -> None:
+ with client.admin.organization.audit_logs.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ audit_log = response.parse()
+ assert_matches_type(SyncConversationCursorPage[AuditLogListResponse], audit_log, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+
+class TestAsyncAuditLogs:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @parametrize
+ async def test_method_list(self, async_client: AsyncOpenAI) -> None:
+ audit_log = await async_client.admin.organization.audit_logs.list()
+ assert_matches_type(AsyncConversationCursorPage[AuditLogListResponse], audit_log, path=["response"])
+
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ audit_log = await async_client.admin.organization.audit_logs.list(
+ actor_emails=["string"],
+ actor_ids=["string"],
+ after="after",
+ before="before",
+ effective_at={
+ "gt": 0,
+ "gte": 0,
+ "lt": 0,
+ "lte": 0,
+ },
+ event_types=["api_key.created"],
+ limit=0,
+ project_ids=["string"],
+ resource_ids=["string"],
+ )
+ assert_matches_type(AsyncConversationCursorPage[AuditLogListResponse], audit_log, path=["response"])
+
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.audit_logs.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ audit_log = response.parse()
+ assert_matches_type(AsyncConversationCursorPage[AuditLogListResponse], audit_log, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.audit_logs.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ audit_log = await response.parse()
+ assert_matches_type(AsyncConversationCursorPage[AuditLogListResponse], audit_log, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/admin/organization/test_certificates.py b/tests/api_resources/admin/organization/test_certificates.py
new file mode 100644
index 0000000000..209fd9220d
--- /dev/null
+++ b/tests/api_resources/admin/organization/test_certificates.py
@@ -0,0 +1,561 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from openai import OpenAI, AsyncOpenAI
+from tests.utils import assert_matches_type
+from openai.pagination import SyncPage, AsyncPage, SyncConversationCursorPage, AsyncConversationCursorPage
+from openai.types.admin.organization import (
+ Certificate,
+ CertificateListResponse,
+ CertificateDeleteResponse,
+ CertificateActivateResponse,
+ CertificateDeactivateResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestCertificates:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ def test_method_create(self, client: OpenAI) -> None:
+ certificate = client.admin.organization.certificates.create(
+ certificate="certificate",
+ )
+ assert_matches_type(Certificate, certificate, path=["response"])
+
+ @parametrize
+ def test_method_create_with_all_params(self, client: OpenAI) -> None:
+ certificate = client.admin.organization.certificates.create(
+ certificate="certificate",
+ name="name",
+ )
+ assert_matches_type(Certificate, certificate, path=["response"])
+
+ @parametrize
+ def test_raw_response_create(self, client: OpenAI) -> None:
+ response = client.admin.organization.certificates.with_raw_response.create(
+ certificate="certificate",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ certificate = response.parse()
+ assert_matches_type(Certificate, certificate, path=["response"])
+
+ @parametrize
+ def test_streaming_response_create(self, client: OpenAI) -> None:
+ with client.admin.organization.certificates.with_streaming_response.create(
+ certificate="certificate",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ certificate = response.parse()
+ assert_matches_type(Certificate, certificate, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_method_retrieve(self, client: OpenAI) -> None:
+ certificate = client.admin.organization.certificates.retrieve(
+ certificate_id="certificate_id",
+ )
+ assert_matches_type(Certificate, certificate, path=["response"])
+
+ @parametrize
+ def test_method_retrieve_with_all_params(self, client: OpenAI) -> None:
+ certificate = client.admin.organization.certificates.retrieve(
+ certificate_id="certificate_id",
+ include=["content"],
+ )
+ assert_matches_type(Certificate, certificate, path=["response"])
+
+ @parametrize
+ def test_raw_response_retrieve(self, client: OpenAI) -> None:
+ response = client.admin.organization.certificates.with_raw_response.retrieve(
+ certificate_id="certificate_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ certificate = response.parse()
+ assert_matches_type(Certificate, certificate, path=["response"])
+
+ @parametrize
+ def test_streaming_response_retrieve(self, client: OpenAI) -> None:
+ with client.admin.organization.certificates.with_streaming_response.retrieve(
+ certificate_id="certificate_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ certificate = response.parse()
+ assert_matches_type(Certificate, certificate, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_retrieve(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `certificate_id` but received ''"):
+ client.admin.organization.certificates.with_raw_response.retrieve(
+ certificate_id="",
+ )
+
+ @parametrize
+ def test_method_update(self, client: OpenAI) -> None:
+ certificate = client.admin.organization.certificates.update(
+ certificate_id="certificate_id",
+ )
+ assert_matches_type(Certificate, certificate, path=["response"])
+
+ @parametrize
+ def test_method_update_with_all_params(self, client: OpenAI) -> None:
+ certificate = client.admin.organization.certificates.update(
+ certificate_id="certificate_id",
+ name="name",
+ )
+ assert_matches_type(Certificate, certificate, path=["response"])
+
+ @parametrize
+ def test_raw_response_update(self, client: OpenAI) -> None:
+ response = client.admin.organization.certificates.with_raw_response.update(
+ certificate_id="certificate_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ certificate = response.parse()
+ assert_matches_type(Certificate, certificate, path=["response"])
+
+ @parametrize
+ def test_streaming_response_update(self, client: OpenAI) -> None:
+ with client.admin.organization.certificates.with_streaming_response.update(
+ certificate_id="certificate_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ certificate = response.parse()
+ assert_matches_type(Certificate, certificate, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_update(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `certificate_id` but received ''"):
+ client.admin.organization.certificates.with_raw_response.update(
+ certificate_id="",
+ )
+
+ @parametrize
+ def test_method_list(self, client: OpenAI) -> None:
+ certificate = client.admin.organization.certificates.list()
+ assert_matches_type(SyncConversationCursorPage[CertificateListResponse], certificate, path=["response"])
+
+ @parametrize
+ def test_method_list_with_all_params(self, client: OpenAI) -> None:
+ certificate = client.admin.organization.certificates.list(
+ after="after",
+ limit=0,
+ order="asc",
+ )
+ assert_matches_type(SyncConversationCursorPage[CertificateListResponse], certificate, path=["response"])
+
+ @parametrize
+ def test_raw_response_list(self, client: OpenAI) -> None:
+ response = client.admin.organization.certificates.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ certificate = response.parse()
+ assert_matches_type(SyncConversationCursorPage[CertificateListResponse], certificate, path=["response"])
+
+ @parametrize
+ def test_streaming_response_list(self, client: OpenAI) -> None:
+ with client.admin.organization.certificates.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ certificate = response.parse()
+ assert_matches_type(SyncConversationCursorPage[CertificateListResponse], certificate, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_method_delete(self, client: OpenAI) -> None:
+ certificate = client.admin.organization.certificates.delete(
+ "certificate_id",
+ )
+ assert_matches_type(CertificateDeleteResponse, certificate, path=["response"])
+
+ @parametrize
+ def test_raw_response_delete(self, client: OpenAI) -> None:
+ response = client.admin.organization.certificates.with_raw_response.delete(
+ "certificate_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ certificate = response.parse()
+ assert_matches_type(CertificateDeleteResponse, certificate, path=["response"])
+
+ @parametrize
+ def test_streaming_response_delete(self, client: OpenAI) -> None:
+ with client.admin.organization.certificates.with_streaming_response.delete(
+ "certificate_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ certificate = response.parse()
+ assert_matches_type(CertificateDeleteResponse, certificate, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_delete(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `certificate_id` but received ''"):
+ client.admin.organization.certificates.with_raw_response.delete(
+ "",
+ )
+
+ @parametrize
+ def test_method_activate(self, client: OpenAI) -> None:
+ certificate = client.admin.organization.certificates.activate(
+ certificate_ids=["cert_abc"],
+ )
+ assert_matches_type(SyncPage[CertificateActivateResponse], certificate, path=["response"])
+
+ @parametrize
+ def test_raw_response_activate(self, client: OpenAI) -> None:
+ response = client.admin.organization.certificates.with_raw_response.activate(
+ certificate_ids=["cert_abc"],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ certificate = response.parse()
+ assert_matches_type(SyncPage[CertificateActivateResponse], certificate, path=["response"])
+
+ @parametrize
+ def test_streaming_response_activate(self, client: OpenAI) -> None:
+ with client.admin.organization.certificates.with_streaming_response.activate(
+ certificate_ids=["cert_abc"],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ certificate = response.parse()
+ assert_matches_type(SyncPage[CertificateActivateResponse], certificate, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_method_deactivate(self, client: OpenAI) -> None:
+ certificate = client.admin.organization.certificates.deactivate(
+ certificate_ids=["cert_abc"],
+ )
+ assert_matches_type(SyncPage[CertificateDeactivateResponse], certificate, path=["response"])
+
+ @parametrize
+ def test_raw_response_deactivate(self, client: OpenAI) -> None:
+ response = client.admin.organization.certificates.with_raw_response.deactivate(
+ certificate_ids=["cert_abc"],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ certificate = response.parse()
+ assert_matches_type(SyncPage[CertificateDeactivateResponse], certificate, path=["response"])
+
+ @parametrize
+ def test_streaming_response_deactivate(self, client: OpenAI) -> None:
+ with client.admin.organization.certificates.with_streaming_response.deactivate(
+ certificate_ids=["cert_abc"],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ certificate = response.parse()
+ assert_matches_type(SyncPage[CertificateDeactivateResponse], certificate, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+
+class TestAsyncCertificates:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @parametrize
+ async def test_method_create(self, async_client: AsyncOpenAI) -> None:
+ certificate = await async_client.admin.organization.certificates.create(
+ certificate="certificate",
+ )
+ assert_matches_type(Certificate, certificate, path=["response"])
+
+ @parametrize
+ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ certificate = await async_client.admin.organization.certificates.create(
+ certificate="certificate",
+ name="name",
+ )
+ assert_matches_type(Certificate, certificate, path=["response"])
+
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.certificates.with_raw_response.create(
+ certificate="certificate",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ certificate = response.parse()
+ assert_matches_type(Certificate, certificate, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.certificates.with_streaming_response.create(
+ certificate="certificate",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ certificate = await response.parse()
+ assert_matches_type(Certificate, certificate, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
+ certificate = await async_client.admin.organization.certificates.retrieve(
+ certificate_id="certificate_id",
+ )
+ assert_matches_type(Certificate, certificate, path=["response"])
+
+ @parametrize
+ async def test_method_retrieve_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ certificate = await async_client.admin.organization.certificates.retrieve(
+ certificate_id="certificate_id",
+ include=["content"],
+ )
+ assert_matches_type(Certificate, certificate, path=["response"])
+
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.certificates.with_raw_response.retrieve(
+ certificate_id="certificate_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ certificate = response.parse()
+ assert_matches_type(Certificate, certificate, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.certificates.with_streaming_response.retrieve(
+ certificate_id="certificate_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ certificate = await response.parse()
+ assert_matches_type(Certificate, certificate, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `certificate_id` but received ''"):
+ await async_client.admin.organization.certificates.with_raw_response.retrieve(
+ certificate_id="",
+ )
+
+ @parametrize
+ async def test_method_update(self, async_client: AsyncOpenAI) -> None:
+ certificate = await async_client.admin.organization.certificates.update(
+ certificate_id="certificate_id",
+ )
+ assert_matches_type(Certificate, certificate, path=["response"])
+
+ @parametrize
+ async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ certificate = await async_client.admin.organization.certificates.update(
+ certificate_id="certificate_id",
+ name="name",
+ )
+ assert_matches_type(Certificate, certificate, path=["response"])
+
+ @parametrize
+ async def test_raw_response_update(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.certificates.with_raw_response.update(
+ certificate_id="certificate_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ certificate = response.parse()
+ assert_matches_type(Certificate, certificate, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_update(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.certificates.with_streaming_response.update(
+ certificate_id="certificate_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ certificate = await response.parse()
+ assert_matches_type(Certificate, certificate, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_update(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `certificate_id` but received ''"):
+ await async_client.admin.organization.certificates.with_raw_response.update(
+ certificate_id="",
+ )
+
+ @parametrize
+ async def test_method_list(self, async_client: AsyncOpenAI) -> None:
+ certificate = await async_client.admin.organization.certificates.list()
+ assert_matches_type(AsyncConversationCursorPage[CertificateListResponse], certificate, path=["response"])
+
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ certificate = await async_client.admin.organization.certificates.list(
+ after="after",
+ limit=0,
+ order="asc",
+ )
+ assert_matches_type(AsyncConversationCursorPage[CertificateListResponse], certificate, path=["response"])
+
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.certificates.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ certificate = response.parse()
+ assert_matches_type(AsyncConversationCursorPage[CertificateListResponse], certificate, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.certificates.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ certificate = await response.parse()
+ assert_matches_type(AsyncConversationCursorPage[CertificateListResponse], certificate, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
+ certificate = await async_client.admin.organization.certificates.delete(
+ "certificate_id",
+ )
+ assert_matches_type(CertificateDeleteResponse, certificate, path=["response"])
+
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.certificates.with_raw_response.delete(
+ "certificate_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ certificate = response.parse()
+ assert_matches_type(CertificateDeleteResponse, certificate, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.certificates.with_streaming_response.delete(
+ "certificate_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ certificate = await response.parse()
+ assert_matches_type(CertificateDeleteResponse, certificate, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `certificate_id` but received ''"):
+ await async_client.admin.organization.certificates.with_raw_response.delete(
+ "",
+ )
+
+ @parametrize
+ async def test_method_activate(self, async_client: AsyncOpenAI) -> None:
+ certificate = await async_client.admin.organization.certificates.activate(
+ certificate_ids=["cert_abc"],
+ )
+ assert_matches_type(AsyncPage[CertificateActivateResponse], certificate, path=["response"])
+
+ @parametrize
+ async def test_raw_response_activate(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.certificates.with_raw_response.activate(
+ certificate_ids=["cert_abc"],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ certificate = response.parse()
+ assert_matches_type(AsyncPage[CertificateActivateResponse], certificate, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_activate(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.certificates.with_streaming_response.activate(
+ certificate_ids=["cert_abc"],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ certificate = await response.parse()
+ assert_matches_type(AsyncPage[CertificateActivateResponse], certificate, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_method_deactivate(self, async_client: AsyncOpenAI) -> None:
+ certificate = await async_client.admin.organization.certificates.deactivate(
+ certificate_ids=["cert_abc"],
+ )
+ assert_matches_type(AsyncPage[CertificateDeactivateResponse], certificate, path=["response"])
+
+ @parametrize
+ async def test_raw_response_deactivate(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.certificates.with_raw_response.deactivate(
+ certificate_ids=["cert_abc"],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ certificate = response.parse()
+ assert_matches_type(AsyncPage[CertificateDeactivateResponse], certificate, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_deactivate(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.certificates.with_streaming_response.deactivate(
+ certificate_ids=["cert_abc"],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ certificate = await response.parse()
+ assert_matches_type(AsyncPage[CertificateDeactivateResponse], certificate, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/admin/organization/test_groups.py b/tests/api_resources/admin/organization/test_groups.py
new file mode 100644
index 0000000000..0eca89f06c
--- /dev/null
+++ b/tests/api_resources/admin/organization/test_groups.py
@@ -0,0 +1,319 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from openai import OpenAI, AsyncOpenAI
+from tests.utils import assert_matches_type
+from openai.pagination import SyncNextCursorPage, AsyncNextCursorPage
+from openai.types.admin.organization import (
+ Group,
+ GroupDeleteResponse,
+ GroupUpdateResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestGroups:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ def test_method_create(self, client: OpenAI) -> None:
+ group = client.admin.organization.groups.create(
+ name="x",
+ )
+ assert_matches_type(Group, group, path=["response"])
+
+ @parametrize
+ def test_raw_response_create(self, client: OpenAI) -> None:
+ response = client.admin.organization.groups.with_raw_response.create(
+ name="x",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ group = response.parse()
+ assert_matches_type(Group, group, path=["response"])
+
+ @parametrize
+ def test_streaming_response_create(self, client: OpenAI) -> None:
+ with client.admin.organization.groups.with_streaming_response.create(
+ name="x",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ group = response.parse()
+ assert_matches_type(Group, group, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_method_update(self, client: OpenAI) -> None:
+ group = client.admin.organization.groups.update(
+ group_id="group_id",
+ name="x",
+ )
+ assert_matches_type(GroupUpdateResponse, group, path=["response"])
+
+ @parametrize
+ def test_raw_response_update(self, client: OpenAI) -> None:
+ response = client.admin.organization.groups.with_raw_response.update(
+ group_id="group_id",
+ name="x",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ group = response.parse()
+ assert_matches_type(GroupUpdateResponse, group, path=["response"])
+
+ @parametrize
+ def test_streaming_response_update(self, client: OpenAI) -> None:
+ with client.admin.organization.groups.with_streaming_response.update(
+ group_id="group_id",
+ name="x",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ group = response.parse()
+ assert_matches_type(GroupUpdateResponse, group, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_update(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `group_id` but received ''"):
+ client.admin.organization.groups.with_raw_response.update(
+ group_id="",
+ name="x",
+ )
+
+ @parametrize
+ def test_method_list(self, client: OpenAI) -> None:
+ group = client.admin.organization.groups.list()
+ assert_matches_type(SyncNextCursorPage[Group], group, path=["response"])
+
+ @parametrize
+ def test_method_list_with_all_params(self, client: OpenAI) -> None:
+ group = client.admin.organization.groups.list(
+ after="after",
+ limit=0,
+ order="asc",
+ )
+ assert_matches_type(SyncNextCursorPage[Group], group, path=["response"])
+
+ @parametrize
+ def test_raw_response_list(self, client: OpenAI) -> None:
+ response = client.admin.organization.groups.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ group = response.parse()
+ assert_matches_type(SyncNextCursorPage[Group], group, path=["response"])
+
+ @parametrize
+ def test_streaming_response_list(self, client: OpenAI) -> None:
+ with client.admin.organization.groups.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ group = response.parse()
+ assert_matches_type(SyncNextCursorPage[Group], group, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_method_delete(self, client: OpenAI) -> None:
+ group = client.admin.organization.groups.delete(
+ "group_id",
+ )
+ assert_matches_type(GroupDeleteResponse, group, path=["response"])
+
+ @parametrize
+ def test_raw_response_delete(self, client: OpenAI) -> None:
+ response = client.admin.organization.groups.with_raw_response.delete(
+ "group_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ group = response.parse()
+ assert_matches_type(GroupDeleteResponse, group, path=["response"])
+
+ @parametrize
+ def test_streaming_response_delete(self, client: OpenAI) -> None:
+ with client.admin.organization.groups.with_streaming_response.delete(
+ "group_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ group = response.parse()
+ assert_matches_type(GroupDeleteResponse, group, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_delete(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `group_id` but received ''"):
+ client.admin.organization.groups.with_raw_response.delete(
+ "",
+ )
+
+
+class TestAsyncGroups:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @parametrize
+ async def test_method_create(self, async_client: AsyncOpenAI) -> None:
+ group = await async_client.admin.organization.groups.create(
+ name="x",
+ )
+ assert_matches_type(Group, group, path=["response"])
+
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.groups.with_raw_response.create(
+ name="x",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ group = response.parse()
+ assert_matches_type(Group, group, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.groups.with_streaming_response.create(
+ name="x",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ group = await response.parse()
+ assert_matches_type(Group, group, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_method_update(self, async_client: AsyncOpenAI) -> None:
+ group = await async_client.admin.organization.groups.update(
+ group_id="group_id",
+ name="x",
+ )
+ assert_matches_type(GroupUpdateResponse, group, path=["response"])
+
+ @parametrize
+ async def test_raw_response_update(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.groups.with_raw_response.update(
+ group_id="group_id",
+ name="x",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ group = response.parse()
+ assert_matches_type(GroupUpdateResponse, group, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_update(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.groups.with_streaming_response.update(
+ group_id="group_id",
+ name="x",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ group = await response.parse()
+ assert_matches_type(GroupUpdateResponse, group, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_update(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `group_id` but received ''"):
+ await async_client.admin.organization.groups.with_raw_response.update(
+ group_id="",
+ name="x",
+ )
+
+ @parametrize
+ async def test_method_list(self, async_client: AsyncOpenAI) -> None:
+ group = await async_client.admin.organization.groups.list()
+ assert_matches_type(AsyncNextCursorPage[Group], group, path=["response"])
+
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ group = await async_client.admin.organization.groups.list(
+ after="after",
+ limit=0,
+ order="asc",
+ )
+ assert_matches_type(AsyncNextCursorPage[Group], group, path=["response"])
+
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.groups.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ group = response.parse()
+ assert_matches_type(AsyncNextCursorPage[Group], group, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.groups.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ group = await response.parse()
+ assert_matches_type(AsyncNextCursorPage[Group], group, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
+ group = await async_client.admin.organization.groups.delete(
+ "group_id",
+ )
+ assert_matches_type(GroupDeleteResponse, group, path=["response"])
+
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.groups.with_raw_response.delete(
+ "group_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ group = response.parse()
+ assert_matches_type(GroupDeleteResponse, group, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.groups.with_streaming_response.delete(
+ "group_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ group = await response.parse()
+ assert_matches_type(GroupDeleteResponse, group, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `group_id` but received ''"):
+ await async_client.admin.organization.groups.with_raw_response.delete(
+ "",
+ )
diff --git a/tests/api_resources/admin/organization/test_invites.py b/tests/api_resources/admin/organization/test_invites.py
new file mode 100644
index 0000000000..ddad6e6e31
--- /dev/null
+++ b/tests/api_resources/admin/organization/test_invites.py
@@ -0,0 +1,339 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from openai import OpenAI, AsyncOpenAI
+from tests.utils import assert_matches_type
+from openai.pagination import SyncConversationCursorPage, AsyncConversationCursorPage
+from openai.types.admin.organization import Invite, InviteDeleteResponse
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestInvites:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ def test_method_create(self, client: OpenAI) -> None:
+ invite = client.admin.organization.invites.create(
+ email="email",
+ role="reader",
+ )
+ assert_matches_type(Invite, invite, path=["response"])
+
+ @parametrize
+ def test_method_create_with_all_params(self, client: OpenAI) -> None:
+ invite = client.admin.organization.invites.create(
+ email="email",
+ role="reader",
+ projects=[
+ {
+ "id": "id",
+ "role": "member",
+ }
+ ],
+ )
+ assert_matches_type(Invite, invite, path=["response"])
+
+ @parametrize
+ def test_raw_response_create(self, client: OpenAI) -> None:
+ response = client.admin.organization.invites.with_raw_response.create(
+ email="email",
+ role="reader",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ invite = response.parse()
+ assert_matches_type(Invite, invite, path=["response"])
+
+ @parametrize
+ def test_streaming_response_create(self, client: OpenAI) -> None:
+ with client.admin.organization.invites.with_streaming_response.create(
+ email="email",
+ role="reader",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ invite = response.parse()
+ assert_matches_type(Invite, invite, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_method_retrieve(self, client: OpenAI) -> None:
+ invite = client.admin.organization.invites.retrieve(
+ "invite_id",
+ )
+ assert_matches_type(Invite, invite, path=["response"])
+
+ @parametrize
+ def test_raw_response_retrieve(self, client: OpenAI) -> None:
+ response = client.admin.organization.invites.with_raw_response.retrieve(
+ "invite_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ invite = response.parse()
+ assert_matches_type(Invite, invite, path=["response"])
+
+ @parametrize
+ def test_streaming_response_retrieve(self, client: OpenAI) -> None:
+ with client.admin.organization.invites.with_streaming_response.retrieve(
+ "invite_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ invite = response.parse()
+ assert_matches_type(Invite, invite, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_retrieve(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `invite_id` but received ''"):
+ client.admin.organization.invites.with_raw_response.retrieve(
+ "",
+ )
+
+ @parametrize
+ def test_method_list(self, client: OpenAI) -> None:
+ invite = client.admin.organization.invites.list()
+ assert_matches_type(SyncConversationCursorPage[Invite], invite, path=["response"])
+
+ @parametrize
+ def test_method_list_with_all_params(self, client: OpenAI) -> None:
+ invite = client.admin.organization.invites.list(
+ after="after",
+ limit=0,
+ )
+ assert_matches_type(SyncConversationCursorPage[Invite], invite, path=["response"])
+
+ @parametrize
+ def test_raw_response_list(self, client: OpenAI) -> None:
+ response = client.admin.organization.invites.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ invite = response.parse()
+ assert_matches_type(SyncConversationCursorPage[Invite], invite, path=["response"])
+
+ @parametrize
+ def test_streaming_response_list(self, client: OpenAI) -> None:
+ with client.admin.organization.invites.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ invite = response.parse()
+ assert_matches_type(SyncConversationCursorPage[Invite], invite, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_method_delete(self, client: OpenAI) -> None:
+ invite = client.admin.organization.invites.delete(
+ "invite_id",
+ )
+ assert_matches_type(InviteDeleteResponse, invite, path=["response"])
+
+ @parametrize
+ def test_raw_response_delete(self, client: OpenAI) -> None:
+ response = client.admin.organization.invites.with_raw_response.delete(
+ "invite_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ invite = response.parse()
+ assert_matches_type(InviteDeleteResponse, invite, path=["response"])
+
+ @parametrize
+ def test_streaming_response_delete(self, client: OpenAI) -> None:
+ with client.admin.organization.invites.with_streaming_response.delete(
+ "invite_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ invite = response.parse()
+ assert_matches_type(InviteDeleteResponse, invite, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_delete(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `invite_id` but received ''"):
+ client.admin.organization.invites.with_raw_response.delete(
+ "",
+ )
+
+
+class TestAsyncInvites:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @parametrize
+ async def test_method_create(self, async_client: AsyncOpenAI) -> None:
+ invite = await async_client.admin.organization.invites.create(
+ email="email",
+ role="reader",
+ )
+ assert_matches_type(Invite, invite, path=["response"])
+
+ @parametrize
+ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ invite = await async_client.admin.organization.invites.create(
+ email="email",
+ role="reader",
+ projects=[
+ {
+ "id": "id",
+ "role": "member",
+ }
+ ],
+ )
+ assert_matches_type(Invite, invite, path=["response"])
+
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.invites.with_raw_response.create(
+ email="email",
+ role="reader",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ invite = response.parse()
+ assert_matches_type(Invite, invite, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.invites.with_streaming_response.create(
+ email="email",
+ role="reader",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ invite = await response.parse()
+ assert_matches_type(Invite, invite, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
+ invite = await async_client.admin.organization.invites.retrieve(
+ "invite_id",
+ )
+ assert_matches_type(Invite, invite, path=["response"])
+
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.invites.with_raw_response.retrieve(
+ "invite_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ invite = response.parse()
+ assert_matches_type(Invite, invite, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.invites.with_streaming_response.retrieve(
+ "invite_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ invite = await response.parse()
+ assert_matches_type(Invite, invite, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `invite_id` but received ''"):
+ await async_client.admin.organization.invites.with_raw_response.retrieve(
+ "",
+ )
+
+ @parametrize
+ async def test_method_list(self, async_client: AsyncOpenAI) -> None:
+ invite = await async_client.admin.organization.invites.list()
+ assert_matches_type(AsyncConversationCursorPage[Invite], invite, path=["response"])
+
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ invite = await async_client.admin.organization.invites.list(
+ after="after",
+ limit=0,
+ )
+ assert_matches_type(AsyncConversationCursorPage[Invite], invite, path=["response"])
+
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.invites.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ invite = response.parse()
+ assert_matches_type(AsyncConversationCursorPage[Invite], invite, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.invites.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ invite = await response.parse()
+ assert_matches_type(AsyncConversationCursorPage[Invite], invite, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
+ invite = await async_client.admin.organization.invites.delete(
+ "invite_id",
+ )
+ assert_matches_type(InviteDeleteResponse, invite, path=["response"])
+
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.invites.with_raw_response.delete(
+ "invite_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ invite = response.parse()
+ assert_matches_type(InviteDeleteResponse, invite, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.invites.with_streaming_response.delete(
+ "invite_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ invite = await response.parse()
+ assert_matches_type(InviteDeleteResponse, invite, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `invite_id` but received ''"):
+ await async_client.admin.organization.invites.with_raw_response.delete(
+ "",
+ )
diff --git a/tests/api_resources/admin/organization/test_projects.py b/tests/api_resources/admin/organization/test_projects.py
new file mode 100644
index 0000000000..5e07ff1496
--- /dev/null
+++ b/tests/api_resources/admin/organization/test_projects.py
@@ -0,0 +1,421 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from openai import OpenAI, AsyncOpenAI
+from tests.utils import assert_matches_type
+from openai.pagination import SyncConversationCursorPage, AsyncConversationCursorPage
+from openai.types.admin.organization import Project
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestProjects:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ def test_method_create(self, client: OpenAI) -> None:
+ project = client.admin.organization.projects.create(
+ name="name",
+ )
+ assert_matches_type(Project, project, path=["response"])
+
+ @parametrize
+ def test_method_create_with_all_params(self, client: OpenAI) -> None:
+ project = client.admin.organization.projects.create(
+ name="name",
+ external_key_id="external_key_id",
+ geography="geography",
+ )
+ assert_matches_type(Project, project, path=["response"])
+
+ @parametrize
+ def test_raw_response_create(self, client: OpenAI) -> None:
+ response = client.admin.organization.projects.with_raw_response.create(
+ name="name",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ project = response.parse()
+ assert_matches_type(Project, project, path=["response"])
+
+ @parametrize
+ def test_streaming_response_create(self, client: OpenAI) -> None:
+ with client.admin.organization.projects.with_streaming_response.create(
+ name="name",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ project = response.parse()
+ assert_matches_type(Project, project, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_method_retrieve(self, client: OpenAI) -> None:
+ project = client.admin.organization.projects.retrieve(
+ "project_id",
+ )
+ assert_matches_type(Project, project, path=["response"])
+
+ @parametrize
+ def test_raw_response_retrieve(self, client: OpenAI) -> None:
+ response = client.admin.organization.projects.with_raw_response.retrieve(
+ "project_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ project = response.parse()
+ assert_matches_type(Project, project, path=["response"])
+
+ @parametrize
+ def test_streaming_response_retrieve(self, client: OpenAI) -> None:
+ with client.admin.organization.projects.with_streaming_response.retrieve(
+ "project_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ project = response.parse()
+ assert_matches_type(Project, project, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_retrieve(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ client.admin.organization.projects.with_raw_response.retrieve(
+ "",
+ )
+
+ @parametrize
+ def test_method_update(self, client: OpenAI) -> None:
+ project = client.admin.organization.projects.update(
+ project_id="project_id",
+ )
+ assert_matches_type(Project, project, path=["response"])
+
+ @parametrize
+ def test_method_update_with_all_params(self, client: OpenAI) -> None:
+ project = client.admin.organization.projects.update(
+ project_id="project_id",
+ external_key_id="external_key_id",
+ geography="geography",
+ name="name",
+ )
+ assert_matches_type(Project, project, path=["response"])
+
+ @parametrize
+ def test_raw_response_update(self, client: OpenAI) -> None:
+ response = client.admin.organization.projects.with_raw_response.update(
+ project_id="project_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ project = response.parse()
+ assert_matches_type(Project, project, path=["response"])
+
+ @parametrize
+ def test_streaming_response_update(self, client: OpenAI) -> None:
+ with client.admin.organization.projects.with_streaming_response.update(
+ project_id="project_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ project = response.parse()
+ assert_matches_type(Project, project, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_update(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ client.admin.organization.projects.with_raw_response.update(
+ project_id="",
+ )
+
+ @parametrize
+ def test_method_list(self, client: OpenAI) -> None:
+ project = client.admin.organization.projects.list()
+ assert_matches_type(SyncConversationCursorPage[Project], project, path=["response"])
+
+ @parametrize
+ def test_method_list_with_all_params(self, client: OpenAI) -> None:
+ project = client.admin.organization.projects.list(
+ after="after",
+ include_archived=True,
+ limit=0,
+ )
+ assert_matches_type(SyncConversationCursorPage[Project], project, path=["response"])
+
+ @parametrize
+ def test_raw_response_list(self, client: OpenAI) -> None:
+ response = client.admin.organization.projects.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ project = response.parse()
+ assert_matches_type(SyncConversationCursorPage[Project], project, path=["response"])
+
+ @parametrize
+ def test_streaming_response_list(self, client: OpenAI) -> None:
+ with client.admin.organization.projects.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ project = response.parse()
+ assert_matches_type(SyncConversationCursorPage[Project], project, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_method_archive(self, client: OpenAI) -> None:
+ project = client.admin.organization.projects.archive(
+ "project_id",
+ )
+ assert_matches_type(Project, project, path=["response"])
+
+ @parametrize
+ def test_raw_response_archive(self, client: OpenAI) -> None:
+ response = client.admin.organization.projects.with_raw_response.archive(
+ "project_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ project = response.parse()
+ assert_matches_type(Project, project, path=["response"])
+
+ @parametrize
+ def test_streaming_response_archive(self, client: OpenAI) -> None:
+ with client.admin.organization.projects.with_streaming_response.archive(
+ "project_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ project = response.parse()
+ assert_matches_type(Project, project, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_archive(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ client.admin.organization.projects.with_raw_response.archive(
+ "",
+ )
+
+
+class TestAsyncProjects:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @parametrize
+ async def test_method_create(self, async_client: AsyncOpenAI) -> None:
+ project = await async_client.admin.organization.projects.create(
+ name="name",
+ )
+ assert_matches_type(Project, project, path=["response"])
+
+ @parametrize
+ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ project = await async_client.admin.organization.projects.create(
+ name="name",
+ external_key_id="external_key_id",
+ geography="geography",
+ )
+ assert_matches_type(Project, project, path=["response"])
+
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.projects.with_raw_response.create(
+ name="name",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ project = response.parse()
+ assert_matches_type(Project, project, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.projects.with_streaming_response.create(
+ name="name",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ project = await response.parse()
+ assert_matches_type(Project, project, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
+ project = await async_client.admin.organization.projects.retrieve(
+ "project_id",
+ )
+ assert_matches_type(Project, project, path=["response"])
+
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.projects.with_raw_response.retrieve(
+ "project_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ project = response.parse()
+ assert_matches_type(Project, project, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.projects.with_streaming_response.retrieve(
+ "project_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ project = await response.parse()
+ assert_matches_type(Project, project, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ await async_client.admin.organization.projects.with_raw_response.retrieve(
+ "",
+ )
+
+ @parametrize
+ async def test_method_update(self, async_client: AsyncOpenAI) -> None:
+ project = await async_client.admin.organization.projects.update(
+ project_id="project_id",
+ )
+ assert_matches_type(Project, project, path=["response"])
+
+ @parametrize
+ async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ project = await async_client.admin.organization.projects.update(
+ project_id="project_id",
+ external_key_id="external_key_id",
+ geography="geography",
+ name="name",
+ )
+ assert_matches_type(Project, project, path=["response"])
+
+ @parametrize
+ async def test_raw_response_update(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.projects.with_raw_response.update(
+ project_id="project_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ project = response.parse()
+ assert_matches_type(Project, project, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_update(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.projects.with_streaming_response.update(
+ project_id="project_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ project = await response.parse()
+ assert_matches_type(Project, project, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_update(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ await async_client.admin.organization.projects.with_raw_response.update(
+ project_id="",
+ )
+
+ @parametrize
+ async def test_method_list(self, async_client: AsyncOpenAI) -> None:
+ project = await async_client.admin.organization.projects.list()
+ assert_matches_type(AsyncConversationCursorPage[Project], project, path=["response"])
+
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ project = await async_client.admin.organization.projects.list(
+ after="after",
+ include_archived=True,
+ limit=0,
+ )
+ assert_matches_type(AsyncConversationCursorPage[Project], project, path=["response"])
+
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.projects.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ project = response.parse()
+ assert_matches_type(AsyncConversationCursorPage[Project], project, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.projects.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ project = await response.parse()
+ assert_matches_type(AsyncConversationCursorPage[Project], project, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_method_archive(self, async_client: AsyncOpenAI) -> None:
+ project = await async_client.admin.organization.projects.archive(
+ "project_id",
+ )
+ assert_matches_type(Project, project, path=["response"])
+
+ @parametrize
+ async def test_raw_response_archive(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.projects.with_raw_response.archive(
+ "project_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ project = response.parse()
+ assert_matches_type(Project, project, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_archive(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.projects.with_streaming_response.archive(
+ "project_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ project = await response.parse()
+ assert_matches_type(Project, project, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_archive(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ await async_client.admin.organization.projects.with_raw_response.archive(
+ "",
+ )
diff --git a/tests/api_resources/admin/organization/test_roles.py b/tests/api_resources/admin/organization/test_roles.py
new file mode 100644
index 0000000000..ee70020c23
--- /dev/null
+++ b/tests/api_resources/admin/organization/test_roles.py
@@ -0,0 +1,354 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from openai import OpenAI, AsyncOpenAI
+from tests.utils import assert_matches_type
+from openai.pagination import SyncNextCursorPage, AsyncNextCursorPage
+from openai.types.admin.organization import (
+ Role,
+ RoleDeleteResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestRoles:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ def test_method_create(self, client: OpenAI) -> None:
+ role = client.admin.organization.roles.create(
+ permissions=["string"],
+ role_name="role_name",
+ )
+ assert_matches_type(Role, role, path=["response"])
+
+ @parametrize
+ def test_method_create_with_all_params(self, client: OpenAI) -> None:
+ role = client.admin.organization.roles.create(
+ permissions=["string"],
+ role_name="role_name",
+ description="description",
+ )
+ assert_matches_type(Role, role, path=["response"])
+
+ @parametrize
+ def test_raw_response_create(self, client: OpenAI) -> None:
+ response = client.admin.organization.roles.with_raw_response.create(
+ permissions=["string"],
+ role_name="role_name",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ role = response.parse()
+ assert_matches_type(Role, role, path=["response"])
+
+ @parametrize
+ def test_streaming_response_create(self, client: OpenAI) -> None:
+ with client.admin.organization.roles.with_streaming_response.create(
+ permissions=["string"],
+ role_name="role_name",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ role = response.parse()
+ assert_matches_type(Role, role, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_method_update(self, client: OpenAI) -> None:
+ role = client.admin.organization.roles.update(
+ role_id="role_id",
+ )
+ assert_matches_type(Role, role, path=["response"])
+
+ @parametrize
+ def test_method_update_with_all_params(self, client: OpenAI) -> None:
+ role = client.admin.organization.roles.update(
+ role_id="role_id",
+ description="description",
+ permissions=["string"],
+ role_name="role_name",
+ )
+ assert_matches_type(Role, role, path=["response"])
+
+ @parametrize
+ def test_raw_response_update(self, client: OpenAI) -> None:
+ response = client.admin.organization.roles.with_raw_response.update(
+ role_id="role_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ role = response.parse()
+ assert_matches_type(Role, role, path=["response"])
+
+ @parametrize
+ def test_streaming_response_update(self, client: OpenAI) -> None:
+ with client.admin.organization.roles.with_streaming_response.update(
+ role_id="role_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ role = response.parse()
+ assert_matches_type(Role, role, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_update(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `role_id` but received ''"):
+ client.admin.organization.roles.with_raw_response.update(
+ role_id="",
+ )
+
+ @parametrize
+ def test_method_list(self, client: OpenAI) -> None:
+ role = client.admin.organization.roles.list()
+ assert_matches_type(SyncNextCursorPage[Role], role, path=["response"])
+
+ @parametrize
+ def test_method_list_with_all_params(self, client: OpenAI) -> None:
+ role = client.admin.organization.roles.list(
+ after="after",
+ limit=0,
+ order="asc",
+ )
+ assert_matches_type(SyncNextCursorPage[Role], role, path=["response"])
+
+ @parametrize
+ def test_raw_response_list(self, client: OpenAI) -> None:
+ response = client.admin.organization.roles.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ role = response.parse()
+ assert_matches_type(SyncNextCursorPage[Role], role, path=["response"])
+
+ @parametrize
+ def test_streaming_response_list(self, client: OpenAI) -> None:
+ with client.admin.organization.roles.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ role = response.parse()
+ assert_matches_type(SyncNextCursorPage[Role], role, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_method_delete(self, client: OpenAI) -> None:
+ role = client.admin.organization.roles.delete(
+ "role_id",
+ )
+ assert_matches_type(RoleDeleteResponse, role, path=["response"])
+
+ @parametrize
+ def test_raw_response_delete(self, client: OpenAI) -> None:
+ response = client.admin.organization.roles.with_raw_response.delete(
+ "role_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ role = response.parse()
+ assert_matches_type(RoleDeleteResponse, role, path=["response"])
+
+ @parametrize
+ def test_streaming_response_delete(self, client: OpenAI) -> None:
+ with client.admin.organization.roles.with_streaming_response.delete(
+ "role_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ role = response.parse()
+ assert_matches_type(RoleDeleteResponse, role, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_delete(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `role_id` but received ''"):
+ client.admin.organization.roles.with_raw_response.delete(
+ "",
+ )
+
+
+class TestAsyncRoles:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @parametrize
+ async def test_method_create(self, async_client: AsyncOpenAI) -> None:
+ role = await async_client.admin.organization.roles.create(
+ permissions=["string"],
+ role_name="role_name",
+ )
+ assert_matches_type(Role, role, path=["response"])
+
+ @parametrize
+ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ role = await async_client.admin.organization.roles.create(
+ permissions=["string"],
+ role_name="role_name",
+ description="description",
+ )
+ assert_matches_type(Role, role, path=["response"])
+
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.roles.with_raw_response.create(
+ permissions=["string"],
+ role_name="role_name",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ role = response.parse()
+ assert_matches_type(Role, role, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.roles.with_streaming_response.create(
+ permissions=["string"],
+ role_name="role_name",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ role = await response.parse()
+ assert_matches_type(Role, role, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_method_update(self, async_client: AsyncOpenAI) -> None:
+ role = await async_client.admin.organization.roles.update(
+ role_id="role_id",
+ )
+ assert_matches_type(Role, role, path=["response"])
+
+ @parametrize
+ async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ role = await async_client.admin.organization.roles.update(
+ role_id="role_id",
+ description="description",
+ permissions=["string"],
+ role_name="role_name",
+ )
+ assert_matches_type(Role, role, path=["response"])
+
+ @parametrize
+ async def test_raw_response_update(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.roles.with_raw_response.update(
+ role_id="role_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ role = response.parse()
+ assert_matches_type(Role, role, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_update(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.roles.with_streaming_response.update(
+ role_id="role_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ role = await response.parse()
+ assert_matches_type(Role, role, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_update(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `role_id` but received ''"):
+ await async_client.admin.organization.roles.with_raw_response.update(
+ role_id="",
+ )
+
+ @parametrize
+ async def test_method_list(self, async_client: AsyncOpenAI) -> None:
+ role = await async_client.admin.organization.roles.list()
+ assert_matches_type(AsyncNextCursorPage[Role], role, path=["response"])
+
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ role = await async_client.admin.organization.roles.list(
+ after="after",
+ limit=0,
+ order="asc",
+ )
+ assert_matches_type(AsyncNextCursorPage[Role], role, path=["response"])
+
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.roles.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ role = response.parse()
+ assert_matches_type(AsyncNextCursorPage[Role], role, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.roles.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ role = await response.parse()
+ assert_matches_type(AsyncNextCursorPage[Role], role, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
+ role = await async_client.admin.organization.roles.delete(
+ "role_id",
+ )
+ assert_matches_type(RoleDeleteResponse, role, path=["response"])
+
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.roles.with_raw_response.delete(
+ "role_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ role = response.parse()
+ assert_matches_type(RoleDeleteResponse, role, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.roles.with_streaming_response.delete(
+ "role_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ role = await response.parse()
+ assert_matches_type(RoleDeleteResponse, role, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `role_id` but received ''"):
+ await async_client.admin.organization.roles.with_raw_response.delete(
+ "",
+ )
diff --git a/tests/api_resources/admin/organization/test_usage.py b/tests/api_resources/admin/organization/test_usage.py
new file mode 100644
index 0000000000..f9c4a8e2c5
--- /dev/null
+++ b/tests/api_resources/admin/organization/test_usage.py
@@ -0,0 +1,870 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from openai import OpenAI, AsyncOpenAI
+from tests.utils import assert_matches_type
+from openai.types.admin.organization import (
+ UsageCostsResponse,
+ UsageImagesResponse,
+ UsageEmbeddingsResponse,
+ UsageCompletionsResponse,
+ UsageModerationsResponse,
+ UsageVectorStoresResponse,
+ UsageAudioSpeechesResponse,
+ UsageAudioTranscriptionsResponse,
+ UsageCodeInterpreterSessionsResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestUsage:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ def test_method_audio_speeches(self, client: OpenAI) -> None:
+ usage = client.admin.organization.usage.audio_speeches(
+ start_time=0,
+ )
+ assert_matches_type(UsageAudioSpeechesResponse, usage, path=["response"])
+
+ @parametrize
+ def test_method_audio_speeches_with_all_params(self, client: OpenAI) -> None:
+ usage = client.admin.organization.usage.audio_speeches(
+ start_time=0,
+ api_key_ids=["string"],
+ bucket_width="1m",
+ end_time=0,
+ group_by=["project_id"],
+ limit=0,
+ models=["string"],
+ page="page",
+ project_ids=["string"],
+ user_ids=["string"],
+ )
+ assert_matches_type(UsageAudioSpeechesResponse, usage, path=["response"])
+
+ @parametrize
+ def test_raw_response_audio_speeches(self, client: OpenAI) -> None:
+ response = client.admin.organization.usage.with_raw_response.audio_speeches(
+ start_time=0,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ usage = response.parse()
+ assert_matches_type(UsageAudioSpeechesResponse, usage, path=["response"])
+
+ @parametrize
+ def test_streaming_response_audio_speeches(self, client: OpenAI) -> None:
+ with client.admin.organization.usage.with_streaming_response.audio_speeches(
+ start_time=0,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ usage = response.parse()
+ assert_matches_type(UsageAudioSpeechesResponse, usage, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_method_audio_transcriptions(self, client: OpenAI) -> None:
+ usage = client.admin.organization.usage.audio_transcriptions(
+ start_time=0,
+ )
+ assert_matches_type(UsageAudioTranscriptionsResponse, usage, path=["response"])
+
+ @parametrize
+ def test_method_audio_transcriptions_with_all_params(self, client: OpenAI) -> None:
+ usage = client.admin.organization.usage.audio_transcriptions(
+ start_time=0,
+ api_key_ids=["string"],
+ bucket_width="1m",
+ end_time=0,
+ group_by=["project_id"],
+ limit=0,
+ models=["string"],
+ page="page",
+ project_ids=["string"],
+ user_ids=["string"],
+ )
+ assert_matches_type(UsageAudioTranscriptionsResponse, usage, path=["response"])
+
+ @parametrize
+ def test_raw_response_audio_transcriptions(self, client: OpenAI) -> None:
+ response = client.admin.organization.usage.with_raw_response.audio_transcriptions(
+ start_time=0,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ usage = response.parse()
+ assert_matches_type(UsageAudioTranscriptionsResponse, usage, path=["response"])
+
+ @parametrize
+ def test_streaming_response_audio_transcriptions(self, client: OpenAI) -> None:
+ with client.admin.organization.usage.with_streaming_response.audio_transcriptions(
+ start_time=0,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ usage = response.parse()
+ assert_matches_type(UsageAudioTranscriptionsResponse, usage, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_method_code_interpreter_sessions(self, client: OpenAI) -> None:
+ usage = client.admin.organization.usage.code_interpreter_sessions(
+ start_time=0,
+ )
+ assert_matches_type(UsageCodeInterpreterSessionsResponse, usage, path=["response"])
+
+ @parametrize
+ def test_method_code_interpreter_sessions_with_all_params(self, client: OpenAI) -> None:
+ usage = client.admin.organization.usage.code_interpreter_sessions(
+ start_time=0,
+ bucket_width="1m",
+ end_time=0,
+ group_by=["project_id"],
+ limit=0,
+ page="page",
+ project_ids=["string"],
+ )
+ assert_matches_type(UsageCodeInterpreterSessionsResponse, usage, path=["response"])
+
+ @parametrize
+ def test_raw_response_code_interpreter_sessions(self, client: OpenAI) -> None:
+ response = client.admin.organization.usage.with_raw_response.code_interpreter_sessions(
+ start_time=0,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ usage = response.parse()
+ assert_matches_type(UsageCodeInterpreterSessionsResponse, usage, path=["response"])
+
+ @parametrize
+ def test_streaming_response_code_interpreter_sessions(self, client: OpenAI) -> None:
+ with client.admin.organization.usage.with_streaming_response.code_interpreter_sessions(
+ start_time=0,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ usage = response.parse()
+ assert_matches_type(UsageCodeInterpreterSessionsResponse, usage, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_method_completions(self, client: OpenAI) -> None:
+ usage = client.admin.organization.usage.completions(
+ start_time=0,
+ )
+ assert_matches_type(UsageCompletionsResponse, usage, path=["response"])
+
+ @parametrize
+ def test_method_completions_with_all_params(self, client: OpenAI) -> None:
+ usage = client.admin.organization.usage.completions(
+ start_time=0,
+ api_key_ids=["string"],
+ batch=True,
+ bucket_width="1m",
+ end_time=0,
+ group_by=["project_id"],
+ limit=0,
+ models=["string"],
+ page="page",
+ project_ids=["string"],
+ user_ids=["string"],
+ )
+ assert_matches_type(UsageCompletionsResponse, usage, path=["response"])
+
+ @parametrize
+ def test_raw_response_completions(self, client: OpenAI) -> None:
+ response = client.admin.organization.usage.with_raw_response.completions(
+ start_time=0,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ usage = response.parse()
+ assert_matches_type(UsageCompletionsResponse, usage, path=["response"])
+
+ @parametrize
+ def test_streaming_response_completions(self, client: OpenAI) -> None:
+ with client.admin.organization.usage.with_streaming_response.completions(
+ start_time=0,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ usage = response.parse()
+ assert_matches_type(UsageCompletionsResponse, usage, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_method_costs(self, client: OpenAI) -> None:
+ usage = client.admin.organization.usage.costs(
+ start_time=0,
+ )
+ assert_matches_type(UsageCostsResponse, usage, path=["response"])
+
+ @parametrize
+ def test_method_costs_with_all_params(self, client: OpenAI) -> None:
+ usage = client.admin.organization.usage.costs(
+ start_time=0,
+ api_key_ids=["string"],
+ bucket_width="1d",
+ end_time=0,
+ group_by=["project_id"],
+ limit=0,
+ page="page",
+ project_ids=["string"],
+ )
+ assert_matches_type(UsageCostsResponse, usage, path=["response"])
+
+ @parametrize
+ def test_raw_response_costs(self, client: OpenAI) -> None:
+ response = client.admin.organization.usage.with_raw_response.costs(
+ start_time=0,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ usage = response.parse()
+ assert_matches_type(UsageCostsResponse, usage, path=["response"])
+
+ @parametrize
+ def test_streaming_response_costs(self, client: OpenAI) -> None:
+ with client.admin.organization.usage.with_streaming_response.costs(
+ start_time=0,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ usage = response.parse()
+ assert_matches_type(UsageCostsResponse, usage, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_method_embeddings(self, client: OpenAI) -> None:
+ usage = client.admin.organization.usage.embeddings(
+ start_time=0,
+ )
+ assert_matches_type(UsageEmbeddingsResponse, usage, path=["response"])
+
+ @parametrize
+ def test_method_embeddings_with_all_params(self, client: OpenAI) -> None:
+ usage = client.admin.organization.usage.embeddings(
+ start_time=0,
+ api_key_ids=["string"],
+ bucket_width="1m",
+ end_time=0,
+ group_by=["project_id"],
+ limit=0,
+ models=["string"],
+ page="page",
+ project_ids=["string"],
+ user_ids=["string"],
+ )
+ assert_matches_type(UsageEmbeddingsResponse, usage, path=["response"])
+
+ @parametrize
+ def test_raw_response_embeddings(self, client: OpenAI) -> None:
+ response = client.admin.organization.usage.with_raw_response.embeddings(
+ start_time=0,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ usage = response.parse()
+ assert_matches_type(UsageEmbeddingsResponse, usage, path=["response"])
+
+ @parametrize
+ def test_streaming_response_embeddings(self, client: OpenAI) -> None:
+ with client.admin.organization.usage.with_streaming_response.embeddings(
+ start_time=0,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ usage = response.parse()
+ assert_matches_type(UsageEmbeddingsResponse, usage, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_method_images(self, client: OpenAI) -> None:
+ usage = client.admin.organization.usage.images(
+ start_time=0,
+ )
+ assert_matches_type(UsageImagesResponse, usage, path=["response"])
+
+ @parametrize
+ def test_method_images_with_all_params(self, client: OpenAI) -> None:
+ usage = client.admin.organization.usage.images(
+ start_time=0,
+ api_key_ids=["string"],
+ bucket_width="1m",
+ end_time=0,
+ group_by=["project_id"],
+ limit=0,
+ models=["string"],
+ page="page",
+ project_ids=["string"],
+ sizes=["256x256"],
+ sources=["image.generation"],
+ user_ids=["string"],
+ )
+ assert_matches_type(UsageImagesResponse, usage, path=["response"])
+
+ @parametrize
+ def test_raw_response_images(self, client: OpenAI) -> None:
+ response = client.admin.organization.usage.with_raw_response.images(
+ start_time=0,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ usage = response.parse()
+ assert_matches_type(UsageImagesResponse, usage, path=["response"])
+
+ @parametrize
+ def test_streaming_response_images(self, client: OpenAI) -> None:
+ with client.admin.organization.usage.with_streaming_response.images(
+ start_time=0,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ usage = response.parse()
+ assert_matches_type(UsageImagesResponse, usage, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_method_moderations(self, client: OpenAI) -> None:
+ usage = client.admin.organization.usage.moderations(
+ start_time=0,
+ )
+ assert_matches_type(UsageModerationsResponse, usage, path=["response"])
+
+ @parametrize
+ def test_method_moderations_with_all_params(self, client: OpenAI) -> None:
+ usage = client.admin.organization.usage.moderations(
+ start_time=0,
+ api_key_ids=["string"],
+ bucket_width="1m",
+ end_time=0,
+ group_by=["project_id"],
+ limit=0,
+ models=["string"],
+ page="page",
+ project_ids=["string"],
+ user_ids=["string"],
+ )
+ assert_matches_type(UsageModerationsResponse, usage, path=["response"])
+
+ @parametrize
+ def test_raw_response_moderations(self, client: OpenAI) -> None:
+ response = client.admin.organization.usage.with_raw_response.moderations(
+ start_time=0,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ usage = response.parse()
+ assert_matches_type(UsageModerationsResponse, usage, path=["response"])
+
+ @parametrize
+ def test_streaming_response_moderations(self, client: OpenAI) -> None:
+ with client.admin.organization.usage.with_streaming_response.moderations(
+ start_time=0,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ usage = response.parse()
+ assert_matches_type(UsageModerationsResponse, usage, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_method_vector_stores(self, client: OpenAI) -> None:
+ usage = client.admin.organization.usage.vector_stores(
+ start_time=0,
+ )
+ assert_matches_type(UsageVectorStoresResponse, usage, path=["response"])
+
+ @parametrize
+ def test_method_vector_stores_with_all_params(self, client: OpenAI) -> None:
+ usage = client.admin.organization.usage.vector_stores(
+ start_time=0,
+ bucket_width="1m",
+ end_time=0,
+ group_by=["project_id"],
+ limit=0,
+ page="page",
+ project_ids=["string"],
+ )
+ assert_matches_type(UsageVectorStoresResponse, usage, path=["response"])
+
+ @parametrize
+ def test_raw_response_vector_stores(self, client: OpenAI) -> None:
+ response = client.admin.organization.usage.with_raw_response.vector_stores(
+ start_time=0,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ usage = response.parse()
+ assert_matches_type(UsageVectorStoresResponse, usage, path=["response"])
+
+ @parametrize
+ def test_streaming_response_vector_stores(self, client: OpenAI) -> None:
+ with client.admin.organization.usage.with_streaming_response.vector_stores(
+ start_time=0,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ usage = response.parse()
+ assert_matches_type(UsageVectorStoresResponse, usage, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+
+class TestAsyncUsage:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @parametrize
+ async def test_method_audio_speeches(self, async_client: AsyncOpenAI) -> None:
+ usage = await async_client.admin.organization.usage.audio_speeches(
+ start_time=0,
+ )
+ assert_matches_type(UsageAudioSpeechesResponse, usage, path=["response"])
+
+ @parametrize
+ async def test_method_audio_speeches_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ usage = await async_client.admin.organization.usage.audio_speeches(
+ start_time=0,
+ api_key_ids=["string"],
+ bucket_width="1m",
+ end_time=0,
+ group_by=["project_id"],
+ limit=0,
+ models=["string"],
+ page="page",
+ project_ids=["string"],
+ user_ids=["string"],
+ )
+ assert_matches_type(UsageAudioSpeechesResponse, usage, path=["response"])
+
+ @parametrize
+ async def test_raw_response_audio_speeches(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.usage.with_raw_response.audio_speeches(
+ start_time=0,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ usage = response.parse()
+ assert_matches_type(UsageAudioSpeechesResponse, usage, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_audio_speeches(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.usage.with_streaming_response.audio_speeches(
+ start_time=0,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ usage = await response.parse()
+ assert_matches_type(UsageAudioSpeechesResponse, usage, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_method_audio_transcriptions(self, async_client: AsyncOpenAI) -> None:
+ usage = await async_client.admin.organization.usage.audio_transcriptions(
+ start_time=0,
+ )
+ assert_matches_type(UsageAudioTranscriptionsResponse, usage, path=["response"])
+
+ @parametrize
+ async def test_method_audio_transcriptions_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ usage = await async_client.admin.organization.usage.audio_transcriptions(
+ start_time=0,
+ api_key_ids=["string"],
+ bucket_width="1m",
+ end_time=0,
+ group_by=["project_id"],
+ limit=0,
+ models=["string"],
+ page="page",
+ project_ids=["string"],
+ user_ids=["string"],
+ )
+ assert_matches_type(UsageAudioTranscriptionsResponse, usage, path=["response"])
+
+ @parametrize
+ async def test_raw_response_audio_transcriptions(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.usage.with_raw_response.audio_transcriptions(
+ start_time=0,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ usage = response.parse()
+ assert_matches_type(UsageAudioTranscriptionsResponse, usage, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_audio_transcriptions(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.usage.with_streaming_response.audio_transcriptions(
+ start_time=0,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ usage = await response.parse()
+ assert_matches_type(UsageAudioTranscriptionsResponse, usage, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_method_code_interpreter_sessions(self, async_client: AsyncOpenAI) -> None:
+ usage = await async_client.admin.organization.usage.code_interpreter_sessions(
+ start_time=0,
+ )
+ assert_matches_type(UsageCodeInterpreterSessionsResponse, usage, path=["response"])
+
+ @parametrize
+ async def test_method_code_interpreter_sessions_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ usage = await async_client.admin.organization.usage.code_interpreter_sessions(
+ start_time=0,
+ bucket_width="1m",
+ end_time=0,
+ group_by=["project_id"],
+ limit=0,
+ page="page",
+ project_ids=["string"],
+ )
+ assert_matches_type(UsageCodeInterpreterSessionsResponse, usage, path=["response"])
+
+ @parametrize
+ async def test_raw_response_code_interpreter_sessions(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.usage.with_raw_response.code_interpreter_sessions(
+ start_time=0,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ usage = response.parse()
+ assert_matches_type(UsageCodeInterpreterSessionsResponse, usage, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_code_interpreter_sessions(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.usage.with_streaming_response.code_interpreter_sessions(
+ start_time=0,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ usage = await response.parse()
+ assert_matches_type(UsageCodeInterpreterSessionsResponse, usage, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_method_completions(self, async_client: AsyncOpenAI) -> None:
+ usage = await async_client.admin.organization.usage.completions(
+ start_time=0,
+ )
+ assert_matches_type(UsageCompletionsResponse, usage, path=["response"])
+
+ @parametrize
+ async def test_method_completions_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ usage = await async_client.admin.organization.usage.completions(
+ start_time=0,
+ api_key_ids=["string"],
+ batch=True,
+ bucket_width="1m",
+ end_time=0,
+ group_by=["project_id"],
+ limit=0,
+ models=["string"],
+ page="page",
+ project_ids=["string"],
+ user_ids=["string"],
+ )
+ assert_matches_type(UsageCompletionsResponse, usage, path=["response"])
+
+ @parametrize
+ async def test_raw_response_completions(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.usage.with_raw_response.completions(
+ start_time=0,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ usage = response.parse()
+ assert_matches_type(UsageCompletionsResponse, usage, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_completions(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.usage.with_streaming_response.completions(
+ start_time=0,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ usage = await response.parse()
+ assert_matches_type(UsageCompletionsResponse, usage, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_method_costs(self, async_client: AsyncOpenAI) -> None:
+ usage = await async_client.admin.organization.usage.costs(
+ start_time=0,
+ )
+ assert_matches_type(UsageCostsResponse, usage, path=["response"])
+
+ @parametrize
+ async def test_method_costs_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ usage = await async_client.admin.organization.usage.costs(
+ start_time=0,
+ api_key_ids=["string"],
+ bucket_width="1d",
+ end_time=0,
+ group_by=["project_id"],
+ limit=0,
+ page="page",
+ project_ids=["string"],
+ )
+ assert_matches_type(UsageCostsResponse, usage, path=["response"])
+
+ @parametrize
+ async def test_raw_response_costs(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.usage.with_raw_response.costs(
+ start_time=0,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ usage = response.parse()
+ assert_matches_type(UsageCostsResponse, usage, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_costs(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.usage.with_streaming_response.costs(
+ start_time=0,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ usage = await response.parse()
+ assert_matches_type(UsageCostsResponse, usage, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_method_embeddings(self, async_client: AsyncOpenAI) -> None:
+ usage = await async_client.admin.organization.usage.embeddings(
+ start_time=0,
+ )
+ assert_matches_type(UsageEmbeddingsResponse, usage, path=["response"])
+
+ @parametrize
+ async def test_method_embeddings_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ usage = await async_client.admin.organization.usage.embeddings(
+ start_time=0,
+ api_key_ids=["string"],
+ bucket_width="1m",
+ end_time=0,
+ group_by=["project_id"],
+ limit=0,
+ models=["string"],
+ page="page",
+ project_ids=["string"],
+ user_ids=["string"],
+ )
+ assert_matches_type(UsageEmbeddingsResponse, usage, path=["response"])
+
+ @parametrize
+ async def test_raw_response_embeddings(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.usage.with_raw_response.embeddings(
+ start_time=0,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ usage = response.parse()
+ assert_matches_type(UsageEmbeddingsResponse, usage, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_embeddings(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.usage.with_streaming_response.embeddings(
+ start_time=0,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ usage = await response.parse()
+ assert_matches_type(UsageEmbeddingsResponse, usage, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_method_images(self, async_client: AsyncOpenAI) -> None:
+ usage = await async_client.admin.organization.usage.images(
+ start_time=0,
+ )
+ assert_matches_type(UsageImagesResponse, usage, path=["response"])
+
+ @parametrize
+ async def test_method_images_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ usage = await async_client.admin.organization.usage.images(
+ start_time=0,
+ api_key_ids=["string"],
+ bucket_width="1m",
+ end_time=0,
+ group_by=["project_id"],
+ limit=0,
+ models=["string"],
+ page="page",
+ project_ids=["string"],
+ sizes=["256x256"],
+ sources=["image.generation"],
+ user_ids=["string"],
+ )
+ assert_matches_type(UsageImagesResponse, usage, path=["response"])
+
+ @parametrize
+ async def test_raw_response_images(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.usage.with_raw_response.images(
+ start_time=0,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ usage = response.parse()
+ assert_matches_type(UsageImagesResponse, usage, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_images(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.usage.with_streaming_response.images(
+ start_time=0,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ usage = await response.parse()
+ assert_matches_type(UsageImagesResponse, usage, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_method_moderations(self, async_client: AsyncOpenAI) -> None:
+ usage = await async_client.admin.organization.usage.moderations(
+ start_time=0,
+ )
+ assert_matches_type(UsageModerationsResponse, usage, path=["response"])
+
+ @parametrize
+ async def test_method_moderations_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ usage = await async_client.admin.organization.usage.moderations(
+ start_time=0,
+ api_key_ids=["string"],
+ bucket_width="1m",
+ end_time=0,
+ group_by=["project_id"],
+ limit=0,
+ models=["string"],
+ page="page",
+ project_ids=["string"],
+ user_ids=["string"],
+ )
+ assert_matches_type(UsageModerationsResponse, usage, path=["response"])
+
+ @parametrize
+ async def test_raw_response_moderations(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.usage.with_raw_response.moderations(
+ start_time=0,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ usage = response.parse()
+ assert_matches_type(UsageModerationsResponse, usage, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_moderations(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.usage.with_streaming_response.moderations(
+ start_time=0,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ usage = await response.parse()
+ assert_matches_type(UsageModerationsResponse, usage, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_method_vector_stores(self, async_client: AsyncOpenAI) -> None:
+ usage = await async_client.admin.organization.usage.vector_stores(
+ start_time=0,
+ )
+ assert_matches_type(UsageVectorStoresResponse, usage, path=["response"])
+
+ @parametrize
+ async def test_method_vector_stores_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ usage = await async_client.admin.organization.usage.vector_stores(
+ start_time=0,
+ bucket_width="1m",
+ end_time=0,
+ group_by=["project_id"],
+ limit=0,
+ page="page",
+ project_ids=["string"],
+ )
+ assert_matches_type(UsageVectorStoresResponse, usage, path=["response"])
+
+ @parametrize
+ async def test_raw_response_vector_stores(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.usage.with_raw_response.vector_stores(
+ start_time=0,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ usage = response.parse()
+ assert_matches_type(UsageVectorStoresResponse, usage, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_vector_stores(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.usage.with_streaming_response.vector_stores(
+ start_time=0,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ usage = await response.parse()
+ assert_matches_type(UsageVectorStoresResponse, usage, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/admin/organization/test_users.py b/tests/api_resources/admin/organization/test_users.py
new file mode 100644
index 0000000000..308b199bc6
--- /dev/null
+++ b/tests/api_resources/admin/organization/test_users.py
@@ -0,0 +1,343 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from openai import OpenAI, AsyncOpenAI
+from tests.utils import assert_matches_type
+from openai.pagination import SyncConversationCursorPage, AsyncConversationCursorPage
+from openai.types.admin.organization import OrganizationUser, UserDeleteResponse
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestUsers:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ def test_method_retrieve(self, client: OpenAI) -> None:
+ user = client.admin.organization.users.retrieve(
+ "user_id",
+ )
+ assert_matches_type(OrganizationUser, user, path=["response"])
+
+ @parametrize
+ def test_raw_response_retrieve(self, client: OpenAI) -> None:
+ response = client.admin.organization.users.with_raw_response.retrieve(
+ "user_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ user = response.parse()
+ assert_matches_type(OrganizationUser, user, path=["response"])
+
+ @parametrize
+ def test_streaming_response_retrieve(self, client: OpenAI) -> None:
+ with client.admin.organization.users.with_streaming_response.retrieve(
+ "user_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ user = response.parse()
+ assert_matches_type(OrganizationUser, user, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_retrieve(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
+ client.admin.organization.users.with_raw_response.retrieve(
+ "",
+ )
+
+ @parametrize
+ def test_method_update(self, client: OpenAI) -> None:
+ user = client.admin.organization.users.update(
+ user_id="user_id",
+ )
+ assert_matches_type(OrganizationUser, user, path=["response"])
+
+ @parametrize
+ def test_method_update_with_all_params(self, client: OpenAI) -> None:
+ user = client.admin.organization.users.update(
+ user_id="user_id",
+ developer_persona="developer_persona",
+ role="role",
+ role_id="role_id",
+ technical_level="technical_level",
+ )
+ assert_matches_type(OrganizationUser, user, path=["response"])
+
+ @parametrize
+ def test_raw_response_update(self, client: OpenAI) -> None:
+ response = client.admin.organization.users.with_raw_response.update(
+ user_id="user_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ user = response.parse()
+ assert_matches_type(OrganizationUser, user, path=["response"])
+
+ @parametrize
+ def test_streaming_response_update(self, client: OpenAI) -> None:
+ with client.admin.organization.users.with_streaming_response.update(
+ user_id="user_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ user = response.parse()
+ assert_matches_type(OrganizationUser, user, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_update(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
+ client.admin.organization.users.with_raw_response.update(
+ user_id="",
+ )
+
+ @parametrize
+ def test_method_list(self, client: OpenAI) -> None:
+ user = client.admin.organization.users.list()
+ assert_matches_type(SyncConversationCursorPage[OrganizationUser], user, path=["response"])
+
+ @parametrize
+ def test_method_list_with_all_params(self, client: OpenAI) -> None:
+ user = client.admin.organization.users.list(
+ after="after",
+ emails=["string"],
+ limit=0,
+ )
+ assert_matches_type(SyncConversationCursorPage[OrganizationUser], user, path=["response"])
+
+ @parametrize
+ def test_raw_response_list(self, client: OpenAI) -> None:
+ response = client.admin.organization.users.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ user = response.parse()
+ assert_matches_type(SyncConversationCursorPage[OrganizationUser], user, path=["response"])
+
+ @parametrize
+ def test_streaming_response_list(self, client: OpenAI) -> None:
+ with client.admin.organization.users.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ user = response.parse()
+ assert_matches_type(SyncConversationCursorPage[OrganizationUser], user, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_method_delete(self, client: OpenAI) -> None:
+ user = client.admin.organization.users.delete(
+ "user_id",
+ )
+ assert_matches_type(UserDeleteResponse, user, path=["response"])
+
+ @parametrize
+ def test_raw_response_delete(self, client: OpenAI) -> None:
+ response = client.admin.organization.users.with_raw_response.delete(
+ "user_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ user = response.parse()
+ assert_matches_type(UserDeleteResponse, user, path=["response"])
+
+ @parametrize
+ def test_streaming_response_delete(self, client: OpenAI) -> None:
+ with client.admin.organization.users.with_streaming_response.delete(
+ "user_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ user = response.parse()
+ assert_matches_type(UserDeleteResponse, user, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_delete(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
+ client.admin.organization.users.with_raw_response.delete(
+ "",
+ )
+
+
+class TestAsyncUsers:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
+ user = await async_client.admin.organization.users.retrieve(
+ "user_id",
+ )
+ assert_matches_type(OrganizationUser, user, path=["response"])
+
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.users.with_raw_response.retrieve(
+ "user_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ user = response.parse()
+ assert_matches_type(OrganizationUser, user, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.users.with_streaming_response.retrieve(
+ "user_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ user = await response.parse()
+ assert_matches_type(OrganizationUser, user, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
+ await async_client.admin.organization.users.with_raw_response.retrieve(
+ "",
+ )
+
+ @parametrize
+ async def test_method_update(self, async_client: AsyncOpenAI) -> None:
+ user = await async_client.admin.organization.users.update(
+ user_id="user_id",
+ )
+ assert_matches_type(OrganizationUser, user, path=["response"])
+
+ @parametrize
+ async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ user = await async_client.admin.organization.users.update(
+ user_id="user_id",
+ developer_persona="developer_persona",
+ role="role",
+ role_id="role_id",
+ technical_level="technical_level",
+ )
+ assert_matches_type(OrganizationUser, user, path=["response"])
+
+ @parametrize
+ async def test_raw_response_update(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.users.with_raw_response.update(
+ user_id="user_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ user = response.parse()
+ assert_matches_type(OrganizationUser, user, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_update(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.users.with_streaming_response.update(
+ user_id="user_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ user = await response.parse()
+ assert_matches_type(OrganizationUser, user, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_update(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
+ await async_client.admin.organization.users.with_raw_response.update(
+ user_id="",
+ )
+
+ @parametrize
+ async def test_method_list(self, async_client: AsyncOpenAI) -> None:
+ user = await async_client.admin.organization.users.list()
+ assert_matches_type(AsyncConversationCursorPage[OrganizationUser], user, path=["response"])
+
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ user = await async_client.admin.organization.users.list(
+ after="after",
+ emails=["string"],
+ limit=0,
+ )
+ assert_matches_type(AsyncConversationCursorPage[OrganizationUser], user, path=["response"])
+
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.users.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ user = response.parse()
+ assert_matches_type(AsyncConversationCursorPage[OrganizationUser], user, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.users.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ user = await response.parse()
+ assert_matches_type(AsyncConversationCursorPage[OrganizationUser], user, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
+ user = await async_client.admin.organization.users.delete(
+ "user_id",
+ )
+ assert_matches_type(UserDeleteResponse, user, path=["response"])
+
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.users.with_raw_response.delete(
+ "user_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ user = response.parse()
+ assert_matches_type(UserDeleteResponse, user, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.users.with_streaming_response.delete(
+ "user_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ user = await response.parse()
+ assert_matches_type(UserDeleteResponse, user, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
+ await async_client.admin.organization.users.with_raw_response.delete(
+ "",
+ )
diff --git a/tests/api_resources/admin/organization/users/__init__.py b/tests/api_resources/admin/organization/users/__init__.py
new file mode 100644
index 0000000000..fd8019a9a1
--- /dev/null
+++ b/tests/api_resources/admin/organization/users/__init__.py
@@ -0,0 +1 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/admin/organization/users/test_roles.py b/tests/api_resources/admin/organization/users/test_roles.py
new file mode 100644
index 0000000000..2455a38cff
--- /dev/null
+++ b/tests/api_resources/admin/organization/users/test_roles.py
@@ -0,0 +1,305 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from openai import OpenAI, AsyncOpenAI
+from tests.utils import assert_matches_type
+from openai.pagination import SyncNextCursorPage, AsyncNextCursorPage
+from openai.types.admin.organization.users import (
+ RoleListResponse,
+ RoleCreateResponse,
+ RoleDeleteResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestRoles:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ def test_method_create(self, client: OpenAI) -> None:
+ role = client.admin.organization.users.roles.create(
+ user_id="user_id",
+ role_id="role_id",
+ )
+ assert_matches_type(RoleCreateResponse, role, path=["response"])
+
+ @parametrize
+ def test_raw_response_create(self, client: OpenAI) -> None:
+ response = client.admin.organization.users.roles.with_raw_response.create(
+ user_id="user_id",
+ role_id="role_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ role = response.parse()
+ assert_matches_type(RoleCreateResponse, role, path=["response"])
+
+ @parametrize
+ def test_streaming_response_create(self, client: OpenAI) -> None:
+ with client.admin.organization.users.roles.with_streaming_response.create(
+ user_id="user_id",
+ role_id="role_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ role = response.parse()
+ assert_matches_type(RoleCreateResponse, role, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_create(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
+ client.admin.organization.users.roles.with_raw_response.create(
+ user_id="",
+ role_id="role_id",
+ )
+
+ @parametrize
+ def test_method_list(self, client: OpenAI) -> None:
+ role = client.admin.organization.users.roles.list(
+ user_id="user_id",
+ )
+ assert_matches_type(SyncNextCursorPage[RoleListResponse], role, path=["response"])
+
+ @parametrize
+ def test_method_list_with_all_params(self, client: OpenAI) -> None:
+ role = client.admin.organization.users.roles.list(
+ user_id="user_id",
+ after="after",
+ limit=0,
+ order="asc",
+ )
+ assert_matches_type(SyncNextCursorPage[RoleListResponse], role, path=["response"])
+
+ @parametrize
+ def test_raw_response_list(self, client: OpenAI) -> None:
+ response = client.admin.organization.users.roles.with_raw_response.list(
+ user_id="user_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ role = response.parse()
+ assert_matches_type(SyncNextCursorPage[RoleListResponse], role, path=["response"])
+
+ @parametrize
+ def test_streaming_response_list(self, client: OpenAI) -> None:
+ with client.admin.organization.users.roles.with_streaming_response.list(
+ user_id="user_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ role = response.parse()
+ assert_matches_type(SyncNextCursorPage[RoleListResponse], role, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_list(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
+ client.admin.organization.users.roles.with_raw_response.list(
+ user_id="",
+ )
+
+ @parametrize
+ def test_method_delete(self, client: OpenAI) -> None:
+ role = client.admin.organization.users.roles.delete(
+ role_id="role_id",
+ user_id="user_id",
+ )
+ assert_matches_type(RoleDeleteResponse, role, path=["response"])
+
+ @parametrize
+ def test_raw_response_delete(self, client: OpenAI) -> None:
+ response = client.admin.organization.users.roles.with_raw_response.delete(
+ role_id="role_id",
+ user_id="user_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ role = response.parse()
+ assert_matches_type(RoleDeleteResponse, role, path=["response"])
+
+ @parametrize
+ def test_streaming_response_delete(self, client: OpenAI) -> None:
+ with client.admin.organization.users.roles.with_streaming_response.delete(
+ role_id="role_id",
+ user_id="user_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ role = response.parse()
+ assert_matches_type(RoleDeleteResponse, role, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_delete(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
+ client.admin.organization.users.roles.with_raw_response.delete(
+ role_id="role_id",
+ user_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `role_id` but received ''"):
+ client.admin.organization.users.roles.with_raw_response.delete(
+ role_id="",
+ user_id="user_id",
+ )
+
+
+class TestAsyncRoles:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @parametrize
+ async def test_method_create(self, async_client: AsyncOpenAI) -> None:
+ role = await async_client.admin.organization.users.roles.create(
+ user_id="user_id",
+ role_id="role_id",
+ )
+ assert_matches_type(RoleCreateResponse, role, path=["response"])
+
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.users.roles.with_raw_response.create(
+ user_id="user_id",
+ role_id="role_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ role = response.parse()
+ assert_matches_type(RoleCreateResponse, role, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.users.roles.with_streaming_response.create(
+ user_id="user_id",
+ role_id="role_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ role = await response.parse()
+ assert_matches_type(RoleCreateResponse, role, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_create(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
+ await async_client.admin.organization.users.roles.with_raw_response.create(
+ user_id="",
+ role_id="role_id",
+ )
+
+ @parametrize
+ async def test_method_list(self, async_client: AsyncOpenAI) -> None:
+ role = await async_client.admin.organization.users.roles.list(
+ user_id="user_id",
+ )
+ assert_matches_type(AsyncNextCursorPage[RoleListResponse], role, path=["response"])
+
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ role = await async_client.admin.organization.users.roles.list(
+ user_id="user_id",
+ after="after",
+ limit=0,
+ order="asc",
+ )
+ assert_matches_type(AsyncNextCursorPage[RoleListResponse], role, path=["response"])
+
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.users.roles.with_raw_response.list(
+ user_id="user_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ role = response.parse()
+ assert_matches_type(AsyncNextCursorPage[RoleListResponse], role, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.users.roles.with_streaming_response.list(
+ user_id="user_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ role = await response.parse()
+ assert_matches_type(AsyncNextCursorPage[RoleListResponse], role, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_list(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
+ await async_client.admin.organization.users.roles.with_raw_response.list(
+ user_id="",
+ )
+
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
+ role = await async_client.admin.organization.users.roles.delete(
+ role_id="role_id",
+ user_id="user_id",
+ )
+ assert_matches_type(RoleDeleteResponse, role, path=["response"])
+
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.admin.organization.users.roles.with_raw_response.delete(
+ role_id="role_id",
+ user_id="user_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ role = response.parse()
+ assert_matches_type(RoleDeleteResponse, role, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.admin.organization.users.roles.with_streaming_response.delete(
+ role_id="role_id",
+ user_id="user_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ role = await response.parse()
+ assert_matches_type(RoleDeleteResponse, role, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
+ await async_client.admin.organization.users.roles.with_raw_response.delete(
+ role_id="role_id",
+ user_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `role_id` but received ''"):
+ await async_client.admin.organization.users.roles.with_raw_response.delete(
+ role_id="",
+ user_id="user_id",
+ )
diff --git a/tests/conftest.py b/tests/conftest.py
index 408bcf76c0..1042fe59d9 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -46,6 +46,7 @@ def pytest_collection_modifyitems(items: list[pytest.Function]) -> None:
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
api_key = "My API Key"
+admin_api_key = "My Admin API Key"
@pytest.fixture(scope="session")
@@ -54,7 +55,9 @@ def client(request: FixtureRequest) -> Iterator[OpenAI]:
if not isinstance(strict, bool):
raise TypeError(f"Unexpected fixture parameter type {type(strict)}, expected {bool}")
- with OpenAI(base_url=base_url, api_key=api_key, _strict_response_validation=strict) as client:
+ with OpenAI(
+ base_url=base_url, api_key=api_key, admin_api_key=admin_api_key, _strict_response_validation=strict
+ ) as client:
yield client
@@ -79,6 +82,10 @@ async def async_client(request: FixtureRequest) -> AsyncIterator[AsyncOpenAI]:
raise TypeError(f"Unexpected fixture parameter type {type(param)}, expected bool or dict")
async with AsyncOpenAI(
- base_url=base_url, api_key=api_key, _strict_response_validation=strict, http_client=http_client
+ base_url=base_url,
+ api_key=api_key,
+ admin_api_key=admin_api_key,
+ _strict_response_validation=strict,
+ http_client=http_client,
) as client:
yield client
diff --git a/tests/lib/test_azure.py b/tests/lib/test_azure.py
index 52c24eba27..3e1d783e2c 100644
--- a/tests/lib/test_azure.py
+++ b/tests/lib/test_azure.py
@@ -8,6 +8,9 @@
import pytest
from respx import MockRouter
+from openai import OpenAIError
+from tests.utils import update_env
+from openai._types import Omit
from openai._utils import SensitiveHeadersFilter, is_dict
from openai._models import FinalRequestOptions
from openai.lib.azure import AzureOpenAI, AsyncAzureOpenAI
@@ -76,6 +79,154 @@ def test_client_copying_override_options(client: Client) -> None:
assert copied._custom_query == {"api-version": "2022-05-01"}
+def test_enforce_credentials_false_sync() -> None:
+ with update_env(AZURE_OPENAI_API_KEY=Omit(), AZURE_OPENAI_AD_TOKEN=Omit()):
+ AzureOpenAI(
+ api_version="2024-02-01",
+ api_key=None,
+ azure_ad_token=None,
+ azure_ad_token_provider=None,
+ azure_endpoint="https://example-resource.azure.openai.com",
+ _enforce_credentials=False,
+ )
+
+
+@pytest.mark.respx()
+def test_enforce_credentials_false_sync_uses_default_api_key_header(respx_mock: MockRouter) -> None:
+ respx_mock.post(
+ "https://example-resource.azure.openai.com/openai/deployments/gpt-4/chat/completions?api-version=2024-02-01"
+ ).mock(return_value=httpx.Response(200, json={"model": "gpt-4"}))
+
+ with update_env(AZURE_OPENAI_API_KEY=Omit(), AZURE_OPENAI_AD_TOKEN=Omit()):
+ client = AzureOpenAI(
+ api_version="2024-02-01",
+ api_key=None,
+ azure_ad_token=None,
+ azure_ad_token_provider=None,
+ azure_endpoint="https://example-resource.azure.openai.com",
+ default_headers={"api-key": "manual-api-key"},
+ _enforce_credentials=False,
+ )
+ client.chat.completions.create(messages=[], model="gpt-4")
+
+ calls = cast("list[MockRequestCall]", respx_mock.calls)
+ assert calls[0].request.headers.get("api-key") == "manual-api-key"
+ assert calls[0].request.headers.get("Authorization") is None
+
+
+@pytest.mark.respx()
+def test_enforce_credentials_false_sync_uses_request_authorization_header(respx_mock: MockRouter) -> None:
+ respx_mock.post(
+ "https://example-resource.azure.openai.com/openai/deployments/gpt-4/chat/completions?api-version=2024-02-01"
+ ).mock(return_value=httpx.Response(200, json={"model": "gpt-4"}))
+
+ with update_env(AZURE_OPENAI_API_KEY=Omit(), AZURE_OPENAI_AD_TOKEN=Omit()):
+ client = AzureOpenAI(
+ api_version="2024-02-01",
+ api_key=None,
+ azure_ad_token=None,
+ azure_ad_token_provider=None,
+ azure_endpoint="https://example-resource.azure.openai.com",
+ _enforce_credentials=False,
+ )
+ client.chat.completions.create(
+ messages=[],
+ model="gpt-4",
+ extra_headers={"authorization": "Bearer manual-token"},
+ )
+
+ calls = cast("list[MockRequestCall]", respx_mock.calls)
+ assert calls[0].request.headers.get("Authorization") == "Bearer manual-token"
+ assert calls[0].request.headers.get("api-key") is None
+
+
+def test_enforce_credentials_true_sync() -> None:
+ with update_env(AZURE_OPENAI_API_KEY=Omit(), AZURE_OPENAI_AD_TOKEN=Omit()):
+ with pytest.raises(OpenAIError, match="Missing credentials"):
+ AzureOpenAI(
+ api_version="2024-02-01",
+ api_key=None,
+ azure_ad_token=None,
+ azure_ad_token_provider=None,
+ azure_endpoint="https://example-resource.azure.openai.com",
+ )
+
+
+def test_enforce_credentials_false_async() -> None:
+ with update_env(AZURE_OPENAI_API_KEY=Omit(), AZURE_OPENAI_AD_TOKEN=Omit()):
+ AsyncAzureOpenAI(
+ api_version="2024-02-01",
+ api_key=None,
+ azure_ad_token=None,
+ azure_ad_token_provider=None,
+ azure_endpoint="https://example-resource.azure.openai.com",
+ _enforce_credentials=False,
+ )
+
+
+@pytest.mark.asyncio
+@pytest.mark.respx()
+async def test_enforce_credentials_false_async_uses_default_api_key_header(respx_mock: MockRouter) -> None:
+ respx_mock.post(
+ "https://example-resource.azure.openai.com/openai/deployments/gpt-4/chat/completions?api-version=2024-02-01"
+ ).mock(return_value=httpx.Response(200, json={"model": "gpt-4"}))
+
+ with update_env(AZURE_OPENAI_API_KEY=Omit(), AZURE_OPENAI_AD_TOKEN=Omit()):
+ client = AsyncAzureOpenAI(
+ api_version="2024-02-01",
+ api_key=None,
+ azure_ad_token=None,
+ azure_ad_token_provider=None,
+ azure_endpoint="https://example-resource.azure.openai.com",
+ default_headers={"api-key": "manual-api-key"},
+ _enforce_credentials=False,
+ )
+ await client.chat.completions.create(messages=[], model="gpt-4")
+
+ calls = cast("list[MockRequestCall]", respx_mock.calls)
+ assert calls[0].request.headers.get("api-key") == "manual-api-key"
+ assert calls[0].request.headers.get("Authorization") is None
+
+
+@pytest.mark.asyncio
+@pytest.mark.respx()
+async def test_enforce_credentials_false_async_uses_request_authorization_header(respx_mock: MockRouter) -> None:
+ respx_mock.post(
+ "https://example-resource.azure.openai.com/openai/deployments/gpt-4/chat/completions?api-version=2024-02-01"
+ ).mock(return_value=httpx.Response(200, json={"model": "gpt-4"}))
+
+ with update_env(AZURE_OPENAI_API_KEY=Omit(), AZURE_OPENAI_AD_TOKEN=Omit()):
+ client = AsyncAzureOpenAI(
+ api_version="2024-02-01",
+ api_key=None,
+ azure_ad_token=None,
+ azure_ad_token_provider=None,
+ azure_endpoint="https://example-resource.azure.openai.com",
+ _enforce_credentials=False,
+ )
+ await client.chat.completions.create(
+ messages=[],
+ model="gpt-4",
+ extra_headers={"authorization": "Bearer manual-token"},
+ )
+
+ calls = cast("list[MockRequestCall]", respx_mock.calls)
+ assert calls[0].request.headers.get("Authorization") == "Bearer manual-token"
+ assert calls[0].request.headers.get("api-key") is None
+
+
+def test_enforce_credentials_true_async() -> None:
+ with update_env(AZURE_OPENAI_API_KEY=Omit(), AZURE_OPENAI_AD_TOKEN=Omit()):
+ with pytest.raises(OpenAIError, match="Missing credentials"):
+ AsyncAzureOpenAI(
+ api_version="2024-02-01",
+ api_key=None,
+ azure_ad_token=None,
+ azure_ad_token_provider=None,
+ azure_endpoint="https://example-resource.azure.openai.com",
+ )
+
+
@pytest.mark.respx()
def test_client_token_provider_refresh_sync(respx_mock: MockRouter) -> None:
respx_mock.post(
diff --git a/tests/test_client.py b/tests/test_client.py
index 570042c46a..e2bb6ea966 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -10,7 +10,7 @@
import inspect
import dataclasses
import tracemalloc
-from typing import Any, Union, TypeVar, Callable, Iterable, Iterator, Optional, Protocol, Coroutine, cast
+from typing import Any, Union, TypeVar, Callable, Iterable, Iterator, Optional, Coroutine, cast
from unittest import mock
from typing_extensions import Literal, AsyncIterator, override
@@ -18,14 +18,15 @@
import pytest
from respx import MockRouter
from pydantic import ValidationError
+from respx.models import Call as MockRequestCall
-from openai import OpenAI, AsyncOpenAI, APIResponseValidationError
+from openai import OpenAI, AsyncOpenAI, OpenAIError, APIResponseValidationError
from openai.auth import WorkloadIdentity
from openai._types import Omit
from openai._utils import asyncify
from openai._models import BaseModel, FinalRequestOptions
from openai._streaming import Stream, AsyncStream
-from openai._exceptions import OpenAIError, APIStatusError, APITimeoutError, APIResponseValidationError
+from openai._exceptions import APIStatusError, APITimeoutError, APIResponseValidationError
from openai._base_client import (
DEFAULT_TIMEOUT,
HTTPX_DEFAULT_TIMEOUT,
@@ -42,21 +43,18 @@
T = TypeVar("T")
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
api_key = "My API Key"
+admin_api_key = "My Admin API Key"
workload_identity: WorkloadIdentity = {
- "client_id": "client-id",
- "identity_provider_id": "identity-provider-id",
- "service_account_id": "service-account-id",
+ "client_id": "client_123",
+ "identity_provider_id": "provider_123",
+ "service_account_id": "service_account_123",
"provider": {
- "token_type": "jwt",
"get_token": lambda: "external-subject-token",
+ "token_type": "jwt",
},
}
-class MockRequestCall(Protocol):
- request: httpx.Request
-
-
def _get_params(client: BaseClient[Any, Any]) -> dict[str, str]:
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
url = httpx.URL(request.url)
@@ -155,6 +153,10 @@ def test_copy(self, client: OpenAI) -> None:
assert copied.api_key == "another My API Key"
assert client.api_key == "My API Key"
+ copied = client.copy(admin_api_key="another My Admin API Key")
+ assert copied.admin_api_key == "another My Admin API Key"
+ assert client.admin_api_key == "My Admin API Key"
+
def test_copy_default_options(self, client: OpenAI) -> None:
# options that have a default are overridden correctly
copied = client.copy(max_retries=7)
@@ -173,7 +175,11 @@ def test_copy_default_options(self, client: OpenAI) -> None:
def test_copy_default_headers(self) -> None:
client = OpenAI(
- base_url=base_url, api_key=api_key, _strict_response_validation=True, default_headers={"X-Foo": "bar"}
+ base_url=base_url,
+ api_key=api_key,
+ admin_api_key=admin_api_key,
+ _strict_response_validation=True,
+ default_headers={"X-Foo": "bar"},
)
assert client.default_headers["X-Foo"] == "bar"
@@ -208,7 +214,11 @@ def test_copy_default_headers(self) -> None:
def test_copy_default_query(self) -> None:
client = OpenAI(
- base_url=base_url, api_key=api_key, _strict_response_validation=True, default_query={"foo": "bar"}
+ base_url=base_url,
+ api_key=api_key,
+ admin_api_key=admin_api_key,
+ _strict_response_validation=True,
+ default_query={"foo": "bar"},
)
assert _get_params(client)["foo"] == "bar"
@@ -333,7 +343,13 @@ def test_request_timeout(self, client: OpenAI) -> None:
assert timeout == httpx.Timeout(100.0)
def test_client_timeout_option(self) -> None:
- client = OpenAI(base_url=base_url, api_key=api_key, _strict_response_validation=True, timeout=httpx.Timeout(0))
+ client = OpenAI(
+ base_url=base_url,
+ api_key=api_key,
+ admin_api_key=admin_api_key,
+ _strict_response_validation=True,
+ timeout=httpx.Timeout(0),
+ )
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore
@@ -345,7 +361,11 @@ def test_http_client_timeout_option(self) -> None:
# custom timeout given to the httpx client should be used
with httpx.Client(timeout=None) as http_client:
client = OpenAI(
- base_url=base_url, api_key=api_key, _strict_response_validation=True, http_client=http_client
+ base_url=base_url,
+ api_key=api_key,
+ admin_api_key=admin_api_key,
+ _strict_response_validation=True,
+ http_client=http_client,
)
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
@@ -357,7 +377,11 @@ def test_http_client_timeout_option(self) -> None:
# no timeout given to the httpx client should not use the httpx default
with httpx.Client() as http_client:
client = OpenAI(
- base_url=base_url, api_key=api_key, _strict_response_validation=True, http_client=http_client
+ base_url=base_url,
+ api_key=api_key,
+ admin_api_key=admin_api_key,
+ _strict_response_validation=True,
+ http_client=http_client,
)
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
@@ -369,7 +393,11 @@ def test_http_client_timeout_option(self) -> None:
# explicitly passing the default timeout currently results in it being ignored
with httpx.Client(timeout=HTTPX_DEFAULT_TIMEOUT) as http_client:
client = OpenAI(
- base_url=base_url, api_key=api_key, _strict_response_validation=True, http_client=http_client
+ base_url=base_url,
+ api_key=api_key,
+ admin_api_key=admin_api_key,
+ _strict_response_validation=True,
+ http_client=http_client,
)
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
@@ -384,13 +412,18 @@ async def test_invalid_http_client(self) -> None:
OpenAI(
base_url=base_url,
api_key=api_key,
+ admin_api_key=admin_api_key,
_strict_response_validation=True,
http_client=cast(Any, http_client),
)
def test_default_headers_option(self) -> None:
test_client = OpenAI(
- base_url=base_url, api_key=api_key, _strict_response_validation=True, default_headers={"X-Foo": "bar"}
+ base_url=base_url,
+ api_key=api_key,
+ admin_api_key=admin_api_key,
+ _strict_response_validation=True,
+ default_headers={"X-Foo": "bar"},
)
request = test_client._build_request(FinalRequestOptions(method="get", url="/foo"))
assert request.headers.get("x-foo") == "bar"
@@ -399,6 +432,7 @@ def test_default_headers_option(self) -> None:
test_client2 = OpenAI(
base_url=base_url,
api_key=api_key,
+ admin_api_key=admin_api_key,
_strict_response_validation=True,
default_headers={
"X-Foo": "stainless",
@@ -413,16 +447,135 @@ def test_default_headers_option(self) -> None:
test_client2.close()
def test_validate_headers(self) -> None:
- client = OpenAI(base_url=base_url, api_key=api_key, _strict_response_validation=True)
+ client = OpenAI(
+ base_url=base_url, api_key=api_key, admin_api_key=admin_api_key, _strict_response_validation=True
+ )
options = client._prepare_options(FinalRequestOptions(method="get", url="/foo"))
request = client._build_request(options)
assert request.headers.get("Authorization") == f"Bearer {api_key}"
- with pytest.raises(OpenAIError):
- with update_env(**{"OPENAI_API_KEY": Omit()}):
- client2 = OpenAI(base_url=base_url, api_key=None, _strict_response_validation=True)
- _ = client2
+ admin_request = client._build_request(
+ FinalRequestOptions(
+ method="get",
+ url="/organization/projects",
+ security={"admin_api_key_auth": True},
+ )
+ )
+ assert admin_request.headers.get("Authorization") == f"Bearer {admin_api_key}"
+
+ with update_env(**{"OPENAI_API_KEY": Omit()}):
+ admin_only = OpenAI(
+ base_url=base_url,
+ api_key=None,
+ admin_api_key=admin_api_key,
+ _strict_response_validation=True,
+ )
+ admin_only_request = admin_only._build_request(
+ FinalRequestOptions(
+ method="get",
+ url="/organization/projects",
+ security={"admin_api_key_auth": True},
+ )
+ )
+ assert admin_only_request.headers.get("Authorization") == f"Bearer {admin_api_key}"
+
+ with pytest.raises(
+ TypeError,
+ match="Could not resolve authentication method",
+ ):
+ admin_only._build_request(
+ FinalRequestOptions(
+ method="post",
+ url="/responses",
+ security={"bearer_auth": True},
+ )
+ )
+
+ with update_env(
+ **{
+ "OPENAI_API_KEY": Omit(),
+ "OPENAI_ADMIN_KEY": Omit(),
+ }
+ ):
+ no_credentials = OpenAI(
+ base_url=base_url,
+ api_key=None,
+ admin_api_key=None,
+ _enforce_credentials=False,
+ _strict_response_validation=True,
+ )
+ lowercase_auth_request = no_credentials._build_request(
+ FinalRequestOptions(method="get", url="/foo", headers={"authorization": "Bearer custom"})
+ )
+ assert lowercase_auth_request.headers.get("Authorization") == "Bearer custom"
+
+ omitted_auth_request = no_credentials._build_request(
+ FinalRequestOptions(method="get", url="/foo", headers={"authorization": Omit()})
+ )
+ assert "Authorization" not in omitted_auth_request.headers
+
+ with update_env(
+ **{
+ "OPENAI_API_KEY": Omit(),
+ "OPENAI_ADMIN_KEY": Omit(),
+ }
+ ):
+ with pytest.raises(OpenAIError, match="Missing credentials"):
+ OpenAI(base_url=base_url, api_key=None, admin_api_key=None, _strict_response_validation=True)
+
+ @pytest.mark.respx(base_url=base_url)
+ def test_api_key_provider_preserves_admin_auth(self, respx_mock: MockRouter) -> None:
+ respx_mock.get("/organization/projects").mock(return_value=httpx.Response(200, json={"ok": True}))
+
+ provider_called = False
+
+ def api_key_provider() -> str:
+ nonlocal provider_called
+ provider_called = True
+ return "dynamic-api-key"
+
+ client = OpenAI(base_url=base_url, api_key=api_key_provider, admin_api_key=admin_api_key)
+ response = client.get(
+ "/organization/projects",
+ cast_to=httpx.Response,
+ options={"security": {"admin_api_key_auth": True}},
+ )
+
+ assert response.request.headers.get("Authorization") == f"Bearer {admin_api_key}"
+ assert provider_called is False
+
+ def test_api_key_provider_does_not_fill_admin_auth(self) -> None:
+ provider_called = False
+
+ def api_key_provider() -> str:
+ nonlocal provider_called
+ provider_called = True
+ return "dynamic-api-key"
+
+ with update_env(OPENAI_ADMIN_KEY=Omit()):
+ client = OpenAI(base_url=base_url, api_key=api_key_provider, admin_api_key=None)
+ with pytest.raises(TypeError, match="Could not resolve authentication method"):
+ client.get(
+ "/organization/projects",
+ cast_to=httpx.Response,
+ options={"security": {"admin_api_key_auth": True}},
+ )
+
+ assert provider_called is False
+
+ @pytest.mark.respx(base_url=base_url)
+ def test_workload_identity_preserves_admin_auth(self, respx_mock: MockRouter) -> None:
+ respx_mock.get("/organization/projects").mock(return_value=httpx.Response(200, json={"ok": True}))
+
+ client = OpenAI(base_url=base_url, workload_identity=workload_identity, admin_api_key=admin_api_key)
+ response = client.get(
+ "/organization/projects",
+ cast_to=httpx.Response,
+ options={"security": {"admin_api_key_auth": True}},
+ )
+
+ assert response.request.headers.get("Authorization") == f"Bearer {admin_api_key}"
def test_workload_identity_is_mutually_exclusive_with_api_key(self) -> None:
with pytest.raises(
@@ -439,7 +592,11 @@ def test_workload_identity_is_mutually_exclusive_with_api_key(self) -> None:
def test_default_query_option(self) -> None:
client = OpenAI(
- base_url=base_url, api_key=api_key, _strict_response_validation=True, default_query={"query_param": "bar"}
+ base_url=base_url,
+ api_key=api_key,
+ admin_api_key=admin_api_key,
+ _strict_response_validation=True,
+ default_query={"query_param": "bar"},
)
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
url = httpx.URL(request.url)
@@ -636,6 +793,7 @@ def mock_handler(request: httpx.Request) -> httpx.Response:
with OpenAI(
base_url=base_url,
api_key=api_key,
+ admin_api_key=admin_api_key,
_strict_response_validation=True,
http_client=httpx.Client(transport=MockTransport(handler=mock_handler)),
) as client:
@@ -729,7 +887,12 @@ class Model(BaseModel):
assert response.foo == 2
def test_base_url_setter(self) -> None:
- client = OpenAI(base_url="https://example.com/from_init", api_key=api_key, _strict_response_validation=True)
+ client = OpenAI(
+ base_url="https://example.com/from_init",
+ api_key=api_key,
+ admin_api_key=admin_api_key,
+ _strict_response_validation=True,
+ )
assert client.base_url == "https://example.com/from_init/"
client.base_url = "https://example.com/from_setter" # type: ignore[assignment]
@@ -740,16 +903,22 @@ def test_base_url_setter(self) -> None:
def test_base_url_env(self) -> None:
with update_env(OPENAI_BASE_URL="http://localhost:5000/from/env"):
- client = OpenAI(api_key=api_key, _strict_response_validation=True)
+ client = OpenAI(api_key=api_key, admin_api_key=admin_api_key, _strict_response_validation=True)
assert client.base_url == "http://localhost:5000/from/env/"
@pytest.mark.parametrize(
"client",
[
- OpenAI(base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True),
OpenAI(
base_url="http://localhost:5000/custom/path/",
api_key=api_key,
+ admin_api_key=admin_api_key,
+ _strict_response_validation=True,
+ ),
+ OpenAI(
+ base_url="http://localhost:5000/custom/path/",
+ api_key=api_key,
+ admin_api_key=admin_api_key,
_strict_response_validation=True,
http_client=httpx.Client(),
),
@@ -770,10 +939,16 @@ def test_base_url_trailing_slash(self, client: OpenAI) -> None:
@pytest.mark.parametrize(
"client",
[
- OpenAI(base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True),
OpenAI(
base_url="http://localhost:5000/custom/path/",
api_key=api_key,
+ admin_api_key=admin_api_key,
+ _strict_response_validation=True,
+ ),
+ OpenAI(
+ base_url="http://localhost:5000/custom/path/",
+ api_key=api_key,
+ admin_api_key=admin_api_key,
_strict_response_validation=True,
http_client=httpx.Client(),
),
@@ -794,10 +969,16 @@ def test_base_url_no_trailing_slash(self, client: OpenAI) -> None:
@pytest.mark.parametrize(
"client",
[
- OpenAI(base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True),
OpenAI(
base_url="http://localhost:5000/custom/path/",
api_key=api_key,
+ admin_api_key=admin_api_key,
+ _strict_response_validation=True,
+ ),
+ OpenAI(
+ base_url="http://localhost:5000/custom/path/",
+ api_key=api_key,
+ admin_api_key=admin_api_key,
_strict_response_validation=True,
http_client=httpx.Client(),
),
@@ -816,7 +997,9 @@ def test_absolute_request_url(self, client: OpenAI) -> None:
client.close()
def test_copied_client_does_not_close_http(self) -> None:
- test_client = OpenAI(base_url=base_url, api_key=api_key, _strict_response_validation=True)
+ test_client = OpenAI(
+ base_url=base_url, api_key=api_key, admin_api_key=admin_api_key, _strict_response_validation=True
+ )
assert not test_client.is_closed()
copied = test_client.copy()
@@ -827,7 +1010,9 @@ def test_copied_client_does_not_close_http(self) -> None:
assert not test_client.is_closed()
def test_client_context_manager(self) -> None:
- test_client = OpenAI(base_url=base_url, api_key=api_key, _strict_response_validation=True)
+ test_client = OpenAI(
+ base_url=base_url, api_key=api_key, admin_api_key=admin_api_key, _strict_response_validation=True
+ )
with test_client as c2:
assert c2 is test_client
assert not c2.is_closed()
@@ -848,7 +1033,13 @@ class Model(BaseModel):
def test_client_max_retries_validation(self) -> None:
with pytest.raises(TypeError, match=r"max_retries cannot be None"):
- OpenAI(base_url=base_url, api_key=api_key, _strict_response_validation=True, max_retries=cast(Any, None))
+ OpenAI(
+ base_url=base_url,
+ api_key=api_key,
+ admin_api_key=admin_api_key,
+ _strict_response_validation=True,
+ max_retries=cast(Any, None),
+ )
@pytest.mark.respx(base_url=base_url)
def test_default_stream_cls(self, respx_mock: MockRouter, client: OpenAI) -> None:
@@ -868,12 +1059,16 @@ class Model(BaseModel):
respx_mock.get("/foo").mock(return_value=httpx.Response(200, text="my-custom-format"))
- strict_client = OpenAI(base_url=base_url, api_key=api_key, _strict_response_validation=True)
+ strict_client = OpenAI(
+ base_url=base_url, api_key=api_key, admin_api_key=admin_api_key, _strict_response_validation=True
+ )
with pytest.raises(APIResponseValidationError):
strict_client.get("/foo", cast_to=Model)
- non_strict_client = OpenAI(base_url=base_url, api_key=api_key, _strict_response_validation=False)
+ non_strict_client = OpenAI(
+ base_url=base_url, api_key=api_key, admin_api_key=admin_api_key, _strict_response_validation=False
+ )
response = non_strict_client.get("/foo", cast_to=Model)
assert isinstance(response, str) # type: ignore[unreachable]
@@ -1221,6 +1416,10 @@ def test_copy(self, async_client: AsyncOpenAI) -> None:
assert copied.api_key == "another My API Key"
assert async_client.api_key == "My API Key"
+ copied = async_client.copy(admin_api_key="another My Admin API Key")
+ assert copied.admin_api_key == "another My Admin API Key"
+ assert async_client.admin_api_key == "My Admin API Key"
+
def test_copy_default_options(self, async_client: AsyncOpenAI) -> None:
# options that have a default are overridden correctly
copied = async_client.copy(max_retries=7)
@@ -1239,7 +1438,11 @@ def test_copy_default_options(self, async_client: AsyncOpenAI) -> None:
async def test_copy_default_headers(self) -> None:
client = AsyncOpenAI(
- base_url=base_url, api_key=api_key, _strict_response_validation=True, default_headers={"X-Foo": "bar"}
+ base_url=base_url,
+ api_key=api_key,
+ admin_api_key=admin_api_key,
+ _strict_response_validation=True,
+ default_headers={"X-Foo": "bar"},
)
assert client.default_headers["X-Foo"] == "bar"
@@ -1274,7 +1477,11 @@ async def test_copy_default_headers(self) -> None:
async def test_copy_default_query(self) -> None:
client = AsyncOpenAI(
- base_url=base_url, api_key=api_key, _strict_response_validation=True, default_query={"foo": "bar"}
+ base_url=base_url,
+ api_key=api_key,
+ admin_api_key=admin_api_key,
+ _strict_response_validation=True,
+ default_query={"foo": "bar"},
)
assert _get_params(client)["foo"] == "bar"
@@ -1402,7 +1609,11 @@ async def test_request_timeout(self, async_client: AsyncOpenAI) -> None:
async def test_client_timeout_option(self) -> None:
client = AsyncOpenAI(
- base_url=base_url, api_key=api_key, _strict_response_validation=True, timeout=httpx.Timeout(0)
+ base_url=base_url,
+ api_key=api_key,
+ admin_api_key=admin_api_key,
+ _strict_response_validation=True,
+ timeout=httpx.Timeout(0),
)
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
@@ -1415,7 +1626,11 @@ async def test_http_client_timeout_option(self) -> None:
# custom timeout given to the httpx client should be used
async with httpx.AsyncClient(timeout=None) as http_client:
client = AsyncOpenAI(
- base_url=base_url, api_key=api_key, _strict_response_validation=True, http_client=http_client
+ base_url=base_url,
+ api_key=api_key,
+ admin_api_key=admin_api_key,
+ _strict_response_validation=True,
+ http_client=http_client,
)
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
@@ -1427,7 +1642,11 @@ async def test_http_client_timeout_option(self) -> None:
# no timeout given to the httpx client should not use the httpx default
async with httpx.AsyncClient() as http_client:
client = AsyncOpenAI(
- base_url=base_url, api_key=api_key, _strict_response_validation=True, http_client=http_client
+ base_url=base_url,
+ api_key=api_key,
+ admin_api_key=admin_api_key,
+ _strict_response_validation=True,
+ http_client=http_client,
)
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
@@ -1439,7 +1658,11 @@ async def test_http_client_timeout_option(self) -> None:
# explicitly passing the default timeout currently results in it being ignored
async with httpx.AsyncClient(timeout=HTTPX_DEFAULT_TIMEOUT) as http_client:
client = AsyncOpenAI(
- base_url=base_url, api_key=api_key, _strict_response_validation=True, http_client=http_client
+ base_url=base_url,
+ api_key=api_key,
+ admin_api_key=admin_api_key,
+ _strict_response_validation=True,
+ http_client=http_client,
)
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
@@ -1454,13 +1677,18 @@ def test_invalid_http_client(self) -> None:
AsyncOpenAI(
base_url=base_url,
api_key=api_key,
+ admin_api_key=admin_api_key,
_strict_response_validation=True,
http_client=cast(Any, http_client),
)
async def test_default_headers_option(self) -> None:
test_client = AsyncOpenAI(
- base_url=base_url, api_key=api_key, _strict_response_validation=True, default_headers={"X-Foo": "bar"}
+ base_url=base_url,
+ api_key=api_key,
+ admin_api_key=admin_api_key,
+ _strict_response_validation=True,
+ default_headers={"X-Foo": "bar"},
)
request = test_client._build_request(FinalRequestOptions(method="get", url="/foo"))
assert request.headers.get("x-foo") == "bar"
@@ -1469,6 +1697,7 @@ async def test_default_headers_option(self) -> None:
test_client2 = AsyncOpenAI(
base_url=base_url,
api_key=api_key,
+ admin_api_key=admin_api_key,
_strict_response_validation=True,
default_headers={
"X-Foo": "stainless",
@@ -1483,19 +1712,142 @@ async def test_default_headers_option(self) -> None:
await test_client2.close()
async def test_validate_headers(self) -> None:
- client = AsyncOpenAI(base_url=base_url, api_key=api_key, _strict_response_validation=True)
+ client = AsyncOpenAI(
+ base_url=base_url, api_key=api_key, admin_api_key=admin_api_key, _strict_response_validation=True
+ )
options = await client._prepare_options(FinalRequestOptions(method="get", url="/foo"))
request = client._build_request(options)
assert request.headers.get("Authorization") == f"Bearer {api_key}"
- with pytest.raises(OpenAIError):
- with update_env(**{"OPENAI_API_KEY": Omit()}):
- client2 = AsyncOpenAI(base_url=base_url, api_key=None, _strict_response_validation=True)
- _ = client2
+ admin_request = client._build_request(
+ FinalRequestOptions(
+ method="get",
+ url="/organization/projects",
+ security={"admin_api_key_auth": True},
+ )
+ )
+ assert admin_request.headers.get("Authorization") == f"Bearer {admin_api_key}"
+
+ with update_env(**{"OPENAI_API_KEY": Omit()}):
+ admin_only = AsyncOpenAI(
+ base_url=base_url,
+ api_key=None,
+ admin_api_key=admin_api_key,
+ _strict_response_validation=True,
+ )
+ admin_only_request = admin_only._build_request(
+ FinalRequestOptions(
+ method="get",
+ url="/organization/projects",
+ security={"admin_api_key_auth": True},
+ )
+ )
+ assert admin_only_request.headers.get("Authorization") == f"Bearer {admin_api_key}"
+
+ with pytest.raises(
+ TypeError,
+ match="Could not resolve authentication method",
+ ):
+ admin_only._build_request(
+ FinalRequestOptions(
+ method="post",
+ url="/responses",
+ security={"bearer_auth": True},
+ )
+ )
+
+ with update_env(
+ **{
+ "OPENAI_API_KEY": Omit(),
+ "OPENAI_ADMIN_KEY": Omit(),
+ }
+ ):
+ no_credentials = AsyncOpenAI(
+ base_url=base_url,
+ api_key=None,
+ admin_api_key=None,
+ _enforce_credentials=False,
+ _strict_response_validation=True,
+ )
+ lowercase_auth_request = no_credentials._build_request(
+ FinalRequestOptions(method="get", url="/foo", headers={"authorization": "Bearer custom"})
+ )
+ assert lowercase_auth_request.headers.get("Authorization") == "Bearer custom"
+
+ omitted_auth_request = no_credentials._build_request(
+ FinalRequestOptions(method="get", url="/foo", headers={"authorization": Omit()})
+ )
+ assert "Authorization" not in omitted_auth_request.headers
+
+ with update_env(
+ **{
+ "OPENAI_API_KEY": Omit(),
+ "OPENAI_ADMIN_KEY": Omit(),
+ }
+ ):
+ with pytest.raises(OpenAIError, match="Missing credentials"):
+ AsyncOpenAI(base_url=base_url, api_key=None, admin_api_key=None, _strict_response_validation=True)
+
+ @pytest.mark.respx(base_url=base_url)
+ async def test_api_key_provider_preserves_admin_auth(self, respx_mock: MockRouter) -> None:
+ respx_mock.get("/organization/projects").mock(return_value=httpx.Response(200, json={"ok": True}))
+
+ provider_called = False
+
+ async def api_key_provider() -> str:
+ nonlocal provider_called
+ provider_called = True
+ return "dynamic-api-key"
+
+ client = AsyncOpenAI(base_url=base_url, api_key=api_key_provider, admin_api_key=admin_api_key)
+ response = await client.get(
+ "/organization/projects",
+ cast_to=httpx.Response,
+ options={"security": {"admin_api_key_auth": True}},
+ )
+
+ assert response.request.headers.get("Authorization") == f"Bearer {admin_api_key}"
+ assert provider_called is False
+
+ async def test_api_key_provider_does_not_fill_admin_auth(self) -> None:
+ provider_called = False
+
+ async def api_key_provider() -> str:
+ nonlocal provider_called
+ provider_called = True
+ return "dynamic-api-key"
+
+ with update_env(OPENAI_ADMIN_KEY=Omit()):
+ client = AsyncOpenAI(base_url=base_url, api_key=api_key_provider, admin_api_key=None)
+ with pytest.raises(TypeError, match="Could not resolve authentication method"):
+ await client.get(
+ "/organization/projects",
+ cast_to=httpx.Response,
+ options={"security": {"admin_api_key_auth": True}},
+ )
+
+ assert provider_called is False
+
+ @pytest.mark.respx(base_url=base_url)
+ async def test_workload_identity_preserves_admin_auth(self, respx_mock: MockRouter) -> None:
+ respx_mock.get("/organization/projects").mock(return_value=httpx.Response(200, json={"ok": True}))
+
+ client = AsyncOpenAI(base_url=base_url, workload_identity=workload_identity, admin_api_key=admin_api_key)
+ response = await client.get(
+ "/organization/projects",
+ cast_to=httpx.Response,
+ options={"security": {"admin_api_key_auth": True}},
+ )
+
+ assert response.request.headers.get("Authorization") == f"Bearer {admin_api_key}"
async def test_default_query_option(self) -> None:
client = AsyncOpenAI(
- base_url=base_url, api_key=api_key, _strict_response_validation=True, default_query={"query_param": "bar"}
+ base_url=base_url,
+ api_key=api_key,
+ admin_api_key=admin_api_key,
+ _strict_response_validation=True,
+ default_query={"query_param": "bar"},
)
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
url = httpx.URL(request.url)
@@ -1692,6 +2044,7 @@ async def mock_handler(request: httpx.Request) -> httpx.Response:
async with AsyncOpenAI(
base_url=base_url,
api_key=api_key,
+ admin_api_key=admin_api_key,
_strict_response_validation=True,
http_client=httpx.AsyncClient(transport=MockTransport(handler=mock_handler)),
) as client:
@@ -1790,7 +2143,10 @@ class Model(BaseModel):
async def test_base_url_setter(self) -> None:
client = AsyncOpenAI(
- base_url="https://example.com/from_init", api_key=api_key, _strict_response_validation=True
+ base_url="https://example.com/from_init",
+ api_key=api_key,
+ admin_api_key=admin_api_key,
+ _strict_response_validation=True,
)
assert client.base_url == "https://example.com/from_init/"
@@ -1802,18 +2158,22 @@ async def test_base_url_setter(self) -> None:
async def test_base_url_env(self) -> None:
with update_env(OPENAI_BASE_URL="http://localhost:5000/from/env"):
- client = AsyncOpenAI(api_key=api_key, _strict_response_validation=True)
+ client = AsyncOpenAI(api_key=api_key, admin_api_key=admin_api_key, _strict_response_validation=True)
assert client.base_url == "http://localhost:5000/from/env/"
@pytest.mark.parametrize(
"client",
[
AsyncOpenAI(
- base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True
+ base_url="http://localhost:5000/custom/path/",
+ api_key=api_key,
+ admin_api_key=admin_api_key,
+ _strict_response_validation=True,
),
AsyncOpenAI(
base_url="http://localhost:5000/custom/path/",
api_key=api_key,
+ admin_api_key=admin_api_key,
_strict_response_validation=True,
http_client=httpx.AsyncClient(),
),
@@ -1835,11 +2195,15 @@ async def test_base_url_trailing_slash(self, client: AsyncOpenAI) -> None:
"client",
[
AsyncOpenAI(
- base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True
+ base_url="http://localhost:5000/custom/path/",
+ api_key=api_key,
+ admin_api_key=admin_api_key,
+ _strict_response_validation=True,
),
AsyncOpenAI(
base_url="http://localhost:5000/custom/path/",
api_key=api_key,
+ admin_api_key=admin_api_key,
_strict_response_validation=True,
http_client=httpx.AsyncClient(),
),
@@ -1861,11 +2225,15 @@ async def test_base_url_no_trailing_slash(self, client: AsyncOpenAI) -> None:
"client",
[
AsyncOpenAI(
- base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True
+ base_url="http://localhost:5000/custom/path/",
+ api_key=api_key,
+ admin_api_key=admin_api_key,
+ _strict_response_validation=True,
),
AsyncOpenAI(
base_url="http://localhost:5000/custom/path/",
api_key=api_key,
+ admin_api_key=admin_api_key,
_strict_response_validation=True,
http_client=httpx.AsyncClient(),
),
@@ -1884,7 +2252,9 @@ async def test_absolute_request_url(self, client: AsyncOpenAI) -> None:
await client.close()
async def test_copied_client_does_not_close_http(self) -> None:
- test_client = AsyncOpenAI(base_url=base_url, api_key=api_key, _strict_response_validation=True)
+ test_client = AsyncOpenAI(
+ base_url=base_url, api_key=api_key, admin_api_key=admin_api_key, _strict_response_validation=True
+ )
assert not test_client.is_closed()
copied = test_client.copy()
@@ -1896,7 +2266,9 @@ async def test_copied_client_does_not_close_http(self) -> None:
assert not test_client.is_closed()
async def test_client_context_manager(self) -> None:
- test_client = AsyncOpenAI(base_url=base_url, api_key=api_key, _strict_response_validation=True)
+ test_client = AsyncOpenAI(
+ base_url=base_url, api_key=api_key, admin_api_key=admin_api_key, _strict_response_validation=True
+ )
async with test_client as c2:
assert c2 is test_client
assert not c2.is_closed()
@@ -1918,7 +2290,11 @@ class Model(BaseModel):
async def test_client_max_retries_validation(self) -> None:
with pytest.raises(TypeError, match=r"max_retries cannot be None"):
AsyncOpenAI(
- base_url=base_url, api_key=api_key, _strict_response_validation=True, max_retries=cast(Any, None)
+ base_url=base_url,
+ api_key=api_key,
+ admin_api_key=admin_api_key,
+ _strict_response_validation=True,
+ max_retries=cast(Any, None),
)
@pytest.mark.respx(base_url=base_url)
@@ -1939,12 +2315,16 @@ class Model(BaseModel):
respx_mock.get("/foo").mock(return_value=httpx.Response(200, text="my-custom-format"))
- strict_client = AsyncOpenAI(base_url=base_url, api_key=api_key, _strict_response_validation=True)
+ strict_client = AsyncOpenAI(
+ base_url=base_url, api_key=api_key, admin_api_key=admin_api_key, _strict_response_validation=True
+ )
with pytest.raises(APIResponseValidationError):
await strict_client.get("/foo", cast_to=Model)
- non_strict_client = AsyncOpenAI(base_url=base_url, api_key=api_key, _strict_response_validation=False)
+ non_strict_client = AsyncOpenAI(
+ base_url=base_url, api_key=api_key, admin_api_key=admin_api_key, _strict_response_validation=False
+ )
response = await non_strict_client.get("/foo", cast_to=Model)
assert isinstance(response, str) # type: ignore[unreachable]
diff --git a/tests/test_deepcopy.py b/tests/test_deepcopy.py
deleted file mode 100644
index 86a2adb1a2..0000000000
--- a/tests/test_deepcopy.py
+++ /dev/null
@@ -1,58 +0,0 @@
-from openai._utils import deepcopy_minimal
-
-
-def assert_different_identities(obj1: object, obj2: object) -> None:
- assert obj1 == obj2
- assert id(obj1) != id(obj2)
-
-
-def test_simple_dict() -> None:
- obj1 = {"foo": "bar"}
- obj2 = deepcopy_minimal(obj1)
- assert_different_identities(obj1, obj2)
-
-
-def test_nested_dict() -> None:
- obj1 = {"foo": {"bar": True}}
- obj2 = deepcopy_minimal(obj1)
- assert_different_identities(obj1, obj2)
- assert_different_identities(obj1["foo"], obj2["foo"])
-
-
-def test_complex_nested_dict() -> None:
- obj1 = {"foo": {"bar": [{"hello": "world"}]}}
- obj2 = deepcopy_minimal(obj1)
- assert_different_identities(obj1, obj2)
- assert_different_identities(obj1["foo"], obj2["foo"])
- assert_different_identities(obj1["foo"]["bar"], obj2["foo"]["bar"])
- assert_different_identities(obj1["foo"]["bar"][0], obj2["foo"]["bar"][0])
-
-
-def test_simple_list() -> None:
- obj1 = ["a", "b", "c"]
- obj2 = deepcopy_minimal(obj1)
- assert_different_identities(obj1, obj2)
-
-
-def test_nested_list() -> None:
- obj1 = ["a", [1, 2, 3]]
- obj2 = deepcopy_minimal(obj1)
- assert_different_identities(obj1, obj2)
- assert_different_identities(obj1[1], obj2[1])
-
-
-class MyObject: ...
-
-
-def test_ignores_other_types() -> None:
- # custom classes
- my_obj = MyObject()
- obj1 = {"foo": my_obj}
- obj2 = deepcopy_minimal(obj1)
- assert_different_identities(obj1, obj2)
- assert obj1["foo"] is my_obj
-
- # tuples
- obj3 = ("a", "b")
- obj4 = deepcopy_minimal(obj3)
- assert obj3 is obj4
diff --git a/tests/test_extract_files.py b/tests/test_extract_files.py
index dcf85bd7c7..54490e133f 100644
--- a/tests/test_extract_files.py
+++ b/tests/test_extract_files.py
@@ -4,7 +4,7 @@
import pytest
-from openai._types import FileTypes
+from openai._types import FileTypes, ArrayFormat
from openai._utils import extract_files
@@ -37,10 +37,7 @@ def test_multiple_files() -> None:
def test_top_level_file_array() -> None:
query = {"files": [b"file one", b"file two"], "title": "hello"}
- assert extract_files(query, paths=[["files", ""]]) == [
- ("files[]", b"file one"),
- ("files[]", b"file two"),
- ]
+ assert extract_files(query, paths=[["files", ""]]) == [("files[]", b"file one"), ("files[]", b"file two")]
assert query == {"title": "hello"}
@@ -71,3 +68,24 @@ def test_ignores_incorrect_paths(
expected: list[tuple[str, FileTypes]],
) -> None:
assert extract_files(query, paths=paths) == expected
+
+
+@pytest.mark.parametrize(
+ "array_format,expected_top_level,expected_nested",
+ [
+ ("brackets", [("files[]", b"a"), ("files[]", b"b")], [("items[][file]", b"a"), ("items[][file]", b"b")]),
+ ("repeat", [("files", b"a"), ("files", b"b")], [("items[file]", b"a"), ("items[file]", b"b")]),
+ ("comma", [("files", b"a"), ("files", b"b")], [("items[file]", b"a"), ("items[file]", b"b")]),
+ ("indices", [("files[0]", b"a"), ("files[1]", b"b")], [("items[0][file]", b"a"), ("items[1][file]", b"b")]),
+ ],
+)
+def test_array_format_controls_file_field_names(
+ array_format: ArrayFormat,
+ expected_top_level: list[tuple[str, FileTypes]],
+ expected_nested: list[tuple[str, FileTypes]],
+) -> None:
+ top_level = {"files": [b"a", b"b"]}
+ assert extract_files(top_level, paths=[["files", ""]], array_format=array_format) == expected_top_level
+
+ nested = {"items": [{"file": b"a"}, {"file": b"b"}]}
+ assert extract_files(nested, paths=[["items", "", "file"]], array_format=array_format) == expected_nested
diff --git a/tests/test_files.py b/tests/test_files.py
index 15d5c6a811..56445fb550 100644
--- a/tests/test_files.py
+++ b/tests/test_files.py
@@ -4,7 +4,8 @@
import pytest
from dirty_equals import IsDict, IsList, IsBytes, IsTuple
-from openai._files import to_httpx_files, async_to_httpx_files
+from openai._files import to_httpx_files, deepcopy_with_paths, async_to_httpx_files
+from openai._utils import extract_files
readme_path = Path(__file__).parent.parent.joinpath("README.md")
@@ -49,3 +50,99 @@ def test_string_not_allowed() -> None:
"file": "foo", # type: ignore
}
)
+
+
+def assert_different_identities(obj1: object, obj2: object) -> None:
+ assert obj1 == obj2
+ assert obj1 is not obj2
+
+
+class TestDeepcopyWithPaths:
+ def test_copies_top_level_dict(self) -> None:
+ original = {"file": b"data", "other": "value"}
+ result = deepcopy_with_paths(original, [["file"]])
+ assert_different_identities(result, original)
+
+ def test_file_value_is_same_reference(self) -> None:
+ file_bytes = b"contents"
+ original = {"file": file_bytes}
+ result = deepcopy_with_paths(original, [["file"]])
+ assert_different_identities(result, original)
+ assert result["file"] is file_bytes
+
+ def test_list_popped_wholesale(self) -> None:
+ files = [b"f1", b"f2"]
+ original = {"files": files, "title": "t"}
+ result = deepcopy_with_paths(original, [["files", ""]])
+ assert_different_identities(result, original)
+ result_files = result["files"]
+ assert isinstance(result_files, list)
+ assert_different_identities(result_files, files)
+
+ def test_nested_array_path_copies_list_and_elements(self) -> None:
+ elem1 = {"file": b"f1", "extra": 1}
+ elem2 = {"file": b"f2", "extra": 2}
+ original = {"items": [elem1, elem2]}
+ result = deepcopy_with_paths(original, [["items", "", "file"]])
+ assert_different_identities(result, original)
+ result_items = result["items"]
+ assert isinstance(result_items, list)
+ assert_different_identities(result_items, original["items"])
+ assert_different_identities(result_items[0], elem1)
+ assert_different_identities(result_items[1], elem2)
+
+ def test_empty_paths_returns_same_object(self) -> None:
+ original = {"foo": "bar"}
+ result = deepcopy_with_paths(original, [])
+ assert result is original
+
+ def test_multiple_paths(self) -> None:
+ f1 = b"file1"
+ f2 = b"file2"
+ original = {"a": f1, "b": f2, "c": "unchanged"}
+ result = deepcopy_with_paths(original, [["a"], ["b"]])
+ assert_different_identities(result, original)
+ assert result["a"] is f1
+ assert result["b"] is f2
+ assert result["c"] is original["c"]
+
+ def test_extract_files_does_not_mutate_original_top_level(self) -> None:
+ file_bytes = b"contents"
+ original = {"file": file_bytes, "other": "value"}
+
+ copied = deepcopy_with_paths(original, [["file"]])
+ extracted = extract_files(copied, paths=[["file"]])
+
+ assert extracted == [("file", file_bytes)]
+ assert original == {"file": file_bytes, "other": "value"}
+ assert copied == {"other": "value"}
+
+ def test_extract_files_does_not_mutate_original_nested_array_path(self) -> None:
+ file1 = b"f1"
+ file2 = b"f2"
+ original = {
+ "items": [
+ {"file": file1, "extra": 1},
+ {"file": file2, "extra": 2},
+ ],
+ "title": "example",
+ }
+
+ copied = deepcopy_with_paths(original, [["items", "", "file"]])
+ extracted = extract_files(copied, paths=[["items", "", "file"]])
+
+ assert [entry for _, entry in extracted] == [file1, file2]
+ assert original == {
+ "items": [
+ {"file": file1, "extra": 1},
+ {"file": file2, "extra": 2},
+ ],
+ "title": "example",
+ }
+ assert copied == {
+ "items": [
+ {"extra": 1},
+ {"extra": 2},
+ ],
+ "title": "example",
+ }
diff --git a/tests/test_module_client.py b/tests/test_module_client.py
index 9c9a1addab..6371ae7057 100644
--- a/tests/test_module_client.py
+++ b/tests/test_module_client.py
@@ -14,7 +14,8 @@
def reset_state() -> None:
openai._reset_client()
- openai.api_key = None or "My API Key"
+ openai.api_key = None
+ openai.admin_api_key = None
openai.organization = None
openai.project = None
openai.webhook_secret = None