Skip to content

Commit 70e0b50

Browse files
pjoshi30 (Preetam Joshi)
and
Preetam Joshi
authored
Updating SDK to handle instructions in evaluation and continuous monitoring. (#28)
* Initial commit Adding the Aimon Rely README, images, the postman collection, a simple client and examples. A few small changes for error handling in the client and the example application. Getting the Aimon API key from the streamlit app updating README Updating langchain example gif Updating API endpoint Adding V2 API with support for conciseness, completeness and toxicity checks (#1) * Adding V2 API with support for conciseness, completeness and toxicity checks. * Removing prints and updating config for the example application. * Updating README --------- Co-authored-by: Preetam Joshi <[email protected]> Updating postman collection Fixed the simple aimon client's handling of batch requests. Updated postman collection. Added support for a user_query parameter in the input data dictionary. Updating readme Fixed bug in the example app Uploading client code Adding more convenience APIs Fixing bug in create_dataset Added Github actions config to publish to PyPI. Cleaned up dependencies and updated documentation. Fixing langchain example Fixing doc links Formatting changes Changes for aimon-rely * Adding instruction adherence and hallucination v0.2 to the client Updating git ignore Adding more to gitignore Removing .idea files * Fixing doc string * Updating documentation * Updating Client to use V3 API * Fixing test * Updating tests * Updating documentation in the client * Adding .streamlit dir to .gitignore * initial version of decorators for syntactic sugar * A few more changes * updating analyze and detect decorators * Adding new notebooks * Fixing bug in analyze decorator * Updating Detect decorator to make it simpler. Adding Metaflow example. Adding documentation for the chatbot. * fixing chatbot example * Fixed issue in detect decorator. Improved code organization. * fixed typo * Updated the decorators with a cleaner interface. Added a metaflow analyze example. 
* Updated version * Updated Notebook * Fixing context parsing issue with analyze_eval decorator * Updating application to production in the analyze_prod decorator * Updating SDK to handle instructions in evaluation and continuous monitoring. * Deleting old notebook * Fixing usability issues in the chatbot. Organizing examples a bit better. --------- Co-authored-by: Preetam Joshi <[email protected]>
1 parent a1d4ddc commit 70e0b50

32 files changed

+528
-674
lines changed

aimon/__init__.py

+1-3
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,3 @@
1-
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2-
31
from . import types
42
from ._types import NOT_GIVEN, NoneType, NotGiven, Transport, ProxiesTypes
53
from ._utils import file_from_path
@@ -62,7 +60,7 @@
6260
"DEFAULT_MAX_RETRIES",
6361
"DEFAULT_CONNECTION_LIMITS",
6462
"DefaultHttpxClient",
65-
"DefaultAsyncHttpxClient"
63+
"DefaultAsyncHttpxClient",
6664
]
6765

6866
_setup_logging()

aimon/_base_client.py

+29-42
Original file line numberDiff line numberDiff line change
@@ -124,16 +124,14 @@ def __init__(
124124
self,
125125
*,
126126
url: URL,
127-
) -> None:
128-
...
127+
) -> None: ...
129128

130129
@overload
131130
def __init__(
132131
self,
133132
*,
134133
params: Query,
135-
) -> None:
136-
...
134+
) -> None: ...
137135

138136
def __init__(
139137
self,
@@ -166,8 +164,7 @@ def has_next_page(self) -> bool:
166164
return False
167165
return self.next_page_info() is not None
168166

169-
def next_page_info(self) -> Optional[PageInfo]:
170-
...
167+
def next_page_info(self) -> Optional[PageInfo]: ...
171168

172169
def _get_page_items(self) -> Iterable[_T]: # type: ignore[empty-body]
173170
...
@@ -903,8 +900,7 @@ def request(
903900
*,
904901
stream: Literal[True],
905902
stream_cls: Type[_StreamT],
906-
) -> _StreamT:
907-
...
903+
) -> _StreamT: ...
908904

909905
@overload
910906
def request(
@@ -914,8 +910,7 @@ def request(
914910
remaining_retries: Optional[int] = None,
915911
*,
916912
stream: Literal[False] = False,
917-
) -> ResponseT:
918-
...
913+
) -> ResponseT: ...
919914

920915
@overload
921916
def request(
@@ -926,8 +921,7 @@ def request(
926921
*,
927922
stream: bool = False,
928923
stream_cls: Type[_StreamT] | None = None,
929-
) -> ResponseT | _StreamT:
930-
...
924+
) -> ResponseT | _StreamT: ...
931925

932926
def request(
933927
self,
@@ -1049,6 +1043,7 @@ def _request(
10491043
response=response,
10501044
stream=stream,
10511045
stream_cls=stream_cls,
1046+
retries_taken=options.get_max_retries(self.max_retries) - retries,
10521047
)
10531048

10541049
def _retry_request(
@@ -1090,6 +1085,7 @@ def _process_response(
10901085
response: httpx.Response,
10911086
stream: bool,
10921087
stream_cls: type[Stream[Any]] | type[AsyncStream[Any]] | None,
1088+
retries_taken: int = 0,
10931089
) -> ResponseT:
10941090
origin = get_origin(cast_to) or cast_to
10951091

@@ -1107,6 +1103,7 @@ def _process_response(
11071103
stream=stream,
11081104
stream_cls=stream_cls,
11091105
options=options,
1106+
retries_taken=retries_taken,
11101107
),
11111108
)
11121109

@@ -1120,6 +1117,7 @@ def _process_response(
11201117
stream=stream,
11211118
stream_cls=stream_cls,
11221119
options=options,
1120+
retries_taken=retries_taken,
11231121
)
11241122
if bool(response.request.headers.get(RAW_RESPONSE_HEADER)):
11251123
return cast(ResponseT, api_response)
@@ -1152,8 +1150,7 @@ def get(
11521150
cast_to: Type[ResponseT],
11531151
options: RequestOptions = {},
11541152
stream: Literal[False] = False,
1155-
) -> ResponseT:
1156-
...
1153+
) -> ResponseT: ...
11571154

11581155
@overload
11591156
def get(
@@ -1164,8 +1161,7 @@ def get(
11641161
options: RequestOptions = {},
11651162
stream: Literal[True],
11661163
stream_cls: type[_StreamT],
1167-
) -> _StreamT:
1168-
...
1164+
) -> _StreamT: ...
11691165

11701166
@overload
11711167
def get(
@@ -1176,8 +1172,7 @@ def get(
11761172
options: RequestOptions = {},
11771173
stream: bool,
11781174
stream_cls: type[_StreamT] | None = None,
1179-
) -> ResponseT | _StreamT:
1180-
...
1175+
) -> ResponseT | _StreamT: ...
11811176

11821177
def get(
11831178
self,
@@ -1203,8 +1198,7 @@ def post(
12031198
options: RequestOptions = {},
12041199
files: RequestFiles | None = None,
12051200
stream: Literal[False] = False,
1206-
) -> ResponseT:
1207-
...
1201+
) -> ResponseT: ...
12081202

12091203
@overload
12101204
def post(
@@ -1217,8 +1211,7 @@ def post(
12171211
files: RequestFiles | None = None,
12181212
stream: Literal[True],
12191213
stream_cls: type[_StreamT],
1220-
) -> _StreamT:
1221-
...
1214+
) -> _StreamT: ...
12221215

12231216
@overload
12241217
def post(
@@ -1231,8 +1224,7 @@ def post(
12311224
files: RequestFiles | None = None,
12321225
stream: bool,
12331226
stream_cls: type[_StreamT] | None = None,
1234-
) -> ResponseT | _StreamT:
1235-
...
1227+
) -> ResponseT | _StreamT: ...
12361228

12371229
def post(
12381230
self,
@@ -1465,8 +1457,7 @@ async def request(
14651457
*,
14661458
stream: Literal[False] = False,
14671459
remaining_retries: Optional[int] = None,
1468-
) -> ResponseT:
1469-
...
1460+
) -> ResponseT: ...
14701461

14711462
@overload
14721463
async def request(
@@ -1477,8 +1468,7 @@ async def request(
14771468
stream: Literal[True],
14781469
stream_cls: type[_AsyncStreamT],
14791470
remaining_retries: Optional[int] = None,
1480-
) -> _AsyncStreamT:
1481-
...
1471+
) -> _AsyncStreamT: ...
14821472

14831473
@overload
14841474
async def request(
@@ -1489,8 +1479,7 @@ async def request(
14891479
stream: bool,
14901480
stream_cls: type[_AsyncStreamT] | None = None,
14911481
remaining_retries: Optional[int] = None,
1492-
) -> ResponseT | _AsyncStreamT:
1493-
...
1482+
) -> ResponseT | _AsyncStreamT: ...
14941483

14951484
async def request(
14961485
self,
@@ -1610,6 +1599,7 @@ async def _request(
16101599
response=response,
16111600
stream=stream,
16121601
stream_cls=stream_cls,
1602+
retries_taken=options.get_max_retries(self.max_retries) - retries,
16131603
)
16141604

16151605
async def _retry_request(
@@ -1649,6 +1639,7 @@ async def _process_response(
16491639
response: httpx.Response,
16501640
stream: bool,
16511641
stream_cls: type[Stream[Any]] | type[AsyncStream[Any]] | None,
1642+
retries_taken: int = 0,
16521643
) -> ResponseT:
16531644
origin = get_origin(cast_to) or cast_to
16541645

@@ -1666,6 +1657,7 @@ async def _process_response(
16661657
stream=stream,
16671658
stream_cls=stream_cls,
16681659
options=options,
1660+
retries_taken=retries_taken,
16691661
),
16701662
)
16711663

@@ -1679,6 +1671,7 @@ async def _process_response(
16791671
stream=stream,
16801672
stream_cls=stream_cls,
16811673
options=options,
1674+
retries_taken=retries_taken,
16821675
)
16831676
if bool(response.request.headers.get(RAW_RESPONSE_HEADER)):
16841677
return cast(ResponseT, api_response)
@@ -1701,8 +1694,7 @@ async def get(
17011694
cast_to: Type[ResponseT],
17021695
options: RequestOptions = {},
17031696
stream: Literal[False] = False,
1704-
) -> ResponseT:
1705-
...
1697+
) -> ResponseT: ...
17061698

17071699
@overload
17081700
async def get(
@@ -1713,8 +1705,7 @@ async def get(
17131705
options: RequestOptions = {},
17141706
stream: Literal[True],
17151707
stream_cls: type[_AsyncStreamT],
1716-
) -> _AsyncStreamT:
1717-
...
1708+
) -> _AsyncStreamT: ...
17181709

17191710
@overload
17201711
async def get(
@@ -1725,8 +1716,7 @@ async def get(
17251716
options: RequestOptions = {},
17261717
stream: bool,
17271718
stream_cls: type[_AsyncStreamT] | None = None,
1728-
) -> ResponseT | _AsyncStreamT:
1729-
...
1719+
) -> ResponseT | _AsyncStreamT: ...
17301720

17311721
async def get(
17321722
self,
@@ -1750,8 +1740,7 @@ async def post(
17501740
files: RequestFiles | None = None,
17511741
options: RequestOptions = {},
17521742
stream: Literal[False] = False,
1753-
) -> ResponseT:
1754-
...
1743+
) -> ResponseT: ...
17551744

17561745
@overload
17571746
async def post(
@@ -1764,8 +1753,7 @@ async def post(
17641753
options: RequestOptions = {},
17651754
stream: Literal[True],
17661755
stream_cls: type[_AsyncStreamT],
1767-
) -> _AsyncStreamT:
1768-
...
1756+
) -> _AsyncStreamT: ...
17691757

17701758
@overload
17711759
async def post(
@@ -1778,8 +1766,7 @@ async def post(
17781766
options: RequestOptions = {},
17791767
stream: bool,
17801768
stream_cls: type[_AsyncStreamT] | None = None,
1781-
) -> ResponseT | _AsyncStreamT:
1782-
...
1769+
) -> ResponseT | _AsyncStreamT: ...
17831770

17841771
async def post(
17851772
self,

aimon/_compat.py

+12-17
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@
77
import pydantic
88
from pydantic.fields import FieldInfo
99

10-
from ._types import StrBytesIntFloat
10+
from ._types import IncEx, StrBytesIntFloat
1111

1212
_T = TypeVar("_T")
1313
_ModelT = TypeVar("_ModelT", bound=pydantic.BaseModel)
@@ -133,17 +133,20 @@ def model_json(model: pydantic.BaseModel, *, indent: int | None = None) -> str:
133133
def model_dump(
134134
model: pydantic.BaseModel,
135135
*,
136+
exclude: IncEx = None,
136137
exclude_unset: bool = False,
137138
exclude_defaults: bool = False,
138139
) -> dict[str, Any]:
139140
if PYDANTIC_V2:
140141
return model.model_dump(
142+
exclude=exclude,
141143
exclude_unset=exclude_unset,
142144
exclude_defaults=exclude_defaults,
143145
)
144146
return cast(
145147
"dict[str, Any]",
146148
model.dict( # pyright: ignore[reportDeprecated, reportUnnecessaryCast]
149+
exclude=exclude,
147150
exclude_unset=exclude_unset,
148151
exclude_defaults=exclude_defaults,
149152
),
@@ -159,22 +162,19 @@ def model_parse(model: type[_ModelT], data: Any) -> _ModelT:
159162
# generic models
160163
if TYPE_CHECKING:
161164

162-
class GenericModel(pydantic.BaseModel):
163-
...
165+
class GenericModel(pydantic.BaseModel): ...
164166

165167
else:
166168
if PYDANTIC_V2:
167169
# there no longer needs to be a distinction in v2 but
168170
# we still have to create our own subclass to avoid
169171
# inconsistent MRO ordering errors
170-
class GenericModel(pydantic.BaseModel):
171-
...
172+
class GenericModel(pydantic.BaseModel): ...
172173

173174
else:
174175
import pydantic.generics
175176

176-
class GenericModel(pydantic.generics.GenericModel, pydantic.BaseModel):
177-
...
177+
class GenericModel(pydantic.generics.GenericModel, pydantic.BaseModel): ...
178178

179179

180180
# cached properties
@@ -193,26 +193,21 @@ class typed_cached_property(Generic[_T]):
193193
func: Callable[[Any], _T]
194194
attrname: str | None
195195

196-
def __init__(self, func: Callable[[Any], _T]) -> None:
197-
...
196+
def __init__(self, func: Callable[[Any], _T]) -> None: ...
198197

199198
@overload
200-
def __get__(self, instance: None, owner: type[Any] | None = None) -> Self:
201-
...
199+
def __get__(self, instance: None, owner: type[Any] | None = None) -> Self: ...
202200

203201
@overload
204-
def __get__(self, instance: object, owner: type[Any] | None = None) -> _T:
205-
...
202+
def __get__(self, instance: object, owner: type[Any] | None = None) -> _T: ...
206203

207204
def __get__(self, instance: object, owner: type[Any] | None = None) -> _T | Self:
208205
raise NotImplementedError()
209206

210-
def __set_name__(self, owner: type[Any], name: str) -> None:
211-
...
207+
def __set_name__(self, owner: type[Any], name: str) -> None: ...
212208

213209
# __set__ is not defined at runtime, but @cached_property is designed to be settable
214-
def __set__(self, instance: object, value: _T) -> None:
215-
...
210+
def __set__(self, instance: object, value: _T) -> None: ...
216211
else:
217212
try:
218213
from functools import cached_property as cached_property

0 commit comments

Comments
 (0)