Skip to content

Commit a6bd920

Browse files
committed
better names for commands
1 parent d778856 commit a6bd920

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

60 files changed

+200
-197
lines changed

pydantic_ai_slim/pydantic_ai/_agent_graph.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -608,7 +608,7 @@ async def process_function_tools( # noqa C901
608608
elif mcp_tool := await _tool_from_mcp_server(call.tool_name, ctx):
609609
if stub_function_tools:
610610
# TODO(Marcelo): We should add coverage for this part of the code.
611-
output_parts.append( # pragma: not covered
611+
output_parts.append( # pragma: no cover
612612
_messages.ToolReturnPart(
613613
tool_name=call.tool_name,
614614
content='Tool not executed - a final result was already processed.',
@@ -730,7 +730,7 @@ async def _tool_from_mcp_server(
730730
async def run_tool(ctx: RunContext[DepsT], **args: Any) -> Any:
731731
# There's no normal situation where the server will not be running at this point, we check just in case
732732
# some weird edge case occurs.
733-
if not server.is_running: # pragma: not covered
733+
if not server.is_running: # pragma: no cover
734734
raise exceptions.UserError(f'MCP server is not running: {server}')
735735
result = await server.call_tool(tool_name, args)
736736
return result

pydantic_ai_slim/pydantic_ai/_cli.py

+5-5
Original file line numberDiff line numberDiff line change
@@ -165,7 +165,7 @@ def cli(args_list: Sequence[str] | None = None) -> int:
165165
session: PromptSession[Any] = PromptSession(history=FileHistory(str(history)))
166166
try:
167167
return asyncio.run(run_chat(session, stream, cli_agent, console, code_theme))
168-
except KeyboardInterrupt: # pragma: not covered
168+
except KeyboardInterrupt: # pragma: no cover
169169
return 0
170170

171171

@@ -177,7 +177,7 @@ async def run_chat(session: PromptSession[Any], stream: bool, agent: Agent, cons
177177
try:
178178
auto_suggest = CustomAutoSuggest(['/markdown', '/multiline', '/exit'])
179179
text = await session.prompt_async('pai ➤ ', auto_suggest=auto_suggest, multiline=multiline)
180-
except (KeyboardInterrupt, EOFError): # pragma: not covered
180+
except (KeyboardInterrupt, EOFError): # pragma: no cover
181181
return 0
182182

183183
if not text.strip():
@@ -191,7 +191,7 @@ async def run_chat(session: PromptSession[Any], stream: bool, agent: Agent, cons
191191
else:
192192
try:
193193
messages = await ask_agent(agent, text, stream, console, code_theme, messages)
194-
except CancelledError: # pragma: not covered
194+
except CancelledError: # pragma: no cover
195195
console.print('[dim]Interrupted[/dim]')
196196

197197

@@ -233,7 +233,7 @@ def __init__(self, special_suggestions: list[str] | None = None):
233233
super().__init__()
234234
self.special_suggestions = special_suggestions or []
235235

236-
def get_suggestion(self, buffer: Buffer, document: Document) -> Suggestion | None: # pragma: not covered
236+
def get_suggestion(self, buffer: Buffer, document: Document) -> Suggestion | None: # pragma: no cover
237237
# Get the suggestion from history
238238
suggestion = super().get_suggestion(buffer, document)
239239

@@ -284,5 +284,5 @@ def handle_slash_command(
284284
return None, multiline
285285

286286

287-
def app(): # pragma: not covered
287+
def app(): # pragma: no cover
288288
sys.exit(cli())

pydantic_ai_slim/pydantic_ai/_utils.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -182,7 +182,7 @@ async def async_iter_groups() -> AsyncIterator[list[T]]:
182182

183183
try:
184184
yield async_iter_groups()
185-
finally: # pragma: not covered
185+
finally: # pragma: no cover
186186
# after iteration if a tasks still exists, cancel it, this will only happen if an error occurred
187187
if task:
188188
task.cancel('Cancelling due to error in iterator')

pydantic_ai_slim/pydantic_ai/agent.py

+11-11
Original file line numberDiff line numberDiff line change
@@ -269,7 +269,7 @@ def __init__(
269269
self.name = name
270270
self.model_settings = model_settings
271271

272-
if 'result_type' in _deprecated_kwargs: # pragma: not covered
272+
if 'result_type' in _deprecated_kwargs: # pragma: no cover
273273
if output_type is not str:
274274
raise TypeError('`result_type` and `output_type` cannot be set at the same time.')
275275
warnings.warn('`result_type` is deprecated, use `output_type` instead', DeprecationWarning)
@@ -282,20 +282,20 @@ def __init__(
282282
self._deps_type = deps_type
283283

284284
self._deprecated_result_tool_name = _deprecated_kwargs.get('result_tool_name')
285-
if self._deprecated_result_tool_name is not None: # pragma: not covered
285+
if self._deprecated_result_tool_name is not None: # pragma: no cover
286286
warnings.warn(
287287
'`result_tool_name` is deprecated, use `output_type` with `ToolOutput` instead',
288288
DeprecationWarning,
289289
)
290290

291291
self._deprecated_result_tool_description = _deprecated_kwargs.get('result_tool_description')
292-
if self._deprecated_result_tool_description is not None: # pragma: not covered
292+
if self._deprecated_result_tool_description is not None: # pragma: no cover
293293
warnings.warn(
294294
'`result_tool_description` is deprecated, use `output_type` with `ToolOutput` instead',
295295
DeprecationWarning,
296296
)
297297
result_retries = _deprecated_kwargs.get('result_retries')
298-
if result_retries is not None: # pragma: not covered
298+
if result_retries is not None: # pragma: no cover
299299
if output_retries is not None:
300300
raise TypeError('`output_retries` and `result_retries` cannot be set at the same time.')
301301
warnings.warn('`result_retries` is deprecated, use `max_result_retries` instead', DeprecationWarning)
@@ -432,7 +432,7 @@ async def main():
432432
if infer_name and self.name is None:
433433
self._infer_name(inspect.currentframe())
434434

435-
if 'result_type' in _deprecated_kwargs: # pragma: not covered
435+
if 'result_type' in _deprecated_kwargs: # pragma: no cover
436436
if output_type is not str:
437437
raise TypeError('`result_type` and `output_type` cannot be set at the same time.')
438438
warnings.warn('`result_type` is deprecated, use `output_type` instead.', DeprecationWarning)
@@ -583,7 +583,7 @@ async def main():
583583
model_used = self._get_model(model)
584584
del model
585585

586-
if 'result_type' in _deprecated_kwargs: # pragma: not covered
586+
if 'result_type' in _deprecated_kwargs: # pragma: no cover
587587
if output_type is not str:
588588
raise TypeError('`result_type` and `output_type` cannot be set at the same time.')
589589
warnings.warn('`result_type` is deprecated, use `output_type` instead.', DeprecationWarning)
@@ -810,7 +810,7 @@ def run_sync(
810810
if infer_name and self.name is None:
811811
self._infer_name(inspect.currentframe())
812812

813-
if 'result_type' in _deprecated_kwargs: # pragma: not covered
813+
if 'result_type' in _deprecated_kwargs: # pragma: no cover
814814
if output_type is not str:
815815
raise TypeError('`result_type` and `output_type` cannot be set at the same time.')
816816
warnings.warn('`result_type` is deprecated, use `output_type` instead.', DeprecationWarning)
@@ -926,7 +926,7 @@ async def main():
926926
if frame := inspect.currentframe(): # pragma: no branch
927927
self._infer_name(frame.f_back)
928928

929-
if 'result_type' in _deprecated_kwargs: # pragma: not covered
929+
if 'result_type' in _deprecated_kwargs: # pragma: no cover
930930
if output_type is not str:
931931
raise TypeError('`result_type` and `output_type` cannot be set at the same time.')
932932
warnings.warn('`result_type` is deprecated, use `output_type` instead.', DeprecationWarning)
@@ -1741,7 +1741,7 @@ def _traceparent(self, *, required: Literal[False]) -> str | None: ...
17411741
def _traceparent(self) -> str: ...
17421742
def _traceparent(self, *, required: bool = True) -> str | None:
17431743
traceparent = self._graph_run._traceparent(required=False) # type: ignore[reportPrivateUsage]
1744-
if traceparent is None and required: # pragma: not covered
1744+
if traceparent is None and required: # pragma: no cover
17451745
raise AttributeError('No span was created for this agent run')
17461746
return traceparent
17471747

@@ -1765,7 +1765,7 @@ def next_node(
17651765
return next_node
17661766
if _agent_graph.is_agent_node(next_node):
17671767
return next_node
1768-
raise exceptions.AgentRunError(f'Unexpected node type: {type(next_node)}') # pragma: not covered
1768+
raise exceptions.AgentRunError(f'Unexpected node type: {type(next_node)}') # pragma: no cover
17691769

17701770
@property
17711771
def result(self) -> AgentRunResult[OutputDataT] | None:
@@ -1907,7 +1907,7 @@ def _traceparent(self, *, required: Literal[False]) -> str | None: ...
19071907
@overload
19081908
def _traceparent(self) -> str: ...
19091909
def _traceparent(self, *, required: bool = True) -> str | None:
1910-
if self._traceparent_value is None and required: # pragma: not covered
1910+
if self._traceparent_value is None and required: # pragma: no cover
19111911
raise AttributeError('No span was created for this agent run')
19121912
return self._traceparent_value
19131913

pydantic_ai_slim/pydantic_ai/mcp.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -308,7 +308,7 @@ async def client_streams(
308308
self,
309309
) -> AsyncIterator[
310310
tuple[MemoryObjectReceiveStream[JSONRPCMessage | Exception], MemoryObjectSendStream[JSONRPCMessage]]
311-
]: # pragma: not covered
311+
]: # pragma: no cover
312312
async with sse_client(
313313
url=self.url, headers=self.headers, timeout=self.timeout, sse_read_timeout=self.sse_read_timeout
314314
) as (read_stream, write_stream):

pydantic_ai_slim/pydantic_ai/messages.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -81,7 +81,7 @@ class VideoUrl:
8181
"""Type identifier, this is available on all parts as a discriminator."""
8282

8383
@property
84-
def media_type(self) -> VideoMediaType: # pragma: no cover
84+
def media_type(self) -> VideoMediaType: # pragma: lax no cover
8585
"""Return the media type of the video, based on the url."""
8686
if self.url.endswith('.mkv'):
8787
return 'video/x-matroska'
@@ -308,7 +308,7 @@ def _video_format(media_type: str) -> VideoFormat:
308308
return 'wmv'
309309
elif media_type == 'video/3gpp':
310310
return 'three_gp'
311-
else: # pragma: not covered
311+
else: # pragma: no cover
312312
raise ValueError(f'Unknown video media type: {media_type}')
313313

314314

pydantic_ai_slim/pydantic_ai/models/__init__.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -294,7 +294,7 @@ async def request_stream(
294294
raise NotImplementedError(f'Streamed requests not supported by this {self.__class__.__name__}')
295295
# yield is required to make this a generator for type checking
296296
# noinspection PyUnreachableCode
297-
yield # pragma: not covered
297+
yield # pragma: no cover
298298

299299
def customize_request_parameters(self, model_request_parameters: ModelRequestParameters) -> ModelRequestParameters:
300300
"""Customize the request parameters for the model.

pydantic_ai_slim/pydantic_ai/models/_json_schema.py

+4-4
Original file line numberDiff line numberDiff line change
@@ -43,7 +43,7 @@ def walk(self) -> JsonSchema:
4343
if not self.prefer_inlined_defs and self.defs:
4444
handled['$defs'] = {k: self._handle(v) for k, v in self.defs.items()}
4545

46-
elif self.recursive_refs: # pragma: not covered
46+
elif self.recursive_refs: # pragma: no cover
4747
# If we are preferring inlined defs and there are recursive refs, we _have_ to use a $defs+$ref structure
4848
# We try to use whatever the original root key was, but if it is already in use,
4949
# we modify it to avoid collisions.
@@ -70,7 +70,7 @@ def _handle(self, schema: JsonSchema) -> JsonSchema:
7070
break # recursive ref can't be unpacked
7171
self.refs_stack += (key,)
7272
def_schema = self.defs.get(key)
73-
if def_schema is None: # pragma: not covered
73+
if def_schema is None: # pragma: no cover
7474
raise UserError(f'Could not find $ref definition for {key}')
7575
schema = def_schema
7676

@@ -153,8 +153,8 @@ def _simplify_nullable_union(cases: list[JsonSchema]) -> list[JsonSchema]:
153153
new_schema = deepcopy(non_null_schema)
154154
new_schema['nullable'] = True
155155
return [new_schema]
156-
else: # pragma: not covered
156+
else: # pragma: no cover
157157
# they are both null, so just return one of them
158158
return [cases[0]]
159159

160-
return cases # pragma: not covered
160+
return cases # pragma: no cover

pydantic_ai_slim/pydantic_ai/models/anthropic.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -374,7 +374,7 @@ async def _map_user_prompt(
374374
source=PlainTextSourceParam(data=response.text, media_type=item.media_type, type='text'),
375375
type='document',
376376
)
377-
else: # pragma: not covered
377+
else: # pragma: no cover
378378
raise RuntimeError(f'Unsupported media type: {item.media_type}')
379379
else:
380380
raise RuntimeError(f'Unsupported content type: {type(item)}')

pydantic_ai_slim/pydantic_ai/models/bedrock.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -396,7 +396,7 @@ async def _map_messages(
396396
)
397397
elif isinstance(part, RetryPromptPart):
398398
# TODO(Marcelo): We need to add a test here.
399-
if part.tool_name is None: # pragma: not covered
399+
if part.tool_name is None: # pragma: no cover
400400
bedrock_messages.append({'role': 'user', 'content': [{'text': part.model_response()}]})
401401
else:
402402
assert part.tool_call_id is not None
@@ -475,7 +475,7 @@ async def _map_user_prompt(part: UserPromptPart, document_count: Iterator[int])
475475
)
476476
video: VideoBlockTypeDef = {'format': format, 'source': {'bytes': response.content}}
477477
content.append({'video': video})
478-
elif isinstance(item, AudioUrl): # pragma: not covered
478+
elif isinstance(item, AudioUrl): # pragma: no cover
479479
raise NotImplementedError('Audio is not supported yet.')
480480
else:
481481
assert_never(item)

pydantic_ai_slim/pydantic_ai/models/cohere.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -289,9 +289,9 @@ def _map_usage(response: ChatResponse) -> usage.Usage:
289289
details['input_tokens'] = int(u.billed_units.input_tokens)
290290
if u.billed_units.output_tokens:
291291
details['output_tokens'] = int(u.billed_units.output_tokens)
292-
if u.billed_units.search_units: # pragma: not covered
292+
if u.billed_units.search_units: # pragma: no cover
293293
details['search_units'] = int(u.billed_units.search_units)
294-
if u.billed_units.classifications: # pragma: not covered
294+
if u.billed_units.classifications: # pragma: no cover
295295
details['classifications'] = int(u.billed_units.classifications)
296296

297297
request_tokens = int(u.tokens.input_tokens) if u.tokens and u.tokens.input_tokens else None

pydantic_ai_slim/pydantic_ai/models/gemini.py

+4-4
Original file line numberDiff line numberDiff line change
@@ -236,7 +236,7 @@ async def _make_request(
236236
if (frequency_penalty := model_settings.get('frequency_penalty')) is not None:
237237
generation_config['frequency_penalty'] = frequency_penalty
238238
if (thinkingConfig := model_settings.get('gemini_thinking_config')) is not None:
239-
generation_config['thinking_config'] = thinkingConfig # pragma: not covered
239+
generation_config['thinking_config'] = thinkingConfig # pragma: no cover
240240
if (gemini_safety_settings := model_settings.get('gemini_safety_settings')) is not None:
241241
request_data['safetySettings'] = gemini_safety_settings
242242
if generation_config:
@@ -820,7 +820,7 @@ def transform(self, schema: JsonSchema) -> JsonSchema:
820820
schema.pop('title', None)
821821
schema.pop('default', None)
822822
schema.pop('$schema', None)
823-
if (const := schema.pop('const', None)) is not None: # pragma: not covered
823+
if (const := schema.pop('const', None)) is not None: # pragma: no cover
824824
# Gemini doesn't support const, but it does support enum with a single value
825825
schema['enum'] = [const]
826826
schema.pop('discriminator', None)
@@ -832,7 +832,7 @@ def transform(self, schema: JsonSchema) -> JsonSchema:
832832
schema.pop('exclusiveMinimum', None)
833833

834834
type_ = schema.get('type')
835-
if 'oneOf' in schema and 'type' not in schema: # pragma: not covered
835+
if 'oneOf' in schema and 'type' not in schema: # pragma: no cover
836836
# This gets hit when we have a discriminated union
837837
# Gemini returns an API error in this case even though it says in its error message it shouldn't...
838838
# Changing the oneOf to an anyOf prevents the API error and I think is functionally equivalent
@@ -856,7 +856,7 @@ def transform(self, schema: JsonSchema) -> JsonSchema:
856856
for item in prefix_items:
857857
if item not in unique_items:
858858
unique_items.append(item)
859-
if len(unique_items) > 1: # pragma: not covered
859+
if len(unique_items) > 1: # pragma: no cover
860860
schema['items'] = {'anyOf': unique_items}
861861
elif len(unique_items) == 1:
862862
schema['items'] = unique_items[0]

pydantic_ai_slim/pydantic_ai/models/groq.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -350,9 +350,9 @@ def _map_user_prompt(part: UserPromptPart) -> chat.ChatCompletionUserMessagePara
350350
content.append(chat.ChatCompletionContentPartImageParam(image_url=image_url, type='image_url'))
351351
else:
352352
raise RuntimeError('Only images are supported for binary content in Groq.')
353-
elif isinstance(item, DocumentUrl): # pragma: not covered
353+
elif isinstance(item, DocumentUrl): # pragma: no cover
354354
raise RuntimeError('DocumentUrl is not supported in Groq.')
355-
else: # pragma: not covered
355+
else: # pragma: no cover
356356
raise RuntimeError(f'Unsupported content type: {type(item)}')
357357

358358
return chat.ChatCompletionUserMessageParam(role='user', content=content)

pydantic_ai_slim/pydantic_ai/models/instrumented.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -238,7 +238,7 @@ def model_attributes(model: Model):
238238
if base_url := model.base_url:
239239
try:
240240
parsed = urlparse(base_url)
241-
except Exception: # pragma: not covered
241+
except Exception: # pragma: no cover
242242
pass
243243
else:
244244
if parsed.hostname:

pydantic_ai_slim/pydantic_ai/models/mistral.py

+3-3
Original file line numberDiff line numberDiff line change
@@ -408,7 +408,7 @@ def _get_python_type(cls, value: dict[str, Any]) -> str:
408408
if value_type == 'object':
409409
additional_properties = value.get('additionalProperties', {})
410410
if isinstance(additional_properties, bool):
411-
return 'bool' # pragma: not covered
411+
return 'bool' # pragma: no cover
412412
additional_properties_type = additional_properties.get('type')
413413
if (
414414
additional_properties_type in SIMPLE_JSON_TYPE_MAPPING
@@ -520,7 +520,7 @@ def _map_user_prompt(self, part: UserPromptPart) -> MistralUserMessage:
520520
raise RuntimeError('DocumentUrl is not supported in Mistral.')
521521
elif isinstance(item, VideoUrl):
522522
raise RuntimeError('VideoUrl is not supported in Mistral.')
523-
else: # pragma: not covered
523+
else: # pragma: no cover
524524
raise RuntimeError(f'Unsupported content type: {type(item)}')
525525
return MistralUserMessage(content=content)
526526

@@ -680,7 +680,7 @@ def _map_content(content: MistralOptionalNullable[MistralContent]) -> str | None
680680
output = content
681681

682682
# Note: Check len to handle potential mismatch between function calls and responses from the API. (`msg: not the same number of function class and responses`)
683-
if output and len(output) == 0: # pragma: not covered
683+
if output and len(output) == 0: # pragma: no cover
684684
output = None
685685

686686
return output

0 commit comments

Comments (0)