Skip to content

Commit 3494fe9

Browse files
committed
Fix linting issues in integration tests
- Remove main() functions with print statements
- Fix import sorting issues
- Remove unused imports
- Fix line length violations
- Format code with ruff
1 parent f78dd53 commit 3494fe9

File tree

4 files changed

+7
-110
lines changed

4 files changed

+7
-110
lines changed

libs/oci/tests/integration_tests/chat_models/test_chat_features.py

Lines changed: 4 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -19,10 +19,9 @@
1919
AIMessage,
2020
HumanMessage,
2121
SystemMessage,
22-
ToolMessage,
2322
)
24-
from langchain_core.prompts import ChatPromptTemplate
2523
from langchain_core.output_parsers import StrOutputParser
24+
from langchain_core.prompts import ChatPromptTemplate
2625
from pydantic import BaseModel, Field
2726

2827
from langchain_oci.chat_models import ChatOCIGenAI
@@ -212,9 +211,7 @@ def test_tool_choice_none(llm):
212211
tools = [add_numbers]
213212
llm_with_tools = llm.bind_tools(tools, tool_choice="none")
214213

215-
response = llm_with_tools.invoke(
216-
[HumanMessage(content="What is 5 plus 3?")]
217-
)
214+
response = llm_with_tools.invoke([HumanMessage(content="What is 5 plus 3?")])
218215

219216
# Should not make tool calls when tool_choice is none
220217
assert len(response.tool_calls) == 0
@@ -345,6 +342,7 @@ def test_stop_sequences():
345342
@pytest.mark.requires("oci")
346343
def test_invalid_tool_schema(llm):
347344
"""Test handling of invalid tool definitions."""
345+
348346
# Should handle tools without proper docstrings
349347
def bad_tool(x):
350348
return x
@@ -379,7 +377,7 @@ def test_system_message_role(llm):
379377
response_pirate = llm.invoke(messages_pirate)
380378

381379
messages_formal = [
382-
SystemMessage(content="You are a formal butler. Use extremely formal language."),
380+
SystemMessage(content="You are a formal butler. Use formal language."),
383381
HumanMessage(content="How are you today?"),
384382
]
385383
response_formal = llm.invoke(messages_formal)

libs/oci/tests/integration_tests/chat_models/test_langchain_compatibility.py

Lines changed: 0 additions & 60 deletions
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,6 @@
1616
"""
1717

1818
import os
19-
import sys
2019

2120
import pytest
2221
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
@@ -363,62 +362,3 @@ def test_tool_calls_structure(chat_model):
363362
assert "id" in tc
364363
assert "type" in tc
365364
assert tc["type"] == "tool_call"
366-
367-
368-
def main():
369-
"""Run tests manually for debugging."""
370-
import langchain_core
371-
372-
print(f"langchain-core version: {langchain_core.__version__}")
373-
print(f"Python version: {sys.version}")
374-
375-
config = get_test_config()
376-
print(f"\nTest configuration:")
377-
print(f" Model: {config['model_id']}")
378-
print(f" Endpoint: {config['service_endpoint']}")
379-
print(f" Profile: {config['auth_profile']}")
380-
381-
chat = ChatOCIGenAI(
382-
model_id=config["model_id"],
383-
service_endpoint=config["service_endpoint"],
384-
compartment_id=config["compartment_id"],
385-
auth_profile=config["auth_profile"],
386-
auth_type=config["auth_type"],
387-
model_kwargs={"temperature": 0, "max_tokens": 256},
388-
)
389-
390-
print("\n" + "=" * 60)
391-
print("Running manual tests...")
392-
print("=" * 60)
393-
394-
# Test 1: Basic invoke
395-
print("\n1. Testing basic invoke...")
396-
response = chat.invoke([HumanMessage(content="Say hello")])
397-
print(f" Response: {response.content[:50]}...")
398-
print(f" Type: {type(response).__name__}")
399-
400-
# Test 2: Tool calling
401-
print("\n2. Testing tool calling...")
402-
chat_tools = chat.bind_tools([get_weather])
403-
response = chat_tools.invoke([HumanMessage(content="Weather in Tokyo?")])
404-
print(f" Tool calls: {response.tool_calls}")
405-
406-
# Test 3: Structured output
407-
print("\n3. Testing structured output...")
408-
structured = chat.with_structured_output(Joke)
409-
joke = structured.invoke("Tell a joke")
410-
print(f" Setup: {joke.setup}")
411-
print(f" Punchline: {joke.punchline}")
412-
413-
# Test 4: Streaming
414-
print("\n4. Testing streaming...")
415-
chunks = list(chat.stream([HumanMessage(content="Count 1-3")]))
416-
print(f" Chunks received: {len(chunks)}")
417-
418-
print("\n" + "=" * 60)
419-
print("All manual tests completed!")
420-
print("=" * 60)
421-
422-
423-
if __name__ == "__main__":
424-
main()

libs/oci/tests/integration_tests/chat_models/test_multi_model.py

Lines changed: 2 additions & 43 deletions
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,6 @@
2626

2727
from langchain_oci.chat_models import ChatOCIGenAI
2828

29-
3029
# =============================================================================
3130
# Model Configurations
3231
# =============================================================================
@@ -252,9 +251,7 @@ class Summary(BaseModel):
252251
llm = create_llm(model_id)
253252
structured_llm = llm.with_structured_output(Summary)
254253

255-
result = structured_llm.invoke(
256-
"Summarize: The Earth orbits the Sun once per year."
257-
)
254+
result = structured_llm.invoke("Summarize: The Earth orbits the Sun once per year.")
258255

259256
# Grok may return None in some cases
260257
if result is not None:
@@ -350,9 +347,7 @@ def get_info(topic: str) -> str:
350347
llm = create_openai_llm(model_id)
351348
llm_with_tools = llm.bind_tools([get_info])
352349

353-
response = llm_with_tools.invoke(
354-
[HumanMessage(content="Get info about Python")]
355-
)
350+
response = llm_with_tools.invoke([HumanMessage(content="Get info about Python")])
356351

357352
assert isinstance(response, AIMessage)
358353
# OpenAI models should call the tool
@@ -466,39 +461,3 @@ def test_fast_models_respond_quickly():
466461
llm = create_llm(model_id, max_tokens=50)
467462
response = llm.invoke([HumanMessage(content="Hi")])
468463
assert isinstance(response, AIMessage)
469-
470-
471-
def main():
472-
"""Manual test runner for debugging."""
473-
import sys
474-
475-
print("=" * 60)
476-
print("Multi-Model Integration Tests")
477-
print("=" * 60)
478-
479-
config = get_config()
480-
print(f"\nEndpoint: {config['service_endpoint']}")
481-
print(f"Profile: {config['auth_profile']}")
482-
483-
# Test each vendor
484-
test_models = [
485-
("Meta Llama 4", "meta.llama-4-maverick-17b-128e-instruct-fp8"),
486-
("Cohere Command", "cohere.command-a-03-2025"),
487-
("xAI Grok", "xai.grok-3-mini-fast"),
488-
]
489-
490-
for name, model_id in test_models:
491-
print(f"\n--- Testing {name} ({model_id}) ---")
492-
try:
493-
llm = create_llm(model_id)
494-
response = llm.invoke([HumanMessage(content="Say hello")])
495-
print(f"✓ Response: {response.content[:50]}...")
496-
except Exception as e:
497-
print(f"✗ Error: {e}")
498-
499-
print("\n" + "=" * 60)
500-
print("Manual tests complete")
501-
502-
503-
if __name__ == "__main__":
504-
main()

libs/oci/tests/integration_tests/chat_models/test_tool_calling.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -53,8 +53,8 @@
5353
import os
5454

5555
import pytest
56-
from langchain_core.tools import StructuredTool
5756
from langchain_core.messages import HumanMessage, SystemMessage
57+
from langchain_core.tools import StructuredTool
5858
from langgraph.graph import END, START, MessagesState, StateGraph
5959
from langgraph.prebuilt import ToolNode
6060

0 commit comments

Comments (0)