|
1 | 1 | #!/usr/bin/env python3 |
2 | | -"""Quick test script for OpenAI models on OCI GenAI. |
| 2 | +# Copyright (c) 2025 Oracle and/or its affiliates. |
| 3 | +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ |
3 | 4 |
|
4 | | -This tests the rebased LangChain 1.x support with OpenAI models. |
| 5 | +"""Quick smoke tests for OpenAI models on OCI GenAI. |
| 6 | +
|
| 7 | +These are simple smoke tests to verify LangChain 1.x support with OpenAI models. |
| 8 | +For comprehensive OpenAI model tests, see test_openai_models.py. |
5 | 9 |
|
6 | 10 | Setup: |
7 | 11 | export OCI_COMP=<your-compartment-id> |
8 | 12 |
|
9 | 13 | Run: |
10 | | - python test_openai_model.py |
| 14 | + pytest tests/integration_tests/chat_models/test_openai_model.py -v |
11 | 15 | """ |
12 | 16 |
|
13 | 17 | import os |
14 | | -import sys |
15 | 18 |
|
16 | | -from langchain_core.messages import HumanMessage, SystemMessage |
| 19 | +import pytest |
| 20 | +from langchain_core.messages import AIMessage, HumanMessage, SystemMessage |
| 21 | + |
17 | 22 | from langchain_oci.chat_models import ChatOCIGenAI |
18 | 23 |
|
19 | | -# Configuration |
20 | | -COMPARTMENT_ID = os.environ.get("OCI_COMP") |
21 | | -if not COMPARTMENT_ID: |
22 | | - print("ERROR: OCI_COMP environment variable not set") |
23 | | - print("Set it with: export OCI_COMP=<your-compartment-id>") |
24 | | - sys.exit(1) |
25 | | - |
26 | | -MODEL_ID = "openai.gpt-oss-20b" # or openai.gpt-oss-120b |
27 | | -ENDPOINT = "https://inference.generativeai.us-chicago-1.oci.oraclecloud.com" |
28 | | - |
29 | | -print(f"Testing OpenAI model: {MODEL_ID}") |
30 | | -print(f"Compartment: {COMPARTMENT_ID[:50]}...") |
31 | | -print("-" * 60) |
32 | | - |
33 | | -# Create chat model |
34 | | -chat = ChatOCIGenAI( |
35 | | - model_id=MODEL_ID, |
36 | | - service_endpoint=ENDPOINT, |
37 | | - compartment_id=COMPARTMENT_ID, |
38 | | - auth_type="SECURITY_TOKEN", |
39 | | - auth_profile="DEFAULT", |
40 | | - model_kwargs={ |
41 | | - "temperature": 0.7, |
42 | | - "max_completion_tokens": 100, # OpenAI uses max_completion_tokens |
43 | | - }, |
44 | | -) |
45 | | - |
46 | | -# Test 1: Basic completion |
47 | | -print("\nTest 1: Basic completion") |
48 | | -print("-" * 60) |
49 | | -try: |
50 | | - response = chat.invoke([HumanMessage(content="Say hello in 5 words")]) |
51 | | - print(f"✓ Response: {response.content}") |
52 | | -except Exception as e: |
53 | | - print(f"✗ Error: {e}") |
54 | | - sys.exit(1) |
55 | | - |
56 | | -# Test 2: With system message |
57 | | -print("\nTest 2: With system message") |
58 | | -print("-" * 60) |
59 | | -try: |
60 | | - response = chat.invoke([ |
61 | | - SystemMessage(content="You are a helpful math tutor."), |
62 | | - HumanMessage(content="What is 15 * 23?") |
63 | | - ]) |
64 | | - print(f"✓ Response: {response.content}") |
65 | | -except Exception as e: |
66 | | - print(f"✗ Error: {e}") |
67 | | - sys.exit(1) |
68 | | - |
69 | | -# Test 3: Streaming |
70 | | -print("\nTest 3: Streaming") |
71 | | -print("-" * 60) |
72 | | -try: |
73 | | - print("Response: ", end="", flush=True) |
74 | | - for chunk in chat.stream([HumanMessage(content="Count from 1 to 5")]): |
75 | | - print(chunk.content, end="", flush=True) |
76 | | - print("\n✓ Streaming works!") |
77 | | -except Exception as e: |
78 | | - print(f"\n✗ Error: {e}") |
79 | | - sys.exit(1) |
80 | | - |
81 | | -print("\n" + "=" * 60) |
82 | | -print("✓ All tests passed! LangChain 1.x + OpenAI model working correctly") |
83 | | -print("=" * 60) |
| 24 | + |
@pytest.fixture
def openai_chat():
    """Create a ChatOCIGenAI instance for OpenAI model smoke testing.

    Skips the calling test when ``OCI_COMP`` is unset so the suite can run
    in environments without OCI credentials.

    Optional environment overrides (defaults preserve prior behavior):
        OCI_MODEL_ID        -- model to exercise (default: openai.gpt-oss-20b)
        OCI_GENAI_ENDPOINT  -- service endpoint (default: us-chicago-1 inference)
    """
    compartment_id = os.environ.get("OCI_COMP")
    if not compartment_id:
        pytest.skip("OCI_COMP environment variable not set")

    model_id = os.environ.get("OCI_MODEL_ID", "openai.gpt-oss-20b")
    endpoint = os.environ.get(
        "OCI_GENAI_ENDPOINT",
        "https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
    )

    return ChatOCIGenAI(
        model_id=model_id,
        service_endpoint=endpoint,
        compartment_id=compartment_id,
        auth_type="SECURITY_TOKEN",
        auth_profile="DEFAULT",
        model_kwargs={
            "temperature": 0.7,
            # OpenAI-family models take max_completion_tokens, not max_tokens.
            "max_completion_tokens": 100,
        },
    )
| 43 | + |
| 44 | + |
@pytest.mark.requires("oci")
def test_basic_completion(openai_chat):
    """A plain human prompt should yield a non-empty AIMessage."""
    result = openai_chat.invoke([HumanMessage(content="Say hello in 5 words")])

    assert isinstance(result, AIMessage)
    assert isinstance(result.content, str)
    assert result.content
| 53 | + |
| 54 | + |
@pytest.mark.requires("oci")
def test_system_message(openai_chat):
    """A system + human message pair should yield a non-empty AIMessage."""
    messages = [
        SystemMessage(content="You are a helpful math tutor."),
        HumanMessage(content="What is 15 * 23?"),
    ]
    result = openai_chat.invoke(messages)

    assert isinstance(result, AIMessage)
    assert isinstance(result.content, str)
    assert result.content
| 68 | + |
| 69 | + |
@pytest.mark.requires("oci")
def test_streaming(openai_chat):
    """Streaming should yield at least one AIMessage chunk with str content."""
    prompt = [HumanMessage(content="Count from 1 to 5")]
    received = [piece for piece in openai_chat.stream(prompt)]

    assert received
    for piece in received:
        # AIMessageChunk subclasses AIMessage, so this holds for stream output.
        assert isinstance(piece, AIMessage)
        assert isinstance(piece.content, str)
0 commit comments