# Copyright (c) 2025 Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/

"""Integration tests for the response_format feature with OCI Generative AI chat models.

These tests verify that the response_format parameter works correctly with real
OCI Generative AI API calls for both JSON mode and JSON schema mode.

## Prerequisites

1. **OCI Authentication**: Set up OCI authentication with a security token:
   ```bash
   oci session authenticate
   ```

2. **Environment Variables**: Export the following:
   ```bash
   export OCI_REGION="us-chicago-1"  # or your region
   export OCI_COMP="ocid1.compartment.oc1..your-compartment-id"
   ```

3. **OCI Config**: Ensure `~/.oci/config` exists with a DEFAULT profile (an
   illustrative layout is sketched below)
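
   For reference, a session-token profile created by `oci session authenticate`
   typically looks like the following; all values are illustrative placeholders:
   ```ini
   [DEFAULT]
   fingerprint=<key-fingerprint>
   key_file=~/.oci/sessions/DEFAULT/oci_api_key.pem
   tenancy=ocid1.tenancy.oc1..example
   region=us-chicago-1
   security_token_file=~/.oci/sessions/DEFAULT/token
   ```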

## Running the Tests

Run all integration tests:
```bash
cd libs/oci
pytest tests/integration_tests/chat_models/test_response_format.py -v
```

Run a specific test:
```bash
pytest tests/integration_tests/chat_models/\
test_response_format.py::test_json_mode_meta_llama -v
```

## What These Tests Verify

1. **JSON Mode**: Models return valid JSON when using `{"type": "JSON_OBJECT"}`
2. **JSON Schema Mode**: Models follow specific JSON schemas when provided
3. **Multi-Vendor**: Works for both Meta Llama and Cohere models
4. **Structured Output**: `with_structured_output` integration works end-to-end
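
## Example Usage

A minimal sketch of JSON mode outside the test suite (assuming the same
environment variables and security-token session described above):

```python
import json
import os

from langchain_core.messages import HumanMessage

from langchain_oci.chat_models import ChatOCIGenAI

region = os.getenv("OCI_REGION", "us-chicago-1")
llm = ChatOCIGenAI(
    model_id="meta.llama-3.3-70b-instruct",
    service_endpoint=f"https://inference.generativeai.{region}.oci.oraclecloud.com",
    compartment_id=os.getenv("OCI_COMP"),
    model_kwargs={"response_format": {"type": "JSON_OBJECT"}},
    auth_type="SECURITY_TOKEN",
)
response = llm.invoke([HumanMessage(content="List two colors as JSON.")])
# json.loads raises json.JSONDecodeError if the model did not return valid JSON
print(json.loads(response.content))
```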
"""

import json
import os

import pytest
from langchain_core.messages import HumanMessage
from pydantic import BaseModel, Field

from langchain_oci.chat_models import ChatOCIGenAI


def create_chat_model(model_id: str, response_format=None, **kwargs):
    """Create a ChatOCIGenAI instance for testing."""
    region = os.getenv("OCI_REGION", "us-chicago-1")
    endpoint = f"https://inference.generativeai.{region}.oci.oraclecloud.com"

    model_kwargs = {"temperature": 0.1, "max_tokens": 512}
    if response_format:
        model_kwargs["response_format"] = response_format

    return ChatOCIGenAI(
        model_id=model_id,
        service_endpoint=endpoint,
        compartment_id=os.getenv("OCI_COMP"),
        model_kwargs=model_kwargs,
        auth_type="SECURITY_TOKEN",
        auth_profile="DEFAULT",
        auth_file_location=os.path.expanduser("~/.oci/config"),
        **kwargs,
    )


@pytest.mark.requires("oci")
@pytest.mark.parametrize(
    "model_id",
    [
        "meta.llama-3.3-70b-instruct",
        "cohere.command-r-plus-08-2024",
    ],
)
def test_json_mode_basic(model_id: str):
    """Test basic JSON mode with response_format parameter.

    This test verifies that when response_format={"type": "JSON_OBJECT"} is set,
    the model returns valid JSON output.
    """
    llm = create_chat_model(model_id)
    llm_with_json = llm.bind(response_format={"type": "JSON_OBJECT"})

    response = llm_with_json.invoke(
        [
            HumanMessage(
                content="List three colors in JSON format with a 'colors' array."
            )
        ]
    )

    # Verify response is valid JSON
    try:
        parsed = json.loads(response.content)
        assert isinstance(parsed, dict), "Response should be a JSON object"
        assert "colors" in parsed or "colours" in parsed, "Should contain colors array"
    except json.JSONDecodeError as e:
        pytest.fail(f"Response is not valid JSON: {e}\nContent: {response.content}")


@pytest.mark.requires("oci")
def test_json_mode_meta_llama():
    """Test JSON mode specifically with Meta Llama models."""
    model_id = "meta.llama-3.3-70b-instruct"
    llm = create_chat_model(model_id, response_format={"type": "JSON_OBJECT"})

    response = llm.invoke(
        [
            HumanMessage(
                content=(
                    "Create a JSON object with a person's name and age. "
                    "Name: Alice, Age: 30"
                )
            )
        ]
    )

    # Verify valid JSON
    try:
        parsed = json.loads(response.content)
        assert isinstance(parsed, dict)
        # Check for common variations in key names by substring-matching the
        # stringified payload
        parsed_text = str(parsed).lower()
        has_name = any(term in parsed_text for term in ("name", "person", "alice"))
        has_age = "30" in parsed_text or "age" in parsed_text
        assert has_name or has_age, f"Should contain person info: {parsed}"
    except json.JSONDecodeError as e:
        pytest.fail(f"Meta Llama JSON mode failed: {e}\nContent: {response.content}")


@pytest.mark.requires("oci")
def test_json_mode_cohere():
    """Test JSON mode specifically with Cohere models."""
    model_id = "cohere.command-r-plus-08-2024"
    llm = create_chat_model(model_id, response_format={"type": "JSON_OBJECT"})

    response = llm.invoke(
        [
            HumanMessage(
                content=(
                    "Generate a JSON object with a book title and author. "
                    "Use 'title' and 'author' as keys."
                )
            )
        ]
    )

    # Verify valid JSON
    try:
        parsed = json.loads(response.content)
        assert isinstance(parsed, dict)
        # Cohere should follow instructions closely
        assert len(parsed) >= 1, f"Should have at least one key: {parsed}"
    except json.JSONDecodeError as e:
        pytest.fail(f"Cohere JSON mode failed: {e}\nContent: {response.content}")


@pytest.mark.requires("oci")
@pytest.mark.parametrize(
    "model_id",
    [
        "meta.llama-3.3-70b-instruct",
        "cohere.command-r-plus-08-2024",
    ],
)
def test_with_structured_output_json_mode(model_id: str):
    """Test with_structured_output using json_mode method.

    This verifies the integration between response_format and LangChain's
    structured output feature using JSON mode.
    """

    class Person(BaseModel):
        """A person with name and age."""

        name: str = Field(description="The person's name")
        age: int = Field(description="The person's age")

    llm = create_chat_model(model_id)
    structured_llm = llm.with_structured_output(Person, method="json_mode")

    result = structured_llm.invoke(
        "Tell me about a person named Bob who is 25 years old."
    )

    # Verify we got a Person object
    assert isinstance(result, Person), (
        f"Should return Person object, got {type(result)}"
    )
    assert hasattr(result, "name"), "Should have name attribute"
    assert hasattr(result, "age"), "Should have age attribute"

    # Verify the content is reasonable (some models might not follow exactly)
    # Just check that we got some data
    assert result.name, "Name should not be empty"
    assert result.age > 0, "Age should be positive"


@pytest.mark.requires("oci")
@pytest.mark.parametrize(
    "model_id",
    [
        "meta.llama-3.3-70b-instruct",
        # Note: Cohere models use CohereResponseFormat, not JsonSchemaResponseFormat,
        # so the json_schema method is not supported for Cohere models
    ],
)
def test_with_structured_output_json_schema(model_id: str):
    """Test with_structured_output using json_schema method.

    This verifies that JSON schema mode works with the OCI API and properly
    constrains the output to match the provided schema.

    Note: This test only runs with Meta Llama models, as Cohere models require
    a different response format type (CohereResponseFormat vs
    JsonSchemaResponseFormat).
    """

    class Product(BaseModel):
        """A product with details."""

        product_name: str = Field(description="Name of the product")
        price: float = Field(description="Price in USD")
        in_stock: bool = Field(description="Whether the product is in stock")

    llm = create_chat_model(model_id)
    structured_llm = llm.with_structured_output(Product, method="json_schema")

    result = structured_llm.invoke(
        "Create a product: Laptop, $999.99, available in stock"
    )

    # Verify we got a Product object with correct types
    assert isinstance(result, Product), (
        f"Should return Product object, got {type(result)}"
    )
    assert isinstance(result.product_name, str), "product_name should be string"
    assert isinstance(result.price, (int, float)), "price should be numeric"
    assert isinstance(result.in_stock, bool), "in_stock should be boolean"

    # Verify reasonable values
    assert result.product_name, "product_name should not be empty"
    assert result.price > 0, "price should be positive"


@pytest.mark.requires("oci")
def test_response_format_via_model_kwargs():
    """Test that response_format works when passed via model_kwargs.

    This tests an alternative way to set response_format at initialization time.
    """
    model_id = "meta.llama-3.3-70b-instruct"
    region = os.getenv("OCI_REGION", "us-chicago-1")
    endpoint = f"https://inference.generativeai.{region}.oci.oraclecloud.com"

    llm = ChatOCIGenAI(
        model_id=model_id,
        service_endpoint=endpoint,
        compartment_id=os.getenv("OCI_COMP"),
        model_kwargs={
            "temperature": 0.1,
            "max_tokens": 512,
            "response_format": {"type": "JSON_OBJECT"},
        },
        auth_type="SECURITY_TOKEN",
        auth_profile="DEFAULT",
        auth_file_location=os.path.expanduser("~/.oci/config"),
    )

    response = llm.invoke(
        [HumanMessage(content="Create a JSON with a list of two fruits.")]
    )

    # Verify valid JSON
    try:
        parsed = json.loads(response.content)
        assert isinstance(parsed, dict), "Response should be a JSON object"
    except json.JSONDecodeError as e:
        pytest.fail(
            f"model_kwargs response_format failed: {e}\nContent: {response.content}"
        )


@pytest.mark.requires("oci")
def test_json_mode_complex_nested_structure():
    """Test JSON mode with a more complex nested structure request."""
    model_id = "cohere.command-r-plus-08-2024"
    llm = create_chat_model(model_id, response_format={"type": "JSON_OBJECT"})

    response = llm.invoke(
        [
            HumanMessage(
                content="""Create a JSON object representing a company with:
                - name: "TechCorp"
                - employees: array of 2 employees, each with name and role
                - founded: 2020"""
            )
        ]
    )

    # Verify valid JSON with nested structure
    try:
        parsed = json.loads(response.content)
        assert isinstance(parsed, dict), "Response should be a JSON object"

        # Check for reasonable structure (flexible since models vary)
        assert len(parsed) >= 1, "Should have at least one top-level key"

        # Try to verify it has some nested structure
        has_nested = any(isinstance(v, (dict, list)) for v in parsed.values())
        assert has_nested or len(str(parsed)) > 50, (
            "Should have some nested structure or substantial content"
        )

    except json.JSONDecodeError as e:
        pytest.fail(f"Complex JSON failed: {e}\nContent: {response.content}")


@pytest.mark.requires("oci")
def test_response_format_class_level():
    """Test response_format set at class initialization level."""
    model_id = "meta.llama-3.3-70b-instruct"
    llm = create_chat_model(model_id, response_format={"type": "JSON_OBJECT"})

    # Should work without bind()
    response = llm.invoke(
        [HumanMessage(content="Return JSON with a single key 'status' set to 'ok'")]
    )

    # Verify valid JSON
    try:
        parsed = json.loads(response.content)
        assert isinstance(parsed, dict), "Response should be a JSON object"
    except json.JSONDecodeError as e:
        pytest.fail(
            f"Class-level response_format failed: {e}\nContent: {response.content}"
        )