generated from guardrails-ai/validator-template
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy path: validator.py
132 lines (100 loc) · 4.52 KB
/
validator.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
"""
This template is intended for creating simple validators.
If your validator is complex or requires additional post-installation steps, consider using the template repository instead.
The template repository can be found here: https://github.com/guardrails-ai/validator-template
"""
from typing import Any, Callable, Dict, Optional
from guardrails.validator_base import (
FailResult,
PassResult,
ValidationResult,
Validator,
register_validator,
)
@register_validator(name="guardrails/qaprompt_relevance", data_type="string")
class QapromptRelevance(Validator):
    # FIXME: Update the class docstring to reflect the purpose and usage of your validator.
    """# Overview

    | Developed by | {FIXME: Your organization name} |
    | Date of development | Feb 26, 2025 |
    | Validator type | Format |
    | License | Apache 2 |
    | Input/Output | Output |

    # Description

    {FIXME: A brief description of what your validator does.}

    ## (Optional) Intended Use

    {FIXME: Optionally, include a brief description of the intended use of your
    validator, including any limitations or constraints.}

    ## Requirements

    * Dependencies:
        - guardrails-ai>=0.4.0
        - {FIXME: Include any other dependencies you need here}
    * Dev Dependencies:
        - pytest
        - pyright
        - ruff
        - {FIXME: Include any other dev dependencies you need here}
    * Foundation model access keys:
        - {FIXME: Include any access environment variables you need here like OPENAI_API_KEY}

    # Installation

    ```bash
    $ guardrails hub install hub://guardrails/qaprompt_relevance
    ```

    # Usage Examples

    ## Validating string output via Python

    In this example, we apply the validator to a string output generated by an LLM.

    ```python
    # Import Guard and Validator
    from guardrails.hub import QapromptRelevance
    from guardrails import Guard

    # Setup Guard
    guard = Guard.use(
        QapromptRelevance({FIXME: list any args here})
    )

    guard.validate({FIXME: Add an input that should pass the validator})  # Validator passes
    guard.validate({FIXME: Add an input that should fail the validator})  # Validator fails
    ```
    """  # noqa

    # If you don't have any init args, you can omit the __init__ method.
    def __init__(
        self,
        arg_1: str,  # FIXME: Replace with your custom init args.
        on_fail: Optional[Callable] = None,
    ):
        """Initializes a new instance of the QapromptRelevance class.

        Args:
            arg_1 (str): FIXME: Describe the purpose of this argument.
            on_fail (str, Callable): The policy to enact when a validator fails.
                If `str`, must be one of `reask`, `fix`, `filter`, `refrain`,
                `noop`, `exception` or `fix_reask`. Otherwise, must be a function
                that is called when the validator fails.
        """
        # Init kwargs are forwarded to the base Validator so the framework can
        # track and re-instantiate this validator with the same configuration.
        super().__init__(on_fail=on_fail, arg_1=arg_1)
        self._arg_1 = arg_1

    def validate(self, value: Any, metadata: Dict) -> ValidationResult:
        """Validates that {fill in how you validator interacts with the passed value}.

        Args:
            value (Any): The value to validate.
            metadata (Dict): The metadata to validate against.

                FIXME: Add any additional args you need here in metadata.
                | Key | Description |
                | --- | --- |
                | a | b |

        Returns:
            ValidationResult: PassResult on success, FailResult otherwise.
        """
        # Add your custom validator logic here and return a PassResult or
        # FailResult accordingly. Placeholder logic: only the literal string
        # "pass" is accepted.  # FIXME
        if value == "pass":
            return PassResult()
        return FailResult(
            error_message="{FIXME: A descriptive but concise error message about why validation failed}",
            fix_value="{FIXME: The programmtic fix if applicable, otherwise remove this kwarg.}",
        )
# Run tests via `pytest -rP ./qaprompt_relevance.py`
class TestQapromptRelevance:
    """Unit tests for the QapromptRelevance validator template."""

    def test_success_case(self):
        # FIXME: Replace with your custom test logic for the success case.
        validator = QapromptRelevance("s")
        result = validator.validate("pass", {})
        assert isinstance(result, PassResult)

    def test_failure_case(self):
        # FIXME: Replace with your custom test logic for the failure case.
        validator = QapromptRelevance("s")
        result = validator.validate("fail", {})
        assert isinstance(result, FailResult)
        # These expected values must match the literal placeholder strings
        # returned by QapromptRelevance.validate(); the original template
        # asserted different strings ("{A descriptive ...}" / "fails"), so
        # the shipped tests failed out of the box. Keep these in sync when
        # replacing the FIXME strings in the validator.
        assert result.error_message == "{FIXME: A descriptive but concise error message about why validation failed}"
        assert result.fix_value == "{FIXME: The programmtic fix if applicable, otherwise remove this kwarg.}"