Skip to content

API Reference

Validators

django_ai_validator.validators

AISemanticValidator

Bases: BaseAIValidator

Concrete implementation of the validator.

Source code in src/django_ai_validator/validators.py
70
71
72
73
74
class AISemanticValidator(BaseAIValidator):
    """
    Concrete implementation of the validator.

    Adds no behavior of its own: the entire template-method flow is
    inherited unchanged from ``BaseAIValidator``.
    """

BaseAIValidator

Bases: BaseValidator

Template Method Pattern: Defines the skeleton of the validation algorithm.

Source code in src/django_ai_validator/validators.py
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
class BaseAIValidator(BaseValidator):
    """
    Template Method Pattern: Defines the skeleton of the validation algorithm.

    ``__call__`` runs the fixed sequence skip -> prepare -> LLM validate ->
    handle error; subclasses customize individual steps by overriding the
    hook methods (``should_skip``, ``prepare_data``,
    ``execute_llm_validation``, ``handle_error``).
    """
    message = None  # Override BaseValidator's message to avoid limit_value dependency

    def __init__(self, prompt_template, provider=None, message=None, code=None):
        """
        Args:
            prompt_template: Natural-language criteria sent to the LLM.
            provider: Optional LLM provider key passed to AICleaningFacade.
            message: Optional fixed error message; when None, the LLM's
                explanation is used as the ValidationError message.
            code: Optional error code; falls back to the class attribute.
        """
        self.prompt_template = prompt_template
        self.provider = provider
        if code:
            self.code = code
        # BaseValidator expects a limit_value. We pass None, but then we must ensure
        # the message doesn't try to use it.
        super().__init__(limit_value=None, message=message)

    def __call__(self, value):
        # Template Method: the fixed algorithm; steps are overridable hooks.
        if self.should_skip(value):
            return

        prepared_value = self.prepare_data(value)
        is_valid, error_reason = self.execute_llm_validation(prepared_value)

        if not is_valid:
            self.handle_error(value, error_reason)

    def should_skip(self, value):
        # Empty values are not sent to the LLM; required/blank handling
        # is left to the field itself.
        return value in (None, '')

    def prepare_data(self, value):
        # LLM prompts are text; coerce whatever the field holds to str.
        return str(value)

    def execute_llm_validation(self, value):
        # Returns (is_valid, error_reason) from the configured provider.
        facade = AICleaningFacade(provider=self.provider)
        return facade.validate(value, self.prompt_template)

    def handle_error(self, value, error_reason):
        # Prefer the user-supplied message; otherwise surface the LLM's reason.
        raise ValidationError(
            self.message or error_reason,
            code=self.code,
            params={'value': value},
        )

    def __eq__(self, other):
        return (
            isinstance(other, BaseAIValidator) and
            self.prompt_template == other.prompt_template and
            self.message == other.message and
            self.code == other.code and
            self.provider == other.provider
        )

    def __hash__(self):
        # Defining __eq__ implicitly sets __hash__ to None (unhashable);
        # restore hashability over the same fields __eq__ compares so
        # validators can live in sets/dict keys like Django's built-ins.
        return hash((self.prompt_template, self.message, self.code, self.provider))

    def deconstruct(self):
        # Serialization hook for Django migrations: (import path, args, kwargs).
        path = f"{self.__module__}.{self.__class__.__name__}"
        args = [self.prompt_template]
        kwargs = {}
        if self.provider:
            kwargs['provider'] = self.provider
        if self.message:
            kwargs['message'] = self.message
        if self.code:
            kwargs['code'] = self.code
        return path, args, kwargs

Fields

django_ai_validator.fields

Admin

django_ai_validator.admin

LLM Adapters

django_ai_validator.llm.adapters

AnthropicAdapter

Bases: LLMAdapter

Adapter for Anthropic API.

Source code in src/django_ai_validator/llm/adapters.py
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
class AnthropicAdapter(LLMAdapter):
    """Adapter for Anthropic API."""

    def __init__(self, api_key: str = None, model: str = "claude-3-opus-20240229", **kwargs):
        """
        Args:
            api_key: Explicit key; falls back to settings.ANTHROPIC_API_KEY,
                then the ANTHROPIC_API_KEY environment variable.
            model: Anthropic model identifier to use for all requests.
        """
        self.api_key = api_key or getattr(settings, 'ANTHROPIC_API_KEY', os.environ.get("ANTHROPIC_API_KEY"))
        self.model = model
        # Keep the try minimal: only the import can raise ImportError, and a
        # failure constructing the client must propagate unchanged instead of
        # being masked. Chain the re-raise so the original cause is preserved.
        try:
            import anthropic
        except ImportError as exc:
            raise ImportError("Anthropic package is not installed. Please install 'anthropic'.") from exc
        self.client = anthropic.Anthropic(api_key=self.api_key)

    def validate(self, value: str, prompt_template: str) -> Tuple[bool, Optional[str]]:
        """Return (True, None) when the model replies starting with 'VALID';
        otherwise (False, <model's explanation>)."""
        prompt = f"{prompt_template}\n\nInput: {value}\n\nRespond with 'VALID' if it meets the criteria. Otherwise, explain why it is invalid."
        message = self.client.messages.create(
            model=self.model,
            max_tokens=1024,
            messages=[{"role": "user", "content": prompt}]
        )
        content = message.content[0].text.strip()
        if content.upper().startswith("VALID"):
            return True, None
        return False, content

    def clean(self, value: str, prompt_template: str) -> str:
        """Return the model's cleaned/normalized version of *value*."""
        prompt = f"{prompt_template}\n\nInput: {value}\n\nReturn ONLY the cleaned/normalized value."
        message = self.client.messages.create(
            model=self.model,
            max_tokens=1024,
            messages=[{"role": "user", "content": prompt}]
        )
        return message.content[0].text.strip()

GeminiAdapter

Bases: LLMAdapter

Adapter for Google Gemini API.

Source code in src/django_ai_validator/llm/adapters.py
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
class GeminiAdapter(LLMAdapter):
    """Adapter for Google Gemini API."""

    def __init__(self, api_key: str = None, model: str = "gemini-pro", **kwargs):
        """
        Args:
            api_key: Explicit key; falls back to settings.GEMINI_API_KEY,
                then the GEMINI_API_KEY environment variable.
            model: Gemini model identifier to use for all requests.
        """
        self.api_key = api_key or getattr(settings, 'GEMINI_API_KEY', os.environ.get("GEMINI_API_KEY"))
        self.model = model
        # Keep the try minimal: only the import can raise ImportError, and a
        # failure in configure()/GenerativeModel() must propagate unchanged.
        # Chain the re-raise so the original cause is preserved.
        try:
            import google.generativeai as genai
        except ImportError as exc:
            raise ImportError("Google Generative AI package is not installed. Please install 'google-generativeai'.") from exc
        genai.configure(api_key=self.api_key)
        self.client = genai.GenerativeModel(self.model)

    def validate(self, value: str, prompt_template: str) -> Tuple[bool, Optional[str]]:
        """Return (True, None) when the model replies starting with 'VALID';
        otherwise (False, <model's explanation>)."""
        prompt = f"{prompt_template}\n\nInput: {value}\n\nRespond with 'VALID' if it meets the criteria. Otherwise, explain why it is invalid."
        response = self.client.generate_content(prompt)
        content = response.text.strip()
        if content.upper().startswith("VALID"):
            return True, None
        return False, content

    def clean(self, value: str, prompt_template: str) -> str:
        """Return the model's cleaned/normalized version of *value*."""
        prompt = f"{prompt_template}\n\nInput: {value}\n\nReturn ONLY the cleaned/normalized value."
        response = self.client.generate_content(prompt)
        return response.text.strip()

LLMAdapter

Bases: ABC

Target interface for the Adapter Pattern. Standardizes interaction with different LLM providers.

Source code in src/django_ai_validator/llm/adapters.py
 6
 7
 8
 9
10
11
12
13
14
15
16
17
class LLMAdapter(abc.ABC):
    """
    Target interface for the Adapter Pattern.
    Standardizes interaction with different LLM providers: every concrete
    adapter implements the same two operations so callers can swap
    providers without code changes.
    """

    @abc.abstractmethod
    def validate(self, value: str, prompt_template: str) -> Tuple[bool, Optional[str]]:
        """Return (is_valid, error_reason); error_reason is None when valid."""

    @abc.abstractmethod
    def clean(self, value: str, prompt_template: str) -> str:
        """Return the cleaned/normalized form of *value*."""

OllamaAdapter

Bases: LLMAdapter

Adapter for Ollama (Llama) API.

Source code in src/django_ai_validator/llm/adapters.py
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
class OllamaAdapter(LLMAdapter):
    """Adapter for Ollama (Llama) API."""

    def __init__(self, host: str = None, model: str = "llama3", **kwargs):
        """
        Args:
            host: Explicit Ollama host; falls back to settings.OLLAMA_HOST,
                then the OLLAMA_HOST environment variable.
            model: Ollama model name to use for all requests.
        """
        self.host = host or getattr(settings, 'OLLAMA_HOST', os.environ.get("OLLAMA_HOST"))
        self.model = model
        # Keep the try minimal: only the import can raise ImportError, and a
        # failure constructing the client must propagate unchanged.
        # Chain the re-raise so the original cause is preserved.
        try:
            import ollama
        except ImportError as exc:
            raise ImportError("Ollama package is not installed. Please install 'ollama'.") from exc
        self.client = ollama.Client(host=self.host)

    def validate(self, value: str, prompt_template: str) -> Tuple[bool, Optional[str]]:
        """Return (True, None) when the model replies starting with 'VALID';
        otherwise (False, <model's explanation>)."""
        prompt = f"{prompt_template}\n\nInput: {value}\n\nRespond with 'VALID' if it meets the criteria. Otherwise, explain why it is invalid."
        response = self.client.chat(model=self.model, messages=[
            {'role': 'user', 'content': prompt},
        ])
        content = response['message']['content'].strip()
        if content.upper().startswith("VALID"):
            return True, None
        return False, content

    def clean(self, value: str, prompt_template: str) -> str:
        """Return the model's cleaned/normalized version of *value*."""
        prompt = f"{prompt_template}\n\nInput: {value}\n\nReturn ONLY the cleaned/normalized value."
        response = self.client.chat(model=self.model, messages=[
            {'role': 'user', 'content': prompt},
        ])
        return response['message']['content'].strip()

OpenAIAdapter

Bases: LLMAdapter

Adapter for OpenAI API.

Source code in src/django_ai_validator/llm/adapters.py
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
class OpenAIAdapter(LLMAdapter):
    """Adapter for OpenAI API."""

    def __init__(self, api_key: str = None, model: str = "gpt-3.5-turbo", **kwargs):
        """
        Args:
            api_key: Explicit key; falls back to settings.OPENAI_API_KEY,
                then the OPENAI_API_KEY environment variable.
            model: OpenAI model identifier to use for all requests.
        """
        self.api_key = api_key or getattr(settings, 'OPENAI_API_KEY', os.environ.get("OPENAI_API_KEY"))
        self.model = model
        # Keep the try minimal: only the import can raise ImportError, and a
        # failure constructing the client must propagate unchanged.
        # Chain the re-raise so the original cause is preserved.
        try:
            from openai import OpenAI
        except ImportError as exc:
            raise ImportError("OpenAI package is not installed. Please install 'openai'.") from exc
        self.client = OpenAI(api_key=self.api_key)

    def validate(self, value: str, prompt_template: str) -> Tuple[bool, Optional[str]]:
        """Return (True, None) when the model replies starting with 'VALID';
        otherwise (False, <model's explanation>)."""
        prompt = f"{prompt_template}\n\nInput: {value}\n\nRespond with 'VALID' if it meets the criteria. Otherwise, explain why it is invalid."
        response = self.client.chat.completions.create(
            model=self.model,
            messages=[
                {"role": "system", "content": "You are a helpful data validation assistant."},
                {"role": "user", "content": prompt}
            ],
            temperature=0.0,  # deterministic output for validation decisions
        )
        content = response.choices[0].message.content.strip()
        if content.upper().startswith("VALID"):
            return True, None
        return False, content

    def clean(self, value: str, prompt_template: str) -> str:
        """Return the model's cleaned/normalized version of *value*."""
        prompt = f"{prompt_template}\n\nInput: {value}\n\nReturn ONLY the cleaned/normalized value."
        response = self.client.chat.completions.create(
            model=self.model,
            messages=[
                {"role": "system", "content": "You are a helpful data cleaning assistant."},
                {"role": "user", "content": prompt}
            ],
            temperature=0.0,
        )
        return response.choices[0].message.content.strip()