Skip to content

gemini_api.py

Define Gemini adapter class.

GeminiAdapter

Bases: LlmAdapter

Define Gemini adapter class.

Source code in taglyatelle/llm_providers/gemini_api.py
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
class GeminiAdapter(LlmAdapter):
    """Adapter exposing Google's Gemini models through the LlmAdapter interface."""

    def __init__(self, model: str, temperature: float | int = 0):
        """
        Set up a client for Google's Gemini API.

        Parameters
        ----------
        model
            LLM model name

        temperature
            LLM temperature
        """
        self.model = model
        self.temperature = temperature
        # Credentials are picked up from the environment by the SDK client.
        self.client = genai.Client()

    def invoke_llm(self, prompt: str) -> str | None:
        """
        Submit a prompt to the configured Gemini model.

        Parameters
        ----------
        prompt
            The prompt to send to the LLM

        Returns
        -------
        Answer of the LLM or None
        """
        generation_config = types.GenerateContentConfig(
            temperature=self.temperature,
        )
        reply = self.client.models.generate_content(
            model=self.model,
            contents=prompt,
            config=generation_config,
        )
        return reply.text

__init__(model, temperature=0)

Initialize Google's LLMs.

Parameters

model LLM model name

temperature LLM temperature

Source code in taglyatelle/llm_providers/gemini_api.py
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
def __init__(self, model: str, temperature: float | int = 0):
    """
    Set up a client for Google's Gemini API.

    Parameters
    ----------
    model
        LLM model name

    temperature
        LLM temperature
    """
    self.model = model
    self.temperature = temperature
    # Credentials are picked up from the environment by the SDK client.
    self.client = genai.Client()

invoke_llm(prompt)

Send a request to an LLM.

Parameters

prompt The prompt to send to the LLM

Returns

Answer of the LLM or None

Source code in taglyatelle/llm_providers/gemini_api.py
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
def invoke_llm(self, prompt: str) -> str | None:
    """
    Submit a prompt to the configured Gemini model.

    Parameters
    ----------
    prompt
        The prompt to send to the LLM

    Returns
    -------
    Answer of the LLM or None
    """
    generation_config = types.GenerateContentConfig(
        temperature=self.temperature,
    )
    reply = self.client.models.generate_content(
        model=self.model,
        contents=prompt,
        config=generation_config,
    )
    return reply.text