neurotrace.core.llm_tasks

get_graph_summary(llm, text)

Get a graph summary from the LLM.

Parameters:

| Name | Type | Description | Default |
| ---- | ---- | ----------- | ------- |
| llm | BaseLLM | The language model to use for summarisation. | required |
| text | str | The input text to summarize. | required |

Returns:

| Type | Description |
| ---- | ----------- |
| str | The graph summary generated by the LLM. |

Source code in neurotrace/core/llm_tasks.py
def get_graph_summary(llm: BaseLLM, text: str) -> str:
    """
    Get a graph summary from the LLM.

    Args:
        llm (BaseLLM): The language model to use for summarisation.
        text (str): The input text to summarize.

    Returns:
        str: The graph summary generated by the LLM.
    """
    response = _perform_summarisation(llm=llm, prompt=task_prompts.PROMPT_GRAPH_SUMMARY, message=text)
    response = strip_json_code_block(response)
    return response
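
Example usage (a minimal sketch; the OpenAI import, model name, and sample text are assumptions, and any BaseLLM-compatible LangChain model should work):

from langchain_openai import OpenAI  # assumed provider; substitute your own BaseLLM

from neurotrace.core.llm_tasks import get_graph_summary

llm = OpenAI(model="gpt-3.5-turbo-instruct")  # hypothetical model choice
text = "Ada Lovelace collaborated with Charles Babbage on the Analytical Engine."

# Returns the LLM's graph-oriented summary with any JSON code-block fences stripped.
graph_summary = get_graph_summary(llm, text)
print(graph_summary)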

get_vector_and_graph_summary(llm, text)

Get vector and graph summaries from the LLM.

Parameters:

| Name | Type | Description | Default |
| ---- | ---- | ----------- | ------- |
| llm | BaseLLM | The language model to use for summarisation. | required |
| text | str | The input text to summarize. | required |

Returns:

| Type | Description |
| ---- | ----------- |
| Dict[str, str] | A dictionary containing the vector summary and graph summary; empty if the LLM response is not valid JSON. |

Source code in neurotrace/core/llm_tasks.py
def get_vector_and_graph_summary(llm: BaseLLM, text: str) -> Dict[str, str]:
    """
    Get vector and graph summaries from the LLM.

    Args:
        llm (BaseLLM): The language model to use for summarisation.
        text (str): The input text to summarize.

    Returns:
        Dict[str, str]: A dictionary containing the vector summary and graph
            summary; empty if the response cannot be parsed as JSON.
    """
    response = _perform_summarisation(llm=llm, prompt=task_prompts.PROMPT_VECTOR_AND_GRAPH_SUMMARY, message=text)
    response = strip_json_code_block(response)
    try:
        return json.loads(response)
    except json.decoder.JSONDecodeError:
        return {}
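
Example usage (a sketch; the key names below are assumptions, since the actual keys are defined by PROMPT_VECTOR_AND_GRAPH_SUMMARY, and an empty dict signals a response that could not be parsed as JSON):

from neurotrace.core.llm_tasks import get_vector_and_graph_summary

summaries = get_vector_and_graph_summary(llm, text)  # llm and text as in the example above
if not summaries:
    # The LLM response was not valid JSON; retry or fall back to a plain summary.
    vector_summary, graph_summary = "", ""
else:
    # Key names are assumptions; they depend on the prompt template.
    vector_summary = summaries.get("vector_summary", "")
    graph_summary = summaries.get("graph_summary", "")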

perform_summarisation(llm, prompt_placeholders, prompt=None)

Perform summarisation using the provided LLM and a prompt filled with the given placeholder values.

Parameters:

| Name | Type | Description | Default |
| ---- | ---- | ----------- | ------- |
| llm | BaseLLM | The language model to use for summarisation. | required |
| prompt_placeholders | Dict[str, Any] | Values substituted for the prompt's variables. | required |
| prompt | PromptTemplate | The prompt with any number of variables. Defaults to task_prompts.PROMPT_GENERAL_SUMMARY when omitted. | None |

Returns:

| Type | Description |
| ---- | ----------- |
| str | The summarised or generated output from the LLM. |

Source code in neurotrace/core/llm_tasks.py
def perform_summarisation(llm: BaseLLM, prompt_placeholders: Dict[str, Any], prompt: PromptTemplate = None) -> str:
    """
    Perform summarisation using the provided LLM and a prompt filled with the given placeholder values.

    Args:
        llm (BaseLLM): The language model to use for summarisation.
        prompt_placeholders (Dict[str, Any]): Values substituted for the prompt's variables.
        prompt (PromptTemplate, optional): The prompt with any number of variables.
            Defaults to task_prompts.PROMPT_GENERAL_SUMMARY.

    Returns:
        str: The summarised or generated output from the LLM.
    """
    prompt = prompt or task_prompts.PROMPT_GENERAL_SUMMARY
    return _perform_summarisation(llm=llm, prompt=prompt, **prompt_placeholders)
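
Example usage with a custom prompt (a sketch; the template text, placeholder names, and import path are assumptions, and omitting prompt falls back to task_prompts.PROMPT_GENERAL_SUMMARY):

from langchain.prompts import PromptTemplate

from neurotrace.core.llm_tasks import perform_summarisation

# Hypothetical prompt; its variables must match the keys in prompt_placeholders.
prompt = PromptTemplate.from_template(
    "Summarise the following text in {style} style:\n\n{message}"
)

summary = perform_summarisation(
    llm,  # as in the earlier examples
    prompt_placeholders={"style": "bullet-point", "message": text},
    prompt=prompt,
)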