
get_elements.py

get_elements(state, config)

Process the user prompt to identify elements within specified categories using a language model.

Parameters:

| Name | Type | Description | Default |
|--------|--------|-------------------------------------------------------|----------|
| `state` | `State` | The current state containing the user prompt. | required |
| `config` | `dict` | Configuration dictionary containing the language model. | required |

Returns:

| Name | Type | Description |
|--------|------------------|-----------------------------------------------------|
| `dict` | `Dict[str, Any]` | A dictionary containing the list of identified elements. |
Source code in brickllm/nodes/get_elements.py
def get_elements(state: State, config: Dict[str, Any]) -> Dict[str, Any]:
    """
    Process the user prompt to identify elements within specified categories
    using a language model.

    Args:
        state (State): The current state containing the user prompt.
        config (dict): Configuration dictionary containing the language model.

    Returns:
        dict: A dictionary containing the list of identified elements.
    """
    custom_logger.eurac("🔍 Getting elements from user prompt")

    user_prompt = state["user_prompt"]

    categories = ["Point", "Equipment", "Location", "Collection"]

    category_dict = {}
    # Get hierarchy info for each category
    for category in categories:
        parents, children = get_hierarchical_info(category)
        # category_dict[category] = children

        # get definition for each child
        children_dict = {}
        for child in children:
            children_dict[child] = get_brick_definition(child)

        category_dict[category] = children_dict

    # Get the language model instance from the config
    llm = config.get("configurable", {}).get("llm_model")

    # Enforce structured output
    structured_llm = llm.with_structured_output(ElemListSchema)

    # Format the system message with the user prompt and the category hierarchy
    system_message = get_elem_instructions.format(
        prompt=user_prompt, elements_dict=category_dict
    )

    # Invoke the LLM with structured output to extract the elements
    answer = structured_llm.invoke(
        [SystemMessage(content=system_message)]
        + [HumanMessage(content="Find the elements.")]
    )

    return {"elem_list": answer.elem_list}