diff options
Diffstat (limited to 'helpers')
-rw-r--r-- | helpers/__init__      |  0
-rw-r--r-- | helpers/df_helpers.py | 71
-rw-r--r-- | helpers/prompts.py    | 74
3 files changed, 145 insertions, 0 deletions
# ---------------------------------------------------------------------------
# Reconstructed from a cgit diff dump. The patch adds three new files:
#   helpers/__init__        (empty; NOTE(review): probably meant "__init__.py"
#                            — confirm, an extensionless file does not mark a
#                            package)
#   helpers/df_helpers.py   (+71 lines)
#   helpers/prompts.py      (+74 lines)
# The two modules are reproduced below with the "+" diff markers and
# index/mode metadata stripped, and with concrete fixes applied (quadratic
# list build, mutable default arguments, bare except, `== None`).
# ---------------------------------------------------------------------------

# ===== helpers/df_helpers.py ===============================================
import uuid

import numpy as np
import pandas as pd

from .prompts import extractConcepts, graphPrompt


def documents2Dataframe(documents) -> pd.DataFrame:
    """Flatten LangChain-style document chunks into a DataFrame.

    Each row carries the chunk text, the chunk's metadata spread into
    columns, and a fresh random ``chunk_id`` used later to tie extracted
    concepts/edges back to their source chunk.

    Args:
        documents: iterable of objects exposing ``page_content`` (str) and
            ``metadata`` (dict) — assumed to be LangChain ``Document``-like;
            TODO confirm against caller.

    Returns:
        One row per chunk with columns ``text``, ``chunk_id`` and whatever
        keys the chunk metadata contains.
    """
    rows = []
    for chunk in documents:
        row = {
            "text": chunk.page_content,
            **chunk.metadata,
            "chunk_id": uuid.uuid4().hex,
        }
        # FIX: was `rows = rows + [row]`, which copies the whole list on
        # every iteration (O(n^2) overall); append is amortized O(1).
        rows.append(row)

    return pd.DataFrame(rows)


def df2ConceptsList(dataframe: pd.DataFrame) -> list:
    """Run concept extraction over every chunk row and flatten the results.

    Args:
        dataframe: output of :func:`documents2Dataframe` — must have
            ``text`` and ``chunk_id`` columns.

    Returns:
        A flat list of concept dicts, each tagged with its ``chunk_id`` and
        ``type: "concept"``. Chunks whose model response was not valid JSON
        are silently dropped (extractConcepts returns None for them).
    """
    results = dataframe.apply(
        lambda row: extractConcepts(
            row.text, {"chunk_id": row.chunk_id, "type": "concept"}
        ),
        axis=1,
    )
    # extractConcepts returns None on invalid JSON -> NaN in the Series.
    results = results.dropna().reset_index(drop=True)

    # Flatten the Series of per-chunk lists into one list of dicts.
    return np.concatenate(results).ravel().tolist()


def concepts2Df(concepts_list) -> pd.DataFrame:
    """Build a cleaned concepts DataFrame from the flat concept list.

    Blank (" ") entities are treated as missing and dropped; surviving
    entity names are lowercased for consistent matching downstream.
    """
    concepts_dataframe = pd.DataFrame(concepts_list).replace(" ", np.nan)
    concepts_dataframe = concepts_dataframe.dropna(subset=["entity"])
    concepts_dataframe["entity"] = concepts_dataframe["entity"].apply(
        lambda x: x.lower()
    )
    return concepts_dataframe


def df2Graph(dataframe: pd.DataFrame, model=None) -> list:
    """Run graph (node/edge) extraction over every chunk row.

    Args:
        dataframe: output of :func:`documents2Dataframe`.
        model: optional model name forwarded to :func:`graphPrompt`;
            ``None`` lets graphPrompt fall back to its default.

    Returns:
        A flat list of edge dicts (``node_1``/``node_2``/``edge`` plus the
        ``chunk_id`` metadata); chunks with invalid JSON responses are
        dropped.
    """
    results = dataframe.apply(
        lambda row: graphPrompt(row.text, {"chunk_id": row.chunk_id}, model), axis=1
    )
    # graphPrompt returns None on invalid JSON -> NaN in the Series.
    results = results.dropna().reset_index(drop=True)

    # Flatten the Series of per-chunk edge lists into one list of dicts.
    return np.concatenate(results).ravel().tolist()


def graph2Df(nodes_list) -> pd.DataFrame:
    """Build a cleaned edge-list DataFrame from the flat edge list.

    Blank (" ") node names are treated as missing; rows lacking either
    endpoint are dropped and node names are lowercased.
    """
    graph_dataframe = pd.DataFrame(nodes_list).replace(" ", np.nan)
    graph_dataframe = graph_dataframe.dropna(subset=["node_1", "node_2"])
    graph_dataframe["node_1"] = graph_dataframe["node_1"].apply(lambda x: x.lower())
    graph_dataframe["node_2"] = graph_dataframe["node_2"].apply(lambda x: x.lower())

    return graph_dataframe


# ===== helpers/prompts.py ==================================================
import json
import sys

# NOTE(review): unusual capitalization — the ollama client package is
# normally lowercase; confirm this import path actually resolves.
import Ollama.client as client
from yachalk import chalk  # used only by the commented-out debug print below

sys.path.append("..")


def extractConcepts(prompt: str, metadata=None, model="mistral-openorca:latest"):
    """Ask the LLM to extract key concepts from a context chunk.

    Args:
        prompt: the raw chunk text to analyse.
        metadata: optional dict merged into every returned concept dict
            (e.g. ``{"chunk_id": ..., "type": "concept"}``).
        model: ollama model name.

    Returns:
        A list of concept dicts, or ``None`` if the model response was not
        valid JSON.
    """
    # FIX: was a mutable default argument (metadata={}), which is shared
    # across calls and can be silently mutated.
    if metadata is None:
        metadata = {}
    SYS_PROMPT = (
        "Your task is extract the key concepts (and non personal entities) mentioned in the given context. "
        "Extract only the most important and atomistic concepts, if needed break the concepts down to the simpler concepts."
        "Categorize the concepts in one of the following categories: "
        "[event, concept, place, object, document, organisation, condition, misc]\n"
        "Format your output as a list of json with the following format:\n"
        "[\n"
        "   {\n"
        '       "entity": The Concept,\n'
        '       "importance": The concontextual importance of the concept on a scale of 1 to 5 (5 being the highest),\n'
        '       "category": The Type of Concept,\n'
        "   }, \n"
        "{ }, \n"
        "]\n"
    )
    response, _ = client.generate(model_name=model, system=SYS_PROMPT, prompt=prompt)
    try:
        result = json.loads(response)
        result = [dict(item, **metadata) for item in result]
    # FIX: was a bare `except:` which also swallows KeyboardInterrupt and
    # SystemExit; catch only what json.loads / dict() can raise here.
    except (json.JSONDecodeError, TypeError):
        print("\n\nERROR ### Here is the buggy response: ", response, "\n\n")
        result = None
    return result


def graphPrompt(input: str, metadata=None, model="mistral-openorca:latest"):
    """Ask the LLM to extract an ontology edge list from a context chunk.

    Args:
        input: the raw chunk text (name kept for caller compatibility even
            though it shadows the builtin).
        metadata: optional dict merged into every returned edge dict
            (e.g. ``{"chunk_id": ...}``).
        model: ollama model name; ``None`` selects the default model.

    Returns:
        A list of edge dicts (``node_1``/``node_2``/``edge``), or ``None``
        if the model response was not valid JSON.
    """
    # FIX: was `if model == None:` — identity comparison is the correct
    # test against the None singleton.
    if model is None:
        model = "mistral-openorca:latest"
    # FIX: was a mutable default argument (metadata={}).
    if metadata is None:
        metadata = {}

    # model_info = client.show(model_name=model)
    # print( chalk.blue(model_info))

    SYS_PROMPT = (
        "You are a network graph maker who extracts terms and their relations from a given context. "
        "You are provided with a context chunk (delimited by ```) Your task is to extract the ontology "
        "of terms mentioned in the given context. These terms should represent the key concepts as per the context. \n"
        "Thought 1: While traversing through each sentence, Think about the key terms mentioned in it.\n"
        "\tTerms may include object, entity, location, organization, person, \n"
        "\tcondition, acronym, documents, service, concept, etc.\n"
        "\tTerms should be as atomistic as possible\n\n"
        "Thought 2: Think about how these terms can have one on one relation with other terms.\n"
        "\tTerms that are mentioned in the same sentence or the same paragraph are typically related to each other.\n"
        "\tTerms can be related to many other terms\n\n"
        "Thought 3: Find out the relation between each such related pair of terms. \n\n"
        "Format your output as a list of json. Each element of the list contains a pair of terms"
        "and the relation between them, like the follwing: \n"
        "[\n"
        "   {\n"
        '       "node_1": "A concept from extracted ontology",\n'
        '       "node_2": "A related concept from extracted ontology",\n'
        '       "edge": "relationship between the two concepts, node_1 and node_2 in one or two sentences"\n'
        "   }, {...}\n"
        "]"
    )

    USER_PROMPT = f"context: ```{input}``` \n\n output: "
    response, _ = client.generate(model_name=model, system=SYS_PROMPT, prompt=USER_PROMPT)
    try:
        result = json.loads(response)
        result = [dict(item, **metadata) for item in result]
    # FIX: was a bare `except:` — catch only the expected parse failures.
    except (json.JSONDecodeError, TypeError):
        print("\n\nERROR ### Here is the buggy response: ", response, "\n\n")
        result = None
    return result