from langchain_openai import ChatOpenAI
from langchain_core.output_parsers import JsonOutputParser
from langchain_core.prompts import ChatPromptTemplate, HumanMessagePromptTemplate

from knowledgebase import utils
from knowledgebase.doc.entity_helper import entity_helper
from knowledgebase.log import Log

# NOTE: additional endpoint arguments (e.g. base_url / api_key for the OpenAI-compatible
# service hosting qwen2.5) were truncated in the source and are assumed to be configured elsewhere.
llm = ChatOpenAI(temperature=0,
                 model="qwen2.5-72b-instruct")


# NOTE: the original class declaration was not visible; the name "EntityRecognition"
# is assumed from the cache file name below.
class EntityRecognition:
    """
    Entity extraction pipeline built with langchain.
    """
    use_cache = False
    cache_file = "entity_recognition.cache"

    def __init__(self, doc_type: str):
        # Entity words and their recognition rules for the given document type
        entities = list(filter(lambda x: x.doc_type == doc_type, entity_helper.entities))
        entity_list = ','.join([entity.name for entity in entities]) + "。"
        entity_rules = ";\n".join([f"- {entity.name}:{entity.prompts}" for entity in entities]) + "。"
        tpl = """
# Instruction
According to the entity recognition rules, determine whether the given text contains content related to any of the following entity words. If it does, output the matching entity words; otherwise output nothing. The entity word list is defined as follows:
""" + entity_list + """
## Entity recognition rules:
""" + entity_rules + """
# Constraints
- The output must be in JSON format;
- The extracted entity words must come from: """ + entity_list + """;
- If no entity word satisfies the rules above, do not output any entity words;
- The output data structure is an array of strings.
# Example
```json
[\"""" + entities[0].name + """\"]
```

# The text is as follows:
{text}
"""
        Log.info(tpl)
        msg = HumanMessagePromptTemplate.from_template(template=tpl)
        prompt = ChatPromptTemplate.from_messages([msg])
        # The chain is expected to return a plain JSON array of strings.
        parser = JsonOutputParser()
        self.chain = prompt | llm | parser
        # In-memory cache of extraction results keyed by text MD5.
        # NOTE: loading/saving the cache via cache_file is not shown in the source; start empty here.
        self.cache = {}

    def run(self, in_text: str) -> list[str]:
        """
        Extract entity words from the given text.

        NOTE: the method name "run" is assumed; the original signature was not visible.
        """
        # Return the cached result when caching is enabled and this text was seen before
        text_md5 = utils.generate_text_md5(in_text)
        if self.use_cache and text_md5 in self.cache:
            return self.cache[text_md5]
        result = self.chain.invoke({"text": in_text})
        self.cache[text_md5] = result
        return result
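

# A minimal usage sketch, not part of the original file: the doc_type value and the
# sample text are hypothetical. EntityRecognition builds the prompt/LLM chain once per
# document type and can then be invoked on many texts.
if __name__ == "__main__":
    recognizer = EntityRecognition(doc_type="doc")
    names = recognizer.run("... text to analyse ...")
    Log.info(f"extracted entities: {names}")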