import os

import pandas as pd
import requests
import json
import re
from openai import OpenAI

from pic_read import pic_see
from config import config_get, key_get

# ---- Global configuration ----
max_length = 20    # how many recent chat-log rows are fed to the model as context
max_pic_see = 1    # how many of the newest image messages get expanded via pic_see()

prompt_pic = config_get("prompt_pic")      # system prompt: drawing-prompt generation
prompt_lora = config_get("prompt_lora")    # system prompt: lora drawing-prompt generation
prompt_chpic = config_get("prompt_chpic")  # system prompt: "send a picture?" decision
pro_good = "solo,cat ears,black_hair,black_eye,"  # positive tags prepended to drawing prompts
pro_bad = config_get("pro_bad")            # negative tags for image generation
groq_key = key_get("groq")
siliflow_key = key_get("siliflow")


def AI_chat(group_id, word, prompt):
    """Cat-persona chat.

    Loads the group's chat log (群聊记录/<group_id>.csv), builds a
    recent-history context string, asks the primary (groq) channel and
    falls back to the siliconflow channel on any failure.

    Args:
        group_id: group number; selects the CSV log file.
        word: the user message to answer.
        prompt: system prompt (persona) prepended to the history.

    Returns:
        The model's reply with the leading "猫猫:" prefix and trailing
        spaces/newlines removed.
    """
    # Load the chat history.
    folder_path = '群聊记录'
    file_path = os.path.join(folder_path, str(group_id) + '.csv')
    df = pd.read_csv(file_path)

    # Keep only the last `max_length` rows as alternating name/message entries.
    recent_list = []
    for i in range(max(0, len(df) - max_length), len(df)):
        recent_list.append(str(df.loc[i, 'user_name']))
        recent_list.append(str(df.loc[i, 'message']))

    # Walk backwards and expand up to `max_pic_see` of the newest image
    # messages ("图片内容:<url>") into a textual description via pic_see().
    pic_num = 0
    for idx in range(len(recent_list) - 1, -1, -1):
        if pic_num >= max_pic_see:
            break
        entry = str(recent_list[idx])
        if entry.startswith("图片内容:"):
            url = entry[5:]  # drop the 5-character "图片内容:" prefix
            recent_list[idx] = "发送了一张图片," + str(pic_see(url)).replace("\n", "")
            pic_num += 1

    # Reassemble as "name:message\n" pairs (even index = name, odd = message).
    recent = ""
    for i, item in enumerate(recent_list):
        if i % 2 == 0:
            recent += item + ":"
        else:
            recent += item + "\n"
    print("-------------recent---------------")
    print(recent)

    messages = [{'role': 'system', 'content': prompt + recent},
                {'role': 'user', 'content': word}]

    # Primary channel first; any failure falls back to the backup channel.
    try:
        ans = groq_chat(messages)
    except Exception:
        ans = siliflow_chat(messages)

    ans = ans.lstrip()
    # Remove the persona prefix once. (str.lstrip("猫猫:") would strip any
    # run of leading 猫 / : characters, which is not the intent.)
    if ans.startswith("猫猫:"):
        ans = ans[len("猫猫:"):]
    # Trim trailing newlines and spaces (same set the original loop removed).
    return ans.rstrip(" \n")


def groq_chat(messages, model="deepseek-r1-distill-llama-70b"):
    """Primary chat channel, https://api.gxx12138.space/groq/v1.

    Streams the completion, concatenates the delta chunks, strips any
    <think>...</think> reasoning blocks emitted by deepseek-r1 models,
    then collapses blank lines.
    """
    client = OpenAI(
        base_url="https://api.gxx12138.space/groq/v1",
        api_key=groq_key,
    )
    completion = client.chat.completions.create(
        model=model,
        stream=True,
        messages=messages,
    )
    ans = ""
    for chunk in completion:
        if chunk.choices[0].delta.content:
            ans += chunk.choices[0].delta.content

    # deepseek-r1 wraps its chain of thought in <think>...</think>; remove
    # every such block. NOTE: the tags must be non-empty — with empty tags
    # `"" in text` is always True and the slice is a no-op, so this loop
    # would never terminate.
    start_tag = "<think>"
    end_tag = "</think>"
    text = ans
    while start_tag in text and end_tag in text:
        start_idx = text.find(start_tag)
        end_idx = text.find(end_tag) + len(end_tag)
        text = text[:start_idx] + text[end_idx:]

    # Drop blank lines left behind after the removal.
    cleaned_text = "\n".join(line for line in text.split("\n") if line.strip())
    return cleaned_text.strip()


def siliflow_chat(messages, model="THUDM/glm-4-9b-chat"):
    """Backup chat channel, https://api.siliconflow.cn/v1.

    Returns the model reply, or "" if the request fails (best-effort).
    """
    client = OpenAI(
        base_url='https://api.siliconflow.cn/v1',
        api_key=siliflow_key,
    )
    try:
        print("硅基模型正常运作")
        # Candidate models: THUDM/glm-4-9b-chat, google/gemma-2-9b-it
        response = client.chat.completions.create(
            model=model,
            messages=messages,
            stream=False,
            temperature=0.8,
        )
        ans = response.choices[0].message.content
    except Exception:
        print("硅基模型爆了")
        ans = ""
    return ans


def AI_get_picprompt(group_id):
    """Build a drawing prompt from the group's last 4 chat-log messages.

    Returns pro_good + model output (newlines removed) + ",".
    """
    folder_path = '群聊记录'
    file_path = os.path.join(folder_path, str(group_id) + '.csv')
    df = pd.read_csv(file_path)

    recent = ""
    # max(0, ...) guards against logs shorter than 4 rows, which would
    # otherwise produce negative .loc keys and raise KeyError.
    for i in range(max(0, len(df) - 4), len(df)):
        recent += str(df.loc[i, 'user_name']) + ":" + str(df.loc[i, 'message']) + "\n"

    messages = [{'role': 'system', 'content': prompt_pic},
                {'role': 'user', 'content': recent}]
    ans = groq_chat(messages)
    return pro_good + ans.strip().replace("\n", "") + ","


def AI_sendphoto_ornot(event):
    """Decision step: ask the model whether a picture should be sent for `event`.

    Returns the raw model verdict string (left-stripped).
    """
    messages = [{'role': 'system', 'content': prompt_chpic},
                {'role': 'user', 'content': event}]
    client = OpenAI(
        base_url='https://api.siliconflow.cn/v1',
        api_key=siliflow_key,
    )
    response = client.chat.completions.create(
        model="THUDM/glm-4-9b-chat",
        messages=messages,
        stream=False,
        temperature=0.7,
    )
    ans = response.choices[0].message.content.lstrip()
    print(f"图片发送决定:{ans}")
    return ans


def AI_lora_getpic_prompt(word):
    """Drawing-prompt generation, version 2: lora prompt from a single message."""
    messages = [{'role': 'system', 'content': prompt_lora},
                {'role': 'user', 'content': word}]
    ans = siliflow_chat(messages).replace(", ", ",")
    return ans.strip().replace("\n", "") + ","


def detect_tool(message):
    """New tool-detection method — work in progress (编写中)."""
    model = "llama-3.3-70b-versatile"