Ai-copilot with GUI
xosski, Jan 4th, 2025
from flask import Flask, request, jsonify, render_template
import requests
import json
import subprocess
import os
import threading
import re
from tkinter import Tk, StringVar, BOTH, X, LEFT, END, WORD
from tkinter import ttk
import tkinter.scrolledtext as scrolledtext

app = Flask(__name__)

# Directory for storing generated files
GENERATED_FILES_DIR = "generated_files"
os.makedirs(GENERATED_FILES_DIR, exist_ok=True)

# API URLs and keys
OLLAMA_API_URL = "http://localhost:11434/api/chat"
MISTRAL_API_URL = "https://api.mistral.ai/v1/chat/completions"
MISTRAL_API_KEY = "JCASwm1qYdbfSCd7ilpEgUU0oRv7LcbG"
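# Shipping a live API key in source code is unsafe; a safer pattern (an
# assumption, not part of the original paste) is to read it from the
# environment instead:
#
#   MISTRAL_API_KEY = os.environ.get("MISTRAL_API_KEY", "")
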
# Available models
OLLAMA_MODELS = []
MISTRAL_MODELS = [
    "pixtral-large-latest",
    "pixtral-medium-latest",
    "pixtral-small-latest",
    "mistral-large-latest",
    "ministral-3b-latest",
    "ministral-8b-latest",
    "mistral-small-latest",
    "codestral-latest",
    "mistral-medium",
    "mistral-embed",
    "mistral-moderation-latest",
    "pixtral-12b-2409",
    "open-mistral-nemo",
    "open-codestral-mamba",
    "open-mistral-7b",
    "open-mixtral-8x7b",
    "open-mixtral-8x22b",
]

# Conversation history and contextual memory
conversation_history = []
contextual_memory = []

# Route serving the HTML page
@app.route('/')
def serve_index():
    return render_template("index.html")

# Route for interacting with the Ollama and Mistral APIs
@app.route('/ask', methods=['POST'])
def ask_ai():
    global conversation_history, contextual_memory

    user_message = request.json.get("message", "")
    selected_model = request.json.get("model", "llama3.2")  # defaults to an Ollama model

    # Add the user message to the history
    conversation_history.append({"role": "user", "content": user_message})

    # Check whether the model belongs to Ollama or Mistral
    if selected_model in OLLAMA_MODELS:
        ai_response, memory_update = query_ollama(selected_model)
    elif selected_model in MISTRAL_MODELS:
        ai_response, memory_update = query_mistral(selected_model, user_message)
    else:
        return jsonify({"error": "Unknown model"}), 400

    # Add the AI's reply to the history
    conversation_history.append({"role": "assistant", "content": ai_response})

    # If the AI flags information worth remembering, store it
    if memory_update:
        contextual_memory.append(memory_update)

    # Create files if the response contains code
    create_files_from_response(ai_response)

    return jsonify({"response": ai_response})
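# Client-side sketch of calling /ask, assuming the Flask app is being served
# on its default port 5000 (illustrative, not part of the original paste):
#
#   import requests
#   r = requests.post("http://localhost:5000/ask",
#                     json={"message": "Write a hello-world page",
#                           "model": "mistral-small-latest"})
#   print(r.json()["response"])
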
# Route listing the available models
@app.route('/models', methods=['GET'])
def list_models():
    global OLLAMA_MODELS
    try:
        # Fetch the locally installed Ollama models
        result = subprocess.run(['ollama', 'list'], capture_output=True, text=True)
        if result.returncode == 0:
            # Skip the header row and keep the first column (the model name)
            OLLAMA_MODELS = [line.split()[0] for line in result.stdout.splitlines()[1:] if line]
    except Exception:
        OLLAMA_MODELS = []

    # Merge the Ollama and Mistral models
    models = {"ollama": OLLAMA_MODELS, "mistral": MISTRAL_MODELS}
    return jsonify(models)

# Route for browsing the generated files
@app.route('/explore', methods=['GET'])
def explore_files():
    files_structure = {}
    for root, dirs, files in os.walk(GENERATED_FILES_DIR):
        path = root.split(os.sep)
        current_dir = files_structure
        for folder in path[1:]:
            current_dir = current_dir.setdefault(folder, {})
        for file in files:
            current_dir[file] = os.path.join(root, file)
    return jsonify(files_structure)
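# /explore returns the tree under generated_files/ as nested JSON, with file
# names mapped to their full paths, e.g. (illustrative output):
#
#   {"file_1.py": "generated_files/file_1.py",
#    "subdir": {"page.html": "generated_files/subdir/page.html"}}
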
# Route for reading a file
@app.route('/file', methods=['GET'])
def read_file():
    filepath = request.args.get("path")
    if not filepath or not os.path.exists(filepath):
        return jsonify({"error": "File not found"}), 404
    with open(filepath, "r", encoding="utf-8") as f:
        return jsonify({"content": f.read()})

# Route for editing a file
@app.route('/file', methods=['POST'])
def edit_file():
    data = request.json
    filepath = data.get("path")
    content = data.get("content")
    if not filepath or not os.path.exists(filepath):
        return jsonify({"error": "File not found"}), 404
    with open(filepath, "w", encoding="utf-8") as f:
        f.write(content)
    return jsonify({"message": "File updated successfully"})
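# Read/write usage sketch for the /file route (illustrative, not in the paste):
#
#   requests.get("http://localhost:5000/file",
#                params={"path": "generated_files/file_1.py"})
#   requests.post("http://localhost:5000/file",
#                 json={"path": "generated_files/file_1.py", "content": "print('hi')"})
#
# Note that "path" is used verbatim, so as written these routes can read and
# overwrite any accessible file on disk, not only those under generated_files/.
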
# Query the Ollama API
def query_ollama(model):
    global contextual_memory

    # Ollama's /api/chat has no "system_prompt" field, so the instructions are
    # sent as a leading system message instead
    system_message = {
        "role": "system",
        "content": (
            "You are a conversational AI specialized in programming assistance. "
            "Follow the instructions and generate files when needed."
        ),
    }
    full_context = [system_message] + contextual_memory + conversation_history
    payload = {
        "model": model,
        "messages": full_context,
    }

    try:
        response = requests.post(OLLAMA_API_URL, json=payload, stream=True)
        if response.status_code != 200:
            return f"Ollama API error: {response.status_code}", None

        full_response = ""
        memory_update = None

        for line in response.iter_lines():
            if line:
                json_part = json.loads(line.decode('utf-8'))
                if "memory_update" in json_part:
                    memory_update = json_part["memory_update"]
                full_response += json_part.get("message", {}).get("content", "")

        return full_response or "No response received.", memory_update
    except Exception as e:
        return f"Error: {str(e)}", None
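# For reference, Ollama's /api/chat streams newline-delimited JSON chunks of
# roughly this shape, which is why the loop above concatenates message.content:
#
#   {"message": {"role": "assistant", "content": "Hello"}, "done": false}
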
# Query the Mistral API
def query_mistral(model, user_message):
    headers = {"Authorization": f"Bearer {MISTRAL_API_KEY}", "Content-Type": "application/json"}
    payload = {
        "model": model,
        "messages": [{"role": "user", "content": user_message}],
    }

    try:
        response = requests.post(MISTRAL_API_URL, json=payload, headers=headers)
        if response.status_code == 200:
            json_response = response.json()
            ai_response = json_response.get("choices", [{}])[0].get("message", {}).get("content", "")
            return ai_response, None
        else:
            return f"Mistral API error: {response.status_code}", None
    except Exception as e:
        return f"Error: {str(e)}", None

# Create files from the code blocks in a response
def create_files_from_response(response):
    # Match Markdown fenced code blocks, optionally carrying a
    # "// file: name" hint and a language tag on the opening fence
    code_blocks = re.findall(
        r"```(?:\s*//\s*file:\s*([\w.\-]+))?\s*(\w+)?\n(.*?)```",
        response,
        re.DOTALL,
    )
    for i, (filename_hint, language, code) in enumerate(code_blocks):
        extension = {
            "html": "html",
            "css": "css",
            "js": "js",
            "python": "py",
            "java": "java",
            "php": "php",
            "json": "json",
            "xml": "xml",
            "txt": "txt",
        }.get(language.lower() if language else "txt", "txt")

        filename = filename_hint or f"file_{i + 1}.{extension}"
        filepath = os.path.join(GENERATED_FILES_DIR, filename)
        with open(filepath, "w", encoding="utf-8") as f:
            f.write(code.strip())
        print(f"Created file: {filepath}")
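# The pattern above expects Markdown-style fences in the model reply,
# optionally carrying a filename hint, e.g. (hypothetical model output):
#
#   ```// file: index.html html
#   <h1>Hello</h1>
#   ```
#
# Blocks without a hint are saved as file_<n>.<ext>, with the extension
# derived from the language tag.
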
class ChatGUI:
    def __init__(self, root):
        self.root = root
        self.root.title("✨ AI Assistant Chat ✨")
        self.root.geometry("1000x800")
        self.root.configure(bg='#1a237e')

        style = ttk.Style()
        style.configure('Custom.TFrame', background='#1a237e')
        style.configure('Custom.TButton', padding=10, font=('Helvetica', 10, 'bold'))

        # Main frame
        main_frame = ttk.Frame(root, padding="20", style='Custom.TFrame')
        main_frame.pack(fill=BOTH, expand=True)

        # Model selection
        self.model_var = StringVar(value="mistral-small-latest")
        model_frame = ttk.Frame(main_frame)
        model_frame.pack(fill=X, pady=(0, 10))

        ttk.Label(model_frame, text="Select AI Model:", font=('Helvetica', 10, 'bold')).pack(side=LEFT)
        model_menu = ttk.Combobox(model_frame, textvariable=self.model_var, width=30)
        model_menu['values'] = MISTRAL_MODELS + OLLAMA_MODELS
        model_menu.pack(side=LEFT, padx=(5, 0))

        # Chat display with custom styling
        self.chat_display = scrolledtext.ScrolledText(
            main_frame,
            wrap=WORD,
            height=25,
            font=('Helvetica', 11),
            bg='#f5f5f5',
            padx=10,
            pady=10
        )
        self.chat_display.pack(fill=BOTH, expand=True, pady=(0, 10))

        # Input area with status
        self.status_var = StringVar(value="Ready to chat...")
        ttk.Label(main_frame, textvariable=self.status_var).pack(fill=X)

        input_frame = ttk.Frame(main_frame)
        input_frame.pack(fill=X, pady=(5, 0))

        self.message_input = ttk.Entry(input_frame, font=('Helvetica', 11))
        self.message_input.pack(side=LEFT, fill=X, expand=True)

        send_button = ttk.Button(
            input_frame,
            text="Send Message",
            command=self.send_message,
            style='Custom.TButton'
        )
        send_button.pack(side=LEFT, padx=(5, 0))

        # Bind the Enter key
        self.message_input.bind("<Return>", lambda e: self.send_message())

    def send_message(self):
        message = self.message_input.get()
        if message:
            self.chat_display.insert(END, f"\n🤔 You: {message}\n", 'user')
            self.chat_display.tag_configure('user', foreground='#2196F3')

            self.message_input.delete(0, END)
            self.status_var.set("AI is thinking...")
            self.root.update()

            # Run the AI query in a separate thread so the UI stays responsive
            threading.Thread(target=self.get_ai_response, args=(message,), daemon=True).start()

    def get_ai_response(self, message):
        selected_model = self.model_var.get()

        try:
            if selected_model in MISTRAL_MODELS:
                response, _ = query_mistral(selected_model, message)
            else:
                # query_ollama() reads the shared history, so record the
                # message there first (the GUI bypasses the /ask route)
                conversation_history.append({"role": "user", "content": message})
                response, _ = query_ollama(selected_model)

            self.chat_display.insert(END, f"\n🤖 AI: {response}\n", 'ai')
            self.chat_display.tag_configure('ai', foreground='#4CAF50')

        except Exception as e:
            self.chat_display.insert(END, f"\n⚠️ Error: {str(e)}\n", 'error')
            self.chat_display.tag_configure('error', foreground='#f44336')

        self.status_var.set("Ready to chat...")
        self.chat_display.see(END)
        self.root.update()

def main():
    root = Tk()
    gui = ChatGUI(root)
    root.mainloop()
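# As written, main() launches only the Tkinter GUI; the Flask routes above are
# defined but app.run() is never called. One way to serve both (a sketch, not
# part of the original paste) is to start Flask in a background thread first:
#
#   threading.Thread(
#       target=lambda: app.run(port=5000, use_reloader=False),
#       daemon=True,
#   ).start()
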
if __name__ == "__main__":
    main()