Advertisement
Developer_Bastian

Generate game text content with a local llm using LM Studio and Python

Apr 11th, 2024 (edited)
162
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
Python 6.40 KB | Gaming | 0 0
  1. #
  2. # Developer Bastian © 2024
  3. # License Creative Commons DEED 4.0 (https://creativecommons.org/licenses/by-sa/4.0/deed.en)
  4. # Tutorial video at
  5. #
  6. # This generates item descriptions via a large language model (llm)
  7. # It loads a list of items and a prompt from files, then processes each item to generate text descriptions.
# The descriptions are modified in content and text mood through the use of 3 content arrays.
  9. # The resulting description is written back to a result text file.
  10. # The llm API is used through a local server based on LM Studio (https://lmstudio.ai/)
  11.  
  12. # Import necessary modules
  13. import random
  14. import re
  15. import time
  16. from openai import OpenAI               # install via 'pip install openai'
  17.  
# Define constants
SOURCE_FILE = "D:\\llm\\items.txt"      # input: one item name per line; example for minerals at https://pastebin.com/nfxP9TQJ
TARGET_FILE = "D:\\llm\\results.txt"    # output CSV-like file; result text file will be created if not exists
PROMPT_FILE = "D:\\llm\\prompt.txt"     # prompt template with the X_*_X placeholders; example at https://pastebin.com/EC0cVFz9

# Define search-replace dictionaries and vars
# Placeholder in the prompt template that gets replaced by the current item name.
item_replace_string = "X_ITEM_X"
  26. MOOD_ARRAY = {
  27.     'X_MOOD_X': [
  28.         'a dry encyclopedia',
  29.         'a writing on a wall of a medieval public toilet',
  30.         'a handwritten crumbled note from an apprentice',
  31.         'the ultimate tome of wisdom from a master',
  32.         'a drunken archeologist',
  33.         'a poetry',
  34.         'the language of a detail obsessed nerd',
  35.         'a wall writing in an ancient crypt',
  36.         'a fairy tale',
  37.         'a folk song',
  38.         'a professional with decades of experiences',
  39.         'a theologic text',
  40.         'a alchemist laboratory handbook',
  41.         'a tired village school teacher',
  42.         'a steampunk technical manual',
  43.         'a wise dragon teaching its offspring',
  44.         'an old smith with decades of experience'
  45.     ]
  46. }
  47.  
  48. SOURCING_ARRAY = {
  49.     'X_SOURCE_X': [
  50.         'mining',
  51.         'harvesting plants',
  52.         'harvesting animal and monster parts',
  53.         'collecting from the sea',
  54.         'monster cores',
  55.         'collecting from other planes of existance',
  56.         'alchemy processes',
  57.         'enchanting processes',
  58.         'medieval scientific processes',
  59.         'steampunk creation processes',
  60.         'creation in very special circumstances'
  61.     ]
  62. }
  63.  
  64. FIELD_ARRAY = {
  65.     'X_FIELD_X': [
  66.         'nature medicine',
  67.         'magical curses',
  68.         'weapon improvements',
  69.         'armor improvements',
  70.         'magical elexirs',
  71.         'everydays usage',
  72.         'medical purpose',
  73.         'hunting and trapping',
  74.         'riding and pets',
  75.         'exploration',
  76.         'traveling and surviving',
  77.         'unarmed fighting',
  78.         'thieving',
  79.         'poisoning',
  80.         'wearables and juwellery',
  81.         'farseeing and portal travel',
  82.         'alchemy',
  83.         'magical research',
  84.         'steampunk'
  85.     ]
  86. }
  87.  
  88. # Define functions
  89. def query_llm(prompt):
  90.     # Query the LLM model API and return the message from the first choice.
  91.     try:
  92.         client = OpenAI(base_url="http://localhost:1234/v1", api_key="lm-studio")
  93.         completion = client.chat.completions.create(
  94.             model="TheBloke/Llama-2-7B-Chat-GGUF/llama-2-7b-chat.Q8_0.gguf",
  95.             messages=[{"role": "user",  "content": prompt}],
  96.             temperature=0.7,
  97.         )
  98.         return completion.choices[0].message
  99.     except Exception as e:
  100.         print(f"Error when querying the LLM model API: {e}")
  101.  
  102.  
  103. def append_or_create_file(file_name, line_to_add):
  104.     # Append a line to a file, creating the file if it doesn't exist.
  105.     try:
  106.         with open(file_name, 'a+') as file:
  107.             file.write(line_to_add + '\n')
  108.     except Exception as e:
  109.         print(f"An error occurred when adding a line to file: {e}")
  110.  
  111.  
  112. def adjust_prompt(prompt, moods, fields, sources):
  113.     # select mood
  114.     for key, values in moods.items():
  115.         random_mood = random.choice(values)
  116.         mood_key = key
  117.     # select field
  118.     for key, values in fields.items():
  119.         random_field = random.choice(values)
  120.         field_key = key
  121.     # select source
  122.     for key, values in sources.items():
  123.         random_source = random.choice(values)
  124.         source_key = key
  125.     #replace the keys with the string selected
  126.     prompt = prompt.replace(mood_key, random_mood)
  127.     prompt = prompt.replace(field_key, random_field)
  128.     prompt = prompt.replace(source_key, random_source)
  129.     print(f"Chosen parameters:\r\n\t{mood_key}\t=\t'{random_mood}'\r\n\t{field_key}\t=\t'{random_field}'\r\n\t{source_key}\t=\t'{random_source}'")
  130.     return prompt
  131.  
  132.  
  133. # read file (sourcepath, shouldBeOneLine)
  134. def get_file_content(source_path, should_be_one_line):
  135.     # Read the content of a file into memory.
  136.     try:
  137.         with open(source_path, 'r') as file:
  138.             if (should_be_one_line):
  139.                 file_content = file.read()
  140.                 return file_content.replace("\n", " ")
  141.             else:
  142.                 file_content =  file.readlines()
  143.                 return file_content
  144.     except FileNotFoundError:
  145.         print("The file was not found. Please check the file path.")
  146.     except Exception as e:
  147.         print(f"An error occurred: {e}")
  148.  
  149.  
  150. # Main script
  151. def main():
  152.     # get prompt and items
  153.     prompt = get_file_content(PROMPT_FILE, True)
  154.     items = get_file_content(SOURCE_FILE, False)
  155.     # prepare counters
  156.     total = len(items)
  157.     current = 0
  158.     # process over all items
  159.     for item in items:
  160.         # clean the item from line breaks
  161.         item = re.sub(r'[\r\n]+', '', item)
  162.         # prepare counter and info
  163.         start_time = time.time()
  164.         current += 1
  165.         print(f"Processing {current}/{total}: '{item}'")
  166.         # modify the prompt with random selections
  167.         modified_prompt = adjust_prompt(prompt, MOOD_ARRAY, FIELD_ARRAY, SOURCING_ARRAY)
  168.         modified_prompt = modified_prompt.replace(item_replace_string, item)
  169.         # query the llm
  170.         result = query_llm(modified_prompt)
  171.         # append to result file
  172.         # replace any line break with a '|' to not break the CSV (but rework to line break in later usage)
  173.         append_or_create_file(TARGET_FILE, result.content.replace("\n", "|"))
  174.         # report result
  175.         elapsed_time = time.time() - start_time
  176.         print(f"'{item}' text generation took {elapsed_time:.1f} seconds:\r\n{result.content}\r\n")
  177.        
  178.  
# Script entry point: run only when executed directly, not when imported.
if __name__ == "__main__":
    main()
  182.  
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement