Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- #
- # Developer Bastian © 2024
- # License Creative Commons DEED 4.0 (https://creativecommons.org/licenses/by-sa/4.0/deed.en)
- # Tutorial video at
- #
- # This generates item descriptions via a large language model (llm)
- # It loads a list of items and a prompt from files, then processes each item to generate text descriptions.
- # The descriptions are modified in content and text mood through the use of 3 content arrays.
- # The resulting description is written back to a result text file.
- # The llm API is used through a local server based on LM Studio (https://lmstudio.ai/)
- # Import necessary modules
- import random
- import re
- import time
- from openai import OpenAI # install via 'pip install openai'
# Define constants
# NOTE(review): hard-coded Windows paths — adjust for your environment.
SOURCE_FILE = "D:\\llm\\items.txt" # example for minerals at https://pastebin.com/nfxP9TQJ
TARGET_FILE = "D:\\llm\\results.txt" # result text file will be created if not exists
PROMPT_FILE = "D:\\llm\\prompt.txt" # example at https://pastebin.com/EC0cVFz9
# Define search-replace dictionaries and vars
# Placeholder in the prompt file that gets replaced by the current item name.
item_replace_string = "X_ITEM_X"
# Maps the X_MOOD_X placeholder to candidate writing styles; one entry is
# picked at random per item by adjust_prompt().
MOOD_ARRAY = {
    'X_MOOD_X': [
        'a dry encyclopedia',
        'a writing on a wall of a medieval public toilet',
        'a handwritten crumbled note from an apprentice',
        'the ultimate tome of wisdom from a master',
        'a drunken archeologist',
        'a poem',  # was 'a poetry' — grammar fix for cleaner prompt text
        'the language of a detail obsessed nerd',
        'a wall writing in an ancient crypt',
        'a fairy tale',
        'a folk song',
        'a professional with decades of experiences',
        'a theologic text',
        'an alchemist laboratory handbook',  # was 'a alchemist' — article fix
        'a tired village school teacher',
        'a steampunk technical manual',
        'a wise dragon teaching its offspring',
        'an old smith with decades of experience'
    ]
}
# Maps the X_SOURCE_X placeholder to candidate sourcing methods for the item;
# one entry is picked at random per item by adjust_prompt().
SOURCING_ARRAY = {
    'X_SOURCE_X': [
        'mining',
        'harvesting plants',
        'harvesting animal and monster parts',
        'collecting from the sea',
        'monster cores',
        'collecting from other planes of existence',  # was 'existance' — spelling fix
        'alchemy processes',
        'enchanting processes',
        'medieval scientific processes',
        'steampunk creation processes',
        'creation in very special circumstances'
    ]
}
# Maps the X_FIELD_X placeholder to candidate fields of use for the item;
# one entry is picked at random per item by adjust_prompt().
FIELD_ARRAY = {
    'X_FIELD_X': [
        'nature medicine',
        'magical curses',
        'weapon improvements',
        'armor improvements',
        'magical elixirs',  # was 'elexirs' — spelling fix
        'everyday usage',  # was 'everydays usage' — grammar fix
        'medical purpose',
        'hunting and trapping',
        'riding and pets',
        'exploration',
        'traveling and surviving',
        'unarmed fighting',
        'thieving',
        'poisoning',
        'wearables and jewellery',  # was 'juwellery' — spelling fix
        'farseeing and portal travel',
        'alchemy',
        'magical research',
        'steampunk'
    ]
}
# Define functions
def query_llm(prompt):
    """Send *prompt* to the local LM Studio server and return the reply message.

    Returns the message object of the first completion choice, or None when
    the request fails (the error is printed rather than raised).  Callers
    MUST check for None before accessing ``.content`` — the original code
    fell through implicitly, which crashed main() with an AttributeError
    whenever the API was unreachable.
    """
    try:
        # LM Studio exposes an OpenAI-compatible endpoint on localhost.
        client = OpenAI(base_url="http://localhost:1234/v1", api_key="lm-studio")
        completion = client.chat.completions.create(
            model="TheBloke/Llama-2-7B-Chat-GGUF/llama-2-7b-chat.Q8_0.gguf",
            messages=[{"role": "user", "content": prompt}],
            temperature=0.7,
        )
        return completion.choices[0].message
    except Exception as e:
        print(f"Error when querying the LLM model API: {e}")
        return None  # explicit failure signal instead of implicit fall-through
def append_or_create_file(file_name, line_to_add):
    """Append *line_to_add* plus a newline to *file_name*.

    The file is created automatically when it does not exist; any error is
    reported on stdout instead of being raised.
    """
    try:
        with open(file_name, 'a+') as out_file:
            out_file.write(f"{line_to_add}\n")
    except Exception as error:
        print(f"An error occurred when adding a line to file: {error}")
def adjust_prompt(prompt, moods, fields, sources):
    """Substitute random selections for the placeholder keys in *prompt*.

    Each of *moods*, *fields*, *sources* maps a placeholder string (e.g.
    ``'X_MOOD_X'``) to a list of candidate phrases; one phrase is chosen at
    random per placeholder and substituted into the prompt.

    Fixes over the original: every key of every mapping is now substituted
    (the old copy-pasted loops only kept the LAST key of each dict), and an
    empty mapping no longer raises a NameError.  Behavior and printed output
    are unchanged for the single-key dicts used by this script.
    """
    chosen = []
    for mapping in (moods, fields, sources):
        for key, values in mapping.items():
            pick = random.choice(values)
            prompt = prompt.replace(key, pick)
            chosen.append((key, pick))
    # report which selections were made (same format as before)
    summary = "\r\n".join(f"\t{key}\t=\t'{pick}'" for key, pick in chosen)
    print(f"Chosen parameters:\r\n{summary}")
    return prompt
# read file (sourcepath, shouldBeOneLine)
def get_file_content(source_path, should_be_one_line):
    """Load the text file at *source_path*.

    When *should_be_one_line* is true, return the whole file as one string
    with newlines flattened to spaces; otherwise return the list of raw
    lines.  On any read error a message is printed and None is returned.
    """
    try:
        with open(source_path, 'r') as handle:
            if not should_be_one_line:
                return handle.readlines()
            return handle.read().replace("\n", " ")
    except FileNotFoundError:
        print("The file was not found. Please check the file path.")
    except Exception as e:
        print(f"An error occurred: {e}")
# Main script
def main():
    """Generate a description for every item and append each to TARGET_FILE.

    Loads the prompt template and item list, randomizes the prompt per item,
    queries the local LLM, and writes one pipe-delimited line per item.
    """
    # get prompt and items
    prompt = get_file_content(PROMPT_FILE, True)
    items = get_file_content(SOURCE_FILE, False)
    # robustness: get_file_content returns None on read errors, which would
    # previously crash below with a TypeError on len(None)
    if prompt is None or items is None:
        print("Aborting: prompt or item list could not be loaded.")
        return
    total = len(items)
    # process over all items (enumerate replaces the manual counter)
    for current, item in enumerate(items, start=1):
        # clean the item from line breaks
        item = re.sub(r'[\r\n]+', '', item)
        start_time = time.time()
        print(f"Processing {current}/{total}: '{item}'")
        # modify the prompt with random selections
        modified_prompt = adjust_prompt(prompt, MOOD_ARRAY, FIELD_ARRAY, SOURCING_ARRAY)
        modified_prompt = modified_prompt.replace(item_replace_string, item)
        # query the llm
        result = query_llm(modified_prompt)
        # robustness: query_llm returns None on API errors, which would
        # previously crash with AttributeError on result.content
        if result is None:
            print(f"Skipping '{item}': no response from the LLM.")
            continue
        # append to result file
        # replace any line break with a '|' to not break the CSV (but rework to line break in later usage)
        append_or_create_file(TARGET_FILE, result.content.replace("\n", "|"))
        # report result
        elapsed_time = time.time() - start_time
        print(f"'{item}' text generation took {elapsed_time:.1f} seconds:\r\n{result.content}\r\n")
# Script entry point: run only when executed directly, not when imported.
if __name__ == "__main__":
    main()
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement