Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- # modified viz.py (run after each grid size simulation)
- import os
- import glob
- import argparse
- import pandas as pd
# --- command-line interface -------------------------------------------------
parser = argparse.ArgumentParser(description="Visualize the results of a case")
parser.add_argument('case_dir', type=str, help="Path to the case directory")
# NOTE(review): 'fps' is parsed but never read in this excerpt -- presumably
# consumed by the original video-rendering code; confirm before removing.
parser.add_argument('fps', type=int, help="Frames per second for the video")
ARGS = vars(parser.parse_args())  # argparse.Namespace -> plain dict
DIRPATH = os.path.abspath(ARGS['case_dir'])  # absolute path to the case directory
CASENAME = os.path.basename(DIRPATH)  # last path component; the case's name
# Plotting constants (not referenced in this excerpt; presumably used by the
# original viz.py plotting code -- verify).
TARGET_ASPECT_RATIO = 16 / 9
PLOT_PADDING = 0.04
PLOT_DIMS = (15, 10)
# display name -> [data-file prefix, data column name(s), plot color]
VARS = {
'Density': ['prim.1', ['Density'] , '#ed4337'],
'Velocity': ['prim.2', ['Velocity'], '#3561f2'],
'Pressure': ['prim.3', ['Pressure'], '#28ed87'],
}
# t_step -> merged DataFrame holding all variables at that timestep
DATA = {}
print(f"Importing data from {os.path.relpath(DIRPATH, os.getcwd())}...")
# Grid size; this assignment is rewritten externally (with sed) after each
# grid-size computation, per the author's note.
Nx = 399 # ran after each grid size computation with sed
# Parse data: for each primitive variable, read every processor's output
# file (named <prefix>.<proc>.<t_step>.dat), group the pieces by timestep,
# concatenate processors, then merge all variables into one DataFrame per
# timestep (pd.merge joins on the shared 'x' column).
for name, cfg in VARS.items():
    print(f"* {name} ({cfg[0]}):")
    print(f" * Reading data files...")
    dfs = {}  # t_step -> list of per-processor DataFrames
    for f in glob.glob(os.path.join(DIRPATH, f'D_{Nx}', f'{cfg[0]}.*.*.dat')):
        parts = f.split('.')
        # proc is parsed for clarity but not used below
        proc, t_step = int(parts[-3]), int(parts[-2])
        # BUG FIX: the original tested `t_step != dfs` (int vs dict, always
        # True), which reset the list on every file and silently kept only
        # the last processor's data for each timestep.
        if t_step not in dfs:
            dfs[t_step] = []
        # raw string for the regex separator avoids the invalid-escape warning
        dfs[t_step].append(pd.read_csv(f, sep=r'\s+', header=None, names=['x'] + cfg[1]))
    print(f" * Concatenating processors...")
    # glue the per-processor chunks into a single frame per timestep
    for t_step in dfs:
        dfs[t_step] = pd.concat(dfs[t_step])
    print(f" * Merging across timesteps...")
    for t_step, df in dfs.items():
        if t_step not in DATA:
            DATA[t_step] = df
        else:
            DATA[t_step] = pd.merge(DATA[t_step], df)
# Remove empty timesteps, then compute the L1 (sum) and Linf (max) norms of
# the Density difference between the two earliest remaining timesteps, and
# append "<Nx>,<L1>,<Linf>" to data.txt for the later convergence analysis
# in norms.py (the L1 value is rescaled by h there).
t_steps = sorted(DATA.keys())
for t_step in t_steps:
    if DATA[t_step].empty:
        del DATA[t_step]
# BUG FIX: rebuild the key list after deletion; the original kept the stale
# list, so t_steps[0]/t_steps[n] could index a timestep just deleted above.
t_steps = sorted(DATA.keys())
n = 1
t_step_0 = t_steps[0]
t_step_1 = t_steps[n]
# Keep only the x/Density columns and sort by x so rows correspond
# positionally before subtracting.
DATA_0 = DATA[t_step_0].drop("Velocity", axis=1).drop("Pressure", axis=1)
DATA_1 = DATA[t_step_1].drop("Velocity", axis=1).drop("Pressure", axis=1)
# BUG FIX: the original called sort_values without assigning the result
# (a no-op, since sort_values returns a new frame) and then diffed the
# unsorted full frames, contradicting the comment above.
DATA_0 = DATA_0.sort_values(["x"]).reset_index(drop=True)
DATA_1 = DATA_1.sort_values(["x"]).reset_index(drop=True)
# element-wise |Density_1 - Density_0|; aligned by position via reset_index
diff = DATA_1[["Density"]].subtract(DATA_0[["Density"]]).abs()
print(f"Norms from t_step {0} to t_step {n}: {diff.sum().to_string(index=False)}, {diff.max().to_string(index=False)}")
with open("./data.txt", "a+") as data_file:
    data_file.write(f"{Nx},{diff.sum().to_string(index=False)},{diff.max().to_string(index=False)}\n")
- # norms.py (runs after all grid sizes have been simulated and basic norms calculated
- import numpy as np
- from scipy.stats import linregress
- import matplotlib.pyplot as plt
# Read data.txt and correct the L1 norm by multiplying by the grid spacing
# h = L/grid_size = 10/grid_size for each grid size.
with open("./data.txt", "r") as data_file:
    data = [line.strip("\n\r").split(",") for line in data_file.readlines()]
# Rows are "Nx,L1_sum,Linf". The +1 presumably converts a cell count (e.g.
# 399) to a point count (400) -- TODO confirm against the solver's output.
Nx = np.array([float(line[0]) + 1 for line in data])
h = 10 / Nx  # grid spacing for a domain of length L = 10 (vectorized)
L1 = np.array([float(line[1]) for line in data]) * h  # discrete L1 = h * sum|diff|
Linf = np.array([float(line[2]) for line in data])
# we plot 1/N instead of just N
# (Nx is already a float ndarray; the original rebuilt it element by element)
recipN = 1 / Nx
# The slope of the log-log fit is the observed order of convergence.
print(linregress(np.log10(recipN), np.log10(L1)))
print(linregress(np.log10(recipN), np.log10(Linf)))
# Scatter both norms against 1/N on log-log axes; the log-log slope found
# above is the convergence order, so straight lines are expected here.
fig = plt.figure()
ax = fig.add_subplot(2, 1, 1)
# set both axes to log scale
for set_scale in (ax.set_xscale, ax.set_yscale):
    set_scale("log")
# draw on the axes object directly instead of the implicit pyplot state
ax.scatter(recipN, L1, label="L1")
ax.scatter(recipN, Linf, label="Linf")
ax.set_xlabel("N^-1")
ax.set_ylabel("norm(alpha_rho1)")
ax.legend()
plt.show()
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement