Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- import yt
- from yt.config import ytcfg
- import ytree
- import numpy as np
- from bisect import bisect_left
- from calculations import get_star_formation_times
####################################################################################################
# Enable yt's MPI-based parallelism, then record the size of the top-level
# communicator (number of parallel workers) from the yt configuration.
yt.enable_parallelism()
nproc = ytcfg.get("yt", "internals", "topcomm_parallel_size")
####################################################################################################
def neighbors(myList, myNumber):
    """Return the pair of values in sorted ``myList`` that bracket ``myNumber``.

    Parameters
    ----------
    myList : sorted sequence of comparable values
    myNumber : value to locate

    Returns
    -------
    tuple
        ``(before, after)`` where ``before`` is the nearest element at or below
        the insertion point and ``after`` the element just above it.  When
        ``myNumber`` falls before the first or past the last element there is
        no bracketing pair, so the nearest endpoint fills both slots.

    Notes
    -----
    The original implementation returned a bare scalar in the two edge cases,
    which crashed callers that unpack the result as ``before, after``
    (see ``filter_halos``).  A consistent 2-tuple is now always returned.
    """
    pos = bisect_left(myList, myNumber)
    if pos == 0:
        # myNumber precedes every element: no "before" exists.
        return myList[0], myList[0]
    if pos == len(myList):
        # myNumber exceeds every element: no "after" exists.
        return myList[-1], myList[-1]
    before = myList[pos - 1]
    after = myList[pos]
    return before, after
- ####################################################################################################
# Build the redshift catalog from redshifts.dat: each line maps an output
# file name to its redshift, formatted like "<filename>: ... = <z>".
zcat = {"z": [], "fn": []}
with open('/storage/home/hcoda1/0/jw254/data/SG64-2020/redshifts.dat', 'r') as fp:
    for entry in fp:
        # Filename is everything before the first ':'; redshift follows '='.
        zcat["fn"].append(entry.split(":")[0])
        zcat["z"].append(float(entry.split("=")[-1]))
# Convert to an array so nearest-redshift lookups can be vectorized.
zcat["z"] = np.array(zcat["z"])
def get_filename_from_redshift(z):
    """Return the catalog filename whose redshift is closest to ``z``."""
    closest = np.abs(z - zcat['z']).argmin()
    return zcat['fn'][closest]
def get_yt_dataset(node):
    """Load the yt dataset nearest to this node's redshift and attach it as ``node.ds``."""
    fn = get_filename_from_redshift(node["redshift"])
    node.ds = yt.load("/storage/home/hcoda1/0/jw254/data/SG64-2020/" + str(fn))
def get_yt_sphere(node):
    """Attach a yt sphere (halo center, virial radius, in unitary units) as ``node.sphere``."""
    dataset = node.ds
    halo_center = node["position"].to("unitary").v
    halo_radius = node["virial_radius"].to("unitary").to_value()
    node.sphere = dataset.sphere(halo_center, (halo_radius, "unitary"))
def delete_attributes(node, attributes):
    """Remove each named attribute from ``node``, silently skipping ones it lacks."""
    present = (name for name in attributes if hasattr(node, name))
    for name in present:
        delattr(node, name)
def delete_dataset(node):
    """Release cached grid data held by ``node.ds``, if a dataset is attached.

    Used as an always-run cleanup step in the analysis pipeline so memory does
    not accumulate while iterating over many nodes.
    """
    if hasattr(node, "ds"):
        node.ds.index.clear_all_data()
- ####################################################################################################
@yt.particle_filter(requires=["particle_type"], filtered_type="all")
def stars(pfilter, data):
    """Select star particles: particle_type == 2 marks stars in this dataset."""
    ptype = data[(pfilter.filtered_type, "particle_type")]
    return ptype == 2
@yt.particle_filter(requires=["creation_time"], filtered_type="stars")
def young_stars(pfilter, data):
    """Select stars formed within the last 3 Myr (excluding future creation times)."""
    age = data.ds.current_time - data[pfilter.filtered_type, "creation_time"]
    is_young = age.in_units("Myr") <= 3
    already_formed = age >= 0
    return np.logical_and(is_young, already_formed)
# Halo criteria:
# (i) Within the halo's progenitor line, the halo at some point in its evolution must exceed a dark matter mass of 1E4 M⊙.
# (ii) At the time of a radiative event, the halo must have a molecular hydrogen fraction fH2 ≥ 1E−4 within its densest grid cell and a dark matter to baryon mass ratio above 1% of the cosmic mean (Ωbaryon / ΩDM).
# (iii) The halo must not be a subhalo.
def filter_halos(node):
    """Return True if ``node`` passes halo criteria (i)-(iii) listed above.

    Expects ``node.ds`` to already be attached (see ``get_yt_dataset``); meant
    to run as a selection operation inside an ytree AnalysisPipeline, where a
    False return drops the node.
    """
    # criterion (i): the progenitor line must reach 1e4 Msun at some point.
    if max(list(node["prog", "mass"])) < 1E4:
        return False
    # criterion (ii)
    # step 1: get star formation times
    node.ds.add_particle_filter("stars")
    # print(node.ds.all_data().quantities.total_mass()) # gives non-zero masses
    node.ds.add_particle_filter("young_stars")
    # print(node.ds.all_data().quantities.total_mass()) # gives non-zero masses
    star_formation_times = node.ds.all_data()[("young_stars", "creation_time")].in_units('Myr')
    # step 2: if star formed around node time, check if its H2 fraction >= 1E-4 and mass ratio > 1% of the cosmic mean
    if len(star_formation_times) == 0: # every node stops here
        print("no stars at all for this node")
        return False
    if max(star_formation_times) < 0:
        print("no stars yet for this node", min(star_formation_times), max(star_formation_times))
        return False
    # NOTE(review): neighbors() must always yield a 2-tuple here; if node["time"]
    # falls outside the star-formation-time range the unpacking can fail.
    before, after = neighbors(star_formation_times, node["time"])
    time_before = node["time"] - before
    time_after = after - node["time"]
    # print(before, node["time"]) # all times are < 0
    # NOTE(review): the 0.33 threshold presumably means 0.33 Myr — assumes
    # node["time"] shares the Myr units of star_formation_times; confirm.
    if 0.33 > min(time_before, time_after):
        # If a star formed close to the node's epoch, require a peak H2
        # fraction above 1e-4 somewhere in the dataset.
        print(node.ds.all_data().max(("gas", "H2_fraction")))
        if node.ds.all_data().max(("gas", "H2_fraction")) <= 1E-4:
            return False
    # @TODO - Check mass ratio vs cosmic mean
    # criterion (iii): upid == -1 marks a host (non-sub) halo in rockstar output.
    if node["upid"] != -1:
        return False
    return True
- ####################################################################################################
def calculate_significance(node):
    """Recursively compute a node's "significance" and store it on the node.

    Significance is the node's mass times the time interval to its descendent
    (zero for a tree root), plus the summed significance of all ancestors.
    The result is written to the node's "significance" field and returned.
    """
    if node.descendent is None:
        interval = 0. * node["time"]  # zero, but carrying the time units
    else:
        interval = node.descendent["time"] - node["time"]
    significance = node["mass"] * interval
    ancestors = node.ancestors
    if ancestors is not None:
        for ancestor in ancestors:
            significance += calculate_significance(ancestor)
    node["significance"] = significance
    return significance
- ####################################################################################################
# When True, write the arbor to disk after each analysis stage and re-load it.
save_and_reload = False
####################################################################################################
arbor = ytree.load("/storage/home/hcoda1/0/jw254/data/SG64-2020/rockstar_halos-jhw/trees/tree_0_0_0.dat")
if save_and_reload:
    fn = arbor.save_arbor(filename="arbor_original")
    arbor = ytree.load(fn)
    print(fn)

# Stage 1: compute the "significance" field for every node in every tree.
arbor.add_analysis_field("significance", "Msun*Myr")
ap_significance = ytree.AnalysisPipeline()
ap_significance.add_operation(calculate_significance)
trees = list(arbor[:])
for tree in ytree.parallel_trees(trees):
    for halo in tree["forest"]:
        ap_significance.process_target(halo)
############################################################
if save_and_reload:
    fn = arbor.save_arbor(filename="arbor_significance")
    arbor = ytree.load(fn)
    print(fn)
############################################################
# Stage 2: filter halos, walking each tree along its most-significant branch.
print("before filtering:", arbor.size)
arbor.set_selector("max_field_value", "significance")
if "halo_DM_mass" not in arbor.field_list:
    arbor.add_analysis_field("halo_DM_mass", default=-1, units="Msun")
ap_filter = ytree.AnalysisPipeline()
ap_filter.add_operation(get_yt_dataset)
ap_filter.add_operation(filter_halos)
# Cleanup runs even for nodes the filter rejects, to keep memory bounded.
ap_filter.add_operation(delete_dataset, always_do=True)
trees = list(arbor[:])
print("ntrees:", len(trees))
for tree in ytree.parallel_trees(trees):
    for halo in tree["forest"]:
        ap_filter.process_target(halo)
print("during filtering:", arbor.size)
############################################################
if save_and_reload:
    arbor.save_arbor(filename="arbor_analysis", trees=trees)
############################################################
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement