mirror of
https://github.com/MarioSpore/Grinch-AP.git
synced 2025-10-21 12:11:33 -06:00
216 lines
8.8 KiB
Python
"""
|
|
This is a utility file that converts logic in the form of CSV files into Python files that can be imported and used
|
|
directly by the world implementation. Whenever the logic files are updated, this script should be run to re-generate
|
|
the python files containing the data.
|
|
"""
|
|
import requests
|
|
|
|
# The CSVs are updated at this repository to be shared between generator and client.
data_repository_address = "https://raw.githubusercontent.com/digiholic/osrs-archipelago-logic/"

# The Github tag of the CSVs this was generated with
data_csv_tag = "v2.0.4"

# If True, read the CSVs from local files next to this script instead of
# fetching the tagged copies from the data repository above.
debug = False
|
|
|
|
# Entry point: regenerate all *_generated.py files from the logic CSVs.
if __name__ == "__main__":
    import sys
    import os
    import csv
    import typing

    # makes this module runnable from its world folder. Shamelessly stolen from Subnautica
    sys.path.remove(os.path.dirname(__file__))
    new_home = os.path.normpath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
    os.chdir(new_home)
    sys.path.append(new_home)
|
|
|
|
|
|
def load_location_csv():
    """Generate locations_generated.py from the locations CSV.

    Reads the CSV locally when ``debug`` is set, otherwise fetches the
    tagged copy from the shared data repository.
    """
    this_dir = os.path.dirname(os.path.abspath(__file__))

    with open(os.path.join(this_dir, "locations_generated.py"), 'w+') as loc_py_file:
        # Provenance header plus the imports the generated rows depend on.
        loc_py_file.write('"""\nThis file was auto generated by LogicCSVToPython.py\n"""\n')
        loc_py_file.write("from ..Locations import LocationRow, SkillRequirement\n")
        loc_py_file.write("\n")
        loc_py_file.write("location_rows = [\n")

        if debug:
            with open(os.path.join(this_dir, "locations.csv"), "r") as loc_file:
                parse_loc_file(loc_py_file, csv.reader(loc_file.read().splitlines()))
        else:
            url = data_repository_address + "/" + data_csv_tag + "/locations.csv"
            print("Loading: " + url)
            with requests.get(url) as req:
                if req.status_code == 200:
                    parse_loc_file(loc_py_file, csv.reader(req.text.splitlines()))
                else:
                    print(str(req.status_code) + ": " + req.reason)
        loc_py_file.write("]\n")
|
|
|
|
|
|
def parse_loc_file(loc_py_file, locations_reader):
    """Write one ``LocationRow(...)`` entry per CSV row to loc_py_file.

    Expected columns: name, category, regions (comma-separated), skill
    requirements ("<Skill> <Level>" pairs, comma-separated), items
    (comma-separated), and an optional trailing count (defaults to 0).
    """
    for row in locations_reader:
        # Skip the header row, if present
        if row[0] == "Location Name":
            continue
        row_line = "LocationRow("
        row_line += str_format(row[0])
        row_line += str_format(row[1].lower())

        region_strings = row[2].split(", ") if row[2] else []
        row_line += f"{str_list_to_py(region_strings)}, "

        # Guard the empty cell like the region/item columns do (previously
        # only masked by the `skill != ""` filter), and drop the two
        # redundant truthiness checks — iterating an empty list is a no-op.
        skill_strings = row[3].split(", ") if row[3] else []
        row_line += "["
        for split in (skill.split(" ") for skill in skill_strings if skill != ""):
            row_line += f"SkillRequirement('{split[0]}', {split[1]}), "
        row_line += "], "

        item_strings = row[4].split(", ") if row[4] else []
        row_line += f"{str_list_to_py(item_strings)}, "
        row_line += f"{row[5]})" if row[5] != "" else "0)"
        loc_py_file.write(f"\t{row_line},\n")
|
|
|
|
|
|
def load_region_csv():
    """Generate regions_generated.py from the regions CSV.

    Reads the CSV locally when ``debug`` is set, otherwise fetches the
    tagged copy from the shared data repository.
    """
    this_dir = os.path.dirname(os.path.abspath(__file__))

    with open(os.path.join(this_dir, "regions_generated.py"), 'w+') as reg_py_file:
        # Provenance header plus the import the generated rows depend on.
        reg_py_file.write('"""\nThis file was auto generated by LogicCSVToPython.py\n"""\n')
        reg_py_file.write("from ..Regions import RegionRow\n")
        reg_py_file.write("\n")
        reg_py_file.write("region_rows = [\n")

        if debug:
            with open(os.path.join(this_dir, "regions.csv"), "r") as region_file:
                parse_region_file(reg_py_file, csv.reader(region_file.read().splitlines()))
        else:
            url = data_repository_address + "/" + data_csv_tag + "/regions.csv"
            print("Loading: " + url)
            with requests.get(url) as req:
                if req.status_code == 200:
                    parse_region_file(reg_py_file, csv.reader(req.text.splitlines()))
                else:
                    print(str(req.status_code) + ": " + req.reason)
        reg_py_file.write("]\n")
|
|
|
|
|
|
def parse_region_file(reg_py_file, regions_reader):
    """Write one ``RegionRow(...)`` entry per CSV row to reg_py_file.

    Expected columns: region name, item requirement, connections
    (comma-separated), resources (comma-separated).
    """
    for row in regions_reader:
        # Skip the header row, if present
        if row[0] == "Region Name":
            continue

        row_line = "RegionRow("
        row_line += str_format(row[0])
        row_line += str_format(row[1])
        # Guard empty cells so an empty column emits [] rather than a bogus
        # [''] single-empty-string list — consistent with parse_loc_file's
        # handling of its optional list columns.
        connections = row[2].split(", ") if row[2] else []
        row_line += f"{str_list_to_py(connections)}, "
        resources = row[3].split(", ") if row[3] else []
        row_line += f"{str_list_to_py(resources)})"
        reg_py_file.write(f"\t{row_line},\n")
|
|
|
|
|
|
def load_resource_csv():
    """Generate resources_generated.py from the resources CSV.

    Reads the CSV locally when ``debug`` is set, otherwise fetches the
    tagged copy from the shared data repository.
    """
    this_dir = os.path.dirname(os.path.abspath(__file__))

    with open(os.path.join(this_dir, "resources_generated.py"), 'w+') as res_py_file:
        # Provenance header plus the import the generated rows depend on.
        res_py_file.write('"""\nThis file was auto generated by LogicCSVToPython.py\n"""\n')
        res_py_file.write("from ..Regions import ResourceRow\n")
        res_py_file.write("\n")
        res_py_file.write("resource_rows = [\n")

        if debug:
            with open(os.path.join(this_dir, "resources.csv"), "r") as resource_file:
                parse_resources_file(res_py_file, csv.reader(resource_file.read().splitlines()))
        else:
            url = data_repository_address + "/" + data_csv_tag + "/resources.csv"
            print("Loading: " + url)
            with requests.get(url) as req:
                if req.status_code == 200:
                    parse_resources_file(res_py_file, csv.reader(req.text.splitlines()))
                else:
                    print(str(req.status_code) + ": " + req.reason)
        res_py_file.write("]\n")
|
|
|
|
|
|
def parse_resources_file(res_py_file, resource_reader):
    """Write one ``ResourceRow(...)`` entry per CSV row to res_py_file."""
    for row in resource_reader:
        # Skip the header row, if present
        if row[0] == "Resource Name":
            continue

        # Escape single quotes so the generated literal stays valid Python.
        escaped = row[0].replace("'", "\\'")
        res_py_file.write(f"\tResourceRow('{escaped}'),\n")
|
|
|
|
|
|
def load_item_csv():
    """Generate items_generated.py from the items CSV.

    Reads the CSV locally when ``debug`` is set, otherwise fetches the
    tagged copy from the shared data repository.
    """
    this_dir = os.path.dirname(os.path.abspath(__file__))

    with open(os.path.join(this_dir, "items_generated.py"), 'w+') as item_py_file:
        # Provenance header plus the imports the generated rows depend on.
        item_py_file.write('"""\nThis file was auto generated by LogicCSVToPython.py\n"""\n')
        item_py_file.write("from BaseClasses import ItemClassification\n")
        item_py_file.write("from ..Items import ItemRow\n")
        item_py_file.write("\n")
        item_py_file.write("item_rows = [\n")

        if debug:
            with open(os.path.join(this_dir, "items.csv"), "r") as item_file:
                parse_item_file(item_py_file, csv.reader(item_file.read().splitlines()))
        else:
            url = data_repository_address + "/" + data_csv_tag + "/items.csv"
            print("Loading: " + url)
            with requests.get(url) as req:
                if req.status_code == 200:
                    parse_item_file(item_py_file, csv.reader(req.text.splitlines()))
                else:
                    print(str(req.status_code) + ": " + req.reason)
        item_py_file.write("]\n")
|
|
|
|
|
|
def parse_item_file(item_py_file, item_reader):
    """Write one ``ItemRow(...)`` entry per CSV row to item_py_file.

    Expected columns: item name, count, ItemClassification member name.
    """
    for row in item_reader:
        # Skip the header row, if present
        if row[0] == "Name":
            continue

        # name (quoted/escaped), raw count, then the classification enum ref.
        pieces = [
            str_format(row[0]),
            f"{row[1]}, ",
            f"ItemClassification.{row[2]})",
        ]
        item_py_file.write("\tItemRow(" + "".join(pieces) + ",\n")
|
|
|
|
|
|
def str_format(s) -> str:
    """Return *s* as a single-quoted Python literal followed by ", ".

    Single quotes inside *s* are backslash-escaped so the generated
    source stays valid.
    """
    escaped = s.replace("'", "\\'")
    return "'" + escaped + "', "
|
|
|
|
|
|
def str_list_to_py(str_list) -> str:
    """Render an iterable of strings as a Python list literal, formatting
    each element with :func:`str_format` (so each ends with ", ")."""
    return "[" + "".join(str_format(s) for s in str_list) + "]"
|
|
|
|
|
|
|
|
# Regenerate each *_generated.py file in turn, reporting progress.
load_location_csv()
print("Generated locations py")
load_region_csv()
print("Generated regions py")
load_resource_csv()
print("Generated resource py")
load_item_csv()
print("Generated item py")
|