OSRS: New Tasks, New Options, Compatibility with new Plugin Features (#4688)

This commit is contained in:
digiholic
2025-05-07 11:43:03 -06:00
committed by GitHub
parent 1ee8e339af
commit 703f5a22fd
10 changed files with 366 additions and 115 deletions

View File

@@ -8,7 +8,9 @@ import requests
# The CSVs are updated at this repository to be shared between generator and client.
data_repository_address = "https://raw.githubusercontent.com/digiholic/osrs-archipelago-logic/"
# The Github tag of the CSVs this was generated with
data_csv_tag = "v2.0.4"
# If true, generate from local CSV files next to this script instead of fetching from the repository
debug = False
if __name__ == "__main__":
import sys
@@ -26,98 +28,167 @@ if __name__ == "__main__":
def load_location_csv():
    """Generate ``locations_generated.py`` from the locations CSV.

    Reads the CSV from a local file when ``debug`` is true, otherwise fetches it
    from the data repository at the pinned tag. Writes nothing but the list
    header/footer itself; row formatting is delegated to ``parse_loc_file``.
    On a non-200 response the error is printed and an empty list is emitted.
    """
    this_dir = os.path.dirname(os.path.abspath(__file__))
    with open(os.path.join(this_dir, "locations_generated.py"), 'w+') as loc_py_file:
        loc_py_file.write('"""\nThis file was auto generated by LogicCSVToPython.py\n"""\n')
        loc_py_file.write("from ..Locations import LocationRow, SkillRequirement\n")
        loc_py_file.write("\n")
        loc_py_file.write("location_rows = [\n")
        if debug:
            with open(os.path.join(this_dir, "locations.csv"), "r") as loc_file:
                locations_reader = csv.reader(loc_file.read().splitlines())
                parse_loc_file(loc_py_file, locations_reader)
        else:
            print("Loading: " + data_repository_address + "/" + data_csv_tag + "/locations.csv")
            with requests.get(data_repository_address + "/" + data_csv_tag + "/locations.csv") as req:
                if req.status_code == 200:
                    locations_reader = csv.reader(req.text.splitlines())
                    parse_loc_file(loc_py_file, locations_reader)
                else:
                    # Best-effort: report the HTTP failure but still close out the list
                    print(str(req.status_code) + ": " + req.reason)
        loc_py_file.write("]\n")
def parse_loc_file(loc_py_file, locations_reader):
    """Write one ``LocationRow(...)`` source line per CSV row to *loc_py_file*.

    Expected columns: name, category, regions (", "-joined), skill requirements
    (", "-joined "Skill Level" pairs), items (", "-joined), and a trailing
    numeric field (defaults to 0 when empty). A header row is skipped.
    """
    for row in locations_reader:
        # Skip the header row, if present
        if row[0] == "Location Name":
            continue
        row_line = "LocationRow("
        row_line += str_format(row[0])
        row_line += str_format(row[1].lower())
        region_strings = row[2].split(", ") if row[2] else []
        row_line += f"{str_list_to_py(region_strings)}, "
        skill_strings = row[3].split(", ")
        row_line += "["
        if skill_strings:
            # Empty cells split to [""]; drop blanks before pairing "Skill Level"
            split_skills = [skill.split(" ") for skill in skill_strings if skill != ""]
            if split_skills:
                for split in split_skills:
                    row_line += f"SkillRequirement('{split[0]}', {split[1]}), "
        row_line += "], "
        item_strings = row[4].split(", ") if row[4] else []
        row_line += f"{str_list_to_py(item_strings)}, "
        # Final column defaults to 0 when blank
        row_line += f"{row[5]})" if row[5] != "" else "0)"
        loc_py_file.write(f"\t{row_line},\n")
def load_region_csv():
    """Generate ``regions_generated.py`` from the regions CSV.

    Local file when ``debug`` is true, otherwise fetched from the data
    repository at the pinned tag; row formatting is delegated to
    ``parse_region_file``. On a non-200 response the error is printed and an
    empty list is emitted.
    """
    this_dir = os.path.dirname(os.path.abspath(__file__))
    with open(os.path.join(this_dir, "regions_generated.py"), 'w+') as reg_py_file:
        reg_py_file.write('"""\nThis file was auto generated by LogicCSVToPython.py\n"""\n')
        reg_py_file.write("from ..Regions import RegionRow\n")
        reg_py_file.write("\n")
        reg_py_file.write("region_rows = [\n")
        if debug:
            with open(os.path.join(this_dir, "regions.csv"), "r") as region_file:
                regions_reader = csv.reader(region_file.read().splitlines())
                parse_region_file(reg_py_file, regions_reader)
        else:
            print("Loading: " + data_repository_address + "/" + data_csv_tag + "/regions.csv")
            with requests.get(data_repository_address + "/" + data_csv_tag + "/regions.csv") as req:
                if req.status_code == 200:
                    regions_reader = csv.reader(req.text.splitlines())
                    parse_region_file(reg_py_file, regions_reader)
                else:
                    print(str(req.status_code) + ": " + req.reason)
        reg_py_file.write("]\n")
def parse_region_file(reg_py_file, regions_reader):
    """Write one ``RegionRow(...)`` source line per CSV row to *reg_py_file*.

    Expected columns: region name, a second string field, connections
    (", "-joined), and resources (", "-joined). A header row is skipped.
    Quote-escaping of values is handled by ``str_format`` via
    ``str_list_to_py``, not here.
    """
    for row in regions_reader:
        # Skip the header row, if present
        if row[0] == "Region Name":
            continue
        row_line = "RegionRow("
        row_line += str_format(row[0])
        row_line += str_format(row[1])
        connections = row[2]
        row_line += f"{str_list_to_py(connections.split(', '))}, "
        resources = row[3]
        row_line += f"{str_list_to_py(resources.split(', '))})"
        reg_py_file.write(f"\t{row_line},\n")
def load_resource_csv():
    """Generate ``resources_generated.py`` from the resources CSV.

    Local file when ``debug`` is true, otherwise fetched from the data
    repository at the pinned tag; row formatting is delegated to
    ``parse_resources_file``. On a non-200 response the error is printed and
    an empty list is emitted.
    """
    this_dir = os.path.dirname(os.path.abspath(__file__))
    with open(os.path.join(this_dir, "resources_generated.py"), 'w+') as res_py_file:
        res_py_file.write('"""\nThis file was auto generated by LogicCSVToPython.py\n"""\n')
        res_py_file.write("from ..Regions import ResourceRow\n")
        res_py_file.write("\n")
        res_py_file.write("resource_rows = [\n")
        if debug:
            # Renamed locals (were region_file/regions_reader) to match what they hold
            with open(os.path.join(this_dir, "resources.csv"), "r") as resource_file:
                resource_reader = csv.reader(resource_file.read().splitlines())
                parse_resources_file(res_py_file, resource_reader)
        else:
            print("Loading: " + data_repository_address + "/" + data_csv_tag + "/resources.csv")
            with requests.get(data_repository_address + "/" + data_csv_tag + "/resources.csv") as req:
                if req.status_code == 200:
                    resource_reader = csv.reader(req.text.splitlines())
                    parse_resources_file(res_py_file, resource_reader)
                else:
                    print(str(req.status_code) + ": " + req.reason)
        res_py_file.write("]\n")
def parse_resources_file(res_py_file, resource_reader):
    """Write one ``ResourceRow('<name>')`` source line per CSV row.

    Skips a header row; single quotes in names are backslash-escaped so the
    generated literal stays valid Python source.
    """
    for record in resource_reader:
        resource_name = record[0]
        # Skip the header row, if present
        if resource_name == "Resource Name":
            continue
        escaped = resource_name.replace("'", "\\'")
        res_py_file.write(f"\tResourceRow('{escaped}'),\n")
def load_item_csv():
    """Generate ``items_generated.py`` from the items CSV.

    Local file when ``debug`` is true, otherwise fetched from the data
    repository at the pinned tag; row formatting is delegated to
    ``parse_item_file``. On a non-200 response the error is printed and an
    empty list is emitted.
    """
    this_dir = os.path.dirname(os.path.abspath(__file__))
    with open(os.path.join(this_dir, "items_generated.py"), 'w+') as item_py_file:
        item_py_file.write('"""\nThis file was auto generated by LogicCSVToPython.py\n"""\n')
        item_py_file.write("from BaseClasses import ItemClassification\n")
        item_py_file.write("from ..Items import ItemRow\n")
        item_py_file.write("\n")
        item_py_file.write("item_rows = [\n")
        if debug:
            # Renamed locals (were region_file/regions_reader) to match what they hold
            with open(os.path.join(this_dir, "items.csv"), "r") as item_file:
                item_reader = csv.reader(item_file.read().splitlines())
                parse_item_file(item_py_file, item_reader)
        else:
            print("Loading: " + data_repository_address + "/" + data_csv_tag + "/items.csv")
            with requests.get(data_repository_address + "/" + data_csv_tag + "/items.csv") as req:
                if req.status_code == 200:
                    item_reader = csv.reader(req.text.splitlines())
                    parse_item_file(item_py_file, item_reader)
                else:
                    print(str(req.status_code) + ": " + req.reason)
        item_py_file.write("]\n")
def parse_item_file(item_py_file, item_reader):
    """Write one ``ItemRow(...)`` source line per CSV row to *item_py_file*.

    Expected columns: name, a numeric/count field emitted verbatim, and an
    ``ItemClassification`` member name. A header row is skipped.
    """
    for record in item_reader:
        # Skip the header row, if present
        if record[0] == "Name":
            continue
        pieces = [
            str_format(record[0]),
            f"{record[1]}, ",
            f"ItemClassification.{record[2]})",
        ]
        item_py_file.write("\tItemRow(" + "".join(pieces) + ",\n")
def str_format(s) -> str:
@@ -128,7 +199,7 @@ if __name__ == "__main__":
def str_list_to_py(str_list) -> str:
    """Render *str_list* as Python list-literal source text.

    Each element is formatted (quoted/escaped, with trailing separator) by
    ``str_format``; this function only adds the surrounding brackets.
    """
    ret_str = "["
    for s in str_list:
        ret_str += str_format(s)
    ret_str += "]"
    return ret_str