Files
python-bzz/main.py

279 lines
12 KiB
Python
Raw Normal View History

import os
2025-09-15 19:32:58 -06:00
from bitarray import bitarray
from pathlib import Path
2025-09-15 19:32:58 -06:00
class BZZCompressor:
    """Decompressor for .bzz archives (LZSS-style compression; format notes at
    https://problemkaputt.de/psxspx-cdrom-file-compression-bzz.htm)."""

    def decompress(
        self, input_file_path, input_file, output_type="file", output_folder="out/"
    ):
        """Decompress every sub-file contained in a .bzz archive.

        Args:
            input_file_path: Directory that contains the archive.
            input_file: Name of the archive file.
            output_type: "file" writes each decompressed sub-file into
                ``output_folder``; "return" collects them and returns a list
                of ``{"type": ..., "data": ...}`` dicts instead.
            output_folder: Destination directory when ``output_type`` is "file".

        Returns:
            A list of ``{"type", "data"}`` dicts when ``output_type`` is
            "return", otherwise ``None``.

        Raises:
            IOError: If the archive cannot be read or a sub-file cannot be
                written.
            IndexError, ValueError: On malformed compressed streams (re-raised
                only from the output-writing path; stream errors inside the
                decode loop are reported and processing continues).
        """
        output_buffer = bytearray()
        return_files = []

        # Read the whole archive into memory.
        try:
            with open(f"{input_file_path}/{input_file}", "rb") as infile:
                data = infile.read()
        except IOError:
            raise IOError(f"Could not open {input_file_path}/{input_file}...")

        ######################################################################
        #
        # Headers: version, game id, file count, the per-file table, and a
        # trailing checksum. The header block is 0x800 bytes.
        #
        ######################################################################
        bzz_version = int.from_bytes(data[0:4], "little")  # always 1, 0, 0, 0
        game_id = int.from_bytes(data[4:8], "little")
        num_files = int.from_bytes(data[8:12], "little")

        # Each 12-byte table entry: file type, compressed length, and padded
        # length. Values are kept as "0x..." strings (display parity with the
        # original implementation).
        files = []
        for i in range(num_files):
            entry = 12 + i * 12
            files.append(
                {
                    "type": hex(int.from_bytes(data[entry : entry + 4], "little")),
                    "file_end": hex(
                        int.from_bytes(data[entry + 4 : entry + 8], "little")
                    ),
                    "padding_end": hex(
                        int.from_bytes(data[entry + 8 : entry + 12], "little")
                    ),
                }
            )
        checksum = data[0x7FC:0x800]  # parsed but not verified yet

        ######################################################################
        #
        # File loop: each sub-file is an independent compressed stream
        # starting right after the 0x800-byte header.
        #
        ######################################################################
        starting_index = 0x800
        for file_num, file in enumerate(files):
            index = starting_index
            file_start = starting_index
            # int(..., 16) accepts the "0x" prefix directly.
            file_end = starting_index + int(file["file_end"], 16)
            # Advance to the next sub-file for the following iteration.
            starting_index = starting_index + int(file["padding_end"], 16)

            # Method byte: bits 3-4 select the length shifter, the low three
            # bits (inverted) give the number of length-index bits.
            method = data[index]
            index += 1
            shifter = (method >> 3) & 0x03
            len_bits = (method & 0x07) ^ 0x07
            len_mask = 1 << len_bits
            threshold = len_mask >> 1
            # NOTE(review): the 0x13 cap is taken verbatim from the original;
            # confirm against the nocash spec.
            if threshold > 0x07:
                threshold = 0x13

            # Map a length index to a real copy length (minimum match is 3).
            len_table = []
            for i in range(len_mask):
                if i > threshold:
                    len_table.append((i - threshold << shifter) + threshold + 3)
                else:
                    len_table.append(i + 3)

            # Number of flag bits to process: 3 bytes, big-endian, plus one.
            num_flags = int.from_bytes(data[index : index + 3], "big") + 1
            index += 3

            # Adding 0x100 makes a 9-bit value whose top bit is a sentinel:
            # once only that bit remains, the flag byte has been consumed and
            # a fresh one must be loaded.
            flag_bits = data[index] + 0x100
            index += 1
            try:
                while num_flags > 0:
                    # BUGFIX: the carry must be the numeric low bit. The old
                    # code used bin(flag_bits)[-1], a "0"/"1" *string* that is
                    # always truthy, so the back-reference branch never ran.
                    carry = flag_bits & 1
                    flag_bits >>= 1
                    # Sentinel reached: reload flag bits and restart the loop.
                    # NOTE(review): the `continue` discards the carry just
                    # extracted (the final bit of the previous flag byte);
                    # confirm against the format spec.
                    if flag_bits == 1:
                        flag_bits = data[index] + 0x100
                        index += 1
                        continue
                    if carry:
                        # Flag bit set: the next byte is a literal.
                        try:
                            output_buffer.append(data[index])
                            index += 1
                        except IndexError:
                            raise IndexError(
                                f"Error processing {input_file_path}/{input_file} on {file_num}/{len(files)} (Carry Path). Reached end of data stream early. Index: {index}\n"
                                + f"Index: {hex(index)}. File Start: {hex(file_start)}. File End: {hex(file_end)}"
                            )
                    else:
                        # Flag bit clear: a 16-bit (displacement, length) pair.
                        if file_end <= index + 1:
                            # BUGFIX: this message referenced copy_index before
                            # assignment (NameError); report the stream index.
                            raise IndexError(
                                f"Error processing {input_file_path}/{input_file} on {file_num}/{len(files)} (Non-Carry Path). Reached end of data stream early.\n"
                                + f"Index: {hex(index)}. File Start: {hex(file_start)}. File End: {hex(file_end)}"
                            )
                        # The "temp" value in the nocash documentation.
                        distance_data = int.from_bytes(data[index : index + 2], "big")
                        index += 2
                        # BUGFIX: the length index is the low len_bits bits of
                        # the pair. The old `(distance_data & len_mask) - 1`
                        # masked a single bit and collapsed every pair onto
                        # the table's last entry. Consistent with displacement
                        # occupying the remaining high bits below.
                        length = len_table[distance_data & (len_mask - 1)]
                        # How far back in the existing output we copy from.
                        displacement = distance_data >> len_bits
                        if displacement <= 0:
                            raise ValueError(
                                f"Error processing {input_file_path}/{input_file} on {file_num}/{len(files)}. Displacement was less than or equal to 0.\n"
                                + f"Displacement: {displacement}. Distance Data: {distance_data}. Length Bits: {len_bits}"
                            )
                        copy_index = len(output_buffer) - displacement
                        if copy_index < 0:
                            raise IndexError(
                                f"Error decompressing {input_file_path}/{input_file} on {file_num}/{len(files)}. Displacement Index was out of range.\n"
                                + f"Displacement Index: {hex(copy_index)}. File Start: {hex(file_start)}. File End: {hex(file_end)}"
                            )
                        # Copy byte-by-byte: the source range may overlap the
                        # bytes being appended (RLE-style matches), so a slice
                        # copy would read data not yet written.
                        for i in range(length):
                            output_buffer.append(output_buffer[copy_index + i])
                    num_flags -= 1
            except Exception as e:
                # Best-effort: report the failure but still emit whatever was
                # decompressed so far.
                print(
                    f"\nError while processing {input_file_path}/{input_file}_{hex(file_num)[2:].zfill(3)}. \nError: {e}"
                )
            # NOTE(review): output_buffer is never reset between sub-files, so
            # each write includes all previous sub-files' bytes as a prefix --
            # confirm whether back-references may cross sub-file boundaries
            # before changing this.
            out_data = output_buffer
            if output_type == "file":
                out_name = (
                    f"{output_folder}/{input_file}_{str(file_num).zfill(3)}"
                    f".file{file['type'][2:]}"
                )
                try:
                    with open(out_name, "wb") as outfile:
                        outfile.write(out_data)
                except IOError as e:
                    raise IOError(
                        f"Unable to write file for {input_file_path}/{input_file} on {file_num}/{len(files)}. Error: {e}"
                    )
            elif output_type == "return":
                # BUGFIX: the keys were the bare names `type` (the builtin)
                # and `data` (the raw archive bytes); string keys were meant.
                return_files.append({"type": file["type"], "data": out_data})

        if output_type == "file":
            # Anything past the last sub-file's padding is "overflow"; dump it
            # only if it contains at least one non-zero byte.
            overflow_buffer = bytearray(data[starting_index:])
            if any(b != 0x00 for b in overflow_buffer):
                with open(
                    f"{output_folder}/{input_file}.overflow.file", "wb"
                ) as outfile:
                    outfile.write(overflow_buffer)
                print(
                    f"File {output_folder}/{input_file}.overflow.file saved successfully!"
                )
        elif output_type == "return":
            return return_files
2025-09-15 19:32:58 -06:00
if __name__ == "__main__":
    compressor = BZZCompressor()

    # Walk the extracted-bin tree and decompress every .bzz archive found,
    # mirroring the directory layout (minus the first two path components)
    # under out/.
    for dirpath, dirnames, filenames in os.walk("./bin_extract"):
        for file in filenames:
            # NOTE(review): the [4:] slice skips the first four characters of
            # the name before matching ".bzz" -- presumably to ignore a fixed
            # prefix; confirm this is intended over file.endswith(".bzz").
            if ".bzz" in file[4:]:
                rel_parts = dirpath.split("/")[2:]
                output_folder_path = Path("out", *rel_parts)
                output_folder_path.mkdir(parents=True, exist_ok=True)
                try:
                    # BUGFIX: the destination folder was being passed
                    # positionally as the third argument, which is
                    # output_type -- so output_type matched neither "file"
                    # nor "return" and nothing was ever written. Pass it as
                    # output_folder instead.
                    compressor.decompress(
                        dirpath, file, output_folder=str(output_folder_path)
                    )
                except Exception as e:
                    print(
                        f"\nError while decompressing {output_folder_path}/{file}. \nError: {e}"
                    )