Now catches all files, splits the file names by their number, and appends the file type's number to the end of the file extension
main.py
@@ -44,13 +44,13 @@ class BZZCompressor:
             tmp = (i) * 12
             files.append(
                 {
-                    "pt_a": hex(
+                    "type": hex(
                         int.from_bytes(data[12 + tmp : 12 + tmp + 4], "little")
                     ),
-                    "pt_b": hex(
+                    "file_end": hex(
                         int.from_bytes(data[12 + tmp + 4 : 12 + tmp + 8], "little")
                     ),
-                    "pt_c": hex(
+                    "padding_end": hex(
                         int.from_bytes(data[12 + tmp + 8 : 12 + tmp + 12], "little")
                     ),
                 }
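For context, the renamed keys name the three little-endian 32-bit fields that make up each 12-byte entry of the table read above. A minimal standalone sketch of that layout, assuming the same offsets the hunk uses (the struct-based helper and its name are illustrative, not part of the commit):

import struct

def parse_entry(data: bytes, i: int) -> dict:
    # Hypothetical helper: entry i sits at offset 12 + i * 12 and holds three
    # little-endian u32s, matching the slices read in the hunk above.
    type_id, file_end, padding_end = struct.unpack_from("<III", data, 12 + i * 12)
    return {
        "type": hex(type_id),
        "file_end": hex(file_end),
        "padding_end": hex(padding_end),
    }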
@@ -68,157 +68,176 @@ class BZZCompressor:
         # individual files from the .bzz
         #
         ##############################################################################
-        index = 0x800
-
-        # Getting our method, this is likely imprecise, since I'm one dealing with one
-        # method type, but it gets what I want
-        method = data[index]
-        # We move on to the next byte in data
-        index = index + 1
-
-        # Gathering variables based on the method according to
-        # https://problemkaputt.de/psxspx-cdrom-file-compression-bzz.htm
-        # Note: bin(int)[2:].zfill(8) converts a number to an 8-bit binary string
-
-        # `>> 3` is the same as dividing by 8
-        shifter = (method >> 3) & 0x03
-        len_bits = (method & 0x07) ^ 0x07
-
-        # The bin() function only returns the second half of the byte, so we pad the byte
-        len_mask = 1 << len_bits
-
-        threshold = len_mask >> 1
-
-        if threshold > 0x07:
-            threshold = 0x13
-
-        len_table = []
-
-        for i in range(len_mask):
-            if i > threshold:
-                len_table.append((i - threshold << shifter) + threshold + 3)
-            else:
-                len_table.append(i + 3)
-
-        temp_flags = ""
-
-        for item in data[index : index + 3]:
-            temp_flags += bin(item)[2:].zfill(8)
-
-        num_flags = int(temp_flags, 2) + 1
-        index = index + 3
-
-        print(f"Method: {hex(method)}")
-        print(f"Shifter: {shifter}")
-        print(f"Len Bits: {bin(len_bits)}")
-        print(f"Len Mask: {bin(len_mask)}")
-        print(f"Threshold: {threshold}")
-        print(f"Len Table: {len_table}")
-        print(f"Loops (based on num flags): {num_flags}")
-
-        # Adding 0x100 here means the bitarray is a length of 9, and the first item is always 1
-        # This means that later, when we need to gather more flag bits, we aren't losing any data, or
-        # hitting an index out of bounds error
-        flag_bits = bitarray(bin(data[index] + 0x100)[2:])
-        index = index + 1
-
-        while num_flags > 0:
-            carry = flag_bits[-1]
-            flag_bits = flag_bits >> 1
-
-            # if we are down to only 0 bits, we are out of file-driven data
-            # Here we collect more flag bits and re-iterate the loop
-            if int(flag_bits.to01(), 2) == 0x00:
-                flag_bits = bitarray(bin(data[index] + 0x100)[2:])
-                index = index + 1
-                continue
-
-            # Carry means the next byte is raw data, no weird placement or indexing
-            if carry:
-                try:
-                    output_buffer.append(data[index])
-                    index = index + 1
-                except IndexError:
-                    print(output_buffer)
-                    print(
-                        f"Error processing file. Reached of data stream early. Index: {index}"
-                    )
-                    return
-
-            # If Carry is 0, then we are doing actual decompression. This is the tricky part
-            else:
-                # This shouldn't happen
-                if len(data) <= index + 1:
-                    print("Error processing file. Reached of data stream early.")
-                    return
-
-                # This is "temp" in our documentation
-                temp = ""
-                for item in data[index : index + 2]:
-                    temp = temp + bin(item)[2:].zfill(8)
-
-                distance_data = int(temp, 2)
-                index = index + 2
-
-                # length here is the length of the data we are copying.
-                # We multiply by 8 since we are working with bits instead of bytes
-                length = len_table[(distance_data & len_mask) - 1]
-
-                # Displacement is how far back in the existing output_buffer we are
-                # looking to copy from. We multiply by 8 since we are working with bits and not bytes
-                displacement = distance_data >> len_bits
-
-                # This shouldn't happen
-                if displacement <= 0:
-                    print(
-                        f"Error processing file. Displacement was less than or equal to 0.\n"
-                        + f"Distance Data: {distance_data}. Displacement: {displacement}. Index: {hex(index)}"
-                    )
-                    return
-
-                # print(f"Output Buffer Size {len(output_buffer)}")
-                # print(f"Distance Data: {distance_data}")
-                # print(f"Displacement: {displacement}")
-                # print(f"Length: {length}")
-
-                # Here we copy bit by bit from earlier in the output buffer.
-                # we use this instead of index slicing since the slice could lead to
-                # data we are currently copying into the buffer
-                copy_index = len(output_buffer) - displacement
-
-                # If start index is less than 0, we'll be checking something like output_buffer[-2]
-                # or smth, which will have an IndexOutOfBounds exception
-                if copy_index < 0:
-                    print(output_buffer)
-                    print("Error decompressing file. Start Index was out of range.")
-                    return
-
-                for i in range(length):
-                    output_buffer.append(output_buffer[copy_index + i])
-
-            num_flags = num_flags - 1
-
-        if len(data) > index:
-            for item in data[index:]:
-                overflow_buffer.append(item)
-
-        # This handoff is so I can change buffer logic without breaking write-out logic
-        out_data = output_buffer
-
-        try:
-            with open(f"{output_folder}/{input_file}.file", "wb") as outfile:
-                outfile.write(out_data)
-            print(f"File {output_folder}/{input_file}.file saved successfully!")
-
-            with open(f"{output_folder}/{input_file}.overflow.file", "wb") as outfile:
-                outfile.write(overflow_buffer)
-            print(
-                f"File {output_folder}/{input_file}.overflow.file saved successfully!"
-            )
-        except IOError as e:
-            print(
-                f"Unable to write file for {input_file_path}/{input_file}. Error: {e}"
-            )
+        starting_index = 0x800
+
+        # File Loop
+        for file_num, file in enumerate(files):
+            print(hex(starting_index))
+
+            index = starting_index
+
+            # Prepping for the next loop
+            file_end = starting_index + int(file.get("file_end")[2:], 16)
+            starting_index = starting_index + int(file.get("padding_end")[2:], 16)
+
+            print(hex(file_end))
+
+            # Getting our method, this is likely imprecise, since I'm one dealing with one
+            # method type, but it gets what I want
+            method = data[index]
+            # We move on to the next byte in data
+            index = index + 1
+
+            # Gathering variables based on the method according to
+            # https://problemkaputt.de/psxspx-cdrom-file-compression-bzz.htm
+            # Note: bin(int)[2:].zfill(8) converts a number to an 8-bit binary string
+
+            # `>> 3` is the same as dividing by 8
+            shifter = (method >> 3) & 0x03
+            len_bits = (method & 0x07) ^ 0x07
+
+            # The bin() function only returns the second half of the byte, so we pad the byte
+            len_mask = 1 << len_bits
+
+            threshold = len_mask >> 1
+
+            if threshold > 0x07:
+                threshold = 0x13
+
+            len_table = []
+
+            for i in range(len_mask):
+                if i > threshold:
+                    len_table.append((i - threshold << shifter) + threshold + 3)
+                else:
+                    len_table.append(i + 3)
+
+            temp_flags = ""
+
+            for item in data[index : index + 3]:
+                temp_flags += bin(item)[2:].zfill(8)
+
+            num_flags = int(temp_flags, 2) + 1
+            index = index + 3
+
+            print(f"Method: {hex(method)}")
+            print(f"Shifter: {shifter}")
+            print(f"Len Bits: {bin(len_bits)}")
+            print(f"Len Mask: {bin(len_mask)}")
+            print(f"Threshold: {threshold}")
+            print(f"Len Table: {len_table}")
+            print(f"Loops (based on num flags): {num_flags}")
+
+            # Adding 0x100 here means the bitarray is a length of 9, and the first item is always 1
+            # This means that later, when we need to gather more flag bits, we aren't losing any data, or
+            # hitting an index out of bounds error
+            flag_bits = bitarray(bin(data[index] + 0x100)[2:])
+            index = index + 1
+
+            while num_flags > 0:
+                carry = flag_bits[-1]
+                flag_bits = flag_bits >> 1
+
+                # if we are down to only 0 bits, we are out of file-driven data
+                # Here we collect more flag bits and re-iterate the loop
+                if int(flag_bits.to01(), 2) == 0x00:
+                    flag_bits = bitarray(bin(data[index] + 0x100)[2:])
+                    index = index + 1
+                    continue
+
+                # Carry means the next byte is raw data, no weird placement or indexing
+                if carry:
+                    try:
+                        output_buffer.append(data[index])
+                        index = index + 1
+                    except IndexError:
+                        print(output_buffer)
+                        print(
+                            f"Error processing file. Reached of data stream early. Index: {index}"
+                        )
+                        return
+
+                # If Carry is 0, then we are doing actual decompression. This is the tricky part
+                else:
+                    # This shouldn't happen
+                    if len(data) <= index + 1:
+                        print("Error processing file. Reached of data stream early.")
+                        return
+
+                    # This is "temp" in our documentation
+                    temp = ""
+                    for item in data[index : index + 2]:
+                        temp = temp + bin(item)[2:].zfill(8)
+
+                    distance_data = int(temp, 2)
+                    index = index + 2
+
+                    # length here is the length of the data we are copying.
+                    # We multiply by 8 since we are working with bits instead of bytes
+                    length = len_table[(distance_data & len_mask) - 1]
+
+                    # Displacement is how far back in the existing output_buffer we are
+                    # looking to copy from. We multiply by 8 since we are working with bits and not bytes
+                    displacement = distance_data >> len_bits
+
+                    # This shouldn't happen
+                    if displacement <= 0:
+                        print(
+                            f"Error processing file. Displacement was less than or equal to 0.\n"
+                            + f"Distance Data: {distance_data}. Displacement: {displacement}. Index: {hex(index)}"
+                        )
+                        return
+
+                    # print(f"Output Buffer Size {len(output_buffer)}")
+                    # print(f"Distance Data: {distance_data}")
+                    # print(f"Displacement: {displacement}")
+                    # print(f"Length: {length}")
+
+                    # Here we copy bit by bit from earlier in the output buffer.
+                    # we use this instead of index slicing since the slice could lead to
+                    # data we are currently copying into the buffer
+                    copy_index = len(output_buffer) - displacement
+
+                    # If start index is less than 0, we'll be checking something like output_buffer[-2]
+                    # or smth, which will have an IndexOutOfBounds exception
+                    if copy_index < 0:
+                        print(output_buffer)
+                        print("Error decompressing file. Start Index was out of range.")
+                        return
+
+                    for i in range(length):
+                        output_buffer.append(output_buffer[copy_index + i])
+
+                num_flags = num_flags - 1
+
+            if len(data) > index:
+                for item in data[index:]:
+                    overflow_buffer.append(item)
+
+            # This handoff is so I can change buffer logic without breaking write-out logic
+            out_data = output_buffer
+
+            try:
+                with open(
+                    f"{output_folder}/{input_file}_{str(file_num).zfill(3)}.file{file['type'][2:]}",
+                    "wb",
+                ) as outfile:
+                    outfile.write(out_data)
+                print(
+                    f"File {output_folder}/{input_file}_{str(file_num).zfill(3)}.file{file['type'][2:]} saved successfully!"
+                )
+
+                with open(
+                    f"{output_folder}/{input_file}.overflow.file", "wb"
+                ) as outfile:
+                    outfile.write(overflow_buffer)
+                print(
+                    f"File {output_folder}/{input_file}.overflow.file saved successfully!"
+                )
+            except IOError as e:
+                print(
+                    f"Unable to write file for {input_file_path}/{input_file}. Error: {e}"
+                )
 
 
 if __name__ == "__main__":
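To summarize the new offset bookkeeping in isolation: each file's payload starts at the current starting_index, its compressed data ends at starting_index + file_end, and the next file begins at starting_index + padding_end. A condensed sketch of that walk under those assumptions (the entries below are dummy values purely for illustration; real ones come from the table parsed in the first hunk):

files = [
    {"type": "0x2", "file_end": "0x1a00", "padding_end": "0x2000"},  # dummy entry
    {"type": "0x3", "file_end": "0xc00", "padding_end": "0x1000"},   # dummy entry
]

starting_index = 0x800
for file_num, entry in enumerate(files):
    # Payload runs from starting_index up to starting_index + file_end;
    # the next file starts after the padding, at starting_index + padding_end.
    file_end = starting_index + int(entry["file_end"][2:], 16)
    next_start = starting_index + int(entry["padding_end"][2:], 16)
    print(f"file {file_num:03d}: data {hex(starting_index)}..{hex(file_end)}, next file at {hex(next_start)}")
    starting_index = next_start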