Replaced checking the flag bits against 0x00 with a counter that counts down as we consume the flag bits
145
main.py
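
Note: as a rough, self-contained illustration of the change described in the commit title, the sketch below shows the counter-based flag-bit handling: refill the flag byte after eight bits have been consumed instead of shifting and testing the value against 0x00. The function name flag_reader and the bit extraction via flag_bits & 1 are assumptions made for this example, not the repository's actual API.

def flag_reader(data: bytes, start: int = 0):
    """Yield flag bits one at a time, refilling from data every 8 bits.

    Minimal sketch of the counter-based approach; not the real BZZCompressor code.
    """
    index = start
    flag_bits = data[index] + 0x100  # 0x100 keeps a sentinel bit above the 8 flag bits
    index = index + 1
    counter = 8                      # flag bits still unread in the current byte

    while True:
        carry = flag_bits & 1        # next flag bit (lowest bit)
        flag_bits = flag_bits >> 1
        counter = counter - 1
        yield carry

        if counter == 0:             # all 8 bits used: fetch the next flag byte
            if index >= len(data):
                return               # out of input
            flag_bits = data[index] + 0x100
            index = index + 1
            counter = 8

For example, list(flag_reader(b"\xb5")) yields [1, 0, 1, 0, 1, 1, 0, 1], the bits of 0xB5 lowest-first; the old code instead kept shifting until only the sentinel bit remained and compared the result against 0x00.
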
@@ -17,8 +17,7 @@ class BZZCompressor:
         data = temp.tobytes()
 except IOError:
-    print("Could not open {input_file_path}/{input_file}...")
-    raise
+    raise IOError(f"Could not open {input_file_path}/{input_file}...")

 ##############################################################################
 #
@@ -69,8 +68,6 @@ class BZZCompressor:
 # File Loop
 for file_num, file in enumerate(files):
-    # print(hex(starting_index))
-
     index = starting_index

     # Prepping for the next loop
@@ -128,85 +125,93 @@ class BZZCompressor:
 # Adding 0x100 here means the bitarray is a length of 9, and the first item is always 1
 # This means that later, when we need to gather more flag bits, we aren't losing any data, or
 # hitting an index out of bounds error
-flag_bits = bitarray(bin(data[index] + 0x100)[2:])
+flag_bits = data[index] + 0x100
 index = index + 1
+# used to track how far in flag_bits we are
+counter = 8

-while num_flags > 0:
-    carry = flag_bits[-1]
-    flag_bits = flag_bits >> 1
-
-    # if we are down to only 0 bits, we are out of file-driven data
-    # Here we collect more flag bits and re-iterate the loop
-    if int(flag_bits.to01(), 2) == 0x00:
-        flag_bits = bitarray(bin(data[index] + 0x100)[2:])
-        index = index + 1
-        continue
-
-    # Carry means the next byte is raw data, no weird placement or indexing
-    if carry:
-        try:
-            output_buffer.append(data[index])
-            index = index + 1
-        except IndexError:
-            print(output_buffer)
-            print(
-                f"Error processing {input_file_path}/{input_file} on {file_num}/{len(files)}. Reached of data stream early. Index: {index}"
-            )
-            return
-
-    # If Carry is 0, then we are doing actual decompression. This is the tricky part
-    else:
-        # This shouldn't happen
-        if len(data) <= index + 1:
-            print(
-                "Error processing {input_file_path}/{input_file} on {file_num}/{len(files)}. Reached of data stream early."
-            )
-            return
-
-        # The "temp" in our documentation
-        distance_data = int.from_bytes(data[index : index + 2], "big")
-        index = index + 2
-
-        # length here is the length of the data we are copying.
-        # We multiply by 8 since we are working with bits instead of bytes
-        length = len_table[(distance_data & len_mask) - 1]
-
-        # Displacement is how far back in the existing output_buffer we are
-        # looking to copy from. We multiply by 8 since we are working with bits and not bytes
-        displacement = distance_data >> len_bits
-
-        # This shouldn't happen
-        if displacement <= 0:
-            print(
-                f"Error processing {input_file_path}/{input_file} on {file_num}/{len(files)}. Displacement was less than or equal to 0.\n"
-                + f"Displacement: {displacement}. Distance Data: {distance_data}. Length Bits: {len_bits}"
-            )
-            return
-
-        # print(f"Output Buffer Size {len(output_buffer)}")
-        # print(f"Distance Data: {distance_data}")
-        # print(f"Displacement: {displacement}")
-        # print(f"Length: {length}")
-
-        # Here we copy bit by bit from earlier in the output buffer.
-        # we use this instead of index slicing since the slice could lead to
-        # data we are currently copying into the buffer
-        copy_index = len(output_buffer) - displacement
-
-        # If start index is less than 0, we'll be checking something like output_buffer[-2]
-        # or smth, which will have an IndexOutOfBounds exception
-        if copy_index < 0:
-            print(
-                "Error decompressing {input_file_path}/{input_file} on {file_num}/{len(files)}. Start Index was out of range."
-            )
-            return
-
-        for i in range(length):
-            output_buffer.append(output_buffer[copy_index + i])
-
-    num_flags = num_flags - 1
-
-# This handoff is so I can change buffer logic without breaking write-out logic
+try:
+    while num_flags > 0:
+        carry = bin(flag_bits)[-1]
+        flag_bits = flag_bits >> 1
+        counter = counter - 1
+
+        # if we are down to only 0 bits, we are out of file-driven data
+        # Here we collect more flag bits and re-iterate the loop
+        if counter == 0:
+            flag_bits = data[index] + 0x100
+            index = index + 1
+            counter = 8
+            continue
+
+        # Carry means the next byte is raw data, no weird placement or indexing
+        if carry:
+            try:
+                output_buffer.append(data[index])
+                index = index + 1
+            except IndexError:
+                raise IndexError(
+                    f"Error processing {input_file_path}/{input_file} on {file_num}/{len(files)} (Carry Path). Reached of data stream early. Index: {index}\n"
+                    + f"Index: {hex(index)}. File Start: {hex(starting_index - int(file.get("padding_end")[2:], 16))}. File End: {hex(file_end)}"
+                )
+
+        # If Carry is 0, then we are doing actual decompression. This is the tricky part
+        else:
+            # This shouldn't happen
+            if file_end <= index + 1:
+                raise IndexError(
+                    f"Error processing {input_file_path}/{input_file} on {file_num}/{len(files)} (Non-Carry Path). Reached of data stream early.\n"
+                    + f"Index: {hex(copy_index)}. File Start: {hex(starting_index - int(file.get("padding_end")[2:], 16))}. File End: {hex(file_end)}"
+                )
+
+            # The "temp" in our documentation
+            distance_data = int.from_bytes(data[index : index + 2], "big")
+            index = index + 2
+
+            # length here is the length of the data we are copying.
+            # We multiply by 8 since we are working with bits instead of bytes
+            length = len_table[(distance_data & len_mask) - 1]
+
+            # Displacement is how far back in the existing output_buffer we are
+            # looking to copy from. We multiply by 8 since we are working with bits and not bytes
+            displacement = distance_data >> len_bits
+
+            # This shouldn't happen
+            if displacement <= 0:
+                raise ValueError(
+                    f"Error processing {input_file_path}/{input_file} on {file_num}/{len(files)}. Displacement was less than or equal to 0.\n"
+                    + f"Displacement: {displacement}. Distance Data: {distance_data}. Length Bits: {len_bits}"
+                )
+
+            # print(f"Output Buffer Size {len(output_buffer)}")
+            # print(f"Distance Data: {distance_data}")
+            # print(f"Displacement: {displacement}")
+            # print(f"Length: {length}")
+
+            # Here we copy bit by bit from earlier in the output buffer.
+            # we use this instead of index slicing since the slice could lead to
+            # data we are currently copying into the buffer
+            copy_index = len(output_buffer) - displacement
+
+            # If start index is less than 0, we'll be checking something like output_buffer[-2]
+            # or smth, which will have an IndexOutOfBounds exception
+            if copy_index < 0:
+                raise IndexError(
+                    f"Error decompressing {input_file_path}/{input_file} on {file_num}/{len(files)}. Displacement Index was out of range.\n"
+                    + f"Displacement Index: {hex(copy_index)}. File Start: {hex(starting_index - int(file.get("padding_end")[2:], 16))}. File End: {hex(file_end)}"
+                )
+
+            for i in range(length):
+                output_buffer.append(output_buffer[copy_index + i])
+
+        num_flags = num_flags - 1
+except Exception as e:
+    print(
+        f"\nError while processing {input_file_path}/{input_file}_{hex(file_num)[2:].zfill(3)}. \nError: {e}"
+    )
+
+# This handoff is so I can change buffer logic without breaking write-out logic.
+# I need to
 out_data = output_buffer

 try:
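
Note: the non-carry branch in the hunk above is a standard LZSS-style back-reference: distance_data is split into a length (looked up through len_table/len_mask) and a displacement back into the already-written output, and bytes are copied one at a time so a reference may overlap the data it is still producing. The sketch below only illustrates that copy step and is hypothetical: the length encoding and the default len_bits value are invented stand-ins, not the format's real constants.

def copy_back_reference(output_buffer: bytearray, distance_data: int, len_bits: int = 4) -> None:
    """Append a run copied from earlier in output_buffer.

    Hypothetical sketch; the length calculation is a placeholder for the
    real len_table[(distance_data & len_mask) - 1] lookup.
    """
    len_mask = (1 << len_bits) - 1
    length = (distance_data & len_mask) + 3        # placeholder length encoding
    displacement = distance_data >> len_bits       # how far back to start copying

    copy_index = len(output_buffer) - displacement
    if copy_index < 0:
        raise IndexError("back-reference starts before the beginning of the buffer")

    # Copy byte by byte rather than slicing, so an overlapping reference can
    # re-read bytes appended earlier in this same loop.
    for i in range(length):
        output_buffer.append(output_buffer[copy_index + i])

For instance, with buf = bytearray(b"ab"), calling copy_back_reference(buf, (2 << 4) | 1) copies four bytes starting two back and leaves buf == b"ababab"; the last two copied bytes are ones the loop itself just appended, which is why the copy is done element by element.
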
@@ -219,7 +224,7 @@ class BZZCompressor:
     # f"File {output_folder}/{input_file}_{str(file_num).zfill(3)}.file{file['type'][2:]} saved successfully!"
     # )
 except IOError as e:
-    print(
+    raise IOError(
         f"Unable to write file for {input_file_path}/{input_file} on {file_num}/{len(files)}. Error: {e}"
     )

@@ -256,6 +261,6 @@ if __name__ == "__main__":
         compressor.decompress(dirpath, file, str(output_folder_path))
     except Exception as e:
         print(
-            f"Error while decompressing {output_folder_path}/{file}. Error: {e}"
+            f"\nError while decompressing {output_folder_path}/{file}. \nError: {e}"
         )
         continue