mirror of
https://github.com/qwerfd2/Groove_Coaster_2_Server.git
synced 2025-12-22 03:30:18 +00:00
toolset
This commit is contained in:
92
various-tools/player dat/reader.py
Normal file
92
various-tools/player dat/reader.py
Normal file
@@ -0,0 +1,92 @@
|
||||
import struct
|
||||
import pandas as pd
|
||||
|
||||
def read_string(f):
    """Read a length-prefixed UTF-8 string: one length byte, then the payload.

    The value is debug-printed before returning, like the other readers.
    """
    length = struct.unpack("B", f.read(1))[0]
    # Renamed from `str`, which shadowed the builtin.
    s = f.read(length).decode("utf-8")
    print("str ", s)
    return s
|
||||
|
||||
def read_int_old(f, size=4):
    """Read `size` bytes from *f* as a big-endian unsigned integer."""
    return int.from_bytes(f.read(size), byteorder="big")
|
||||
|
||||
def read_int(f, size=4):
    """Read `size` bytes and return them as a space-separated upper-case
    hex string (e.g. "00 01 0A FF"), debug-printing the value first.
    """
    result = f.read(size).hex(" ").upper()
    print("int ", result)
    return result
|
||||
|
||||
def read_byte(f):
    """Read one byte and return it as a two-digit upper-case hex string,
    debug-printing the value first.
    """
    result = f.read(1).hex(" ").upper()
    print("byte ", result)
    return result
|
||||
|
||||
def parse_pak_file(file_path, output_xlsx):
    """Parse a player.dat PAK file and dump every record to an .xlsx sheet.

    Layout: a big-endian 2-byte record count, then that many records whose
    fixed field order is captured in `layout` below. Values are kept as hex
    strings (see read_int/read_byte) so they round-trip losslessly through
    the spreadsheet. Field names come from reverse-engineering notes and
    may not reflect actual semantics — TODO confirm against the binary.
    """

    def read_hex_byte(f):
        # Single byte rendered through read_int, matching the original
        # read_int(f, 1) calls for the "Float" columns.
        return read_int(f, 1)

    # Exact on-disk field order: (column name, reader callable).
    layout = [
        ("Field1", read_int), ("Field2", read_byte),
        ("Field3", read_int), ("Field4", read_byte),
        ("String1", read_string), ("String2", read_string),
        ("String3", read_string), ("String4", read_string),
        ("String5", read_string), ("String6", read_string),
        ("Field5", read_int),
        ("Float1", read_hex_byte), ("Float2", read_hex_byte),
        ("Float3", read_hex_byte), ("Float4", read_hex_byte),
        ("Field6", read_byte), ("Field7", read_byte),
        ("Field8", read_int), ("Field9", read_byte),
        ("Field10", read_byte), ("Field11", read_int),
        ("Field12", read_int), ("Field13", read_int),
        ("Field14", read_int), ("Field15", read_byte),
        ("Field16", read_byte), ("Field17", read_int),
        ("Field18", read_int), ("Field19", read_byte),
        ("Field20", read_int), ("Field21", read_byte),
        ("Field22", read_int), ("Field23", read_int),
        ("Field24", read_byte), ("Field25", read_int),
        ("Field26", read_int), ("Field27", read_byte),
        ("Field28", read_int), ("Field29", read_int),
        ("Field30", read_byte), ("Field31", read_int),
        ("Field32", read_int), ("Field33", read_int),
        ("Field34", read_byte), ("Field35", read_int),
        ("Field36", read_int), ("Field37", read_int),
        ("Field38", read_byte),
    ]

    with open(file_path, "rb") as f:
        num_elements = read_int_old(f, 2)
        data = []

        for index in range(num_elements):
            print("prd", index)
            # Dict comprehension evaluates in layout order, so the file is
            # consumed field-by-field exactly as before.
            entry = {name: reader(f) for name, reader in layout}

            # This is a hack (not derived from IDA) but there are discrepancies here that can be addressed this way
            if (entry["String5"] != "" or entry["String6"] != "" ):
                entry["Field39"] = read_int(f)

            entry["Field40"] = read_byte(f)
            data.append(entry)

    df = pd.DataFrame(data)
    df.to_excel(output_xlsx, index=False)
|
||||
|
||||
# Example usage — guarded so importing this module no longer triggers a
# parse as a side effect, consistent with the player_name tools.
if __name__ == "__main__":
    parse_pak_file("player.dat", "player.xlsx")
|
||||
5
various-tools/player dat/readme.txt
Normal file
5
various-tools/player dat/readme.txt
Normal file
@@ -0,0 +1,5 @@
|
||||
Converts player.dat to and from xlsx sheets.
|
||||
|
||||
The data format is definitely not 100% correct here, and there is a bodge to address the 4 redundant bytes on the first few rows.
|
||||
|
||||
But it works!
|
||||
83
various-tools/player dat/writer.py
Normal file
83
various-tools/player dat/writer.py
Normal file
@@ -0,0 +1,83 @@
|
||||
import struct
|
||||
import pandas as pd
|
||||
|
||||
def write_string(f, s):
    """Write a length-prefixed UTF-8 string (one length byte + payload).

    NaN (an empty spreadsheet cell) is written as a zero-length string.
    """
    payload = b"" if pd.isna(s) else s.encode("utf-8")
    f.write(struct.pack("B", len(payload)))
    f.write(payload)
|
||||
|
||||
def write_int(f, value, size=4):
    """Write a hex-string field (as produced by reader.py's read_int) back
    as raw bytes.

    `value` is a hex string such as "00 01 0A FF"; bytes.fromhex ignores
    the spaces. NaN cells — previously an uncaught crash in fromhex — are
    written as `size` zero bytes, mirroring write_byte's NaN handling
    (this also gives the previously-unused `size` parameter a purpose).
    """
    if pd.isna(value):
        f.write(bytes(size))
    else:
        f.write(bytes.fromhex(value))
|
||||
|
||||
def write_byte(f, value):
    """Write one byte given as a two-digit hex string; NaN cells become 0x00."""
    f.write(struct.pack("B", 0) if pd.isna(value) else bytes.fromhex(value))
|
||||
|
||||
def convert_xlsx_to_dat(input_xlsx, output_dat):
    """Rebuild a player.dat binary from the .xlsx produced by reader.py.

    Writes a big-endian 2-byte record count, then each row's fields in the
    exact on-disk order captured in `layout` (the mirror of the reader's
    layout), plus the optional trailing Field39/Field40 pair.
    """
    # Exact on-disk field order: (column name, writer callable).
    # "Float" columns are single bytes, hence write_byte.
    layout = [
        ("Field1", write_int), ("Field2", write_byte),
        ("Field3", write_int), ("Field4", write_byte),
        ("String1", write_string), ("String2", write_string),
        ("String3", write_string), ("String4", write_string),
        ("String5", write_string), ("String6", write_string),
        ("Field5", write_int),
        ("Float1", write_byte), ("Float2", write_byte),
        ("Float3", write_byte), ("Float4", write_byte),
        ("Field6", write_byte), ("Field7", write_byte),
        ("Field8", write_int), ("Field9", write_byte),
        ("Field10", write_byte), ("Field11", write_int),
        ("Field12", write_int), ("Field13", write_int),
        ("Field14", write_int), ("Field15", write_byte),
        ("Field16", write_byte), ("Field17", write_int),
        ("Field18", write_int), ("Field19", write_byte),
        ("Field20", write_int), ("Field21", write_byte),
        ("Field22", write_int), ("Field23", write_int),
        ("Field24", write_byte), ("Field25", write_int),
        ("Field26", write_int), ("Field27", write_byte),
        ("Field28", write_int), ("Field29", write_int),
        ("Field30", write_byte), ("Field31", write_int),
        ("Field32", write_int), ("Field33", write_int),
        ("Field34", write_byte), ("Field35", write_int),
        ("Field36", write_int), ("Field37", write_int),
        ("Field38", write_byte),
    ]

    # dtype=str keeps the hex-string cells untouched by pandas coercion.
    df = pd.read_excel(input_xlsx, dtype=str)

    with open(output_dat, "wb") as f:
        num_elements = len(df)
        write_int(f, f"{num_elements:04X}", 2)

        for _, row in df.iterrows():
            for column, writer in layout:
                writer(f, row[column])

            # Optional extra field — mirrors the reader's bodge for rows
            # with a non-empty String5/String6.
            if pd.notna(row.get("Field39")):
                write_int(f, row["Field39"])

            write_byte(f, row["Field40"])
|
||||
|
||||
# Guarded so importing this module no longer writes a file as a side
# effect, consistent with the player_name tools.
if __name__ == "__main__":
    convert_xlsx_to_dat("player.xlsx", "out_player.dat")
|
||||
24
various-tools/player_name/reader.py
Normal file
24
various-tools/player_name/reader.py
Normal file
@@ -0,0 +1,24 @@
|
||||
# Only slightly modified from sysmes
|
||||
import struct
|
||||
import json
|
||||
|
||||
def unpack_sysmes(filename, output_json):
    """Unpack a sysmes-style .dat into a JSON array of strings.

    Format: big-endian u16 string count, then for each string a big-endian
    u16 byte length followed by UTF-8 payload (undecodable bytes are
    replaced, not fatal). Each length and string is debug-printed.
    """
    with open(filename, 'rb') as f:
        (count,) = struct.unpack('>H', f.read(2))

        strings = []
        for _ in range(count):
            (length,) = struct.unpack('>H', f.read(2))
            print(length)
            text = f.read(length).decode('utf-8', errors='replace')
            print(text)
            strings.append(text)

    with open(output_json, 'w', encoding='utf-8') as json_file:
        json.dump(strings, json_file, ensure_ascii=False, indent=4)
|
||||
|
||||
if __name__ == "__main__":
    # Default input/output pair; edit to convert a different locale.
    unpack_sysmes("player_name_it.dat", "player_name_it.json")
|
||||
1
various-tools/player_name/readme.txt
Normal file
1
various-tools/player_name/readme.txt
Normal file
@@ -0,0 +1 @@
|
||||
Same-ish implementation from sysmes, but without the bodge. Usage is similar too.
|
||||
33
various-tools/player_name/writer.py
Normal file
33
various-tools/player_name/writer.py
Normal file
@@ -0,0 +1,33 @@
|
||||
import struct
|
||||
import json
|
||||
import os
|
||||
|
||||
# Locale JSON -> output .dat mapping consumed by the __main__ entry point
# below; missing JSON files are skipped with a warning.
FILES = {
    "player_name_en.json": "player_name_en.dat",
    "player_name_ja.json": "player_name_ja.dat",
    "player_name_it.json": "player_name_it.dat",
    "player_name_fr.json": "player_name_fr.dat"
}
|
||||
|
||||
def parse_hex_string(hex_string):
    """Return the raw bytes for a hex string (bytes.fromhex allows spaces).

    NOTE(review): not referenced anywhere in this file — possibly leftover
    from an earlier revision; confirm before removing.
    """
    return bytes.fromhex(hex_string)
|
||||
|
||||
def pack_sysmes(input_json, output_filename):
    """Pack a JSON array of strings into a sysmes-style .dat file.

    Output: big-endian u16 string count, then for each string a big-endian
    u16 byte length followed by its UTF-8 payload.
    """
    with open(input_json, 'r', encoding='utf-8') as json_file:
        strings = json.load(json_file)

    with open(output_filename, 'wb') as out:
        out.write(struct.pack('>H', len(strings)))
        for text in strings:
            payload = text.encode('utf-8')
            out.write(struct.pack('>H', len(payload)) + payload)
|
||||
|
||||
if __name__ == "__main__":
    # Convert every locale listed in FILES; skip (with a warning) any
    # whose source JSON is missing.
    for json_file, dat_file in FILES.items():
        if not os.path.exists(json_file):
            print(f"Warning: {json_file} not found, skipped.")
            continue
        print(f"Processing {json_file} → {dat_file}")
        pack_sysmes(json_file, dat_file)
|
||||
Reference in New Issue
Block a user