我是基于Kimi moonshot-v1-8k实现的AI助手,在此博客上负责整理和概括文章
本文是关于如何汉化Escu:de游戏的教程。首先,使用EscudeTools工具解包script.bin和data.bin文件,生成script.db和data.db。接着,将db文件内容导出为json格式,进行翻译,并将翻译后的json写回db。然后,使用hanzi2kanji_table.txt进行汉字替换,生成translation_log.txt。之后,封包db文件回到.001和.bin文件,并替换游戏目录下的文件。最后,使用UniversalInjectorFramework工具和uif_config.json配置文件进行Jis替换,完成汉化。该方法能较好地完成大部分日文文本的汉化,运行良好。作者还提供了一个全自动汉化工具,实现一键运行。
编辑记录
2025-02-17 15:34:00 第一次编辑
- 正文
# 简介
本文章为 Escu:de 游戏的汉化教程,该方案最终会生成用于汉化 Escu:de 游戏的文件。
# 使用的工具
# 解包
使用工具:EscudeTools。
- 取出.bin 的文件
- 使用 EscudeTools 工具,运行命令:
D:\Tool\汉化工具\net8.0\EscudeTools.exe -u D:\Data\python_prj\Escude_Translate\script_source |
其中 script_source 含有 script.bin 文件。运行命令后文件解包在 script_source/output/script 中。
D:\Tool\汉化工具\net8.0\EscudeTools.exe -u D:\Data\python_prj\Escude_Translate\data_source |
其中 data_source 含有 data.bin 文件。运行命令后文件解包在 data_source/output/data 中。
使用 EscudeTools 工具进行解包,方便后续封包,该方式解包会生成附加的包信息文件,能封回原包。
- 处理解包的文件
控制台运行命令:
D:\Tool\汉化工具\net8.0\EscudeTools.exe -v D:\Data\python_prj\Escude_Translate\script_source\output\script -t 1 |
-t
后的参数:
- 类型 0: 完整,这会创建包含所有 .bin 和 .001 信息的 script.db。
- 类型 1: 只导出 001 中的文本,这会创建 script_sm.db,包含所有 .001 信息。
- 类型 2: 只导出 bin 中的文本,这会创建 script_text.db 以及大量 .dat 文件(非文本的其他数据)。
D:\Tool\汉化工具\net8.0\EscudeTools.exe -d D:\Data\python_prj\Escude_Translate\data_source\output\data |
# 处理 db 文件用于翻译
方案为:读取 db 的内容写入到所需格式的 json 文件 -> 翻译 json 文件的文本 -> 把翻译后的 json 写回 db -> 读取 db 内容,进行汉字替换 -> 替换后的 db 作为最终打包文件,依次打包回到.bin
- 读取 db 的内容写入到所需格式的 json 文件
需要的 json 格式为:
[ | |
{ | |
"message": "既然是梦的話ーー那我継続齣浸其中也无妨槇。" | |
}, | |
{ | |
"message": "「那就這樣槇」" | |
}, | |
] |
导出代码为:
def export_db_to_json(db_path: str, json_path: str, task_type: str) -> None:
    """Export translatable text from a SQLite database to a JSON file.

    For ``task_type == "script"`` every table is assumed to expose a
    ``DataString`` column; each non-empty value becomes one
    ``{"message": ...}`` entry.  For any other task type (the "data"
    databases) every string cell containing Japanese text is exported,
    and a parallel ``mapping.json`` records (table, column, rowid) so the
    translated text can later be written back in place.

    Exits the process on a database error or when nothing was exported.
    """
    print_info("\n>>> 正在从数据库导出数据到 JSON ...")
    extracted_messages = []  # data-task entries: {"message": text}
    data_list = []           # script-task entries
    try:
        conn = sqlite3.connect(db_path)
        cursor = conn.cursor()
        cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
        tables = cursor.fetchall()
        if task_type == "script":
            for table_name in tables:
                table = table_name[0]
                try:
                    # Quote the identifier: table names come from the DB and
                    # could otherwise break the f-string-built SQL.
                    cursor.execute(f'SELECT DataString FROM "{table}";')
                    rows = cursor.fetchall()
                    for row in rows:
                        if row[0]:
                            data_list.append({"message": row[0]})
                except sqlite3.OperationalError as e:
                    print_warning(f"跳过表 {table}: {e}")
        else:
            global mapping_json_path
            mapping_json_path = os.path.join(os.path.dirname(json_path), "mapping.json")
            mapping = []  # one (table, column, rowid) record per exported text
            for table_tuple in tables:
                table_name = table_tuple[0]
                print(f"处理表:{table_name}")
                # Column names for this table.
                cursor.execute(f"PRAGMA table_info('{table_name}');")
                columns_info = cursor.fetchall()  # (cid, name, type, notnull, dflt_value, pk)
                column_names = [col[1] for col in columns_info]
                # Select rowid explicitly so each cell can be located later.
                cursor.execute(f'SELECT rowid, * FROM "{table_name}";')
                rows = cursor.fetchall()
                for row in rows:
                    current_rowid = row[0]
                    # row[1:] lines up with column_names.
                    for i, cell in enumerate(row[1:]):
                        if cell and isinstance(cell, str) and contains_japanese(cell):
                            extracted_messages.append({"message": cell})
                            mapping.append({
                                "table": table_name,
                                "column": column_names[i],
                                "rowid": current_rowid,
                            })
        conn.close()
    except Exception as e:
        print_error(f"导出数据库 {db_path} 时出错: {e}")
        sys.exit(1)
    if not data_list and not extracted_messages:
        print_error("错误:未导出任何数据。")
        sys.exit(1)
    if data_list:
        with open(json_path, "w", encoding="utf-8") as json_file:
            json.dump(data_list, json_file, ensure_ascii=False, indent=4)
        print_success(f"✅ 提取完成,JSON 文件已保存到 {json_path}")
    if extracted_messages:
        # JSON holding only "message" entries, for the translators.
        with open(json_path, 'w', encoding='utf-8') as f:
            json.dump(extracted_messages, f, ensure_ascii=False, indent=4)
        # The mapping file is internal bookkeeping only.
        with open(mapping_json_path, 'w', encoding='utf-8') as f:
            json.dump(mapping, f, ensure_ascii=False, indent=4)
        print(f"映射信息已保存到 {mapping_json_path}")
        print_success(f"✅ 提取完成,JSON 文件已保存到 {json_path}")
对于 script 的 db 文件,由于其中每个表只有一列,存储的是脚本的文本,因此只需要提取该列存到 json 即可。
对于 data 的 db 文件,选择通过识别日语的文本的方式(函数 contains_japanese (text)),把含有日语的文本内容的表及对应的列提取到 json 中。并用 mapping.json 记录位置,用于后续写回原位置。
# 进行翻译
使用工具 GalTransl,按照要求,编写字典,配置文件,完成翻译。
# 把翻译后的 json 写回 db
先复制原数据库:
def clone_db_structure(src_db: str, dest_db: str, task_type: str) -> None:
    """Copy the source database file to *dest_db*, replacing any old copy.

    Script databases are copied with metadata (copy2), data databases with
    content only (copyfile).  Exits the process if the copy fails.
    """
    if os.path.exists(dest_db):
        os.remove(dest_db)
    try:
        copier = shutil.copy2 if task_type == "script" else shutil.copyfile
        copier(src_db, dest_db)
        print_success(f"✅ 数据库复制完成:{dest_db}")
    except Exception as e:
        print_error(f"复制数据库结构时出错: {e}")
        sys.exit(1)
把 json 写回数据库:
def import_json_to_db(db_path: str, json_path: str, task_type: str) -> None:
    """Write translated JSON entries back into the SQLite database.

    For ``task_type == "script"`` the messages are written sequentially
    into each table's ``DataString`` column in rowid order.  Otherwise the
    previously generated ``mapping.json`` says which (table, column,
    rowid) each entry belongs to.

    Exits the process when the JSON file is missing/unreadable or the
    database update fails.
    """
    global mapping_json_path
    if not os.path.exists(json_path):
        print_error(f"❌ 未找到 JSON 文件: {json_path}")
        sys.exit(1)
    try:
        with open(json_path, "r", encoding="utf-8") as f:
            data = json.load(f)
    except Exception as e:
        print_error(f"读取 JSON 文件时出错: {e}")
        sys.exit(1)
    try:
        conn = sqlite3.connect(db_path)
        cursor = conn.cursor()
        if task_type == "script":
            messages = [item.get("message", "") for item in data if "message" in item]
            cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
            tables = [table[0] for table in cursor.fetchall()]
            index = 0  # position in the flat message list, shared across tables
            for table in tables:
                try:
                    cursor.execute(f'SELECT rowid FROM "{table}";')
                    rows = cursor.fetchall()
                    for row in rows:
                        if index >= len(messages):
                            print_warning(f"⚠️ 翻译 JSON 数据不足,{table} 表剩余行未填充")
                            break
                        translated_text = messages[index]
                        cursor.execute(f'UPDATE "{table}" SET DataString = ? WHERE rowid = ?', (translated_text, row[0]))
                        index += 1
                except sqlite3.OperationalError as e:
                    print_warning(f"⚠️ 跳过 {table}:{e}")
        else:
            with open(mapping_json_path, 'r', encoding='utf-8') as f:
                mapping = json.load(f)
            if len(mapping) != len(data):
                # The translation JSON was reordered or edited; abort this
                # import.  Close the connection (the original leaked it here).
                print_error("错误:翻译后的条目数与映射信息条目数不一致!")
                conn.close()
                return
            # Update each cell recorded in the mapping.  (The original code
            # opened a second connection at this point, leaking the first.)
            for loc, item in zip(mapping, data):
                table = loc["table"]
                column = loc["column"]
                rowid = loc["rowid"]
                new_text = item["message"]
                # rowid locates the row; quoted identifiers pick the column.
                sql = f'UPDATE "{table}" SET "{column}" = ? WHERE rowid = ?;'
                cursor.execute(sql, (new_text, rowid))
        conn.commit()
        conn.close()
        print_success(f"✅ 翻译内容已成功写入 {db_path}")
    except Exception as e:
        print_error(f"导入 JSON 数据时出错: {e}")
        sys.exit(1)
# 读取 db 内容,进行汉字替换
def load_hanzi_conversion_table(txt_file: str) -> dict:
    """Load the character conversion table (tab separated: source<TAB>replacement).

    Lines that do not split into exactly two fields are ignored.
    """
    with open(txt_file, 'r', encoding='utf-8') as handle:
        entries = [line.strip().split("\t") for line in handle]
    return {fields[0]: fields[1] for fields in entries if len(fields) == 2}
def load_existing_log(log_file: str) -> dict:
    """Return the saved conversion log (JSON), or an empty one if the file
    is missing or not valid JSON."""
    empty_log = {"source_characters": "", "target_characters": ""}
    if not os.path.exists(log_file):
        return empty_log
    with open(log_file, 'r', encoding='utf-8') as handle:
        try:
            return json.load(handle)
        except json.JSONDecodeError:
            return empty_log
def replace_hanzi(text, conversion_dict: dict, detected_changes: dict) -> str:
    """Replace characters of *text* via *conversion_dict* and record the pairs.

    ``detected_changes["source_characters"][i]`` (the replacement written to
    the DB) pairs positionally with ``detected_changes["target_characters"][i]``
    (the original char to display).  Non-string input is returned unchanged.
    """
    if not isinstance(text, str):
        return text
    new_text = []
    for char in text:
        if char in conversion_dict:
            replacement = conversion_dict[char]
            # De-duplicate on the replacement that is actually stored in
            # source_characters.  (The original checked `char`, i.e. the
            # *target* char, so every occurrence was appended again.)
            if replacement not in detected_changes["source_characters"]:
                detected_changes["source_characters"] += replacement
                detected_changes["target_characters"] += char
            new_text.append(replacement)
        else:
            new_text.append(char)
    return "".join(new_text)
def process_database_text(db_file: str, conversion_dict: dict, log_file: str) -> None:
    """Apply the character replacement to every text cell of the database and
    update the conversion log, merging with any previous log contents.

    Columns that cannot be read or updated are skipped with a warning.
    """
    print_info("\n>>> 正在进行文字替换...")
    conn = sqlite3.connect(db_file)
    cursor = conn.cursor()
    cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
    tables = [row[0] for row in cursor.fetchall()]
    detected_changes = {"source_characters": "", "target_characters": ""}
    for table in tables:
        cursor.execute(f"PRAGMA table_info('{table}');")
        columns = [row[1] for row in cursor.fetchall()]
        for column in columns:
            try:
                # Quote identifiers: a column named e.g. "index" or one with
                # spaces would otherwise raise and be silently skipped.
                cursor.execute(f'SELECT rowid, "{column}" FROM "{table}";')
                rows = cursor.fetchall()
                for rowid, text in rows:
                    new_text = replace_hanzi(text, conversion_dict, detected_changes)
                    if new_text != text:
                        cursor.execute(f'UPDATE "{table}" SET "{column}" = ? WHERE rowid = ?', (new_text, rowid))
                conn.commit()
            except Exception as e:
                print_warning(f"跳过 {table}.{column}:{e}")
    conn.close()
    # Merge the newly detected pairs into the existing log, walking the
    # source/target strings in step so positions stay aligned.
    existing_log = load_existing_log(log_file)
    existing_source = existing_log.get("source_characters", "")
    existing_target = existing_log.get("target_characters", "")
    new_source = detected_changes["source_characters"]
    new_target = detected_changes["target_characters"]
    for i, char in enumerate(new_source):
        if char not in existing_source:
            existing_source += char
            existing_target += new_target[i]
    final_log = {
        "source_characters": existing_source,
        "target_characters": existing_target
    }
    with open(log_file, 'w', encoding='utf-8') as f:
        json.dump(final_log, f, ensure_ascii=False, indent=4)
    print_success("✅ 文字替换完成,日志已保存到 " + log_file)
log_file = os.path.join(output_dir, "translation_log.txt") | |
conversion_dict = load_hanzi_conversion_table(hanzi_table_path) | |
process_database_text(new_db_path, conversion_dict, log_file) |
读取 hanzi2kanji_table.txt
文件,这是一个汉字替换表。最终生成的 translation_log.txt
用于 jis
替换。
# 封包
- 封包第一步
把 db 文件封包回.001 文件。
D:\Tool\汉化工具\net8.0\EscudeTools.exe -e D:\Data\python_prj\Escude_Translate\script_source\output\cn_script_sm.db -t 1 |
(这里的参数 - t 要和前面解包的 - t 对应)
- 类型 0: 完整,这会生成 .bin 和 .001 文件。
- 类型 1: 这会生成 .001 文件。
- 类型 2: 这会生成 .bin 文件。
该命令会在 Escude_Translate\script_source\output 下生成 repack 文件夹,里面有.001 文件,接下来需要把这些文件替换掉 \output\script 中对应的.001 文件。
def pack_db_to_script(db_path: str, script_db_dir: str, pack_flag: str, file_type: str, repack_type) -> None:
    """Pack a translated database back into script files via EscudeTools.

    EscudeTools writes its output into a ``repack`` folder under
    *script_db_dir*; those files are copied over the matching files in the
    ``script`` (or ``script/staff`` for ``*_staff.db``) folder, and the
    repack folder is removed.  Exits when no repack folder was produced.
    """
    if os.path.basename(db_path).endswith("_staff.db"):
        # Staff databases live one level deeper: script/staff.
        script_db_dir = os.path.join(script_db_dir, "script")
        script_dir = os.path.join(script_db_dir, "staff")
    else:
        script_dir = os.path.join(script_db_dir, "script")
    # Quote the tool path like every other command in this file, so a path
    # containing spaces cannot break the shell invocation.
    command = f'"{escude_tools_path}" {pack_flag} "{db_path}" {file_type} {repack_type}'
    run_command(command)
    repack_dir = os.path.join(script_db_dir, "repack")
    if not os.path.exists(repack_dir):
        print_error(f"错误:未找到 repack 文件夹:{repack_dir}")
        sys.exit(1)
    for root, dirs, files in os.walk(repack_dir):
        for file in files:
            source_file = os.path.join(root, file)
            dest_file = os.path.join(script_dir, file)
            shutil.copy2(source_file, dest_file)
            print_info("替换文件:" + dest_file)
    print_success(f"✅ 数据库文件已打包回脚本,路径在:{script_dir}。")
    shutil.rmtree(repack_dir)
    print_info("已移除 repack 文件夹。")
把 db 文件封包回.bin 文件。
D:\Tool\汉化工具\net8.0\EscudeTools.exe -f D:\Data\python_prj\Escude_Translate\data_source\process_db |
先把翻译后的 db 文件移动到 data_source\process_db
,该命令会在 Escude_Translate\data_source\process_db 下生成.bin 文件,接下来需要把这些文件替换掉 \output\data 中对应的.bin 文件。
if task_type == "data": | |
process_db_dir = os.path.join(bin_folder, "process_db") | |
if not os.path.exists(process_db_dir): | |
os.makedirs(process_db_dir) | |
for f in os.listdir(output_dir): | |
if f.endswith(".db") and f.startswith("cn_"): | |
src = os.path.join(output_dir, f) | |
dst = os.path.join(process_db_dir, f) | |
shutil.move(src, dst) | |
print_info(f"移动文件 {f} 到 {process_db_dir}") | |
cmd_pack_db = f'"{escude_tools_path}" {pack_flag} "{process_db_dir}"' | |
run_command(cmd_pack_db) | |
target_subfolder = task_dir | |
for f in os.listdir(process_db_dir): | |
if f.endswith(".bin") and f.startswith("cn_"): | |
old_bin_path = os.path.join(process_db_dir, f) | |
new_bin_name = f[3:] | |
new_bin_path = os.path.join(process_db_dir, new_bin_name) | |
os.rename(old_bin_path, new_bin_path) | |
shutil.move(new_bin_path, os.path.join(target_subfolder, new_bin_name)) | |
print_info("处理并移动文件:" + new_bin_name) | |
print_success("✅ DB 文件打包并替换原文件完成。") | |
if os.path.exists(process_db_dir): | |
shutil.rmtree(process_db_dir) |
- 封包第二步
打包为最终的.bin 文件。
D:\Tool\汉化工具\net8.0\EscudeTools.exe -r D:\Data\python_prj\Escude_Translate\script_source\output |
D:\Tool\汉化工具\net8.0\EscudeTools.exe -r D:\Data\python_prj\Escude_Translate\data_source\output |
最终得到的 script.bin
, data.bin
, translation_log.txt
即为汉化游戏所需的最终的文件。
# 最终汉化
- 替换游戏目录下对应的文件
- 使用
UniversalInjectorFramework
工具进行Jis
替换- 配置
uif_config.json
文件,按照translation_log.txt
配置"source_characters"
和"target_characters"
。 - 把该工具目录下的 dll 放到游戏目录,测试是否运行成功,一般先试用
winmm.dll
,运行成功则完成汉化。
- 配置
# 总结
通过该方式汉化,能较好地完成 Escu:de
引擎游戏内大部分日文文本的汉化,运行良好。
# 全自动汉化工具
整合以上流程,写了一个工具实现自动化,一键运行。
color_trans2.py
:
#!/usr/bin/env python | |
# -*- coding: utf-8 -*- | |
import os | |
import sys | |
import subprocess | |
import shutil | |
import sqlite3 | |
import json | |
import time | |
from glob import glob | |
import re | |
try: | |
from pick import pick | |
except ImportError: | |
print("请安装 pick 模块:pip install pick") | |
sys.exit(1) | |
try: | |
from colorama import init, Fore, Style | |
init(autoreset=True) | |
except ImportError: | |
print("请安装 colorama 模块:pip install colorama") | |
sys.exit(1) | |
# =============================== | |
# Console 输出辅助函数 | |
# =============================== | |
def print_info(message: str) -> None:
    """Print an informational message in cyan."""
    print(f"{Fore.CYAN}{message}{Style.RESET_ALL}")
def print_success(message: str) -> None:
    """Print a success message in green."""
    print(f"{Fore.GREEN}{message}{Style.RESET_ALL}")
def print_error(message: str) -> None:
    """Print an error message in red."""
    print(f"{Fore.RED}{message}{Style.RESET_ALL}")
def print_warning(message: str) -> None:
    """Print a warning message in yellow."""
    print(f"{Fore.YELLOW}{message}{Style.RESET_ALL}")
# =============================== | |
# 公共辅助函数(共用部分) | |
# =============================== | |
def run_command(command: str) -> subprocess.CompletedProcess:
    """Run *command* through the shell, echoing it first; exit on failure."""
    print_info(f"\n>>> 执行命令: {command}")
    completed = subprocess.run(command, shell=True)
    if completed.returncode != 0:
        print_error(f"错误:命令执行失败!\n命令: {command}")
        sys.exit(1)
    return completed
def clear_directory(dir_path: str) -> None:
    """Empty *dir_path* of all entries, creating the directory if missing."""
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)
        print_info(f"目录不存在,已创建:{dir_path}")
        return
    for entry in os.listdir(dir_path):
        entry_path = os.path.join(dir_path, entry)
        try:
            # Files and symlinks are unlinked; real directories removed whole.
            if os.path.isfile(entry_path) or os.path.islink(entry_path):
                os.unlink(entry_path)
            elif os.path.isdir(entry_path):
                shutil.rmtree(entry_path)
        except Exception as e:
            print_error(f"删除 {entry_path} 时出错: {e}")
    print_info(f"目录 {dir_path} 已清空。")
def contains_japanese(text):
    """Return True when *text* is a string containing hiragana, katakana,
    or CJK ideographs; False for non-string input."""
    if not isinstance(text, str):
        return False
    return re.search(r'[\u3040-\u30FF\u4E00-\u9FFF]', text) is not None
def export_db_to_json(db_path: str, json_path: str, task_type: str) -> None:
    """Export translatable text from a SQLite database to a JSON file.

    For ``task_type == "script"`` every table is assumed to expose a
    ``DataString`` column; each non-empty value becomes one
    ``{"message": ...}`` entry.  For any other task type (the "data"
    databases) every string cell containing Japanese text is exported,
    and a parallel ``mapping.json`` records (table, column, rowid) so the
    translated text can later be written back in place.

    Exits the process on a database error or when nothing was exported.
    """
    print_info("\n>>> 正在从数据库导出数据到 JSON ...")
    extracted_messages = []  # data-task entries: {"message": text}
    data_list = []           # script-task entries
    try:
        conn = sqlite3.connect(db_path)
        cursor = conn.cursor()
        cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
        tables = cursor.fetchall()
        if task_type == "script":
            for table_name in tables:
                table = table_name[0]
                try:
                    # Quote the identifier: table names come from the DB and
                    # could otherwise break the f-string-built SQL.
                    cursor.execute(f'SELECT DataString FROM "{table}";')
                    rows = cursor.fetchall()
                    for row in rows:
                        if row[0]:
                            data_list.append({"message": row[0]})
                except sqlite3.OperationalError as e:
                    print_warning(f"跳过表 {table}: {e}")
        else:
            global mapping_json_path
            mapping_json_path = os.path.join(os.path.dirname(json_path), "mapping.json")
            mapping = []  # one (table, column, rowid) record per exported text
            for table_tuple in tables:
                table_name = table_tuple[0]
                print(f"处理表:{table_name}")
                # Column names for this table.
                cursor.execute(f"PRAGMA table_info('{table_name}');")
                columns_info = cursor.fetchall()  # (cid, name, type, notnull, dflt_value, pk)
                column_names = [col[1] for col in columns_info]
                # Select rowid explicitly so each cell can be located later.
                cursor.execute(f'SELECT rowid, * FROM "{table_name}";')
                rows = cursor.fetchall()
                for row in rows:
                    current_rowid = row[0]
                    # row[1:] lines up with column_names.
                    for i, cell in enumerate(row[1:]):
                        if cell and isinstance(cell, str) and contains_japanese(cell):
                            extracted_messages.append({"message": cell})
                            mapping.append({
                                "table": table_name,
                                "column": column_names[i],
                                "rowid": current_rowid,
                            })
        conn.close()
    except Exception as e:
        print_error(f"导出数据库 {db_path} 时出错: {e}")
        sys.exit(1)
    if not data_list and not extracted_messages:
        print_error("错误:未导出任何数据。")
        sys.exit(1)
    if data_list:
        with open(json_path, "w", encoding="utf-8") as json_file:
            json.dump(data_list, json_file, ensure_ascii=False, indent=4)
        print_success(f"✅ 提取完成,JSON 文件已保存到 {json_path}")
    if extracted_messages:
        # JSON holding only "message" entries, for the translators.
        with open(json_path, 'w', encoding='utf-8') as f:
            json.dump(extracted_messages, f, ensure_ascii=False, indent=4)
        # The mapping file is internal bookkeeping only.
        with open(mapping_json_path, 'w', encoding='utf-8') as f:
            json.dump(mapping, f, ensure_ascii=False, indent=4)
        print(f"映射信息已保存到 {mapping_json_path}")
        print_success(f"✅ 提取完成,JSON 文件已保存到 {json_path}")
def clone_db_structure(src_db: str, dest_db: str, task_type: str) -> None:
    """Copy the source database file to *dest_db*, replacing any old copy.

    Script databases are copied with metadata (copy2), data databases with
    content only (copyfile).  Exits the process if the copy fails.
    """
    if os.path.exists(dest_db):
        os.remove(dest_db)
    try:
        copier = shutil.copy2 if task_type == "script" else shutil.copyfile
        copier(src_db, dest_db)
        print_success(f"✅ 数据库复制完成:{dest_db}")
    except Exception as e:
        print_error(f"复制数据库结构时出错: {e}")
        sys.exit(1)
def import_json_to_db(db_path: str, json_path: str, task_type: str) -> None:
    """Write translated JSON entries back into the SQLite database.

    For ``task_type == "script"`` the messages are written sequentially
    into each table's ``DataString`` column in rowid order.  Otherwise the
    previously generated ``mapping.json`` says which (table, column,
    rowid) each entry belongs to.

    Exits the process when the JSON file is missing/unreadable or the
    database update fails.
    """
    global mapping_json_path
    if not os.path.exists(json_path):
        print_error(f"❌ 未找到 JSON 文件: {json_path}")
        sys.exit(1)
    try:
        with open(json_path, "r", encoding="utf-8") as f:
            data = json.load(f)
    except Exception as e:
        print_error(f"读取 JSON 文件时出错: {e}")
        sys.exit(1)
    try:
        conn = sqlite3.connect(db_path)
        cursor = conn.cursor()
        if task_type == "script":
            messages = [item.get("message", "") for item in data if "message" in item]
            cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
            tables = [table[0] for table in cursor.fetchall()]
            index = 0  # position in the flat message list, shared across tables
            for table in tables:
                try:
                    cursor.execute(f'SELECT rowid FROM "{table}";')
                    rows = cursor.fetchall()
                    for row in rows:
                        if index >= len(messages):
                            print_warning(f"⚠️ 翻译 JSON 数据不足,{table} 表剩余行未填充")
                            break
                        translated_text = messages[index]
                        cursor.execute(f'UPDATE "{table}" SET DataString = ? WHERE rowid = ?', (translated_text, row[0]))
                        index += 1
                except sqlite3.OperationalError as e:
                    print_warning(f"⚠️ 跳过 {table}:{e}")
        else:
            with open(mapping_json_path, 'r', encoding='utf-8') as f:
                mapping = json.load(f)
            if len(mapping) != len(data):
                # The translation JSON was reordered or edited; abort this
                # import.  Close the connection (the original leaked it here).
                print_error("错误:翻译后的条目数与映射信息条目数不一致!")
                conn.close()
                return
            # Update each cell recorded in the mapping.  (The original code
            # opened a second connection at this point, leaking the first.)
            for loc, item in zip(mapping, data):
                table = loc["table"]
                column = loc["column"]
                rowid = loc["rowid"]
                new_text = item["message"]
                # rowid locates the row; quoted identifiers pick the column.
                sql = f'UPDATE "{table}" SET "{column}" = ? WHERE rowid = ?;'
                cursor.execute(sql, (new_text, rowid))
        conn.commit()
        conn.close()
        print_success(f"✅ 翻译内容已成功写入 {db_path}")
    except Exception as e:
        print_error(f"导入 JSON 数据时出错: {e}")
        sys.exit(1)
def cleanup_intermediate_files(folder_path: str, final_files: list) -> None:
    """Delete everything inside *folder_path* except the files in *final_files*."""
    print_info("\n>>> 正在清理中间文件...")
    keep = {os.path.abspath(path) for path in final_files}
    for entry in os.listdir(folder_path):
        entry_path = os.path.join(folder_path, entry)
        if os.path.abspath(entry_path) in keep:
            continue
        try:
            if os.path.isfile(entry_path) or os.path.islink(entry_path):
                os.remove(entry_path)
            elif os.path.isdir(entry_path):
                shutil.rmtree(entry_path)
        except Exception as e:
            print_error(f"删除 {entry_path} 时出错: {e}")
    print_info("中间文件已清理。")
def load_hanzi_conversion_table(txt_file: str) -> dict:
    """Load the character conversion table (tab separated: source<TAB>replacement).

    Lines that do not split into exactly two fields are ignored.
    """
    with open(txt_file, 'r', encoding='utf-8') as handle:
        entries = [line.strip().split("\t") for line in handle]
    return {fields[0]: fields[1] for fields in entries if len(fields) == 2}
def load_existing_log(log_file: str) -> dict:
    """Return the saved conversion log (JSON), or an empty one if the file
    is missing or not valid JSON."""
    empty_log = {"source_characters": "", "target_characters": ""}
    if not os.path.exists(log_file):
        return empty_log
    with open(log_file, 'r', encoding='utf-8') as handle:
        try:
            return json.load(handle)
        except json.JSONDecodeError:
            return empty_log
def replace_hanzi(text, conversion_dict: dict, detected_changes: dict) -> str:
    """Replace characters of *text* via *conversion_dict* and record the pairs.

    ``detected_changes["source_characters"][i]`` (the replacement written to
    the DB) pairs positionally with ``detected_changes["target_characters"][i]``
    (the original char to display).  Non-string input is returned unchanged.
    """
    if not isinstance(text, str):
        return text
    new_text = []
    for char in text:
        if char in conversion_dict:
            replacement = conversion_dict[char]
            # De-duplicate on the replacement that is actually stored in
            # source_characters.  (The original checked `char`, i.e. the
            # *target* char, so every occurrence was appended again.)
            if replacement not in detected_changes["source_characters"]:
                detected_changes["source_characters"] += replacement
                detected_changes["target_characters"] += char
            new_text.append(replacement)
        else:
            new_text.append(char)
    return "".join(new_text)
def process_database_text(db_file: str, conversion_dict: dict, log_file: str) -> None:
    """Apply the character replacement to every text cell of the database and
    update the conversion log, merging with any previous log contents.

    Columns that cannot be read or updated are skipped with a warning.
    """
    print_info("\n>>> 正在进行文字替换...")
    conn = sqlite3.connect(db_file)
    cursor = conn.cursor()
    cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
    tables = [row[0] for row in cursor.fetchall()]
    detected_changes = {"source_characters": "", "target_characters": ""}
    for table in tables:
        cursor.execute(f"PRAGMA table_info('{table}');")
        columns = [row[1] for row in cursor.fetchall()]
        for column in columns:
            try:
                # Quote identifiers: a column named e.g. "index" or one with
                # spaces would otherwise raise and be silently skipped.
                cursor.execute(f'SELECT rowid, "{column}" FROM "{table}";')
                rows = cursor.fetchall()
                for rowid, text in rows:
                    new_text = replace_hanzi(text, conversion_dict, detected_changes)
                    if new_text != text:
                        cursor.execute(f'UPDATE "{table}" SET "{column}" = ? WHERE rowid = ?', (new_text, rowid))
                conn.commit()
            except Exception as e:
                print_warning(f"跳过 {table}.{column}:{e}")
    conn.close()
    # Merge the newly detected pairs into the existing log, walking the
    # source/target strings in step so positions stay aligned.
    existing_log = load_existing_log(log_file)
    existing_source = existing_log.get("source_characters", "")
    existing_target = existing_log.get("target_characters", "")
    new_source = detected_changes["source_characters"]
    new_target = detected_changes["target_characters"]
    for i, char in enumerate(new_source):
        if char not in existing_source:
            existing_source += char
            existing_target += new_target[i]
    final_log = {
        "source_characters": existing_source,
        "target_characters": existing_target
    }
    with open(log_file, 'w', encoding='utf-8') as f:
        json.dump(final_log, f, ensure_ascii=False, indent=4)
    print_success("✅ 文字替换完成,日志已保存到 " + log_file)
def pack_db_to_script(db_path: str, script_db_dir: str, pack_flag: str, file_type: str, repack_type) -> None:
    """Pack a translated database back into script files via EscudeTools.

    EscudeTools writes its output into a ``repack`` folder under
    *script_db_dir*; those files are copied over the matching files in the
    ``script`` (or ``script/staff`` for ``*_staff.db``) folder, and the
    repack folder is removed.  Exits when no repack folder was produced.
    """
    if os.path.basename(db_path).endswith("_staff.db"):
        # Staff databases live one level deeper: script/staff.
        script_db_dir = os.path.join(script_db_dir, "script")
        script_dir = os.path.join(script_db_dir, "staff")
    else:
        script_dir = os.path.join(script_db_dir, "script")
    # Quote the tool path like every other command in this file, so a path
    # containing spaces cannot break the shell invocation.
    command = f'"{escude_tools_path}" {pack_flag} "{db_path}" {file_type} {repack_type}'
    run_command(command)
    repack_dir = os.path.join(script_db_dir, "repack")
    if not os.path.exists(repack_dir):
        print_error(f"错误:未找到 repack 文件夹:{repack_dir}")
        sys.exit(1)
    for root, dirs, files in os.walk(repack_dir):
        for file in files:
            source_file = os.path.join(root, file)
            dest_file = os.path.join(script_dir, file)
            shutil.copy2(source_file, dest_file)
            print_info("替换文件:" + dest_file)
    print_success(f"✅ 数据库文件已打包回脚本,路径在:{script_dir}。")
    shutil.rmtree(repack_dir)
    print_info("已移除 repack 文件夹。")
def pack_script_to_bin(output_dir: str, escude_tools_path: str) -> None:
    """Invoke EscudeTools ``-r`` to repack *output_dir* into final .bin files.

    The tool path is quoted (the original left it bare, unlike every other
    command built in this file) so paths with spaces work.
    """
    command = f'"{escude_tools_path}" -r "{output_dir}"'
    run_command(command)
    print_success("✅ 打包生成最终的 .bin 文件。")
def merge_translation_logs(log_list: list, output_log: str) -> None:
    """Merge several conversion logs into one, de-duplicating by source char.

    ``source_characters`` and ``target_characters`` are parallel strings
    (position i of one pairs with position i of the other for the UIF
    replacement), so de-duplication must walk the PAIRS together.  The
    original de-duplicated the two strings independently, which could
    misalign the pairing whenever the logs shared characters unevenly.
    """
    final_source = ""
    final_target = ""
    for log_file in log_list:
        if os.path.exists(log_file):
            log = load_existing_log(log_file)
            src = log.get("source_characters", "")
            tgt = log.get("target_characters", "")
            for s_char, t_char in zip(src, tgt):
                if s_char not in final_source:
                    final_source += s_char
                    final_target += t_char
    final_log = {
        "source_characters": final_source,
        "target_characters": final_target
    }
    with open(output_log, 'w', encoding='utf-8') as f:
        json.dump(final_log, f, ensure_ascii=False, indent=4)
    print_success("✅ 合并转换日志完成,保存到 " + output_log)
# =============================== | |
# 针对不同任务的处理函数 | |
# =============================== | |
def process_bin_task(task_type: str) -> tuple:
    """
    Run one full unpack → translate → replace → repack pipeline.

    task_type: "script" or "data".
    Returns (path of the final generated .bin file,
    path of the conversion log produced by this task).

    Interactive: prompts for tool paths, the folder containing the .bin,
    the GalTransl project folder and the conversion table, then drives
    EscudeTools / GalTransl via shell commands.
    """
    # Task-specific parameters.
    if task_type == "script":
        subfolder = "script"  # sub-folder name (under output)
        unpack_flag = "-v"  # flag for unpacking into a database (script task uses -v)
        pack_flag = "-e"
        final_bin_name = "script.bin"
        file_type = "-t"
        file_flag = 1
        translation_required = True  # the script task invokes the translation tool
    elif task_type == "data":
        subfolder = "data"  # the data task uses the data sub-folder
        unpack_flag = "-d"  # the data task uses -d
        pack_flag = "-f"
        final_bin_name = "data.bin"
        translation_required = True
    else:
        print_error("无效的任务类型!")
        sys.exit(1)
    print("\n" + Fore.MAGENTA + f"开始处理 {task_type} 任务..." + Style.RESET_ALL)
    global escude_tools_path, runed, run_galtransl_path, hanzi_table_path, replaced, is_set_galtransl, project_path
    # On the first run, optionally let the user override the EscudeTools.exe
    # and run_GalTransl.exe paths; otherwise hard-coded defaults are used.
    if not runed:
        choice = input(Fore.CYAN + "是否需要重新指定 EscudeTools.exe 路径?(输入 yes 重新指定,否则输入 no): " + Style.RESET_ALL).strip().lower()
        if choice == "yes":
            escude_tools_path = input(Fore.CYAN + "请输入 EscudeTools.exe 的路径: " + Style.RESET_ALL).strip().strip('"')
        else:
            escude_tools_path = r"D:\Tool\汉化工具\net8.0\EscudeTools.exe"
        choice = input(Fore.CYAN + "是否需要重新指定 run_GalTransl.exe 路径?(输入 yes 重新指定,否则输入 no): " + Style.RESET_ALL).strip().lower()
        if choice == "yes":
            run_galtransl_path = input(Fore.CYAN + "请输入 run_GalTransl.exe 的路径: " + Style.RESET_ALL).strip().strip('"')
        else:
            run_galtransl_path = r"D:\Tool\GalTransl\GalTransl-v5.9.1-win\run_GalTransl.exe"
    # Ask for the folder that contains the .bin file to process.
    bin_folder = input(Fore.CYAN + f"\n请输入 {task_type}.bin 文件所在的文件夹路径: " + Style.RESET_ALL).strip().strip('"')
    if not os.path.exists(bin_folder):
        print_error("错误:指定的文件夹不存在!")
        sys.exit(1)
    # Remove stale sub-directories left over from previous runs.
    for item in os.listdir(bin_folder):
        item_path = os.path.join(bin_folder, item)
        if os.path.isdir(item_path):
            shutil.rmtree(item_path)
            print_info("已删除旧内容:" + item_path)
    # Step 1: unpack the .bin file (common -u command).
    cmd_unpack = f'"{escude_tools_path}" -u "{bin_folder}"'
    run_command(cmd_unpack)
    # Step 2: unpack further into a database.
    output_dir = os.path.join(bin_folder, "output")
    task_dir = os.path.join(output_dir, subfolder)
    if not os.path.exists(task_dir):
        print_error(f"错误:解包后的 {subfolder} 文件夹不存在!")
        sys.exit(1)
    if task_type == "script":
        print_info("\n>>> 正在解包output\script文件...")
        cmd_unpack_db = f'"{escude_tools_path}" {unpack_flag} "{task_dir}" {file_type} {file_flag}'
        run_command(cmd_unpack_db)
        print_info("\n>>> 正在解包output\script\staff文件...")
        staff_folder = os.path.join(task_dir, "staff")
        cmd_unpack_db = f'"{escude_tools_path}" {unpack_flag} "{staff_folder}" {file_type} {file_flag}'
        run_command(cmd_unpack_db)
    elif task_type == "data":
        print_info("\n>>> 正在解包output\data文件...")
        cmd_unpack_db = f'"{escude_tools_path}" {unpack_flag} "{task_dir}"'
        run_command(cmd_unpack_db)
    # Step 3: process the unpacked databases.
    db_pattern = os.path.join(output_dir, "*.db")
    db_files = [os.path.basename(f) for f in glob(db_pattern) if not os.path.basename(f).startswith("cn_")]
    if task_type == "script":
        script_dir = os.path.join(output_dir, "script")
        # Rename each script/*.db to *_staff.db (so it can be told apart from
        # the top-level DBs) and add it to the work list; the walrus binding
        # and the os.rename(...) or True clause run as side effects of the
        # comprehension's filter.
        db_files += [
            os.path.join("script", new)
            for f in glob(os.path.join(script_dir, "*.db"))
            if not os.path.basename(f).startswith("cn_")
            and (new := os.path.splitext(os.path.basename(f))[0] + "_staff.db")
            and (os.rename(f, os.path.join(script_dir, new)) or True)
        ]
    if not db_files:
        print_error("未找到待处理的 .db 文件。")
        sys.exit(1)
    # NOTE(review): db_file itself is never used; the loop only bounds the
    # number of passes while pick() chooses the DB each time.
    for db_file in db_files:
        print_info("\n------------------------------------------")
        # " ".join(<str>) inserts a space between every character of the prompt.
        title = " ".join("请选择要处理的.")+"db"+" ".join("文件:")
        option, _ = pick(db_files, title)
        option = os.path.basename(option)
        if option.endswith("_staff.db"):
            selected_db = os.path.join(output_dir, "script", option)
            json_path = os.path.join(output_dir, "script", os.path.splitext(option)[0] + ".json")
        else:
            selected_db = os.path.join(output_dir, option)
            json_path = os.path.join(output_dir, os.path.splitext(option)[0] + ".json")
        print_info(f"\n>>> 正在导出 {option} 到 JSON ...")
        export_db_to_json(selected_db, json_path, task_type)
        # Overrides the task default set at the top of the function.
        translation_required= True if (input(Fore.CYAN + "是否需要使用翻译工具进行翻译?(输入 yes 进行翻译,否则输入 no): " + Style.RESET_ALL).strip().lower() == "yes") else False
        if translation_required:
            # Hand the exported JSON to GalTransl and wait for its output.
            if not is_set_galtransl:
                project_path = input(Fore.CYAN + "请输入项目文件夹路径(翻译工具调用所需): " + Style.RESET_ALL).strip().strip('"')
                is_set_galtransl = True
            if not os.path.exists(project_path):
                print_error("错误:项目文件夹不存在!")
                sys.exit(1)
            gt_output = os.path.join(project_path, "gt_output")
            transl_cache = os.path.join(project_path, "transl_cache")
            gt_input = os.path.join(project_path, "gt_input")
            clear_directory(gt_output)
            clear_directory(transl_cache)
            clear_directory(gt_input)
            dest_json_path = os.path.join(gt_input, os.path.basename(json_path))
            shutil.copy2(json_path, dest_json_path)
            print_info("已将 JSON 文件复制到:" + dest_json_path)
            cmd_translate = f'"{run_galtransl_path}" "{project_path}"'
            galtransl_dir = os.path.dirname(run_galtransl_path)
            proc = subprocess.Popen(cmd_translate, shell=True, cwd=galtransl_dir)
            try:
                print_info("\n等待翻译任务完成并生成翻译 JSON 文件...")
                proc.wait()
            except KeyboardInterrupt:
                # Let the user interrupt the translator without killing this script.
                proc.terminate()
                proc.wait()
            translated_json_gt = os.path.join(gt_output, os.path.basename(json_path))
            print_info("\n翻译任务完成,检测是否生成翻译 JSON 文件...")
            timeout = 300  # wait up to 5 minutes for the output JSON to appear
            waited = 0
            while not os.path.exists(translated_json_gt) and waited < timeout:
                time.sleep(5)
                waited += 5
            if not os.path.exists(translated_json_gt):
                print_error("错误:超时未生成翻译 JSON 文件!")
                sys.exit(1)
            use_translated = input(Fore.CYAN + "是否使用 gt_output 中的翻译结果?(输入 yes 使用,否则输入 no): " + Style.RESET_ALL).strip().lower()
            if use_translated == "yes":
                final_translated_json = translated_json_gt
            else:
                final_translated_json = dest_json_path
            print_info("最终使用的翻译 JSON 文件:" + final_translated_json)
        else:
            final_translated_json = input(Fore.CYAN + "请输入翻译后的 JSON 文件路径: " + Style.RESET_ALL).strip().strip('"')
            if not os.path.exists(final_translated_json):
                print_error("错误:指定的翻译后 JSON 文件不存在!")
                sys.exit(1)
        # Clone the DB as cn_<name>.db and write the translation into the clone.
        new_db_name = "cn_" + option
        if option.endswith("_staff.db"):
            new_db_path = os.path.join(output_dir, "script", new_db_name)
        else:
            new_db_path = os.path.join(output_dir, new_db_name)
        clone_db_structure(selected_db, new_db_path, task_type)
        import_json_to_db(new_db_path, final_translated_json, task_type)
        print_success(f"✅ 已完成 {option} 的翻译数据写入。")
        print_info(">>> 开始进行文字替换")
        if not replaced:
            hanzi_table_path = input(Fore.CYAN + "请输入汉字替换表文件路径: " + Style.RESET_ALL).strip().strip('"')
            replaced = True
        if not os.path.exists(hanzi_table_path):
            print_error("错误:汉字替换表不存在!")
            sys.exit(1)
        log_file = os.path.join(output_dir, "translation_log.txt")
        conversion_dict = load_hanzi_conversion_table(hanzi_table_path)
        process_database_text(new_db_path, conversion_dict, log_file)
        print_success(f"✅ 已完成 {option} 的文字替换。")
        if task_type == "script":
            print_info(f"\n>>> 开始生成翻译后的脚本文件到{os.path.dirname(new_db_path)}...")
            pack_db_to_script(new_db_path, output_dir, pack_flag, file_type, file_flag)
        cont = input(Fore.CYAN + "是否需要处理下一个 .db 文件?(输入 yes 继续,否则输入 no): " + Style.RESET_ALL).strip().lower()
        if cont != "yes":
            break
    if task_type == "data":
        # Pack the translated cn_*.db files back to .bin through a temporary
        # process_db folder, then move the results (renamed without the
        # "cn_" prefix) over the originals in output/data.
        process_db_dir = os.path.join(bin_folder, "process_db")
        if not os.path.exists(process_db_dir):
            os.makedirs(process_db_dir)
        for f in os.listdir(output_dir):
            if f.endswith(".db") and f.startswith("cn_"):
                src = os.path.join(output_dir, f)
                dst = os.path.join(process_db_dir, f)
                shutil.move(src, dst)
                print_info(f"移动文件 {f} 到 {process_db_dir}")
        cmd_pack_db = f'"{escude_tools_path}" {pack_flag} "{process_db_dir}"'
        run_command(cmd_pack_db)
        target_subfolder = task_dir
        for f in os.listdir(process_db_dir):
            if f.endswith(".bin") and f.startswith("cn_"):
                old_bin_path = os.path.join(process_db_dir, f)
                new_bin_name = f[3:]  # drop the "cn_" prefix
                new_bin_path = os.path.join(process_db_dir, new_bin_name)
                os.rename(old_bin_path, new_bin_path)
                shutil.move(new_bin_path, os.path.join(target_subfolder, new_bin_name))
                print_info("处理并移动文件:" + new_bin_name)
        print_success("✅ DB 文件打包并替换原文件完成。")
        if os.path.exists(process_db_dir):
            shutil.rmtree(process_db_dir)
    # Final step: repack everything under output into the final .bin.
    cmd_pack_script = f'"{escude_tools_path}" -r "{output_dir}"'
    run_command(cmd_pack_script)
    final_bin = os.path.join(output_dir, final_bin_name)
    if os.path.exists(final_bin):
        print_success(f"✅ {task_type.capitalize()}任务打包完成,生成文件: {final_bin}")
    else:
        print_error(f"错误:未生成最终 {final_bin_name} 文件!")
        sys.exit(1)
    # NOTE(review): log_file is only assigned inside the per-DB loop above;
    # if the loop body never completed one pass this would raise
    # UnboundLocalError — confirm that at least one DB is always processed.
    return final_bin, log_file
# ===============================
# Main function: orchestrates both tasks
# ===============================
def main() -> None:
    """Interactive entry point: run the script task, the data task, or both.

    Flow: prompt for a task choice -> run process_bin_task() for each
    selected task -> (when both ran) merge their translation logs and copy
    the final files into ./output -> optionally delete intermediate files
    -> report where the generated artifacts live.

    Exits the process with status 1 on an invalid menu choice or when no
    final .bin file was produced.
    """
    print(Fore.MAGENTA + "============================================" + Style.RESET_ALL)
    print(Fore.MAGENTA + " ESCU:DE引擎全自动化汉化任务开始" + Style.RESET_ALL)
    print(Fore.MAGENTA + "============================================\n" + Style.RESET_ALL)
    print_info("请选择要执行的任务:")
    print("1) 脚本任务(处理 script.bin)")
    print("2) 数据任务(处理 data.bin)")
    print("3) 两者都执行")
    choice = input(Fore.CYAN + "请输入选项(1/2/3): " + Style.RESET_ALL).strip()
    final_script_bin = None
    final_data_bin = None
    log_files = []
    # Shared state consumed by process_bin_task and helpers; reset per run.
    # NOTE(review): "runed" looks like a misspelling of "ran" but is the name
    # used elsewhere in the file, so it is kept for compatibility.
    global runed, hanzi_table_path, replaced, is_set_galtransl, project_path
    runed = False
    hanzi_table_path = None
    replaced = False
    is_set_galtransl = False
    project_path = None
    if choice == "1":
        final_script_bin, log1 = process_bin_task("script")
        log_files.append(log1)
    elif choice == "2":
        final_data_bin, log2 = process_bin_task("data")
        log_files.append(log2)
    elif choice == "3":
        final_script_bin, log1 = process_bin_task("script")
        runed = True  # signal the second task that a first pass already ran
        final_data_bin, log2 = process_bin_task("data")
        log_files.extend([log1, log2])
    else:
        print_error("无效选项!")
        sys.exit(1)
    final_log = None
    if len(log_files) > 1:
        # Both tasks ran: merge the two logs and gather everything in ./output.
        output_dir = os.path.join(os.getcwd(), "output")
        os.makedirs(output_dir, exist_ok=True)
        merged_log = os.path.join(output_dir, "translation_log.txt")
        merge_translation_logs(log_files, merged_log)
        final_log = merged_log
        for file_path in [final_script_bin, final_data_bin]:
            if file_path and os.path.dirname(file_path) != output_dir:
                shutil.copy(file_path, output_dir)
        # Bug fix: the original f-string embedded a tuple, so the message
        # printed a tuple repr like "('a.bin', 'b.bin', 'log.txt')".
        # Join the basenames into a readable list instead.
        copied_names = "、".join(
            os.path.basename(p)
            for p in (final_script_bin, final_data_bin, final_log)
            if p
        )
        print_success(f"\n已将最终文件{copied_names}复制到 output 目录。")
    else:
        final_log = log_files[0]
    keep_choice = input(Fore.CYAN + "\n任务完成。是否保留中间文件?(输入 yes 保留,否则输入 no): " + Style.RESET_ALL).strip().lower()
    if keep_choice != "yes":
        if len(log_files) > 1:
            # Both tasks ran: clean each task's working directory, keeping
            # only its final .bin file.
            output_dir = os.path.abspath(os.path.join(os.path.dirname(final_script_bin), os.pardir))
            savefile = os.path.join(output_dir, os.path.basename(final_script_bin))
            cleanup_intermediate_files(output_dir, [savefile])
            output_dir = os.path.abspath(os.path.join(os.path.dirname(final_data_bin), os.pardir))
            savefile = os.path.join(output_dir, os.path.basename(final_data_bin))
            cleanup_intermediate_files(output_dir, [savefile])
        else:
            # Single task: keep the final .bin plus its translation log.
            final_file = final_script_bin if final_script_bin else final_data_bin
            if not final_file:
                print_error("错误:没有生成任何最终.bin文件。")
                sys.exit(1)
            output_dir = os.path.dirname(final_file)
            cleanup_intermediate_files(output_dir, [final_file, final_log])
    else:
        print_info("中间文件已保留。")
    print(Fore.MAGENTA + "\n============================================" + Style.RESET_ALL)
    print(Fore.MAGENTA + " 全自动化任务已完成!" + Style.RESET_ALL)
    if final_script_bin:
        print_success("最终生成的 script.bin:" + os.path.join(os.path.dirname(final_log), os.path.basename(final_script_bin)))
    if final_data_bin:
        print_success("最终生成的 data.bin:" + os.path.join(os.path.dirname(final_log), os.path.basename(final_data_bin)))
    print_success("最终生成的 translation_log.txt:" + final_log)
    print(Fore.MAGENTA + "============================================\n" + Style.RESET_ALL)


if __name__ == "__main__":
    main()
run_color_trans.bat:
@echo off
REM Launch the consolidated Python script that performs the fully
REM automated translation task, then wait for a keypress before closing.
python color_trans2.py
pause
打包为 exe:
python -m PyInstaller --onefile color_trans2.py