Compare commits

33 Commits

Author · SHA1 · Message · Date

99193a198b · Add json2xdb · 2024-01-24 05:15:00 -05:00
CakeLancelot · 249af1d2d3 · Ensure all objects are deleted after export + use env var for user directory · 2024-01-19 14:30:17 -06:00
gsemaj · c7b4182a4b · Export terrain meshes as FBX instead of OBJ · 2024-01-19 10:42:49 -08:00
FinnHornhoover · b70816fa64 · Non-updating rank table fix and tiebreaker logic (#1) · 2023-12-25 21:37:22 +01:00
9deba1956f · [rankendpoint] Add invocation instructions · 2023-12-24 21:35:35 +01:00
1a3a530c7f · [rankendpoint] Get DB path and endpoint route from env vars · 2023-12-24 20:57:05 +01:00
    Also reverted the use of main() that made it incompatible with uwsgi and
    flask run.
f62f3af483 · Revert "Autorun Flask app" · 2023-12-24 03:36:33 +01:00
    This reverts commit e0e0b3c5e3.
    Flask applications are not supposed to be run this way. We run the
    endpoints with uwsgi in prod or with the flask program for debugging.
    https://flask.palletsprojects.com/en/3.0.x/api/#flask.Flask.run
gsemaj · f02c960497 · Use same rank for tied scores · 2023-12-22 22:14:05 -08:00
gsemaj · e0e0b3c5e3 · Autorun Flask app · 2023-12-22 22:04:05 -08:00
gsemaj · 615bd3d9a3 · Add ranked endpoint server · 2023-12-22 21:48:18 -08:00
gsemaj · 4d6490933d · Add option for score capping · 2023-12-19 13:21:37 -05:00
gsemaj · 073666e6f4 · Add OG racing score conversion script · 2023-10-11 18:54:35 -04:00
gsemaj · 7af28d8e70 · fix submesh overlap · 2023-09-09 17:45:47 -07:00
gsemaj · 3bb3df7a2d · Split terrain into chunks of chunk_size · 2023-09-09 16:40:22 -07:00
gsemaj · f6dc2d690f · code to show all selected faces · 2023-09-09 11:24:47 -07:00
gsemaj · af2988e062 · fix error · 2023-09-09 10:43:43 -07:00
gsemaj · 03a3e21532 · Fix UVs · 2023-09-09 10:28:48 -07:00
gsemaj · 20d5f6231c · Export as OBJ · 2023-07-20 14:27:39 -04:00
gsemaj · 131297552e · Fix normals · 2023-07-20 14:27:24 -04:00
gsemaj · ef8ae1fb6e · Fix vertex positioning and outfile name · 2023-07-20 14:04:00 -04:00
gsemaj · 3aabb35f33 · Add terrain mesh extractor · 2023-07-13 16:18:09 -04:00
gsemaj · 7b750db9f9 · Update README · 2022-08-11 15:56:07 -04:00
gsemaj · aa564926a0 · Fix CameraPos translation · 2022-08-11 15:46:58 -04:00
gsemaj · 2496f04987 · Add semantic to f2a struct · 2022-08-11 13:21:33 -04:00
gsemaj · 916857edc3 · Unity 3 fixes · 2022-08-11 12:20:51 -04:00
gsemaj · 3791e889c8 · Add support for multiple subprograms · 2022-08-11 11:53:38 -04:00
gsemaj · d0e67d55c9 · Standalone support for basic fragment shaders · 2022-08-11 04:06:03 -04:00
gsemaj · 869d5b1976 · Add support for SetTexture · 2022-08-10 21:03:56 -04:00
gsemaj · 8725dd1e4e · Entry points and arg fix · 2022-08-10 21:01:51 -04:00
gsemaj · 8fbe59e5a1 · Fix rcp bug · 2022-08-10 20:09:59 -04:00
gsemaj · a53fb21621 · Add .gitignore · 2022-08-10 20:09:22 -04:00
gsemaj · 9dd5db86eb · Update README · 2022-08-10 20:06:02 -04:00
gsemaj · 4ace5f065f · Add dx2cg · 2022-08-10 19:47:13 -04:00
15 changed files with 1446 additions and 0 deletions

db_migration/ogracing.py Normal file

@@ -0,0 +1,136 @@
#!/usr/bin/env python3
# OG racing score conversion script
#
# This script is meant to update racing scoreboards from Retro-style scores
# to OG-accurate scores. Be careful to only run this once!
#
# The script will create a backup of your DB, as well as a log file
# containing a record of all the changes that were made. Make sure to
# preserve this log file in case you need to reference it in the future.
#
# If something goes wrong with the first invocation, you'll need to move the
# DB backup and log files out of the way before the script can be re-run.
#
# If all goes well, you should see different, OG-true scores in IZ scoreboards.
#
# Do not hesitate to ask the OF developers for assistance if necessary.
import sys
import os.path
import shutil
import logging
import json
from math import exp
import sqlite3
LOGFILE = 'ogracing.log'
DRY_RUN = False # set to True if testing the script
CAP_SCORES = True # set to False to disable capping scores to the IZ maximum
class EpData:
max_score = 0
pod_factor = 0
max_pods = 0
max_time = 0
time_factor = 0
scale_factor = 0
class RaceResult:
epid = 0
playerid = 0
score = 0
timestamp = 0
ring_count = 0
time = 0
def check_version(cur):
cur.execute("SELECT Value FROM Meta WHERE Key = 'DatabaseVersion';")
ver = cur.fetchone()[0]
if ver < 2:
sys.exit('fatal: you must first upgrade your DB version to 2 by running the server at least once')
def load_epinfo():
epinfo = {}
with open("drops.json", "r") as f:
dat = json.load(f)["Racing"]
for key in dat:
val = dat[key]
epdata = EpData()
epid = int(val["EPID"])
epdata.max_score = int(val["ScoreCap"])
epdata.pod_factor = float(val["PodFactor"])
epdata.max_pods = int(val["TotalPods"])
epdata.max_time = int(val["TimeLimit"])
epdata.time_factor = float(val["TimeFactor"])
epdata.scale_factor = float(val["ScaleFactor"])
epinfo[epid] = epdata
return epinfo
def get_results(cur):
results = []
cur.execute('SELECT EPID, PlayerID, Timestamp, RingCount, Time, Score FROM RaceResults;')
for x in cur.fetchall():
result = RaceResult()
result.epid = int(x[0])
result.playerid = int(x[1])
result.timestamp = int(x[2])
result.ring_count = int(x[3])
result.time = int(x[4])
result.score = int(x[5])
results.append(result)
return results
def process_result(cur, result, epinfo):
epdata = epinfo[result.epid]
pod_score = (epdata.pod_factor * result.ring_count) / epdata.max_pods
time_score = (epdata.time_factor * result.time) / epdata.max_time
newscore = int(exp(pod_score - time_score + epdata.scale_factor))
if CAP_SCORES and newscore > epdata.max_score:
logging.warning('score {} greater than max ({}) for epid {}, capping'.format(newscore, epdata.max_score, result.epid))
print('warning: score {} greater than max ({}) for epid {}, capping'.format(newscore, epdata.max_score, result.epid))
newscore = epdata.max_score
logging.info('* {} -> {} (EPID: {}, pods: {}, time: {})'.format(result.score, newscore, result.epid, result.ring_count, result.time))
if not DRY_RUN:
cur.execute('UPDATE RaceResults SET Score = ? WHERE (PlayerID, Timestamp) = (?, ?);', (newscore, result.playerid, result.timestamp))
def main(path):
if os.path.isfile(LOGFILE):
sys.exit('fatal: a log file named {} already exists. refusing to modify.'.format(LOGFILE))
logging.basicConfig(filename=LOGFILE, level=20, format='%(levelname)s: %(message)s')
if not os.path.isfile(path):
sys.exit('fatal: {} is not a file'.format(path))
bakpath = path + '.ogracing.bak'
if os.path.isfile(bakpath):
sys.exit('fatal: a DB backup named {} already exists. refusing to overwrite.'.format(bakpath))
shutil.copy(path, bakpath)
logging.info('saved database backup to {}'.format(bakpath))
print('saved database backup to {}'.format(bakpath))
epinfo = load_epinfo()
with sqlite3.connect(path) as db:
cur = db.cursor()
check_version(cur)
results = get_results(cur)
for result in results:
process_result(cur, result, epinfo)
logging.info('done.')
print('done.')
if __name__ == '__main__':
if len(sys.argv) != 2:
sys.exit('usage: {} database.db'.format(sys.argv[0]))
main(sys.argv[1])
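
The conversion in process_result is just an exponential over a pod/time balance. A minimal sketch of that formula, using hypothetical EP parameters (the real ones come from the "Racing" section of drops.json):

```python
from math import exp

pod_factor, total_pods = 5.0, 50     # hypothetical EP parameters
time_factor, time_limit = 3.0, 360   # hypothetical
scale_factor = 5.0                   # hypothetical

rings, seconds = 37, 295             # one hypothetical race result
pod_score = (pod_factor * rings) / total_pods
time_score = (time_factor * seconds) / time_limit
print(int(exp(pod_score - time_score + scale_factor)))  # -> 513
```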

dx2cg/.gitignore vendored Normal file

@@ -0,0 +1,2 @@
__pycache__

dx2cg/README.md Normal file

@@ -0,0 +1,11 @@
# dx2cg
Tools for converting d3d9 shader assembly to HLSL/Cg.
- `disassembler.py`: Takes in d3d9 assembly and gives back the HLSL equivalent.
- `swapper.py`: Searches a shader file for d3d9 assembly and calls the disassembler to replace it with HLSL.
- `main.py`: Executes the swapper on every file in a path, writing the changes to new files.
## Known issues
- Only vertex shaders with profile `vs_1_1` are supported
- Only fragment shaders with profile `ps_2_0` are supported
- Only a limited set of instructions (those used by FF and Unity 2.6) are supported
- Properties that don't begin with an underscore do not get captured as locals
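
The same pipeline can also be driven from Python; a minimal sketch (the file and folder names below are hypothetical):

```python
from swapper import process        # replaces d3d9 subprograms in one file, writes the result
from main import process_batch     # walks a folder tree

process("Diffuse.shader", "Diffuse_hlsl.shader")  # one file, explicit output name
process_batch("shaders", "_hlsl")                 # every file under ./shaders, suffixed output
```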

dx2cg/disassembler.py Normal file

@@ -0,0 +1,360 @@
#!/usr/bin/env python
# coding: utf-8
# d3d9 to cg crude disassembler
# ycc 08/08/2022
import re
import sys
legacy = False # True for 2.6
reserved = {
"_Time",
"_SinTime",
"_CosTime",
"_ProjectionParams",
"_PPLAmbient",
"_ObjectSpaceCameraPos",
"_ObjectSpaceLightPos0",
"_ModelLightColor0",
"_SpecularLightColor0",
"_Light2World0", "_World2Light0", "_Object2World", "_World2Object", "_Object2Light0",
"_LightDirectionBias",
"_LightPositionRange",
}
decls = {
"dcl_position": "float4 {0} = vdat.vertex;",
"dcl_normal": "float4 {0} = float4(vdat.normal, 0);",
"dcl_texcoord0": "float4 {0} = vdat.texcoord;",
"dcl_texcoord1": "float4 {0} = vdat.texcoord1;",
"dcl_color": "float4 {0} = vdat.color;",
"dcl_2d": "; // no operation",
"dcl": "float4 {0[0]}{0[1]} = pdat.{0[0]}{0[1]};",
"def": "const float4 {0} = float4({1}, {2}, {3}, {4});",
}
ops = {
"mov": "{0} = {1};",
"add": "{0} = {1} + {2};",
"mul": "{0} = {1} * {2};",
"mad": "{0} = {1} * {2} + {3};",
"dp4": "{0} = dot((float4){1}, (float4){2});",
"dp3": "{0} = dot((float3){1}, (float3){2});",
"min": "{0} = min({1}, {2});",
"max": "{0} = max({1}, {2});",
"rsq": "{0} = rsqrt({1});",
"frc": "{0} = float4({1}.x - (float)floor({1}.x), {1}.y - (float)floor({1}.y), {1}.z - (float)floor({1}.z), {1}.w - (float)floor({1}.w));",
"slt": "{0} = float4(({1}.x < {2}.x) ? 1.0f : 0.0f, ({1}.y < {2}.y) ? 1.0f : 0.0f, ({1}.z < {2}.z) ? 1.0f : 0.0f, ({1}.w < {2}.w) ? 1.0f : 0.0f);",
"sge": "{0} = float4(({1}.x >= {2}.x) ? 1.0f : 0.0f, ({1}.y >= {2}.y) ? 1.0f : 0.0f, ({1}.z >= {2}.z) ? 1.0f : 0.0f, ({1}.w >= {2}.w) ? 1.0f : 0.0f);",
"rcp": "{0} = ({1} == 0.0f) ? FLT_MAX : (({1} == 1.0f) ? {1} : (1 / {1}));",
"texld": "{0} = tex2D({2}, (float2){1});",
}
struct_a2v = """struct a2v {
\tfloat4 vertex : POSITION;
\tfloat3 normal : NORMAL;
\tfloat4 texcoord : TEXCOORD0;
\tfloat4 texcoord1 : TEXCOORD1;
\tfloat4 tangent : TANGENT;
\tfloat4 color : COLOR;
};
"""
v2f_postype = "POSITION" if legacy else "SV_POSITION"
struct_v2f = f"""struct v2f {{
\tfloat4 pos : {v2f_postype};
\tfloat4 t0 : TEXCOORD0;
\tfloat4 t1 : TEXCOORD1;
\tfloat4 t2 : TEXCOORD2;
\tfloat4 t3 : TEXCOORD3;
\tfloat fog : FOG;
\tfloat4 d0 : COLOR0;
\tfloat4 d1 : COLOR1;
}};
"""
struct_f2a = """struct f2a {
\tfloat4 c0 : COLOR0;
};
"""
cg_header = """CGPROGRAM
#include "UnityCG.cginc"
#pragma exclude_renderers xbox360 ps3 gles
"""
cg_footer = """ENDCG"""
vertex_func = """v2f vert(a2v vdat) {{
\tfloat4 r0, r1, r2, r3, r4;
\tfloat4 tmp;
\tv2f o;
{0}
\treturn o;
}}
"""
fragment_func = """f2a frag(v2f pdat) {{
\tfloat4 r0, r1, r2, r3, r4;
\tfloat4 tmp;
\tf2a o;
{0}
\treturn o;
}}
"""
def process_header(prog):
keywords = []
loctab = {}
locdecl = []
binds = []
i = 0
lighting = False
textures = 0
while i < len(prog):
line = prog[i]
if line.startswith("Keywords"):
keywords = re.findall("\"[\w\d]+\"", line)
del prog[i]
i = i - 1
elif line.startswith("Bind"):
binds.append(line)
del prog[i]
i = i - 1
elif line.startswith("Local") or line.startswith("Matrix"):
dec = line.split(' ')
key = int(dec[1][:-1])
if dec[2][0] == '[':
# singleton
val = dec[2][1:-1]
if val[0] == '_' and val not in reserved:
loctype = "float4" if dec[0] == "Local" else "float4x4"
locdecl.append(f"{loctype} {val};")
elif dec[2][0] == '(':
#components
vals = dec[2][1:-1].split(',')
for j, v in enumerate(vals):
if v[0] == '[':
vals[j] = v[1:-1]
if vals[j][0] == '_' and vals[j] not in reserved:
locdecl.append(f"float {vals[j]};")
val = f"float4({vals[0]},{vals[1]},{vals[2]},{vals[3]})"
lightval = re.match("glstate_light(\d)_([a-zA-Z]+)", val)
if lightval:
val = f"glstate.light[{lightval[1]}].{lightval[2]}"
lighting = True
elif val == "_ObjectSpaceCameraPos" and not legacy:
val = "mul(_World2Object, float4(_WorldSpaceCameraPos, 1.0f))"
elif val == "_ObjectSpaceLightPos0" and not legacy:
val = "mul(_World2Object, _WorldSpaceLightPos0)"
lighting = True
elif val == "glstate_lightmodel_ambient":
val = "glstate.lightmodel.ambient"
lighting = True
elif val.startswith("glstate_matrix_texture"):
val = f"glstate.matrix.texture[{val[-1]}]" if legacy else f"UNITY_MATRIX_TEXTURE{val[-1]}"
elif val == "glstate_matrix_mvp":
val = "glstate.matrix.mvp" if legacy else "UNITY_MATRIX_MVP"
elif val == "glstate_matrix_modelview0":
val = "glstate.matrix.modelview[0]" if legacy else "UNITY_MATRIX_MV"
elif val == "glstate_matrix_transpose_modelview0":
val = "glstate.matrix.transpose.modelview[0]" if legacy else "UNITY_MATRIX_T_MV"
elif val == "glstate_matrix_invtrans_modelview0":
val = "glstate.matrix.invtrans.modelview[0]" if legacy else "UNITY_MATRIX_IT_MV"
elif val.startswith("glstate"):
raise ValueError(f"Unrecognized glstate: {val}")
if dec[0] == "Local":
loctab[f"c{key}"] = val
elif dec[0] == "Matrix":
for offset in range(0,4):
loctab[f"c{key + offset}"] = f"{val}[{offset}]"
del prog[i]
i = i - 1
elif line.startswith("SetTexture"):
dec = line.split(' ')
if dec[2] != "{2D}":
raise ValueError(f"Unknown texture type {dec[2]}")
key = f"s{textures}"
val = dec[1][1:-1]
loctab[key] = val
locdecl.append(f"sampler2D {val};")
textures = textures + 1
del prog[i]
i = i - 1
i = i + 1
# print(loctab)
return (keywords, loctab, locdecl, binds, lighting)
def resolve_args(args, loctab, consts):
for a in range(0, len(args)):
arg = args[a]
neg = ""
if arg[0] == '-':
arg = arg[1:]
neg = "-"
# save swizzler!
dot = arg.find(".")
if dot > -1:
swiz = arg[dot:]
arg = arg[:dot]
else:
swiz = ""
if arg[0] == 'r':
pass
elif arg[0] == 'v':
pass
elif arg[0] == 't':
pass
elif arg[0] == 'c':
if arg not in consts:
arg = loctab[arg]
elif arg[0] == 's':
arg = loctab[arg]
elif arg[0] == 'o':
arg = f"o.{arg[1:].lower()}"
elif re.match("[+-]?([0-9]*[.])?[0-9]+", arg):
pass
else:
raise ValueError(f"Unknown arg {arg}")
args[a] = neg + arg + swiz
def decode(code, args):
if code in decls:
return [decls[code].format(*args)]
elif code in ops:
target = args[0]
if target == "o.fog":
return [ops[code].format(*args)]
dot = re.search("\.[xyzw]+", target)
if dot:
swiz = target[dot.start()+1:]
target = target[:dot.start()]
else:
swiz = "xyzw"
lines = [ops[code].format("tmp", *args[1:])]
for c in swiz:
lines.append(f"{target}.{c} = tmp.{c};")
return lines
else:
raise ValueError(f"Unknown opcode {code}")
def process_asm(asm, loctab):
shadertype = ""
if asm[0] == "\"vs_1_1":
shadertype = "vertex"
elif asm[0] == "\"ps_2_0":
shadertype = "fragment"
else:
raise ValueError(f"Unsupported shader type: {asm[0][1:]}")
consts = set()
translated = []
i = 1
while i < len(asm):
instruction = asm[i]
if instruction == "\"":
break
space = instruction.find(" ")
if space == -1:
code = instruction
args = []
else:
code = instruction[:space]
args = instruction[space+1:].split(", ")
if code == "def":
consts.add(args[0])
pp = code.find("_pp")
if pp > -1:
code = code[:pp]
resolve_args(args, loctab, consts)
disasm = decode(code, args)
# print(f"{instruction} \t==>\t{disasm}")
disasm.insert(0, f"// {instruction}")
translated.extend(disasm)
i = i + 1
return (shadertype, translated)
def disassemble(blocks):
shaders = {}
keywords = set()
locdecl = set()
binds = set()
lighting = False
for block in blocks:
asm = block.split('\n')[1:-1]
(kw, ltab, ldecl, bds, light) = process_header(asm)
keywords.update(kw)
locdecl.update(ldecl)
binds.update(bds)
lighting |= light
(shadertype, disasm) = process_asm(asm, ltab)
shaders[shadertype] = disasm
text = ""
if len(binds) > 0:
text += "BindChannels {\n"
for b in binds:
text += f"\t{b}\n"
text += "}\n"
if lighting:
text += "Lighting On\n"
text += cg_header
if len(keywords) > 0:
text += "#pragma multi_compile " + " ".join(keywords)
if "vertex" in shaders:
text += "#pragma vertex vert\n"
if "fragment" in shaders:
text += "#pragma fragment frag\n"
text += "\n"
if "vertex" in shaders:
text += struct_a2v + "\n"
text += struct_v2f + "\n"
if "fragment" in shaders:
text += struct_f2a + "\n"
text += "\n".join(locdecl) + "\n"
if "vertex" in shaders:
text += "\n" + vertex_func.format("\t" + "\n\t".join(shaders["vertex"]))
if "fragment" in shaders:
text += "\n" + fragment_func.format("\t" + "\n\t".join(shaders["fragment"]))
text += cg_footer
return text
if __name__ == "__main__":
if len(sys.argv) < 2:
print("Usage: disassembler.py <filename>")
else:
with open(sys.argv[1], "r") as fi:
buf = fi.read()
disasm = disassemble(buf.split('~'))
print(disasm)
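
For a sense of what the translation step produces: decode() routes every write through tmp so the destination swizzle can be applied per component. A quick check, with arguments as they look after resolve_args has run:

```python
from disassembler import decode

# a masked mov: the value lands only on r0's x, y and z components
print(decode("mov", ["r0.xyz", "v0"]))
# ['tmp = v0;', 'r0.x = tmp.x;', 'r0.y = tmp.y;', 'r0.z = tmp.z;']
```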

dx2cg/main.py Normal file

@@ -0,0 +1,37 @@
#!/usr/bin/env python
# coding: utf-8
import os
import sys
from swapper import process
def process_file(filename, suffix):
dot = filename.rfind(".")
if dot > -1:
outfile_name = filename[:dot] + suffix + filename[dot:]
else:
outfile_name = filename + suffix
return process(filename, outfile_name)
def process_batch(path, suffix="_hlsl"):
files = os.listdir(path)
for f in files:
if os.path.isdir(f):
process_batch(f"{path}/{f}", suffix)  # pass the suffix through when recursing
else:
try:
if process_file(f"{path}/{f}", suffix):
print(f"Processed {f}")
else:
print(f"Skipping {f}")
except ValueError as err:
print(f"Failed to process {f}: {err}")
if __name__ == "__main__":
if len(sys.argv) < 2:
print("Usage: main.py <folder> [outfile-suffix]")
elif len(sys.argv) == 2:
process_batch(sys.argv[1])
else:
process_batch(*sys.argv[1:3])

dx2cg/swapper.py Normal file

@@ -0,0 +1,77 @@
#!/usr/bin/env python
# coding: utf-8
# parser for replacing d3d9 subprograms in shaderlab files with HLSL/CG
# ycc 08/08/2022
import re
import sys
from disassembler import disassemble
tabs = 3
def indent(block):
lines = block.split('\n')
for i in range(0, len(lines)-1):
lines[i] = tabs * "\t" + lines[i]
return "\n".join(lines)
def find_closing_bracket(block, i):
count = 0
while i < len(block):
if block[i] == '{':
count = count + 1
if block[i] == '}':
count = count - 1
if count == 0:
return i
i = i + 1
raise ValueError(f"Block at {i} has no closing bracket")
def process_program(prog):
# print("processing:\n" + prog)
subprogs = []
subprog_index = prog.find("SubProgram \"d3d9")
while subprog_index > -1:
subprog_end_index = find_closing_bracket(prog, subprog_index)
subprog = prog[subprog_index:subprog_end_index+1]
subprogs.append(subprog)
prog = prog[subprog_end_index+1:]
subprog_index = prog.find("SubProgram \"d3d9")
if len(subprogs) < 1:
raise ValueError(f"Program has no d3d9 subprograms")
processed = disassemble(subprogs) + "\n"
return indent(processed)
def process_shader(shader):
buf = shader
processed = ''
program_index = buf.find("Program \"\"")
while program_index > -1:
processed = processed + buf[:program_index]
buf = buf[program_index:]
line = re.search("#LINE [0-9]+\n", buf)
if not line:
raise ValueError(f"Program at {program_index} has no #LINE marker")
end_index = line.end() + 1
program_section = buf[:end_index+1]
processed = processed + process_program(program_section)
buf = buf[end_index+1:]
program_index = buf.find("Program \"\"")
processed = processed + buf
return processed
def process(fn_in, fn_out):
with open(fn_in, "r") as fi:
buf = fi.read()
processed = process_shader(buf)
if buf != processed:
with open(fn_out, "w") as fo:
fo.write(processed)
return True
return False
if __name__ == "__main__":
if len(sys.argv) < 3:
print("Usage: swapper.py <file-in> <file-out>")
else:
process(*sys.argv[1:3])
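
find_closing_bracket is a plain depth counter, which is what lets process_program carve whole SubProgram blocks out of the shader text. A small usage sketch (the input string is hypothetical):

```python
from swapper import find_closing_bracket

block = 'SubProgram "d3d9 " { Keywords { } "vs_1_1 ..." }'
start = block.find("{")
end = find_closing_bracket(block, start)
print(block[start:end + 1])  # the full { ... } body, nested braces included
```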

json2xdb/.gitignore vendored Normal file

@@ -0,0 +1,2 @@
.vscode

json2xdb/README.md Normal file

@@ -0,0 +1,9 @@
# json2xdb
This script populates a functional, near-complete FusionFall XDB tabledata server.
You need an existing MySQL server (an old version; 5.5.42 seems to work with the FusionFall client). This can be set up pretty easily using Docker.
You also need a copy of xdt.json from the [OpenFusion tabledata repository](https://github.com/OpenFusionProject/tabledata).
It is interesting to note that the JSON tabledata file is really just a Unity ScriptableObject containing all the XDT/XDB state packaged into a FusionFall client build. The devs likely kept a central tabledata server around (XDB) and, whenever it was time for a client build, they fetched it into local binary files (XDT) before finally packing them into the XdtTableScript asset.
I would like to thank my girlfriend for showing me the wonders of `tqdm`. It really helped being able to see that things were happening.

json2xdb/docker-compose.yml Normal file

@@ -0,0 +1,12 @@
# Uses default credentials
version: '3.1'
services:
db:
image: mysql:5.5.42
restart: always
ports:
- 3306:3306
environment:
MYSQL_ROOT_PASSWORD: mypassword
MYSQL_DATABASE: tabledata

json2xdb/json2xdb.py Normal file

@@ -0,0 +1,158 @@
# %%
import json
import sys
from tqdm import tqdm
import mysql.connector
# %%
def get_db_column_name(xdt_field_name):
# special case 1
if xdt_field_name == "m_iitemID":
return "ItemID"
try:
# find the first uppercase character and split the string there
idx_of_first_uppercase = next(i for i, c in enumerate(xdt_field_name) if c.isupper())
except StopIteration:
# special case 2
if xdt_field_name == "m_ibattery":
idx_of_first_uppercase = 3
else:
print(f"Could not find uppercase character in {xdt_field_name}")
sys.exit(1)
prefix = xdt_field_name[:idx_of_first_uppercase]
db_field_name = xdt_field_name[idx_of_first_uppercase:]
return db_field_name
# %%
def table_entry_to_tuple(table_entry):
vals = []
for field_name in table_entry:
field = table_entry[field_name]
vals.append(field)
return tuple(vals)
def flatten_table_entry(table_entry):
flattened_entry = {}
for field_name in table_entry:
field = table_entry[field_name]
if type(field) == list:
for i, item in enumerate(field):
flattened_entry[f"{field_name}{i}"] = item
else:
flattened_entry[field_name] = field
return flattened_entry
def handle_dict_table(table_entries, identifier_key, items_key):
new_table_entries = []
for table_entry in table_entries:
identifier = table_entry[identifier_key]
items = table_entry[items_key]
for item in items:
new_item = {}
new_item[identifier_key] = identifier # needs to be first
for field_name in item:
new_item[field_name] = item[field_name]
new_table_entries.append(new_item)
return new_table_entries
# %%
def gen_column_sql(field_name, field_value):
field_type = type(field_value)
if field_type == int:
return f"`{field_name}` INT,"
elif field_type == float:
return f"`{field_name}` FLOAT,"
elif field_type == str:
# TODO maybe ascii vs unicode?
return f"`{field_name}` TEXT,"
else:
print(f"Unknown type {field_type} for field {field_name}, skipping")
return ""
# %%
def table_create(cursor, table_name, xdt_template_entry):
sql = f"CREATE TABLE {table_name} ("
sql += "id INT AUTO_INCREMENT PRIMARY KEY,"
for field_name in xdt_template_entry:
db_field_name = get_db_column_name(field_name)
val = xdt_template_entry[field_name]
sql += gen_column_sql(db_field_name, val)
sql = sql[:-1] # remove trailing comma
sql += ")"
cursor.execute(sql)
# %%
def table_populate(cursor, table_name, table_entries):
# generate the SQL first
sql = f"INSERT INTO {table_name} ("
template_entry = table_entries[0]
for field_name in template_entry:
db_field_name = get_db_column_name(field_name)
sql += f"`{db_field_name}`,"
sql = sql[:-1] # remove trailing comma
sql += ") VALUES ("
for field_name in template_entry:
sql += f"%s,"
sql = sql[:-1] # remove trailing comma
sql += ")"
vals = [table_entry_to_tuple(entry) for entry in table_entries]
try:
cursor.executemany(sql, vals)
except Exception as e:
print(sql)
print(vals)
raise e
# %%
def process_xdt_table(cursor, root, table_name, mappings):
table = root[table_name]
for (i, subtable_name) in tqdm(enumerate(table), desc=table_name, total=len(table)):
db_table_name = mappings[table_name][i]
#print(f"{subtable_name} => {db_table_name}")
table_entries = table[subtable_name]
if db_table_name == "CutSceneText":
table_entries = handle_dict_table(table_entries, "m_iEvent", "m_TextElement")
table_entries = [flatten_table_entry(entry) for entry in table_entries]
# clear the table
drop_sql = f"DROP TABLE IF EXISTS {db_table_name}"
cursor.execute(drop_sql)
# create the table
table_create(cursor, db_table_name, table_entries[0])
table_populate(cursor, db_table_name, table_entries)
# %%
def main(conn, xdt_path):
with open("mappings.json", 'r') as f:
mappings = json.load(f)
with open(xdt_path, 'r') as f:
root = json.load(f)
cursor = conn.cursor()
for table_name in root:
if "Table" in table_name:
process_xdt_table(cursor, root, table_name, mappings)
conn.commit()
# %%
if __name__ == "__main__":
if len(sys.argv) != 2:
print("Usage: python3 json2xdb.py <path to xdt file>")
sys.exit(1)
xdt_path = sys.argv[1]
conn = mysql.connector.connect(
host="localhost",
user="root",
password="mypassword",
database="tabledata"
)
main(conn, xdt_path)
# %%
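
A quick sanity check of the column-name mangling (the first field name below is hypothetical; the second is special case 1 from the code). Importing the module requires mysql-connector and tqdm to be installed:

```python
from json2xdb import get_db_column_name

print(get_db_column_name("m_iNpcNumber"))  # -> "NpcNumber" (hypothetical field)
print(get_db_column_name("m_iitemID"))     # -> "ItemID" (special case 1)
```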

json2xdb/mappings.json Normal file

@@ -0,0 +1,43 @@
{
"m_pNpcTable": [ "NpcTable", "NpcString", "NpcIcon", "BarkerTable", "NpcMesh", "NpcGroup", "ServiceString" ],
"m_pBackItemTable": [ "ItemBackTable", "ItemBackString", "ItemBackIcon", "ItemBackSound", "ItemBackMesh" ],
"m_pFaceItemTable": [ "ItemFaceTable", "ItemFaceString", "ItemFaceIcon", "ItemFaceSound", "ItemFaceMesh" ],
"m_pGlassItemTable": [ "ItemGlassTable", "ItemGlassString", "ItemGlassIcon", "ItemGlassSound", "ItemGlassMesh" ],
"m_pHatItemTable": [ "ItemHatTable", "ItemHatString", "ItemHatIcon", "ItemHatSound", "ItemHatMesh" ],
"m_pHeadItemTable": [ "ItemHeadTable", "ItemHeadString", "ItemHeadIcon", "ItemHeadSound", "ItemHeadMesh" ],
"m_pPantsItemTable": [ "ItemPantsTable", "ItemPantsString", "ItemPantsIcon", "ItemPantsSound", "ItemPantsMesh" ],
"m_pShirtsItemTable": [ "ItemShirtTable", "ItemShirtString", "ItemShirtIcon", "ItemShirtSound", "ItemShirtMesh" ],
"m_pShoesItemTable": [ "ItemShoesTable", "ItemShoesString", "ItemShoesIcon", "ItemShoesSound", "ItemShoesMesh" ],
"m_pWeaponItemTable": [ "ItemWpnTable", "ItemWpnString", "ItemWpnIcon", "ItemWpnSound", "ItemWpnMesh" ],
"m_pVehicleItemTable": [ "ItemVehicleTable", "ItemVehicleString", "ItemVehicleIcon", "ItemVehicleSound", "ItemVehicleMesh" ],
"m_pNameTable": [ "FirstNameTable", "MiddleNameTable", "LastNameTable" ],
"m_pChatTable": [ "1stChatTable", "2ndChatTable", "3rdChatTable", "ChatTable", "ChatString", "ClassTable", "EmoteLink" ],
"m_pAvatarTable": [ "AvatarTable", "AvatarGrowTable" ],
"m_pEmoteTable": [ "EmoteTable", "EmoteTexture" ],
"m_pGeneralItemTable": [ "ItemGeneralTable", "ItemGeneralString", "ItemGeneralIcon" ],
"m_pChestItemTable": [ "ItemChestTable", "ItemChestString", "ChestIconTable" ],
"m_pQuestItemTable": [ "ItemQuestTable", "ItemQuestString", "ItemQuestIcon" ],
"m_pAnimationTable": [ "M", "Mob", "Nano" ],
"m_pGuideTable": [ "GuideTable", "GuideStringTable" ],
"m_pInstanceTable": [ "InstanceTable", "WarpTable", "NameString" ],
"m_pMessageTable": [ "SystemMessage" ],
"m_pMissionTable": [ "MissionField", "MissionStringTable", "Journal_ID", "Reward" ],
"m_pNanoTable": [ "NanoTable", "NanoString", "NanoMesh", "NanoIcon", "NanoTuneTable", "NanoTuneString", "NanoTuneIcon" ],
"m_pShinyTable": [ "ShinyTable", "ShinyMesh", "ShinyString" ],
"m_pSkillTable": [ "SkillTable", "SkillIcon", "SkillBuffEffect", "SkillString" ],
"m_pConditionTable": [ "StatusTable" ],
"m_pTransportationTable": [ "TransportationTable", "TransportationMesh", "WarpLocationTable", "TransportationWarpString", "WyvernLocationTable", "TransportationWyvernString", "TransIcon" ],
"m_pVendorTable": [ "VendorTable" ],
"m_pXComTable": [ "XComTable" ],
"m_pCreationItemTable": [ "ItemCreationTable" ],
"m_pFirstUseTable": [ "FirstUseTable", "FirstUseString" ],
"m_pRulesTable": [ "RulesTable", "RulesString" ],
"m_pHelpTable": [ "HelpTable", "HelpString", "DescriptionTable", "Description", "DescriptionString" ],
"m_pCutSceneTable": [ "CutSceneText" ],
"m_pCombiningTable": [ "CombiningTable" ],
"m_pFilterTable": [ "UnfilterTable", "FilterTable", "NamefilterTable" ],
"m_pClassTable": [ "ClassType", "ClassString", "ClassWpnType", "ClassIcon" ],
"m_pEnchantTable": [ "EnchantTable" ],
"m_pClassSkillTable": [ "ClassSkill_Charging", "ClassSkill_Manager", "ClassSkill_Skill", "ClassSkill_String", "ClassSkill_BuffEffect", "ClassSkill_Icon", "ClassSkill_Sound", "Condition_Character" ],
"m_pSkillBookTable": [ "ItemSkillBookTable", "ItemSkillBookString", "ItemSkillBookIcon" ]
}
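
The array order here is load-bearing: process_xdt_table pairs each subtable with a DB table name by position, i.e. mappings[table_name][i]. For example:

```python
import json

with open("mappings.json") as f:
    mappings = json.load(f)

# the second subtable of m_pNanoTable lands in the NanoString DB table
print(mappings["m_pNanoTable"][1])  # -> "NanoString"
```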

json2xdb/prototyping.ipynb Normal file

@@ -0,0 +1,283 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 59,
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"import sys\n",
"from tqdm import tqdm\n",
"import mysql.connector"
]
},
{
"cell_type": "code",
"execution_count": 60,
"metadata": {},
"outputs": [],
"source": [
"def get_db_column_name(xdt_field_name):\n",
" # special case 1\n",
" if xdt_field_name == \"m_iitemID\":\n",
" return \"ItemID\"\n",
" \n",
" try:\n",
" # find the first uppercase character and split the string there\n",
" idx_of_first_uppercase = next(i for i, c in enumerate(xdt_field_name) if c.isupper())\n",
" except StopIteration:\n",
" # special case 2\n",
" if xdt_field_name == \"m_ibattery\":\n",
" idx_of_first_uppercase = 3\n",
" else:\n",
" print(f\"Could not find uppercase character in {xdt_field_name}\")\n",
" sys.exit(1)\n",
" prefix = xdt_field_name[:idx_of_first_uppercase]\n",
" db_field_name = xdt_field_name[idx_of_first_uppercase:]\n",
" return db_field_name"
]
},
{
"cell_type": "code",
"execution_count": 61,
"metadata": {},
"outputs": [],
"source": [
"def table_entry_to_tuple(table_entry):\n",
" vals = []\n",
" for field_name in table_entry:\n",
" field = table_entry[field_name]\n",
" vals.append(field)\n",
" return tuple(vals)\n",
"\n",
"def flatten_table_entry(table_entry):\n",
" flattened_entry = {}\n",
" for field_name in table_entry:\n",
" field = table_entry[field_name]\n",
" if type(field) == list:\n",
" for i, item in enumerate(field):\n",
" flattened_entry[f\"{field_name}{i}\"] = item\n",
" else:\n",
" flattened_entry[field_name] = field\n",
" return flattened_entry\n",
"\n",
"def handle_dict_table(table_entries, identifier_key, items_key):\n",
" new_table_entries = []\n",
" for table_entry in table_entries:\n",
" identifier = table_entry[identifier_key]\n",
" items = table_entry[items_key]\n",
" for item in items:\n",
" new_item = {}\n",
" new_item[identifier_key] = identifier # needs to be first\n",
" for field_name in item:\n",
" new_item[field_name] = item[field_name]\n",
" new_table_entries.append(new_item)\n",
" return new_table_entries\n"
]
},
{
"cell_type": "code",
"execution_count": 62,
"metadata": {},
"outputs": [],
"source": [
"def gen_column_sql(field_name, field_value):\n",
" field_type = type(field_value)\n",
" if field_type == int:\n",
" return f\"`{field_name}` INT,\"\n",
" elif field_type == float:\n",
" return f\"`{field_name}` FLOAT,\"\n",
" elif field_type == str:\n",
" # TODO maybe ascii vs unicode?\n",
" return f\"`{field_name}` TEXT,\"\n",
" else:\n",
" print(f\"Unknown type {field_type} for field {field_name}, skipping\")\n",
" return \"\""
]
},
{
"cell_type": "code",
"execution_count": 63,
"metadata": {},
"outputs": [],
"source": [
"def table_create(cursor, table_name, xdt_template_entry):\n",
" sql = f\"CREATE TABLE {table_name} (\"\n",
" sql += \"id INT AUTO_INCREMENT PRIMARY KEY,\"\n",
" for field_name in xdt_template_entry:\n",
" db_field_name = get_db_column_name(field_name)\n",
" val = xdt_template_entry[field_name]\n",
" sql += gen_column_sql(db_field_name, val)\n",
" sql = sql[:-1] # remove trailing comma\n",
" sql += \")\"\n",
" cursor.execute(sql)"
]
},
{
"cell_type": "code",
"execution_count": 64,
"metadata": {},
"outputs": [],
"source": [
"def table_populate(cursor, table_name, table_entries):\n",
" # generate the SQL first\n",
" sql = f\"INSERT INTO {table_name} (\"\n",
" template_entry = table_entries[0]\n",
" for field_name in template_entry:\n",
" db_field_name = get_db_column_name(field_name)\n",
" sql += f\"`{db_field_name}`,\"\n",
" sql = sql[:-1] # remove trailing comma\n",
" sql += \") VALUES (\"\n",
" for field_name in template_entry:\n",
" sql += f\"%s,\"\n",
" sql = sql[:-1] # remove trailing comma\n",
" sql += \")\"\n",
" \n",
" vals = [table_entry_to_tuple(entry) for entry in table_entries]\n",
" try:\n",
" cursor.executemany(sql, vals)\n",
" except Exception as e:\n",
" print(sql)\n",
" print(vals)\n",
" raise e"
]
},
{
"cell_type": "code",
"execution_count": 65,
"metadata": {},
"outputs": [],
"source": [
"def process_xdt_table(cursor, root, table_name, mappings):\n",
" table = root[table_name]\n",
" for (i, subtable_name) in tqdm(enumerate(table), desc=table_name, total=len(table)):\n",
" db_table_name = mappings[table_name][i]\n",
" #print(f\"{subtable_name} => {db_table_name}\")\n",
" \n",
" table_entries = table[subtable_name]\n",
" if db_table_name == \"CutSceneText\":\n",
" table_entries = handle_dict_table(table_entries, \"m_iEvent\", \"m_TextElement\")\n",
" table_entries = [flatten_table_entry(entry) for entry in table_entries]\n",
"\n",
" # clear the table\n",
" drop_sql = f\"DROP TABLE IF EXISTS {db_table_name}\"\n",
" cursor.execute(drop_sql)\n",
"\n",
" # create the table\n",
" table_create(cursor, db_table_name, table_entries[0])\n",
" table_populate(cursor, db_table_name, table_entries)"
]
},
{
"cell_type": "code",
"execution_count": 66,
"metadata": {},
"outputs": [],
"source": [
"def main(conn, xdt_path):\n",
" with open(\"mappings.json\", 'r') as f:\n",
" mappings = json.load(f)\n",
" with open(xdt_path, 'r') as f:\n",
" root = json.load(f)\n",
" cursor = conn.cursor()\n",
" for table_name in root:\n",
" if \"Table\" in table_name:\n",
" process_xdt_table(cursor, root, table_name, mappings)\n",
" conn.commit()"
]
},
{
"cell_type": "code",
"execution_count": 67,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"m_pAnimationTable: 100%|██████████| 3/3 [00:00<00:00, 9.30it/s]\n",
"m_pAvatarTable: 100%|██████████| 2/2 [00:00<00:00, 5.76it/s]\n",
"m_pChatTable: 100%|██████████| 7/7 [00:00<00:00, 9.55it/s]\n",
"m_pEmoteTable: 100%|██████████| 2/2 [00:00<00:00, 12.84it/s]\n",
"m_pGuideTable: 100%|██████████| 2/2 [00:00<00:00, 11.23it/s]\n",
"m_pInstanceTable: 100%|██████████| 3/3 [00:00<00:00, 9.99it/s]\n",
"m_pMessageTable: 100%|██████████| 1/1 [00:00<00:00, 8.64it/s]\n",
"m_pMissionTable: 100%|██████████| 4/4 [00:01<00:00, 2.57it/s]\n",
"m_pNameTable: 100%|██████████| 3/3 [00:00<00:00, 9.74it/s]\n",
"m_pNanoTable: 100%|██████████| 7/7 [00:00<00:00, 9.92it/s]\n",
"m_pNpcTable: 100%|██████████| 7/7 [00:01<00:00, 4.86it/s]\n",
"m_pShinyTable: 100%|██████████| 3/3 [00:00<00:00, 9.07it/s]\n",
"m_pSkillTable: 100%|██████████| 4/4 [00:00<00:00, 11.26it/s]\n",
"m_pConditionTable: 100%|██████████| 1/1 [00:00<00:00, 11.58it/s]\n",
"m_pTransportationTable: 100%|██████████| 7/7 [00:00<00:00, 10.35it/s]\n",
"m_pVendorTable: 100%|██████████| 1/1 [00:00<00:00, 4.68it/s]\n",
"m_pXComTable: 100%|██████████| 1/1 [00:00<00:00, 9.32it/s]\n",
"m_pBackItemTable: 100%|██████████| 5/5 [00:00<00:00, 9.05it/s]\n",
"m_pFaceItemTable: 100%|██████████| 5/5 [00:00<00:00, 9.85it/s]\n",
"m_pGlassItemTable: 100%|██████████| 5/5 [00:00<00:00, 8.95it/s]\n",
"m_pHatItemTable: 100%|██████████| 5/5 [00:00<00:00, 10.58it/s]\n",
"m_pHeadItemTable: 100%|██████████| 5/5 [00:00<00:00, 9.31it/s]\n",
"m_pPantsItemTable: 100%|██████████| 5/5 [00:00<00:00, 8.16it/s]\n",
"m_pShirtsItemTable: 100%|██████████| 5/5 [00:00<00:00, 7.10it/s]\n",
"m_pShoesItemTable: 100%|██████████| 5/5 [00:00<00:00, 6.49it/s]\n",
"m_pWeaponItemTable: 100%|██████████| 5/5 [00:00<00:00, 6.26it/s]\n",
"m_pVehicleItemTable: 100%|██████████| 5/5 [00:00<00:00, 9.41it/s]\n",
"m_pGeneralItemTable: 100%|██████████| 3/3 [00:00<00:00, 11.56it/s]\n",
"m_pChestItemTable: 100%|██████████| 3/3 [00:00<00:00, 6.83it/s]\n",
"m_pQuestItemTable: 100%|██████████| 3/3 [00:00<00:00, 11.19it/s]\n",
"m_pCreationItemTable: 100%|██████████| 1/1 [00:00<00:00, 12.50it/s]\n",
"m_pFirstUseTable: 100%|██████████| 2/2 [00:00<00:00, 10.72it/s]\n",
"m_pRulesTable: 100%|██████████| 2/2 [00:00<00:00, 8.38it/s]\n",
"m_pHelpTable: 100%|██████████| 5/5 [00:00<00:00, 9.11it/s]\n",
"m_pCutSceneTable: 100%|██████████| 1/1 [00:00<00:00, 11.51it/s]\n",
"m_pCombiningTable: 100%|██████████| 1/1 [00:00<00:00, 13.88it/s]\n",
"m_pFilterTable: 100%|██████████| 3/3 [00:00<00:00, 9.08it/s]\n",
"m_pClassTable: 100%|██████████| 4/4 [00:00<00:00, 10.94it/s]\n",
"m_pEnchantTable: 100%|██████████| 1/1 [00:00<00:00, 11.75it/s]\n",
"m_pClassSkillTable: 100%|██████████| 8/8 [00:00<00:00, 9.37it/s]\n",
"m_pSkillBookTable: 100%|██████████| 3/3 [00:00<00:00, 10.67it/s]\n"
]
}
],
"source": [
"xdt_path = \"tdata/xdt.json\"\n",
"conn = mysql.connector.connect(\n",
" host=\"localhost\",\n",
" user=\"root\",\n",
" password=\"mypassword\",\n",
" database=\"tabledata\"\n",
")\n",
"main(conn, xdt_path)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.12"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

rankendpoint.py Normal file

@@ -0,0 +1,157 @@
# This script serves an HTTP endpoint that provides the racing scores.
#
# Example invocation for testing:
# $ RANKENDPOINT_DBPATH=/path/to/database.db RANKENDPOINT_ROUTE=/getranks flask --app rankendpoint.py run
#
# Example invocation in production (behind a properly configured gateway like nginx):
# $ RANKENDPOINT_DBPATH=/path/to/database.db RANKENDPOINT_ROUTE=/getranks uwsgi \
# -s localhost:3031 --manage-script-name --mount /=rankendpoint:app --plugin python3
from flask import Flask, request
app = Flask(__name__)
import sqlite3
import sys
import os
header = "SUCCESS"
db_path = os.environ.get('RANKENDPOINT_DBPATH')
route = os.environ.get('RANKENDPOINT_ROUTE')
if None in (db_path, route):
sys.exit('must set RANKENDPOINT_DBPATH and RANKENDPOINT_ROUTE environment variables')
# Opens database in read-only mode
# Checking same thread disabled for now, which is fine since we never modify anything
try:
db = sqlite3.connect('file:{}?mode=ro'.format(db_path), uri=True, check_same_thread=False)
cur = db.cursor()
except Exception as ex:
print(ex)
sys.exit()
#db.set_trace_callback(print)
def fetch_ranks(epid, date, num):
sql = """
SELECT
PBRaceResults.PlayerID,
Players.FirstName,
Players.LastName,
PBRaceResults.Score
FROM (
SELECT
ROW_NUMBER() OVER (
PARTITION BY RaceResults.PlayerID
ORDER BY
RaceResults.Score DESC,
RaceResults.RingCount DESC,
RaceResults.Time ASC
) AS PersonalOrder,
RaceResults.*
FROM RaceResults
WHERE EPID=? AND DATETIME(Timestamp, 'unixepoch') > DATETIME('now', ?)
) AS PBRaceResults
INNER JOIN Players ON PBRaceResults.PlayerID=Players.PlayerID AND PBRaceResults.PersonalOrder=1
ORDER BY
PBRaceResults.Score DESC,
PBRaceResults.RingCount DESC,
PBRaceResults.Time ASC
"""
if num > -1:
sql += "LIMIT ?"
args = (epid, date, num)
else:
args = (epid, date)
cur = db.execute(sql + ";", args)
rows = cur.fetchall()
return rows
def fetch_my_ranks(pcuid, epid, date):
sql = """
SELECT
RaceResults.PlayerID,
Players.FirstName,
Players.LastName,
RaceResults.Score
FROM RaceResults
INNER JOIN Players ON RaceResults.PlayerID=Players.PlayerID
WHERE RaceResults.PlayerID=? AND EPID=? AND DATETIME(Timestamp, 'unixepoch') > DATETIME('now', ?)
ORDER BY RaceResults.Score DESC
LIMIT 1;
"""
args = (pcuid, epid, date)
cur = db.execute(sql, args)
rows = cur.fetchall()
return rows
def get_score_entries(data, name):
# Uncomment if you want placeholders in top 10 ranks ala Retro
#if not name.startswith("my"):
# while len(data) < 10:
# data.append(((999, 'hehe', 'dong', 1)))
scores="<{}>\n".format(name)
rank = 1
last_score = -1
for item in data:
score = item[3]
if score == last_score:
rank -= 1
scores+='\t<score>PCUID="{}" Score="{}" Rank="{}" FirstName="{}" LastName="{}"</score>\n'.format(item[0], score, rank, item[1], item[2])
rank += 1
last_score = score
scores+="</{}>\n".format(name)
return scores
# route should be something like /getranks
@app.route(f'{route}', methods=['POST'])
def rankings():
#print("PCUID:", request.form['PCUID'])
#print("EP_ID:", request.form['EP_ID'])
# Input Validation
try:
pcuid = int(request.form['PCUID'])
epid = int(request.form['EP_ID'])
num = 10 if 'NUM' not in request.form else int(request.form['NUM'])
except ValueError as verr:
return "Request param does not convert to int", 400
except Exception as ex:
return "Error converting request param to int", 500
# EP_ID must be between 1 and 33. also, ep #6 doesn't exist
if not (1 <= epid <= 33) or (epid == 6):
return "Invalid EP_ID", 400
# Get everything we need from the DB...
myday = fetch_my_ranks(pcuid, epid, '-1 day')
day = fetch_ranks(epid, '-1 day', num)
myweek = fetch_my_ranks(pcuid, epid, '-7 day')
week = fetch_ranks(epid, '-7 day', num)
mymonth = fetch_my_ranks(pcuid, epid, '-1 month')
month = fetch_ranks(epid, '-1 month', num)
myalltime = fetch_my_ranks(pcuid, epid, '-999 year')
alltime = fetch_ranks(epid, '-999 year', num)
# Slap that all into an "xml"...
xmlbody = ""
xmlbody += get_score_entries(myday, "myday")
xmlbody += get_score_entries(day, "day")
xmlbody += get_score_entries(myweek, "myweek")
xmlbody += get_score_entries(week, "week")
xmlbody += get_score_entries(mymonth, "mymonth")
xmlbody += get_score_entries(month, "month")
xmlbody += get_score_entries(myalltime, "myalltime")
xmlbody += get_score_entries(alltime, "alltime")
# and send it off!
return header + xmlbody
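
Two details worth noting: the ROW_NUMBER() window keeps only each player's personal best per period (PersonalOrder = 1), and get_score_entries assigns dense ranks, so tied scores share a rank and the next distinct score takes the following one. The tie logic, distilled into a standalone sketch:

```python
def dense_ranks(scores):  # scores already sorted descending
    ranks, rank, last = [], 1, None
    for s in scores:
        if s == last:
            rank -= 1     # tie: reuse the previous rank
        ranks.append(rank)
        rank += 1
        last = s
    return ranks

print(dense_ranks([100, 100, 90]))  # -> [1, 1, 2]
```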

[terrain mesh extractor script] Normal file

@@ -0,0 +1,154 @@
from unitypackff.asset import Asset
from unitypackff.environment import UnityEnvironment
import bpy
import bmesh
import os
dongpath = (os.path.expandvars('%userprofile%') + "/AppData/LocalLow/Unity/Web Player/Cache/FusionFall")
env = UnityEnvironment(base_path=dongpath)
outpath = (os.path.expandvars('%userprofile%') + "/3D Objects/FFTerrainMeshes")
def uvs_from_vert(uv_layer, v):
uvs = []
for l in v.link_loops:
uv_data = l[uv_layer]
uvs.append(uv_data.uv)
return uvs
def delete_all_objects():
for i in bpy.context.scene.objects:
i.select_set(True)
bpy.ops.object.delete()
def rip_terrain_mesh(f, outpath, clear=False):
dong = Asset.from_file(f, environment=env)
for k, v in dong.objects.items():
if v.type == 'TerrainData':
terrainData = dong.objects[k].read()
terrain_width = terrainData['m_Heightmap']['m_Width'] - 1
terrain_height = terrainData['m_Heightmap']['m_Height'] - 1
scale_x = terrainData['m_Heightmap']['m_Scale']['x']
scale_z = terrainData['m_Heightmap']['m_Scale']['z']
scale_y = terrainData['m_Heightmap']['m_Scale']['y']
# create the terrain
bpy.ops.mesh.primitive_grid_add(x_subdivisions=terrain_width, y_subdivisions=terrain_height, size=128, enter_editmode=True, align='WORLD', location=(0, 0, 0), scale=(1, 1, 1))
context = bpy.context
grid = context.edit_object
bpy.ops.object.mode_set(mode='EDIT')
bm = bmesh.from_edit_mesh(context.edit_object.data)
bm.verts.ensure_lookup_table()
for index, height in enumerate(terrainData['m_Heightmap']['m_Heights']):
# scale height
height_norm = height / (2 ** 15 - 2)
bm.verts[index].co.z = height_norm * scale_y
# pivot and scale x
bm.verts[index].co.x += terrain_width / 2
bm.verts[index].co.x *= scale_x
# pivot and scale z
bm.verts[index].co.y += terrain_height / 2
bm.verts[index].co.y *= scale_z
#print(f"{bm.verts[index].co.x}, {bm.verts[index].co.y}, {bm.verts[index].co.z}")
indices = []
shift_amt = abs(bm.verts[0].co.x - bm.verts[1].co.x)
uv_layer = bm.loops.layers.uv.active
uv_shift_amt = 1 / 256
# gather m_Shifts positions
for shift in terrainData['m_Heightmap']['m_Shifts']:
shift_index = shift['y'] + shift['x'] * 129
indices.append(shift_index)
v = bm.verts[shift_index]
flags = shift['flags'] # bits: +X -X +Y -Y
if flags & 0b1000: # +X
v.co.x += shift_amt
for uv in uvs_from_vert(uv_layer, v):
uv.x += uv_shift_amt
if flags & 0b0100: # -X
v.co.x -= shift_amt
for uv in uvs_from_vert(uv_layer, v):
uv.x -= uv_shift_amt
if flags & 0b0010: # +Y
v.co.y += shift_amt
for uv in uvs_from_vert(uv_layer, v):
uv.y += uv_shift_amt
if flags & 0b0001: # -Y
v.co.y -= shift_amt
for uv in uvs_from_vert(uv_layer, v):
uv.y -= uv_shift_amt
# apply triangulate modifier
mod = grid.modifiers.new("Triangulate", 'TRIANGULATE')
mod.quad_method = 'FIXED' # triangle orientation
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.modifier_apply(modifier="Triangulate")
bpy.ops.object.mode_set(mode="EDIT")
bm = bmesh.from_edit_mesh(context.edit_object.data)
bm.verts.ensure_lookup_table()
# flip diagonally
for v in bm.verts:
tmp = v.co.x
v.co.x = v.co.y
v.co.y = tmp
# flip normals
for f in bm.faces:
f.normal_flip()
# select vertex chunks and separate
verts = {}
for x in range(129):
for y in range(129):
idx = y + x * 129
v = bm.verts[idx]
verts[idx] = v
v.select = False
for f in v.link_faces:
f.select = False
chunk_size = 8
for x in range(128 // chunk_size):
for y in range(128 // chunk_size):
for i in range(x * chunk_size, x * chunk_size + chunk_size + 1):
for j in range(y * chunk_size, y * chunk_size + chunk_size + 1):
idx = j + i * 129
v = verts[idx]
v.select = True
bm.select_mode = {'VERT', 'EDGE', 'FACE'}
bm.select_flush_mode()
bpy.context.tool_settings.mesh_select_mode = (False, False, True)
bpy.ops.mesh.duplicate()
bpy.ops.mesh.separate(type='SELECTED')
bpy.ops.mesh.select_all(action='DESELECT')
# delete main
bpy.ops.object.mode_set(mode="OBJECT")
bpy.ops.object.select_all(action='DESELECT')
grid.select_set(True)
bpy.ops.object.delete()
# export
bpy.ops.object.select_all(action='SELECT')
name = terrainData['m_Name']
outfile = f"{name}.fbx"
bpy.ops.export_scene.fbx(filepath=os.path.join(outpath, outfile))
if(clear):
delete_all_objects()
dongs = os.listdir(dongpath)
for dongname in dongs:
if not dongname.endswith("resourceFile"):
continue
assets = os.listdir(os.path.join(dongpath, dongname))
for assetname in assets:
if not assetname.startswith("CustomAssetBundle"):
continue
with open(os.path.join(dongpath, dongname, assetname), "rb") as f:
outdir = os.path.join(outpath, dongname, assetname)
os.makedirs(outdir, exist_ok=True)
rip_terrain_mesh(f, outdir, True)
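
The geometry bookkeeping is easier to see in isolation: raw heights are 16-bit samples normalized by (2**15 - 2) and scaled by m_Scale.y, and the 129-vertex-per-side grid is indexed as y + x * 129. A minimal sketch:

```python
GRID = 129  # FusionFall terrain heightmaps are 129 x 129 vertices

def world_z(raw_height, scale_y):
    return (raw_height / (2 ** 15 - 2)) * scale_y

def vert_index(x, y):
    return y + x * GRID  # matches the shift and chunking loops above

print(world_z(16382, 200.0))  # -> ~100.0, i.e. half of scale_y
print(vert_index(1, 0))       # -> 129
```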

[terrain mesh extractor README] Normal file

@@ -0,0 +1,5 @@
# Terrain Mesh Extractor
Blender + UPFF script to import terrain data as a mesh into Blender, then apply the shifts property to applicable vertices.
- Exports as FBX
- The FBX filenames are the index of the TerrainData object within the asset file
- Folders for asset bundles that had no terrain objects will be empty