mirror of
https://github.com/mon/ifstools.git
synced 2026-05-09 12:24:07 -05:00
(vibe coded) remove cache functionality as repack is now fast enough
This commit is contained in:
parent
732ad54395
commit
3aff2890c3
|
|
@ -46,13 +46,6 @@ class GenericFile(Node):
|
|||
self.size = len(ret)
|
||||
return ret
|
||||
|
||||
@property
def needs_preload(self):
    """Plain files never require a preparation step before repacking."""
    return False
|
||||
|
||||
def preload(self, **kwargs):
    """Repack preparation hook; intentionally a no-op for plain files."""
    return None
|
||||
|
||||
def repack(self, manifest, data_blob, tqdm_progress, **kwargs):
|
||||
if tqdm_progress:
|
||||
tqdm_progress.write(self.full_path)
|
||||
|
|
|
|||
|
|
@ -1,161 +1,123 @@
|
|||
import errno
|
||||
import functools
|
||||
import time
|
||||
import timeit
|
||||
from io import BytesIO
|
||||
from os import mkdir, utime
|
||||
from os.path import dirname, getmtime, isfile, join
|
||||
from struct import pack, unpack
|
||||
from typing import cast
|
||||
|
||||
import lxml.etree as etree
|
||||
from kbinxml import KBinXML
|
||||
from PIL import Image
|
||||
from tqdm import tqdm
|
||||
|
||||
from .. import utils
|
||||
from . import lz77
|
||||
from .generic_file import GenericFile
|
||||
from .image_decoders import cachable_formats, encode_png, image_formats
|
||||
|
||||
|
||||
class ImageFile(GenericFile):
    """A texture entry in an IFS archive (cache-based variant).

    Decodes the packed texture to PNG on extract and re-encodes it on
    repack, keeping LZ77-compressed bytes in an on-disk ``_cache`` folder
    so unchanged textures are not recompressed.
    """

    def __init__(self, ifs_data, obj, parent = None, path = '', name = ''):
        # Direct construction is unsupported; see upgrade_generic below.
        raise Exception('ImageFile must be instantiated from existing GenericFile with ImageFile.upgrade_generic')

    @classmethod
    def upgrade_generic(cls, gen_file, image_elem, fmt, compress):
        """Convert an existing GenericFile into an ImageFile in place.

        image_elem: texturelist XML element with 'uvrect'/'imgrect' children.
        fmt: texture format name (a key into image_formats).
        compress: compression scheme (e.g. 'avslz') or a falsy value for none.
        """
        # Rebind the instance's class rather than copying attributes over.
        self = gen_file
        self.__class__ = cls

        self.format = fmt
        self.compress = compress

        # all values are multiplied by 2, odd values have never been seen
        self.uvrect = [x//2 for x in self._split_ints(image_elem.find('uvrect').text)]
        self.imgrect = [x//2 for x in self._split_ints(image_elem.find('imgrect').text)]
        # (width, height) of the stored image rect
        self.img_size = (
            self.imgrect[1]-self.imgrect[0],
            self.imgrect[3]-self.imgrect[2]
        )
        # (width, height) of the UV rect (usually slightly smaller)
        self.uv_size = (
            self.uvrect[1]-self.uvrect[0],
            self.uvrect[3]-self.uvrect[2]
        )

    def extract(self, base, use_cache = True, **kwargs):
        """Extract as an image file, also priming the compression cache with
        the raw packed bytes when they came out of the IFS compressed."""
        GenericFile.extract(self, base, **kwargs)

        if use_cache and self.compress and self.from_ifs and self.format in cachable_formats:
            self.write_cache(GenericFile._load_from_ifs(self, **kwargs), base)

    def _load_from_ifs(self, crop_to_uvrect = False, raw_pixels = False, **kwargs):
        """Decode the packed texture bytes.

        Returns PNG bytes, or ((width, height), raw bytes) when raw_pixels.
        Raises NotImplementedError for an unknown texture format.
        """
        data = GenericFile._load_from_ifs(self, **kwargs)

        if self.compress == 'avslz':
            # 8-byte big-endian header: uncompressed size, compressed size
            uncompressed_size = unpack('>I', data[:4])[0]
            compressed_size = unpack('>I', data[4:8])[0]
            # sometimes the headers are missing: not actually compressed
            # The 2 extra u32 are moved to the end of the file
            # Quality file format.
            if len(data) == compressed_size + 8:
                data = data[8:]
                data = lz77.decompress(data)
                assert len(data) == uncompressed_size
            else:
                data = data[8:] + data[:8]

        if self.format in image_formats:
            decoder = image_formats[self.format]['decoder']
            im = decoder(self, data)
        else:
            raise NotImplementedError('Unknown format {}'.format(self.format))

        if crop_to_uvrect:
            # uvrect is relative to imgrect; translate into image coordinates
            start_x = self.uvrect[0] - self.imgrect[0]
            start_y = self.uvrect[2] - self.imgrect[2]
            dims = (
                start_x,
                start_y,
                start_x + self.uv_size[0],
                start_y + self.uv_size[1],
            )
            im = im.crop(dims)

        if raw_pixels:
            return (im.width, im.height), im.tobytes()
        else:
            return encode_png(im)

    def repack(self, manifest, data_blob, tqdm_progress, **kwargs):
        """Append the packed texture to data_blob (16-byte aligned) and
        record 'offset size timestamp' for it in the manifest XML."""
        if tqdm_progress:
            tqdm_progress.write(self.full_path)
            tqdm_progress.update(1)

        if self.compress == 'avslz':
            # preload() has already compressed and cached the bytes
            data = self.read_cache()
        else:
            data = self._load_im()

        # offset, size, timestamp
        elem = etree.SubElement(manifest, self.packed_name)
        elem.attrib['__type'] = '3s32'
        elem.text = '{} {} {}'.format(len(data_blob.getvalue()), len(data), self.time)
        data_blob.write(data)
        # 16 byte alignment
        align = len(data) % 16
        if align:
            data_blob.write(b'\0' * (16-align))

    def _load_im(self):
        """Load the extracted image from disk and encode it into this
        texture's IFS pixel format (RGBA-converted first)."""
        data = self.load()

        im = Image.open(BytesIO(data))
        if im.mode != 'RGBA':
            im = im.convert('RGBA')

        if self.format in image_formats:
            encoder = image_formats[self.format]['encoder']
            if encoder is None:
                # everything else becomes argb8888rev
                encoder = image_formats['argb8888rev']['encoder']
            data = encoder(self, im)
        else:
            raise NotImplementedError('Unknown format {}'.format(self.format))

        return data

    @property
    def needs_preload(self):
        """True when no up-to-date cache file exists for this texture."""
        cache = join(dirname(self.disk_path), '_cache', self._packed_name)
        if isfile(cache):
            mtime = int(getmtime(cache))
            # cache at least as new as the source: still valid
            if self.time <= mtime:
                return False
        return True

    def preload(self, use_cache = True, tex_suffix = None, **kwargs):
        """Compress the texture and store it in the on-disk cache
        (driven in parallel by the repack loop)."""
        if not self.needs_preload and use_cache:
            return
        # Not cached/out of date, compressing
        data = self._load_im()
        uncompressed_size = len(data)
        data = lz77.compress(data)
        compressed_size = len(data)
        data = pack('>I', uncompressed_size) + pack('>I', compressed_size) + data
        self.write_cache(data)

    def write_cache(self, data, base = None):
        """Write data to the _cache folder next to the extracted file.

        base: extraction root; ignored (self.base_path used) when the file
        does not come from an IFS.
        """
        if not self.from_ifs:
            base = self.base_path
        cache = join(base, self.path, '_cache', self._packed_name)
        utils.mkdir_silent(dirname(cache))
        with open(cache, 'wb') as f:
            f.write(data)
        # stamp with the source mtime so needs_preload can compare times
        utime(cache, (self.time, self.time))

    def read_cache(self):
        """Return the previously cached compressed bytes for this texture."""
        cache = join(dirname(self.disk_path), '_cache', self._packed_name)
        with open(cache, 'rb') as f:
            return f.read()
|
||||
|
||||
from io import BytesIO
|
||||
from struct import pack, unpack
|
||||
|
||||
import lxml.etree as etree
|
||||
from PIL import Image
|
||||
|
||||
from . import lz77
|
||||
from .generic_file import GenericFile
|
||||
from .image_decoders import encode_png, image_formats
|
||||
|
||||
|
||||
class ImageFile(GenericFile):
    """A texture entry in an IFS archive.

    Decodes the packed texture to PNG (or raw pixels) on extract and
    re-encodes it — LZ77-compressing with a size header when required —
    on repack. Compression can be staged ahead of time via preload().
    """

    def __init__(self, ifs_data, obj, parent = None, path = '', name = ''):
        # Direct construction is unsupported; TypeError is more precise than
        # a bare Exception and remains catchable by `except Exception`.
        raise TypeError('ImageFile must be instantiated from existing GenericFile with ImageFile.upgrade_generic')

    @classmethod
    def upgrade_generic(cls, gen_file, image_elem, fmt, compress):
        """Convert an existing GenericFile into an ImageFile in place.

        gen_file: the GenericFile to upgrade (mutated directly).
        image_elem: texturelist XML element with 'uvrect'/'imgrect' children.
        fmt: texture format name (a key into image_formats).
        compress: compression scheme ('avslz') or a falsy value for none.
        """
        # Rebind the instance's class rather than copying attributes over.
        self = gen_file
        self.__class__ = cls

        self.format = fmt
        self.compress = compress

        # all values are multiplied by 2, odd values have never been seen
        self.uvrect = [x//2 for x in self._split_ints(image_elem.find('uvrect').text)]
        self.imgrect = [x//2 for x in self._split_ints(image_elem.find('imgrect').text)]
        # (width, height) of the stored image rect
        self.img_size = (
            self.imgrect[1]-self.imgrect[0],
            self.imgrect[3]-self.imgrect[2]
        )
        # (width, height) of the UV rect (usually slightly smaller)
        self.uv_size = (
            self.uvrect[1]-self.uvrect[0],
            self.uvrect[3]-self.uvrect[2]
        )

    def _load_from_ifs(self, crop_to_uvrect = False, raw_pixels = False, **kwargs):
        """Decode the packed texture bytes.

        Returns PNG bytes, or ((width, height), raw bytes) when raw_pixels.
        Raises NotImplementedError for an unknown texture format and
        ValueError if the decompressed size does not match its header.
        """
        data = GenericFile._load_from_ifs(self, **kwargs)

        if self.compress == 'avslz':
            # 8-byte big-endian header: uncompressed size, compressed size
            uncompressed_size = unpack('>I', data[:4])[0]
            compressed_size = unpack('>I', data[4:8])[0]
            # sometimes the headers are missing: not actually compressed
            # The 2 extra u32 are moved to the end of the file
            # Quality file format.
            if len(data) == compressed_size + 8:
                data = data[8:]
                data = lz77.decompress(data)
                # explicit check instead of assert: still validates under -O
                if len(data) != uncompressed_size:
                    raise ValueError('Decompressed size mismatch for {}'.format(self.full_path))
            else:
                data = data[8:] + data[:8]

        if self.format in image_formats:
            decoder = image_formats[self.format]['decoder']
            im = decoder(self, data)
        else:
            raise NotImplementedError('Unknown format {}'.format(self.format))

        if crop_to_uvrect:
            # uvrect is relative to imgrect; translate into image coordinates
            start_x = self.uvrect[0] - self.imgrect[0]
            start_y = self.uvrect[2] - self.imgrect[2]
            dims = (
                start_x,
                start_y,
                start_x + self.uv_size[0],
                start_y + self.uv_size[1],
            )
            im = im.crop(dims)

        if raw_pixels:
            return (im.width, im.height), im.tobytes()
        else:
            return encode_png(im)

    def _build_packed(self):
        """Return the bytes exactly as they will be stored in the IFS blob:
        the encoded texture, prefixed with an 8-byte big-endian
        uncompressed/compressed size header when using 'avslz'."""
        data = self._load_im()
        if self.compress == 'avslz':
            uncompressed_size = len(data)
            compressed = lz77.compress(data)
            # single pack call is equivalent to two concatenated '>I' packs
            data = pack('>II', uncompressed_size, len(compressed)) + compressed
        return data

    def preload(self, **kwargs):
        """Stage this file's packed bytes in memory.

        Compress in parallel; the actual write loop in repack() runs
        serially and consumes self._packed.
        """
        self._packed = self._build_packed()

    def repack(self, manifest, data_blob, tqdm_progress, **kwargs):
        """Append the packed texture to data_blob (16-byte aligned) and
        record 'offset size timestamp' for it in the manifest XML."""
        if tqdm_progress:
            tqdm_progress.write(self.full_path)
            tqdm_progress.update(1)

        data = getattr(self, '_packed', None)
        if data is None:
            # preload() was skipped for this file; pack on demand
            data = self._build_packed()

        # offset, size, timestamp
        elem = etree.SubElement(manifest, self.packed_name)
        elem.attrib['__type'] = '3s32'
        # data_blob is append-only here, so tell() equals its current length;
        # avoids getvalue(), which copies the whole blob on every file
        # (quadratic over a repack)
        elem.text = '{} {} {}'.format(data_blob.tell(), len(data), self.time)
        data_blob.write(data)
        # 16 byte alignment
        align = len(data) % 16
        if align:
            data_blob.write(b'\0' * (16-align))

        # free the staged bytes once written
        self._packed = None

    def _load_im(self):
        """Load the extracted image from disk and encode it into this
        texture's IFS pixel format (RGBA-converted first)."""
        data = self.load()

        im = Image.open(BytesIO(data))
        if im.mode != 'RGBA':
            im = im.convert('RGBA')

        if self.format in image_formats:
            encoder = image_formats[self.format]['encoder']
            if encoder is None:
                # everything else becomes argb8888rev
                encoder = image_formats['argb8888rev']['encoder']
            data = encoder(self, im)
        else:
            raise NotImplementedError('Unknown format {}'.format(self.format))

        return data
|
||||
|
|
|
|||
|
|
@ -131,8 +131,6 @@ class IFS:
|
|||
extract_manifest = False, path = None, rename_dupes = False, **kwargs):
|
||||
if path is None:
|
||||
path = self.folder_out
|
||||
if tex_only:
|
||||
kwargs['use_cache'] = False
|
||||
utils.mkdir_silent(path)
|
||||
utime(path, (self.time, self.time))
|
||||
|
||||
|
|
@ -254,12 +252,14 @@ class IFS:
|
|||
|
||||
def _repack_tree(self, progress = True, **kwargs):
|
||||
files = self.tree.all_files
|
||||
needs_preload = [f for f in files if f.needs_preload or not kwargs['use_cache']]
|
||||
to_compress = [f for f in files if isinstance(f, ImageFile)]
|
||||
|
||||
# LZ77 compress and PIL decode both release the GIL, so threads scale.
|
||||
# PNG decode (PIL) and LZ77 compress (Rust) both release the GIL, so
|
||||
# threads scale. The actual write loop is serial; this stages each
|
||||
# file's packed bytes in memory.
|
||||
with ThreadPoolExecutor() as ex:
|
||||
futures = {ex.submit(f.preload, **kwargs): f for f in needs_preload}
|
||||
with tqdm(total=len(needs_preload), desc='Caching', disable=not progress) as bar:
|
||||
futures = {ex.submit(f.preload, **kwargs): f for f in to_compress}
|
||||
with tqdm(total=len(to_compress), desc='Compressing', disable=not progress) as bar:
|
||||
for fut in as_completed(futures):
|
||||
fut.result()
|
||||
f = futures[fut]
|
||||
|
|
|
|||
|
|
@ -37,7 +37,8 @@ def main():
|
|||
parser.add_argument('-c', '--canvas', action='store_true', help='dump the image canvas as defined by the texturelist.xml in _canvas.png', dest='dump_canvas')
|
||||
parser.add_argument('--bounds', action='store_true', help='draw image bounds on the exported canvas in red', dest='draw_bbox')
|
||||
parser.add_argument('--uv', action='store_true', help='crop images to uvrect (usually 1px smaller than imgrect). Forces --tex-only', dest='crop_to_uvrect')
|
||||
parser.add_argument('--no-cache', action='store_false', help='ignore texture cache, recompress all', dest='use_cache')
|
||||
parser.add_argument('--no-cache', action='store_true', dest='no_cache_deprecated',
|
||||
help=argparse.SUPPRESS)
|
||||
parser.add_argument('--rename-dupes', action='store_true',
|
||||
help='if two files have the same name but differing case (A.png vs a.png) rename the second as "a (1).png" to allow both to be extracted on Windows')
|
||||
parser.add_argument('-m', '--extract-manifest', action='store_true', help='extract the IFS manifest for inspection', dest='extract_manifest')
|
||||
|
|
@ -54,6 +55,10 @@ def main():
|
|||
|
||||
args = parser.parse_args()
|
||||
|
||||
if args.no_cache_deprecated:
|
||||
print("WARNING: --no-cache is deprecated and has no effect; the texture cache has been removed.")
|
||||
delattr(args, 'no_cache_deprecated')
|
||||
|
||||
if args.crop_to_uvrect:
|
||||
args.tex_only = True
|
||||
|
||||
|
|
|
|||
Loading…
Reference in New Issue
Block a user