Last active
May 26, 2024 03:56
-
-
Save osyu/75375e7ccab39abb2ac3930d729e5ebd to your computer and use it in GitHub Desktop.
LBP2+ font converter
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| import argparse | |
| import zlib | |
| import cv2 | |
| import numpy as np | |
| import skfmm | |
| from dataclasses import dataclass | |
| from io import BytesIO | |
| from multiprocessing import cpu_count | |
| from struct import calcsize, pack, unpack | |
| from fontTools.misc.transform import Identity | |
| from fontTools.pens.freetypePen import FreeTypePen | |
| from fontTools.ttLib import TTFont | |
| from tqdm.contrib.concurrent import process_map | |
DESCRIPTION = "Convert TTF/OTF fonts to the LBP2+ font format."
# Em size, in pixels, at which glyphs are rasterized before SDF conversion.
FONT_SIZE = 100
# Downscale factor applied to the rasterized SDF before cropping/storage.
SIZE_DIV = 4
# Advance-width units per em in the output format (see draw_glyph).
ADV_UNIT = 1600
# Data version written to the output header (>= 0x3ab implies a
# compressed-flag byte follows the glyph table; see read_fnt).
OUT_DVER = 0x3b3
# Default SDF range in em (CLI -r/--range).
RANGE_DEF = 0.125
# Default SDF offset in em (CLI -o/--offset).
OFFSET_DEF = 0.005
# Square canvas edge for rasterization; leaves margin around the glyph.
IM_SIZE = FONT_SIZE * 3
# Pen origin (x, y) inside the canvas.
POSITION = (FONT_SIZE, FONT_SIZE)
# File header: data version, data offset, padding (u32 each, big-endian).
STRUCT_HEADER = '>III'
# Glyph record: codepoint u16, advance u16, left s8, top s8,
# width u8, height u8, data offset u32, next-glyph index u16
# (field order per read_fnt's unpack).
STRUCT_GLYPH = '>HHbbBBIH'
def read_struct(f, s):
    """Read exactly one struct of format `s` from stream `f` and unpack it."""
    size = calcsize(s)
    return unpack(s, f.read(size))


def write_struct(f, s, *v):
    """Pack values `v` using format `s` and write the bytes to stream `f`."""
    packed = pack(s, *v)
    f.write(packed)
@dataclass
class Glyph:
    """One glyph record of the LBP font format."""
    # Horizontal advance, in ADV_UNIT-per-em units (packed as u16).
    advance: int
    # Left bearing of the bitmap relative to the pen origin, in
    # SIZE_DIV-downscaled pixels (packed as a signed byte).
    left: int
    # Top bearing of the bitmap above the baseline, same units (signed byte).
    top: int
    # Bitmap width in downscaled pixels (unsigned byte).
    width: int
    # Bitmap height in downscaled pixels (unsigned byte).
    height: int
    # zlib-compressed 8-bit SDF bitmap bytes.  NOTE(review): read_fnt
    # temporarily stores the payload's file offset (an int) in this field
    # before replacing it with the actual bytes.
    data: bytes
def draw_glyph(name, gset, upm, _range, offset, scale):
    """Render one glyph into a zlib-compressed signed-distance-field bitmap.

    name:   glyph name within the font's glyph set.
    gset:   fontTools glyph set (from TTFont.getGlyphSet()).
    upm:    font units per em (head.unitsPerEm).
    _range: SDF range in em (distance span mapped onto 0..255).
    offset: SDF offset in em, added to distances before clamping.
    scale:  (x, y) glyph scale factors.

    Returns a Glyph carrying the advance, bearings, bitmap size and the
    compressed uint8 SDF data.
    """
    ttglyph = gset[name]
    # Advance width rescaled from font units to ADV_UNIT-per-em units.
    advance = round((ttglyph.width / upm)
                    * ADV_UNIT * scale[0])
    pen = FreeTypePen(gset)
    ttglyph.draw(pen)
    # Place the outline at POSITION inside an IM_SIZE x IM_SIZE canvas,
    # scaled so one em spans FONT_SIZE pixels (times the user scale).
    tf = Identity.translate(*POSITION).scale(
        *(FONT_SIZE / upm * x for x in scale))
    phi = pen.array(IM_SIZE, IM_SIZE, transform=tf)
    if not phi.any():
        # Blank glyph (e.g. space): emit a minimal 3x3 all-zero bitmap.
        return Glyph(advance, 0, 0, 3, 3,
                     zlib.compress(bytes(9)))
    # Shift coverage so the outline sits at the zero level set, then
    # compute the signed distance with the fast marching method.
    phi -= 0.5
    sd = skfmm.distance(phi, dx=1)
    range_px = _range * FONT_SIZE
    offset_px = offset * FONT_SIZE
    sd += offset_px
    # Clamp to +/- range_px and remap linearly into 0..255.
    sd = np.clip(sd, -range_px, range_px)
    sd = ((sd / range_px / 2) + 0.5) * 255
    # Downsample the field by SIZE_DIV.
    sd = cv2.resize(sd, (IM_SIZE // SIZE_DIV,) * 2,
                    interpolation=cv2.INTER_LINEAR)
    sd = np.rint(sd).astype(np.uint8)
    # Crop to the nonzero bounding box.  NOTE(review): the +2 end index
    # keeps one extra row/column past the last nonzero pixel (numpy
    # clips slices at the array edge) -- presumably deliberate padding;
    # confirm before changing.
    rows = np.any(sd, axis=1)
    cols = np.any(sd, axis=0)
    rmin, rmax = np.where(rows)[0][[0, -1]]
    cmin, cmax = np.where(cols)[0][[0, -1]]
    sd = sd[rmin:rmax + 2, cmin:cmax + 2]
    # Bearings of the cropped bitmap relative to the pen origin, in
    # downscaled pixels (y axis flipped: top is measured above baseline).
    left = cmin - (POSITION[0] // SIZE_DIV)
    top = ((IM_SIZE - POSITION[1]) // SIZE_DIV) - rmin
    data = zlib.compress(sd.tobytes())
    return Glyph(advance, left, top,
                 *sd.shape[::-1], data)
def read_fnt(f):
    """Parse an LBP 'FNTb' font stream into a {codepoint: Glyph} dict.

    f: binary, seekable file-like object positioned at the start of the
       font data.

    Returns glyphs whose `data` holds the raw (still zlib-compressed)
    SDF payload bytes.  Raises ValueError on a malformed or unsupported
    file.  (The original used `assert` for these checks, which vanishes
    under `python -O`; explicit raises keep validation unconditional and
    match the existing ValueError style.)
    """
    if f.read(4) != b'FNTb':
        raise ValueError("not a LBP font file")
    data_ver, data_offset, padding = \
        read_struct(f, STRUCT_HEADER)
    data_offset += 16  # stored offset is relative to the end of the header
    if padding != 0:
        raise ValueError("nonzero padding in font header")
    # Skip the per-char index (u16 entries) and page bitmap (u32 entries);
    # write_fnt rebuilds both from scratch.
    f.seek(read_struct(f, '>I')[0] * 2, 1)
    f.seek(read_struct(f, '>I')[0] * 4, 1)
    (glyph_count,) = read_struct(f, '>I')
    glyphs = {}
    for _ in range(glyph_count):
        codepoint, advance, left, top, \
            width, height, offset, _next = \
            read_struct(f, STRUCT_GLYPH)
        # Temporarily stash the data-area offset in `data`; it is
        # replaced with the payload bytes below.
        glyphs[codepoint] = Glyph(advance,
                                  left, top, width, height, offset)
    compressed = False
    if data_ver >= 0x3ab:
        (compressed,) = read_struct(f, '>?')
    if not compressed:
        raise ValueError(
            "uncompressed fonts are not supported")
    if data_offset != f.tell():
        raise ValueError("data offset does not match glyph table end")
    for codepoint, glyph in glyphs.items():
        f.seek(data_offset + glyph.data)
        # Each payload record is a u16 total length (self-inclusive)
        # followed by the compressed bitmap.
        glyph.data = f.read(
            read_struct(f, '>H')[0] - 2)
        if len(glyph.data) == 0:
            raise ValueError(f"empty glyph data for U+{codepoint:04X}")
    if f.read(1):
        raise ValueError("trailing data after glyph payloads")
    return glyphs
def write_fnt(f, glyphs):
    """Serialize a {codepoint: Glyph} mapping into the LBP 'FNTb' format.

    f:      writable, seekable binary stream positioned at offset 0.
    glyphs: mapping of codepoint -> Glyph; written in codepoint order.

    Layout: magic, header, a 0x80-entry first-glyph index keyed on
    codepoint % 0x80, a 512-bit bitmap of used 0x80-codepoint pages,
    the glyph table, a compressed flag, then length-prefixed payloads.

    Fix vs. original: the payload BytesIO was closed only on the happy
    path (leaked if packing raised); it now lives in a `with` block.
    """
    glyphs = sorted(glyphs.items())
    mapping = {}             # codepoint -> glyph table index
    index = [0xffff] * 0x80  # first glyph index per (codepoint % 0x80)
    pages_used = [0] * 16    # bitmap over 512 pages of 0x80 codepoints
    for i, codepoint in enumerate(x[0] for x in glyphs):
        mapping[codepoint] = i
        char = codepoint % 0x80
        page = codepoint // 0x80
        if index[char] == 0xffff:
            index[char] = i
        pages_used[page // 32] |= 1 << (page % 32)
    f.write(b'FNTb')
    # Data offset is written as 0 here and patched once known (below).
    write_struct(f, STRUCT_HEADER, OUT_DVER, 0, 0)
    write_struct(f, f'>I{len(index)}H',
                 len(index), *index)
    write_struct(f, f'>I{len(pages_used)}I',
                 len(pages_used), *pages_used)
    with BytesIO() as f_data:
        write_struct(f, '>I', len(glyphs))
        for codepoint, glyph in glyphs:
            # Index of the next present codepoint sharing this one's
            # value mod 0x80 -- presumably chains index lookups.
            for c in range(codepoint, 0x10000, 0x80)[1:]:
                if c in mapping:
                    _next = mapping[c]
                    break
            else:
                _next = 0xffff
            write_struct(f, STRUCT_GLYPH,
                         codepoint, glyph.advance, glyph.left,
                         glyph.top, glyph.width, glyph.height,
                         f_data.tell(), _next)
            # Payload record: u16 total length (self-inclusive) + data.
            write_struct(f_data, '>H', len(glyph.data) + 2)
            f_data.write(glyph.data)
        write_struct(f, '>?', True)  # compressed flag
        data_offset = f.tell()
        # Patch the header's data-offset field (relative to header end).
        f.seek(8)
        write_struct(f, '>I', data_offset - 16)
        f.seek(data_offset)
        # getvalue() copies, but avoids a BufferError if a getbuffer()
        # view were still exported when the BytesIO closes.
        f.write(f_data.getvalue())
def uni_range(val):
    """Parse a comma-separated list of hex codepoint ranges.

    val: string like '20-7f' or '20-7f,a0,3040-309f'.  Each item is a
    single hex codepoint or a 'lo-hi' pair, both within 0000-ffff.

    Returns a list of `range` objects with inclusive upper bounds made
    exclusive (so '41' yields range(0x41, 0x42)).  Raises ValueError on
    malformed input; argparse reports it as an invalid argument.
    (Improvement over the original: the raises carry messages, so direct
    callers get a usable diagnostic instead of a bare ValueError.)
    """
    ranges = []
    for part in val.split(','):
        bounds = [int(x, 16) for x in part.split('-')]
        if not 1 <= len(bounds) <= 2:
            raise ValueError(f"bad range syntax: {part!r}")
        for c in bounds:
            if not 0 <= c <= 0xffff:
                raise ValueError(f"codepoint out of range: {c:x}")
        if len(bounds) == 1:
            bounds.append(bounds[0])
        elif bounds[0] > bounds[1]:
            raise ValueError(f"range start exceeds end: {part!r}")
        ranges.append(range(bounds[0], bounds[1] + 1))
    return ranges
def draw_glyph_wrap(args):
    """Adapter for process_map: unpack (codepoint, *draw_glyph args) and
    pair the codepoint with the rendered Glyph."""
    codepoint, *rest = args
    return (codepoint, draw_glyph(*rest))
if __name__ == '__main__':
    # Command-line front end: parse options, optionally load a base
    # .fnt, render all requested glyphs in parallel, write the output.
    parser = argparse.ArgumentParser(description=DESCRIPTION)
    parser.add_argument('infile', type=str,
                        help="path to input file")
    parser.add_argument('outfile', type=str,
                        help="path to output file")
    parser.add_argument('-b', '--base', type=str,
                        metavar='FILE', help="path to base file")
    parser.add_argument('-r', '--range', type=float,
                        help="SDF range in em (default: %(default)s)",
                        metavar='NUM', default=RANGE_DEF)
    parser.add_argument('-o', '--offset', type=float,
                        help="SDF offset in em (default: %(default)s)",
                        metavar='NUM', default=OFFSET_DEF)
    parser.add_argument('-s', '--scale', type=float,
                        help="glyph scale (default: %(default)s)",
                        nargs=2, metavar=('X', 'Y'), default=[1, 1])
    # argparse applies `type` to string defaults, so '20-ffff' is parsed
    # by uni_range into a list of range objects.
    parser.add_argument('-u', '--uni', type=uni_range,
                        help="unicode range(s) (default: %(default)s)",
                        metavar='LIST', default='20-ffff')
    parser.add_argument('-p', '--proc', type=int,
                        help="max processes (default: cpu count)",
                        metavar='NUM', default=cpu_count())
    args = parser.parse_args()
    font = TTFont(args.infile)
    cmap = font.getBestCmap()    # codepoint -> glyph name
    gset = font.getGlyphSet()
    upm = font['head'].unitsPerEm
    glyphs = {}
    # Seed with the base font's glyphs (if given); freshly converted
    # glyphs overwrite matching codepoints below.
    if args.base is not None:
        with open(args.base, 'rb') as f:
            with BytesIO(f.read()) as b:
                glyphs.update(read_fnt(b))
    # Drop codepoints outside every requested unicode range.
    for codepoint in tuple(cmap.keys()):
        for _range in args.uni:
            if codepoint in _range:
                break
        else:
            del cmap[codepoint]
    pargs = [(c, n, gset, upm, args.range, args.offset,
              args.scale) for c, n in cmap.items()]
    # NOTE(review): chunksize is set to the worker count; that is a
    # throughput tuning choice, not a correctness requirement.
    pmap = process_map(draw_glyph_wrap, pargs,
                       max_workers=args.proc, chunksize=args.proc)
    for codepoint, glyph in pmap:
        glyphs[codepoint] = glyph
    # Serialize to memory first, then write the file in one shot.
    with BytesIO() as b:
        write_fnt(b, glyphs)
        with open(args.outfile, 'wb') as f:
            f.write(b.getbuffer())
Author
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
Install requirements:

    pip install fonttools numpy opencv-python scikit-fmm tqdm

Convert a font:

    mmfnt.py <input ttf/otf> <output fnt>

Convert a font, using an existing one as a base (for glyphs the input font is missing):

    mmfnt.py -b <base fnt> <input ttf/otf> <output fnt>

For more options, run:

    mmfnt.py --help