@astiob
Last active July 30, 2025 20:13
Tweaked/expanded/improved scripts to find convolution (e.g. low-pass) kernels and level changes, based on arch1t3cht’s find_convolution.py (https://gist.github.com/arch1t3cht/2886dfccd070c50ef77a32a88f9e0ae5)
from vstools import vs, core, get_u, get_v, get_y, depth, set_output, ColorRange
import numpy as np
from matplotlib import pyplot as plt
from tqdm import tqdm
import itertools
from encode_framework import ScriptInfo
from vssource import BestSource
from filterchain import get_episode_path
from filterchain.mapping import Region
op_start = {
1: 1200,
2: 1200,
3: 1200,
4: 1200,
5: 1199,
6: 1199,
7: 1200,
8: 1199,
9: 1199,
10: 1200,
11: 1200,
12: 1200,
13: 1200,
}
tw_sync = {
1: 3,
2: 2,
3: 2,
4: 3,
5: 1,
6: 2,
7: 2,
8: 1,
9: 1,
10: 3,
11: 3,
12: 2,
13: 3,
}
episode_clips = {}
script_info = ScriptInfo(__file__)
for region in (Region.JP, Region.TW):
for epno in range(1, 14):
path = get_episode_path(f'S01E{epno:02}', region=region)
if region is Region.JP or epno not in (1, 6):
script_info.src_file.clear()
clip = script_info.index(path)
else:
clip = BestSource.source(path, rff=True).std.SetFieldBased(vs.FIELD_TOP)
start = op_start[epno]
if region is Region.TW:
start -= tw_sync[epno]
clip = clip[start:][:3116]
crop = 2 * (region is Region.JP)
clip = clip.std.CropAbs(top=crop, width=720, height=476)
set_output(clip, f'{region.name} {epno:02}')
episode_clips[region, epno] = clip
groups = [
# ("R2J DVD (1–6, 12–13)", Region.JP, [*range(1, 7), *range(12, 14)]),
# ("R2J DVD (10–11)", Region.JP, range(10, 12)),
# 5/1.5, 2/1.5
# ("R2J DVD (1–6, 10–13)", Region.JP, [*range(1, 7), *range(10, 14)]),
# 5/1.2, 2/1.3
("R2J DVD (7–9)", Region.JP, range(7, 10)),
("R3TW DVD", Region.TW, range(1, 14)),
]
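# average the aligned OP frames of each group across its episodes so that per-episode
# compression noise largely cancels before the kernel regression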
mean_clips = []
for name, region, epnos in groups:
clips = [episode_clips[region, epno] for epno in epnos]
clip = core.std.AverageFrames(clips, [1] * len(clips))
mean_clips.append(clip)
clip2, clip1 = mean_clips
np.set_printoptions(threshold=np.inf)
get_plane = get_y
# get_plane = get_u
frames = [105, 304, 437, 529, 646, 743, 801, 912, 952, 1024, 1152, 1160, 1167, 1209, 1231, 1281, 1329, 1347, 1407, 1437, 1547, 1567, 1606, 1619, 1643, 1665, 1673, 1688, 1693, 1698, 1773, 1791, 1806, 1830, 1840, 1845, 1870, 1936, 1954, 2030, 2053, 2058, 2073, 2107, 2122, 2152, 2183, 2206, 2233, 2291, 2319, 2336, 2363, 2385, 2403, 2458, 2486, 2639, 2659, 2842, 2969]
# clip1 = clip1.fmtc .resample (1440, 1080, taps=7, sx=-1/6, fh=1/1.08, kernel="lanczos")
# # clip1 = clip1.akarin.Expr(f'x {-16/219} {(255-16)/219} clamp')
# clip1 = clip1.std.Limiter()
# clip2 = clip2.descale.Delanczos(1440, 1080, taps=7, src_left=1/8)
# clip1 = core.ffms2.Source('D:/Downloads/Nanoha [Kaleido]/S01E01-US.mkv').std.AssumeFPS(fpsnum=30000, fpsden=1001).std.SetFieldBased(0).std.Crop(bottom=2)
# clip2 = core.ffms2.Source('D:/Downloads/Nanoha [Kaleido]/S01E01-JP.mkv').std.AssumeFPS(fpsnum=30000, fpsden=1001).std.SetFieldBased(0).std.Crop(top=2)[2:]
# clip1 = clip1.std.Crop(left=8, right=8)
# clip2 = clip2.std.Crop(left=8, right=8)
# frames = 382, 1226, 1256, 1316
# frames = 1226, 1256, 1306, 1316
clip1 = core.std.Splice([clip1[fr] for fr in frames])
clip2 = core.std.Splice([clip2[fr] for fr in frames])
frs = list(range(clip1.num_frames))
clip1 = depth(get_plane(clip1), 32)
clip2 = depth(get_plane(clip2), 32)
# clip1 = (clip1
# .akarin.Expr([
# 'X 8 < 8 Y x[] X 714 >= 713 Y x[] x ? ?',
# # 'X 4 < 4 Y x[] X 357 >= 356 Y x[] x ? ?',
# ])
# .akarin.Expr([
# 'X 6 - 2 * 1 + 0 4 clip 716 X - 2 * 1 - 0 4 clip * 16 / x *',
# # 'X 3 - 4 * 1 + 0 4 clip 358 X - 4 * 1 - 0 4 clip * 16 / x *',
# ])
# )
field_based = False
#if field_based:
# clip1 = clip1.std.SeparateFields(tff=True)[::2]
# clip2 = clip2.std.SeparateFields(tff=True)[::2]
clip1.std.SetVideoCache(0)
clip2.std.SetVideoCache(0)
#set_output([clip1, clip2])
# empirically, most lowpass kernels have a support of no more than 16
# a lower support makes the calculation faster, but going below 16 is probably not safe
support_x = 16
support_y = 1
# assume that the convolution is horizontally and vertically symmetric
# (faster and slightly more accurate if true)
symmetric = True
# assume a fixed value for the total kernel volume, normally 1
# (slightly faster and more accurate if true)
force_volume = 1
# add an intercept term in case the convolution is accompanied by a change in black level
# (if it isn't, the search is slightly faster and more accurate with this disabled)
allow_intercept = False
# for a generic linear levels change, set:
# force_volume = None; allow_intercept = True
step_y = 1
if field_based:
support_y //= 2
step_y = 2
step_x = 1
#clip1 = clip1.std.Crop(left=4)
#clip2 = clip2.std.Crop(left=4)
nvars_y = support_y if symmetric else support_y*2-1
nvars_x = support_x if symmetric else support_x*2-1
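# Sketch (assumed helpers, not used by this script): the symmetric solve stores just one quadrant
# of the kernel, with index [0, 0] at the centre. These show how that quadrant expands to the full
# (2*support_y-1) x (2*support_x-1) kernel, and why the "volume" printed later equals
# q.sum()*4 - q[0,:].sum()*2 - q[:,0].sum()*2 + q[0,0].
def expand_symmetric_quadrant(q):
    iy = np.abs(np.arange(1 - q.shape[0], q.shape[0]))
    ix = np.abs(np.arange(1 - q.shape[1], q.shape[1]))
    return q[np.ix_(iy, ix)]
def quadrant_volume(q):
    return q.sum() * 4 - q[0, :].sum() * 2 - q[:, 0].sum() * 2 + q[0, 0]
# for any quadrant q: np.isclose(expand_symmetric_quadrant(q).sum(), quadrant_volume(q)) holds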
eqns = None
with tqdm(smoothing=0, total=len(frs)*(support_x*2-1)*(support_y*2-1)) as pbar:
for ifr, fr in enumerate(frs):
w = clip1.width
h = clip1.height
with clip1.get_frame(fr) as frame1:
frame1 = np.array(frame1, dtype=np.float32).reshape((h, w))
with clip2.get_frame(fr) as frame2:
frame2 = np.array(frame2, dtype=np.float32).reshape((h, w))
if ifr == len(frs) - 1:
del clip1
del clip2
frame1_padded = frame1
#frame1_padded = np.vstack((np.flipud(frame1_padded[:support_y,:]), frame1_padded, np.flipud(frame1_padded[-support_y:,:])))
#frame1_padded = np.hstack((np.fliplr(frame1_padded[:,:support_x]), frame1_padded, np.fliplr(frame1_padded[:,-support_x:])))
frame2 = frame2[(support_y-1)*step_y:((1-support_y)*step_y if support_y>1 else None),(support_x-1)*step_x:((1-support_x)*step_x if support_x>1 else None)]
w -= 2 * (support_x - 1) * step_x
h -= 2 * (support_y - 1) * step_y
# frame1_padded = np.vstack((np.zeros((support_y,w), np.float32), frame1_padded, np.zeros((support_y,w), np.float32)))
# hp = h + support_y * 2
# frame1_padded = np.hstack((np.zeros((hp,support_x), np.float32), frame1_padded, np.zeros((hp,support_x), np.float32)))
if eqns is None:
#eqns = np.zeros((len(frs), h, w, nvars_y, nvars_x), np.float32)
eqns = np.zeros((len(frs), h, w, nvars_y, nvars_x))
# eqns = np.zeros((len(frs), h, w, 2, nvars_x))
vals = np.ndarray((len(frs), h, w))
for dx, dy in itertools.product(range(-support_x + 1, support_x), range(-support_y + 1, support_y)):
if symmetric:
eqns[ifr,:,:,abs(dy),abs(dx)] += frame1_padded[(support_y-1+dy)*step_y:,(support_x-1+dx)*step_x:][:h,:w]
# eqns[ifr,:,:,0,abs(dx)] += (support_y - abs(dy)) * frame1_padded[(support_y-1+dy)*step_y:,(support_x-1+dx)*step_x:][:h,:w]
# eqns[ifr,:,:,0,abs(dx)] += frame1_padded[(support_y-1+dy)*step_y:,(support_x-1+dx)*step_x:][:h,:w]
# if not dy:
# eqns[ifr,:,:,1,abs(dx)] += frame1_padded[(support_y-1+dy)*step_y:,(support_x-1+dx)*step_x:][:h,:w]
else:
eqns[ifr,:,:,support_y-1+dy,support_x-1+dx] += frame1_padded[(support_y-1+dy)*step_y:,(support_x-1+dx)*step_x:][:h,:w]
pbar.update()
vals[ifr] = frame2
all_kernels = []
# in case of suspected downscale -> upscale,
# find a separate convolution for every nth pixel
# where n is the denominator of the scale ratio
# offset_x = 1+3
# step_x = 9
# eqns = eqns[:, :, offset_x::step_x, :, :]
# vals = vals[:, :, offset_x::step_x]
# w = (w - offset_x + step_x-1) // step_x
# inspect single pixel columns
# all_eqns = eqns
# all_vals = vals
# for offset_x in range(w):
# eqns = all_eqns[:, :, offset_x:, :, :][:, :, :1, :, :].copy()
# vals = all_vals[:, :, offset_x:][:, :, :1].copy()
# w = 1
if True:
if force_volume is not None:
mask = np.ones((nvars_y, nvars_x), bool)
if symmetric:
vals -= force_volume * eqns[:,:,:,0,0]
eqns[:,:,:,1:,1:] -= 4 * eqns[:,:,:,:1,:1]
eqns[:,:,:,0,1:] -= 2 * eqns[:,:,:,0,:1]
eqns[:,:,:,1:,0] -= 2 * eqns[:,:,:,:1,0]
mask[0, 0] = False
else:
vals -= force_volume * eqns[:,:,:,support_y-1,support_x-1]
eqns -= eqns[:,:,:,support_y-1:support_y,support_x-1:support_x]
mask[support_y-1, support_x-1] = False
mask = mask.reshape(-1)
eqns = eqns.reshape((len(frs) * h * w, -1))
eqns = eqns[:,mask]
else:
eqns = eqns.reshape((len(frs) * h * w, -1))
if allow_intercept:
eqns = np.hstack((eqns, np.ones((len(frs) * h * w, 1))))
kernel, residuals, rank, sings = np.linalg.lstsq(eqns, vals.reshape((len(frs) * w * h,)), rcond=None)
if allow_intercept:
intercept, kernel = kernel[-1], kernel[:-1]
print('intercept:', intercept)
if force_volume is not None:
full_kernel = np.zeros(nvars_y * nvars_x)
full_kernel[mask] = kernel
kernel = full_kernel.reshape((nvars_y, nvars_x))
if symmetric:
kernel[0, 0] = force_volume - kernel.sum() * 2 - kernel[1:,1:].sum() * 2
else:
kernel[support_y-1, support_x-1] = force_volume - kernel.sum()
else:
kernel = kernel.reshape((nvars_y, nvars_x))
# kernel = kernel.reshape((2, nvars_x))
print(kernel)
# print(kernel.tolist())
all_kernels.append(kernel)
kernel = np.vstack(all_kernels)
# dump the kernel in Wolfram language for Mathematica
with open('conv.m', 'w') as file:
file.write('{')
file.write(','.join('{' + ','.join(str(float(val)).replace('e', '*10^') for val in row) + '}' for row in kernel))
file.write('}')
# plt.stem(list(range(support)), kernel[0,:])
# plt.stem(list(range(support)), kernel[:,0])
# plt.ylim((-1, 1))
# plt.show()
print(f"kernel volume: {kernel.sum() * 4 - kernel[0,:].sum() * 2 - kernel[:,0].sum() * 2 + kernel[0,0] if symmetric else kernel.sum()}")
print(f"kernel MSE: {residuals[0] / frame2.size}")
if support_x == support_y:
# We've found a 2d convolution kernel. Now, assume that it's separable and arises from a single symmetric 1d kernel,
# and find the best such kernel using gradient descent
def kernel_1dto2d(kernel):
return np.outer(kernel, kernel)
def kernel_1dto2d_deriv(kernel_1d, kernel_2d):
s = len(kernel_1d)
return np.array([
# d/dk_x sum((kernel_1d[y] * kernel_1d[x] - kernel_2d[y,x]) ** 2 for x in range(s) for y in range(s))
# = d/dk_x (sum((kernel_1d[k] * kernel_1d[y] - kernel_2d[y,k]) ** 2 for y in range(s) if y != k))
# + (sum((kernel_1d[x] * kernel_1d[k] - kernel_2d[k,x]) ** 2 for x in range(s) if x != k))
# + (kernel_1d[k] ** 2 - kernel_2d[k,k]) ** 2
# = (sum(2 * kernel_1d[y] * (kernel_1d[k] * kernel_1d[y] - kernel_2d[y,k]) for y in range(s) if x != k))
# + (sum(2 * kernel_1d[x] * (kernel_1d[x] * kernel_1d[k] - kernel_2d[k,x]) for x in range(s) if y != k))
# + 4 * kernel_1d[k] * (kernel_1d[k] ** 2 - kernel_2d[k,k])
# =
(sum(2 * kernel_1d[y] * (kernel_1d[k] * kernel_1d[y] - kernel_2d[y,k]) for y in range(s)))
+ (sum(2 * kernel_1d[x] * (kernel_1d[x] * kernel_1d[k] - kernel_2d[k,x]) for x in range(s)))
for k in range(s)])
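# Sketch (assumed helper, not executed): finite-difference check of kernel_1dto2d_deriv on a tiny
# random kernel; the analytic gradient above should match central differences closely.
def _check_kernel_deriv(n=4, eps=1e-6, seed=0):
    rng = np.random.default_rng(seed)
    k = rng.standard_normal(n)
    target = rng.standard_normal((n, n))
    analytic = kernel_1dto2d_deriv(k, target)
    numeric = np.empty(n)
    for i in range(n):
        kp, km = k.copy(), k.copy()
        kp[i] += eps
        km[i] -= eps
        numeric[i] = (np.sum((kernel_1dto2d(kp) - target) ** 2)
                      - np.sum((kernel_1dto2d(km) - target) ** 2)) / (2 * eps)
    return np.max(np.abs(analytic - numeric))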
kernel_1d = kernel_hori = kernel.sum(0) * 2 - kernel[0,:] if symmetric else kernel.sum(0)
print(kernel_1d)
print(f"hori area: {kernel_1d.sum() * 2 - kernel_1d[0] if symmetric else kernel_1d.sum()}")
print(f"hori MSE: {np.sum((kernel_1dto2d(kernel_1d) - kernel) ** 2) / kernel.size}")
kernel_2d = kernel_1dto2d(kernel_1d)
print(f"hori**2 volume: {kernel_2d.sum() * 4 - kernel_2d[0,:].sum() * 2 - kernel_2d[:,0].sum() * 2 + kernel_2d[0,0] if symmetric else kernel_2d.sum()}")
print(f"hori**2 MSE: {np.sum((eqns @ kernel_2d.ravel() - frame2.ravel()) ** 2) / frame2.size}")
kernel_1d = kernel_vert = kernel.sum(1) * 2 - kernel[:,0] if symmetric else kernel.sum(1)
print(kernel_1d)
print(f"vert area: {kernel_1d.sum() * 2 - kernel_1d[0] if symmetric else kernel_1d.sum()}")
print(f"vert MSE: {np.sum((kernel_1dto2d(kernel_1d) - kernel) ** 2) / kernel.size}")
kernel_2d = kernel_1dto2d(kernel_1d)
print(f"vert**2 volume: {kernel_2d.sum() * 4 - kernel_2d[0,:].sum() * 2 - kernel_2d[:,0].sum() * 2 + kernel_2d[0,0] if symmetric else kernel_2d.sum()}")
print(f"vert**2 MSE: {np.sum((eqns @ kernel_2d.ravel() - frame2.ravel()) ** 2) / frame2.size}")
kernel_2d = np.outer(kernel_vert, kernel_hori)
print(f"hori*vert volume: {kernel_2d.sum() * 4 - kernel_2d[0,:].sum() * 2 - kernel_2d[:,0].sum() * 2 + kernel_2d[0,0] if symmetric else kernel_2d.sum()}")
print(f"hori*vert MSE: {np.sum((eqns @ kernel_2d.ravel() - frame2.ravel()) ** 2) / frame2.size}")
print("Gradient descent:")
kernel_1d = kernel_hori.copy()
stepsize = np.sqrt(np.sum((kernel_1dto2d(kernel_1d) - kernel) ** 2)) / 2 # heuristic step size, not derived from anything principled
for i in range(100):
kernel_1d -= stepsize * kernel_1dto2d_deriv(kernel_1d, kernel)
print(f"MSE: {np.sum((kernel_1dto2d(kernel_1d) - kernel) ** 2) / kernel.size}")
print(kernel_1d)
kernel_2d = kernel_1dto2d(kernel_1d)
print(f"1dto2d volume: {kernel_2d.sum() * 4 - kernel_2d[0,:].sum() * 2 - kernel_2d[:,0].sum() * 2 + kernel_2d[0,0] if symmetric else kernel_2d.sum()}")
print(f"1dto2d MSE: {np.sum((eqns @ kernel_2d.ravel() - frame2.ravel()) ** 2) / frame2.size}")
plt.stem(list(range(support_x)), kernel_hori)
plt.stem(list(range(support_x)), kernel_vert)
plt.stem(list(range(support_x)), kernel_1d)
plt.ylim((-1, 1))
# plt.show()
# Now you could fit some known kernel (say lanczos with some taps+blur) to kernel_1d if you wanted to
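# Sketch of the fit suggested above (assumed names and parametrization, not part of the original
# script): fit scaled Lanczos(taps, blur) samples to kernel_1d with scipy.
def fit_lanczos_to_kernel(kernel_1d):
    import scipy.optimize
    xs = np.arange(len(kernel_1d))  # kernel_1d[0] is the centre tap; samples at integer offsets
    def lanczos(x, taps, blur):
        x = np.abs(x) / blur
        return np.where(x < taps, np.sinc(x) * np.sinc(x / taps), 0.0)
    def residuals(params):
        taps, blur, scale = params
        return scale * lanczos(xs, taps, blur) - kernel_1d
    return scipy.optimize.least_squares(residuals, x0=[3.0, 1.0, 1.0])
# result = fit_lanczos_to_kernel(kernel_1d); print(result.x)  # -> [taps, blur, scale]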
from vstools import vs, core, get_u, get_v, get_y, depth, set_output
import functools
import numpy as np
import math
from matplotlib import pyplot as plt
import scipy
from tqdm import tqdm
import itertools
np.set_printoptions(threshold=np.inf)
discs = {
1: R'D:\Downloads\YESPRECURE5\YESPRECURE5_DISC1\BDMV\STREAM\00016.m2ts',
2: R'D:\Downloads\YESPRECURE5\YESPRECURE5_DISC2\BDMV\STREAM\00016.m2ts',
3: R'D:\Downloads\YESPRECURE5\YESPRECURE5_DISC3\BDMV\STREAM\00016.m2ts',
4: R'D:\Downloads\YESPRECURE5\YESPRECURE5_DISC4\BDMV\STREAM\00016.m2ts',
5: R'D:\Downloads\YESPRECURE5\YESPRECURE5_DISC5\BDMV\STREAM\00016.m2ts',
6: R'D:\Downloads\YESPRECURE5\YESPRECURE5_DISC6\BDMV\STREAM\00016.m2ts',
7: R'D:\Downloads\YESPRECURE5\YESPRECURE5_DISC7\BDMV\STREAM\00016.m2ts',
8: R'D:\Downloads\YESPRECURE5\YESPRECURE5_DISC8\BDMV\STREAM\00018.m2ts',
}
# @functools.lru_cache(maxsize=1)
def disc(n):
# return get_y(core.dgdecodenv.DGSource(f'D:/Downloads/YESPRECURE5/YESPRECURE5_DISC{n}.dgi'))
# clip = core.dgdecodenv.DGSource(f'D:/Downloads/YESPRECURE5/YESPRECURE5_DISC{n}.dgi')
clip = core.bs.VideoSource(discs[n])
clip = clip.std.SetFrameProp('_ColorRange', intval=vs.RANGE_FULL)
clip = get_y(clip)
# clip = clip.std.Crop(left=500, right=500, top=480, bottom=80)
# clip = clip.std.Crop(left=clip.width//2-32)
# clip = clip.std.Crop(bottom=1080-848)
# clip = core.std.StackVertical([get_u(clip), get_v(clip)])
return clip
# return get_y(core.dgdecodenv.DGSource(f'D:/Downloads/YESPRECURE5/YESPRECURE5_DISC{n}.dgi')).std.Crop(top=910, bottom=1080-975, left=530, right=530)
def ncoped():
# clip = core.dgdecodenv.DGSource('D:/Downloads/YESPRECURE5/00020.dgi')
clip = core.bs.VideoSource(R'D:\Downloads\YESPRECURE5\YESPRECURE5_DISC4\BDMV\STREAM\00020.m2ts')
clip = clip.std.SetFrameProp('_ColorRange', intval=vs.RANGE_FULL)
clip = get_y(clip)
return clip
episodes = {
1: lambda: disc(1)[30:43488],
2: lambda: disc(1)[43518:86978],
3: lambda: disc(1)[87008:130468],
4: lambda: disc(1)[130498:173956],
5: lambda: disc(1)[173986:217444],
6: lambda: disc(1)[217474:260932],
7: lambda: disc(2)[30:43488],
8: lambda: disc(2)[43518:86976],
9: lambda: disc(2)[87006:130464],
10: lambda: disc(2)[130494:173952],
11: lambda: disc(2)[173982:217442],
12: lambda: disc(2)[217472:260930],
13: lambda: disc(3)[30:43490],
14: lambda: disc(3)[43520:86978],
15: lambda: disc(3)[87008:130466],
16: lambda: disc(3)[130496:173956],
17: lambda: disc(3)[173986:217444],
18: lambda: disc(3)[217474:260932],
19: lambda: disc(4)[30:43488],
20: lambda: disc(4)[43518:86976],
21: lambda: disc(4)[87006:130464],
22: lambda: disc(4)[130494:173952],
23: lambda: disc(4)[173982:217440],
24: lambda: disc(4)[217470:260930],
# 25: lambda: depth(depth(disc(5)[30:43488], 32).std.Expr([f'x {0.019267391547870598/224} + 0.9999244799156317 /']), 8),
25: lambda: disc(5)[30:43488],
26: lambda: disc(5)[43518:86978], #.std.Expr([f'x {219 / 216} *']),
27: lambda: disc(5)[87008:130468],
28: lambda: disc(5)[130498:173956], #.std.Expr([f'x {1 / 219} + {219 / 220} *', '', f'x {112 / 118} *']),
29: lambda: disc(5)[173986:217444],
30: lambda: disc(5)[217474:260932],
31: lambda: disc(6)[30:43488],
32: lambda: disc(6)[43518:86976],
33: lambda: disc(6)[87006:130466],
34: lambda: disc(6)[130496:173954],
35: lambda: disc(6)[173984:217442],
36: lambda: disc(6)[217472:260930],
37: lambda: disc(7)[30:43490],
38: lambda: disc(7)[43520:86978],
39: lambda: disc(7)[87008:130468],
40: lambda: disc(7)[130498:173956],
41: lambda: disc(7)[173986:217444],
42: lambda: disc(7)[217474:260932],
43: lambda: disc(8)[30:43488],
44: lambda: disc(8)[43518:86978],
45: lambda: disc(8)[87008:130468],
46: lambda: disc(8)[130498:173958],
47: lambda: disc(8)[173988:217446],
48: lambda: disc(8)[217476:260936],
49: lambda: disc(8)[260966:304426],
}
a_eps = {
1: 3807,
2: 2699,
3: 1771,
4: 1291,
5: 1171,
6: 3419,
7: 3777,
8: 1561,
9: 4077,
10: 1441,
11: 2699,
12: 1531,
13: 2789,
# OP2
# 35: 1 + 3568,
# 36: 1 + 1740,
# 37: 1 + 2698,
# 38: 1 + 2248,
# 39: 1 + 2848,
# 40: 1 + 2218,
# 41: 1 + 1470,
# 42: 1 + 4676,
43: 1 + 1380,
44: 1 + 1410,
45: 1 + 2608,
46: 1 + 2368,
# new ABC logo
47: 1 + 4076,
}
b_eps = {
14: 3389,
15: 2969,
16: 2429,
17: 1829,
18: 3209,
19: 2639,
20: 2878,
21: 2189,
22: 1681,
23: 4887,
24: 721,
25: 2309,
# 26: 2489,
27: 2849,
# 28: 1829,
29: 3359,
30: 1201,
31: 1 + 1918,
32: 1 + 1620,
# ED2
33: 1 + 2518,
34: 1 + 3598,
# new ABC logo
48: 1 + 840,
49: 1 + 510,
}
# everything
# frs = (
# # intro
# 96, # butterfly
# 96 + 240, # fairies
# 96 + 240 + 72, # logo
# # planning
# # ball
# # 96 + 240 + 72 + 176 + 119 + 50, # producers
# # 96 + 240 + 72 + 176 + 119 + 50 + 82, # school
# # songs
# # flying
# # 96 + 240 + 72 + 176 + 119 + 50 + 82 + 528 + 144 + 79, # attacks
# # adults
# # 96 + 240 + 72 + 176 + 119 + 50 + 82 + 528 + 144 + 79 + 565 + 160, # everyone
# 96 + 240 + 72 + 176 + 119 + 50 + 82 + 528 + 144 + 79 + 565 + 160 + 75, # rainbow
# )
# clean background only (with crop applied in `disc`)
# frs = (
# # intro
# 96, # butterfly
# 96 + 61,
# 96 + 61 + 53,
# 96 + 61 + 53 + 46,
# 96 + 61 + 53 + 46 + 35,
# 96 + 240, # fairies
# 96 + 240 + 72, # logo
# # planning
# # ball
# # 96 + 240 + 72 + 176 + 119 + 50, # producers
# # 96 + 240 + 72 + 176 + 119 + 50 + 82, # school
# 5000 - 3807, # henshin Dream
# # songs
# # flying
# # 96 + 240 + 72 + 176 + 119 + 50 + 82 + 528 + 144 + 79, # attacks
# 5708 - 3807, # attack Dream
# 5785 - 3807, # attack result
# # 96 + 240 + 72 + 176 + 119 + 50 + 82 + 528 + 144 + 79 + 565, # adults
# # 96 + 240 + 72 + 176 + 119 + 50 + 82 + 528 + 144 + 79 + 565 + 160, # everyone
# 96 + 240 + 72 + 176 + 119 + 50 + 82 + 528 + 144 + 79 + 565 + 160 + 75, # rainbow
# )
# frs = (
# tuple(range(584)) +
# tuple(range(703, 753)) +
# tuple(range(900, 910)) +
# tuple(range(985, 989)) +
# tuple(range(1079, 1097)) +
# tuple(range(1193, 1223)) +
# tuple(range(1320, 1363)) +
# tuple(range(1507, 1586)) +
# tuple(range(1699, 1740)) +
# tuple(range(1873, 2068)) +
# tuple(range(2183, 2279)) +
# tuple(range(2386, 2399))
# )[::10]
# hand-picked creditless (but possibly lyricful) frames with relatively high frequencies
# frs = 70, 163, 705, 1081, 1984, 2034, 2274
# 704/705 is good! (lyricless and high-freq)
# frs = 704,
# subs only
# frs = (
# # intro
# 96, # butterfly
# 96 + 240, # fairies
# # 96 + 240 + 72, # logo
# # planning
# # ball
# 96 + 240 + 72 + 176 + 119 + 50, # producers
# # 96 + 240 + 72 + 176 + 119 + 50 + 82, # school
# # songs
# # flying
# 96 + 240 + 72 + 176 + 119 + 50 + 82 + 528 + 144 + 79, # attacks
# 96 + 240 + 72 + 176 + 119 + 50 + 82 + 528 + 144 + 79 + 565, # adults
# 96 + 240 + 72 + 176 + 119 + 50 + 82 + 528 + 144 + 79 + 565 + 160, # everyone
# 96 + 240 + 72 + 176 + 119 + 50 + 82 + 528 + 144 + 79 + 565 + 160 + 75, # rainbow
# )
# songs (probable I frame)
# frs = 1418,
# final credits frame (probable B/P frame in a still sequence)
# frs = 2544,
# clip1 = core.std.AverageFrames([episodes[epno]()[start:start+nfr] for epno, start in b_eps.items()], [1] * len(b_eps))[95]
# clip2 = core.std.AverageFrames([episodes[epno]()[start:start+nfr] for epno, start in a_eps.items()], [1] * len(a_eps))[95]
#set_output([clip1, clip2])
from hardsubs import extract_hardsubs
ncop1 = depth(ncoped()[30:2580], 32)
ncop2 = depth(ncoped()[2610:5160], 32)
ncop = core.std.AverageFrames([ncop1, ncop2], [1, 1])[1:]
def xx(clip, start):
# print(clip)
# print(start)
return clip[start:]
def fullband():
return extract_hardsubs(
core.std.AverageFrames([depth(xx(episodes[epno](), start), 32) for epno, start in b_eps.items()], [1] * len(b_eps)),
ncop,
989, 989 + 89,
left=1040, right=380, top=344, bottom=584,
)
def lowpassed():
return extract_hardsubs(
core.std.AverageFrames([depth(xx(episodes[epno](), start), 32) for epno, start in a_eps.items()], [1] * len(a_eps)),
ncop
.fmtc.resample(1440, 1080, kernel="lanczos", taps=7, sx=-1/6)
.akarin.Expr(f'x 0 1 clamp')
.fmtc.resample(1440, 1080, kernel="lanczos", taps=6, fh=1/1.06)
.akarin.Expr(f'x 0 1 clamp')
.fmtc.resample(1920, 1080, kernel="lanczos", taps=7, sx=1/8)
.akarin.Expr(f'x 0 1 clamp'),
989, 989 + 89,
left=1040, right=380, top=344, bottom=584,
)
frs = 0,
if __name__ != '__main__':
fullband_premul, fullband_alpha = fullband()
lowpassed_premul, lowpassed_alpha = lowpassed()
# fullband_premul = fullband()
# lowpassed_premul = lowpassed()
set_output(fullband_premul)
set_output(fullband_alpha)
set_output(lowpassed_premul)
set_output(lowpassed_alpha)
inferred_premul = (fullband_premul
.resize.Lanczos(1440, 1080, filter_param_a=7, src_left=-1/6)
.akarin.Expr(f'x 0 1 clamp')
.fmtc.resample(kernel='impulse', fh=-1, cnorm=False, center=False, impulsev=[1], impulseh=[0.0004316616355133783,-0.0015794013225869509,0.0019153772820731556,-0.0025030718761168795,0.0036299004722098916,-0.0036838537569184506,0.002695718386016015,-0.0012125632953251327,0.004138042437207754,0.004109393780954353,-0.005181058223482468,0.04652326221818901,1.0652555878188765,0.04652326221818901,-0.005181058223482468,0.004109393780954353,0.004138042437207754,-0.0012125632953251327,0.002695718386016015,-0.0036838537569184506,0.0036299004722098916,-0.0025030718761168795,0.0019153772820731556,-0.0015794013225869509,0.0004316616355133783])
.akarin.Expr(f'x 0 1 clamp')
.resize.Lanczos(1920, 1080, filter_param_a=7, src_left=1/8)
.akarin.Expr(f'x 0 1 clamp'))
set_output(inferred_premul)
inferred_alpha = (fullband_alpha
.resize.Lanczos(1440, 1080, filter_param_a=7, src_left=-1/6)
.akarin.Expr(f'x 0 1 clamp')
.fmtc.resample(kernel='impulse', fh=-1, cnorm=False, center=False, impulsev=[1], impulseh=[0.0004316616355133783,-0.0015794013225869509,0.0019153772820731556,-0.0025030718761168795,0.0036299004722098916,-0.0036838537569184506,0.002695718386016015,-0.0012125632953251327,0.004138042437207754,0.004109393780954353,-0.005181058223482468,0.04652326221818901,1.0652555878188765,0.04652326221818901,-0.005181058223482468,0.004109393780954353,0.004138042437207754,-0.0012125632953251327,0.002695718386016015,-0.0036838537569184506,0.0036299004722098916,-0.0025030718761168795,0.0019153772820731556,-0.0015794013225869509,0.0004316616355133783])
.akarin.Expr(f'x 0 1 clamp')
.resize.Lanczos(1920, 1080, filter_param_a=7, src_left=1/8)
.akarin.Expr(f'x 0 1 clamp'))
set_output(inferred_alpha)
else:
# bitdepth = 8
# dtypes = np.uint16, np.uint32
bitdepth = 32
dtypes = np.float64, np.float64
w = w2 = None
# with tqdm(smoothing=0, unit='fr', total=(len(frs) * (len(a_eps) + len(b_eps)))) as pbar:
with tqdm(smoothing=0, unit='fr', total=2) as pbar:
# for epno, start in a_eps.items():
pbar.refresh()
# clip = episodes[epno]()[start:]
# clip = depth(clip, bitdepth)
clip = lowpassed()
# clip = clip.descale.Delanczos(width=1440, height=1080, taps=7, src_left=1/8)
clip = clip.resize.Point(1440, 1080, src_left=-1/6)
clip = clip.std.Crop(left=1040 * clip.width/1920, right=380 * clip.width/1920, top=344, bottom=584)
# alpha = fullband()
# clip = get_u(alpha)
if w2 is None:
w2 = clip.width
h2 = clip.height
frames2 = np.zeros((len(frs), h2, w2), dtypes[0])
for ifr, fr in enumerate(frs):
with clip.get_frame(fr) as frame:
# frames2[ifr] += np.array(frame, np.uint8).reshape((h, w))
frames2[ifr] += np.asarray(frame[0])
pbar.update()
del frame
del clip
# for epno, start in b_eps.items():
pbar.refresh()
# clip = episodes[epno]()[start:]
clip = fullband()
clip = clip.resize.Lanczos(1440, 1080, filter_param_a=7, src_left=-1/6)
clip = clip.akarin.Expr(f'x 0 1 clamp')
# clip = depth(clip, bitdepth)
# clip = clip.resize.Lanczos(1920, 1080, filter_param_a=7, src_left=1/8)
clip = clip.std.Crop(left=1040 * clip.width/1920, right=380 * clip.width/1920, top=344, bottom=584)
# clip = get_y(alpha)
if w is None:
w = clip.width
h = clip.height
frames1 = np.zeros((len(frs), h, w), dtypes[0])
for ifr, fr in enumerate(frs):
with clip.get_frame(fr) as frame:
# frames1[ifr] += np.array(frame, np.uint8).reshape((h, w))
frames1[ifr] += np.asarray(frame[0])
pbar.update()
del frame
del clip
# del alpha
# ignore outermost pixels due to inconsistent dirtiness of edges
# margin_x = 32
margin_x = 0
# step_x = 4
step_x = 3
# step_x = 2
# step_x = 1
# empirically, most lowpass kernels have a support of no more than 16
# a lower support makes the calculation faster, but going below 16 is probably not safe
# support_x = 17
# support_x = 13
support_x = 13 if step_x == 3 else 17
assert (margin_x + support_x - 1) % step_x == w % step_x == 0
target_w = (w - (margin_x + support_x - 1) * 2) // step_x
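# each phase keeps target_w output samples: support_x - 1 columns are dropped at each edge so
# every equation references only real pixels, and the remainder is subsampled by step_x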
def dumps(array):
return '{' + ','.join(dumps(elem) if isinstance(elem, np.ndarray) else str(float(elem)).replace('e', '*10^') for elem in array) + '}'
def dump(array):
print(dumps(array))
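# e.g. dumps(np.array([1.5, 2e-05])) -> '{1.5,2*10^-05}', which Mathematica reads directly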
force_area = 1
allow_intercept = False
for offset_x in range(step_x // 2 + 1):
print("offset_x =", offset_x)
symmetric = offset_x == -offset_x % step_x
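# a phase is "symmetric" when it is its own mirror image modulo step_x; with step_x == 3 only
# offset_x == 0 is, so phases 1 and 2 are solved jointly as mirrored reflections (nreflections == 2)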
# This (nvars) was commented out; why?
nvars = support_x if symmetric else support_x*2-1
nvars += allow_intercept
nreflections = 1 if symmetric else 2
mask = np.ones(nvars, bool)
# we're assuming it's an interpolation filter, so some coefficients are inevitably zero
# mask[step_x::step_x] = False
eqns = np.zeros((nreflections, len(frs), h, target_w, nvars), dtypes[1])
for dx in tqdm(list(range(-support_x + 1, support_x))):
if symmetric:
if mask[abs(dx)]:
eqns[0,:,:,:,abs(dx)] += frames1[:,:,margin_x+offset_x+support_x-1+dx::step_x][:,:,:target_w]
else:
eqns[0,:,:,:,support_x-1+dx] += frames1[:,:,margin_x +offset_x+support_x-1+dx::step_x][:,:,:target_w]
eqns[1,:,:,:,support_x-1-dx] += frames1[:,:,margin_x+step_x-offset_x+support_x-1+dx::step_x][:,:,:target_w]
vals = frames2[:,:,margin_x+offset_x+support_x-1::step_x][:,:,:target_w]
if not symmetric:
vals = np.vstack((vals, frames2[:,:,margin_x+step_x-offset_x+support_x-1::step_x][:,:,:target_w]))
if force_area is not None:
if symmetric:
mask[0] = False
vals -= force_area * eqns[0,:,:,:,0]
eqns[0,:,:,:,1:] -= 2 * eqns[0,:,:,:,0,np.newaxis]
else:
mask[support_x-1] = False
vals[:len(vals)//2, ...] -= force_area * eqns[0,:,:,:,support_x-1]
vals[len(vals)//2:, ...] -= force_area * eqns[1,:,:,:,support_x-1]
eqns[0,:,:,:,:] -= eqns[0,:,:,:,support_x-1,np.newaxis]
eqns[1,:,:,:,:] -= eqns[1,:,:,:,support_x-1,np.newaxis]
eqns = eqns[:,:,:,:,mask]
eqns = eqns.reshape((nreflections * len(frs) * h * target_w, -1)).astype(np.float64) / len(b_eps)
vals = vals.reshape(-1).astype(np.float64) / len(a_eps)
row_mask = (vals >= 0/219) & (vals <= 1)
print(row_mask.sum(), 'out of', row_mask.size)
eqns = eqns[row_mask,:]
vals = vals[row_mask]
if allow_intercept:
eqns[:,-1] = 1
nonzero_kernel, residuals, rank, sings = np.linalg.lstsq(eqns, vals, rcond=None)
kernel = np.zeros(nvars)
kernel[mask] = nonzero_kernel
if allow_intercept:
intercept = kernel[-1]
kernel = kernel[:-1]
print(f"intercept: {intercept}")
if symmetric:
kernel = np.hstack((kernel[:0:-1], kernel))
if force_area is not None:
kernel[support_x-1] = force_area - kernel.sum()
# print(kernel)
# print(kernel.tolist())
with open(f'conv-{offset_x}.m', 'w') as file:
file.write(dumps(kernel))
# plt.stem(list(range(support)), kernel[0,:])
# plt.stem(list(range(support)), kernel[:,0])
# plt.ylim((-1, 1))
# plt.show()
print(f"kernel area: {kernel.sum() * 2 - kernel[0] if len(kernel) == support_x else kernel.sum()}")
print(f"MSE: {residuals[0] / vals.size}")
if offset_x:
continue
print('Now solving non-linear least squares to account for clipping...')
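# The residual model below composes the two resampling passes with a single shared symmetric
# kernel: each output sample of phase offset_x is a sum of kernel[abs(up)] * kernel[abs(down)]
# times an input pixel, scaled by downscaled_step_x / step_x.
# Note: support_steps_x and downscaled_step_x are not defined on this code path; the commented-out
# variant further below uses downscaled_step_x = 3 and support_steps_x = 48.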
def compute_residuals(kernel):
rescaled = np.zeros((len(frs), h, target_w))
for offset_x in range(step_x):
for up in range(((-offset_x - support_steps_x) // step_x + 1) * step_x + offset_x, support_steps_x, step_x):
for down in range(((up - support_steps_x) // downscaled_step_x + 1) * downscaled_step_x - up, support_steps_x, downscaled_step_x):
rescaled[:, :, offset_x::step_x] += kernel[abs(up)] * kernel[abs(down)] * frames1[:, :, margin_x+offset_x+(up+down)//downscaled_step_x::step_x][:, :, :-((offset_x-target_w)//step_x)]
residuals = downscaled_step_x / step_x / len(b_eps) * rescaled.reshape(-1) - vals
# residual_pbar.update()
return residuals
def compute_jacobian(kernel):
jacobian = np.zeros((len(frs), h, target_w, len(kernel)))
for offset_x in range(step_x):
for up in range(((-offset_x - support_steps_x) // step_x + 1) * step_x + offset_x, support_steps_x, step_x):
for down in range(((up - support_steps_x) // downscaled_step_x + 1) * downscaled_step_x - up, support_steps_x, downscaled_step_x):
if abs(up) != abs(down):
jacobian[:, :, offset_x::step_x, abs(up)] += kernel[abs(down)] * frames1[:, :, margin_x+offset_x+(up+down)//downscaled_step_x::step_x][:, :, :-((offset_x-target_w)//step_x)]
jacobian[:, :, offset_x::step_x, abs(down)] += kernel[abs(up)] * frames1[:, :, margin_x+offset_x+(up+down)//downscaled_step_x::step_x][:, :, :-((offset_x-target_w)//step_x)]
else:
# Same but faster and more precise
jacobian[:, :, offset_x::step_x, abs(up)] += 2 * kernel[abs(up)] * frames1[:, :, margin_x+offset_x+(up+down)//downscaled_step_x::step_x][:, :, :-((offset_x-target_w)//step_x)]
jacobian = downscaled_step_x / step_x / len(b_eps) * jacobian.reshape((-1, len(kernel)))
# jacobian_tbar.update()
return jacobian
initial_guess = np.hstack((nonzero_kernel, [0, 1]))
lower_bounds = np.hstack((np.full_like(nonzero_kernel, -np.inf), [0, 0]))
upper_bounds = np.hstack((np.full_like(nonzero_kernel, np.inf), [1, 1]))
bounds = lower_bounds, upper_bounds
result = scipy.optimize.least_squares(compute_residuals, initial_guess, compute_jacobian, bounds, verbose=2, ftol=1e-10)
# Find downscale kernel assuming a known upscale kernel
# downscaled_step_x = 3
# taps_up = 7
# down_support_steps_x = (support_x - 1) * downscaled_step_x + 1
# up_margin_x = -(-taps_up * step_x // down_support_steps_x)
# margin_x += up_margin_x
# target_w -= up_margin_x * 2
# nvars = down_support_steps_x
# kernel = np.zeros(nvars)
# eqns = np.zeros((len(frs), h, target_w, nvars))
# for offset_x in range(step_x):
# for up in range(((-offset_x - taps_up * step_x) // step_x + 1) * step_x + offset_x, taps_up * step_x, step_x):
# if abs(up) < step_x * taps_up and (up == 0 or up % step_x):
# coeff_up = np.sinc(up / step_x) * np.sinc(up / (step_x * taps_up))
# for down in range(((up - down_support_steps_x) // downscaled_step_x + 1) * downscaled_step_x - up, down_support_steps_x, downscaled_step_x):
# eqns[:,:,offset_x::step_x,abs(down)] += coeff_up * frames1[:, :, margin_x+offset_x+support_x-1+(up+down)//downscaled_step_x::step_x][:, :, :-((offset_x-target_w)//step_x)]
# vals = frames2[:,:,margin_x+support_x-1::][:,:,:target_w]
# eqns = eqns.reshape((len(frs) * h * target_w, -1))
# eqns /= len(b_eps)
# vals = vals.reshape(-1).astype(np.float64) / len(a_eps)
# kernel, residuals, rank, sings = np.linalg.lstsq(eqns, vals, rcond=None)
# # print(kernel)
# # print(kernel.tolist())
# # with open(f'conv-{offset_x}.m', 'w') as file:
# # file.write(dumps(kernel))
# # plt.stem(list(range(support)), kernel[0,:])
# # plt.stem(list(range(support)), kernel[:,0])
# # plt.ylim((-1, 1))
# # plt.show()
# dump(kernel)
# print(f"kernel area: {(kernel.sum() * 2 - kernel[0]) / step_x}")
# print(f"MSE: {residuals[0] / vals.size}")
# downscaled_step_x = 3
# support_steps_x = 48
# support_x = -(-support_steps_x // step_x)
# margin_x += support_x * 2
# target_w = w - margin_x * 2
# vals = frames2[:, :, margin_x:margin_x+target_w].reshape(-1).astype(np.float64) / len(a_eps)
# coeff_used = [False] * support_steps_x
# for offset_x in range(step_x):
# for up in range(-support_steps_x + (offset_x or step_x), support_steps_x, step_x):
# coeff_used[abs(up)] = True
# for down in range(((up - support_steps_x) // downscaled_step_x + 1) * downscaled_step_x - up, support_steps_x, downscaled_step_x):
# coeff_used[abs(down)] = True
# assert all(coeff_used), coeff_used
# # with tqdm() as residual_pbar, tqdm() as jacobian_tbar:
# if True:
# def compute_residuals(kernel):
# rescaled = np.zeros((len(frs), h, target_w))
# for offset_x in range(step_x):
# for up in range(((-offset_x - support_steps_x) // step_x + 1) * step_x + offset_x, support_steps_x, step_x):
# for down in range(((up - support_steps_x) // downscaled_step_x + 1) * downscaled_step_x - up, support_steps_x, downscaled_step_x):
# rescaled[:, :, offset_x::step_x] += kernel[abs(up)] * kernel[abs(down)] * frames1[:, :, margin_x+offset_x+(up+down)//downscaled_step_x::step_x][:, :, :-((offset_x-target_w)//step_x)]
# residuals = downscaled_step_x / step_x / len(b_eps) * rescaled.reshape(-1) - vals
# # residual_pbar.update()
# return residuals
# def compute_jacobian(kernel):
# jacobian = np.zeros((len(frs), h, target_w, len(kernel)))
# for offset_x in range(step_x):
# for up in range(((-offset_x - support_steps_x) // step_x + 1) * step_x + offset_x, support_steps_x, step_x):
# for down in range(((up - support_steps_x) // downscaled_step_x + 1) * downscaled_step_x - up, support_steps_x, downscaled_step_x):
# if abs(up) != abs(down):
# jacobian[:, :, offset_x::step_x, abs(up)] += kernel[abs(down)] * frames1[:, :, margin_x+offset_x+(up+down)//downscaled_step_x::step_x][:, :, :-((offset_x-target_w)//step_x)]
# jacobian[:, :, offset_x::step_x, abs(down)] += kernel[abs(up)] * frames1[:, :, margin_x+offset_x+(up+down)//downscaled_step_x::step_x][:, :, :-((offset_x-target_w)//step_x)]
# else:
# # Same but faster and more precise
# jacobian[:, :, offset_x::step_x, abs(up)] += 2 * kernel[abs(up)] * frames1[:, :, margin_x+offset_x+(up+down)//downscaled_step_x::step_x][:, :, :-((offset_x-target_w)//step_x)]
# jacobian = downscaled_step_x / step_x / len(b_eps) * jacobian.reshape((-1, len(kernel)))
# # jacobian_tbar.update()
# return jacobian
# initial_guess = [1] + [math.sin(math.pi * i/step_x) * math.sin(math.pi * i/step_x / 6) / ((math.pi * i/step_x) ** 2 / 6) if i < 6*step_x else 0 for i in range(1, support_steps_x)]
# result = scipy.optimize.least_squares(compute_residuals, initial_guess, compute_jacobian, verbose=2, ftol=1e-10)
# dump(result.x)
# print(result.cost / vals.size)
# print(result.success)
from vstools import vs, core, get_y, get_u, get_v, depth, set_output
import math
import numpy as np
from matplotlib import pyplot as plt
from tqdm import tqdm
np.set_printoptions(threshold=np.inf)
def disc(n):
return depth(core.dgdecodenv.DGSource(f'D:/Downloads/YESPRECURE5/YESPRECURE5_DISC{n}.dgi'), 32)
def init_clips():
eps = {
# 1: disc(1)[30:43488],
# 2: disc(1)[43518:86978],
# 3: disc(1)[87008:130468],
# 7: disc(2)[30:43488],
# 9: disc(2)[87006:130464],
# 11: disc(2)[173982:217442],
# 13: disc(3)[30:43490],
16: disc(3)[130496:173956],
17: disc(3)[173986:217444],
19: disc(4)[30:43488],
20: disc(4)[43518:86976],
21: disc(4)[87006:130464],
22: disc(4)[130494:173952],
23: disc(4)[173982:217440],
24: disc(4)[217470:260930],
25: disc(5)[30:43488],
# 26: disc(5)[43518:86978].std.Expr([f'x {219 / 216} *']),
27: disc(5)[87008:130468],
28: disc(5)[130498:173956].std.Expr([f'x {1 / 219} + {219 / 220} *', f'x {1/14784} - {14784/14896} *', f'x {112 / 118} *']),
}
clip1 = [
# eps[ 1][3806:6354],
# eps[ 2][2698:5246],
# eps[ 3][1770:4318],
# eps[ 7][3776:6324],
# eps[ 9][4076:6624],
# eps[11][2698:5246],
# eps[13][2788:5336],
eps[16][2428:4976],
eps[17][1828:4376],
eps[21][2188:4736],
eps[24][720:3268],
eps[25][2308:4856],
]
# clip1_all = core.std.AverageFrames(clip1, [1] * len(clip1))
clip1 = [
eps[19][22634:22706],
eps[20][29207:29279],
eps[21][30124:30196],
eps[22][26400:26472],
eps[23][26019:26091],
eps[25][24970:25042],
eps[27][29855:29927],
]
# clip1_all += core.std.AverageFrames(clip1, [1] * len(clip1))
# clip1 = [
# eps[19][39860:39976],
# eps[20][39860:39976],
# eps[21][39860:39976],
# eps[22][39860:39976],
# eps[23][39860:39976],
# eps[25][39860:39976],
# ]
clip1 = core.std.AverageFrames(clip1, [1] * len(clip1))
# clip1 = clip1_all + clip1
# clip2 = eps[26][2488:5036]
# clip2 = eps[28][1828:4376]
clip2 = eps[28][28909:28981]
# clip2 = eps[25][2308:4856]
# clip2 = eps[27][2848:5396]
# clip2 += eps[28][39860:39976]
del eps
clip1 = clip1.std.Crop(left=32, right=32)
clip2 = clip2.std.Crop(left=32, right=32)
clip1_all = clip1[::1]
clip2_all = clip2[::1]
return clip1_all, clip2_all
if False:
clip1_all, clip2_all = init_clips()
clip1_y = get_y(clip1_all)
clip1_u = get_u(clip1_all)
clip1_v = get_v(clip1_all)
clip2 = get_u(clip2_all)
zero = 128
value_range = 224
w = clip2.width
h = clip2.height
nfr = clip2.num_frames
clip1_y = clip1_y.resize.Bilinear(w, h, src_left=-0.5)
eqns = np.ones((nfr, h * w, 4))
vals = np.zeros((nfr, h * w))
for fr, (frame1_y, frame1_u, frame1_v, frame2) in tqdm(enumerate(zip(clip1_y.frames(), clip1_u.frames(), clip1_v.frames(), clip2.frames())), total=nfr):
frame1_y = np.array(frame1_y, dtype=np.float32).reshape(h * w) * value_range + zero
frame1_u = np.array(frame1_u, dtype=np.float32).reshape(h * w) * value_range + zero
frame1_v = np.array(frame1_v, dtype=np.float32).reshape(h * w) * value_range + zero
frame2 = np.array(frame2, dtype=np.float32).reshape(h * w) * value_range + zero
eqns[fr, :, 0] = frame1_y
eqns[fr, :, 1] = frame1_u
eqns[fr, :, 2] = frame1_v
vals[fr, :] = frame2
eqns = eqns.reshape((nfr * h * w, 4))
vals = vals.reshape((nfr * h * w,))
kernel, residuals, rank, sings = np.linalg.lstsq(eqns, vals, rcond=None)
print(kernel.tolist())
mse = residuals[0] / len(vals)
print(f"MSE: {mse}")
print(f"sqrt(MSE): {math.sqrt(mse)}")
raise SystemExit
for plane in (1,): #range(3):
clip1_all, clip2_all = init_clips()
match plane:
case 0:
clip1 = get_y(clip1_all)
clip2 = get_y(clip2_all)
case 1:
clip1 = get_u(clip1_all)
clip2 = get_u(clip2_all)
case 2:
clip1 = get_v(clip1_all)
clip2 = get_v(clip2_all)
del clip1_all
del clip2_all
# clip1 = core.std.Expr([clip1], 'x 0 < x 0 ?')
# clip2 = core.std.Expr([clip1, clip2], 'x 0 < y 0 ?')
if plane:
zero = 128
value_range = 224
else:
zero = 16
value_range = 219
#set_output([clip1, clip2])
w = clip1.width
h = clip1.height
nfr = clip1.num_frames
eqns = np.ones((nfr, h * w, 2))
vals = np.zeros((nfr, h * w))
for fr, (frame1, frame2) in tqdm(enumerate(zip(clip1.frames(), clip2.frames())), total=nfr):
frame1 = np.array(frame1, dtype=np.float32).reshape(h * w) * value_range + zero
frame2 = np.array(frame2, dtype=np.float32).reshape(h * w) * value_range + zero
eqns[fr, :, 0] = frame1
vals[fr, :] = frame2
del clip1
del clip2
eqns = eqns.reshape((nfr * h * w, 2))
vals = vals.reshape((nfr * h * w,))
# plt.scatter(eqns[:, 0], vals)
# plt.show()
def solve():
kernel, residuals, rank, sings = np.linalg.lstsq(eqns, vals, rcond=None)
# print(kernel)
print(kernel.tolist())
if len(kernel) > 1:
print(f"= {kernel[0]}(x-{zero})+{zero}{kernel[1]-zero*(1-kernel[0]):+}")
offset = kernel[1] / (1 - kernel[0])
print(f"= {kernel[0]}(x{-offset:+}){offset:+}")
print(f"= {kernel[0]}(x{kernel[1]/kernel[0]:+})")
# plt.stem(list(range(support)), kernel[0,:])
# plt.stem(list(range(support)), kernel[:,0])
# plt.ylim((-1, 1))
# plt.show()
mse = residuals[0] / len(vals)
print(f"MSE: {mse}")
print(f"sqrt(MSE): {math.sqrt(mse)}")
print()
print("ax+b:")
solve()
print()
# print("ax:")
eqns = eqns[:, :1]
# solve()
# print()
print(f"a(x-{zero})+{zero}:")
eqns -= zero
vals -= zero
solve()
print()
print("x:")
mse = np.sum((eqns.ravel() - vals) ** 2) / len(vals)
print(f"MSE: {mse}")
print(f"sqrt(MSE): {math.sqrt(mse)}")
print()
# outdated TODO:
# * Drop pixels where either coef or val is too close to range extremes, in order to avoid issues caused by clipping.
# * Compare sections using the Chow test.
# * Drop frames whose field matches aren't 'c'.
# * Estimate each frame's error variance and divide the frame's equations by that.
# * Box-downscale all clips to a single sample per macroblock.
# * Test the normality of the residual distribution (see the sketch after this list).
# * Test whether the mean of the residual distribution is zero. (This is guaranteed with an intercept but not without it.)
# * Test whether y=x is plausible.
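# Minimal sketch of the distribution checks in the TODO above (assumed helper, not wired into
# any of these scripts): given a flat array of fit residuals, test normality and zero mean.
def check_residuals(residuals):
    import scipy.stats as sps
    _, p_normal = sps.normaltest(residuals)           # D'Agostino-Pearson test of normality
    _, p_zero_mean = sps.ttest_1samp(residuals, 0.0)  # H0: the residual mean is zero
    return p_normal, p_zero_mean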
import vskernels as kernels
from vstools import core, depth, get_y, get_u, get_v
from fractions import Fraction
import math
import numpy as np
import scipy.stats as sps
from tqdm import tqdm
def disc(n):
return core.dgdecodenv.DGSource(f'D:/Downloads/YESPRECURE5/YESPRECURE5_DISC{n}.dgi')
episodes = {
1: lambda: disc(1)[30:43488],
2: lambda: disc(1)[43518:86978],
3: lambda: disc(1)[87008:130468],
4: lambda: disc(1)[130498:173956],
5: lambda: disc(1)[173986:217444],
6: lambda: disc(1)[217474:260932],
7: lambda: disc(2)[30:43488],
8: lambda: disc(2)[43518:86976],
9: lambda: disc(2)[87006:130464],
10: lambda: disc(2)[130494:173952],
11: lambda: disc(2)[173982:217442],
12: lambda: disc(2)[217472:260930],
13: lambda: disc(3)[30:43490],
14: lambda: disc(3)[43520:86978],
15: lambda: disc(3)[87008:130466],
16: lambda: disc(3)[130496:173956],
17: lambda: disc(3)[173986:217444],
18: lambda: disc(3)[217474:260932],
19: lambda: disc(4)[30:43488],
20: lambda: disc(4)[43518:86976],
21: lambda: disc(4)[87006:130464],
22: lambda: disc(4)[130494:173952],
23: lambda: disc(4)[173982:217440],
24: lambda: disc(4)[217470:260930],
# 25: lambda: depth(depth(disc(5)[30:43488], 32).std.Expr([f'x {0.019267391547870598/224} + 0.9999244799156317 /']), 8),
25: lambda: disc(5)[30:43488],
26: lambda: disc(5)[43518:86978], #.std.Expr([f'x {219 / 216} *']),
27: lambda: disc(5)[87008:130468],
# 28: lambda: depth(depth(disc(5)[130498:173956], 32).std.Expr([f'x {1 / 219} + {219 / 220} *', '', f'x {112 / 118} *']), 8),
# 28: lambda: disc(5)[130498:173956].std.Expr([f'x 15 - {219 / 220} * 16 +', '', f'x 128 - {112 / 118} * 128 +']),
28: lambda: disc(5)[130498:173956],
29: lambda: disc(5)[173986:217444],
30: lambda: disc(5)[217474:260932],
31: lambda: disc(6)[30:43488],
32: lambda: disc(6)[43518:86976],
33: lambda: disc(6)[87006:130466],
34: lambda: disc(6)[130496:173954],
35: lambda: disc(6)[173984:217442],
36: lambda: disc(6)[217472:260930],
37: lambda: disc(7)[30:43490],
38: lambda: disc(7)[43520:86978],
39: lambda: disc(7)[87008:130468],
40: lambda: disc(7)[130498:173956],
41: lambda: disc(7)[173986:217444],
42: lambda: disc(7)[217474:260932],
43: lambda: disc(8)[30:43488],
44: lambda: disc(8)[43518:86978],
45: lambda: disc(8)[87008:130468],
46: lambda: disc(8)[130498:173958],
47: lambda: disc(8)[173988:217446],
48: lambda: disc(8)[217476:260936],
49: lambda: disc(8)[260966:304426],
}
bad_epnos = frozenset({26, 28})
# Low-passed backgrounds, less low-passed text, low-contrast borders
op_intro1a_2a = 97, {
1: 3806,
2: 2698,
3: 1770,
4: 1290,
5: 1170,
6: 3418,
7: 3776,
8: 1560,
9: 4076,
10: 1440,
11: 2698,
12: 1530,
13: 2788,
}
# Full-band backgrounds and text, low-contrast borders
op_intro1b = 1, {
14: 3388,
15: 2968,
16: 2428,
17: 1828,
18: 3208,
19: 2638,
21: 2188,
22: 1680,
23: 4886,
24: 720,
25: 2308,
26: 2488,
27: 2848,
28: 1828,
29: 3358,
30: 1200,
31: 1918,
32: 1620,
33: 2518,
34: 3598,
48: 840,
49: 510,
}
# Low-passed backgrounds and text, high-contrast borders
op_intro1c_2c = 97, {
35: 3568,
36: 1740,
37: 2698,
38: 2248,
39: 2848,
40: 2218,
41: 1470,
42: 4676,
}
# Low-passed backgrounds, less low-passed text, high-contrast borders
op_intro1d_2d = 97, {
43: 1380,
44: 1410,
45: 2608,
46: 2368,
47: 4076,
}
# Full-band backgrounds and text
op_intro2b = 96, {
14: 3389,
15: 2969,
16: 2429,
17: 1829,
18: 3209,
19: 2639,
20: 2878,
21: 2189,
22: 1681,
23: 4887,
24: 721,
25: 2309,
26: 2489,
27: 2849,
28: 1829,
29: 3359,
30: 1201,
31: 1 + 1918,
32: 1 + 1620,
33: 1 + 2518,
34: 1 + 3598,
48: 1 + 840,
49: 1 + 510,
}
# Low-passed backgrounds, less low-passed text, low-contrast borders
op_butterfly_a = 240, {
1: 3903,
2: 2795,
3: 1867,
4: 1387,
5: 1267,
6: 3515,
7: 3873,
8: 1657,
9: 4173,
10: 1537,
11: 2795,
12: 1627,
13: 2885,
}
# Full-band backgrounds and text, low-contrast borders
op_butterfly_b = 240, {
14: 3485,
15: 3065,
16: 2525,
17: 1925,
18: 3305,
19: 2735,
20: 2974,
21: 2285,
22: 1777,
23: 4983,
24: 817,
25: 2405,
26: 2585,
27: 2945,
28: 1925,
29: 3455,
30: 1297,
31: 2015,
32: 1717,
33: 2615,
34: 3695,
48: 937,
49: 607,
}
# Low-passed backgrounds and text, high-contrast borders
op_butterfly_c = 240, {
35: 3665,
36: 1837,
37: 2795,
38: 2345,
39: 2945,
40: 2315,
41: 1567,
42: 4773,
}
# Low-passed backgrounds, less low-passed text, high-contrast borders
# including 4+5+6 textless frames shared with op_butterfly_c
op_butterfly_d = 240, {
43: 1477,
44: 1507,
45: 2705,
46: 2465,
47: 4173,
}
# OP1 (no Milk), low-passed backgrounds, less low-passed text, low-contrast borders
op_fairies_a = 72, {
1: 240 + 3903,
2: 240 + 2795,
3: 240 + 1867,
4: 240 + 1387,
5: 240 + 1267,
6: 240 + 3515,
7: 240 + 3873,
8: 240 + 1657,
9: 240 + 4173,
10: 240 + 1537,
11: 240 + 2795,
12: 240 + 1627,
13: 240 + 2885,
}
# OP1 (no Milk), full-band backgrounds and text, low-contrast borders
op_fairies_b = 72, {
14: 240 + 3485,
15: 240 + 3065,
16: 240 + 2525,
17: 240 + 1925,
18: 240 + 3305,
19: 240 + 2735,
20: 240 + 2974,
21: 240 + 2285,
22: 240 + 1777,
23: 240 + 4983,
24: 240 + 817,
25: 240 + 2405,
26: 240 + 2585,
27: 240 + 2945,
28: 240 + 1925,
29: 240 + 3455,
30: 240 + 1297,
31: 240 + 2015,
32: 240 + 1717,
33: 240 + 2615,
34: 240 + 3695,
}
# OP2 (Milk present), low-passed backgrounds and text, high-contrast borders
op_fairies_c = 72, {
35: 240 + 3665,
36: 240 + 1837,
37: 240 + 2795,
38: 240 + 2345,
39: 240 + 2945,
40: 240 + 2315,
41: 240 + 1567,
42: 240 + 4773,
}
# OP2 (Milk present), low-passed backgrounds, less low-passed text, high-contrast borders
op_fairies_d = 72, {
43: 240 + 1477,
44: 240 + 1507,
45: 240 + 2705,
46: 240 + 2465,
47: 240 + 4173,
}
# OP2 (Milk present), full-band backgrounds and text, low-contrast borders
op_fairies_e = 72, {
48: 240 + 937,
49: 240 + 607,
}
# Low-passed, low-contrast borders
op_logo_a = 176, {
1: 312 + 3903,
2: 312 + 2795,
3: 312 + 1867,
4: 312 + 1387,
5: 312 + 1267,
6: 312 + 3515,
7: 312 + 3873,
8: 312 + 1657,
9: 312 + 4173,
10: 312 + 1537,
11: 312 + 2795,
12: 312 + 1627,
13: 312 + 2885,
}
# Full-band, low-contrast borders
op_logo_b = 176, {
14: 312 + 3485,
15: 312 + 3065,
16: 312 + 2525,
17: 312 + 1925,
18: 312 + 3305,
19: 312 + 2735,
20: 312 + 2974,
21: 312 + 2285,
22: 312 + 1777,
23: 312 + 4983,
24: 312 + 817,
25: 312 + 2405,
26: 312 + 2585,
27: 312 + 2945,
28: 312 + 1925,
29: 312 + 3455,
30: 312 + 1297,
31: 312 + 2015,
32: 312 + 1717,
33: 312 + 2615,
34: 312 + 3695,
48: 312 + 937,
49: 312 + 607,
}
# Low-passed, high-contrast borders
op_logo_c = 176, {
35: 312 + 3665,
36: 312 + 1837,
37: 312 + 2795,
38: 312 + 2345,
39: 312 + 2945,
40: 312 + 2315,
41: 312 + 1567,
42: 312 + 4773,
43: 312 + 1477,
44: 312 + 1507,
45: 312 + 2705,
46: 312 + 2465,
47: 312 + 4173,
}
# Low-passed backgrounds, less low-passed text, low-contrast borders, Oono Itsuo credited, old ABC logo
op_planning_a = 119, {
1: 488 + 3903,
2: 488 + 2795,
3: 488 + 1867,
4: 488 + 1387,
5: 488 + 1267,
6: 488 + 3515,
7: 488 + 3873,
8: 488 + 1657,
9: 488 + 4173,
10: 488 + 1537,
11: 488 + 2795,
12: 488 + 1627,
13: 488 + 2885,
}
# Full-band backgrounds and text, low-contrast borders, Nishide Masayuki credited, old ABC logo
op_planning_b = 119, {
14: 488 + 3485,
15: 488 + 3065,
16: 488 + 2525,
17: 488 + 1925,
18: 488 + 3305,
19: 488 + 2735,
20: 488 + 2974,
21: 488 + 2285,
22: 488 + 1777,
23: 488 + 4983,
24: 488 + 817,
25: 488 + 2405,
26: 488 + 2585,
27: 488 + 2945,
28: 488 + 1925,
29: 488 + 3455,
30: 488 + 1297,
31: 488 + 2015,
32: 488 + 1717,
33: 488 + 2615,
34: 488 + 3695,
}
# Low-passed backgrounds and text, high-contrast borders, Nishide Masayuki credited, old ABC logo
op_planning_c = 119, {
35: 488 + 3665,
36: 488 + 1837,
37: 488 + 2795,
38: 488 + 2345,
39: 488 + 2945,
40: 488 + 2315,
41: 488 + 1567,
42: 488 + 4773,
}
# Low-passed backgrounds, less low-passed text, high-contrast borders, Nishide Masayuki credited, old ABC logo
op_planning_d = 119, {
43: 488 + 1477,
44: 488 + 1507,
45: 488 + 2705,
46: 488 + 2465,
}
# Low-passed backgrounds, less low-passed text, high-contrast borders, Nishide Masayuki credited, new ABC logo
op_planning_e = 119, {
47: 488 + 4173,
}
# Full-band backgrounds and text, low-contrast borders, Nishide Masayuki credited, new ABC logo
op_planning_f = 119, {
48: 488 + 937,
49: 488 + 607,
}
# Low-passed, low-contrast borders
op_ball_a = 50, {
1: 607 + 3903,
2: 607 + 2795,
3: 607 + 1867,
4: 607 + 1387,
5: 607 + 1267,
6: 607 + 3515,
7: 607 + 3873,
8: 607 + 1657,
9: 607 + 4173,
10: 607 + 1537,
11: 607 + 2795,
12: 607 + 1627,
13: 607 + 2885,
}
# Full-band, low-contrast borders
op_ball_b = 50, {
14: 607 + 3485,
15: 607 + 3065,
16: 607 + 2525,
17: 607 + 1925,
18: 607 + 3305,
19: 607 + 2735,
20: 607 + 2974,
21: 607 + 2285,
22: 607 + 1777,
23: 607 + 4983,
24: 607 + 817,
25: 607 + 2405,
26: 607 + 2585,
27: 607 + 2945,
28: 607 + 1925,
29: 607 + 3455,
30: 607 + 1297,
31: 607 + 2015,
32: 607 + 1717,
33: 607 + 2615,
34: 607 + 3695,
48: 607 + 937,
49: 607 + 607,
}
# Low-passed, high-contrast borders
op_ball_c = 50, {
35: 607 + 3665,
36: 607 + 1837,
37: 607 + 2795,
38: 607 + 2345,
39: 607 + 2945,
40: 607 + 2315,
41: 607 + 1567,
42: 607 + 4773,
43: 607 + 1477,
44: 607 + 1507,
45: 607 + 2705,
46: 607 + 2465,
47: 607 + 4173,
}
# Low-passed backgrounds, less low-passed text, low-contrast borders, old ABC logo
op_producers_a = 82 + 65, {
1: 657 + 3903,
2: 657 + 2795,
3: 657 + 1867,
4: 657 + 1387,
5: 657 + 1267,
6: 657 + 3515,
7: 657 + 3873,
8: 657 + 1657,
9: 657 + 4173,
10: 657 + 1537,
11: 657 + 2795,
12: 657 + 1627,
13: 657 + 2885,
}
# Full-band backgrounds and text, low-contrast borders, old ABC logo
op_producers_b = 82 + 65, {
14: 657 + 3485,
15: 657 + 3065,
16: 657 + 2525,
17: 657 + 1925,
18: 657 + 3305,
19: 657 + 2735,
20: 657 + 2974,
21: 657 + 2285,
22: 657 + 1777,
23: 657 + 4983,
24: 657 + 817,
25: 657 + 2405,
26: 657 + 2585,
27: 657 + 2945,
28: 657 + 1925,
29: 657 + 3455,
30: 657 + 1297,
31: 657 + 2015,
32: 657 + 1717,
33: 657 + 2615,
34: 657 + 3695,
}
# Low-passed backgrounds and text, high-contrast borders, old ABC logo
op_producers_c = 82 + 65, {
35: 657 + 3665,
36: 657 + 1837,
37: 657 + 2795,
38: 657 + 2345,
39: 657 + 2945,
40: 657 + 2315,
41: 657 + 1567,
42: 657 + 4773,
}
# Low-passed backgrounds, less low-passed text, high-contrast borders, old ABC logo
op_producers_d = 82 + 65, {
43: 657 + 1477,
44: 657 + 1507,
45: 657 + 2705,
46: 657 + 2465,
}
# Low-passed backgrounds, less low-passed text, high-contrast borders, new ABC logo
op_producers_e = 82 + 65, {
47: 657 + 4173,
}
# Full-band backgrounds and text, low-contrast borders, new ABC logo
op_producers_f = 82 + 65, {
48: 657 + 937,
49: 657 + 607,
}
# Low-passed backgrounds, less low-passed text, low-contrast borders
op_school_a = 463, {
1: 804 + 3903,
2: 804 + 2795,
3: 804 + 1867,
4: 804 + 1387,
5: 804 + 1267,
6: 804 + 3515,
7: 804 + 3873,
8: 804 + 1657,
9: 804 + 4173,
10: 804 + 1537,
11: 804 + 2795,
12: 804 + 1627,
13: 804 + 2885,
}
# Full-band backgrounds and text, low-contrast borders
op_school_b = 463, {
14: 804 + 3485,
15: 804 + 3065,
16: 804 + 2525,
17: 804 + 1925,
18: 804 + 3305,
19: 804 + 2735,
20: 804 + 2974,
21: 804 + 2285,
22: 804 + 1777,
23: 804 + 4983,
24: 804 + 817,
25: 804 + 2405,
26: 804 + 2585,
27: 804 + 2945,
28: 804 + 1925,
29: 804 + 3455,
30: 804 + 1297,
31: 804 + 2015,
32: 804 + 1717,
33: 804 + 2615,
34: 804 + 3695,
48: 804 + 937,
49: 804 + 607,
}
# Low-passed backgrounds and text, high-contrast borders
op_school_c = 463, {
35: 804 + 3665,
36: 804 + 1837,
37: 804 + 2795,
38: 804 + 2345,
39: 804 + 2945,
40: 804 + 2315,
41: 804 + 1567,
42: 804 + 4773,
}
# Low-passed backgrounds, less low-passed text, high-contrast borders
# including 10+30+5 textless frames shared with op_school_c
op_school_d = 463, {
43: 804 + 1477,
44: 804 + 1507,
45: 804 + 2705,
46: 804 + 2465,
47: 804 + 4173,
}
# ED1, low-passed backgrounds, less low-passed text, low-contrast borders
op_songs_a = 144, {
1: 1267 + 3903,
2: 1267 + 2795,
3: 1267 + 1867,
4: 1267 + 1387,
5: 1267 + 1267,
6: 1267 + 3515,
7: 1267 + 3873,
8: 1267 + 1657,
9: 1267 + 4173,
10: 1267 + 1537,
11: 1267 + 2795,
12: 1267 + 1627,
13: 1267 + 2885,
}
# ED1, full-band backgrounds and text
op_songs_b = 144, {
14: 1267 + 3485,
15: 1267 + 3065,
16: 1267 + 2525,
17: 1267 + 1925,
18: 1267 + 3305,
19: 1267 + 2735,
20: 1267 + 2974,
21: 1267 + 2285,
22: 1267 + 1777,
23: 1267 + 4983,
24: 1267 + 817,
25: 1267 + 2405,
26: 1267 + 2585,
27: 1267 + 2945,
28: 1267 + 1925,
29: 1267 + 3455,
30: 1267 + 1297,
31: 1267 + 2015,
32: 1267 + 1717,
}
# ED2, full-band backgrounds and text, low-contrast borders
op_songs_c = 144, {
33: 1267 + 2615,
34: 1267 + 3695,
48: 1267 + 937,
49: 1267 + 607,
}
# ED2, low-passed backgrounds and text, high-contrast borders
op_songs_d = 144, {
35: 1267 + 3665,
36: 1267 + 1837,
37: 1267 + 2795,
38: 1267 + 2345,
39: 1267 + 2945,
40: 1267 + 2315,
41: 1267 + 1567,
42: 1267 + 4773,
}
# ED2, low-passed backgrounds, less low-passed text, high-contrast borders
op_songs_e = 144, {
43: 1267 + 1477,
44: 1267 + 1507,
45: 1267 + 2705,
46: 1267 + 2465,
47: 1267 + 4173,
}
# Low-passed backgrounds, less low-passed text, low-contrast borders
op_flying_a = 79, {
1: 1411 + 3903,
2: 1411 + 2795,
3: 1411 + 1867,
4: 1411 + 1387,
5: 1411 + 1267,
6: 1411 + 3515,
7: 1411 + 3873,
8: 1411 + 1657,
9: 1411 + 4173,
10: 1411 + 1537,
11: 1411 + 2795,
12: 1411 + 1627,
13: 1411 + 2885,
}
# Full-band backgrounds and text, low-contrast borders
op_flying_b = 79, {
14: 1411 + 3485,
15: 1411 + 3065,
16: 1411 + 2525,
17: 1411 + 1925,
18: 1411 + 3305,
19: 1411 + 2735,
20: 1411 + 2974,
21: 1411 + 2285,
22: 1411 + 1777,
23: 1411 + 4983,
24: 1411 + 817,
25: 1411 + 2405,
26: 1411 + 2585,
27: 1411 + 2945,
28: 1411 + 1925,
29: 1411 + 3455,
30: 1411 + 1297,
31: 1411 + 2015,
32: 1411 + 1717,
33: 1411 + 2615,
34: 1411 + 3695,
48: 1411 + 937,
49: 1411 + 607,
}
# Low-passed backgrounds and text, high-contrast borders
op_flying_c = 79, {
35: 1411 + 3665,
36: 1411 + 1837,
37: 1411 + 2795,
38: 1411 + 2345,
39: 1411 + 2945,
40: 1411 + 2315,
41: 1411 + 1567,
42: 1411 + 4773,
}
# Low-passed backgrounds, less low-passed text, high-contrast borders
op_flying_d = 79, {
43: 1411 + 1477,
44: 1411 + 1507,
45: 1411 + 2705,
46: 1411 + 2465,
47: 1411 + 4173,
}
# OP1 (attacks, no Milk, character designer credit in the center), low-passed backgrounds, less low-passed text, low-contrast borders
op_attacks_a = 565 + 32, {
1: 1490 + 3903,
2: 1490 + 2795,
3: 1490 + 1867,
4: 1490 + 1387,
5: 1490 + 1267,
6: 1490 + 3515,
7: 1490 + 3873,
8: 1490 + 1657,
9: 1490 + 4173,
10: 1490 + 1537,
11: 1490 + 2795,
12: 1490 + 1627,
13: 1490 + 2885,
}
# OP1 (attacks, no Milk, character designer credit in the center), full-band backgrounds and text, low-contrast borders
op_attacks_b = 565 + 32, {
14: 1490 + 3485,
15: 1490 + 3065,
16: 1490 + 2525,
17: 1490 + 1925,
18: 1490 + 3305,
19: 1490 + 2735,
20: 1490 + 2974,
21: 1490 + 2285,
22: 1490 + 1777,
23: 1490 + 4983,
24: 1490 + 817,
25: 1490 + 2405,
26: 1490 + 2585,
27: 1490 + 2945,
28: 1490 + 1925,
29: 1490 + 3455,
30: 1490 + 1297,
31: 1490 + 2015,
32: 1490 + 1717,
33: 1490 + 2615,
34: 1490 + 3695,
}
# OP2 (attacks, Milk present, character designer credit on the left), low-passed backgrounds and text, high-contrast borders
op_attacks_c = 565 + 32, {
35: 1490 + 3665,
36: 1490 + 1837,
37: 1490 + 2795,
38: 1490 + 2345,
39: 1490 + 2945,
40: 1490 + 2315,
41: 1490 + 1567,
42: 1490 + 4773,
}
# OP2 (attacks, Milk present, character designer credit on the left), low-passed backgrounds, less low-passed text, high-contrast borders
op_attacks_d = 565 + 32, {
43: 1490 + 1477,
44: 1490 + 1507,
45: 1490 + 2705,
46: 1490 + 2465,
47: 1490 + 4173,
}
# OP2 (attacks, Milk present, character designer credit on the left), full-band backgrounds and text, low-contrast borders
op_attacks_e = 565 + 32, {
48: 1490 + 937,
49: 1490 + 607,
}
# Low-passed backgrounds, less low-passed text, low-contrast borders
op_books_a = 128, {
1: 2087 + 3903,
2: 2087 + 2795,
3: 2087 + 1867,
4: 2087 + 1387,
5: 2087 + 1267,
6: 2087 + 3515,
7: 2087 + 3873,
8: 2087 + 1657,
9: 2087 + 4173,
10: 2087 + 1537,
11: 2087 + 2795,
12: 2087 + 1627,
13: 2087 + 2885,
}
# Full-band backgrounds and text, low-contrast borders
op_books_b = 128, {
14: 2087 + 3485,
15: 2087 + 3065,
16: 2087 + 2525,
17: 2087 + 1925,
18: 2087 + 3305,
19: 2087 + 2735,
20: 2087 + 2974,
21: 2087 + 2285,
22: 2087 + 1777,
23: 2087 + 4983,
24: 2087 + 817,
25: 2087 + 2405,
26: 2087 + 2585,
27: 2087 + 2945,
28: 2087 + 1925,
29: 2087 + 3455,
30: 2087 + 1297,
31: 2087 + 2015,
32: 2087 + 1717,
33: 2087 + 2615,
34: 2087 + 3695,
48: 2087 + 937,
49: 2087 + 607,
}
# Low-passed backgrounds and text, high-contrast borders
op_books_c = 128, {
35: 2087 + 3665,
36: 2087 + 1837,
37: 2087 + 2795,
38: 2087 + 2345,
39: 2087 + 2945,
40: 2087 + 2315,
41: 2087 + 1567,
42: 2087 + 4773,
}
# Low-passed backgrounds, less low-passed text, high-contrast borders
# including 4 textless frames shared with op_books_c
op_books_d = 128, {
43: 2087 + 1477,
44: 2087 + 1507,
45: 2087 + 2705,
46: 2087 + 2465,
47: 2087 + 4173,
}
# OP1 (no Milk), low-passed backgrounds, less low-passed text, low-contrast borders
op_everyone_a = 75, {
1: 2215 + 3903,
2: 2215 + 2795,
3: 2215 + 1867,
4: 2215 + 1387,
5: 2215 + 1267,
6: 2215 + 3515,
7: 2215 + 3873,
8: 2215 + 1657,
9: 2215 + 4173,
10: 2215 + 1537,
11: 2215 + 2795,
12: 2215 + 1627,
13: 2215 + 2885,
}
# OP1 (no Milk), full-band backgrounds and text, low-contrast borders
op_everyone_b = 75, {
14: 2215 + 3485,
15: 2215 + 3065,
16: 2215 + 2525,
17: 2215 + 1925,
18: 2215 + 3305,
19: 2215 + 2735,
20: 2215 + 2974,
21: 2215 + 2285,
22: 2215 + 1777,
23: 2215 + 4983,
24: 2215 + 817,
25: 2215 + 2405,
26: 2215 + 2585,
27: 2215 + 2945,
28: 2215 + 1925,
29: 2215 + 3455,
30: 2215 + 1297,
31: 2215 + 2015,
32: 2215 + 1717,
33: 2215 + 2615,
34: 2215 + 3695,
}
# OP2 (Milk present), low-passed backgrounds and text, high-contrast borders
op_everyone_c = 75, {
35: 2215 + 3665,
36: 2215 + 1837,
37: 2215 + 2795,
38: 2215 + 2345,
39: 2215 + 2945,
40: 2215 + 2315,
41: 2215 + 1567,
42: 2215 + 4773,
}
# OP2 (Milk present), low-passed backgrounds, less low-passed text, high-contrast borders
op_everyone_d = 75, {
43: 2215 + 1477,
44: 2215 + 1507,
45: 2215 + 2705,
46: 2215 + 2465,
47: 2215 + 4173,
}
# OP2 (Milk present), full-band backgrounds and text, low-contrast borders
op_everyone_e = 75, {
48: 2215 + 937,
49: 2215 + 607,
}
# Low-passed backgrounds, less low-passed text, low-contrast borders, old ABC logo
op_rainbow1a_2a = 159, {
1: 6193,
2: 5085,
3: 4157,
4: 3677,
5: 3557,
6: 5805,
7: 6163,
8: 3947,
9: 6463,
10: 3827,
11: 5085,
12: 3917,
13: 5175,
}
# Full-band backgrounds and text, low-contrast borders
op_rainbow1b_1f = 13, {
14: 5775,
15: 5355,
16: 4815,
17: 4215,
18: 5595,
19: 5025,
20: 5264,
21: 4575,
22: 4067,
23: 7273,
24: 3107,
25: 4695,
26: 4875,
27: 5235,
28: 4215,
29: 5745,
30: 3587,
31: 4305,
32: 4007,
33: 4905,
34: 5985,
48: 3227,
49: 2897,
}
# Low-passed backgrounds and text, high-contrast borders, old ABC logo
op_rainbow1c_2c = 159, {
35: 5955,
36: 4127,
37: 5246,
38: 4796,
39: 5396,
40: 4766,
41: 4016,
42: 7222,
}
# Low-passed backgrounds, less low-passed text, high-contrast borders
op_rainbow1d_1e = 13, {
43: 3926,
44: 3956,
45: 5156,
46: 4916,
47: 6463,
}
# Full-band backgrounds and text, low-contrast borders, old ABC logo
op_rainbow2b = 146, {
14: 13 + 5775,
15: 13 + 5355,
16: 13 + 4815,
17: 13 + 4215,
18: 13 + 5595,
19: 13 + 5025,
20: 13 + 5264,
21: 13 + 4575,
22: 13 + 4067,
23: 13 + 7273,
24: 13 + 3107,
25: 13 + 4695,
26: 13 + 4875,
27: 13 + 5235,
28: 13 + 4215,
29: 13 + 5745,
30: 13 + 3587,
31: 13 + 4305,
32: 13 + 4007,
33: 13 + 4905,
34: 13 + 5985,
}
# Low-passed backgrounds, less low-passed text, high-contrast borders, old ABC logo
op_rainbow2d = 146, {
43: 13 + 3926,
44: 13 + 3956,
45: 13 + 5156,
46: 13 + 4916,
}
# Low-passed backgrounds, less low-passed text, high-contrast borders, new ABC logo
op_rainbow2e = 146, {
47: 13 + 6463,
}
# Full-band backgrounds and text, low-contrast borders, new ABC logo
op_rainbow2f = 146, {
48: 13 + 3227,
49: 13 + 2897,
}
# Low-passed backgrounds, less low-passed text, low-contrast borders, old ABC logo
op_rainbow3a_4a = 2, {
1: 159 + 6193,
2: 159 + 5085,
3: 159 + 4157,
7: 159 + 6163,
9: 159 + 6463,
11: 159 + 5085,
13: 159 + 5175,
}
# Full-band backgrounds and text, low-contrast borders, old ABC logo
op_rainbow3b = 1, {
16: 159 + 4815,
17: 159 + 4215,
19: 159 + 5025,
20: 159 + 5264,
21: 159 + 4575,
24: 159 + 3107,
25: 159 + 4695,
26: 159 + 4875,
27: 159 + 5235,
28: 159 + 4215,
31: 159 + 4305,
33: 159 + 4905,
}
# Low-passed backgrounds and text, high-contrast borders, old ABC logo
op_rainbow3c_4c = 2, {
37: 159 + 5246,
38: 159 + 4796,
39: 159 + 5396,
40: 159 + 4766,
}
# Low-passed backgrounds, less low-passed text, high-contrast borders, old ABC logo
op_rainbow3d_4d = 2, {
45: 159 + 5156,
46: 159 + 4916,
}
# Low-passed backgrounds, less low-passed text, high-contrast borders, new ABC logo
op_rainbow3e_4e = 2, {
47: 159 + 6463,
}
# Full-band backgrounds and text, low-contrast borders, new ABC logo
op_rainbow3f_4f = 2, {
48: 159 + 3227,
49: 159 + 2897,
}
# Full-band backgrounds and text, low-contrast borders, old ABC logo
op_rainbow4b = 1, {
16: 160 + 4815,
17: 160 + 4215,
19: 160 + 5025,
21: 160 + 4575,
24: 160 + 3107,
25: 160 + 4695,
26: 160 + 4875,
27: 160 + 5235,
28: 160 + 4215,
31: 160 + 4305,
33: 160 + 4905,
}
eyecatch1_1 = 97 + 111, {
2: 20890,
3: 17234,
4: 22568,
5: 19510,
6: 19540,
7: 18282,
8: 21398,
9: 21398,
10: 20978,
11: 16904,
12: 19270,
13: 20020,
14: 19390,
15: 20858,
16: 18582,
17: 19810,
18: 22356,
19: 18882,
20: 20438,
21: 19960,
22: 20288,
23: 23494,
24: 18102,
25: 23438,
26: 21788,
27: 20440,
28: 22628,
29: 20858,
30: 18820,
31: 20918,
32: 19090,
33: 19330,
34: 20798,
}
eyecatch1_2 = 2, {
2: 21098,
3: 17442,
4: 22776,
5: 19718,
6: 19748,
7: 18490,
8: 21606,
9: 21606,
10: 21186,
11: 17112,
12: 19478,
13: 20228,
14: 19598,
15: 21066,
16: 18790,
17: 20018,
18: 22564,
19: 19090,
20: 20646,
21: 20168,
22: 20496,
23: 23702,
24: 18310,
26: 21996,
27: 20648,
29: 21066,
30: 19028,
31: 21126,
32: 19298,
33: 19538,
34: 21006,
}
eyecatch1_3 = 2, {
13: 20230,
16: 18792,
18: 22566,
20: 20648,
22: 20498,
23: 23704,
24: 18312,
26: 21998,
27: 20650,
30: 19030,
33: 19540,
}
eyecatch1_4a = 86, {
1: 22896,
2: 21100,
3: 17444,
4: 22778,
5: 19720,
6: 19750,
7: 18492,
}
eyecatch1_4b = 86, {
8: 21608,
9: 21608,
10: 21188,
11: 17114,
12: 19480,
13: 20232,
14: 19600,
15: 21068,
16: 18794,
17: 20020,
18: 22568,
19: 19092,
20: 20650,
21: 20170,
22: 20500,
23: 23706,
24: 18314,
25: 23646,
26: 22000,
27: 20652,
28: 22836,
29: 21068,
30: 19032,
31: 21128,
32: 19300,
33: 19542,
34: 21008,
}
eyecatch1_5 = 124, {
2: 21186,
3: 17530,
4: 22864,
5: 19806,
6: 19836,
7: 18578,
8: 21694,
9: 21694,
10: 21274,
11: 17200,
12: 19566,
13: 20318,
14: 19686,
15: 21154,
16: 18880,
17: 20106,
18: 22654,
19: 19178,
20: 20736,
21: 20256,
22: 20586,
23: 23792,
24: 18400,
25: 23732,
26: 22086,
27: 20738,
28: 22922,
29: 21154,
30: 19118,
31: 21214,
32: 19386,
33: 19628,
34: 21094,
}
eyecatch2_1 = 7, {
35: 19240,
36: 22268,
37: 19960,
38: 22628,
39: 21220,
40: 19690,
41: 21728,
42: 21816,
43: 21068,
44: 20168,
}
eyecatch2_2a = 166 + 4, {
35: 7 + 19240,
36: 7 + 22268,
37: 7 + 19960,
38: 7 + 22628,
39: 7 + 21220,
40: 7 + 19690,
}
eyecatch2_2b = 166 + 4, {
41: 21735,
42: 21823,
43: 21075,
44: 7 + 20168,
}
eyecatch2_3a = 31, {
35: 19417,
36: 22445,
37: 20137,
38: 22805,
39: 21397,
40: 19867,
}
eyecatch2_3b = 33, {
41: 21905,
42: 21993,
43: 21245,
44: 20345,
}
eyecatch2_4a = 2, {
35: 31 + 19417,
36: 31 + 22445,
37: (31 + 20137, 20170),
39: 31 + 21397,
40: 31 + 19867,
}
eyecatch2_4b = 2, {
42: 22026,
44: 33 + 20345,
}
eyecatch2_5a = 210, {
35: 19450,
36: 22478,
37: 20172,
38: 22836,
39: 21430,
40: 19900,
}
eyecatch2_5b = 210, {
41: 21938,
42: 22028,
43: 21278,
44: 20380,
}
ed1_1 = 116, {
1: 39860,
2: 39862,
3: 39862,
4: 39860,
5: 39860,
6: 39860,
7: 39860,
8: 39860,
9: 39860,
10: 39860,
11: 39862,
12: 39860,
13: 39862,
14: 39860,
15: 39860,
16: 39862,
17: 39860,
18: 39860,
19: 39860,
20: 39860,
21: 39860,
22: 39860,
23: 39860,
24: 39862,
25: 39860,
26: 39862,
27: 39862,
28: 39860,
29: 39860,
30: 39860,
31: 39860,
32: 39860,
}
ed1_3 = 129, {
1: 251 + 39860,
2: 251 + 39862,
3: 251 + 39862,
4: 251 + 39860,
5: 251 + 39860,
6: 251 + 39860,
7: 251 + 39860,
8: 251 + 39860,
9: 251 + 39860,
10: 251 + 39860,
11: 251 + 39862,
12: 251 + 39860,
13: 251 + 39862,
14: 251 + 39860,
15: 251 + 39860,
16: 251 + 39862,
17: 251 + 39860,
18: 251 + 39860,
19: 251 + 39860,
20: 251 + 39860,
21: 251 + 39860,
22: 251 + 39860,
23: 251 + 39860,
24: 251 + 39862,
25: 251 + 39860,
26: 251 + 39862,
27: 251 + 39862,
28: 251 + 39860,
29: 251 + 39860,
30: 251 + 39860,
31: 251 + 39860,
32: 251 + 39860,
}
preview1 = 75, {
1: 42558,
2: 42560,
3: 42560,
4: 42558,
5: 42558,
6: 42558,
7: 42558,
8: 42558,
9: 42558,
10: 42558,
11: 42560,
12: 42558,
13: 42560,
14: 42558,
15: 42558,
16: 42560,
17: 42558,
18: 42558,
19: 42558,
20: 42558,
21: 42558,
22: 42558,
23: 42558,
24: 42560,
25: 42558,
26: 42560,
27: 42560,
28: 42558,
29: 42558,
30: 42558,
31: 42558,
32: 42558,
33: 42560,
34: 42558,
35: 42558,
36: 42558,
37: 42560,
38: 42558,
39: 42560,
40: 42558,
41: 42558,
42: 42558,
43: 42558,
}
preview2 = 5, {
1: 42633,
2: 42635,
3: 42635,
4: 42633,
5: 42633,
6: 42633,
7: 42633,
8: 42633,
9: 42633,
10: 42633,
11: 42635,
12: 42633,
13: 42635,
14: 42633,
15: 42633,
16: 42635,
17: 42633,
18: 42633,
19: 42633,
20: 42633,
21: 42633,
22: 42633,
23: 42633,
24: 42635,
25: 42633,
26: 42635,
28: 42633,
29: 42633,
30: 42633,
31: 42633,
32: 42633,
33: 42635,
34: 42633,
35: 42633,
36: 42633,
37: 42635,
38: 42633,
39: 42635,
40: 42633,
41: 42633,
42: 42633,
43: 42633,
}
preview3 = 4, {
1: 42638,
2: 42640,
3: 42640,
4: 42638,
5: 42638,
6: 42638,
7: 42638,
8: 42638,
9: 42638,
10: 42638,
11: 42640,
12: 42638,
13: 42640,
14: 42638,
15: 42638,
16: 42640,
17: 42638,
18: 42638,
19: 42638,
20: 42638,
21: 42638,
22: 42638,
23: 42638,
24: 42640,
25: 42638,
26: 42640,
29: 42638,
30: 42638,
34: 42638,
35: 42638,
36: 42638,
37: 42640,
38: 42638,
39: 42640,
40: 42638,
41: 42638,
42: 42638,
43: 42638,
}
preview4 = 6, {
1: 42642,
2: 42644,
3: 42644,
4: 42642,
5: 42642,
6: 42642,
7: 42642,
8: 42642,
9: 42642,
10: 42642,
11: 42644,
12: 42642,
13: 42644,
14: 42642,
15: 42642,
17: 42642,
18: 42642,
19: 42642,
20: 42642,
21: 42642,
22: 42642,
23: 42642,
24: 42644,
26: 42644,
29: 42642,
30: 42642,
34: 42642,
35: 42642,
36: 42642,
37: 42644,
38: 42642,
39: 42644,
40: 42642,
41: 42642,
42: 42642,
43: 42642,
}
henshin_ypc5kimepose1 = 65 + 24 + 33 + 6, {
6: 32507,
7: 27848,
9: 28297,
10: 32092,
12: 27461,
13: 28881,
14: 29007,
15: 28810,
16: 29942,
17: 30390,
18: 29695,
20: 30531,
21: 31887,
22: 27920,
25: 26490,
26: 32101,
28: 31433,
29: 31444,
31: 24604,
32: 30789,
33: 23275,
36: 26198,
37: 28200,
38: 29143,
40: 24547,
43: 25230,
}
henshin_ypc5kimepose2 = 8, {
6: 32635,
9: 28425,
10: 32220,
12: 27589,
13: 29009,
14: 29135,
15: 28938,
16: 30070,
17: 30518,
18: 29823,
20: 30659,
21: 32015,
22: 28048,
25: 26618,
26: 32229,
28: 31561,
29: 31572,
31: 24732,
32: 30917,
33: 23403,
36: 26326,
37: 28328,
38: 29271,
40: 24675,
43: 25358,
}
henshin_ypc5kimepose3 = 7, {
6: 32658,
7: 27984,
}
henshin_ypc5kimepose4 = 101, {
6: 32665,
7: 27991,
9: 28433,
10: 32228,
12: 27597,
13: 29017,
14: 29143,
15: 28946,
16: 30078,
17: 30526,
18: 29831,
20: 30667,
21: 32023,
22: 28056,
25: 26626,
26: 32237,
28: 31569,
29: 31580,
31: 24740,
32: 30925,
33: 23411,
36: 26334,
37: 28336,
38: 29279,
40: 24683,
43: 25366,
}
henshin_ypc5kimepose5 = 1, {
6: 101 + 32665,
9: 101 + 28433,
10: 101 + 32228,
12: 101 + 27597,
13: 101 + 29017,
14: 101 + 29143,
15: 101 + 28946,
16: 101 + 30078,
17: 101 + 30526,
18: 101 + 29831,
20: 101 + 30667,
21: 101 + 32023,
22: 101 + 28056,
25: 101 + 26626,
26: 101 + 32237,
28: 101 + 31569,
29: 101 + 31580,
31: 101 + 24740,
32: 101 + 30925,
33: 101 + 23411,
36: 101 + 26334,
37: 101 + 28336,
38: 101 + 29279,
40: 101 + 24683,
43: 101 + 25366,
}
henshin_ypc5kimepose6 = 14, {
6: 32767,
7: 28092,
9: 28535,
10: 32330,
12: 27699,
13: 29119,
14: 29245,
15: 29048,
16: 30180,
17: 30628,
18: 29933,
20: 30769,
21: 32125,
22: 28158,
25: 26728,
26: 32339,
28: 31671,
29: 31682,
31: 24842,
32: 31027,
33: 23513,
36: 26436,
37: 28438,
38: 29381,
40: 24785,
43: 25468,
}
henshin_ypc5kimepose7 = 132, {
6: 32781,
7: 28106,
9: 28549,
10: 32344,
13: 29133,
14: 29259,
15: 29062,
16: 30194,
17: 30642,
18: 29947,
20: 30783,
21: 32139,
22: 28172,
25: 26742,
26: 32353,
28: 31685,
29: 31696,
31: 24856,
32: 31041,
33: 23527,
36: 26450,
37: 28452,
38: 29395,
40: 24799,
43: 25482,
}
henshin_ypc5kimepose8 = 4, {
6: 32913,
7: 28238,
9: 28681,
10: 32476,
13: 29265,
14: 29391,
15: 29194,
16: 30326,
17: 30774,
18: 30079,
21: 32271,
22: 28304,
25: 26874,
26: 32485,
28: 31817,
29: 31828,
31: 24988,
32: 31173,
33: 23659,
36: 26582,
37: 28584,
38: 29527,
40: 24931,
43: 25614,
}
henshin_ypc5kimepose9 = 3, {
6: 32917,
7: 28242,
9: 28685,
10: 32480,
13: 29269,
14: 29395,
15: 29198,
16: 30330,
17: 30778,
18: 30083,
21: 32275,
22: 28308,
25: 26878,
28: 31821,
29: 31832,
31: 24992,
32: 31177,
33: 23663,
36: 26586,
37: 28588,
38: 29531,
40: 24935,
}
henshin_ypc5kimepose10 = 4, {
6: 32920,
7: 28245,
}
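# Each entry above is a tuple (section_length_in_frames, {episode: start_frame}); a start may
# also be a tuple of several start frames when the footage occurs more than once in that
# episode. Start frames are absolute frame numbers within each episode clip.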
sections = [
# (27, {
# 32: 36271,
# 33: 5308,
# }),
# op_intro1a_2a,
# op_intro1b,
# op_intro1c_2c,
# op_intro1d_2d,
# op_intro2b,
# op_butterfly_a,
# op_butterfly_b,
# op_butterfly_c,
# op_butterfly_d,
# op_fairies_a,
# op_fairies_b,
# op_fairies_c,
# op_fairies_d,
# op_fairies_e,
# op_logo_a,
# op_logo_b,
op_logo_c,
# op_planning_a,
# op_planning_b,
# op_planning_c,
# op_planning_d,
# op_planning_e,
# op_planning_f,
# op_ball_a,
# op_ball_b,
op_ball_c,
# op_producers_a,
# op_producers_b,
# op_producers_c,
# op_producers_d,
# op_producers_e,
# op_producers_f,
# op_school_a,
# op_school_b,
# op_school_c,
# op_school_d,
# op_songs_a,
# op_songs_b,
# op_songs_c,
# op_songs_d,
# op_songs_e,
# op_flying_a,
# op_flying_b,
# op_flying_c,
# op_flying_d,
# op_attacks_a,
# op_attacks_b,
# op_attacks_c,
# op_attacks_d,
# op_attacks_e,
# op_books_a,
# op_books_b,
# op_books_c,
# op_books_d,
# op_everyone_a,
# op_everyone_b,
# op_everyone_c,
# op_everyone_d,
# op_everyone_e,
# op_rainbow1a_2a,
# op_rainbow1b_1f,
# op_rainbow1c_2c,
# op_rainbow1d_1e,
# op_rainbow2b,
# op_rainbow2d,
# op_rainbow2e,
# op_rainbow2f,
# op_rainbow3a_4a,
# op_rainbow3b,
# op_rainbow3c_4c,
# op_rainbow3d_4d,
# op_rainbow3e_4e,
# op_rainbow3f_4f,
# op_rainbow4b,
# eyecatch1_1,
# eyecatch1_2,
# eyecatch1_3,
# eyecatch1_4a,
# eyecatch1_4b,
# eyecatch1_5,
# eyecatch2_1,
# eyecatch2_2a,
# eyecatch2_2b,
# eyecatch2_3a,
# eyecatch2_3b,
# eyecatch2_4a,
# eyecatch2_4b,
# eyecatch2_5a,
# eyecatch2_5b,
# ed1_1,
# ed1_3,
# preview1,
# preview2,
# preview3,
# preview4,
# henshin_ypc5kimepose1,
# henshin_ypc5kimepose2,
# henshin_ypc5kimepose3,
# henshin_ypc5kimepose4,
# henshin_ypc5kimepose5,
# henshin_ypc5kimepose6,
# henshin_ypc5kimepose7,
# henshin_ypc5kimepose8,
# henshin_ypc5kimepose9,
# henshin_ypc5kimepose10,
]
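# Command-line usage: pass the episode numbers to analyse. A negative number marks the episode
# as bad (added to bad_epnos, so it is excluded from the reference average built from the other
# episodes) while still analysing it as a target.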
import sys
# target_epno = int(sys.argv[1])
target_epnos = [int(arg) for arg in sys.argv[1:]]
bad_epnos |= {-epno for epno in target_epnos if epno < 0}
target_epnos = [abs(epno) for epno in target_epnos]
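# Analysis configuration: get_plane selects which plane(s) to compare, zero is the nominal
# neutral level subtracted later (16 for limited-range luma, 128 for chroma), and subsampling
# halves the horizontal crop for subsampled chroma planes. box_blur compares box-blurred
# 16-bit planes instead of raw 8-bit ones; average switches to the per-frame-mean mode below.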
get_plane, zero, subsampling = get_y, 16, 0
# get_plane, zero, subsampling = get_u, 128, 1
# get_plane, zero, subsampling = get_v, 128, 1
# get_plane, zero, subsampling = lambda clip: core.std.StackVertical([get_u(clip), get_v(clip)]), 128, 1
box_blur = False
average = False
# skipped_epnos = bad_epnos - {target_epno}
# sections = [section for section in sections if target_epno in section[1]]
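# Keep only sections that contain at least one target episode and at least five usable
# (non-bad) reference episodes, and normalise single start frames into 1-tuples of starts.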
sections = [section for section in sections if any(target_epno in section[1] for target_epno in target_epnos)]
sections = [section for section in sections if sum(epno not in bad_epnos for epno in section[1]) >= 5]
# sections = [section for section in sections if section[0] >= 100]
sections = [(nfr, {epno: ((start,) if isinstance(start, int) else start) for epno, start in eps.items()}) for nfr, eps in sections]
# total_coef_sqr = total_coef = total_dot = total_val = total_val_sqr = total_cnt = 0
total_coef_sqr = {target_epno: 0 for target_epno in target_epnos}
total_coef = {target_epno: 0 for target_epno in target_epnos}
total_dot = {target_epno: 0 for target_epno in target_epnos}
total_val = {target_epno: 0 for target_epno in target_epnos}
total_val_sqr = {target_epno: 0 for target_epno in target_epnos}
total_cnt = {target_epno: 0 for target_epno in target_epnos}
dump = []
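# num accumulates the reference episodes' pixel values (or, in average mode, per-frame pixel
# sums) for every frame of a section, and den counts the contributions, so num/den is the
# cross-episode mean; the dtypes are wide enough to hold these sums for 8-bit (or 16-bit
# box-blurred) samples across all contributing episodes without overflow.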
num = None
src_dtype = np.uint16 if box_blur else np.uint8
num_dtype = np.uint64 if average else np.uint32 if box_blur else np.uint16
# with tqdm(unit='fr', total=sum(nfr * sum(len(starts) for epno, starts in eps.items() if epno not in skipped_epnos) for nfr, eps in sections)) as pbar:
with tqdm(smoothing=0, unit='fr', total=sum(nfr * (sum(len(starts) * (1 - (epno in bad_epnos) + (epno in target_epnos)) for epno, starts in eps.items())) for nfr, eps in sections)) as pbar:
# with tqdm(unit='fr', total=sum(nfr * (sum(len(starts) for epno, starts in eps.items() if epno not in skipped_epnos) - 1) for nfr, eps in sections)) as pbar:
if not average:
for section_len, section_starts in sections:
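            # get_clip extracts the chosen plane of one episode's section, optionally box-blurs
            # it, and crops the left/right borders; it relies on `start` and `section_len` from
            # the enclosing loops being bound at call time.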
def get_clip(epno):
pbar.refresh()
clip = episodes[epno]()[start:start+section_len]
clip = get_plane(clip)
if box_blur:
clip = depth(clip, 16)
clip = clip.std.BoxBlur(hpasses=3, vpasses=3)
clip = clip.std.Crop(left=(32 >> subsampling), right=(32 >> subsampling))
# clip = clip.std.Crop(top=(32 >> subsampling), bottom=(32 >> subsampling), left=(32 >> subsampling), right=(32 >> subsampling))
# clip = clip.std.Crop(bottom=(clip.height % 32))
# if average:
# clip = depth(clip, 16)
# # clip = kernels.Box.scale(clip, clip.width // 32, clip.height // 32)
# clip = kernels.Box.scale(clip, 1, 1)
return clip
den = 0
for epno, starts in section_starts.items():
# if epno != target_epno and epno not in skipped_epnos:
if epno not in bad_epnos:
for start in starts:
clip = get_clip(epno)
if num is None:
w = clip.width
h = clip.height
num = np.zeros((section_len, 1 if average else h * w), num_dtype)
for fr, frame in enumerate(clip.frames(close=True)):
frame = np.array(frame, dtype=src_dtype).reshape(-1)
if average:
num[fr] += frame.sum(dtype=np.uint64)
else:
num[fr] += frame
pbar.update()
den += frame.size if average else 1
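            # Second pass: accumulate the regression statistics from the target episodes'
            # frames, restricted to I-frames via the _PictType check below.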
relevant_target_epnos = [epno for epno in target_epnos if epno in section_starts]
for target_epno in relevant_target_epnos:
for start in section_starts[target_epno]:
clip = get_clip(target_epno)
for fr, frame in enumerate(clip.frames(close=True)):
# if frame.props['_PictType'] == b'B':
if frame.props['_PictType'] == b'I':
frame = np.array(frame, dtype=src_dtype).reshape(-1)
if average:
frame = np.array([frame.sum(dtype=np.uint64)])
# for i, sample in enumerate(frame):
# dump.append(f'{int(sample) - Fraction(int(num[fr][i]), den)}')
total_val[target_epno] += int(frame.sum(dtype=np.uint64))
total_val_sqr[target_epno] += int(np.einsum('i,i', frame, frame, dtype=np.uint64))
total_coef[target_epno] += Fraction(int(num[fr].sum(dtype=np.uint64)), den)
total_coef_sqr[target_epno] += Fraction(int(np.einsum('i,i', num[fr], num[fr], dtype=np.uint64)), den * den)
total_dot[target_epno] += Fraction(int(np.einsum('i,i', num[fr], frame, dtype=np.uint64)), den)
total_cnt[target_epno] += num[fr].size
pbar.update()
num = None
else:
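        # Per-frame-mean mode: each episode is indexed once, every frame is reduced to a single
        # mean value, and the regression runs on per-frame means rather than per-pixel values
        # (intended for tracking overall level changes rather than per-pixel kernels).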
def get_clip(epno):
pbar.refresh()
clip = episodes[epno]()
clip = get_plane(clip)
if box_blur:
clip = depth(clip, 16)
clip = clip.std.BoxBlur(hpasses=3, vpasses=3)
clip = clip.std.Crop(left=(32 >> subsampling), right=(32 >> subsampling))
# clip = clip.std.Crop(top=(32 >> subsampling), bottom=(32 >> subsampling), left=(32 >> subsampling), right=(32 >> subsampling))
# clip = clip.std.Crop(bottom=(clip.height % 32))
# if average:
# clip = depth(clip, 16)
# # clip = kernels.Box.scale(clip, clip.width // 32, clip.height // 32)
# clip = kernels.Box.scale(clip, 1, 1)
return clip
den = [0] * sum(nfr for nfr, eps in sections)
clip = None
for epno in {epno for nfr, eps in sections for epno in eps.keys() if epno not in bad_epnos}:
fr_offset = 0
for section_len, section_starts in sections:
starts = section_starts.get(epno)
if starts is not None:
for start in starts:
if clip is None:
clip = get_clip(epno)
if num is None:
w = clip.width
h = clip.height
if average:
num = [0] * len(den)
else:
num = np.zeros((len(den), h * w), num_dtype)
for fr, frame in enumerate(clip[start:start+section_len].frames(close=True), fr_offset):
frame = np.array(frame, dtype=src_dtype).reshape(-1)
if average:
den[fr] += frame.size
frame = int(frame.sum(dtype=np.uint64))
# frame = int(frame.min()) * w * h
dump.append(f'{{{len(dump)},{epno},{start+fr-fr_offset},{Fraction(int(frame), w * h)}}}')
else:
den[fr] += 1
num[fr] += frame
pbar.update()
fr_offset += section_len
clip = None
for target_epno in target_epnos:
fr_offset = 0
for section_len, section_starts in sections:
starts = section_starts.get(target_epno)
if starts is not None:
for start in starts:
if clip is None:
clip = get_clip(target_epno)
for fr, frame in enumerate(clip[start:start+section_len].frames(close=True), fr_offset):
# if frame.props['_PictType'] == b'B':
if frame.props['_PictType'] == b'I':
frame = np.array(frame, dtype=src_dtype).reshape(-1)
if average:
frame = frame.sum(dtype=np.uint64)
# frame = np.uint64(frame.min()) * w * h
total_coef[target_epno] += Fraction(num[fr], den[fr])
total_coef_sqr[target_epno] += Fraction(num[fr] * num[fr], den[fr] * den[fr])
total_dot[target_epno] += Fraction(num[fr] * int(frame), den[fr])
total_cnt[target_epno] += 1
# dump.append(f'{{{len(dump)},{start+fr-fr_offset},{Fraction(num[fr], den[fr])},{Fraction(int(frame), w * h)}}}')
delta = Fraction(num[fr], den[fr]) - Fraction(int(frame), w * h)
if abs(delta) * 10 >= 1:
print(f'WARNING: episode {target_epno} frame {start+fr-fr_offset} deviates significantly ({float(delta)}) from the average!')
frame = np.array([frame])
else:
total_coef[target_epno] += Fraction(int(num[fr].sum(dtype=np.uint64)), den[fr])
total_coef_sqr[target_epno] += Fraction(int(np.einsum('i,i', num[fr], num[fr], dtype=np.uint64)), den[fr] * den[fr])
total_dot[target_epno] += Fraction(int(np.einsum('i,i', num[fr], frame, dtype=np.uint64)), den[fr])
total_cnt[target_epno] += num[fr].size
total_val[target_epno] += int(frame.sum(dtype=np.uint64))
total_val_sqr[target_epno] += int(np.einsum('i,i', frame, frame, dtype=np.uint64))
pbar.update()
fr_offset += section_len
clip = None
num = None
# print(f'{{{",".join(dump)}}}')
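# The running totals are the sufficient statistics for least-squares fits of the target
# episode's samples y against the cross-episode mean x:
#   total_coef = sum(x), total_coef_sqr = sum(x^2), total_dot = sum(x*y),
#   total_val = sum(y), total_val_sqr = sum(y^2), total_cnt = n.
# Three models are reported per episode: y = x as is, a pure gain y = a*(x - zero) + zero,
# and a gain plus intercept y = a*(x - zero) + zero + b.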
confidence = 0.9999
for epno in target_epnos:
print(f'Episode {epno}:')
if box_blur:
total_coef_sqr[epno] /= 65536
total_val_sqr[epno] /= Fraction(65536)
total_dot[epno] /= 65536
total_coef[epno] /= 256
total_val[epno] /= Fraction(256)
if average:
total_val_sqr[epno] /= Fraction((w * h) ** 2)
total_dot[epno] /= w * h
total_val[epno] /= Fraction(w * h)
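    # Re-centre every statistic around `zero` (nominal black for luma, the neutral level for
    # chroma) using sum((x - z)^2) = sum(x^2) - 2*z*sum(x) + n*z^2 and the analogous identities
    # for the cross term and the plain sums, so the fitted gain acts around the legal zero level.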
total_zero = total_cnt[epno] * zero
total_coef_sqr[epno] -= (2 * total_coef[epno] - total_zero) * zero
total_val_sqr[epno] -= (2 * total_val[epno] - total_zero) * zero
total_dot[epno] -= (total_coef[epno] + total_val[epno] - total_zero) * zero
total_coef[epno] -= total_zero
total_val[epno] -= total_zero
# if not total_cnt[epno]:
# print(' No frames found.')
# continue
print(' Raw values:')
print(' total_coef_sqr =', total_coef_sqr[epno])
print(' total_coef =', total_coef[epno])
print(' total_dot =', total_dot[epno])
print(' total_val =', total_val[epno])
print(' total_val_sqr =', total_val_sqr[epno])
print(' total_cnt =', total_cnt[epno])
rss = total_coef_sqr[epno] - 2 * total_dot[epno] + total_val_sqr[epno]
print(' As is:')
print(' y = x')
print(' with mean squared error', float(rss / total_cnt[epno]))
tss = total_val_sqr[epno] - total_val[epno] ** 2 / total_cnt[epno]
if tss:
fuv = rss / tss
print(' with fraction of variance unexplained', float(fuv))
mse = rss / total_cnt[epno]
mean_diff = (total_coef[epno] - total_val[epno]) / total_cnt[epno]
std_error = math.sqrt((mse - mean_diff ** 2) / (total_cnt[epno] - 1))
if std_error:
t = mean_diff / std_error
p = 2 * sps.t.sf(abs(t), total_cnt[epno] - 1)
print(' with likelihood of zero error mean', p)
print(' Without intercept:')
linear_coef = total_dot[epno] / total_coef_sqr[epno]
print(f' y = {float(linear_coef)}(x{-zero:+}){zero:+}')
rss = linear_coef ** 2 * total_coef_sqr[epno] - 2 * linear_coef * total_dot[epno] + total_val_sqr[epno]
mse = rss / total_cnt[epno]
if mse:
std_error = math.sqrt(mse / total_coef_sqr[epno])
confidence_interval = sps.norm.interval(confidence, float(linear_coef), std_error)
print(f' with {confidence} confidence interval for linear coefficient {confidence_interval}')
print(' with mean squared error', float(mse))
if total_cnt[epno] > 1:
print(' with estimated error variance', float(rss / (total_cnt[epno] - 1)))
tss = total_val_sqr[epno] - total_val[epno] ** 2 / total_cnt[epno]
if tss:
fuv = rss / tss
print(' with fraction of variance unexplained', float(fuv))
print(' With intercept:')
common_den = total_coef_sqr[epno] * total_cnt[epno] - total_coef[epno] ** 2
if common_den:
linear_coef = (total_dot[epno] * total_cnt[epno] - total_coef[epno] * total_val[epno]) / common_den
intercept = (total_coef_sqr[epno] * total_val[epno] - total_dot[epno] * total_coef[epno]) / common_den
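        # When the gain is not 1, the fit is also printed in fixed-point form: offset is the
        # level the affine map leaves unchanged, intercept / (1 - gain) + zero, so the transform
        # reads as the same gain applied around that level.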
if linear_coef != 1:
print(f' y = {float(linear_coef)}(x{-zero:+}){zero:+}{float(intercept):+}')
offset = float(intercept / (1 - linear_coef) + zero)
print(f' = {float(linear_coef)}(x{-offset:+}){offset:+}')
else:
print(f' y = 1.0x{float(intercept):+}')
rss = linear_coef ** 2 * total_coef_sqr[epno] - 2 * linear_coef * total_dot[epno] + total_val_sqr[epno]
rss += total_cnt[epno] * intercept ** 2 + 2 * linear_coef * intercept * total_coef[epno] - 2 * intercept * total_val[epno]
mse = rss / total_cnt[epno]
if mse and common_den:
std_error = math.sqrt(mse * total_cnt[epno] / common_den)
confidence_interval = sps.norm.interval(confidence, float(linear_coef), std_error)
print(f' with {confidence} confidence interval for linear coefficient {confidence_interval}')
std_error = math.sqrt(mse * total_coef_sqr[epno] / common_den)
confidence_interval = sps.norm.interval(confidence, float(intercept), std_error)
print(f' with {confidence} confidence interval for intercept {confidence_interval}')
t = intercept / std_error
p = 2 * sps.t.sf(abs(t), total_cnt[epno] - 2)
print(' with likelihood of zero intercept', p)
print(' with mean squared error', float(mse))
if total_cnt[epno] > 2:
print(' with estimated error variance', float(rss / (total_cnt[epno] - 2)))
tss = total_val_sqr[epno] - total_val[epno] ** 2 / total_cnt[epno]
if tss:
fuv = rss / tss
print(' with fraction of variance unexplained', float(fuv))
# vals = np.ndarray((section_len, h * w), np.uint8)
# for fr, frame in enumerate(clip.frames(close=True)):
# vals[fr, :] = (np.array(frame, dtype=np.uint8).reshape(-1) + 7) #.view(np.int8)
# full_num = num
# for cutoff in range(num.min() // den + 1, num.max() // den):
# total_coef_sqr = total_dot = 0
# mask = (cutoff * den - den // 2 <= full_num) * (full_num < (cutoff + 1) * den - den // 2)
# num = full_num * mask
# # print(np.count_nonzero(num))
# flat = num.reshape(-1)
# total_coef_sqr += Fraction(int(flat @ flat), den * den)
# if total_coef_sqr:
# num *= vals
# total_dot += Fraction(int(num.sum()), den)
# print(f'{{{cutoff},{float(total_dot / total_coef_sqr)}}},')