shaders/ntsc.f.glsl @ 2329:06d5e9b08bdb

Add NTSC composite shader by Sik
author Michael Pavone <pavone@retrodev.com>
date Wed, 23 Aug 2023 21:38:39 -0700
parents
children 49bd818ec9d8
//******************************************************************************
// NTSC composite simulator for BlastEm
// Shader by Sik, based on BlastEm's default shader
//
// It works by converting from RGB to YIQ and then encoding it into NTSC, then
// trying to decode it back. The lossy nature of the encoding process results in
// the rainbow effect. It also accounts for the differences between H40 and H32
// mode as it computes the exact colorburst cycle length.
//
// This shader tries to work around the inability to keep track of previous
// pixels by sampling seven points (in 0.25 colorburst cycle intervals), which
// seems to be enough to give decent filtering (four samples are used for
// low-pass filtering, but we need seven because decoding chroma also requires
// four samples, so we're filtering over overlapping samples... just see the
// comments in the I/Q code to understand).
//******************************************************************************
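// Rough sketch of the signal model used throughout (the same math as the code
// below, written out once for reference):
//
//    composite(x) = Y(x) + I(x) * sin(phase(x)) + Q(x) * cos(phase(x))
//
// where phase(x) advances by 2*pi per colorburst cycle. Decoding recovers Y by
// low-pass filtering composite(x), and I/Q by multiplying composite(x) by
// sin(phase(x)) / cos(phase(x)) and low-pass filtering the result
// (synchronous demodulation).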

uniform mediump float width;
uniform sampler2D textures[2];
uniform mediump vec2 texsize;
varying mediump vec2 texcoord;

// Converts from RGB to YIQ
mediump vec3 rgba2yiq(vec4 rgba)
{
    return vec3(
        rgba[0] * 0.3 + rgba[1] * 0.59 + rgba[2] * 0.11,
        rgba[0] * 0.599 + rgba[1] * -0.2773 + rgba[2] * -0.3217,
        rgba[0] * 0.213 + rgba[1] * -0.5251 + rgba[2] * 0.3121
    );
}
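// Quick sanity check on the coefficients above: the Y row sums to 1.0 and the
// I and Q rows each sum to 0.0, so any gray (r = g = b) maps to (Y, 0.0, 0.0),
// e.g. white (1, 1, 1, 1) -> (1.0, 0.0, 0.0), while pure red (1, 0, 0, 1)
// maps to (0.3, 0.599, 0.213).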

// Encodes YIQ into composite
mediump float yiq2raw(vec3 yiq, float phase)
{
    return yiq[0] + yiq[1] * sin(phase) + yiq[2] * cos(phase);
}
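// Another way to read the encoder above: the I/Q pair rides on the colorburst
// as a single sine wave whose amplitude is the saturation and whose phase
// offset is the hue. An equivalent (untested, kept commented out) form:
//
//    return yiq[0] + length(yiq.yz) * sin(phase + atan(yiq[2], yiq[1]));
//
// A gray pixel (I = Q = 0) therefore encodes to a flat level equal to Y.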

// Converts from YIQ to RGB
mediump vec4 yiq2rgba(vec3 yiq)
{
    return vec4(
        yiq[0] + yiq[1] * 0.9469 + yiq[2] * 0.6236,
        yiq[0] - yiq[1] * 0.2748 - yiq[2] * 0.6357,
        yiq[0] - yiq[1] * 1.1 + yiq[2] * 1.7,
        1.0
    );
}
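// This is (approximately) the inverse of rgba2yiq: feeding it the YIQ of pure
// red from above, (0.3, 0.599, 0.213), gives roughly (1.0, 0.0, 0.003, 1.0),
// so colors survive a clean encode/decode round trip up to small rounding
// error in the coefficients.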

void main()
{
    // Use the first pair of lines below for hard line edges;
    // swap in the second (commented-out) pair for soft line edges
    mediump float modifiedY0 = (floor(texcoord.y * texsize.y + 0.25) + 0.5) / texsize.y;
    mediump float modifiedY1 = (floor(texcoord.y * texsize.y - 0.25) + 0.5) / texsize.y;
    //mediump float modifiedY0 = (texcoord.y * texsize.y + 0.75) / texsize.y;
    //mediump float modifiedY1 = (texcoord.y * texsize.y + 0.25) / texsize.y;
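    // How the hard-edge variant behaves: both lines snap to texel centers, but
    // with opposite 0.25 offsets, so over most of a scanline they land on the
    // same texel (e.g. texcoord.y * texsize.y = 10.6 gives floor(10.85) = 10
    // and floor(10.35) = 10). Only near a line boundary do they pick adjacent
    // texels (10.9 gives floor(11.15) = 11 vs floor(10.65) = 10), which
    // factorY below then blends.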

    // Used by the mixing when fetching texels, related to the way BlastEm
    // handles interlaced mode (nothing to do with composite)
    mediump float factorY = (sin(texcoord.y * texsize.y * 6.283185307) + 1.0) * 0.5;

    // Horizontal distance of half a colorburst cycle
    mediump float factorX = (1.0 / texsize.x) / 170.667 * 0.5 * (width - 27.0);
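    // Rough reading of the numbers above (assuming width includes 27 pixels of
    // border): width - 27.0 is the active width, 320 in H40 or 256 in H32, and
    // the active part of a scanline spans about 170.667 colorburst cycles in
    // both modes. That works out to 1.875 pixels per cycle in H40 and 1.5 in
    // H32; halving it and scaling by 1.0 / texsize.x converts half a cycle
    // into texture coordinates.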

    // Where we store the sampled pixels.
    // [0] = current pixel
    // [1] = 1/4 colorburst cycles earlier
    // [2] = 2/4 colorburst cycles earlier
    // [3] = 3/4 colorburst cycles earlier
    // [4] = 1 colorburst cycle earlier
    // [5] = 1 1/4 colorburst cycles earlier
    // [6] = 1 2/4 colorburst cycles earlier
    mediump float phase[7]; // Colorburst phase (in radians)
    mediump float raw[7];   // Raw encoded composite signal

    // Sample all the pixels we're going to use
    mediump float x = texcoord.x;
    for (int n = 0; n < 7; n++, x -= factorX * 0.5) {
        // Compute colorburst phase at this point
        phase[n] = x / factorX * 3.1415926;

        // Convert RGB into YIQ and then encode it into composite
        // Reading two textures is a BlastEm thing :P (the two fields in
        // interlaced mode, that's taken as-is from the stock shaders)
        raw[n] = yiq2raw(mix(
            rgba2yiq(texture2D(textures[1], vec2(x, modifiedY1))),
            rgba2yiq(texture2D(textures[0], vec2(x, modifiedY0))),
            factorY
        ), phase[n]);
    }
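    // Sanity check on the sampling: x steps by factorX * 0.5 (a quarter of a
    // colorburst cycle) per iteration, and phase is x / factorX * pi, so
    // consecutive entries are pi / 2 radians apart and raw[0] / raw[4] are a
    // full cycle (2 * pi) apart, matching the table above.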

    // Decode Y by averaging over the whole sampled cycle (effectively
    // filtering out anything above the colorburst frequency)
    mediump float y_mix = (raw[0] + raw[1] + raw[2] + raw[3]) * 0.25;
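    // Why this works: raw[0]..raw[3] sit a quarter cycle apart, and the sines
    // (or cosines) of four phases a quarter cycle apart always sum to 0, so
    // over a flat-colored area the chroma terms cancel exactly and only Y is
    // left. Across changing pixels it instead acts as a crude one-cycle box
    // low-pass filter.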

    // Decode I and Q (see the page below to understand what's going on)
    // https://codeandlife.com/2012/10/09/composite-video-decoding-theory-and-practice/
    //
    // Retrieving I and Q out of the raw signal is done like this
    // (use sin for I and cos for Q):
    //
    //    0.5 * raw[0] * sin(phase[0]) + 0.5 * raw[1] * sin(phase[1]) +
    //    0.5 * raw[2] * sin(phase[2]) + 0.5 * raw[3] * sin(phase[3])
    //
    // i.e. multiply each of the sampled quarter cycles against the reference
    // wave and average them (then double the result, because the sin * sin
    // term only averages to half of I, hence 0.5 instead of 0.25)
    //
    // That turns out to be blocky though, so we opt to filter down the chroma,
    // which requires doing the above *four* times if we do it the same way as
    // we did for luminance (note that 0.125 = 1/4 of 0.5):
    //
    //    0.125 * raw[0] * sin(phase[0]) + 0.125 * raw[1] * sin(phase[1]) +
    //    0.125 * raw[2] * sin(phase[2]) + 0.125 * raw[3] * sin(phase[3]) +
    //    0.125 * raw[1] * sin(phase[1]) + 0.125 * raw[2] * sin(phase[2]) +
    //    0.125 * raw[3] * sin(phase[3]) + 0.125 * raw[4] * sin(phase[4]) +
    //    0.125 * raw[2] * sin(phase[2]) + 0.125 * raw[3] * sin(phase[3]) +
    //    0.125 * raw[4] * sin(phase[4]) + 0.125 * raw[5] * sin(phase[5]) +
    //    0.125 * raw[3] * sin(phase[3]) + 0.125 * raw[4] * sin(phase[4]) +
    //    0.125 * raw[5] * sin(phase[5]) + 0.125 * raw[6] * sin(phase[6])
    //
    // There are a lot of repeated terms there that can be merged;
    // what you see below is the resulting simplification.
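    //
    // For the record, the math behind that factor of 2 (assuming the color is
    // roughly constant across the samples): each product expands to
    //
    //    raw[n] * sin(phase[n]) = Y * sin(phase[n]) + I * sin(phase[n])^2
    //                           + Q * sin(phase[n]) * cos(phase[n])
    //
    // and over four samples spanning a full cycle the sin term averages to 0,
    // sin^2 to 1/2 and sin * cos to 0, leaving I / 2 (likewise Q / 2 with cos).
    // As a check, the merged weights below (0.125, 0.25, 0.375, 0.5, 0.375,
    // 0.25, 0.125) sum to 2.0, the same gain as the single-pass version, just
    // spread over a wider triangular window.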

    mediump float i_mix =
        0.125 * raw[0] * sin(phase[0]) +
        0.25 * raw[1] * sin(phase[1]) +
        0.375 * raw[2] * sin(phase[2]) +
        0.5 * raw[3] * sin(phase[3]) +
        0.375 * raw[4] * sin(phase[4]) +
        0.25 * raw[5] * sin(phase[5]) +
        0.125 * raw[6] * sin(phase[6]);

    mediump float q_mix =
        0.125 * raw[0] * cos(phase[0]) +
        0.25 * raw[1] * cos(phase[1]) +
        0.375 * raw[2] * cos(phase[2]) +
        0.5 * raw[3] * cos(phase[3]) +
        0.375 * raw[4] * cos(phase[4]) +
        0.25 * raw[5] * cos(phase[5]) +
        0.125 * raw[6] * cos(phase[6]);

    // Convert YIQ back to RGB and output it
    gl_FragColor = yiq2rgba(vec3(y_mix, i_mix, q_mix));

    // If you're curious to see what the raw composite signal looks like,
    // comment out the above and uncomment the line below instead
    //gl_FragColor = vec4(raw[0], raw[0], raw[0], 1.0);
}