1 /**
2  * \file SLGLOVRWorkaround.h
3  * \brief Wrapper around Oculus Rift
4  * \date July 2014
5  * \authors Marc Wacker, Roman Kuehne, Marcus Hudritsch
6  * \copyright http://opensource.org/licenses/GPL-3.0
7  * \remarks Please use clangformat to format the code. See more code style on
8  * https://github.com/cpvrlab/SLProject4/wiki/SLProject-Coding-Style
9 */
10 
11 #ifndef SLOVRWORKAROUND_H
12 #define SLOVRWORKAROUND_H
13 
14 #include <SLGLOculus.h>
15 #include <SLGLVertexArray.h>
16 #include <SLScene.h>
17 
18 //-------------------------------------------------------------------------------------
19 enum DistortionEqnType
20 {
21 Distortion_No_Override = -1,
22 // These two are legacy and deprecated.
23  Distortion_Poly4 = 0, // scale = (K0 + K1*r^2 + K2*r^4 + K3*r^6)
24  Distortion_RecipPoly4 = 1, // scale = 1/(K0 + K1*r^2 + K2*r^4 + K3*r^6)
25 
26  // CatmullRom10 is the preferred distortion format.
27  Distortion_CatmullRom10 = 2, // scale = Catmull-Rom spline through points (1.0, K[1]...K[9])
28  Distortion_LAST // For ease of enumeration.
29 };
30 
31 //-------------------------------------------------------------------------------------
32 // HMD types.
33 //
34 enum HmdTypeEnum
35 {
36 HmdType_None,
37 HmdType_DKProto, // First duct-tape model, never sold.
38 HmdType_DK1, // DevKit1 - on sale to developers.
39 HmdType_DKHDProto, // DKHD - shown at various shows, never sold.
40 HmdType_DKHD2Proto, // DKHD2, 5.85-inch panel, never sold.
41 HmdType_DKHDProto566Mi, // DKHD, 5.66-inch panel, never sold.
42 HmdType_CrystalCoveProto, // Crystal Cove, 5.66-inch panel, shown at shows but never sold.
43 HmdType_DK2,
44 
45 // Reminder - this header file is public - code names only!
46 HmdType_Unknown, // Used for unnamed HW lab experiments.
47 HmdType_LAST
48 };
49 //-------------------------------------------------------------------------------------
50 // HMD shutter types.
51 //
52 enum HmdShutterTypeEnum
53 {
54 HmdShutteRT_global,
55 HmdShutter_RollingTopToBottom,
56 HmdShutter_RollingLeftToRight,
57 HmdShutter_RollingRightToLeft,
58 // TODO:
59 // color-sequential e.g. LCOS?
60 // alternate eyes?
61 // alternate columns?
62 // outside-in?
63 
64 HmdShutter_LAST
65 };
66 
67 //-------------------------------------------------------------------------------------
68 // For headsets that use eye cups
69 //
70 enum EyeCupType
71 {
72 // Public lenses
73 EyeCup_DK1A = 0,
74 EyeCup_DK1B = 1,
75 EyeCup_DK1C = 2,
76 EyeCup_DK2A = 3,
77 
78 // Internal R&D code names.
79 // Reminder - this header file is public - code names only!
80 EyeCup_DKHD2A,
81 EyeCup_OrangeA,
82 EyeCup_RedA,
83 EyeCup_PinkA,
84 EyeCup_BlueA,
85 EyeCup_Delilah1A,
86 EyeCup_Delilah2A,
87 EyeCup_JamesA,
88 EyeCup_SunMandalaA,
89 
90 EyeCup_LAST
91 };
92 
93 //-------------------------------------------------------------------------------------
94 bool FitCubicPolynomial(float* pResult, const float* pFitX, const float* pFitY)
95 {
96  float d0 = ((pFitX[0] - pFitX[1]) * (pFitX[0] - pFitX[2]) * (pFitX[0] - pFitX[3]));
97  float d1 = ((pFitX[1] - pFitX[2]) * (pFitX[1] - pFitX[3]) * (pFitX[1] - pFitX[0]));
98  float d2 = ((pFitX[2] - pFitX[3]) * (pFitX[2] - pFitX[0]) * (pFitX[2] - pFitX[1]));
99  float d3 = ((pFitX[3] - pFitX[0]) * (pFitX[3] - pFitX[1]) * (pFitX[3] - pFitX[2]));
100 
101  if ((d0 == 0.0f) || (d1 == 0.0f) || (d2 == 0.0f) || (d3 == 0.0f))
102  {
103  return false;
104  }
105 
106  float f0 = pFitY[0] / d0;
107  float f1 = pFitY[1] / d1;
108  float f2 = pFitY[2] / d2;
109  float f3 = pFitY[3] / d3;
110 
111  pResult[0] = -(f0 * pFitX[1] * pFitX[2] * pFitX[3] +
112  f1 * pFitX[0] * pFitX[2] * pFitX[3] +
113  f2 * pFitX[0] * pFitX[1] * pFitX[3] +
114  f3 * pFitX[0] * pFitX[1] * pFitX[2]);
115  pResult[1] = f0 * (pFitX[1] * pFitX[2] + pFitX[2] * pFitX[3] + pFitX[3] * pFitX[1]) +
116  f1 * (pFitX[0] * pFitX[2] + pFitX[2] * pFitX[3] + pFitX[3] * pFitX[0]) +
117  f2 * (pFitX[0] * pFitX[1] + pFitX[1] * pFitX[3] + pFitX[3] * pFitX[0]) +
118  f3 * (pFitX[0] * pFitX[1] + pFitX[1] * pFitX[2] + pFitX[2] * pFitX[0]);
119  pResult[2] = -(f0 * (pFitX[1] + pFitX[2] + pFitX[3]) +
120  f1 * (pFitX[0] + pFitX[2] + pFitX[3]) +
121  f2 * (pFitX[0] + pFitX[1] + pFitX[3]) +
122  f3 * (pFitX[0] + pFitX[1] + pFitX[2]));
123  pResult[3] = f0 + f1 + f2 + f3;
124 
125  return true;
126 }
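// Example (sketch, not used by the rest of this header): FitCubicPolynomial finds
// A0..A3 so that y = A0 + A1*x + A2*x^2 + A3*x^3 passes exactly through four samples.
// The helper name below is illustrative only; it fits y = x^3 and checks one point.
#if 0
static bool exampleFitCubicPolynomial()
{
    float coeffs[4];
    float fitX[4] = {0.0f, 1.0f, 2.0f, 3.0f};
    float fitY[4] = {0.0f, 1.0f, 8.0f, 27.0f}; // samples of y = x^3

    if (!FitCubicPolynomial(coeffs, fitX, fitY))
        return false; // degenerate (coincident) sample positions

    // Evaluate the fitted polynomial at x = 1.5 (exact value is 3.375).
    float x = 1.5f;
    float y = coeffs[0] + x * (coeffs[1] + x * (coeffs[2] + x * coeffs[3]));
    return fabs(y - 3.375f) < 1e-4f;
}
#endif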
127 
128 //-------------------------------------------------------------------------------------
129 enum
130 {
131  NumCoefficients = 11
132 };
133 #define TPH_SPLINE_STATISTICS 0
134 #if TPH_SPLINE_STATISTICS
135 static float max_scaledVal = 0;
136 static float average_total_out_of_range = 0;
137 static float average_out_of_range;
138 static int num_total = 0;
139 static int num_out_of_range = 0;
140 static int num_out_of_range_over_1 = 0;
141 static int num_out_of_range_over_2 = 0;
142 static int num_out_of_range_over_3 = 0;
143 static float percent_out_of_range;
144 #endif
145 
146 //-------------------------------------------------------------------------------------
147 float EvalCatmullRom10Spline(float const* K, float scaledVal)
148 {
149  int const NumSegments = NumCoefficients;
150 
151 #if TPH_SPLINE_STATISTICS
152  // Value should be in range of 0 to (NumSegments-1) (typically 10) if spline is valid. Right?
153  if (scaledVal > (NumSegments - 1))
154  {
155  num_out_of_range++;
156  average_total_out_of_range += scaledVal;
157  average_out_of_range = average_total_out_of_range / ((float)num_out_of_range);
158  percent_out_of_range = 100.0f * (num_out_of_range) / num_total;
159  }
160  if (scaledVal > (NumSegments - 1 + 1)) num_out_of_range_over_1++;
161  if (scaledVal > (NumSegments - 1 + 2)) num_out_of_range_over_2++;
162  if (scaledVal > (NumSegments - 1 + 3)) num_out_of_range_over_3++;
163  num_total++;
164  if (scaledVal > max_scaledVal)
165  {
166 max_scaledVal = scaledVal;
168  }
169 #endif
170 
171  float scaledValFloor = floorf(scaledVal);
172  scaledValFloor = std::max(0.0f, std::min((float)(NumSegments - 1), scaledValFloor));
173  float t = scaledVal - scaledValFloor;
174  int k = (int)scaledValFloor;
175 
176  float p0, p1;
177  float m0, m1;
178  switch (k)
179  {
180  case 0:
181  // Curve starts at 1.0 with gradient K[1]-K[0]
182  p0 = 1.0f;
183  m0 = (K[1] - K[0]); // general case would have been (K[1]-K[-1])/2
184  p1 = K[1];
185  m1 = 0.5f * (K[2] - K[0]);
186  break;
187  default:
188  // General case
189  p0 = K[k];
190  m0 = 0.5f * (K[k + 1] - K[k - 1]);
191  p1 = K[k + 1];
192  m1 = 0.5f * (K[k + 2] - K[k]);
193  break;
194  case NumSegments - 2:
195  // Last tangent is just the slope of the last two points.
196  p0 = K[NumSegments - 2];
197  m0 = 0.5f * (K[NumSegments - 1] - K[NumSegments - 2]);
198  p1 = K[NumSegments - 1];
199  m1 = K[NumSegments - 1] - K[NumSegments - 2];
200  break;
201  case NumSegments - 1:
202  // Beyond the last segment it's just a straight line
203  p0 = K[NumSegments - 1];
204  m0 = K[NumSegments - 1] - K[NumSegments - 2];
205  p1 = p0 + m0;
206  m1 = m0;
207  break;
208  }
209 
210  float omt = 1.0f - t;
211  float res = (p0 * (1.0f + 2.0f * t) + m0 * t) * omt * omt + (p1 * (1.0f + 2.0f * omt) - m1 * omt) * t * t;
212 
213  return res;
214 }
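// Example (sketch): the spline treats K[0]..K[10] as samples spaced evenly in
// scaled r^2 over [0, NumCoefficients-1] and returns an interpolated radial scale.
// The helper name is illustrative only; the K table is the (rounded) DK2 table
// that also appears further down in this file.
#if 0
static float exampleEvalDistortionScale(float rsq, float maxRsq)
{
    static const float K[NumCoefficients] = {1.003f, 1.02f, 1.042f, 1.066f, 1.094f,
                                             1.126f, 1.162f, 1.203f, 1.25f, 1.31f, 1.38f};
    // Map r^2 in [0, maxRsq] onto the spline's [0, NumCoefficients-1] parameter range.
    float scaledRsq = (float)(NumCoefficients - 1) * rsq / maxRsq;
    return EvalCatmullRom10Spline(K, scaledRsq); // ~1.0 at the center, ~1.38 at the edge
}
#endif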
215 
216 //-------------------------------------------------------------------------------------
217 struct LensConfig
218 {
219  // The result is a scaling applied to the distance from the center of the lens.
220  float DistortionFnScaleRadiusSquared(float rsq) const
221  {
222  float scale = 1.0f;
223  switch (Eqn)
224  {
225  case Distortion_Poly4:
226  // This version is deprecated! Prefer one of the other two.
227  scale = (K[0] + rsq * (K[1] + rsq * (K[2] + rsq * K[3])));
228  break;
229 case Distortion_RecipPoly4:
230 scale = 1.0f / (K[0] + rsq * (K[1] + rsq * (K[2] + rsq * K[3])));
231  break;
232 case Distortion_CatmullRom10:
233 {
234  // A Catmull-Rom spline through the values 1.0, K[1], K[2] ... K[10]
235  // evenly spaced in R^2 from 0.0 to MaxR^2
236  // K[0] controls the slope at radius=0.0, rather than the actual value.
237  const int NumSegments = NumCoefficients;
238  assert(NumSegments <= NumCoefficients);
239  float scaledRsq = (float)(NumSegments - 1) * rsq / (MaxR * MaxR);
240  scale = EvalCatmullRom10Spline(K, scaledRsq);
241  }
242  break;
243  default:
244  assert(false);
245  break;
246  }
247  return scale;
248  }
249  // x,y,z components map to r,g,b scales.
250 SLVec3f DistortionFnScaleRadiusSquaredChroma(float rsq) const
251 {
252  float scale = DistortionFnScaleRadiusSquared(rsq);
253  SLVec3f scaleRGB;
254  scaleRGB.x = scale * (1.0f + ChromaticAberration[0] + rsq * ChromaticAberration[1]); // Red
255  scaleRGB.y = scale; // Green
256  scaleRGB.z = scale * (1.0f + ChromaticAberration[2] + rsq * ChromaticAberration[3]); // Blue
257  return scaleRGB;
258  }
259 
260  // DistortionFn applies distortion to the argument.
261  // Input: the distance in TanAngle/NIC space from the optical center to the input pixel.
262  // Output: the resulting distance after distortion.
263  float DistortionFn(float r) const
264  {
265  return r * DistortionFnScaleRadiusSquared(r * r);
266  }
267 
268  // DistortionFnInverse computes the inverse of the distortion function on an argument.
269  float DistortionFnInverse(float r) const
270  {
271  assert((r <= 20.0f));
272 
273  float s, d;
274  float delta = r * 0.25f;
275 
276  // Better to start guessing too low & take longer to converge than too high
277  // and hit singularities. Empirically, r * 0.5f is too high in some cases.
278  s = r * 0.25f;
279  d = fabs(r - DistortionFn(s));
280 
281  for (int i = 0; i < 20; i++)
282  {
283  float sUp = s + delta;
284  float sDown = s - delta;
285  float dUp = fabs(r - DistortionFn(sUp));
286  float dDown = fabs(r - DistortionFn(sDown));
287 
288  if (dUp < d)
289  {
290  s = sUp;
291  d = dUp;
292  }
293  else if (dDown < d)
294  {
295  s = sDown;
296  d = dDown;
297  }
298  else
299  {
300  delta *= 0.5f;
301  }
302  }
303 
304  return s;
305  }
306 
307  // Also computes the inverse, but using a polynomial approximation. Warning - it's just an approximation!
308  float DistortionFnInverseApprox(float r) const
309  {
310  float rsq = r * r;
311  float scale = 1.0f;
312  switch (Eqn)
313  {
314  case Distortion_Poly4:
315  // Deprecated
316  assert(false);
317  break;
318 case Distortion_RecipPoly4:
319 scale = 1.0f / (InvK[0] + rsq * (InvK[1] + rsq * (InvK[2] + rsq * InvK[3])));
320  break;
321 case Distortion_CatmullRom10:
322 {
323  // A Catmull-Rom spline through the values 1.0, K[1], K[2] ... K[9]
324  // evenly spaced in R^2 from 0.0 to MaxR^2
325  // K[0] controls the slope at radius=0.0, rather than the actual value.
326  const int NumSegments = NumCoefficients;
327  assert(NumSegments <= NumCoefficients);
328  float scaledRsq = (float)(NumSegments - 1) * rsq / (MaxInvR * MaxInvR);
329  scale = EvalCatmullRom10Spline(InvK, scaledRsq);
330  }
331  break;
332  default:
333  assert(false);
334  break;
335  }
336  return r * scale;
337  }
338  // Sets up InvK[].
339 void SetUpInverseApprox()
340 {
341  float maxR = MaxInvR;
342 
343  switch (Eqn)
344  {
345  case Distortion_Poly4:
346  // Deprecated
347  assert(false);
348  break;
349 case Distortion_RecipPoly4:
350 {
351 
352  float sampleR[4];
353  float sampleRSq[4];
354  float sampleInv[4];
355  float sampleFit[4];
356 
357  // Found heuristically...
358  sampleR[0] = 0.0f;
359  sampleR[1] = maxR * 0.4f;
360  sampleR[2] = maxR * 0.8f;
361  sampleR[3] = maxR * 1.5f;
362  for (int i = 0; i < 4; i++)
363  {
364  sampleRSq[i] = sampleR[i] * sampleR[i];
365  sampleInv[i] = DistortionFnInverse(sampleR[i]);
366  sampleFit[i] = sampleR[i] / sampleInv[i];
367  }
368  sampleFit[0] = 1.0f;
369  FitCubicPolynomial(InvK, sampleRSq, sampleFit);
370 
371 #if 0
372  // Should be a nearly exact match on the chosen points.
373  OVR_ASSERT ( fabs ( DistortionFnInverse ( sampleR[0] ) - DistortionFnInverseApprox ( sampleR[0] ) ) / maxR < 0.0001f );
374  OVR_ASSERT ( fabs ( DistortionFnInverse ( sampleR[1] ) - DistortionFnInverseApprox ( sampleR[1] ) ) / maxR < 0.0001f );
375  OVR_ASSERT ( fabs ( DistortionFnInverse ( sampleR[2] ) - DistortionFnInverseApprox ( sampleR[2] ) ) / maxR < 0.0001f );
376  OVR_ASSERT ( fabs ( DistortionFnInverse ( sampleR[3] ) - DistortionFnInverseApprox ( sampleR[3] ) ) / maxR < 0.0001f );
377  // Should be a decent match on the rest of the range.
378  const int maxCheck = 20;
379  for ( int i = 0; i < maxCheck; i++ )
380  {
381  float checkR = (float)i * maxR / (float)maxCheck;
382  float realInv = DistortionFnInverse ( checkR );
383  float testInv = DistortionFnInverseApprox ( checkR );
384  float error = fabsf ( realInv - testInv ) / maxR;
385  OVR_ASSERT ( error < 0.1f );
386  }
387 #endif
388  }
389  break;
390 case Distortion_CatmullRom10:
391 {
392 
393  const int NumSegments = NumCoefficients;
394  assert(NumSegments <= NumCoefficients);
395  for (int i = 1; i < NumSegments; i++)
396  {
397  float scaledRsq = (float)i;
398  float rsq = scaledRsq * MaxInvR * MaxInvR / (float)(NumSegments - 1);
399  float r = sqrtf(rsq);
400  float inv = DistortionFnInverse(r);
401  InvK[i] = inv / r;
402  InvK[0] = 1.0f; // TODO: fix this.
403  }
404 
405 #if 0
406  const int maxCheck = 20;
407  for ( int i = 0; i <= maxCheck; i++ )
408  {
409  float checkR = (float)i * MaxInvR / (float)maxCheck;
410  float realInv = DistortionFnInverse ( checkR );
411  float testInv = DistortionFnInverseApprox ( checkR );
412  float error = fabsf ( realInv - testInv ) / MaxR;
413  OVR_ASSERT ( error < 0.01f );
414  }
415 #endif
416  }
417  break;
418 
419  default:
420  break;
421  }
422  }
423 
424  // Sets a bunch of sensible defaults.
425 void SetToIdentity()
426 {
427  for (int i = 0; i < NumCoefficients; i++)
428  {
429  K[i] = 0.0f;
430  InvK[i] = 0.0f;
431  }
432 Eqn = Distortion_RecipPoly4;
433 K[0] = 1.0f;
434  InvK[0] = 1.0f;
435  MaxR = 1.0f;
436  MaxInvR = 1.0f;
437  ChromaticAberration[0] = 0.0f;
438  ChromaticAberration[1] = 0.0f;
439  ChromaticAberration[2] = 0.0f;
440  ChromaticAberration[3] = 0.0f;
441 MetersPerTanAngleAtCenter = 0.05f;
442 }
443 
444 DistortionEqnType Eqn;
445 float K[NumCoefficients];
446 float MaxR; // The highest R you're going to query for - the curve is unpredictable beyond it.
447 
448 float MetersPerTanAngleAtCenter;
449 
450 // Additional per-channel scaling is applied after distortion:
451 // Index [0] - Red channel constant coefficient.
452 // Index [1] - Red channel r^2 coefficient.
453 // Index [2] - Blue channel constant coefficient.
454 // Index [3] - Blue channel r^2 coefficient.
455 float ChromaticAberration[4];
456 
457 float InvK[NumCoefficients];
458 float MaxInvR;
459 };
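// Example (sketch): typical use of LensConfig. Fill in Eqn, K[] and MaxR (as done for
// the DK2 further down), set up the inverse once, then map radii back and forth.
// The helper name and the MaxInvR initialisation are illustrative assumptions only.
#if 0
static void exampleLensConfigRoundTrip(LensConfig& lens)
{
    lens.MaxInvR = lens.DistortionFn(lens.MaxR); // largest distorted radius we will invert
    lens.SetUpInverseApprox();                   // precompute InvK[] for the fast inverse

    float r         = 0.5f;                              // undistorted radius (TanAngle space)
    float rDist     = lens.DistortionFn(r);               // distorted radius
    float rBackSlow = lens.DistortionFnInverse(rDist);        // iterative inverse
    float rBackFast = lens.DistortionFnInverseApprox(rDist);  // spline/poly approximation
    (void)rBackSlow; (void)rBackFast;            // both should be close to r
}
#endif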
460 
461 //-------------------------------------------------------------------------------------
462 struct ovrSizei
463 {
464  int w, h;
465 };
466 //-------------------------------------------------------------------------------------
467 struct ovrSizef
468 {
469  float w, h;
470 };
471 //-------------------------------------------------------------------------------------
472 struct HmdRenderInfo
473 {
474 // The start of this structure is intentionally very similar to HMDInfo in OVER_Device.h
475 // However to reduce interdependencies, one does not simply #include the other.
476 
477 HmdTypeEnum HmdType;
478 
479 // Size of the entire screen
480 ovrSizei ResolutionInPixels;
481 ovrSizef ScreenSizeInMeters;
482 float ScreenGapSizeInMeters;
483 
484 // Characteristics of the lenses.
485 float CenterFromTopInMeters;
486 float LensSeparationInMeters;
487 float LensDiameterInMeters;
488 float LensSurfaceToMidplateInMeters;
489 EyeCupType EyeCups;
490 
491  // Timing & shutter data. All values in seconds.
492  struct ShutterInfo
493 {
494 HmdShutterTypeEnum Type;
495 float VsyncToNextVsync; // 1/framerate
496 float VsyncToFirstScanline; // for global shutter, vsync->shutter open.
497 float FirstScanlineToLastScanline; // for global shutter, will be zero.
498 float PixelSettleTime; // estimated.
499 float PixelPersistence; // Full persistence = 1/framerate.
500 } Shutter;
501 
502  // These are all set from the user's profile.
503  struct EyeConfig
504  {
505 // Distance from center of eyeball to front plane of lens.
506 float ReliefInMeters;
507 // Distance from nose (technically, center of Rift) to the middle of the eye.
508 float NoseToPupilInMeters;
509 
510 LensConfig Distortion;
511 } EyeLeft, EyeRight;
512 
513 HmdRenderInfo()
514 {
515 HmdType = HmdType_None;
516 ScreenGapSizeInMeters = 0.0f;
517 CenterFromTopInMeters = 0.0f;
518 LensSeparationInMeters = 0.0f;
519 LensDiameterInMeters = 0.0f;
520 LensSurfaceToMidplateInMeters = 0.0f;
521 Shutter.Type = HmdShutter_LAST;
522 Shutter.VsyncToNextVsync = 0.0f;
523 Shutter.VsyncToFirstScanline = 0.0f;
524 Shutter.FirstScanlineToLastScanline = 0.0f;
525 Shutter.PixelSettleTime = 0.0f;
526 Shutter.PixelPersistence = 0.0f;
527 EyeCups = EyeCup_DK1A;
528 EyeLeft.ReliefInMeters = 0.0f;
529 EyeLeft.NoseToPupilInMeters = 0.0f;
530 EyeLeft.Distortion.SetToIdentity();
531 EyeRight = EyeLeft;
532 }
533 
534  // The "center eye" is the position the HMD tracking returns,
535  // and games will also usually use it for audio, aiming reticles, some line-of-sight tests, etc.
536 EyeConfig GetEyeCenter() const
537 {
538 EyeConfig result;
539 result.ReliefInMeters = 0.5f * (EyeLeft.ReliefInMeters + EyeRight.ReliefInMeters);
540 result.NoseToPupilInMeters = 0.0f;
541  result.Distortion.SetToIdentity();
542  return result;
543  }
544 };
545 
546 //-------------------------------------------------------------------------------------
547 SLMat4f
548 ovrMatrix4f_OrthoSubProjection(SLMat4f projection, SLVec2f orthoScale, float orthoDistance, float eyeViewAdjustX)
549 {
550 
551  float orthoHorizontalOffset = eyeViewAdjustX / orthoDistance;
552 
553  // Current projection maps real-world vector (x,y,1) to the RT.
554  // We want to find the projection that maps the range [-FovPixels/2,FovPixels/2] to
555  // the physical [-orthoHalfFov,orthoHalfFov]
556  // Note moving the offset from M[0][2]+M[1][2] to M[0][3]+M[1][3] - this means
557  // we don't have to feed in Z=1 all the time.
558  // The horizontal offset math is a little hinky because the destination is
559  // actually [-orthoHalfFov+orthoHorizontalOffset,orthoHalfFov+orthoHorizontalOffset]
560  // So we need to first map [-FovPixels/2,FovPixels/2] to
561  // [-orthoHalfFov+orthoHorizontalOffset,orthoHalfFov+orthoHorizontalOffset]:
562  // x1 = x0 * orthoHalfFov/(FovPixels/2) + orthoHorizontalOffset;
563  // = x0 * 2*orthoHalfFov/FovPixels + orthoHorizontalOffset;
564 // But then we need the same mapping as the existing projection matrix, i.e.
565  // x2 = x1 * Projection.M[0][0] + Projection.M[0][2];
566  // = x0 * (2*orthoHalfFov/FovPixels + orthoHorizontalOffset) * Projection.M[0][0] + Projection.M[0][2];
567  // = x0 * Projection.M[0][0]*2*orthoHalfFov/FovPixels +
568  // orthoHorizontalOffset*Projection.M[0][0] + Projection.M[0][2];
569  // So in the new projection matrix we need to scale by Projection.M[0][0]*2*orthoHalfFov/FovPixels and
570  // offset by orthoHorizontalOffset*Projection.M[0][0] + Projection.M[0][2].
571 
572  SLfloat orthoData[16] = {1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f};
573 
574  orthoData[0] = projection.m(0) * orthoScale.x;
575  orthoData[4] = 0.0f;
576  orthoData[8] = 0.0f;
577  orthoData[12] = -projection.m(8) + (orthoHorizontalOffset * projection.m(0));
578 
579  orthoData[1] = 0.0f;
580  orthoData[5] = -projection.m(5) * orthoScale.y; // Note sign flip (text rendering uses Y=down).
581  orthoData[9] = 0.0f;
582  orthoData[13] = -projection.m(9);
583 
584  // mA: Undo effect of sign
585  orthoData[2] = 0.0f;
586 orthoData[6] = 0.0f;
587  // orthoData[2][2] = projection.m[2][2] * projection.m[3][2] * -1.0f; // reverse right-handedness
588  orthoData[10] = 0.0f;
589  orthoData[14] = 0.0f;
590  // projection.m[2][3];
591 
592  // No perspective correction for ortho.
593  orthoData[3] = 0.0f;
594  orthoData[7] = 0.0f;
595  orthoData[11] = 0.0f;
596  orthoData[15] = 1.0f;
597 
598  SLMat4f ortho(orthoData);
599 
600  return ortho;
601 }
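// Example (sketch): ovrMatrix4f_OrthoSubProjection is meant for rendering 2D overlays
// (text, HUD) so that they appear at a fixed distance inside one eye's 3D projection.
// The helper name and all numeric values below are illustrative, not calibrated.
#if 0
static SLMat4f exampleHudProjection(const SLMat4f& eyeProjection)
{
    float   hudDistance    = 0.8f;                  // virtual distance of the HUD plane in meters
    SLVec2f pixelsToTan    = SLVec2f(1.0f / 500.0f, // how much tan(angle) one HUD pixel covers
                                     1.0f / 500.0f);
    float   eyeViewAdjustX = 0.032f;                // roughly half an IPD, shifts the HUD per eye

    return ovrMatrix4f_OrthoSubProjection(eyeProjection, pixelsToTan, hudDistance, eyeViewAdjustX);
}
#endif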
602 
603 //-------------------------------------------------------------------------------------
604 struct DistortionRenderDesc
605 {
606 // The raw lens values.
607 LensConfig Lens;
608 
609 // These map from [-1,1] across the eye being rendered into TanEyeAngle space (but still distorted)
610 SLVec2f LensCenter;
611 SLVec2f TanEyeAngleScale;
612 
613 // Computed from device characteristics, IPD and eye-relief.
614 // (not directly used for rendering, but very useful)
615 SLVec2f PixelsPerTanAngleAtCenter;
616 };
617 
618 //-------------------------------------------------------------------------------------
619 typedef struct ovrDistortionVertex_
620 {
621  SLVec2f ScreenPosNDC; // [-1,+1],[-1,+1] over the entire framebuffer.
622  float TimeWarpFactor; // Lerp factor between time-warp matrices. Can be encoded in Pos.z.
623  float VignetteFactor; // Vignette fade factor. Can be encoded in Pos.w.
624 SLVec2f TanEyeAnglesR;
625 SLVec2f TanEyeAnglesG;
626 SLVec2f TanEyeAnglesB;
627 } ovrDistortionVertex;
628 
629 //-------------------------------------------------------------------------------------
630 typedef struct ovrDistortionMesh_
631 {
632 ovrDistortionVertex* pVertexData;
633 unsigned short* pIndexData;
634 unsigned int VertexCount;
635 unsigned int IndexCount;
636 } ovrDistortionMesh;
637 
638 //-------------------------------------------------------------------------------------
639 struct DistortionMeshVertexData
640 {
641 // [-1,+1],[-1,+1] over the entire framebuffer.
642 SLVec2f ScreenPosNDC;
643 
644 // [0.0-1.0] interpolation value for time warping - see documentation for details.
645 float TimewarpLerp;
646 
647 // [0.0-1.0] fade-to-black at the edges to reduce peripheral vision noise.
648 float Shade;
649 
650 // The red, green, and blue vectors in tan(angle) space.
651 // Scale and offset by the values in StereoEyeParams.EyeToSourceUV.Scale
652 // and StereoParams.EyeToSourceUV.Offset to get to real texture UV coords.
653 SLVec2f TanEyeAnglesR;
654 SLVec2f TanEyeAnglesG;
655 SLVec2f TanEyeAnglesB;
656 };
657 
658 //-----------------------------------------------------------------------------------
659 // A set of "reverse-mapping" functions, mapping from real-world and/or texture space back to the framebuffer.
660 
661 SLVec2f
662 TransformTanFovSpaceToScreenNDC(DistortionRenderDesc const& distortion,
663 const SLVec2f& tanEyeAngle,
664  bool usePolyApprox /*= false*/)
665 {
666  float tanEyeAngleRadius = tanEyeAngle.length();
667  float tanEyeAngleDistortedRadius = distortion.Lens.DistortionFnInverseApprox(tanEyeAngleRadius);
668  if (!usePolyApprox)
669  {
670  tanEyeAngleDistortedRadius = distortion.Lens.DistortionFnInverse(tanEyeAngleRadius);
671  }
672  SLVec2f tanEyeAngleDistorted = tanEyeAngle;
673  if (tanEyeAngleRadius > 0.0f)
674  {
675  tanEyeAngleDistorted = tanEyeAngle * (tanEyeAngleDistortedRadius / tanEyeAngleRadius);
676  }
677 
678  SLVec2f framebufferNDC;
679  framebufferNDC.x = (tanEyeAngleDistorted.x / distortion.TanEyeAngleScale.x) + distortion.LensCenter.x;
680  framebufferNDC.y = (tanEyeAngleDistorted.y / distortion.TanEyeAngleScale.y) + distortion.LensCenter.y;
681 
682  return framebufferNDC;
683 }
684 
685 //-------------------------------------------------------------------------------------
686 // Same, with chromatic aberration correction.
687 void TransformScreenNDCToTanFovSpaceChroma(SLVec2f* resultR, SLVec2f* resultG, SLVec2f* resultB, DistortionRenderDesc const& distortion, const SLVec2f& framebufferNDC)
688 {
689  // Scale to TanHalfFov space, but still distorted.
690  SLVec2f tanEyeAngleDistorted;
691  tanEyeAngleDistorted.x = (framebufferNDC.x - distortion.LensCenter.x) * distortion.TanEyeAngleScale.x;
692  tanEyeAngleDistorted.y = (framebufferNDC.y - distortion.LensCenter.y) * distortion.TanEyeAngleScale.y;
693  // Distort.
694  float radiusSquared = (tanEyeAngleDistorted.x * tanEyeAngleDistorted.x) + (tanEyeAngleDistorted.y * tanEyeAngleDistorted.y);
695  SLVec3f distortionScales = distortion.Lens.DistortionFnScaleRadiusSquaredChroma(radiusSquared);
696  *resultR = tanEyeAngleDistorted * distortionScales.x;
697  *resultG = tanEyeAngleDistorted * distortionScales.y;
698  *resultB = tanEyeAngleDistorted * distortionScales.z;
699 }
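// Example (sketch): mapping one framebuffer position through the distortion.
// The forward direction (screen NDC -> per-channel tan-FOV space) is what the mesh
// builder below needs; the reverse uses TransformTanFovSpaceToScreenNDC.
// The helper name and sample coordinates are illustrative only.
#if 0
static void exampleChromaLookup(const DistortionRenderDesc& distortion)
{
    SLVec2f screenNDC(0.25f, -0.1f); // some point on one eye's half of the panel
    SLVec2f tanR, tanG, tanB;
    TransformScreenNDCToTanFovSpaceChroma(&tanR, &tanG, &tanB, distortion, screenNDC);

    // Going back from the green channel should land near the original point.
    SLVec2f backNDC = TransformTanFovSpaceToScreenNDC(distortion, tanG, false);
    (void)backNDC;
}
#endif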
700 
701 //-------------------------------------------------------------------------------------
702 typedef struct ovrFovPort_
703 {
704  /// The tangent of the angle between the viewing vector and the top edge of the field of view.
705  float UpTan;
706  /// The tangent of the angle between the viewing vector and the bottom edge of the field of view.
707  float DownTan;
708  /// The tangent of the angle between the viewing vector and the left edge of the field of view.
709  float LeftTan;
710  /// The tangent of the angle between the viewing vector and the right edge of the field of view.
711  float RightTan;
712 } ovrFovPort;
713 
714 //-------------------------------------------------------------------------------------
715 struct ScaleAndOffset2D
716 {
717 SLVec2f Scale;
718 SLVec2f Offset;
719 
720  ScaleAndOffset2D(float sx = 0.0f, float sy = 0.0f, float ox = 0.0f, float oy = 0.0f)
721  : Scale(sx, sy), Offset(ox, oy)
722  {
723  }
724 };
725 
726 //-------------------------------------------------------------------------------------
727 SLVec2f
728 TransformTanFovSpaceToRendertargetNDC(ScaleAndOffset2D const& eyeToSourceNDC,
729 SLVec2f const& tanEyeAngle)
730 {
731  SLVec2f textureNDC;
732  textureNDC.x = tanEyeAngle.x * eyeToSourceNDC.Scale.x + eyeToSourceNDC.Offset.x;
733  textureNDC.y = tanEyeAngle.y * eyeToSourceNDC.Scale.y + eyeToSourceNDC.Offset.y;
734  return textureNDC;
735 }
736 
737 //-------------------------------------------------------------------------------------
738 SLVec2f
739 TransformRendertargetNDCToTanFovSpace(const ScaleAndOffset2D& eyeToSourceNDC,
740 const SLVec2f& textureNDC)
741 {
742  SLVec2f tanEyeAngle;
743  tanEyeAngle.x = (textureNDC.x - eyeToSourceNDC.Offset.x) / eyeToSourceNDC.Scale.x;
744  tanEyeAngle.y = (textureNDC.y - eyeToSourceNDC.Offset.y) / eyeToSourceNDC.Scale.y;
745  return tanEyeAngle;
746 }
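// Example (sketch): the two transforms above are exact inverses of each other for the
// same ScaleAndOffset2D. Helper name illustrative only.
#if 0
static bool exampleNDCTanRoundTrip(const ScaleAndOffset2D& eyeToSourceNDC)
{
    SLVec2f texNDC(0.3f, -0.6f);
    SLVec2f tanAngle = TransformRendertargetNDCToTanFovSpace(eyeToSourceNDC, texNDC);
    SLVec2f back     = TransformTanFovSpaceToRendertargetNDC(eyeToSourceNDC, tanAngle);
    return fabs(back.x - texNDC.x) < 1e-5f && fabs(back.y - texNDC.y) < 1e-5f;
}
#endif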
747 
748 //-------------------------------------------------------------------------------------
749 ScaleAndOffset2D
750 CreateNDCScaleAndOffsetFromFov(ovrFovPort tanHalfFov)
751 {
752  float projXScale = 2.0f / (tanHalfFov.LeftTan + tanHalfFov.RightTan);
753  float projXOffset = (tanHalfFov.LeftTan - tanHalfFov.RightTan) * projXScale * 0.5f;
754  float projYScale = 2.0f / (tanHalfFov.UpTan + tanHalfFov.DownTan);
755  float projYOffset = (tanHalfFov.UpTan - tanHalfFov.DownTan) * projYScale * 0.5f;
756 
757  ScaleAndOffset2D result;
758  result.Scale = SLVec2f(projXScale, projYScale);
759  result.Offset = SLVec2f(projXOffset, projYOffset);
760  // Hey - why is that Y.Offset negated?
761  // It's because a projection matrix transforms from world coords with Y=up,
762  // whereas this is from NDC which is Y=down.
763 
764  return result;
765 }
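// Worked example (sketch): for a symmetric FOV with all four tangents equal to 1.0
// (90 degrees horizontally and vertically) the function returns Scale = (1, 1) and
// Offset = (0, 0); asymmetric tangents shift the projection center via the offset.
// Helper name illustrative only.
#if 0
static ScaleAndOffset2D exampleSymmetricFovNDC()
{
    ovrFovPort fov;
    fov.UpTan = fov.DownTan = fov.LeftTan = fov.RightTan = 1.0f;
    return CreateNDCScaleAndOffsetFromFov(fov); // Scale (1,1), Offset (0,0)
}
#endif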
766 
767 //-------------------------------------------------------------------------------------
768 SLMat4f
769 CreateProjection(bool rightHanded, ovrFovPort tanHalfFov, float zNear /*= 0.01f*/, float zFar /*= 10000.0f*/)
770 {
771  // A projection matrix is very like a scaling from NDC, so we can start with that.
772  ScaleAndOffset2D scaleAndOffset = CreateNDCScaleAndOffsetFromFov(tanHalfFov);
773 
774  float handednessScale = 1.0f;
775  if (rightHanded)
776  {
777  handednessScale = -1.0f;
778  }
779 
780  float proj[4][4] = {{1, 0, 0, 0},
781  {0, 1, 0, 0},
782  {0, 0, 1, 0},
783  {0, 0, 0, 1}};
784 
785  // Produces X result, mapping clip edges to [-w,+w]
786  proj[0][0] = scaleAndOffset.Scale.x;
787  proj[0][1] = 0.0f;
788  proj[0][2] = handednessScale * scaleAndOffset.Offset.x;
789  proj[0][3] = 0.0f;
790 
791  // Produces Y result, mapping clip edges to [-w,+w]
792  // Hey - why is that YOffset negated?
793  // It's because a projection matrix transforms from world coords with Y=up,
794  // whereas this is derived from an NDC scaling, which is Y=down.
795  proj[1][0] = 0.0f;
796  proj[1][1] = scaleAndOffset.Scale.y;
797  proj[1][2] = handednessScale * -scaleAndOffset.Offset.y;
798  proj[1][3] = 0.0f;
799 
800  // Produces Z-buffer result - app needs to fill this in with whatever Z range it wants.
801  // We'll just use some defaults for now.
802  proj[2][0] = 0.0f;
803  proj[2][1] = 0.0f;
804  proj[2][2] = -handednessScale * zFar / (zNear - zFar);
805  proj[2][3] = (zFar * zNear) / (zNear - zFar);
806 
807  // Produces W result (= Z in)
808  proj[3][0] = 0.0f;
809  proj[3][1] = 0.0f;
810  proj[3][2] = handednessScale;
811  proj[3][3] = 0.0f;
812 
813  SLMat4f projection((SLfloat*)proj);
814  projection.transpose();
815 
816  return projection;
817 }
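// Example (sketch): building a right-handed projection matrix from the DK2-like FOV
// port that is also used in createSLDistortionMesh() below. The near/far planes and
// the helper name are illustrative assumptions.
#if 0
static SLMat4f exampleEyeProjection()
{
    ovrFovPort fov;
    fov.UpTan    = 1.329f;
    fov.DownTan  = 1.329f;
    fov.LeftTan  = 1.058f;
    fov.RightTan = 1.092f;
    return CreateProjection(true, fov, 0.01f, 1000.0f); // rightHanded, zNear, zFar
}
#endif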
818 
819 //-------------------------------------------------------------------------------------
820 void createSLDistortionMesh(DistortionMeshVertexData** ppVertices,
821 uint16_t** ppTriangleListIndices,
822  SLuint* pNumVertices,
823  SLuint* pNumTriangles,
824  bool rightEye,
825  const HmdRenderInfo& hmdRenderInfo,
826  const DistortionRenderDesc& distortion,
827  const ScaleAndOffset2D& eyeToSourceNDC)
828 {
829  static const int DMA_GridSizeLog2 = 6;
830  static const int DMA_GridSize = 1 << DMA_GridSizeLog2;
831  static const int DMA_NumVertsPerEye = (DMA_GridSize + 1) * (DMA_GridSize + 1);
832  static const int DMA_NumTrisPerEye = (DMA_GridSize) * (DMA_GridSize)*2;
833 
834  // When does the fade-to-black edge start? Chosen heuristically.
835  const float fadeOutBorderFraction = 0.075f;
836 
837  // Populate vertex buffer info
838  float xOffset = 0.0f;
839 
840  if (rightEye)
841  {
842  xOffset = 1.0f;
843  }
844  *pNumVertices = DMA_NumVertsPerEye;
845  *pNumTriangles = DMA_NumTrisPerEye;
846 
847 *ppVertices = new DistortionMeshVertexData[*pNumVertices];
848 *ppTriangleListIndices = new uint16_t[(*pNumTriangles) * 3];
849 
850  // First pass - build up raw vertex data.
851  DistortionMeshVertexData* pcurVert = *ppVertices;
852 
853  for (int y = 0; y <= DMA_GridSize; y++)
854  {
855  for (int x = 0; x <= DMA_GridSize; x++)
856  {
857 
858  SLVec2f sourceCoordNDC;
859  // NDC texture coords [-1,+1]
860  sourceCoordNDC.x = 2.0f * ((float)x / (float)DMA_GridSize) - 1.0f;
861  sourceCoordNDC.y = 2.0f * ((float)y / (float)DMA_GridSize) - 1.0f;
862  SLVec2f tanEyeAngle = TransformRendertargetNDCToTanFovSpace(eyeToSourceNDC, sourceCoordNDC);
863 
864  // Find a corresponding screen position.
865  // Note - this function does not have to be precise - we're just trying to match the mesh tessellation
866  // with the shape of the distortion to minimize the number of triangles needed.
867  SLVec2f screenNDC = TransformTanFovSpaceToScreenNDC(distortion, tanEyeAngle, false);
868  // ...but don't let verts overlap to the other eye.
869  screenNDC.x = std::max(-1.0f, std::min(screenNDC.x, 1.0f));
870  screenNDC.y = std::max(-1.0f, std::min(screenNDC.y, 1.0f));
871 
872  // From those screen positions, we then need (effectively) RGB UVs.
873  // This is the function that actually matters when doing the distortion calculation.
874  SLVec2f tanEyeAnglesR, tanEyeAnglesG, tanEyeAnglesB;
875  TransformScreenNDCToTanFovSpaceChroma(&tanEyeAnglesR, &tanEyeAnglesG, &tanEyeAnglesB, distortion, screenNDC);
876 
877  pcurVert->TanEyeAnglesR = tanEyeAnglesR;
878  pcurVert->TanEyeAnglesG = tanEyeAnglesG;
879  pcurVert->TanEyeAnglesB = tanEyeAnglesB;
880 
881  HmdShutterTypeEnum shutterType = hmdRenderInfo.Shutter.Type;
882  switch (shutterType)
883  {
884  case HmdShutteRT_global:
885  pcurVert->TimewarpLerp = 0.0f;
886  break;
887 case HmdShutter_RollingLeftToRight:
888 // Retrace is left to right - left eye goes 0.0 -> 0.5, then right goes 0.5 -> 1.0
889  pcurVert->TimewarpLerp = screenNDC.x * 0.25f + 0.25f;
890  if (rightEye)
891  {
892  pcurVert->TimewarpLerp += 0.5f;
893  }
894  break;
895 case HmdShutter_RollingRightToLeft:
896 // Retrace is right to left - right eye goes 0.0 -> 0.5, then left goes 0.5 -> 1.0
897  pcurVert->TimewarpLerp = 0.75f - screenNDC.x * 0.25f;
898  if (rightEye)
899  {
900  pcurVert->TimewarpLerp -= 0.5f;
901  }
902  break;
903 case HmdShutter_RollingTopToBottom:
904 // Retrace is top to bottom on both eyes at the same time.
905  pcurVert->TimewarpLerp = screenNDC.y * 0.5f + 0.5f;
906  break;
907  default: assert(false); break;
908  }
909 
910  // Fade out at texture edges.
911  // The furthest out will be the blue channel, because of chromatic aberration (true of any standard lens)
912  SLVec2f sourceTexCoordBlueNDC = TransformTanFovSpaceToRendertargetNDC(eyeToSourceNDC, tanEyeAnglesB);
913  float edgeFadeIn = (1.0f / fadeOutBorderFraction) *
914  (1.0f - std::max(Utils::abs(sourceTexCoordBlueNDC.x), Utils::abs(sourceTexCoordBlueNDC.y)));
915  // Also fade out at screen edges.
916  float edgeFadeInScreen = (2.0f / fadeOutBorderFraction) *
917  (1.0f - std::max(Utils::abs(screenNDC.x), Utils::abs(screenNDC.y)));
918  edgeFadeIn = std::min(edgeFadeInScreen, edgeFadeIn);
919 
920  pcurVert->Shade = std::max(0.0f, std::min(edgeFadeIn, 1.0f));
921  pcurVert->ScreenPosNDC.x = 0.5f * screenNDC.x - 0.5f + xOffset;
922  pcurVert->ScreenPosNDC.y = -screenNDC.y;
923 
924  pcurVert++;
925  }
926  }
927 
928  // Populate index buffer info
929  uint16_t* pcurIndex = *ppTriangleListIndices;
930 
931  for (int triNum = 0; triNum < DMA_GridSize * DMA_GridSize; triNum++)
932  {
933  // Use a Morton order to help locality of FB, texture and vertex cache.
934  // (0.325ms raster order -> 0.257ms Morton order)
935  assert(DMA_GridSize <= 256);
936  int x = ((triNum & 0x0001) >> 0) |
937  ((triNum & 0x0004) >> 1) |
938  ((triNum & 0x0010) >> 2) |
939  ((triNum & 0x0040) >> 3) |
940  ((triNum & 0x0100) >> 4) |
941  ((triNum & 0x0400) >> 5) |
942  ((triNum & 0x1000) >> 6) |
943  ((triNum & 0x4000) >> 7);
944  int y = ((triNum & 0x0002) >> 1) |
945  ((triNum & 0x0008) >> 2) |
946  ((triNum & 0x0020) >> 3) |
947  ((triNum & 0x0080) >> 4) |
948  ((triNum & 0x0200) >> 5) |
949  ((triNum & 0x0800) >> 6) |
950  ((triNum & 0x2000) >> 7) |
951  ((triNum & 0x8000) >> 8);
952  int FirstVertex = x * (DMA_GridSize + 1) + y;
953  // Another twist - we want the top-left and bottom-right quadrants to
954  // have the triangles split one way, the other two split the other.
955 // +---+---+---+---+
956 // |  /|  /|\  |\  |
957 // | / | / | \ | \ |
958 // |/  |/  |  \|  \|
959 // +---+---+---+---+
960 // |  /|  /|\  |\  |
961 // | / | / | \ | \ |
962 // |/  |/  |  \|  \|
963 // +---+---+---+---+
964 // |\  |\  |  /|  /|
965 // | \ | \ | / | / |
966 // |  \|  \|/  |/  |
967 // +---+---+---+---+
968 // |\  |\  |  /|  /|
969 // | \ | \ | / | / |
970 // |  \|  \|/  |/  |
971 // +---+---+---+---+
972  // This way triangle edges don't span long distances over the distortion function,
973  // so linear interpolation works better & we can use fewer tris.
974  if ((x < DMA_GridSize / 2) != (y < DMA_GridSize / 2)) // != is logical XOR
975  {
976  *pcurIndex++ = (uint16_t)FirstVertex;
977  *pcurIndex++ = (uint16_t)FirstVertex + 1;
978  *pcurIndex++ = (uint16_t)FirstVertex + (DMA_GridSize + 1) + 1;
979 
980  *pcurIndex++ = (uint16_t)FirstVertex + (DMA_GridSize + 1) + 1;
981  *pcurIndex++ = (uint16_t)FirstVertex + (DMA_GridSize + 1);
982  *pcurIndex++ = (uint16_t)FirstVertex;
983  }
984  else
985  {
986  *pcurIndex++ = (uint16_t)FirstVertex;
987  *pcurIndex++ = (uint16_t)FirstVertex + 1;
988  *pcurIndex++ = (uint16_t)FirstVertex + (DMA_GridSize + 1);
989 
990  *pcurIndex++ = (uint16_t)FirstVertex + 1;
991  *pcurIndex++ = (uint16_t)FirstVertex + (DMA_GridSize + 1) + 1;
992  *pcurIndex++ = (uint16_t)FirstVertex + (DMA_GridSize + 1);
993  }
994  }
995 }
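// Example (sketch): generating the raw distortion mesh for one eye. With the fixed
// 64x64 grid above this yields 65*65 = 4225 vertices and 64*64*2 = 8192 triangles per
// eye; the caller owns and must delete[] both arrays. Names are illustrative only.
#if 0
static void exampleBuildRawMesh(const HmdRenderInfo&        hmdri,
                                const DistortionRenderDesc& distortion,
                                const ScaleAndOffset2D&     eyeToSourceNDC)
{
    DistortionMeshVertexData* vertices = nullptr;
    uint16_t*                 indices  = nullptr;
    SLuint                    numVerts = 0;
    SLuint                    numTris  = 0;

    createSLDistortionMesh(&vertices, &indices, &numVerts, &numTris,
                           false, // left eye
                           hmdri, distortion, eyeToSourceNDC);

    // ... upload vertices/indices to a vertex array or buffer here ...

    delete[] vertices;
    delete[] indices;
}
#endif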
996 
997 //-------------------------------------------------------------------------------------
998 void createSLDistortionMesh(SLGLProgram* stereoOculusDistProgram, SLEyeType eye, SLGLVertexArray& vao)
999 {
1000 // Fill the variables below with hard-coded Oculus Rift DK2 parameters.
1001  HmdRenderInfo hmdri;
1002  hmdri.HmdType = HmdType_DK2;
1003  hmdri.ResolutionInPixels.w = 1920;
1004  hmdri.ResolutionInPixels.h = 1080;
1005  hmdri.ScreenSizeInMeters.w = 0.125760004f;
1006  hmdri.ScreenSizeInMeters.h = 0.0707399994f;
1007  hmdri.ScreenGapSizeInMeters = 0.0f;
1008  hmdri.CenterFromTopInMeters = 0.0353f;
1009  hmdri.LensSeparationInMeters = 0.0635f;
1010  hmdri.LensDiameterInMeters = 0.0399f;
1011  hmdri.LensSurfaceToMidplateInMeters = 0.01964f;
1012  hmdri.EyeCups = EyeCup_DK2A;
1013 hmdri.Shutter.Type = HmdShutter_RollingRightToLeft;
1014 hmdri.Shutter.VsyncToNextVsync = 0.013157f;
1015  hmdri.Shutter.VsyncToFirstScanline = 2.73000005e-005f;
1016  hmdri.Shutter.FirstScanlineToLastScanline = 0.0131f;
1017  hmdri.Shutter.PixelSettleTime = 0.0f;
1018  hmdri.Shutter.PixelPersistence = 0.0023f;
1019 
1020  hmdri.EyeLeft.ReliefInMeters = 0.0109f;
1021  hmdri.EyeLeft.NoseToPupilInMeters = 0.032f;
1022 hmdri.EyeLeft.Distortion.Eqn = Distortion_CatmullRom10;
1023 hmdri.EyeLeft.Distortion.K[0] = 1.00300002f;
1024  hmdri.EyeLeft.Distortion.K[1] = 1.01999998f;
1025  hmdri.EyeLeft.Distortion.K[2] = 1.04200006f;
1026  hmdri.EyeLeft.Distortion.K[3] = 1.06599998f;
1027  hmdri.EyeLeft.Distortion.K[4] = 1.09399998f;
1028  hmdri.EyeLeft.Distortion.K[5] = 1.12600005f;
1029  hmdri.EyeLeft.Distortion.K[6] = 1.16199994f;
1030  hmdri.EyeLeft.Distortion.K[7] = 1.20299995f;
1031  hmdri.EyeLeft.Distortion.K[8] = 1.25000000f;
1032  hmdri.EyeLeft.Distortion.K[9] = 1.30999994f;
1033  hmdri.EyeLeft.Distortion.K[10] = 1.38000000f;
1034  hmdri.EyeLeft.Distortion.MaxR = 1.00000000f;
1035  hmdri.EyeLeft.Distortion.MetersPerTanAngleAtCenter = 0.0359999985f;
1036  hmdri.EyeLeft.Distortion.ChromaticAberration[0] = -0.0123399980f;
1037  hmdri.EyeLeft.Distortion.ChromaticAberration[1] = -0.0164999980f;
1038  hmdri.EyeLeft.Distortion.ChromaticAberration[2] = 0.0205899980f;
1039  hmdri.EyeLeft.Distortion.ChromaticAberration[3] = 0.0164999980f;
1040  hmdri.EyeLeft.Distortion.InvK[0] = 1.0f;
1041  hmdri.EyeLeft.Distortion.InvK[1] = 0.964599669f;
1042  hmdri.EyeLeft.Distortion.InvK[2] = 0.931152463f;
1043  hmdri.EyeLeft.Distortion.InvK[3] = 0.898376584f;
1044  hmdri.EyeLeft.Distortion.InvK[4] = 0.867980957f;
1045  hmdri.EyeLeft.Distortion.InvK[5] = 0.839782715f;
1046  hmdri.EyeLeft.Distortion.InvK[6] = 0.813964784f;
1047  hmdri.EyeLeft.Distortion.InvK[7] = 0.789245605f;
1048  hmdri.EyeLeft.Distortion.InvK[8] = 0.765808105f;
1049  hmdri.EyeLeft.Distortion.InvK[9] = 0.745178223f;
1050  hmdri.EyeLeft.Distortion.InvK[10] = 0.724639833f;
1051  hmdri.EyeLeft.Distortion.MaxInvR = 1.38000000f;
1052 
1053  hmdri.EyeRight.ReliefInMeters = 0.0109f;
1054  hmdri.EyeRight.NoseToPupilInMeters = 0.032f;
1055 hmdri.EyeRight.Distortion.Eqn = Distortion_CatmullRom10;
1056 hmdri.EyeRight.Distortion.K[0] = 1.00300002f;
1057  hmdri.EyeRight.Distortion.K[1] = 1.01999998f;
1058  hmdri.EyeRight.Distortion.K[2] = 1.04200006f;
1059  hmdri.EyeRight.Distortion.K[3] = 1.06599998f;
1060  hmdri.EyeRight.Distortion.K[4] = 1.09399998f;
1061  hmdri.EyeRight.Distortion.K[5] = 1.12600005f;
1062  hmdri.EyeRight.Distortion.K[6] = 1.16199994f;
1063  hmdri.EyeRight.Distortion.K[7] = 1.20299995f;
1064  hmdri.EyeRight.Distortion.K[8] = 1.25000000f;
1065  hmdri.EyeRight.Distortion.K[9] = 1.30999994f;
1066  hmdri.EyeRight.Distortion.K[10] = 1.38000000f;
1067  hmdri.EyeRight.Distortion.MaxR = 1.00000000f;
1068  hmdri.EyeRight.Distortion.MetersPerTanAngleAtCenter = 0.0359999985f;
1069  hmdri.EyeRight.Distortion.ChromaticAberration[0] = -0.0123399980f;
1070  hmdri.EyeRight.Distortion.ChromaticAberration[1] = -0.0164999980f;
1071  hmdri.EyeRight.Distortion.ChromaticAberration[2] = 0.0205899980f;
1072  hmdri.EyeRight.Distortion.ChromaticAberration[3] = 0.0164999980f;
1073  hmdri.EyeRight.Distortion.InvK[0] = 1.0f;
1074  hmdri.EyeRight.Distortion.InvK[1] = 0.964599669f;
1075  hmdri.EyeRight.Distortion.InvK[2] = 0.931152463f;
1076  hmdri.EyeRight.Distortion.InvK[3] = 0.898376584f;
1077  hmdri.EyeRight.Distortion.InvK[4] = 0.867980957f;
1078  hmdri.EyeRight.Distortion.InvK[5] = 0.839782715f;
1079  hmdri.EyeRight.Distortion.InvK[6] = 0.813964784f;
1080  hmdri.EyeRight.Distortion.InvK[7] = 0.789245605f;
1081  hmdri.EyeRight.Distortion.InvK[8] = 0.765808105f;
1082  hmdri.EyeRight.Distortion.InvK[9] = 0.745178223f;
1083  hmdri.EyeRight.Distortion.InvK[10] = 0.724639833f;
1084  hmdri.EyeRight.Distortion.MaxInvR = 1.38000000f;
1085 
1086  DistortionRenderDesc distortion;
1087  distortion.Lens.Eqn = Distortion_CatmullRom10;
1088  distortion.Lens.K[0] = 1.00300002f;
1089  distortion.Lens.K[1] = 1.01999998f;
1090  distortion.Lens.K[2] = 1.04200006f;
1091  distortion.Lens.K[3] = 1.06599998f;
1092  distortion.Lens.K[4] = 1.09399998f;
1093  distortion.Lens.K[5] = 1.12600005f;
1094  distortion.Lens.K[6] = 1.16199994f;
1095  distortion.Lens.K[7] = 1.20299995f;
1096  distortion.Lens.K[8] = 1.25000000f;
1097  distortion.Lens.K[9] = 1.30999994f;
1098  distortion.Lens.K[10] = 1.38000000f;
1099  distortion.Lens.MaxR = 1.00000000f;
1100  distortion.Lens.MetersPerTanAngleAtCenter = 0.0359999985f;
1101  distortion.Lens.ChromaticAberration[0] = -0.0123399980f;
1102  distortion.Lens.ChromaticAberration[1] = -0.0164999980f;
1103  distortion.Lens.ChromaticAberration[2] = 0.0205899980f;
1104  distortion.Lens.ChromaticAberration[3] = 0.0164999980f;
1105  distortion.Lens.InvK[0] = 1.0f;
1106  distortion.Lens.InvK[1] = 0.964599669f;
1107  distortion.Lens.InvK[2] = 0.931152463f;
1108  distortion.Lens.InvK[3] = 0.898376584f;
1109  distortion.Lens.InvK[4] = 0.867980957f;
1110  distortion.Lens.InvK[5] = 0.839782715f;
1111  distortion.Lens.InvK[6] = 0.813964784f;
1112  distortion.Lens.InvK[7] = 0.789245605f;
1113  distortion.Lens.InvK[8] = 0.765808105f;
1114  distortion.Lens.InvK[9] = 0.745178223f;
1115  distortion.Lens.InvK[10] = 0.724639833f;
1116  distortion.Lens.MaxInvR = 1.38000000f;
1117 
1118  distortion.LensCenter.x = -0.00986003876f;
1119  distortion.LensCenter.y = 0.000000000f;
1120  distortion.TanEyeAngleScale.x = 0.873333395f;
1121  distortion.TanEyeAngleScale.y = 0.982500017f;
1122  distortion.PixelsPerTanAngleAtCenter.x = 549.618286f;
1123  distortion.PixelsPerTanAngleAtCenter.y = 549.618286f;
1124 
1125  ovrFovPort fov;
1126  fov.DownTan = 1.329f;
1127  fov.UpTan = 1.329f;
1128  fov.LeftTan = 1.058f;
1129  fov.RightTan = 1.092f;
1130 
1131  ovrDistortionVertex* vertexData;
1132  SLushort* indexData;
1133  SLuint triangleCount = 0;
1134  SLuint vertexCount = 0;
1135 #ifdef SL_GUI_JAVA
1136  bool rightEye = (eye == rightEye);
1137 #else
1138  bool rightEye = (eye == SLEyeType::ET_right);
1139 #endif
1140  ScaleAndOffset2D eyeToSourceNDC = CreateNDCScaleAndOffsetFromFov(fov);
1141  eyeToSourceNDC.Scale.x = 0.929788947f;
1142  eyeToSourceNDC.Scale.y = 0.752283394f;
1143  eyeToSourceNDC.Offset.x = -0.0156717598f;
1144  eyeToSourceNDC.Offset.y = 0.0f;
1145  if (rightEye)
1146  {
1147  eyeToSourceNDC.Offset.x *= -1;
1148  distortion.LensCenter.x *= -1;
1149  }
1150 
1151 createSLDistortionMesh((DistortionMeshVertexData**)&vertexData,
1152 (uint16_t**)&indexData,
1153  &vertexCount,
1154  &triangleCount,
1155  rightEye,
1156  hmdri,
1157  distortion,
1158  eyeToSourceNDC);
1159 
1160  SLuint indexCount = triangleCount * 3;
1161 
1162 // Now parse the vertex data and create a render-ready vertex buffer from it
1163  SLVVertexOculus verts;
1164  verts.resize(vertexCount);
1165 
1166  SLVuint tempIndex;
1167 
1168  ovrDistortionVertex* ov = vertexData;
1169  for (SLuint vertNum = 0; vertNum < vertexCount; vertNum++)
1170  {
1171  verts[vertNum].screenPosNDC.x = ov->ScreenPosNDC.x;
1172  verts[vertNum].screenPosNDC.y = ov->ScreenPosNDC.y;
1173  verts[vertNum].timeWarpFactor = ov->TimeWarpFactor;
1174  verts[vertNum].vignetteFactor = ov->VignetteFactor;
1175  verts[vertNum].tanEyeAnglesR.x = ov->TanEyeAnglesR.x;
1176  verts[vertNum].tanEyeAnglesR.y = ov->TanEyeAnglesR.y;
1177  verts[vertNum].tanEyeAnglesG.x = ov->TanEyeAnglesG.x;
1178  verts[vertNum].tanEyeAnglesG.y = ov->TanEyeAnglesG.y;
1179  verts[vertNum].tanEyeAnglesB.x = ov->TanEyeAnglesB.x;
1180  verts[vertNum].tanEyeAnglesB.y = ov->TanEyeAnglesB.y;
1181  ov++;
1182  }
1183 
1184  for (SLuint i = 0; i < indexCount; i++)
1185  tempIndex.push_back(indexData[i]);
1186 
1187  SLGLProgram* sp = stereoOculusDistProgram;
1188  sp->useProgram();
1189 
1190  // set attributes with all the same data pointer to the interleaved array
1191  vao.setAttrib(AT_position, 2, AT_position, &verts[0]);
1192  vao.setAttrib(AT_custom1, 1, 1, &verts[0]);
1193  vao.setAttrib(AT_custom2, 1, 2, &verts[0]);
1194  vao.setAttrib(AT_custom3, 2, 3, &verts[0]);
1195  vao.setAttrib(AT_custom4, 2, 4, &verts[0]);
1196  vao.setAttrib(AT_custom5, 2, 5, &verts[0]);
1197  vao.setIndices(indexCount, BT_uint, &tempIndex[0]);
1198  vao.generate(vertexCount);
1199 
1200  // dispose temp. arrays
1201  delete[] vertexData;
1202  delete[] indexData;
1203 }
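// Usage sketch: how the function above is typically driven from the Oculus stereo
// rendering path, once per eye, with the stereo distortion shader program and a VAO
// that is later drawn over the corresponding half of the window. The names sp,
// vaoLeft and vaoRight are illustrative, and ET_left is assumed to be the left-eye
// counterpart of ET_right in SLEyeType.
#if 0
static void exampleCreateBothEyeMeshes(SLGLProgram*     sp,
                                       SLGLVertexArray& vaoLeft,
                                       SLGLVertexArray& vaoRight)
{
    createSLDistortionMesh(sp, ET_left, vaoLeft);
    createSLDistortionMesh(sp, ET_right, vaoRight);
}
#endif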
1204 
1205 //-------------------------------------------------------------------------------------
1206 #endif