Commit fce6c509 authored by Martin Schultz

Merge branch 'master' of ssh://Renault.informatik.rwth-aachen.de/data/git-repository/acgl/libraries/acgl

parents b8f40245 1fab2513
#pragma once
/**
* IMPORTANT: DON'T EXPECT THIS CLASS TO HAVE A FINAL AND STABLE API!
*
* This class needs LibOVR version 0.3.2 or higher to work.
* The headers of that library must be placed in the include search path.
*
* In addition, ACGL_USE_OCULUS_RIFT has to be defined.
*/
#ifdef ACGL_USE_OCULUS_RIFT
#include <ACGL/ACGL.hh>
#include <glm/glm.hpp>
#include <OVR_CAPI.h>
#if ACGL_RIFT_SDK_VERSION >= 32
namespace ACGL{
namespace HardwareSupport{
bool initRiftSDK();
void shutdownRiftSDK();
// create a standard Rift; can be replaced with your own, more specialized code:
ovrHmd createRift(bool _headTrackingIsRequired = false, bool _headTranslationTrackingIsAllowed = true);
void destroyRift(ovrHmd _hmd);
// the SDK reports different sizes per eye; return the maximum of both to make things easier:
glm::uvec2 getOptimalRenderSizePerEye(ovrHmd _hmd);
}
}
#endif // RIFT_VERSION
#endif // ACGL_USE_OCULUS_RIFT
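For orientation, a minimal usage sketch of the helpers declared above (the function name riftExample and the render-loop comment are hypothetical; error handling is omitted):

#ifdef ACGL_USE_OCULUS_RIFT
void riftExample()
{
    using namespace ACGL::HardwareSupport;

    if (!initRiftSDK()) return;              // initialize LibOVR once per application
    ovrHmd hmd = createRift(false, true);    // head tracking optional, translation tracking allowed
    if (hmd) {
        glm::uvec2 sizePerEye = getOptimalRenderSizePerEye(hmd);
        // ... create one render target of sizePerEye per eye, render, distort, present ...
        destroyRift(hmd);
    }
    shutdownRiftSDK();
}
#endif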
@@ -13,6 +13,7 @@
#include <ACGL/ACGL.hh>
#ifdef ACGL_USE_OCULUS_RIFT
#if ACGL_RIFT_SDK_VERSION < 32
#include <ACGL/Math/Math.hh>
#include <glm/gtc/quaternion.hpp>
@@ -220,4 +221,5 @@ ACGL_SMARTPOINTER_TYPEDEFS(SimpleRiftController)
}
}
#endif // ACGL_USE_OCULUS_RIFT
#endif // RIFT_VERSION
#endif // ACGL_USE_OCULUS_RIFT
\ No newline at end of file
@@ -13,11 +13,11 @@
namespace ACGL{
namespace OpenGL{
enum ColorSpace
enum class ColorSpace
{
COLOR_SPACE_AUTO_DETECT,
COLOR_SPACE_LINEAR,
COLOR_SPACE_SRGB
AUTO_DETECT,
LINEAR,
SRGB
};
//! Recommends an OpenGL internal format for a given combination of format and color space
@@ -37,7 +37,7 @@ public:
mFormat(GL_RGBA),
mType(GL_UNSIGNED_BYTE),
mPaddingBytesPerRow(0),
mColorSpace(COLOR_SPACE_AUTO_DETECT)
mColorSpace(ColorSpace::AUTO_DETECT)
{}
virtual ~TextureData(void)
{
@@ -143,6 +143,11 @@ private:
ACGL_SMARTPOINTER_TYPEDEFS(TextureData)
//! Converts the texture data in _from to the target format and type given in
//! _to. Overwrites width, height, and depth in _to. Old texture data is removed
//! and new memory is allocated.
void convertTextureData(const SharedTextureData& _from, const SharedTextureData& _to);
} // OpenGL
} // ACGL
@@ -38,7 +38,7 @@ void registerTextureLoadFunction(std::vector<std::string> _endings, TextureLoadF
void unregisterTextureLoadFunction(TextureLoadFuncPtr _function);
//! generic load function that will use one of the loading functions below based on the file ending
SharedTextureData loadTextureData(const std::string &_filename, ColorSpace _colorSpace = COLOR_SPACE_AUTO_DETECT);
SharedTextureData loadTextureData(const std::string &_filename, ColorSpace _colorSpace = ColorSpace::AUTO_DETECT);
//! generic save function that will use one of the saving functions below based on the file ending
bool saveTextureData(const SharedTextureData &_textureData, const std::string &_filename);
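For illustration, a hedged example of the generic load/save pair (file names are placeholders):

SharedTextureData albedo = loadTextureData("albedo.png", ColorSpace::SRGB); // dispatches to the PNG loader by file ending
if (albedo) saveTextureData(albedo, "albedo_copy.png");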
@@ -59,21 +59,21 @@ inline bool saveScreenshotWithDate( const std::string& _fileEnding = "png" ) {
///////////////////////////////////////////////////////////////////////////////////////////////////
//! loads from a PNG using the simple lodepng library
SharedTextureData loadTextureDataFromLodepng(const std::string& _filename, ColorSpace _colorSpace = COLOR_SPACE_AUTO_DETECT);
SharedTextureData loadTextureDataFromLodepng(const std::string& _filename, ColorSpace _colorSpace = ColorSpace::AUTO_DETECT);
#ifdef ACGL_COMPILE_WITH_QT
//! loads various formats using the Qt library
SharedTextureData loadTextureDataFromQT(const std::string& _filename, ColorSpace _colorSpace = COLOR_SPACE_AUTO_DETECT);
SharedTextureData loadTextureDataFromQT(const std::string& _filename, ColorSpace _colorSpace = ColorSpace::AUTO_DETECT);
#endif
//! loads RGBE aka Radiance files
SharedTextureData loadTextureDataFromRGBE(const std::string& _filename, ColorSpace _colorSpace = COLOR_SPACE_AUTO_DETECT);
SharedTextureData loadTextureDataFromRGBE(const std::string& _filename, ColorSpace _colorSpace = ColorSpace::AUTO_DETECT);
//! loads EXR / OpenEXR files iff the library is present AT RUNTIME (linux only)
SharedTextureData loadTextureDataFromEXR(const std::string& _filename, ColorSpace _colorSpace = COLOR_SPACE_AUTO_DETECT);
SharedTextureData loadTextureDataFromEXR(const std::string& _filename, ColorSpace _colorSpace = ColorSpace::AUTO_DETECT);
//! loads PNM / PPM files:
SharedTextureData loadTextureDataFromPNM(const std::string& _filename, ColorSpace _colorSpace = COLOR_SPACE_AUTO_DETECT);
SharedTextureData loadTextureDataFromPNM(const std::string& _filename, ColorSpace _colorSpace = ColorSpace::AUTO_DETECT);
///////////////////////////////////////////////////////////////////////////////////////////////////
// library specific save
@@ -21,13 +21,13 @@ namespace ACGL{
namespace OpenGL{
//! loads the texture and creates mip maps
SharedTexture2D loadTexture2D(const std::string& _filename, ColorSpace _colorSpace = COLOR_SPACE_AUTO_DETECT);
SharedTexture2D loadTexture2D(const std::string& _filename, ColorSpace _colorSpace = ColorSpace::AUTO_DETECT);
//! loads the texture including mipmaps from a DDS file
//! supports DXT1, DXT3 and DXT5 compression
SharedTexture2D loadTexture2DFromDDS (const std::string& _filename, ColorSpace _colorSpace = COLOR_SPACE_AUTO_DETECT);
SharedTexture3D loadTexture3DFromDDS (const std::string& _filename, ColorSpace _colorSpace = COLOR_SPACE_AUTO_DETECT);
SharedTextureCubeMap loadTextureCubeMapFromDDS(const std::string& _filename, ColorSpace _colorSpace = COLOR_SPACE_AUTO_DETECT);
SharedTexture2D loadTexture2DFromDDS (const std::string& _filename, ColorSpace _colorSpace = ColorSpace::AUTO_DETECT);
SharedTexture3D loadTexture3DFromDDS (const std::string& _filename, ColorSpace _colorSpace = ColorSpace::AUTO_DETECT);
SharedTextureCubeMap loadTextureCubeMapFromDDS(const std::string& _filename, ColorSpace _colorSpace = ColorSpace::AUTO_DETECT);
}
}
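A short, hypothetical example of why the ColorSpace parameter matters: color textures are usually stored in sRGB, while data textures such as normal maps are linear.

SharedTexture2D diffuse = loadTexture2D("diffuse.png", ColorSpace::SRGB);   // sampled with hardware sRGB-to-linear conversion
SharedTexture2D normals = loadTexture2D("normals.png", ColorSpace::LINEAR); // raw data, no color space conversion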
@@ -119,7 +119,13 @@ public:
// ===================================================================================================== \/
public:
//////////// uniform (block) locations:
inline GLint getUniformLocation (const std::string& _nameInShader) const { return glGetUniformLocation (mObjectName, _nameInShader.c_str()); }
inline GLint getUniformLocation(const std::string& _nameInShader) const
{
// cache the uniform location: query the driver only on the first lookup, afterwards serve it from mUniformLocationCache (cleared again in link())
if ( !mUniformLocationCache.count(_nameInShader) )
mUniformLocationCache[_nameInShader] = glGetUniformLocation(mObjectName, _nameInShader.c_str());
return mUniformLocationCache[_nameInShader];
}
#if (ACGL_OPENGL_VERSION >= 31)
//! if the block name does not exist, GL_INVALID_INDEX will get returned
@@ -363,6 +369,9 @@ public:
protected:
GLuint mObjectName;
ConstSharedShaderVec mShaders;
/// Cache for uniform locations
mutable std::map<std::string, int> mUniformLocationCache;
};
ACGL_SMARTPOINTER_TYPEDEFS(ShaderProgram)
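A hedged usage sketch of the cached lookup (program, uniform name and matrix are hypothetical; glm::value_ptr needs <glm/gtc/type_ptr.hpp>). The first call asks the driver, subsequent calls per frame are served from mUniformLocationCache:

GLint loc = shaderProgram->getUniformLocation("uModelViewProjectionMatrix");
glUniformMatrix4fv(loc, 1, GL_FALSE, glm::value_ptr(mvpMatrix));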
/***********************************************************************
* Copyright 2011-2013 Computer Graphics Group RWTH Aachen University. *
* All rights reserved. *
* Distributed under the terms of the MIT License (see LICENSE.TXT). *
**********************************************************************/
#pragma once
/**
* The OculusRiftCamera is controlled completely by the Oculus Rift. It is compatible
* with a GenericCamera, but only through the getters, as all internal state is
* defined by the connected Rift.
*
* The center of the coordinate system is roughly inside the user's head.
*
* This camera's view matrix gives the translation/rotation from the center inside the
* user's body to the requested eye.
*
*/
#ifdef ACGL_USE_OCULUS_RIFT
#include <ACGL/ACGL.hh>
#include <ACGL/Scene/GenericCamera.hh>
#include <OVR_CAPI.h>
namespace ACGL{
namespace Scene {
class OculusRiftCamera : public GenericCamera {
public:
struct OvrEye
{
ovrEyeType Eye;
ovrFovPort Fov;
ovrSizei TextureSize;
ovrRecti RenderViewport;
};
void connectWithRift(ovrHmd _hmd);
virtual glm::vec3 getPosition() const override;
virtual glm::mat4 getViewMatrix() const override;
virtual glm::mat4 getProjectionMatrix() const override;
virtual glm::uvec2 getViewportSize() const override;
// Updates the pose based on the Rift tracking and selects the correct eye.
// _eyeNumber can be 0 or 1; it is undefined which one is the left eye and
// which is the right eye. The eye that was set is returned.
// Call stopRenderingEye() afterwards! Also calls ovrHmd_BeginEyeRender() internally.
GenericCamera::Eye startRenderingEye(int _eyeNumber);
void stopRenderingEye(int _eyeNumber, ovrTexture* eyeTexture);
void updateFromRift();
const OvrEye *getOVREyeDescription() const { return mEyeDescription; }
private:
ovrHmd mHmd;
OvrEye mEyeDescription[2]; // left, right
GenericCamera::Eye mEyeOrdering[2];
ovrPosef mPoseUsedForRendering[2];
int mActiveEye; // to index the two-element arrays
};
ACGL_SMARTPOINTER_TYPEDEFS(OculusRiftCamera)
}
}
#endif // ACGL_USE_OCULUS_RIFT
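A per-frame sketch of how this camera is meant to be used (framebuffer handling and the eyeTextures array are hypothetical; hmd comes from createRift()):

OculusRiftCamera camera;
camera.connectWithRift(hmd);
// each frame, for both eyes:
for (int i = 0; i < 2; ++i) {
    GenericCamera::Eye eye = camera.startRenderingEye(i);  // begins eye rendering and updates the pose
    // ... bind the framebuffer for 'eye' and render the scene using
    //     camera.getViewMatrix() and camera.getProjectionMatrix() ...
    camera.stopRenderingEye(i, &eyeTextures[i]);           // hands the rendered ovrTexture back to the SDK
}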
#include <ACGL/HardwareSupport/RiftSdk.hh>
#include <ACGL/OpenGL/Creator/ShaderProgramCreator.hh>
#include <iostream>
#include <ACGL/Utils/Log.hh>
#ifdef ACGL_USE_OCULUS_RIFT
#if ACGL_RIFT_SDK_VERSION >= 32
namespace ACGL{
namespace HardwareSupport{
using namespace std;
using namespace ACGL::Utils;
// C API helpers:
static bool ACGL_RiftSDKInitialized = false;
bool initRiftSDK()
{
if (ACGL_RiftSDKInitialized) return true; // don't init twice
ovrBool ok = ovr_Initialize();
if (!ok) {
error() << "could not initialize Oculus Rift library" << endl;
}
else {
ACGL_RiftSDKInitialized = true;
}
return ACGL_RiftSDKInitialized;
}
void shutdownRiftSDK()
{
ovr_Shutdown();
}
// For more sophisticated use cases, build your own Rift setup based on the Rift SDK instead of using this default one.
//
// _headTrackingIsRequired           = if false, the call will create a dummy device that won't generate any tracking data
//                                     in case no real Rift is connected (for developing without an actual device).
// _headTranslationTrackingIsAllowed = if true, the positional tracking of the DK2 gets supported; if false, even a DK2
//                                     will behave like a DK1
ovrHmd createRift(bool _headTrackingIsRequired, bool _headTranslationTrackingIsAllowed)
{
if (!ACGL_RiftSDKInitialized) {
error() << "Rift SDK not initialized correctly - did you call/check initRiftSDK()?" << endl;
}
ovrHmd mHmd = ovrHmd_Create(0);
if (!mHmd && _headTrackingIsRequired) {
error() << "could not connect to an Oculus Rift HMD" << endl;
return NULL;
}
else if (!mHmd && !_headTrackingIsRequired) {
#if ACGL_RIFT_USE_DUMMY
warning() << "could not connect to a real Oculus Rift HMD - generating sensorless dummy" << endl;
mHmd = ovrHmd_CreateDebug(ovrHmd_DK1);
#else
debug() << "could not connect to a real Oculus Rift HMD" << endl;
mHmd = NULL;
#endif
return mHmd;
}
ovrHmdDesc mHmdDesc;
ovrHmd_GetDesc(mHmd, &mHmdDesc);
// debug output:
debug() << "Connected to: " << mHmdDesc.ProductName << endl;
// start the tracking:
// what the application supports:
unsigned int supportedCaps = ovrSensorCap_Orientation | ovrSensorCap_YawCorrection | ovrHmdCap_LowPersistence | ovrHmdCap_LatencyTest | ovrHmdCap_DynamicPrediction;
if (_headTranslationTrackingIsAllowed) supportedCaps |= ovrSensorCap_Position;
// what the device must deliver as a bare minimum:
unsigned int requiredCaps = 0;
if (_headTrackingIsRequired) requiredCaps |= ovrSensorCap_Orientation;
ovrBool ok = ovrHmd_StartSensor(mHmd, supportedCaps, requiredCaps);
if (!ok) {
error() << "could not connect to a Rift tracker - only rendering is supported" << endl;
}
return mHmd;
}
void destroyRift(ovrHmd _hmd)
{
ovrHmd_Destroy(_hmd);
}
glm::uvec2 getOptimalRenderSizePerEye(ovrHmd _hmd)
{
if (_hmd == NULL) return glm::uvec2(640, 800);
ovrHmdDesc hmdDesc;
ovrHmd_GetDesc(_hmd, &hmdDesc);
ovrSizei optimalLeft = ovrHmd_GetFovTextureSize(_hmd, ovrEye_Left, hmdDesc.DefaultEyeFov[0], 1.0f);
ovrSizei optimalRight = ovrHmd_GetFovTextureSize(_hmd, ovrEye_Right, hmdDesc.DefaultEyeFov[1], 1.0f);
debug() << "optimalLeft " << optimalLeft.w << " " << optimalLeft.h << endl;
debug() << "optimalRight " << optimalRight.w << " " << optimalRight.h << endl;
debug() << "hmd: " << hmdDesc.ProductName << endl;
debug() << "hmd WindowsPos: " << hmdDesc.WindowsPos.x << " " << hmdDesc.WindowsPos.y << endl;
return glm::uvec2(glm::max(optimalLeft.w, optimalRight.w), glm::max(optimalLeft.h, optimalRight.h));
}
}
}
#endif
#endif
@@ -2,6 +2,7 @@
#include <ACGL/OpenGL/Creator/ShaderProgramCreator.hh>
#ifdef ACGL_USE_OCULUS_RIFT
#if ACGL_RIFT_SDK_VERSION < 32
using namespace OVR;
using namespace ACGL;
@@ -736,3 +737,4 @@ void SimpleRiftController::renderDistortedP( ACGL::OpenGL::ConstSharedShaderProg
}
#endif
#endif
\ No newline at end of file
@@ -46,7 +46,7 @@ GLenum recommendedInternalFormat(GLenum _format, ColorSpace _colorSpace)
{
switch(_colorSpace)
{
case COLOR_SPACE_SRGB: return formatToSRGB(_format);
case ColorSpace::SRGB: return formatToSRGB(_format);
default: return _format;
}
}
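For example (hedged, the exact value depends on formatToSRGB()): recommendedInternalFormat(GL_RGBA, ColorSpace::SRGB) forwards to formatToSRGB(GL_RGBA), while ColorSpace::LINEAR and ColorSpace::AUTO_DETECT return the format unchanged.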
@@ -65,22 +65,34 @@ namespace
return r;
}
// Parses a string of space-separated numbers into a packed floating-point vector (_data) with a maximum number of _maxDimension elements
void parseVector(const char* _start , const char* _end, int _maxDimension, int& _dimension, float* _data)
void trim(const char*& _position)
{
const char* it = _start;
if (*it == ' ')
while(*_position == ' ' || *_position == '\t')
_position ++;
}
const char * nextObject(const char* _position, const char* _end)
{
while(_position < _end)
{
it++;
if(*_position == ' ' || *_position == '\t')
return _position;
++_position;
}
const char* end = _end;
return _position;
}
// Parses a string of space-separated numbers into a packed floating-point vector (_data) with a maximum number of _maxDimension elements
void parseVector(const char* _it , const char* _end, int _maxDimension, int& _dimension, float* _data)
{
const char* found;
_dimension = 0;
while (_dimension < _maxDimension && it < end)
while (_dimension < _maxDimension && _it < _end)
{
found = std::find(it, end, ' ');
_data[_dimension++] = fastAtof(it,found-1);
it = found == end ? end : found + 1;
trim(_it);
found = nextObject(_it,_end);
_data[_dimension++] = fastAtof(_it,found-1);
_it = found == _end ? _end : found + 1;
}
}
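For reference, the effect of the reworked helpers on a hypothetical input: trim() skips spaces and tabs, nextObject() advances to the next separator (or to _end), so parseVector("1.0 2.0  3.0", end, 3, dim, data) now tolerates repeated whitespace and yields dim == 3 and data == {1.0f, 2.0f, 3.0f}.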
@@ -92,8 +104,7 @@ namespace
indices.reserve(5);
const char* it = _start;
if (*it == ' ') //skip starting whitespace
it++;
trim(it);
const char* vsit;
const char* vsend;
const char* foundSlash;
@@ -102,12 +113,13 @@
while (it < _end)
{
vsit = it;
vsend = std::find(it, _end, ' ');
vsend = nextObject(it, _end);
componentIndex = 0;
IndexTuple indexTuple;
//process the string now meaning we split by /
while (vsit < vsend)
{
trim(vsit);
foundSlash = std::find(vsit, vsend, '/');
index = std::atoi(vsit);
if (componentIndex == 0) indexTuple.position = index - 1;
@@ -117,7 +129,8 @@
vsit = foundSlash == vsend ? vsend : foundSlash + 1;
}
indices.push_back(indexTuple);
it = vsend == _end ? _end : vsend + 1;
trim(vsend);
it = vsend;
}
return indices;
}
@@ -160,21 +173,23 @@ SharedGeometryData loadGeometryDataFromOBJ(const std::string& _filename, bool _c
const char* parameters[2];
while (pchBuf < pchEnd)
{
trim(pchBuf);
// Parse the current line
const char* pchEOL = std::find(pchBuf, pchEnd, '\n');
// If the line starts with a #, it is a comment
if (*pchBuf == '#')
// skip empty lines or lines starting with #
if (*pchBuf == '#' || pchBuf == pchEOL)
{
pchBuf = pchEOL + 1;
continue;
}
// Otherwise, extract the first word and the remainder
const char* pchKey = std::find(pchBuf, pchEnd, ' ');
const char* pchKey = nextObject(pchBuf, pchEnd);
keyword = pchBuf;
keywordLength = pchKey - pchBuf;//std::string(pchBuf, pchKey);
parameters[0] = pchKey + 1;
keywordLength = pchKey - pchBuf;
trim(pchKey);
parameters[0] = pchKey;
parameters[1] = pchEOL;
if(strncmp(keyword,"v",keywordLength) == 0) // vertex position
@@ -400,6 +415,7 @@ SharedGeometryData loadGeometryDataFromOBJ(const std::string& _filename, bool _c
data->setStrideSize(strideSize);
data->setSize(abDataElements * sizeof(GLfloat));
data->setData((GLubyte*)abData);
return data;
}
@@ -7,8 +7,8 @@
#include <ACGL/OpenGL/Data/TextureData.hh>
#include <ACGL/Utils/Memory.hh>
using namespace ACGL;
using namespace ACGL::OpenGL;
namespace ACGL {
namespace OpenGL {
GLsizei TextureData::getPackAlignment() const
{
@@ -304,3 +304,74 @@ void TextureData::setTexel( glm::uvec2 _texCoord, glm::vec4 _color )
}
}
// standard luma weights (ITU-R BT.601) for mixing RGB down to one grayscale channel
float grayscaleMixdown(float _r, float _g, float _b)
{
return 0.299f * _r + 0.587f * _g + 0.114f * _b;
}
// Converts a single texel from _from channels to _to channels: grayscale sources get
// replicated into the color channels, color sources get mixed down to grayscale, and a
// missing alpha channel is filled with 1.0.
glm::vec4 convertTexelNumChannels(glm::vec4 _texel, GLsizei _from, GLsizei _to)
{
if (_from == _to) {
return _texel;
}
else if (_from == 1) {
switch (_to) {
case 2: return {_texel.r, 1.0, 0.0, 0.0};
case 3: return {_texel.r, _texel.r, _texel.r, 0.0};
case 4: return {_texel.r, _texel.r, _texel.r, 1.0};
}
}
else if (_from == 2) {
switch (_to) {
case 1: return {_texel.r, 0.0, 0.0, 0.0};
case 3: return {_texel.r, _texel.r, _texel.r, _texel.g};
case 4: return {_texel.r, _texel.r, _texel.r, _texel.g};
}
}
else if (_from == 3) {
switch (_to) {
case 1: return {grayscaleMixdown(_texel.r, _texel.g, _texel.b), 0.0, 0.0, 0.0};
case 2: return {grayscaleMixdown(_texel.r, _texel.g, _texel.b), 1.0, 0.0, 0.0};
case 4: return {_texel.r, _texel.g, _texel.b, 1.0};
}
}
else if (_from == 4) {
switch (_to) {
case 1: return {grayscaleMixdown(_texel.r, _texel.g, _texel.b), 0.0, 0.0, 0.0};
case 2: return {grayscaleMixdown(_texel.r, _texel.g, _texel.b), 1.0, 0.0, 0.0};
case 3: return {_texel.r, _texel.g, _texel.b, 0.0};
}
}
return _texel;
}
void convertTextureData(const SharedTextureData& _from, const SharedTextureData& _to)
{
assert(_from);
assert(_to);
if (!_from->getData()) {
ACGL::Utils::error() << "Cannot convert TextureData: source TextureData contains no data" << std::endl;
return;
}
// Setup target texture dimensions
_to->setWidth(_from->getWidth());
_to->setHeight(_from->getHeight());
_to->setDepth(_from->getDepth());
// Allocate new memory
_to->deleteData();
GLubyte* data = new GLubyte[_to->getSizeInBytes()];
_to->setData(data);
// Transfer pixels
for (GLsizei y = 0; y < _to->getHeight(); ++y) {
for (GLsizei x = 0; x < _to->getWidth(); ++x) {
auto texel = convertTexelNumChannels(_from->getTexel({x, y}), _from->getNumberOfChannels(), _to->getNumberOfChannels());
_to->setTexel({x, y}, texel);
}
}
}
} // namespace OpenGL
} // namespace ACGL
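A hedged usage sketch of the new conversion helper (the file name is a placeholder; the setFormat()/setType() setters are assumed from the TextureData API):

SharedTextureData rgba = loadTextureData("heightmap.png");  // e.g. 4 channels, GL_UNSIGNED_BYTE
SharedTextureData gray(new TextureData());
gray->setFormat(GL_RED);                                    // one target channel: color gets mixed down to grayscale
gray->setType(GL_UNSIGNED_BYTE);
convertTextureData(rgba, gray);                             // allocates gray's memory and converts every texel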
@@ -18,7 +18,7 @@ using namespace nv_dds;
namespace {
GLenum getDDSInternalFormat(bool compressed, GLenum format, ColorSpace _colorSpace = COLOR_SPACE_AUTO_DETECT)
GLenum getDDSInternalFormat(bool compressed, GLenum format, ColorSpace _colorSpace = ColorSpace::AUTO_DETECT)
{
GLenum internal_format = format;
@@ -15,6 +15,9 @@ using namespace ACGL::Utils;
bool ShaderProgram::link() const
{
// clear the uniform location cache: locations may change whenever the program gets (re)linked
mUniformLocationCache.clear();
glLinkProgram(mObjectName);
// check for program link errors:
/***********************************************************************
* Copyright 2011-2013 Computer Graphics Group RWTH Aachen University. *
* All rights reserved. *
* Distributed under the terms of the MIT License (see LICENSE.TXT). *
**********************************************************************/
#ifdef ACGL_USE_OCULUS_RIFT
#include <ACGL/Scene/OculusRiftCamera.hh>
#include <ACGL/Utils/Log.hh>
#include <ACGL/HardwareSupport/RiftSdk.hh>
#include <glm/ext.hpp>
#include <glm/gtc/matrix_transform.hpp>
namespace ACGL{
namespace Scene{
using namespace std;
using namespace ACGL::Utils;
using namespace ACGL::HardwareSupport;
static glm::quat ovr2glm(const ovrQuatf& _quat)
{
glm::quat q;
q.x = _quat.x;
q.y = _quat.y;
q.z = _quat.z;
q.w = _quat.w;
return q;
}
static glm::vec3 ovr2glm(const ovrVector3f& _vec)
{
// This cast is ok as ovrVector3f has the same internal structure
return *(glm::vec3*)&_vec;
}
static glm::mat4 ovr2glm(const ovrMatrix4f& _matrix)
{
// CAUTION: column-major vs. row-major difference
return glm::transpose(*(glm::mat4*)&_matrix);
/*glm::mat4 m;
for (int i = 0; i < 4; ++i) {
for (int j = 0; j < 4; ++j) {
m[i][j] = _matrix.M[j][i];
}
}
return m;*/
}
void OculusRiftCamera::connectWithRift(ovrHmd _hmd)
{
mHmd = _hmd;
ovrHmdDesc hmdDesc;
ovrHmd_GetDesc(_hmd, &hmdDesc);
mActiveEye = 0;
// both eyes get the same size; rendering goes into two separate textures,
// NOT side-by-side into one texture:
glm::uvec2 renderTargetSizeForOneEye = getOptimalRenderSizePerEye(_hmd);
ovrSizei ovrRenderTargetSizeForOneEye;
ovrRenderTargetSizeForOneEye.w = renderTargetSizeForOneEye.x;
ovrRenderTargetSizeForOneEye.h = renderTargetSizeForOneEye.y;
ovrRecti perEyeViewport;
perEyeViewport.Pos = { 0, 0 };
perEyeViewport.Size = ovrRenderTargetSizeForOneEye;
for (int i = 0; i < 2; ++i) {
// for all eyes:
mEyeDescription[i].Eye = hmdDesc.EyeRenderOrder[i];
mEyeDescription[i].Fov = hmdDesc.DefaultEyeFov[i];
mEyeDescription[i].TextureSize = ovrRenderTargetSizeForOneEye;
mEyeDescription[i].RenderViewport = perEyeViewport;
// the eye ordering (left-right or right-left) is defined by the SDK based on the
// hardware!
if (mEyeDescription[i].Eye == ovrEye_Left) {
mEyeOrdering[i] = GenericCamera::Eye::EYE_LEFT;
} else {
mEyeOrdering[i] = GenericCamera::Eye::EYE_RIGHT;
}
}
}
GenericCamera::Eye OculusRiftCamera::startRenderingEye(int _eyeNumber)
{
mPoseUsedForRendering[_eyeNumber] = ovrHmd_BeginEyeRender(mHmd, mEyeDescription[_eyeNumber].Eye);
setEye(mEyeOrdering[_eyeNumber]);
mActiveEye = _eyeNumber;
/*debug() << "Render Eye " << mActiveEye << ": "
<< mPoseUsedForRendering[_eyeNumber].Orientation.w << " "
<< mPoseUsedForRendering[_eyeNumber].Orientation.x << " "
<< mPoseUsedForRendering[_eyeNumber].Orientation.y << " "
<< mPoseUsedForRendering[_eyeNumber].Orientation.z << endl;*/
updateFromRift();
return mEyeOrdering[_eyeNumber];
}
void OculusRiftCamera::stopRenderingEye(int _eyeNumber, ovrTexture* eyeTexture)
{
ovrHmd_EndEyeRender(mHmd, mEyeDescription[_eyeNumber].Eye, mPoseUsedForRendering[_eyeNumber], eyeTexture);
}