diff --git a/src/geoms/Geometry.cpp b/src/geoms/Geometry.cpp index 47a3841f7e8836296309bed47668cc914c03d02c..3b95b634a9970e7fb5e318ab86cbeb1904749fe4 100644 --- a/src/geoms/Geometry.cpp +++ b/src/geoms/Geometry.cpp @@ -25,7 +25,7 @@ using namespace std; -Geometry::Geometry():m_hasTexCoords(false), m_points(false) { +Geometry::Geometry():m_hasTexCoords(false), m_points(false), m_isModel(false) { m_geomID=(Reveal::GEOM_ID)-1; } @@ -36,6 +36,10 @@ void Geometry::draw(const int& contextID, map<Reveal::REVIL_UNIFORM, GLint>& uniforms, const int& visibilityMask) { + if(m_modules.size()==0) { + return; + } + if(m_contexts.find(contextID)==m_contexts.end()) { ContextGeometry& cont = m_contexts[contextID]; cont.m_refresh=true; diff --git a/src/geoms/Geometry.hpp b/src/geoms/Geometry.hpp index 4b52904fae19e492f9bc31d7a3dcdb8896116bb8..fb3d3fdc31d9cbbee8eb75b32506961c1ab389d3 100644 --- a/src/geoms/Geometry.hpp +++ b/src/geoms/Geometry.hpp @@ -60,12 +60,16 @@ class Geometry { std::vector<int>& editCompoSizes(){return m_compoSizes;} void refresh(); - void hasTexCoords(bool tc){m_hasTexCoords=tc;} + inline void hasTexCoords(bool tc){m_hasTexCoords=tc;} + + inline void setIsModel(bool m){m_isModel=m;} + inline bool getIsModel(){return m_isModel;} protected: Reveal::GEOM_ID m_geomID; bool m_hasTexCoords; bool m_points; + bool m_isModel; std::vector<GeomModule*> m_modules; std::map<unsigned int, GeomModule*> m_modulesMap; diff --git a/src/modules/DepthCamModule.cpp b/src/modules/DepthCamModule.cpp index f41f8bde190d8300c0dcf1856125ed2a9e441849..b7a80f137b7961fe008560f7d66757c4bbb9d762 100644 --- a/src/modules/DepthCamModule.cpp +++ b/src/modules/DepthCamModule.cpp @@ -49,1331 +49,1330 @@ using namespace cv; void* processFramesThreadFunction(void* pvParam) { - DepthCamModule *pThis=(DepthCamModule*)pvParam; - pThis->processFrames(); - return NULL; + DepthCamModule *pThis=(DepthCamModule*)pvParam; + pThis->processFrames(); + return NULL; } DepthCamModule::DepthCamModule(): GroupModule(), - DepthModule(), - m_deviceID(""), - m_open(false), - m_defaultWidth(1280), - m_defaultHeight(720), - m_defaultFps(60), - m_horizontalFOV(1), - m_verticalFOV(1), - m_mirrored(false), - m_notVoronoi(false) + DepthModule(), + m_deviceID(""), + m_open(false), + m_defaultWidth(1280), + m_defaultHeight(720), + m_defaultFps(60), + m_horizontalFOV(1), + m_verticalFOV(1), + m_mirrored(false), + m_notVoronoi(false) { - m_type="DepthCam"; - m_name="depthcam"; - - m_calibrating=false; - m_depthType=0; - m_width=m_defaultWidth; - m_height=m_defaultHeight; - m_fps=m_defaultFps; - m_contourThreshold=30; - m_procSize.width=m_width; - m_procSize.height=m_height; - - m_defaultDepthFrame = new unsigned short[m_defaultWidth*m_defaultHeight]; - for(int i=0;i<m_defaultWidth*m_defaultHeight; ++i) { - m_defaultDepthFrame[i]=1000; - } - - //geometry - m_camGeom = new CamGeometry(640, 480); - m_camGeom->registerModule(this); - - //frames processing - m_framesThread=NULL; - - //filtered frame - m_filterSpat=3; - m_maxFilterSize=51; - m_filterIndex=0; - m_prevDepthFrames = new unsigned short*[m_maxFilterSize]; - for(int i=0; i<m_maxFilterSize; ++i) { - m_prevDepthFrames[i] - = new unsigned short[m_defaultWidth*m_defaultHeight]; - for(int j=0; j<m_defaultWidth*m_defaultHeight; ++j) { - m_prevDepthFrames[i][j] = 0; - } - } - m_filtDepthFrame = new unsigned short[m_defaultWidth*m_defaultHeight]; - m_sumDepthFrame = new int[m_defaultWidth*m_defaultHeight]; - for(int i=0;i<m_defaultWidth*m_defaultHeight; ++i) { - m_filtDepthFrame[i]=0; - 
m_sumDepthFrame[i]=0; - } - - //one euro filter - m_oefXFrame = new float[m_defaultWidth*m_defaultHeight]; - m_oefDXFrame = new float[m_defaultWidth*m_defaultHeight]; - m_oefRate=60; - m_oefDCutoff=1; - m_oefMinCutoff=1; //decrease to remove jitter - m_oefBeta=0.1; //increase to remove lag - m_oefInit=false; - - - //initialize marker detection - Size imageSize(1280, 720); - m_detRgbImg = Mat::zeros(imageSize, CV_8UC3); - m_detMarkImg = Mat::zeros(imageSize, CV_8UC3); - m_detectingMarkers=false; - m_markerPosY=0; - m_markerRotX=0; - m_markerRotY=0; - m_maxNbMarkers=10; - m_fillingMarkers=false; - m_arucoTracker = new ArucoTracker(); - m_imageTracker = new ImageTracker(); - m_currentTracker = m_arucoTracker; - m_debuggingMarkers=false; - /* - namedWindow("rgbcam"); - namedWindow("filtercam"); - */ - - //background capture - m_capturedBackground=false; - m_capturingBackground=0; - m_backgroundActive=false; - - //attributes - addAttribute(new Attribute("name", - Attribute::STRING_ATTRIBUTE, - nameCallback, this, 1, Attribute::LOCAL)); - m_attributesMap["name"]->setStrings(vector<string>(1, m_name)); - addAttribute(new Attribute("remove", - Attribute::ACTION_ATTRIBUTE, - removeCallback, this, - Attribute::LOCAL)); - addVisibleAttribute(); - addAttribute(new Attribute("refresh_devices", - Attribute::ACTION_ATTRIBUTE, - refreshCallback, this, - Attribute::LOCAL)); - addAttribute(new Attribute("open_device", - Attribute::STRING_ATTRIBUTE, - deviceCallback, this, 1, - Attribute::LOCAL)); - refreshDeviceList(); - - addAttribute(new Attribute("close_device", - Attribute::ACTION_ATTRIBUTE, - closeCallback, this, - Attribute::LOCAL)); - - addAttribute(new Attribute("mode", - Attribute::STRING_ATTRIBUTE, - modeCallback, this, 1, - Attribute::LOCAL)); - vector<string> devStr(1, ""); - m_attributesMap["mode"]->editStringValuesChoices().assign(1,devStr); - m_attributesMap["mode"]->initStrings(devStr); - - addAttribute(new Attribute("filter", - Attribute::STRING_ATTRIBUTE, - filterCallback, this, 1, - Attribute::LOCAL)); - - m_filters.push_back("none"); - m_filters.push_back("lowpass"); - m_filters.push_back("one_euro"); - m_filters.push_back("one_euro_spatial"); - m_attributesMap["filter"] - ->editStringValuesChoices().assign(1, m_filters); - m_attributesMap["filter"] - ->initStrings(vector<string>(1, m_filters[2])); - m_filter=m_filters[2]; - m_filtering=true; - - addAttribute(new Attribute("lowpass_smooth", - Attribute::FLOAT_ATTRIBUTE, - filterSizeCallback, this, 1, - Attribute::LOCAL)); - m_attributesMap["lowpass_smooth"]->initFloats(vector<float>(1,10)); - - addAttribute(new Attribute("one_euro_jitter", - Attribute::FLOAT_ATTRIBUTE, - filterMCOCallback, this, 1, - Attribute::LOCAL)); - m_attributesMap["one_euro_jitter"]->initFloats(vector<float>(1, - m_oefMinCutoff)); - addAttribute(new Attribute("one_euro_reactivity", - Attribute::FLOAT_ATTRIBUTE, - filterBetaCallback, this, 1, - Attribute::LOCAL)); - m_attributesMap["one_euro_reactivity"]->initFloats(vector<float>(1, - m_oefBeta)); - addAttribute(new Attribute("spatial_size", - Attribute::INT_ATTRIBUTE, - filterSpatCallback, this, 1, - Attribute::LOCAL)); - m_attributesMap["spatial_size"]->initInts(vector<int>(1, m_filterSpat)); - - addAttribute(new Attribute("contour_threshold", - Attribute::FLOAT_ATTRIBUTE, - contourThresholdCallback, this, 1, - Attribute::LOCAL)); - m_attributesMap["contour_threshold"]->initFloats(vector<float>(1, - m_contourThreshold)); - - addAttribute(new Attribute("calibrate_with_marker", - Attribute::ACTION_ATTRIBUTE, - 
calibrationCallback, this, - Attribute::LOCAL)); - - addTransformationAttributes(); - m_attributesMap["scale"]->setAccessibility(Attribute::HIDDEN); - vector<float> posVec(3,0); - posVec[2]=-700; - m_attributesMap["position"]->setFloats(posVec); - - /* - addAttribute(new Attribute("capture_background", - Attribute::BOOL_ATTRIBUTE, - captureBackgroundCallback, this, 1, - Attribute::LOCAL)); - */ - addAttribute(new Attribute("markers_detection", - Attribute::STRING_ATTRIBUTE, - markersDetectionCallback, this, 1, - Attribute::LOCAL)); - vector<string> detection; - detection.push_back("none"); - detection.push_back("aruco"); - detection.push_back("image"); - m_attributesMap["markers_detection"] - ->editStringValuesChoices().assign(1, detection); - m_attributesMap["markers_detection"] - ->initStrings(vector<string>(1, detection[0])); - addAttribute(new Attribute("image_markers_folder", - Attribute::FOLDER_OPEN_ATTRIBUTE, - imageMarkersFolderCallback, this)); - - addAttribute(new Attribute("marker_fill_method", - Attribute::STRING_ATTRIBUTE, - markersFillMethodCallback, this, 1, - Attribute::LOCAL)); - vector<string> methods; - methods.push_back("none"); - methods.push_back("flood"); - methods.push_back("voronoi"); - m_attributesMap["marker_fill_method"] - ->editStringValuesChoices().assign(1, methods); - m_attributesMap["marker_fill_method"] - ->initStrings(vector<string>(1, methods[0])); - addAttribute(new Attribute("marker_output", - Attribute::FLOAT_ATTRIBUTE, - outputMarkersCallback, this, 9, - Attribute::OUTPUT)); - addAttribute(new Attribute("debug_markers", - Attribute::BOOL_ATTRIBUTE, - debugMarkersCallback, this, 1, - Attribute::LOCAL)); - - Reveal::getInstance()->addUpdateObserver(this); - - - //start processing thread - m_framesThread = new std::thread(processFramesThreadFunction, this); + m_type="DepthCam"; + m_name="depthcam"; + + m_calibrating=false; + m_depthType=0; + m_width=m_defaultWidth; + m_height=m_defaultHeight; + m_fps=m_defaultFps; + m_contourThreshold=30; + m_procSize.width=m_width; + m_procSize.height=m_height; + + m_defaultDepthFrame = new unsigned short[m_defaultWidth*m_defaultHeight]; + for(int i=0;i<m_defaultWidth*m_defaultHeight; ++i) { + m_defaultDepthFrame[i]=1000; + } + + //geometry + m_camGeom = new CamGeometry(640, 480); + m_camGeom->registerModule(this); + + //frames processing + m_framesThread=NULL; + + //filtered frame + m_filterSpat=3; + m_maxFilterSize=51; + m_filterIndex=0; + m_prevDepthFrames = new unsigned short*[m_maxFilterSize]; + for(int i=0; i<m_maxFilterSize; ++i) { + m_prevDepthFrames[i] + = new unsigned short[m_defaultWidth*m_defaultHeight]; + for(int j=0; j<m_defaultWidth*m_defaultHeight; ++j) { + m_prevDepthFrames[i][j] = 0; + } + } + m_filtDepthFrame = new unsigned short[m_defaultWidth*m_defaultHeight]; + m_sumDepthFrame = new int[m_defaultWidth*m_defaultHeight]; + for(int i=0;i<m_defaultWidth*m_defaultHeight; ++i) { + m_filtDepthFrame[i]=0; + m_sumDepthFrame[i]=0; + } + + //one euro filter + m_oefXFrame = new float[m_defaultWidth*m_defaultHeight]; + m_oefDXFrame = new float[m_defaultWidth*m_defaultHeight]; + m_oefRate=60; + m_oefDCutoff=1; + m_oefMinCutoff=1; //decrease to remove jitter + m_oefBeta=0.1; //increase to remove lag + m_oefInit=false; + + + //initialize marker detection + Size imageSize(1280, 720); + m_detRgbImg = Mat::zeros(imageSize, CV_8UC3); + m_detMarkImg = Mat::zeros(imageSize, CV_8UC3); + m_detectingMarkers=false; + m_markerPosY=0; + m_markerRotX=0; + m_markerRotY=0; + m_maxNbMarkers=10; + m_fillingMarkers=false; + 
m_arucoTracker = new ArucoTracker(); + m_imageTracker = new ImageTracker(); + m_currentTracker = m_arucoTracker; + m_debuggingMarkers=false; + /* + namedWindow("rgbcam"); + namedWindow("filtercam"); + */ + + //background capture + m_capturedBackground=false; + m_capturingBackground=0; + m_backgroundActive=false; + + //attributes + addAttribute(new Attribute("name", + Attribute::STRING_ATTRIBUTE, + nameCallback, this, 1, Attribute::LOCAL)); + m_attributesMap["name"]->setStrings(vector<string>(1, m_name)); + addAttribute(new Attribute("remove", + Attribute::ACTION_ATTRIBUTE, + removeCallback, this, + Attribute::LOCAL)); + addVisibleAttribute(); + addAttribute(new Attribute("refresh_devices", + Attribute::ACTION_ATTRIBUTE, + refreshCallback, this, + Attribute::LOCAL)); + addAttribute(new Attribute("open_device", + Attribute::STRING_ATTRIBUTE, + deviceCallback, this, 1, + Attribute::LOCAL)); + refreshDeviceList(); + + addAttribute(new Attribute("close_device", + Attribute::ACTION_ATTRIBUTE, + closeCallback, this, + Attribute::LOCAL)); + + addAttribute(new Attribute("mode", + Attribute::STRING_ATTRIBUTE, + modeCallback, this, 1, + Attribute::LOCAL)); + vector<string> devStr(1, ""); + m_attributesMap["mode"]->editStringValuesChoices().assign(1,devStr); + m_attributesMap["mode"]->initStrings(devStr); + + addAttribute(new Attribute("filter", + Attribute::STRING_ATTRIBUTE, + filterCallback, this, 1, + Attribute::LOCAL)); + + m_filters.push_back("none"); + m_filters.push_back("lowpass"); + m_filters.push_back("one_euro"); + m_filters.push_back("one_euro_spatial"); + m_attributesMap["filter"] + ->editStringValuesChoices().assign(1, m_filters); + m_attributesMap["filter"] + ->initStrings(vector<string>(1, m_filters[2])); + m_filter=m_filters[2]; + m_filtering=true; + + addAttribute(new Attribute("lowpass_smooth", + Attribute::FLOAT_ATTRIBUTE, + filterSizeCallback, this, 1, + Attribute::LOCAL)); + m_attributesMap["lowpass_smooth"]->initFloats(vector<float>(1,10)); + + addAttribute(new Attribute("one_euro_jitter", + Attribute::FLOAT_ATTRIBUTE, + filterMCOCallback, this, 1, + Attribute::LOCAL)); + m_attributesMap["one_euro_jitter"]->initFloats(vector<float>(1, + m_oefMinCutoff)); + addAttribute(new Attribute("one_euro_reactivity", + Attribute::FLOAT_ATTRIBUTE, + filterBetaCallback, this, 1, + Attribute::LOCAL)); + m_attributesMap["one_euro_reactivity"]->initFloats(vector<float>(1, + m_oefBeta)); + addAttribute(new Attribute("spatial_size", + Attribute::INT_ATTRIBUTE, + filterSpatCallback, this, 1, + Attribute::LOCAL)); + m_attributesMap["spatial_size"]->initInts(vector<int>(1, m_filterSpat)); + + addAttribute(new Attribute("contour_threshold", + Attribute::FLOAT_ATTRIBUTE, + contourThresholdCallback, this, 1, + Attribute::LOCAL)); + m_attributesMap["contour_threshold"]->initFloats(vector<float>(1, + m_contourThreshold)); + + addAttribute(new Attribute("calibrate_with_marker", + Attribute::ACTION_ATTRIBUTE, + calibrationCallback, this, + Attribute::LOCAL)); + + addTransformationAttributes(); + m_attributesMap["scale"]->setAccessibility(Attribute::HIDDEN); + vector<float> posVec(3,0); + posVec[2]=-700; + m_attributesMap["position"]->setFloats(posVec); + + /* + addAttribute(new Attribute("capture_background", + Attribute::BOOL_ATTRIBUTE, + captureBackgroundCallback, this, 1, + Attribute::LOCAL)); + */ + addAttribute(new Attribute("markers_detection", + Attribute::STRING_ATTRIBUTE, + markersDetectionCallback, this, 1, + Attribute::LOCAL)); + vector<string> detection; + detection.push_back("none"); + 
detection.push_back("aruco"); + detection.push_back("image"); + m_attributesMap["markers_detection"] + ->editStringValuesChoices().assign(1, detection); + m_attributesMap["markers_detection"] + ->initStrings(vector<string>(1, detection[0])); + addAttribute(new Attribute("image_markers_folder", + Attribute::FOLDER_OPEN_ATTRIBUTE, + imageMarkersFolderCallback, this)); + + addAttribute(new Attribute("marker_fill_method", + Attribute::STRING_ATTRIBUTE, + markersFillMethodCallback, this, 1, + Attribute::LOCAL)); + vector<string> methods; + methods.push_back("none"); + methods.push_back("flood"); + methods.push_back("voronoi"); + m_attributesMap["marker_fill_method"] + ->editStringValuesChoices().assign(1, methods); + m_attributesMap["marker_fill_method"] + ->initStrings(vector<string>(1, methods[0])); + addAttribute(new Attribute("marker_output", + Attribute::FLOAT_ATTRIBUTE, + outputMarkersCallback, this, 9, + Attribute::OUTPUT)); + addAttribute(new Attribute("debug_markers", + Attribute::BOOL_ATTRIBUTE, + debugMarkersCallback, this, 1, + Attribute::LOCAL)); + + Reveal::getInstance()->addUpdateObserver(this); + + + //start processing thread + m_framesThread = new std::thread(processFramesThreadFunction, this); } DepthCamModule::~DepthCamModule() { - if(m_framesThread) { - m_framesThread->join(); - delete m_framesThread; - } - Reveal::getInstance()->removeUpdateObserver(this); - closeDevice(); + if(m_framesThread) { + m_framesThread->join(); + delete m_framesThread; + } + Reveal::getInstance()->removeUpdateObserver(this); + closeDevice(); } void DepthCamModule::refreshDeviceList() { - openni::Array<openni::DeviceInfo> deviceList; - openni::OpenNI::enumerateDevices(&deviceList); - for(int d=0; d<deviceList.getSize(); ++d) { - cout<<"Found OpenNI device "<<deviceList[d].getUri()<<endl; - } - - m_serialsVec.clear(); - m_serialsMap.clear(); - char serialNumber[1024]; - for(int d=0; d<deviceList.getSize(); ++d) { - //ad-hoc code for libfreenect2 - if(string(deviceList[d].getUri()).find("freenect")!=string::npos) { - string serialStr = string(deviceList[d].getUri()); - serialStr=serialStr.substr(string("freenect2://0?serial=").size()); - if(serialStr.find("?")==string::npos) { - m_serialsMap[serialStr] = deviceList[d].getUri(); - m_serialsVec.push_back(serialStr); - } - } - else { - ostringstream oss; - oss<<d; - openni::Device dev; - dev.open(deviceList[d].getUri()); - dev.getProperty(ONI_DEVICE_PROPERTY_SERIAL_NUMBER, &serialNumber); - m_serialsMap[string(serialNumber)] = deviceList[d].getUri(); - m_serialsVec.push_back(string(serialNumber)); - dev.close(); - } - } - m_attributesMap["open_device"] - ->editStringValuesChoices().assign(1, m_serialsVec); + openni::Array<openni::DeviceInfo> deviceList; + openni::OpenNI::enumerateDevices(&deviceList); + for(int d=0; d<deviceList.getSize(); ++d) { + cout<<"Found OpenNI device "<<deviceList[d].getUri()<<endl; + } + + m_serialsVec.clear(); + m_serialsMap.clear(); + char serialNumber[1024]; + for(int d=0; d<deviceList.getSize(); ++d) { + //ad-hoc code for libfreenect2 + if(string(deviceList[d].getUri()).find("freenect")!=string::npos) { + string serialStr = string(deviceList[d].getUri()); + serialStr=serialStr.substr(string("freenect2://0?serial=").size()); + if(serialStr.find("?")==string::npos) { + m_serialsMap[serialStr] = deviceList[d].getUri(); + m_serialsVec.push_back(serialStr); + } + } + else { + ostringstream oss; + oss<<d; + openni::Device dev; + dev.open(deviceList[d].getUri()); + dev.getProperty(ONI_DEVICE_PROPERTY_SERIAL_NUMBER, &serialNumber); 
+ m_serialsMap[string(serialNumber)] = deviceList[d].getUri(); + m_serialsVec.push_back(string(serialNumber)); + dev.close(); + } + } + m_attributesMap["open_device"] + ->editStringValuesChoices().assign(1, m_serialsVec); } void DepthCamModule::updateModelMatrix() { - GroupModule::updateModelMatrix(); - map<int, ProjectorModule*>::iterator itProj=m_attachedProjs.begin(); - for(; itProj!=m_attachedProjs.end(); ++itProj) { - itProj->second->setModelMat(m_modelMat); - } + GroupModule::updateModelMatrix(); + map<int, ProjectorModule*>::iterator itProj=m_attachedProjs.begin(); + for(; itProj!=m_attachedProjs.end(); ++itProj) { + itProj->second->setModelMat(m_modelMat); + } } void DepthCamModule::deleteModule() { - if(m_space) { - m_space->removeCam(this); - } - Reveal::getInstance()->unregisterOutputDepthCam(this); - Module::deleteModule(); + if(m_space) { + m_space->removeCam(this); + } + Reveal::getInstance()->unregisterOutputDepthCam(this); + Module::deleteModule(); } void DepthCamModule::draw(const int& contextID, - const Reveal::REVIL_PROGRAM& prog, - map<Reveal::REVIL_UNIFORM, GLint>& uniforms, - const unsigned int& component) { - - - if(m_camTexMap.find(contextID)==m_camTexMap.end()) { - GLuint texID; - glGenTextures(1, &texID); - m_camTexMap[contextID]=texID; - m_upCamCounter[contextID]=0; - glGenTextures(1, &texID); - m_filtTexMap[contextID]=texID; - m_filtTexUpMap[contextID]=false; - glGenTextures(1, &texID); - m_markTexMap[contextID]=texID; - m_markTexUpMap[contextID]=false; - if(m_open) { - m_camTexUpMap[contextID]=false; - } - else { - m_camTexUpMap[contextID]=true; - } - } - - - /* - //captured background - if(m_backgroundActive) { - glActiveTexture(GL_TEXTURE1); - glUniform1i(uniforms[Reveal::CAMTEX2], 1); - glBindTexture(GL_TEXTURE_2D, m_camTexMap2[contextID]); - if(m_capturedBackground) { - glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR); - glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR); - glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB8, - m_width, m_height, 0, - GL_RGB, GL_UNSIGNED_BYTE, - m_backImg.data); - m_capturedBackground=false; - } - } - */ - - if(m_filtering) { - glActiveTexture(GL_TEXTURE1); - glUniform1i(uniforms[Reveal::CAMTEXFIL], 1); - glBindTexture(GL_TEXTURE_2D, m_filtTexMap[contextID]); - if(m_framesLock.try_lock()) { - if(m_filtTexUpMap[contextID]) { - glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR); - glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR); - if(m_open) { - if(m_filtDepthFrame!=NULL) { - glTexImage2D(GL_TEXTURE_2D, 0, GL_R16, - m_width, m_height, 0, - GL_RED, GL_UNSIGNED_SHORT, - m_filtDepthFrame); - } - } - m_filtTexUpMap[contextID]=false; - } - m_framesLock.unlock(); - } - } - glActiveTexture(GL_TEXTURE0); - glUniform1i(uniforms[Reveal::CAMTEX], 0); - glBindTexture(GL_TEXTURE_2D, m_camTexMap[contextID]); - if(m_camTexUpMap[contextID]) { - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); - if(m_open) { - if(!m_init) { - glTexImage2D(GL_TEXTURE_2D, 0, GL_R16, - m_width, m_height, 0, - GL_RED, GL_UNSIGNED_SHORT, - (unsigned char*)m_depthFrame.getData()); - m_init=true; - } - else { - glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, - m_width, m_height, - GL_RED, GL_UNSIGNED_SHORT, - (unsigned char*)m_depthFrame.getData()); - } - } - m_camTexUpMap[contextID]=false; - } - glActiveTexture(GL_TEXTURE2); - glUniform1i(uniforms[Reveal::MARKTEX], 2); - glBindTexture(GL_TEXTURE_2D, m_markTexMap[contextID]); - if(m_detectingMarkers) 
{ - if(m_framesLock.try_lock()) { - if(m_markTexUpMap[contextID]) { - glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR); - glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR); - if(m_open) { - if(m_markersFrame!=NULL) { - glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB8, - m_width, m_height, 0, - GL_RGB, GL_UNSIGNED_BYTE, - m_markersFrame); - } - } - m_markTexUpMap[contextID]=false; - } - - vector<float> mark; - vector<s_markerAndTime>::iterator it=m_sentDetectedMarkers.begin(); - for(; it != m_sentDetectedMarkers.end(); it++) { - mark.push_back(m_depthID+it->id); - mark.push_back(1); - /* - float depth = m_rgbDepthImg.at<cv::Vec3w> - (it->y, - it->x)[2]; - glm::vec4 pos((it->x/m_width-0.5) - *depth*tan(m_horizontalFOV/2.0)*2.0, - (0.5-it->y/m_height) - *depth*tan(m_verticalFOV/2.0)*2.0, - depth, 1.0); - pos = m_modelMat * pos; - pos[0]*=-1; - mark.push_back(pos[0]); - mark.push_back(pos[1]); - mark.push_back(pos[2]); - */ - - glm::vec3 pos(0, 0, 0); - for(int c=0; c<4; c++) { - float depth = m_rgbDepthImg.at<cv::Vec3w> - (it->corners[c].y, - it->corners[c].x)[2]; - glm::vec4 cpos((it->corners[c].x/m_width-0.5) - *depth*tan(m_horizontalFOV/2.0)*2.0, - (0.5-it->corners[c].y/m_height) - *depth*tan(m_verticalFOV/2.0)*2.0, - depth, 1.0); - cpos = m_modelMat * cpos; - cpos[0]*=-1; - for(int i=0; i<3; ++i) { - it->corners3D[c][i]=cpos[i]; - pos[i]+=cpos[i]/4.0; - } - } - - //compute position - mark.push_back(pos[0]); - mark.push_back(pos[1]); - mark.push_back(pos[2]); - - //compute rotation - glm::vec3 markUp = glm::normalize(it->corners3D[1] - - it->corners3D[2]); - glm::vec3 markR = glm::normalize(it->corners3D[1] - - it->corners3D[0]); - glm::vec3 markN = glm::normalize(glm::cross(markR, markUp)); - glm::mat4x4 markM( //r,up,n - markR[0], markR[1], markR[2], 0.0, //inverser - markUp[0], markUp[1], markUp[2],0.0, - markN[0], markN[1], markN[2], 0.0, - 0.0, 0.0, 0.0, 1.0 - ); - float rx, ry, rz; - glm::extractEulerAngleXYZ(markM, rx, ry, rz); - float tmprx = rx; - rx=-rz; - rz=-tmprx; - markM = glm::eulerAngleXYZ(rx,ry,rz); - - glm::quat markQ = glm::toQuat(markM); - /* - cout<<"Up:"<<markUp.x<<" "<<markUp.y<<" "<<markUp.z<<endl; - cout<<"R:"<<markR.x<<" "<<markR.y<<" "<<markR.z<<endl; - cout<<"N:"<<markN.x<<" "<<markN.y<<" "<<markN.z<<endl; - cout<<markQ[0]<<" "<<markQ[1]<<" "<<markQ[2]<<" "<<markQ[2]<<endl; - */ - mark.push_back(markQ[0]); - mark.push_back(markQ[1]); - mark.push_back(markQ[2]); - mark.push_back(markQ[3]); - m_attributesMap["marker_output"]->setFloats(mark); - mark.clear(); - } - m_sentDetectedMarkers.clear(); - it=m_sentRemovedMarkers.begin(); - for(; it!= m_sentRemovedMarkers.end(); it++) { - mark.push_back(m_depthID+it->id); - mark.push_back(0); - mark.push_back(0); - mark.push_back(0); - mark.push_back(0); - mark.push_back(0); - mark.push_back(0); - mark.push_back(0); - mark.push_back(0); - m_attributesMap["marker_output"]->setFloats(mark); - mark.clear(); - } - m_sentRemovedMarkers.clear(); - m_framesLock.unlock(); - } - } - glUniform1i(uniforms[Reveal::CAMFILTER], m_filtering); - glUniform1f(uniforms[Reveal::CAMCONTOURTHRESH], m_contourThreshold); - glUniform1f(uniforms[Reveal::CAMXRESO], m_width); - glUniform1f(uniforms[Reveal::CAMYRESO], m_height); - glUniform1f(uniforms[Reveal::CAMXZFACTOR], tan(m_horizontalFOV/2.0)*2.0); - glUniform1f(uniforms[Reveal::CAMYZFACTOR], tan(m_verticalFOV/2.0)*2.0); - glUniform1f(uniforms[Reveal::CAMMARKERS], m_fillingMarkers); - glUniform1i(uniforms[Reveal::BACKGROUND], m_backgroundActive); - DepthModule::draw(contextID, 
prog, uniforms, component); + const Reveal::REVIL_PROGRAM& prog, + map<Reveal::REVIL_UNIFORM, GLint>& uniforms, + const unsigned int& component) { + + + if(m_camTexMap.find(contextID)==m_camTexMap.end()) { + GLuint texID; + glGenTextures(1, &texID); + m_camTexMap[contextID]=texID; + m_upCamCounter[contextID]=0; + glGenTextures(1, &texID); + m_filtTexMap[contextID]=texID; + m_filtTexUpMap[contextID]=false; + glGenTextures(1, &texID); + m_markTexMap[contextID]=texID; + m_markTexUpMap[contextID]=false; + if(m_open) { + m_camTexUpMap[contextID]=false; + } + else { + m_camTexUpMap[contextID]=true; + } + } + + + /* + //captured background + if(m_backgroundActive) { + glActiveTexture(GL_TEXTURE1); + glUniform1i(uniforms[Reveal::CAMTEX2], 1); + glBindTexture(GL_TEXTURE_2D, m_camTexMap2[contextID]); + if(m_capturedBackground) { + glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR); + glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR); + glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB8, + m_width, m_height, 0, + GL_RGB, GL_UNSIGNED_BYTE, + m_backImg.data); + m_capturedBackground=false; + } + } + */ + + if(m_filtering) { + glActiveTexture(GL_TEXTURE1); + glUniform1i(uniforms[Reveal::CAMTEXFIL], 1); + glBindTexture(GL_TEXTURE_2D, m_filtTexMap[contextID]); + if(m_framesLock.try_lock()) { + if(m_filtTexUpMap[contextID]) { + glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR); + glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR); + if(m_open) { + if(m_filtDepthFrame!=NULL) { + glTexImage2D(GL_TEXTURE_2D, 0, GL_R16, + m_width, m_height, 0, + GL_RED, GL_UNSIGNED_SHORT, + m_filtDepthFrame); + } + } + m_filtTexUpMap[contextID]=false; + } + m_framesLock.unlock(); + } + } + glActiveTexture(GL_TEXTURE0); + glUniform1i(uniforms[Reveal::CAMTEX], 0); + glBindTexture(GL_TEXTURE_2D, m_camTexMap[contextID]); + if(m_camTexUpMap[contextID]) { + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); + if(m_open) { + if(!m_init) { + glTexImage2D(GL_TEXTURE_2D, 0, GL_R16, + m_width, m_height, 0, + GL_RED, GL_UNSIGNED_SHORT, + (unsigned char*)m_depthFrame.getData()); + m_init=true; + } + else { + glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, + m_width, m_height, + GL_RED, GL_UNSIGNED_SHORT, + (unsigned char*)m_depthFrame.getData()); + } + } + m_camTexUpMap[contextID]=false; + } + glActiveTexture(GL_TEXTURE2); + glUniform1i(uniforms[Reveal::MARKTEX], 2); + glBindTexture(GL_TEXTURE_2D, m_markTexMap[contextID]); + if(m_detectingMarkers) { + if(m_framesLock.try_lock()) { + if(m_markTexUpMap[contextID]) { + glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR); + glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR); + if(m_open) { + if(m_markersFrame!=NULL) { + glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB8, + m_width, m_height, 0, + GL_RGB, GL_UNSIGNED_BYTE, + m_markersFrame); + } + } + m_markTexUpMap[contextID]=false; + } + + vector<float> mark; + vector<s_markerAndTime>::iterator it=m_sentDetectedMarkers.begin(); + for(; it != m_sentDetectedMarkers.end(); it++) { + mark.push_back(m_depthID+it->id); + mark.push_back(1); + /* + float depth = m_rgbDepthImg.at<cv::Vec3w> + (it->y, + it->x)[2]; + glm::vec4 pos((it->x/m_width-0.5) + *depth*tan(m_horizontalFOV/2.0)*2.0, + (0.5-it->y/m_height) + *depth*tan(m_verticalFOV/2.0)*2.0, + depth, 1.0); + pos = m_modelMat * pos; + pos[0]*=-1; + mark.push_back(pos[0]); + mark.push_back(pos[1]); + mark.push_back(pos[2]); + */ + + glm::vec3 pos(0, 0, 0); + for(int c=0; c<4; 
c++) { + float depth = m_rgbDepthImg.at<cv::Vec3w> + (it->corners[c].y, + it->corners[c].x)[2]; + glm::vec4 cpos((it->corners[c].x/m_width-0.5) + *depth*tan(m_horizontalFOV/2.0)*2.0, + (0.5-it->corners[c].y/m_height) + *depth*tan(m_verticalFOV/2.0)*2.0, + depth, 1.0); + cpos = m_modelMat * cpos; + cpos[0]*=-1; + for(int i=0; i<3; ++i) { + it->corners3D[c][i]=cpos[i]; + pos[i]+=cpos[i]/4.0; + } + } + + //compute position + mark.push_back(pos[0]); + mark.push_back(pos[1]); + mark.push_back(pos[2]); + + //compute rotation + glm::vec3 markUp = glm::normalize(it->corners3D[1] + - it->corners3D[2]); + glm::vec3 markR = glm::normalize(it->corners3D[1] + - it->corners3D[0]); + glm::vec3 markN = glm::normalize(glm::cross(markR, markUp)); + glm::mat4x4 markM( //r,up,n + markR[0], markR[1], markR[2], 0.0, //inverser + markUp[0], markUp[1], markUp[2],0.0, + markN[0], markN[1], markN[2], 0.0, + 0.0, 0.0, 0.0, 1.0 + ); + float rx, ry, rz; + glm::extractEulerAngleXYZ(markM, rx, ry, rz); + float tmprx = rx; + rx=-rz; + rz=-tmprx; + markM = glm::eulerAngleXYZ(rx,ry,rz); + + glm::quat markQ = glm::toQuat(markM); + /* + cout<<"Up:"<<markUp.x<<" "<<markUp.y<<" "<<markUp.z<<endl; + cout<<"R:"<<markR.x<<" "<<markR.y<<" "<<markR.z<<endl; + cout<<"N:"<<markN.x<<" "<<markN.y<<" "<<markN.z<<endl; + cout<<markQ[0]<<" "<<markQ[1]<<" "<<markQ[2]<<" "<<markQ[2]<<endl; + */ + mark.push_back(markQ[0]); + mark.push_back(markQ[1]); + mark.push_back(markQ[2]); + mark.push_back(markQ[3]); + m_attributesMap["marker_output"]->setFloats(mark); + mark.clear(); + } + m_sentDetectedMarkers.clear(); + it=m_sentRemovedMarkers.begin(); + for(; it!= m_sentRemovedMarkers.end(); it++) { + mark.push_back(m_depthID+it->id); + mark.push_back(0); + mark.push_back(0); + mark.push_back(0); + mark.push_back(0); + mark.push_back(0); + mark.push_back(0); + mark.push_back(0); + mark.push_back(0); + m_attributesMap["marker_output"]->setFloats(mark); + mark.clear(); + } + m_sentRemovedMarkers.clear(); + m_framesLock.unlock(); + } + } + glUniform1i(uniforms[Reveal::CAMFILTER], m_filtering); + glUniform1f(uniforms[Reveal::CAMCONTOURTHRESH], m_contourThreshold); + glUniform1f(uniforms[Reveal::CAMXRESO], m_width); + glUniform1f(uniforms[Reveal::CAMYRESO], m_height); + glUniform1f(uniforms[Reveal::CAMXZFACTOR], tan(m_horizontalFOV/2.0)*2.0); + glUniform1f(uniforms[Reveal::CAMYZFACTOR], tan(m_verticalFOV/2.0)*2.0); + glUniform1f(uniforms[Reveal::CAMMARKERS], m_fillingMarkers); + glUniform1i(uniforms[Reveal::BACKGROUND], m_backgroundActive); + DepthModule::draw(contextID, prog, uniforms, component); } void DepthCamModule::setSpace(SpaceModule* spa) { - DepthModule::setSpace(spa); - m_space->registerGeom(m_camGeom); + DepthModule::setSpace(spa); + m_space->registerGeom(m_camGeom); } void DepthCamModule::setMode(const std::string& mode) { - m_defaultWidth=m_depthModes[mode].width; - m_defaultHeight=m_depthModes[mode].height; - m_defaultFps=m_depthModes[mode].fps; - if(m_open) { - openDevice(m_deviceID); - } + m_defaultWidth=m_depthModes[mode].width; + m_defaultHeight=m_depthModes[mode].height; + m_defaultFps=m_depthModes[mode].fps; + if(m_open) { + openDevice(m_deviceID); + } } void DepthCamModule::openDevice(const std::string& devStr, bool calib) { - if(devStr.size()==0) { - return; - } - - if(m_open) { - closeDevice(); - } - - openni::Status rc; - - if(m_serialsMap.find(devStr)==m_serialsMap.end()) { - cout<<"Could not open OpenNI device "<<devStr<<endl; - return; - } - - rc = m_device.open(m_serialsMap[devStr].c_str()); - if(rc != openni::STATUS_OK) { - 
cout<<"Could not open OpenNI device "<<devStr<<endl; - return; - } - - openni::VideoMode depthVideoMode; - - //create streams - rc = m_depth.create(m_device, openni::SENSOR_DEPTH); - if(rc!=openni::STATUS_OK) { - cout<<"Could not create depth stream for device "<<devStr<<endl; - return; - } - - //list and store video modes for this device - m_depthModes.clear(); - m_modes.clear(); - const openni::Array<openni::VideoMode>& modes = m_depth.getSensorInfo() - .getSupportedVideoModes(); - for(int m=0; m<modes.getSize(); ++m) { - if(modes[m].getPixelFormat()==openni::PIXEL_FORMAT_DEPTH_1_MM) { - string name = to_string(modes[m].getResolutionX()) - +"x"+to_string(modes[m].getResolutionY()) - +"@"+to_string(modes[m].getFps()); - m_modes.push_back(name); - m_depthModes[name]=DepthMode(); - m_depthModes[name].width=modes[m].getResolutionX(); - m_depthModes[name].height=modes[m].getResolutionY(); - m_depthModes[name].fps=modes[m].getFps(); - } - } - - if(m_deviceID.compare("")==0) { //no already opened device - //open a default mode - int defaultMode = 0; - m_defaultWidth=m_depthModes[m_modes[defaultMode]].width; - m_defaultHeight=m_depthModes[m_modes[defaultMode]].height; - m_defaultFps=m_depthModes[m_modes[defaultMode]].fps; - m_attributesMap["mode"]->editStringValuesChoices().assign(1,m_modes); - m_attributesMap["mode"]->initStrings(vector<string>(1, - m_modes[defaultMode])); - Reveal::getInstance()->refreshModules(); - } - - if(calib) { //fixed reso if calibrating FIXME pick the first / highest one ? - m_width=640; - m_height=480; - m_fps=30; - } - else { - m_width=m_defaultWidth; - m_height=m_defaultHeight; - m_fps=m_defaultFps; - } - - //set depth stream size - depthVideoMode.setResolution(m_width, m_height); - depthVideoMode.setFps(m_fps); - depthVideoMode.setPixelFormat(openni::PIXEL_FORMAT_DEPTH_1_MM); - openni::Status rc1 = m_depth.setVideoMode(depthVideoMode); - if(rc1!=openni::STATUS_OK) { - cout<<"Could not set depth video mode"<<endl; - return; - } - - //refresh geom size - m_camGeom->setSize(m_width, m_height); - - //color stream - rc = m_color.create(m_device, openni::SENSOR_COLOR); - openni::VideoMode colorVideoMode; - if(rc==openni::STATUS_OK) { - colorVideoMode.setResolution(m_width, m_height); - colorVideoMode.setFps(m_fps); - colorVideoMode.setPixelFormat(openni::PIXEL_FORMAT_RGB888); - rc = m_color.setVideoMode(colorVideoMode); - if(rc!=openni::STATUS_OK) { - cout<<"Could not set color video mode"<<endl; - } - } - else { - m_color.stop(); - cout<<"Could not create color stream"<<endl; - /* - rc = m_color.create(m_device, openni::SENSOR_IR); - if(rc==openni::STATUS_OK) { - colorVideoMode.setResolution(m_width, m_height); - colorVideoMode.setFps(m_fps); - colorVideoMode.setPixelFormat(openni::PIXEL_FORMAT_RGB888); - rc = m_color.setVideoMode(colorVideoMode); - if(rc!=openni::STATUS_OK) { - cout<<"Could not set ir video mode"<<endl; - } - } - else { - cout<<"Could not create ir stream"<<endl; - return; - } - */ - } - - //start only depth for now - rc = m_depth.start(); - if(!m_depth.isValid()) { - cout<<"Depth Cam: No valid streams"<<endl; - return; - } - - m_horizontalFOV = m_depth.getHorizontalFieldOfView(); - m_verticalFOV = m_depth.getVerticalFieldOfView(); - cout<<m_horizontalFOV<<" "<<m_verticalFOV<<endl; - - m_depth.setMirroringEnabled(false); - m_color.setMirroringEnabled(false); - m_device.setImageRegistrationMode( - openni::IMAGE_REGISTRATION_DEPTH_TO_COLOR); - m_streams = new openni::VideoStream*[2]; - m_streams[0] = &m_depth; - - m_gotColor=false; - m_gotDepth=false; - 
m_colorActivated=false; - - m_open=true; - m_init=false; - m_deviceID=devStr; - cout<<"Opened OpenNI device "<<m_deviceID - <<" of "<<m_width<<"x"<<m_height - <<" @ "<<m_fps<<"FPS"<<endl; + if(devStr.size()==0) { + return; + } + + if(m_open) { + closeDevice(); + } + + openni::Status rc; + + if(m_serialsMap.find(devStr)==m_serialsMap.end()) { + cout<<"Could not open OpenNI device "<<devStr<<endl; + return; + } + + rc = m_device.open(m_serialsMap[devStr].c_str()); + if(rc != openni::STATUS_OK) { + cout<<"Could not open OpenNI device "<<devStr<<endl; + return; + } + + openni::VideoMode depthVideoMode; + + //create streams + rc = m_depth.create(m_device, openni::SENSOR_DEPTH); + if(rc!=openni::STATUS_OK) { + cout<<"Could not create depth stream for device "<<devStr<<endl; + return; + } + + //list and store video modes for this device + m_depthModes.clear(); + m_modes.clear(); + const openni::Array<openni::VideoMode>& modes = m_depth.getSensorInfo() + .getSupportedVideoModes(); + for(int m=0; m<modes.getSize(); ++m) { + if(modes[m].getPixelFormat()==openni::PIXEL_FORMAT_DEPTH_1_MM) { + string name = to_string(modes[m].getResolutionX()) + +"x"+to_string(modes[m].getResolutionY()) + +"@"+to_string(modes[m].getFps()); + m_modes.push_back(name); + m_depthModes[name]=DepthMode(); + m_depthModes[name].width=modes[m].getResolutionX(); + m_depthModes[name].height=modes[m].getResolutionY(); + m_depthModes[name].fps=modes[m].getFps(); + } + } + + if(m_deviceID.compare("")==0) { //no already opened device + //open a default mode + int defaultMode = 0; + m_defaultWidth=m_depthModes[m_modes[defaultMode]].width; + m_defaultHeight=m_depthModes[m_modes[defaultMode]].height; + m_defaultFps=m_depthModes[m_modes[defaultMode]].fps; + m_attributesMap["mode"]->editStringValuesChoices().assign(1,m_modes); + m_attributesMap["mode"]->initStrings(vector<string>(1, + m_modes[defaultMode])); + Reveal::getInstance()->refreshModules(); + } + + if(calib) { //fixed reso if calibrating FIXME pick the first / highest one ? 
+ m_width=640; + m_height=480; + m_fps=30; + } + else { + m_width=m_defaultWidth; + m_height=m_defaultHeight; + m_fps=m_defaultFps; + } + + //set depth stream size + depthVideoMode.setResolution(m_width, m_height); + depthVideoMode.setFps(m_fps); + depthVideoMode.setPixelFormat(openni::PIXEL_FORMAT_DEPTH_1_MM); + openni::Status rc1 = m_depth.setVideoMode(depthVideoMode); + if(rc1!=openni::STATUS_OK) { + cout<<"Could not set depth video mode"<<endl; + return; + } + + //refresh geom size + m_camGeom->setSize(m_width, m_height); + + //color stream + rc = m_color.create(m_device, openni::SENSOR_COLOR); + openni::VideoMode colorVideoMode; + if(rc==openni::STATUS_OK) { + colorVideoMode.setResolution(m_width, m_height); + colorVideoMode.setFps(m_fps); + colorVideoMode.setPixelFormat(openni::PIXEL_FORMAT_RGB888); + rc = m_color.setVideoMode(colorVideoMode); + if(rc!=openni::STATUS_OK) { + cout<<"Could not set color video mode"<<endl; + } + } + else { + m_color.stop(); + cout<<"Could not create color stream"<<endl; + /* + rc = m_color.create(m_device, openni::SENSOR_IR); + if(rc==openni::STATUS_OK) { + colorVideoMode.setResolution(m_width, m_height); + colorVideoMode.setFps(m_fps); + colorVideoMode.setPixelFormat(openni::PIXEL_FORMAT_RGB888); + rc = m_color.setVideoMode(colorVideoMode); + if(rc!=openni::STATUS_OK) { + cout<<"Could not set ir video mode"<<endl; + } + } + else { + cout<<"Could not create ir stream"<<endl; + return; + } + */ + } + + //start only depth for now + rc = m_depth.start(); + if(!m_depth.isValid()) { + cout<<"Depth Cam: No valid streams"<<endl; + return; + } + + m_horizontalFOV = m_depth.getHorizontalFieldOfView(); + m_verticalFOV = m_depth.getVerticalFieldOfView(); + + m_depth.setMirroringEnabled(false); + m_color.setMirroringEnabled(false); + m_device.setImageRegistrationMode( + openni::IMAGE_REGISTRATION_DEPTH_TO_COLOR); + m_streams = new openni::VideoStream*[2]; + m_streams[0] = &m_depth; + + m_gotColor=false; + m_gotDepth=false; + m_colorActivated=false; + + m_open=true; + m_init=false; + m_deviceID=devStr; + cout<<"Opened OpenNI device "<<m_deviceID + <<" of "<<m_width<<"x"<<m_height + <<" @ "<<m_fps<<"FPS"<<endl; } void DepthCamModule::closeDevice(const bool& clear) { - if(m_open) { - m_open=false; - m_depth.destroy(); - m_color.destroy(); - delete [] m_streams; - m_device.close(); - - if(clear) { - vector<string> devStr(1, ""); - m_attributesMap["open_device"]->initStrings(devStr); - m_attributesMap["mode"]->initStrings(devStr); - m_deviceID=""; - } - } + if(m_open) { + m_open=false; + m_depth.destroy(); + m_color.destroy(); + delete [] m_streams; + m_device.close(); + + if(clear) { + vector<string> devStr(1, ""); + m_attributesMap["open_device"]->initStrings(devStr); + m_attributesMap["mode"]->initStrings(devStr); + m_deviceID=""; + } + } } void DepthCamModule::setModuleName(const std::string& val) { - Module::setModuleName(val); - if(m_space) { - m_space->refreshDepthCamList(); - } + Module::setModuleName(val); + if(m_space) { + m_space->refreshDepthCamList(); + } } void DepthCamModule::activateColor() { - if(m_open) { - openni::Status rc = m_color.start(); - if(!m_color.isValid()) { - cout<<"Color Cam: No valid streams"<<endl; - return; - } - m_colorActivated=true; - m_color.setMirroringEnabled(false); - m_streams[1] = &m_color; - m_device.setImageRegistrationMode( - openni::IMAGE_REGISTRATION_DEPTH_TO_COLOR); - m_device.setDepthColorSyncEnabled(false); - } + if(m_open) { + openni::Status rc = m_color.start(); + if(!m_color.isValid()) { + cout<<"Color Cam: No valid 
streams"<<endl; + return; + } + m_colorActivated=true; + m_color.setMirroringEnabled(false); + m_streams[1] = &m_color; + m_device.setImageRegistrationMode( + openni::IMAGE_REGISTRATION_DEPTH_TO_COLOR); + m_device.setDepthColorSyncEnabled(false); + } } void DepthCamModule::deactivateColor() { - if(m_open) { - m_color.stop(); - m_colorActivated=false; - } + if(m_open) { + m_color.stop(); + m_colorActivated=false; + } } bool DepthCamModule::getFrames() { - bool res=false; - - openni::VideoStream *tab[1]; - if(m_open){ - tab[0] = &m_depth; - int changedIndex=-1; - openni::OpenNI::waitForAnyStream(tab,1,&changedIndex,0); - if(changedIndex == 0) { - m_depth.readFrame(&m_depthFrame); - if(m_framesLock.try_lock()) { - m_gotDepth=true; - - createMatDepth(); - m_framesLock.unlock(); - } - if(m_capturingBackground>0) { - captureBackground(); - } - } - if(m_colorActivated){ - tab[0] = &m_color; - openni::OpenNI::waitForAnyStream(tab,1,&changedIndex,0); - if(changedIndex == 0) { - m_color.readFrame(&m_colorFrame); - if(m_framesLock.try_lock()) { - m_gotColor=true; - createMatColor(); - m_framesLock.unlock(); - } - } - } - res = m_gotDepth && (m_gotColor || !m_colorActivated); - } - return res; + bool res=false; + + openni::VideoStream *tab[1]; + if(m_open){ + tab[0] = &m_depth; + int changedIndex=-1; + openni::OpenNI::waitForAnyStream(tab,1,&changedIndex,0); + if(changedIndex == 0) { + m_depth.readFrame(&m_depthFrame); + if(m_framesLock.try_lock()) { + m_gotDepth=true; + + createMatDepth(); + m_framesLock.unlock(); + } + if(m_capturingBackground>0) { + captureBackground(); + } + } + if(m_colorActivated){ + tab[0] = &m_color; + openni::OpenNI::waitForAnyStream(tab,1,&changedIndex,0); + if(changedIndex == 0) { + m_color.readFrame(&m_colorFrame); + if(m_framesLock.try_lock()) { + m_gotColor=true; + createMatColor(); + m_framesLock.unlock(); + } + } + } + res = m_gotDepth && (m_gotColor || !m_colorActivated); + } + return res; } void DepthCamModule::attachProj(ProjectorModule* proj) { - int i=0; - while(m_attachedProjs.find(i)!=m_attachedProjs.end()) { - ++i; - } - m_attachedProjs[i]=proj; - proj->setProjID(i); - updateModelMatrix(); + int i=0; + while(m_attachedProjs.find(i)!=m_attachedProjs.end()) { + ++i; + } + m_attachedProjs[i]=proj; + proj->setProjID(i); + updateModelMatrix(); } void DepthCamModule::detachProj(ProjectorModule* proj) { - m_attachedProjs.erase(proj->getProjID()); + m_attachedProjs.erase(proj->getProjID()); } void DepthCamModule::setCaptureBackground(const bool& cap) { - if(cap) { - m_capturingBackground=50; - m_capturedBackground=false; - m_backgroundActive=true; - cv::Size imageSize(m_width, m_height); - m_backImg = cv::Mat::zeros(imageSize, CV_16UC1); - } - else { - m_capturingBackground=0; - m_capturedBackground=false; - m_backgroundActive=false; - } + if(cap) { + m_capturingBackground=50; + m_capturedBackground=false; + m_backgroundActive=true; + cv::Size imageSize(m_width, m_height); + m_backImg = cv::Mat::zeros(imageSize, CV_16UC1); + } + else { + m_capturingBackground=0; + m_capturedBackground=false; + m_backgroundActive=false; + } } void DepthCamModule::captureBackground() { - cv::Size imageSize(m_width, m_height); - for(int j=0; j<imageSize.height; ++j) { - for(int i=0; i<imageSize.width; ++i) { - ushort& dPix = m_backImg.at<ushort>(j,i); - int coord=j*imageSize.width+i; - openni::DepthPixel depthPix = - ((openni::DepthPixel*) m_depthFrame.getData())[coord]; - if(depthPix>dPix) { - dPix=depthPix; - } - } - } - m_capturingBackground--; - if(m_capturingBackground<=0) { - 
m_capturedBackground=true; - } + cv::Size imageSize(m_width, m_height); + for(int j=0; j<imageSize.height; ++j) { + for(int i=0; i<imageSize.width; ++i) { + ushort& dPix = m_backImg.at<ushort>(j,i); + int coord=j*imageSize.width+i; + openni::DepthPixel depthPix = + ((openni::DepthPixel*) m_depthFrame.getData())[coord]; + if(depthPix>dPix) { + dPix=depthPix; + } + } + } + m_capturingBackground--; + if(m_capturingBackground<=0) { + m_capturedBackground=true; + } } void DepthCamModule::setMarkersDetection(const string& val) { - if(m_open) { - m_framesLock.lock(); - m_framesLock.unlock(); - - if(val.compare("none")!=0) { - m_detectingMarkers=true; - activateColor(); - if(val.compare("aruco")==0) { - m_currentTracker = m_arucoTracker; - } - else { - m_currentTracker = m_imageTracker; - } - m_currentTracker->init(); - } - else { - m_detectingMarkers=false; - deactivateColor(); - } - } + if(m_open) { + m_framesLock.lock(); + m_framesLock.unlock(); + + if(val.compare("none")!=0) { + m_detectingMarkers=true; + activateColor(); + if(val.compare("aruco")==0) { + m_currentTracker = m_arucoTracker; + } + else { + m_currentTracker = m_imageTracker; + } + m_currentTracker->init(); + } + else { + m_detectingMarkers=false; + deactivateColor(); + } + } } void DepthCamModule::setImageMarkersFolder(const std::string& fol) { - m_imageTracker->setImgFolder(fol); - m_imageTracker->init(); + m_imageTracker->setImgFolder(fol); + m_imageTracker->init(); } void DepthCamModule::setMarkersFillMethod(const std::string& val) { - if(!val.compare("none")) { - m_fillingMarkers=false; - } - else { - m_fillingMarkers=true; - if(!val.compare("voronoi")) { - m_notVoronoi=false; - } - else { - m_notVoronoi=true; - } - } + if(!val.compare("none")) { + m_fillingMarkers=false; + } + else { + m_fillingMarkers=true; + if(!val.compare("voronoi")) { + m_notVoronoi=false; + } + else { + m_notVoronoi=true; + } + } } void DepthCamModule::update(const int& timeDiffMs) { - if(getFrames()) { - map<int, bool>::iterator itUp = m_camTexUpMap.begin(); - for(; itUp!=m_camTexUpMap.end(); ++itUp) { - itUp->second = true; - } - } + if(getFrames()) { + map<int, bool>::iterator itUp = m_camTexUpMap.begin(); + for(; itUp!=m_camTexUpMap.end(); ++itUp) { + itUp->second = true; + } + } } void DepthCamModule::gotAttributeListenersUpdate(Attribute* att) { - /* - int nbLis = m_attributesMap["cursor"]->editListeners().size(); - if(nbLis>0) { - // m_outCursUniform->set(float(1)); - Reveal::getInstance()->registerOutputDepthCam(this); - } - else { - // m_outCursUniform->set(float(0)); - Reveal::getInstance()->unregisterOutputDepthCam(this); - } - */ + /* + int nbLis = m_attributesMap["cursor"]->editListeners().size(); + if(nbLis>0) { + // m_outCursUniform->set(float(1)); + Reveal::getInstance()->registerOutputDepthCam(this); + } + else { + // m_outCursUniform->set(float(0)); + Reveal::getInstance()->unregisterOutputDepthCam(this); + } + */ } void DepthCamModule::processCursor(const std::vector<float>& pos) { - /* - //filter cursor position - unsigned int v=0; - for(unsigned int mv=0; mv<m_filterCursorValues.size(); ++mv) { - m_filterCursorSums[mv]-=m_filterCursorValues[mv].front(); - m_filterCursorValues[mv].pop_front(); - m_filterCursorValues[mv].push_back(pos[v]); - m_filterCursorSums[mv]+=m_filterCursorValues[mv].back(); - m_cursorValues[mv]=m_filterCursorSums[mv]/m_filterCursorSize; - if(v+1<pos.size()) { - ++v; - } - } - - //set attribute - m_attributesMap["cursor"]->setFloatsIfChanged(m_cursorValues); - */ + /* + //filter cursor position + unsigned int 
v=0; + for(unsigned int mv=0; mv<m_filterCursorValues.size(); ++mv) { + m_filterCursorSums[mv]-=m_filterCursorValues[mv].front(); + m_filterCursorValues[mv].pop_front(); + m_filterCursorValues[mv].push_back(pos[v]); + m_filterCursorSums[mv]+=m_filterCursorValues[mv].back(); + m_cursorValues[mv]=m_filterCursorSums[mv]/m_filterCursorSize; + if(v+1<pos.size()) { + ++v; + } + } + + //set attribute + m_attributesMap["cursor"]->setFloatsIfChanged(m_cursorValues); + */ } void DepthCamModule::refreshProjList(const vector<ProjectorModule*> projs) { - vector<string> labels; - labels.push_back("none"); - vector<ProjectorModule*>::const_iterator itProj=projs.begin(); - for(; itProj!=projs.end(); ++itProj) { - labels.push_back((*itProj)->getName()); - } + vector<string> labels; + labels.push_back("none"); + vector<ProjectorModule*>::const_iterator itProj=projs.begin(); + for(; itProj!=projs.end(); ++itProj) { + labels.push_back((*itProj)->getName()); + } } //-----------------------Calibration------------------------------------ static void cbMarkerPosY(int y, void* mod) { - static_cast<DepthCamModule*>(mod)->setCalibMarkerPosY(y); + static_cast<DepthCamModule*>(mod)->setCalibMarkerPosY(y); } void DepthCamModule::setCalibMarkerPosY(const int& y) { - m_markerPosY=y; + m_markerPosY=y; } static void cbMarkerRotX(int x, void* mod) { - static_cast<DepthCamModule*>(mod)->setCalibMarkerRotX(x); + static_cast<DepthCamModule*>(mod)->setCalibMarkerRotX(x); } void DepthCamModule::setCalibMarkerRotX(const int& x) { - m_markerRotX=x; + m_markerRotX=x; } static void cbMarkerRotY(int y, void* mod) { - static_cast<DepthCamModule*>(mod)->setCalibMarkerRotY(y); + static_cast<DepthCamModule*>(mod)->setCalibMarkerRotY(y); } void DepthCamModule::setCalibMarkerRotY(const int& y) { - m_markerRotY=y; + m_markerRotY=y; } static void cbCalib(int c, void* mod) { - DepthCamModule* cam = static_cast<DepthCamModule*>(mod); - cam->setCalib(c); + DepthCamModule* cam = static_cast<DepthCamModule*>(mod); + cam->setCalib(c); } void DepthCamModule::setCalib(const int& c) { - m_calibrating=c; - if(c==0) { - m_calibrationStopped=true; - } + m_calibrating=c; + if(c==0) { + m_calibrationStopped=true; + } } void DepthCamModule::calibrate() { - if(!m_open) { - return; - } - m_calibrating=0; - m_calibrationStopped=false; - - namedWindow("DepthCamRGB"); - namedWindow("Controls"); - createTrackbar("MarkerPosY", "Controls", - &m_markerPosY, 3000, cbMarkerPosY, this); - createTrackbar("MarkerRotX", "Controls", - &m_markerRotX, 360, cbMarkerRotX, this); - createTrackbar("MarkerRotY", "Controls", - &m_markerRotY, 360, cbMarkerRotY, this); - createTrackbar("Calibrate (1:Start, 0:Cancel)", "Controls", - &m_calibrating, 1, cbCalib, this); - - - Size imageSize(m_width, m_height); - Mat kinectRgbImg = Mat::zeros(imageSize, CV_8UC3); - Mat kinectDepthImg = cv::Mat::zeros(imageSize, CV_32FC1); - - activateColor(); - - while(!m_calibrationStopped ) { - for(int j=0; j<imageSize.height; ++j) { - for(int i=0; i<imageSize.width; ++i) { - kinectDepthImg.at<float>(j,i)=0; - } - } - int nbFrames=10; - int framesCnt=0; - while(framesCnt<nbFrames) { - if(getFrames()) { - //copy to opencv mat - for(int j=0; j<imageSize.height; ++j) { - for(int i=0; i<imageSize.width; ++i) { - cv::Vec3b& pix = kinectRgbImg.at<cv::Vec3b>(j,i); - float& dPix = kinectDepthImg.at<float>(j,i); - int coord=j*imageSize.width+i; - openni::RGB888Pixel colorPix = - ((openni::RGB888Pixel*)m_colorFrame.getData())[coord]; - pix[0]=colorPix.r; - pix[1]=colorPix.g; - pix[2]=colorPix.b; - - 
openni::DepthPixel depthPix = - ((openni::DepthPixel*)m_depthFrame.getData())[coord]; - dPix += float(depthPix)/float(nbFrames);; - } - } - framesCnt++; - } - } - - //find marker - vector<vector<cv::Point2f> > corners; - vector<int> ids; - cv::aruco::detectMarkers(kinectRgbImg, - cv::aruco::getPredefinedDictionary(cv::aruco::DICT_ARUCO_ORIGINAL), - corners, - ids); - cv::aruco::drawDetectedMarkers(kinectRgbImg, corners); - imshow("DepthCamRGB", kinectRgbImg); - waitKey(10); - - glm::vec3 cornersD[4]; - glm::vec3 center(0.0, 0.0, 0.0); - bool valid=false; - for(unsigned int m=0; m<ids.size(); ++m) { - if(ids[m]>=0) { - valid=true; - //retrieve cam space coordinates - for(int c=0; c<4; c++) { - - glm::vec2 dCoord = glm::vec2(corners[m][c].x, - corners[m][c].y); - float& dPix = kinectDepthImg.at<float>(corners[m][c].y, - corners[m][c].x); - if(dPix<500 || dPix>7000) { - valid=false; - } - openni::CoordinateConverter::convertDepthToWorld( - m_depth, - int(dCoord.x), - int(dCoord.y), - int(dPix), - &cornersD[c].x, - &cornersD[c].y, - &cornersD[c].z); - cornersD[c].z*=1.0; - center.x+=cornersD[c].x/4.0; - center.y+=cornersD[c].y/4.0; - center.z+=cornersD[c].z/4.0; - } - } - } - - if(valid && m_calibrating) { - cout<<"center at "<<center.x<<" " - <<center.y<<" " - <<center.z<<endl; - for(int c=0;c<4;++c) { - cout<<"corner "<<c<<" "<<cornersD[c].x<<" " - <<cornersD[c].y<<" "<<cornersD[c].z<<endl; - } - - //retrieve world marker rotation - double rotX = double(m_markerRotX)/180.0*M_PI; - double rotY = double(m_markerRotY)/180.0*M_PI; - glm::mat4 markRotMat = glm::eulerAngleYXZ(rotY, rotX, 0.0); - - //cam space marker center translation - glm::mat4 markMat = glm::translate(glm::mat4(1.0), center); - - //cam space marker rotation - glm::vec3 markUp = glm::normalize(cornersD[1] - cornersD[2]); - glm::vec3 markR = glm::normalize(cornersD[1] - cornersD[0]); - glm::vec3 markN = glm::normalize(glm::cross(markR, markUp)); - cout<<"Up:"<<markUp.x<<" "<<markUp.y<<" "<<markUp.z<<endl; - cout<<"R:"<<markR.x<<" "<<markR.y<<" "<<markR.z<<endl; - cout<<"N:"<<markN.x<<" "<<markN.y<<" "<<markN.z<<endl; - glm::mat4x4 markM( //r,up,n - markR[0], markR[1], markR[2], 0.0, - markUp[0], markUp[1], markUp[2],0.0, - markN[0], markN[1], markN[2], 0.0, - 0.0, 0.0, 0.0, 1.0 - ); - float rx, ry, rz; - glm::extractEulerAngleXYZ(markM, rx, ry, rz); - glm::mat4 rotMat = glm::eulerAngleXYZ(rx,ry,rz); - - //get transform of cam in marker space - glm::mat4 camMat = inverse(markMat*rotMat); - - //apply physical marker rotation to retrieve world space transfo - camMat = inverse(markRotMat)*camMat; - - //assign model matrix and update attached projectors - m_parentMat = glm::mat4(1.0); - setModelMat(camMat); - updateModelMatrix(); - - m_calibrationStopped=true; - } - usleep(10000); - } - cout<<"Camera Calibration Done"<<endl; - - deactivateColor(); - destroyWindow("DepthCamRGB"); - destroyWindow("Controls"); - waitKey(10); + if(!m_open) { + return; + } + m_calibrating=0; + m_calibrationStopped=false; + + namedWindow("DepthCamRGB"); + namedWindow("Controls"); + createTrackbar("MarkerPosY", "Controls", + &m_markerPosY, 3000, cbMarkerPosY, this); + createTrackbar("MarkerRotX", "Controls", + &m_markerRotX, 360, cbMarkerRotX, this); + createTrackbar("MarkerRotY", "Controls", + &m_markerRotY, 360, cbMarkerRotY, this); + createTrackbar("Calibrate (1:Start, 0:Cancel)", "Controls", + &m_calibrating, 1, cbCalib, this); + + + Size imageSize(m_width, m_height); + Mat kinectRgbImg = Mat::zeros(imageSize, CV_8UC3); + Mat kinectDepthImg = 
cv::Mat::zeros(imageSize, CV_32FC1); + + activateColor(); + + while(!m_calibrationStopped ) { + for(int j=0; j<imageSize.height; ++j) { + for(int i=0; i<imageSize.width; ++i) { + kinectDepthImg.at<float>(j,i)=0; + } + } + int nbFrames=10; + int framesCnt=0; + while(framesCnt<nbFrames) { + if(getFrames()) { + //copy to opencv mat + for(int j=0; j<imageSize.height; ++j) { + for(int i=0; i<imageSize.width; ++i) { + cv::Vec3b& pix = kinectRgbImg.at<cv::Vec3b>(j,i); + float& dPix = kinectDepthImg.at<float>(j,i); + int coord=j*imageSize.width+i; + openni::RGB888Pixel colorPix = + ((openni::RGB888Pixel*)m_colorFrame.getData())[coord]; + pix[0]=colorPix.r; + pix[1]=colorPix.g; + pix[2]=colorPix.b; + + openni::DepthPixel depthPix = + ((openni::DepthPixel*)m_depthFrame.getData())[coord]; + dPix += float(depthPix)/float(nbFrames);; + } + } + framesCnt++; + } + } + + //find marker + vector<vector<cv::Point2f> > corners; + vector<int> ids; + cv::aruco::detectMarkers(kinectRgbImg, + cv::aruco::getPredefinedDictionary(cv::aruco::DICT_ARUCO_ORIGINAL), + corners, + ids); + cv::aruco::drawDetectedMarkers(kinectRgbImg, corners); + imshow("DepthCamRGB", kinectRgbImg); + waitKey(10); + + glm::vec3 cornersD[4]; + glm::vec3 center(0.0, 0.0, 0.0); + bool valid=false; + for(unsigned int m=0; m<ids.size(); ++m) { + if(ids[m]>=0) { + valid=true; + //retrieve cam space coordinates + for(int c=0; c<4; c++) { + + glm::vec2 dCoord = glm::vec2(corners[m][c].x, + corners[m][c].y); + float& dPix = kinectDepthImg.at<float>(corners[m][c].y, + corners[m][c].x); + if(dPix<500 || dPix>7000) { + valid=false; + } + openni::CoordinateConverter::convertDepthToWorld( + m_depth, + int(dCoord.x), + int(dCoord.y), + int(dPix), + &cornersD[c].x, + &cornersD[c].y, + &cornersD[c].z); + cornersD[c].z*=1.0; + center.x+=cornersD[c].x/4.0; + center.y+=cornersD[c].y/4.0; + center.z+=cornersD[c].z/4.0; + } + } + } + + if(valid && m_calibrating) { + cout<<"center at "<<center.x<<" " + <<center.y<<" " + <<center.z<<endl; + for(int c=0;c<4;++c) { + cout<<"corner "<<c<<" "<<cornersD[c].x<<" " + <<cornersD[c].y<<" "<<cornersD[c].z<<endl; + } + + //retrieve world marker rotation + double rotX = double(m_markerRotX)/180.0*M_PI; + double rotY = double(m_markerRotY)/180.0*M_PI; + glm::mat4 markRotMat = glm::eulerAngleYXZ(rotY, rotX, 0.0); + + //cam space marker center translation + glm::mat4 markMat = glm::translate(glm::mat4(1.0), center); + + //cam space marker rotation + glm::vec3 markUp = glm::normalize(cornersD[1] - cornersD[2]); + glm::vec3 markR = glm::normalize(cornersD[1] - cornersD[0]); + glm::vec3 markN = glm::normalize(glm::cross(markR, markUp)); + cout<<"Up:"<<markUp.x<<" "<<markUp.y<<" "<<markUp.z<<endl; + cout<<"R:"<<markR.x<<" "<<markR.y<<" "<<markR.z<<endl; + cout<<"N:"<<markN.x<<" "<<markN.y<<" "<<markN.z<<endl; + glm::mat4x4 markM( //r,up,n + markR[0], markR[1], markR[2], 0.0, + markUp[0], markUp[1], markUp[2],0.0, + markN[0], markN[1], markN[2], 0.0, + 0.0, 0.0, 0.0, 1.0 + ); + float rx, ry, rz; + glm::extractEulerAngleXYZ(markM, rx, ry, rz); + glm::mat4 rotMat = glm::eulerAngleXYZ(rx,ry,rz); + + //get transform of cam in marker space + glm::mat4 camMat = inverse(markMat*rotMat); + + //apply physical marker rotation to retrieve world space transfo + camMat = inverse(markRotMat)*camMat; + + //assign model matrix and update attached projectors + m_parentMat = glm::mat4(1.0); + setModelMat(camMat); + updateModelMatrix(); + + m_calibrationStopped=true; + } + usleep(10000); + } + cout<<"Camera Calibration Done"<<endl; + + 
deactivateColor(); + destroyWindow("DepthCamRGB"); + destroyWindow("Controls"); + waitKey(10); } //---------------------Frames process------------------------------- void DepthCamModule::processFrames() { - bool done=false; - while(!done){ - if(m_open) { - bool gotFrame = false; - - m_framesLock.lock(); - - //update filtered frame texture - if(m_gotDepth) { - if(m_filtering) { - filterDepthFrame(); - } - - if(m_detectingMarkers) { - if(m_gotColor && m_gotDepth){ - gotFrame = createMatDepthAndColor(); - m_gotColor = m_gotDepth = false; - } - if(gotFrame) { - detectMarkers(); - } - } - else { - m_gotDepth=false; - } - m_gotDepth=false; - } - m_framesLock.unlock(); - - } - usleep(50000); - } + bool done=false; + while(!done){ + if(m_open) { + bool gotFrame = false; + + m_framesLock.lock(); + + //update filtered frame texture + if(m_gotDepth) { + if(m_filtering) { + filterDepthFrame(); + } + + if(m_detectingMarkers) { + if(m_gotColor && m_gotDepth){ + gotFrame = createMatDepthAndColor(); + m_gotColor = m_gotDepth = false; + } + if(gotFrame) { + detectMarkers(); + } + } + else { + m_gotDepth=false; + } + m_gotDepth=false; + } + m_framesLock.unlock(); + + } + usleep(50000); + } } bool DepthCamModule::createMatDepth(){ - if(m_width!=m_procSize.width) { - m_procSize.width=m_width; - m_procSize.height=m_height; - } + if(m_width!=m_procSize.width) { + m_procSize.width=m_width; + m_procSize.height=m_height; + } - m_depthImg = cv::Mat::zeros(m_procSize, CV_16UC1); + m_depthImg = cv::Mat::zeros(m_procSize, CV_16UC1); - for(int j=0; j<m_procSize.height; ++j) { - for(int i=0; i<m_procSize.width; ++i) { - ushort& dPix = m_depthImg.at<ushort>(j,i); + for(int j=0; j<m_procSize.height; ++j) { + for(int i=0; i<m_procSize.width; ++i) { + ushort& dPix = m_depthImg.at<ushort>(j,i); - int coord=j*m_procSize.width+i; + int coord=j*m_procSize.width+i; - openni::DepthPixel depthPix = - ((openni::DepthPixel*) m_depthFrame.getData())[coord]; + openni::DepthPixel depthPix = + ((openni::DepthPixel*) m_depthFrame.getData())[coord]; - dPix = depthPix; - } - } + dPix = depthPix; + } + } - return true; + return true; } bool DepthCamModule::createMatColor(){ - if(m_width!=m_procSize.width) { - m_procSize.width=m_width; - m_procSize.height=m_height; - } + if(m_width!=m_procSize.width) { + m_procSize.width=m_width; + m_procSize.height=m_height; + } - m_rgbImg = cv::Mat::zeros(m_procSize, CV_8UC3); + m_rgbImg = cv::Mat::zeros(m_procSize, CV_8UC3); - for(int j=0; j<m_procSize.height; ++j) { - for(int i=0; i<m_procSize.width; ++i) { - cv::Vec3b& cPix = m_rgbImg.at<cv::Vec3b>(j,i); + for(int j=0; j<m_procSize.height; ++j) { + for(int i=0; i<m_procSize.width; ++i) { + cv::Vec3b& cPix = m_rgbImg.at<cv::Vec3b>(j,i); - int coord=j*m_procSize.width+i; + int coord=j*m_procSize.width+i; - openni::RGB888Pixel colorPix = - ((openni::RGB888Pixel*) m_colorFrame.getData())[coord]; + openni::RGB888Pixel colorPix = + ((openni::RGB888Pixel*) m_colorFrame.getData())[coord]; - cPix[0]=colorPix.b; - cPix[1]=colorPix.g; - cPix[2]=colorPix.r; - } - } + cPix[0]=colorPix.b; + cPix[1]=colorPix.g; + cPix[2]=colorPix.r; + } + } - return true; + return true; } bool DepthCamModule::createMatDepthAndColor(){ - m_resImg = cv::Mat::zeros(m_procSize, CV_8UC3); - m_rgbDepthImg = cv::Mat::zeros(m_procSize, CV_16UC3); - - for(int j=0; j<m_procSize.height; ++j) { - for(int i=0; i<m_procSize.width; ++i) { - cv::Vec3b cPix = m_rgbImg.at<cv::Vec3b>(j,i); - ushort dPix = m_depthImg.at<ushort>(j,i); - cv::Vec3b &gl_pix = m_resImg.at<cv::Vec3b>(j,i); - cv::Vec3w 
&color_depth_pix = m_rgbDepthImg.at<cv::Vec3w>(j,i); - - color_depth_pix[0] = cPix[0]*255; - color_depth_pix[1] = cPix[1]*255; - color_depth_pix[2] = dPix; - - gl_pix[0] = 0; - gl_pix[1] = 0; - gl_pix[2] = 0; - } - } - - m_finalCombin = cv::Mat::zeros(m_procSize, CV_8UC3); - m_rgbDepthImg.convertTo(m_finalCombin, CV_8U, 255.0 / (5000)); - - return true; + m_resImg = cv::Mat::zeros(m_procSize, CV_8UC3); + m_rgbDepthImg = cv::Mat::zeros(m_procSize, CV_16UC3); + + for(int j=0; j<m_procSize.height; ++j) { + for(int i=0; i<m_procSize.width; ++i) { + cv::Vec3b cPix = m_rgbImg.at<cv::Vec3b>(j,i); + ushort dPix = m_depthImg.at<ushort>(j,i); + cv::Vec3b &gl_pix = m_resImg.at<cv::Vec3b>(j,i); + cv::Vec3w &color_depth_pix = m_rgbDepthImg.at<cv::Vec3w>(j,i); + + color_depth_pix[0] = cPix[0]*255; + color_depth_pix[1] = cPix[1]*255; + color_depth_pix[2] = dPix; + + gl_pix[0] = 0; + gl_pix[1] = 0; + gl_pix[2] = 0; + } + } + + m_finalCombin = cv::Mat::zeros(m_procSize, CV_8UC3); + m_rgbDepthImg.convertTo(m_finalCombin, CV_8U, 255.0 / (5000)); + + return true; } //----------------------Filtering----------------------------------- void DepthCamModule::setFilter(const std::string& fil) { - m_filter=fil; - if(m_filter.compare("none")==0) { - m_filtering=false; - } - else { - m_filtering=true; - } + m_filter=fil; + if(m_filter.compare("none")==0) { + m_filtering=false; + } + else { + m_filtering=true; + } } void DepthCamModule::setFilterSize(int siz) { - m_framesLock.lock(); - if(siz<m_maxFilterSize && siz>=0) { - m_filterSize=siz; - for(int i=0; i<m_procSize.width*m_procSize.height; ++i) { - for(int f=0; f<m_filterSize; ++f) { - m_prevDepthFrames[f][i]=0; - } - m_sumDepthFrame[i]=0; - } - } - m_framesLock.unlock(); + m_framesLock.lock(); + if(siz<m_maxFilterSize && siz>=0) { + m_filterSize=siz; + for(int i=0; i<m_procSize.width*m_procSize.height; ++i) { + for(int f=0; f<m_filterSize; ++f) { + m_prevDepthFrames[f][i]=0; + } + m_sumDepthFrame[i]=0; + } + } + m_framesLock.unlock(); } void DepthCamModule::setFilterSpat(int spat) { - m_framesLock.lock(); - m_filterSpat=spat; - m_framesLock.unlock(); + m_framesLock.lock(); + m_filterSpat=spat; + m_framesLock.unlock(); } void DepthCamModule::setFilterMCO(float mco) { - m_framesLock.lock(); - m_oefMinCutoff=mco; - m_framesLock.unlock(); + m_framesLock.lock(); + m_oefMinCutoff=mco; + m_framesLock.unlock(); } void DepthCamModule::setFilterBeta(float beta) { - m_framesLock.lock(); - m_oefBeta=beta; - m_framesLock.unlock(); + m_framesLock.lock(); + m_oefBeta=beta; + m_framesLock.unlock(); } void DepthCamModule::filterDepthFrame() { - if(m_filter.compare("lowpass")==0) { - for(int j=0; j<m_procSize.height; ++j) { - for(int i=0; i<m_procSize.width; ++i) { - ushort dPix = m_depthImg.at<ushort>(j,i); - int coord = j*m_procSize.width+i; - m_sumDepthFrame[coord]-= - int(m_prevDepthFrames[m_filterIndex][coord]); - m_prevDepthFrames[m_filterIndex][coord]=(unsigned short)dPix; - m_sumDepthFrame[coord]+= - int(m_prevDepthFrames[m_filterIndex][coord]); - m_filtDepthFrame[coord] = float(m_sumDepthFrame[coord]) - / float(m_filterSize); - } - } - m_filterIndex=(m_filterIndex+1)%m_filterSize; - } - else { - //1€ filter - for(int j=0; j<m_procSize.height; ++j) { - for(int i=0; i<m_procSize.width; ++i) { - ushort dPix = m_depthImg.at<ushort>(j,i); - //spatial filter - if(m_filter.compare("one_euro_spatial")==0) { - float sum=0, count=0; - for(int dy=max<int>(j-m_filterSpat,0); - dy<=min<int>(j+m_filterSpat,m_procSize.height-1); - dy++) { - for(int dx=max<int>(i-m_filterSpat,0); - 
dx<=min<int>(i+m_filterSpat,m_procSize.width-1); - dx++) { - ushort aPix = m_depthImg.at<ushort>(dy,dx); - if(abs(aPix-dPix)<m_contourThreshold) { - sum+=aPix; - count+=1; - } - } - } - dPix = sum/count; - } - - int coord = j*m_procSize.width+i; - float dx=0; - if(m_oefInit) { - dx=(float(dPix)-m_oefXFrame[coord])*m_oefRate; - } - float dalpha = 1.0 / (1.0 - + (1.0/(2*M_PI*m_oefDCutoff)) - / (1.0/m_oefRate)); - float edx = dx*dalpha - + (1-dalpha)*(!m_oefInit?dx:m_oefDXFrame[coord]); - m_oefDXFrame[coord]=edx; - float cutoff = m_oefMinCutoff + m_oefBeta*fabs(edx); - float alpha = 1.0 / (1.0 - + (1.0/(2*M_PI*cutoff)) - / (1.0/m_oefRate)); - - m_oefXFrame[coord] = float(dPix)*alpha - + (1-alpha)*(!m_oefInit?float(dPix):m_oefXFrame[coord]); - m_filtDepthFrame[coord]=m_oefXFrame[coord]; - } - } - if(!m_oefInit) { - m_oefInit=true; - } - } - - map<int, bool>::iterator itUp = m_filtTexUpMap.begin(); - for(; itUp!=m_filtTexUpMap.end(); ++itUp) { - itUp->second=true; - } + if(m_filter.compare("lowpass")==0) { + for(int j=0; j<m_procSize.height; ++j) { + for(int i=0; i<m_procSize.width; ++i) { + ushort dPix = m_depthImg.at<ushort>(j,i); + int coord = j*m_procSize.width+i; + m_sumDepthFrame[coord]-= + int(m_prevDepthFrames[m_filterIndex][coord]); + m_prevDepthFrames[m_filterIndex][coord]=(unsigned short)dPix; + m_sumDepthFrame[coord]+= + int(m_prevDepthFrames[m_filterIndex][coord]); + m_filtDepthFrame[coord] = float(m_sumDepthFrame[coord]) + / float(m_filterSize); + } + } + m_filterIndex=(m_filterIndex+1)%m_filterSize; + } + else { + //1€ filter + for(int j=0; j<m_procSize.height; ++j) { + for(int i=0; i<m_procSize.width; ++i) { + ushort dPix = m_depthImg.at<ushort>(j,i); + //spatial filter + if(m_filter.compare("one_euro_spatial")==0) { + float sum=0, count=0; + for(int dy=max<int>(j-m_filterSpat,0); + dy<=min<int>(j+m_filterSpat,m_procSize.height-1); + dy++) { + for(int dx=max<int>(i-m_filterSpat,0); + dx<=min<int>(i+m_filterSpat,m_procSize.width-1); + dx++) { + ushort aPix = m_depthImg.at<ushort>(dy,dx); + if(abs(aPix-dPix)<m_contourThreshold) { + sum+=aPix; + count+=1; + } + } + } + dPix = sum/count; + } + + int coord = j*m_procSize.width+i; + float dx=0; + if(m_oefInit) { + dx=(float(dPix)-m_oefXFrame[coord])*m_oefRate; + } + float dalpha = 1.0 / (1.0 + + (1.0/(2*M_PI*m_oefDCutoff)) + / (1.0/m_oefRate)); + float edx = dx*dalpha + + (1-dalpha)*(!m_oefInit?dx:m_oefDXFrame[coord]); + m_oefDXFrame[coord]=edx; + float cutoff = m_oefMinCutoff + m_oefBeta*fabs(edx); + float alpha = 1.0 / (1.0 + + (1.0/(2*M_PI*cutoff)) + / (1.0/m_oefRate)); + + m_oefXFrame[coord] = float(dPix)*alpha + + (1-alpha)*(!m_oefInit?float(dPix):m_oefXFrame[coord]); + m_filtDepthFrame[coord]=m_oefXFrame[coord]; + } + } + if(!m_oefInit) { + m_oefInit=true; + } + } + + map<int, bool>::iterator itUp = m_filtTexUpMap.begin(); + for(; itUp!=m_filtTexUpMap.end(); ++itUp) { + itUp->second=true; + } } @@ -1381,267 +1380,267 @@ void DepthCamModule::filterDepthFrame() { void DepthCamModule::detectMarkers() { - vector<vector<cv::Point2f> > corners; - vector<int> ids; + vector<vector<cv::Point2f> > corners; + vector<int> ids; - m_currentTracker->track(m_rgbImg, corners, ids); + m_currentTracker->track(m_rgbImg, corners, ids); - filterMarkers(corners, ids); + filterMarkers(corners, ids); - //if got any, do filling if needed - if(!m_aliveMarkers.empty() && m_fillingMarkers){ - globalFunction(m_finalCombin); - } + //if got any, do filling if needed + if(!m_aliveMarkers.empty() && m_fillingMarkers){ + globalFunction(m_finalCombin); + } - 
m_sentDetectedMarkers = m_detectedMarkers; - m_sentRemovedMarkers = m_removedMarkers; - m_removedMarkers.clear(); + m_sentDetectedMarkers = m_detectedMarkers; + m_sentRemovedMarkers = m_removedMarkers; + m_removedMarkers.clear(); - m_markersFrame = m_resImg.data; - map<int, bool>::iterator itUp = m_markTexUpMap.begin(); - for(; itUp!=m_markTexUpMap.end(); ++itUp) { - itUp->second=true; - } + m_markersFrame = m_resImg.data; + map<int, bool>::iterator itUp = m_markTexUpMap.begin(); + for(; itUp!=m_markTexUpMap.end(); ++itUp) { + itUp->second=true; + } - if(m_debuggingMarkers){ - showAliveMarkers(); - } + if(m_debuggingMarkers){ + showAliveMarkers(); + } } void DepthCamModule::draw_point(cv::Mat& img, cv::Point2f fp){ - return; + return; } void DepthCamModule::filterMarkers(vector<vector<cv::Point2f> >& cor, - vector<int>& ids) { - - //go through all detected markers, for each - m_detectedMarkers.clear(); - for(unsigned int m=0; m<ids.size(); ++m) { - if(ids[m]<10) { - //create a marker_time - m_detectedMarkers.push_back(s_markerAndTime()); - m_detectedMarkers.back().id=ids[m]; - m_detectedMarkers.back().corners.assign(cor[m].begin(),cor[m].end()); - m_detectedMarkers.back().corners3D.assign(4, glm::vec3(0)); - m_detectedMarkers.back().detected=true; - cv::Point pt = calculationCenterOfMarker(m_detectedMarkers.back()); - m_detectedMarkers.back().x=pt.x; - m_detectedMarkers.back().y=pt.y; - - //add/update detection time in map - time(&m_detectedMarkers.back().detection); - m_aliveMarkers[ids[m]] = m_detectedMarkers.back(); - } - } - - - //go through map elements, remove not detected and old ones - time_t now; - time(&now); - map<int, s_markerAndTime>::iterator itAl = m_aliveMarkers.begin(); - for(; itAl!=m_aliveMarkers.end();) { - if(difftime(now, itAl->second.detection) > 1) { - m_removedMarkers.push_back(itAl->second); - itAl = m_aliveMarkers.erase(itAl); - } - else { - itAl++; - } - } + vector<int>& ids) { + + //go through all detected markers, for each + m_detectedMarkers.clear(); + for(unsigned int m=0; m<ids.size(); ++m) { + if(ids[m]<10) { + //create a marker_time + m_detectedMarkers.push_back(s_markerAndTime()); + m_detectedMarkers.back().id=ids[m]; + m_detectedMarkers.back().corners.assign(cor[m].begin(),cor[m].end()); + m_detectedMarkers.back().corners3D.assign(4, glm::vec3(0)); + m_detectedMarkers.back().detected=true; + cv::Point pt = calculationCenterOfMarker(m_detectedMarkers.back()); + m_detectedMarkers.back().x=pt.x; + m_detectedMarkers.back().y=pt.y; + + //add/update detection time in map + time(&m_detectedMarkers.back().detection); + m_aliveMarkers[ids[m]] = m_detectedMarkers.back(); + } + } + + + //go through map elements, remove not detected and old ones + time_t now; + time(&now); + map<int, s_markerAndTime>::iterator itAl = m_aliveMarkers.begin(); + for(; itAl!=m_aliveMarkers.end();) { + if(difftime(now, itAl->second.detection) > 1) { + m_removedMarkers.push_back(itAl->second); + itAl = m_aliveMarkers.erase(itAl); + } + else { + itAl++; + } + } } vector<cv::Point2f> DepthCamModule::newSetOfPoint(vector<cv::Point2f> points, - bool readyToCoop){ - vector<cv::Point2f> setOfPoint; - int cmp; + bool readyToCoop){ + vector<cv::Point2f> setOfPoint; + int cmp; - if(!readyToCoop) - return points; + if(!readyToCoop) + return points; - for(int i = 0; i < (int)points.size(); i++){ - cmp = jointly(points, points[i]); - if(cmp == -1 || cmp > i){ - setOfPoint.push_back(points[i]); - } - } + for(int i = 0; i < (int)points.size(); i++){ + cmp = jointly(points, points[i]); + if(cmp == -1 
|| cmp > i){ + setOfPoint.push_back(points[i]); + } + } - return setOfPoint; + return setOfPoint; } int DepthCamModule::jointly(vector<cv::Point2f> points, cv::Point2f pt){ - int cpt = 0; + int cpt = 0; - for(vector<cv::Point2f>::iterator it=points.begin(); it!=points.end();it++){ - if(it->x - pt.x > -130 && it->x - pt.x < 130 && it->x != pt.x) - return cpt; + for(vector<cv::Point2f>::iterator it=points.begin(); it!=points.end();it++){ + if(it->x - pt.x > -130 && it->x - pt.x < 130 && it->x != pt.x) + return cpt; - cpt++; - } + cpt++; + } - return -1; + return -1; } void DepthCamModule::delimitation(cv::Mat& img) { - // Keep a copy around - //cv::Mat img_orig = img.clone(); - // Rectangle to be used with Subdiv2D - cv::Rect rect(0, 0, img.size().width, img.size().height); - // Create an instance of Subdiv2D - cv::Subdiv2D subdiv(rect); - // Create a vector of points. - vector<cv::Point2f> points; + // Keep a copy around + //cv::Mat img_orig = img.clone(); + // Rectangle to be used with Subdiv2D + cv::Rect rect(0, 0, img.size().width, img.size().height); + // Create an instance of Subdiv2D + cv::Subdiv2D subdiv(rect); + // Create a vector of points. + vector<cv::Point2f> points; - map<int, s_markerAndTime>::iterator it = m_aliveMarkers.begin(); - for(; it!=m_aliveMarkers.end(); it++){ - points.push_back(cv::Point2f(it->second.x, it->second.y)); - } + map<int, s_markerAndTime>::iterator it = m_aliveMarkers.begin(); + for(; it!=m_aliveMarkers.end(); it++){ + points.push_back(cv::Point2f(it->second.x, it->second.y)); + } - vector<cv::Point2f> setOfPoint = newSetOfPoint(points, false); + vector<cv::Point2f> setOfPoint = newSetOfPoint(points, false); - // Insert points into subdiv - subdiv.insert(setOfPoint); + // Insert points into subdiv + subdiv.insert(setOfPoint); - // Draw delaunay triangles - drawDelaunay(img, subdiv, cv::Scalar(13,240,231) ); + // Draw delaunay triangles + drawDelaunay(img, subdiv, cv::Scalar(13,240,231) ); - // Draw points - for(vector<cv::Point2f>::iterator it = points.begin(); - it != points.end(); it++){ - draw_point(img, *it); - } + // Draw points + for(vector<cv::Point2f>::iterator it = points.begin(); + it != points.end(); it++){ + draw_point(img, *it); + } - // Draw Voronoi diagram - drawVoronoi(m_resImg, subdiv); + // Draw Voronoi diagram + drawVoronoi(m_resImg, subdiv); - return; + return; } cv::Point DepthCamModule::calculationCenterOfMarker(s_markerAndTime& marker){ - return cv::Point((marker.corners[0].x - + marker.corners[1].x - + marker.corners[2].x - + marker.corners[3].x)/4.0, - (marker.corners[0].y - + marker.corners[1].y - + marker.corners[2].y - + marker.corners[3].y)/4.0); + return cv::Point((marker.corners[0].x + + marker.corners[1].x + + marker.corners[2].x + + marker.corners[3].x)/4.0, + (marker.corners[0].y + + marker.corners[1].y + + marker.corners[2].y + + marker.corners[3].y)/4.0); } void DepthCamModule::globalFunction(cv::Mat &mat) { - cv::Size asize(mat.size().width, mat.size().height); - cv::Size maskSize(mat.size().width+2, mat.size().height+2); - int width = 0, height = 0; - - if(!m_notVoronoi){ - delimitation(mat); - } - else { - cv::Mat mask = cv::Mat::zeros(maskSize, CV_8UC1); - map<int, s_markerAndTime>::iterator it = m_aliveMarkers.begin(); - for(; it!= m_aliveMarkers.end(); it++) { - //FIXME values ??? 
- cv::floodFill(mat, mask, cv::Point(it->second.x,it->second.y), - cv::Scalar(0), nullptr, cv::Scalar(255,255,5), - cv::Scalar(255,255,5), - 4 | cv::FLOODFILL_MASK_ONLY - | ((m_depthID+it->second.id) << 8 )); - } - - for(int j=1; j<maskSize.height-1; ++j){ - for(int i=1; i<maskSize.width-1; ++i){ - cv::Vec3b &res_pix = m_resImg.at<cv::Vec3b>(j-1, i-1); - uchar mask_pix = mask.at<uchar>(j, i); - res_pix[0] = mask_pix; - res_pix[1] = mask_pix; - res_pix[2] = mask_pix; - } - } - } - return ; + cv::Size asize(mat.size().width, mat.size().height); + cv::Size maskSize(mat.size().width+2, mat.size().height+2); + int width = 0, height = 0; + + if(!m_notVoronoi){ + delimitation(mat); + } + else { + cv::Mat mask = cv::Mat::zeros(maskSize, CV_8UC1); + map<int, s_markerAndTime>::iterator it = m_aliveMarkers.begin(); + for(; it!= m_aliveMarkers.end(); it++) { + //FIXME values ??? + cv::floodFill(mat, mask, cv::Point(it->second.x,it->second.y), + cv::Scalar(0), nullptr, cv::Scalar(255,255,5), + cv::Scalar(255,255,5), + 4 | cv::FLOODFILL_MASK_ONLY + | ((m_depthID+it->second.id) << 8 )); + } + + for(int j=1; j<maskSize.height-1; ++j){ + for(int i=1; i<maskSize.width-1; ++i){ + cv::Vec3b &res_pix = m_resImg.at<cv::Vec3b>(j-1, i-1); + uchar mask_pix = mask.at<uchar>(j, i); + res_pix[0] = mask_pix; + res_pix[1] = mask_pix; + res_pix[2] = mask_pix; + } + } + } + return ; } void DepthCamModule::drawVoronoi( cv::Mat& img, cv::Subdiv2D& subdiv) { - vector<vector<cv::Point2f> > facets; - vector<cv::Point2f> centers; - subdiv.getVoronoiFacetList(vector<int>(), facets, centers); - - vector<cv::Point> ifacet; - vector<vector<cv::Point> > pts(1); - - map<int, s_markerAndTime>::iterator it = m_aliveMarkers.begin(); - for(unsigned int f=0; - f<facets.size() && it!=m_aliveMarkers.end(); f++, it++) { - ifacet.resize(facets[f].size()); - for( size_t j = 0; j < facets[f].size(); j++ ) { - ifacet[j] = facets[f][j]; - } - cv::Scalar color((it->second.id+m_depthID), - (it->second.id+m_depthID), - (it->second.id+m_depthID)); - - cv::fillConvexPoly(img, ifacet, color, 8, 0); - - pts[0] = ifacet; - cv::polylines(img, pts, true, cv::Scalar(0,0,255), 1, LINE_AA, 0); - draw_point(img, centers[f]); - } + vector<vector<cv::Point2f> > facets; + vector<cv::Point2f> centers; + subdiv.getVoronoiFacetList(vector<int>(), facets, centers); + + vector<cv::Point> ifacet; + vector<vector<cv::Point> > pts(1); + + map<int, s_markerAndTime>::iterator it = m_aliveMarkers.begin(); + for(unsigned int f=0; + f<facets.size() && it!=m_aliveMarkers.end(); f++, it++) { + ifacet.resize(facets[f].size()); + for( size_t j = 0; j < facets[f].size(); j++ ) { + ifacet[j] = facets[f][j]; + } + cv::Scalar color((it->second.id+m_depthID), + (it->second.id+m_depthID), + (it->second.id+m_depthID)); + + cv::fillConvexPoly(img, ifacet, color, 8, 0); + + pts[0] = ifacet; + cv::polylines(img, pts, true, cv::Scalar(0,0,255), 1, LINE_AA, 0); + draw_point(img, centers[f]); + } } void DepthCamModule::drawDelaunay(cv::Mat& img, cv::Subdiv2D& subdiv, - cv::Scalar delaunayColor ){ - - vector<cv::Vec6f> triangleList; - subdiv.getTriangleList(triangleList); - vector<cv::Point> pt(3); - - cv::Rect rect(0,0, img.size().width, img.size().height); - for(int i = 0; i < (int)triangleList.size(); i++){ - cv::Vec6f t = triangleList[i]; - pt[0] = cv::Point(cvRound(t[0]), cvRound(t[1])); - pt[1] = cv::Point(cvRound(t[2]), cvRound(t[3])); - pt[2] = cv::Point(cvRound(t[4]), cvRound(t[5])); - - // Draw rectangles completely inside the image. 
- if(rect.contains(pt[0]) &&rect.contains(pt[1]) &&rect.contains(pt[2])){ - cv::line(img, pt[0], pt[1], delaunayColor, 1, LINE_AA, 0); - cv::line(img, pt[1], pt[2], delaunayColor, 1, LINE_AA, 0); - cv::line(img, pt[2], pt[0], delaunayColor, 1, LINE_AA, 0); - } - } + cv::Scalar delaunayColor ){ + + vector<cv::Vec6f> triangleList; + subdiv.getTriangleList(triangleList); + vector<cv::Point> pt(3); + + cv::Rect rect(0,0, img.size().width, img.size().height); + for(int i = 0; i < (int)triangleList.size(); i++){ + cv::Vec6f t = triangleList[i]; + pt[0] = cv::Point(cvRound(t[0]), cvRound(t[1])); + pt[1] = cv::Point(cvRound(t[2]), cvRound(t[3])); + pt[2] = cv::Point(cvRound(t[4]), cvRound(t[5])); + + // Draw rectangles completely inside the image. + if(rect.contains(pt[0]) &&rect.contains(pt[1]) &&rect.contains(pt[2])){ + cv::line(img, pt[0], pt[1], delaunayColor, 1, LINE_AA, 0); + cv::line(img, pt[1], pt[2], delaunayColor, 1, LINE_AA, 0); + cv::line(img, pt[2], pt[0], delaunayColor, 1, LINE_AA, 0); + } + } } void DepthCamModule::debugMarkers(const bool& val) { - if(!val && m_debuggingMarkers) { - destroyWindow("markers"); - destroyWindow("filled_markers"); - waitKey(10); - } - m_debuggingMarkers=val; + if(!val && m_debuggingMarkers) { + destroyWindow("markers"); + destroyWindow("filled_markers"); + waitKey(10); + } + m_debuggingMarkers=val; } void DepthCamModule::showAliveMarkers() { - cv::Mat markImg(m_rgbImg); - map<int, s_markerAndTime>::iterator itM = m_aliveMarkers.begin(); - for(; itM!=m_aliveMarkers.end(); ++itM) { - ostringstream oss; - oss<<itM->second.id; - cv::putText(markImg, oss.str().c_str(), - cv::Point2f(itM->second.x, itM->second.y), - cv::FONT_HERSHEY_SIMPLEX, 2, cv::Scalar(255,255,255)); - for(int c=0; c<4; c++) { - cv::line(markImg, - itM->second.corners[c], itM->second.corners[(c+1)%4], - cv::Scalar(255), 2); - } - } - imshow("markers", markImg); - imshow("filled_markers", m_resImg*50); - waitKey(10); + cv::Mat markImg(m_rgbImg); + map<int, s_markerAndTime>::iterator itM = m_aliveMarkers.begin(); + for(; itM!=m_aliveMarkers.end(); ++itM) { + ostringstream oss; + oss<<itM->second.id; + cv::putText(markImg, oss.str().c_str(), + cv::Point2f(itM->second.x, itM->second.y), + cv::FONT_HERSHEY_SIMPLEX, 2, cv::Scalar(255,255,255)); + for(int c=0; c<4; c++) { + cv::line(markImg, + itM->second.corners[c], itM->second.corners[(c+1)%4], + cv::Scalar(255), 2); + } + } + imshow("markers", markImg); + imshow("filled_markers", m_resImg*50); + waitKey(10); } void ArucoTracker::init() { @@ -1650,166 +1649,166 @@ void ArucoTracker::init() { } void ArucoTracker::track(cv::Mat& img, - std::vector<std::vector<cv::Point2f> >& corners, - std::vector<int>& ids) { - cv::aruco::detectMarkers(img, - cv::aruco::getPredefinedDictionary( - cv::aruco::DICT_ARUCO_ORIGINAL), - corners, - ids); + std::vector<std::vector<cv::Point2f> >& corners, + std::vector<int>& ids) { + cv::aruco::detectMarkers(img, + cv::aruco::getPredefinedDictionary( + cv::aruco::DICT_ARUCO_ORIGINAL), + corners, + ids); } ImageTracker::ImageTracker():Tracker() { - //m_detector = cv::ORB::create(); - //m_detector = cv::AKAZE::create(); - m_detector = cv::BRISK::create(); - m_matcher = cv::BFMatcher::create(NORM_HAMMING); + //m_detector = cv::ORB::create(); + //m_detector = cv::AKAZE::create(); + m_detector = cv::BRISK::create(); + m_matcher = cv::BFMatcher::create(NORM_HAMMING); } void ImageTracker::init() { - m_descriptors.clear(); - m_keypoints.clear(); - m_cols.clear(); - m_rows.clear(); - - //read images from folder and compute 
descriptors for each - dirent** filesList; - int nbFiles = fl_filename_list(m_imgFolder.c_str(), - &filesList, fl_alphasort); - for(int f=0; f<nbFiles; ++f) { - if(filesList[f]->d_name[0]!='.') { - string fullName = m_imgFolder+string(filesList[f]->d_name); - cv::Mat img = cv::imread(fullName); - if(img.data) { - std::vector<KeyPoint> keypoints; - cv::Mat descriptors; - m_detector->detectAndCompute(img,Mat(),keypoints,descriptors); - m_descriptors.push_back(descriptors); - m_keypoints.push_back(keypoints); - m_cols.push_back(img.cols); - m_rows.push_back(img.rows); - - /* - drawKeypoints(img, keypoints, img); - imshow("marker", img); - waitKey(10); - */ - } - } - } + m_descriptors.clear(); + m_keypoints.clear(); + m_cols.clear(); + m_rows.clear(); + + //read images from folder and compute descriptors for each + dirent** filesList; + int nbFiles = fl_filename_list(m_imgFolder.c_str(), + &filesList, fl_alphasort); + for(int f=0; f<nbFiles; ++f) { + if(filesList[f]->d_name[0]!='.') { + string fullName = m_imgFolder+string(filesList[f]->d_name); + cv::Mat img = cv::imread(fullName); + if(img.data) { + std::vector<KeyPoint> keypoints; + cv::Mat descriptors; + m_detector->detectAndCompute(img,Mat(),keypoints,descriptors); + m_descriptors.push_back(descriptors); + m_keypoints.push_back(keypoints); + m_cols.push_back(img.cols); + m_rows.push_back(img.rows); + + /* + drawKeypoints(img, keypoints, img); + imshow("marker", img); + waitKey(10); + */ + } + } + } } void ImageTracker::track(cv::Mat& img, - std::vector<std::vector<cv::Point2f> >& corners, - std::vector<int>& ids) { - - unsigned int minMatches=10; - float matchRatio=0.8; - - //get descriptors for the captured image - std::vector<KeyPoint> keypoints; - cv::Mat descriptors; - m_detector->detectAndCompute(img, Mat(), keypoints, descriptors); - //drawKeypoints(img, keypoints, img); - - - - //try to match with each image in the set - for(unsigned int m=0; m<m_descriptors.size(); m++) { - vector<cv::DMatch> matches; - vector<cv::DMatch> matches2; - vector<vector<cv::DMatch> > nn_matches; - try { - //m_matcher->match(m_descriptors[m], descriptors, matches); - //m_matcher->match(descriptors, m_descriptors[m], matches2); - m_matcher->knnMatch(m_descriptors[m], descriptors, nn_matches, 2); - } - catch(Exception& e) { - cout<<e.what()<<endl; - } - - /* - vector<DMatch> good_matches; - for(int i=0; i<matches.size(); i++) { - if(matches2[i].trainIdx == matches[matches[2].trainIdx].queryIdx) { - good_matches.push_back(matches[i]); - } - } - */ - vector<DMatch> good_matches; - for(unsigned int i=0; i<nn_matches.size(); i++) { - if(nn_matches[i].size()>=2) { - if(nn_matches[i][0].distance - <= matchRatio*nn_matches[i][1].distance) { - good_matches.push_back(nn_matches[i][0]); - } - } - } - - - /* - double max_dist = 0; double min_dist = 100; - //for(int i=0; i<m_descriptors[m].rows; i++) { - for(int i=0; i<matches.size(); i++) { - double dist = matches[i].distance; - if(dist < min_dist) {min_dist = dist;} - if(dist > max_dist) {max_dist = dist;} - } - - vector<DMatch> good_matches; - //for(int i=0; i<m_descriptors[m].rows; i++) { - for(int i=0; i<matches.size(); i++) { - if(matches[i].distance < 3*min_dist) { - good_matches.push_back(matches[i]); - } - }*/ - if(good_matches.size()>minMatches) { - vector<Point2f> obj; - vector<Point2f> scene; - - for(unsigned int i=0; i<good_matches.size(); i++) { - obj.push_back(m_keypoints[m][good_matches[i].queryIdx].pt); - scene.push_back(keypoints[good_matches[i].trainIdx].pt); - } - - Mat H; - - try { - H = 
findHomography(obj, scene, RANSAC); - } - catch(Exception& e) { - cout<<e.what()<<endl; - } - - if(!H.empty()) { - vector<Point2f> objCorners(4); - objCorners[0] = Point(0, 0); - objCorners[1] = Point(m_cols[m], 0); - objCorners[2] = Point(m_cols[m], m_rows[m]); - objCorners[3] = Point(0, m_rows[m]); - - vector<Point2f> sceneCorners(4); - perspectiveTransform(objCorners, sceneCorners, H); - bool valid=true; - for(int c=0; c<4; c++) { - if(sceneCorners[c].x<0 || sceneCorners[c].y<0 - || sceneCorners[c].x>640 - || sceneCorners[c].y>480) { - valid=false; - } - } - - if(valid) { - ids.push_back(m); - corners.push_back(sceneCorners); - } - } - } - } - - } + std::vector<std::vector<cv::Point2f> >& corners, + std::vector<int>& ids) { + + unsigned int minMatches=10; + float matchRatio=0.8; + + //get descriptors for the captured image + std::vector<KeyPoint> keypoints; + cv::Mat descriptors; + m_detector->detectAndCompute(img, Mat(), keypoints, descriptors); + //drawKeypoints(img, keypoints, img); + + + + //try to match with each image in the set + for(unsigned int m=0; m<m_descriptors.size(); m++) { + vector<cv::DMatch> matches; + vector<cv::DMatch> matches2; + vector<vector<cv::DMatch> > nn_matches; + try { + //m_matcher->match(m_descriptors[m], descriptors, matches); + //m_matcher->match(descriptors, m_descriptors[m], matches2); + m_matcher->knnMatch(m_descriptors[m], descriptors, nn_matches, 2); + } + catch(Exception& e) { + cout<<e.what()<<endl; + } + + /* + vector<DMatch> good_matches; + for(int i=0; i<matches.size(); i++) { + if(matches2[i].trainIdx == matches[matches[2].trainIdx].queryIdx) { + good_matches.push_back(matches[i]); + } + } + */ + vector<DMatch> good_matches; + for(unsigned int i=0; i<nn_matches.size(); i++) { + if(nn_matches[i].size()>=2) { + if(nn_matches[i][0].distance + <= matchRatio*nn_matches[i][1].distance) { + good_matches.push_back(nn_matches[i][0]); + } + } + } + + + /* + double max_dist = 0; double min_dist = 100; + //for(int i=0; i<m_descriptors[m].rows; i++) { + for(int i=0; i<matches.size(); i++) { + double dist = matches[i].distance; + if(dist < min_dist) {min_dist = dist;} + if(dist > max_dist) {max_dist = dist;} + } + + vector<DMatch> good_matches; + //for(int i=0; i<m_descriptors[m].rows; i++) { + for(int i=0; i<matches.size(); i++) { + if(matches[i].distance < 3*min_dist) { + good_matches.push_back(matches[i]); + } + }*/ + if(good_matches.size()>minMatches) { + vector<Point2f> obj; + vector<Point2f> scene; + + for(unsigned int i=0; i<good_matches.size(); i++) { + obj.push_back(m_keypoints[m][good_matches[i].queryIdx].pt); + scene.push_back(keypoints[good_matches[i].trainIdx].pt); + } + + Mat H; + + try { + H = findHomography(obj, scene, RANSAC); + } + catch(Exception& e) { + cout<<e.what()<<endl; + } + + if(!H.empty()) { + vector<Point2f> objCorners(4); + objCorners[0] = Point(0, 0); + objCorners[1] = Point(m_cols[m], 0); + objCorners[2] = Point(m_cols[m], m_rows[m]); + objCorners[3] = Point(0, m_rows[m]); + + vector<Point2f> sceneCorners(4); + perspectiveTransform(objCorners, sceneCorners, H); + bool valid=true; + for(int c=0; c<4; c++) { + if(sceneCorners[c].x<0 || sceneCorners[c].y<0 + || sceneCorners[c].x>640 + || sceneCorners[c].y>480) { + valid=false; + } + } + + if(valid) { + ids.push_back(m); + corners.push_back(sceneCorners); + } + } + } + } + + } diff --git a/src/modules/DepthShapeModule.cpp b/src/modules/DepthShapeModule.cpp index 3f9438ea4852b33c6210db3fd814cc2a899cf63e..1c9edbc7a98b63f66c06274ea851f7402489b8d2 100644 --- 
a/src/modules/DepthShapeModule.cpp +++ b/src/modules/DepthShapeModule.cpp @@ -59,7 +59,7 @@ void DepthShapeModule::updateModelMatrix() { void DepthShapeModule::setSpace(SpaceModule* space) { DepthModule::setSpace(space); - m_space->getGeom(m_shapesMap[m_currentShape])->registerModule(this); + setShape(m_currentShape); } void DepthShapeModule::setShape(const std::string& shape) { diff --git a/src/modules/ModelModule.cpp b/src/modules/ModelModule.cpp index 6b3f746e25393378d4d0b0b2f66e742000567989..13a0d96d4ff9cf85fa8b0a8e466b1ebc4bf38906 100644 --- a/src/modules/ModelModule.cpp +++ b/src/modules/ModelModule.cpp @@ -51,6 +51,7 @@ ModelModule::ModelModule(): GeomModule() { m_modelGeom = new Geometry(); Reveal::getInstance()->registerGeom(m_modelGeom); m_modelGeom->registerModule(this); + m_modelGeom->setIsModel(true); m_coordImage = new float[m_texWidth*m_texHeight*2]; } diff --git a/src/modules/ProjectorModule.cpp b/src/modules/ProjectorModule.cpp index 50c43f3cf54be28b05222079a3b1505f681c1548..1fe803ffa92cff08e8b33e9354256e8a37513afb 100644 --- a/src/modules/ProjectorModule.cpp +++ b/src/modules/ProjectorModule.cpp @@ -244,6 +244,7 @@ void ProjectorModule::draw() { glUniformMatrix4fv(m_uniforms[Reveal::SLICEPROG][Reveal::VIEWMAT], 1, GL_FALSE, value_ptr(m_transViewMat)); const vector<Geometry*>& depthGeoms = m_space->getGeoms(); + vector<Geometry*>::const_iterator itDGeom=depthGeoms.begin(); for(; itDGeom!=depthGeoms.end(); ++itDGeom) { (*itDGeom)->draw(m_contextHandlerID, @@ -252,7 +253,7 @@ void ProjectorModule::draw() { m_visibilityMask); } - //select to texture for 3D models only (GEOM_MODEL) + //select to texture for 3D models only glBindFramebuffer(GL_FRAMEBUFFER, m_selectFBO); glViewport(0, 0, m_rttWidth, m_rttHeight); glEnable(GL_DEPTH_TEST); @@ -279,7 +280,7 @@ void ProjectorModule::draw() { glBindTexture(GL_TEXTURE_2D, m_sliceRttTex); const vector<Geometry*>& sceneGeoms = Reveal::getInstance()->getGeoms(); for(auto& geom : sceneGeoms) { - if(geom->getGeomID()>=Reveal::GEOM_MODEL) { + if(geom->getIsModel()) { geom->draw(m_contextHandlerID, Reveal::SELECTPROG, m_uniforms[Reveal::SELECTPROG], @@ -917,6 +918,8 @@ void ProjectorModule::setModelMat(const glm::mat4& mat) { void ProjectorModule::updateViewProjMatrix() { m_transViewMat = m_modelMat * inverse(m_viewMat); m_viewProjMat = m_projMat * inverse(m_transViewMat); + //m_transViewMat = m_viewMat * m_modelMat; + //m_viewProjMat = m_projMat * m_transViewMat; } void ProjectorModule::setModuleName(const string& name) { diff --git a/src/modules/RevealedPathModule.cpp b/src/modules/RevealedPathModule.cpp index b78eed064d47ec589ba9f8536da6ff251f14ba28..738ffbff78bfbe1fceec7ba6e7f6102229c031d2 100644 --- a/src/modules/RevealedPathModule.cpp +++ b/src/modules/RevealedPathModule.cpp @@ -25,6 +25,7 @@ #include <iostream> #include <glm/gtx/quaternion.hpp> +#include <glm/gtx/euler_angles.hpp> #include "../Reveal.hpp" @@ -70,7 +71,6 @@ RevealedPathModule::RevealedPathModule(): RevealedModule() { addRevealedOutputAttributes(); m_attributesMap["inside_structure"]->setStrings(vector<string>(1,"along_z")); - m_attributesMap["surface_thickness"]->setFloats(vector<float>(1,0.05)); m_attributesMap["thickness"]->setFloats(vector<float>(1,100)); m_attributesMap["edges"]->setInts(vector<int>(1,8)); @@ -82,6 +82,7 @@ RevealedPathModule::RevealedPathModule(): RevealedModule() { m_shapeGeom=Reveal::GEOM_PATH; m_pathInitialised=2; + m_modelScale = vec3(1.0,1.0,1.0); } RevealedPathModule::~RevealedPathModule() { @@ -197,6 +198,7 @@ void 
RevealedPathModule::refreshGeometry() { unsigned int nexC=0; for(unsigned int p=0; p<m_points.size(); ++p) { glm::mat4 rot=glm::mat4(1.0); + glm::mat4 rotSub=glm::mat4(1.0); //find the corresponding component if(m_points.size()<3) { @@ -209,34 +211,56 @@ void RevealedPathModule::refreshGeometry() { } glm::vec3 dir(0.0); + glm::vec3 dirSub(0.0); if(p>0) { - dir = glm::vec3(m_points[p]->editPos() + // Direction for sub shape only uses current segment direction + dirSub = glm::vec3(m_points[p]->editPos() - m_points[int(p)-1]->editPos()); - glm::vec3 dirN = normalize(dir); - //rot = toMat4(rotation(dirZ, dirN)); - glm::vec3 markR(1.0,0.0,0.0); - glm::vec3 markUp = - glm::normalize(glm::cross(markR, dirN)); - rot = glm::mat4x4( //r,up,n - markR[0], markR[1], markR[2], 0.0, - markUp[0], markUp[1], markUp[2],0.0, - dirN[0], dirN[1], dirN[2], 0.0, - 0.0, 0.0, 0.0, 1.0 - ); + // Direction for vertices adapts to next segment + if(p+1==m_points.size()) { + dir = glm::vec3(m_points[p]->editPos() + - m_points[int(p)-1]->editPos()); + } + else { + dir = (glm::vec3(m_points[p]->editPos() + - m_points[int(p)-1]->editPos()) + + glm::vec3(m_points[p+1]->editPos() + - m_points[p]->editPos())) + /2.0f; + } + rot = toMat4(rotation(dirZ, normalize(dir))); + rotSub = toMat4(rotation(dirZ, normalize(dirSub))); + /* + glm::vec3 markR(1.0,0.0,0.0); + glm::vec3 markUp = + glm::normalize(glm::cross(markR, dirN)); + rot = glm::mat4x4( //r,up,n + markR[0], markR[1], markR[2], 0.0, + markUp[0], markUp[1], markUp[2],0.0, + dirN[0], dirN[1], dirN[2], 0.0, + 0.0, 0.0, 0.0, 1.0 + ); + */ + /* + float rx, ry, rz; + glm::extractEulerAngleXYZ(rot, rx, ry, rz); + rot = glm::eulerAngleXYZ(rx,ry,rz); + */ + m_compoLocalSizes[preC] = glm::vec4(m_thickness, m_thickness, length(dir), 1.0); - //build and store sub inverse matrix - mat4 subMat = scale(vec3(m_thickness, - m_thickness, - length(dir))); - subMat = rot * subMat; - subMat = translate(vec3(m_points[int(p)-1]->editPos() - + vec4(dir.x/2.0,dir.y/2.0,dir.z/2.0,1.0))) - * subMat; - m_subShapeInvMats.push_back(inverse(subMat)); + //build and store sub inverse matrix + mat4 subMat = scale(mat4(1.0),vec3(m_thickness*2.0, + m_thickness*2.0, + length(dirSub))); + subMat = rotSub * subMat; + subMat = translate(mat4(1.0),vec3(m_points[int(p)-1]->editPos() + + vec4(dir.x/2.0,dir.y/2.0,dir.z/2.0,1.0))) + * subMat; + m_subShapeInvMats.push_back(inverse(subMat)); } - else if(m_points.size()>1) { + else { dir = glm::vec3(m_points[p+1]->editPos() - m_points[p]->editPos()); glm::vec3 dirN = normalize(dir); diff --git a/src/shaders/render43.fs b/src/shaders/render43.fs index 197e2123b91b58f0b78743fa510a937c45b8947f..55aa96e93ea358fd125392a6917cebb0d814a6b1 100644 --- a/src/shaders/render43.fs +++ b/src/shaders/render43.fs @@ -408,6 +408,7 @@ void main(void) { case PATH : { vec2 d = abs(vec2(length((ratioWithinBox.xy-vec2(0.5, 0.5))*2.0), (ratioWithinBox.z-0.5)*2.0)) - vec2(1.0,1.0); gradRatio = 1.0 - abs(min(max(d.x,d.y),0.0) + length(max(d,0.0))); + } break; case TUBE : { vec2 d = abs(vec2(length((ratioWithinBox.xz-vec2(0.5, 0.5))*2.0), (ratioWithinBox.y-0.5)*2.0)) - vec2(1.0,1.0); @@ -572,8 +573,8 @@ void main(void) { } } //final test, output if rendered or not, output final color - if(rendered>0 && length(finalColor)>0) { - + //if(rendered>0 && length(finalColor)>0) { + if(rendered>0) { if(revSize>0) { imageAtomicOr(outputTex, ivec2(mod(outOffset,outCols), floor((outOffset))/outCols), rendered); @@ -594,8 +595,11 @@ void main(void) { else { color = vec4(0.0, 0.0, 0.0, 1.0); } + + //color = 
vec4(0.0, 1.0, 0.0, 1.0);//for testing purposes } else { //DISCARD + //color = vec4(1.0, 0.0, 0.0, 1.0);//for testing purposes discard; } };
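A note on the smoothing math carried by this patch: the per-pixel branch in DepthCamModule::filterDepthFrame() is a one-euro filter, with m_oefMinCutoff controlling jitter, m_oefBeta controlling lag, m_oefDCutoff smoothing the derivative estimate, and m_oefRate the assumed sample rate. The sketch below restates the same update equations as a self-contained struct so they are easier to read in isolation; the struct and parameter names are illustrative only and are not part of the patched code.

// Hypothetical standalone restatement of the per-pixel one-euro filter
// applied in DepthCamModule::filterDepthFrame(); not part of this patch.
#include <cmath>

struct OneEuroPixel {
    bool  init   = false;
    float prevX  = 0.f;   // last filtered value      (m_oefXFrame[coord])
    float prevDX = 0.f;   // last filtered derivative (m_oefDXFrame[coord])

    // alpha = 1 / (1 + tau * rate), with tau = 1 / (2*pi*cutoff)
    static float alpha(float cutoff, float rate) {
        return 1.f / (1.f + 1.f / (2.f * float(M_PI) * cutoff) * rate);
    }

    float filter(float x, float rate,
                 float minCutoff, float beta, float dCutoff) {
        // derivative of the signal, smoothed with the fixed dCutoff
        float dx  = init ? (x - prevX) * rate : 0.f;
        float da  = alpha(dCutoff, rate);
        float edx = da * dx + (1.f - da) * (init ? prevDX : dx);
        prevDX = edx;

        // adaptive cutoff: fast motion raises it, trading jitter for lag
        float a = alpha(minCutoff + beta * std::fabs(edx), rate);
        prevX = a * x + (1.f - a) * (init ? prevX : x);
        init  = true;
        return prevX;
    }
};

Called once per depth pixel and frame as filter(depth, m_oefRate, m_oefMinCutoff, m_oefBeta, m_oefDCutoff), this mirrors the values the module writes into m_filtDepthFrame in the plain "one_euro" mode; the "one_euro_spatial" mode additionally averages each pixel over its neighborhood (bounded by m_filterSpat and m_contourThreshold) before the same update.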