void Object::loadObject(std::string filename, int x, int y)//Loads like loadRoom(); { int n = 0; int nr = 0; objsurf.x = x; objsurf.y = y; //Give positions of object in room objx = x; objy = y; Video &d = getVideo(); std::string data[30];//Output from file std::string name[30];//Name of object std::string value[30];//Value of object std::ifstream file;//Filestream file.open(("../script/objects/"+filename).c_str()); while(!file.eof()) { getline(file, data[n]);//Read line from file int strpos = data[n].find("=");//Split data at "=" name[n] = data[n].substr(0, strpos);//Cut name value[n] = data[n].substr(strpos+1,30);//Cut value //Different types of values: if(name[n]=="image") { objimage = d.loadImg("../data/objects/"+value[n]);//Load normal image objsurf.w = objimage->w;//Picture width objsurf.h = objimage->h;//Picture height } else if(name[n]=="inv") { inv = d.loadImg("../data/objects/"+value[n]);//Load inventory image } else if(name[n]=="name") { objname = value[n]; } else if(name[n]=="door") { door = value[n]; } else if(name[n]=="info") { objinfo = value[n]; } else if(name[n]=="oninteract") { } //These were all types n++; } file.close(); }
// set frame rate int Video_setFrameRate(PyImage *self, PyObject *value, void *closure) { // check validity of parameter if (value == NULL || !PyFloat_Check(value)) { PyErr_SetString(PyExc_TypeError, "The value must be a float"); return -1; } // set repeat getVideo(self)->setFrameRate(float(PyFloat_AsDouble(value))); // success return 0; }
// set repeat int Video_setRepeat(PyImage *self, PyObject *value, void *closure) { // check validity of parameter if (value == NULL || !PyLong_Check(value)) { PyErr_SetString(PyExc_TypeError, "The value must be an int"); return -1; } // set repeat getVideo(self)->setRepeat(int(PyLong_AsLong(value))); // success return 0; }
// refresh video PyObject *Video_refresh(PyImage *self, PyObject *args) { Py_buffer buffer; char *mode = NULL; unsigned int format; double ts = -1.0; memset(&buffer, 0, sizeof(buffer)); if (PyArg_ParseTuple(args, "|s*sd:refresh", &buffer, &mode, &ts)) { if (buffer.buf) { // a target buffer is provided, verify its format if (buffer.readonly) { PyErr_SetString(PyExc_TypeError, "Buffers passed in argument must be writable"); } else if (!PyBuffer_IsContiguous(&buffer, 'C')) { PyErr_SetString(PyExc_TypeError, "Buffers passed in argument must be contiguous in memory"); } else if (((intptr_t)buffer.buf & 3) != 0) { PyErr_SetString(PyExc_TypeError, "Buffers passed in argument must be aligned to 4 bytes boundary"); } else { // ready to get the image into our buffer try { if (mode == NULL || !strcmp(mode, "RGBA")) format = GL_RGBA; else if (!strcmp(mode, "BGRA")) format = GL_BGRA; else THRWEXCP(InvalidImageMode,S_OK); if (!self->m_image->loadImage((unsigned int *)buffer.buf, buffer.len, format, ts)) { PyErr_SetString(PyExc_TypeError, "Could not load the buffer, perhaps size is not compatible"); } } catch (Exception & exp) { exp.report(); } } PyBuffer_Release(&buffer); if (PyErr_Occurred()) return NULL; } } else { return NULL; } getVideo(self)->refresh(); return Video_getStatus(self, NULL); }
//according to the request this fuction composes //checks if the request is valid and construct //a response string to reply the client void getResponse(rtspd_t* rtspd, char response[]){ int status = BAD_REQUEST; printf("getting ----------- response\n"); if(strcmp(rtspd->request, "SETUP") == 0){ printf("send setup response\n"); rtspd->sessionid = getSessionId(); status = getVideo(); printf("video file ok: %d\n", status); //initMovie(rtspd->videoName, rtspd->client_fd); rtspd->data = send_frame_data_new(rtspd->videoName, rtspd->client_fd); composeResponse(rtspd, status, response); //setup complete change state to ready strcpy(rtspd->current_state, "READY"); }else if(strcmp(rtspd->request, "PLAY") == 0&& strcmp(rtspd->current_state,"READY")==0){ status = OK; composeResponse(rtspd, status, response); streamVideo(rtspd->data); strcpy(rtspd->current_state, "PLAYING"); }else if(strcmp(rtspd->request, "PLAY")==0 && strcmp(rtspd->current_state, "PLAYING")==0){ status = OK; printf("start playing, %s, %d\n\n", rtspd->videoName,rtspd->client_fd); composeResponse(rtspd, status, response); streamVideo(rtspd->data); strcpy(rtspd->current_state, "PLAYING"); }else if(strcmp(rtspd->request, "PAUSE")==0 && strcmp(rtspd->current_state, "PLAYING")==0){ status = OK; printf("pause video \n"); pauseVideo(rtspd->data); composeResponse(rtspd, status, response); strcpy(rtspd->current_state, "READY"); }else if(strcmp(rtspd->request, "TEARDOWN")==0 &&(strcmp(rtspd->current_state, "PLAYING")==0 || strcmp(rtspd->current_state, "READY")==0)){ status = OK; printf("TEARDOWN video \n"); deleteTimer(rtspd->data); composeResponse(rtspd, status, response); strcpy(rtspd->current_state, "INIT"); }else{ status = NOT_VALID; composeResponse(rtspd, status, response); } }
// set range int Video_setRange(PyImage *self, PyObject *value, void *closure) { // check validity of parameter if (value == NULL || !PySequence_Check(value) || PySequence_Size(value) != 2 || /* XXX - this is incorrect if the sequence is not a list/tuple! */ !PyFloat_Check(PySequence_Fast_GET_ITEM(value, 0)) || !PyFloat_Check(PySequence_Fast_GET_ITEM(value, 1))) { PyErr_SetString(PyExc_TypeError, "The value must be a sequence of 2 float"); return -1; } // set range getVideo(self)->setRange(PyFloat_AsDouble(PySequence_Fast_GET_ITEM(value, 0)), PyFloat_AsDouble(PySequence_Fast_GET_ITEM(value, 1))); // success return 0; }
int main(int argc, char *argv[]) { Video &d = getVideo(); int oldticks = 0; int fps;//Frames per second int test; Room::loadRoom("intro.txt"); while(!quit_engine) { Event::gameEvent(); Layers::drawLayers(); d.renderIt(); fps = 1000/(SDL_GetTicks()-oldticks); //std::cout<<fps<<std::endl; oldticks = SDL_GetTicks(); SDL_Delay(17); } }
// object initialization static int VideoFFmpeg_init(PyObject *pySelf, PyObject *args, PyObject *kwds) { PyImage *self = reinterpret_cast<PyImage*>(pySelf); // parameters - video source // file name or format type for capture (only for Linux: video4linux or dv1394) char * file = NULL; // capture device number short capt = -1; // capture width, only if capt is >= 0 short width = 0; // capture height, only if capt is >= 0 short height = 0; // capture rate, only if capt is >= 0 float rate = 25.f; static const char *kwlist[] = {"file", "capture", "rate", "width", "height", NULL}; // get parameters if (!PyArg_ParseTupleAndKeywords(args, kwds, "s|hfhh", const_cast<char**>(kwlist), &file, &capt, &rate, &width, &height)) return -1; try { // create video object Video_init<VideoFFmpeg>(self); // set thread usage getVideoFFmpeg(self)->initParams(width, height, rate); // open video source Video_open(getVideo(self), file, capt); } catch (Exception & exp) { exp.report(); return -1; } // initialization succeded return 0; }
// Picks the best available video stream once the streams request finishes.
// Scans the format list starting at the user's preferred quality (setting
// "videoFormat", default "22") and takes the first stream whose id matches;
// otherwise reports "No video streams found".
// NOTE(review): this function is truncated in this view — the Failed branch
// body and the closing braces are not visible here.
void YouTubeEnclosureRequest::checkStreams() {
    if (m_streamsRequest->status() == QYouTube::StreamsRequest::Ready) {
        const QVariantList items = m_streamsRequest->result().toList();
        // qMax clamps to 0 when the preferred format is not in VIDEO_FORMATS
        const int start = qMax(0, VIDEO_FORMATS.indexOf(m_settings.value("videoFormat", "22").toString()));

        // walk formats from preferred quality downwards through the list
        for (int i = start; i < VIDEO_FORMATS.size(); i++) {
            foreach (const QVariant &item, items) {
                const QVariantMap stream = item.toMap();

                if (stream.value("id") == VIDEO_FORMATS.at(i)) {
                    // matching stream found: record its URL and continue the request
                    m_result.request = QNetworkRequest(stream.value("url").toString());
                    getVideo();
                    return;
                }
            }
        }

        // no stream matched any known format: finish with an error result
        setErrorString(tr("No video streams found"));
        setResult(Enclosure());
        setStatus(Error);
        emit finished(this);
    }
    else if (m_streamsRequest->status() == QYouTube::StreamsRequest::Failed) {
// Compute the charge for this rental from the video's price code and the
// number of days rented.
double Rental::getCharge() const
{
    double amount = 0;

    switch (getVideo().getPriceCode()) {
        case Video::REGULAR:
            // flat fee covers two days, then a per-day late charge
            amount = 2;
            if (getDaysRented() > 2)
                amount += (getDaysRented() - 2) * 1.5;
            break;
        case Video::NEW_RELEASE:
            // charged per day from day one
            amount = getDaysRented() * 3;
            break;
        case Video::CHILDRENS:
            // flat fee covers three days, then a per-day late charge
            amount = 1.5;
            if (getDaysRented() > 3)
                amount += (getDaysRented() - 3) * 1.5;
            break;
    }

    return amount;
}
// get frame rate PyObject *Video_getFrameRate (PyImage *self, void *closure) { return Py_BuildValue("f", double(getVideo(self)->getFrameRate())); }
// get repeat PyObject *Video_getRepeat (PyImage *self, void *closure) { return Py_BuildValue("h", getVideo(self)->getRepeat()); }
// stop video
// Returns Python True if the underlying video was stopped, False otherwise.
PyObject *Video_stop(PyImage *self)
{
    const bool stopped = getVideo(self)->stop();
    if (stopped)
        Py_RETURN_TRUE;
    Py_RETURN_FALSE;
}
// pause video PyObject *Video_pause(PyImage *self) { if (getVideo(self)->pause()) Py_RETURN_TRUE; else Py_RETURN_FALSE; }
// Construct the detector: prepare a DIVX-encoded AVI writer for the output
// stream, then start grabbing video.
ColorDetect::ColorDetect()
{
    const int fourcc = CV_FOURCC('D', 'I', 'V', 'X');
    const cv::Size frameSize(640, 480);
    // 5 fps, colour frames, written to output.avi
    writer2.reset(new cv::VideoWriter("output.avi", fourcc, 5, frameSize, true));
    getVideo();
}
void Object::imageObject() { Video &d = getVideo(); d.onScreen(objimage, objx, objy); }
// get range PyObject *Video_getRange(PyImage *self, void *closure) { return Py_BuildValue("[ff]", getVideo(self)->getRange()[0], getVideo(self)->getRange()[1]); }