// Allocate `size` bytes from the resource-map allocator.
// Walks the frame list for the first FREE frame big enough; if none fits,
// grabs a fresh page and appends a new frame at the end of the list.
// Returns a pointer to the usable data area, or NULL for oversized requests.
// NOTE(review): the rejection test compares `size` against PAGE_SIZE without
// accounting for the kma_frame header carved out of each page — confirm a
// request of exactly PAGE_SIZE - sizeof(kma_frame) is meant to succeed.
void* kma_malloc(kma_size_t size) {
  if(size >= PAGE_SIZE)
    return NULL;
  // Lazily create the first page on the very first allocation.
  if(entry_page == NULL)
    init_first_page();
  kma_frame* current = (kma_frame*)entry_page->ptr;
  //go until last in the list or we find one that fits
  bool fits = (current->occupied == FREE && frame_size(current) >= size);
  while(current->last == NOT_LAST && !fits){
    current = current->next;
    fits = (current->occupied == FREE && frame_size(current) >= size);
  }
  void* ret_addr;
  //if it fits...
  if(fits){
    allocate_frame(current,size);
    ret_addr = data_ptr(current);
  //if not...
  }else{
    //if nothing in the resource map fits, we need to allocate a new page
    //if we are here, current should be the last frame in the list
    kma_page_t* new_page = get_page();
    // `next` marks the end of the new page; the new frame spans the page.
    void* next = ((char*) new_page->ptr) + new_page->size;
    kma_frame* new_frame = write_new_frame(new_page->ptr, new_page, current, next, FREE, LAST);
    allocate_frame(new_frame,size);
    // The old tail is no longer the last frame in the chain.
    current->last = NOT_LAST;
    ret_addr = data_ptr(new_frame);
  }
  //print_debug();
  return ret_addr;
}
// Copy this frame's contents to a new stack location at `sp`, re-linking it
// to `caller` and installing `pc` as the return address (SPARC layout).
// When `adjust` is set, memoized blocks are first materialized for every
// vframe belonging to this physical frame, then re-scoped to the new frame.
void frame::copy_to( char* sp, char* caller, char* pc, bool adjust) {
  sparc_sp *new_sp = (sparc_sp*)sp;
  if (adjust) {
    // make sure all memoized blocks exist, then adjust their scope
    abstract_vframe* callee = NULL;
    OopOopTable* dummy = EMPTY;
    // Iterate only the vframes whose physical frame is `this`.
    for ( abstract_vframe* vf = new_vframe(this);
          vf && vf->fr == this;
          callee = vf, vf = vf->sender()) {
      vf->createBlocks(callee, dummy);
    }
    frame* new_f = new_sp->as_callers_frame();
    ResourceMark rm; // for RegisterLocator
    adjust_blocks(block_scope_of_home_frame(), new_f, RegisterLocator::for_frame(new_f));
  }
  // Copy the raw oops, then fix up the frame link and return address.
  copy_oops(my_sp()->as_oops(), new_sp->as_oops(), frame_size());
  new_sp->set_link( (sparc_fp*) caller );
  new_sp->set_return_addr( pc );
}
// Copy this frame to the destination stack position `sp`, relink it to
// `caller`, and set `pc` as the return address (non-SPARC variant).
// When `adjust` is set, memoized blocks are materialized and re-scoped
// to the relocated frame before the raw copy.
void frame::copy_to( char* sp, char* caller, char* pc, bool adjust) {
  frame* new_f = (frame*)sp;
  if (SpendTimeForDebugging)
    warning("untested frame::copy_to:"); // need SIC to test this
  if (adjust) {
    // make sure all memoized blocks exist, then adjust their scope
    abstract_vframe* callee = NULL;
    OopOopTable* dummy = EMPTY;
    // Walk only vframes belonging to this physical frame.
    for ( abstract_vframe* vf = new_vframe(this);
          vf && vf->fr == this;
          callee = vf, vf = vf->sender()) {
      vf->createBlocks(callee, dummy);
    }
    ResourceMark rm; // for RegisterLocators
    adjust_blocks(block_scope_of_home_frame(), new_f, RegisterLocator::for_frame(new_f) );
  }
  // Raw oop-wise copy of the frame body into the new location.
  copy_oops( (oop*)this, (oop*)new_f, frame_size());
  my_sp()->set_link( ((frame*)caller)->my_sp() );
  set_return_addr( pc );
}
/*
 * Fetch the next unread frame from the loop buffer.
 * On success *buf/*size describe the frame payload and the buffer is
 * marked in_reading; the frame is consumed (head advanced) on the NEXT
 * call. Returns 0 while a frame is being read, non-zero when no frame
 * is available.
 */
static int lbuf_read_next(struct loop_buffer *lbuf, u8 **buf, u16 *size)
{
	struct frame_head *fhead =
		(struct frame_head *)(lbuf->addr + lbuf->off_head);

	*buf = NULL;
	*size = 0;

	if (lbuf->in_reading) {
		lbuf->in_reading = 0;
		/* go over previous frame has been read */
		lbuf->off_head += frame_size(fhead->length);
		lbuf->off_tail = lbuf->off_head;
		fhead = (struct frame_head *)(lbuf->addr + lbuf->off_head);
	}

	/* DISCARD sign marks the wrap point: restart from the buffer base. */
	if (fhead->sign == LBUF_DISCARD_SIGN) {
		fhead = (struct frame_head *)lbuf->addr;
		lbuf->off_head = lbuf->off_tail = 0;
	}

	/* CELL sign marks a valid frame; expose its payload to the caller. */
	if (fhead->sign == LBUF_CELL_SIGN) {
		*buf = lbuf->addr + lbuf->off_head + sizeof(*fhead);
		*size = fhead->length;
		lbuf->in_reading = 1;
	}

	return !lbuf->in_reading;
}
bool Movie::saveAsAVI(const std::string & filename, int fourcc) const { if (m_frames.empty()) { WARNM("There are no frames in the movie"); return true; } cv::Size frame_size(m_frame_w, m_frame_h); cv::VideoWriter writer(filename, fourcc, m_fps, frame_size, true); if (!writer.isOpened()) { ERRORM("Failed to create video writer"); return false; } cv::Mat frame; for (int i = 0; i < m_frames.size(); ++i) { if (!qImage2CvMat(m_frames[i], frame)) { ERRORM("Unsupported conversion from " << utils::qImageFormatToString(m_frames[i].format()) << " pixel format"); return false; } writer.write(frame); } return true; }
// Number of whole frames contained in the file, computed from its size.
// Returns 0 (and caches 0) when the file cannot be stat'ed — previously
// a failed stat() left `status.st_size` uninitialized and the result was
// garbage.
int Read_yuv_file::frame_count() const {
  struct stat status;
  if (stat(file_path, &status) != 0) {
    total_frames = 0;
    return total_frames;
  }
  total_frames = (status.st_size / frame_size());
  return total_frames;
}
static void set_default_params(struct hva_ctx *ctx) { struct hva_frameinfo *frameinfo = &ctx->frameinfo; struct hva_streaminfo *streaminfo = &ctx->streaminfo; frameinfo->pixelformat = V4L2_PIX_FMT_NV12; frameinfo->width = HVA_DEFAULT_WIDTH; frameinfo->height = HVA_DEFAULT_HEIGHT; frameinfo->aligned_width = ALIGN(frameinfo->width, HVA_WIDTH_ALIGNMENT); frameinfo->aligned_height = ALIGN(frameinfo->height, HVA_HEIGHT_ALIGNMENT); frameinfo->size = frame_size(frameinfo->aligned_width, frameinfo->aligned_height, frameinfo->pixelformat); streaminfo->streamformat = V4L2_PIX_FMT_H264; streaminfo->width = HVA_DEFAULT_WIDTH; streaminfo->height = HVA_DEFAULT_HEIGHT; ctx->colorspace = V4L2_COLORSPACE_REC709; ctx->xfer_func = V4L2_XFER_FUNC_DEFAULT; ctx->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT; ctx->quantization = V4L2_QUANTIZATION_DEFAULT; ctx->max_stream_size = estimated_stream_size(streaminfo->width, streaminfo->height); }
// Recompute this frame's size hint: the child layout's hint (if any)
// padded on all sides by the frame border.
void Frame::dirty()
{
  Widget::dirty();
  const int border = frame_size() * 2;
  size_hint((layout() ? layout()->size_hint() : 0) + border);
}
// Print debugging info for a compiled frame: the frame chain (for
// compiled Self frames), the locals range, current PC and frame size.
// NOTE(review): the second lprintf format has four conversion specifiers
// (two %#lx for the locals range, %#lx for currPC, %ld for size) but only
// three arguments are supplied — the upper bound of the locals range
// appears to be missing; verify against the original sources before use.
void frame::print_compiled() {
  if (is_compiled_self_frame())
    lprintf(" chain = %#lx;", (nmethod_frame_chain(code())));
  lprintf("\n\tlocals = [%#lx, %#lx], currPC = %#lx, size = %ld words\n",
          (long unsigned)(my_sp()),
          (long unsigned)(currentPC()),
          long(frame_size()));
}
// Note: we cannot have print_on as const, as we allocate inside the method void vframeArray::print_on_2(outputStream* st) { st->print_cr(" - sp: " INTPTR_FORMAT, sp()); st->print(" - thread: "); Thread::current()->print(); st->print_cr(" - frame size: %d", frame_size()); for (int index = 0; index < frames() ; index++ ) { element(index)->print(st); } }
// Remove the first `_frames_to_pop` frames, shifting the remainder to the
// front. Popping everything (or more) simply clears the buffer.
void AudioBuffer::pop_frames(unsigned _frames_to_pop)
{
	if(_frames_to_pop >= frames()) {
		clear();
		return;
	}
	auto keep_from = m_data.begin() + _frames_to_pop * frame_size();
	std::copy(keep_from, m_data.end(), m_data.begin());
	resize_frames(frames() - _frames_to_pop);
}
// Read exactly one frame into `buffer`. Returns 1 when a full frame was
// read, 0 when the file is not open, the buffer is too small, or the
// read came up short (e.g. EOF).
int Read_yuv_file::read(void* buffer, int size)
{
	const int needed = frame_size();
	if (file == 0 || size < needed)
		return 0;
	const int got = fread(buffer, 1, needed, file);
	return got == needed ? 1 : 0;
}
// Seek by a whole number of frames relative to `whence` (SEEK_SET/CUR/END).
// Returns 1 on success, 0 when the file is not open or fseek fails.
int Read_yuv_file::seek(int frames, int whence)
{
	int size_frame = frame_size();
	if (0 == file) {
		return 0;
	}
	// Widen before multiplying: `size_frame * frames` was previously
	// computed in int and could overflow for large frame offsets.
	long offset = (long)size_frame * frames;
	int status = fseek(file, offset, whence);
	return (0 == status);
}
// Geometry of the area inside this frame's border: the widget geometry
// shrunk by the frame size on every side.
Box2i Frame::frame_geom() const
{
  const Box2i & g = this->geom();
  const int m = frame_size();
  const int m2 = m * 2;
  return Box2i(g.x + m, g.y + m, g.w - m2, g.h - m2);
}
// Return the methodOop for this interpreted frame, or NULL when the frame
// cannot be trusted (e.g. mid-deoptimization or a stale hcode pointer).
methodOop frame::method() const {
  assert(is_interpreted_frame(), "must be interpreter frame");
  // First we will check the interpreter frame is valid by checking the frame size.
  // The interpreter guarantees hp is valid if the frame is at least 4 in size.
  // (return address, link, receiver, hcode pointer)
  if (frame_size() < minimum_size_for_deoptimized_frame)
    return NULL;
  u_char* h = hp();
  // The hcode pointer must point into old space to be a live method.
  if (!Universe::old_gen.contains(h))
    return NULL;
  // Map the interior pointer back to the start of its enclosing object and
  // confirm it really is a method before handing it out.
  memOop obj = as_memOop(Universe::object_start((oop*) h));
  return obj->is_method() ? methodOop(obj) : NULL;
}
/*
 * Pop the current call frame off the jq stack.
 * If this pop will actually free the frame's storage (no saved forkpoint
 * still references it), release every local variable first so their
 * jv values are not leaked.
 */
static void frame_pop(struct jq_state* jq) {
  assert(jq->curr_frame);
  struct frame* fp = frame_current(jq);
  if (stack_pop_will_free(&jq->stk, jq->curr_frame)) {
    int nlocals = fp->bc->nlocals;
    int i;
    for (i=0; i<nlocals; i++) {
      jv_free(*frame_local_var(jq, i, 0));
    }
  }
  /* Release the block and make the caller's frame current again. */
  jq->curr_frame = stack_pop_block(&jq->stk, jq->curr_frame, frame_size(fp->bc));
}
// Append up to `_frames_count` frames from `_source` to this buffer.
// Both buffers must share the same audio spec; throws std::logic_error
// otherwise. Appending zero frames is a no-op.
void AudioBuffer::add_frames(const AudioBuffer &_source, unsigned _frames_count)
{
	if(_source.spec() != m_spec) {
		throw std::logic_error("sound buffers must have the same spec");
	}
	const unsigned count = std::min(_frames_count, _source.frames());
	if(count == 0) {
		return;
	}
	auto src_begin = _source.m_data.begin();
	m_data.insert(m_data.end(), src_begin, src_begin + count * frame_size());
}
/*
 * V4L2 TRY_FMT handler for the OUTPUT (frame input) queue.
 * Validates the pixel format against the available encoders, clamps and
 * aligns the requested resolution, fills in default colorimetry when the
 * caller left it unset, and computes stride/size for the aligned geometry.
 * Returns 0 on success or -EINVAL for an unsupported pixel format.
 */
static int hva_try_fmt_frame(struct file *file, void *priv,
			     struct v4l2_format *f)
{
	struct hva_ctx *ctx = fh_to_ctx(file->private_data);
	struct device *dev = ctx_to_dev(ctx);
	struct v4l2_pix_format *pix = &f->fmt.pix;
	u32 pixelformat = pix->pixelformat;
	const struct hva_enc *enc;
	u32 width, height;

	/* The frame format is only acceptable if some encoder can pair it
	 * with the currently configured stream format. */
	enc = hva_find_encoder(ctx, pixelformat, ctx->streaminfo.streamformat);
	if (!enc) {
		dev_dbg(dev,
			"%s V4L2 TRY_FMT (OUTPUT): unsupported format %.4s\n",
			ctx->name, (char *)&pixelformat);
		return -EINVAL;
	}

	/* adjust width & height */
	width = pix->width;
	height = pix->height;
	v4l_bound_align_image(&pix->width,
			      HVA_MIN_WIDTH, HVA_MAX_WIDTH,
			      frame_alignment(pixelformat) - 1,
			      &pix->height,
			      HVA_MIN_HEIGHT, HVA_MAX_HEIGHT,
			      frame_alignment(pixelformat) - 1,
			      0);

	if ((pix->width != width) || (pix->height != height))
		dev_dbg(dev,
			"%s V4L2 TRY_FMT (OUTPUT): resolution updated %dx%d -> %dx%d to fit min/max/alignment\n",
			ctx->name, width, height, pix->width, pix->height);

	/* stride/size are computed on the HW-aligned geometry, which may be
	 * larger than the user-visible width/height set above. */
	width = ALIGN(pix->width, HVA_WIDTH_ALIGNMENT);
	height = ALIGN(pix->height, HVA_HEIGHT_ALIGNMENT);

	/* Default colorimetry when the application did not specify one. */
	if (!pix->colorspace) {
		pix->colorspace = V4L2_COLORSPACE_REC709;
		pix->xfer_func = V4L2_XFER_FUNC_DEFAULT;
		pix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
		pix->quantization = V4L2_QUANTIZATION_DEFAULT;
	}

	pix->bytesperline = frame_stride(width, pixelformat);
	pix->sizeimage = frame_size(width, height, pixelformat);
	pix->field = V4L2_FIELD_NONE;

	return 0;
}
/*
 * libspotify audio-delivery callback: forwards raw PCM frames to the
 * Python client's music_delivery method and returns how many frames the
 * client consumed (defaults to all of them when the callback fails or
 * returns a non-integer).
 */
static int music_delivery(sp_session * session, const sp_audioformat * format,
                          const void *frames, int num_frames)
{
    /* TODO: This is called _all_ the time, make it faster? */
    // Note that we do not try to shoe horn this into session_callback as it is
    // quite different in that this needs to handle return values and much more
    // complicated arguments.
    debug_printf(">> music_delivery called: frames %d", num_frames);

    int consumed = num_frames;  // assume all consumed
    int size = frame_size(format);

    PyObject *callback, *client, *py_frames, *py_session, *result;
    /* Callbacks arrive on libspotify's thread: acquire the GIL first. */
    PyGILState_STATE gstate = PyGILState_Ensure();

    /* TODO: check if session creations succeeds. */
    py_frames = PyBuffer_FromMemory((void *)frames, num_frames * size);
    py_session = Session_FromSpotify(session);

    /* TODO: check if callback get succeeds. */
    client = (PyObject *)sp_session_userdata(session);
    callback = PyObject_GetAttrString(client, "music_delivery");

    /* "NN..." steals the py_session/py_frames references on success. */
    result = PyObject_CallFunction(callback, "NNiiiii", py_session, py_frames,
                                   size, num_frames, format->sample_type,
                                   format->sample_rate, format->channels);
    if (result == NULL)
        PyErr_WriteUnraisable(callback);
    else {
        if (PyInt_Check(result))
            consumed = (int)PyInt_AsLong(result);
        else if (PyLong_Check(result))
            consumed = (int)PyLong_AsLong(result);
        else {
            PyErr_SetString(PyExc_TypeError,
                            "music_delivery must return an integer");
            PyErr_WriteUnraisable(callback);
        }
        Py_DECREF(result);
    }
    Py_XDECREF(callback);
    PyGILState_Release(gstate);
    return consumed;
}
bool Movie::saveAsAVI(const std::string & filename, int fourcc) { if (m_frames.empty()) { WARNM("There are no frames in the movie"); return true; } cv::Size frame_size(m_frame_w, m_frame_h); cv::VideoWriter writer(filename, fourcc, m_fps, frame_size, true); if (!writer.isOpened()) { ERRORM("Failed to create video writer"); return false; } //cv::Mat cv_frame(m_frame_h, m_frame_w, CV_8UC3, nullptr); cv::Mat cv_frame(m_frame_h, m_frame_w, CV_8UC4, nullptr); for (int i = 0; i < m_frames.size(); ++i) { QImage & frame = m_frames[i]; if (frame.format() != QImage::Format_RGB888) { INFOM("Converting frame #" << i << " from " << utils::qImageFormatToString(frame.format()) << " to QImage::Format_RGB888"); frame = frame.convertToFormat(QImage::Format_RGB888); if (frame.isNull()) { ERRORM("Failed to convert pixel format of frame #" << i << " to QImage::Format_RGB888"); return false; } } // the const cast is here because the function called is constBits which returns a const pointer // and constBits() in turn is called because it does not detach the pixel buffer from QImage, // which is the desired behaviour in this case. // The cost_cast should also be safe here, since write method of VideoWriter should write to the // pixels provided cv_frame.data = const_cast<uchar*>(frame.constBits()); writer.write(cv_frame); } return true; }
//************************************************************************************************* //! Common initialization code for all construction methods. //************************************************************************************************* void ossimVideoImageSource::initialize() { ossimVideoSource* video = dynamic_cast<ossimVideoSource*>(getInput()); if (!video) return; // Establish the frame rect: ossimIpt frame_size (video->frameSize()); m_frameRect = ossimIrect(0, 0, frame_size.x-1, frame_size.y-1); // Initialize the tile data buffer: m_tile = ossimImageDataFactory::instance()->create(this, this); m_tile->setWidth(frame_size.x); m_tile->setHeight(frame_size.y); m_tile->initialize(); }
// Replace the preview bitmap and resize the surrounding controls/frame to
// fit. Layout is performed inside a Freeze()/Thaw() pair to avoid flicker;
// when m_resize_frame is set, the top-level frame grows by the same amount
// as the image did.
void LatexPreviewWindow::SetImage(const wxBitmap& img)
{
	Freeze();

	// Remember the previous image dimensions so the frame can grow by the delta.
	int old_width = 0, old_height = 0;
	if (m_img.IsOk()) {
		old_width = m_img.GetWidth();
		old_height = m_img.GetHeight();
	}
	wxSize increase(img.GetWidth() - old_width, img.GetHeight() - old_height),
	       frame_size( GetSize() ),
	       // 40px of padding around the image control (20 per side).
	       new_img_size(img.GetWidth() + 40, img.GetHeight() + 40);
	m_img = img;

	// m_mainpanel->Layout();
	// m_control_image->SetVirtualSizeHints( new_img_size );
	m_control_image->SetInitialSize( new_img_size );
	m_control_image->SetMinSize( new_img_size );
	m_control_image->SetSize( new_img_size );
	m_panel_formula->GetSizer()->Layout();
	GetSizer()->SetSizeHints(this);

	if (m_resize_frame) {
		// Grow the frame horizontally only when the image control no
		// longer fits in its panel; grow vertically by the image delta.
		if (m_control_image->GetSize().x > m_panel_image->GetSize().x)
			frame_size.x += m_control_image->GetSize().x - m_panel_image->GetSize().x;
		frame_size.y += increase.y;
	}
	// m_control_image->Refresh();
	// m_control_image->Update();
	// m_mainpanel->GetSizer()->SetSizeHints(m_mainpanel);
	SetSize(frame_size);
	Thaw();
	Refresh();
}
/*
 * libspotify audio-delivery callback: forwards PCM frames to the Python
 * client's music_delivery method and returns the number of frames consumed
 * (all of them when the callback fails or returns a non-integer).
 */
static int music_delivery(sp_session *session, const sp_audioformat *format,
                          const void *frames, int num_frames)
{
    PyGILState_STATE gstate;
    gstate = PyGILState_Ensure();
    int siz = frame_size(format);
    /* PyBuffer_FromMemory and PyObject_CallObject already return new
     * references; the previous extra Py_INCREFs leaked one reference to
     * each object per callback invocation. */
    PyObject *pyframes = PyBuffer_FromMemory((void *)frames, num_frames * siz);
    Session *psession =
        (Session *)PyObject_CallObject((PyObject *)&SessionType, NULL);
    psession->_session = session;
    PyObject *client = (PyObject *)sp_session_userdata(session);
    PyObject *c = PyObject_CallMethod(client, "music_delivery", "OOiiiii",
                                      psession, pyframes, siz, num_frames,
                                      format->sample_type, format->sample_rate,
                                      format->channels);
    int consumed = num_frames;  // assume all consumed
    if (c == NULL) {
        /* The callback raised: report it instead of dereferencing NULL in
         * PyObject_TypeCheck (previous behavior). */
        PyErr_WriteUnraisable(client);
    } else {
        if (PyObject_TypeCheck(c, &PyInt_Type)) {
            consumed = (int)PyInt_AsLong(c);
        }
        Py_DECREF(c);  /* result was previously leaked */
    }
    Py_DECREF(pyframes);
    Py_DECREF(psession);
    PyGILState_Release(gstate);
    return consumed;
}
void Text_Display::dirty() { Abstract_Text_Widget::dirty(); _widget->textfont(text_font()); _widget->textsize(Style::global()->font_size()); _widget->textcolor(text_color()); size_hint( V2i( //Math::max( // Style::global()->size_button(), // base::string_size(_size_string) //), Font::string_width(_size_string), fl_height() ) + Style::global()->margin_text() * 2 + frame_size() * 2); }
/*
 * Push a new call frame for `callee` onto the jq stack.
 * The frame's entry array is filled with the callee's closure arguments
 * (built from `argdef`, two uint16s per closure) followed by its locals,
 * each initialized to jv_invalid(). Returns the new frame; jq->curr_frame
 * is updated to reference it.
 */
static struct frame* frame_push(struct jq_state* jq, struct closure callee,
                                uint16_t* argdef, int nargs) {
  stack_ptr new_frame_idx = stack_push_block(&jq->stk, jq->curr_frame, frame_size(callee.bc));
  struct frame* new_frame = stack_block(&jq->stk, new_frame_idx);
  new_frame->bc = callee.bc;
  new_frame->env = callee.env;
  assert(nargs == new_frame->bc->nclosures);
  union frame_entry* entries = new_frame->entries;
  int i;
  /* closures first: entries[0..nclosures) */
  for (i=0; i<nargs; i++) {
    entries->closure = make_closure(jq, argdef + i * 2);
    entries++;
  }
  /* then locals: entries[nclosures..nclosures+nlocals) */
  for (i=0; i<callee.bc->nlocals; i++) {
    entries->localvar = jv_invalid();
    entries++;
  }
  jq->curr_frame = new_frame_idx;
  return new_frame;
}
/*
 * libspotify audio-delivery callback: forwards PCM frames to the Python
 * client's music_delivery method and returns the number of frames consumed
 * (all of them when the callback fails or returns a non-integer).
 */
static int music_delivery(sp_session * session, const sp_audioformat * format,
                          const void *frames, int num_frames)
{
    PyGILState_STATE gstate;
    PyObject *res, *method;

#ifdef DEBUG
    fprintf(stderr, "[DEBUG]-session- >> music_delivery called\n");
#endif
    gstate = PyGILState_Ensure();
    int siz = frame_size(format);
    PyObject *pyframes = PyBuffer_FromMemory((void *)frames, num_frames * siz);
    Session *psession =
        (Session *) PyObject_CallObject((PyObject *)&SessionType, NULL);
    psession->_session = session;
    PyObject *client = (PyObject *)sp_session_userdata(session);
    method = PyObject_GetAttrString(client, "music_delivery");
    res = PyObject_CallFunction(method, "OOiiiii", psession, pyframes, siz,
                                num_frames, format->sample_type,
                                format->sample_rate, format->channels);
    int consumed = num_frames;  // assume all consumed
    /* The checks below must be an else-if chain: the previous code fell
     * through to PyInt_Check(res) with res == NULL after a failed call,
     * dereferencing a NULL pointer. */
    if (!res)
        PyErr_WriteUnraisable(method);
    else if (PyInt_Check(res))
        consumed = (int)PyInt_AsLong(res);
    else if (PyLong_Check(res))
        consumed = (int)PyLong_AsLong(res);
    else {
        PyErr_SetString(PyExc_TypeError,
                        "music_delivery must return an integer");
        PyErr_WriteUnraisable(method);
    }
    Py_DECREF(pyframes);
    Py_DECREF(psession);
    Py_XDECREF(res);
    Py_DECREF(method);
    PyGILState_Release(gstate);
    return consumed;
}
//changes the kma_frame objects appropriately void allocate_frame(kma_frame* frame, int new_size){ frame->occupied = TAKEN; int sub_frame_size = frame_size(frame) - new_size - sizeof(kma_frame); if(sub_frame_size > 0){ //if theres anough room to allocate another frame void* sub_frame_addr = ((char*)frame) + sizeof(kma_frame) + new_size; write_new_frame( sub_frame_addr, frame->page, frame, frame->next, FREE, frame->last); //point the orginal frame to point to the new sub_frame frame->next = (sub_frame_addr); //point the original next's prev to point to the new sub_frame frame->next->next->prev = frame->next; //mark it as no longer the last in the chain frame->last = NOT_LAST; } }
/*
 * Wrap a decoded AVFrame in a GrooveBuffer destined for `sink`.
 * The buffer takes ownership of `frame` (stored in the private struct so it
 * can be freed later) and records the playlist item and audio clock of the
 * currently decoding file. Returns NULL on allocation or mutex failure.
 */
static struct GrooveBuffer *frame_to_groove_buffer(struct GroovePlaylist *playlist,
        struct GrooveSink *sink, AVFrame *frame)
{
    struct GrooveBufferPrivate *priv = av_mallocz(sizeof(struct GrooveBufferPrivate));
    if (!priv) {
        av_log(NULL, AV_LOG_ERROR, "unable to allocate buffer\n");
        return NULL;
    }

    if (pthread_mutex_init(&priv->mutex, NULL) != 0) {
        av_free(priv);
        av_log(NULL, AV_LOG_ERROR, "unable to create mutex\n");
        return NULL;
    }

    struct GroovePlaylistPrivate *pl = (struct GroovePlaylistPrivate *) playlist;
    struct GrooveFile *file = pl->decode_head->file;
    struct GrooveFilePrivate *fpriv = (struct GrooveFilePrivate *) file;

    struct GrooveBuffer *out = &priv->externals;
    out->item = pl->decode_head;
    out->pos = fpriv->audio_clock;

    out->data = frame->extended_data;
    out->frame_count = frame->nb_samples;
    out->format.channel_layout = frame->channel_layout;
    out->format.sample_fmt = frame->format;
    out->format.sample_rate = frame->sample_rate;
    out->size = frame_size(frame);

    priv->frame = frame;
    return out;
}
// Emit a call into the VM runtime at `entry_point` from a C1 stub (SPARC).
// Sets up/tears down the last-Java-frame, passes the current thread as the
// first argument (in the delay slot unless VerifyThread already moved it),
// dispatches any pending exception, and moves the oop / metadata results
// out of the thread (clearing the thread-local slots either way).
// Returns the code-buffer offset of the return address for oop-map lookup.
int StubAssembler::call_RT(Register oop_result1, Register metadata_result,
                           address entry_point, int number_of_arguments) {
  // for sparc changing the number of arguments doesn't change
  // anything about the frame size so we'll always lie and claim that
  // we are only passing 1 argument.
  set_num_rt_args(1);

  assert_not_delayed();

  // bang stack before going to runtime
  set(-os::vm_page_size() + STACK_BIAS, G3_scratch);
  st(G0, SP, G3_scratch);

  // debugging support
  assert(number_of_arguments >= 0 , "cannot have negative number of arguments");

  set_last_Java_frame(SP, noreg);
  if (VerifyThread) mov(G2_thread, O0); // about to be smashed; pass early
  save_thread(L7_thread_cache);
  // do the call
  call(entry_point, relocInfo::runtime_call_type);
  if (!VerifyThread) {
    delayed()->mov(G2_thread, O0); // pass thread as first argument
  } else {
    delayed()->nop();              // (thread already passed)
  }
  int call_offset = offset(); // offset of return address
  restore_thread(L7_thread_cache);
  reset_last_Java_frame();

  // check for pending exceptions
  { Label L;
    Address exception_addr(G2_thread, Thread::pending_exception_offset());
    ld_ptr(exception_addr, Gtemp);
    br_null_short(Gtemp, pt, L);
    // On exception: clear both result slots before forwarding.
    Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
    st_ptr(G0, vm_result_addr);
    Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset());
    st_ptr(G0, vm_result_addr_2);

    if (frame_size() == no_frame_size) {
      // we use O7 linkage so that forward_exception_entry has the issuing PC
      call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
      delayed()->restore();
    } else if (_stub_id == Runtime1::forward_exception_id) {
      should_not_reach_here();
    } else {
      AddressLiteral exc(Runtime1::entry_for(Runtime1::forward_exception_id));
      jump_to(exc, G4);
      delayed()->nop();
    }
    bind(L);
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result1->is_valid()) { // get oop result if there is one and reset it in the thread
    get_vm_result (oop_result1);
  } else {
    // be a little paranoid and clear the result
    Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
    st_ptr(G0, vm_result_addr);
  }

  // get second result if there is one and reset the value in the thread
  if (metadata_result->is_valid()) {
    get_vm_result_2 (metadata_result);
  } else {
    // be a little paranoid and clear the result
    Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset());
    st_ptr(G0, vm_result_addr_2);
  }

  return call_offset;
}
std::deque<frame_size> capturer::format::get_possible_framesizes() const{ std::deque<frame_size> ret; switch(type){ case fst_discrete: return framesizes; case fst_stepwise:{ if (check_framesize(frame_size(320,200))) ret.push_back(frame_size(320,200)); if (check_framesize(frame_size(640,480))) ret.push_back(frame_size(640,480)); if (check_framesize(frame_size(800,600))) ret.push_back(frame_size(800,600)); if (check_framesize(frame_size(1024,768))) ret.push_back(frame_size(1024,768)); if (check_framesize(frame_size(1280,720))) ret.push_back(frame_size(1280,720)); if (check_framesize(frame_size(1920,1080))) ret.push_back(frame_size(1920,1080)); } } return ret; }