void
xtr_oggbase_c::create_standard_file(xtr_base_c *master, KaxTrackEntry &track, LacingType lacing) {
  // The codec private data carries the stream's header packets; without it
  // the track cannot be re-muxed into a valid Ogg stream.
  KaxCodecPrivate *priv = FindChild<KaxCodecPrivate>(&track);
  if (!priv)
    mxerror(boost::format(Y("Track %1% with the CodecID '%2%' is missing the \"codec private\" element and cannot be extracted.\n")) % m_tid % m_codec_id);

  init_content_decoder(track);
  memory_cptr mpriv = decode_codec_private(priv);

  std::vector<memory_cptr> header_packets;
  try {
    if (LACING_NONE == lacing)
      header_packets.push_back(mpriv);

    else {
      // Several header packets are stored Xiph-laced inside the private data.
      header_packets = unlace_memory_xiph(mpriv);
      if (header_packets.empty())
        throw false;
    }

    header_packets_unlaced(header_packets);

  } catch (...) {
    mxerror(boost::format(Y("Track %1% with the CodecID '%2%' does not contain valid headers.\n")) % m_tid % m_codec_id);
  }

  xtr_oggbase_c::create_file(master, track);

  // Feed each header packet (ID header, comments, etc.) into the stream.
  ogg_packet op;
  for (m_packetno = 0; m_packetno < header_packets.size(); ++m_packetno) {
    memory_cptr const &header = header_packets[m_packetno];

    op.b_o_s      = 0 == m_packetno ? 1 : 0;
    op.e_o_s      = 0;
    op.packetno   = m_packetno;
    op.packet     = header->get_buffer();
    op.bytes      = header->get_size();
    op.granulepos = 0;

    ogg_stream_packetin(&m_os, &op);

    if (0 == m_packetno)        /* ID header must be alone on a separate page */
      flush_pages();
  }

  /* flush at last header, data must start on a new page */
  flush_pages();
}
void
xtr_oggkate_c::handle_frame(xtr_frame_t &f) {
  unsigned char *data = f.frame->get_buffer();

  ogg_packet op;
  op.b_o_s    = 0;
  // A packet consisting of the single byte 0x7f terminates a Kate stream.
  op.e_o_s    = (f.frame->get_size() == 1) && (data[0] == 0x7f);
  op.packetno = m_packetno;
  op.packet   = data;
  op.bytes    = f.frame->get_size();

  /* we encode the backlink in the granulepos */
  float f_timecode   = f.timecode / 1000000000.0;
  int64_t g_backlink = 0;

  if (static_cast<long>(1 + 3 * sizeof(int64_t)) <= op.bytes)
    g_backlink = get_uint64_le(op.packet + 1 + 2 * sizeof(int64_t));

  float f_backlink = g_backlink * (float)m_kate_id_header.gden / m_kate_id_header.gnum;
  float f_base     = f_timecode - f_backlink;
  float f_offset   = f_timecode - f_base;

  int64_t g_base   = (int64_t)(f_base   * m_kate_id_header.gnum / m_kate_id_header.gden);
  int64_t g_offset = (int64_t)(f_offset * m_kate_id_header.gnum / m_kate_id_header.gden);
  op.granulepos    = (g_base << m_kate_id_header.kfgshift) | g_offset;

  ogg_stream_packetin(&m_os, &op);
  flush_pages();                /* Kate is a data packet per page */

  ++m_packetno;
}
/**
 * @brief Flush remaining buffered blocks and shut the direct writer down.
 *
 * On a normal close (!onError) the unflushed block buffer is written out,
 * the spooler is closed and the duplicate-tuple counters it accumulated are
 * copied into the result before the writer itself is freed.  On an
 * error-path close only the file resources are released and the writer is
 * left for the error handler.
 *
 * @param self    [in/out] Writer to close; freed on a normal close
 * @param onError [in] true when closing while recovering from an error
 * @return Load result holding the new/old duplicate counts (zeroed when
 *         closing on error)
 */
static WriterResult DirectWriterClose(DirectWriter *self, bool onError) {
  WriterResult ret = { 0 };

  Assert(self != NULL);

  /* Flush unflushed block buffer and close the heap file. */
  if (!onError)
    flush_pages(self);

  close_data_file(self);
  UnlinkLSF(self);

  if (!onError) {
    /* NOTE(review): the dup counters are read after SpoolerClose(); this
     * assumes SpoolerClose finalizes but does not clear them — confirm. */
    SpoolerClose(&self->spooler);
    ret.num_dup_new = self->spooler.dup_new;
    ret.num_dup_old = self->spooler.dup_old;

    if (self->base.rel)
      heap_close(self->base.rel, AccessExclusiveLock);

    if (self->blocks)
      pfree(self->blocks);

    pfree(self);
  }

  return ret;
}
void xtr_oggbase_c::finish_file() { if (-1 == m_queued_granulepos) return; // Set the "end of stream" marker on the last packet, handle it // and flush all remaining Ogg pages. m_queued_granulepos = m_previous_end * m_sfreq / 1000000000; write_queued_frame(true); flush_pages(); }
/*
 * Interactive driver: reads a memory size (with an optional unit suffix)
 * from non-blocking stdin, touches that many pages via visit_pages(),
 * frees everything on the next command, and exits on 'q'.
 *
 * Fixes:
 *  - the bytes returned by read() are now NUL-terminated before being
 *    passed to strtoul()/str[0]; previously the buffer was unterminated
 *    (undefined behavior) or stale from a prior iteration;
 *  - EOF (read() == 0) and hard read errors now end the loop instead of
 *    re-processing the stale buffer forever.
 */
int main(int argc, char **argv) {
  printf("type a mem value to allocate and keep,\n"
         "type q to quit\n");
  char str[256];
  ul tg_mem;

  /* Optional command-line argument: allocate immediately on startup. */
  if (argc == 2) {
    char *end;
    tg_mem  = strtoul(argv[1], &end, 10);
    tg_mem *= unit_expand(*end);   /* scale by unit suffix (e.g. k/m/g) */
    visit_pages(tg_mem);
  }

  /* Make stdin non-blocking so the idle loop keeps running. */
  int flags = fcntl(STDIN_FILENO, F_GETFL, 0);
  flags |= O_NONBLOCK;
  fcntl(STDIN_FILENO, F_SETFL, flags);

  printf(":>");
  fflush(stdout);

  while (1) {
    /* Leave room for the terminating NUL. */
    int ret = read(0, str, sizeof(str) - 1);
    if (ret == -1 && errno == EAGAIN) {
      /* No input yet: use the idle time to flush pages. */
      flush_pages();
      continue;
    }
    if (ret <= 0)      /* EOF or a hard read error: stop looping */
      break;
    str[ret] = '\0';   /* read() does not NUL-terminate */

    printf(":>");
    fflush(stdout);

    free_all_pages();
    if (str[0] == 'q')
      break;

    char *end;
    tg_mem  = strtoul(str, &end, 10);
    tg_mem *= unit_expand(*end);
    visit_pages(tg_mem);
  }

  return 0;
}
/* Write one Kate data packet to the Ogg stream.  Every packet is flushed
   onto its own page; the granule position encodes both a base time and a
   backlink offset read from the packet itself. */
void xtr_oggkate_c::handle_frame(memory_cptr &frame, KaxBlockAdditions *, int64_t timecode, int64_t, int64_t, int64_t, bool, bool, bool) {
  m_content_decoder.reverse(frame, CONTENT_ENCODING_SCOPE_BLOCK);

  ogg_packet op;
  op.b_o_s = 0;
  /* A packet consisting of the single byte 0x7f marks end-of-stream. */
  op.e_o_s = (frame->get_size() == 1) && (frame->get_buffer()[0] == 0x7f);
  op.packetno = m_packetno;
  op.packet = frame->get_buffer();
  op.bytes = frame->get_size();

  /* we encode the backlink in the granulepos */
  float f_timecode = timecode / 1000000000.0; /* ns -> seconds */
  int64_t g_backlink = 0;
  /* Read the 64-bit little-endian backlink field at byte offset
     1 + 2*sizeof(int64_t), but only when the packet is large enough to
     contain it.  (NOTE(review): offset presumably matches the Kate data
     packet header layout — confirm against the Kate spec.) */
  if (op.bytes >= static_cast<long>(1 + 3 * sizeof(int64_t)))
    g_backlink = get_uint64_le(op.packet + 1 + 2 * sizeof(int64_t));

  /* Convert granules <-> seconds using the stream's granule rate
     (gnum/gden from the ID header). */
  float f_backlink = g_backlink * (float)m_kate_id_header.gden / m_kate_id_header.gnum;
  float f_base = f_timecode - f_backlink;
  float f_offset = f_timecode - f_base;
  int64_t g_base = (int64_t)(f_base * m_kate_id_header.gnum / m_kate_id_header.gden);
  int64_t g_offset = (int64_t)(f_offset * m_kate_id_header.gnum / m_kate_id_header.gden);
  /* granulepos = (base granule << kfgshift) | offset granule */
  op.granulepos = (g_base << m_kate_id_header.kfgshift) | g_offset;

  ogg_stream_packetin(&m_os, &op);
  flush_pages(); /* Kate is a data packet per page */
  ++m_packetno;
}
void
xtr_oggopus_c::finish_file() {
  // Emit the last queued packet with its end-of-stream flag set, then
  // push every remaining Ogg page out to the file.
  write_queued_frame(true);
  flush_pages();
}
/**
 * @brief Store one heap tuple directly into the writer's block buffer.
 *
 * Toasts over-threshold tuples, assigns an OID when the relation has OIDs,
 * places the tuple on the current buffered page — advancing to the next
 * buffered block, or flushing the whole buffer and recycling, when the
 * page is full — stamps the header with this load's xid/cid, and finally
 * hands the tuple to the spooler for index maintenance.
 *
 * @param self  [in/out] Direct writer holding block buffer and load state
 * @param tuple [in/out] Tuple to insert; t_self receives its final TID
 */
static void DirectWriterInsert(DirectWriter *self, HeapTuple tuple) {
  Page page;
  OffsetNumber offnum;
  ItemId itemId;
  Item item;
  LoadStatus *ls = &self->ls;

  /* Compress the tuple data if needed. */
  if (tuple->t_len > TOAST_TUPLE_THRESHOLD)
    tuple = toast_insert_or_update(self->base.rel, tuple, NULL, 0);
  BULKLOAD_PROFILE(&prof_writer_toast);

  /* Assign oids if needed. */
  if (self->base.rel->rd_rel->relhasoids) {
    Assert(!OidIsValid(HeapTupleGetOid(tuple)));
    HeapTupleSetOid(tuple, GetNewOid(self->base.rel));
  }

  /* Assume the tuple has been toasted already. */
  if (MAXALIGN(tuple->t_len) > MaxHeapTupleSize)
    ereport(ERROR,
        (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
         errmsg("row is too big: size %lu, maximum size %lu",
            (unsigned long) tuple->t_len,
            (unsigned long) MaxHeapTupleSize)));

  /* Fill current page, or go to next page if the page is full.  The free
   * space check reserves the relation's fillfactor headroom as well. */
  page = GetCurrentPage(self);
  if (PageGetFreeSpace(page) < MAXALIGN(tuple->t_len) +
      RelationGetTargetPageFreeSpace(self->base.rel, HEAP_DEFAULT_FILLFACTOR)) {
    if (self->curblk < BLOCK_BUF_NUM - 1)
      self->curblk++;
    else {
      /* All buffered blocks are full: write them out and start over. */
      flush_pages(self);
      self->curblk = 0; /* recycle from first block */
    }

    page = GetCurrentPage(self);

    /* Initialize current block */
    PageInit(page, BLCKSZ, 0);
    PageSetTLI(page, ThisTimeLineID);
  }

  /* Stamp the tuple as inserted by this load's transaction/command and
   * clear any stale visibility bits. */
  tuple->t_data->t_infomask &= ~(HEAP_XACT_MASK);
  tuple->t_data->t_infomask2 &= ~(HEAP2_XACT_MASK);
  tuple->t_data->t_infomask |= HEAP_XMAX_INVALID;
  HeapTupleHeaderSetXmin(tuple->t_data, self->xid);
  HeapTupleHeaderSetCmin(tuple->t_data, self->cid);
  HeapTupleHeaderSetXmax(tuple->t_data, 0);

  /* put the tuple on local page. */
  offnum = PageAddItem(page, (Item) tuple->t_data, tuple->t_len,
             InvalidOffsetNumber, false, true);

  /* Record the tuple's final location (block number = total loaded blocks
   * plus the buffer offset) and point its ctid at itself. */
  ItemPointerSet(&(tuple->t_self), LS_TOTAL_CNT(ls) + self->curblk, offnum);
  itemId = PageGetItemId(page, offnum);
  item = PageGetItem(page, itemId);
  ((HeapTupleHeader) item)->t_ctid = tuple->t_self;
  BULKLOAD_PROFILE(&prof_writer_table);

  /* Queue the tuple for index maintenance. */
  SpoolerInsert(&self->spooler, tuple);
  BULKLOAD_PROFILE(&prof_writer_index);
}