// Build a trace stack seeded with every root pointer in `rhs`.
// Each root must still be unmarked; push() records (and marks) it.
TraceStack(const std::unordered_set<void*>& rhs) {
    get_chunk();
    for (auto it = rhs.begin(); it != rhs.end(); ++it) {
        void* root = *it;
        assert(!isMarked(GCAllocation::fromUserData(root)));
        push(root);
    }
}
/* Abort a tunnel with an HTTP error reply.
   Closes the server side (fd2), formats an error response for the
   client into buf2 and hands control back to the dispatcher.
   Always returns 1. */
static int
tunnelError(TunnelPtr tunnel, int code, AtomPtr message)
{
    int n;
    /* The upstream connection is of no further use -- close it. */
    if(tunnel->fd2 > 0) {
        CLOSE(tunnel->fd2);
        tunnel->fd2 = -1;
    }
    /* Make sure there is a buffer to format the error response into. */
    if(tunnel->buf2.buf == NULL)
        tunnel->buf2.buf = get_chunk();
    if(tunnel->buf2.buf == NULL)
        goto fail;

    n = httpWriteErrorHeaders(tunnel->buf2.buf, CHUNK_SIZE - 1, 0,
                              1, code, message, 1, NULL, NULL, 0, NULL);
    if(n <= 0) goto fail;

    tunnel->buf2.head = n;

    tunnelDispatch(tunnel);
    return 1;

 fail:
    /* Couldn't even format the error -- drop the client side too. */
    CLOSE(tunnel->fd1);
    tunnel->fd1 = -1;
    tunnelDispatch(tunnel);
    return 1;
}
/**
 * Try to place a unit of this type on a tile adjacent to `other`.
 *
 * Expands `other`'s tile footprint by one tile in each direction and
 * attempts placement on each tile of that range in turn; tiles that lie
 * outside any loaded terrain chunk are skipped.
 *
 * Returns the placed TerrainObject, or nullptr when `u` or `other` is
 * null or no candidate tile accepts the unit.
 */
TerrainObject *UnitType::place_beside(Unit *u, TerrainObject const *other) const {
	if (!u || !other) {
		return nullptr;
	}

	// find the range of possible tiles: other's footprint grown by 1 tile
	tile_range outline{other->pos.start - coord::tile_delta{1, 1},
	                   other->pos.end + coord::tile_delta{1, 1},
	                   other->pos.draw};

	// find a free position adjacent to the object
	auto terrain = other->get_terrain();
	for (coord::tile temp_pos : tile_list(outline)) {
		// skip tiles with no terrain chunk loaded
		TerrainChunk *chunk = terrain->get_chunk(temp_pos);
		if (chunk == nullptr) {
			continue;
		}
		// place() presumably rejects occupied tiles -- first success wins
		auto placed = this->place(u, terrain, temp_pos.to_phys2().to_phys3());
		if (placed) {
			return placed;
		}
	}
	return nullptr;
}
/* Mark the chunk that owns `ptr` as free.
   Pointers that do not belong to any managed chunk are ignored. */
void free ( void *ptr)
{
    struct chunk *owner = get_chunk(ptr);

    if (owner == NULL)
        return;

    owner->free = 1;
}
// Map world block coordinates to the chunk that contains them.
Chunk* World::get_block_chunk(int block_x, int block_y, int block_z) const {
    // Floor-divide a block coordinate by the chunk edge length so that
    // negative coordinates map to the correct (negative) chunk index.
    const auto to_chunk_index = [](int block) {
        const int size = WorldConstants::CHUNK_SIZE;
        return (block < 0 ? block - size + 1 : block) / size;
    };

    return get_chunk(to_chunk_index(block_x),
                     to_chunk_index(block_y),
                     to_chunk_index(block_z));
}
// An ID is valid iff it still resolves to a live chunk in the pool.
bool MemoryPoolDynamicStatic::is_valid(ID p_id) {
	_THREAD_SAFE_METHOD_
	return get_chunk(p_id) != NULL;
}
/* Allocate a zone from a SysV-family filesystem.
   Pops the next free zone number off the superblock's free-list cache;
   when the cache runs dry, the popped block itself holds the next batch
   of the on-disk free list and is read in to refill the cache.
   Returns the zone number (on-disk byte order), or 0 on failure/ENOSPC. */
sysv_zone_t sysv_new_block(struct super_block * sb)
{
	struct sysv_sb_info *sbi = SYSV_SB(sb);
	unsigned int block;
	sysv_zone_t nr;
	struct buffer_head * bh;
	unsigned count;

	mutex_lock(&sbi->s_lock);
	count = fs16_to_cpu(sbi, *sbi->s_bcache_count);

	if (count == 0) /* Applies only to Coherent FS */
		goto Enospc;
	nr = sbi->s_bcache[--count];
	if (nr == 0) /* Applies only to Xenix FS, SystemV FS */
		goto Enospc;

	block = fs32_to_cpu(sbi, nr);

	*sbi->s_bcache_count = cpu_to_fs16(sbi, count);

	/* sanity check: the allocated block must lie in the data zone */
	if (block < sbi->s_firstdatazone || block >= sbi->s_nzones) {
		printk("sysv_new_block: new block %d is not in data zone\n",
			block);
		goto Enospc;
	}

	if (count == 0) { /* the last block continues the free list */
		unsigned count;

		block += sbi->s_block_base;
		if (!(bh = sb_bread(sb, block))) {
			printk("sysv_new_block: cannot read free-list block\n");
			/* retry this same block next time */
			*sbi->s_bcache_count = cpu_to_fs16(sbi, 1);
			goto Enospc;
		}
		/* first 16 bits of the block = number of cached entries */
		count = fs16_to_cpu(sbi, *(__fs16*)bh->b_data);
		if (count > sbi->s_flc_size) {
			printk("sysv_new_block: free-list block with %d >flc_size %d entries\n", count, sbi->s_flc_size );
			brelse(bh);
			goto Enospc;
		}
		/* refill the in-core cache from the free-list block */
		*sbi->s_bcache_count = cpu_to_fs16(sbi, count);
		memcpy(sbi->s_bcache, get_chunk(sb, bh),
			count * sizeof(sysv_zone_t));
		brelse(bh);
	}
	/* Now the free list head in the superblock is valid again. */
	fs32_add(sbi, sbi->s_free_blocks, -1);
	dirty_sb(sb);
	mutex_unlock(&sbi->s_lock);
	return nr;

Enospc:
	mutex_unlock(&sbi->s_lock);
	return 0;
}
// Report whether the chunk behind `p_id` currently holds any locks.
// Fails (returning false) when the ID does not resolve to a chunk.
bool MemoryPoolDynamicStatic::is_locked(ID p_id) const {
	_THREAD_SAFE_METHOD_
	const Chunk *chunk = get_chunk(p_id);
	ERR_FAIL_COND_V(!chunk, false);
	return chunk->lock > 0;
}
// Return the description string recorded for the allocation behind
// `p_id`; an invalid ID yields the empty string.
const char *MemoryPoolDynamicStatic::get_description(ID p_id) const {
	_THREAD_SAFE_METHOD_
	const Chunk *chunk = get_chunk(p_id);
	ERR_FAIL_COND_V(!chunk, "");
	return chunk->descr;
}
// Return the size in bytes of the allocation behind `p_id`;
// an invalid ID yields 0.
size_t MemoryPoolDynamicStatic::get_size(ID p_id) const {
	_THREAD_SAFE_METHOD_
	const Chunk *chunk = get_chunk(p_id);
	ERR_FAIL_COND_V(!chunk, 0);
	return chunk->size;
}
/* Return zone `nr` (on-disk byte order) to the free list of a
   SysV-family filesystem.  The superblock caches up to s_flc_size free
   zone numbers; when the cache is full (or empty, on Coherent) it is
   flushed into the block being freed, which then becomes the new head
   of the on-disk free list. */
void sysv_free_block(struct super_block * sb, sysv_zone_t nr)
{
	struct sysv_sb_info * sbi = SYSV_SB(sb);
	struct buffer_head * bh;
	sysv_zone_t *blocks = sbi->s_bcache;
	unsigned count;
	unsigned block = fs32_to_cpu(sbi, nr);

	/*
	 * This code does not work at all for AFS (it has a bitmap
	 * free list). As AFS is supposed to be read-only no one
	 * should call this for an AFS filesystem anyway...
	 */
	if (sbi->s_type == FSTYPE_AFS)
		return;
	/* refuse to free blocks outside the data zone */
	if (block < sbi->s_firstdatazone || block >= sbi->s_nzones) {
		printk("sysv_free_block: trying to free block not in datazone\n");
		return;
	}

	mutex_lock(&sbi->s_lock);
	count = fs16_to_cpu(sbi, *sbi->s_bcache_count);

	if (count > sbi->s_flc_size) {
		printk("sysv_free_block: flc_count %d > flc_size %d\n",
			count, sbi->s_flc_size);
		mutex_unlock(&sbi->s_lock);
		return;
	}
	/* If the free list head in super-block is full, it is copied
	 * into this block being freed, ditto if it's completely empty
	 * (applies only on Coherent).
	 */
	if (count == sbi->s_flc_size || count == 0) {
		block += sbi->s_block_base;
		bh = sb_getblk(sb, block);
		if (!bh) {
			printk("sysv_free_block: getblk() failed\n");
			mutex_unlock(&sbi->s_lock);
			return;
		}
		/* write the cached entries into the freed block... */
		memset(bh->b_data, 0, sb->s_blocksize);
		*(__fs16*)bh->b_data = cpu_to_fs16(sbi, count);
		memcpy(get_chunk(sb,bh), blocks, count * sizeof(sysv_zone_t));
		mark_buffer_dirty(bh);
		set_buffer_uptodate(bh);
		brelse(bh);
		/* ...and restart the in-core cache */
		count = 0;
	}
	sbi->s_bcache[count++] = nr;
	*sbi->s_bcache_count = cpu_to_fs16(sbi, count);
	fs32_add(sbi, sbi->s_free_blocks, 1);
	dirty_sb(sb);
	mutex_unlock(&sbi->s_lock);
}
/**
 * Fetch and decode the resource behind a link: read the chunk named by
 * l.id, de-randomize it with l.seed, and wrap the bytes in a resource.
 * Any failure is logged and reported as an empty optional.
 */
fc::optional<cafs::resource> cafs::get_resource( const cafs::link& l ) {
  try {
    fc::vector<char> data = get_chunk( l.id );
    derandomize( l.seed, data );
    return cafs::resource( fc::move(data) );
  } catch( ... ) {
    wlog( "%s", fc::except_str().c_str() );
    return fc::optional<cafs::resource>();
  }
}
// Resolve `p_id` to its raw memory pointer.  The chunk must currently
// be locked -- otherwise the pool may move the memory under the caller
// -- so an unlocked chunk fails with NULL, as does an invalid ID.
void * MemoryPoolDynamicStatic::get(ID p_id) {
	_THREAD_SAFE_METHOD_
	const Chunk *chunk = get_chunk(p_id);
	ERR_FAIL_COND_V(!chunk, NULL);
	ERR_FAIL_COND_V(chunk->lock == 0, NULL);
	return chunk->mem;
}
/* Make the chunk containing `p` read-only via mprotect().
   get_chunk_size is presumably a macro or global -- TODO confirm. */
void protect_chunk(void* p)
{
    char* base = get_chunk(p);

    if (mprotect(base, get_chunk_size, PROT_READ) == -1)
        printf("mprotect failure\n");
}
/* Fill `object` with the output of callback `fn`, which writes to a
   stdio stream.  The output is spooled through a temporary file, then
   copied into the object one chunk at a time.  Any failure aborts the
   object with a 503. */
static void
fillSpecialObject(ObjectPtr object, void (*fn) (FILE *, char *), void *closure)
{
    FILE *tmp = NULL;
    char *buf = NULL;
    int rc, len, offset;

    /* someone is already filling this object */
    if (object->flags & OBJECT_INPROGRESS)
        return;

    buf = get_chunk();
    if (buf == NULL) {
        abortObject(object, 503, internAtom("Couldn't allocate chunk"));
        goto done;
    }

    tmp = tmpfile();
    if (tmp == NULL) {
        abortObject(object, 503, internAtom(pstrerror(errno)));
        goto done;
    }

    /* let the callback produce its output, then rewind for reading */
    (*fn) (tmp, closure);
    fflush(tmp);
    rewind(tmp);

    offset = 0;
    while (1) {
        len = fread(buf, 1, CHUNK_SIZE, tmp);
        if (len <= 0 && ferror(tmp)) {
            abortObject(object, 503, internAtom(pstrerror(errno)));
            goto done;
        }
        if (len <= 0)
            break;
        rc = objectAddData(object, buf, offset, len);
        if (rc < 0) {
            abortObject(object, 503,
                        internAtom("Couldn't add data to object"));
            goto done;
        }
        offset += len;
    }
    object->length = offset;

 done:
    /* release resources in every path, then wake up any waiters */
    if (buf)
        dispose_chunk(buf);
    if (tmp)
        fclose(tmp);

    notifyObject(object);
}
Error MemoryPoolDynamicStatic::lock(ID p_id) { _THREAD_SAFE_METHOD_ Chunk *c = get_chunk(p_id); ERR_FAIL_COND_V(!c,ERR_INVALID_PARAMETER); c->lock++; return OK; }
/*
 * Dump the raw bytes of the chunk containing `ptr` as hex.
 *
 * Bug fixed: the original conversion specifier "%02h" is invalid C --
 * 'h' is a length modifier with no conversion character, so the output
 * was undefined.  Each byte is now printed as an unsigned hex value
 * ("%02x" with an unsigned char cast).  The "chunck" typo in the label
 * is also corrected, and the loop index matches the size_t bound.
 */
void print_chunk(void* ptr)
{
    char* chunk = get_chunk(ptr);
    size_t chunk_size = get_chunk_size;  /* presumably a macro/global -- confirm */
    size_t i;

    printf("chunk:");
    for (i = 0; i < chunk_size; i++)
        printf(" 0x%02x", (unsigned char)chunk[i]);
}
void push(void* p) { GCAllocation* al = GCAllocation::fromUserData(p); if (isMarked(al)) return; setMark(al); *cur++ = p; if (cur == end) { chunks.push_back(start); get_chunk(); } }
/**
 * Fetch the resource described by a file_ref: read r.size bytes at
 * offset r.pos from chunk r.chunk, de-randomize them with r.seed, and
 * return them prefixed with the one-byte resource type tag.
 * Errors are re-thrown with the file_ref attached for context.
 */
fc::optional<cafs::resource> cafs::get_resource( const file_ref& r ) {
#if 0
  /* old implementation: fetch the whole chunk, derandomize everything,
     then copy out the slice */
  fc::vector<char> rd = get_chunk( r.chunk );
  //wlog( "rand %lld %s", rd.size(), fc::to_hex(rd.data()+r.pos,r.size).c_str() );
  derandomize( r.seed, rd );//, r.pos );
  //wlog( "derand %lld %s", rd.size(), fc::to_hex(rd.data()+r.pos,r.size).c_str() );
  fc::vector<char> tmp( r.size+1 );//rd.data() + r.pos, rd.data()+r.pos+r.size );
  tmp[0] = r.type;
  memcpy( tmp.data()+1, rd.data()+r.pos, r.size );
  return cafs::resource( fc::move(tmp) );
#else
  try {
     /* fetch only the slice we need, partially derandomized from r.pos */
     fc::vector<char> rd = get_chunk( r.chunk, r.pos, r.size );
//   fc::vector<char> rd2 = get_chunk( r.chunk );
//   if( 0 != memcmp( rd.data(), rd2.data()+r.pos, r.size ) ) {
//      FC_THROW_REPORT( "get_chunk failed to match");
//   }
     //wlog( "rand %lld %s", rd.size(), fc::to_hex(rd.data()+r.pos,r.size).c_str() );
     derandomize( r.seed, rd, r.pos );
//   derandomize( r.seed, rd2 );
//   if( 0 != memcmp( rd.data(), rd2.data()+r.pos, r.size ) ) {
//      slog( "r.pos %d, \n%s\n%s", r.pos, fc::to_hex( rd.data(), rd.size() ).c_str(),
//            fc::to_hex( rd2.data()+r.pos, r.size ).c_str() );
//      FC_THROW_REPORT( "derandomize failed to match", fc::value().set("r.size", r.size).set("rd.size",rd.size()));
//   }
     //wlog( "derand %lld %s", rd.size(), fc::to_hex(rd.data()+r.pos,r.size).c_str() );

     /* first byte carries the resource type, followed by the payload */
     fc::vector<char> tmp( r.size+1 );//rd.data() + r.pos, rd.data()+r.pos+r.size );
     tmp[0] = r.type;
     memcpy( tmp.data()+1, rd.data(), r.size );
     return cafs::resource( fc::move(tmp) );
  } catch ( fc::error_report& e ) {
     throw FC_REPORT_PUSH( e, "Error getting resource", fc::value().set("file_ref",r) );
  }
#endif
}
// Release the memory behind `p_id` and update the usage statistics.
// A still-locked chunk is reported but freed anyway.
void MemoryPoolDynamicStatic::free(ID p_id) {
	_THREAD_SAFE_METHOD_
	Chunk *chunk = get_chunk(p_id);
	ERR_FAIL_COND(!chunk);

	if (chunk->lock > 0) {
		ERR_PRINT("Freed ID Still locked");
	}

	total_usage -= chunk->size;
	memfree(chunk->mem);
	chunk->mem = 0;
}
/* Normally a stream_filter is not able to provide *time* seeking, since a
 * stream_filter operates on a byte stream. Thus, in order to circumvent this
 * limitation, I treat a STREAM_SET_POSITION request whose value "pos" is less
 * than FAKE_STREAM_SIZE as a *time* seek request, and more precisely a request
 * to jump at time position: pos / FAKE_STREAM_SIZE * total_video_duration.
 * For example, if pos == 500, it would be interpreted as a request to jump at
 * the middle of the video.
 * If pos > 1000, it would be treated as a normal byte seek request. That means
 * the demux is not able to request a byte seek with 0 <= pos <= 1000
 * (unless it is in the current chunk), but that doesn't matter in practice.
 * Of course this is a bit hack-ish, but if Smooth Streaming doesn't die, its
 * implementation will be moved to a access_demux module, and this hack won't
 * be needed anymore (among others). */
static int chunk_Seek( stream_t *s, const uint64_t pos )
{
    stream_sys_t *p_sys = s->p_sys;

    /* already at the requested byte offset */
    if( pos == p_sys->playback.boffset )
        return VLC_SUCCESS;

    chunk_t *chunk = get_chunk( s, false );
    if( chunk == NULL )
        return VLC_EGENERIC;

    bool inside_chunk = pos >= chunk->offset &&
            pos < (chunk->offset + chunk->size) ? true : false;

    if( inside_chunk )
    {
        /* plain byte seek within the buffered chunk */
        chunk->read_pos = pos - chunk->offset;
        p_sys->playback.boffset = pos;
        return VLC_SUCCESS;
    }
    else
    {
        if( p_sys->b_live )
        {
            msg_Err( s, "Cannot seek outside the current chunk for a live stream" );
            return VLC_EGENERIC;
        }

        msg_Info( s, "Seeking outside the current chunk" );
        assert( pos <= FAKE_STREAM_SIZE );

        /* translate the fake byte position into a time position and
           wake up the download thread to restart from there */
        vlc_mutex_lock( &p_sys->download.lock_wait );

        p_sys->b_tseek = true;
        p_sys->time_pos = p_sys->vod_duration * pos / FAKE_STREAM_SIZE;
        for( int i = 0; i < 3; i++ )
            p_sys->download.lead[i] = 0;
        p_sys->playback.toffset = 0;

        vlc_cond_signal( &p_sys->download.wait);
        vlc_mutex_unlock( &p_sys->download.lock_wait );

        return VLC_SUCCESS;
    }
}
/* The MP4 demux should never have to peek outside the current chunk */
static int Peek( stream_t *s, const uint8_t **pp_peek, unsigned i_peek )
{
    chunk_t *chunk = get_chunk( s, true );
    if( !chunk || !chunk->data )
        return 0;

    /* bytes still unread in this chunk */
    int bytes = chunk->size - chunk->read_pos;
    assert( bytes > 0 );

    /* we can only serve what is left in the chunk; a caller asking for
       more still gets just `bytes` back */
    if( (unsigned)bytes < i_peek )
    {
        msg_Err( s, "could not peek %u bytes, only %i!", i_peek, bytes );
    }
    msg_Dbg( s, "peeking at chunk %u!", chunk->sequence );
    *pp_peek = chunk->data + chunk->read_pos;

    return bytes;
}
/* PUBLIC */
/* Append the C string `s` to the string buffer `sb`, growing the
   buffer's chunk list whenever a chunk boundary (CHUNK_SIZE chars) is
   crossed.
   NOTE(review): this definition is truncated in the visible source --
   the for-loop body and the function continue beyond this excerpt. */
void sb_append(String_buf sb, char *s)
{
    int i;
    int n = sb->size;
    /* walk to the last chunk of the buffer's chunk list */
    Chunk last = sb->first;
    while (last != NULL && last->next != NULL)
        last = last->next;
    for (i = 0; s[i] != '\0'; i++) {
        /* at every chunk boundary, link in a freshly allocated chunk */
        if (n % CHUNK_SIZE == 0) {
            Chunk new = get_chunk();
            if (last != NULL)
                last->next = new;
            else
                sb->first = new;
            last = new;
        }
/* Exercise the heap/chunk helpers:
   Q1: round-trip an int through set_int()/get_int().
   Q2: serialize a chunk descriptor into the heap with set_chunk() and
       read it back into a second descriptor with get_chunk(). */
int main(int argc, char *argv[])
{
    unsigned int i=5;
    unsigned int j;
    chunk c,c2;
    unsigned char *ptr=heap;

    /* Q1 */
    printf("i: %d\n",get_int(&i));
    set_int(&j, i);
    printf("j: %d\n",get_int(&j));

    /* Q2 */
    c.free=0;
    c.size=5;
    c.addr=0;
    c.next_chunk=0;
    c.previous_chunk=0;
    set_chunk(&c, ptr);
    get_chunk(&c2,ptr);

    return 0;
}
Error MemoryPoolDynamicStatic::realloc(ID p_id, size_t p_amount) { _THREAD_SAFE_METHOD_ Chunk *c = get_chunk(p_id); ERR_FAIL_COND_V(!c, ERR_INVALID_PARAMETER); ERR_FAIL_COND_V(c->lock > 0, ERR_LOCKED); void *new_mem = memrealloc(c->mem, p_amount); ERR_FAIL_COND_V(!new_mem, ERR_OUT_OF_MEMORY); total_usage -= c->size; c->mem = new_mem; c->size = p_amount; total_usage += c->size; if (total_usage > max_usage) max_usage = total_usage; return OK; }
/* Scan the adapter for a 32-bit `pattern` (compared under `mask`),
 * starting at `offset` and considering `n` candidate positions.
 * Returns the offset of the first match, or -1 if none is found.
 *
 * Bug fixed: the per-buffer fast-scan length was computed as
 * MIN(bufsize - skip - 4, 0), which can never be positive, so the fast
 * path was dead code and every position went through scan_slow().  The
 * intended bound is the smaller of the positions whose 4 bytes lie
 * entirely inside the current buffer and the candidates still to be
 * examined (n - j).
 */
int
gst_adapter_masked_scan_uint32_compat (GstAdapter * adapter, guint32 mask,
    guint32 pattern, guint offset, guint n)
{
  GSList *g;
  int j;
  int k;
  int skip;
  int m;

  g_return_val_if_fail (n >= 0, -1);
  g_return_val_if_fail (offset >= 0, -1);
  g_return_val_if_fail (offset + n + 4 <= adapter->size, -1);

  /* locate the buffer containing `offset`; `skip` is the position inside it */
  g = get_chunk (adapter, offset, &skip);

  j = 0;
  while (j < n) {
    /* candidate positions whose 4 bytes fit entirely in this buffer,
     * clamped to the candidates we still have to examine; cast to int
     * so a nearly exhausted buffer does not wrap unsigned */
    m = MIN ((int) GST_BUFFER_SIZE (GST_BUFFER_CAST (g->data)) - skip - 4,
        (int) (n - j));
    if (m > 0) {
      k = scan_fast (GST_BUFFER_DATA (GST_BUFFER_CAST (g->data)) + skip,
          pattern, mask, m);
      if (k < m) {
        return offset + j + k;
      }
      j += m;
      skip += m;
    } else {
      /* the 4 bytes straddle a buffer boundary: test one position slowly */
      if (scan_slow (adapter, g, skip, pattern, mask)) {
        return offset + j;
      }
      j++;
      skip++;
    }
    /* advance to the next buffer once this one is consumed */
    if (skip >= GST_BUFFER_SIZE (GST_BUFFER_CAST (g->data))) {
      g = g->next;
      skip = 0;
    }
  }
  return -1;
}
/* Open the client side of a tunnel through a parent proxy: format a
   CONNECT request (plus proxy auth headers if configured) into buf1 and
   hand it to the dispatcher.  Always returns 1. */
static int
tunnelHandlerParent(int fd, TunnelPtr tunnel)
{
    char *message;
    int n;

    if(tunnel->buf1.buf == NULL)
        tunnel->buf1.buf = get_chunk();
    if(tunnel->buf1.buf == NULL) {
        message = "Couldn't allocate buffer";
        goto fail;
    }
    /* buf1 must be empty: we only support one in-flight CONNECT */
    if(tunnel->buf1.tail != tunnel->buf1.head) {
        message = "Pipelined connect to parent proxy not implemented";
        goto fail;
    }

    /* no trailing CRLF here: buildServerAuthHeaders presumably emits a
       leading CRLF before the auth header -- verify against auth.c */
    n = snnprintf(tunnel->buf1.buf, tunnel->buf1.tail, CHUNK_SIZE,
                  "CONNECT %s:%d HTTP/1.1",
                  tunnel->hostname->string, tunnel->port);
    if (parentAuthCredentials)
        n = buildServerAuthHeaders(tunnel->buf1.buf, n, CHUNK_SIZE,
                                   parentAuthCredentials);
    /* terminate the request line and the header block */
    n = snnprintf(tunnel->buf1.buf, n, CHUNK_SIZE, "\r\n\r\n");
    if(n < 0) {
        message = "Buffer overflow";
        goto fail;
    }
    tunnel->buf1.head = n;
    tunnelDispatch(tunnel);
    return 1;

 fail:
    CLOSE(fd);
    tunnel->fd2 = -1;
    tunnelError(tunnel, 501, internAtom(message));
    return 1;
}
/* Finish establishing a tunnel: either delegate to the parent-proxy
   handler, or queue the "200 Tunnel established" reply for the client
   and start dispatching.  Always returns 1. */
static int
tunnelHandlerCommon(int fd, TunnelPtr tunnel)
{
    const char *message = "HTTP/1.1 200 Tunnel established\r\n\r\n";
    size_t len;

    tunnel->fd2 = fd;

    if(parentHost)
        return tunnelHandlerParent(fd, tunnel);

    if(tunnel->buf2.buf == NULL)
        tunnel->buf2.buf = get_chunk();
    if(tunnel->buf2.buf == NULL) {
        CLOSE(fd);
        tunnelError(tunnel, 501, internAtom("Couldn't allocate buffer"));
        return 1;
    }

    len = MIN(CHUNK_SIZE - 1, strlen(message));
    memcpy(tunnel->buf2.buf, message, len);
    tunnel->buf2.head = len;
    tunnelDispatch(tunnel);
    return 1;
}
/* The MP4 demux should never have to peek outside the current chunk */
static int Peek( stream_t *s, const uint8_t **pp_peek, unsigned i_peek )
{
    chunk_t *chunk = get_chunk( s, true, NULL );
    if( !chunk || !chunk->data )
    {
        /* distinguish "no chunk at all" from "chunk without payload" */
        if(!chunk)
            msg_Err( s, "cannot peek: no data" );
        else
            msg_Err( s, "cannot peek: chunk pos %"PRIu64"", chunk->read_pos );
        return 0;
    }

    /* bytes still unread in this chunk */
    int bytes = chunk->size - chunk->read_pos;
    assert( bytes > 0 );

    /* we can only serve what is left in the chunk; a caller asking for
       more still gets just `bytes` back */
    if( (unsigned)bytes < i_peek )
    {
        msg_Err( s, "could not peek %u bytes, only %i!", i_peek, bytes );
    }
    msg_Dbg( s, "peeking at chunk %"PRIu64, chunk->start_time );
    *pp_peek = chunk->data + chunk->read_pos;

    return bytes;
}
int GIFFManager::get_chunks_number(const GUTF8String &name)
      // Returns the number of chunks with given fully qualified name
{
   DEBUG_MSG("GIFFManager::get_chunks_number(): name='" << name << "'\n");
   DEBUG_MAKE_INDENT(3);

   int retval;
   const int last_dot=name.rsearch('.');
   if (last_dot<0)
   {
      // unqualified name: count directly under the top-level chunk
      retval=top_level->get_chunks_number(name);
   }else if(!last_dot)
   {
      // name of the form ".TOP": compare the remainder (after the dot)
      // against the top-level chunk's own name
      retval=(top_level->get_name()==name.substr(1,(unsigned int)-1))?1:0;
   }else
   {
      // qualified name: resolve the parent chunk from the prefix before
      // the last dot, then count matches of the final component in it
      GP<GIFFChunk> chunk=get_chunk(name.substr(0,last_dot));
      retval=( chunk
         ?(chunk->get_chunks_number(name.substr(last_dot+1,(unsigned int)-1)))
         :0 );
   }
   return retval;
}