/*
 * Scan the allocation lookup table for a spot holding `size` free bytes.
 *
 * addr  - starting address to probe (mapped to a table index via PTR)
 * size  - number of bytes the caller needs
 * begin - nonzero: scan forward from OFF1+addr-PTR up to (exclusive) OFF2;
 *         zero:    scan backward from addr-PTR down to (inclusive) OFF1
 *
 * Both index and address advance in 4-byte steps; positions failing
 * validaddr() or already marked in lookup[] are skipped.  Returns the
 * table index of the first suitable spot, or 0 when the scan runs off
 * the table.
 * NOTE(review): freeblock(idx,size) is used here as a predicate ("is
 * this span free?"), not a deallocator -- confirm against its definition.
 */
int findblock(int addr, int size, int begin)
{
    int idx;
    int ptr = addr;

    if (begin) {
        /* forward scan */
        idx = OFF1 + addr - PTR;
        for (;;) {
            while ((!validaddr(ptr) || lookup[idx]) && idx < OFF2) {
                idx += 4;
                ptr += 4;
            }
            if (idx >= OFF2)
                return 0;               /* ran past the upper bound */
            if (freeblock(idx, size))
                return idx;
            idx += 4;
            ptr += 4;
        }
    } else {
        /* backward scan */
        idx = addr - PTR;
        for (;;) {
            while ((!validaddr(ptr) || lookup[idx]) && idx > OFF1) {
                idx -= 4;
                ptr -= 4;
            }
            if (idx < OFF1)
                return 0;               /* ran past the lower bound */
            if (freeblock(idx, size))
                return idx;
            idx -= 4;
            ptr -= 4;
        }
    }
}
/*
 * Locate and claim a block arrangement starting near sptr.
 *
 * First call (gfirst != 0): claims a single 0x2c-byte block at a valid
 * address.  Subsequent calls: claims a 4-byte block plus four satellite
 * blocks at fixed offsets (-0x18, -0x0e, +0x0c, +0x10) that must all be
 * free, and returns the index of the -0x18 block.  On every failed
 * candidate the search restarts 4 bytes further on.  Returns 0 when
 * findblock() finds nothing.  Always clears gfirst before returning.
 */
int findsblock(int sptr)
{
    int optr = sptr;
    int sidx;
    int size = gfirst ? 0x2c : 0x04;

    while ((sidx = findblock(sptr, size, 1)) != 0) {
        sptr = IDX2PTR(sidx);
        if (gfirst) {
            if (validaddr(sptr)) {
                ALLOCBLOCK(sidx, size);
                break;                          /* claimed the big block */
            }
            sptr = optr;                        /* candidate rejected */
        } else {
            if (validaddr(sptr - 0x18) && freeblock(sidx - 0x18, 4) &&
                freeblock(sidx + 0x0c, 4) && freeblock(sidx + 0x10, 4) &&
                freeblock(sidx - 0x0e, 4)) {
                /* claim the whole constellation around sidx */
                ALLOCBLOCK(sidx - 0x18, 4);
                ALLOCBLOCK(sidx - 0x0e, 2);
                ALLOCBLOCK(sidx, 4);
                ALLOCBLOCK(sidx + 0x0c, 4);
                ALLOCBLOCK(sidx + 0x10, 4);
                sidx -= 0x18;                   /* report the leading block */
                break;
            }
            sptr = optr;                        /* candidate rejected */
        }
        sptr += 4;                              /* advance and retry */
        optr = sptr;
    }
    gfirst = 0;
    return sidx;
}
void *luaM_realloc (void *block, unsigned long size) { unsigned long realsize = HEADER+size+MARKSIZE; if (realsize != (size_t)realsize) lua_error("memory allocation error: block too big"); if (size == 0) { freeblock(block); return NULL; } else { char *newblock = malloc(realsize); int i; if (block) { unsigned long oldsize = *blocksize(block); if (oldsize > size) oldsize = size; memcpy(newblock+HEADER, block, oldsize); freeblock(block); /* erase (and check) old copy */ } if (newblock == NULL) lua_error(memEM); totalmem += size; numblocks++; *(unsigned long *)newblock = size; for (i=0;i<MARKSIZE;i++) *(newblock+HEADER+size+i) = MARK+i; return newblock+HEADER; } }
static void *debug_realloc (void *block, size_t size) { if (size == 0) { freeblock(block); return NULL; } else if (memdebug_total+size > memdebug_memlimit) return NULL; /* to test memory allocation errors */ else { size_t realsize = HEADER+size+MARKSIZE; char *newblock = (char *)(malloc)(realsize); /* alloc a new block */ int i; if (realsize < size) return NULL; /* overflow! */ if (newblock == NULL) return NULL; if (block) { size_t oldsize = *blocksize(block); if (oldsize > size) oldsize = size; memcpy(newblock+HEADER, block, oldsize); freeblock(block); /* erase (and check) old copy */ } memdebug_total += size; if (memdebug_total > memdebug_maxmem) memdebug_maxmem = memdebug_total; memdebug_numblocks++; *(unsigned long *)newblock = size; for (i=0;i<MARKSIZE;i++) *(newblock+HEADER+size+i) = (char)(MARK+i); return newblock+HEADER; } }
/* deque.popleft() -- remove and return the leftmost item; raises
 * IndexError on an empty deque.
 * The reference stored in leftblock->data[leftindex] is handed to the
 * caller as-is (ownership transfer, hence no Py_INCREF here).
 * When leftindex walks past the end of its block:
 *   - if the deque just became empty, the left/right indices are
 *     re-centered inside the sole remaining block instead of freeing it
 *     (avoids a free/alloc cycle on alternating push/pop);
 *   - otherwise the exhausted block is freed and leftblock advances to
 *     its right neighbour.
 * NOTE(review): state++ presumably invalidates active iterators --
 * confirm against the deque iterator implementation. */
static PyObject * deque_popleft(dequeobject *deque, PyObject *unused) { PyObject *item; block *prevblock; if (deque->len == 0) { PyErr_SetString(PyExc_IndexError, "pop from an empty deque"); return NULL; } assert(deque->leftblock != NULL); item = deque->leftblock->data[deque->leftindex]; deque->leftindex++; deque->len--; deque->state++; if (deque->leftindex == BLOCKLEN) { if (deque->len == 0) { assert(deque->leftblock == deque->rightblock); assert(deque->leftindex == deque->rightindex+1); /* re-center instead of freeing a block */ deque->leftindex = CENTER + 1; deque->rightindex = CENTER; } else { assert(deque->leftblock != deque->rightblock); prevblock = deque->leftblock->rightlink; freeblock(deque->leftblock); assert(prevblock != NULL); prevblock->leftlink = NULL; deque->leftblock = prevblock; deque->leftindex = 0; } } return item; }
/* Impulse Tracker 2.14-style 8-bit sample decompressor.  Reads blocks of
 * compressed data with readblock(), then decodes values of a variable
 * bit width via readbits(); the width is changed in-stream by three
 * escape encodings (width < 7, width 7-8, width == 9).  Any other width
 * is treated as corrupt input: freeblock() is called and -1 returned.
 * NOTE(review): this definition is truncated in this view (it ends
 * mid-statement after "else"), so it is documented but otherwise left
 * byte-identical. */
// if we do memset, dumb_silence() would be neater... static int decompress8(DUMBFILE *f, signed char *data, int len, int cmwt) { int blocklen, blockpos; byte bitwidth; word val; char d1, d2; memset(data, 0, len * sizeof(*data)); while (len > 0) { //Read a block of compressed data: if (readblock(f)) return -1; //Set up a few variables blocklen = (len < 0x8000) ? len : 0x8000; //Max block length is 0x8000 bytes blockpos = 0; bitwidth = 9; d1 = d2 = 0; //Start the decompression: while (blockpos < blocklen) { //Read a value: val = (word)readbits(bitwidth); //Check for bit width change: if (bitwidth < 7) { //Method 1: if (val == (1 << (bitwidth - 1))) { val = (word)readbits(3) + 1; bitwidth = (val < bitwidth) ? val : val + 1; continue; } } else if (bitwidth < 9) { //Method 2 byte border = (0xFF >> (9 - bitwidth)) - 4; if (val > border && val <= (border + 8)) { val -= border; bitwidth = (val < bitwidth) ? val : val + 1; continue; } } else if (bitwidth == 9) { //Method 3 if (val & 0x100) { bitwidth = (val + 1) & 0xFF; continue; } } else { //Illegal width, abort ? freeblock(); return -1; } //Expand the value to signed byte: { char v; //The sample value: if (bitwidth < 8) { byte shift = 8 - bitwidth; v = (val << shift); v >>= shift; } else
/*
 * Search backward from fptr for a 4-byte block whose companions at the
 * caller-supplied offsets (-i3, -i2-i3, -i2-i3+i1) are also free and at
 * valid addresses; claim all four together.  Each failed candidate moves
 * the search 4 bytes lower.  Returns the claimed index, or 0 when
 * findblock() finds nothing.
 */
int findfblock(int fptr, int i1, int i2, int i3)
{
    int optr = fptr;
    int fidx;

    while ((fidx = findblock(fptr, 4, 0)) != 0) {
        fptr = IDX2PTR(fidx);
        if (validaddr(fptr - i2) && validaddr(fptr - i2 - i3) &&
            freeblock(fidx - i3, 4) && freeblock(fidx - i2 - i3, 4) &&
            freeblock(fidx - i2 - i3 + i1, 4)) {
            /* all four positions check out -- claim them */
            ALLOCBLOCK(fidx, 4);
            ALLOCBLOCK(fidx - i3, 4);
            ALLOCBLOCK(fidx - i2 - i3, 4);
            ALLOCBLOCK(fidx - i2 - i3 + i1, 4);
            break;
        }
        fptr = optr;        /* candidate rejected */
        fptr -= 4;          /* step the search backward */
        optr = fptr;
    }
    return fidx;
}
/*
 * Walk the whole buffer array and release every buffer that holds data
 * (lmfs_bytes > 0) but is not referenced (lmfs_count == 0), logging the
 * in-use count beforehand and the freed totals afterwards.
 */
static void free_unused_blocks(void)
{
    struct buf *bp;
    int nblocks = 0;
    int nbytes = 0;

    printf("libminixfs: freeing; %d blocks in use\n", bufs_in_use);

    for (bp = &buf[0]; bp < &buf[nr_bufs]; bp++) {
        if (bp->lmfs_bytes > 0 && bp->lmfs_count == 0) {
            nblocks++;
            nbytes += bp->lmfs_bytes;
            freeblock(bp);
        }
    }

    printf("libminixfs: freeing; %d blocks, %d bytes\n", nblocks, nbytes);
}
void CMPage::Free(void* ptr) { // 检测Pool是否分配以及待释放指针是否为空, 以防止发生错误 if (!ptr) return; if (Is_ContainsPointer(ptr) && isaligned(ptr, m_align_size)) { // 将释放的内存还给Pool freeblock((FreeBlock*)ptr); } else { assert(false && "object not allocated from this pool"); } }
/* Partition the instruction stream of C into basic blocks (starting from
 * instruction 0 and following each block's branch targets), counting
 * visits per instruction in visit[].  Afterwards, warn once per run of
 * unreachable instructions that carry a line number, invoke the peephole
 * optimizer, and return the index of the last visited instruction.
 * NOTE(review): calloc() is called as (size, count) -- harmless but
 * reversed from the conventional (count, size).  The calloc/realloc
 * return values are not checked for NULL, and `block = realloc(block, …)`
 * would leak on failure -- confirm whether OOM handling matters here. */
int EiC_analyseCode(code_t *C) { /* returns the index to the last visited instruction */ block_t *block = NULL; int nb = 0; int i,j,rtn; int *visit; visit = calloc(sizeof(*visit),nextinst(C)+1); block = realloc(block,(nb+1)*sizeof(*block)); block[nb++] = initblock(C,0,visit); for(i=0;i<nb;++i) { for(j=0;j<block[i].nb;++j) { if(!visit[block[i].branch[j]]) { block = realloc(block,(nb+1)*sizeof(*block)); block[nb++] = initblock(C,block[i].branch[j],visit); } else visit[block[i].branch[j]]++; } } rtn = 0; for(i=0;i<=nextinst(C);) if(visit[i]) rtn = i++; else if(i < nextinst(C) && instline(C,i)) { EiC_warningerror("Unreachable code at line %d",instline(C,i)); for(;i<nextinst(C) && !visit[i];i++) ; } else i++; EiC_peephole(C,visit); /******* for(i=0;i<nextinst(C);++i) if(!visit[i]) setopcode(C,i,empty); *******/ freeblock(block,nb); free(visit); return rtn; }
/* NOTE(review): kept byte-identical -- the LRU/hash bookkeeping below
 * depends on exact statement order.  Flow: hash-probe for (dev, block);
 * on a live hit, lock the buffer, raise its refcount, (re)bind the inode
 * association and return it.  On a VM-evicted hit the buffer is
 * invalidated and reused; on a miss the LRU front buffer is taken.
 * Either way the chosen buffer is unhashed, freeblock()'d, rebound to
 * (dev, block), rehashed, then filled from the VM cache if possible,
 * otherwise freshly allocated and read from disk -- unless only_search
 * is PREFETCH (no i/o, dev left unset) or NO_READ (caller overwrites). */
/*===========================================================================* * lmfs_get_block_ino * *===========================================================================*/ struct buf *lmfs_get_block_ino(dev_t dev, block_t block, int only_search, ino_t ino, u64_t ino_off) { /* Check to see if the requested block is in the block cache. If so, return * a pointer to it. If not, evict some other block and fetch it (unless * 'only_search' is 1). All the blocks in the cache that are not in use * are linked together in a chain, with 'front' pointing to the least recently * used block and 'rear' to the most recently used block. If 'only_search' is * 1, the block being requested will be overwritten in its entirety, so it is * only necessary to see if it is in the cache; if it is not, any free buffer * will do. It is not necessary to actually read the block in from disk. * If 'only_search' is PREFETCH, the block need not be read from the disk, * and the device is not to be marked on the block, so callers can tell if * the block returned is valid. * In addition to the LRU chain, there is also a hash chain to link together * blocks whose block numbers end with the same bit strings, for fast lookup. */ int b; static struct buf *bp; u64_t dev_off = (u64_t) block * fs_block_size; struct buf *prev_ptr; assert(buf_hash); assert(buf); assert(nr_bufs > 0); ASSERT(fs_block_size > 0); assert(dev != NO_DEV); if((ino_off % fs_block_size)) { printf("cache: unaligned lmfs_get_block_ino ino_off %llu\n", ino_off); util_stacktrace(); } /* Search the hash chain for (dev, block). */ b = BUFHASH(block); bp = buf_hash[b]; while (bp != NULL) { if (bp->lmfs_blocknr == block && bp->lmfs_dev == dev) { if(bp->lmfs_flags & VMMC_EVICTED) { /* We had it but VM evicted it; invalidate it. 
*/ ASSERT(bp->lmfs_count == 0); ASSERT(!(bp->lmfs_flags & VMMC_BLOCK_LOCKED)); ASSERT(!(bp->lmfs_flags & VMMC_DIRTY)); bp->lmfs_dev = NO_DEV; bp->lmfs_bytes = 0; bp->data = NULL; break; } ASSERT(bp->lmfs_needsetcache == 0); /* Block needed has been found. */ if (bp->lmfs_count == 0) { rm_lru(bp); ASSERT(!(bp->lmfs_flags & VMMC_BLOCK_LOCKED)); bp->lmfs_flags |= VMMC_BLOCK_LOCKED; } raisecount(bp); ASSERT(bp->lmfs_bytes == fs_block_size); ASSERT(bp->lmfs_dev == dev); ASSERT(bp->lmfs_dev != NO_DEV); ASSERT(bp->lmfs_flags & VMMC_BLOCK_LOCKED); ASSERT(bp->data); if(ino != VMC_NO_INODE) { if(bp->lmfs_inode == VMC_NO_INODE || bp->lmfs_inode != ino || bp->lmfs_inode_offset != ino_off) { bp->lmfs_inode = ino; bp->lmfs_inode_offset = ino_off; bp->lmfs_needsetcache = 1; } } return(bp); } else { /* This block is not the one sought. */ bp = bp->lmfs_hash; /* move to next block on hash chain */ } } /* Desired block is not on available chain. Find a free block to use. */ if(bp) { ASSERT(bp->lmfs_flags & VMMC_EVICTED); } else { if ((bp = front) == NULL) panic("all buffers in use: %d", nr_bufs); } assert(bp); rm_lru(bp); /* Remove the block that was just taken from its hash chain. */ b = BUFHASH(bp->lmfs_blocknr); prev_ptr = buf_hash[b]; if (prev_ptr == bp) { buf_hash[b] = bp->lmfs_hash; } else { /* The block just taken is not on the front of its hash chain. 
*/ while (prev_ptr->lmfs_hash != NULL) if (prev_ptr->lmfs_hash == bp) { prev_ptr->lmfs_hash = bp->lmfs_hash; /* found it */ break; } else { prev_ptr = prev_ptr->lmfs_hash; /* keep looking */ } } freeblock(bp); bp->lmfs_inode = ino; bp->lmfs_inode_offset = ino_off; bp->lmfs_flags = VMMC_BLOCK_LOCKED; bp->lmfs_needsetcache = 0; bp->lmfs_dev = dev; /* fill in device number */ bp->lmfs_blocknr = block; /* fill in block number */ ASSERT(bp->lmfs_count == 0); raisecount(bp); b = BUFHASH(bp->lmfs_blocknr); bp->lmfs_hash = buf_hash[b]; buf_hash[b] = bp; /* add to hash list */ assert(dev != NO_DEV); /* Block is not found in our cache, but we do want it * if it's in the vm cache. */ assert(!bp->data); assert(!bp->lmfs_bytes); if(vmcache) { if((bp->data = vm_map_cacheblock(dev, dev_off, ino, ino_off, &bp->lmfs_flags, fs_block_size)) != MAP_FAILED) { bp->lmfs_bytes = fs_block_size; ASSERT(!bp->lmfs_needsetcache); return bp; } } bp->data = NULL; /* Not in the cache; reserve memory for its contents. */ lmfs_alloc_block(bp); assert(bp->data); if(only_search == PREFETCH) { /* PREFETCH: don't do i/o. */ bp->lmfs_dev = NO_DEV; } else if (only_search == NORMAL) { read_block(bp); } else if(only_search == NO_READ) { /* This block will be overwritten by new contents. */ } else panic("unexpected only_search value: %d", only_search); assert(bp->data); return(bp); /* return the newly acquired block */ }
//function initializes the file system: takes path, total number of blocks and total number of //inodes as input. Command : initfs <directory_name_in_foreign_operating_system> <total_number_of_blocks> <total_number_of_inodes> //The given path directory is where the file system begins. int initialize_fs(char* path, unsigned short total_blcks,unsigned short total_inodes ) { char buffer[BLOCK_SIZE]; int bytes_written; if((total_inodes%16) == 0) super.isize = total_inodes/16; else super.isize = (total_inodes/16) + 1; super.fsize = total_blcks; unsigned short i = 0; if((fd = open(path,O_RDWR|O_CREAT,0600))== -1) { printf("\n open() failed with error [%s]\n",strerror(errno)); return 1; } for (i = 0; i<100; i++) super.free[i] = 0; //initializing free array to 0 to remove junk data. free array will be stored with data block numbers shortly. super.nfree = 0; super.ninode = 100; for (i=0; i < 100; i++) super.inode[i] = i; //initializing inode array to store inumbers. super.flock = 'f'; //flock,ilock and fmode are not used. super.ilock = 'i'; //initializing to fill up block super.fmod = 'f'; super.time[0] = 0; super.time[1] = 0; lseek(fd,BLOCK_SIZE,0); // Writing to super block if((bytes_written =write(fd,&super,BLOCK_SIZE)) < BLOCK_SIZE) { printf("\nERROR : error in writing the super block"); return 0; } // writing zeroes to all inodes in ilist for (i=0; i<BLOCK_SIZE; i++) buffer[i] = 0; for (i=0; i < super.isize; i++) write(fd,buffer,BLOCK_SIZE); // calling chaining data blocks procedure chaindatablocks(total_blcks); //filling free array to first 100 data blocks for (i=0; i<100; i++) freeblock(i+2+super.isize); // Make root directory create_root(); return 1; }
/* NOTE(review): kept byte-identical -- the LRU/hash bookkeeping below
 * depends on exact statement order.  Flow: find_block() for (dev, block);
 * on a live hit, verify the byte count, lock, raise the refcount, rebind
 * the inode association and return OK.  A VM-evicted hit is invalidated
 * and its buffer reused; on a miss the LRU front buffer is taken.  The
 * chosen buffer is unhashed, freeblock()'d, rebound, rehashed, then
 * filled from the VM cache (skipped for NO_READ), else: PEEK returns
 * ENOENT without allocating, NORMAL allocates and reads from disk
 * (propagating read errors), NO_READ just allocates. */
/*===========================================================================* * get_block_ino * *===========================================================================*/ static int get_block_ino(struct buf **bpp, dev_t dev, block64_t block, int how, ino_t ino, u64_t ino_off, size_t block_size) { /* Check to see if the requested block is in the block cache. The requested * block is identified by the block number in 'block' on device 'dev', counted * in the file system block size. The amount of data requested for this block * is given in 'block_size', which may be less than the file system block size * iff the requested block is the last (partial) block on a device. Note that * the given block size does *not* affect the conversion of 'block' to a byte * offset! Either way, if the block could be obtained, either from the cache * or by reading from the device, return OK, with a pointer to the buffer * structure stored in 'bpp'. If not, return a negative error code (and no * buffer). If necessary, evict some other block and fetch the contents from * disk (if 'how' is NORMAL). If 'how' is NO_READ, the caller intends to * overwrite the requested block in its entirety, so it is only necessary to * see if it is in the cache; if it is not, any free buffer will do. If 'how' * is PEEK, the function returns the block if it is in the cache or the VM * cache, and an ENOENT error code otherwise. * In addition to the LRU chain, there is also a hash chain to link together * blocks whose block numbers end with the same bit strings, for fast lookup. */ int b, r; static struct buf *bp; uint64_t dev_off; struct buf *prev_ptr; assert(buf_hash); assert(buf); assert(nr_bufs > 0); ASSERT(fs_block_size > 0); assert(dev != NO_DEV); assert(block <= UINT64_MAX / fs_block_size); dev_off = block * fs_block_size; if((ino_off % fs_block_size)) { printf("cache: unaligned lmfs_get_block_ino ino_off %llu\n", ino_off); util_stacktrace(); } /* See if the block is in the cache. 
If so, we can return it right away. */ bp = find_block(dev, block); if (bp != NULL && !(bp->lmfs_flags & VMMC_EVICTED)) { ASSERT(bp->lmfs_dev == dev); ASSERT(bp->lmfs_dev != NO_DEV); /* The block must have exactly the requested number of bytes. */ if (bp->lmfs_bytes != block_size) return EIO; /* Block needed has been found. */ if (bp->lmfs_count == 0) { rm_lru(bp); ASSERT(bp->lmfs_needsetcache == 0); ASSERT(!(bp->lmfs_flags & VMMC_BLOCK_LOCKED)); /* FIXME: race condition against the VMMC_EVICTED check */ bp->lmfs_flags |= VMMC_BLOCK_LOCKED; } raisecount(bp); ASSERT(bp->lmfs_flags & VMMC_BLOCK_LOCKED); ASSERT(bp->data); if(ino != VMC_NO_INODE) { if(bp->lmfs_inode == VMC_NO_INODE || bp->lmfs_inode != ino || bp->lmfs_inode_offset != ino_off) { bp->lmfs_inode = ino; bp->lmfs_inode_offset = ino_off; bp->lmfs_needsetcache = 1; } } *bpp = bp; return OK; } /* We had the block in the cache but VM evicted it; invalidate it. */ if (bp != NULL) { assert(bp->lmfs_flags & VMMC_EVICTED); ASSERT(bp->lmfs_count == 0); ASSERT(!(bp->lmfs_flags & VMMC_BLOCK_LOCKED)); ASSERT(!(bp->lmfs_flags & VMMC_DIRTY)); bp->lmfs_dev = NO_DEV; bp->lmfs_bytes = 0; bp->data = NULL; } /* Desired block is not on available chain. Find a free block to use. */ if(bp) { ASSERT(bp->lmfs_flags & VMMC_EVICTED); } else { if ((bp = front) == NULL) panic("all buffers in use: %d", nr_bufs); } assert(bp); rm_lru(bp); /* Remove the block that was just taken from its hash chain. */ b = BUFHASH(bp->lmfs_blocknr); prev_ptr = buf_hash[b]; if (prev_ptr == bp) { buf_hash[b] = bp->lmfs_hash; } else { /* The block just taken is not on the front of its hash chain. 
*/ while (prev_ptr->lmfs_hash != NULL) if (prev_ptr->lmfs_hash == bp) { prev_ptr->lmfs_hash = bp->lmfs_hash; /* found it */ break; } else { prev_ptr = prev_ptr->lmfs_hash; /* keep looking */ } } freeblock(bp); bp->lmfs_inode = ino; bp->lmfs_inode_offset = ino_off; bp->lmfs_flags = VMMC_BLOCK_LOCKED; bp->lmfs_needsetcache = 0; bp->lmfs_dev = dev; /* fill in device number */ bp->lmfs_blocknr = block; /* fill in block number */ ASSERT(bp->lmfs_count == 0); raisecount(bp); b = BUFHASH(bp->lmfs_blocknr); bp->lmfs_hash = buf_hash[b]; buf_hash[b] = bp; /* add to hash list */ assert(dev != NO_DEV); /* The block is not found in our cache, but we do want it if it's in the VM * cache. The exception is NO_READ, purely for context switching performance * reasons. NO_READ is used for 1) newly allocated blocks, 2) blocks being * prefetched, and 3) blocks about to be fully overwritten. In the first two * cases, VM will not have the block in its cache anyway, and for the third * we save on one VM call only if the block is in the VM cache. */ assert(!bp->data); assert(!bp->lmfs_bytes); if (how != NO_READ && vmcache) { if((bp->data = vm_map_cacheblock(dev, dev_off, ino, ino_off, &bp->lmfs_flags, roundup(block_size, PAGE_SIZE))) != MAP_FAILED) { bp->lmfs_bytes = block_size; ASSERT(!bp->lmfs_needsetcache); *bpp = bp; return OK; } } bp->data = NULL; /* The block is not in the cache, and VM does not know about it. If we were * requested to search for the block only, we can now return failure to the * caller. Return the block to the pool without allocating data pages, since * these would be freed upon recycling the block anyway. */ if (how == PEEK) { bp->lmfs_dev = NO_DEV; put_block(bp, ONE_SHOT); return ENOENT; } /* Not in the cache; reserve memory for its contents. */ lmfs_alloc_block(bp, block_size); assert(bp->data); if (how == NORMAL) { /* Try to read the block. Return an error code on failure. 
*/ if ((r = read_block(bp, block_size)) != OK) { put_block(bp, 0); return r; } } else if(how == NO_READ) { /* This block will be overwritten by new contents. */ } else panic("unexpected 'how' value: %d", how); assert(bp->data); *bpp = bp; /* return the newly acquired block */ return OK; }
/* Impulse Tracker-style 8-bit sample decompressor, stereo-aware variant:
 * per-call decompression state lives in a local readblock_crap struct,
 * and the output is pre-cleared with a strided loop (stride 1+stereo)
 * instead of memset.  Values are decoded at a variable bit width via
 * readbits(); the width changes through three in-stream escape encodings
 * (width < 7, width 7-8, width == 9).  Any other width is treated as
 * corrupt input: freeblock(&crap) is called and -1 returned.
 * NOTE(review): this definition is truncated in this view (it ends
 * mid-statement after "else"), so it is documented but otherwise left
 * byte-identical. */
// if we do memset, dumb_silence() would be neater... static int decompress8(DUMBFILE *f, signed char *data, int len, int it215, int stereo) { int blocklen, blockpos; byte bitwidth; long val; signed char d1, d2; readblock_crap crap; memset(&crap, 0, sizeof(crap)); for (blocklen = 0, blockpos = 0; blocklen < len; blocklen++, blockpos += 1 + stereo) data[ blockpos ] = 0; while (len > 0) { //Read a block of compressed data: if (readblock(f, &crap)) return -1; //Set up a few variables blocklen = (len < 0x8000) ? len : 0x8000; //Max block length is 0x8000 bytes blockpos = 0; bitwidth = 9; d1 = d2 = 0; //Start the decompression: while (blockpos < blocklen) { //Read a value: val = readbits(bitwidth, &crap); //Check for bit width change: if (bitwidth < 7) { //Method 1: if (val == (1 << (bitwidth - 1))) { val = readbits(3, &crap) + 1; bitwidth = (val < bitwidth) ? val : val + 1; continue; } } else if (bitwidth < 9) { //Method 2 byte border = (0xFF >> (9 - bitwidth)) - 4; if (val > border && val <= (border + 8)) { val -= border; bitwidth = (val < bitwidth) ? val : val + 1; continue; } } else if (bitwidth == 9) { //Method 3 if (val & 0x100) { bitwidth = (val + 1) & 0xFF; continue; } } else { //Illegal width, abort ? freeblock(&crap); return -1; } //Expand the value to signed byte: { signed char v; //The sample value: if (bitwidth < 8) { byte shift = 8 - bitwidth; v = (val << shift); v >>= shift; } else
/* Impulse Tracker 8-bit sample decompressor (third variant, file-based):
 * zeroes the destination, then repeatedly reads a compressed block with
 * readblock(module) and decodes values of variable bit width via
 * readbits().  The width changes through three in-stream escape
 * encodings ("method 1" for widths 1-6, "method 2" for 7-8, "method 3"
 * for 9); any other width is corrupt input, so freeblock() is called and
 * 0 returned.  Returns 0 on any failure (null dst, readblock failure,
 * bad width).  d1/d2 are the integrator buffers (d2 for IT 2.15).
 * NOTE(review): this definition is truncated in this view (it ends
 * mid-statement after "else"), so it is documented but otherwise left
 * byte-identical. */
int __attribute__ ((visibility ("internal"))) decompress8 (FILE *module, void *dst, int len, char it215) { sbyte *destbuf; /* the destination buffer which will be returned */ word blklen; /* length of compressed data block in samples */ word blkpos; /* position in block */ byte width; /* actual "bit width" */ word value; /* value read from file to be processed */ sbyte d1, d2; /* integrator buffers (d2 for it2.15) */ sbyte *destpos; destbuf = (sbyte *)dst; if (!destbuf) return 0; memsetb(destbuf,0,len); destpos=destbuf; /* position in output buffer */ /* now unpack data till the dest buffer is full */ while (len) { /* read a new block of compressed data and reset variables */ if (!readblock(module)) return 0; blklen=(len<0x8000)?len:0x8000; blkpos=0; width=9; /* start with width of 9 bits */ d1=d2=0; /* reset integrator buffers */ /* now uncompress the data block */ while (blkpos<blklen) { sbyte v; value = readbits(width); /* read bits */ if (width<7) /* method 1 (1-6 bits) */ { if (value==(1<<(width-1))) /* check for "100..." */ { value = readbits(3)+1; /* yes -> read new width; */ width = (value<width)?value:value+1; /* and expand it */ continue; /* ... next value */ } } else if (width<9) /* method 2 (7-8 bits) */ { byte border = (0xFF>>(9-width)) - 4; /* lower border for width chg */ if (value>border && value <=(border+8)) { value-=border; /* convert width to 1-8 */ width = (value<width)?value:value+1; /* and expand it */ continue; /* ... next value */ } } else if (width==9) /* method 3 (9 bits) */ { if (value & 0x100) /* bit 8 set? */ { width=(value+1)&0xff; /* new width... */ continue; /* ... and next value */ } } else { /* illegal width, abort */ freeblock(); return 0; } /* now expand value to signed byte */ /* sbyte v; // sample value */ if (width<8) { byte shift=8-width; v = (value<<shift); v>>=shift; } else