Example no. 1
void blitscreen_dirty1_color16(struct osd_bitmap *bitmap)
{
	int x, y;
	/* Source pitch in 16-bit pixels: bitmap lines are contiguous in memory. */
	int width=(bitmap->line[1] - bitmap->line[0])>>1;
	unsigned short *lb=((unsigned short*)(bitmap->line[skiplines])) + skipcolumns;
	unsigned short *address=SCREEN16 + gfx_xoffset + (gfx_yoffset * gfx_width);

	/* Walk the visible area in 16x16 tiles and copy only the dirty ones. */
	for (y = 0; y < gfx_display_lines; y += 16)
	{
		for (x = 0; x < gfx_display_columns; )
		{
			int w = 16;
			if (ISDIRTY(x,y))
			{
				int h;
				unsigned short *lb0 = lb + x;
				unsigned short *address0 = address + x;
				/* Coalesce a horizontal run of dirty tiles into one wide copy. */
				while (x + w < gfx_display_columns && ISDIRTY(x+w,y))
					w += 16;
				/* Clip the run to the visible width. */
				if (x + w > gfx_display_columns)
					w = gfx_display_columns - x;
				for (h = 0; ((h < 16) && ((y + h) < gfx_display_lines)); h++)
				{
					memcpy(address0,lb0,w<<1);	/* w pixels = w*2 bytes */
					lb0 += width;
					address0 += gfx_width;
				}
			}
			x += w;
		}
		lb += 16 * width;
		address += 16 * gfx_width;
	}
}
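Note: ISDIRTY() is defined elsewhere in these ports and is not shown in the
examples. A minimal sketch of what such a dirty-tile test could look like,
assuming one flag per 16x16 tile; the array name, dimensions and indexing
are assumptions, not the ports' real macro:

/* Hypothetical dirty grid: one byte per 16x16 tile of the bitmap.
 * The core marks a tile when it redraws it; the blitters above test
 * tiles at 16-pixel granularity and copy only the marked runs. */
extern unsigned char dirty_blocks[1024 / 16][1024 / 16];	/* assumed */
#define ISDIRTY(x, y) (dirty_blocks[(y) >> 4][(x) >> 4])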
Example no. 2
static void jffs2_close_nextblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{

	/* Check whether we have a dirty block now, or if it was dirty already */
	if (ISDIRTY (jeb->wasted_size + jeb->dirty_size)) {
		c->dirty_size += jeb->wasted_size;
		c->wasted_size -= jeb->wasted_size;
		jeb->dirty_size += jeb->wasted_size;
		jeb->wasted_size = 0;
		if (VERYDIRTY(c, jeb->dirty_size)) {
			D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
			list_add_tail(&jeb->list, &c->very_dirty_list);
		} else {
			D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
			list_add_tail(&jeb->list, &c->dirty_list);
		}
	} else {
		D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
		  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
		list_add_tail(&jeb->list, &c->clean_list);
	}
	c->nextblock = NULL;

}
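Note: the ISDIRTY() and VERYDIRTY() predicates used by the JFFS2 examples
are not shown above. A sketch modelled on the historical nodelist.h
definitions; treat the exact expressions as a reconstruction:

/* Reconstruction: dirty space is only worth garbage-collecting if it
 * could hold at least a minimal data node; anything smaller is
 * accounted as "wasted", as jffs2_close_nextblock() does above. */
#define ISDIRTY(size) ((size) > sizeof(struct jffs2_raw_inode) + JFFS2_MIN_DATA_LEN)

/* Reconstruction: a block is "very dirty" once at least half of the
 * erase block is dirty, which routes it to the very_dirty_list. */
#define VERYDIRTY(c, size) ((size) >= ((c)->sector_size / 2))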
Example no. 3
void blitscreen_dirty1_palettized16(struct osd_bitmap *bitmap)
{
	int x, y;
	int width=(bitmap->line[1] - bitmap->line[0])>>1;
	unsigned short *lb=((unsigned short*)(bitmap->line[skiplines])) + skipcolumns;
	UINT32 *address = gp2x_screen32 + gfx_xoffset + (gfx_yoffset * gfx_width);

	for (y = 0; y < gfx_display_lines; y += 16)
	{
		for (x = 0; x < gfx_display_columns; )
		{
			int w = 16;
			if (ISDIRTY(x,y))
			{
				int h;
				unsigned short *lb0 = lb + x;
				UINT32 *address0 = address + x;
				while (x + w < gfx_display_columns && ISDIRTY(x+w,y))
					w += 16;
				if (x + w > gfx_display_columns)
					w = gfx_display_columns - x;
				for (h = 0; ((h < 16) && ((y + h) < gfx_display_lines)); h++)
				{
					int wx;
					for (wx=0;wx<w;wx++)
					{
						address0[wx] = palette_32bit_lookup[lb0[wx]];
					}
					lb0 += width;
					address0 += gfx_width;
				}
			}
			x += w;
		}
		lb += 16 * width;
		address += 16 * gfx_width;
	}

	gp2x_video_flip();
}
Example no. 4
void blitscreen_dirty1_color8(struct osd_bitmap *bitmap)
{
	int x, y;
	int width=(bitmap->line[1] - bitmap->line[0]);
	unsigned char *lb=bitmap->line[skiplines] + skipcolumns;
	unsigned short *address=SCREEN16 + gfx_xoffset + (gfx_yoffset * gfx_width);

	for (y = 0; y < gfx_display_lines; y += 16)
	{
		for (x = 0; x < gfx_display_columns; )
		{
			int w = 16;
			if (ISDIRTY(x,y))
			{
				int h;
				unsigned char *lb0 = lb + x;
				unsigned short *address0 = address + x;
				while (x + w < gfx_display_columns && ISDIRTY(x+w,y))
					w += 16;
				if (x + w > gfx_display_columns)
					w = gfx_display_columns - x;
				for (h = 0; ((h < 16) && ((y + h) < gfx_display_lines)); h++)
				{
					int wx;
					for (wx=0;wx<w;wx++)
					{
						address0[wx] = gp2x_palette[lb0[wx]];
					}
					lb0 += width;
					address0 += gfx_width;
				}
			}
			x += w;
		}
		lb += 16 * width;
		address += 16 * gfx_width;
	}
	
	FLIP_VIDEO;
}
Example no. 5
void blitscreen_dirty1_color8(struct osd_bitmap *bitmap)
{
	int x, y;
	int width=(bitmap->line[1] - bitmap->line[0]);
	unsigned char *lb=bitmap->line[skiplines] + skipcolumns;
	unsigned short *address=SCREEN8 + gfx_xoffset + (gfx_yoffset * gfx_width);

	for (y = 0; y < gfx_display_lines; y += 16)
	{
		for (x = 0; x < gfx_display_columns; )
		{
			int w = 16;
			if (ISDIRTY(x,y))
			{
				int h;
				unsigned char *lb0 = lb + x;
				unsigned short *address0 = address + x;
				while (x + w < gfx_display_columns && ISDIRTY(x+w,y))
					w += 16;
				if (x + w > gfx_display_columns)
					w = gfx_display_columns - x;
				for (h = 0; ((h < 16) && ((y + h) < gfx_display_lines)); h++)
				{
					//memcpy(address0,lb0,w);
					//BLIT_8_TO_16_32bit(address0,lb0,w)
					clut8to16(address0, lb0, dingoo_palette, w);
					lb0 += width;
					address0 += gfx_width;
				}
			}
			x += w;
		}
		lb += 16 * width;
		address += 16 * gfx_width;
	}
	FLUSH_CACHE
}
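Note: clut8to16() comes from the Dingoo port and is not shown here. A
minimal C sketch of such a colour-lookup blit, with the signature inferred
from the call site above (the real routine may well be hand-written
assembler):

/* Hypothetical sketch: expand 8-bit indexed pixels to 16-bit pixels
 * through a palette lookup table, one pixel at a time. */
static void clut8to16(unsigned short *dst, const unsigned char *src,
                      const unsigned short *palette, int count)
{
	while (count--)
		*dst++ = palette[*src++];
}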
Example no. 6
int jffs2_scan_classify_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
	if ((jeb->used_size + jeb->unchecked_size) == PAD(c->cleanmarker_size) && !jeb->dirty_size
	    && (!jeb->first_node || !ref_next(jeb->first_node)) )
		return BLK_STATE_CLEANMARKER;

	/* move blocks with max 4 byte dirty space to cleanlist */
	else if (!ISDIRTY(c->sector_size - (jeb->used_size + jeb->unchecked_size))) {
		c->dirty_size -= jeb->dirty_size;
		c->wasted_size += jeb->dirty_size;
		jeb->wasted_size += jeb->dirty_size;
		jeb->dirty_size = 0;
		return BLK_STATE_CLEAN;
	} else if (jeb->used_size || jeb->unchecked_size)
		return BLK_STATE_PARTDIRTY;
	else
		return BLK_STATE_ALLDIRTY;
}
Example no. 7
/*===========================================================================*
 *				fs_sync					     *
 *===========================================================================*/
int fs_sync()
{
/* Perform the sync() system call.  Flush all the tables. 
 * The order in which the various tables are flushed is critical.  The
 * blocks must be flushed last, since rw_inode() leaves its results in
 * the block cache.
 */
  struct inode *rip;
  struct buf *bp;

  assert(nr_bufs > 0);
  assert(buf);

  /* Write all the dirty inodes to the disk. */
  for(rip = &inode[0]; rip < &inode[NR_INODES]; rip++)
	  if(rip->i_count > 0 && IN_ISDIRTY(rip)) rw_inode(rip, WRITING);

  /* Write all the dirty blocks to the disk, one drive at a time. */
  for(bp = &buf[0]; bp < &buf[nr_bufs]; bp++)
	  if(bp->b_dev != NO_DEV && ISDIRTY(bp)) 
		  flushall(bp->b_dev);

  return(OK);		/* sync() can't fail */
}
Example no. 8
/* Called with alloc sem _and_ erase_completion_lock */
static int jffs2_do_reserve_space(struct jffs2_sb_info *c,  uint32_t minsize, uint32_t *ofs, uint32_t *len)
{
	struct jffs2_eraseblock *jeb = c->nextblock;
	
 restart:
	if (jeb && minsize > jeb->free_size) {
		/* Skip the end of this block and file it as having some dirty space */
		/* If there's a pending write to it, flush now */
		if (jffs2_wbuf_dirty(c)) {
			spin_unlock(&c->erase_completion_lock);
			D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));			    
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
			jeb = c->nextblock;
			goto restart;
		}
		c->wasted_size += jeb->free_size;
		c->free_size -= jeb->free_size;
		jeb->wasted_size += jeb->free_size;
		jeb->free_size = 0;
		
		/* Check whether we have a dirty block now, or if it was dirty already */
		if (ISDIRTY (jeb->wasted_size + jeb->dirty_size)) {
			c->dirty_size += jeb->wasted_size;
			c->wasted_size -= jeb->wasted_size;
			jeb->dirty_size += jeb->wasted_size;
			jeb->wasted_size = 0;
			if (VERYDIRTY(c, jeb->dirty_size)) {
				D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
				  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
				list_add_tail(&jeb->list, &c->very_dirty_list);
			} else {
				D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
				  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
				list_add_tail(&jeb->list, &c->dirty_list);
			}
		} else { 
			D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
			list_add_tail(&jeb->list, &c->clean_list);
		}
		c->nextblock = jeb = NULL;
	}
	
	if (!jeb) {
		struct list_head *next;
		/* Take the next block off the 'free' list */

		if (list_empty(&c->free_list)) {

			if (!c->nr_erasing_blocks && 
			    !list_empty(&c->erasable_list)) {
				struct jffs2_eraseblock *ejeb;

				ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
				list_del(&ejeb->list);
				list_add_tail(&ejeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
				jffs2_erase_pending_trigger(c);
				D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Triggering erase of erasable block at 0x%08x\n",
					  ejeb->offset));
			}

			if (!c->nr_erasing_blocks && 
			    !list_empty(&c->erasable_pending_wbuf_list)) {
				D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
				/* c->nextblock is NULL, no update to c->nextblock allowed */			    
				spin_unlock(&c->erase_completion_lock);
				jffs2_flush_wbuf_pad(c);
				spin_lock(&c->erase_completion_lock);
				/* Have another go. It'll be on the erasable_list now */
				return -EAGAIN;
			}

			if (!c->nr_erasing_blocks) {
				/* Ouch. We're in GC, or we wouldn't have got here.
				   And there's no space left. At all. */
				printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n", 
				       c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list)?"yes":"no", 
				       list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no");
				return -ENOSPC;
			}

			spin_unlock(&c->erase_completion_lock);
			/* Don't wait for it; just erase one right now */
			jffs2_erase_pending_blocks(c, 1);
			spin_lock(&c->erase_completion_lock);

			/* An erase may have failed, decreasing the
			   amount of free space available. So we must
			   restart from the beginning */
			return -EAGAIN;
		}

		next = c->free_list.next;
		list_del(next);
		c->nextblock = jeb = list_entry(next, struct jffs2_eraseblock, list);
		c->nr_free_blocks--;

		if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
			printk(KERN_WARNING "Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", jeb->offset, jeb->free_size);
			goto restart;
		}
	}
	/* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
	   enough space */
	*ofs = jeb->offset + (c->sector_size - jeb->free_size);
	*len = jeb->free_size;

	if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
	    !jeb->first_node->next_in_ino) {
		/* Only node in it beforehand was a CLEANMARKER node (we think). 
		   So mark it obsolete now that there's going to be another node
		   in the block. This will reduce used_size to zero but we've
		   already set c->nextblock so that jffs2_mark_node_obsolete()
		   won't try to refile it to the dirty_list.
		*/
		spin_unlock(&c->erase_completion_lock);
		jffs2_mark_node_obsolete(c, jeb->first_node);
		spin_lock(&c->erase_completion_lock);
	}

	D1(printk(KERN_DEBUG "jffs2_do_reserve_space(): Giving 0x%x bytes at 0x%x\n", *len, *ofs));
	return 0;
}
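Note: the -EAGAIN returns above imply a retry loop in the caller. A minimal
sketch of that contract (variable names and the surrounding locking are
elided/assumed):

/* Hypothetical caller sketch: jffs2_do_reserve_space() asks to be
 * re-invoked after it has triggered an erase or flushed the wbuf. */
int ret;
do {
	ret = jffs2_do_reserve_space(c, minsize, &ofs, &len);
} while (ret == -EAGAIN);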
Example no. 9
/*===========================================================================*
 *				get_block				     *
 *===========================================================================*/
struct buf *get_block(
  register dev_t dev,		/* on which device is the block? */
  register block_t block,	/* which block is wanted? */
  int only_search		/* if NO_READ, don't read, else act normal */
)
{
/* Check to see if the requested block is in the block cache.  If so, return
 * a pointer to it.  If not, evict some other block and fetch it (unless
 * 'only_search' is 1).  All the blocks in the cache that are not in use
 * are linked together in a chain, with 'front' pointing to the least recently
 * used block and 'rear' to the most recently used block.  If 'only_search' is
 * 1, the block being requested will be overwritten in its entirety, so it is
 * only necessary to see if it is in the cache; if it is not, any free buffer
 * will do.  It is not necessary to actually read the block in from disk.
 * If 'only_search' is PREFETCH, the block need not be read from the disk,
 * and the device is not to be marked on the block, so callers can tell if
 * the block returned is valid.
 * In addition to the LRU chain, there is also a hash chain to link together
 * blocks whose block numbers end with the same bit strings, for fast lookup.
 */

  int b;
  static struct buf *bp, *prev_ptr;
  u64_t yieldid = VM_BLOCKID_NONE, getid = make64(dev, block);

  assert(buf_hash);
  assert(buf);
  assert(nr_bufs > 0);

  ASSERT(fs_block_size > 0);

  /* Search the hash chain for (dev, block). Do_read() can use 
   * get_block(NO_DEV ...) to get an unnamed block to fill with zeros when
   * someone wants to read from a hole in a file, in which case this search
   * is skipped
   */
  if (dev != NO_DEV) {
	b = BUFHASH(block);
	bp = buf_hash[b];
	while (bp != NULL) {
		if (bp->b_blocknr == block && bp->b_dev == dev) {
			/* Block needed has been found. */
			if (bp->b_count == 0) rm_lru(bp);
			bp->b_count++;	/* record that block is in use */
			ASSERT(bp->b_bytes == fs_block_size);
			ASSERT(bp->b_dev == dev);
			ASSERT(bp->b_dev != NO_DEV);
			ASSERT(bp->bp);
			return(bp);
		} else {
			/* This block is not the one sought. */
			bp = bp->b_hash; /* move to next block on hash chain */
		}
	}
  }

  /* Desired block is not on available chain.  Take oldest block ('front'). */
  if ((bp = front) == NULL) panic("all buffers in use: %d", nr_bufs);

  if(bp->b_bytes < fs_block_size) {
	ASSERT(!bp->bp);
	ASSERT(bp->b_bytes == 0);
	if(!(bp->bp = alloc_contig( (size_t) fs_block_size, 0, NULL))) {
		printf("MFS: couldn't allocate a new block.\n");
		for(bp = front;
			bp && bp->b_bytes < fs_block_size; bp = bp->b_next)
			;
		if(!bp) {
			panic("no buffer available");
		}
	} else {
  		bp->b_bytes = fs_block_size;
	}
  }

  ASSERT(bp);
  ASSERT(bp->bp);
  ASSERT(bp->b_bytes == fs_block_size);
  ASSERT(bp->b_count == 0);

  rm_lru(bp);

  /* Remove the block that was just taken from its hash chain. */
  b = BUFHASH(bp->b_blocknr);
  prev_ptr = buf_hash[b];
  if (prev_ptr == bp) {
	buf_hash[b] = bp->b_hash;
  } else {
	/* The block just taken is not on the front of its hash chain. */
	while (prev_ptr->b_hash != NULL)
		if (prev_ptr->b_hash == bp) {
			prev_ptr->b_hash = bp->b_hash;	/* found it */
			break;
		} else {
			prev_ptr = prev_ptr->b_hash;	/* keep looking */
		}
  }

  /* If the block taken is dirty, make it clean by writing it to the disk.
   * Avoid hysteresis by flushing all other dirty blocks for the same device.
   */
  if (bp->b_dev != NO_DEV) {
	if (ISDIRTY(bp)) flushall(bp->b_dev);

	/* Are we throwing out a block that contained something?
	 * Give it to VM for the second-layer cache.
	 */
	yieldid = make64(bp->b_dev, bp->b_blocknr);
	assert(bp->b_bytes == fs_block_size);
	BP_CLEARDEV(bp);
  }

  /* Fill in block's parameters and add it to the hash chain where it goes. */
  if(dev == NO_DEV) BP_CLEARDEV(bp);
  else BP_SETDEV(bp, dev);
  bp->b_blocknr = block;	/* fill in block number */
  bp->b_count++;		/* record that block is being used */
  b = BUFHASH(bp->b_blocknr);
  bp->b_hash = buf_hash[b];

  buf_hash[b] = bp;		/* add to hash list */

  if(dev == NO_DEV) {
	if(vmcache && cmp64(yieldid, VM_BLOCKID_NONE) != 0) {
		vm_yield_block_get_block(yieldid, VM_BLOCKID_NONE,
			bp->bp, fs_block_size);
	}
	return(bp);	/* If the caller wanted a NO_DEV block, work is done. */
  }

  /* Go get the requested block unless searching or prefetching. */
  if(only_search == PREFETCH || only_search == NORMAL) {
	/* Block is not found in our cache, but we do want it
	 * if it's in the vm cache.
	 */
	if(vmcache) {
		/* If we can satisfy the PREFETCH or NORMAL request 
		 * from the vm cache, work is done.
		 */
		if(vm_yield_block_get_block(yieldid, getid,
			bp->bp, fs_block_size) == OK) {
			return bp;
		}
	}
  }

  if(only_search == PREFETCH) {
	/* PREFETCH: don't do i/o. */
	BP_CLEARDEV(bp);
  } else if (only_search == NORMAL) {
	read_block(bp);
  } else if(only_search == NO_READ) {
	/* we want this block, but its contents
	 * will be overwritten. VM has to forget
	 * about it.
	 */
	if(vmcache) {
		vm_forgetblock(getid);
	}
  } else
	panic("unexpected only_search value: %d", only_search);

  assert(bp->bp);

  return(bp);			/* return the newly acquired block */
}
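Note: a typical read-modify-write through this cache pairs get_block() with
put_block(); MARKDIRTY() and the FULL_DATA_BLOCK hint are assumed from the
MINIX buffer cache and are not shown in these examples:

/* Hypothetical usage sketch: fetch a block (NORMAL forces a read if it
 * is not cached), modify it, mark it dirty so fs_sync()/flushall()
 * will write it back, then drop the reference. */
struct buf *bp = get_block(dev, blocknr, NORMAL);
memcpy(bp->bp, data, fs_block_size);	/* bp->bp is the data area, as above */
MARKDIRTY(bp);
put_block(bp, FULL_DATA_BLOCK);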
Example no. 10
static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				  unsigned char *buf, uint32_t buf_size) {
	struct jffs2_unknown_node *node;
	struct jffs2_unknown_node crcnode;
	uint32_t ofs, prevofs;
	uint32_t hdr_crc, buf_ofs, buf_len;
	int err;
	int noise = 0;
	int wasempty = 0;
	uint32_t empty_start = 0;
#ifdef CONFIG_JFFS2_FS_NAND
	int cleanmarkerfound = 0;
#endif

	ofs = jeb->offset;
	prevofs = jeb->offset - 1;

	D1(printk(KERN_DEBUG "jffs2_scan_eraseblock(): Scanning block at 0x%x\n", ofs));

#ifdef CONFIG_JFFS2_FS_NAND
	if (jffs2_cleanmarker_oob(c)) {
		int ret = jffs2_check_nand_cleanmarker(c, jeb);
		D2(printk(KERN_NOTICE "jffs_check_nand_cleanmarker returned %d\n",ret));
		/* Even if it's not found, we still scan to see
		   if the block is empty. We use this information
		   to decide whether to erase it or not. */
		switch (ret) {
		case 0:		cleanmarkerfound = 1; break;
		case 1: 	break;
		case 2: 	return BLK_STATE_BADBLOCK;
		case 3:		return BLK_STATE_ALLDIRTY; /* Block has failed to erase min. once */
		default: 	return ret;
		}
	}
#endif
	buf_ofs = jeb->offset;

	if (!buf_size) {
		buf_len = c->sector_size;
	} else {
		buf_len = EMPTY_SCAN_SIZE;
		err = jffs2_fill_scan_buf(c, buf, buf_ofs, buf_len);
		if (err)
			return err;
	}
	
	/* We temporarily use 'ofs' as a pointer into the buffer/jeb */
	ofs = 0;

	/* Scan only 4KiB of 0xFF before declaring it's empty */
	while(ofs < EMPTY_SCAN_SIZE && *(uint32_t *)(&buf[ofs]) == 0xFFFFFFFF)
		ofs += 4;

	if (ofs == EMPTY_SCAN_SIZE) {
#ifdef CONFIG_JFFS2_FS_NAND
		if (jffs2_cleanmarker_oob(c)) {
			/* scan oob, take care of cleanmarker */
			int ret = jffs2_check_oob_empty(c, jeb, cleanmarkerfound);
			D2(printk(KERN_NOTICE "jffs2_check_oob_empty returned %d\n",ret));
			switch (ret) {
			case 0:		return cleanmarkerfound ? BLK_STATE_CLEANMARKER : BLK_STATE_ALLFF;
			case 1: 	return BLK_STATE_ALLDIRTY;
			case 2: 	return BLK_STATE_BADBLOCK; /* case 2/3 are paranoia checks */
			case 3:		return BLK_STATE_ALLDIRTY; /* Block has failed to erase min. once */
			default: 	return ret;
			}
		}
#endif
		D1(printk(KERN_DEBUG "Block at 0x%08x is empty (erased)\n", jeb->offset));
		return BLK_STATE_ALLFF;	/* OK to erase if all blocks are like this */
	}
	if (ofs) {
		D1(printk(KERN_DEBUG "Free space at %08x ends at %08x\n", jeb->offset,
			  jeb->offset + ofs));
		DIRTY_SPACE(ofs);
	}

	/* Now ofs is a complete physical flash offset as it always was... */
	ofs += jeb->offset;

	noise = 10;

	while(ofs < jeb->offset + c->sector_size) {

		D1(ACCT_PARANOIA_CHECK(jeb));

		cond_resched();

		if (ofs & 3) {
			printk(KERN_WARNING "Eep. ofs 0x%08x not word-aligned!\n", ofs);
			ofs = (ofs+3)&~3;
			continue;
		}
		if (ofs == prevofs) {
			printk(KERN_WARNING "ofs 0x%08x has already been seen. Skipping\n", ofs);
			DIRTY_SPACE(4);
			ofs += 4;
			continue;
		}
		prevofs = ofs;

		if (jeb->offset + c->sector_size < ofs + sizeof(*node)) {
			D1(printk(KERN_DEBUG "Fewer than %zd bytes left to end of block. (%x+%x<%x+%zx) Not reading\n", sizeof(struct jffs2_unknown_node),
				  jeb->offset, c->sector_size, ofs, sizeof(*node)));
			DIRTY_SPACE((jeb->offset + c->sector_size)-ofs);
			break;
		}

		if (buf_ofs + buf_len < ofs + sizeof(*node)) {
			buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
			D1(printk(KERN_DEBUG "Fewer than %zd bytes (node header) left to end of buf. Reading 0x%x at 0x%08x\n",
				  sizeof(struct jffs2_unknown_node), buf_len, ofs));
			err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
			if (err)
				return err;
			buf_ofs = ofs;
		}

		node = (struct jffs2_unknown_node *)&buf[ofs-buf_ofs];

		if (*(uint32_t *)(&buf[ofs-buf_ofs]) == 0xffffffff) {
			uint32_t inbuf_ofs = ofs - buf_ofs + 4;
			uint32_t scanend;

			empty_start = ofs;
			ofs += 4;

			/* If scanning empty space after only a cleanmarker, don't
			   bother scanning the whole block */
			if (unlikely(empty_start == jeb->offset + c->cleanmarker_size &&
				     jeb->offset + EMPTY_SCAN_SIZE < buf_ofs + buf_len))
				scanend = jeb->offset + EMPTY_SCAN_SIZE - buf_ofs;
			else
				scanend = buf_len;

			D1(printk(KERN_DEBUG "Found empty flash at 0x%08x\n", ofs));
			while (inbuf_ofs < scanend) {
				if (*(uint32_t *)(&buf[inbuf_ofs]) != 0xffffffff)
					goto emptyends;

				inbuf_ofs+=4;
				ofs += 4;
			}
			/* Ran off end. */
			D1(printk(KERN_DEBUG "Empty flash ends normally at 0x%08x\n", ofs));

			if (buf_ofs == jeb->offset && jeb->used_size == PAD(c->cleanmarker_size) && 
			    !jeb->first_node->next_in_ino && !jeb->dirty_size)
				return BLK_STATE_CLEANMARKER;
			wasempty = 1;
			continue;
		} else if (wasempty) {
		emptyends:
			printk(KERN_WARNING "Empty flash at 0x%08x ends at 0x%08x\n", empty_start, ofs);
			DIRTY_SPACE(ofs-empty_start);
			wasempty = 0;
			continue;
		}

		if (ofs == jeb->offset && je16_to_cpu(node->magic) == KSAMTIB_CIGAM_2SFFJ) {
			printk(KERN_WARNING "Magic bitmask is backwards at offset 0x%08x. Wrong endian filesystem?\n", ofs);
			DIRTY_SPACE(4);
			ofs += 4;
			continue;
		}
		if (je16_to_cpu(node->magic) == JFFS2_DIRTY_BITMASK) {
			D1(printk(KERN_DEBUG "Empty bitmask at 0x%08x\n", ofs));
			DIRTY_SPACE(4);
			ofs += 4;
			continue;
		}
		if (je16_to_cpu(node->magic) == JFFS2_OLD_MAGIC_BITMASK) {
			printk(KERN_WARNING "Old JFFS2 bitmask found at 0x%08x\n", ofs);
			printk(KERN_WARNING "You cannot use older JFFS2 filesystems with newer kernels\n");
			DIRTY_SPACE(4);
			ofs += 4;
			continue;
		}
		if (je16_to_cpu(node->magic) != JFFS2_MAGIC_BITMASK) {
			/* OK. We're out of possibilities. Whinge and move on */
			noisy_printk(&noise, "jffs2_scan_eraseblock(): Magic bitmask 0x%04x not found at 0x%08x: 0x%04x instead\n", 
				     JFFS2_MAGIC_BITMASK, ofs, 
				     je16_to_cpu(node->magic));
			DIRTY_SPACE(4);
			ofs += 4;
			continue;
		}
		/* We seem to have a node of sorts. Check the CRC */
		crcnode.magic = node->magic;
		crcnode.nodetype = cpu_to_je16( je16_to_cpu(node->nodetype) | JFFS2_NODE_ACCURATE);
		crcnode.totlen = node->totlen;
		hdr_crc = crc32(0, &crcnode, sizeof(crcnode)-4);

		if (hdr_crc != je32_to_cpu(node->hdr_crc)) {
			noisy_printk(&noise, "jffs2_scan_eraseblock(): Node at 0x%08x {0x%04x, 0x%04x, 0x%08x) has invalid CRC 0x%08x (calculated 0x%08x)\n",
				     ofs, je16_to_cpu(node->magic),
				     je16_to_cpu(node->nodetype), 
				     je32_to_cpu(node->totlen),
				     je32_to_cpu(node->hdr_crc),
				     hdr_crc);
			DIRTY_SPACE(4);
			ofs += 4;
			continue;
		}

		if (ofs + je32_to_cpu(node->totlen) > 
		    jeb->offset + c->sector_size) {
			/* Eep. Node goes over the end of the erase block. */
			printk(KERN_WARNING "Node at 0x%08x with length 0x%08x would run over the end of the erase block\n",
			       ofs, je32_to_cpu(node->totlen));
			printk(KERN_WARNING "Perhaps the file system was created with the wrong erase size?\n");
			DIRTY_SPACE(4);
			ofs += 4;
			continue;
		}

		if (!(je16_to_cpu(node->nodetype) & JFFS2_NODE_ACCURATE)) {
			/* Wheee. This is an obsoleted node */
			D2(printk(KERN_DEBUG "Node at 0x%08x is obsolete. Skipping\n", ofs));
			DIRTY_SPACE(PAD(je32_to_cpu(node->totlen)));
			ofs += PAD(je32_to_cpu(node->totlen));
			continue;
		}

		switch(je16_to_cpu(node->nodetype)) {
		case JFFS2_NODETYPE_INODE:
			if (buf_ofs + buf_len < ofs + sizeof(struct jffs2_raw_inode)) {
				buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
				D1(printk(KERN_DEBUG "Fewer than %zd bytes (inode node) left to end of buf. Reading 0x%x at 0x%08x\n",
					  sizeof(struct jffs2_raw_inode), buf_len, ofs));
				err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
				if (err)
					return err;
				buf_ofs = ofs;
				node = (void *)buf;
			}
			err = jffs2_scan_inode_node(c, jeb, (void *)node, ofs);
			if (err) return err;
			ofs += PAD(je32_to_cpu(node->totlen));
			break;
			
		case JFFS2_NODETYPE_DIRENT:
			if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
				buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
				D1(printk(KERN_DEBUG "Fewer than %d bytes (dirent node) left to end of buf. Reading 0x%x at 0x%08x\n",
					  je32_to_cpu(node->totlen), buf_len, ofs));
				err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
				if (err)
					return err;
				buf_ofs = ofs;
				node = (void *)buf;
			}
			err = jffs2_scan_dirent_node(c, jeb, (void *)node, ofs);
			if (err) return err;
			ofs += PAD(je32_to_cpu(node->totlen));
			break;

		case JFFS2_NODETYPE_CLEANMARKER:
			D1(printk(KERN_DEBUG "CLEANMARKER node found at 0x%08x\n", ofs));
			if (je32_to_cpu(node->totlen) != c->cleanmarker_size) {
				printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x has totlen 0x%x != normal 0x%x\n", 
				       ofs, je32_to_cpu(node->totlen), c->cleanmarker_size);
				DIRTY_SPACE(PAD(sizeof(struct jffs2_unknown_node)));
				ofs += PAD(sizeof(struct jffs2_unknown_node));
			} else if (jeb->first_node) {
				printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x, not first node in block (0x%08x)\n", ofs, jeb->offset);
				DIRTY_SPACE(PAD(sizeof(struct jffs2_unknown_node)));
				ofs += PAD(sizeof(struct jffs2_unknown_node));
			} else {
				struct jffs2_raw_node_ref *marker_ref = jffs2_alloc_raw_node_ref();
				if (!marker_ref) {
					printk(KERN_NOTICE "Failed to allocate node ref for clean marker\n");
					return -ENOMEM;
				}
				marker_ref->next_in_ino = NULL;
				marker_ref->next_phys = NULL;
				marker_ref->flash_offset = ofs | REF_NORMAL;
				marker_ref->totlen = c->cleanmarker_size;
				jeb->first_node = jeb->last_node = marker_ref;
			     
				USED_SPACE(PAD(c->cleanmarker_size));
				ofs += PAD(c->cleanmarker_size);
			}
			break;

		case JFFS2_NODETYPE_PADDING:
			DIRTY_SPACE(PAD(je32_to_cpu(node->totlen)));
			ofs += PAD(je32_to_cpu(node->totlen));
			break;

		default:
			switch (je16_to_cpu(node->nodetype) & JFFS2_COMPAT_MASK) {
			case JFFS2_FEATURE_ROCOMPAT:
				printk(KERN_NOTICE "Read-only compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs);
			        c->flags |= JFFS2_SB_FLAG_RO;
				if (!(jffs2_is_readonly(c)))
					return -EROFS;
				DIRTY_SPACE(PAD(je32_to_cpu(node->totlen)));
				ofs += PAD(je32_to_cpu(node->totlen));
				break;

			case JFFS2_FEATURE_INCOMPAT:
				printk(KERN_NOTICE "Incompatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs);
				return -EINVAL;

			case JFFS2_FEATURE_RWCOMPAT_DELETE:
				D1(printk(KERN_NOTICE "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs));
				DIRTY_SPACE(PAD(je32_to_cpu(node->totlen)));
				ofs += PAD(je32_to_cpu(node->totlen));
				break;

			case JFFS2_FEATURE_RWCOMPAT_COPY:
				D1(printk(KERN_NOTICE "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs));
				USED_SPACE(PAD(je32_to_cpu(node->totlen)));
				ofs += PAD(je32_to_cpu(node->totlen));
				break;
			}
		}
	}


	D1(printk(KERN_DEBUG "Block at 0x%08x: free 0x%08x, dirty 0x%08x, unchecked 0x%08x, used 0x%08x\n", jeb->offset, 
		  jeb->free_size, jeb->dirty_size, jeb->unchecked_size, jeb->used_size));

	/* mark_node_obsolete can add to wasted !! */
	if (jeb->wasted_size) {
		jeb->dirty_size += jeb->wasted_size;
		c->dirty_size += jeb->wasted_size;
		c->wasted_size -= jeb->wasted_size;
		jeb->wasted_size = 0;
	}

	if ((jeb->used_size + jeb->unchecked_size) == PAD(c->cleanmarker_size) && !jeb->dirty_size 
		&& (!jeb->first_node || jeb->first_node->next_in_ino) )
		return BLK_STATE_CLEANMARKER;
		
	/* move blocks with max 4 byte dirty space to cleanlist */	
	else if (!ISDIRTY(c->sector_size - (jeb->used_size + jeb->unchecked_size))) {
		c->dirty_size -= jeb->dirty_size;
		c->wasted_size += jeb->dirty_size; 
		jeb->wasted_size += jeb->dirty_size;
		jeb->dirty_size = 0;
		return BLK_STATE_CLEAN;
	} else if (jeb->used_size || jeb->unchecked_size)
		return BLK_STATE_PARTDIRTY;
	else
		return BLK_STATE_ALLDIRTY;
}
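Note: DIRTY_SPACE() and USED_SPACE() adjust the per-block and
per-filesystem accounting in lockstep. A reconstruction in the style of the
historical scan.c helpers (not copied from this source):

/* Reconstruction: move 'x' bytes of this block from free to dirty
 * (resp. used), keeping jeb and the superblock totals consistent.
 * Assumes c and jeb are in scope, as in jffs2_scan_eraseblock(). */
#define DIRTY_SPACE(x) do { typeof(x) _x = (x); \
		c->free_size -= _x; c->dirty_size += _x; \
		jeb->free_size -= _x; jeb->dirty_size += _x; } while(0)

#define USED_SPACE(x) do { typeof(x) _x = (x); \
		c->free_size -= _x; c->used_size += _x; \
		jeb->free_size -= _x; jeb->used_size += _x; } while(0)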