Example #1
/* ldc callback */
static uint_t
i_vldc_cb(uint64_t event, caddr_t arg)
{
	int 		rv;
	vldc_port_t	*vport = (vldc_port_t *)arg;
	ldc_status_t	old_status;
	short		pollevents = 0;

	ASSERT(vport != NULL);
	ASSERT(vport->minorp != NULL);

	D1("i_vldc_cb: vldc@%d:%d callback invoked, channel=0x%lx, "
	    "event=0x%lx\n", vport->inst, vport->number, vport->ldc_id, event);

	/* ensure the port can't be destroyed while we are handling the cb */
	mutex_enter(&vport->minorp->lock);

	if (vport->status == VLDC_PORT_CLOSED) {
		mutex_exit(&vport->minorp->lock);
		return (LDC_SUCCESS);
	}

	old_status = vport->ldc_status;
	rv = ldc_status(vport->ldc_handle, &vport->ldc_status);
	if (rv != 0) {
		DWARN("i_vldc_cb: vldc@%d:%d could not get ldc status, "
		    "rv=%d\n", vport->inst, vport->number, rv);
		mutex_exit(&vport->minorp->lock);
		return (LDC_SUCCESS);
	}

	if (event & LDC_EVT_UP) {
		pollevents |= POLLOUT;
		vport->hanged_up = B_FALSE;

	} else if (event & LDC_EVT_RESET) {
		/*
		 * Mark the port in reset, if it is not CLOSED and
		 * the channel was previously in LDC_UP state. This
		 * implies that the port cannot be used until it has
		 * been closed and reopened.
		 */
		if (old_status == LDC_UP) {
			vport->status = VLDC_PORT_RESET;
			vport->hanged_up = B_TRUE;
			pollevents = POLLHUP;
		} else {
			rv = ldc_up(vport->ldc_handle);
			if (rv) {
				DWARN("i_vldc_cb: vldc@%d:%d cannot bring "
				    "channel UP rv=%d\n", vport->inst,
				    vport->number, rv);
				mutex_exit(&vport->minorp->lock);
				return (LDC_SUCCESS);
			}
			rv = ldc_status(vport->ldc_handle, &vport->ldc_status);
			if (rv != 0) {
				DWARN("i_vldc_cb: vldc@%d:%d could not get "
				    "ldc status, rv=%d\n", vport->inst,
				    vport->number, rv);
				mutex_exit(&vport->minorp->lock);
				return (LDC_SUCCESS);
			}
			if (vport->ldc_status == LDC_UP) {
				pollevents |= POLLOUT;
				vport->hanged_up = B_FALSE;
			}
		}

	} else if (event & LDC_EVT_DOWN) {
		/*
		 * The other side went away - mark port in RESET state
		 */
		vport->status = VLDC_PORT_RESET;
		vport->hanged_up = B_TRUE;
		pollevents = POLLHUP;
	}

	if (event & LDC_EVT_READ)
		pollevents |= POLLIN;

	mutex_exit(&vport->minorp->lock);

	if (pollevents != 0) {
		D1("i_vldc_cb: port@%d pollwakeup=0x%x\n",
		    vport->number, pollevents);
		pollwakeup(&vport->poll, pollevents);
	}

	return (LDC_SUCCESS);
}
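Beyond refreshing the channel status, the callback's job is to fold LDC events into poll(2) revents bits: LDC_EVT_UP maps to POLLOUT, LDC_EVT_READ to POLLIN, and a reset of a previously-UP channel (or LDC_EVT_DOWN) to POLLHUP. A minimal user-space sketch of that mapping; the EVT_* values are made-up stand-ins for the real sys/ldc.h constants:

/* Sketch only: the real LDC_EVT_* values live in sys/ldc.h; these
 * placeholders just make the mapping testable in user space. */
#include <poll.h>
#include <stdio.h>

#define EVT_UP    0x1	/* hypothetical stand-in for LDC_EVT_UP */
#define EVT_RESET 0x2	/* ... for LDC_EVT_RESET */
#define EVT_DOWN  0x4	/* ... for LDC_EVT_DOWN */
#define EVT_READ  0x8	/* ... for LDC_EVT_READ */

static short
events_to_poll(unsigned long event, int was_up)
{
	short pollevents = 0;

	if (event & EVT_UP)
		pollevents |= POLLOUT;	/* channel came up: writable */
	else if ((event & EVT_RESET) && was_up)
		pollevents = POLLHUP;	/* peer reset a channel that was UP */
	else if (event & EVT_DOWN)
		pollevents = POLLHUP;	/* peer went away */

	if (event & EVT_READ)
		pollevents |= POLLIN;	/* data pending */

	return (pollevents);
}

int
main(void)
{
	printf("UP|READ -> 0x%x\n", events_to_poll(EVT_UP | EVT_READ, 0));
	printf("RESET   -> 0x%x\n", events_to_poll(EVT_RESET, 1));
	return (0);
}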
Example #2
// Restored MultiVector tests
int main(int argc, char *argv[]) {

  int ierr = 0, i, j;

#ifdef EPETRA_MPI

  // Initialize MPI

  MPI_Init(&argc,&argv);
  int rank; // My process ID

  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  Epetra_MpiComm Comm(MPI_COMM_WORLD);

#else

  int rank = 0;
  Epetra_SerialComm Comm;

#endif

  Comm.SetTracebackMode(0); // This should shut down any error tracing
  bool verbose = false;

  // Check if we should print results to standard out
  if (argc>1) if (argv[1][0]=='-' && argv[1][1]=='v') verbose = true;

  //  char tmp;
  //  if (rank==0) cout << "Press any key to continue..."<< endl;
  //  if (rank==0) cin >> tmp;
  //  Comm.Barrier();

  int MyPID = Comm.MyPID();
  int NumProc = Comm.NumProc(); 

  if (verbose && MyPID==0)
    cout << Epetra_Version() << endl << endl;

  if (verbose) cout << Comm <<endl;

  bool verbose1 = verbose;

  // Redefine verbose to only print on PE 0
  if (verbose && rank!=0) verbose = false;

  int NumMyElements = 10000;
  int NumMyElements1 = NumMyElements; // Needed for localmap
  int NumGlobalElements = NumMyElements*NumProc+EPETRA_MIN(NumProc,3);
  if (MyPID < 3) NumMyElements++;
  int IndexBase = 0;
  int ElementSize = 7;
  int NumVectors = 4;
  
  // Test LocalMap constructor
  // and Petra-defined uniform linear distribution constructor

  if (verbose) cout << "\n*********************************************************" << endl;
  if (verbose) cout << "Checking Epetra_LocalMap(NumMyElements1, IndexBase, Comm)" << endl;
  if (verbose) cout << "     and Epetra_BlockMap(NumGlobalElements, ElementSize, IndexBase, Comm)" << endl;
  if (verbose) cout << "*********************************************************" << endl;

  Epetra_LocalMap *LocalMap = new Epetra_LocalMap(NumMyElements1, IndexBase,
                              Comm);
  Epetra_BlockMap * BlockMap = new Epetra_BlockMap(NumGlobalElements, ElementSize, IndexBase, Comm);
  EPETRA_TEST_ERR(MultiVectorTests(*BlockMap, NumVectors, verbose),ierr);


  EPETRA_TEST_ERR(MatrixTests(*BlockMap, *LocalMap, NumVectors, verbose),ierr);

  delete BlockMap;

  // Test User-defined linear distribution constructor

  if (verbose) cout << "\n*********************************************************" << endl;
  if (verbose) cout << "Checking Epetra_BlockMap(NumGlobalElements, NumMyElements, ElementSize, IndexBase, Comm)" << endl;
  if (verbose) cout << "*********************************************************" << endl;

  BlockMap = new Epetra_BlockMap(NumGlobalElements, NumMyElements, ElementSize, IndexBase, Comm);

  EPETRA_TEST_ERR(MultiVectorTests(*BlockMap, NumVectors, verbose),ierr);

  EPETRA_TEST_ERR(MatrixTests(*BlockMap, *LocalMap, NumVectors, verbose),ierr);

  delete BlockMap;

  // Test User-defined arbitrary distribution constructor
  // Generate Global Element List.  Do in reverse for fun!

  int * MyGlobalElements = new int[NumMyElements];
  int MaxMyGID = (Comm.MyPID()+1)*NumMyElements-1+IndexBase;
  if (Comm.MyPID()>2) MaxMyGID+=3;
  for (i = 0; i<NumMyElements; i++) MyGlobalElements[i] = MaxMyGID-i;

  if (verbose) cout << "\n*********************************************************" << endl;
  if (verbose) cout << "Checking Epetra_BlockMap(NumGlobalElements, NumMyElements, MyGlobalElements,  ElementSize, IndexBase, Comm)" << endl;
  if (verbose) cout << "*********************************************************" << endl;

  BlockMap = new Epetra_BlockMap(NumGlobalElements, NumMyElements, MyGlobalElements, ElementSize,
		      IndexBase, Comm);
  EPETRA_TEST_ERR(MultiVectorTests(*BlockMap, NumVectors, verbose),ierr);

  EPETRA_TEST_ERR(MatrixTests(*BlockMap, *LocalMap, NumVectors, verbose),ierr);

  delete BlockMap;

  int * ElementSizeList = new int[NumMyElements];
  int NumMyEquations = 0;
  int NumGlobalEquations = 0;
  for (i = 0; i<NumMyElements; i++) 
    {
      ElementSizeList[i] = i%6+2; // blocksizes go from 2 to 7
      NumMyEquations += ElementSizeList[i];
    }
  ElementSize = 7; // Set to maximum for use in checkmap
  NumGlobalEquations = Comm.NumProc()*NumMyEquations;

  // Adjust NumGlobalEquations based on processor ID
  if (Comm.NumProc() > 3)
    {
      if (Comm.MyPID()>2)
	NumGlobalEquations += 3*((NumMyElements)%6+2);
      else 
	NumGlobalEquations -= (Comm.NumProc()-3)*((NumMyElements-1)%6+2);
    }

  if (verbose) cout << "\n*********************************************************" << endl;
  if (verbose) cout << "Checking Epetra_BlockMap(NumGlobalElements, NumMyElements, MyGlobalElements,  ElementSizeList, IndexBase, Comm)" << endl;
  if (verbose) cout << "*********************************************************" << endl;

  BlockMap = new Epetra_BlockMap(NumGlobalElements, NumMyElements, MyGlobalElements, ElementSizeList,
		      IndexBase, Comm);
  EPETRA_TEST_ERR(MultiVectorTests(*BlockMap, NumVectors, verbose),ierr);

  EPETRA_TEST_ERR(MatrixTests(*BlockMap, *LocalMap, NumVectors, verbose),ierr);

  // Test Copy constructor

  if (verbose) cout << "\n*********************************************************" << endl;
  if (verbose) cout << "Checking Epetra_BlockMap(*BlockMap)" << endl;
  if (verbose) cout << "*********************************************************" << endl;

  Epetra_BlockMap * BlockMap1 = new Epetra_BlockMap(*BlockMap);

  EPETRA_TEST_ERR(MultiVectorTests(*BlockMap1, NumVectors, verbose),ierr);

  EPETRA_TEST_ERR(MatrixTests(*BlockMap1, *LocalMap, NumVectors, verbose),ierr);

  delete [] ElementSizeList;
  delete [] MyGlobalElements;
  delete BlockMap;
  delete BlockMap1;


  // Test Petra-defined uniform linear distribution constructor

  if (verbose) cout << "\n*********************************************************" << endl;
  if (verbose) cout << "Checking Epetra_Map(NumGlobalElements, IndexBase, Comm)" << endl;
  if (verbose) cout << "*********************************************************" << endl;

  Epetra_Map * Map = new Epetra_Map(NumGlobalElements, IndexBase, Comm);
  EPETRA_TEST_ERR(MultiVectorTests(*Map, NumVectors, verbose),ierr);

  EPETRA_TEST_ERR(MatrixTests(*Map, *LocalMap, NumVectors, verbose),ierr);

  delete Map;

  // Test User-defined linear distribution constructor

  if (verbose) cout << "\n*********************************************************" << endl;
  if (verbose) cout << "Checking Epetra_Map(NumGlobalElements, NumMyElements, IndexBase, Comm)" << endl;
  if (verbose) cout << "*********************************************************" << endl;

  Map = new Epetra_Map(NumGlobalElements, NumMyElements, IndexBase, Comm);

  EPETRA_TEST_ERR(MultiVectorTests(*Map, NumVectors, verbose),ierr);

  EPETRA_TEST_ERR(MatrixTests(*Map, *LocalMap, NumVectors, verbose),ierr);

  delete Map;

  // Test User-defined arbitrary distribution constructor
  // Generate Global Element List.  Do in reverse for fun!

  MyGlobalElements = new int[NumMyElements];
  MaxMyGID = (Comm.MyPID()+1)*NumMyElements-1+IndexBase;
  if (Comm.MyPID()>2) MaxMyGID+=3;
  for (i = 0; i<NumMyElements; i++) MyGlobalElements[i] = MaxMyGID-i;

  if (verbose) cout << "\n*********************************************************" << endl;
  if (verbose) cout << "Checking Epetra_Map(NumGlobalElements, NumMyElements, MyGlobalElements,  IndexBase, Comm)" << endl;
  if (verbose) cout << "*********************************************************" << endl;

  Map = new Epetra_Map(NumGlobalElements, NumMyElements, MyGlobalElements, 
		      IndexBase, Comm);
  EPETRA_TEST_ERR(MultiVectorTests(*Map, NumVectors, verbose),ierr);

  //EPETRA_TEST_ERR(MatrixTests(*Map, *LocalMap, NumVectors, verbose),ierr);

  // Test Copy constructor

  if (verbose) cout << "\n*********************************************************" << endl;
  if (verbose) cout << "Checking Epetra_Map(*Map)" << endl;
  if (verbose) cout << "*********************************************************" << endl;
 
  Epetra_Map Map1(*Map);

  EPETRA_TEST_ERR(MultiVectorTests(Map1, NumVectors, verbose),ierr);

  //EPETRA_TEST_ERR(MatrixTests(*Map, *LocalMap, NumVectors, verbose),ierr);

  delete [] MyGlobalElements;
  delete Map;
  delete LocalMap;

  if (verbose1)
    {
      // Test MultiVector MFLOPS for 2D Dot Product
      int M = 27;
      int N = 27;
      int K = 10000;
      Epetra_Map Map2(-1, K, IndexBase, Comm);
      Epetra_LocalMap Map3(M, IndexBase, Comm);
      
      Epetra_MultiVector A(Map2,N);A.Random();
      Epetra_MultiVector B(Map2,N);B.Random();
      Epetra_MultiVector C(Map3,N);C.Random();

      if (verbose) cout << "Testing Assignment operator" << endl;

      double tmp1 = 1.00001* (double) (MyPID+1);
      double tmp2 = tmp1;
      A[1][1] = tmp1;
      tmp2 = A[1][1];
      cout << "On PE "<< MyPID << "  A[1][1] should equal = " << tmp1;
      if (tmp1==tmp2) cout << " and it does!" << endl;
      else cout << " but it equals " << tmp2 << endl;
 
      Comm.Barrier();
	  
      if (verbose) cout << "Testing MFLOPs" << endl;
      Epetra_Flops counter;
      C.SetFlopCounter(counter);
      Epetra_Time mytimer(Comm);
      C.Multiply('T', 'N', 0.5, A, B, 0.0);
      double Multiply_time = mytimer.ElapsedTime();
      double Multiply_flops = C.Flops();
      if (verbose) cout << "\n\nTotal FLOPs = " << Multiply_flops << endl;
      if (verbose) cout << "Total Time  = " << Multiply_time << endl;
      if (verbose) cout << "MFLOPs      = " << Multiply_flops/Multiply_time/1000000.0 << endl;

      Comm.Barrier();
	  
      // Test MultiVector ostream operator with Petra-defined uniform linear distribution constructor
      // and a small vector
      
      Epetra_Map Map4(100, IndexBase, Comm);
      double * Dp = new double[200]; 
      for (j=0; j<2; j++)
	for (i=0; i<100; i++)
	  Dp[i+j*100] = i+j*100;
      Epetra_MultiVector D(View, Map4,Dp, 100, 2);
	  
      if (verbose) cout << "\n\nTesting ostream operator:  Multivector  should be 100-by-2 and print i,j indices" 
	   << endl << endl;
      cout << D << endl;

      Epetra_BlockMap Map5(-1, 25, 4, IndexBase, Comm);
      Epetra_MultiVector D1(View, Map5,Dp, 100, 2);
      if (verbose) cout << "\n\nTesting ostream operator:  Same Multivector as before except using BlockMap of 25x4" 
	   << endl << endl;
      cout << D1 << endl;

      if (verbose) cout << "Traceback Mode value = " << D.GetTracebackMode() << endl;
      delete [] Dp;
    }

#ifdef EPETRA_MPI
  MPI_Finalize();
#endif

  return ierr;
}
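The MFLOPs figure printed by the timing block is just flops / seconds / 10^6, with the flop count taken from the Epetra_Flops counter attached to C. For the C.Multiply('T','N',...) call above, where A and B are K-by-N (K=10000, N=27), a rough independent estimate is 2*K*N*N multiply-adds. A back-of-the-envelope check; the 0.05 s elapsed time is invented for illustration:

/* Rough flop estimate for C = alpha * A^T * B with A, B of size K-by-N:
 * each of the N*N entries of C costs about K multiply-adds, i.e. 2*K*N*N
 * flops. The library's Epetra_Flops counter is the authoritative number;
 * this is only a sanity check. */
#include <stdio.h>

int main(void) {
  const double K = 10000.0, N = 27.0;  /* sizes used in the test above */
  double flops = 2.0 * K * N * N;
  double seconds = 0.05;               /* example timing, not measured */

  printf("estimated FLOPs: %.0f\n", flops);
  printf("MFLOPs at %.2fs: %.2f\n", seconds, flops / seconds / 1000000.0);
  return 0;
}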
Example #3
int jffs2_prepare_write (struct file *filp, struct page *pg, unsigned start, unsigned end)
{
	struct inode *inode = pg->mapping->host;
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	__u32 pageofs = pg->index << PAGE_CACHE_SHIFT;
	int ret = 0;

	D1(printk(KERN_DEBUG "jffs2_prepare_write() nrpages %ld\n", inode->i_mapping->nrpages));

	if (pageofs > inode->i_size) {
		/* Make new hole frag from old EOF to new page */
		struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
		struct jffs2_raw_inode ri;
		struct jffs2_full_dnode *fn;
		__u32 phys_ofs, alloc_len;
		
		D1(printk(KERN_DEBUG "Writing new hole frag 0x%x-0x%x between current EOF and new page\n",
			  (unsigned int)inode->i_size, pageofs));

		ret = jffs2_reserve_space(c, sizeof(ri), &phys_ofs, &alloc_len, ALLOC_NORMAL);
		if (ret)
			return ret;

		down(&f->sem);
		memset(&ri, 0, sizeof(ri));

		ri.magic = JFFS2_MAGIC_BITMASK;
		ri.nodetype = JFFS2_NODETYPE_INODE;
		ri.totlen = sizeof(ri);
		ri.hdr_crc = crc32(0, &ri, sizeof(struct jffs2_unknown_node)-4);

		ri.ino = f->inocache->ino;
		ri.version = ++f->highest_version;
		ri.mode = inode->i_mode;
		ri.uid = inode->i_uid;
		ri.gid = inode->i_gid;
		ri.isize = max((__u32)inode->i_size, pageofs);
		ri.atime = ri.ctime = ri.mtime = CURRENT_TIME;
		ri.offset = inode->i_size;
		ri.dsize = pageofs - inode->i_size;
		ri.csize = 0;
		ri.compr = JFFS2_COMPR_ZERO;
		ri.node_crc = crc32(0, &ri, sizeof(ri)-8);
		ri.data_crc = 0;
		
		fn = jffs2_write_dnode(inode, &ri, NULL, 0, phys_ofs, NULL);
		jffs2_complete_reservation(c);
		if (IS_ERR(fn)) {
			ret = PTR_ERR(fn);
			up(&f->sem);
			return ret;
		}
		ret = jffs2_add_full_dnode_to_inode(c, f, fn);
		if (f->metadata) {
			jffs2_mark_node_obsolete(c, f->metadata->raw);
			jffs2_free_full_dnode(f->metadata);
			f->metadata = NULL;
		}
		if (ret) {
			D1(printk(KERN_DEBUG "Eep. add_full_dnode_to_inode() failed in prepare_write, returned %d\n", ret));
			jffs2_mark_node_obsolete(c, fn->raw);
			jffs2_free_full_dnode(fn);
			up(&f->sem);
			return ret;
		}
		inode->i_size = pageofs;
		up(&f->sem);
	}
	

	/* Read in the page if it wasn't already present, unless it's a whole page */
	if (!Page_Uptodate(pg) && (start || end < PAGE_CACHE_SIZE)) {
		down(&f->sem);
		ret = jffs2_do_readpage_nolock(inode, pg);
		up(&f->sem);
	}
	D1(printk(KERN_DEBUG "end prepare_write(). pg->flags %lx\n", pg->flags));
	return ret;
}
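The interesting case above is writing to a page that starts past EOF: pageofs = pg->index << PAGE_CACHE_SHIFT, and when pageofs > i_size the function logs a zero-compressed (JFFS2_COMPR_ZERO) node with offset = i_size and dsize = pageofs - i_size to cover the hole. A tiny sketch of that arithmetic, assuming 4 KiB pages (a page shift of 12):

/* Hole-frag arithmetic from the function above, in user space. The page
 * shift of 12 (4 KiB pages) is an assumption for illustration. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

int main(void)
{
	uint32_t i_size = 5000;			/* current EOF */
	uint32_t index = 3;			/* page being written */
	uint32_t pageofs = index << PAGE_SHIFT;	/* 12288 */

	if (pageofs > i_size)
		printf("hole node: offset=%u dsize=%u (JFFS2_COMPR_ZERO)\n",
		       i_size, pageofs - i_size);
	return 0;
}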
Example #4
int jffs2_scan_medium(struct jffs2_sb_info *c)
{
	int i, ret;
	uint32_t empty_blocks = 0, bad_blocks = 0;
	unsigned char *flashbuf = NULL;
	uint32_t buf_size = 0;
	struct jffs2_summary *s = NULL; /* summary info collected by the scan process */
#ifndef __ECOS
	size_t pointlen, try_size;

	if (c->mtd->point) {
		ret = c->mtd->point(c->mtd, 0, c->mtd->size, &pointlen,
				    (void **)&flashbuf, NULL);
		if (!ret && pointlen < c->mtd->size) {
			/* Don't muck about if it won't let us point to the whole flash */
			D1(printk(KERN_DEBUG "MTD point returned len too short: 0x%zx\n", pointlen));
			c->mtd->unpoint(c->mtd, 0, pointlen);
			flashbuf = NULL;
		}
		if (ret)
			D1(printk(KERN_DEBUG "MTD point failed %d\n", ret));
	}
#endif
	if (!flashbuf) {
		/* For NAND it's quicker to read a whole eraseblock at a time,
		   apparently */
		if (jffs2_cleanmarker_oob(c))
			try_size = c->sector_size;
		else
			try_size = PAGE_SIZE;

		D1(printk(KERN_DEBUG "Trying to allocate readbuf of %zu "
			"bytes\n", try_size));

		flashbuf = mtd_kmalloc_up_to(c->mtd, &try_size);
		if (!flashbuf)
			return -ENOMEM;

		D1(printk(KERN_DEBUG "Allocated readbuf of %zu bytes\n",
			try_size));

		buf_size = (uint32_t)try_size;
	}

	if (jffs2_sum_active()) {
		s = kzalloc(sizeof(struct jffs2_summary), GFP_KERNEL);
		if (!s) {
			JFFS2_WARNING("Can't allocate memory for summary\n");
			ret = -ENOMEM;
			goto out;
		}
	}

	for (i=0; i<c->nr_blocks; i++) {
		struct jffs2_eraseblock *jeb = &c->blocks[i];

		cond_resched();

		/* reset summary info for next eraseblock scan */
		jffs2_sum_reset_collected(s);

		ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset),
						buf_size, s);

		if (ret < 0)
			goto out;

		jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

		/* Now decide which list to put it on */
		switch(ret) {
		case BLK_STATE_ALLFF:
			/*
			 * Empty block.   Since we can't be sure it
			 * was entirely erased, we just queue it for erase
			 * again.  It will be marked as such when the erase
			 * is complete.  Meanwhile we still count it as empty
			 * for later checks.
			 */
			empty_blocks++;
			list_add(&jeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			break;

		case BLK_STATE_CLEANMARKER:
			/* Only a CLEANMARKER node is valid */
			if (!jeb->dirty_size) {
				/* It's actually free */
				list_add(&jeb->list, &c->free_list);
				c->nr_free_blocks++;
			} else {
				/* Dirt */
				D1(printk(KERN_DEBUG "Adding all-dirty block at 0x%08x to erase_pending_list\n", jeb->offset));
				list_add(&jeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
			}
			break;

		case BLK_STATE_CLEAN:
			/* Full (or almost full) of clean data. Clean list */
			list_add(&jeb->list, &c->clean_list);
			break;

		case BLK_STATE_PARTDIRTY:
			/* Some data, but not full. Dirty list. */
			/* We want to remember the block with most free space
			and stick it in the 'nextblock' position to start writing to it. */
			if (jeb->free_size > min_free(c) &&
					(!c->nextblock || c->nextblock->free_size < jeb->free_size)) {
				/* Better candidate for the next writes to go to */
				if (c->nextblock) {
					ret = file_dirty(c, c->nextblock);
					if (ret)
						goto out;
					/* deleting summary information of the old nextblock */
					jffs2_sum_reset_collected(c->summary);
				}
				/* update collected summary information for the current nextblock */
				jffs2_sum_move_collected(c, s);
				D1(printk(KERN_DEBUG "jffs2_scan_medium(): new nextblock = 0x%08x\n", jeb->offset));
				c->nextblock = jeb;
			} else {
				ret = file_dirty(c, jeb);
				if (ret)
					goto out;
			}
			break;

		case BLK_STATE_ALLDIRTY:
			/* Nothing valid - not even a clean marker. Needs erasing. */
			/* For now we just put it on the erasing list. We'll start the erases later */
			D1(printk(KERN_NOTICE "JFFS2: Erase block at 0x%08x is not formatted. It will be erased\n", jeb->offset));
			list_add(&jeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			break;

		case BLK_STATE_BADBLOCK:
			D1(printk(KERN_NOTICE "JFFS2: Block at 0x%08x is bad\n", jeb->offset));
			list_add(&jeb->list, &c->bad_list);
			c->bad_size += c->sector_size;
			c->free_size -= c->sector_size;
			bad_blocks++;
			break;
		default:
			printk(KERN_WARNING "jffs2_scan_medium(): unknown block state\n");
			BUG();
		}
	}

	/* Nextblock dirty is always seen as wasted, because we cannot recycle it now */
	if (c->nextblock && (c->nextblock->dirty_size)) {
		c->nextblock->wasted_size += c->nextblock->dirty_size;
		c->wasted_size += c->nextblock->dirty_size;
		c->dirty_size -= c->nextblock->dirty_size;
		c->nextblock->dirty_size = 0;
	}
#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
	if (!jffs2_can_mark_obsolete(c) && c->wbuf_pagesize && c->nextblock && (c->nextblock->free_size % c->wbuf_pagesize)) {
		/* If we're going to start writing into a block which already
		   contains data, and the end of the data isn't page-aligned,
		   skip a little and align it. */

		uint32_t skip = c->nextblock->free_size % c->wbuf_pagesize;

		D1(printk(KERN_DEBUG "jffs2_scan_medium(): Skipping %d bytes in nextblock to ensure page alignment\n",
			  skip));
		jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
		jffs2_scan_dirty_space(c, c->nextblock, skip);
	}
#endif
	if (c->nr_erasing_blocks) {
		if ( !c->used_size && ((c->nr_free_blocks+empty_blocks+bad_blocks)!= c->nr_blocks || bad_blocks == c->nr_blocks) ) {
			printk(KERN_NOTICE "Cowardly refusing to erase blocks on filesystem with no valid JFFS2 nodes\n");
			printk(KERN_NOTICE "empty_blocks %d, bad_blocks %d, c->nr_blocks %d\n",empty_blocks,bad_blocks,c->nr_blocks);
			ret = -EIO;
			goto out;
		}
		spin_lock(&c->erase_completion_lock);
		jffs2_garbage_collect_trigger(c);
		spin_unlock(&c->erase_completion_lock);
	}
	ret = 0;
 out:
	if (buf_size)
		kfree(flashbuf);
#ifndef __ECOS
	else
		c->mtd->unpoint(c->mtd, 0, c->mtd->size);
#endif
	if (s)
		kfree(s);

	return ret;
}
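One detail worth calling out is the write-buffer fix-up near the end: if the chosen nextblock's free area does not end on a wbuf page boundary, the scan marks free_size % wbuf_pagesize bytes as dirty so that subsequent writes start page-aligned. A sketch of just that arithmetic, with invented sizes:

/* The nextblock alignment fix-up, in isolation: trim the non-aligned
 * remainder off the front of the free area so writes start on a wbuf
 * page boundary. Sizes here are invented for illustration. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t wbuf_pagesize = 512;	/* e.g. a NAND write page */
	uint32_t free_size = 13000;	/* free bytes left in nextblock */
	uint32_t skip = free_size % wbuf_pagesize;

	printf("mark %u bytes dirty; %u aligned bytes remain\n",
	       skip, free_size - skip);
	return 0;
}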
Example #5
void jffs2_free_inode_cache(struct jffs2_inode_cache *x)
{
	D1(printk(KERN_DEBUG "Freeing inocache at %p\n", x));
	kmem_cache_free(inode_cache_slab, x);
}
Example #6
/*
 * Program the macaddress and vlans of a port.
 *
 * Returns 0 on success, 1 on failure.
 */
static int
vsw_set_port_hw_addr(vsw_port_t *port)
{
	vsw_t			*vswp = port->p_vswp;
	mac_diag_t		diag;
	uint8_t			*macaddr;
	uint16_t		vid = VLAN_ID_NONE;
	int			rv = 0;
	uint16_t		mac_flags = MAC_UNICAST_TAG_DISABLE |
	    MAC_UNICAST_STRIP_DISABLE;

	D1(vswp, "%s: enter", __func__);

	ASSERT(RW_WRITE_HELD(&port->maccl_rwlock));
	if (port->p_mch == NULL)
		return (0);

	/*
	 * If the port has a specific 'pvid', then
	 * register with that vlan-id, otherwise register
	 * with VLAN_ID_NONE.
	 */
	if (port->pvid != vswp->default_vlan_id) {
		vid = port->pvid;
	}
	macaddr = (uint8_t *)port->p_macaddr.ether_addr_octet;

	if (!(vswp->smode & VSW_LAYER2_PROMISC)) {
		mac_flags |= MAC_UNICAST_HW;
	}

	if (port->addr_set == B_FALSE) {
		port->p_muh = NULL;
		rv = mac_unicast_add(port->p_mch, macaddr, mac_flags,
		    &port->p_muh, vid, &diag);

		if (rv != 0) {
			cmn_err(CE_WARN, "vsw%d: Failed to program "
			    "macaddr,vid(%s, %d) err=%d",
			    vswp->instance, ether_sprintf((void *)macaddr),
			    vid, rv);
			return (rv);
		}
		port->addr_set = B_TRUE;

		D2(vswp, "%s:programmed macaddr(%s) vid(%d) into device %s",
		    __func__, ether_sprintf((void *)macaddr), vid,
		    vswp->physname);
	}

	/* Add vlans to the MAC layer */
	vsw_mac_add_vlans(vswp, port->p_mch, macaddr,
	    mac_flags, port->vids, port->nvids);

	/* Configure bandwidth to the MAC layer */
	vsw_maccl_set_bandwidth(NULL, port, VSW_VNETPORT, port->p_bandwidth);

	mac_rx_set(port->p_mch, vsw_port_rx_cb, (void *)port);

	D1(vswp, "%s: exit", __func__);
	return (rv);
}
Example #7
static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				  struct jffs2_raw_dirent *rd, uint32_t ofs, struct jffs2_summary *s)
{
	struct jffs2_full_dirent *fd;
	struct jffs2_inode_cache *ic;
	uint32_t checkedlen;
	uint32_t crc;
	int err;

	D1(printk(KERN_DEBUG "jffs2_scan_dirent_node(): Node at 0x%08x\n", ofs));

	/* We don't get here unless the node is still valid, so we don't have to
	   mask in the ACCURATE bit any more. */
	crc = crc32(0, rd, sizeof(*rd)-8);

	if (crc != je32_to_cpu(rd->node_crc)) {
		printk(KERN_NOTICE "jffs2_scan_dirent_node(): Node CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
		       ofs, je32_to_cpu(rd->node_crc), crc);
		/* We believe totlen because the CRC on the node _header_ was OK, just the node itself failed. */
		if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rd->totlen)))))
			return err;
		return 0;
	}

	pseudo_random += je32_to_cpu(rd->version);

	/* Should never happen. Did. (OLPC trac #4184)*/
	checkedlen = strnlen(rd->name, rd->nsize);
	if (checkedlen < rd->nsize) {
		printk(KERN_ERR "Dirent at %08x has zeroes in name. Truncating to %d chars\n",
		       ofs, checkedlen);
	}
	fd = jffs2_alloc_full_dirent(checkedlen+1);
	if (!fd) {
		return -ENOMEM;
	}
	memcpy(&fd->name, rd->name, checkedlen);
	fd->name[checkedlen] = 0;

	crc = crc32(0, fd->name, rd->nsize);
	if (crc != je32_to_cpu(rd->name_crc)) {
		printk(KERN_NOTICE "jffs2_scan_dirent_node(): Name CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
		       ofs, je32_to_cpu(rd->name_crc), crc);
		D1(printk(KERN_NOTICE "Name for which CRC failed is (now) '%s', ino #%d\n", fd->name, je32_to_cpu(rd->ino)));
		jffs2_free_full_dirent(fd);
		/* FIXME: Why do we believe totlen? */
		/* We believe totlen because the CRC on the node _header_ was OK, just the name failed. */
		if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rd->totlen)))))
			return err;
		return 0;
	}
	ic = jffs2_scan_make_ino_cache(c, je32_to_cpu(rd->pino));
	if (!ic) {
		jffs2_free_full_dirent(fd);
		return -ENOMEM;
	}

	fd->raw = jffs2_link_node_ref(c, jeb, ofs | dirent_node_state(rd),
				      PAD(je32_to_cpu(rd->totlen)), ic);

	fd->next = NULL;
	fd->version = je32_to_cpu(rd->version);
	fd->ino = je32_to_cpu(rd->ino);
	fd->nhash = full_name_hash(fd->name, checkedlen);
	fd->type = rd->type;
	jffs2_add_fd_to_list(c, fd, &ic->scan_dents);

	if (jffs2_sum_active()) {
		jffs2_sum_add_dirent_mem(s, rd, ofs - jeb->offset);
	}

	return 0;
}
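The node_crc test runs crc32 over the raw dirent minus its two trailing 32-bit CRC fields, which is where sizeof(*rd)-8 comes from (and sizeof(ri)-8 in the inode examples above). A user-space sketch of the same pattern with zlib's crc32; the toy struct only mimics the layout idea and is not the real jffs2_raw_dirent. Build with -lz:

/* CRC over a node header minus its trailing CRC fields. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <zlib.h>

struct toy_node {
	uint16_t magic;
	uint16_t nodetype;
	uint32_t totlen;
	uint32_t name_crc;	/* these last 8 bytes are what ... */
	uint32_t node_crc;	/* ... sizeof(*rd) - 8 leaves out */
};

int main(void)
{
	struct toy_node n;

	memset(&n, 0, sizeof(n));
	n.magic = 0x1985;	/* the value of JFFS2_MAGIC_BITMASK */
	n.node_crc = crc32(0, (const unsigned char *)&n, sizeof(n) - 8);
	printf("node_crc = 0x%08lx\n", (unsigned long)n.node_crc);
	return 0;
}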
Example #8
void Trr2kNNTN
( UpperOrLower uplo,
  Orientation orientationOfC,
  T alpha, const DistMatrix<T>& A, const DistMatrix<T>& B,
           const DistMatrix<T>& C, const DistMatrix<T>& D,
  T beta,        DistMatrix<T>& E )
{
#ifndef RELEASE
    CallStackEntry entry("internal::Trr2kNNTN");
    if( E.Height() != E.Width()  || A.Width()  != C.Height() ||
        A.Height() != E.Height() || C.Width()  != E.Height() ||
        B.Width()  != E.Width()  || D.Width()  != E.Width()  ||
        A.Width()  != B.Height() || C.Height() != D.Height() )
        throw std::logic_error("Nonconformal Trr2kNNTN");
#endif
    const Grid& g = E.Grid();

    DistMatrix<T> AL(g), AR(g),
                  A0(g), A1(g), A2(g);
    DistMatrix<T> BT(g),  B0(g),
                  BB(g),  B1(g),
                          B2(g);

    DistMatrix<T> CT(g),  C0(g),
                  CB(g),  C1(g),
                          C2(g);
    DistMatrix<T> DT(g),  D0(g),
                  DB(g),  D1(g),
                          D2(g);

    DistMatrix<T,MC,  STAR> A1_MC_STAR(g);
    DistMatrix<T,MR,  STAR> B1Trans_MR_STAR(g);
    DistMatrix<T,STAR,MC  > C1_STAR_MC(g);
    DistMatrix<T,MR,  STAR> D1Trans_MR_STAR(g);

    A1_MC_STAR.AlignWith( E );
    B1Trans_MR_STAR.AlignWith( E );
    C1_STAR_MC.AlignWith( E );
    D1Trans_MR_STAR.AlignWith( E );

    LockedPartitionRight( A, AL, AR, 0 );
    LockedPartitionDown
    ( B, BT,
         BB, 0 );
    LockedPartitionDown
    ( C, CT,
         CB, 0 );
    LockedPartitionDown
    ( D, DT,
         DB, 0 );
    while( AL.Width() < A.Width() )
    {
        LockedRepartitionRight
        ( AL, /**/ AR,
          A0, /**/ A1, A2 );
        LockedRepartitionDown
        ( BT,  B0,
         /**/ /**/
               B1,
          BB,  B2 );
        LockedRepartitionDown
        ( CT,  C0,
         /**/ /**/
               C1,
          CB,  C2 );
        LockedRepartitionDown
        ( DT,  D0,
         /**/ /**/
               D1,
          DB,  D2 );

        //--------------------------------------------------------------------//
        A1_MC_STAR = A1;
        C1_STAR_MC = C1;
        B1Trans_MR_STAR.TransposeFrom( B1 );
        D1Trans_MR_STAR.TransposeFrom( D1 );
        LocalTrr2k
        ( uplo, TRANSPOSE, orientationOfC, TRANSPOSE,
          alpha, A1_MC_STAR, B1Trans_MR_STAR, 
                 C1_STAR_MC, D1Trans_MR_STAR,
          beta,  E );
        //--------------------------------------------------------------------//

        SlideLockedPartitionDown
        ( DT,  D0,
               D1,
         /**/ /**/
          DB,  D2 );
        SlideLockedPartitionDown
        ( CT,  C0,
               C1,
         /**/ /**/
          CB,  C2 );
        SlideLockedPartitionDown
        ( BT,  B0,
               B1,
         /**/ /**/
          BB,  B2 );
        SlideLockedPartitionRight
        ( AL,     /**/ AR,
          A0, A1, /**/ A2 );
    }
}
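Reading off the suffix convention (NNTN appears to mean A and B normal, C transposed, D normal, with orientationOfC passed through), the partitioned loop accumulates E := alpha*(A*B + C^T*D) + beta*E one k-panel at a time, restricted to the triangle named by uplo. An unblocked scalar sketch of that update, ignoring the distribution and the triangular restriction:

/* Unblocked scalar version of the update, for square N-by-N matrices in
 * row-major order. The distributed routine above does the same work in
 * k-panels and only touches the uplo triangle of E. */
#include <stdio.h>

#define N 2

static void trr2k_sketch(double alpha, double A[N][N], double B[N][N],
                          double C[N][N], double D[N][N],
                          double beta, double E[N][N])
{
    for (int i = 0; i < N; ++i)
        for (int j = 0; j < N; ++j) {
            double sum = 0.0;
            for (int k = 0; k < N; ++k)
                sum += A[i][k] * B[k][j]    /* A*B term */
                     + C[k][i] * D[k][j];   /* C^T*D term */
            E[i][j] = alpha * sum + beta * E[i][j];
        }
}

int main(void)
{
    double I[N][N] = { { 1, 0 }, { 0, 1 } };
    double E[N][N] = { { 0, 0 }, { 0, 0 } };

    trr2k_sketch(1.0, I, I, I, I, 0.0, E);  /* E = I*I + I^T*I = 2I */
    printf("E = [[%g, %g], [%g, %g]]\n", E[0][0], E[0][1], E[1][0], E[1][1]);
    return 0;
}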
Example #9
static int jffs2_create(struct inode *dir_i, struct dentry *dentry, int mode,
			struct nameidata *nd)
{
	struct jffs2_raw_inode *ri;
	struct jffs2_inode_info *f, *dir_f;
	struct jffs2_sb_info *c;
	struct inode *inode;
	int ret;

	ri = jffs2_alloc_raw_inode();
	if (!ri)
		return -ENOMEM;

	c = JFFS2_SB_INFO(dir_i->i_sb);

	D1(printk(KERN_DEBUG "jffs2_create()\n"));

	inode = jffs2_new_inode(dir_i, mode, ri);

	if (IS_ERR(inode)) {
		D1(printk(KERN_DEBUG "jffs2_new_inode() failed\n"));
		jffs2_free_raw_inode(ri);
		return PTR_ERR(inode);
	}

	inode->i_op = &jffs2_file_inode_operations;
	inode->i_fop = &jffs2_file_operations;
	inode->i_mapping->a_ops = &jffs2_file_address_operations;
	inode->i_mapping->nrpages = 0;

	f = JFFS2_INODE_INFO(inode);
	dir_f = JFFS2_INODE_INFO(dir_i);

	ret = jffs2_do_create(c, dir_f, f, ri,
			      dentry->d_name.name, dentry->d_name.len);

	if (ret)
		goto fail;

	ret = jffs2_init_security(inode, dir_i);
	if (ret)
		goto fail;
	ret = jffs2_init_acl(inode, dir_i);
	if (ret)
		goto fail;

	dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(ri->ctime));

	jffs2_free_raw_inode(ri);
	d_instantiate(dentry, inode);

	D1(printk(KERN_DEBUG "jffs2_create: Created ino #%lu with mode %o, nlink %d(%d). nrpages %ld\n",
		  inode->i_ino, inode->i_mode, inode->i_nlink, f->inocache->nlink, inode->i_mapping->nrpages));
	return 0;

 fail:
	make_bad_inode(inode);
	iput(inode);
	jffs2_free_raw_inode(ri);
	return ret;
}
Example #10
/* close a vldc port */
static int
i_vldc_close_port(vldc_t *vldcp, uint_t portno)
{
	vldc_port_t	*vport;
	vldc_minor_t	*vminor;
	int		rv = DDI_SUCCESS;

	vport = &(vldcp->port[portno]);

	ASSERT(MUTEX_HELD(&vport->minorp->lock));

	D1("i_vldc_close_port: vldc@%d:%d: closing port\n",
	    vport->inst, vport->minorp->portno);

	vminor = vport->minorp;

	switch (vport->status) {
	case VLDC_PORT_CLOSED:
		/* nothing to do */
		DWARN("i_vldc_close_port: port %d in an unexpected "
		    "state (%d)\n", portno, vport->status);
		return (DDI_SUCCESS);

	case VLDC_PORT_READY:
	case VLDC_PORT_RESET:
		do {
			rv = i_vldc_ldc_close(vport);
			if (rv != EAGAIN)
				break;

			/*
			 * EAGAIN indicates that ldc_close() failed because
			 * ldc callback thread is active for the channel.
			 * cv_timedwait() is used to release vminor->lock and
			 * allow ldc callback thread to complete.
			 * After waking up, check if the port has been closed
			 * by another thread in the meantime.
			 */
			(void) cv_reltimedwait(&vminor->cv, &vminor->lock,
			    drv_usectohz(vldc_close_delay), TR_CLOCK_TICK);
			rv = 0;
		} while (vport->status != VLDC_PORT_CLOSED);

		if ((rv != 0) || (vport->status == VLDC_PORT_CLOSED))
			return (rv);

		break;

	case VLDC_PORT_OPEN:
		break;

	default:
		DWARN("i_vldc_close_port: port %d in an unexpected "
		    "state (%d)\n", portno, vport->status);
		ASSERT(0);	/* fail quickly to help diagnosis */
		return (EINVAL);
	}

	ASSERT(vport->status == VLDC_PORT_OPEN);

	/* free memory */
	kmem_free(vport->send_buf, vport->mtu);
	kmem_free(vport->recv_buf, vport->mtu);

	if (strcmp(vminor->sname, VLDC_HVCTL_SVCNAME) == 0)
		kmem_free(vport->cookie_buf, vldc_max_cookie);

	vport->status = VLDC_PORT_CLOSED;

	return (rv);
}
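The READY/RESET arm is a classic EAGAIN back-off: ldc_close() can fail while the channel's callback thread is still running, so the loop drops the lock for a while (cv_reltimedwait), lets the callback finish, and re-checks whether another thread closed the port in the meantime. A user-space analogue of the retry shape, with nanosleep standing in for cv_reltimedwait and an invented try_close():

/* Back off on EAGAIN, give the other thread a chance to run, and
 * re-check state after each wake-up. The 1 ms delay is invented. */
#include <errno.h>
#include <stdio.h>
#include <time.h>

static int try_close(int attempt)	/* stand-in for i_vldc_ldc_close() */
{
	return (attempt < 3) ? EAGAIN : 0;	/* succeeds on the 4th try */
}

int main(void)
{
	struct timespec delay = { 0, 1000000 };	/* 1 ms back-off */
	int rv, attempts = 0;

	for (;;) {
		rv = try_close(attempts++);
		if (rv != EAGAIN)
			break;
		nanosleep(&delay, NULL);	/* cv_reltimedwait() analogue */
	}
	printf("closed after %d attempts, rv=%d\n", attempts, rv);
	return 0;
}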
Example #11
/*
 * detach(9E): detach a device from the system.
 */
static int
vldc_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int 		i, instance;
	vldc_t		*vldcp;

	switch (cmd) {

	case DDI_DETACH:

		instance = ddi_get_instance(dip);

		vldcp = ddi_get_soft_state(vldc_ssp, instance);
		if (vldcp == NULL) {
			return (DDI_FAILURE);
		}

		D1("vldc_detach: DDI_DETACH instance=%d\n", instance);

		mutex_enter(&vldcp->lock);

		/* Fail the detach if all ports have not been removed. */
		for (i = 0; i < VLDC_MAX_MINORS; i++) {
			if (vldcp->minor_tbl[i].portno != VLDC_INVALID_PORTNO) {
				D1("vldc_detach: vldc@%d:%d is bound, "
				    "detach failed\n",
				    instance, vldcp->minor_tbl[i].portno);
				mutex_exit(&vldcp->lock);
				return (DDI_FAILURE);
			}
		}

		/*
		 * Prevent MDEG from adding new ports before the callback can
		 * be unregistered. The lock can't be held across the
		 * unregistration call because a callback may be in progress
		 * and blocked on the lock.
		 */
		vldcp->detaching = B_TRUE;

		mutex_exit(&vldcp->lock);

		if (i_vldc_mdeg_unregister(vldcp) != MDEG_SUCCESS) {
			vldcp->detaching = B_FALSE;
			return (DDI_FAILURE);
		}

		/* Tear down all bound ports and free resources. */
		for (i = 0; i < VLDC_MAX_MINORS; i++) {
			if (vldcp->minor_tbl[i].portno != VLDC_INVALID_PORTNO) {
				(void) i_vldc_remove_port(vldcp, i);
			}
			mutex_destroy(&(vldcp->minor_tbl[i].lock));
			cv_destroy(&(vldcp->minor_tbl[i].cv));
		}

		mutex_destroy(&vldcp->lock);
		ddi_soft_state_free(vldc_ssp, instance);

		return (DDI_SUCCESS);

	case DDI_SUSPEND:

		return (DDI_SUCCESS);

	default:

		return (DDI_FAILURE);
	}
}
Example #12
/* add a vldc port */
static int
i_vldc_add_port(vldc_t *vldcp, md_t *mdp, mde_cookie_t node)
{
	vldc_port_t	*vport;
	char		*sname;
	uint64_t	portno;
	int		vldc_inst;
	minor_t		minor;
	int		minor_idx;
	boolean_t	new_minor;
	int		rv;

	ASSERT(MUTEX_HELD(&vldcp->lock));

	/* read in the port's id property */
	if (md_get_prop_val(mdp, node, "id", &portno)) {
		cmn_err(CE_NOTE, "?i_vldc_add_port: node 0x%lx of added "
		    "list has no 'id' property", node);
		return (MDEG_FAILURE);
	}

	if (portno >= VLDC_MAX_PORTS) {
		cmn_err(CE_NOTE, "?i_vldc_add_port: found port number (%lu) "
		    "larger than maximum supported number of ports", portno);
		return (MDEG_FAILURE);
	}

	vport = &(vldcp->port[portno]);

	if (vport->minorp != NULL) {
		cmn_err(CE_NOTE, "?i_vldc_add_port: trying to add a port (%lu)"
		    " which is already bound", portno);
		return (MDEG_FAILURE);
	}

	vport->number = portno;

	/* get all channels for this device (currently only one) */
	if (i_vldc_get_port_channel(mdp, node, &vport->ldc_id) == -1) {
		return (MDEG_FAILURE);
	}

	/* set the default MTU */
	vport->mtu = VLDC_DEFAULT_MTU;

	/* get the service being exported by this port */
	if (md_get_prop_str(mdp, node, "vldc-svc-name", &sname)) {
		cmn_err(CE_NOTE, "?i_vldc_add_port: vdevice has no "
		    "'vldc-svc-name' property");
		return (MDEG_FAILURE);
	}

	/* minor number look up */
	for (minor_idx = 0; minor_idx < vldcp->minors_assigned;
	    minor_idx++) {
		if (strcmp(vldcp->minor_tbl[minor_idx].sname, sname) == 0) {
			/* found previously assigned minor number */
			break;
		}
	}

	new_minor = B_FALSE;
	if (minor_idx == vldcp->minors_assigned) {
		/* end of lookup - assign new minor number */
		if (vldcp->minors_assigned == VLDC_MAX_MINORS) {
			cmn_err(CE_NOTE, "?i_vldc_add_port: too many minor "
			    "nodes (%d)", minor_idx);
			return (MDEG_FAILURE);
		}

		(void) strlcpy(vldcp->minor_tbl[minor_idx].sname,
		    sname, MAXPATHLEN);

		vldcp->minors_assigned++;
		new_minor = B_TRUE;
	}

	if (vldcp->minor_tbl[minor_idx].portno != VLDC_INVALID_PORTNO) {
		cmn_err(CE_NOTE, "?i_vldc_add_port: trying to add a port (%lu)"
		    " which has a minor number in use by port (%u)",
		    portno, vldcp->minor_tbl[minor_idx].portno);
		return (MDEG_FAILURE);
	}

	vldc_inst = ddi_get_instance(vldcp->dip);

	vport->inst = vldc_inst;
	vport->minorp = &vldcp->minor_tbl[minor_idx];
	vldcp->minor_tbl[minor_idx].portno = portno;
	vldcp->minor_tbl[minor_idx].in_use = 0;

	D1("i_vldc_add_port: vldc@%d:%d  mtu=%d, ldc=%ld, service=%s\n",
	    vport->inst, vport->number, vport->mtu, vport->ldc_id, sname);

	/*
	 * Create a minor node. The minor number is
	 * (vldc_inst << VLDC_INST_SHIFT) | minor_idx
	 */
	minor = (vldc_inst << VLDC_INST_SHIFT) | (minor_idx);

	rv = ddi_create_minor_node(vldcp->dip, sname, S_IFCHR,
	    minor, DDI_NT_SERIAL, 0);

	if (rv != DDI_SUCCESS) {
		cmn_err(CE_NOTE, "?i_vldc_add_port: failed to create minor "
		    "node (%u), err = %d", minor, rv);
		vldcp->minor_tbl[minor_idx].portno = VLDC_INVALID_PORTNO;
		if (new_minor) {
			vldcp->minors_assigned--;
		}
		return (MDEG_FAILURE);
	}

	/*
	 * The port is now bound to a minor node and is initially in the
	 * closed state.
	 */
	vport->status = VLDC_PORT_CLOSED;

	D1("i_vldc_add_port: port %lu initialized\n", portno);

	return (MDEG_SUCCESS);
}
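The minor number packs two fields, (vldc_inst << VLDC_INST_SHIFT) | minor_idx, so a single value can be split back into driver instance and per-instance minor index. A sketch of the round trip; the shift width of 11 is an assumption, the real VLDC_INST_SHIFT lives in the driver's headers:

/* Round trip of the minor-number encoding with an assumed shift. */
#include <stdio.h>

#define INST_SHIFT 11			/* hypothetical VLDC_INST_SHIFT */
#define IDX_MASK   ((1u << INST_SHIFT) - 1)

int main(void)
{
	unsigned inst = 2, idx = 5;
	unsigned minor = (inst << INST_SHIFT) | idx;	/* encode */

	printf("minor=%u -> inst=%u, idx=%u\n",
	    minor, minor >> INST_SHIFT, minor & IDX_MASK);
	return 0;
}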
Example #13
/* register callback to mdeg */
static int
i_vldc_mdeg_register(vldc_t *vldcp)
{
	mdeg_prop_spec_t *pspecp;
	mdeg_node_spec_t *inst_specp;
	mdeg_handle_t	mdeg_hdl;
	size_t		templatesz;
	int		inst;
	char		*name;
	size_t		namesz;
	char		*nameprop;
	int		rv;

	/* get the unique vldc instance assigned by the LDom manager */
	inst = ddi_prop_get_int(DDI_DEV_T_ANY, vldcp->dip,
	    DDI_PROP_DONTPASS, "reg", -1);
	if (inst == -1) {
		cmn_err(CE_NOTE, "?vldc%d has no 'reg' property",
		    ddi_get_instance(vldcp->dip));
		return (DDI_FAILURE);
	}

	/* get the name of the vldc instance */
	rv = ddi_prop_lookup_string(DDI_DEV_T_ANY, vldcp->dip,
	    DDI_PROP_DONTPASS, "name", &nameprop);
	if (rv != DDI_PROP_SUCCESS) {
		cmn_err(CE_NOTE, "?vldc%d has no 'name' property",
		    ddi_get_instance(vldcp->dip));
		return (DDI_FAILURE);
	}

	D1("i_vldc_mdeg_register: name=%s, instance=%d\n", nameprop, inst);

	/*
	 * Allocate and initialize a per-instance copy
	 * of the global property spec array that will
	 * uniquely identify this vldc instance.
	 */
	templatesz = sizeof (vldc_prop_template);
	pspecp = kmem_alloc(templatesz, KM_SLEEP);

	bcopy(vldc_prop_template, pspecp, templatesz);

	/* copy in the name property */
	namesz = strlen(nameprop) + 1;
	name = kmem_alloc(namesz, KM_SLEEP);

	bcopy(nameprop, name, namesz);
	VLDC_SET_MDEG_PROP_NAME(pspecp, name);
	ddi_prop_free(nameprop);

	/* copy in the instance property */
	VLDC_SET_MDEG_PROP_INST(pspecp, inst);

	/* initialize the complete prop spec structure */
	inst_specp = kmem_alloc(sizeof (mdeg_node_spec_t), KM_SLEEP);
	inst_specp->namep = "virtual-device";
	inst_specp->specp = pspecp;

	/* perform the registration */
	rv = mdeg_register(inst_specp, &vport_match, i_vldc_mdeg_cb,
	    vldcp, &mdeg_hdl);

	if (rv != MDEG_SUCCESS) {
		cmn_err(CE_NOTE, "?i_vldc_mdeg_register: mdeg_register "
		    "failed, err = %d", rv);
		kmem_free(name, namesz);
		kmem_free(pspecp, templatesz);
		kmem_free(inst_specp, sizeof (mdeg_node_spec_t));
		return (DDI_FAILURE);
	}

	/* save off data that will be needed later */
	vldcp->inst_spec = inst_specp;
	vldcp->mdeg_hdl = mdeg_hdl;

	return (DDI_SUCCESS);
}
Example #14
/* mdeg callback */
static int
i_vldc_mdeg_cb(void *cb_argp, mdeg_result_t *resp)
{
	vldc_t		*vldcp;
	int		idx;
	uint64_t	portno;
	int		rv;
	md_t		*mdp;
	mde_cookie_t	node;

	if (resp == NULL) {
		D1("i_vldc_mdeg_cb: no result returned\n");
		return (MDEG_FAILURE);
	}

	vldcp = (vldc_t *)cb_argp;

	mutex_enter(&vldcp->lock);
	if (vldcp->detaching == B_TRUE) {
		D1("i_vldc_mdeg_cb: detach in progress\n");
		mutex_exit(&vldcp->lock);
		return (MDEG_FAILURE);
	}

	D1("i_vldc_mdeg_cb: added=%d, removed=%d, matched=%d\n",
	    resp->added.nelem, resp->removed.nelem, resp->match_prev.nelem);

	/* process added ports */
	for (idx = 0; idx < resp->added.nelem; idx++) {
		mdp = resp->added.mdp;
		node = resp->added.mdep[idx];

		D1("i_vldc_mdeg_cb: processing added node 0x%lx\n", node);

		/* attempt to add a port */
		if ((rv = i_vldc_add_port(vldcp, mdp, node)) != MDEG_SUCCESS) {
			cmn_err(CE_NOTE, "?i_vldc_mdeg_cb: unable to add port, "
			    "err = %d", rv);
		}
	}

	/* process removed ports */
	for (idx = 0; idx < resp->removed.nelem; idx++) {
		mdp = resp->removed.mdp;
		node = resp->removed.mdep[idx];

		D1("i_vldc_mdeg_cb: processing removed node 0x%lx\n", node);

		/* read in the port's id property */
		if (md_get_prop_val(mdp, node, "id", &portno)) {
			cmn_err(CE_NOTE, "?i_vldc_mdeg_cb: node 0x%lx of "
			    "removed list has no 'id' property", node);
			continue;
		}

		/* attempt to remove a port */
		if ((rv = i_vldc_remove_port(vldcp, portno)) != 0) {
			cmn_err(CE_NOTE, "?i_vldc_mdeg_cb: unable to remove "
			    "port %lu, err %d", portno, rv);
		}
	}

	/*
	 * Currently no support for updating already active ports. So, ignore
	 * the match_curr and match_prev arrays for now.
	 */

	mutex_exit(&vldcp->lock);

	return (MDEG_SUCCESS);
}
Example #15
/* Hmmm. Maybe we should accept the extra space it takes and make
   this a standard doubly-linked list? */
static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c,
			struct jffs2_raw_node_ref *ref, struct jffs2_eraseblock *jeb)
{
	struct jffs2_inode_cache *ic = NULL;
	struct jffs2_raw_node_ref **prev;

	prev = &ref->next_in_ino;

	/* Walk the inode's list once, removing any nodes from this eraseblock */
	while (1) {
		if (!(*prev)->next_in_ino) {
			/* We're looking at the jffs2_inode_cache, which is 
			   at the end of the linked list. Stash it and continue
			   from the beginning of the list */
			ic = (struct jffs2_inode_cache *)(*prev);
			prev = &ic->nodes;
			continue;
		} 

		if (((*prev)->flash_offset & ~(c->sector_size -1)) == jeb->offset) {
			/* It's in the block we're erasing */
			struct jffs2_raw_node_ref *this;

			this = *prev;
			*prev = this->next_in_ino;
			this->next_in_ino = NULL;

			if (this == ref)
				break;

			continue;
		}
		/* Not to be deleted. Skip */
		prev = &((*prev)->next_in_ino);
	}

	/* PARANOIA */
	if (!ic) {
		printk(KERN_WARNING "inode_cache not found in remove_node_refs()!!\n");
		return;
	}

	D1(printk(KERN_DEBUG "Removed nodes in range 0x%08x-0x%08x from ino #%u\n",
		  jeb->offset, jeb->offset + c->sector_size, ic->ino));

	D2({
		int i=0;
		struct jffs2_raw_node_ref *this;
		printk(KERN_DEBUG "After remove_node_refs_from_ino_list: \n" KERN_DEBUG);

		this = ic->nodes;
	   
		while(this) {
			printk( "0x%08x(%d)->", this->flash_offset & ~3, this->flash_offset &3);
			if (++i == 5) {
				printk("\n" KERN_DEBUG);
				i=0;
			}
			this = this->next_in_ino;
		}
		printk("\n");
	});
}
Example #16
        void Render::addContainer()
        {
            glm::vec3 A1(0.0f, 0.0f, 0.0f);
            glm::vec3 B1(1.0f, 0.0f, 0.0f);
            glm::vec3 C1(1.0f, 0.0f, 1.0f);
            glm::vec3 D1(0.0f, 0.0f, 1.0f);
            glm::vec3 A2(0.0f, 1.0f, 0.0f);
            glm::vec3 B2(1.0f, 1.0f, 0.0f);
            glm::vec3 C2(1.0f, 1.0f, 1.0f);
            glm::vec3 D2(0.0f, 1.0f, 1.0f);
            glm::vec3 normal(0.0f, 0.0f, 0.0f);
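            // The pushes below lay out the 12 edges of the unit cube as 24
            // line endpoints: bottom face, top face, then the four verticals.
            // The zero 'normal' appears to be a placeholder for a line list.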

            vertices.push_back(A1);
            normals.push_back(normal);
            vertices.push_back(B1);
            normals.push_back(normal);
            vertices.push_back(A1);
            normals.push_back(normal);
            vertices.push_back(D1);
            normals.push_back(normal);
            vertices.push_back(B1);
            normals.push_back(normal);
            vertices.push_back(C1);
            normals.push_back(normal);
            vertices.push_back(D1);
            normals.push_back(normal);
            vertices.push_back(C1);
            normals.push_back(normal);

            vertices.push_back(A2);
            normals.push_back(normal);
            vertices.push_back(B2);
            normals.push_back(normal);
            vertices.push_back(A2);
            normals.push_back(normal);
            vertices.push_back(D2);
            normals.push_back(normal);
            vertices.push_back(B2);
            normals.push_back(normal);
            vertices.push_back(C2);
            normals.push_back(normal);
            vertices.push_back(D2);
            normals.push_back(normal);
            vertices.push_back(C2);
            normals.push_back(normal);

            vertices.push_back(A1);
            normals.push_back(normal);
            vertices.push_back(A2);
            normals.push_back(normal);
            vertices.push_back(B1);
            normals.push_back(normal);
            vertices.push_back(B2);
            normals.push_back(normal);
            vertices.push_back(C1);
            normals.push_back(normal);
            vertices.push_back(C2);
            normals.push_back(normal);
            vertices.push_back(D1);
            normals.push_back(normal);
            vertices.push_back(D2);
            normals.push_back(normal);
        }
Example #17
//int jffs2_prepare_write (struct file *filp, struct page *pg, unsigned start, unsigned end)
int jffs2_prepare_write (struct inode *d_inode, struct page *pg, unsigned start, unsigned end)
{
	struct inode *inode = d_inode;
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	uint32_t pageofs = pg->index << PAGE_CACHE_SHIFT;
	int ret = 0;

	D1(printk(KERN_DEBUG "jffs2_prepare_write()\n"));

	if (pageofs > inode->i_size) {
		/* Make new hole frag from old EOF to new page */
		struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
		struct jffs2_raw_inode ri;
		struct jffs2_full_dnode *fn;
		uint32_t phys_ofs, alloc_len;
		
		D1(printk(KERN_DEBUG "Writing new hole frag 0x%x-0x%x between current EOF and new page\n",
			  (unsigned int)inode->i_size, pageofs));

		ret = jffs2_reserve_space(c, sizeof(ri), &phys_ofs, &alloc_len, ALLOC_NORMAL);
		if (ret)
			return ret;

		down(&f->sem);
		memset(&ri, 0, sizeof(ri));

		ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
		ri.nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
		ri.totlen = cpu_to_je32(sizeof(ri));
		ri.hdr_crc = cpu_to_je32(crc32(0, &ri, sizeof(struct jffs2_unknown_node)-4));

		ri.ino = cpu_to_je32(f->inocache->ino);
		ri.version = cpu_to_je32(++f->highest_version);
		ri.mode = cpu_to_jemode(inode->i_mode);
		ri.uid = cpu_to_je16(inode->i_uid);
		ri.gid = cpu_to_je16(inode->i_gid);

		ri.isize = cpu_to_je32(max((uint32_t)inode->i_size, pageofs));
		ri.atime = ri.ctime = ri.mtime = cpu_to_je32(cyg_timestamp());
		ri.offset = cpu_to_je32(inode->i_size);
		ri.dsize = cpu_to_je32(pageofs - inode->i_size);
		ri.csize = cpu_to_je32(0);
		ri.compr = JFFS2_COMPR_ZERO;
		ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));
		ri.data_crc = cpu_to_je32(0);
		
		fn = jffs2_write_dnode(c, f, &ri, NULL, 0, phys_ofs, ALLOC_NORMAL);
		jffs2_complete_reservation(c);
		if (IS_ERR(fn)) {
			ret = PTR_ERR(fn);
			up(&f->sem);
			return ret;
		}
		ret = jffs2_add_full_dnode_to_inode(c, f, fn);
		if (f->metadata) {
			jffs2_mark_node_obsolete(c, f->metadata->raw);
			jffs2_free_full_dnode(f->metadata);
			f->metadata = NULL;
		}
		if (ret) {
			D1(printk(KERN_DEBUG "Eep. add_full_dnode_to_inode() failed in prepare_write, returned %d\n", ret));
			jffs2_mark_node_obsolete(c, fn->raw);
			jffs2_free_full_dnode(fn);
			up(&f->sem);
			return ret;
		}
		inode->i_size = pageofs;
		up(&f->sem);
	}
	

	/* Read in the page if it wasn't already present, unless it's a whole page */
        // eCos has no concept of uptodate and by default always reads pages afresh
	if (!Page_Uptodate(pg) && (start || end < PAGE_CACHE_SIZE)) {
		down(&f->sem);
		ret = jffs2_do_readpage_nolock(inode, pg);
		up(&f->sem);
	}
	D1(printk(KERN_DEBUG "end prepare_write()\n"));
	return ret;
}
Example #18
static int jffs2_garbage_collect_thread(void *_c)
{
	struct jffs2_sb_info *c = _c;

	daemonize("jffs2_gcd_mtd%d", c->mtd->index);
	allow_signal(SIGKILL);
	allow_signal(SIGSTOP);
	allow_signal(SIGCONT);

	c->gc_task = current;
	up(&c->gc_thread_start);

	set_user_nice(current, 10);

	for (;;) {
		allow_signal(SIGHUP);

		if (!jffs2_thread_should_wake(c)) {
			set_current_state (TASK_INTERRUPTIBLE);
			D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread sleeping...\n"));
			/* Yes, there's a race here; we checked jffs2_thread_should_wake()
			   before setting current->state to TASK_INTERRUPTIBLE. But it doesn't
			   matter - We don't care if we miss a wakeup, because the GC thread
			   is only an optimisation anyway. */
			schedule();
		}

		if (current->flags & PF_FREEZE) {
			refrigerator(0);
			/* refrigerator() should recalc sigpending for us
			   but doesn't. No matter - allow_signal() will. */
			continue;
		}

		cond_resched();

		/* Put_super will send a SIGKILL and then wait on the sem. 
		 */
		while (signal_pending(current)) {
			siginfo_t info;
			unsigned long signr;

			signr = dequeue_signal_lock(current, &current->blocked, &info);

			switch(signr) {
			case SIGSTOP:
				D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGSTOP received.\n"));
				set_current_state(TASK_STOPPED);
				schedule();
				break;

			case SIGKILL:
				D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGKILL received.\n"));
				goto die;

			case SIGHUP:
				D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGHUP received.\n"));
				break;
			default:
				D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): signal %ld received\n", signr));
			}
		}
		/* We don't want SIGHUP to interrupt us. STOP and KILL are OK though. */
		disallow_signal(SIGHUP);

		D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): pass\n"));
		if (jffs2_garbage_collect_pass(c) == -ENOSPC) {
			printk(KERN_NOTICE "No space for garbage collection. Aborting GC thread\n");
			goto die;
		}
	}
 die:
	spin_lock(&c->erase_completion_lock);
	c->gc_task = NULL;
	spin_unlock(&c->erase_completion_lock);
	complete_and_exit(&c->gc_thread_exit, 0);
}
Example #19
/*
 * Program the macaddress and vlans of the vsw interface.
 *
 * Returns 0 on success, 1 on failure.
 */
static int
vsw_set_if_hw_addr(vsw_t *vswp)
{
	mac_diag_t		diag;
	uint8_t			*macaddr;
	uint8_t			primary_addr[ETHERADDRL];
	uint16_t		vid = VLAN_ID_NONE;
	int			rv = 0;
	uint16_t		mac_flags = MAC_UNICAST_TAG_DISABLE |
	    MAC_UNICAST_STRIP_DISABLE;

	D1(vswp, "%s: enter", __func__);

	ASSERT(RW_WRITE_HELD(&vswp->maccl_rwlock));
	if (vswp->mch == NULL)
		return (0);

	macaddr = (uint8_t *)vswp->if_addr.ether_addr_octet;

	/* check if it is the primary macaddr of the card. */
	mac_unicast_primary_get(vswp->mh, primary_addr);
	if (ether_cmp((void *)primary_addr, (void*)macaddr) == 0) {
		mac_flags |= MAC_UNICAST_PRIMARY;
	}

	/*
	 * If the interface has a specific 'pvid', then
	 * register with that vlan-id, otherwise register
	 * with VLAN_ID_NONE.
	 */
	if (vswp->pvid != vswp->default_vlan_id) {
		vid = vswp->pvid;
	}

	if (!(vswp->smode & VSW_LAYER2_PROMISC)) {
		mac_flags |= MAC_UNICAST_HW;
	}

	if (vswp->addr_set == B_FALSE) {
		vswp->muh = NULL;
		rv = mac_unicast_add(vswp->mch, macaddr, mac_flags,
		    &vswp->muh, vid, &diag);

		if (rv != 0) {
			cmn_err(CE_WARN, "vsw%d: Failed to program "
			    "macaddr,vid(%s, %d) err=%d",
			    vswp->instance, ether_sprintf((void *)macaddr),
			    vid, rv);
			return (rv);
		}
		vswp->addr_set = B_TRUE;

		D2(vswp, "%s:programmed macaddr(%s) vid(%d) into device %s",
		    __func__, ether_sprintf((void *)macaddr), vid,
		    vswp->physname);
	}

	vsw_mac_add_vlans(vswp, vswp->mch, macaddr, mac_flags,
	    vswp->vids, vswp->nvids);

	vsw_maccl_set_bandwidth(vswp, NULL, VSW_LOCALDEV, vswp->bandwidth);

	mac_rx_set(vswp->mch, vsw_if_rx_cb, (void *)vswp);

	D1(vswp, "%s: exit", __func__);
	return (rv);
}
Example #20
	void F4::updatePairs(vector<Polynomial>& polys, bool initial) 
	{
		// Set up a timer to measure how long the 'update' takes.
		double timer = F4Logger::seconds();
		// The index of the current new element in the groebner basis.
		size_t t = groebnerBasis.size();
		// Since the groebner basis grows, we store the current size
		// of the groebner basis in a variable. 
		size_t is = groebnerBasis.size();
		// Iterate over all new polynomials, which have been found in the last
		// run of reduce(...)
		for(size_t i = 0; i < polys.size(); i++)
		{
			Polynomial& h = polys[i];

			// True if the current polynomial h will be inserted into the groebner basis
			bool insertIntoG = true;

			// Check if h should be inserted: h will not be inserted into g if there
			// is already a (new!) element in the groebner basis, which has a leading
			// term that divides the leading term of h
			// Note: An old element of the groebner basis means that this element was
			// already known before the last call of reduce, so h can't have a leading
			// term which is divisible by the leading term of the old element, because
			// if this would be the case, there would have been a reduction polynomial
			// reducing this leading term.
			if(!initial) {
				for(size_t i = is; insertIntoG && i < groebnerBasis.size(); i++)
				{
					if(inGroebnerBasis[i] && h.LT().isDivisibleBy(groebnerBasis[i].LT())) 
					{
						insertIntoG = false;
					}
				}
			}

			//Check the criteria only if h will be inserted into the groebner basis
			if(true) // BUG FIX: gating on insertIntoG here doesn't work!
			{   
				// Apply the first criterion:
				// Cancel in P all pairs (i,j) which satisfy T(i,j) = T(i,j,t), T(i,t) != T(i,j) != T(j,t) [ B_t(i,j) ]
				F4PairSet P1(pairs.key_comp());
				for(set<F4Pair>::iterator it = pairs.begin(); it != pairs.end(); it++) 
				{
					if( !it->LCM.isDivisibleBy(h.LT())  || h.lcmLT(groebnerBasis[it->i]) == it->LCM  ||  h.lcmLT(groebnerBasis[it->j]) == it->LCM  ) { 
						P1.insert( *it );
					}
				}   
				swap(pairs, P1);

				// Apply the second criterion:
				// Cancel in D1 each (i,t) for which a (j,t) exists s.t. T(i,t) is a proper multiple of T(j,t) [ M(i,t) ]
				vector<bool> D1(inGroebnerBasis.begin(), inGroebnerBasis.end());
				for(size_t i = 0; i < D1.size(); i++) 
				{
					for(size_t j = i+1; D1[i] && j < D1.size(); j++)
					{
						if(D1[j])				
						{
							Term a = h.lcmLT(groebnerBasis[i]);
							Term b = h.lcmLT(groebnerBasis[j]);
							if(a != b) {
								if(a.isDivisibleBy(b))
								{
									D1[i] = false;
								}
								if(b.isDivisibleBy(a)) 
								{
									D1[j] = false;
								}
							}
						}
					}
				}

				// Apply the third criterion:
				// In each nonvoid subset { (j,t) | T(j,t) = tau } ...
				// Attention P2 is not a multiset, so each element is unique.
				set<F4Pair, F4Pair::comparator> P2(pairs.key_comp());
				for(size_t i = 0; i < D1.size(); i++)
				{
					if(D1[i])
					{
						Term LCM = groebnerBasis[i].lcmLT(h);
						// Create a new pair: LCM, i, t, marked, sugar degree
						F4Pair newpair( LCM, i, t, LCM == groebnerBasis[i].LT().mul(h.LT()), max(groebnerBasis[i].sugar() - groebnerBasis[i].LT().deg(), h.sugar() - h.LT().deg()) + LCM.deg() );
						pair<F4PairSet::iterator,bool> ret;
						ret = P2.insert( newpair );
						// If an equivalent pair for this LCM already exists but
						// the new pair is marked, replace the stored one, since
						// a marked pair deletes all pairs with this LCM.
						if(newpair.marked && !ret.second)
						{
							P2.erase(ret.first);
							P2.insert(newpair);
						}
					}
				}

				// Finally delete all (i,t) with T(i)T(j) = T(i,t).
				for(set<F4Pair, F4Pair::comparator>::iterator it = P2.begin(); it != P2.end(); it++)
				{   
					if(!it->marked)
					{ 
						pairs.insert(*it);
					}  
				}

				// Check all old elements of the groebner basis if the current element
				// divides the leading term, so the old element is reducible and can
				// removed from the result set.
				for(size_t j = 0; j < groebnerBasis.size(); j++)
				{   
					if(inGroebnerBasis[j] && groebnerBasis[j].LT().isDivisibleBy(h.LT()))
					{   
						inGroebnerBasis[j] = false;
					}
				}
				// Insert h into the groebner basis
				groebnerBasis.push_back( h );

				inGroebnerBasis.push_back( insertIntoG );
				t++;
			}
		}
		log->updateTime += F4Logger::seconds() - timer;
	}
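The three pruning loops are recognizably the Gebauer–Möller update criteria. As a paraphrase (my reading, not text from this codebase), with T(i) the leading term of basis element i, T(i,j) = lcm(T(i), T(j)), and t the index of the new polynomial h:

\[
\begin{aligned}
B_t(i,j)&:\ \text{drop } (i,j) \text{ if } T(t) \mid T(i,j),\ T(i,t) \neq T(i,j) \text{ and } T(j,t) \neq T(i,j);\\
M(i,t)&:\ \text{drop } (i,t) \text{ if } T(j,t) \text{ properly divides } T(i,t) \text{ for some } j;\\
F(j,t)&:\ \text{keep one pair per distinct value of } T(j,t),\ \text{and none of that class if some member has } T(j)\,T(t) = T(j,t).
\end{aligned}
\]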
Example #21
/* Called with 'buf_size == 0' if buf is in fact a pointer _directly_ into
   the flash, XIP-style */
static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				  unsigned char *buf, uint32_t buf_size, struct jffs2_summary *s) {
	struct jffs2_unknown_node *node;
	struct jffs2_unknown_node crcnode;
	uint32_t ofs, prevofs, max_ofs;
	uint32_t hdr_crc, buf_ofs, buf_len;
	int err;
	int noise = 0;


#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
	int cleanmarkerfound = 0;
#endif

	ofs = jeb->offset;
	prevofs = jeb->offset - 1;

	D1(printk(KERN_DEBUG "jffs2_scan_eraseblock(): Scanning block at 0x%x\n", ofs));

#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
	if (c->mtd->type == MTD_NANDFLASH) {
		int ret;

		if (c->mtd->block_isbad(c->mtd, jeb->offset))
			return BLK_STATE_BADBLOCK;

		if (jffs2_cleanmarker_oob(c)) {
			ret = jffs2_check_nand_cleanmarker(c, jeb);
			D2(printk(KERN_NOTICE "jffs_check_nand_cleanmarker returned %d\n", ret));

			/* Even if it's not found, we still scan to see
			   if the block is empty. We use this information
			   to decide whether to erase it or not. */
			switch (ret) {
			case 0:		cleanmarkerfound = 1; break;
			case 1: 	break;
			default: 	return ret;
			}
		}
	}
#endif

	if (jffs2_sum_active()) {
		struct jffs2_sum_marker *sm;
		void *sumptr = NULL;
		uint32_t sumlen;
	      
		if (!buf_size) {
			/* XIP case. Just look, point at the summary if it's there */
			sm = (void *)buf + c->sector_size - sizeof(*sm);
			if (je32_to_cpu(sm->magic) == JFFS2_SUM_MAGIC) {
				sumptr = buf + je32_to_cpu(sm->offset);
				sumlen = c->sector_size - je32_to_cpu(sm->offset);
			}
		} else {
			/* If NAND flash, read a whole page of it. Else just the end */
			if (c->wbuf_pagesize)
				buf_len = c->wbuf_pagesize;
			else
				buf_len = sizeof(*sm);

			/* Read as much as we want into the _end_ of the preallocated buffer */
			err = jffs2_fill_scan_buf(c, buf + buf_size - buf_len, 
						  jeb->offset + c->sector_size - buf_len,
						  buf_len);				
			if (err)
				return err;

			sm = (void *)buf + buf_size - sizeof(*sm);
			if (je32_to_cpu(sm->magic) == JFFS2_SUM_MAGIC) {
				sumlen = c->sector_size - je32_to_cpu(sm->offset);
				sumptr = buf + buf_size - sumlen;

				/* Now, make sure the summary itself is available */
				if (sumlen > buf_size) {
					/* Need to kmalloc for this. */
					sumptr = kmalloc(sumlen, GFP_KERNEL);
					if (!sumptr)
						return -ENOMEM;
					memcpy(sumptr + sumlen - buf_len, buf + buf_size - buf_len, buf_len);
				}
				if (buf_len < sumlen) {
					/* Need to read more so that the entire summary node is present */
					err = jffs2_fill_scan_buf(c, sumptr, 
								  jeb->offset + c->sector_size - sumlen,
								  sumlen - buf_len);				
					if (err)
						return err;
				}
			}

		}

		if (sumptr) {
			err = jffs2_sum_scan_sumnode(c, jeb, sumptr, sumlen, &pseudo_random);

			if (buf_size && sumlen > buf_size)
				kfree(sumptr);
			/* If it returns with a real error, bail. 
			   If it returns positive, that's a block classification
			   (i.e. BLK_STATE_xxx) so return that too.
			   If it returns zero, fall through to full scan. */
			if (err)
				return err;
		}
	}

	buf_ofs = jeb->offset;

	if (!buf_size) {
		/* This is the XIP case -- we're reading _directly_ from the flash chip */
		buf_len = c->sector_size;
	} else {
		buf_len = EMPTY_SCAN_SIZE(c->sector_size);
		err = jffs2_fill_scan_buf(c, buf, buf_ofs, buf_len);
		if (err)
			return err;
	}

	/* We temporarily use 'ofs' as a pointer into the buffer/jeb */
	ofs = 0;
	max_ofs = EMPTY_SCAN_SIZE(c->sector_size);
	/* Scan only EMPTY_SCAN_SIZE of 0xFF before declaring it's empty */
	while(ofs < max_ofs && *(uint32_t *)(&buf[ofs]) == 0xFFFFFFFF)
		ofs += 4;

	if (ofs == max_ofs) {
#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
		if (jffs2_cleanmarker_oob(c)) {
			/* scan oob, take care of cleanmarker */
			int ret = jffs2_check_oob_empty(c, jeb, cleanmarkerfound);
			D2(printk(KERN_NOTICE "jffs2_check_oob_empty returned %d\n",ret));
			switch (ret) {
			case 0:		return cleanmarkerfound ? BLK_STATE_CLEANMARKER : BLK_STATE_ALLFF;
			case 1: 	return BLK_STATE_ALLDIRTY;
			default: 	return ret;
			}
		}
#endif
		D1(printk(KERN_DEBUG "Block at 0x%08x is empty (erased)\n", jeb->offset));
		if (c->cleanmarker_size == 0)
			return BLK_STATE_CLEANMARKER;	/* don't bother with re-erase */
		else
			return BLK_STATE_ALLFF;	/* OK to erase if all blocks are like this */
	}
	if (ofs) {
		D1(printk(KERN_DEBUG "Free space at %08x ends at %08x\n", jeb->offset,
			  jeb->offset + ofs));
		if ((err = jffs2_prealloc_raw_node_refs(c, jeb, 1)))
			return err;
		if ((err = jffs2_scan_dirty_space(c, jeb, ofs)))
			return err;
	}

	/* Now ofs is a complete physical flash offset as it always was... */
	ofs += jeb->offset;

	noise = 10;

	dbg_summary("no summary found in jeb 0x%08x. Apply original scan.\n",jeb->offset);

scan_more:
	while(ofs < jeb->offset + c->sector_size) {

		jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

		/* Make sure there are node refs available for use */
		err = jffs2_prealloc_raw_node_refs(c, jeb, 2);
		if (err)
			return err;

		cond_resched();

		if (ofs & 3) {
			printk(KERN_WARNING "Eep. ofs 0x%08x not word-aligned!\n", ofs);
			ofs = PAD(ofs);
			continue;
		}
		if (ofs == prevofs) {
			printk(KERN_WARNING "ofs 0x%08x has already been seen. Skipping\n", ofs);
			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
				return err;
			ofs += 4;
			continue;
		}
		prevofs = ofs;

		if (jeb->offset + c->sector_size < ofs + sizeof(*node)) {
			D1(printk(KERN_DEBUG "Fewer than %zd bytes left to end of block. (%x+%x<%x+%zx) Not reading\n", sizeof(struct jffs2_unknown_node),
				  jeb->offset, c->sector_size, ofs, sizeof(*node)));
			if ((err = jffs2_scan_dirty_space(c, jeb, (jeb->offset + c->sector_size)-ofs)))
				return err;
			break;
		}

		if (buf_ofs + buf_len < ofs + sizeof(*node)) {
			buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
			D1(printk(KERN_DEBUG "Fewer than %zd bytes (node header) left to end of buf. Reading 0x%x at 0x%08x\n",
				  sizeof(struct jffs2_unknown_node), buf_len, ofs));
			err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
			if (err)
				return err;
			buf_ofs = ofs;
		}

		node = (struct jffs2_unknown_node *)&buf[ofs-buf_ofs];

		if (*(uint32_t *)(&buf[ofs-buf_ofs]) == 0xffffffff) {
			uint32_t inbuf_ofs;
			uint32_t empty_start, scan_end;

			empty_start = ofs;
			ofs += 4;
			scan_end = min_t(uint32_t, EMPTY_SCAN_SIZE(c->sector_size)/8, buf_len);

			D1(printk(KERN_DEBUG "Found empty flash at 0x%08x\n", ofs));
		more_empty:
			inbuf_ofs = ofs - buf_ofs;
			while (inbuf_ofs < scan_end) {
				if (unlikely(*(uint32_t *)(&buf[inbuf_ofs]) != 0xffffffff)) {
					printk(KERN_WARNING "Empty flash at 0x%08x ends at 0x%08x\n",
					       empty_start, ofs);
					if ((err = jffs2_scan_dirty_space(c, jeb, ofs-empty_start)))
						return err;
					goto scan_more;
				}

				inbuf_ofs+=4;
				ofs += 4;
			}
			/* Ran off end. */
			D1(printk(KERN_DEBUG "Empty flash to end of buffer at 0x%08x\n", ofs));

			/* If we're only checking the beginning of a block with a cleanmarker,
			   bail now */
			if (buf_ofs == jeb->offset && jeb->used_size == PAD(c->cleanmarker_size) &&
			    c->cleanmarker_size && !jeb->dirty_size && !ref_next(jeb->first_node)) {
				D1(printk(KERN_DEBUG "%d bytes at start of block seems clean... assuming all clean\n", EMPTY_SCAN_SIZE(c->sector_size)));
				return BLK_STATE_CLEANMARKER;
			}
			if (!buf_size && (scan_end != buf_len)) {/* XIP/point case */
				scan_end = buf_len;
				goto more_empty;
			}
			
			/* See how much more there is to read in this eraseblock... */
			buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
			if (!buf_len) {
				/* No more to read. Break out of main loop without marking
				   this range of empty space as dirty (because it's not) */
				D1(printk(KERN_DEBUG "Empty flash at %08x runs to end of block. Treating as free_space\n",
					  empty_start));
				break;
			}
			/* point never reaches here */
			scan_end = buf_len;
			D1(printk(KERN_DEBUG "Reading another 0x%x at 0x%08x\n", buf_len, ofs));
			err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
			if (err)
				return err;
			buf_ofs = ofs;
			goto more_empty;
		}

		if (ofs == jeb->offset && je16_to_cpu(node->magic) == KSAMTIB_CIGAM_2SFFJ) {
			printk(KERN_WARNING "Magic bitmask is backwards at offset 0x%08x. Wrong endian filesystem?\n", ofs);
			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
				return err;
			ofs += 4;
			continue;
		}
		if (je16_to_cpu(node->magic) == JFFS2_DIRTY_BITMASK) {
			D1(printk(KERN_DEBUG "Dirty bitmask at 0x%08x\n", ofs));
			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
				return err;
			ofs += 4;
			continue;
		}
		if (je16_to_cpu(node->magic) == JFFS2_OLD_MAGIC_BITMASK) {
			printk(KERN_WARNING "Old JFFS2 bitmask found at 0x%08x\n", ofs);
			printk(KERN_WARNING "You cannot use older JFFS2 filesystems with newer kernels\n");
			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
				return err;
			ofs += 4;
			continue;
		}
		if (je16_to_cpu(node->magic) != JFFS2_MAGIC_BITMASK) {
			/* OK. We're out of possibilities. Whinge and move on */
			noisy_printk(&noise, "jffs2_scan_eraseblock(): Magic bitmask 0x%04x not found at 0x%08x: 0x%04x instead\n",
				     JFFS2_MAGIC_BITMASK, ofs,
				     je16_to_cpu(node->magic));
			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
				return err;
			ofs += 4;
			continue;
		}
		/* We seem to have a node of sorts. Check the CRC */
		crcnode.magic = node->magic;
		crcnode.nodetype = cpu_to_je16( je16_to_cpu(node->nodetype) | JFFS2_NODE_ACCURATE);
		crcnode.totlen = node->totlen;
		hdr_crc = crc32(0, &crcnode, sizeof(crcnode)-4);

		if (hdr_crc != je32_to_cpu(node->hdr_crc)) {
			noisy_printk(&noise, "jffs2_scan_eraseblock(): Node at 0x%08x {0x%04x, 0x%04x, 0x%08x) has invalid CRC 0x%08x (calculated 0x%08x)\n",
				     ofs, je16_to_cpu(node->magic),
				     je16_to_cpu(node->nodetype),
				     je32_to_cpu(node->totlen),
				     je32_to_cpu(node->hdr_crc),
				     hdr_crc);
			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
				return err;
			ofs += 4;
			continue;
		}

		if (ofs + je32_to_cpu(node->totlen) > jeb->offset + c->sector_size) {
			/* Eep. Node goes over the end of the erase block. */
			printk(KERN_WARNING "Node at 0x%08x with length 0x%08x would run over the end of the erase block\n",
			       ofs, je32_to_cpu(node->totlen));
			printk(KERN_WARNING "Perhaps the file system was created with the wrong erase size?\n");
			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
				return err;
			ofs += 4;
			continue;
		}

		if (!(je16_to_cpu(node->nodetype) & JFFS2_NODE_ACCURATE)) {
			/* Wheee. This is an obsoleted node */
			D2(printk(KERN_DEBUG "Node at 0x%08x is obsolete. Skipping\n", ofs));
			if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
				return err;
			ofs += PAD(je32_to_cpu(node->totlen));
			continue;
		}

		switch(je16_to_cpu(node->nodetype)) {
		case JFFS2_NODETYPE_INODE:
			if (buf_ofs + buf_len < ofs + sizeof(struct jffs2_raw_inode)) {
				buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
				D1(printk(KERN_DEBUG "Fewer than %zd bytes (inode node) left to end of buf. Reading 0x%x at 0x%08x\n",
					  sizeof(struct jffs2_raw_inode), buf_len, ofs));
				err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
				if (err)
					return err;
				buf_ofs = ofs;
				node = (void *)buf;
			}
			err = jffs2_scan_inode_node(c, jeb, (void *)node, ofs, s);
			if (err) return err;
			ofs += PAD(je32_to_cpu(node->totlen));
			break;

		case JFFS2_NODETYPE_DIRENT:
			if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
				buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
				D1(printk(KERN_DEBUG "Fewer than %d bytes (dirent node) left to end of buf. Reading 0x%x at 0x%08x\n",
					  je32_to_cpu(node->totlen), buf_len, ofs));
				err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
				if (err)
					return err;
				buf_ofs = ofs;
				node = (void *)buf;
			}
			err = jffs2_scan_dirent_node(c, jeb, (void *)node, ofs, s);
			if (err) return err;
			ofs += PAD(je32_to_cpu(node->totlen));
			break;

#ifdef CONFIG_JFFS2_FS_XATTR
		case JFFS2_NODETYPE_XATTR:
			if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
				buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
				D1(printk(KERN_DEBUG "Fewer than %d bytes (xattr node)"
					  " left to end of buf. Reading 0x%x at 0x%08x\n",
					  je32_to_cpu(node->totlen), buf_len, ofs));
				err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
				if (err)
					return err;
				buf_ofs = ofs;
				node = (void *)buf;
			}
			err = jffs2_scan_xattr_node(c, jeb, (void *)node, ofs, s);
			if (err)
				return err;
			ofs += PAD(je32_to_cpu(node->totlen));
			break;
		case JFFS2_NODETYPE_XREF:
			if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
				buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
				D1(printk(KERN_DEBUG "Fewer than %d bytes (xref node)"
					  " left to end of buf. Reading 0x%x at 0x%08x\n",
					  je32_to_cpu(node->totlen), buf_len, ofs));
				err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
				if (err)
					return err;
				buf_ofs = ofs;
				node = (void *)buf;
			}
			err = jffs2_scan_xref_node(c, jeb, (void *)node, ofs, s);
			if (err)
				return err;
			ofs += PAD(je32_to_cpu(node->totlen));
			break;
#endif	/* CONFIG_JFFS2_FS_XATTR */

		case JFFS2_NODETYPE_CLEANMARKER:
			D1(printk(KERN_DEBUG "CLEANMARKER node found at 0x%08x\n", ofs));
			if (je32_to_cpu(node->totlen) != c->cleanmarker_size) {
				printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x has totlen 0x%x != normal 0x%x\n",
				       ofs, je32_to_cpu(node->totlen), c->cleanmarker_size);
				if ((err = jffs2_scan_dirty_space(c, jeb, PAD(sizeof(struct jffs2_unknown_node)))))
					return err;
				ofs += PAD(sizeof(struct jffs2_unknown_node));
			} else if (jeb->first_node) {
				printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x, not first node in block (0x%08x)\n", ofs, jeb->offset);
				if ((err = jffs2_scan_dirty_space(c, jeb, PAD(sizeof(struct jffs2_unknown_node)))))
					return err;
				ofs += PAD(sizeof(struct jffs2_unknown_node));
			} else {
				jffs2_link_node_ref(c, jeb, ofs | REF_NORMAL, c->cleanmarker_size, NULL);

				ofs += PAD(c->cleanmarker_size);
			}
			break;

		case JFFS2_NODETYPE_PADDING:
			if (jffs2_sum_active())
				jffs2_sum_add_padding_mem(s, je32_to_cpu(node->totlen));
			if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
				return err;
			ofs += PAD(je32_to_cpu(node->totlen));
			break;

		default:
			switch (je16_to_cpu(node->nodetype) & JFFS2_COMPAT_MASK) {
			case JFFS2_FEATURE_ROCOMPAT:
				printk(KERN_NOTICE "Read-only compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs);
				c->flags |= JFFS2_SB_FLAG_RO;
				if (!(jffs2_is_readonly(c)))
					return -EROFS;
				if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
					return err;
				ofs += PAD(je32_to_cpu(node->totlen));
				break;

			case JFFS2_FEATURE_INCOMPAT:
				printk(KERN_NOTICE "Incompatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs);
				return -EINVAL;

			case JFFS2_FEATURE_RWCOMPAT_DELETE:
				D1(printk(KERN_NOTICE "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs));
				if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
					return err;
				ofs += PAD(je32_to_cpu(node->totlen));
				break;

			case JFFS2_FEATURE_RWCOMPAT_COPY: {
				D1(printk(KERN_NOTICE "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs));

				jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(node->totlen)), NULL);

				/* We can't summarise nodes we don't grok */
				jffs2_sum_disable_collecting(s);
				ofs += PAD(je32_to_cpu(node->totlen));
				break;
				}
			}
		}
	}

	if (jffs2_sum_active()) {
		if (PAD(s->sum_size + JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size) {
			dbg_summary("There is not enough space for "
				"summary information, disabling for this jeb!\n");
			jffs2_sum_disable_collecting(s);
		}
	}

	D1(printk(KERN_DEBUG "Block at 0x%08x: free 0x%08x, dirty 0x%08x, unchecked 0x%08x, used 0x%08x, wasted 0x%08x\n",
		  jeb->offset,jeb->free_size, jeb->dirty_size, jeb->unchecked_size, jeb->used_size, jeb->wasted_size));
	
	/* mark_node_obsolete can add to wasted !! */
	if (jeb->wasted_size) {
		jeb->dirty_size += jeb->wasted_size;
		c->dirty_size += jeb->wasted_size;
		c->wasted_size -= jeb->wasted_size;
		jeb->wasted_size = 0;
	}

	return jffs2_scan_classify_jeb(c, jeb);
}
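
One detail in the scan loop above deserves isolating: before trusting `totlen`, the scanner copies the header fields into a scratch node, forces the ACCURATE bit back on (so a node later obsoleted in place still yields the original CRC), and checksums everything except the trailing `hdr_crc` field itself. A standalone sketch of the same check, with zlib's `crc32()` standing in for the kernel's and an assumed value for the ACCURATE bit:

#include <stdint.h>
#include <zlib.h>		/* crc32(); the kernel uses its own crc32() */

#define NODE_ACCURATE 0x2000	/* assumed bit value, for illustration */

struct node_hdr {
	uint16_t magic;
	uint16_t nodetype;
	uint32_t totlen;
	uint32_t hdr_crc;	/* CRC of the three fields above */
};

static int hdr_crc_ok(const struct node_hdr *node)
{
	struct node_hdr crcnode;

	crcnode.magic    = node->magic;
	crcnode.nodetype = node->nodetype | NODE_ACCURATE;
	crcnode.totlen   = node->totlen;

	/* CRC everything except the hdr_crc field itself. */
	return crc32(0, (const unsigned char *)&crcnode,
		     sizeof(crcnode) - 4) == node->hdr_crc;
}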
Exemplo n.º 22
0
/* Called by the VFS at mount time to initialize the whole file system.  */
static int jffs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *root_inode;
	struct jffs_control *c;

	sb->s_flags |= MS_NODIRATIME;

	D1(printk(KERN_NOTICE "JFFS: Trying to mount device %s.\n",
		  sb->s_id));

	if (MAJOR(sb->s_dev) != MTD_BLOCK_MAJOR) {
		printk(KERN_WARNING "JFFS: Trying to mount a "
		       "non-mtd device.\n");
		return -EINVAL;
	}

	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_fs_info = (void *) 0;
	sb->s_maxbytes = 0xFFFFFFFF;

	/* Build the file system.  */
	if (jffs_build_fs(sb) < 0) {
		goto jffs_sb_err1;
	}

	/*
	 * set up enough so that we can read an inode
	 */
	sb->s_magic = JFFS_MAGIC_SB_BITMASK;
	sb->s_op = &jffs_ops;

	root_inode = iget(sb, JFFS_MIN_INO);
	if (!root_inode)
	        goto jffs_sb_err2;

	/* Get the root directory of this file system.  */
	if (!(sb->s_root = d_alloc_root(root_inode))) {
		goto jffs_sb_err3;
	}

	c = (struct jffs_control *) sb->s_fs_info;

#ifdef CONFIG_JFFS_PROC_FS
	/* Set up the jffs proc file system.  */
	if (jffs_register_jffs_proc_dir(MINOR(sb->s_dev), c) < 0) {
		printk(KERN_WARNING "JFFS: Failed to initialize the JFFS "
			"proc file system for device %s.\n",
			sb->s_id);
	}
#endif

	/* Set the Garbage Collection thresholds */

	/* GC if free space goes below 5% of the total size */
	c->gc_minfree_threshold = c->fmc->flash_size / 20;

	if (c->gc_minfree_threshold < c->fmc->sector_size)
		c->gc_minfree_threshold = c->fmc->sector_size;

	/* GC if dirty space exceeds 33% of the total size. */
	c->gc_maxdirty_threshold = c->fmc->flash_size / 3;

	if (c->gc_maxdirty_threshold < c->fmc->sector_size)
		c->gc_maxdirty_threshold = c->fmc->sector_size;


	c->thread_pid = kernel_thread (jffs_garbage_collect_thread, 
				        (void *) c, 
				        CLONE_KERNEL);
	D1(printk(KERN_NOTICE "JFFS: GC thread pid=%d.\n", (int) c->thread_pid));

	D1(printk(KERN_NOTICE "JFFS: Successfully mounted device %s.\n",
	       sb->s_id));
	return 0;

jffs_sb_err3:
	iput(root_inode);
jffs_sb_err2:
	jffs_cleanup_control((struct jffs_control *)sb->s_fs_info);
jffs_sb_err1:
	printk(KERN_WARNING "JFFS: Failed to mount device %s.\n",
	       sb->s_id);
	return -EINVAL;
}
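
The garbage-collection thresholds set above follow a simple rule: collect when free space falls below 5% of the flash (flash_size / 20) or when dirty space exceeds a third of it, and never let either threshold drop below one erase sector, since space is reclaimed a sector at a time. The same arithmetic, extracted into a self-contained helper with illustrative names:

#include <stdint.h>

/* Sketch of the GC threshold rule above; names are illustrative. */
struct gc_thresholds { uint32_t minfree; uint32_t maxdirty; };

static struct gc_thresholds compute_gc_thresholds(uint32_t flash_size,
						  uint32_t sector_size)
{
	struct gc_thresholds t;

	t.minfree  = flash_size / 20;	/* GC below 5% free */
	t.maxdirty = flash_size / 3;	/* GC above 33% dirty */

	/* Neither threshold may be smaller than one erase sector,
	   since GC reclaims space a whole sector at a time. */
	if (t.minfree < sector_size)
		t.minfree = sector_size;
	if (t.maxdirty < sector_size)
		t.maxdirty = sector_size;
	return t;
}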
Exemplo n.º 23
0
struct jffs2_inode_cache *jffs2_alloc_inode_cache(void)
{
	struct jffs2_inode_cache *ret = kmem_cache_alloc(inode_cache_slab, GFP_KERNEL);
	D1(printk(KERN_DEBUG "Allocated inocache at %p\n", ret));
	return ret;
}
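
This is the standard slab-cache wrapper: every inocache comes from a dedicated `kmem_cache` created once at init. The excerpt does not show that setup; a kernel-style sketch of the assumed pairing, with illustrative function names:

#include <linux/errno.h>
#include <linux/slab.h>

static struct kmem_cache *inode_cache_slab;

static int jffs2_init_inocache_slab(void)	/* illustrative name */
{
	inode_cache_slab = kmem_cache_create("jffs2_inocache",
					     sizeof(struct jffs2_inode_cache),
					     0, 0, NULL);
	return inode_cache_slab ? 0 : -ENOMEM;
}

static void jffs2_destroy_inocache_slab(void)	/* illustrative name */
{
	kmem_cache_destroy(inode_cache_slab);
}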
Exemplo n.º 24
0
int jffs2_commit_write (struct file *filp, struct page *pg, unsigned start, unsigned end)
{
	/* Actually commit the write from the page cache page we're looking at.
	 * For now, we write the full page out each time. It sucks, but it's simple
	 */
	struct inode *inode = pg->mapping->host;
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	struct jffs2_raw_inode *ri;
	unsigned aligned_start = start & ~3;
	int ret = 0;
	uint32_t writtenlen = 0;

	D1(printk(KERN_DEBUG "jffs2_commit_write(): ino #%lu, page at 0x%lx, range %d-%d, flags %lx\n",
		  inode->i_ino, pg->index << PAGE_CACHE_SHIFT, start, end, pg->flags));

	if (!start && end == PAGE_CACHE_SIZE) {
		/* We need to avoid deadlock with page_cache_read() in
		   jffs2_garbage_collect_pass(). So we have to mark the
		   page up to date, to prevent page_cache_read() from 
		   trying to re-lock it. */
		SetPageUptodate(pg);
	}

	ri = jffs2_alloc_raw_inode();

	if (!ri) {
		D1(printk(KERN_DEBUG "jffs2_commit_write(): Allocation of raw inode failed\n"));
		return -ENOMEM;
	}

	/* Set the fields that the generic jffs2_write_inode_range() code can't find */
	ri->ino = cpu_to_je32(inode->i_ino);
	ri->mode = cpu_to_jemode(inode->i_mode);
	ri->uid = cpu_to_je16(inode->i_uid);
	ri->gid = cpu_to_je16(inode->i_gid);
	ri->isize = cpu_to_je32((uint32_t)inode->i_size);
	ri->atime = ri->ctime = ri->mtime = cpu_to_je32(get_seconds());

	/* In 2.4, it was already kmapped by generic_file_write(). Doesn't
	   hurt to do it again. The alternative is ifdefs, which are ugly. */
	kmap(pg);

	ret = jffs2_write_inode_range(c, f, ri, page_address(pg) + aligned_start,
				      (pg->index << PAGE_CACHE_SHIFT) + aligned_start,
				      end - aligned_start, &writtenlen);

	kunmap(pg);

	if (ret) {
		/* There was an error writing. */
		SetPageError(pg);
	}
	
	/* Adjust writtenlen for the padding we did, so we don't confuse our caller */
	if (writtenlen < (start&3))
		writtenlen = 0;
	else
		writtenlen -= (start&3);

	if (writtenlen) {
		if (inode->i_size < (pg->index << PAGE_CACHE_SHIFT) + start + writtenlen) {
			inode->i_size = (pg->index << PAGE_CACHE_SHIFT) + start + writtenlen;
			inode->i_blocks = (inode->i_size + 511) >> 9;
			
			inode->i_ctime = inode->i_mtime = ITIME(je32_to_cpu(ri->ctime));
		}
	}
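	/* The excerpt is cut off here. The closing below is assumed, not
	   from the source: mirror the usual tail of jffs2_commit_write() by
	   releasing the raw inode and reporting how much was written. */
	jffs2_free_raw_inode(ri);

	return writtenlen ? writtenlen : ret;
}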
Exemplo n.º 25
0
void __INLINE__ start_timer1(unsigned long delay_us)
{
    int freq_index = 0; /* This is the lowest resolution */
    unsigned long upper_limit = MAX_DELAY_US;

    unsigned long div;
    /* Start/Restart the timer to the new shorter value */
    /* t = 1/freq = 1/19200 = 53us
     * T=div*t,  div = T/t = delay_us*freq/1000000
     */
#if 1 /* Adaptive timer settings */
    while (delay_us < upper_limit && freq_index < MAX_USABLE_TIMER_FREQ)
    {
        freq_index++;
        upper_limit >>= 1; /* Divide by 2 using shift */
    }
    if (freq_index > 0)
    {
        freq_index--;
    }
#else
    freq_index = 6;
#endif
    div = delay_us * timer_freq_100[freq_index]/10000;
    if (div < 2)
    {
        /* Maybe increase timer freq? */
        div = 2;
    }
    if (div > 255)
    {
        div = 0; /* This means 256, the max the timer takes */
        /* If a longer timeout than the timer can handle is used,
         * then we must restart it when it goes off.
         */
    }

    timer_div_settings[fast_timers_started % NUM_TIMER_STATS] = div;
    timer_freq_settings[fast_timers_started % NUM_TIMER_STATS] = freq_index;
    timer_delay_settings[fast_timers_started % NUM_TIMER_STATS] = delay_us;

    D1(printk("start_timer1 : %d us freq: %i div: %i\n",
              delay_us, freq_index, div));
    /* Clear timer1 irq */
    *R_IRQ_MASK0_CLR = IO_STATE(R_IRQ_MASK0_CLR, timer1, clr);

    /* Set timer values */
    *R_TIMER_CTRL = r_timer_ctrl_shadow =
                        (r_timer_ctrl_shadow &
                         ~IO_MASK(R_TIMER_CTRL, timerdiv1) &
                         ~IO_MASK(R_TIMER_CTRL, tm1) &
                         ~IO_MASK(R_TIMER_CTRL, clksel1)) |
                        IO_FIELD(R_TIMER_CTRL, timerdiv1, div) |
                        IO_STATE(R_TIMER_CTRL, tm1, stop_ld) |
                        IO_FIELD(R_TIMER_CTRL, clksel1, freq_index ); /* 6=c19k2Hz */

    /* Ack interrupt */
    *R_TIMER_CTRL =  r_timer_ctrl_shadow |
                     IO_STATE(R_TIMER_CTRL, i1, clr);

    /* Start timer */
    *R_TIMER_CTRL = r_timer_ctrl_shadow =
                        (r_timer_ctrl_shadow & ~IO_MASK(R_TIMER_CTRL, tm1)) |
                        IO_STATE(R_TIMER_CTRL, tm1, run);

    /* Enable timer1 irq */
    *R_IRQ_MASK0_SET = IO_STATE(R_IRQ_MASK0_SET, timer1, set);
    fast_timers_started++;
    fast_timer_running = 1;
}
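
The heart of the routine is the divisor computation: step up through the timer frequencies, halving the representable range each time, until the 8-bit divisor can still express the requested delay, then compute div = delay_us * freq / 10^6 and clamp it to [2, 256]. A self-contained sketch; the frequency table and MAX_DELAY_US are assumed values, since the real ones are hardware-specific:

#include <stdint.h>

/* Assumed: timer frequencies in units of 100 Hz, lowest first
   (19.2 kHz doubling upwards); the real table is hardware-specific. */
static const uint32_t timer_freq_100[] = {
	192, 384, 768, 1536, 3072, 6144, 12288, 24576
};
#define MAX_USABLE_TIMER_FREQ 7
#define MAX_DELAY_US 13333	/* assumed: ~256 ticks at 19.2 kHz */

static uint32_t timer_div(uint32_t delay_us, int *freq_index_out)
{
	uint32_t upper_limit = MAX_DELAY_US;
	int freq_index = 0;	/* lowest resolution */
	uint32_t div;

	/* Each step up in frequency halves the representable range. */
	while (delay_us < upper_limit && freq_index < MAX_USABLE_TIMER_FREQ) {
		freq_index++;
		upper_limit >>= 1;
	}
	if (freq_index > 0)
		freq_index--;

	/* T = div / freq  =>  div = delay_us * freq / 1e6;
	   the table is in units of 100 Hz, hence the / 10000. */
	div = delay_us * timer_freq_100[freq_index] / 10000;
	if (div < 2)
		div = 2;	/* shortest delay the timer honours */
	if (div > 255)
		div = 0;	/* hardware reads 0 as 256, the maximum */

	*freq_index_out = freq_index;
	return div;
}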
Exemplo n.º 26
0
static int jffs2_create(struct inode *dir_i, struct dentry *dentry, int mode,
			struct nameidata *nd)
{
	struct jffs2_raw_inode *ri;
	struct jffs2_inode_info *f, *dir_f;
	struct jffs2_sb_info *c;
	struct inode *inode;
	int ret;

	ri = jffs2_alloc_raw_inode();
	if (!ri)
		return -ENOMEM;

	c = JFFS2_SB_INFO(dir_i->i_sb);

	D1(printk(KERN_DEBUG "jffs2_create()\n"));

	inode = jffs2_new_inode(dir_i, mode, ri);

	if (IS_ERR(inode)) {
		D1(printk(KERN_DEBUG "jffs2_new_inode() failed\n"));
		jffs2_free_raw_inode(ri);
		return PTR_ERR(inode);
	}

	inode->i_op = &jffs2_file_inode_operations;
	inode->i_fop = &jffs2_file_operations;
	inode->i_mapping->a_ops = &jffs2_file_address_operations;
	inode->i_mapping->nrpages = 0;

	f = JFFS2_INODE_INFO(inode);
	dir_f = JFFS2_INODE_INFO(dir_i);

	/* jffs2_do_create() will want to lock it, _after_ reserving
	   space and taking c-alloc_sem. If we keep it locked here,
	   lockdep gets unhappy (although it's a false positive;
	   nothing else will be looking at this inode yet so there's
	   no chance of AB-BA deadlock involving its f->sem). */
	mutex_unlock(&f->sem);

	ret = jffs2_do_create(c, dir_f, f, ri,
			      dentry->d_name.name, dentry->d_name.len);
	if (ret)
		goto fail;

	dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(ri->ctime));

	jffs2_free_raw_inode(ri);
	d_instantiate(dentry, inode);

	D1(printk(KERN_DEBUG "jffs2_create: Created ino #%lu with mode %o, nlink %d(%d). nrpages %ld\n",
		  inode->i_ino, inode->i_mode, inode->i_nlink,
		  f->inocache->pino_nlink, inode->i_mapping->nrpages));
	return 0;

 fail:
	make_bad_inode(inode);
	iput(inode);
	jffs2_free_raw_inode(ri);
	return ret;
}
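
The unlock before `jffs2_do_create()` preserves a global lock order: the write paths take the allocation semaphore first and `f->sem` second, so holding `f->sem` across a callee that takes the allocation lock would look like an AB-BA inversion to lockdep, even though nobody else can reach this unpublished inode yet. A toy pthreads illustration of keeping the documented order; the locks and helpers are hypothetical stand-ins:

#include <pthread.h>

/* Illustrative stand-ins for c->alloc_sem and f->sem. */
static pthread_mutex_t alloc_sem = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t f_sem     = PTHREAD_MUTEX_INITIALIZER;

/* Everywhere else: alloc_sem first, then f_sem. */
static void writer_path(void)
{
	pthread_mutex_lock(&alloc_sem);
	pthread_mutex_lock(&f_sem);
	/* ... reserve space, write node ... */
	pthread_mutex_unlock(&f_sem);
	pthread_mutex_unlock(&alloc_sem);
}

/* Create path: drop f_sem before a callee that takes alloc_sem,
   so the documented order is never reversed. */
static void create_path(void)
{
	pthread_mutex_lock(&f_sem);
	/* ... initialise the new, still-unpublished inode ... */
	pthread_mutex_unlock(&f_sem);
	writer_path();		/* takes alloc_sem -> f_sem, in order */
}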
Exemplo n.º 27
0
int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg)
{
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	struct jffs2_node_frag *frag = f->fraglist;
	__u32 offset = pg->index << PAGE_CACHE_SHIFT;
	__u32 end = offset + PAGE_CACHE_SIZE;
	unsigned char *pg_buf;
	int ret;

	D1(printk(KERN_DEBUG "jffs2_do_readpage_nolock(): ino #%lu, page at offset 0x%x\n", inode->i_ino, offset));

	if (!PageLocked(pg))
                PAGE_BUG(pg);

	while(frag && frag->ofs + frag->size  <= offset) {
		//		D1(printk(KERN_DEBUG "skipping frag %d-%d; before the region we care about\n", frag->ofs, frag->ofs + frag->size));
		frag = frag->next;
	}

	pg_buf = kmap(pg);

	/* XXX FIXME: Where a single physical node actually shows up in two
	   frags, we read it twice. Don't do that. */
	/* Now we're pointing at the first frag which overlaps our page */
	while(offset < end) {
		D2(printk(KERN_DEBUG "jffs2_readpage: offset %d, end %d\n", offset, end));
		if (!frag || frag->ofs > offset) {
			__u32 holesize = end - offset;
			if (frag) {
				D1(printk(KERN_NOTICE "Eep. Hole in ino %ld fraglist. frag->ofs = 0x%08x, offset = 0x%08x\n", inode->i_ino, frag->ofs, offset));
				holesize = min(holesize, frag->ofs - offset);
				D1(jffs2_print_frag_list(f));
			}
			D1(printk(KERN_DEBUG "Filling non-frag hole from %d-%d\n", offset, offset+holesize));
			memset(pg_buf, 0, holesize);
			pg_buf += holesize;
			offset += holesize;
			continue;
		} else if (frag->ofs < offset && (offset & (PAGE_CACHE_SIZE-1)) != 0) {
			D1(printk(KERN_NOTICE "Eep. Overlap in ino #%ld fraglist. frag->ofs = 0x%08x, offset = 0x%08x\n",
				  inode->i_ino, frag->ofs, offset));
			D1(jffs2_print_frag_list(f));
			memset(pg_buf, 0, end - offset);
			ClearPageUptodate(pg);
			SetPageError(pg);
			kunmap(pg);
			return -EIO;
		} else if (!frag->node) {
			__u32 holeend = min(end, frag->ofs + frag->size);
			D1(printk(KERN_DEBUG "Filling frag hole from %d-%d (frag 0x%x 0x%x)\n", offset, holeend, frag->ofs, frag->ofs + frag->size));
			memset(pg_buf, 0, holeend - offset);
			pg_buf += holeend - offset;
			offset = holeend;
			frag = frag->next;
			continue;
		} else {
			__u32 readlen;
			__u32 fragofs; /* offset within the frag to start reading */

			fragofs = offset - frag->ofs;
			readlen = min(frag->size - fragofs, end - offset);
			D1(printk(KERN_DEBUG "Reading %d-%d from node at 0x%x\n", frag->ofs+fragofs, 
				  fragofs+frag->ofs+readlen, frag->node->raw->flash_offset & ~3));
			ret = jffs2_read_dnode(c, frag->node, pg_buf, fragofs + frag->ofs - frag->node->ofs, readlen);
			D2(printk(KERN_DEBUG "node read done\n"));
			if (ret) {
				D1(printk(KERN_DEBUG"jffs2_readpage error %d\n",ret));
				memset(pg_buf, 0, readlen);
				ClearPageUptodate(pg);
				SetPageError(pg);
				kunmap(pg);
				return ret;
			}
		
			pg_buf += readlen;
			offset += readlen;
			frag = frag->next;
			D2(printk(KERN_DEBUG "node read was OK. Looping\n"));
		}
	}
	D2(printk(KERN_DEBUG "readpage finishing\n"));
	SetPageUptodate(pg);
	ClearPageError(pg);

	flush_dcache_page(pg);

	kunmap(pg);
	D1(printk(KERN_DEBUG "readpage finished\n"));
	return 0;
}
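
Stripped of the page-cache plumbing, the loop above is a sparse-extent walk: zero-fill any gap before the next fragment, zero-fill fragments without a backing node, and copy data for the rest. A compact userspace rendition over a sorted fragment array, with all names hypothetical:

#include <stdint.h>
#include <string.h>

/* Hypothetical fragment: byte range [ofs, ofs+size); data == NULL
   means the fragment is a hole with no backing node. */
struct frag {
	uint32_t ofs, size;
	const unsigned char *data;
};

/* Fill buf with the file contents of [offset, end) from a sorted list. */
static void read_range(unsigned char *buf, uint32_t offset, uint32_t end,
		       const struct frag *frags, int nfrags)
{
	int i = 0;

	/* Skip fragments entirely before the region we care about. */
	while (i < nfrags && frags[i].ofs + frags[i].size <= offset)
		i++;

	while (offset < end) {
		if (i == nfrags || frags[i].ofs > offset) {
			/* Gap before the next fragment: zero-fill it. */
			uint32_t hole = end - offset;
			if (i < nfrags && frags[i].ofs - offset < hole)
				hole = frags[i].ofs - offset;
			memset(buf, 0, hole);
			buf += hole;
			offset += hole;
		} else if (!frags[i].data) {
			/* Fragment present but holds no data: also zeros. */
			uint32_t holeend = frags[i].ofs + frags[i].size;
			if (holeend > end)
				holeend = end;
			memset(buf, 0, holeend - offset);
			buf += holeend - offset;
			offset = holeend;
			i++;
		} else {
			/* Real data: copy the overlapping part. */
			uint32_t fragofs = offset - frags[i].ofs;
			uint32_t len = frags[i].size - fragofs;
			if (len > end - offset)
				len = end - offset;
			memcpy(buf, frags[i].data + fragofs, len);
			buf += len;
			offset += len;
			i++;
		}
	}
}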
Exemplo n.º 28
0
static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char *target)
{
	struct jffs2_inode_info *f, *dir_f;
	struct jffs2_sb_info *c;
	struct inode *inode;
	struct jffs2_raw_inode *ri;
	struct jffs2_raw_dirent *rd;
	struct jffs2_full_dnode *fn;
	struct jffs2_full_dirent *fd;
	int namelen;
	uint32_t alloclen;
	int ret, targetlen = strlen(target);

	/* FIXME: If you care. We'd need to use frags for the target
	   if it grows much more than this */
	if (targetlen > 254)
		return -EINVAL;

	ri = jffs2_alloc_raw_inode();

	if (!ri)
		return -ENOMEM;

	c = JFFS2_SB_INFO(dir_i->i_sb);

	/* Try to reserve enough space for both node and dirent.
	 * Just the node will do for now, though
	 */
	namelen = dentry->d_name.len;
	ret = jffs2_reserve_space(c, sizeof(*ri) + targetlen, &alloclen,
				  ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);

	if (ret) {
		jffs2_free_raw_inode(ri);
		return ret;
	}

	inode = jffs2_new_inode(dir_i, S_IFLNK | S_IRWXUGO, ri);

	if (IS_ERR(inode)) {
		jffs2_free_raw_inode(ri);
		jffs2_complete_reservation(c);
		return PTR_ERR(inode);
	}

	inode->i_op = &jffs2_symlink_inode_operations;

	f = JFFS2_INODE_INFO(inode);

	inode->i_size = targetlen;
	ri->isize = ri->dsize = ri->csize = cpu_to_je32(inode->i_size);
	ri->totlen = cpu_to_je32(sizeof(*ri) + inode->i_size);
	ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4));

	ri->compr = JFFS2_COMPR_NONE;
	ri->data_crc = cpu_to_je32(crc32(0, target, targetlen));
	ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));

	fn = jffs2_write_dnode(c, f, ri, target, targetlen, ALLOC_NORMAL);

	jffs2_free_raw_inode(ri);

	if (IS_ERR(fn)) {
		/* Eeek. Wave bye bye */
		mutex_unlock(&f->sem);
		jffs2_complete_reservation(c);
		jffs2_clear_inode(inode);
		return PTR_ERR(fn);
	}

	/* We use f->target field to store the target path. */
	f->target = kmalloc(targetlen + 1, GFP_KERNEL);
	if (!f->target) {
		printk(KERN_WARNING "Can't allocate %d bytes of memory\n", targetlen + 1);
		mutex_unlock(&f->sem);
		jffs2_complete_reservation(c);
		jffs2_clear_inode(inode);
		return -ENOMEM;
	}

	memcpy(f->target, target, targetlen + 1);
	D1(printk(KERN_DEBUG "jffs2_symlink: symlink's target '%s' cached\n", (char *)f->target));

	/* No data here. Only a metadata node, which will be
	   obsoleted by the first data write
	*/
	f->metadata = fn;
	mutex_unlock(&f->sem);

	jffs2_complete_reservation(c);

	ret = jffs2_init_security(inode, dir_i);
	if (ret) {
		jffs2_clear_inode(inode);
		return ret;
	}
	ret = jffs2_init_acl_post(inode);
	if (ret) {
		jffs2_clear_inode(inode);
		return ret;
	}

	ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &alloclen,
				  ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
	if (ret) {
		/* Eep. */
		jffs2_clear_inode(inode);
		return ret;
	}

	rd = jffs2_alloc_raw_dirent();
	if (!rd) {
		/* Argh. Now we treat it like a normal delete */
		jffs2_complete_reservation(c);
		jffs2_clear_inode(inode);
		return -ENOMEM;
	}

	dir_f = JFFS2_INODE_INFO(dir_i);
	mutex_lock(&dir_f->sem);

	rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
	rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
	rd->totlen = cpu_to_je32(sizeof(*rd) + namelen);
	rd->hdr_crc = cpu_to_je32(crc32(0, rd, sizeof(struct jffs2_unknown_node)-4));

	rd->pino = cpu_to_je32(dir_i->i_ino);
	rd->version = cpu_to_je32(++dir_f->highest_version);
	rd->ino = cpu_to_je32(inode->i_ino);
	rd->mctime = cpu_to_je32(get_seconds());
	rd->nsize = namelen;
	rd->type = DT_LNK;
	rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
	rd->name_crc = cpu_to_je32(crc32(0, dentry->d_name.name, namelen));

	fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, ALLOC_NORMAL);

	if (IS_ERR(fd)) {
		/* dirent failed to write. Delete the inode normally
		   as if it were the final unlink() */
		jffs2_complete_reservation(c);
		jffs2_free_raw_dirent(rd);
		mutex_unlock(&dir_f->sem);
		jffs2_clear_inode(inode);
		return PTR_ERR(fd);
	}

	dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(rd->mctime));

	jffs2_free_raw_dirent(rd);

	/* Link the fd into the inode's list, obsoleting an old
	   one if necessary. */
	jffs2_add_fd_to_list(c, fd, &dir_f->dents);

	mutex_unlock(&dir_f->sem);
	jffs2_complete_reservation(c);

	d_instantiate(dentry, inode);
	return 0;
}
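
Note the CRC split on the dirent: `node_crc` covers the fixed header up to, but not including, the two trailing CRC fields (hence `sizeof(*rd)-8`), while the name bytes that follow the header are covered separately by `name_crc`. A hedged sketch of that layout; the struct here is illustrative and much shorter than the real raw dirent:

#include <stdint.h>
#include <string.h>
#include <zlib.h>		/* crc32() stand-in for the kernel's */

/* Illustrative dirent header: the two CRCs are its last 8 bytes. */
struct __attribute__((packed)) raw_dirent_hdr {
	uint32_t pino, version, ino, mctime;
	uint8_t  nsize, type;
	uint32_t node_crc;	/* covers every field above this one */
	uint32_t name_crc;	/* covers the name bytes that follow */
};

static void seal_dirent(struct raw_dirent_hdr *rd,
			const char *name, size_t namelen)
{
	rd->node_crc = crc32(0, (const unsigned char *)rd,
			     sizeof(*rd) - 8);	/* skip both CRC fields */
	rd->name_crc = crc32(0, (const unsigned char *)name, namelen);
}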
Exemplo n.º 29
0
int jffs2_commit_write (struct file *filp, struct page *pg, unsigned start, unsigned end)
{
	/* Actually commit the write from the page cache page we're looking at.
	 * For now, we write the full page out each time. It sucks, but it's simple
	 */
	struct inode *inode = pg->mapping->host;
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	__u32 newsize = max_t(__u32, filp->f_dentry->d_inode->i_size, (pg->index << PAGE_CACHE_SHIFT) + end);
	__u32 file_ofs = (pg->index << PAGE_CACHE_SHIFT);
	__u32 writelen = min((__u32)PAGE_CACHE_SIZE, newsize - file_ofs);
	struct jffs2_raw_inode *ri;
	int ret = 0;
	ssize_t writtenlen = 0;

	D1(printk(KERN_DEBUG "jffs2_commit_write(): ino #%lu, page at 0x%lx, range %d-%d, flags %lx\n", inode->i_ino, pg->index << PAGE_CACHE_SHIFT, start, end, pg->flags));

	if (!start && end == PAGE_CACHE_SIZE) {
		/* We need to avoid deadlock with page_cache_read() in
		   jffs2_garbage_collect_pass(). So we have to mark the
		   page up to date, to prevent page_cache_read() from 
		   trying to re-lock it. */
		SetPageUptodate(pg);
	}

	ri = jffs2_alloc_raw_inode();
	if (!ri)
		return -ENOMEM;

	while(writelen) {
		struct jffs2_full_dnode *fn;
		unsigned char *comprbuf = NULL;
		unsigned char comprtype = JFFS2_COMPR_NONE;
		__u32 phys_ofs, alloclen;
		__u32 datalen, cdatalen;

		D2(printk(KERN_DEBUG "jffs2_commit_write() loop: 0x%x to write to 0x%x\n", writelen, file_ofs));

		ret = jffs2_reserve_space(c, sizeof(*ri) + JFFS2_MIN_DATA_LEN, &phys_ofs, &alloclen, ALLOC_NORMAL);
		if (ret) {
			SetPageError(pg);
			D1(printk(KERN_DEBUG "jffs2_reserve_space returned %d\n", ret));
			break;
		}
		down(&f->sem);
		datalen = writelen;
		cdatalen = min(alloclen - sizeof(*ri), writelen);

		comprbuf = kmalloc(cdatalen, GFP_KERNEL);
		if (comprbuf) {
			comprtype = jffs2_compress(page_address(pg)+ (file_ofs & (PAGE_CACHE_SIZE-1)), comprbuf, &datalen, &cdatalen);
		}
		if (comprtype == JFFS2_COMPR_NONE) {
			/* Either compression failed, or the allocation of comprbuf failed */
			if (comprbuf)
				kfree(comprbuf);
			comprbuf = page_address(pg) + (file_ofs & (PAGE_CACHE_SIZE -1));
			datalen = cdatalen;
		}
		/* Now comprbuf points to the data to be written, be it compressed or not.
		   comprtype holds the compression type, and comprtype == JFFS2_COMPR_NONE means
		   that the comprbuf doesn't need to be kfree()d. 
		*/

		ri->magic = JFFS2_MAGIC_BITMASK;
		ri->nodetype = JFFS2_NODETYPE_INODE;
		ri->totlen = sizeof(*ri) + cdatalen;
		ri->hdr_crc = crc32(0, ri, sizeof(struct jffs2_unknown_node)-4);

		ri->ino = inode->i_ino;
		ri->version = ++f->highest_version;
		ri->mode = inode->i_mode;
		ri->uid = inode->i_uid;
		ri->gid = inode->i_gid;
		ri->isize = max((__u32)inode->i_size, file_ofs + datalen);
		ri->atime = ri->ctime = ri->mtime = CURRENT_TIME;
		ri->offset = file_ofs;
		ri->csize = cdatalen;
		ri->dsize = datalen;
		ri->compr = comprtype;
		ri->node_crc = crc32(0, ri, sizeof(*ri)-8);
		ri->data_crc = crc32(0, comprbuf, cdatalen);

		fn = jffs2_write_dnode(inode, ri, comprbuf, cdatalen, phys_ofs, NULL);

		jffs2_complete_reservation(c);

		if (comprtype != JFFS2_COMPR_NONE)
			kfree(comprbuf);

		if (IS_ERR(fn)) {
			ret = PTR_ERR(fn);
			up(&f->sem);
			SetPageError(pg);
			break;
		}
		ret = jffs2_add_full_dnode_to_inode(c, f, fn);
		if (f->metadata) {
			jffs2_mark_node_obsolete(c, f->metadata->raw);
			jffs2_free_full_dnode(f->metadata);
			f->metadata = NULL;
		}
		up(&f->sem);
		if (ret) {
			/* Eep */
			D1(printk(KERN_DEBUG "Eep. add_full_dnode_to_inode() failed in commit_write, returned %d\n", ret));
			jffs2_mark_node_obsolete(c, fn->raw);
			jffs2_free_full_dnode(fn);
			SetPageError(pg);
			break;
		}
		inode->i_size = ri->isize;
		inode->i_blocks = (inode->i_size + 511) >> 9;
		inode->i_ctime = inode->i_mtime = ri->ctime;
		if (!datalen) {
			printk(KERN_WARNING "Eep. We didn't actually write any bloody data\n");
			ret = -EIO;
			SetPageError(pg);
			break;
		}
		D1(printk(KERN_DEBUG "increasing writtenlen by %d\n", datalen));
		writtenlen += datalen;
		file_ofs += datalen;
		writelen -= datalen;
	}

	jffs2_free_raw_inode(ri);

	if (writtenlen < end) {
		/* generic_file_write has written more to the page cache than we've
		   actually written to the medium. Mark the page !Uptodate so that 
		   it gets reread */
		D1(printk(KERN_DEBUG "jffs2_commit_write(): Not all bytes written. Marking page !uptodate\n"));
		SetPageError(pg);
		ClearPageUptodate(pg);
	}
	if (writtenlen <= start) {
		/* We didn't even get to the start of the affected part */
		ret = ret?ret:-ENOSPC;
		D1(printk(KERN_DEBUG "jffs2_commit_write(): Only %x bytes written to page. start (%x) not reached, returning %d\n", writtenlen, start, ret));
	}
	writtenlen = min(end-start, writtenlen-start);

	D1(printk(KERN_DEBUG "jffs2_commit_write() returning %d. nrpages is %ld\n",writtenlen?writtenlen:ret, inode->i_mapping->nrpages));
	return writtenlen?writtenlen:ret;
}
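
The comprbuf handling above is a compress-if-it-helps pattern: compress into a scratch buffer, and on any failure (allocation or compression) fall back to pointing at the raw page data, with the compression type doubling as the flag that says whether the buffer must be freed afterwards. A distilled userspace sketch using zlib in place of the kernel's compressor framework:

#include <stdint.h>
#include <stdlib.h>
#include <zlib.h>

#define COMPR_NONE 0
#define COMPR_ZLIB 1

/* Returns the buffer to write and its length; *comprtype records whether
   the result is compressed and therefore whether it must be free()d. */
static const unsigned char *maybe_compress(const unsigned char *data,
					   size_t len, size_t *outlen,
					   int *comprtype)
{
	uLongf clen = compressBound(len);
	unsigned char *cbuf = malloc(clen);

	if (cbuf && compress(cbuf, &clen, data, len) == Z_OK && clen < len) {
		*comprtype = COMPR_ZLIB;
		*outlen = clen;
		return cbuf;		/* caller frees */
	}
	/* Allocation or compression failed, or it didn't shrink:
	   write the raw data in place, nothing to free. */
	free(cbuf);
	*comprtype = COMPR_NONE;
	*outlen = len;
	return data;
}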
Exemplo n.º 30
0
/*
 * Forward pkts to any devices or interfaces which have registered
 * an interest in them (i.e. multicast groups).
 */
static int
vsw_forward_grp(vsw_t *vswp, mblk_t *mp, int caller, vsw_port_t *arg)
{
	struct ether_header	*ehp = (struct ether_header *)mp->b_rptr;
	mfdb_ent_t		*entp = NULL;
	mfdb_ent_t		*tpp = NULL;
	vsw_port_t 		*port;
	uint64_t		key = 0;
	mblk_t			*nmp = NULL;
	mblk_t			*ret_m = NULL;
	boolean_t		check_if = B_TRUE;

	/*
	 * Convert address to hash table key
	 */
	KEY_HASH(key, &ehp->ether_dhost);

	D1(vswp, "%s: key 0x%llx", __func__, key);

	/*
	 * If pkt came from either a vnet or down the stack (if we are
	 * plumbed) and we are in layer 2 mode, then we send the pkt out
	 * over the physical adapter, and then check to see if any other
	 * vnets are interested in it.
	 */
	if ((vswp->smode & VSW_LAYER2) &&
	    ((caller == VSW_VNETPORT) || (caller == VSW_LOCALDEV))) {
		nmp = vsw_dupmsgchain(mp);
		if (nmp) {
			if ((ret_m = vsw_tx_msg(vswp, nmp, caller, arg))
			    != NULL) {
				DERR(vswp, "%s: dropping pkt(s) consisting of "
				    "%ld bytes of data for physical device",
				    __func__, MBLKL(ret_m));
				freemsgchain(ret_m);
			}
		}
	}

	READ_ENTER(&vswp->mfdbrw);
	if (mod_hash_find(vswp->mfdb, (mod_hash_key_t)key,
	    (mod_hash_val_t *)&entp) != 0) {
		D3(vswp, "%s: no table entry found for addr 0x%llx",
		    __func__, key);
	} else {
		/*
		 * Send to list of devices associated with this address...
		 */
		for (tpp = entp; tpp != NULL; tpp = tpp->nextp) {

			/* don't send to ourselves */
			if ((caller == VSW_VNETPORT) &&
			    (tpp->d_addr == (void *)arg)) {
				port = (vsw_port_t *)tpp->d_addr;
				D3(vswp, "%s: not sending to ourselves"
				    " : port %d", __func__, port->p_instance);
				continue;

			} else if ((caller == VSW_LOCALDEV) &&
			    (tpp->d_type == VSW_LOCALDEV)) {
				D2(vswp, "%s: not sending back up stack",
				    __func__);
				continue;
			}

			if (tpp->d_type == VSW_VNETPORT) {
				port = (vsw_port_t *)tpp->d_addr;
				D3(vswp, "%s: sending to port %ld for addr "
				    "0x%llx", __func__, port->p_instance, key);

				nmp = vsw_dupmsgchain(mp);
				if (nmp) {
					/*
					 * The vswp->mfdbrw is protecting the
					 * portp from getting destroyed here.
					 * So, no ref_cnt is incremented here.
					 */
					(void) vsw_portsend(port, nmp);
				}
			} else {
				vsw_mac_rx(vswp, NULL,
				    mp, VSW_MACRX_COPYMSG);
				D2(vswp, "%s: sending up stack"
				    " for addr 0x%llx", __func__, key);
				check_if = B_FALSE;
			}
		}
	}

	RW_EXIT(&vswp->mfdbrw);

	/*
	 * If the pkt came from either a vnet or the physical device,
	 * and we haven't already sent the pkt up the stack, then we
	 * check now if we can/should (i.e. the interface is plumbed
	 * and in promisc mode).
	 */
	if ((check_if) &&
	    ((caller == VSW_VNETPORT) || (caller == VSW_PHYSDEV))) {
		vsw_mac_rx(vswp, NULL, mp,
		    VSW_MACRX_PROMISC | VSW_MACRX_COPYMSG);
	}

	freemsgchain(mp);

	D1(vswp, "%s: exit", __func__);

	return (0);
}
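
The `KEY_HASH` step packs the six bytes of the destination MAC address into a single 64-bit integer so the generic integer-keyed `mod_hash` table can be reused. A plausible rendition, for illustration only; the real macro may shift or fold differently:

#include <stdint.h>

/* Pack a 6-byte MAC address into a 64-bit hash key, high byte first;
   a guess at what KEY_HASH does, not the actual vsw macro. */
static uint64_t mac_to_key(const uint8_t mac[6])
{
	uint64_t key = 0;

	for (int i = 0; i < 6; i++)
		key = (key << 8) | mac[i];
	return key;
}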