static void ipt_ulog_packet(unsigned int hooknum,
			    const struct sk_buff *skb,
			    const struct net_device *in,
			    const struct net_device *out,
			    const struct ipt_ulog_info *loginfo,
			    const char *prefix)
{
	ulog_buff_t *ub;
	ulog_packet_msg_t *pm;
	size_t size, copy_len;
	struct nlmsghdr *nlh;
	struct timeval tv;

	/* ffs == find first bit set, necessary because userspace
	 * is already shifting groupnumber, but we need unshifted.
	 * ffs() returns [1..32], we need [0..31] */
	unsigned int groupnum = ffs(loginfo->nl_group) - 1;

	/* calculate the size of the skb needed */
	if (loginfo->copy_range == 0 || loginfo->copy_range > skb->len)
		copy_len = skb->len;
	else
		copy_len = loginfo->copy_range;

	size = NLMSG_SPACE(sizeof(*pm) + copy_len);

	ub = &ulog_buffers[groupnum];

	spin_lock_bh(&ulog_lock);

	if (!ub->skb) {
		if (!(ub->skb = ulog_alloc_skb(size)))
			goto alloc_failure;
	} else if (ub->qlen >= loginfo->qthreshold ||
		   size > skb_tailroom(ub->skb)) {
		/* either the queue len is too high or we don't have
		 * enough room in the skb left. flush to userspace. */
		ulog_send(groupnum);

		if (!(ub->skb = ulog_alloc_skb(size)))
			goto alloc_failure;
	}

	pr_debug("qlen %d, qthreshold %Zu\n", ub->qlen, loginfo->qthreshold);

	/* NLMSG_PUT contains a hidden goto nlmsg_failure !!! */
	nlh = NLMSG_PUT(ub->skb, 0, ub->qlen, ULOG_NL_EVENT,
			sizeof(*pm)+copy_len);
	ub->qlen++;

	pm = NLMSG_DATA(nlh);

	/* We might not have a timestamp, get one */
	if (skb->tstamp.tv64 == 0)
		__net_timestamp((struct sk_buff *)skb);

	/* copy hook, prefix, timestamp, payload, etc. */
	pm->data_len = copy_len;
	tv = ktime_to_timeval(skb->tstamp);
	put_unaligned(tv.tv_sec, &pm->timestamp_sec);
	put_unaligned(tv.tv_usec, &pm->timestamp_usec);
	put_unaligned(skb->mark, &pm->mark);
	pm->hook = hooknum;
	if (prefix != NULL)
		strncpy(pm->prefix, prefix, sizeof(pm->prefix));
	else if (loginfo->prefix[0] != '\0')
		strncpy(pm->prefix, loginfo->prefix, sizeof(pm->prefix));
	else
		*(pm->prefix) = '\0';

	if (in && in->hard_header_len > 0 &&
	    skb->mac_header != skb->network_header &&
	    in->hard_header_len <= ULOG_MAC_LEN) {
		memcpy(pm->mac, skb_mac_header(skb), in->hard_header_len);
		pm->mac_len = in->hard_header_len;
	} else
		pm->mac_len = 0;

	if (in)
		strncpy(pm->indev_name, in->name, sizeof(pm->indev_name));
	else
		pm->indev_name[0] = '\0';

	if (out)
		strncpy(pm->outdev_name, out->name, sizeof(pm->outdev_name));
	else
		pm->outdev_name[0] = '\0';

	/* copy_len <= skb->len, so this can't fail */
	if (skb_copy_bits(skb, 0, pm->payload, copy_len) < 0)
		BUG();

	/* check if we are building multi-part messages */
	if (ub->qlen > 1)
		ub->lastnlh->nlmsg_flags |= NLM_F_MULTI;

	ub->lastnlh = nlh;

	/* if timer isn't already running, start it */
	if (!timer_pending(&ub->timer)) {
		ub->timer.expires = jiffies + flushtimeout * HZ / 100;
		add_timer(&ub->timer);
	}

	/* if threshold is reached, send message to userspace */
	if (ub->qlen >= loginfo->qthreshold) {
		if (loginfo->qthreshold > 1)
			nlh->nlmsg_type = NLMSG_DONE;
		ulog_send(groupnum);
	}

	spin_unlock_bh(&ulog_lock);

	return;

nlmsg_failure:
	pr_debug("error during NLMSG_PUT\n");
alloc_failure:
	pr_debug("Error building netlink message\n");
	spin_unlock_bh(&ulog_lock);
}
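Aside: a minimal user-space sketch (not part of the module above; the sample masks are invented for illustration) of the conversion that ffs(loginfo->nl_group) - 1 performs, mapping a one-hot netlink group mask to a 0-based group index:

/* Sketch: ffs() returns the 1-based index of the lowest set bit,
 * so subtracting one recovers the 0-based group number. */
#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int masks[] = { 0x01, 0x02, 0x10, 0x00800000 };
	unsigned int i;

	for (i = 0; i < 4; i++)
		printf("mask 0x%08x -> group %d\n",
		       masks[i], ffs((int)masks[i]) - 1);
	return 0;
}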
Example #2
void
nv50_constbufs_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   unsigned s;

   for (s = 0; s < 3; ++s) {
      unsigned p;

      if (s == PIPE_SHADER_FRAGMENT)
         p = NV50_3D_SET_PROGRAM_CB_PROGRAM_FRAGMENT;
      else
      if (s == PIPE_SHADER_GEOMETRY)
         p = NV50_3D_SET_PROGRAM_CB_PROGRAM_GEOMETRY;
      else
         p = NV50_3D_SET_PROGRAM_CB_PROGRAM_VERTEX;

      while (nv50->constbuf_dirty[s]) {
         const unsigned i = (unsigned)ffs(nv50->constbuf_dirty[s]) - 1;

         assert(i < NV50_MAX_PIPE_CONSTBUFS);
         nv50->constbuf_dirty[s] &= ~(1 << i);

         if (nv50->constbuf[s][i].user) {
            const unsigned b = NV50_CB_PVP + s;
            unsigned start = 0;
            unsigned words = nv50->constbuf[s][0].size / 4;
            if (i) {
               NOUVEAU_ERR("user constbufs only supported in slot 0\n");
               continue;
            }
            if (!nv50->state.uniform_buffer_bound[s]) {
               nv50->state.uniform_buffer_bound[s] = TRUE;
               BEGIN_NV04(push, NV50_3D(SET_PROGRAM_CB), 1);
               PUSH_DATA (push, (b << 12) | (i << 8) | p | 1);
            }
            while (words) {
               unsigned nr;

               if (!PUSH_SPACE(push, 16))
                  break;
               nr = PUSH_AVAIL(push);
               assert(nr >= 16);
               nr = MIN2(MIN2(nr - 3, words), NV04_PFIFO_MAX_PACKET_LEN);

               BEGIN_NV04(push, NV50_3D(CB_ADDR), 1);
               PUSH_DATA (push, (start << 8) | b);
               BEGIN_NI04(push, NV50_3D(CB_DATA(0)), nr);
               PUSH_DATAp(push, &nv50->constbuf[s][0].u.data[start * 4], nr);

               start += nr;
               words -= nr;
            }
         } else {
            struct nv04_resource *res =
               nv04_resource(nv50->constbuf[s][i].u.buf);
            if (res) {
               /* TODO: allocate persistent bindings */
               const unsigned b = s * 16 + i;

               assert(nouveau_resource_mapped_by_gpu(&res->base));

               BEGIN_NV04(push, NV50_3D(CB_DEF_ADDRESS_HIGH), 3);
               PUSH_DATAh(push, res->address + nv50->constbuf[s][i].offset);
               PUSH_DATA (push, res->address + nv50->constbuf[s][i].offset);
               PUSH_DATA (push, (b << 16) |
                          (nv50->constbuf[s][i].size & 0xffff));
               BEGIN_NV04(push, NV50_3D(SET_PROGRAM_CB), 1);
               PUSH_DATA (push, (b << 12) | (i << 8) | p | 1);

               BCTX_REFN(nv50->bufctx_3d, CB(s, i), res, RD);
            } else {
               BEGIN_NV04(push, NV50_3D(SET_PROGRAM_CB), 1);
               PUSH_DATA (push, (i << 8) | p | 0);
            }
            if (i == 0)
               nv50->state.uniform_buffer_bound[s] = FALSE;
         }
      }
   }
}
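A minimal sketch of the dirty-bitmask walk used above: take the lowest set bit with ffs(), handle that slot, clear the bit, and repeat until the mask is empty. The mask value here is made up.

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned dirty = 0x29;	/* pretend slots 0, 3 and 5 are dirty */

	while (dirty) {
		const unsigned i = (unsigned)ffs((int)dirty) - 1;

		dirty &= ~(1u << i);	/* mirror constbuf_dirty[s] &= ~(1 << i) */
		printf("validating constbuf slot %u\n", i);
	}
	return 0;
}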
Example #3
/*
 * xf86SetWeight sets scrp->weight, scrp->mask, scrp->offset, and for depths
 * greater than MAX_PSEUDO_DEPTH also scrp->rgbBits.
 */
Bool
xf86SetWeight(ScrnInfoPtr scrp, rgb weight, rgb mask)
{
    MessageType weightFrom = X_DEFAULT;

    scrp->weight.red = 0;
    scrp->weight.green = 0;
    scrp->weight.blue = 0;

    if (xf86Weight.red > 0 && xf86Weight.green > 0 && xf86Weight.blue > 0) {
	scrp->weight = xf86Weight;
	weightFrom = X_CMDLINE;
    } else if (scrp->display->weight.red > 0 && scrp->display->weight.green > 0
	       && scrp->display->weight.blue > 0) {
	scrp->weight = scrp->display->weight;
	weightFrom = X_CONFIG;
    } else if (weight.red > 0 && weight.green > 0 && weight.blue > 0) {
	scrp->weight = weight;
    } else {
	switch (scrp->depth) {
	case 1:
	case 4:
	case 8:
	    scrp->weight.red = scrp->weight.green =
		scrp->weight.blue = scrp->rgbBits;
	    break;
	case 15:
	    scrp->weight.red = scrp->weight.green = scrp->weight.blue = 5;
	    break;
	case 16:
	    scrp->weight.red = scrp->weight.blue = 5;
	    scrp->weight.green = 6;
	    break;
	case 18:
	    scrp->weight.red = scrp->weight.green = scrp->weight.blue = 6;
	    break;
	case 24:
	    scrp->weight.red = scrp->weight.green = scrp->weight.blue = 8;
	    break;
	case 30:
	    scrp->weight.red = scrp->weight.green = scrp->weight.blue = 10;
	    break;
	}
    }

    if (scrp->weight.red)
	xf86DrvMsg(scrp->scrnIndex, weightFrom, "RGB weight %d%d%d\n",
		   (int)scrp->weight.red, (int)scrp->weight.green,
		   (int)scrp->weight.blue);

    if (scrp->depth > MAX_PSEUDO_DEPTH &&
	(scrp->depth != scrp->weight.red + scrp->weight.green +
			scrp->weight.blue)) {
	xf86DrvMsg(scrp->scrnIndex, X_ERROR,
		   "Weight given (%d%d%d) is inconsistent with the "
		   "depth (%d)\n",
		   (int)scrp->weight.red, (int)scrp->weight.green,
		   (int)scrp->weight.blue, scrp->depth);
	return FALSE;
    }
    if (scrp->depth > MAX_PSEUDO_DEPTH && scrp->weight.red) {
	/*
	 * XXX Does this even mean anything for TrueColor visuals?
	 * If not, we shouldn't even be setting it here.  However, this
	 * matches the behaviour of 3.x versions of XFree86.
	 */
	scrp->rgbBits = scrp->weight.red;
	if (scrp->weight.green > scrp->rgbBits)
	    scrp->rgbBits = scrp->weight.green;
	if (scrp->weight.blue > scrp->rgbBits)
	    scrp->rgbBits = scrp->weight.blue;
    }

    /* Set the mask and offsets */
    if (mask.red == 0 || mask.green == 0 || mask.blue == 0) {
	/* Default to a setting common to PC hardware */
	scrp->offset.red = scrp->weight.green + scrp->weight.blue;
	scrp->offset.green = scrp->weight.blue;
	scrp->offset.blue = 0;
	scrp->mask.red = ((1 << scrp->weight.red) - 1) << scrp->offset.red;
	scrp->mask.green = ((1 << scrp->weight.green) - 1)
				<< scrp->offset.green;
	scrp->mask.blue = (1 << scrp->weight.blue) - 1;
    } else {
	/* Initialise to the values passed */
	scrp->mask.red = mask.red;
	scrp->mask.green = mask.green;
	scrp->mask.blue = mask.blue;
	scrp->offset.red = ffs(mask.red);
	scrp->offset.green = ffs(mask.green);
	scrp->offset.blue = ffs(mask.blue);
    }
    return TRUE;
}
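A small standalone sketch of the mask arithmetic in the default branch above, rebuilding the common 5-6-5 layout; note that ffs() on a mask returns a 1-based bit index, i.e. the shift count plus one.

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned red_w = 5, green_w = 6, blue_w = 5;
	unsigned red_off = green_w + blue_w;			  /* 11 */
	unsigned green_off = blue_w;				  /* 5 */
	unsigned red_mask = ((1u << red_w) - 1) << red_off;	  /* 0xf800 */
	unsigned green_mask = ((1u << green_w) - 1) << green_off; /* 0x07e0 */
	unsigned blue_mask = (1u << blue_w) - 1;		  /* 0x001f */

	printf("masks: r=0x%04x g=0x%04x b=0x%04x\n",
	       red_mask, green_mask, blue_mask);
	printf("ffs(red_mask) = %d, shift count = %d\n",
	       ffs((int)red_mask), ffs((int)red_mask) - 1);
	return 0;
}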
Example #4
op_plan *op_plan_core(char const *name, op_set set, int part_size,
                      int nargs, op_arg *args, int ninds, int *inds, int staging )
{
  //set exec length
  int exec_length = set->size;
  for(int i = 0; i< nargs; i++)
  {
    if(args[i].opt && args[i].idx != -1 && args[i].acc != OP_READ )
    {
      exec_length += set->exec_size;
      break;
    }
  }

  /* first look for an existing execution plan */

  int ip = 0, match = 0;

  while ( match == 0 && ip < OP_plan_index )
  {
    if ( ( strcmp ( name, OP_plans[ip].name ) == 0 )
        && ( set == OP_plans[ip].set )
        && ( nargs == OP_plans[ip].nargs )
        && ( ninds == OP_plans[ip].ninds )
        && ( part_size == OP_plans[ip].part_size ) )
    {
      match = 1;
      for ( int m = 0; m < nargs; m++ )
      {
        if (args[m].dat != NULL && OP_plans[ip].dats[m] != NULL)
          match = match
          && ( args[m].dat->size == OP_plans[ip].dats[m]->size )
          && ( args[m].dat->dim == OP_plans[ip].dats[m]->dim )
          && ( args[m].map == OP_plans[ip].maps[m] )
          && ( args[m].idx == OP_plans[ip].idxs[m] )
          && ( args[m].acc == OP_plans[ip].accs[m] );
        else
          match = match
          && ( args[m].dat == OP_plans[ip].dats[m] )
          && ( args[m].map == OP_plans[ip].maps[m] )
          && ( args[m].idx == OP_plans[ip].idxs[m] )
          && ( args[m].acc == OP_plans[ip].accs[m] );
      }
    }
    ip++;
  }

  if ( match )
  {
    ip--;
    if ( OP_diags > 3 )
      printf ( " old execution plan #%d\n", ip );
    OP_plans[ip].count++;
    return &( OP_plans[ip] );
  }
  else
  {
    if ( OP_diags > 1 )
      printf ( " new execution plan #%d for kernel %s\n", ip, name );
  }
  double wall_t1, wall_t2, cpu_t1, cpu_t2;
  op_timers_core(&cpu_t1, &wall_t1);
  /* work out worst case shared memory requirement per element */

  int maxbytes = 0;
  for ( int m = 0; m < nargs; m++ )
  {
    if ( args[m].opt && inds[m] >= 0 ) {
      if ((staging == OP_STAGE_INC && args[m].acc == OP_INC) ||
          (staging == OP_STAGE_ALL || staging == OP_STAGE_PERMUTE))
          maxbytes += args[m].dat->size;
    }
  }

  /* set blocksize and number of blocks; adaptive size based on 48kB of shared memory */

  int bsize = part_size;        // blocksize
  if ( bsize == 0 && maxbytes > 0 )
    bsize = MAX(( 48 * 1024 / ( 64 * maxbytes ) ) * 64, 256);
  else if (bsize == 0 && maxbytes == 0)
    bsize = 256;

  int nblocks = 0;

  int indirect_reduce = 0;
  for ( int m = 0; m < nargs; m++ ) {
    indirect_reduce |= (args[m].acc != OP_READ && args[m].argtype == OP_ARG_GBL);
  }
  indirect_reduce &= (ninds>0);

  /* Work out indirection arrays for OP_INCs */
  int ninds_staged = 0; //number of distinct (unique dat) indirect incs
  int *inds_staged = (int *)malloc(nargs*sizeof(int));
  int *inds_to_inds_staged = (int *)malloc(ninds*sizeof(int));

  for (int i = 0; i < nargs; i++) inds_staged[i] = -1;
  for (int i = 0; i < ninds; i++) inds_to_inds_staged[i] = -1;
  for (int i = 0; i < nargs; i++) {
    if (inds[i]>=0 && ((staging == OP_STAGE_INC && args[i].acc == OP_INC) ||
        (staging == OP_STAGE_ALL || staging == OP_STAGE_PERMUTE))) {
      if (inds_to_inds_staged[inds[i]] == -1) {
        inds_to_inds_staged[inds[i]] = ninds_staged;
        inds_staged[i] = ninds_staged;
        ninds_staged++;
      } else {
        inds_staged[i] = inds_to_inds_staged[inds[i]];
      }
    }
  }

  int *invinds_staged = (int *)malloc(ninds_staged*sizeof(int));
  for (int i = 0; i < ninds_staged; i++) invinds_staged[i] = -1;
  for (int i = 0; i < nargs; i++)
    if (inds[i]>=0 && ((staging == OP_STAGE_INC && args[i].acc == OP_INC) ||
        (staging == OP_STAGE_ALL || staging == OP_STAGE_PERMUTE)) && invinds_staged[inds_staged[i]] == -1)
      invinds_staged[inds_staged[i]] = i;

  int prev_offset = 0;
  int next_offset = 0;

  while (next_offset < exec_length)
  {
    prev_offset = next_offset;
    if (prev_offset + bsize >= set->core_size && prev_offset < set->core_size) {
      next_offset = set->core_size;
    } else if (prev_offset + bsize >= set->size && prev_offset < set->size && indirect_reduce) {
      next_offset = set->size;
    } else if (prev_offset + bsize >= exec_length && prev_offset < exec_length) {
      next_offset = exec_length;
    } else {
      next_offset = prev_offset + bsize;
    }
    nblocks++;
  }

  /* enlarge OP_plans array if needed */

  if ( ip == OP_plan_max )
  {
    //printf("allocating more memory for OP_plans %d\n", OP_plan_max);
    OP_plan_max += 10;
    OP_plans = ( op_plan * ) realloc ( OP_plans, OP_plan_max * sizeof ( op_plan ) );
    if ( OP_plans == NULL )
    {
      printf ( " op_plan error -- error reallocating memory for OP_plans\n" );
      exit ( -1 );
    }
  }

  /* allocate memory for new execution plan and store input arguments */

  OP_plans[ip].dats = ( op_dat * ) malloc ( nargs * sizeof ( op_dat ) );
  OP_plans[ip].idxs = ( int * ) malloc ( nargs * sizeof ( int ) );
  OP_plans[ip].optflags = ( int * ) malloc ( nargs * sizeof ( int ) );
  OP_plans[ip].maps = ( op_map * ) malloc ( nargs * sizeof ( op_map ) );
  OP_plans[ip].accs = ( op_access * ) malloc ( nargs * sizeof ( op_access ) );
  OP_plans[ip].inds_staged = ( int * ) malloc ( ninds_staged * sizeof ( int ) );

  OP_plans[ip].nthrcol = ( int * ) malloc ( nblocks * sizeof ( int ) );
  OP_plans[ip].thrcol = ( int * ) malloc ( exec_length * sizeof ( int ) );
  OP_plans[ip].col_reord = ( int * ) malloc ( exec_length * sizeof ( int ) );
  OP_plans[ip].offset = ( int * ) malloc ( nblocks * sizeof ( int ) );
  OP_plans[ip].ind_maps = ( int ** ) malloc ( ninds_staged * sizeof ( int * ) );
  OP_plans[ip].ind_offs = ( int * ) malloc ( nblocks * ninds_staged * sizeof ( int ) );
  OP_plans[ip].ind_sizes = ( int * ) malloc ( nblocks * ninds_staged * sizeof ( int ) );
  OP_plans[ip].nindirect = ( int * ) calloc ( ninds, sizeof ( int ) );
  OP_plans[ip].loc_maps = ( short ** ) malloc ( nargs * sizeof ( short * ) );
  OP_plans[ip].nelems = ( int * ) malloc ( nblocks * sizeof ( int ) );
  OP_plans[ip].ncolblk = ( int * ) calloc ( exec_length, sizeof ( int ) );  /* max possibly needed */
  OP_plans[ip].blkmap = ( int * ) calloc ( nblocks, sizeof ( int ) );

  int *offsets = (int *)malloc((ninds_staged+1)*sizeof(int));
  offsets[0] = 0;
  for ( int m = 0; m < ninds_staged; m++ ) {
    int count = 0;
    for ( int m2 = 0; m2 < nargs; m2++ )
      if ( inds_staged[m2] == m )
        count++;
    offsets[m+1] = offsets[m] + count;
  }
  OP_plans[ip].ind_map = ( int * ) malloc ( offsets[ninds_staged] * exec_length * sizeof ( int ) );
  for ( int m = 0; m < ninds_staged; m++ ) {
    OP_plans[ip].ind_maps[m] = &OP_plans[ip].ind_map[exec_length*offsets[m]];
  }
  free(offsets);

  int counter = 0;
  for ( int m = 0; m < nargs; m++ ) {
    if ( inds_staged[m] >= 0 )
      counter++;
    else
      OP_plans[ip].loc_maps[m] = NULL;

    OP_plans[ip].dats[m] = args[m].dat;
    OP_plans[ip].idxs[m] = args[m].idx;
    OP_plans[ip].optflags[m] = args[m].opt;
    OP_plans[ip].maps[m] = args[m].map;
    OP_plans[ip].accs[m] = args[m].acc;
  }

  OP_plans[ip].loc_map = ( short * ) malloc ( counter * exec_length * sizeof ( short ) );
  counter = 0;
  for ( int m = 0; m < nargs; m++ ) {
    if ( inds_staged[m] >= 0 ) {
      OP_plans[ip].loc_maps[m] = &OP_plans[ip].loc_map[exec_length*(counter)];
      counter++;
    }
  }


  OP_plans[ip].name = name;
  OP_plans[ip].set = set;
  OP_plans[ip].nargs = nargs;
  OP_plans[ip].ninds = ninds;
  OP_plans[ip].ninds_staged = ninds_staged;
  OP_plans[ip].part_size = part_size;
  OP_plans[ip].nblocks = nblocks;
  OP_plans[ip].ncolors_core = 0;
  OP_plans[ip].ncolors_owned = 0;
  OP_plans[ip].count = 1;
  OP_plans[ip].inds_staged = inds_staged;

  OP_plan_index++;

  /* define aliases */

  op_dat * dats = OP_plans[ip].dats;
  int * idxs = OP_plans[ip].idxs;
  op_map * maps = OP_plans[ip].maps;
  op_access * accs = OP_plans[ip].accs;

  int * offset = OP_plans[ip].offset;
  int * nelems = OP_plans[ip].nelems;
  int ** ind_maps = OP_plans[ip].ind_maps;
  int * ind_offs = OP_plans[ip].ind_offs;
  int * ind_sizes = OP_plans[ip].ind_sizes;
  int * nindirect = OP_plans[ip].nindirect;

  /* allocate working arrays */
  uint **work;
  work = (uint **)malloc(ninds * sizeof(uint *));

  for ( int m = 0; m < ninds; m++ )
  {
    int m2 = 0;
    while ( inds[m2] != m )
      m2++;
    if (args[m2].opt == 0) {
      work[m] = NULL;
      continue;
    }

    int to_size = (maps[m2]->to)->exec_size + (maps[m2]->to)->nonexec_size + (maps[m2]->to)->size;
    work[m] = ( uint * )malloc( to_size * sizeof (uint));
  }

  int *work2;
  work2 = ( int * ) malloc ( nargs * bsize * sizeof ( int ) );  /* max possibly needed */

  /* process set one block at a time */

  float total_colors = 0;

  prev_offset = 0;
  next_offset = 0;
  for ( int b = 0; b < nblocks; b++ )
  {
    prev_offset = next_offset;
    if (prev_offset + bsize >= set->core_size && prev_offset < set->core_size) {
      next_offset = set->core_size;
    } else if (prev_offset + bsize >= set->size && prev_offset < set->size && indirect_reduce) {
      next_offset = set->size;
    } else if (prev_offset + bsize >= exec_length && prev_offset < exec_length) {
      next_offset = exec_length;
    } else {
      next_offset = prev_offset + bsize;
    }
    int bs = next_offset-prev_offset;

    offset[b] = prev_offset;  /* offset for block */
    nelems[b] = bs;   /* size of block */

    /* loop over indirection sets */
    for ( int m = 0; m < ninds; m++ )
    {
      int m2 = 0;
      while ( inds[m2] != m )
        m2++;
      int m3 = inds_staged[m2];
      if (m3 < 0) continue;
      if (args[m2].opt == 0) {
        if (b==0) {
          ind_offs[m3+b*ninds_staged] = 0;
          ind_sizes[m3+b*ninds_staged] = 0;
        } else {
          ind_offs[m3+b*ninds_staged] = ind_offs[m3+(b-1)*ninds_staged];
          ind_sizes[m3+b*ninds_staged] = 0;
        }
        continue;
      }
      /* build the list of elements indirectly referenced in this block */

      int ne = 0;/* number of elements */
      for ( int m2 = 0; m2 < nargs; m2++ )
      {
        if ( inds[m2] == m )
        {
          for ( int e = prev_offset; e < next_offset; e++ )
            work2[ne++] = maps[m2]->map[idxs[m2] + e * maps[m2]->dim];
        }
      }

      /* sort them, then eliminate duplicates */

      qsort(work2, ne, sizeof(int), comp);

      int nde = 0;
      int p = 0;
      while ( p < ne )
      {
        work2[nde] = work2[p];
        while ( p < ne && work2[p] == work2[nde] )
          p++;
        nde++;
      }
      ne = nde; /* number of distinct elements */

      /*
         if (OP_diags > 5) { printf(" indirection set %d: ",m); for (int e=0; e<ne; e++) printf("
         %d",work2[e]); printf(" \n"); } */


      /* store mapping and renumbered mappings in execution plan */

      for ( int e = 0; e < ne; e++ )
      {
        ind_maps[m3][nindirect[m]++] = work2[e];
        work[m][work2[e]] = e;  // inverse mapping
      }

      for ( int m2 = 0; m2 < nargs; m2++ )
      {
        if ( inds[m2] == m )
        {
          for ( int e = prev_offset; e < next_offset; e++ )
            OP_plans[ip].loc_maps[m2][e] = (short)(work[m][maps[m2]->map[idxs[m2] + e * maps[m2]->dim]]);
        }
      }

      if ( b == 0 )
      {
        ind_offs[m3 + b * ninds_staged] = 0;
        ind_sizes[m3 + b * ninds_staged] = nindirect[m];
      }
      else
      {
        ind_offs[m3 + b * ninds_staged] = ind_offs[m3 + ( b - 1 ) * ninds_staged]
          + ind_sizes[m3 + ( b - 1 ) * ninds_staged];
        ind_sizes[m3 + b * ninds_staged] = nindirect[m] - ind_offs[m3 + b * ninds_staged];
      }
    }

    /* now colour main set elements */

    for ( int e = prev_offset; e < next_offset; e++ )
      OP_plans[ip].thrcol[e] = -1;

    int repeat = 1;
    int ncolor = 0;
    int ncolors = 0;

    while ( repeat )
    {
      repeat = 0;

      for ( int m = 0; m < nargs; m++ )
      {
        if ( inds[m] >= 0 && args[m].opt )
          for ( int e = prev_offset; e < next_offset; e++ )
            work[inds[m]][maps[m]->map[idxs[m] + e * maps[m]->dim]] = 0; /* zero out color array */
      }

      for ( int e = prev_offset; e < next_offset; e++ )
      {
        if ( OP_plans[ip].thrcol[e] == -1 )
        {
          int mask = 0;
          for ( int m = 0; m < nargs; m++ )
            if ( inds[m] >= 0 && (accs[m] == OP_INC || accs[m] == OP_RW) && args[m].opt )
              mask |= work[inds[m]][maps[m]->map[idxs[m] + e * maps[m]->dim]]; /* set bits of mask */

          int color = ffs ( ~mask ) - 1;  /* find first bit not set */
          if ( color == -1 )
          {                     /* run out of colors on this pass */
            repeat = 1;
          }
          else
          {
            OP_plans[ip].thrcol[e] = ncolor + color;
            mask = 1 << color;
            ncolors = MAX ( ncolors, ncolor + color + 1 );

            for ( int m = 0; m < nargs; m++ )
              if ( inds[m] >= 0 && (accs[m] == OP_INC || accs[m] == OP_RW) && args[m].opt )
                work[inds[m]][maps[m]->map[idxs[m] + e * maps[m]->dim]] |= mask; /* set color bit */
          }
        }
      }

      ncolor += 32;             /* increment base level */
    }

    OP_plans[ip].nthrcol[b] = ncolors;  /* number of thread colors in this block */
    total_colors += ncolors;

    //if(ncolors>1) printf(" number of colors in this block = %d \n",ncolors);
  }

  /* create element permutation by color */
  if (staging == OP_STAGE_PERMUTE) {
    printf("Creating permuation for %s\n", name);
    op_keyvalue *kv = (op_keyvalue *)malloc(bsize * sizeof(op_keyvalue));
    for (int b = 0; b < nblocks; b++) {
      for (int e = 0; e < nelems[b]; e++) {
        kv[e].key = OP_plans[ip].thrcol[offset[b]+e];
        kv[e].value = e;
      }
      qsort ( kv, nelems[b], sizeof ( op_keyvalue ), comp2 );
      for (int e = 0; e < nelems[b]; e++) {
        OP_plans[ip].thrcol[offset[b]+e] = kv[e].key;
        OP_plans[ip].col_reord[offset[b]+e] = kv[e].value;
      }
    }
  }

  /* color the blocks, after initialising colors to 0 */

  int * blk_col;

  blk_col = ( int * ) malloc ( nblocks * sizeof ( int ) );
  for ( int b = 0; b < nblocks; b++ )
    blk_col[b] = -1;

  int repeat = 1;
  int ncolor = 0;
  int ncolors = 0;

  while ( repeat )
  {
    repeat = 0;

    for ( int m = 0; m < nargs; m++ )
    {
      if ( inds[m] >= 0 && args[m].opt)
      {
        int to_size = (maps[m]->to)->exec_size + (maps[m]->to)->nonexec_size + (maps[m]->to)->size;
        for ( int e = 0; e < to_size; e++ )
          work[inds[m]][e] = 0; // zero out color arrays
      }
    }
    prev_offset = 0;
    next_offset = 0;
    for ( int b = 0; b < nblocks; b++ )
    {
      prev_offset = next_offset;

      if (prev_offset + bsize >= set->core_size && prev_offset < set->core_size) {
        next_offset = set->core_size;
      } else if (prev_offset + bsize >= set->size && prev_offset < set->size && indirect_reduce) {
        next_offset = set->size;
      } else if (prev_offset + bsize >= exec_length && prev_offset < exec_length) {
        next_offset = exec_length;
      } else {
        next_offset = prev_offset + bsize;
      }
      if ( blk_col[b] == -1 )
      { // color not yet assigned to block
        uint mask = 0;
        if (next_offset > set->core_size) { //should not use block colors from the core set when doing the non_core ones
          if (prev_offset <= set->core_size) OP_plans[ip].ncolors_core = ncolors;
          for (int shifter = 0; shifter < OP_plans[ip].ncolors_core; shifter++) mask |= 1<<shifter;
          if (prev_offset == set->size && indirect_reduce) OP_plans[ip].ncolors_owned = ncolors;
          for (int shifter = OP_plans[ip].ncolors_core; indirect_reduce && shifter < OP_plans[ip].ncolors_owned; shifter++) mask |= 1<<shifter;
        }

        for ( int m = 0; m < nargs; m++ )
        {
          if ( inds[m] >= 0 && (accs[m] == OP_INC || accs[m] == OP_RW) && args[m].opt )
            for ( int e = prev_offset; e < next_offset; e++ )
              mask |= work[inds[m]][maps[m]->map[idxs[m] +
                e * maps[m]->dim]]; // set bits of mask
        }

        int color = ffs( ~mask ) - 1; // find first bit not set
        if ( color == -1 )
        { //run out of colors on this pass
          repeat = 1;
        }
        else
        {
          blk_col[b] = ncolor + color;
          mask = 1 << color;
          ncolors = MAX( ncolors, ncolor + color + 1 );

          for ( int m = 0; m < nargs; m++ )
          {
            if ( inds[m] >= 0 && (accs[m] == OP_INC || accs[m] == OP_RW) && args[m].opt )
              for ( int e = prev_offset; e < next_offset; e++ )
                work[inds[m]][maps[m]->map[idxs[m] +
                  e * maps[m]->dim]] |= mask;
          }
        }
      }
    }

    ncolor += 32;               // increment base level
  }

  /* store block mapping and number of blocks per color */

  if (indirect_reduce && OP_plans[ip].ncolors_owned == 0) OP_plans[ip].ncolors_owned = ncolors; //no MPI, so get the reduction arrays after everyting is done
  OP_plans[ip].ncolors = ncolors;

  /*for(int col = 0; col = OP_plans[ip].ncolors;col++) //should initialize to zero because calloc returns garbage!!
    {
    OP_plans[ip].ncolblk[col] = 0;
    }*/

  for ( int b = 0; b < nblocks; b++ )
    OP_plans[ip].ncolblk[blk_col[b]]++; // number of blocks of each color

  for ( int c = 1; c < ncolors; c++ )
    OP_plans[ip].ncolblk[c] += OP_plans[ip].ncolblk[c - 1]; // cumsum

  for ( int c = 0; c < ncolors; c++ )
    work2[c] = 0;

  for ( int b = 0; b < nblocks; b++ )
  {
    int c = blk_col[b];
    int b2 = work2[c]; // number of preceding blocks of this color
    if ( c > 0 )
      b2 += OP_plans[ip].ncolblk[c - 1];  // plus previous colors

    OP_plans[ip].blkmap[b2] = b;

    work2[c]++; //increment counter
  }

  for ( int c = ncolors - 1; c > 0; c-- )
    OP_plans[ip].ncolblk[c] -= OP_plans[ip].ncolblk[c - 1]; // undo cumsum


  /* reorder blocks by color? */

  /* work out shared memory requirements */
  OP_plans[ip].nsharedCol = (int *)malloc(ncolors * sizeof(int));
  float total_shared = 0;
  for (int col = 0; col < ncolors; col++) {
    OP_plans[ip].nsharedCol[col] = 0;
    for ( int b = 0; b < nblocks; b++ ) {
      if (blk_col[b] ==  col) {
        int nbytes = 0;
        for ( int m = 0; m < ninds_staged; m++ )  {
          int m2 = 0;
          while ( inds_staged[m2] != m )
            m2++;
          if (args[m2].opt==0) continue;

          nbytes += ROUND_UP_64 ( ind_sizes[m + b * ninds_staged] * dats[m2]->size );
        }
        OP_plans[ip].nsharedCol[col] = MAX ( OP_plans[ip].nsharedCol[col], nbytes );
        total_shared += nbytes;
      }
    }
  }

  OP_plans[ip].nshared = 0;
  total_shared = 0;

  for ( int b = 0; b < nblocks; b++ )
  {
    int nbytes = 0;
    for ( int m = 0; m < ninds_staged; m++ )
    {
      int m2 = 0;
      while ( inds_staged[m2] != m )
        m2++;
      if (args[m2].opt==0) continue;

      nbytes += ROUND_UP_64 ( ind_sizes[m + b * ninds_staged] * dats[m2]->size );
    }
    OP_plans[ip].nshared = MAX ( OP_plans[ip].nshared, nbytes );
    total_shared += nbytes;
  }

  /* work out total bandwidth requirements */

  OP_plans[ip].transfer = 0;
  OP_plans[ip].transfer2 = 0;
  float transfer3 = 0;

  for ( int b = 0; b < nblocks; b++ )
  {
    for ( int m = 0; m < nargs; m++ ) //for each argument
    {
      if (args[m].opt) {
        if ( inds[m] < 0 ) //if it is directly addressed
        {
          float fac = 2.0f;
          if ( accs[m] == OP_READ ) //if you only read it - only write???
            fac = 1.0f;
          if ( dats[m] != NULL )
          {
            OP_plans[ip].transfer += fac * nelems[b] * dats[m]->size; //cost of reading it all
            OP_plans[ip].transfer2 += fac * nelems[b] * dats[m]->size;
            transfer3 += fac * nelems[b] * dats[m]->size;
          }
        }
        else //if it is indirectly addressed: cost of reading the pointer to it
        {
          OP_plans[ip].transfer += nelems[b] * sizeof ( short );
          OP_plans[ip].transfer2 += nelems[b] * sizeof ( short );
          transfer3 += nelems[b] * sizeof ( short );
        }
      }
    }
    for ( int m = 0; m < ninds; m++ ) //for each indirect mapping
    {
      int m2 = 0;
      while ( inds[m2] != m ) //find the first argument that uses this mapping
        m2++;
      if ( args[m2].opt == 0 ) continue;
      float fac = 2.0f;
      if ( accs[m2] == OP_READ ) //only read it (write??)
        fac = 1.0f;
      OP_plans[ip].transfer += fac * ind_sizes[m + b * ninds] * dats[m2]->size; //simply read all data one by one

      /* work out how many cache lines are used by indirect addressing */

      int i_map, l_new, l_old;
      int e0 = ind_offs[m + b * ninds]; //where it starts
      int e1 = e0 + ind_sizes[m + b * ninds]; //where it ends

      l_old = -1;

      for ( int e = e0; e < e1; e++ ) //iterate through every indirectly accessed data element
      {
        i_map = ind_maps[m][e]; //the pointer to the data element
        l_new = ( i_map * dats[m2]->size ) / OP_cache_line_size; //which cache line it is on (full size, dim*sizeof(type))
        if ( l_new > l_old ) //if it is on a further cache line (that is not yet loaded, - i_map is ordered)
          OP_plans[ip].transfer2 += fac * OP_cache_line_size; //load the cache line
        l_old = l_new;
        l_new = ( ( i_map + 1 ) * dats[m2]->size - 1 ) / OP_cache_line_size; //the last byte of the data
        OP_plans[ip].transfer2 += fac * ( l_new - l_old ) * OP_cache_line_size; //again, if not loaded, load it (can be multiple cache lines)
        l_old = l_new;
      }

      l_old = -1;

      for ( int e = e0; e < e1; e++ )
      {
        i_map = ind_maps[m][e]; //pointer to the data element
        l_new = ( i_map * dats[m2]->size ) / ( dats[m2]->dim * OP_cache_line_size ); //which cache line the first dimension of the data is on
        if ( l_new > l_old )
          transfer3 += fac * dats[m2]->dim * OP_cache_line_size; //if not loaded yet, load all cache lines
        l_old = l_new;
        l_new = ( ( i_map + 1 ) * dats[m2]->size - 1 ) / ( dats[m2]->dim * OP_cache_line_size ); //primitve type's last byte
        transfer3 += fac * ( l_new - l_old ) * dats[m2]->dim * OP_cache_line_size; //load it
        l_old = l_new;
      }

      /* also include mappings to load/store data */

      fac = 1.0f;
      if ( accs[m2] == OP_RW )
        fac = 2.0f;
      OP_plans[ip].transfer += fac * ind_sizes[m + b * ninds] * sizeof ( int );
      OP_plans[ip].transfer2 += fac * ind_sizes[m + b * ninds] * sizeof ( int );
      transfer3 += fac * ind_sizes[m + b * ninds] * sizeof ( int );
    }
  }

  /* print out useful information */

  if ( OP_diags > 1 )
  {
    printf( " number of blocks       = %d \n", nblocks );
    printf( " number of block colors = %d \n", OP_plans[ip].ncolors );
    printf( " maximum block size     = %d \n", bsize );
    printf( " average thread colors  = %.2f \n", total_colors / nblocks );
    printf( " shared memory required = ");
    for (int i = 0; i < ncolors-1; i++) printf(" %.2f KB,", OP_plans[ip].nsharedCol[i] / 1024.0f );
    printf(" %.2f KB\n", OP_plans[ip].nsharedCol[ncolors-1] / 1024.0f );
    printf( " average data reuse     = %.2f \n", maxbytes * ( exec_length / total_shared ) );
    printf( " data transfer (used)   = %.2f MB \n",
        OP_plans[ip].transfer / ( 1024.0f * 1024.0f ) );
    printf( " data transfer (total)  = %.2f MB \n",
        OP_plans[ip].transfer2 / ( 1024.0f * 1024.0f ) );
    printf( " SoA/AoS transfer ratio = %.2f \n\n", transfer3 / OP_plans[ip].transfer2 );
  }

  /* validate plan info */

  op_plan_check ( OP_plans[ip], ninds_staged, inds_staged );

  /* free work arrays */

  for ( int m = 0; m < ninds; m++ )
    free ( work[m] );
  free ( work );
  free ( work2 );
  free ( blk_col );
  free(inds_to_inds_staged);
  free(invinds_staged);

  op_timers_core(&cpu_t2, &wall_t2);
  for (int i = 0; i < OP_kern_max; i++) {
    if (strcmp(name, OP_kernels[i].name)==0) {
      OP_kernels[i].plan_time += wall_t2-wall_t1;
      break;
    }
  }
  /* return pointer to plan */
  OP_plan_time += wall_t2-wall_t1;
  return &( OP_plans[ip] );
}
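A minimal sketch of the greedy coloring idiom op_plan_core uses in both the thread- and block-coloring loops: OR together the colors already taken, then grab the first free one with ffs(~mask) - 1. The conflict pattern here is made up (every element conflicts with all previous ones).

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned taken = 0;	/* bit c set => color c already in use */
	int e;

	for (e = 0; e < 5; e++) {
		int color = ffs((int)~taken) - 1; /* first bit NOT set */

		if (color == -1) {
			/* all 32 bits used; the real planner starts
			 * another pass with the base level ncolor += 32 */
			break;
		}
		taken |= 1u << color;
		printf("element %d -> color %d\n", e, color);
	}
	return 0;
}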
Example #5
static void
out_debug_aranges (segT aranges_seg, segT info_seg)
{
  unsigned int addr_size = sizeof_address;
  addressT size, skip;
  struct line_seg *s;
  expressionS expr;
  char *p;

  size = 4 + 2 + 4 + 1 + 1;

  skip = 2 * addr_size - (size & (2 * addr_size - 1));
  if (skip == 2 * addr_size)
    skip = 0;
  size += skip;

  for (s = all_segs; s; s = s->next)
    size += 2 * addr_size;

  size += 2 * addr_size;

  subseg_set (aranges_seg, 0);

  /* Length of the compilation unit.  */
  out_four (size - 4);

  /* Version.  */
  out_two (2);

  /* Offset to .debug_info.  */
  /* ??? sizeof_offset */
  TC_DWARF2_EMIT_OFFSET (section_symbol (info_seg), 4);

  /* Size of an address (offset portion).  */
  out_byte (addr_size);

  /* Size of a segment descriptor.  */
  out_byte (0);

  /* Align the header.  */
  if (skip)
    frag_align (ffs (2 * addr_size) - 1, 0, 0);

  for (s = all_segs; s; s = s->next)
    {
      fragS *frag;
      symbolS *beg, *end;

      frag = first_frag_for_seg (s->seg);
      beg = symbol_temp_new (s->seg, 0, frag);
      s->text_start = beg;

      frag = last_frag_for_seg (s->seg);
      end = symbol_temp_new (s->seg, get_frag_fix (frag), frag);
      s->text_end = end;

      expr.X_op = O_symbol;
      expr.X_add_symbol = beg;
      expr.X_add_number = 0;
      emit_expr (&expr, addr_size);

      expr.X_op = O_subtract;
      expr.X_add_symbol = end;
      expr.X_op_symbol = beg;
      expr.X_add_number = 0;
      emit_expr (&expr, addr_size);
    }

  p = frag_more (2 * addr_size);
  md_number_to_chars (p, 0, addr_size);
  md_number_to_chars (p + addr_size, 0, addr_size);
}
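A small sketch of the alignment arithmetic above: frag_align takes a power-of-two exponent, and for a power of two x, ffs(x) - 1 equals log2(x), so ffs(2 * addr_size) - 1 yields the exponent for a 2*addr_size-byte boundary.

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned addr_size;

	for (addr_size = 2; addr_size <= 8; addr_size *= 2) {
		unsigned align = 2 * addr_size;

		printf("align to %2u bytes -> exponent %d\n",
		       align, ffs((int)align) - 1);
	}
	return 0;
}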
Example #6
int
main(int argc, char *argv[])
{
	struct stat sb;
	ino_t ino;
	int dirty;
	union dinode *dp;
	struct fstab *dt;
	char *map, *mntpt;
	int ch, mode, mntflags;
	int i, anydirskipped, bflag = 0, Tflag = 0, honorlevel = 1;
	int just_estimate = 0;
	ino_t maxino;
	char *tmsg;

	spcl.c_date = _time_to_time64(time(NULL));

	tsize = 0;	/* Default later, based on 'c' option for cart tapes */
	dumpdates = _PATH_DUMPDATES;
	popenout = NULL;
	tape = NULL;
	temp = _PATH_DTMP;
	if (TP_BSIZE / DEV_BSIZE == 0 || TP_BSIZE % DEV_BSIZE != 0)
		quit("TP_BSIZE must be a multiple of DEV_BSIZE\n");
	level = 0;
	rsync_friendly = 0;

	if (argc < 2)
		usage();

	obsolete(&argc, &argv);
	while ((ch = getopt(argc, argv,
	    "0123456789aB:b:C:cD:d:f:h:LnP:RrSs:T:uWw")) != -1)
		switch (ch) {
		/* dump level */
		case '0': case '1': case '2': case '3': case '4':
		case '5': case '6': case '7': case '8': case '9':
			level = 10 * level + ch - '0';
			break;

		case 'a':		/* `auto-size', Write to EOM. */
			unlimited = 1;
			break;

		case 'B':		/* blocks per output file */
			blocksperfile = numarg("number of blocks per file",
			    1L, 0L);
			break;

		case 'b':		/* blocks per tape write */
			ntrec = numarg("number of blocks per write",
			    1L, 1000L);
			break;

		case 'C':
			cachesize = numarg("cachesize", 0, 0) * 1024 * 1024;
			break;

		case 'c':		/* Tape is cart. not 9-track */
			cartridge = 1;
			break;

		case 'D':
			dumpdates = optarg;
			break;

		case 'd':		/* density, in bits per inch */
			density = numarg("density", 10L, 327670L) / 10;
			if (density >= 625 && !bflag)
				ntrec = HIGHDENSITYTREC;
			break;

		case 'f':		/* output file */
			if (popenout != NULL)
				errx(X_STARTUP, "You cannot use the P and f "
				    "flags together.\n");
			tape = optarg;
			break;

		case 'h':
			honorlevel = numarg("honor level", 0L, 10L);
			break;

		case 'L':
			snapdump = 1;
			break;

		case 'n':		/* notify operators */
			notify = 1;
			break;

		case 'P':
			if (tape != NULL)
				errx(X_STARTUP, "You cannot use the P and f "
				    "flags together.\n");
			popenout = optarg;
			break;

		case 'r': /* store slightly less data to be friendly to rsync */
			if (rsync_friendly < 1)
				rsync_friendly = 1;
			break;

		case 'R': /* store even less data to be friendlier to rsync */
			if (rsync_friendly < 2)
				rsync_friendly = 2;
			break;

		case 'S':               /* exit after estimating # of tapes */
			just_estimate = 1;
			break;

		case 's':		/* tape size, feet */
			tsize = numarg("tape size", 1L, 0L) * 12 * 10;
			break;

		case 'T':		/* time of last dump */
			spcl.c_ddate = unctime(optarg);
			if (spcl.c_ddate < 0) {
				(void)fprintf(stderr, "bad time \"%s\"\n",
				    optarg);
				exit(X_STARTUP);
			}
			Tflag = 1;
			lastlevel = -1;
			break;

		case 'u':		/* update /etc/dumpdates */
			uflag = 1;
			break;

		case 'W':		/* what to do */
		case 'w':
			lastdump(ch);
			exit(X_FINOK);	/* do nothing else */

		default:
			usage();
		}
	argc -= optind;
	argv += optind;

	if (argc < 1) {
		(void)fprintf(stderr, "Must specify disk or file system\n");
		exit(X_STARTUP);
	}
	disk = *argv++;
	argc--;
	if (argc >= 1) {
		(void)fprintf(stderr, "Unknown arguments to dump:");
		while (argc--)
			(void)fprintf(stderr, " %s", *argv++);
		(void)fprintf(stderr, "\n");
		exit(X_STARTUP);
	}
	if (rsync_friendly && (level > 0)) {
		(void)fprintf(stderr, "%s %s\n", "rsync friendly options",
		    "can be used only with level 0 dumps.");
		exit(X_STARTUP);
	}
	if (Tflag && uflag) {
	        (void)fprintf(stderr,
		    "You cannot use the T and u flags together.\n");
		exit(X_STARTUP);
	}
	if (popenout) {
		tape = "child pipeline process";
	} else if (tape == NULL && (tape = getenv("TAPE")) == NULL)
		tape = _PATH_DEFTAPE;
	if (strcmp(tape, "-") == 0) {
		pipeout++;
		tape = "standard output";
	}

	if (blocksperfile)
		blocksperfile = blocksperfile / ntrec * ntrec; /* round down */
	else if (!unlimited) {
		/*
		 * Determine how to default tape size and density
		 *
		 *         	density				tape size
		 * 9-track	1600 bpi (160 bytes/.1")	2300 ft.
		 * 9-track	6250 bpi (625 bytes/.1")	2300 ft.
		 * cartridge	8000 bpi (100 bytes/.1")	1700 ft.
		 *						(450*4 - slop)
		 * hilit19 hits again: "
		 */
		if (density == 0)
			density = cartridge ? 100 : 160;
		if (tsize == 0)
			tsize = cartridge ? 1700L*120L : 2300L*120L;
	}

	if (strchr(tape, ':')) {
		host = tape;
		tape = strchr(host, ':');
		*tape++ = '\0';
#ifdef RDUMP
		if (index(tape, '\n')) {
		    (void)fprintf(stderr, "invalid characters in tape\n");
		    exit(X_STARTUP);
		}
		if (rmthost(host) == 0)
			exit(X_STARTUP);
#else
		(void)fprintf(stderr, "remote dump not enabled\n");
		exit(X_STARTUP);
#endif
	}
	(void)setuid(getuid()); /* rmthost() is the only reason to be setuid */

	if (signal(SIGHUP, SIG_IGN) != SIG_IGN)
		signal(SIGHUP, sig);
	if (signal(SIGTRAP, SIG_IGN) != SIG_IGN)
		signal(SIGTRAP, sig);
	if (signal(SIGFPE, SIG_IGN) != SIG_IGN)
		signal(SIGFPE, sig);
	if (signal(SIGBUS, SIG_IGN) != SIG_IGN)
		signal(SIGBUS, sig);
	if (signal(SIGSEGV, SIG_IGN) != SIG_IGN)
		signal(SIGSEGV, sig);
	if (signal(SIGTERM, SIG_IGN) != SIG_IGN)
		signal(SIGTERM, sig);
	if (signal(SIGINT, interrupt) == SIG_IGN)
		signal(SIGINT, SIG_IGN);

	dump_getfstab();	/* /etc/fstab snarfed */
	/*
	 *	disk can be either the full special file name,
	 *	the suffix of the special file name,
	 *	the special name missing the leading '/',
	 *	the file system name with or without the leading '/'.
	 */
	dt = fstabsearch(disk);
	if (dt != NULL) {
		disk = rawname(dt->fs_spec);
 		if (disk == NULL)
 			errx(X_STARTUP, "%s: unknown file system", dt->fs_spec);
		(void)strncpy(spcl.c_dev, dt->fs_spec, NAMELEN);
		(void)strncpy(spcl.c_filesys, dt->fs_file, NAMELEN);
	} else {
		(void)strncpy(spcl.c_dev, disk, NAMELEN);
		(void)strncpy(spcl.c_filesys, "an unlisted file system",
		    NAMELEN);
	}
	spcl.c_dev[NAMELEN-1]='\0';
	spcl.c_filesys[NAMELEN-1]='\0';

	if ((mntpt = getmntpt(disk, &mntflags)) != 0) {
		if (mntflags & MNT_RDONLY) {
			if (snapdump != 0) {
				msg("WARNING: %s\n",
				    "-L ignored for read-only filesystem.");
				snapdump = 0;
			}
		} else if (snapdump == 0) {
			msg("WARNING: %s\n",
			    "should use -L when dumping live read-write "
			    "filesystems!");
		} else {
			char snapname[BUFSIZ], snapcmd[BUFSIZ];

			snprintf(snapname, sizeof snapname, "%s/.snap", mntpt);
			if ((stat(snapname, &sb) < 0) || !S_ISDIR(sb.st_mode)) {
				msg("WARNING: %s %s\n",
				    "-L requested but snapshot location",
				    snapname);
				msg("         %s: %s\n",
				    "is not a directory",
				    "dump downgraded, -L ignored");
				snapdump = 0;
			} else {
				snprintf(snapname, sizeof snapname,
				    "%s/.snap/dump_snapshot", mntpt);
				snprintf(snapcmd, sizeof snapcmd, "%s %s %s",
				    _PATH_MKSNAP_FFS, mntpt, snapname);
				unlink(snapname);
				if (system(snapcmd) != 0)
					errx(X_STARTUP, "Cannot create %s: %s\n",
					    snapname, strerror(errno));
				if ((diskfd = open(snapname, O_RDONLY)) < 0) {
					unlink(snapname);
					errx(X_STARTUP, "Cannot open %s: %s\n",
					    snapname, strerror(errno));
				}
				unlink(snapname);
				if (fstat(diskfd, &sb) != 0)
					err(X_STARTUP, "%s: stat", snapname);
				spcl.c_date = _time_to_time64(sb.st_mtime);
			}
		}
	} else if (snapdump != 0) {
		msg("WARNING: Cannot use -L on an unmounted filesystem.\n");
		snapdump = 0;
	}
	if (snapdump == 0) {
		if ((diskfd = open(disk, O_RDONLY)) < 0)
			err(X_STARTUP, "Cannot open %s", disk);
		if (fstat(diskfd, &sb) != 0)
			err(X_STARTUP, "%s: stat", disk);
		if (S_ISDIR(sb.st_mode))
			errx(X_STARTUP, "%s: unknown file system", disk);
	}

	(void)strcpy(spcl.c_label, "none");
	(void)gethostname(spcl.c_host, NAMELEN);
	spcl.c_level = level;
	spcl.c_type = TS_TAPE;
	if (rsync_friendly) {
		/* don't store real dump times */
		spcl.c_date = 0;
		spcl.c_ddate = 0;
	}
	if (spcl.c_date == 0) {
		tmsg = "the epoch\n";
	} else {
		time_t t = _time64_to_time(spcl.c_date);
		tmsg = ctime(&t);
	}
	msg("Date of this level %d dump: %s", level, tmsg);

	if (!Tflag && (!rsync_friendly))
	        getdumptime();		/* /etc/dumpdates snarfed */
	if (spcl.c_ddate == 0) {
		tmsg = "the epoch\n";
	} else {
		time_t t = _time64_to_time(spcl.c_ddate);
		tmsg = ctime(&t);
	}
	if (lastlevel < 0)
		msg("Date of last (level unknown) dump: %s", tmsg);
	else
		msg("Date of last level %d dump: %s", lastlevel, tmsg);

	msg("Dumping %s%s ", snapdump ? "snapshot of ": "", disk);
	if (dt != NULL)
		msgtail("(%s) ", dt->fs_file);
	if (host)
		msgtail("to %s on host %s\n", tape, host);
	else
		msgtail("to %s\n", tape);

	sync();
	sblock = (struct fs *)sblock_buf;
	for (i = 0; sblock_try[i] != -1; i++) {
		sblock->fs_fsize = SBLOCKSIZE; /* needed in bread */
		bread(sblock_try[i] >> dev_bshift, (char *) sblock, SBLOCKSIZE);
		if ((sblock->fs_magic == FS_UFS1_MAGIC ||
		     (sblock->fs_magic == FS_UFS2_MAGIC &&
		      sblock->fs_sblockloc == sblock_try[i])) &&
		    sblock->fs_bsize <= MAXBSIZE &&
		    sblock->fs_bsize >= sizeof(struct fs))
			break;
	}
	if (sblock_try[i] == -1)
		quit("Cannot find file system superblock\n");
	dev_bsize = sblock->fs_fsize / fsbtodb(sblock, 1);
	dev_bshift = ffs(dev_bsize) - 1;
	if (dev_bsize != (1 << dev_bshift))
		quit("dev_bsize (%ld) is not a power of 2", dev_bsize);
	tp_bshift = ffs(TP_BSIZE) - 1;
	if (TP_BSIZE != (1 << tp_bshift))
		quit("TP_BSIZE (%d) is not a power of 2", TP_BSIZE);
	maxino = sblock->fs_ipg * sblock->fs_ncg;
	mapsize = roundup(howmany(maxino, CHAR_BIT), TP_BSIZE);
	usedinomap = (char *)calloc((unsigned) mapsize, sizeof(char));
	dumpdirmap = (char *)calloc((unsigned) mapsize, sizeof(char));
	dumpinomap = (char *)calloc((unsigned) mapsize, sizeof(char));
	tapesize = 3 * (howmany(mapsize * sizeof(char), TP_BSIZE) + 1);

	nonodump = spcl.c_level < honorlevel;

	passno = 1;
	setproctitle("%s: pass 1: regular files", disk);
	msg("mapping (Pass I) [regular files]\n");
	anydirskipped = mapfiles(maxino, &tapesize);

	passno = 2;
	setproctitle("%s: pass 2: directories", disk);
	msg("mapping (Pass II) [directories]\n");
	while (anydirskipped) {
		anydirskipped = mapdirs(maxino, &tapesize);
	}

	if (pipeout || unlimited) {
		tapesize += 10;	/* 10 trailer blocks */
		msg("estimated %ld tape blocks.\n", tapesize);
	} else {
		double fetapes;

		if (blocksperfile)
			fetapes = (double) tapesize / blocksperfile;
		else if (cartridge) {
			/* Estimate number of tapes, assuming streaming stops at
			   the end of each block written, and not in mid-block.
			   Assume no erroneous blocks; this can be compensated
			   for with an artificially low tape size. */
			fetapes =
			(	  (double) tapesize	/* blocks */
				* TP_BSIZE	/* bytes/block */
				* (1.0/density)	/* 0.1" / byte " */
			  +
				  (double) tapesize	/* blocks */
				* (1.0/ntrec)	/* streaming-stops per block */
				* 15.48		/* 0.1" / streaming-stop " */
			) * (1.0 / tsize );	/* tape / 0.1" " */
		} else {
			/* Estimate number of tapes, for old fashioned 9-track
			   tape */
			int tenthsperirg = (density == 625) ? 3 : 7;
			fetapes =
			(	  (double) tapesize	/* blocks */
				* TP_BSIZE	/* bytes / block */
				* (1.0/density)	/* 0.1" / byte " */
			  +
				  (double) tapesize	/* blocks */
				* (1.0/ntrec)	/* IRG's / block */
				* tenthsperirg	/* 0.1" / IRG " */
			) * (1.0 / tsize );	/* tape / 0.1" " */
		}
		etapes = fetapes;		/* truncating assignment */
		etapes++;
		/* count the dumped inodes map on each additional tape */
		tapesize += (etapes - 1) *
			(howmany(mapsize * sizeof(char), TP_BSIZE) + 1);
		tapesize += etapes + 10;	/* headers + 10 trailer blks */
		msg("estimated %ld tape blocks on %3.2f tape(s).\n",
		    tapesize, fetapes);
	}

        /*
         * If the user only wants an estimate of the number of
         * tapes, exit now.
         */
        if (just_estimate)
                exit(0);

	/*
	 * Allocate tape buffer.
	 */
	if (!alloctape())
		quit(
	"can't allocate tape buffers - try a smaller blocking factor.\n");

	startnewtape(1);
	(void)time((time_t *)&(tstart_writing));
	dumpmap(usedinomap, TS_CLRI, maxino - 1);

	passno = 3;
	setproctitle("%s: pass 3: directories", disk);
	msg("dumping (Pass III) [directories]\n");
	dirty = 0;		/* XXX just to get gcc to shut up */
	for (map = dumpdirmap, ino = 1; ino < maxino; ino++) {
		if (((ino - 1) % CHAR_BIT) == 0)	/* map is offset by 1 */
			dirty = *map++;
		else
			dirty >>= 1;
		if ((dirty & 1) == 0)
			continue;
		/*
		 * Skip directory inodes deleted and maybe reallocated
		 */
		dp = getino(ino, &mode);
		if (mode != IFDIR)
			continue;
		(void)dumpino(dp, ino);
	}

	passno = 4;
	setproctitle("%s: pass 4: regular files", disk);
	msg("dumping (Pass IV) [regular files]\n");
	for (map = dumpinomap, ino = 1; ino < maxino; ino++) {
		if (((ino - 1) % CHAR_BIT) == 0)	/* map is offset by 1 */
			dirty = *map++;
		else
			dirty >>= 1;
		if ((dirty & 1) == 0)
			continue;
		/*
		 * Skip inodes deleted and reallocated as directories.
		 */
		dp = getino(ino, &mode);
		if (mode == IFDIR)
			continue;
		(void)dumpino(dp, ino);
	}

	(void)time((time_t *)&(tend_writing));
	spcl.c_type = TS_END;
	for (i = 0; i < ntrec; i++)
		writeheader(maxino - 1);
	if (pipeout)
		msg("DUMP: %jd tape blocks\n", (intmax_t)spcl.c_tapea);
	else
		msg("DUMP: %jd tape blocks on %d volume%s\n",
		    (intmax_t)spcl.c_tapea, spcl.c_volume,
		    (spcl.c_volume == 1) ? "" : "s");

	/* report dump performance, avoid division through zero */
	if (tend_writing - tstart_writing == 0)
		msg("finished in less than a second\n");
	else
		msg("finished in %jd seconds, throughput %jd KBytes/sec\n",
		    (intmax_t)tend_writing - tstart_writing, 
		    (intmax_t)(spcl.c_tapea / 
		    (tend_writing - tstart_writing)));

	putdumptime();
	trewind();
	broadcast("DUMP IS DONE!\a\a\n");
	msg("DUMP IS DONE\n");
	Exit(X_FINOK);
	/* NOTREACHED */
}
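The dev_bshift/tp_bshift setup above doubles as a power-of-two check: for a power of two, 1 << (ffs(x) - 1) reproduces x, and anything else fails the round trip. A standalone sketch (sample sizes invented):

#include <stdio.h>
#include <strings.h>	/* ffs() */

/* Returns log2(x) when x is a power of two (and nonzero), else -1. */
static int log2_if_pow2(long x)
{
	int shift = ffs((int)x) - 1;

	return x > 0 && ((long)1 << shift) == x ? shift : -1;
}

int main(void)
{
	long sizes[] = { 512, 1024, 4096, 3000 };
	int i;

	for (i = 0; i < 4; i++)
		printf("%5ld -> shift %d\n", sizes[i], log2_if_pow2(sizes[i]));
	return 0;
}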
Example #7
File: grid.c Project: Jul13t/piglit
static inline unsigned
get_stage_idx(const struct image_stage_info *stage)
{
        return ffs(stage->bit) - 1;
}
Example #8
int
udf_mountfs(struct vnode *devvp, struct mount *mp, uint32_t lb, struct proc *p)
{
	struct buf *bp = NULL;
	struct anchor_vdp avdp;
	struct umount *ump = NULL;
	struct part_desc *pd;
	struct logvol_desc *lvd;
	struct fileset_desc *fsd;
	struct file_entry *root_fentry;
	uint32_t sector, size, mvds_start, mvds_end;
	uint32_t fsd_offset = 0;
	uint16_t part_num = 0, fsd_part = 0;
	int error = EINVAL;
	int logvol_found = 0, part_found = 0, fsd_found = 0;
	int bsize;

	/*
	 * Disallow multiple mounts of the same device.
	 * Disallow mounting of a device that is currently in use
	 * (except for root, which might share swap device for miniroot).
	 * Flush out any old buffers remaining from a previous use.
	 */
	if ((error = vfs_mountedon(devvp)))
		return (error);
	if (vcount(devvp) > 1 && devvp != rootvp)
		return (EBUSY);
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
	error = vinvalbuf(devvp, V_SAVE, p->p_ucred, p, 0, 0);
	VOP_UNLOCK(devvp, 0, p);
	if (error)
		return (error);

	error = VOP_OPEN(devvp, FREAD, FSCRED, p);
	if (error)
		return (error);

	MALLOC(ump, struct umount *, sizeof(struct umount), M_UDFMOUNT,
	    M_WAITOK);
	bzero(ump, sizeof(struct umount));

	mp->mnt_data = (qaddr_t) ump;
	mp->mnt_stat.f_fsid.val[0] = devvp->v_rdev;
	mp->mnt_stat.f_fsid.val[1] = makefstype(MOUNT_UDF);
	mp->mnt_flag |= MNT_LOCAL;

	ump->um_mountp = mp;
	ump->um_dev = devvp->v_rdev;
	ump->um_devvp = devvp;

	bsize = 2048;	/* Should probe the media for its size. */

	/* 
	 * Get the Anchor Volume Descriptor Pointer from sector 256.
	 * Should also check sector n - 256, n, and 512.
	 */
	sector = 256;
	if ((error = bread(devvp, sector * btodb(bsize), bsize, NOCRED,
			   &bp)) != 0)
		goto bail;
	if ((error = udf_checktag((struct desc_tag *)bp->b_data, TAGID_ANCHOR)))
		goto bail;

	bcopy(bp->b_data, &avdp, sizeof(struct anchor_vdp));
	brelse(bp);
	bp = NULL;

	/*
	 * Extract the Partition Descriptor and Logical Volume Descriptor
	 * from the Volume Descriptor Sequence.
	 * Should we care about the partition type right now?
	 * What about multiple partitions?
	 */
	mvds_start = letoh32(avdp.main_vds_ex.loc);
	mvds_end = mvds_start + (letoh32(avdp.main_vds_ex.len) - 1) / bsize;
	for (sector = mvds_start; sector < mvds_end; sector++) {
		if ((error = bread(devvp, sector * btodb(bsize), bsize, 
				   NOCRED, &bp)) != 0) {
			printf("Can't read sector %d of VDS\n", sector);
			goto bail;
		}
		lvd = (struct logvol_desc *)bp->b_data;
		if (!udf_checktag(&lvd->tag, TAGID_LOGVOL)) {
			ump->um_bsize = letoh32(lvd->lb_size);
			ump->um_bmask = ump->um_bsize - 1;
			ump->um_bshift = ffs(ump->um_bsize) - 1;
			fsd_part = letoh16(lvd->_lvd_use.fsd_loc.loc.part_num);
			fsd_offset = letoh32(lvd->_lvd_use.fsd_loc.loc.lb_num);
			if (udf_find_partmaps(ump, lvd))
				break;
			logvol_found = 1;
		}
		pd = (struct part_desc *)bp->b_data;
		if (!udf_checktag(&pd->tag, TAGID_PARTITION)) {
			part_found = 1;
			part_num = letoh16(pd->part_num);
			ump->um_len = letoh32(pd->part_len);
			ump->um_start = letoh32(pd->start_loc);
		}

		brelse(bp); 
		bp = NULL;
		if ((part_found) && (logvol_found))
			break;
	}

	if (!part_found || !logvol_found) {
		error = EINVAL;
		goto bail;
	}

	if (fsd_part != part_num) {
		printf("FSD does not lie within the partition!\n");
		error = EINVAL;
		goto bail;
	}

	mtx_init(&ump->um_hashmtx, IPL_NONE);
	ump->um_hashtbl = hashinit(UDF_HASHTBLSIZE, M_UDFMOUNT, M_WAITOK,
	    &ump->um_hashsz);

	/* Get the VAT, if needed */
	if (ump->um_flags & UDF_MNT_FIND_VAT) {
		error = udf_vat_get(ump, lb);
		if (error)
			goto bail;
	}

	/*
	 * Grab the Fileset Descriptor
	 * Thanks to Chuck McCrobie <*****@*****.**> for pointing
	 * me in the right direction here.
	 */
	sector = fsd_offset;
	udf_vat_map(ump, &sector);
	if ((error = RDSECTOR(devvp, sector, ump->um_bsize, &bp)) != 0) {
		printf("Cannot read sector %d of FSD\n", sector);
		goto bail;
	}
	fsd = (struct fileset_desc *)bp->b_data;
	if (!udf_checktag(&fsd->tag, TAGID_FSD)) {
		fsd_found = 1;
		bcopy(&fsd->rootdir_icb, &ump->um_root_icb,
		    sizeof(struct long_ad));
	}

	brelse(bp);
	bp = NULL;

	if (!fsd_found) {
		printf("Couldn't find the fsd\n");
		error = EINVAL;
		goto bail;
	}

	/*
	 * Find the file entry for the root directory.
	 */
	sector = letoh32(ump->um_root_icb.loc.lb_num);
	size = letoh32(ump->um_root_icb.len);
	udf_vat_map(ump, &sector);
	if ((error = udf_readlblks(ump, sector, size, &bp)) != 0) {
		printf("Cannot read sector %d\n", sector);
		goto bail;
	}

	root_fentry = (struct file_entry *)bp->b_data;
	if ((error = udf_checktag(&root_fentry->tag, TAGID_FENTRY))) {
		printf("Invalid root file entry!\n");
		goto bail;
	}

	brelse(bp);
	bp = NULL;

	devvp->v_specmountpoint = mp;

	return (0);

bail:
	if (ump->um_hashtbl != NULL)
		free(ump->um_hashtbl, M_UDFMOUNT);

	if (ump != NULL) {
		FREE(ump, M_UDFMOUNT);
		mp->mnt_data = NULL;
		mp->mnt_flag &= ~MNT_LOCAL;
	}
	if (bp != NULL)
		brelse(bp);
	VOP_CLOSE(devvp, FREAD, FSCRED, p);

	return (error);
}
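A sketch of the um_bsize/um_bmask/um_bshift trio initialized from the logical volume descriptor above: with a power-of-two block size, x >> bshift gives the block number and x & bmask the offset within the block. The byte offset used is arbitrary.

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned bsize = 2048;		/* typical UDF logical block size */
	unsigned bmask = bsize - 1;
	int bshift = ffs((int)bsize) - 1;	/* 11 */
	unsigned off = 10000;		/* arbitrary byte offset */

	printf("byte %u -> block %u, offset-in-block %u\n",
	       off, off >> bshift, off & bmask);
	return 0;
}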
Example #9
static void
upload_ps_state(struct brw_context *brw)
{
    struct intel_context *intel = &brw->intel;
    struct gl_context *ctx = &intel->ctx;
    uint32_t dw2, dw4, dw5;
    const int max_threads_shift = brw->intel.is_haswell ?
                                  HSW_PS_MAX_THREADS_SHIFT : IVB_PS_MAX_THREADS_SHIFT;

    /* BRW_NEW_PS_BINDING_TABLE */
    BEGIN_BATCH(2);
    OUT_BATCH(_3DSTATE_BINDING_TABLE_POINTERS_PS << 16 | (2 - 2));
    OUT_BATCH(brw->wm.bind_bo_offset);
    ADVANCE_BATCH();

    /* CACHE_NEW_SAMPLER */
    BEGIN_BATCH(2);
    OUT_BATCH(_3DSTATE_SAMPLER_STATE_POINTERS_PS << 16 | (2 - 2));
    OUT_BATCH(brw->sampler.offset);
    ADVANCE_BATCH();

    /* CACHE_NEW_WM_PROG */
    if (brw->wm.prog_data->nr_params == 0) {
        /* Disable the push constant buffers. */
        BEGIN_BATCH(7);
        OUT_BATCH(_3DSTATE_CONSTANT_PS << 16 | (7 - 2));
        OUT_BATCH(0);
        OUT_BATCH(0);
        OUT_BATCH(0);
        OUT_BATCH(0);
        OUT_BATCH(0);
        OUT_BATCH(0);
        ADVANCE_BATCH();
    } else {
        BEGIN_BATCH(7);
        OUT_BATCH(_3DSTATE_CONSTANT_PS << 16 | (7 - 2));

        OUT_BATCH(ALIGN(brw->wm.prog_data->nr_params,
                        brw->wm.prog_data->dispatch_width) / 8);
        OUT_BATCH(0);
        /* Pointer to the WM constant buffer.  Covered by the set of
         * state flags from gen6_upload_wm_push_constants.
         */
        OUT_BATCH(brw->wm.push_const_offset);
        OUT_BATCH(0);
        OUT_BATCH(0);
        OUT_BATCH(0);
        ADVANCE_BATCH();
    }

    dw2 = dw4 = dw5 = 0;

    /* CACHE_NEW_SAMPLER */
    dw2 |= (ALIGN(brw->sampler.count, 4) / 4) << GEN7_PS_SAMPLER_COUNT_SHIFT;

    /* Use ALT floating point mode for ARB fragment programs, because they
     * require 0^0 == 1.  Even though _CurrentFragmentProgram is used for
     * rendering, CurrentFragmentProgram is used for this check to
     * differentiate between the GLSL and non-GLSL cases.
     */
    if (intel->ctx.Shader.CurrentFragmentProgram == NULL)
        dw2 |= GEN7_PS_FLOATING_POINT_MODE_ALT;

    if (intel->is_haswell)
        dw4 |= SET_FIELD(1, HSW_PS_SAMPLE_MASK); /* 1 sample for now */

    dw4 |= (brw->max_wm_threads - 1) << max_threads_shift;

    /* CACHE_NEW_WM_PROG */
    if (brw->wm.prog_data->nr_params > 0)
        dw4 |= GEN7_PS_PUSH_CONSTANT_ENABLE;

    /* CACHE_NEW_WM_PROG | _NEW_COLOR
     *
     * The hardware wedges if you have this bit set but don't turn on any dual
     * source blend factors.
     */
    if (brw->wm.prog_data->dual_src_blend &&
            (ctx->Color.BlendEnabled & 1) &&
            ctx->Color.Blend[0]._UsesDualSrc) {
        dw4 |= GEN7_PS_DUAL_SOURCE_BLEND_ENABLE;
    }

    /* BRW_NEW_FRAGMENT_PROGRAM */
    if (brw->fragment_program->Base.InputsRead != 0)
        dw4 |= GEN7_PS_ATTRIBUTE_ENABLE;

    dw4 |= GEN7_PS_8_DISPATCH_ENABLE;
    if (brw->wm.prog_data->prog_offset_16)
        dw4 |= GEN7_PS_16_DISPATCH_ENABLE;

    dw5 |= (brw->wm.prog_data->first_curbe_grf <<
            GEN7_PS_DISPATCH_START_GRF_SHIFT_0);
    dw5 |= (brw->wm.prog_data->first_curbe_grf_16 <<
            GEN7_PS_DISPATCH_START_GRF_SHIFT_2);

    BEGIN_BATCH(8);
    OUT_BATCH(_3DSTATE_PS << 16 | (8 - 2));
    OUT_BATCH(brw->wm.prog_offset);
    OUT_BATCH(dw2);
    if (brw->wm.prog_data->total_scratch) {
        OUT_RELOC(brw->wm.scratch_bo,
                  I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                  ffs(brw->wm.prog_data->total_scratch) - 11);
    } else {
        OUT_BATCH(0);
    }
    OUT_BATCH(dw4);
    OUT_BATCH(dw5);
    OUT_BATCH(0); /* kernel 1 pointer */
    OUT_BATCH(brw->wm.prog_offset + brw->wm.prog_data->prog_offset_16);
    ADVANCE_BATCH();
}
Example #10
static unsigned int ipt_ulog_target(struct sk_buff **pskb,
				    unsigned int hooknum,
				    const struct net_device *in,
				    const struct net_device *out,
				    const void *targinfo, void *userinfo)
{
	ulog_buff_t *ub;
	ulog_packet_msg_t *pm;
	size_t size, copy_len;
	struct nlmsghdr *nlh;
	struct ipt_ulog_info *loginfo = (struct ipt_ulog_info *) targinfo;

	/* ffs == find first bit set, necessary because userspace
	 * is already shifting groupnumber, but we need unshifted.
	 * ffs() returns [1..32], we need [0..31] */
	unsigned int groupnum = ffs(loginfo->nl_group) - 1;

	/* calculate the size of the skb needed */
	if ((loginfo->copy_range == 0) ||
	    (loginfo->copy_range > (*pskb)->len)) {
		copy_len = (*pskb)->len;
	} else {
		copy_len = loginfo->copy_range;
	}

	size = NLMSG_SPACE(sizeof(*pm) + copy_len);

	ub = &ulog_buffers[groupnum];
	
	LOCK_BH(&ulog_lock);

	if (!ub->skb) {
		if (!(ub->skb = ulog_alloc_skb(size)))
			goto alloc_failure;
	} else if (ub->qlen >= loginfo->qthreshold ||
		   size > skb_tailroom(ub->skb)) {
		/* either the queue len is too high or we don't have 
		 * enough room in nlskb left. send it to userspace. */

		ulog_send(groupnum);

		if (!(ub->skb = ulog_alloc_skb(size)))
			goto alloc_failure;
	}

	DEBUGP("ipt_ULOG: qlen %d, qthreshold %d\n", ub->qlen, 
		loginfo->qthreshold);

	/* NLMSG_PUT contains a hidden goto nlmsg_failure !!! */
	nlh = NLMSG_PUT(ub->skb, 0, ub->qlen, ULOG_NL_EVENT, 
			size - sizeof(*nlh));
	ub->qlen++;

	pm = NLMSG_DATA(nlh);

	/* copy hook, prefix, timestamp, payload, etc. */
	pm->data_len = copy_len;
	pm->timestamp_sec = (*pskb)->stamp.tv_sec;
	pm->timestamp_usec = (*pskb)->stamp.tv_usec;
	pm->mark = (*pskb)->nfmark;
	pm->hook = hooknum;
	if (loginfo->prefix[0] != '\0')
		strncpy(pm->prefix, loginfo->prefix, sizeof(pm->prefix));
	else
		*(pm->prefix) = '\0';

	if (in && in->hard_header_len > 0
	    && (*pskb)->mac.raw != (void *) (*pskb)->nh.iph
	    && in->hard_header_len <= ULOG_MAC_LEN) {
		memcpy(pm->mac, (*pskb)->mac.raw, in->hard_header_len);
		pm->mac_len = in->hard_header_len;
	} else
		pm->mac_len = 0;

	if (in)
		strncpy(pm->indev_name, in->name, sizeof(pm->indev_name));
	else
		pm->indev_name[0] = '\0';

	if (out)
		strncpy(pm->outdev_name, out->name, sizeof(pm->outdev_name));
	else
		pm->outdev_name[0] = '\0';

	if (copy_len)
		memcpy(pm->payload, (*pskb)->data, copy_len);
	
	/* check if we are building multi-part messages */
	if (ub->qlen > 1) {
		ub->lastnlh->nlmsg_flags |= NLM_F_MULTI;
	}

	/* if threshold is reached, send message to userspace */
	if (ub->qlen >= loginfo->qthreshold) {
		if (loginfo->qthreshold > 1)
			nlh->nlmsg_type = NLMSG_DONE;
	}

	ub->lastnlh = nlh;

	/* if timer isn't already running, start it */
	if (!timer_pending(&ub->timer)) {
		ub->timer.expires = jiffies + flushtimeout;
		add_timer(&ub->timer);
	}

	UNLOCK_BH(&ulog_lock);

	return IPT_CONTINUE;


nlmsg_failure:
	PRINTR("ipt_ULOG: error during NLMSG_PUT\n");

alloc_failure:
	PRINTR("ipt_ULOG: Error building netlink message\n");

	UNLOCK_BH(&ulog_lock);

	return IPT_CONTINUE;
}
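
A note on the ffs() comment in the example above: ffs() returns 1..32 (and 0 when no bit is set), so subtracting one turns a single-bit group mask into the 0-based index used for the buffer array. A minimal userspace sketch with a made-up nl_group value:

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int nl_group = 0x20;	/* hypothetical mask with bit 5 set */
	unsigned int groupnum = ffs(nl_group) - 1;

	/* ffs() is 1-based, so the subtraction yields the 0-based
	 * index that the ulog_buffers[] indexing wants. */
	printf("nl_group 0x%x -> groupnum %u\n", nl_group, groupnum);	/* prints 5 */
	return 0;
}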
Example #11
asmlinkage void __do_softirq(void)
{
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	unsigned long old_flags = current->flags;
	int max_restart = MAX_SOFTIRQ_RESTART;
	struct softirq_action *h;
	bool in_hardirq;
	__u32 pending;
	int softirq_bit;
	int cpu;

	/*
	 * Mask out PF_MEMALLOC as the current task context is borrowed for the
	 * softirq. A softirq handler such as network RX might set PF_MEMALLOC
	 * again if the socket is related to swap.
	 */
	current->flags &= ~PF_MEMALLOC;

	pending = local_softirq_pending();
	account_irq_enter_time(current);

	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
	in_hardirq = lockdep_softirq_start();

	cpu = smp_processor_id();
restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	while ((softirq_bit = ffs(pending))) {
		unsigned int vec_nr;
		int prev_count;

		h += softirq_bit - 1;

		vec_nr = h - softirq_vec;
		prev_count = preempt_count();

		kstat_incr_softirqs_this_cpu(vec_nr);

		trace_softirq_entry(vec_nr);
		h->action(h);
		trace_softirq_exit(vec_nr);
		if (unlikely(prev_count != preempt_count())) {
			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
			       vec_nr, softirq_to_name[vec_nr], h->action,
			       prev_count, preempt_count());
			preempt_count_set(prev_count);
		}
		rcu_bh_qs(cpu);
		h++;
		pending >>= softirq_bit;
	}

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    --max_restart)
			goto restart;

		wakeup_softirqd();
	}

	lockdep_softirq_end(in_hardirq);
	account_irq_exit_time(current);
	__local_bh_enable(SOFTIRQ_OFFSET);
	WARN_ON_ONCE(in_interrupt());
	tsk_restore_flags(current, old_flags, PF_MEMALLOC);
}
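
The while ((softirq_bit = ffs(pending))) loop above never probes clear bits: ffs() jumps straight to the next pending vector and the mask is then shifted past it. A self-contained sketch of the same scan, with an invented handler and pending mask:

#include <stdio.h>
#include <strings.h>	/* ffs() */

static void handle(unsigned int vec_nr)
{
	printf("servicing vector %u\n", vec_nr);
}

int main(void)
{
	unsigned int pending = 0x0112;	/* hypothetical mask: bits 1, 4, 8 */
	unsigned int base = 0;		/* vector number of bit 0 of `pending' */
	int bit;

	while ((bit = ffs(pending))) {
		base += bit - 1;	/* skip ahead to the set bit */
		handle(base);
		base++;			/* mirrors the h++ advance above */
		pending >>= bit;	/* drop everything up to and including it */
	}
	return 0;
}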
Example #12
static void
upload_wm_state(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   const struct brw_fragment_program *fp =
      brw_fragment_program_const(brw->fragment_program);
   uint32_t dw2, dw4, dw5, dw6;

   /* _NEW_BUFFERS */
   bool multisampled_fbo = ctx->DrawBuffer->Visual.samples > 1;

    /* CACHE_NEW_WM_PROG */
   if (brw->wm.prog_data->nr_params == 0) {
      /* Disable the push constant buffers. */
      BEGIN_BATCH(5);
      OUT_BATCH(_3DSTATE_CONSTANT_PS << 16 | (5 - 2));
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   } else {
      BEGIN_BATCH(5);
      OUT_BATCH(_3DSTATE_CONSTANT_PS << 16 |
		GEN6_CONSTANT_BUFFER_0_ENABLE |
		(5 - 2));
      /* Pointer to the WM constant buffer.  Covered by the set of
       * state flags from gen6_upload_wm_push_constants.
       */
      OUT_BATCH(brw->wm.push_const_offset +
		ALIGN(brw->wm.prog_data->nr_params,
		      brw->wm.prog_data->dispatch_width) / 8 - 1);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   dw2 = dw4 = dw5 = dw6 = 0;
   dw4 |= GEN6_WM_STATISTICS_ENABLE;
   dw5 |= GEN6_WM_LINE_AA_WIDTH_1_0;
   dw5 |= GEN6_WM_LINE_END_CAP_AA_WIDTH_0_5;

   /* Use ALT floating point mode for ARB fragment programs, because they
    * require 0^0 == 1.  Even though _CurrentFragmentProgram is used for
    * rendering, CurrentFragmentProgram is used for this check to
    * differentiate between the GLSL and non-GLSL cases.
    */
   if (ctx->Shader.CurrentFragmentProgram == NULL)
      dw2 |= GEN6_WM_FLOATING_POINT_MODE_ALT;

   /* CACHE_NEW_SAMPLER */
   dw2 |= (ALIGN(brw->sampler.count, 4) / 4) << GEN6_WM_SAMPLER_COUNT_SHIFT;
   dw4 |= (brw->wm.prog_data->first_curbe_grf <<
	   GEN6_WM_DISPATCH_START_GRF_SHIFT_0);
   dw4 |= (brw->wm.prog_data->first_curbe_grf_16 <<
	   GEN6_WM_DISPATCH_START_GRF_SHIFT_2);

   dw5 |= (brw->max_wm_threads - 1) << GEN6_WM_MAX_THREADS_SHIFT;

   /* CACHE_NEW_WM_PROG */
   dw5 |= GEN6_WM_8_DISPATCH_ENABLE;
   if (brw->wm.prog_data->prog_offset_16)
      dw5 |= GEN6_WM_16_DISPATCH_ENABLE;

   /* CACHE_NEW_WM_PROG | _NEW_COLOR */
   if (brw->wm.prog_data->dual_src_blend &&
       (ctx->Color.BlendEnabled & 1) &&
       ctx->Color.Blend[0]._UsesDualSrc) {
      dw5 |= GEN6_WM_DUAL_SOURCE_BLEND_ENABLE;
   }

   /* _NEW_LINE */
   if (ctx->Line.StippleFlag)
      dw5 |= GEN6_WM_LINE_STIPPLE_ENABLE;

   /* _NEW_POLYGON */
   if (ctx->Polygon.StippleFlag)
      dw5 |= GEN6_WM_POLYGON_STIPPLE_ENABLE;

   /* BRW_NEW_FRAGMENT_PROGRAM */
   if (fp->program.Base.InputsRead & VARYING_BIT_POS)
      dw5 |= GEN6_WM_USES_SOURCE_DEPTH | GEN6_WM_USES_SOURCE_W;
   if (fp->program.Base.OutputsWritten & BITFIELD64_BIT(FRAG_RESULT_DEPTH))
      dw5 |= GEN6_WM_COMPUTED_DEPTH;
   /* CACHE_NEW_WM_PROG */
   dw6 |= brw->wm.prog_data->barycentric_interp_modes <<
      GEN6_WM_BARYCENTRIC_INTERPOLATION_MODE_SHIFT;

   /* _NEW_COLOR, _NEW_MULTISAMPLE */
   if (fp->program.UsesKill || ctx->Color.AlphaEnabled ||
       ctx->Multisample.SampleAlphaToCoverage)
      dw5 |= GEN6_WM_KILL_ENABLE;

   if (brw_color_buffer_write_enabled(brw) ||
       dw5 & (GEN6_WM_KILL_ENABLE | GEN6_WM_COMPUTED_DEPTH)) {
      dw5 |= GEN6_WM_DISPATCH_ENABLE;
   }

   dw6 |= _mesa_bitcount_64(brw->fragment_program->Base.InputsRead) <<
      GEN6_WM_NUM_SF_OUTPUTS_SHIFT;
   if (multisampled_fbo) {
      /* _NEW_MULTISAMPLE */
      if (ctx->Multisample.Enabled)
         dw6 |= GEN6_WM_MSRAST_ON_PATTERN;
      else
         dw6 |= GEN6_WM_MSRAST_OFF_PIXEL;
      dw6 |= GEN6_WM_MSDISPMODE_PERPIXEL;
   } else {
      dw6 |= GEN6_WM_MSRAST_OFF_PIXEL;
      dw6 |= GEN6_WM_MSDISPMODE_PERSAMPLE;
   }

   BEGIN_BATCH(9);
   OUT_BATCH(_3DSTATE_WM << 16 | (9 - 2));
   OUT_BATCH(brw->wm.prog_offset);
   OUT_BATCH(dw2);
   if (brw->wm.prog_data->total_scratch) {
      OUT_RELOC(brw->wm.scratch_bo, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
		ffs(brw->wm.prog_data->total_scratch) - 11);
   } else {
      OUT_BATCH(0);
   }
   OUT_BATCH(dw4);
   OUT_BATCH(dw5);
   OUT_BATCH(dw6);
   OUT_BATCH(0); /* kernel 1 pointer */
   /* kernel 2 pointer */
   OUT_BATCH(brw->wm.prog_offset + brw->wm.prog_data->prog_offset_16);
   ADVANCE_BATCH();
}
Example #13
int _papi_hwd_merge(EventSetInfo_t *ESI, EventSetInfo_t *zero)
{ 
  int i, retval;
  hwd_control_state_t *this_state = &ESI->machdep;
  hwd_control_state_t *current_state = &zero->machdep;
  
  /* If we ARE NOT nested, 
     just copy the global counter structure to the current eventset */

  if (current_state->allocated_registers.selector == 0x0)
    {
      current_state->allocated_registers.selector = this_state->allocated_registers.selector;
      memcpy(&current_state->counter_cmd,&this_state->counter_cmd,sizeof(struct pmc_control));

      /* Stop the current context 

      retval = perf(PERF_RESET_COUNTERS, 0, 0);
      if (retval) 
	return(PAPI_ESYS);  */
      
      /* (Re)start the counters */
      
#ifdef DEBUG
      dump_cmd(&current_state->counter_cmd);
#endif

      if (pmc_control(current_state->self, &current_state->counter_cmd) < 0)
		return(PAPI_ESYS);
      
      return(PAPI_OK);
    }

  /* If we ARE nested, 
     carefully merge the global counter structure with the current eventset */
  else
    {
      int tmp, hwcntrs_in_both, hwcntrs_in_all, hwcntr;

      /* Stop the current context 

      retval = perf(PERF_STOP, 0, 0);
      if (retval) 
	return(PAPI_ESYS); */
  
      /* Update the global values */

      retval = update_global_hwcounters(zero);
      if (retval)
	return(retval);

      /* Delete the current context */

      hwcntrs_in_both = this_state->allocated_registers.selector & current_state->allocated_registers.selector;
      hwcntrs_in_all  = this_state->allocated_registers.selector | current_state->allocated_registers.selector;

      /* Check for events that are shared between eventsets and 
	 therefore require no modification to the control state. */

      /* First time through, error check */

      tmp = hwcntrs_in_all;
      while ((i = ffs(tmp)))
	{
	  hwcntr = 1 << (i-1);
	  tmp = tmp ^ hwcntr;
	  if (hwcntr & hwcntrs_in_both)
	    {
	      if (!(counter_event_shared(&this_state->counter_cmd, &current_state->counter_cmd, i-1)))
		return(PAPI_ECNFLCT);
	    }
	  else if (!(counter_event_compat(&this_state->counter_cmd, &current_state->counter_cmd, i-1)))
	    return(PAPI_ECNFLCT);
	}

      /* Now everything is good, so actually do the merge */

      tmp = hwcntrs_in_all;
      while ((i = ffs(tmp)))
	{
	  hwcntr = 1 << (i-1);
	  tmp = tmp ^ hwcntr;
	  if (hwcntr & hwcntrs_in_both)
	    {
	      ESI->hw_start[i-1] = zero->hw_start[i-1];
	      zero->multistart.SharedDepth[i-1]++; 
	    }
	  else if (hwcntr & this_state->allocated_registers.selector)
	    {
	      current_state->allocated_registers.selector |= hwcntr;
	      counter_event_copy(&this_state->counter_cmd, &current_state->counter_cmd, i-1);
	      ESI->hw_start[i-1] = 0;
	      zero->hw_start[i-1] = 0;
	    }
	}
    }

  /* Set up the new merged control structure */
  
#ifdef DEBUG
  dump_cmd(&current_state->counter_cmd);
#endif
      
  /* Stop the current context 

  retval = perf(PERF_RESET_COUNTERS, 0, 0);
  if (retval) 
    return(PAPI_ESYS); */

  /* (Re)start the counters */
  
  if (pmc_control(current_state->self, &current_state->counter_cmd) < 0)
    return(PAPI_ESYS);

  return(PAPI_OK);
} 
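
Where the softirq loop shifts its mask, the merge code above leaves tmp in place and clears each visited bit with an XOR instead. A standalone sketch of this second set-bit iteration idiom, using a hypothetical selector value:

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int tmp = 0x29;	/* hypothetical selector: counters 0, 3, 5 */
	int i;

	while ((i = ffs(tmp))) {
		unsigned int hwcntr = 1u << (i - 1);

		tmp ^= hwcntr;	/* clear the bit so ffs() finds the next one */
		printf("merging hardware counter %d\n", i - 1);
	}
	return 0;
}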
Example #14
File: otto.c Project: PSR-hunt/hunt
/**
 * Allows the player to wander in the maze.
 * [PSR]
 */
STATIC void wander() {
	int i, j, rel_dir, dir_mask, dir_count;

	for (i = 0; i < NUMDIRECTIONS; i++) {
		if (!(flbr[i].flags & BEEN) || flbr[i].distance <= 1) {
			break;
		}
	}
	if (i == NUMDIRECTIONS) {
		memset(been_there, 0, sizeof been_there);
	}
	dir_mask = dir_count = 0;
	for (i = 0; i < NUMDIRECTIONS; i++) {
		j = (RIGHT + i) % NUMDIRECTIONS;
		if (flbr[j].distance <= 1 || flbr[j].flags & DEADEND) {
			continue;
		}
		if (!(flbr[j].flags & BEEN_SAME)) {
			dir_mask = 1 << j;
			dir_count = 1;
			break;
		}
		if (j == FRONT
				&& num_turns > 4 + (random() %
						((flbr[FRONT].flags & BEEN) ? 7 : HEIGHT))) {
			continue;
		}
		dir_mask |= 1 << j;
# ifdef notdef
		dir_count++;
# else
		dir_count = 1;
		break;
# endif
	}
	if (dir_count == 0) {
		duck(random() % NUMDIRECTIONS);
		num_turns = 0;
		return;
	} else if (dir_count == 1) {
		rel_dir = ffs(dir_mask) - 1;
	} else {
		rel_dir = ffs(dir_mask) - 1;
		dir_mask &= ~(1 << rel_dir);
		while (dir_mask != 0) {
			i = ffs(dir_mask) - 1;
			if (random() % 5 == 0) {
				rel_dir = i;
			}
			dir_mask &= ~(1 << i);
		}
	}
	if (rel_dir == FRONT) {
		num_turns++;
	}
	else {
		num_turns = 0;
	}

# ifdef DEBUG
	fprintf(debug, " w(%c)", RELCHARS[rel_dir]);
# endif
	face_and_move_direction(rel_dir, 1);
}
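
The tail of wander() walks the set bits of dir_mask with ffs() and occasionally replaces the current pick. A sketch of the same walk reworked as uniform reservoir sampling (keep the k-th candidate seen with probability 1/k); the uniform rule is an assumption for illustration, not the game's fixed 1-in-5 choice:

#include <stdio.h>
#include <stdlib.h>
#include <strings.h>	/* ffs() */

/* Pick one set bit of `mask' uniformly at random; -1 if mask == 0. */
static int pick_random_bit(unsigned int mask)
{
	int chosen = -1;
	int seen = 0;

	while (mask != 0) {
		int bit = ffs(mask) - 1;

		mask &= ~(1u << bit);
		if (random() % ++seen == 0)	/* keep with probability 1/seen */
			chosen = bit;
	}
	return chosen;
}

int main(void)
{
	srandom(42);
	printf("picked direction %d\n", pick_random_bit(0x0b));	/* bits 0, 1, 3 */
	return 0;
}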
Example #15
int gsc_try_crop(struct gsc_ctx *ctx, struct v4l2_crop *cr)
{
	struct gsc_frame *f;
	struct gsc_dev *gsc = ctx->gsc_dev;
	struct gsc_variant *variant = gsc->variant;
	u32 mod_x = 0, mod_y = 0, tmp_w, tmp_h;
	u32 min_w, min_h, max_w, max_h;

	if (cr->c.top < 0 || cr->c.left < 0) {
		gsc_err("doesn't support negative values for top & left\n");
		return -EINVAL;
	}
	gsc_dbg("user put w: %d, h: %d", cr->c.width, cr->c.height);

	if (cr->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
		f = &ctx->d_frame;
	else if (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
		f = &ctx->s_frame;
	else
		return -EINVAL;

	max_w = f->f_width;
	max_h = f->f_height;
	tmp_w = cr->c.width;
	tmp_h = cr->c.height;

	if (V4L2_TYPE_IS_OUTPUT(cr->type)) {
		if ((is_yuv422(f->fmt->pixelformat) && f->fmt->nr_comp == 1) ||
		    is_rgb(f->fmt->pixelformat))
			min_w = 32;
		else
			min_w = 64;
		if ((is_yuv422(f->fmt->pixelformat) && f->fmt->nr_comp == 3) ||
		    is_yuv420(f->fmt->pixelformat))
			min_h = 32;
		else
			min_h = 16;
	} else {
		if (is_yuv420(f->fmt->pixelformat) ||
		    is_yuv422(f->fmt->pixelformat))
			mod_x = ffs(variant->pix_align->target_w) - 1;
		if (is_yuv420(f->fmt->pixelformat))
			mod_y = ffs(variant->pix_align->target_h) - 1;
		if (ctx->gsc_ctrls.rotate->val == 90 ||
		    ctx->gsc_ctrls.rotate->val == 270) {
			max_w = f->f_height;
			max_h = f->f_width;
			min_w = variant->pix_min->target_rot_en_w;
			min_h = variant->pix_min->target_rot_en_h;
			tmp_w = cr->c.height;
			tmp_h = cr->c.width;
		} else {
			min_w = variant->pix_min->target_rot_dis_w;
			min_h = variant->pix_min->target_rot_dis_h;
		}
	}
	gsc_dbg("mod_x: %d, mod_y: %d, min_w: %d, min_h = %d,\
		tmp_w : %d, tmp_h : %d",
		mod_x, mod_y, min_w, min_h, tmp_w, tmp_h);

	v4l_bound_align_image(&tmp_w, min_w, max_w, mod_x,
			      &tmp_h, min_h, max_h, mod_y, 0);

	if (!V4L2_TYPE_IS_OUTPUT(cr->type) &&
	    (ctx->gsc_ctrls.rotate->val == 90 ||
	    ctx->gsc_ctrls.rotate->val == 270)) {
		gsc_check_crop_change(tmp_h, tmp_w, &cr->c.width, &cr->c.height);
	} else {
		gsc_check_crop_change(tmp_w, tmp_h, &cr->c.width, &cr->c.height);
	}

	/* adjust left/top if cropping rectangle is out of bounds */
	/* Need to add code to align the left value to a multiple of 2 */
	if (cr->c.left + tmp_w > max_w)
		cr->c.left = max_w - tmp_w;
	if (cr->c.top + tmp_h > max_h)
		cr->c.top = max_h - tmp_h;

	if (is_yuv420(f->fmt->pixelformat) || is_yuv422(f->fmt->pixelformat))
		if (cr->c.left % 2)
			cr->c.left -= 1;

	gsc_dbg("Aligned l:%d, t:%d, w:%d, h:%d, f_w: %d, f_h: %d",
	    cr->c.left, cr->c.top, cr->c.width, cr->c.height, max_w, max_h);

	return 0;
}
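
Both crop branches above derive mod_x/mod_y from a power-of-two alignment via ffs(align) - 1, which is exactly the log2 shift that v4l_bound_align_image() expects. A sketch of the conversion, applying the alignment by hand with invented values:

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int target_w_align = 16;	/* hypothetical pix_align->target_w */
	unsigned int mod_x = ffs(target_w_align) - 1;	/* 16 -> shift of 4 */
	unsigned int width = 721;

	/* Rounding down to the alignment is a mask derived from the shift. */
	unsigned int aligned = width & ~((1u << mod_x) - 1);

	printf("align %u => mod_x %u; width %u -> %u\n",
	       target_w_align, mod_x, width, aligned);	/* 721 -> 720 */
	return 0;
}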
Example #16
int
msdosfs_mountfs(struct vnode *devvp, struct mount *mp, struct lwp *l, struct msdosfs_args *argp)
{
	struct msdosfsmount *pmp;
	struct buf *bp;
	dev_t dev = devvp->v_rdev;
	union bootsector *bsp;
	struct byte_bpb33 *b33;
	struct byte_bpb50 *b50;
	struct byte_bpb710 *b710;
	uint8_t SecPerClust;
	int	ronly, error, tmp;
	int	bsize;
	uint64_t psize;
	unsigned secsize;

	/* Flush out any old buffers remaining from a previous use. */
	if ((error = vinvalbuf(devvp, V_SAVE, l->l_cred, l, 0, 0)) != 0)
		return (error);

	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;

	bp  = NULL; /* both used in error_exit */
	pmp = NULL;

	error = fstrans_mount(mp);
	if (error)
		goto error_exit;

	error = getdisksize(devvp, &psize, &secsize);
	if (error) {
		if (argp->flags & MSDOSFSMNT_GEMDOSFS)
			goto error_exit;

		/* ok, so it failed.  we most likely don't need the info */
		secsize = DEV_BSIZE;
		psize = 0;
		error = 0;
	}

	if (argp->flags & MSDOSFSMNT_GEMDOSFS) {
		bsize = secsize;
		if (bsize != 512) {
			DPRINTF(("Invalid block bsize %d for GEMDOS\n", bsize));
			error = EINVAL;
			goto error_exit;
		}
	} else
		bsize = 0;

	/*
	 * Read the boot sector of the filesystem, and then check the
	 * boot signature.  If not a dos boot sector then error out.
	 */
	if ((error = bread(devvp, 0, secsize, NOCRED, 0, &bp)) != 0)
		goto error_exit;
	bsp = (union bootsector *)bp->b_data;
	b33 = (struct byte_bpb33 *)bsp->bs33.bsBPB;
	b50 = (struct byte_bpb50 *)bsp->bs50.bsBPB;
	b710 = (struct byte_bpb710 *)bsp->bs710.bsBPB;

	if (!(argp->flags & MSDOSFSMNT_GEMDOSFS)) {
		if (bsp->bs50.bsBootSectSig0 != BOOTSIG0
		    || bsp->bs50.bsBootSectSig1 != BOOTSIG1) {
			DPRINTF(("bootsig0 %d bootsig1 %d\n", 
			    bsp->bs50.bsBootSectSig0,
			    bsp->bs50.bsBootSectSig1));
			error = EINVAL;
			goto error_exit;
		}
	}

	pmp = malloc(sizeof *pmp, M_MSDOSFSMNT, M_WAITOK);
	memset(pmp, 0, sizeof *pmp);
	pmp->pm_mountp = mp;

	/*
	 * Compute several useful quantities from the bpb in the
	 * bootsector.  Copy in the dos 5 variant of the bpb then fix up
	 * the fields that are different between dos 5 and dos 3.3.
	 */
	SecPerClust = b50->bpbSecPerClust;
	pmp->pm_BytesPerSec = getushort(b50->bpbBytesPerSec);
	pmp->pm_ResSectors = getushort(b50->bpbResSectors);
	pmp->pm_FATs = b50->bpbFATs;
	pmp->pm_RootDirEnts = getushort(b50->bpbRootDirEnts);
	pmp->pm_Sectors = getushort(b50->bpbSectors);
	pmp->pm_FATsecs = getushort(b50->bpbFATsecs);
	pmp->pm_SecPerTrack = getushort(b50->bpbSecPerTrack);
	pmp->pm_Heads = getushort(b50->bpbHeads);
	pmp->pm_Media = b50->bpbMedia;

	if (!(argp->flags & MSDOSFSMNT_GEMDOSFS)) {
		/* XXX - We should probably check more values here */
		if (!pmp->pm_BytesPerSec || !SecPerClust
		    || pmp->pm_SecPerTrack > 63) {
			DPRINTF(("bytespersec %d secperclust %d "
			    "secpertrack %d\n", 
			    pmp->pm_BytesPerSec, SecPerClust,
			    pmp->pm_SecPerTrack));
			error = EINVAL;
			goto error_exit;
		}
	}

	if (pmp->pm_Sectors == 0) {
		pmp->pm_HiddenSects = getulong(b50->bpbHiddenSecs);
		pmp->pm_HugeSectors = getulong(b50->bpbHugeSectors);
	} else {
		pmp->pm_HiddenSects = getushort(b33->bpbHiddenSecs);
		pmp->pm_HugeSectors = pmp->pm_Sectors;
	}

	if (pmp->pm_RootDirEnts == 0) {
		unsigned short vers = getushort(b710->bpbFSVers);
		/*
		 * Some say that bsBootSectSig[23] must be zero, but
		 * Windows does not require this and some digital cameras
		 * do not set these to zero.  Therefore, do not insist.
		 */
		if (pmp->pm_Sectors || pmp->pm_FATsecs || vers) {
			DPRINTF(("sectors %d fatsecs %lu vers %d\n",
			    pmp->pm_Sectors, pmp->pm_FATsecs, vers));
			error = EINVAL;
			goto error_exit;
		}
		pmp->pm_fatmask = FAT32_MASK;
		pmp->pm_fatmult = 4;
		pmp->pm_fatdiv = 1;
		pmp->pm_FATsecs = getulong(b710->bpbBigFATsecs);

		/* mirrorring is enabled if the FATMIRROR bit is not set */
		if ((getushort(b710->bpbExtFlags) & FATMIRROR) == 0)
			pmp->pm_flags |= MSDOSFS_FATMIRROR;
		else
			pmp->pm_curfat = getushort(b710->bpbExtFlags) & FATNUM;
	} else
		pmp->pm_flags |= MSDOSFS_FATMIRROR;

	if (argp->flags & MSDOSFSMNT_GEMDOSFS) {
		if (FAT32(pmp)) {
			DPRINTF(("FAT32 for GEMDOS\n"));
			/*
			 * GEMDOS doesn't know FAT32.
			 */
			error = EINVAL;
			goto error_exit;
		}

		/*
		 * Check a few values (could do some more):
		 * - logical sector size: power of 2, >= block size
		 * - sectors per cluster: power of 2, >= 1
		 * - number of sectors:   >= 1, <= size of partition
		 */
		if ( (SecPerClust == 0)
		  || (SecPerClust & (SecPerClust - 1))
		  || (pmp->pm_BytesPerSec < bsize)
		  || (pmp->pm_BytesPerSec & (pmp->pm_BytesPerSec - 1))
		  || (pmp->pm_HugeSectors == 0)
		  || (pmp->pm_HugeSectors * (pmp->pm_BytesPerSec / bsize)
		      > psize)) {
			DPRINTF(("consistency checks for GEMDOS\n"));
			error = EINVAL;
			goto error_exit;
		}
		/*
		 * XXX - Many parts of the msdosfs driver seem to assume that
		 * the number of bytes per logical sector (BytesPerSec) will
		 * always be the same as the number of bytes per disk block.
		 * Let's pretend it is.
		 */
		tmp = pmp->pm_BytesPerSec / bsize;
		pmp->pm_BytesPerSec  = bsize;
		pmp->pm_HugeSectors *= tmp;
		pmp->pm_HiddenSects *= tmp;
		pmp->pm_ResSectors  *= tmp;
		pmp->pm_Sectors     *= tmp;
		pmp->pm_FATsecs     *= tmp;
		SecPerClust         *= tmp;
	}

	/* Check that fs has nonzero FAT size */
	if (pmp->pm_FATsecs == 0) {
		DPRINTF(("FATsecs is 0\n"));
		error = EINVAL;
		goto error_exit;
	}

	pmp->pm_fatblk = pmp->pm_ResSectors;
	if (FAT32(pmp)) {
		pmp->pm_rootdirblk = getulong(b710->bpbRootClust);
		pmp->pm_firstcluster = pmp->pm_fatblk
			+ (pmp->pm_FATs * pmp->pm_FATsecs);
		pmp->pm_fsinfo = getushort(b710->bpbFSInfo);
	} else {
		pmp->pm_rootdirblk = pmp->pm_fatblk +
			(pmp->pm_FATs * pmp->pm_FATsecs);
		pmp->pm_rootdirsize = (pmp->pm_RootDirEnts * sizeof(struct direntry)
				       + pmp->pm_BytesPerSec - 1)
			/ pmp->pm_BytesPerSec;/* in sectors */
		pmp->pm_firstcluster = pmp->pm_rootdirblk + pmp->pm_rootdirsize;
	}

	pmp->pm_nmbrofclusters = (pmp->pm_HugeSectors - pmp->pm_firstcluster) /
	    SecPerClust;
	pmp->pm_maxcluster = pmp->pm_nmbrofclusters + 1;
	pmp->pm_fatsize = pmp->pm_FATsecs * pmp->pm_BytesPerSec;

	if (argp->flags & MSDOSFSMNT_GEMDOSFS) {
		if (pmp->pm_nmbrofclusters <= (0xff0 - 2)) {
			pmp->pm_fatmask = FAT12_MASK;
			pmp->pm_fatmult = 3;
			pmp->pm_fatdiv = 2;
		} else {
			pmp->pm_fatmask = FAT16_MASK;
			pmp->pm_fatmult = 2;
			pmp->pm_fatdiv = 1;
		}
	} else if (pmp->pm_fatmask == 0) {
		if (pmp->pm_maxcluster
		    <= ((CLUST_RSRVD - CLUST_FIRST) & FAT12_MASK)) {
			/*
			 * This will usually be a floppy disk. This size makes
			 * sure that one FAT entry will not be split across
			 * multiple blocks.
			 */
			pmp->pm_fatmask = FAT12_MASK;
			pmp->pm_fatmult = 3;
			pmp->pm_fatdiv = 2;
		} else {
			pmp->pm_fatmask = FAT16_MASK;
			pmp->pm_fatmult = 2;
			pmp->pm_fatdiv = 1;
		}
	}
	if (FAT12(pmp))
		pmp->pm_fatblocksize = 3 * pmp->pm_BytesPerSec;
	else
		pmp->pm_fatblocksize = MAXBSIZE;

	pmp->pm_fatblocksec = pmp->pm_fatblocksize / pmp->pm_BytesPerSec;
	pmp->pm_bnshift = ffs(pmp->pm_BytesPerSec) - 1;

	/*
	 * Compute mask and shift value for isolating cluster relative byte
	 * offsets and cluster numbers from a file offset.
	 */
	pmp->pm_bpcluster = SecPerClust * pmp->pm_BytesPerSec;
	pmp->pm_crbomask = pmp->pm_bpcluster - 1;
	pmp->pm_cnshift = ffs(pmp->pm_bpcluster) - 1;

	/*
	 * Check for valid cluster size
	 * must be a power of 2
	 */
	if (pmp->pm_bpcluster ^ (1 << pmp->pm_cnshift)) {
		DPRINTF(("bpcluster %lu cnshift %lu\n", 
		    pmp->pm_bpcluster, pmp->pm_cnshift));
		error = EINVAL;
		goto error_exit;
	}

	/*
	 * Cluster size must be within limit of MAXBSIZE.
	 * Many FAT filesystems will not have clusters larger than
	 * 32KiB due to limits in Windows versions before Vista.
	 */
	if (pmp->pm_bpcluster > MAXBSIZE) {
		DPRINTF(("bpcluster %lu > MAXBSIZE %d\n",
		    pmp->pm_bpcluster, MAXBSIZE));
		error = EINVAL;
		goto error_exit;
	}

	/*
	 * Release the bootsector buffer.
	 */
	brelse(bp, BC_AGE);
	bp = NULL;

	/*
	 * Check FSInfo.
	 */
	if (pmp->pm_fsinfo) {
		struct fsinfo *fp;

		/*
		 * XXX	If the fsinfo block is stored on media with
		 *	2KB or larger sectors, is the fsinfo structure
		 *	padded at the end or in the middle?
		 */
		if ((error = bread(devvp, de_bn2kb(pmp, pmp->pm_fsinfo),
		    pmp->pm_BytesPerSec, NOCRED, 0, &bp)) != 0)
			goto error_exit;
		fp = (struct fsinfo *)bp->b_data;
		if (!memcmp(fp->fsisig1, "RRaA", 4)
		    && !memcmp(fp->fsisig2, "rrAa", 4)
		    && !memcmp(fp->fsisig3, "\0\0\125\252", 4)
		    && !memcmp(fp->fsisig4, "\0\0\125\252", 4))
			pmp->pm_nxtfree = getulong(fp->fsinxtfree);
		else
			pmp->pm_fsinfo = 0;
		brelse(bp, 0);
		bp = NULL;
	}

	/*
	 * Check and validate (or perhaps invalidate?) the fsinfo structure?
	 * XXX
	 */
	if (pmp->pm_fsinfo) {
		if ((pmp->pm_nxtfree == 0xffffffffUL) ||
		    (pmp->pm_nxtfree > pmp->pm_maxcluster))
			pmp->pm_fsinfo = 0;
	}

	/*
	 * Allocate memory for the bitmap of allocated clusters, and then
	 * fill it in.
	 */
	pmp->pm_inusemap = malloc(((pmp->pm_maxcluster + N_INUSEBITS)
				   / N_INUSEBITS)
				  * sizeof(*pmp->pm_inusemap),
				  M_MSDOSFSFAT, M_WAITOK);

	/*
	 * fillinusemap() needs pm_devvp.
	 */
	pmp->pm_dev = dev;
	pmp->pm_devvp = devvp;

	/*
	 * Have the inuse map filled in.
	 */
	if ((error = fillinusemap(pmp)) != 0) {
		DPRINTF(("fillinusemap %d\n", error));
		goto error_exit;
	}

	/*
	 * If they want FAT updates to be synchronous then let them suffer
	 * the performance degradation in exchange for the on disk copy of
	 * the FAT being correct just about all the time.  I suppose this
	 * would be a good thing to turn on if the kernel is still flakey.
	 */
	if (mp->mnt_flag & MNT_SYNCHRONOUS)
		pmp->pm_flags |= MSDOSFSMNT_WAITONFAT;

	/*
	 * Finish up.
	 */
	if (ronly)
		pmp->pm_flags |= MSDOSFSMNT_RONLY;
	else
		pmp->pm_fmod = 1;
	mp->mnt_data = pmp;
	mp->mnt_stat.f_fsidx.__fsid_val[0] = (long)dev;
	mp->mnt_stat.f_fsidx.__fsid_val[1] = makefstype(MOUNT_MSDOS);
	mp->mnt_stat.f_fsid = mp->mnt_stat.f_fsidx.__fsid_val[0];
	mp->mnt_stat.f_namemax = MSDOSFS_NAMEMAX(pmp);
	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_dev_bshift = pmp->pm_bnshift;
	mp->mnt_fs_bshift = pmp->pm_cnshift;

	/*
	 * If we ever do quotas for DOS filesystems this would be a place
	 * to fill in the info in the msdosfsmount structure. You dolt,
	 * quotas on dos filesystems make no sense because files have no
	 * owners on dos filesystems. of course there is some empty space
	 * in the directory entry where we could put uid's and gid's.
	 */

	spec_node_setmountedfs(devvp, mp);

	return (0);

error_exit:
	fstrans_unmount(mp);
	if (bp)
		brelse(bp, BC_AGE);
	if (pmp) {
		if (pmp->pm_inusemap)
			free(pmp->pm_inusemap, M_MSDOSFSFAT);
		free(pmp, M_MSDOSFSMNT);
		mp->mnt_data = NULL;
	}
	return (error);
}
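
The pm_cnshift/pm_crbomask pair computed above (and sanity-checked by the bpcluster ^ (1 << cnshift) power-of-two test) lets a file offset be split into a cluster number and an in-cluster byte offset with one shift and one mask. A minimal sketch, assuming 512-byte sectors and 8 sectors per cluster:

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned long bytes_per_sec = 512;	/* assumed geometry */
	unsigned long sec_per_clust = 8;
	unsigned long bpcluster = sec_per_clust * bytes_per_sec;	/* 4096 */
	unsigned long crbomask = bpcluster - 1;
	unsigned long cnshift = ffs(bpcluster) - 1;	/* log2 of a power of two */

	unsigned long off = 10000;	/* arbitrary file offset */

	printf("offset %lu -> cluster %lu, byte %lu within it\n",
	       off, off >> cnshift, off & crbomask);	/* cluster 2, byte 1808 */
	return 0;
}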
Example #17
int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
{
	struct mlx4_en_rx_ring *ring;
	int i;
	int ring_ind;
	int err;
	int stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
					DS_SIZE * priv->num_frags);

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = &priv->rx_ring[ring_ind];

		ring->prod = 0;
		ring->cons = 0;
		ring->actual_size = 0;
		ring->cqn = priv->rx_cq[ring_ind].mcq.cqn;

		ring->stride = stride;
		if (ring->stride <= TXBB_SIZE)
			ring->buf += TXBB_SIZE;

		ring->log_stride = ffs(ring->stride) - 1;
		ring->buf_size = ring->size * ring->stride;

		memset(ring->buf, 0, ring->buf_size);
		mlx4_en_update_rx_prod_db(ring);

		/* Initialize all descriptors */
		for (i = 0; i < ring->size; i++)
			mlx4_en_init_rx_desc(priv, ring, i);

		/* Initialize page allocators */
		err = mlx4_en_init_allocator(priv, ring);
		if (err) {
			en_err(priv, "Failed initializing ring allocator\n");
			if (ring->stride <= TXBB_SIZE)
				ring->buf -= TXBB_SIZE;
			ring_ind--;
			goto err_allocator;
		}
	}
	err = mlx4_en_fill_rx_buffers(priv);
	if (err)
		goto err_buffers;

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = &priv->rx_ring[ring_ind];

		ring->size_mask = ring->actual_size - 1;
		mlx4_en_update_rx_prod_db(ring);
	}

	return 0;

err_buffers:
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
		mlx4_en_free_rx_buf(priv, &priv->rx_ring[ring_ind]);

	ring_ind = priv->rx_ring_num - 1;
err_allocator:
	while (ring_ind >= 0) {
		if (priv->rx_ring[ring_ind].stride <= TXBB_SIZE)
			priv->rx_ring[ring_ind].buf -= TXBB_SIZE;
		mlx4_en_destroy_allocator(priv, &priv->rx_ring[ring_ind]);
		ring_ind--;
	}
	return err;
}
Example #18
/*
 * Read information about /boot's inode, then put this and filesystem
 * parameters from the superblock into pbr_symbols.
 */
static int
getbootparams(char *boot, int devfd, struct disklabel *dl)
{
	int		fd;
	struct stat	dsb, fsb;
	struct statfs	fssb;
	struct partition *pp;
	struct fs	*fs;
	char		*buf;
	u_int		blk, *ap;
	struct ufs1_dinode *ip1;
	struct ufs2_dinode *ip2;
	int		ndb;
	int		mib[3];
	size_t		size;
	dev_t		dev;
	int		skew;

	/*
	 * Open 2nd-level boot program and record enough details about
	 * where it is on the filesystem represented by `devfd'
	 * (inode block, offset within that block, and various filesystem
	 * parameters essentially taken from the superblock) for biosboot
	 * to be able to load it later.
	 */

	/* Make sure the (probably new) boot file is on disk. */
	sync(); sleep(1);

	if ((fd = open(boot, O_RDONLY)) < 0)
		err(1, "open: %s", boot);

	if (fstatfs(fd, &fssb) != 0)
		err(1, "statfs: %s", boot);

	if (strncmp(fssb.f_fstypename, "ffs", MFSNAMELEN) &&
	    strncmp(fssb.f_fstypename, "ufs", MFSNAMELEN) )
		errx(1, "%s: not on an FFS filesystem", boot);

#if 0
	if (read(fd, &eh, sizeof(eh)) != sizeof(eh))
		errx(1, "read: %s", boot);

	if (!IS_ELF(eh)) {
		errx(1, "%s: bad magic: 0x%02x%02x%02x%02x",
		    boot,
		    eh.e_ident[EI_MAG0], eh.e_ident[EI_MAG1],
		    eh.e_ident[EI_MAG2], eh.e_ident[EI_MAG3]);
	}
#endif

	if (fsync(fd) != 0)
		err(1, "fsync: %s", boot);

	if (fstat(fd, &fsb) != 0)
		err(1, "fstat: %s", boot);

	if (fstat(devfd, &dsb) != 0)
		err(1, "fstat: %d", devfd);

	/* Check devices. */
	mib[0] = CTL_MACHDEP;
	mib[1] = CPU_CHR2BLK;
	mib[2] = dsb.st_rdev;
	size = sizeof(dev);
	if (sysctl(mib, 3, &dev, &size, NULL, 0) >= 0) {
		if (fsb.st_dev / MAXPARTITIONS != dev / MAXPARTITIONS)
			errx(1, "cross-device install");
	}

	pp = &dl->d_partitions[DISKPART(fsb.st_dev)];
	close(fd);

	sbread(devfd, DL_SECTOBLK(dl, DL_GETPOFFSET(pp)), &fs);

	/* Read inode. */
	if ((buf = malloc(fs->fs_bsize)) == NULL)
		err(1, NULL);

	blk = fsbtodb(fs, ino_to_fsba(fs, fsb.st_ino));

	/*
	 * Have the inode.  Figure out how many filesystem blocks (not disk
	 * sectors) there are for biosboot to load.
	 */
	devread(devfd, buf, DL_SECTOBLK(dl, pp->p_offset) + blk,
	    fs->fs_bsize, "inode");
	if (fs->fs_magic == FS_UFS2_MAGIC) {
		ip2 = (struct ufs2_dinode *)(buf) +
		    ino_to_fsbo(fs, fsb.st_ino);
		ndb = howmany(ip2->di_size, fs->fs_bsize);
		ap = (u_int *)ip2->di_db;
		skew = sizeof(u_int32_t);
	} else {
		ip1 = (struct ufs1_dinode *)(buf) +
		    ino_to_fsbo(fs, fsb.st_ino);
		ndb = howmany(ip1->di_size, fs->fs_bsize);
		ap = (u_int *)ip1->di_db;
		skew = 0;
	}
	if (ndb <= 0)
		errx(1, "No blocks to load");

	/*
	 * Now set the values that will need to go into biosboot
	 * (the partition boot record, a.k.a. the PBR).
	 */
	sym_set_value(pbr_symbols, "_fs_bsize_p", (fs->fs_bsize / 16));
	sym_set_value(pbr_symbols, "_fs_bsize_s", (fs->fs_bsize / 
	    dl->d_secsize));

	/*
	 * fs_fsbtodb is the shift to convert fs_fsize to DEV_BSIZE. The
	 * ino_to_fsba() return value is the number of fs_fsize units.
	 * Calculate the shift to convert fs_fsize into physical sectors,
	 * which are added to p_offset to get the sector address BIOS
	 * will use.
	 *
	 * N.B.: ASSUMES fs_fsize is a power of 2 of d_secsize.
	 */
	sym_set_value(pbr_symbols, "_fsbtodb",
	    ffs(fs->fs_fsize / dl->d_secsize) - 1);

	if (pp->p_offseth != 0)
		errx(1, "partition offset too high");
	sym_set_value(pbr_symbols, "_p_offset", pp->p_offset);
	sym_set_value(pbr_symbols, "_inodeblk",
	    ino_to_fsba(fs, fsb.st_ino));
	sym_set_value(pbr_symbols, "_inodedbl",
	    ((((char *)ap) - buf) + INODEOFF));
	sym_set_value(pbr_symbols, "_nblocks", ndb);
	sym_set_value(pbr_symbols, "_blkskew", skew);

	if (verbose) {
		fprintf(stderr, "%s is %d blocks x %d bytes\n",
		    boot, ndb, fs->fs_bsize);
		fprintf(stderr, "fs block shift %u; part offset %llu; "
		    "inode block %lld, offset %u\n",
		    ffs(fs->fs_fsize / dl->d_secsize) - 1,
		    DL_GETPOFFSET(pp), ino_to_fsba(fs, fsb.st_ino),
		    (unsigned int)((((char *)ap) - buf) + INODEOFF));
		fprintf(stderr, "expecting %d-bit fs blocks (skew %d)\n",
		    skew ? 64 : 32, skew);
	}

	return 0;
}
Example #19
static void
upload_gs_state(struct brw_context *brw)
{
    const struct brw_stage_state *stage_state = &brw->gs.base;
    const int max_threads_shift = brw->is_haswell ?
                                  HSW_GS_MAX_THREADS_SHIFT : GEN6_GS_MAX_THREADS_SHIFT;
    /* BRW_NEW_GEOMETRY_PROGRAM */
    bool active = brw->geometry_program;
    /* CACHE_NEW_GS_PROG */
    const struct brw_vec4_prog_data *prog_data = &brw->gs.prog_data->base;

    /* BRW_NEW_GS_BINDING_TABLE */
    BEGIN_BATCH(2);
    OUT_BATCH(_3DSTATE_BINDING_TABLE_POINTERS_GS << 16 | (2 - 2));
    OUT_BATCH(stage_state->bind_bo_offset);
    ADVANCE_BATCH();

    /* CACHE_NEW_SAMPLER */
    BEGIN_BATCH(2);
    OUT_BATCH(_3DSTATE_SAMPLER_STATE_POINTERS_GS << 16 | (2 - 2));
    OUT_BATCH(stage_state->sampler_offset);
    ADVANCE_BATCH();

    gen7_upload_constant_state(brw, stage_state, active, _3DSTATE_CONSTANT_GS);

    if (active) {
        BEGIN_BATCH(7);
        OUT_BATCH(_3DSTATE_GS << 16 | (7 - 2));
        OUT_BATCH(stage_state->prog_offset);
        OUT_BATCH(((ALIGN(stage_state->sampler_count, 4)/4) <<
                   GEN6_GS_SAMPLER_COUNT_SHIFT));

        if (brw->gs.prog_data->base.total_scratch) {
            OUT_RELOC(stage_state->scratch_bo,
                      I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                      ffs(brw->gs.prog_data->base.total_scratch) - 11);
        } else {
            OUT_BATCH(0);
        }

        uint32_t dw4 =
            ((brw->gs.prog_data->output_vertex_size_hwords * 2 - 1) <<
             GEN7_GS_OUTPUT_VERTEX_SIZE_SHIFT) |
            (brw->gs.prog_data->output_topology <<
             GEN7_GS_OUTPUT_TOPOLOGY_SHIFT) |
            (prog_data->urb_read_length <<
             GEN6_GS_URB_READ_LENGTH_SHIFT) |
            (0 << GEN6_GS_URB_ENTRY_READ_OFFSET_SHIFT) |
            (prog_data->dispatch_grf_start_reg <<
             GEN6_GS_DISPATCH_START_GRF_SHIFT);
        uint32_t dw5 =
            ((brw->max_gs_threads - 1) << max_threads_shift) |
            (brw->gs.prog_data->control_data_header_size_hwords <<
             GEN7_GS_CONTROL_DATA_HEADER_SIZE_SHIFT) |
            GEN7_GS_DISPATCH_MODE_DUAL_OBJECT |
            GEN6_GS_STATISTICS_ENABLE |
            (brw->gs.prog_data->include_primitive_id ?
             GEN7_GS_INCLUDE_PRIMITIVE_ID : 0) |
            GEN7_GS_ENABLE;
        uint32_t dw6 = 0;

        if (brw->is_haswell) {
            dw6 |= brw->gs.prog_data->control_data_format <<
                   HSW_GS_CONTROL_DATA_FORMAT_SHIFT;
        } else {
            dw5 |= brw->gs.prog_data->control_data_format <<
                   IVB_GS_CONTROL_DATA_FORMAT_SHIFT;
        }

        OUT_BATCH(dw4);
        OUT_BATCH(dw5);
        OUT_BATCH(dw6);
        ADVANCE_BATCH();
    } else {
        BEGIN_BATCH(7);
        OUT_BATCH(_3DSTATE_GS << 16 | (7 - 2));
        OUT_BATCH(0); /* prog_bo */
        OUT_BATCH((0 << GEN6_GS_SAMPLER_COUNT_SHIFT) |
                  (0 << GEN6_GS_BINDING_TABLE_ENTRY_COUNT_SHIFT));
        OUT_BATCH(0); /* scratch space base offset */
        OUT_BATCH((1 << GEN6_GS_DISPATCH_START_GRF_SHIFT) |
                  (0 << GEN6_GS_URB_READ_LENGTH_SHIFT) |
                  GEN7_GS_INCLUDE_VERTEX_HANDLES |
                  (0 << GEN6_GS_URB_ENTRY_READ_OFFSET_SHIFT));
        OUT_BATCH((0 << GEN6_GS_MAX_THREADS_SHIFT) |
                  GEN6_GS_STATISTICS_ENABLE);
        OUT_BATCH(0);
        ADVANCE_BATCH();
    }
}
Example #20
static int mwl_mac80211_add_interface(struct ieee80211_hw *hw,
				      struct ieee80211_vif *vif)
{
	struct mwl_priv *priv = hw->priv;
	struct mwl_vif *mwl_vif;
	u32 macids_supported;
	int macid;

	switch (vif->type) {
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_MESH_POINT:
		macids_supported = priv->ap_macids_supported;
		break;
	case NL80211_IFTYPE_STATION:
		macids_supported = priv->sta_macids_supported;
		break;
	default:
		return -EINVAL;
	}

	macid = ffs(macids_supported & ~priv->macids_used);

	if (!macid) {
		wiphy_warn(hw->wiphy, "no macid can be allocated\n");
		return -EBUSY;
	}
	macid--;

	/* Setup driver private area. */
	mwl_vif = mwl_dev_get_vif(vif);
	memset(mwl_vif, 0, sizeof(*mwl_vif));
	mwl_vif->macid = macid;
	mwl_vif->seqno = 0;
	mwl_vif->is_hw_crypto_enabled = false;
	mwl_vif->beacon_info.valid = false;
	mwl_vif->iv16 = 1;
	mwl_vif->iv32 = 0;
	mwl_vif->keyidx = 0;

	switch (vif->type) {
	case NL80211_IFTYPE_AP:
		ether_addr_copy(mwl_vif->bssid, vif->addr);
		mwl_fwcmd_set_new_stn_add_self(hw, vif);
		break;
	case NL80211_IFTYPE_MESH_POINT:
		ether_addr_copy(mwl_vif->bssid, vif->addr);
		mwl_fwcmd_set_new_stn_add_self(hw, vif);
		break;
	case NL80211_IFTYPE_STATION:
		ether_addr_copy(mwl_vif->sta_mac, vif->addr);
		mwl_fwcmd_bss_start(hw, vif, true);
		mwl_fwcmd_set_infra_mode(hw, vif);
		mwl_fwcmd_set_mac_addr_client(hw, vif, vif->addr);
		break;
	default:
		return -EINVAL;
	}

	priv->macids_used |= 1 << mwl_vif->macid;
	spin_lock_bh(&priv->vif_lock);
	list_add_tail(&mwl_vif->list, &priv->vif_list);
	spin_unlock_bh(&priv->vif_lock);

	return 0;
}
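
The macid allocation above is a one-line bitmap allocator: ffs(supported & ~used) returns the lowest free slot, with 0 doubling as the all-busy result. A standalone sketch of the pattern, masks invented:

#include <stdio.h>
#include <strings.h>	/* ffs() */

static unsigned int used;

/* Return a free id out of `supported', or -1 if all are taken. */
static int alloc_id(unsigned int supported)
{
	int id = ffs(supported & ~used);

	if (!id)
		return -1;	/* ffs() == 0 means no free bit left */
	used |= 1u << (id - 1);
	return id - 1;
}

int main(void)
{
	unsigned int supported = 0x07;	/* hypothetical: ids 0..2 exist */
	int i;

	for (i = 0; i < 4; i++)
		printf("alloc -> %d\n", alloc_id(supported));	/* 0, 1, 2, -1 */
	return 0;
}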
Example #21
static void ipt_ulog_packet(struct net *net,
			    unsigned int hooknum,
			    const struct sk_buff *skb,
			    const struct net_device *in,
			    const struct net_device *out,
			    const struct ipt_ulog_info *loginfo,
			    const char *prefix)
{
	ulog_buff_t *ub;
	ulog_packet_msg_t *pm;
	size_t size, copy_len;
	struct nlmsghdr *nlh;
	struct timeval tv;
	struct ulog_net *ulog = ulog_pernet(net);

	/* ffs == find first bit set, necessary because userspace
	 * is already shifting groupnumber, but we need unshifted.
	 * ffs() returns [1..32], we need [0..31] */
	unsigned int groupnum = ffs(loginfo->nl_group) - 1;

	/* calculate the size of the skb needed */
	if (loginfo->copy_range == 0 || loginfo->copy_range > skb->len)
		copy_len = skb->len;
	else
		copy_len = loginfo->copy_range;

	size = nlmsg_total_size(sizeof(*pm) + copy_len);

	ub = &ulog->ulog_buffers[groupnum];

	spin_lock_bh(&ulog->lock);

	if (!ub->skb) {
		if (!(ub->skb = ulog_alloc_skb(size)))
			goto alloc_failure;
	} else if (ub->qlen >= loginfo->qthreshold ||
		   size > skb_tailroom(ub->skb)) {
		/* either the queue len is too high or we don't have
		 * enough room in nlskb left. send it to userspace. */

		ulog_send(ulog, groupnum);

		if (!(ub->skb = ulog_alloc_skb(size)))
			goto alloc_failure;
	}

	pr_debug("qlen %d, qthreshold %Zu\n", ub->qlen, loginfo->qthreshold);

	nlh = nlmsg_put(ub->skb, 0, ub->qlen, ULOG_NL_EVENT,
			sizeof(*pm)+copy_len, 0);
	if (!nlh) {
		pr_debug("error during nlmsg_put\n");
		goto out_unlock;
	}
	ub->qlen++;

	pm = nlmsg_data(nlh);
	memset(pm, 0, sizeof(*pm));

	/* We might not have a timestamp, get one */
	if (skb->tstamp.tv64 == 0)
		__net_timestamp((struct sk_buff *)skb);

	/* copy hook, prefix, timestamp, payload, etc. */
	pm->data_len = copy_len;
	tv = ktime_to_timeval(skb->tstamp);
	put_unaligned(tv.tv_sec, &pm->timestamp_sec);
	put_unaligned(tv.tv_usec, &pm->timestamp_usec);
	put_unaligned(skb->mark, &pm->mark);
	pm->hook = hooknum;
	if (prefix != NULL) {
		strncpy(pm->prefix, prefix, sizeof(pm->prefix) - 1);
		pm->prefix[sizeof(pm->prefix) - 1] = '\0';
	}
	else if (loginfo->prefix[0] != '\0')
		strncpy(pm->prefix, loginfo->prefix, sizeof(pm->prefix));

	if (in && in->hard_header_len > 0 &&
	    skb->mac_header != skb->network_header &&
	    in->hard_header_len <= ULOG_MAC_LEN) {
		memcpy(pm->mac, skb_mac_header(skb), in->hard_header_len);
		pm->mac_len = in->hard_header_len;
	} else
		pm->mac_len = 0;

	if (in)
		strncpy(pm->indev_name, in->name, sizeof(pm->indev_name));

	if (out)
		strncpy(pm->outdev_name, out->name, sizeof(pm->outdev_name));

	/* copy_len <= skb->len, so can't fail. */
	if (skb_copy_bits(skb, 0, pm->payload, copy_len) < 0)
		BUG();

	/* check if we are building multi-part messages */
	if (ub->qlen > 1)
		ub->lastnlh->nlmsg_flags |= NLM_F_MULTI;

	ub->lastnlh = nlh;

	/* if timer isn't already running, start it */
	if (!timer_pending(&ub->timer)) {
		ub->timer.expires = jiffies + flushtimeout * HZ / 100;
		add_timer(&ub->timer);
	}

	/* if threshold is reached, send message to userspace */
	if (ub->qlen >= loginfo->qthreshold) {
		if (loginfo->qthreshold > 1)
			nlh->nlmsg_type = NLMSG_DONE;
		ulog_send(ulog, groupnum);
	}
out_unlock:
	spin_unlock_bh(&ulog->lock);

	return;

alloc_failure:
	pr_debug("Error building netlink message\n");
	spin_unlock_bh(&ulog->lock);
}
Example #22
int bcm47xxnflash_ops_bcm4706_init(struct bcm47xxnflash *b47n)
{
	struct nand_chip *nand_chip = (struct nand_chip *)&b47n->nand_chip;
	int err;
	u32 freq;
	u16 clock;
	u8 w0, w1, w2, w3, w4;

	unsigned long chipsize; /* MiB */
	u8 tbits, col_bits, col_size, row_bits, row_bsize;
	u32 val;

	nand_chip->legacy.select_chip = bcm47xxnflash_ops_bcm4706_select_chip;
	nand_chip->legacy.cmd_ctrl = bcm47xxnflash_ops_bcm4706_cmd_ctrl;
	nand_chip->legacy.dev_ready = bcm47xxnflash_ops_bcm4706_dev_ready;
	b47n->nand_chip.legacy.cmdfunc = bcm47xxnflash_ops_bcm4706_cmdfunc;
	b47n->nand_chip.legacy.read_byte = bcm47xxnflash_ops_bcm4706_read_byte;
	b47n->nand_chip.legacy.read_buf = bcm47xxnflash_ops_bcm4706_read_buf;
	b47n->nand_chip.legacy.write_buf = bcm47xxnflash_ops_bcm4706_write_buf;
	b47n->nand_chip.legacy.set_features = nand_get_set_features_notsupp;
	b47n->nand_chip.legacy.get_features = nand_get_set_features_notsupp;

	nand_chip->legacy.chip_delay = 50;
	b47n->nand_chip.bbt_options = NAND_BBT_USE_FLASH;
	b47n->nand_chip.ecc.mode = NAND_ECC_NONE; /* TODO: implement ECC */

	/* Enable NAND flash access */
	bcma_cc_set32(b47n->cc, BCMA_CC_4706_FLASHSCFG,
		      BCMA_CC_4706_FLASHSCFG_NF1);

	/* Configure wait counters */
	if (b47n->cc->status & BCMA_CC_CHIPST_4706_PKG_OPTION) {
		/* 400 MHz */
		freq = 400000000 / 4;
	} else {
		freq = bcma_chipco_pll_read(b47n->cc, 4);
		freq = (freq & 0xFFF) >> 3;
		/* Fixed reference clock 25 MHz and m = 2 */
		freq = (freq * 25000000 / 2) / 4;
	}
	clock = freq / 1000000;
	w0 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(15, clock);
	w1 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(20, clock);
	w2 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(10, clock);
	w3 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(10, clock);
	w4 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(100, clock);
	bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_WAITCNT0,
			(w4 << 24 | w3 << 18 | w2 << 12 | w1 << 6 | w0));

	/* Scan NAND */
	err = nand_scan(&b47n->nand_chip, 1);
	if (err) {
		pr_err("Could not scan NAND flash: %d\n", err);
		goto exit;
	}

	/* Configure FLASH */
	chipsize = b47n->nand_chip.chipsize >> 20;
	tbits = ffs(chipsize); /* find first bit set */
	if (!tbits || tbits != fls(chipsize)) {
		pr_err("Invalid flash size: 0x%lX\n", chipsize);
		err = -ENOTSUPP;
		goto exit;
	}
	tbits += 19; /* Broadcom increases *index* by 20, we increase *pos* */

	col_bits = b47n->nand_chip.page_shift + 1;
	col_size = (col_bits + 7) / 8;

	row_bits = tbits - col_bits + 1;
	row_bsize = (row_bits + 7) / 8;

	val = ((row_bsize - 1) << 6) | ((col_size - 1) << 4) | 2;
	bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_CONF, val);

exit:
	if (err)
		bcma_cc_mask32(b47n->cc, BCMA_CC_4706_FLASHSCFG,
			       ~BCMA_CC_4706_FLASHSCFG_NF1);
	return err;
}
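
The tbits != fls(chipsize) test above exploits the fact that ffs() and fls() agree only when exactly one bit is set, i.e. when the size is a power of two. A userspace sketch; fls() is a kernel helper, so a portable stand-in is written out here:

#include <stdio.h>
#include <strings.h>	/* ffs() */

/* Portable stand-in for the kernel's fls(): 1-based index of the
 * highest set bit, 0 when no bit is set. */
static int my_fls(unsigned long x)
{
	int bit = 0;

	while (x) {
		bit++;
		x >>= 1;
	}
	return bit;
}

static int is_power_of_two(unsigned long x)
{
	return x != 0 && ffs(x) == my_fls(x);
}

int main(void)
{
	printf("64 MiB: %d, 96 MiB: %d\n",
	       is_power_of_two(64), is_power_of_two(96));	/* 1, 0 */
	return 0;
}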
/*
 * Common code for mount and mountroot
 */
int
ext2fs_mountfs(struct vnode *devvp, struct mount *mp)
{
	struct lwp *l = curlwp;
	struct ufsmount *ump;
	struct buf *bp;
	struct ext2fs *fs;
	struct m_ext2fs *m_fs;
	dev_t dev;
	int error, i, ronly;
	kauth_cred_t cred;

	dev = devvp->v_rdev;
	cred = l->l_cred;

	/* Flush out any old buffers remaining from a previous use. */
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	error = vinvalbuf(devvp, V_SAVE, cred, l, 0, 0);
	VOP_UNLOCK(devvp);
	if (error)
		return (error);

	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;

	bp = NULL;
	ump = NULL;

	/* Read the superblock from disk, and swap it directly. */
	error = bread(devvp, SBLOCK, SBSIZE, 0, &bp);
	if (error)
		goto out;
	fs = (struct ext2fs *)bp->b_data;
	m_fs = kmem_zalloc(sizeof(struct m_ext2fs), KM_SLEEP);
	e2fs_sbload(fs, &m_fs->e2fs);

	brelse(bp, 0);
	bp = NULL;

	/* Once swapped, validate and fill in the superblock. */
	error = ext2fs_sbfill(m_fs, ronly);
	if (error) {
		kmem_free(m_fs, sizeof(struct m_ext2fs));
		goto out;
	}
	m_fs->e2fs_ronly = ronly;

	ump = kmem_zalloc(sizeof(*ump), KM_SLEEP);
	ump->um_fstype = UFS1;
	ump->um_ops = &ext2fs_ufsops;
	ump->um_e2fs = m_fs;

	if (ronly == 0) {
		if (m_fs->e2fs.e2fs_state == E2FS_ISCLEAN)
			m_fs->e2fs.e2fs_state = 0;
		else
			m_fs->e2fs.e2fs_state = E2FS_ERRORS;
		m_fs->e2fs_fmod = 1;
	}

	/* XXX: should be added in ext2fs_sbfill()? */
	m_fs->e2fs_gd = kmem_alloc(m_fs->e2fs_ngdb * m_fs->e2fs_bsize, KM_SLEEP);
	for (i = 0; i < m_fs->e2fs_ngdb; i++) {
		error = bread(devvp,
		    EXT2_FSBTODB(m_fs, m_fs->e2fs.e2fs_first_dblock +
		    1 /* superblock */ + i),
		    m_fs->e2fs_bsize, 0, &bp);
		if (error) {
			kmem_free(m_fs->e2fs_gd,
			    m_fs->e2fs_ngdb * m_fs->e2fs_bsize);
			goto out;
		}
		e2fs_cgload((struct ext2_gd *)bp->b_data,
		    &m_fs->e2fs_gd[
			i * m_fs->e2fs_bsize / sizeof(struct ext2_gd)],
		    m_fs->e2fs_bsize);
		brelse(bp, 0);
		bp = NULL;
	}

	mp->mnt_data = ump;
	mp->mnt_stat.f_fsidx.__fsid_val[0] = (long)dev;
	mp->mnt_stat.f_fsidx.__fsid_val[1] = makefstype(MOUNT_EXT2FS);
	mp->mnt_stat.f_fsid = mp->mnt_stat.f_fsidx.__fsid_val[0];
	mp->mnt_stat.f_namemax = EXT2FS_MAXNAMLEN;
	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_dev_bshift = DEV_BSHIFT;	/* XXX */
	mp->mnt_fs_bshift = m_fs->e2fs_bshift;
	mp->mnt_iflag |= IMNT_DTYPE;
	ump->um_flags = 0;
	ump->um_mountp = mp;
	ump->um_dev = dev;
	ump->um_devvp = devvp;
	ump->um_nindir = EXT2_NINDIR(m_fs);
	ump->um_lognindir = ffs(EXT2_NINDIR(m_fs)) - 1;
	ump->um_bptrtodb = m_fs->e2fs_fsbtodb;
	ump->um_seqinc = 1; /* no frags */
	ump->um_maxsymlinklen = EXT2_MAXSYMLINKLEN;
	ump->um_dirblksiz = m_fs->e2fs_bsize;
	ump->um_maxfilesize = ((uint64_t)0x80000000 * m_fs->e2fs_bsize - 1);
	spec_node_setmountedfs(devvp, mp);
	return (0);

out:
	if (bp != NULL)
		brelse(bp, 0);
	if (ump) {
		kmem_free(ump->um_e2fs, sizeof(struct m_ext2fs));
		kmem_free(ump, sizeof(*ump));
		mp->mnt_data = NULL;
	}
	return (error);
}
Example #24
static void intc_irq_dispatch(void)
{
#ifdef IRQ_TIME_MONITOR_DEBUG
	unsigned long long time_num[2];
	unsigned long long time_sub;
	unsigned int	irq_con;
#endif
	unsigned long ipr[2], gpr[2];
	unsigned long ipr_intc;
#ifdef CONFIG_SMP
	unsigned long cpuid = smp_processor_id();
	unsigned long nextcpu;
#endif
	ipr_intc = readl(intc_base + IPR_OFF);
#ifdef CONFIG_SMP

	ipr[0] = ipr_intc & cpu_irq_unmask[cpuid];
#else
	ipr[0] = ipr_intc;
#endif
	gpr[0] = ipr[0] & 0x3f000;
	ipr[0] &= ~0x3f000;

	ipr[1] = readl(intc_base + PART_OFF + IPR_OFF);
	gpr[1] = ipr[1] & 0x9f0f0004;
	ipr[1] &= ~0x9f0f0004;

#ifdef IRQ_TIME_MONITOR_DEBUG
	time_num[0] = sched_clock();
	if(gpr[1] & 0x1f000000) {
		generic_handle_irq(ffs(gpr[1]) +31 +IRQ_INTC_BASE);
		irq_con = ffs(gpr[1]) +31 +IRQ_INTC_BASE;
	}else {

		if (ipr[0]) {
			generic_handle_irq(ffs(ipr[0]) -1 +IRQ_INTC_BASE);
			irq_con = ffs(ipr[0]) -1 +IRQ_INTC_BASE;
		}
		if (gpr[0]) {
			generic_handle_irq(ffs(gpr[0]) -1 +IRQ_INTC_BASE);
			irq_con = ffs(gpr[0]) -1 +IRQ_INTC_BASE;
		}

		if (ipr[1]) {
			generic_handle_irq(ffs(ipr[1]) +31 +IRQ_INTC_BASE);
			irq_con = ffs(ipr[1]) +31 +IRQ_INTC_BASE;
		}
		if (gpr[1]) {
			generic_handle_irq(ffs(gpr[1]) +31 +IRQ_INTC_BASE);
			irq_con = ffs(gpr[1]) +31 +IRQ_INTC_BASE;
		}
	}
	time_num[1] = sched_clock();
	time_sub = time_num[1] - time_num[0];
	if (echo_success && (time_sub > time_monitor[499]))
		time_over[irq_con] += 1;
	if (time_sub > time_monitor[irq_con])
		time_monitor[irq_con] = time_sub;
#else

	if (ipr[0]) {
		do_IRQ(ffs(ipr[0]) -1 +IRQ_INTC_BASE);
	}
	if (gpr[0]) {
		generic_handle_irq(ffs(gpr[0]) -1 +IRQ_INTC_BASE);
	}

	if (ipr[1]) {
		do_IRQ(ffs(ipr[1]) +31 +IRQ_INTC_BASE);
	}
	if (gpr[1]) {
		generic_handle_irq(ffs(gpr[1]) +31 +IRQ_INTC_BASE);
	}
#endif


#ifdef CONFIG_SMP
	nextcpu = switch_cpu_irq(cpuid);
	if(nextcpu & 0x80000000) {
		nextcpu &= ~0x80000000;
		ipr_intc = ipr_intc & cpu_irq_affinity[nextcpu];
		if(ipr_intc) {
			cpu_mask_affinity[nextcpu] |= ipr_intc;
			writel(ipr_intc, intc_base + IMSR_OFF);
		}
	}else if(nextcpu) {
		if(cpu_mask_affinity[nextcpu]) {
			writel(cpu_mask_affinity[nextcpu], intc_base + IMCR_OFF);
			cpu_mask_affinity[nextcpu] = 0;
		}
	}
#endif
}
Example #25
void
dwarf2_finish (void)
{
  segT line_seg;
  struct line_seg *s;
  segT info_seg;
  int emit_other_sections = 0;

  info_seg = bfd_get_section_by_name (stdoutput, ".debug_info");
  emit_other_sections = info_seg == NULL || !seg_not_empty_p (info_seg);

  if (!all_segs && emit_other_sections)
    /* There is no line information and no non-empty .debug_info
       section.  */
    return;

  /* Calculate the size of an address for the target machine.  */
  sizeof_address = DWARF2_ADDR_SIZE (stdoutput);

  /* Create and switch to the line number section.  */
  line_seg = subseg_new (".debug_line", 0);
  bfd_set_section_flags (stdoutput, line_seg, SEC_READONLY | SEC_DEBUGGING);

  /* For each subsection, chain the debug entries together.  */
  for (s = all_segs; s; s = s->next)
    {
      struct line_subseg *ss = s->head;
      struct line_entry **ptail = ss->ptail;

      while ((ss = ss->next) != NULL)
	{
	  *ptail = ss->head;
	  ptail = ss->ptail;
	}
    }

  out_debug_line (line_seg);

  /* If this is assembler generated line info, and there is no
     debug_info already, we need .debug_info and .debug_abbrev
     sections as well.  */
  if (emit_other_sections)
    {
      segT abbrev_seg;
      segT aranges_seg;

      assert (all_segs);
      
      info_seg = subseg_new (".debug_info", 0);
      abbrev_seg = subseg_new (".debug_abbrev", 0);
      aranges_seg = subseg_new (".debug_aranges", 0);

      bfd_set_section_flags (stdoutput, info_seg,
			     SEC_READONLY | SEC_DEBUGGING);
      bfd_set_section_flags (stdoutput, abbrev_seg,
			     SEC_READONLY | SEC_DEBUGGING);
      bfd_set_section_flags (stdoutput, aranges_seg,
			     SEC_READONLY | SEC_DEBUGGING);

      record_alignment (aranges_seg, ffs (2 * sizeof_address) - 1);

      out_debug_aranges (aranges_seg, info_seg);
      out_debug_abbrev (abbrev_seg);
      out_debug_info (info_seg, abbrev_seg, line_seg);
    }
}
/*ARGSUSED*/
Bool
EstablishNewConnections(ClientPtr clientUnused, pointer closure)
{
    fd_set  readyconnections;     /* set of listeners that are ready */
    int curconn;                  /* fd of listener that's ready */
    register int newconn;         /* fd of new client */
    CARD32 connect_time;
    register int i;
    register ClientPtr client;
    register OsCommPtr oc;
    fd_set tmask;

    XFD_ANDSET (&tmask, (fd_set*)closure, &WellKnownConnections);
    XFD_COPYSET(&tmask, &readyconnections);
    if (!XFD_ANYSET(&readyconnections))
	return TRUE;
    connect_time = GetTimeInMillis();
    /* kill off stragglers */
    for (i=1; i<currentMaxClients; i++)
    {
	if ((client = clients[i]))
	{
	    oc = (OsCommPtr)(client->osPrivate);
	    if ((oc && (oc->conn_time != 0) &&
		(connect_time - oc->conn_time) >= TimeOutValue) || 
		(client->noClientException != Success && !client->clientGone))
		CloseDownClient(client);     
	}
    }
#ifndef WIN32
    for (i = 0; i < howmany(XFD_SETSIZE, NFDBITS); i++)
    {
      while (readyconnections.fds_bits[i])
#else
      for (i = 0; i < XFD_SETCOUNT(&readyconnections); i++) 
#endif
      {
	XtransConnInfo trans_conn, new_trans_conn;
	int status;

#ifndef WIN32
	curconn = ffs (readyconnections.fds_bits[i]) - 1;
	readyconnections.fds_bits[i] &= ~((fd_mask)1 << curconn);
	curconn += (i * (sizeof(fd_mask)*8));
#else
	curconn = XFD_FD(&readyconnections, i);
#endif

	if ((trans_conn = lookup_trans_conn (curconn)) == NULL)
	    continue;

	if ((new_trans_conn = _XSERVTransAccept (trans_conn, &status)) == NULL)
	    continue;

	newconn = _XSERVTransGetConnectionNumber (new_trans_conn);

	if (newconn < lastfdesc)
	{
		int clientid;
#if !defined(WIN32)
  		clientid = ConnectionTranslation[newconn];
#else
  		clientid = GetConnectionTranslation(newconn);
#endif
		if(clientid && (client = clients[clientid]))
 			CloseDownClient(client);
	}

	_XSERVTransSetOption(new_trans_conn, TRANS_NONBLOCKING, 1);

	if (!AllocNewConnection (new_trans_conn, newconn, connect_time))
	{
	    ErrorConnMax(new_trans_conn);
	    _XSERVTransClose(new_trans_conn);
	}
      }
#ifndef WIN32
    }
#endif
    return TRUE;
}
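
The accept loop above scans the fd_set one fds_bits word at a time, letting ffs() find each ready descriptor and clearing its bit before continuing. A simplified sketch over a plain word array (fd_mask layout varies by platform, so this is illustrative only):

#include <stdio.h>
#include <strings.h>	/* ffs() */

#define NWORDS 2
#define BITS_PER_WORD ((int)(sizeof(unsigned int) * 8))

int main(void)
{
	/* hypothetical readiness bitmap: fds 1, 30 and 33 are ready */
	unsigned int ready[NWORDS] = { (1u << 1) | (1u << 30), 1u << 1 };
	int i;

	for (i = 0; i < NWORDS; i++) {
		while (ready[i]) {
			int bit = ffs(ready[i]) - 1;

			ready[i] &= ~(1u << bit);		/* consume it */
			printf("accepting on fd %d\n", bit + i * BITS_PER_WORD);
		}
	}
	return 0;
}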
Example #27
int
isic_probe_avma1(struct isa_device *dev)
{
	struct isic_softc *sc = &isic_sc[dev->id_unit];
	u_char savebyte;
	u_char byte;
	
	/* check max unit range */
	
	if(dev->id_unit >= ISIC_MAXUNIT)
	{
		printf("isic%d: Error, unit %d >= ISIC_MAXUNIT for AVM A1/Fritz!\n",
				dev->id_unit, dev->id_unit);
		return(0);	
	}	
	sc->sc_unit = dev->id_unit;

	/* check IRQ validity */
	
	switch(ffs(dev->id_irq)-1)
	{
		case 3:
		case 4:
		case 5:
		case 6:
		case 7:
		case 8:
		case 10:
		case 11:
		case 12:
		case 13:
		case 14:
		case 15:
			break;
			
		default:
			printf("isic%d: Error, invalid IRQ [%d] specified for AVM A1/Fritz!\n",
				dev->id_unit, ffs(dev->id_irq)-1);
			return(0);
			break;
	}		
	sc->sc_irq = dev->id_irq;

	/* check if memory addr specified */

	if(dev->id_maddr)
	{
		printf("isic%d: Error, mem addr 0x%lx specified for AVM A1/Fritz!\n",
			dev->id_unit, (u_long)dev->id_maddr);
		return(0);
	}
		
	dev->id_msize = 0;
	
	/* check if we got an iobase */

	switch(dev->id_iobase)
	{
		case 0x200:
		case 0x240:
		case 0x300:
		case 0x340:		
			break;
			
		default:
			printf("isic%d: Error, invalid iobase 0x%x specified for AVM A1/Fritz!\n",
				dev->id_unit, dev->id_iobase);
			return(0);
			break;
	}
	sc->sc_port = dev->id_iobase;

	sc->clearirq = NULL;
	sc->readreg = avma1_read_reg;
	sc->writereg = avma1_write_reg;

	sc->readfifo = avma1_read_fifo;
	sc->writefifo = avma1_write_fifo;

	/* setup card type */

	sc->sc_cardtyp = CARD_TYPEP_AVMA1;

	/* setup IOM bus type */
	
	sc->sc_bustyp = BUS_TYPE_IOM2;

	sc->sc_ipac = 0;
	sc->sc_bfifolen = HSCX_FIFO_LEN;

	/* setup ISAC and HSCX base addr */
	
	ISAC_BASE = (caddr_t)dev->id_iobase + 0x1400 - 0x20;

	HSCX_A_BASE = (caddr_t)dev->id_iobase + 0x400 - 0x20;
	HSCX_B_BASE = (caddr_t)dev->id_iobase + 0xc00 - 0x20;

	/* 
	 * Read HSCX A/B VSTR.
	 * Expected value for AVM A1 is 0x04 or 0x05 and for the
	 * AVM Fritz!Card is 0x05 in the least significant bits.
	 */

	if( (((HSCX_READ(0, H_VSTR) & 0xf) != 0x5) &&
	     ((HSCX_READ(0, H_VSTR) & 0xf) != 0x4))	||
            (((HSCX_READ(1, H_VSTR) & 0xf) != 0x5) &&
	     ((HSCX_READ(1, H_VSTR) & 0xf) != 0x4)) )  
	{
		printf("isic%d: HSCX VSTR test failed for AVM A1/Fritz\n",
			dev->id_unit);
		printf("isic%d: HSC0: VSTR: %#x\n",
			dev->id_unit, HSCX_READ(0, H_VSTR));
		printf("isic%d: HSC1: VSTR: %#x\n",
			dev->id_unit, HSCX_READ(1, H_VSTR));
		return (0);
	}                   

	/* AVM A1 or Fritz! control register bits:	*/
	/*        read                write		*/
	/* 0x01  hscx irq*           RESET		*/
	/* 0x02  isac irq*           clear counter1	*/
	/* 0x04  counter irq*        clear counter2	*/
	/* 0x08  always 0            irq enable		*/
	/* 0x10  read test bit       set test bit	*/
	/* 0x20  always 0            unused		*/

	/*
	 * XXX the following test may be destructive; to limit the damage,
	 * we save the byte first and, should the test fail, write the
	 * saved byte back before returning.
	 */

	savebyte = inb(dev->id_iobase + AVM_CONF_REG);
	
	/* write low to test bit */

	outb(dev->id_iobase + AVM_CONF_REG, 0x00);
	
	/* test bit and next higher and lower bit must be 0 */

	if((byte = inb(dev->id_iobase + AVM_CONF_REG) & 0x38) != 0x00)
	{
		printf("isic%d: Error, probe-1 failed, 0x%02x should be 0x00 for AVM A1/Fritz!\n",
				dev->id_unit, byte);
		outb(dev->id_iobase + AVM_CONF_REG, savebyte);
		return (0);
	}

	/* write high to test bit */

	outb(dev->id_iobase + AVM_CONF_REG, 0x10);
	
	/* test bit must be high, next higher and lower bit must be 0 */

	if((byte = inb(dev->id_iobase + AVM_CONF_REG) & 0x38) != 0x10)
	{
		printf("isic%d: Error, probe-2 failed, 0x%02x should be 0x10 for AVM A1/Fritz!\n",
				dev->id_unit, byte);
		outb(dev->id_iobase + AVM_CONF_REG, savebyte);
		return (0);
	}

	return (1);
}
Exemplo n.º 28
0
int gsc_try_fmt_mplane(struct gsc_ctx *ctx, struct v4l2_format *f)
{
	struct gsc_dev *gsc = ctx->gsc_dev;
	struct gsc_variant *variant = gsc->variant;
	struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
	struct gsc_fmt *fmt;
	u32 max_w, max_h, mod_x, mod_y;
	u32 min_w, min_h, tmp_w, tmp_h;
	int i;

	gsc_dbg("user put w: %d, h: %d", pix_mp->width, pix_mp->height);

	fmt = find_format(&pix_mp->pixelformat, NULL, 0);
	if (!fmt) {
		gsc_err("pixelformat format (0x%X) invalid\n", pix_mp->pixelformat);
		return -EINVAL;
	}

	if (pix_mp->field == V4L2_FIELD_ANY)
		pix_mp->field = V4L2_FIELD_NONE;
	else if (pix_mp->field != V4L2_FIELD_NONE) {
		gsc_err("Not supported field order(%d)\n", pix_mp->field);
		return -EINVAL;
	}

	max_w = variant->pix_max->target_rot_dis_w;
	max_h = variant->pix_max->target_rot_dis_h;
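	/*
	 * v4l_bound_align_image() expects alignments as log2 values, hence
	 * the ffs() conversions; YUV 4:2:0 formats need the full height
	 * alignment, all other formats only half of it. The alignment
	 * constraints are the same for both queue directions, only the
	 * minimum size limits differ.
	 */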
	mod_x = ffs(variant->pix_align->org_w) - 1;
	if (is_yuv420(fmt->pixelformat))
		mod_y = ffs(variant->pix_align->org_h) - 1;
	else
		mod_y = ffs(variant->pix_align->org_h) - 2;

	if (V4L2_TYPE_IS_OUTPUT(f->type)) {
		min_w = variant->pix_min->org_w;
		min_h = variant->pix_min->org_h;
	} else {
		min_w = variant->pix_min->target_rot_dis_w;
		min_h = variant->pix_min->target_rot_dis_h;
	}
	gsc_dbg("mod_x: %d, mod_y: %d, max_w: %d, max_h = %d",
	     mod_x, mod_y, max_w, max_h);
	/* Save the requested size so we can detect whether it gets
	   adjusted to fit the hardware's capabilities. */
	tmp_w = pix_mp->width;
	tmp_h = pix_mp->height;

	v4l_bound_align_image(&pix_mp->width, min_w, max_w, mod_x,
		&pix_mp->height, min_h, max_h, mod_y, 0);
	if (tmp_w != pix_mp->width || tmp_h != pix_mp->height)
		gsc_info("Image size has been modified from %dx%d to %dx%d",
			 tmp_w, tmp_h, pix_mp->width, pix_mp->height);

	pix_mp->num_planes = fmt->num_planes;

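	/*
	 * With automatic CSC equation selection, pick the equation from the
	 * resolution: widths of 1280 and up are treated as HD (REC709),
	 * everything else as SD (SMPTE170M).
	 */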
	if (ctx->gsc_ctrls.csc_eq_mode->val)
		ctx->gsc_ctrls.csc_eq->val =
			(pix_mp->width >= 1280) ? 1 : 0;
	if (ctx->gsc_ctrls.csc_eq->val) /* HD */
		pix_mp->colorspace = V4L2_COLORSPACE_REC709;
	else	/* SD */
		pix_mp->colorspace = V4L2_COLORSPACE_SMPTE170M;

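	/* fmt->depth[] is in bits per pixel, so shift right by 3 to get
	 * bytes per line. */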
	for (i = 0; i < pix_mp->num_planes; ++i) {
		int bpl = (pix_mp->width * fmt->depth[i]) >> 3;
		pix_mp->plane_fmt[i].bytesperline = bpl;
		pix_mp->plane_fmt[i].sizeimage = bpl * pix_mp->height;

		gsc_dbg("[%d]: bpl: %d, sizeimage: %d",
		    i, bpl, pix_mp->plane_fmt[i].sizeimage);
	}

	return 0;
}
Exemplo n.º 29
0
/*
 * This function fills in xfs_mount_t fields based on mount args.
 * Note: the superblock has _not_ yet been read in.
 *
 * Note that this function leaks the various device name allocations on
 * failure.  The caller takes care of them.
 */
STATIC int
xfs_parseargs(
	struct xfs_mount	*mp,
	char			*options)
{
	struct super_block	*sb = mp->m_super;
	char			*this_char, *value, *eov;
	int			dsunit = 0;
	int			dswidth = 0;
	int			iosize = 0;
	__uint8_t		iosizelog = 0;

	/*
	 * set up the mount name first so all the errors will refer to the
	 * correct device.
	 */
	mp->m_fsname = kstrndup(sb->s_id, MAXNAMELEN, GFP_KERNEL);
	if (!mp->m_fsname)
		return ENOMEM;
	mp->m_fsname_len = strlen(mp->m_fsname) + 1;

	/*
	 * Copy binary VFS mount flags we are interested in.
	 */
	if (sb->s_flags & MS_RDONLY)
		mp->m_flags |= XFS_MOUNT_RDONLY;
	if (sb->s_flags & MS_DIRSYNC)
		mp->m_flags |= XFS_MOUNT_DIRSYNC;
	if (sb->s_flags & MS_SYNCHRONOUS)
		mp->m_flags |= XFS_MOUNT_WSYNC;

	/*
	 * Set some default flags that could be cleared by the mount option
	 * parsing.
	 */
	mp->m_flags |= XFS_MOUNT_BARRIER;
	mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
	mp->m_flags |= XFS_MOUNT_SMALL_INUMS;

	/*
	 * These can be overridden by the mount option parsing.
	 */
	mp->m_logbufs = -1;
	mp->m_logbsize = -1;

	if (!options)
		goto done;

	while ((this_char = strsep(&options, ",")) != NULL) {
		if (!*this_char)
			continue;
		if ((value = strchr(this_char, '=')) != NULL)
			*value++ = 0;

		if (!strcmp(this_char, MNTOPT_LOGBUFS)) {
			if (!value || !*value) {
				xfs_warn(mp, "%s option requires an argument",
					this_char);
				return EINVAL;
			}
			mp->m_logbufs = simple_strtoul(value, &eov, 10);
		} else if (!strcmp(this_char, MNTOPT_LOGBSIZE)) {
			if (!value || !*value) {
				xfs_warn(mp, "%s option requires an argument",
					this_char);
				return EINVAL;
			}
			mp->m_logbsize = suffix_strtoul(value, &eov, 10);
		} else if (!strcmp(this_char, MNTOPT_LOGDEV)) {
			if (!value || !*value) {
				xfs_warn(mp, "%s option requires an argument",
					this_char);
				return EINVAL;
			}
			mp->m_logname = kstrndup(value, MAXNAMELEN, GFP_KERNEL);
			if (!mp->m_logname)
				return ENOMEM;
		} else if (!strcmp(this_char, MNTOPT_MTPT)) {
			xfs_warn(mp, "%s option not allowed on this system",
				this_char);
			return EINVAL;
		} else if (!strcmp(this_char, MNTOPT_RTDEV)) {
			if (!value || !*value) {
				xfs_warn(mp, "%s option requires an argument",
					this_char);
				return EINVAL;
			}
			mp->m_rtname = kstrndup(value, MAXNAMELEN, GFP_KERNEL);
			if (!mp->m_rtname)
				return ENOMEM;
		} else if (!strcmp(this_char, MNTOPT_BIOSIZE)) {
			if (!value || !*value) {
				xfs_warn(mp, "%s option requires an argument",
					this_char);
				return EINVAL;
			}
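			/* Record the preferred I/O size as a log2 value;
			 * ffs() picks the lowest set bit of the given
			 * byte count. */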
			iosize = simple_strtoul(value, &eov, 10);
			iosizelog = ffs(iosize) - 1;
		} else if (!strcmp(this_char, MNTOPT_ALLOCSIZE)) {
			if (!value || !*value) {
				xfs_warn(mp, "%s option requires an argument",
					this_char);
				return EINVAL;
			}
			iosize = suffix_strtoul(value, &eov, 10);
			iosizelog = ffs(iosize) - 1;
		} else if (!strcmp(this_char, MNTOPT_GRPID) ||
			   !strcmp(this_char, MNTOPT_BSDGROUPS)) {
			mp->m_flags |= XFS_MOUNT_GRPID;
		} else if (!strcmp(this_char, MNTOPT_NOGRPID) ||
			   !strcmp(this_char, MNTOPT_SYSVGROUPS)) {
			mp->m_flags &= ~XFS_MOUNT_GRPID;
		} else if (!strcmp(this_char, MNTOPT_WSYNC)) {
			mp->m_flags |= XFS_MOUNT_WSYNC;
		} else if (!strcmp(this_char, MNTOPT_NORECOVERY)) {
			mp->m_flags |= XFS_MOUNT_NORECOVERY;
		} else if (!strcmp(this_char, MNTOPT_NOALIGN)) {
			mp->m_flags |= XFS_MOUNT_NOALIGN;
		} else if (!strcmp(this_char, MNTOPT_SWALLOC)) {
			mp->m_flags |= XFS_MOUNT_SWALLOC;
		} else if (!strcmp(this_char, MNTOPT_SUNIT)) {
			if (!value || !*value) {
				xfs_warn(mp, "%s option requires an argument",
					this_char);
				return EINVAL;
			}
			dsunit = simple_strtoul(value, &eov, 10);
		} else if (!strcmp(this_char, MNTOPT_SWIDTH)) {
			if (!value || !*value) {
				xfs_warn(mp, "%s option requires an argument",
					this_char);
				return EINVAL;
			}
			dswidth = simple_strtoul(value, &eov, 10);
		} else if (!strcmp(this_char, MNTOPT_64BITINODE)) {
			mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
#if !XFS_BIG_INUMS
			xfs_warn(mp, "%s option not allowed on this system",
				this_char);
			return EINVAL;
#endif
		} else if (!strcmp(this_char, MNTOPT_NOUUID)) {
			mp->m_flags |= XFS_MOUNT_NOUUID;
		} else if (!strcmp(this_char, MNTOPT_BARRIER)) {
			mp->m_flags |= XFS_MOUNT_BARRIER;
		} else if (!strcmp(this_char, MNTOPT_NOBARRIER)) {
			mp->m_flags &= ~XFS_MOUNT_BARRIER;
		} else if (!strcmp(this_char, MNTOPT_IKEEP)) {
			mp->m_flags |= XFS_MOUNT_IKEEP;
		} else if (!strcmp(this_char, MNTOPT_NOIKEEP)) {
			mp->m_flags &= ~XFS_MOUNT_IKEEP;
		} else if (!strcmp(this_char, MNTOPT_LARGEIO)) {
			mp->m_flags &= ~XFS_MOUNT_COMPAT_IOSIZE;
		} else if (!strcmp(this_char, MNTOPT_NOLARGEIO)) {
			mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
		} else if (!strcmp(this_char, MNTOPT_ATTR2)) {
			mp->m_flags |= XFS_MOUNT_ATTR2;
		} else if (!strcmp(this_char, MNTOPT_NOATTR2)) {
			mp->m_flags &= ~XFS_MOUNT_ATTR2;
			mp->m_flags |= XFS_MOUNT_NOATTR2;
		} else if (!strcmp(this_char, MNTOPT_FILESTREAM)) {
			mp->m_flags |= XFS_MOUNT_FILESTREAMS;
		} else if (!strcmp(this_char, MNTOPT_NOQUOTA)) {
			mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
			mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
			mp->m_qflags &= ~XFS_ALL_QUOTA_ACTIVE;
		} else if (!strcmp(this_char, MNTOPT_QUOTA) ||
			   !strcmp(this_char, MNTOPT_UQUOTA) ||
			   !strcmp(this_char, MNTOPT_USRQUOTA)) {
			mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
					 XFS_UQUOTA_ENFD);
		} else if (!strcmp(this_char, MNTOPT_QUOTANOENF) ||
			   !strcmp(this_char, MNTOPT_UQUOTANOENF)) {
			mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE);
			mp->m_qflags &= ~XFS_UQUOTA_ENFD;
		} else if (!strcmp(this_char, MNTOPT_PQUOTA) ||
			   !strcmp(this_char, MNTOPT_PRJQUOTA)) {
			mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
					 XFS_OQUOTA_ENFD);
		} else if (!strcmp(this_char, MNTOPT_PQUOTANOENF)) {
			mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE);
			mp->m_qflags &= ~XFS_OQUOTA_ENFD;
		} else if (!strcmp(this_char, MNTOPT_GQUOTA) ||
			   !strcmp(this_char, MNTOPT_GRPQUOTA)) {
			mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
					 XFS_OQUOTA_ENFD);
		} else if (!strcmp(this_char, MNTOPT_GQUOTANOENF)) {
			mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
			mp->m_qflags &= ~XFS_OQUOTA_ENFD;
		} else if (!strcmp(this_char, MNTOPT_DELAYLOG)) {
			xfs_warn(mp,
	"delaylog is the default now, option is deprecated.");
		} else if (!strcmp(this_char, MNTOPT_NODELAYLOG)) {
			xfs_warn(mp,
	"nodelaylog support has been removed, option is deprecated.");
		} else if (!strcmp(this_char, MNTOPT_DISCARD)) {
			mp->m_flags |= XFS_MOUNT_DISCARD;
		} else if (!strcmp(this_char, MNTOPT_NODISCARD)) {
			mp->m_flags &= ~XFS_MOUNT_DISCARD;
		} else if (!strcmp(this_char, "ihashsize")) {
			xfs_warn(mp,
	"ihashsize no longer used, option is deprecated.");
		} else if (!strcmp(this_char, "osyncisdsync")) {
			xfs_warn(mp,
	"osyncisdsync has no effect, option is deprecated.");
		} else if (!strcmp(this_char, "osyncisosync")) {
			xfs_warn(mp,
	"osyncisosync has no effect, option is deprecated.");
		} else if (!strcmp(this_char, "irixsgid")) {
			xfs_warn(mp,
	"irixsgid is now a sysctl(2) variable, option is deprecated.");
		} else {
			xfs_warn(mp, "unknown mount option [%s].", this_char);
			return EINVAL;
		}
	}

	/*
	 * The norecovery flag requires a read-only mount.
	 */
	if ((mp->m_flags & XFS_MOUNT_NORECOVERY) &&
	    !(mp->m_flags & XFS_MOUNT_RDONLY)) {
		xfs_warn(mp, "no-recovery mounts must be read-only.");
		return EINVAL;
	}

	if ((mp->m_flags & XFS_MOUNT_NOALIGN) && (dsunit || dswidth)) {
		xfs_warn(mp,
	"sunit and swidth options incompatible with the noalign option");
		return EINVAL;
	}

#ifndef CONFIG_XFS_QUOTA
	if (XFS_IS_QUOTA_RUNNING(mp)) {
		xfs_warn(mp, "quota support not available in this kernel.");
		return EINVAL;
	}
#endif

	if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) &&
	    (mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE))) {
		xfs_warn(mp, "cannot mount with both project and group quota");
		return EINVAL;
	}

	if ((dsunit && !dswidth) || (!dsunit && dswidth)) {
		xfs_warn(mp, "sunit and swidth must be specified together");
		return EINVAL;
	}

	if (dsunit && (dswidth % dsunit != 0)) {
		xfs_warn(mp,
	"stripe width (%d) must be a multiple of the stripe unit (%d)",
			dswidth, dsunit);
		return EINVAL;
	}

done:
	if (!(mp->m_flags & XFS_MOUNT_NOALIGN)) {
		/*
		 * At this point the superblock has not been read
		 * in, therefore we do not know the block size.
		 * Before the mount call ends we will convert
		 * these to FSBs.
		 */
		if (dsunit) {
			mp->m_dalign = dsunit;
			mp->m_flags |= XFS_MOUNT_RETERR;
		}

		if (dswidth)
			mp->m_swidth = dswidth;
	}

	if (mp->m_logbufs != -1 &&
	    mp->m_logbufs != 0 &&
	    (mp->m_logbufs < XLOG_MIN_ICLOGS ||
	     mp->m_logbufs > XLOG_MAX_ICLOGS)) {
		xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
			mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
		return XFS_ERROR(EINVAL);
	}
	if (mp->m_logbsize != -1 &&
	    mp->m_logbsize !=  0 &&
	    (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
	     mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
	     !is_power_of_2(mp->m_logbsize))) {
		xfs_warn(mp,
			"invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
			mp->m_logbsize);
		return XFS_ERROR(EINVAL);
	}

	if (iosizelog) {
		if (iosizelog > XFS_MAX_IO_LOG ||
		    iosizelog < XFS_MIN_IO_LOG) {
			xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
				iosizelog, XFS_MIN_IO_LOG,
				XFS_MAX_IO_LOG);
			return XFS_ERROR(EINVAL);
		}

		mp->m_flags |= XFS_MOUNT_DFLT_IOSIZE;
		mp->m_readio_log = iosizelog;
		mp->m_writeio_log = iosizelog;
	}

	return 0;
}
Exemplo n.º 30
0
/*
 * Find a UNARY op that satisfies the needs.
 * For now, the destination is always a register.
 * Both source and dest types must match, but only source (left)
 * shape is of interest.
 */
int
finduni(NODE *p, int cookie)
{
	extern int *qtable[];
	struct optab *q;
	NODE *l, *r;
	int i, shl = 0, num = 4;
	int *ixp, idx = 0;
	int sh;

	F2DEBUG(("finduni tree: %s\n", prcook(cookie)));
	F2WALK(p);

	l = getlr(p, 'L');
	if (p->n_op == CALL || p->n_op == FORTCALL || p->n_op == STCALL)
		r = p;
	else
		r = getlr(p, 'R');
	ixp = qtable[p->n_op];
	for (i = 0; ixp[i] >= 0; i++) {
		q = &table[ixp[i]];

		F2DEBUG(("finduni: ixp %d\n", ixp[i]));
		if (!acceptable(q))		/* target-dependent filter */
			continue;

		if (ttype(l->n_type, q->ltype) == 0)
			continue; /* Type must be correct */

		F2DEBUG(("finduni got left type\n"));
		if (ttype(r->n_type, q->rtype) == 0)
			continue; /* Type must be correct */

		F2DEBUG(("finduni got types\n"));
		if ((shl = chcheck(l, q->lshape, q->rewrite & RLEFT)) == SRNOPE)
			continue;

		F2DEBUG(("finduni got shapes %d\n", shl));

		if ((cookie & q->visit) == 0)	/* check correct return value */
			continue;		/* XXX - should check needs */

		/* avoid clobbering of longlived regs */
		/* let register allocator coalesce */
		if ((q->rewrite & RLEFT) && (shl == SRDIR) /* && isreg(l) */)
			shl = SRREG;

		F2DEBUG(("finduni got cookie\n"));
		if (q->needs & REWRITE)
			break;	/* Done here */

		if (shl >= num)
			continue;
		num = shl;
		idx = ixp[i];

		if (shl == SRDIR)
			break;
	}

	if (num == 4) {
		F2DEBUG(("finduni failed\n"));
		if (setuni(p, cookie))
			return FRETRY;
		return FFAIL;
	}
	F2DEBUG(("finduni entry %d(%s)\n", idx, srtyp[num]));
	q = &table[idx];

	sh = shswitch(-1, p->n_left, q->lshape, cookie,
	    q->rewrite & RLEFT, num);
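	/* If shswitch() did not settle on a register class, take the first
	 * class permitted by both the cookie and this table entry; fall
	 * back to class 0 if there is none. */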
	if (sh == -1)
		sh = ffs(cookie & q->visit & INREGS)-1;
	if (sh == -1)
		sh = 0;

	F2DEBUG(("finduni: node %p (%s)\n", p, prcook(1 << sh)));
	p->n_su = MKIDX(idx, 0);
	SCLASS(p->n_su, sh);
	return sh;
}