Example 1
/**
 * \brief sets the memory allocation mask.
 *
 * \param nodemask  bitmap representing the nodes
 *
 * The task will only allocate memory from the nodes set in nodemask.
 *
 * An empty mask or disallowed nodes in the mask will result in an error.
 */
errval_t numa_set_membind(struct bitmap *nodemask)
{
    assert(numa_alloc_bind_mask);
    assert(numa_alloc_interleave_mask);

    if (!nodemask) {
        return NUMA_ERR_BITMAP_PARSE;
    }

    if (bitmap_get_nbits(nodemask) < NUMA_MAX_NUMNODES) {
        NUMA_WARNING("supplied interleave mask (%p) has to less bits!", nodemask);
        return NUMA_ERR_BITMAP_RANGE;
    }

    /* copy new membind mask and clear out invalid bits */
    bitmap_copy(numa_alloc_bind_mask, nodemask);
    bitmap_clear_range(numa_alloc_bind_mask, numa_num_configured_nodes(),
                       bitmap_get_nbits(numa_alloc_bind_mask));

    if (bitmap_get_weight(numa_alloc_bind_mask) == 0) {
        /* cannot bind to an empty node set; restore the all-nodes mask */
        bitmap_copy(numa_alloc_bind_mask, numa_all_nodes_ptr);
        return NUMA_ERR_NUMA_MEMBIND;
    }

    /* disable interleaving mode */
    bitmap_clear_all(numa_alloc_interleave_mask);

    return SYS_ERR_OK;
}
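A minimal caller sketch for the function above. It assumes the numa_allocate_nodemask() helper seen in Examples 11 and 12, a bitmap_set_bit() primitive from the same bitmap library, and the usual err_is_fail() check; none of these are confirmed by this snippet.

/* Sketch: bind all future allocations of the task to node 0. */
struct bitmap *mask = numa_allocate_nodemask();
if (mask == NULL) {
    /* handle allocation failure */
}
bitmap_set_bit(mask, 0);               /* assumed helper: allow node 0 */
errval_t err = numa_set_membind(mask);
if (err_is_fail(err)) {
    /* NUMA_ERR_BITMAP_RANGE or NUMA_ERR_NUMA_MEMBIND */
}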
Example 2
File: fwprop.c Project: AHelper/gcc
void
single_def_use_dom_walker::before_dom_children (basic_block bb)
{
  int bb_index = bb->index;
  struct df_md_bb_info *md_bb_info = df_md_get_bb_info (bb_index);
  struct df_lr_bb_info *lr_bb_info = df_lr_get_bb_info (bb_index);
  rtx_insn *insn;

  bitmap_copy (local_md, &md_bb_info->in);
  bitmap_copy (local_lr, &lr_bb_info->in);

  /* Push a marker for the leave_block callback.  */
  reg_defs_stack.safe_push (NULL);

  process_uses (df_get_artificial_uses (bb_index), DF_REF_AT_TOP);
  process_defs (df_get_artificial_defs (bb_index), DF_REF_AT_TOP);

  /* We don't call df_simulate_initialize_forwards, as it may overestimate
     the live registers if there are unused artificial defs.  We prefer
     liveness to be underestimated.  */

  FOR_BB_INSNS (bb, insn)
    if (INSN_P (insn))
      {
        unsigned int uid = INSN_UID (insn);
        process_uses (DF_INSN_UID_USES (uid), 0);
        process_uses (DF_INSN_UID_EQ_USES (uid), 0);
        process_defs (DF_INSN_UID_DEFS (uid), 0);
        df_simulate_one_insn_forwards (bb, insn, local_lr);
      }

  process_uses (df_get_artificial_uses (bb_index), 0);
  process_defs (df_get_artificial_defs (bb_index), 0);
}
Example 3
File: lcm.c Project: Droufte/gcc
static void
compute_farthest (struct edge_list *edge_list, int n_exprs,
		  sbitmap *st_avout, sbitmap *st_avin, sbitmap *st_antin,
		  sbitmap *kill, sbitmap *farthest)
{
  int x, num_edges;
  basic_block pred, succ;

  num_edges = NUM_EDGES (edge_list);

  auto_sbitmap difference (n_exprs), temp_bitmap (n_exprs);
  for (x = 0; x < num_edges; x++)
    {
      pred = INDEX_EDGE_PRED_BB (edge_list, x);
      succ = INDEX_EDGE_SUCC_BB (edge_list, x);
      if (succ == EXIT_BLOCK_PTR_FOR_FN (cfun))
	bitmap_copy (farthest[x], st_avout[pred->index]);
      else
	{
	  if (pred == ENTRY_BLOCK_PTR_FOR_FN (cfun))
	    bitmap_clear (farthest[x]);
	  else
	    {
	      bitmap_and_compl (difference, st_avout[pred->index],
				  st_antin[succ->index]);
	      bitmap_not (temp_bitmap, st_avin[succ->index]);
	      bitmap_and_or (farthest[x], difference,
				    kill[succ->index], temp_bitmap);
	    }
	}
    }
}
Example 4
/* Create a new policy */
static struct mempolicy *mpol_new(int mode, unsigned long *nodes)
{
	struct mempolicy *policy;

	PDprintk("setting mode %d nodes[0] %lx\n", mode, nodes[0]);
	if (mode == MPOL_DEFAULT)
		return NULL;
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	switch (mode) {
	case MPOL_INTERLEAVE:
		bitmap_copy(policy->v.nodes, nodes, MAX_NUMNODES);
		break;
	case MPOL_PREFERRED:
		policy->v.preferred_node = find_first_bit(nodes, MAX_NUMNODES);
		if (policy->v.preferred_node >= MAX_NUMNODES)
			policy->v.preferred_node = -1;
		break;
	case MPOL_BIND:
		policy->v.zonelist = bind_zonelist(nodes);
		if (policy->v.zonelist == NULL) {
			kmem_cache_free(policy_cache, policy);
			return ERR_PTR(-ENOMEM);
		}
		break;
	}
	policy->policy = mode;
	return policy;
}
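Note the mixed error convention above: NULL is the valid result for MPOL_DEFAULT, while allocation failures come back as ERR_PTR(-ENOMEM). A hypothetical caller therefore checks with IS_ERR() rather than for NULL, roughly:

	struct mempolicy *new = mpol_new(mode, nodes);
	if (IS_ERR(new))
		return PTR_ERR(new);	/* e.g. -ENOMEM */
	/* new may legitimately be NULL here (MPOL_DEFAULT) */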
Example 5
void s390_feat_bitmap_to_ascii(const S390FeatBitmap features, void *opaque,
                               void (*fn)(const char *name, void *opaque))
{
    S390FeatBitmap bitmap, tmp;
    S390FeatGroup group;
    S390Feat feat;

    bitmap_copy(bitmap, features, S390_FEAT_MAX);

    /* process whole groups first */
    for (group = 0; group < S390_FEAT_GROUP_MAX; group++) {
        const S390FeatGroupDef *def = s390_feat_group_def(group);

        bitmap_and(tmp, bitmap, def->feat, S390_FEAT_MAX);
        if (bitmap_equal(tmp, def->feat, S390_FEAT_MAX)) {
            bitmap_andnot(bitmap, bitmap, def->feat, S390_FEAT_MAX);
            fn(def->name, opaque);
        }
    }

    /* report leftovers as separate features */
    feat = find_first_bit(bitmap, S390_FEAT_MAX);
    while (feat < S390_FEAT_MAX) {
        fn(s390_feat_def(feat)->name, opaque);
        feat = find_next_bit(bitmap, S390_FEAT_MAX, feat + 1);
    }
}
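Because the opaque pointer is handed straight through to the callback, a printf-based consumer is all it takes to dump a feature list. A sketch (print_feature is a name invented here; features stands for any populated S390FeatBitmap):

static void print_feature(const char *name, void *opaque)
{
    printf("%s ", name);    /* opaque unused in this sketch */
}

/* usage: s390_feat_bitmap_to_ascii(features, NULL, print_feature); */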
Example 6
static S390CPUModel *get_max_cpu_model(Error **errp)
{
#ifndef CONFIG_USER_ONLY
    static S390CPUModel max_model;
    static bool cached;

    if (cached) {
        return &max_model;
    }

    if (kvm_enabled()) {
        kvm_s390_get_host_cpu_model(&max_model, errp);
    } else {
        /* TCG emulates a z900 */
        max_model.def = &s390_cpu_defs[0];
        bitmap_copy(max_model.features, max_model.def->default_feat,
                    S390_FEAT_MAX);
    }
    if (!*errp) {
        cached = true;
        return &max_model;
    }
#endif
    return NULL;
}
Example 7
/** Install I/O Permission bitmap.
 *
 * Current task's I/O permission bitmap, if any, is installed
 * in the current CPU's TSS.
 *
 * Interrupts must be disabled prior to this call.
 *
 */
void io_perm_bitmap_install(void)
{
    /* First, copy the I/O Permission Bitmap. */
    irq_spinlock_lock(&TASK->lock, false);

    size_t ver = TASK->arch.iomapver;
    size_t elements = TASK->arch.iomap.elements;

    if (elements > 0) {
        ASSERT(TASK->arch.iomap.bits);

        bitmap_t iomap;
        bitmap_initialize(&iomap, TSS_IOMAP_SIZE * 8,
                          CPU->arch.tss->iomap);
        bitmap_copy(&iomap, &TASK->arch.iomap, elements);

        /*
         * Set the trailing bits in the last byte of the map to disable
         * I/O access.
         */
        bitmap_set_range(&iomap, elements,
                         ALIGN_UP(elements, 8) - elements);

        /*
         * It is safe to set the trailing eight bits because of the
         * extra convenience byte in TSS_IOMAP_SIZE.
         */
        bitmap_set_range(&iomap, ALIGN_UP(elements, 8), 8);
    }

    irq_spinlock_unlock(&TASK->lock, false);

    /*
     * Second, adjust TSS segment limit.
     * Take the extra ending byte with all bits set into account.
     */
    ptr_16_64_t cpugdtr;
    gdtr_store(&cpugdtr);

    descriptor_t *gdt_p = (descriptor_t *) cpugdtr.base;
    size_t size = bitmap_size(elements);
    gdt_tss_setlimit(&gdt_p[TSS_DES], TSS_BASIC_SIZE + size);
    gdtr_load(&cpugdtr);

    /*
     * Before we load new TSS limit, the current TSS descriptor
     * type must be changed to describe inactive TSS.
     */
    tss_descriptor_t *tss_desc = (tss_descriptor_t *) &gdt_p[TSS_DES];
    tss_desc->type = AR_TSS;
    tr_load(GDT_SELECTOR(TSS_DES));

    /*
     * Update the generation count so that faults caused by
     * early accesses can be serviced.
     */
    CPU->arch.iomapver_copy = ver;
}
Example 8
/*
 * Fill a DRP IE's allocation fields from a MAS bitmap.
 */
static void uwb_drp_ie_from_bm(struct uwb_ie_drp *drp_ie,
			       struct uwb_mas_bm *mas)
{
	int z, i, num_fields = 0, next = 0;
	struct uwb_drp_alloc *zones;
	__le16 current_bmp;
	DECLARE_BITMAP(tmp_bmp, UWB_NUM_MAS);
	DECLARE_BITMAP(tmp_mas_bm, UWB_MAS_PER_ZONE);

	zones = drp_ie->allocs;

	bitmap_copy(tmp_bmp, mas->bm, UWB_NUM_MAS);

	/* Determine unique MAS bitmaps in zones from bitmap. */
	for (z = 0; z < UWB_NUM_ZONES; z++) {
		bitmap_copy(tmp_mas_bm, tmp_bmp, UWB_MAS_PER_ZONE);
		if (bitmap_weight(tmp_mas_bm, UWB_MAS_PER_ZONE) > 0) {
			bool found = false;
			current_bmp = (__le16) *tmp_mas_bm;
			for (i = 0; i < next; i++) {
				if (current_bmp == zones[i].mas_bm) {
					zones[i].zone_bm |= 1 << z;
					found = true;
					break;
				}
			}
			if (!found)  {
				num_fields++;
				zones[next].zone_bm = 1 << z;
				zones[next].mas_bm = current_bmp;
				next++;
			}
		}
		bitmap_shift_right(tmp_bmp, tmp_bmp, UWB_MAS_PER_ZONE, UWB_NUM_MAS);
	}

	/* Store in format ready for transmission (le16). */
	for (i = 0; i < num_fields; i++) {
		drp_ie->allocs[i].zone_bm = cpu_to_le16(zones[i].zone_bm);
		drp_ie->allocs[i].mas_bm = cpu_to_le16(zones[i].mas_bm);
	}

	drp_ie->hdr.length = sizeof(struct uwb_ie_drp) - sizeof(struct uwb_ie_hdr)
		+ num_fields * sizeof(struct uwb_drp_alloc);
}
Example 9
static void s390_cpu_model_initfn(Object *obj)
{
    S390CPU *cpu = S390_CPU(obj);
    S390CPUClass *xcc = S390_CPU_GET_CLASS(cpu);

    cpu->model = g_malloc0(sizeof(*cpu->model));
    /* copy the model, so we can modify it */
    cpu->model->def = xcc->cpu_def;
    if (xcc->is_static) {
        /* base model - features will never change */
        bitmap_copy(cpu->model->features, cpu->model->def->base_feat,
                    S390_FEAT_MAX);
    } else {
        /* latest model - features can change */
        bitmap_copy(cpu->model->features,
                    cpu->model->def->default_feat, S390_FEAT_MAX);
    }
}
Example 10
static void s390_qemu_cpu_model_initfn(Object *obj)
{
    S390CPU *cpu = S390_CPU(obj);

    cpu->model = g_malloc0(sizeof(*cpu->model));
    /* TCG emulates a z900 */
    cpu->model->def = &s390_cpu_defs[0];
    bitmap_copy(cpu->model->features, cpu->model->def->default_feat,
                S390_FEAT_MAX);
}
Example 11
/**
 * \brief returns the mask of nodes from which memory can currently be allocated.
 *
 * \return bitmap of nodes from which memory can be allocated
 */
struct bitmap *numa_get_membind(void)
{
    assert(numa_alloc_bind_mask);
    struct bitmap *im = numa_allocate_nodemask();
    if (im == NULL) {
        return NULL;
    }
    bitmap_copy(im, numa_alloc_bind_mask);
    return im;
}
Example 12
/** \brief   returns the current interleave mask
 *
 * \returns bitmask representing the current interleave state
 *
 * returns the current interleave mask if the task's memory allocation policy is
 * page interleaved. Otherwise, this function returns an empty mask.
 */
struct bitmap *numa_get_interleave_mask(void)
{
    assert(numa_alloc_interleave_mask);
    struct bitmap *im = numa_allocate_nodemask();
    if (im == NULL) {
        return NULL;
    }
    bitmap_copy(im, numa_alloc_interleave_mask);
    return im;
}
Example 13
/* Check if all specified nodes are online */
static int nodes_online(unsigned long *nodes)
{
	DECLARE_BITMAP(online2, MAX_NUMNODES);

	bitmap_copy(online2, nodes_addr(node_online_map), MAX_NUMNODES);
	if (bitmap_empty(online2, MAX_NUMNODES))
		set_bit(0, online2);
	if (!bitmap_subset(nodes, online2, MAX_NUMNODES))
		return -EINVAL;
	return 0;
}
Example 14
/* For each queue, from the most- to least-constrained:
 * find an LSB that can be assigned to the queue. If there are N queues that
 * can only use M LSBs, where N > M, fail; otherwise, every queue will get a
 * dedicated LSB. Remaining LSB regions become a shared resource.
 * If we have fewer LSBs than queues, all LSB regions become shared resources.
 */
static int ccp_assign_lsbs(struct ccp_device *ccp)
{
	DECLARE_BITMAP(lsb_pub, MAX_LSB_CNT);
	DECLARE_BITMAP(qlsb, MAX_LSB_CNT);
	int n_lsbs = 0;
	int bitno;
	int i, lsb_cnt;
	int rc = 0;

	bitmap_zero(lsb_pub, MAX_LSB_CNT);

	/* Create an aggregate bitmap to get a total count of available LSBs */
	for (i = 0; i < ccp->cmd_q_count; i++)
		bitmap_or(lsb_pub,
			  lsb_pub, ccp->cmd_q[i].lsbmask,
			  MAX_LSB_CNT);

	n_lsbs = bitmap_weight(lsb_pub, MAX_LSB_CNT);

	if (n_lsbs >= ccp->cmd_q_count) {
		/* We have enough LSBs to give every queue a private LSB.
		 * Brute force search to start with the queues that are more
		 * constrained in LSB choice. When an LSB is privately
		 * assigned, it is removed from the public mask.
		 * This is an ugly N squared algorithm with some optimization.
		 */
		for (lsb_cnt = 1;
		     n_lsbs && (lsb_cnt <= MAX_LSB_CNT);
		     lsb_cnt++) {
			rc = ccp_find_and_assign_lsb_to_q(ccp, lsb_cnt, n_lsbs,
							  lsb_pub);
			if (rc < 0)
				return -EINVAL;
			n_lsbs = rc;
		}
	}

	rc = 0;
	/* What's left of the LSBs, according to the public mask, now become
	 * shared. Any zero bits in the lsb_pub mask represent an LSB region
	 * that can't be used as a shared resource, so mark the LSB slots for
	 * them as "in use".
	 */
	bitmap_copy(qlsb, lsb_pub, MAX_LSB_CNT);

	bitno = find_first_zero_bit(qlsb, MAX_LSB_CNT);
	while (bitno < MAX_LSB_CNT) {
		bitmap_set(ccp->lsbmap, bitno * LSB_SIZE, LSB_SIZE);
		bitmap_set(qlsb, bitno, 1);
		bitno = find_first_zero_bit(qlsb, MAX_LSB_CNT);
	}

	return rc;
}
Example 15
static void s390_qemu_cpu_model_initfn(Object *obj)
{
    static S390CPUDef s390_qemu_cpu_defs;
    S390CPU *cpu = S390_CPU(obj);

    cpu->model = g_malloc0(sizeof(*cpu->model));
    /* TCG emulates a z900 (with some optional additional features) */
    memcpy(&s390_qemu_cpu_defs, &s390_cpu_defs[0], sizeof(s390_qemu_cpu_defs));
    add_qemu_cpu_model_features(s390_qemu_cpu_defs.full_feat);
    cpu->model->def = &s390_qemu_cpu_defs;
    bitmap_copy(cpu->model->features, cpu->model->def->default_feat,
                S390_FEAT_MAX);
}
Example 16
/** Enable I/O space range for task.
 *
 * Interrupts are disabled and task is locked.
 *
 * @param task   Task.
 * @param ioaddr Starting I/O space address.
 * @param size   Size of the enabled I/O range.
 *
 * @return EOK on success or an error code from errno.h.
 *
 */
int ddi_iospace_enable_arch(task_t *task, uintptr_t ioaddr, size_t size)
{
	size_t elements = ioaddr + size;
	if (elements > IO_PORTS)
		return ENOENT;
	
	if (task->arch.iomap.elements < elements) {
		/*
		 * The I/O permission bitmap is too small and needs to be grown.
		 */
		
		void *store = malloc(bitmap_size(elements), FRAME_ATOMIC);
		if (!store)
			return ENOMEM;
		
		bitmap_t oldiomap;
		bitmap_initialize(&oldiomap, task->arch.iomap.elements,
		    task->arch.iomap.bits);
		
		bitmap_initialize(&task->arch.iomap, elements, store);
		
		/*
		 * Mark the new range inaccessible.
		 */
		bitmap_set_range(&task->arch.iomap, oldiomap.elements,
		    elements - oldiomap.elements);
		
		/*
		 * If a smaller iomap already existed,
		 * copy its contents and deallocate it.
		 */
		if (oldiomap.bits) {
			bitmap_copy(&task->arch.iomap, &oldiomap,
			    oldiomap.elements);
			
			free(oldiomap.bits);
		}
	}
	
	/*
	 * Enable the range and we are done.
	 */
	bitmap_clear_range(&task->arch.iomap, (size_t) ioaddr, size);
	
	/*
	 * Increment I/O Permission bitmap generation counter.
	 */
	task->arch.iomapver++;
	
	return EOK;
}
Example 17
static int ccp_find_and_assign_lsb_to_q(struct ccp_device *ccp,
					int lsb_cnt, int n_lsbs,
					unsigned long *lsb_pub)
{
	DECLARE_BITMAP(qlsb, MAX_LSB_CNT);
	int bitno;
	int qlsb_wgt;
	int i;

	/* For each queue:
	 * If the count of potential LSBs available to a queue matches the
	 * ordinal given to us in lsb_cnt:
	 * Copy the mask of possible LSBs for this queue into "qlsb";
	 * For each bit in qlsb, see if the corresponding bit in the
	 * aggregation mask is set; if so, we have a match.
	 *     If we have a match, clear the bit in the aggregation to
	 *     mark it as no longer available.
	 *     If there is no match, clear the bit in qlsb and keep looking.
	 */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];

		qlsb_wgt = bitmap_weight(cmd_q->lsbmask, MAX_LSB_CNT);

		if (qlsb_wgt == lsb_cnt) {
			bitmap_copy(qlsb, cmd_q->lsbmask, MAX_LSB_CNT);

			bitno = find_first_bit(qlsb, MAX_LSB_CNT);
			while (bitno < MAX_LSB_CNT) {
				if (test_bit(bitno, lsb_pub)) {
					/* We found an available LSB
					 * that this queue can access
					 */
					cmd_q->lsb = bitno;
					bitmap_clear(lsb_pub, bitno, 1);
					dev_info(ccp->dev,
						 "Queue %d gets LSB %d\n",
						 i, bitno);
					break;
				}
				bitmap_clear(qlsb, bitno, 1);
				bitno = find_first_bit(qlsb, MAX_LSB_CNT);
			}
			if (bitno >= MAX_LSB_CNT)
				return -EINVAL;
			n_lsbs--;
		}
	}
	return n_lsbs;
}
Example 18
/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 **/
int iio_scan_mask_set(struct iio_buffer *buffer, int bit)
{
	struct iio_dev *indio_dev = buffer->indio_dev;
	unsigned long *mask;
	unsigned long *trialmask;

	trialmask = kmalloc(sizeof(*trialmask)*
			    BITS_TO_LONGS(indio_dev->masklength),
			    GFP_KERNEL);

	if (trialmask == NULL)
		return -ENOMEM;
	if (!indio_dev->masklength) {
		WARN_ON("trying to set scanmask prior to registering buffer\n");
		kfree(trialmask);
		return -EINVAL;
	}
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask);
		if (!mask) {
			kfree(trialmask);
			return -EINVAL;
		}
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);
	buffer->scan_count++;

	kfree(trialmask);

	return 0;
}
Example 19
static void
clear_regs_live_in_seq (HARD_REG_SET * regs, rtx insn, int length)
{
  basic_block bb;
  regset_head live;
  HARD_REG_SET hlive;
  rtx x;
  int i;

  /* Initialize liveness propagation.  */
  bb = BLOCK_FOR_INSN (insn);
  INIT_REG_SET (&live);
  bitmap_copy (&live, DF_LR_OUT (bb));
  df_simulate_initialize_backwards (bb, &live);

  /* Propagate until INSN is found.  */
  for (x = BB_END (bb); x != insn; x = PREV_INSN (x))
    df_simulate_one_insn_backwards (bb, x, &live);

  /* Clear registers live after INSN.  */
  renumbered_reg_set_to_hard_reg_set (&hlive, &live);
  AND_COMPL_HARD_REG_SET (*regs, hlive);

  /* Clear registers live in and before the sequence.  */
  for (i = 0; i < length;)
    {
      rtx prev = PREV_INSN (x);
      df_simulate_one_insn_backwards (bb, x, &live);

      if (INSN_P (x))
        {
          renumbered_reg_set_to_hard_reg_set (&hlive, &live);
          AND_COMPL_HARD_REG_SET (*regs, hlive);
          i++;
        }

      x = prev;
    }

  /* Free unused data.  */
  CLEAR_REG_SET (&live);
}
Example 20
File: regd.c Project: E-LLP/n900
static void
ath9k_regd_get_wmodes_nreg(struct ath_hal *ah,
			   struct country_code_to_enum_rd *country,
			   struct regDomain *rd5GHz,
			   unsigned long *modes_allowed)
{
	bitmap_copy(modes_allowed, ah->ah_caps.wireless_modes, ATH9K_MODE_MAX);

	if (test_bit(ATH9K_MODE_11G, ah->ah_caps.wireless_modes) &&
	    (!country->allow11g))
		clear_bit(ATH9K_MODE_11G, modes_allowed);

	if (test_bit(ATH9K_MODE_11A, ah->ah_caps.wireless_modes) &&
	    (ath9k_regd_is_chan_bm_zero(rd5GHz->chan11a)))
		clear_bit(ATH9K_MODE_11A, modes_allowed);

	if (test_bit(ATH9K_MODE_11NG_HT20, ah->ah_caps.wireless_modes)
	    && (!country->allow11ng20))
		clear_bit(ATH9K_MODE_11NG_HT20, modes_allowed);

	if (test_bit(ATH9K_MODE_11NA_HT20, ah->ah_caps.wireless_modes)
	    && (!country->allow11na20))
		clear_bit(ATH9K_MODE_11NA_HT20, modes_allowed);

	if (test_bit(ATH9K_MODE_11NG_HT40PLUS, ah->ah_caps.wireless_modes) &&
	    (!country->allow11ng40))
		clear_bit(ATH9K_MODE_11NG_HT40PLUS, modes_allowed);

	if (test_bit(ATH9K_MODE_11NG_HT40MINUS, ah->ah_caps.wireless_modes) &&
	    (!country->allow11ng40))
		clear_bit(ATH9K_MODE_11NG_HT40MINUS, modes_allowed);

	if (test_bit(ATH9K_MODE_11NA_HT40PLUS, ah->ah_caps.wireless_modes) &&
	    (!country->allow11na40))
		clear_bit(ATH9K_MODE_11NA_HT40PLUS, modes_allowed);

	if (test_bit(ATH9K_MODE_11NA_HT40MINUS, ah->ah_caps.wireless_modes) &&
	    (!country->allow11na40))
		clear_bit(ATH9K_MODE_11NA_HT40MINUS, modes_allowed);
}
Example 21
/**
 * \brief sets the memory interleave mask for the current task to nodemask
 *
 * \param nodemask bitmask representing the nodes
 *
 * All new memory allocations are page interleaved over all nodes in the interleave
 * mask. Interleaving can be turned off again by passing an empty mask.
 *
 * This bitmask is considered to be a hint. Fallback to other nodes may be possible.
 */
void numa_set_interleave_mask(struct bitmap *nodemask)
{
    assert(numa_alloc_interleave_mask);

    if (!nodemask) {
        bitmap_clear_all(numa_alloc_interleave_mask);
        return;
    }

    if (bitmap_get_nbits(nodemask) < NUMA_MAX_NUMNODES) {
        NUMA_WARNING("supplied interleave mask (%p) has to less bits!", nodemask);
        return;
    }
    bitmap_copy(numa_alloc_interleave_mask, nodemask);

    /* clear out the invalid nodes */
    bitmap_clear_range(numa_alloc_interleave_mask, numa_num_configured_nodes(),
                       bitmap_get_nbits(numa_alloc_interleave_mask));

    /* clear the bind mask as we are using interleaving mode now */
    bitmap_clear_all(numa_alloc_bind_mask);
}
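As the doc comment says, interleaving is switched off again by passing an empty (here: NULL) mask; a two-line sketch using the numa_all_nodes_ptr mask seen in Example 1:

numa_set_interleave_mask(numa_all_nodes_ptr);  /* interleave over all nodes */
numa_set_interleave_mask(NULL);                /* turn interleaving off */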
Example 22
static void quirk_cmi8330_resources(struct pnp_dev *dev)
{
	struct pnp_option *res = dev->dependent;
	unsigned long tmp;

	for (; res; res = res->next) {

		struct pnp_irq *irq;
		struct pnp_dma *dma;

		for (irq = res->irq; irq; irq = irq->next) {	// Valid irqs are 5, 7, 10
			tmp = 0x04A0;
			bitmap_copy(irq->map, &tmp, 16);	// 0000 0100 1010 0000
		}

		for (dma = res->dma; dma; dma = dma->next)	// Valid 8bit dma channels are 1,3
			if ((dma->flags & IORESOURCE_DMA_TYPE_MASK) ==
			    IORESOURCE_DMA_8BIT)
				dma->map = 0x000A;
	}
	printk(KERN_INFO "pnp: CMI8330 quirk - fixing interrupts and dma\n");
}
Example 23
void
duplicate_ssa_name_ptr_info (tree name, struct ptr_info_def *ptr_info)
{
  struct ptr_info_def *new_ptr_info;

  gcc_assert (POINTER_TYPE_P (TREE_TYPE (name)));
  gcc_assert (!SSA_NAME_PTR_INFO (name));

  if (!ptr_info)
    return;

  new_ptr_info = ggc_alloc (sizeof (struct ptr_info_def));
  *new_ptr_info = *ptr_info;

  if (ptr_info->pt_vars)
    {
      new_ptr_info->pt_vars = BITMAP_GGC_ALLOC ();
      bitmap_copy (new_ptr_info->pt_vars, ptr_info->pt_vars);
    }

  SSA_NAME_PTR_INFO (name) = new_ptr_info;
}
Example 24
File: lcm.c Project: krnowak/gcc
static void
compute_earliest (struct edge_list *edge_list, int n_exprs, sbitmap *antin,
		  sbitmap *antout, sbitmap *avout, sbitmap *kill,
		  sbitmap *earliest)
{
  sbitmap difference, temp_bitmap;
  int x, num_edges;
  basic_block pred, succ;

  num_edges = NUM_EDGES (edge_list);

  difference = sbitmap_alloc (n_exprs);
  temp_bitmap = sbitmap_alloc (n_exprs);

  for (x = 0; x < num_edges; x++)
    {
      pred = INDEX_EDGE_PRED_BB (edge_list, x);
      succ = INDEX_EDGE_SUCC_BB (edge_list, x);
      if (pred == ENTRY_BLOCK_PTR_FOR_FN (cfun))
	bitmap_copy (earliest[x], antin[succ->index]);
      else
	{
	  if (succ == EXIT_BLOCK_PTR_FOR_FN (cfun))
	    bitmap_clear (earliest[x]);
	  else
	    {
	      bitmap_and_compl (difference, antin[succ->index],
				  avout[pred->index]);
	      bitmap_not (temp_bitmap, antout[pred->index]);
	      bitmap_and_or (earliest[x], difference,
				    kill[pred->index], temp_bitmap);
	    }
	}
    }

  sbitmap_free (temp_bitmap);
  sbitmap_free (difference);
}
Example 25
File: lcm.c Project: krnowak/gcc
static void
compute_nearerout (struct edge_list *edge_list, sbitmap *farthest,
		   sbitmap *st_avloc, sbitmap *nearer, sbitmap *nearerout)
{
  int num_edges, i;
  edge e;
  basic_block *worklist, *tos, bb;
  edge_iterator ei;

  num_edges = NUM_EDGES (edge_list);

  /* Allocate a worklist array/queue.  Entries are only added to the
     list if they were not already on the list.  So the size is
     bounded by the number of basic blocks.  */
  tos = worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun) + 1);

  /* Initialize NEARER for each edge and build a mapping from an edge to
     its index.  */
  for (i = 0; i < num_edges; i++)
    INDEX_EDGE (edge_list, i)->aux = (void *) (size_t) i;

  /* We want a maximal solution.  */
  bitmap_vector_ones (nearer, num_edges);

  /* Note that even though we want an optimistic setting of NEARER, we
     do not want to be overly optimistic.  Consider an incoming edge to
     the exit block.  That edge should always have a NEARER value the
     same as FARTHEST for that edge.  */
  FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
    bitmap_copy (nearer[(size_t)e->aux], farthest[(size_t)e->aux]);

  /* Add all the blocks to the worklist.  This prevents an early exit
     from the loop given our optimistic initialization of NEARER.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      *tos++ = bb;
      bb->aux = bb;
    }
Example 26
File: quirks.c Project: 274914765/C
static void quirk_cmi8330_resources(struct pnp_dev *dev)
{
    struct pnp_option *res = dev->dependent;
    unsigned long tmp;

    for (; res; res = res->next) {

        struct pnp_irq *irq;
        struct pnp_dma *dma;

        for (irq = res->irq; irq; irq = irq->next) {    // Valid irqs are 5, 7, 10
            tmp = 0x04A0;
            bitmap_copy(irq->map, &tmp, 16);    // 0000 0100 1010 0000
        }

        for (dma = res->dma; dma; dma = dma->next)    // Valid 8bit dma channels are 1,3
            if ((dma->flags & IORESOURCE_DMA_TYPE_MASK) ==
                IORESOURCE_DMA_8BIT)
                dma->map = 0x000A;
    }
    dev_info(&dev->dev, "CMI8330 quirk - forced possible IRQs to 5, 7, 10 "
        "and DMA channels to 1, 3\n");
}
Example 27
static S390CPUModel *get_max_cpu_model(Error **errp)
{
    static S390CPUModel max_model;
    static bool cached;

    if (cached) {
        return &max_model;
    }

    if (kvm_enabled()) {
        kvm_s390_get_host_cpu_model(&max_model, errp);
    } else {
        /* TCG emulates a z900 (with some optional additional features) */
        max_model.def = &s390_cpu_defs[0];
        bitmap_copy(max_model.features, max_model.def->default_feat,
                    S390_FEAT_MAX);
        add_qemu_cpu_model_features(max_model.features);
    }
    if (!*errp) {
        cached = true;
        return &max_model;
    }
    return NULL;
}
Example 28
void texture_copy(char *path,char *name,char *sub_name)
{
    char			*c,dest_name[256],srce_path[1024],dest_path[1024],
                    sub_path[1024];

    strcpy(srce_path,path);

    if (sub_name!=NULL) {
        c=strrchr(srce_path,'.');
        if (c==NULL) return;

        *c=0x0;
        strcat(srce_path,sub_name);
        strcat(srce_path,".png");
    }

    strcpy(dest_name,name);

    if (sub_name!=NULL) strcat(dest_name,sub_name);

    sprintf(sub_path,"Models/%s/Textures",model.name);
    file_paths_data_default(&file_path_setup,dest_path,sub_path,dest_name,"png");
    bitmap_copy(srce_path,dest_path);
}
Example 29
static void __init test_copy(void)
{
	DECLARE_BITMAP(bmap1, 1024);
	DECLARE_BITMAP(bmap2, 1024);

	bitmap_zero(bmap1, 1024);
	bitmap_zero(bmap2, 1024);

	/* single-word bitmaps */
	bitmap_set(bmap1, 0, 19);
	bitmap_copy(bmap2, bmap1, 23);
	expect_eq_pbl("0-18", bmap2, 1024);

	bitmap_set(bmap2, 0, 23);
	bitmap_copy(bmap2, bmap1, 23);
	expect_eq_pbl("0-18", bmap2, 1024);

	/* multi-word bitmaps */
	bitmap_set(bmap1, 0, 109);
	bitmap_copy(bmap2, bmap1, 1024);
	expect_eq_pbl("0-108", bmap2, 1024);

	bitmap_fill(bmap2, 1024);
	bitmap_copy(bmap2, bmap1, 1024);
	expect_eq_pbl("0-108", bmap2, 1024);

	/* the following tests assume a 32- or 64-bit arch (even 128b
	 * if we care)
	 */

	bitmap_fill(bmap2, 1024);
	bitmap_copy(bmap2, bmap1, 109);  /* ... but 0-padded until word length */
	expect_eq_pbl("0-108,128-1023", bmap2, 1024);

	bitmap_fill(bmap2, 1024);
	bitmap_copy(bmap2, bmap1, 97);  /* ... but aligned on word length */
	expect_eq_pbl("0-108,128-1023", bmap2, 1024);
}
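The two trailing tests pass because bitmap_copy copies whole unsigned longs: requesting 109 or 97 bits copies 128 bits on a 64-bit machine, so bits up to the next word boundary are taken from the source while the rest of the destination is left alone. A userspace model of that behavior (names invented here; not the kernel implementation):

#include <string.h>

#define MY_LONG_BITS  (8 * sizeof(unsigned long))
#define MY_BITS_TO_LONGS(nbits) \
	(((nbits) + MY_LONG_BITS - 1) / MY_LONG_BITS)

/* Copy BITS_TO_LONGS(nbits) full words; bits between nbits and the next
 * word boundary travel along with the copy, matching the tests above. */
static void my_bitmap_copy(unsigned long *dst, const unsigned long *src,
			   unsigned int nbits)
{
	memcpy(dst, src, MY_BITS_TO_LONGS(nbits) * sizeof(unsigned long));
}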
Example 30
static void
collect_pattern_seqs (void)
{
  htab_iterator hti0, hti1, hti2;
  p_hash_bucket hash_bucket;
  p_hash_elem e0, e1;
#if defined STACK_REGS || defined HAVE_cc0
  basic_block bb;
  bitmap_head dont_collect;

  /* Extra initialization step to ensure that no stack registers (if present)
     or cc0 code (if present) are live across abnormal edges.
     Set a flag in DONT_COLLECT for an insn if a stack register is live
     after the insn or the insn is cc0 setter or user.  */
  bitmap_initialize (&dont_collect, NULL);

#ifdef STACK_REGS
  FOR_EACH_BB (bb)
  {
    regset_head live;
    rtx insn;
    rtx prev;

    /* Initialize liveness propagation.  */
    INIT_REG_SET (&live);
    bitmap_copy (&live, DF_LR_OUT (bb));
    df_simulate_initialize_backwards (bb, &live);

    /* Propagate liveness info and mark insns where a stack reg is live.  */
    insn = BB_END (bb);
    for (insn = BB_END (bb); ; insn = prev)
      {
	prev = PREV_INSN (insn);
	if (INSN_P (insn))
	  {
	    int reg;
	    for (reg = FIRST_STACK_REG; reg <= LAST_STACK_REG; reg++)
	      {
		if (REGNO_REG_SET_P (&live, reg))
		  {
		    bitmap_set_bit (&dont_collect, INSN_UID (insn));
		    break;
		  }
	      }
	    
	  }
	if (insn == BB_HEAD (bb))
	  break;
	df_simulate_one_insn_backwards (bb, insn, &live);
	insn = prev;
      }

    /* Free unused data.  */
    CLEAR_REG_SET (&live);
  }
#endif

#ifdef HAVE_cc0
  /* Mark CC0 setters and users as ineligible for collection into sequences.
     This is an over-conservative fix, since it is OK to include
     a cc0_setter, but only if we also include the corresponding cc0_user,
     and vice versa.  */
  FOR_EACH_BB (bb)
  {
    rtx insn;
    rtx next_tail;

    next_tail = NEXT_INSN (BB_END (bb));

    for (insn = BB_HEAD (bb); insn != next_tail; insn = NEXT_INSN (insn))
      {
	if (INSN_P (insn) && reg_mentioned_p (cc0_rtx, PATTERN (insn)))
	  bitmap_set_bit (&dont_collect, INSN_UID (insn));
      }
  }
#endif

#endif /* defined STACK_REGS || defined HAVE_cc0 */

  /* Initialize PATTERN_SEQS to empty.  */
  pattern_seqs = 0;

  /* Try to match every abstractable insn with every other insn in the same
     HASH_BUCKET.  */

  FOR_EACH_HTAB_ELEMENT (hash_buckets, hash_bucket, p_hash_bucket, hti0)
    if (htab_elements (hash_bucket->seq_candidates) > 1)
      FOR_EACH_HTAB_ELEMENT (hash_bucket->seq_candidates, e0, p_hash_elem, hti1)
        FOR_EACH_HTAB_ELEMENT (hash_bucket->seq_candidates, e1, p_hash_elem,
                               hti2)
          if (e0 != e1
#if defined STACK_REGS || defined HAVE_cc0
              && !bitmap_bit_p (&dont_collect, INSN_UID (e0->insn))
              && !bitmap_bit_p (&dont_collect, INSN_UID (e1->insn))
#endif
             )
            match_seqs (e0, e1);
#if defined STACK_REGS || defined HAVE_cc0
  /* Free unused data.  */
  bitmap_clear (&dont_collect);
#endif
}