Example #1
int BitVectorFixed::ffs() const {
  size_t i;
  int ans = 0;
  for(i = 0; i < cap && !ffsl(data[i]); i++) {
    ans += sizeof(elem_type) * 8;
  }
  if (i == cap) {
    return -1;
  }
  return ans + ffsl(data[i]) - 1;
}
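Every example on this page leans on the same contract: ffsl() returns the 1-based position of the least significant set bit, or 0 when no bit is set, which is why "ffsl(x) - 1" keeps appearing. A minimal standalone check of that contract (assuming a glibc or BSD libc that provides ffsl()):

#define _GNU_SOURCE     /* glibc: ffsl()/ffsll() are GNU extensions in <string.h> */
#include <string.h>
#include <strings.h>    /* the BSDs declare ffs()/ffsl()/ffsll() here instead */
#include <stdio.h>

int main(void)
{
	/* ffsl() returns the 1-based position of the least significant set
	 * bit, or 0 when no bit is set -- hence the recurring "ffsl(x) - 1". */
	printf("%d\n", ffsl(0L));      /* 0: no bit set               */
	printf("%d\n", ffsl(1L));      /* 1: bit 0                    */
	printf("%d\n", ffsl(0x50L));   /* 5: bit 4 is the lowest      */
	printf("%d\n", ffsl(-1L));     /* 1: all bits set, bit 0 wins */
	return 0;
}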
Example #2
int main()
{
	printf("Writing out to %d bytes of data, striding by increasing "
		"lengths, wrapping around when the end is reached\n", DATASIZE);
	dump(ffsl(DATASIZE / sizeof(long)), "%d");
	printf("%16s%16s%16s\n", "stride length", "num of strides", "time taken (ms)");
	for (long i = 0; i < ffsl(DATASIZE / sizeof(long)); i++) {
		printf("%16ld%16ld%16ld\n", sizeof(long) << i,
			DATASIZE / (sizeof(long) << i),
			time_stride(DATASIZE, sizeof(long) << i));
	}
	return 0;
}
Example #3
static __inline void
vmbus_event_flags_proc(struct vmbus_softc *sc, volatile u_long *event_flags,
    int flag_cnt)
{
	int f;

	for (f = 0; f < flag_cnt; ++f) {
		uint32_t chid_base;
		u_long flags;
		int chid_ofs;

		if (event_flags[f] == 0)
			continue;

		flags = atomic_swap_long(&event_flags[f], 0);
		chid_base = f << VMBUS_EVTFLAG_SHIFT;

		while ((chid_ofs = ffsl(flags)) != 0) {
			struct vmbus_channel *chan;

			--chid_ofs; /* NOTE: ffsl is 1-based */
			flags &= ~(1UL << chid_ofs);

			chan = sc->vmbus_chmap[chid_base + chid_ofs];

			/* if channel is closed or closing */
			if (chan == NULL || chan->ch_tq == NULL)
				continue;

			if (chan->ch_flags & VMBUS_CHAN_FLAG_BATCHREAD)
				vmbus_rxbr_intr_mask(&chan->ch_rxbr);
			taskqueue_enqueue(chan->ch_tq, &chan->ch_task);
		}
	}
}
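Example #3 above shows the standard walk-the-set-bits loop: call ffsl(), subtract one, clear that bit, repeat until the word is empty. A stripped-down standalone version of the same idiom, not tied to any of the projects quoted here:

#define _GNU_SOURCE     /* glibc declares ffsl() in <string.h> as a GNU extension */
#include <string.h>
#include <strings.h>    /* the BSDs declare it in <strings.h> instead */
#include <stdio.h>

/* Visit every set bit of `word`, lowest first, clearing each bit found. */
static void for_each_set_bit(unsigned long word)
{
	int bit;

	while ((bit = ffsl((long)word)) != 0) {
		bit--;                     /* ffsl() is 1-based */
		word &= ~(1UL << bit);     /* clear it so the loop terminates */
		printf("bit %d is set\n", bit);
	}
}

int main(void)
{
	for_each_set_bit(0x8009UL);    /* prints bits 0, 3 and 15 */
	return 0;
}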
Example #4
/**
 * virBitmapNextSetBit:
 * @bitmap: the bitmap
 * @pos: the position after which to search for a set bit
 *
 * Search for the first set bit after position @pos in bitmap @bitmap.
 * @pos can be -1 to search for the first set bit. Position starts
 * at 0.
 *
 * Returns the position of the found bit, or -1 if no bit found.
 */
ssize_t
virBitmapNextSetBit(virBitmapPtr bitmap, ssize_t pos)
{
    size_t nl;
    size_t nb;
    unsigned long bits;

    if (pos < 0)
        pos = -1;

    pos++;

    if (pos >= bitmap->max_bit)
        return -1;

    nl = pos / VIR_BITMAP_BITS_PER_UNIT;
    nb = pos % VIR_BITMAP_BITS_PER_UNIT;

    bits = bitmap->map[nl] & ~((1UL << nb) - 1);

    while (bits == 0 && ++nl < bitmap->map_len) {
        bits = bitmap->map[nl];
    }

    if (bits == 0)
        return -1;

    return ffsl(bits) - 1 + nl * VIR_BITMAP_BITS_PER_UNIT;
}
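The contract documented above (start with @pos = -1, feed each returned position back in) gives the usual iteration pattern. A sketch of a hypothetical caller; it assumes libvirt's internal virbitmap.h for virBitmapPtr and virBitmapNextSetBit(), and print_set_bits() is only an illustrative name:

#include <stdio.h>
#include <sys/types.h>   /* ssize_t */
#include "virbitmap.h"   /* assumed: libvirt-internal declarations of
                            virBitmapPtr and virBitmapNextSetBit() */

/* Print every set bit of an existing bitmap, in ascending order. */
static void print_set_bits(virBitmapPtr bitmap)
{
	ssize_t bit = -1;    /* -1 asks for the first set bit */

	/* each call resumes after the previously returned position,
	 * until -1 signals that no further bit is set */
	while ((bit = virBitmapNextSetBit(bitmap, bit)) >= 0)
		printf("bit %zd is set\n", bit);
}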
Example #5
static int read_mapping(int idx, const char* which, cpu_set_t** set, size_t *sz)
{
	/* Max CPUs = 4096 */

	int	ret = -1;
	char buf[4096/4     /* enough chars for hex data (4 CPUs per char) */
	       + 4096/(4*8) /* for commas (separate groups of 8 chars) */
	       + 1] = {0};  /* for \0 */
	char fname[80] = {0};

	char* chunk_str;
	int len, nbits;
	int i;

	/* init vals returned to the caller */
	*set = NULL;
	*sz = 0;

	if (num_online_cpus() > 4096)
		goto out;

	/* Read string is in the format of <mask>[,<mask>]*. All <mask>s following
	   a comma are 8 chars (representing a 32-bit mask). The first <mask> may
	   have fewer chars. Bits are MSB to LSB, left to right. */
	snprintf(fname, sizeof(fname), "/proc/litmus/%s/%d", which, idx);
	ret = read_file(fname, &buf, sizeof(buf)-1);
	if (ret <= 0)
		goto out;

	len = strnlen(buf, sizeof(buf));
	nbits = 32*(len/9) + 4*(len%9); /* compute bits, accounting for commas */

	*set = CPU_ALLOC(nbits);
	*sz = CPU_ALLOC_SIZE(nbits);
	CPU_ZERO_S(*sz, *set);

	/* process LSB chunks first (at the end of the str) and move backward */
	chunk_str = buf + len - 8;
	i = 0;
	do
	{
		unsigned long chunk;
		if(chunk_str < buf)
			chunk_str = buf; /* when MSB mask is less than 8 chars */
		chunk = strtoul(chunk_str, NULL, 16);
		while (chunk) {
			int j = ffsl(chunk) - 1;
			int x = i*32 + j;
			CPU_SET_S(x, *sz, *set);
			chunk &= ~(1ul << j);
		}
		chunk_str -= 9;
		i += 1;
	} while(chunk_str >= buf - 8);

	ret = 0;

out:
	return ret;
}
Example #6
int
hv_nv_get_next_send_section(netvsc_dev *net_dev)
{
	unsigned long bitsmap_words = net_dev->bitsmap_words;
	unsigned long *bitsmap = net_dev->send_section_bitsmap;
	unsigned long idx;
	int ret = NVSP_1_CHIMNEY_SEND_INVALID_SECTION_INDEX;
	int i;

	for (i = 0; i < bitsmap_words; i++) {
		idx = ffsl(~bitsmap[i]);
		if (0 == idx)
			continue;

		idx--;
		KASSERT(i * BITS_PER_LONG + idx < net_dev->send_section_count,
		    ("invalid i %d and idx %lu", i, idx));

		if (atomic_testandset_long(&bitsmap[i], idx))
			continue;

		ret = i * BITS_PER_LONG + idx;
		break;
	}

	return (ret);
}
Example #7
/* get kvm's dirty pages bitmap and update qemu's */
static int kvm_get_dirty_pages_log_range(MemoryRegionSection *section,
                                         unsigned long *bitmap)
{
    unsigned int i, j;
    unsigned long page_number, c;
    target_phys_addr_t addr, addr1;
    unsigned int len = ((section->size / TARGET_PAGE_SIZE) + HOST_LONG_BITS - 1) / HOST_LONG_BITS;

    /*
     * bitmap-traveling is faster than memory-traveling (for addr...)
     * especially when most of the memory is not dirty.
     */
    for (i = 0; i < len; i++) {
        if (bitmap[i] != 0) {
            c = leul_to_cpu(bitmap[i]);
            do {
                j = ffsl(c) - 1;
                c &= ~(1ul << j);
                page_number = i * HOST_LONG_BITS + j;
                addr1 = page_number * TARGET_PAGE_SIZE;
                addr = section->offset_within_region + addr1;
                memory_region_set_dirty(section->mr, addr, TARGET_PAGE_SIZE);
            } while (c != 0);
        }
    }
    return 0;
}
Example #8
uint32_t *mlx5_alloc_dbrec(struct mlx5_context *context)
{
	struct mlx5_db_page *page;
	uint32_t *db = NULL;
	int i, j;

	pthread_mutex_lock(&context->db_list_mutex);

	for (page = context->db_list; page; page = page->next)
		if (page->use_cnt < page->num_db)
			goto found;

	page = __add_page(context);
	if (!page)
		goto out;

found:
	++page->use_cnt;

	for (i = 0; !page->free[i]; ++i)
		/* nothing */;

	j = ffsl(page->free[i]);
	--j;
	page->free[i] &= ~(1UL << j);
	db = page->buf.buf + (i * 8 * sizeof(long) + j) * context->cache_line_size;

out:
	pthread_mutex_unlock(&context->db_list_mutex);

	return db;
}
Example #9
/**
 * Generic function that loops over all dimensions of a set of
 * multi-dimensional arrays and calls a given function for each position.
 * This function tries to parallelize over the dimensions indicated
 * with flags.
 */
void md_parallel_nary(unsigned int C, unsigned int D, const long dim[D], unsigned long flags, const long* str[C], void* ptr[C], void* data, md_nary_fun_t fun)
{
	if (0 == flags) {

		md_nary(C, D, dim, str, ptr, data, fun);
		return;
	}

	int b = ffsl(flags & -flags) - 1;
	assert(MD_IS_SET(flags, b));

	flags = MD_CLEAR(flags, b);

	long dimc[D];
	md_select_dims(D, ~MD_BIT(b), dimc, dim);

	debug_printf(DP_DEBUG4, "Parallelize: %d\n", dim[b]);

	// FIXME: this probably doesn't nest
	// (maybe collect all parallelizable dims into one giant loop?)
	#pragma omp parallel for
	for (long i = 0; i < dim[b]; i++) {

		void* moving_ptr[C];

		for (unsigned int j = 0; j < C; j++)
			moving_ptr[j] = ptr[j] + i * str[j][b];

		md_parallel_nary(C, D, dimc, flags, str, moving_ptr, data, fun);
	}
}
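Example #9 picks the lowest set flag with ffsl(flags & -flags) - 1. The flags & -flags expression keeps only the lowest set bit, so ffsl() of the result equals ffsl(flags); the mask mainly documents intent. A small standalone check of that equivalence (not part of the project the example is taken from):

#define _GNU_SOURCE
#include <string.h>
#include <strings.h>
#include <assert.h>

int main(void)
{
	unsigned long flags = 0x28;             /* bits 3 and 5 set */
	unsigned long lowest = flags & -flags;  /* keeps only the lowest set bit */

	assert(lowest == 0x8UL);                /* only bit 3 survives */
	assert(ffsl(lowest) - 1 == 3);          /* its 0-based index */
	assert(ffsl(flags) - 1 == 3);           /* ffsl() alone gives the same index */
	return 0;
}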
Example #10
/*
** Multiqueue Transmit driver
**
*/
int
ixl_mq_start(struct ifnet *ifp, struct mbuf *m)
{
	struct ixl_vsi		*vsi = ifp->if_softc;
	struct ixl_queue	*que;
	struct tx_ring		*txr;
	int 			err, i;

	/* Which queue to use */
	if ((m->m_flags & M_FLOWID) != 0)
		i = m->m_pkthdr.flowid % vsi->num_queues;
	else
		i = curcpu % vsi->num_queues;

	/* Check for a hung queue and pick alternative */
	if (((1 << i) & vsi->active_queues) == 0)
		i = ffsl(vsi->active_queues);

	que = &vsi->queues[i];
	txr = &que->txr;

	err = drbr_enqueue(ifp, txr->br, m);
	if (err)
		return(err);
	if (IXL_TX_TRYLOCK(txr)) {
		ixl_mq_start_locked(ifp, txr);
		IXL_TX_UNLOCK(txr);
	} else
		taskqueue_enqueue(que->tq, &que->tx_task);

	return (0);
}
Example #11
File: apic.c Project: ksandstr/mung
void isr_apic_bottom(struct x86_exregs *regs)
{
	struct lapic_info *apic = &cpu_apics[0];
#if 0
	printf("%s: frame at %p, exregs=%p, frame_len=%u (size=%u)\n",
		__func__, &apic, regs, (unsigned)x86_frame_len(regs),
		(unsigned)sizeof(*regs));
#endif

	/* figure out which interrupt this is from the ISRs. */
	int vecnum;
	bool vec_found = false;
	for(int i=0; i <= apic->max_vector; i += 32) {
		uint32_t isr_limb = mm_inl(apic->base_addr + APIC_ISR(i >> 5));
		if(i == 0) isr_limb &= ~0xffffu;
		if(isr_limb != 0) {
			vecnum = i + ffsl(isr_limb) - 1;
			vec_found = true;
			break;
		}
	}
	if(!vec_found) {
		apic->num_spurious++;
		ioapic_send_eoi(0);		/* just in case. */
		return;
	}

#if 0
	if(vecnum == 0x21) {
		printf("i'm a keyboard, toot toot\n");
#define KBD_STATUS_REG 0x64
#define KBD_DATA_REG 0x60
#define KBD_STAT_OBF 0x01
		for(;;) {
			uint8_t st = inb(KBD_STATUS_REG);
			if((st & KBD_STAT_OBF) == 0) break;
			inb(KBD_DATA_REG);	/* and throw it away */
		}
	} else {
		printf("APIC interrupt vector %d\n", vecnum);
	}
#endif

	regs->reason = vecnum;
	isr_irq_bottom(regs);
	return;

#if 0
	/* TODO: move this into an x86_print_exregs() function */
	printf("exregs: reason=%#lx, es=%#lx, ds=%#lx, cs=%#lx, ss=%#lx\n"
		"\tedi=%#lx, esi=%#lx, ebp=%#lx, __esp=%#lx\n"
		"\tebx=%#lx, edx=%#lx, ecx=%#lx, eax=%#lx\n"
		"\terror=%#lx, eip=%#lx, eflags=%#lx, esp=%#lx\n",
		regs->reason, regs->es, regs->ds, regs->cs, regs->ss,
		regs->edi, regs->esi, regs->ebp, regs->__esp,
		regs->ebx, regs->edx, regs->ecx, regs->eax,
		regs->error, regs->eip, regs->eflags, regs->esp);
#endif
}
Example #12
uint64_t mbm_set_first_clear(struct m_bitmap *bm)
{
	int offset;
	int i;
	uint64_t blk;
	uint64_t mid_index = 0, low_index = 0;
	int n_low_ints = bm->size / BITMAP_WIDTH;
	int n_mid_ints = (n_low_ints + (LOWMAP_SZ * BITMAP_WIDTH) - 1) / (LOWMAP_SZ * BITMAP_WIDTH);
	int n_top_ints = (n_mid_ints + (MIDMAP_SZ * BITMAP_WIDTH) - 1) / (MIDMAP_SZ * BITMAP_WIDTH);
	
	/* Find first free block in top level map */
	if (bm->top_map != NULL) {
		T(("Entered top level\n"));
		for (i = 0; i < n_top_ints; i++) {
			if ((bm->top_map)[i] != MAX_VAL_64BIT) {
				offset =  ffsl(~(bm->top_map)[i]) - 1;
				mid_index = (i * BITMAP_WIDTH + offset) * 16;
				break;
			}
		}
	}
	/* Find the first free block in mid level map */
	T(("Mid index = %lu\n", mid_index));
	if (bm->mid_map != NULL) {
		for (i = mid_index; i < 16; i++) {
			if ((bm->mid_map)[mid_index+i] != MAX_VAL_64BIT) {
				offset =  ffsl(~(bm->mid_map)[mid_index+i]) - 1;
				T(("Offset = %d, iteration = %d\n", offset, i));
				low_index = ((mid_index + i) * BITMAP_WIDTH + offset) * 16;
				break;
			}
		}
	}
	T(("low index = %lu\n", low_index));
	
	/* Find the exact free block in the low level map */
	for (i = 0; i < 16; i++) {
		if ((bm->low_map)[low_index+i] != MAX_VAL_64BIT) {
			offset =  ffsl(~(bm->low_map)[low_index + i]) - 1;
			blk = set_bit(bm, low_index+i, offset);
			return blk;
		}
	}
	
	return 0;
}
Example #13
int kvm_get_dirty_fb_lines(short *rettable, int table_size_in_bytes)
{
    struct kvm_dirty_log d;
    unsigned int i, j;
    unsigned long page_number, addr, c;
    int known_start = 0;

    /* no fb mapped */
    if (fb_slot == -1)
        return 0;

    rettable[0] = 0; // starting y
    rettable[1] = fb_height - 1; // ending y

    memset(fb_bitmap, 0, fb_len);

    d.dirty_bitmap = fb_bitmap;
    d.slot = fb_slot;

    if (kvm_vm_ioctl(KVM_GET_DIRTY_LOG, &d) == -1) {
        /* failed -> expose all screen as updated */
        return 1;
    }

    rettable[1] = 0;
    for (i = 0; i < fb_len; i++) {
        if (fb_bitmap[i] != 0) {
            c = bswap_32(fb_bitmap[i]);
            do {
                j = ffsl(c) - 1;
                c &= ~(1ul << j);
                page_number = i * 32 + j;
                addr = page_number * TARGET_PAGE_SIZE;

                if (!known_start) {
                    rettable[0] = addr / fb_bytes_per_row;
                    known_start = 1;
                }

                rettable[1] = ((addr + TARGET_PAGE_SIZE) / fb_bytes_per_row);
            } while (c != 0);
        }
    }

    /* not dirty */
    if (rettable[0] == rettable[1])
        return 0;

    /* cap on fb_height */
    if (rettable[1] > (fb_height - 1))
        rettable[1] = (fb_height - 1);

    return 1;
}
Example #14
void tstLowBit (unsigned long x)
{
	unsigned	a;
	unsigned	b;

	a = simpleLowBit(x);
	b = ffsl(x);
	if (a != b) {
		fatal("%lx %x %x", x, a, b);
	}
}
Example #15
 inline size_t front() {
    size_t pos = 0;
    // look into others
    for (size_t j(0); j < SIZE; ++j) {
       BITMAP_TYPE a(*(data + j));
       if (a != (BITMAP_TYPE)0)
          return pos + ffsl(a) - 1;
       else
          pos += BITMAP_BITS;
    }
    return -1;
 }
Example #16
static void test_ffs(void *p)
{
	/* ffs */
	int_check(ffs(0), 0);
	int_check(ffs(1), 1);
	int_check(ffs(3), 1);
	int_check(ffs((int)-1), 1);
	int_check(ffs(ror32(1,1)), 32);

	/* ffsl */
	int_check(ffsl(0), 0);
	int_check(ffsl(1), 1);
	int_check(ffsl(3), 1);
	int_check(ffsl((long)-1), 1);
	if (sizeof(long) == 4)
		int_check(ffsl(ror32(1,1)), 32);
	else
		int_check(ffsl(ror64(1,1)), 64);

	/* ffsll */
	int_check(ffsll(0), 0);
	int_check(ffsll(1), 1);
	int_check(ffsll(3), 1);
	int_check(ffsll((long long)-1), 1);
	ull_check((1ULL << 63), ror64(1,1));
	int_check(ffsll(1ULL << 63), 64);
	int_check(ffsll(ror64(1,1)), 64);
end:;
}
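The test relies on ror32()/ror64() rotate-right helpers that are not shown here; a minimal sketch of the assumed semantics, enough to see why ffsl(ror32(1, 1)) is expected to be 32:

#include <stdint.h>

/* Assumed semantics of the helpers used above: rotate right by n bits,
 * valid for 0 < n < width. ror32(1, 1) == 0x80000000, so its only set
 * bit is bit 31 and ffsl() reports 32 (ror64(1, 1) likewise gives bit 63). */
static inline uint32_t ror32(uint32_t v, unsigned int n)
{
	return (v >> n) | (v << (32 - n));
}

static inline uint64_t ror64(uint64_t v, unsigned int n)
{
	return (v >> n) | (v << (64 - n));
}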
Example #17
 inline size_t remove_front(const size_t inv = 0) {
    (void)inv;
    size_t pos(0);
    for (size_t j(0); j < SIZE; ++j, pos += BITMAP_BITS) {
       BITMAP_TYPE a(*(data + j));
       if (a != (BITMAP_TYPE)0) {
          pos += ffsl(a) - 1;
          *(data + j) = a & (a - (BITMAP_TYPE)1);
          return pos;
       }
    }
    return -1;
 }
Example #18
/*
 * Calculate the ffs() of the cpuset.
 */
int
cpusetobj_ffs(const cpuset_t *set)
{
	size_t i;
	int cbit;

	cbit = 0;
	for (i = 0; i < _NCPUWORDS; i++) {
		if (set->__bits[i] != 0) {
			cbit = ffsl(set->__bits[i]);
			cbit += i * _NCPUBITS;
			break;
		}
	}
	return (cbit);
}
Example #19
void
svc_getreqset (fd_set *readfds)
{
  register fd_mask mask;
  register fd_mask *maskp;
  register int setsize;
  register int sock;
  register int bit;

  setsize = _rpc_dtablesize ();
  if (setsize > FD_SETSIZE)
    setsize = FD_SETSIZE;
  maskp = readfds->fds_bits;
  for (sock = 0; sock < setsize; sock += NFDBITS)
    for (mask = *maskp++; (bit = ffsl (mask)); mask ^= (1L << (bit - 1)))
      svc_getreq_common (sock + bit - 1);
}
Example #20
static void vtd_init_fault_nmi(void)
{
	void *reg_base = dmar_reg_base;
	struct per_cpu *cpu_data;
	unsigned int apic_id;
	int i;

	/* Assume that at least one bit is set somewhere as
	* we don't support configurations when Linux is left with no CPUs */
	for (i = 0; root_cell.cpu_set->bitmap[i] == 0; i++)
		/* Empty loop */;
	cpu_data = per_cpu(ffsl(root_cell.cpu_set->bitmap[i]));
	apic_id = cpu_data->apic_id;

	/* Save this value globally to avoid multiple reporting
	 * of the same case from different CPUs*/
	fault_reporting_cpu_id = cpu_data->cpu_id;

	for (i = 0; i < dmar_units; i++, reg_base += PAGE_SIZE) {
		/* Mask events*/
		mmio_write32_field(reg_base+VTD_FECTL_REG, VTD_FECTL_IM_MASK,
				   VTD_FECTL_IM_SET);

		/* We use xAPIC mode. Hence, TRGM and LEVEL aren't required.
		 Set Delivery Mode to NMI */
		mmio_write32(reg_base + VTD_FEDATA_REG, APIC_MSI_DATA_DM_NMI);

		/* The vector information is ignored in the case of NMI,
		* hence there's no need to set that field.
		* Redirection mode is set to use physical address by default */
		mmio_write32(reg_base + VTD_FEADDR_REG,
			((apic_id << APIC_MSI_ADDR_DESTID_SHIFT) &
			 APIC_MSI_ADDR_DESTID_MASK) | APIC_MSI_ADDR_FIXED_VAL);

		/* APIC ID can exceed 8-bit value for x2APIC mode */
		if (using_x2apic)
			mmio_write32(reg_base + VTD_FEUADDR_REG, apic_id);

		/* Unmask events */
		mmio_write32_field(reg_base+VTD_FECTL_REG, VTD_FECTL_IM_MASK,
				   VTD_FECTL_IM_CLEAR);
	}
}
Example #21
void apic_send_irq(struct apic_irq_message irq_msg)
{
	u32 delivery_mode = irq_msg.delivery_mode << APIC_ICR_DLVR_SHIFT;

	/* IA-32 SDM 10.6: "lowest priority IPI [...] should be avoided" */
	if (delivery_mode == APIC_ICR_DLVR_LOWPRI) {
		delivery_mode = APIC_ICR_DLVR_FIXED;
		/* Fixed mode performs a multicast, so reduce the number of
		 * receivers to one. */
		if (irq_msg.dest_logical && irq_msg.destination != 0)
			irq_msg.destination = 1UL << ffsl(irq_msg.destination);
	}
	apic_ops.send_ipi(irq_msg.destination,
			  irq_msg.vector | delivery_mode |
			  (irq_msg.dest_logical ? APIC_ICR_DEST_LOGICAL : 0) |
			  APIC_ICR_LV_ASSERT |
			  (irq_msg.level_triggered ? APIC_ICR_TM_LEVEL : 0) |
			  APIC_ICR_SH_NONE);
}
Example #22
/* call latent interrupt handlers, resolve preemptions, return the winner.
 * caller must check for retval == current && I ∈ current.PreemptFlags, and
 * engage max_delay as appropriate.
 */
struct thread *irq_call_deferred(struct thread *next)
{
	assert(!x86_irq_is_enabled());
	assert(!kernel_irq_ok);

	/* initialize resolution state, feed it a primary event. */
	struct thread *current = get_current_thread();
	void *cur_utcb = current != NULL ? thread_get_utcb(current) : NULL;
	next = sched_resolve_next(current, cur_utcb, current, next);

	int n_defer_masks = (max_irq_handler + WORD_BITS) / WORD_BITS, n_done;
	do {
		if(!irq_defer_active) break;
		n_done = 0;
		for(int i=0; i < n_defer_masks; i++) {
			L4_Word_t m = defer_set_masks[i];
			while(m != 0) {
				int b = ffsl(m) - 1;
				assert(b >= 0);
				m &= ~(1ul << b);
				int vecn = i * WORD_BITS + b;
				int n_defers = deferred_vecs[vecn];
				assert(n_defers > 0);
				irq_handler_fn handler = choose_handler(vecn);
				deferred_vecs[vecn] = 0;
				defer_set_masks[i] &= ~(1ul << b);

				x86_irq_enable();
				next = sched_resolve_next(current, cur_utcb, next,
					(*handler)(n_defers > 1 ? -vecn : vecn));
				x86_irq_disable();
				n_done++;
			}
		}
	} while(n_done > 0);
	irq_defer_active = false;

	return next;
}
Example #23
int main ()
{
    static struct t_s {
	long val;
	unsigned char pattern;
    } t[] = {
	{ 0, 0 },
	{ 1, 1 },
	{ 0xffffffff, 1 },
	{ 0x00000002, 2 },
	{ 0x80000002, 2 },
	{ 0xfffffff2, 2 },
	{ 0x00000040, 7 },
	{ 0xffffff40, 7 },
	{ 0x00000080, 8 },
	{ 0xffffff80, 8 },

	{ 0x00000100, 9 },
	{ 0xffffff00, 9 },
	{ 0x00008000, 16 },
	{ 0xffff8000, 16 },
	
	{ 0x00010000, 17 },
	{ 0xffff0000, 17 },
	{ 0x00800000, 24 },
	{ 0xff800000, 24 },
	
	{ 0x01000000, 25 },
	{ 0xff000000, 25 },
	{ 0x80000000, 32 },
    };
    int i;
    
    for (i = 0; i != (int)(sizeof(t)/sizeof(t[0])); i++) {
	if (ffsl (t[i].val) != t[i].pattern)
	    exit (1 + i);
    }
    return 0;
}
Example #24
static void
ps3pic_dispatch(device_t dev, struct trapframe *tf)
{
	uint64_t bitmap, mask;
	int irq;
	struct ps3pic_softc *sc;

	sc = device_get_softc(dev);

	if (PCPU_GET(cpuid) == 0) {
		bitmap = sc->bitmap_thread0[0];
		mask = sc->mask_thread0[0];
	} else {
		bitmap = sc->bitmap_thread1[0];
		mask = sc->mask_thread1[0];
	}

	while ((irq = ffsl(bitmap & mask) - 1) != -1) {
		bitmap &= ~(1UL << irq);
		powerpc_dispatch_intr(sc->sc_vector[63 - irq], tf);
	}
}
Example #25
vaddr_t pkmalloc_bucket(struct pkmem_bucket *bucket)
{
	unsigned long word_idx, bit_idx, entry_idx;
	unsigned long bitmap_words = get_bitmap_words(bucket->num_entries);
	vaddr_t ret = NULL;

	for (word_idx = 0; word_idx < bitmap_words; word_idx++) {
		if (bucket->bitmap[word_idx] != ~0UL) {
			printf("1:bitmap[%lu]=%lx\n", word_idx, bucket->bitmap[word_idx]);
			bit_idx = ffsl(~bucket->bitmap[word_idx]) - 1;
			printf("2:setting bit %lu\n", bit_idx);
			bucket->bitmap[word_idx] |= (1UL << bit_idx);
			printf("3:bitmap[%lu]=%lx\n", word_idx, bucket->bitmap[word_idx]);

			entry_idx = word_idx * WORD_BIT_SIZE + bit_idx;
			ret = bucket->address + entry_idx * bucket->entry_size;
			bucket->free_entries--;
			break;
		}
	}

	return ret;
}
Example #26
static void apic_send_logical_dest_ipi(unsigned long dest, u32 lo_val,
				       u32 hi_val)
{
	unsigned int target_cpu_id = CPU_ID_INVALID;
	unsigned int logical_id;
	unsigned int cluster_id;
	unsigned int apic_id;

	if (using_x2apic) {
		cluster_id = (dest & X2APIC_DEST_CLUSTER_ID_MASK) >>
			X2APIC_DEST_CLUSTER_ID_SHIFT;
		dest &= X2APIC_DEST_LOGICAL_ID_MASK;
		while (dest != 0) {
			logical_id = ffsl(dest);
			dest &= ~(1UL << logical_id);
			apic_id = logical_id |
				(cluster_id << X2APIC_CLUSTER_ID_SHIFT);
			if (apic_id <= APIC_MAX_PHYS_ID)
				target_cpu_id = apic_to_cpu_id[apic_id];
			apic_send_ipi(target_cpu_id, hi_val, lo_val);
		}
	} else
		while (dest != 0) {
			target_cpu_id = ffsl(dest);
			dest &= ~(1UL << target_cpu_id);
			apic_send_ipi(target_cpu_id, hi_val, lo_val);
		}
}
Example #27
/*
  =============
  PortalCompleted

  Mark the portal completed and propogate new vis information across
  to the complementry portals.

  Called with the lock held.
  =============
*/
static void
PortalCompleted(portal_t *completed)
{
    int i, j, k, bit, numblocks;
    int leafnum;
    const portal_t *p, *p2;
    const leaf_t *myleaf;
    const leafblock_t *might, *vis;
    leafblock_t changed;

    ThreadLock();

    completed->status = pstat_done;

    /*
     * For each portal on the leaf, check the leafs we eliminated from
     * mightsee during the full vis so far.
     */
    myleaf = &leafs[completed->leaf];
    for (i = 0; i < myleaf->numportals; i++) {
        p = myleaf->portals[i];
        if (p->status != pstat_done)
            continue;

        might = p->mightsee->bits;
        vis = p->visbits->bits;
        numblocks = (portalleafs + LEAFMASK) >> LEAFSHIFT;
        for (j = 0; j < numblocks; j++) {
            changed = might[j] & ~vis[j];
            if (!changed)
                continue;

            /*
             * If any of these changed bits are still visible from another
             * portal, we can't update yet.
             */
            for (k = 0; k < myleaf->numportals; k++) {
                if (k == i)
                    continue;
                p2 = myleaf->portals[k];
                if (p2->status == pstat_done)
                    changed &= ~p2->visbits->bits[j];
                else
                    changed &= ~p2->mightsee->bits[j];
                if (!changed)
                    break;
            }

            /*
             * Update mightsee for any of the changed bits that survived
             */
            while (changed) {
                bit = ffsl(changed) - 1;
                changed &= ~(1UL << bit);
                leafnum = (j << LEAFSHIFT) + bit;
                UpdateMightsee(leafs + leafnum, myleaf);
            }
        }
    }

    ThreadUnlock();
}
Example #28
	if (using_x2apic) {
		cluster_id = (dest & X2APIC_DEST_CLUSTER_ID_MASK) >>
			X2APIC_DEST_CLUSTER_ID_SHIFT;
		dest &= X2APIC_DEST_LOGICAL_ID_MASK;
		while (dest != 0) {
			logical_id = ffsl(dest);
			dest &= ~(1UL << logical_id);
			apic_id = logical_id |
				(cluster_id << X2APIC_CLUSTER_ID_SHIFT);
			if (apic_id <= APIC_MAX_PHYS_ID)
				target_cpu_id = apic_to_cpu_id[apic_id];
			apic_send_ipi(target_cpu_id, hi_val, lo_val);
		}
	} else
		while (dest != 0) {
			target_cpu_id = ffsl(dest);
			dest &= ~(1UL << target_cpu_id);
			apic_send_ipi(target_cpu_id, hi_val, lo_val);
		}
}

static bool apic_handle_icr_write(u32 lo_val, u32 hi_val)
{
	unsigned int target_cpu_id;
	unsigned long dest;

	if (!apic_valid_ipi_mode(lo_val))
		return false;

	if ((lo_val & APIC_ICR_SH_MASK) == APIC_ICR_SH_SELF) {
		apic_ops.write(APIC_REG_ICR, (lo_val & APIC_ICR_VECTOR_MASK) |
Example #29
/*
 * Could be implemented as get_new_above(idr, ptr, 0, idp) but written
 * first for simplicity's sake.
 */
int
idr_get_new(struct idr *idr, void *ptr, int *idp)
{
	struct idr_layer *stack[MAX_LEVEL];
	struct idr_layer *il;
	int error;
	int layer;
	int idx;
	int id;

	error = -EAGAIN;
	mtx_lock(&idr->lock);
	/*
	 * Expand the tree until there is free space.
	 */
	if (idr->top == NULL || idr->top->bitmap == 0) {
		if (idr->layers == MAX_LEVEL + 1) {
			error = -ENOSPC;
			goto out;
		}
		il = idr_get(idr);
		if (il == NULL)
			goto out;
		il->ary[0] = idr->top;
		if (idr->top)
			il->bitmap &= ~1;
		idr->top = il;
		idr->layers++;
	}
	il = idr->top;
	id = 0;
	/*
	 * Walk the tree following free bitmaps, record our path.
	 */
	for (layer = idr->layers - 1;; layer--) {
		stack[layer] = il;
		idx = ffsl(il->bitmap);
		if (idx == 0)
			panic("idr_get_new: Invalid leaf state (%p, %p)\n",
			    idr, il);
		idx--;
		id |= idx << (layer * IDR_BITS);
		if (layer == 0)
			break;
		if (il->ary[idx] == NULL) {
			il->ary[idx] = idr_get(idr);
			if (il->ary[idx] == NULL)
				goto out;
		}
		il = il->ary[idx];
	}
	/*
	 * Allocate the leaf to the consumer.
	 */
	il->bitmap &= ~(1 << idx);
	il->ary[idx] = ptr;
	*idp = id;
	/*
	 * Clear bitmaps potentially up to the root.
	 */
	while (il->bitmap == 0 && ++layer < idr->layers) {
		il = stack[layer];
		il->bitmap &= ~(1 << idr_pos(id, layer));
	}
	error = 0;
out:
#ifdef INVARIANTS
	if (error == 0 && idr_find_locked(idr, id) != ptr) {
		panic("idr_get_new: Failed for idr %p, id %d, ptr %p\n",
		    idr, id, ptr);
	}
#endif
	mtx_unlock(&idr->lock);
	return (error);
}