Example No. 1
static int _put_module(struct module *mod)
{
    int count = 0;

    if(mod == NULL)
    {
        DE("module is NULL.\n");
        return -1;
    }

    /* Force the module state back to LIVE, then drop every
     * outstanding reference so the module can be unloaded. */
    mod->state = MODULE_STATE_LIVE;
    /*mod->exit = force_exit;*/
    count = module_refcount(mod);
    while(count)
    {
        module_put(mod);
        --count;
    }

#if 0	/* disabled alternative: reset the module's reference counters directly */
#ifdef CONFIG_SMP
    DE("CONFIG_SMP.\n");
    for(i = 0;i < NR_CPUS; i++)
    {
        refparams = (local_t *)&mod->refparams[i];
        local_set(refparams, 0);
    }
#else
    DE("NO CONFIG_SMP.\n");
    local_set(&mod->ref, 0);
#endif
#endif

    return 0;
}
Example No. 2
static void cache_module( const char *filename, aptime time, value main ) {
	cache *c = (cache*)local_get(cache_root), *prev = NULL;
	value fname = alloc_string(filename);
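	/* See whether this file already has a cache entry. */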
	while( c != NULL ) {
		if( val_compare(fname,c->file) == 0 ) {
			if( main == NULL ) {
				if( prev == NULL )
					local_set(cache_root,c->next);
				else
					prev->next = c->next;
				free_root((value*)c);
			} else {
				c->main = main;
				c->time = time;
			}
			return;
		}
		prev = c;
		c = c->next;
	}
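	/* Not cached yet: allocate a new entry and link it at the head of the list. */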
	c = (cache*)alloc_root(sizeof(struct cache) / sizeof(value));
	c->file = fname;
	c->main = main;
	c->time = time;
	c->hits = 0;
	c->next = (cache*)local_get(cache_root);
	local_set(cache_root,c);
}
Example No. 3
void __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
{
	int sum;
	int touched = 0;

	sum = read_pda(apic_timer_irqs);
	if (__get_cpu_var(nmi_touch)) {
		__get_cpu_var(nmi_touch) = 0;
		touched = 1;
	}
#ifdef CONFIG_X86_MCE
	/* Could check oops_in_progress here too, but it's safer
	   not to */
	if (atomic_read(&mce_entry) > 0)
		touched = 1;
#endif
	if (!touched && __get_cpu_var(last_irq_sum) == sum) {
		/*
		 * Ayiee, looks like this CPU is stuck ...
		 * wait a few IRQs (5 seconds) before doing the oops ...
		 */
		local_inc(&__get_cpu_var(alert_counter));
		if (local_read(&__get_cpu_var(alert_counter)) == 5*nmi_hz) {
			if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
							== NOTIFY_STOP) {
				local_set(&__get_cpu_var(alert_counter), 0);
				return;
			}
			die_nmi("NMI Watchdog detected LOCKUP on CPU %d\n", regs);
		}
	} else {
		__get_cpu_var(last_irq_sum) = sum;
		local_set(&__get_cpu_var(alert_counter), 0);
	}
	if (nmi_perfctr_msr) {
 		if (nmi_perfctr_msr == MSR_P4_IQ_COUNTER0) {
 			/*
 			 * P4 quirks:
 			 * - An overflown perfctr will assert its interrupt
 			 *   until the OVF flag in its CCCR is cleared.
 			 * - LVTPC is masked on interrupt and must be
 			 *   unmasked by the LVTPC handler.
 			 */
 			wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
 			apic_write(APIC_LVTPC, APIC_DM_NMI);
 		} else if (nmi_perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) {
			/*
			 * For Intel based architectural perfmon
			 * - LVTPC is masked on interrupt and must be
			 *   unmasked by the LVTPC handler.
			 */
			apic_write(APIC_LVTPC, APIC_DM_NMI);
		}
		wrmsrl(nmi_perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
	}
}
Example No. 4
static void etm_disable(struct coresight_device *csdev)
{
	u32 mode;
	struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	/*
	 * For as long as the tracer isn't disabled another entity can't
	 * change its status.  As such we can read the status here without
	 * fearing it will change under us.
	 */
	mode = local_read(&drvdata->mode);

	switch (mode) {
	case CS_MODE_DISABLED:
		break;
	case CS_MODE_SYSFS:
		etm_disable_sysfs(csdev);
		break;
	case CS_MODE_PERF:
		etm_disable_perf(csdev);
		break;
	default:
		WARN_ON_ONCE(mode);
		return;
	}

	if (mode)
		local_set(&drvdata->mode, CS_MODE_DISABLED);
}
Example No. 5
notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
{
	unsigned int sum, touched = 0;
	int cpu = smp_processor_id();

	clear_softint(1 << irq);
	pcr_ops->write(PCR_PIC_PRIV);

	local_cpu_data().__nmi_count++;

	if (notify_die(DIE_NMI, "nmi", regs, 0,
		       pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP)
		touched = 1;

	sum = kstat_irqs_cpu(0, cpu);
	if (__get_cpu_var(nmi_touch)) {
		__get_cpu_var(nmi_touch) = 0;
		touched = 1;
	}
	if (!touched && __get_cpu_var(last_irq_sum) == sum) {
		local_inc(&__get_cpu_var(alert_counter));
		if (local_read(&__get_cpu_var(alert_counter)) == 5 * nmi_hz)
			die_nmi("BUG: NMI Watchdog detected LOCKUP",
				regs, panic_on_timeout);
	} else {
		__get_cpu_var(last_irq_sum) = sum;
		local_set(&__get_cpu_var(alert_counter), 0);
	}
	if (nmi_usable) {
		write_pic(picl_value(nmi_hz));
		pcr_ops->write(pcr_enable);
	}
}
Example No. 6
static value cache_find( request_rec *r ) {
	cache *c = (cache*)local_get(cache_root);
	cache *prev = NULL;
	value fname = alloc_string(r->filename);
	while( c != NULL ) {
		if( val_compare(fname,c->file) == 0 ) {
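			/* Reuse the cached module only if caching is enabled and the file is unchanged. */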
			if( config.use_cache && FTIME(r) == c->time ) {
				c->hits++;
				return c->main;
			}
			if( prev == NULL )
				local_set(cache_root,c->next);
			else
				prev->next = c->next;
			free_root((value*)c);
			// try to lower memory partitioning
			// when a module is updated
			c = NULL;
			gc_major();
			break;
		}
		prev = c;
		c = c->next;
	}
	return NULL;
}
Example No. 7
static int etm_enable(struct coresight_device *csdev,
		      struct perf_event_attr *attr, u32 mode)
{
	int ret;
	u32 val;
	struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	val = local_cmpxchg(&drvdata->mode, CS_MODE_DISABLED, mode);

	/* Someone is already using the tracer */
	if (val)
		return -EBUSY;

	switch (mode) {
	case CS_MODE_SYSFS:
		ret = etm_enable_sysfs(csdev);
		break;
	case CS_MODE_PERF:
		ret = etm_enable_perf(csdev, attr);
		break;
	default:
		ret = -EINVAL;
	}

	/* The tracer didn't start */
	if (ret)
		local_set(&drvdata->mode, CS_MODE_DISABLED);

	return ret;
}
Example No. 8
static void bts_update(struct bts_ctx *bts)
{
	int cpu = raw_smp_processor_id();
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
	struct bts_buffer *buf = perf_get_aux(&bts->handle);
	unsigned long index = ds->bts_index - ds->bts_buffer_base, old, head;

	if (!buf)
		return;

	head = index + bts_buffer_offset(buf, buf->cur_buf);
	old = local_xchg(&buf->head, head);

	if (!buf->snapshot) {
		if (old == head)
			return;

		if (ds->bts_index >= ds->bts_absolute_maximum)
			local_inc(&buf->lost);

		/*
		 * old and head are always in the same physical buffer, so we
		 * can subtract them to get the data size.
		 */
		local_add(head - old, &buf->data_size);
	} else {
		local_set(&buf->data_size, head);
	}
}
Example No. 9
static int etb_release(struct inode *inode, struct file *file)
{
	struct etb_drvdata *drvdata = container_of(file->private_data,
						   struct etb_drvdata, miscdev);
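	/* Clear the 'reading' flag so the ETB can be opened for reading again. */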
	local_set(&drvdata->reading, 0);

	dev_dbg(drvdata->dev, "%s: released\n", __func__);
	return 0;
}
Example No. 10
static void etb_disable(struct coresight_device *csdev)
{
	struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	unsigned long flags;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	etb_disable_hw(drvdata);
	etb_dump_hw(drvdata);
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	local_set(&drvdata->mode, CS_MODE_DISABLED);

	dev_info(drvdata->dev, "ETB disabled\n");
}
Example No. 11
static void tmc_disable_etf_link(struct coresight_device *csdev,
				 int inport, int outport)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return;
	}

	tmc_etf_disable_hw(drvdata);
	local_set(&drvdata->mode, CS_MODE_DISABLED);
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_info(drvdata->dev, "TMC disabled\n");
}
Example No. 12
static int etb_set_buffer(struct coresight_device *csdev,
			  struct perf_output_handle *handle,
			  void *sink_config)
{
	int ret = 0;
	unsigned long head;
	struct cs_buffers *buf = sink_config;

	/* wrap head around to the amount of space we have */
	head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);

	/* find the page to write to */
	buf->cur = head / PAGE_SIZE;

	/* and offset within that page */
	buf->offset = head % PAGE_SIZE;

	local_set(&buf->data_size, 0);

	return ret;
}
Example No. 13
static void stm_disable(struct coresight_device *csdev,
			struct perf_event *event)
{
	struct stm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	/*
	 * For as long as the tracer isn't disabled another entity can't
	 * change its status.  As such we can read the status here without
	 * fearing it will change under us.
	 */
	if (local_read(&drvdata->mode) == CS_MODE_SYSFS) {
		spin_lock(&drvdata->spinlock);
		stm_disable_hw(drvdata);
		spin_unlock(&drvdata->spinlock);

		/* Wait until the engine has completely stopped */
		coresight_timeout(drvdata->base, STMTCSR, STMTCSR_BUSY_BIT, 0);

		pm_runtime_put(drvdata->dev);

		local_set(&drvdata->mode, CS_MODE_DISABLED);
		dev_dbg(drvdata->dev, "STM tracing disabled\n");
	}
}
Example No. 14
static void etb_update_buffer(struct coresight_device *csdev,
			      struct perf_output_handle *handle,
			      void *sink_config)
{
	bool lost = false;
	int i, cur;
	u8 *buf_ptr;
	const u32 *barrier;
	u32 read_ptr, write_ptr, capacity;
	u32 status, read_data, to_read;
	unsigned long offset;
	struct cs_buffers *buf = sink_config;
	struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	if (!buf)
		return;

	capacity = drvdata->buffer_depth * ETB_FRAME_SIZE_WORDS;

	etb_disable_hw(drvdata);
	CS_UNLOCK(drvdata->base);

	/* unit is in words, not bytes */
	read_ptr = readl_relaxed(drvdata->base + ETB_RAM_READ_POINTER);
	write_ptr = readl_relaxed(drvdata->base + ETB_RAM_WRITE_POINTER);

	/*
	 * Entries should be aligned to the frame size.  If they are not
	 * go back to the last alignment point to give decoding tools a
	 * chance to fix things.
	 */
	if (write_ptr % ETB_FRAME_SIZE_WORDS) {
		dev_err(drvdata->dev,
			"write_ptr: %lu not aligned to formatter frame size\n",
			(unsigned long)write_ptr);

		write_ptr &= ~(ETB_FRAME_SIZE_WORDS - 1);
		lost = true;
	}

	/*
	 * Get a hold of the status register and see if a wrap around
	 * has occurred.  If so adjust things accordingly.  Otherwise
	 * start at the beginning and go until the write pointer has
	 * been reached.
	 */
	status = readl_relaxed(drvdata->base + ETB_STATUS_REG);
	if (status & ETB_STATUS_RAM_FULL) {
		lost = true;
		to_read = capacity;
		read_ptr = write_ptr;
	} else {
		to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->buffer_depth);
		to_read *= ETB_FRAME_SIZE_WORDS;
	}

	/*
	 * Make sure we don't overwrite data that hasn't been consumed yet.
	 * It is entirely possible that the HW buffer has more data than the
	 * ring buffer can currently handle.  If so adjust the start address
	 * to take only the last traces.
	 *
	 * In snapshot mode we are looking to get the latest traces only and as
	 * such, we don't care about not overwriting data that hasn't been
	 * processed by user space.
	 */
	if (!buf->snapshot && to_read > handle->size) {
		u32 mask = ~(ETB_FRAME_SIZE_WORDS - 1);

		/* The new read pointer must be frame size aligned */
		to_read = handle->size & mask;
		/*
		 * Move the RAM read pointer up, keeping in mind that
		 * everything is in frame size units.
		 */
		read_ptr = (write_ptr + drvdata->buffer_depth) -
					to_read / ETB_FRAME_SIZE_WORDS;
		/* Wrap around if need be */
		if (read_ptr > (drvdata->buffer_depth - 1))
			read_ptr -= drvdata->buffer_depth;
		/* let the decoder know we've skipped ahead */
		lost = true;
	}

	if (lost)
		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);

	/* finally tell HW where we want to start reading from */
	writel_relaxed(read_ptr, drvdata->base + ETB_RAM_READ_POINTER);

	cur = buf->cur;
	offset = buf->offset;
	barrier = barrier_pkt;

	for (i = 0; i < to_read; i += 4) {
		buf_ptr = buf->data_pages[cur] + offset;
		read_data = readl_relaxed(drvdata->base +
					  ETB_RAM_READ_DATA_REG);
		if (lost && *barrier) {
			read_data = *barrier;
			barrier++;
		}

		*(u32 *)buf_ptr = read_data;
		buf_ptr += 4;

		offset += 4;
		if (offset >= PAGE_SIZE) {
			offset = 0;
			cur++;
			/* wrap around at the end of the buffer */
			cur &= buf->nr_pages - 1;
		}
	}

	/* reset ETB buffer for next run */
	writel_relaxed(0x0, drvdata->base + ETB_RAM_READ_POINTER);
	writel_relaxed(0x0, drvdata->base + ETB_RAM_WRITE_POINTER);

	/*
	 * In snapshot mode all we have to do is communicate to
	 * perf_aux_output_end() the address of the current head.  In full
	 * trace mode the same function expects a size to move rb->aux_head
	 * forward.
	 */
	if (buf->snapshot)
		local_set(&buf->data_size, (cur * PAGE_SIZE) + offset);
	else
		local_add(to_read, &buf->data_size);

	etb_enable_hw(drvdata);
	CS_LOCK(drvdata->base);
}
Example No. 15
notrace __kprobes int
nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
{
	/*
	 * Since current_thread_info()-> is always on the stack, and we
	 * always switch the stack NMI-atomically, it's safe to use
	 * smp_processor_id().
	 */
	unsigned int sum;
	int touched = 0;
	int cpu = smp_processor_id();
	int rc = 0;

	/* check for other users first */
	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
			== NOTIFY_STOP) {
		rc = 1;
		touched = 1;
	}

	sum = get_timer_irqs(cpu);

	if (__get_cpu_var(nmi_touch)) {
		__get_cpu_var(nmi_touch) = 0;
		touched = 1;
	}

	/* We can be called before check_nmi_watchdog, hence NULL check. */
	if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
		static DEFINE_SPINLOCK(lock);	/* Serialise the printks */

		spin_lock(&lock);
		printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
		show_regs(regs);
		dump_stack();
		spin_unlock(&lock);
		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));

		rc = 1;
	}

	/* Could check oops_in_progress here too, but it's safer not to */
	if (mce_in_progress())
		touched = 1;

	/* if none of the timers is firing, this cpu isn't doing much */
	if (!touched && __get_cpu_var(last_irq_sum) == sum) {
		/*
		 * Ayiee, looks like this CPU is stuck ...
		 * wait a few IRQs (5 seconds) before doing the oops ...
		 */
		local_inc(&__get_cpu_var(alert_counter));
		if (local_read(&__get_cpu_var(alert_counter)) == 5 * nmi_hz)
			/*
			 * die_nmi will return ONLY if NOTIFY_STOP happens..
			 */
			die_nmi("BUG: NMI Watchdog detected LOCKUP",
				regs, panic_on_timeout);
	} else {
		__get_cpu_var(last_irq_sum) = sum;
		local_set(&__get_cpu_var(alert_counter), 0);
	}

	/* see if the nmi watchdog went off */
	if (!__get_cpu_var(wd_enabled))
		return rc;
	switch (nmi_watchdog) {
	case NMI_LOCAL_APIC:
		rc |= lapic_wd_event(nmi_hz);
		break;
	case NMI_IO_APIC:
		/*
		 * don't know how to accurately check for this.
		 * just assume it was a watchdog timer interrupt
		 * This matches the old behaviour.
		 */
		rc = 1;
		break;
	}
	return rc;
}
Example No. 16
EXTERN void neko_vm_select( neko_vm *vm ) {
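	/* Make this the current VM for the calling thread. */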
	local_set(neko_vm_context,vm);
}
Example No. 17
	basic_dxFrame_():dxArea_<DrawFuncType>(){local_set(dxRGB(0,0,0),dxDMode(),dxDMode(),dxDMode());}
Example No. 18
File: sock.c Project: pfq/PFQ
int pfq_sock_init(struct pfq_sock *so, pfq_id_t id, size_t caplen, size_t xmitlen)
{
	int i;

	/* setup stats */

	so->stats = alloc_percpu(pfq_sock_stats_t);
	if (!so->stats)
		return -ENOMEM;

	for_each_present_cpu(i)
	{
		pfq_sock_stats_t * stat = per_cpu_ptr(so->stats, i);

		local_set(&stat->recv, 0);
		local_set(&stat->lost, 0);
		local_set(&stat->drop, 0);
		local_set(&stat->sent, 0);
		local_set(&stat->disc, 0);
		local_set(&stat->fail, 0);
		local_set(&stat->frwd, 0);
		local_set(&stat->kern, 0);
	}

	/* setup id */

	so->id = id;

        /* memory mapped queues are allocated later, when the socket is enabled */

	so->egress_type = Q_ENDPOINT_SOCKET;
	so->egress_index = 0;
	so->egress_queue = 0;

	/* default weight */

	so->weight = 1;

        so->shmem.addr = NULL;
        so->shmem.size = 0;
        so->shmem.kind = 0;
        so->shmem.hugepages_descr = NULL;

        atomic_long_set(&so->shmem_addr,0);

        /* disable timestamping by default */

        so->tstamp = false;

        /* initialize waitqueue */

        pfq_sock_init_waitqueue_head(&so->waitqueue);

	/* Rx queue setup */

	pfq_queue_info_init(&so->rx);

        so->rx_len = caplen;
        so->rx_queue_len = 0;
        so->rx_slot_size  = PFQ_SHARED_QUEUE_SLOT_SIZE(caplen);

	/* Tx queues setup */

	pfq_queue_info_init(&so->tx);

        so->tx_len = xmitlen;
        so->tx_queue_len  = 0;
        so->tx_slot_size  = PFQ_SHARED_QUEUE_SLOT_SIZE(xmitlen);
	so->txq_num_async = 0;

	/* Tx async queues setup */

	for(i = 0; i < Q_MAX_TX_QUEUES; ++i)
	{
		pfq_queue_info_init(&so->tx_async[i]);
	}
        return 0;
}
Example No. 19
static void tmc_update_etf_buffer(struct coresight_device *csdev,
				  struct perf_output_handle *handle,
				  void *sink_config)
{
	int i, cur;
	u32 *buf_ptr;
	u32 read_ptr, write_ptr;
	u32 status, to_read;
	unsigned long offset;
	struct cs_buffers *buf = sink_config;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	if (!buf)
		return;

	/* This shouldn't happen */
	if (WARN_ON_ONCE(local_read(&drvdata->mode) != CS_MODE_PERF))
		return;

	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);

	read_ptr = readl_relaxed(drvdata->base + TMC_RRP);
	write_ptr = readl_relaxed(drvdata->base + TMC_RWP);

	/*
	 * Get a hold of the status register and see if a wrap around
	 * has occurred.  If so adjust things accordingly.
	 */
	status = readl_relaxed(drvdata->base + TMC_STS);
	if (status & TMC_STS_FULL) {
		local_inc(&buf->lost);
		to_read = drvdata->size;
	} else {
		to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->size);
	}

	/*
	 * The TMC RAM buffer may be bigger than the space available in the
	 * perf ring buffer (handle->size).  If so advance the RRP so that we
	 * get the latest trace data.
	 */
	if (to_read > handle->size) {
		u32 mask = 0;

		/*
		 * The value written to RRP must be byte-address aligned to
		 * the width of the trace memory databus _and_ to a frame
		 * boundary (16 byte), whichever is the biggest. For example,
		 * for 32-bit, 64-bit and 128-bit wide trace memory, the four
		 * LSBs must be 0s. For 256-bit wide trace memory, the five
		 * LSBs must be 0s.
		 */
		switch (drvdata->memwidth) {
		case TMC_MEM_INTF_WIDTH_32BITS:
		case TMC_MEM_INTF_WIDTH_64BITS:
		case TMC_MEM_INTF_WIDTH_128BITS:
			mask = GENMASK(31, 5);
			break;
		case TMC_MEM_INTF_WIDTH_256BITS:
			mask = GENMASK(31, 6);
			break;
		}

		/*
		 * Make sure the new size is aligned in accordance with the
		 * requirement explained above.
		 */
		to_read = handle->size & mask;
		/* Move the RAM read pointer up */
		read_ptr = (write_ptr + drvdata->size) - to_read;
		/* Make sure we are still within our limits */
		if (read_ptr > (drvdata->size - 1))
			read_ptr -= drvdata->size;
		/* Tell the HW */
		writel_relaxed(read_ptr, drvdata->base + TMC_RRP);
		local_inc(&buf->lost);
	}

	cur = buf->cur;
	offset = buf->offset;

	/* for every byte to read */
	for (i = 0; i < to_read; i += 4) {
		buf_ptr = buf->data_pages[cur] + offset;
		*buf_ptr = readl_relaxed(drvdata->base + TMC_RRD);

		offset += 4;
		if (offset >= PAGE_SIZE) {
			offset = 0;
			cur++;
			/* wrap around at the end of the buffer */
			cur &= buf->nr_pages - 1;
		}
	}

	/*
	 * In snapshot mode all we have to do is communicate to
	 * perf_aux_output_end() the address of the current head.  In full
	 * trace mode the same function expects a size to move rb->aux_head
	 * forward.
	 */
	if (buf->snapshot)
		local_set(&buf->data_size, (cur * PAGE_SIZE) + offset);
	else
		local_add(to_read, &buf->data_size);

	CS_LOCK(drvdata->base);
}
Example No. 20
	basic_dxFrame_(DrawFuncType& _DrawFunc,const Pint& _size,const dxRGB& _Clr,
		const dxDMode& _Out=dxDMode(196),const dxDMode& _In=dxDMode(128),const dxDMode& _Passive=dxDMode(128))
		:dxArea_<DrawFuncType>(_DrawFunc,_size){local_set(_Clr,_Out,_In,_Passive);}
Example No. 21
	void set(DrawFuncType& _DrawFunc,const Pint& _size,const dxRGB& _Clr,
		const dxDMode& _Out=dxDMode(196),const dxDMode& _In=dxDMode(128),const dxDMode& _Passive=dxDMode(128)){
		dxArea_<DrawFuncType>::set(_DrawFunc,_size);
		local_set(_Clr,_Out,_In,_Passive);
	}
Example No. 22
static void rb_init_page(struct buffer_data_page *bpage)
{
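	/* A freshly initialised buffer page has no committed data yet. */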
	local_set(&bpage->commit, 0);
}
Example No. 23
	dxArea_(){local_set(0,Pint());}
Example No. 24
	void set(DrawFuncType& _DrawFunc,const Pint& _size,const dxRGB& _Clr,const std::string& _name="",
		const dxDMode& _Out=dxDMode(196),const dxDMode& _In=dxDMode(128),const dxDMode& _Passive=dxDMode(128)){
			basic_dxFrame_<DrawFuncType>::set(_DrawFunc,_size,_Clr,_Out,_In,_Passive);
			local_set(_name);
	}
Example No. 25
	basic_dxWindow_():basic_dxFrame_<DrawFuncType>(){local_set("");}
Example No. 26
static int
bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle)
{
	unsigned long head, space, next_space, pad, gap, skip, wakeup;
	unsigned int next_buf;
	struct bts_phys *phys, *next_phys;
	int ret;

	if (buf->snapshot)
		return 0;

	head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);
	if (WARN_ON_ONCE(head != local_read(&buf->head)))
		return -EINVAL;

	phys = &buf->buf[buf->cur_buf];
	space = phys->offset + phys->displacement + phys->size - head;
	pad = space;
	if (space > handle->size) {
		space = handle->size;
		space -= space % BTS_RECORD_SIZE;
	}
	if (space <= BTS_SAFETY_MARGIN) {
		/* See if next phys buffer has more space */
		next_buf = buf->cur_buf + 1;
		if (next_buf >= buf->nr_bufs)
			next_buf = 0;
		next_phys = &buf->buf[next_buf];
		gap = buf_size(phys->page) - phys->displacement - phys->size +
		      next_phys->displacement;
		skip = pad + gap;
		if (handle->size >= skip) {
			next_space = next_phys->size;
			if (next_space + skip > handle->size) {
				next_space = handle->size - skip;
				next_space -= next_space % BTS_RECORD_SIZE;
			}
			if (next_space > space || !space) {
				if (pad)
					bts_buffer_pad_out(phys, head);
				ret = perf_aux_output_skip(handle, skip);
				if (ret)
					return ret;
				/* Advance to next phys buffer */
				phys = next_phys;
				space = next_space;
				head = phys->offset + phys->displacement;
				/*
				 * After this, cur_buf and head won't match ds
				 * anymore, so we must not be racing with
				 * bts_update().
				 */
				buf->cur_buf = next_buf;
				local_set(&buf->head, head);
			}
		}
	}

	/* Don't go far beyond wakeup watermark */
	wakeup = BTS_SAFETY_MARGIN + BTS_RECORD_SIZE + handle->wakeup -
		 handle->head;
	if (space > wakeup) {
		space = wakeup;
		space -= space % BTS_RECORD_SIZE;
	}

	buf->end = head + space;

	/*
	 * If we have no space, the lost notification would have been sent when
	 * we hit absolute_maximum - see bts_update()
	 */
	if (!space)
		return -ENOSPC;

	return 0;
}
Example No. 27
	void set(DrawFuncType& _DrawFunc,const Pint& _size){local_set(_DrawFunc,_size);}