static int __init sgi_uv_sysfs_init(void)
{
	int ret;

	if (!is_uv_system())
		return -ENODEV;

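	/* Create /sys/firmware/sgi_uv unless another caller registered it. */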
	if (!sgi_uv_kobj)
		sgi_uv_kobj = kobject_create_and_add("sgi_uv", firmware_kobj);
	if (!sgi_uv_kobj) {
		printk(KERN_WARNING "kobject_create_and_add sgi_uv failed\n");
		return -EINVAL;
	}

	ret = sysfs_create_file(sgi_uv_kobj, &partition_id_attr.attr);
	if (ret) {
		printk(KERN_WARNING "sysfs_create_file partition_id failed\n");
		return ret;
	}

	ret = sysfs_create_file(sgi_uv_kobj, &coherence_id_attr.attr);
	if (ret) {
		printk(KERN_WARNING "sysfs_create_file coherence_id failed\n");
		return ret;
	}

	return 0;
}
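For reference, the two attributes registered above surface as files under /sys/firmware/sgi_uv/ (firmware_kobj is the /sys/firmware kobject). A minimal userspace sketch that reads them back; the paths follow from the code above and the error handling is illustrative only:

#include <stdio.h>

/* Read one of the ids exported by sgi_uv_sysfs_init(); returns -1 on
 * any failure. Only meaningful on an SGI UV system. */
static long read_uv_id(const char *path)
{
	FILE *f = fopen(path, "r");
	long id;

	if (!f)
		return -1;
	if (fscanf(f, "%ld", &id) != 1)
		id = -1;
	fclose(f);
	return id;
}

int main(void)
{
	printf("partition_id: %ld\n",
	       read_uv_id("/sys/firmware/sgi_uv/partition_id"));
	printf("coherence_id: %ld\n",
	       read_uv_id("/sys/firmware/sgi_uv/coherence_id"));
	return 0;
}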
Example #2
void native_flush_tlb_others(const struct cpumask *cpumask,
				 struct mm_struct *mm, unsigned long start,
				 unsigned long end)
{
	struct flush_tlb_info info;

	if (end == 0)
		end = start + PAGE_SIZE;
	info.flush_mm = mm;
	info.flush_start = start;
	info.flush_end = end;

	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	if (end == TLB_FLUSH_ALL)
		trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
	else
		trace_tlb_flush(TLB_REMOTE_SEND_IPI,
				(end - start) >> PAGE_SHIFT);

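	/*
	 * On SGI UV, the Broadcast Assist Unit can flush remote TLBs in
	 * hardware; uv_flush_tlb_others() returns the CPUs it could not
	 * cover, and only those still need the IPI-based flush.
	 */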
	if (is_uv_system()) {
		unsigned int cpu;

		cpu = smp_processor_id();
		cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu);
		if (cpumask)
			smp_call_function_many(cpumask, flush_tlb_func,
								&info, 1);
		return;
	}
	smp_call_function_many(cpumask, flush_tlb_func, &info, 1);
}
Example #3
void native_flush_tlb_others(const struct cpumask *cpumask,
			     struct mm_struct *mm, unsigned long va)
{
	if (is_uv_system()) {
		unsigned int cpu;

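		/* get_cpu() disables preemption, keeping the cpu id stable. */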
		cpu = get_cpu();
		cpumask = uv_flush_tlb_others(cpumask, mm, va, cpu);
		if (cpumask)
			flush_tlb_others_ipi(cpumask, mm, va);
		put_cpu();
		return;
	}
	flush_tlb_others_ipi(cpumask, mm, va);
}
Example #4
void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
			     unsigned long va)
{
	int sender;
	union smp_flush_state *f;
	cpumask_t cpumask = *cpumaskp;

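	/*
	 * On UV, try the hardware Broadcast Assist Unit first; a nonzero
	 * return means every remote TLB was flushed and no IPI is needed.
	 */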
	if (is_uv_system() && uv_flush_tlb_others(&cpumask, mm, va))
		return;

	/* Caller has disabled preemption */
	sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
	f = &per_cpu(flush_state, sender);

	/*
	 * Could avoid this lock when
	 * num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
	 * probably not worth checking this for a cache-hot lock.
	 */
	spin_lock(&f->tlbstate_lock);

	f->flush_mm = mm;
	f->flush_va = va;
	cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask);

	/*
	 * Make the above memory operations globally visible before
	 * sending the IPI.
	 */
	smp_mb();
	/*
	 * Send the IPI only to the CPUs actually affected.
	 */
	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender);

	while (!cpus_empty(f->flush_cpumask))
		cpu_relax();

	f->flush_mm = NULL;
	f->flush_va = 0;
	spin_unlock(&f->tlbstate_lock);
}
Example #5
File: tlb.c  Project: EvanHa/rbp
void native_flush_tlb_others(const struct cpumask *cpumask,
				 struct mm_struct *mm, unsigned long start,
				 unsigned long end)
{
	struct flush_tlb_info info;
	info.flush_mm = mm;
	info.flush_start = start;
	info.flush_end = end;

	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	if (is_uv_system()) {
		unsigned int cpu;

		cpu = smp_processor_id();
		cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu);
		if (cpumask)
			smp_call_function_many(cpumask, flush_tlb_func,
								&info, 1);
		return;
	}
	smp_call_function_many(cpumask, flush_tlb_func, &info, 1);
}
Example #6
File: bus.c  Project: 710leo/LVS
static void acpi_bus_osc_support(void)
{
	u32 capbuf[2];
	struct acpi_osc_context context = {
		.uuid_str = sb_uuid_str,
		.rev = 1,
		.cap.length = 8,
		.cap.pointer = capbuf,
	};
	acpi_handle handle;

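	/* Word 0 requests a query-only _OSC run; word 1 advertises support. */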
	capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
	capbuf[OSC_SUPPORT_TYPE] = OSC_SB_PR3_SUPPORT; /* _PR3 is in use */
#if defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR) ||\
			defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR_MODULE)
	capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_PAD_SUPPORT;
#endif

#if defined(CONFIG_ACPI_PROCESSOR) || defined(CONFIG_ACPI_PROCESSOR_MODULE)
	capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_PPC_OST_SUPPORT;
#endif
	if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)))
		return;

	if (is_uv_system() && is_kdump_kernel()) {
		/*
		 * Do not evaluate _OSC in the crash kernel: parsing the OS
		 * Capabilities table includes destructive writes to I/O
		 * ports that initialize UV system controller interrupts.
		 */
		return;
	}

	if (ACPI_SUCCESS(acpi_run_osc(handle, &context)))
		kfree(context.ret.pointer);
	/* There is no need to check the returned capabilities. */
}

/* --------------------------------------------------------------------------
                                Event Management
   -------------------------------------------------------------------------- */

#ifdef CONFIG_ACPI_PROC_EVENT
static DEFINE_SPINLOCK(acpi_bus_event_lock);

LIST_HEAD(acpi_bus_event_list);
DECLARE_WAIT_QUEUE_HEAD(acpi_bus_event_queue);

extern int event_is_open;

int acpi_bus_generate_proc_event4(const char *device_class, const char *bus_id, u8 type, int data)
{
	struct acpi_bus_event *event;
	unsigned long flags = 0;

	/* drop event on the floor if no one's listening */
	if (!event_is_open)
		return 0;

	event = kmalloc(sizeof(struct acpi_bus_event), GFP_ATOMIC);
	if (!event)
		return -ENOMEM;

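	/* Note: device_class and bus_id must fit the fixed-size buffers
	 * in struct acpi_bus_event; callers pass short ACPI ids. */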
	strcpy(event->device_class, device_class);
	strcpy(event->bus_id, bus_id);
	event->type = type;
	event->data = data;

	spin_lock_irqsave(&acpi_bus_event_lock, flags);
	list_add_tail(&event->node, &acpi_bus_event_list);
	spin_unlock_irqrestore(&acpi_bus_event_lock, flags);

	wake_up_interruptible(&acpi_bus_event_queue);

	return 0;
}

EXPORT_SYMBOL_GPL(acpi_bus_generate_proc_event4);

int acpi_bus_generate_proc_event(struct acpi_device *device, u8 type, int data)
{
	if (!device)
		return -EINVAL;
	return acpi_bus_generate_proc_event4(device->pnp.device_class,
					     device->pnp.bus_id, type, data);
}

EXPORT_SYMBOL(acpi_bus_generate_proc_event);
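
As a usage sketch, a driver's notify handler could forward events to userspace readers of /proc/acpi/event through the helper exported above; the handler name and the empty data payload here are hypothetical:

/* Hypothetical notify handler: queue an ACPI notify value for
 * /proc/acpi/event readers via acpi_bus_generate_proc_event(). */
static void example_acpi_notify(struct acpi_device *device, u32 event)
{
	if (acpi_bus_generate_proc_event(device, (u8)event, 0))
		printk(KERN_WARNING "example: could not queue ACPI event\n");
}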

int acpi_bus_receive_event(struct acpi_bus_event *event)
{
	unsigned long flags = 0;
	struct acpi_bus_event *entry = NULL;

	DECLARE_WAITQUEUE(wait, current);

	if (!event)
		return -EINVAL;

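	/* Block until an event is queued; a signal aborts the wait. */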
	if (list_empty(&acpi_bus_event_list)) {
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&acpi_bus_event_queue, &wait);

		if (list_empty(&acpi_bus_event_list))
			schedule();

		remove_wait_queue(&acpi_bus_event_queue, &wait);
		set_current_state(TASK_RUNNING);

		if (signal_pending(current))
			return -ERESTARTSYS;
	}

	spin_lock_irqsave(&acpi_bus_event_lock, flags);
	if (!list_empty(&acpi_bus_event_list)) {
		entry = list_entry(acpi_bus_event_list.next,
				   struct acpi_bus_event, node);
		list_del(&entry->node);
	}