int register_die_notifier(struct notifier_block *nb)
{
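	/*
	 * Sync the vmalloc page tables up front so that walking the die
	 * chain from a fault/oops context cannot itself fault (the notifier
	 * may live in freshly vmalloc'ed module memory).
	 */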
	vmalloc_sync_all();
	return atomic_notifier_chain_register(&die_chain, nb);
}
/*****************************************************************************
 Function    : udi_acm_init
 Description : init function that registers the ACM ports with the UDI
               adaptation layer and hooks the ACM device callbacks onto UDI
 Input       : void
 Output      : none
 Return      : int - ADP_OK on success, ADP_ERROR on failure
 Calls       :
 Called by   :

 History     :
  1. Date    : 2012-09-12
     Author  : 夏青 00195127
     Change  : newly created function

*****************************************************************************/
int udi_acm_init(void)
{
    int dev_num;
    int i;
    UDI_DRV_INTEFACE_TABLE *pDrvInterface = NULL;
    struct device_node *np = NULL;
    u32 phyaddr_from_app = 1; /* default: use the physical address handed down by the protocol stack */

    pDrvInterface = kmalloc(sizeof(UDI_DRV_INTEFACE_TABLE), GFP_KERNEL);
    if (!pDrvInterface) {
        ACM_ADP_ERR("kmalloc failed\n");
        return ADP_ERROR;
    }
    ACM_ADP_DBG("enter\n");

    /* Hook up the UDI callbacks */
    pDrvInterface->udi_open_cb  = (UDI_OPEN_CB_T)udi_acm_open;
    pDrvInterface->udi_close_cb = (UDI_CLOSE_CB_T)udi_acm_close;
    pDrvInterface->udi_write_cb = (UDI_WRITE_CB_T)udi_acm_write;
    pDrvInterface->udi_read_cb  = (UDI_READ_CB_T)udi_acm_read;
    pDrvInterface->udi_ioctl_cb = (UDI_IOCTL_CB_T)udi_acm_ioctl;

    /* Set the ACM device driver callbacks; all three ACM ports share one interface table */
    for (dev_num = UDI_USB_ACM_CTRL; dev_num < UDI_USB_ACM_MAX; dev_num++) {
        (void)BSP_UDI_SetCapability((UDI_DEVICE_ID_E)UDI_BUILD_DEV_ID(UDI_DEV_USB_ACM, dev_num), UDI_USB_ACM_CAPA);
        (void)BSP_UDI_SetInterfaceTable((UDI_DEVICE_ID_E)UDI_BUILD_DEV_ID(UDI_DEV_USB_ACM, dev_num), pDrvInterface);
    }

    np = of_find_compatible_node(NULL, NULL, "hisilicon,hi6xxx-usb-otg");
    if (np) {
        if (of_property_read_u32(np, "huawei,phyaddr_from_app", &phyaddr_from_app)) {
            printk(KERN_ERR "%s: huawei,phyaddr_from_app read error\n", __func__);
        }
        printk(KERN_INFO "%s: huawei,phyaddr_from_app %u\n", __func__, phyaddr_from_app);
    }

    for (i = 0; i < USB_ACM_COM_UDI_NUM; i++) {
        g_acm_priv[i].tty_name      = acm_dev_name[i];
        g_acm_priv[i].bopen         = ACM_DEV_CLOSE;
        g_acm_priv[i].readDoneCB    = NULL;
        g_acm_priv[i].writeDoneCB   = NULL;
        g_acm_priv[i].eventCB       = NULL;
        g_acm_priv[i].freeSendbuff  = NULL;
        g_acm_priv[i].acm_ncpy      = NULL;
        g_acm_priv[i].tty           = NULL;
        g_acm_priv[i].acm_ldisc_ctx = NULL;
        g_acm_priv[i].do_copy       = 0;
        g_acm_priv[i].cur_evt       = ACM_EVT_DEV_SUSPEND;
        g_acm_priv[i].ncopy         = 0;
        g_acm_priv[i].phyaddr_from_app = 0;
        INIT_WORK(&g_acm_priv[i].notify_work, acm_event_notify);

        if (ACM_NEED_NCOPY(g_acm_priv[i].tty_name)) {
            g_acm_priv[i].ncopy = 1;
            g_acm_priv[i].phyaddr_from_app = phyaddr_from_app;
        }

    }

    atomic_notifier_chain_register(&android_usb_suspend_notifier_list, &acm_suspend_notifier);
    atomic_notifier_chain_register(&android_usb_resume_notifier_list,  &acm_resume_notifier);

    BSP_ACM_RegCmdHandle((ACM_HANDLE_COMMAND_CB_T)acm_handle_command);

    return ADP_OK;
}
static long omap_wdt_ioctl(struct file *file, unsigned int cmd,
						unsigned long arg)
{
	struct omap_wdt_dev *wdev;
	int new_margin;
	static const struct watchdog_info ident = {
		.identity = "OMAP Watchdog",
		.options = WDIOF_SETTIMEOUT,
		.firmware_version = 0,
	};

	wdev = file->private_data;

	switch (cmd) {
	case WDIOC_GETSUPPORT:
		return copy_to_user((struct watchdog_info __user *)arg, &ident,
				sizeof(ident)) ? -EFAULT : 0;
	case WDIOC_GETSTATUS:
		return put_user(0, (int __user *)arg);
	case WDIOC_GETBOOTSTATUS:
		if (cpu_is_omap16xx())
			return put_user(__raw_readw(ARM_SYSST),
					(int __user *)arg);
		if (cpu_is_omap24xx())
			return put_user(omap_prcm_get_reset_sources(),
					(int __user *)arg);
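		/* fall through */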
	case WDIOC_KEEPALIVE:
		spin_lock(&wdt_lock);
		omap_wdt_ping(wdev);
		spin_unlock(&wdt_lock);
		return 0;
	case WDIOC_SETTIMEOUT:
		if (get_user(new_margin, (int __user *)arg))
			return -EFAULT;
		omap_wdt_adjust_timeout(new_margin);

		spin_lock(&wdt_lock);
		omap_wdt_disable(wdev);
		omap_wdt_set_timeout(wdev);
		omap_wdt_enable(wdev);

		omap_wdt_ping(wdev);
		spin_unlock(&wdt_lock);
		/* fall through */
	case WDIOC_GETTIMEOUT:
		return put_user(timer_margin, (int __user *)arg);
	default:
		return -ENOTTY;
	}
}
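
/*
 * For context: the WDIOC_* commands handled above are exercised from
 * userspace through /dev/watchdog. A minimal sketch, assuming only the
 * standard <linux/watchdog.h> ABI (this program is not part of the driver):
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
	int timeout = 30;
	int fd = open("/dev/watchdog", O_WRONLY);

	if (fd < 0) {
		perror("open /dev/watchdog");
		return 1;
	}

	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);	/* request a 30 s margin */
	ioctl(fd, WDIOC_GETTIMEOUT, &timeout);	/* read back the granted value */
	printf("watchdog margin: %d s\n", timeout);

	ioctl(fd, WDIOC_KEEPALIVE, 0);		/* pet the watchdog once */

	write(fd, "V", 1);			/* magic close, where supported */
	close(fd);
	return 0;
}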

static const struct file_operations omap_wdt_fops = {
	.owner = THIS_MODULE,
	.write = omap_wdt_write,
	.unlocked_ioctl = omap_wdt_ioctl,
	.open = omap_wdt_open,
	.release = omap_wdt_release,
	.llseek = no_llseek,
};

static int omap_wdt_nb_func(struct notifier_block *nb, unsigned long val,
	void *v)
{
	struct omap_wdt_dev *wdev = container_of(nb, struct omap_wdt_dev, nb);
	omap_wdt_ping(wdev);

	return NOTIFY_OK;
}

static int __devinit omap_wdt_probe(struct platform_device *pdev)
{
	struct resource *res, *mem, *res_irq;
	struct omap_wdt_dev *wdev;
	int ret;

	/* reserve static register mappings */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ret = -ENOENT;
		goto err_get_resource;
	}

	if (omap_wdt_dev) {
		ret = -EBUSY;
		goto err_busy;
	}

	mem = request_mem_region(res->start, resource_size(res), pdev->name);
	if (!mem) {
		ret = -EBUSY;
		goto err_busy;
	}

	res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);

	wdev = kzalloc(sizeof(struct omap_wdt_dev), GFP_KERNEL);
	if (!wdev) {
		ret = -ENOMEM;
		goto err_kzalloc;
	}

	wdev->omap_wdt_users = 0;
	wdev->mem = mem;
	wdev->dev = &pdev->dev;

	wdev->base = ioremap(res->start, resource_size(res));
	if (!wdev->base) {
		ret = -ENOMEM;
		goto err_ioremap;
	}

	if (res_irq) {
		ret = request_irq(res_irq->start, omap_wdt_interrupt, 0,
				  dev_name(&pdev->dev), wdev);

		if (ret)
			goto err_irq;

		wdev->irq = res_irq->start;
	}

	platform_set_drvdata(pdev, wdev);

	/*
	 * Note: PM runtime functions must be used only when watchdog driver
	 * is enabling/disabling. Dynamic handling of clock of watchdog timer on
	 * OMAP4/5 (like provided with runtime API) will cause system failure
	 */
	pm_runtime_enable(wdev->dev);
	pm_runtime_irq_safe(wdev->dev);
	pm_runtime_get_sync(wdev->dev);

	omap_wdt_disable(wdev);
	omap_wdt_adjust_timeout(timer_margin);

	wdev->omap_wdt_miscdev.parent = &pdev->dev;
	wdev->omap_wdt_miscdev.minor = WATCHDOG_MINOR;
	wdev->omap_wdt_miscdev.name = "watchdog";
	wdev->omap_wdt_miscdev.fops = &omap_wdt_fops;

	ret = misc_register(&(wdev->omap_wdt_miscdev));
	if (ret)
		goto err_misc;

	pr_info("OMAP Watchdog Timer Rev 0x%02x: initial timeout %d sec\n",
		__raw_readl(wdev->base + OMAP_WATCHDOG_REV) & 0xFF,
		timer_margin);

	omap_wdt_dev = pdev;

	if (kernelpet && wdev->irq) {
		wdev->nb.notifier_call = omap_wdt_nb_func;
		atomic_notifier_chain_register(&touch_watchdog_notifier_head,
			&wdev->nb);
		return omap_wdt_setup(wdev);
	}

	pm_runtime_put_sync(wdev->dev);
	return 0;

err_misc:
	pm_runtime_put_sync(wdev->dev);
	platform_set_drvdata(pdev, NULL);

	if (wdev->irq)
		free_irq(wdev->irq, wdev);

err_irq:
	iounmap(wdev->base);

err_ioremap:
	wdev->base = NULL;
	kfree(wdev);

err_kzalloc:
	release_mem_region(res->start, resource_size(res));

err_busy:
err_get_resource:

	return ret;
}

static void omap_wdt_shutdown(struct platform_device *pdev)
{
	struct omap_wdt_dev *wdev = platform_get_drvdata(pdev);

	if (wdev->omap_wdt_users) {
		omap_wdt_disable(wdev);
		pm_runtime_put_sync(wdev->dev);
	}
}

static int __devexit omap_wdt_remove(struct platform_device *pdev)
{
	struct omap_wdt_dev *wdev = platform_get_drvdata(pdev);
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!res)
		return -ENOENT;

	misc_deregister(&(wdev->omap_wdt_miscdev));
	release_mem_region(res->start, resource_size(res));
	platform_set_drvdata(pdev, NULL);

	if (wdev->irq)
		free_irq(wdev->irq, wdev);

	if (kernelpet && wdev->irq)
		atomic_notifier_chain_unregister(&touch_watchdog_notifier_head,
			&wdev->nb);

	iounmap(wdev->base);

	kfree(wdev);
	omap_wdt_dev = NULL;

	return 0;
}

#ifdef	CONFIG_PM

/* REVISIT ... not clear this is the best way to handle system suspend; and
 * it's very inappropriate for selective device suspend (e.g. suspending this
 * through sysfs rather than by stopping the watchdog daemon).  Also, this
 * may not play well enough with NOWAYOUT...
 */

static int omap_wdt_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct omap_wdt_dev *wdev = platform_get_drvdata(pdev);

	if (wdev->omap_wdt_users) {
		omap_wdt_disable(wdev);
		pm_runtime_put_sync_suspend(wdev->dev);
	}

	return 0;
}

static int omap_wdt_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct omap_wdt_dev *wdev = platform_get_drvdata(pdev);

	if (wdev->omap_wdt_users) {
		pm_runtime_get_sync(wdev->dev);
		omap_wdt_enable(wdev);
		omap_wdt_ping(wdev);
	}

	return 0;
}

#else
#define	omap_wdt_suspend	NULL
#define	omap_wdt_resume		NULL
#endif

static const struct dev_pm_ops omap_wdt_pm_ops = {
	.suspend_noirq	= omap_wdt_suspend,
	.resume_noirq	= omap_wdt_resume,
};

static struct platform_driver omap_wdt_driver = {
	.probe		= omap_wdt_probe,
	.remove		= __devexit_p(omap_wdt_remove),
	.shutdown	= omap_wdt_shutdown,
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= "omap_wdt",
		.pm	= &omap_wdt_pm_ops,
	},
};

static int __init omap_wdt_init(void)
{
	spin_lock_init(&wdt_lock);
	return platform_driver_register(&omap_wdt_driver);
}
int topology_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(
				&topology_update_notifier_list, nb);
}
void vrf_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&vrf_notifier, n);
}
int register_memory_isolate_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&memory_isolate_chain, nb);
}
int task_free_register(struct notifier_block *n)
{
	return atomic_notifier_chain_register(&task_free_notifier, n);
}
void __init setup_arch(char **cmdline_p)
{
	unsigned long kernel_end;

#if defined(CONFIG_XEN_PRIVILEGED_GUEST)
	struct e820entry *machine_e820;
	struct xen_memory_map memmap;
#endif

#ifdef CONFIG_XEN
	/* Register a call for panic conditions. */
	atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block);

	ROOT_DEV = MKDEV(RAMDISK_MAJOR, 0);
	kernel_end = 0;		/* dummy */
	screen_info = SCREEN_INFO;

	if (xen_start_info->flags & SIF_INITDOMAIN) {
		/* This is drawn from a dump from vgacon:startup in
		 * standard Linux. */
		screen_info.orig_video_mode = 3;
		screen_info.orig_video_isVGA = 1;
		screen_info.orig_video_lines = 25;
		screen_info.orig_video_cols = 80;
		screen_info.orig_video_ega_bx = 3;
		screen_info.orig_video_points = 16;
	} else
		screen_info.orig_video_isVGA = 0;

	edid_info = EDID_INFO;
	saved_video_mode = SAVED_VIDEO_MODE;
	bootloader_type = LOADER_TYPE;

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif

	setup_xen_features();

	HYPERVISOR_vm_assist(VMASST_CMD_enable,
			     VMASST_TYPE_writable_pagetables);

	ARCH_SETUP
#else
	ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
	screen_info = SCREEN_INFO;
	edid_info = EDID_INFO;
	saved_video_mode = SAVED_VIDEO_MODE;
	bootloader_type = LOADER_TYPE;

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif
#endif	/* !CONFIG_XEN */
	setup_memory_region();
	copy_edd();

	if (!MOUNT_ROOT_RDONLY)
		root_mountflags &= ~MS_RDONLY;
	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code = (unsigned long) &_etext;
	init_mm.end_data = (unsigned long) &_edata;
	init_mm.brk = (unsigned long) &_end;

#ifndef CONFIG_XEN
	code_resource.start = virt_to_phys(&_text);
	code_resource.end = virt_to_phys(&_etext)-1;
	data_resource.start = virt_to_phys(&_etext);
	data_resource.end = virt_to_phys(&_edata)-1;
#endif

	parse_cmdline_early(cmdline_p);

	early_identify_cpu(&boot_cpu_data);

	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	end_pfn = e820_end_of_ram();
	num_physpages = end_pfn;		/* for pfn_valid */

	check_efer();

#ifndef CONFIG_XEN
	discover_ebda();
#endif

	init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));

#ifdef CONFIG_ACPI_NUMA
	/*
	 * Parse SRAT to discover nodes.
	 */
	acpi_numa_init();
#endif

#ifdef CONFIG_NUMA
	numa_initmem_init(0, end_pfn); 
#else
	contig_initmem_init(0, end_pfn);
#endif

	/* Reserve direct mapping */
	reserve_bootmem_generic(table_start << PAGE_SHIFT, 
				(table_end - table_start) << PAGE_SHIFT);

	/* reserve kernel */
	kernel_end = round_up(__pa_symbol(&_end),PAGE_SIZE);
	reserve_bootmem_generic(HIGH_MEMORY, kernel_end - HIGH_MEMORY);

#ifdef CONFIG_XEN
	/* reserve physmap, start info and initial page tables */
	reserve_bootmem(kernel_end, (table_start<<PAGE_SHIFT)-kernel_end);
#else
	/*
	 * reserve physical page 0 - it's a special BIOS page on many boxes,
	 * enabling clean reboots, SMP operation, laptop functions.
	 */
	reserve_bootmem_generic(0, PAGE_SIZE);

	/* reserve ebda region */
	if (ebda_addr)
		reserve_bootmem_generic(ebda_addr, ebda_size);
#endif

#ifdef CONFIG_SMP
	/*
	 * But first pinch a few for the stack/trampoline stuff
	 * FIXME: Don't need the extra page at 4K, but need to fix
	 * trampoline before removing it. (see the GDT stuff)
	 */
	reserve_bootmem_generic(PAGE_SIZE, PAGE_SIZE);

	/* Reserve SMP trampoline */
	reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, PAGE_SIZE);
#endif

#ifdef CONFIG_ACPI_SLEEP
	/*
	 * Reserve low memory region for sleep support.
	 */
	acpi_reserve_bootmem();
#endif
#ifdef CONFIG_XEN
#ifdef CONFIG_BLK_DEV_INITRD
	if (xen_start_info->mod_start) {
		if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
			/*reserve_bootmem_generic(INITRD_START, INITRD_SIZE);*/
			initrd_start = INITRD_START + PAGE_OFFSET;
			initrd_end = initrd_start+INITRD_SIZE;
			initrd_below_start_ok = 1;
		} else {
			printk(KERN_ERR "initrd extends beyond end of memory "
				"(0x%08lx > 0x%08lx)\ndisabling initrd\n",
				(unsigned long)(INITRD_START + INITRD_SIZE),
				(unsigned long)(end_pfn << PAGE_SHIFT));
			initrd_start = 0;
		}
	}
#endif
#else	/* CONFIG_XEN */
#ifdef CONFIG_BLK_DEV_INITRD
	if (LOADER_TYPE && INITRD_START) {
		if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
			reserve_bootmem_generic(INITRD_START, INITRD_SIZE);
			initrd_start =
				INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
			initrd_end = initrd_start+INITRD_SIZE;
		}
		else {
			printk(KERN_ERR "initrd extends beyond end of memory "
			    "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
			    (unsigned long)(INITRD_START + INITRD_SIZE),
			    (unsigned long)(end_pfn << PAGE_SHIFT));
			initrd_start = 0;
		}
	}
#endif
#endif	/* !CONFIG_XEN */
#ifdef CONFIG_KEXEC
	if (crashk_res.start != crashk_res.end) {
		reserve_bootmem(crashk_res.start,
			crashk_res.end - crashk_res.start + 1);
	}
#endif

	paging_init();
#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * Find and reserve possible boot-time SMP configuration:
	 */
	find_smp_config();
#endif
#ifdef CONFIG_XEN
	{
		int i, j, k, fpp;
		unsigned long va;

		/* 'Initial mapping' of initrd must be destroyed. */
		for (va = xen_start_info->mod_start;
		     va < (xen_start_info->mod_start+xen_start_info->mod_len);
		     va += PAGE_SIZE) {
			HYPERVISOR_update_va_mapping(va, __pte_ma(0), 0);
		}

		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			/* Make sure we have a large enough P->M table. */
			phys_to_machine_mapping = alloc_bootmem(
				end_pfn * sizeof(unsigned long));
			memset(phys_to_machine_mapping, ~0,
			       end_pfn * sizeof(unsigned long));
			memcpy(phys_to_machine_mapping,
			       (unsigned long *)xen_start_info->mfn_list,
			       xen_start_info->nr_pages * sizeof(unsigned long));
			free_bootmem(
				__pa(xen_start_info->mfn_list),
				PFN_PHYS(PFN_UP(xen_start_info->nr_pages *
						sizeof(unsigned long))));

			/* Destroyed 'initial mapping' of old p2m table. */
			for (va = xen_start_info->mfn_list;
			     va < (xen_start_info->mfn_list +
				   (xen_start_info->nr_pages*sizeof(unsigned long)));
			     va += PAGE_SIZE) {
				HYPERVISOR_update_va_mapping(va, __pte_ma(0), 0);
			}

			/*
			 * Initialise the list of the frames that specify the
			 * list of frames that make up the p2m table. Used by
			 * save/restore.
			 */
			pfn_to_mfn_frame_list_list = alloc_bootmem(PAGE_SIZE);
			HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
				virt_to_mfn(pfn_to_mfn_frame_list_list);

			fpp = PAGE_SIZE / sizeof(unsigned long);
			for (i = 0, j = 0, k = -1; i < end_pfn; i += fpp, j++) {
				if ((j % fpp) == 0) {
					k++;
					BUG_ON(k >= fpp);
					pfn_to_mfn_frame_list[k] =
						alloc_bootmem(PAGE_SIZE);
					pfn_to_mfn_frame_list_list[k] =
						virt_to_mfn(pfn_to_mfn_frame_list[k]);
					j = 0;
				}
				pfn_to_mfn_frame_list[k][j] =
					virt_to_mfn(&phys_to_machine_mapping[i]);
			}
			HYPERVISOR_shared_info->arch.max_pfn = end_pfn;
		}

	}

	if (xen_start_info->flags & SIF_INITDOMAIN)
		dmi_scan_machine();

	if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
		acpi_disabled = 1;
#ifdef CONFIG_ACPI
		acpi_ht = 0;
#endif
	}
#endif

#ifndef CONFIG_XEN
	check_ioapic();
#endif

	zap_low_mappings(0);

	/*
	 * set this early, so we dont allocate cpu0
	 * if MADT list doesnt list BSP first
	 * mpparse.c/MP_processor_info() allocates logical cpu numbers.
	 */
	cpu_set(0, cpu_present_map);
#ifdef CONFIG_ACPI
	/*
	 * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
	 * Call this early for SRAT node setup.
	 */
	acpi_boot_table_init();

	/*
	 * Read APIC and some other early information from ACPI tables.
	 */
	acpi_boot_init();
#endif

	init_cpu_to_node();

#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * get boot-time SMP configuration:
	 */
	if (smp_found_config)
		get_smp_config();
#ifndef CONFIG_XEN
	init_apic_mappings();
#endif
#endif
#if defined(CONFIG_XEN) && defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
	prefill_possible_map();
#endif

	/*
	 * Request address space for all standard RAM and ROM resources
	 * and also for regions reported as reserved by the e820.
	 */
#if defined(CONFIG_XEN_PRIVILEGED_GUEST)
	probe_roms();
	if (xen_start_info->flags & SIF_INITDOMAIN) {
		machine_e820 = alloc_bootmem_low_pages(PAGE_SIZE);

		memmap.nr_entries = E820MAX;
		set_xen_guest_handle(memmap.buffer, machine_e820);

		BUG_ON(HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap));

		e820_reserve_resources(machine_e820, memmap.nr_entries);
	}
#elif !defined(CONFIG_XEN)
	probe_roms();
	e820_reserve_resources(e820.map, e820.nr_map);
#endif

	request_resource(&iomem_resource, &video_ram_resource);

	{
	unsigned i;
	/* request I/O space for devices used on all i[345]86 PCs */
	for (i = 0; i < STANDARD_IO_RESOURCES; i++)
		request_resource(&ioport_resource, &standard_io_resources[i]);
	}

#if defined(CONFIG_XEN_PRIVILEGED_GUEST)
	if (xen_start_info->flags & SIF_INITDOMAIN) {
		e820_setup_gap(machine_e820, memmap.nr_entries);
		free_bootmem(__pa(machine_e820), PAGE_SIZE);
	}
#elif !defined(CONFIG_XEN)
	e820_setup_gap(e820.map, e820.nr_map);
#endif

#ifdef CONFIG_GART_IOMMU
	iommu_hole_init();
#endif

#ifdef CONFIG_XEN
	{
		struct physdev_set_iopl set_iopl;

		set_iopl.iopl = 1;
		HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);

		if (xen_start_info->flags & SIF_INITDOMAIN) {
			if (!(xen_start_info->flags & SIF_PRIVILEGED))
				panic("Xen granted us console access "
				      "but not privileged status");

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
			conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
			conswitchp = &dummy_con;
#endif
#endif
		} else {
			extern int console_use_vt;
			console_use_vt = 0;
		}
	}
#else	/* CONFIG_XEN */

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

#endif /* !CONFIG_XEN */
}
/*
 * Register a simple callback for handling varm context save/restore
 */
int context_arm_notifier_register(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&context_arm_notifier_list, nb);
}
void mce_register_decode_chain(struct notifier_block *nb)
{
	atomic_notifier_chain_register(&x86_mce_decoder_chain, nb);
	drain_mcelog_buffer();
}
static int register_xaction_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&xaction_notifier_list, nb);
}
static int hvfb_probe(struct hv_device *hdev,
		      const struct hv_vmbus_device_id *dev_id)
{
	struct fb_info *info;
	struct hvfb_par *par;
	int ret;

	info = framebuffer_alloc(sizeof(struct hvfb_par), &hdev->device);
	if (!info) {
		pr_err("No memory for framebuffer info\n");
		return -ENOMEM;
	}

	par = info->par;
	par->info = info;
	par->fb_ready = false;
	init_completion(&par->wait);
	INIT_DELAYED_WORK(&par->dwork, hvfb_update_work);

	/* Connect to VSP */
	hv_set_drvdata(hdev, info);
	ret = synthvid_connect_vsp(hdev);
	if (ret) {
		pr_err("Unable to connect to VSP\n");
		goto error1;
	}

	ret = hvfb_getmem(info);
	if (ret) {
		pr_err("No memory for framebuffer\n");
		goto error2;
	}

	hvfb_get_option(info);
	pr_info("Screen resolution: %dx%d, Color depth: %d\n",
		screen_width, screen_height, screen_depth);

	/* Set up fb_info */
	info->flags = FBINFO_DEFAULT;

	info->var.xres_virtual = info->var.xres = screen_width;
	info->var.yres_virtual = info->var.yres = screen_height;
	info->var.bits_per_pixel = screen_depth;

	if (info->var.bits_per_pixel == 16) {
		info->var.red = (struct fb_bitfield){11, 5, 0};
		info->var.green = (struct fb_bitfield){5, 6, 0};
		info->var.blue = (struct fb_bitfield){0, 5, 0};
		info->var.transp = (struct fb_bitfield){0, 0, 0};
	} else {
		info->var.red = (struct fb_bitfield){16, 8, 0};
		info->var.green = (struct fb_bitfield){8, 8, 0};
		info->var.blue = (struct fb_bitfield){0, 8, 0};
		info->var.transp = (struct fb_bitfield){24, 8, 0};
	}

	info->var.activate = FB_ACTIVATE_NOW;
	info->var.height = -1;
	info->var.width = -1;
	info->var.vmode = FB_VMODE_NONINTERLACED;

	strcpy(info->fix.id, KBUILD_MODNAME);
	info->fix.type = FB_TYPE_PACKED_PIXELS;
	info->fix.visual = FB_VISUAL_TRUECOLOR;
	info->fix.line_length = screen_width * screen_depth / 8;
	info->fix.accel = FB_ACCEL_NONE;

	info->fbops = &hvfb_ops;
	info->pseudo_palette = par->pseudo_palette;

	/* Send config to host */
	ret = synthvid_send_config(hdev);
	if (ret)
		goto error;

	ret = register_framebuffer(info);
	if (ret) {
		pr_err("Unable to register framebuffer\n");
		goto error;
	}

	par->fb_ready = true;

	par->synchronous_fb = false;
	par->hvfb_panic_nb.notifier_call = hvfb_on_panic;
	atomic_notifier_chain_register(&panic_notifier_list,
				       &par->hvfb_panic_nb);

	return 0;

error:
	hvfb_putmem(info);
error2:
	vmbus_close(hdev->channel);
error1:
	cancel_delayed_work_sync(&par->dwork);
	hv_set_drvdata(hdev, NULL);
	framebuffer_release(info);
	return ret;
}

static int hvfb_remove(struct hv_device *hdev)
{
	struct fb_info *info = hv_get_drvdata(hdev);
	struct hvfb_par *par = info->par;

	atomic_notifier_chain_unregister(&panic_notifier_list,
					 &par->hvfb_panic_nb);

	par->update = false;
	par->fb_ready = false;

	unregister_framebuffer(info);
	cancel_delayed_work_sync(&par->dwork);

	vmbus_close(hdev->channel);
	hv_set_drvdata(hdev, NULL);

	hvfb_putmem(info);
	framebuffer_release(info);

	return 0;
}

static const struct pci_device_id pci_stub_id_table[] = {
	{
		.vendor      = PCI_VENDOR_ID_MICROSOFT,
		.device      = PCI_DEVICE_ID_HYPERV_VIDEO,
	},
	{ /* end of list */ }
};

static const struct hv_vmbus_device_id id_table[] = {
	/* Synthetic Video Device GUID */
	{HV_SYNTHVID_GUID},
	{}
};

MODULE_DEVICE_TABLE(pci, pci_stub_id_table);
MODULE_DEVICE_TABLE(vmbus, id_table);

static struct hv_driver hvfb_drv = {
	.name = KBUILD_MODNAME,
	.id_table = id_table,
	.probe = hvfb_probe,
	.remove = hvfb_remove,
};

static int hvfb_pci_stub_probe(struct pci_dev *pdev,
			       const struct pci_device_id *ent)
{
	return 0;
}

static void hvfb_pci_stub_remove(struct pci_dev *pdev)
{
}

static struct pci_driver hvfb_pci_stub_driver = {
	.name =		KBUILD_MODNAME,
	.id_table =	pci_stub_id_table,
	.probe =	hvfb_pci_stub_probe,
	.remove =	hvfb_pci_stub_remove,
};

static int __init hvfb_drv_init(void)
{
	int ret;

	ret = vmbus_driver_register(&hvfb_drv);
	if (ret != 0)
		return ret;

	ret = pci_register_driver(&hvfb_pci_stub_driver);
	if (ret != 0) {
		vmbus_driver_unregister(&hvfb_drv);
		return ret;
	}

	return 0;
}

static void __exit hvfb_drv_exit(void)
{
	pci_unregister_driver(&hvfb_pci_stub_driver);
	vmbus_driver_unregister(&hvfb_drv);
}

module_init(hvfb_drv_init);
module_exit(hvfb_drv_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V Synthetic Video Frame Buffer Driver");
int show_mem_notifier_register(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&show_mem_notifier, nb);
}
/**
 *	register_restart_handler - Register function to be called to reset
 *				   the system
 *	@nb: Info about handler function to be called
 *	@nb->priority:	Handler priority. Handlers should follow the
 *			following guidelines for setting priorities.
 *			0:	Restart handler of last resort,
 *				with limited restart capabilities
 *			128:	Default restart handler; use if no other
 *				restart handler is expected to be available,
 *				and/or if restart functionality is
 *				sufficient to restart the entire system
 *			255:	Highest priority restart handler, will
 *				preempt all other restart handlers
 *
 *	Registers a function with code to be called to restart the
 *	system.
 *
 *	Registered functions will be called from machine_restart as last
 *	step of the restart sequence (if the architecture specific
 *	machine_restart function calls do_kernel_restart - see below
 *	for details).
 *	Registered functions are expected to restart the system immediately.
 *	If more than one function is registered, the restart handler priority
 *	selects which function will be called first.
 *
 *	Restart handlers are expected to be registered from non-architecture
 *	code, typically from drivers. A typical use case would be a system
 *	where restart functionality is provided through a watchdog. Multiple
 *	restart handlers may exist; for example, one restart handler might
 *	restart the entire system, while another only restarts the CPU.
 *	In such cases, the restart handler which only restarts part of the
 *	hardware is expected to register with low priority to ensure that
 *	it only runs if no other means to restart the system is available.
 *
 *	Currently always returns zero, as atomic_notifier_chain_register()
 *	always returns zero.
 */
int register_restart_handler(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&restart_handler_list, nb);
}
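
/*
 * A hedged sketch of registering a restart handler at the default priority
 * described in the kernel-doc above; the "my_wdt" names and the handler
 * body are hypothetical, not taken from any driver in this file.
 */
#include <linux/notifier.h>
#include <linux/reboot.h>

static int my_wdt_restart(struct notifier_block *nb, unsigned long action,
			  void *data)
{
	/* Kick the (hypothetical) reset hardware here; should not return. */
	return NOTIFY_DONE;
}

static struct notifier_block my_wdt_restart_nb = {
	.notifier_call	= my_wdt_restart,
	.priority	= 128,	/* default: can restart the whole system */
};

static int __init my_wdt_init(void)
{
	return register_restart_handler(&my_wdt_restart_nb);
}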
void ibmasm_register_panic_notifier(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &panic_notifier);
}
void libcfs_register_panic_notifier(void)
{
	atomic_notifier_chain_register(&panic_notifier_list,
				       &libcfs_panic_notifier);
}
void idle_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&idle_notifier, n);
}
int set_otg_notify(struct otg_notify *n)
{
	int ret = 0;

	if (!u_notify) {
		ret = create_usb_notify();
		if (ret) {
			pr_err("unable create_usb_notify\n");
			goto err;
		}
	}

	if (u_notify->o_notify && n) {
		pr_err("error: o_notify already registered\n");
		ret = -EBUSY;
		goto err;
	}

	pr_info("registered otg_notify +\n");
	if (!n) {
		pr_err("otg notify structure is null\n");
		ret = -EFAULT;
		goto err1;
	}
	u_notify->o_notify = n;

	ATOMIC_INIT_NOTIFIER_HEAD(&u_notify->o_notify->otg_notifier);
	u_notify->otg_nb.notifier_call = otg_notifier_callback;
	ret = atomic_notifier_chain_register(&u_notify->o_notify->otg_notifier,
				&u_notify->otg_nb);
	if (ret < 0) {
		pr_err("atomic_notifier_chain_register failed\n");
		goto err1;
	}

	BLOCKING_INIT_NOTIFIER_HEAD(&u_notify->o_notify->extra_notifier);
	u_notify->extra_nb.notifier_call = extra_notifier_callback;
	ret = blocking_notifier_chain_register
		(&u_notify->o_notify->extra_notifier, &u_notify->extra_nb);
	if (ret < 0) {
		pr_err("blocking_notifier_chain_register failed\n");
		goto err2;
	}

	if (!n->unsupport_host) {
		u_notify->ndev.name = "usb_otg";
		u_notify->ndev.set_booster = n->vbus_drive;
		ret = host_notify_dev_register(&u_notify->ndev);
		if (ret < 0) {
			pr_err("host_notify_dev_register is failed\n");
			goto err3;
		}

		if (!n->vbus_drive) {
			pr_err("vbus_drive is null\n");
			ret = -EINVAL;
			goto err4;
		}
	}

	if (gpio_is_valid(n->vbus_detect_gpio) ||
			gpio_is_valid(n->redriver_en_gpio)) {
		ret = register_gpios(n);
		if (ret < 0) {
			pr_err("register_gpios is failed\n");
			goto err4;
		}
	}

	if (n->is_wakelock)
		wake_lock_init(&u_notify->wlock,
			WAKE_LOCK_SUSPEND, "usb_notify");

	if (n->booting_delay_sec) {
		INIT_DELAYED_WORK(&u_notify->b_delay.booting_work,
				  reserve_state_check);
		schedule_delayed_work(&u_notify->b_delay.booting_work,
				n->booting_delay_sec*HZ);
	}
	register_usbdev_notify();

	pr_info("registered otg_notify -\n");
	return 0;
err4:
	if (!n->unsupport_host)
		host_notify_dev_unregister(&u_notify->ndev);
err3:
	blocking_notifier_chain_unregister(&u_notify->o_notify->extra_notifier,
				&u_notify->extra_nb);
err2:
	atomic_notifier_chain_unregister(&u_notify->o_notify->otg_notifier,
				&u_notify->otg_nb);
err1:
	u_notify->o_notify = NULL;
err:
	return ret;
}
int msm_jtag_restore_register(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&etm_restore_notifier_list, nb);
}
/**
 *	register_netevent_notifier - register a netevent notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when a netevent occurs.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 */
int register_netevent_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&netevent_notif_chain, nb);
}
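
/*
 * Sketch of a netevent consumer, assuming the standard event codes from
 * <net/netevent.h>; the callback body is illustrative only.
 */
#include <linux/notifier.h>
#include <linux/printk.h>
#include <net/netevent.h>

static int my_netevent_cb(struct notifier_block *nb, unsigned long event,
			  void *ptr)
{
	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		/* ptr is the updated struct neighbour */
		pr_debug("neighbour updated\n");
		break;
	case NETEVENT_REDIRECT:
		pr_debug("redirect received\n");
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block my_netevent_nb = {
	.notifier_call = my_netevent_cb,
};

/* ... later, typically from module init: register_netevent_notifier(&my_netevent_nb); */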
int register_keyboard_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&keyboard_notifier_list, nb);
}
int __init linux_main(int argc, char **argv)
{
	unsigned long avail, diff;
	unsigned long virtmem_size, max_physmem;
	unsigned long stack;
	unsigned int i;
	int add;
	char *mode;

	for (i = 1; i < argc; i++) {
		if ((i == 1) && (argv[i][0] == ' '))
			continue;
		add = 1;
		uml_checksetup(argv[i], &add);
		if (add)
			add_arg(argv[i]);
	}
	if (have_root == 0)
		add_arg(DEFAULT_COMMAND_LINE);

	host_task_size = os_get_top_address();
	/*
	 * TASK_SIZE needs to be PGDIR_SIZE aligned or else exit_mmap craps
	 * out
	 */
	task_size = host_task_size & PGDIR_MASK;

	/* OS sanity checks that need to happen before the kernel runs */
	os_early_checks();

	can_do_skas();

	if (proc_mm && ptrace_faultinfo)
		mode = "SKAS3";
	else
		mode = "SKAS0";

	printf("UML running in %s mode\n", mode);

	brk_start = (unsigned long) sbrk(0);

	/*
	 * Increase physical memory size for exec-shield users
	 * so they actually get what they asked for. This should
	 * add zero for non-exec shield users
	 */

	diff = UML_ROUND_UP(brk_start) - UML_ROUND_UP(&_end);
	if (diff > 1024 * 1024) {
		printf("Adding %ld bytes to physical memory to account for "
		       "exec-shield gap\n", diff);
		physmem_size += diff;
	}

	uml_physmem = (unsigned long) &__binary_start & PAGE_MASK;

	/* Reserve up to 4M after the current brk */
	uml_reserved = ROUND_4M(brk_start) + (1 << 22);

	setup_machinename(init_utsname()->machine);

	highmem = 0;
	iomem_size = (iomem_size + PAGE_SIZE - 1) & PAGE_MASK;
	max_physmem = TASK_SIZE - uml_physmem - iomem_size - MIN_VMALLOC;

	/*
	 * Zones have to begin on a 1 << MAX_ORDER page boundary,
	 * so this makes sure that's true for highmem
	 */
	max_physmem &= ~((1 << (PAGE_SHIFT + MAX_ORDER)) - 1);
	if (physmem_size + iomem_size > max_physmem) {
		highmem = physmem_size + iomem_size - max_physmem;
		physmem_size -= highmem;
#ifndef CONFIG_HIGHMEM
		highmem = 0;
		printf("CONFIG_HIGHMEM not enabled - physical memory shrunk "
		       "to %Lu bytes\n", physmem_size);
#endif
	}

	high_physmem = uml_physmem + physmem_size;
	end_iomem = high_physmem + iomem_size;
	high_memory = (void *) end_iomem;

	start_vm = VMALLOC_START;

	setup_physmem(uml_physmem, uml_reserved, physmem_size, highmem);
	if (init_maps(physmem_size, iomem_size, highmem)) {
		printf("Failed to allocate mem_map for %Lu bytes of physical "
		       "memory and %Lu bytes of highmem\n", physmem_size,
		       highmem);
		exit(1);
	}

	virtmem_size = physmem_size;
	stack = (unsigned long) argv;
	stack &= ~(1024 * 1024 - 1);
	avail = stack - start_vm;
	if (physmem_size > avail)
		virtmem_size = avail;
	end_vm = start_vm + virtmem_size;

	if (virtmem_size < physmem_size)
		printf("Kernel virtual memory size shrunk to %lu bytes\n",
		       virtmem_size);

	atomic_notifier_chain_register(&panic_notifier_list,
				       &panic_exit_notifier);

	uml_postsetup();

	stack_protections((unsigned long) &init_thread_info);
	os_flush_stdout();

	return start_uml();
}
/**
 * nvec_register_notifier - Register a notifier with nvec
 * @nvec: A &struct nvec_chip
 * @nb: The notifier block to register
 *
 * Registers a notifier with @nvec. The notifier will be added to an atomic
 * notifier chain that is called for all received messages except those that
 * correspond to a request initiated by nvec_write_sync().
 */
int nvec_register_notifier(struct nvec_chip *nvec, struct notifier_block *nb,
			   unsigned int events)
{
	return atomic_notifier_chain_register(&nvec->notifier_list, nb);
}
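
/*
 * Hedged sketch of an nvec consumer matching the kernel-doc above: "nvec"
 * is a previously obtained struct nvec_chip, and the callback body is
 * hypothetical.
 */
#include <linux/notifier.h>

static int my_nvec_event(struct notifier_block *nb, unsigned long event_type,
			 void *data)
{
	/* data points at the received NVEC message */
	return NOTIFY_OK;
}

static struct notifier_block my_nvec_nb = {
	.notifier_call = my_nvec_event,
};

/* ... nvec_register_notifier(nvec, &my_nvec_nb, 0); ... */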
int register_inet6addr_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&inet6addr_chain, nb);
}