Example #1
/* Called by the VFS at mount time to initialize the whole file system.  */
static struct super_block *
jffs_read_super(struct super_block *sb, void *data, int silent)
{
	kdev_t dev = sb->s_dev;
	struct inode *root_inode;
	struct jffs_control *c;

	D1(printk(KERN_NOTICE "JFFS: Trying to mount device %s.\n",
		  kdevname(dev)));

	if (MAJOR(dev) != MTD_BLOCK_MAJOR) {
		printk(KERN_WARNING "JFFS: Trying to mount a "
		       "non-mtd device.\n");
		return 0;
	}

	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->u.generic_sbp = (void *) 0;
	sb->s_maxbytes = 0xFFFFFFFF;

	/* Build the file system.  */
	if (jffs_build_fs(sb) < 0) {
		goto jffs_sb_err1;
	}

	/*
	 * set up enough so that we can read an inode
	 */
	sb->s_magic = JFFS_MAGIC_SB_BITMASK;
	sb->s_op = &jffs_ops;

	root_inode = iget(sb, JFFS_MIN_INO);
	if (!root_inode)
	        goto jffs_sb_err2;

	/* Get the root directory of this file system.  */
	if (!(sb->s_root = d_alloc_root(root_inode))) {
		goto jffs_sb_err3;
	}

	c = (struct jffs_control *) sb->u.generic_sbp;

#ifdef CONFIG_JFFS_PROC_FS
	/* Set up the jffs proc file system.  */
	if (jffs_register_jffs_proc_dir(dev, c) < 0) {
		printk(KERN_WARNING "JFFS: Failed to initialize the JFFS "
			"proc file system for device %s.\n",
			kdevname(dev));
	}
#endif

	/* Set the Garbage Collection thresholds */

	/* GC if free space goes below 5% of the total size */
	c->gc_minfree_threshold = c->fmc->flash_size / 20;

	if (c->gc_minfree_threshold < c->fmc->sector_size)
		c->gc_minfree_threshold = c->fmc->sector_size;

	/* GC if dirty space exceeds 33% of the total size. */
	c->gc_maxdirty_threshold = c->fmc->flash_size / 3;

	if (c->gc_maxdirty_threshold < c->fmc->sector_size)
		c->gc_maxdirty_threshold = c->fmc->sector_size;


	c->thread_pid = kernel_thread (jffs_garbage_collect_thread, 
				        (void *) c, 
				        CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
	D1(printk(KERN_NOTICE "JFFS: GC thread pid=%d.\n", (int) c->thread_pid));

	D1(printk(KERN_NOTICE "JFFS: Successfully mounted device %s.\n",
	       kdevname(dev)));
	return sb;

jffs_sb_err3:
	iput(root_inode);
jffs_sb_err2:
	jffs_cleanup_control((struct jffs_control *)sb->u.generic_sbp);
jffs_sb_err1:
	printk(KERN_WARNING "JFFS: Failed to mount device %s.\n",
	       kdevname(dev));
	return 0;
}
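For reference, a 2.4-era entry function handed to kernel_thread() (such as the garbage-collection thread started above) normally detaches itself with daemonize() and then loops. The sketch below is only a hedged illustration with assumed names (example_gc_thread, "example_gcd"); it is not the jffs_garbage_collect_thread source.

/* Hypothetical sketch, not the actual jffs_garbage_collect_thread. */
static int example_gc_thread(void *arg)
{
	struct jffs_control *c = (struct jffs_control *) arg;

	daemonize();                          /* 2.4-style: detach from the mounting task */
	strcpy(current->comm, "example_gcd");

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);         /* or sleep on a wait queue until woken */
		/* ... compare free/dirty space against c->gc_minfree_threshold and
		 *     c->gc_maxdirty_threshold, collect if needed ... */
	}
	return 0;                             /* never reached in this sketch */
}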
Example #2
int
sh7722_init( void )
{
     int i;
     int ret;

     /* Register the SH7722 graphics device. */
     ret = misc_register( &sh7722gfx_miscdev );
     if (ret < 0) {
          printk( KERN_ERR "%s: misc_register() for minor %d failed! (error %d)\n",
                  __FUNCTION__, sh7722gfx_miscdev.minor, ret );
          return ret;
     }

     /* Allocate and initialize the shared area. */
     shared_order = get_order(sizeof(SH772xGfxSharedArea));
     shared_page  = alloc_pages( GFP_DMA | GFP_KERNEL, shared_order );
     shared       = ioremap( virt_to_phys( page_address(shared_page) ),
                             PAGE_ALIGN(sizeof(SH772xGfxSharedArea)) );

     for (i=0; i<1<<shared_order; i++)
          SetPageReserved( shared_page + i );

     printk( KERN_INFO "sh7722gfx: shared area (order %d) at %p [%lx] using %d bytes\n",
             shared_order, shared, virt_to_phys(shared), sizeof(SH772xGfxSharedArea) );


     /* Allocate and initialize the JPEG area. */
     jpeg_order = get_order(SH7722GFX_JPEG_SIZE);
     jpeg_page  = alloc_pages( GFP_DMA | GFP_KERNEL, jpeg_order );
     jpeg_area  = ioremap( virt_to_phys( page_address(jpeg_page) ),
                           PAGE_ALIGN(SH7722GFX_JPEG_SIZE) );

     for (i=0; i<1<<jpeg_order; i++)
          SetPageReserved( jpeg_page + i );

     printk( KERN_INFO "sh7722gfx: jpeg area (order %d) at %p [%lx] using %d bytes\n",
             jpeg_order, jpeg_area, virt_to_phys(jpeg_area), SH7722GFX_JPEG_SIZE );


     /* Register the BEU interrupt handler. */
     ret = request_irq( SH7722_BEU_IRQ, sh7722_beu_irq, IRQF_DISABLED, "BEU", (void*) shared );
     if (ret) {
          printk( KERN_ERR "%s: request_irq() for interrupt %d failed! (error %d)\n",
                  __FUNCTION__, SH7722_BEU_IRQ, ret );
          goto error_beu;
     }

#ifdef SH7722GFX_IRQ_POLLER
     kernel_thread( sh7722_tdg_irq_poller, (void*) shared, CLONE_KERNEL );
#else
     /* Register the TDG interrupt handler. */
     ret = request_irq( SH7722_TDG_IRQ, sh7722_tdg_irq, IRQF_DISABLED, "TDG", (void*) shared );
     if (ret) {
          printk( KERN_ERR "%s: request_irq() for interrupt %d failed! (error %d)\n",
                  __FUNCTION__, SH7722_TDG_IRQ, ret );
          goto error_tdg;
     }
#endif

     /* Register the JPU interrupt handler. */
     ret = request_irq( SH7722_JPU_IRQ, sh7722_jpu_irq, IRQF_DISABLED, "JPU", (void*) shared );
     if (ret) {
          printk( KERN_ERR "%s: request_irq() for interrupt %d failed! (error %d)\n",
                  __FUNCTION__, SH7722_JPU_IRQ, ret );
          goto error_jpu;
     }

     /* Register the VEU interrupt handler. */
     ret = request_irq( SH7722_VEU_IRQ, sh7722_veu_irq, IRQF_DISABLED, "VEU", (void*) shared );
     if (ret) {
          printk( KERN_ERR "%s: request_irq() for interrupt %d failed! (error %d)\n",
                  __FUNCTION__, SH7722_VEU_IRQ, ret );
          goto error_veu;
     }

     sh7722_reset( shared );

     return 0;


error_veu:
     free_irq( SH7722_JPU_IRQ, (void*) shared );

error_jpu:
#ifndef SH7722GFX_IRQ_POLLER
     free_irq( SH7722_TDG_IRQ, (void*) shared );

error_tdg:
#endif
     free_irq( SH7722_BEU_IRQ, (void*) shared );

error_beu:
     for (i=0; i<1<<jpeg_order; i++)
          ClearPageReserved( jpeg_page + i );

     __free_pages( jpeg_page, jpeg_order );


     for (i=0; i<1<<shared_order; i++)
          ClearPageReserved( shared_page + i );

     __free_pages( shared_page, shared_order );


     misc_deregister( &sh7722gfx_miscdev );

     return ret;
}
Example #3
/*
 * s390_init_machine_check
 *
 * initialize machine check handling
 */
void s390_init_machine_check( void )
{
	crwe_t  *pcrwe;	 /* CRW buffer element pointer */
	mache_t *pmache;   /* machine check element pointer */

	init_MUTEX_LOCKED( &s_sem );

	pcrwe = kmalloc( MAX_CRW_PENDING * sizeof( crwe_t), GFP_KERNEL);

	if ( pcrwe )
	{
		int i;

		crw_buffer_anchor = pcrwe;

		for ( i=0; i < MAX_CRW_PENDING-1; i++)
		{
			pcrwe->crwe_next = (crwe_t *)((unsigned long)pcrwe + sizeof(crwe_t));
			pcrwe            = pcrwe->crwe_next;

		} /* endfor */	

		pcrwe->crwe_next = NULL;

	}
	else
	{
		panic( "s390_init_machine_check : unable to obtain memory\n");		

	} /* endif */

	pmache = kmalloc( MAX_MACH_PENDING * sizeof( mache_t), GFP_KERNEL);

	if ( pmache )
	{
		int i;

		for ( i=0; i < MAX_MACH_PENDING; i++)
		{
			s390_enqueue_free_mchchk( pmache );
			pmache = (mache_t *)((unsigned long)pmache + sizeof(mache_t));

		} /* endfor */	
	}
	else
	{
		panic( "s390_init_machine_check : unable to obtain memory\n");		

	} /* endif */

#ifdef S390_MACHCHK_DEBUG
	printk( KERN_NOTICE "init_mach : starting machine check handler\n");
#endif	

	kernel_thread( s390_machine_check_handler, &s_sem, CLONE_FS | CLONE_FILES);

	ctl_clear_bit( 14, 25 );  // disable damage MCH 	

	ctl_set_bit( 14, 26 ); /* enable degradation MCH */
	ctl_set_bit( 14, 27 ); /* enable system recovery MCH */
#if 1
  	ctl_set_bit( 14, 28 );		// enable channel report MCH
#endif
#ifdef CONFIG_MACHCK_WARNING
	ctl_set_bit( 14, 24);   /* enable warning MCH */
#endif

#ifdef S390_MACHCHK_DEBUG
	printk( KERN_DEBUG "init_mach : machine check buffer : head = %08X\n",
            (unsigned)&mchchk_queue_head);
	printk( KERN_DEBUG "init_mach : machine check buffer : tail = %08X\n",
            (unsigned)&mchchk_queue_tail);
	printk( KERN_DEBUG "init_mach : machine check buffer : free = %08X\n",
            (unsigned)&mchchk_queue_free);
	printk( KERN_DEBUG "init_mach : CRW entry buffer anchor = %08X\n",
            (unsigned)&crw_buffer_anchor);
	printk( KERN_DEBUG "init_mach : machine check handler ready\n");
#endif	

	return;
}
Example #4
static int __init
init_cifs(void)
{
	int rc = 0;
#ifdef CONFIG_PROC_FS
	cifs_proc_init();
#endif
	INIT_LIST_HEAD(&GlobalServerList);	/* BB not implemented yet */
	INIT_LIST_HEAD(&GlobalSMBSessionList);
	INIT_LIST_HEAD(&GlobalTreeConnectionList);
	INIT_LIST_HEAD(&GlobalOplock_Q);
/*
 *  Initialize Global counters
 */
	atomic_set(&sesInfoAllocCount, 0);
	atomic_set(&tconInfoAllocCount, 0);
	atomic_set(&tcpSesAllocCount,0);
	atomic_set(&tcpSesReconnectCount, 0);
	atomic_set(&tconInfoReconnectCount, 0);

	atomic_set(&bufAllocCount, 0);
	atomic_set(&midCount, 0);
	GlobalCurrentXid = 0;
	GlobalTotalActiveXid = 0;
	GlobalMaxActiveXid = 0;
	rwlock_init(&GlobalSMBSeslock);
	spin_lock_init(&GlobalMid_Lock);

	if(cifs_max_pending < 2) {
		cifs_max_pending = 2;
		cFYI(1,("cifs_max_pending set to min of 2"));
	} else if(cifs_max_pending > 256) {
		cifs_max_pending = 256;
		cFYI(1,("cifs_max_pending set to max of 256"));
	}

	rc = cifs_init_inodecache();
	if (!rc) {
		rc = cifs_init_mids();
		if (!rc) {
			rc = cifs_init_request_bufs();
			if (!rc) {
				rc = register_filesystem(&cifs_fs_type);
				if (!rc) {                
					rc = (int)kernel_thread(cifs_oplock_thread, NULL, 
						CLONE_FS | CLONE_FILES | CLONE_VM);
					if(rc > 0)
						return 0;
					else 
						cERROR(1,("error %d create oplock thread",rc));
				}
				cifs_destroy_request_bufs();
			}
			cifs_destroy_mids();
		}
		cifs_destroy_inodecache();
	}
#ifdef CONFIG_PROC_FS
	cifs_proc_clean();
#endif
	return rc;
}
Example #5
static void __init handle_initrd(void)
{
    int error;
    int pid;

    real_root_dev = new_encode_dev(ROOT_DEV);
    create_dev("/dev/root.old", Root_RAM0);
    /* mount initrd on rootfs' /root */
    mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
    sys_mkdir("/old", 0700);
    root_fd = sys_open("/", 0, 0);
    old_fd = sys_open("/old", 0, 0);
    /* move initrd over / and chdir/chroot in initrd root */
    sys_chdir("/root");
    sys_mount(".", "/", NULL, MS_MOVE, NULL);
    sys_chroot(".");

    /*
     * In case that a resume from disk is carried out by linuxrc or one of
     * its children, we need to tell the freezer not to wait for us.
     */
    current->flags |= PF_FREEZER_SKIP;

    pid = kernel_thread(do_linuxrc, "/linuxrc", SIGCHLD);
    if (pid > 0)
        while (pid != sys_wait4(-1, NULL, 0, NULL))
            yield();

    current->flags &= ~PF_FREEZER_SKIP;

    /* move initrd to rootfs' /old */
    sys_fchdir(old_fd);
    sys_mount("/", ".", NULL, MS_MOVE, NULL);
    /* switch root and cwd back to / of rootfs */
    sys_fchdir(root_fd);
    sys_chroot(".");
    sys_close(old_fd);
    sys_close(root_fd);

    if (new_decode_dev(real_root_dev) == Root_RAM0) {
        sys_chdir("/old");
        return;
    }

    ROOT_DEV = new_decode_dev(real_root_dev);
    mount_root();

    printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
    error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
    if (!error)
        printk("okay\n");
    else {
        int fd = sys_open("/dev/root.old", O_RDWR, 0);
        if (error == -ENOENT)
            printk("/initrd does not exist. Ignored.\n");
        else
            printk("failed\n");
        printk(KERN_NOTICE "Unmounting old root\n");
        sys_umount("/old", MNT_DETACH);
        printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
        if (fd < 0) {
            error = fd;
        } else {
            error = sys_ioctl(fd, BLKFLSBUF, 0);
            sys_close(fd);
        }
        printk(!error ? "okay\n" : "failed\n");
    }
}
Example #6
#ifndef OLD_KERNEL
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)  /* get_sb() gained a vfsmount argument in 2.6.18 */
static int cdfs_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data, struct vfsmount *mnt) {
  return get_sb_bdev(fs_type, flags, dev_name, data, cdfs_fill_super, mnt);
#else
static struct super_block *cdfs_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) {
  return get_sb_bdev(fs_type, flags, dev_name, data, cdfs_fill_super);
#endif
}

static struct file_system_type cdfs_fs_type = {
  .owner    = THIS_MODULE,
  .name     = "cdfs",
  .get_sb   = cdfs_get_sb,
  .kill_sb  = kill_block_super,
  .fs_flags = FS_REQUIRES_DEV
};
#endif

/******************************************************/

MODULE_AUTHOR("Michiel Ronsse ([email protected])");
MODULE_DESCRIPTION("CDfs: a CD filesystem");
MODULE_LICENSE("GPL"); 

#ifdef OLD_KERNEL
EXPORT_NO_SYMBOLS;
#endif

/******************************************************************/

static int __init cdfs_init(void) {
  int err;
  PRINT("init_module (insmod)\n");

  printk(FSNAME" "VERSION" loaded.\n");
 
  // register file system
  err = register_filesystem(&cdfs_fs_type);
  if (err < 0) return err;

  // register /proc entry
  if ((cdfs_proc_entry = create_proc_entry(FSNAME, 0, NULL )))
    cdfs_proc_entry->proc_fops = &proc_cdfs_operations;
  cdfs_proc_cd=NULL;

  // start kernel thread
  if ((kcdfsd_pid = kernel_thread(kcdfsd_thread, NULL, CLONE_FS | CLONE_FILES | CLONE_SIGHAND)) >0 ) {
    return 0;
  } else {
    printk(FSNAME" kernel_thread failed.\n");
    if (cdfs_proc_entry) remove_proc_entry(FSNAME, NULL);
    unregister_filesystem(&cdfs_fs_type);
    return -1;
  }
}

/******************************************************************/

static void __exit cdfs_exit(void) {
  PRINT("cleanup_module (rmmod)\n");
  kcdfsd_cleanup_thread();
  if (cdfs_proc_entry) remove_proc_entry(FSNAME, NULL);
  unregister_filesystem(&cdfs_fs_type);
}
Example #7
File: blkmtd.c  Project: nhanh0/hah
/* Startup */
static int __init init_blkmtd(void)
{
  struct file *file = NULL;
  struct inode *inode;
  mtd_raw_dev_data_t *rawdevice = NULL;
  int maj, min;
  int i, blocksize, blocksize_bits;
  loff_t size = 0;
  int readonly = 0;
  int erase_size = CONFIG_MTD_BLKDEV_ERASESIZE;
  kdev_t rdev;
  int err;
  int mode;
  int totalsize = 0, total_sectors = 0;
  int regions;

  mtd_info = NULL;

  // Check args
  if(device == 0) {
    printk("blkmtd: error, missing `device' name\n");
    return 1;
  }

  if(ro)
    readonly = 1;

  if(erasesz)
    erase_size = erasesz;

  DEBUG(1, "blkmtd: got device = `%s' erase size = %dK readonly = %s\n", device, erase_size, readonly ? "yes" : "no");
  // Get a handle on the device
  mode = (readonly) ? O_RDONLY : O_RDWR;
  file = filp_open(device, mode, 0);
  if(IS_ERR(file)) {
    DEBUG(2, "blkmtd: open_namei returned %ld\n", PTR_ERR(file));
    return 1;
  }
  
  /* determine if this is a block device and if so get its major and minor
     numbers */
  inode = file->f_dentry->d_inode;
  if(!S_ISBLK(inode->i_mode)) {
    printk("blkmtd: %s not a block device\n", device);
    filp_close(file, NULL);
    return 1;
  }
  rdev = inode->i_rdev;
  //filp_close(file, NULL);
  DEBUG(1, "blkmtd: found a block device major = %d, minor = %d\n",
	 MAJOR(rdev), MINOR(rdev));
  maj = MAJOR(rdev);
  min = MINOR(rdev);

  if(maj == MTD_BLOCK_MAJOR) {
    printk("blkmtd: attempting to use an MTD device as a block device\n");
    return 1;
  }

  DEBUG(1, "blkmtd: devname = %s\n", bdevname(rdev));
  blocksize = BLOCK_SIZE;

  if(bs) {
    blocksize = bs;
  } else {
    if (blksize_size[maj] && blksize_size[maj][min]) {
      DEBUG(2, "blkmtd: blksize_size = %d\n", blksize_size[maj][min]);
      blocksize = blksize_size[maj][min];
    }
  }
  i = blocksize;
  blocksize_bits = 0;
  while(i != 1) {
    blocksize_bits++;
    i >>= 1;
  }

  if(count) {
    size = count;
  } else {
    if (blk_size[maj]) {
      size = ((loff_t) blk_size[maj][min] << BLOCK_SIZE_BITS) >> blocksize_bits;
    }
  }
  total_sectors = size;
  size *= blocksize;
  totalsize = size;
  DEBUG(1, "blkmtd: size = %ld\n", (long int)size);

  if(size == 0) {
    printk("blkmtd: cant determine size\n");
    return 1;
  }
  rawdevice = (mtd_raw_dev_data_t *)kmalloc(sizeof(mtd_raw_dev_data_t), GFP_KERNEL);
  if(rawdevice == NULL) {
    err = -ENOMEM;
    goto init_err;
  }
  memset(rawdevice, 0, sizeof(mtd_raw_dev_data_t));
  // get the block device
  rawdevice->binding = bdget(kdev_t_to_nr(MKDEV(maj, min)));
  err = blkdev_get(rawdevice->binding, mode, 0, BDEV_RAW);
  if (err) {
    goto init_err;
  }
  rawdevice->totalsize = totalsize;
  rawdevice->total_sectors = total_sectors;
  rawdevice->sector_size = blocksize;
  rawdevice->sector_bits = blocksize_bits;
  rawdevice->readonly = readonly;

  DEBUG(2, "sector_size = %d, sector_bits = %d\n", rawdevice->sector_size, rawdevice->sector_bits);

  mtd_info = (struct mtd_info *)kmalloc(sizeof(struct mtd_info), GFP_KERNEL);
  if (mtd_info == NULL) {
    err = -ENOMEM;
    goto init_err;
  }
  memset(mtd_info, 0, sizeof(*mtd_info));

  // Setup the MTD structure
  mtd_info->name = "blkmtd block device";
  if(readonly) {
    mtd_info->type = MTD_ROM;
    mtd_info->flags = MTD_CAP_ROM;
    mtd_info->erasesize = erase_size << 10;
  } else {
    mtd_info->type = MTD_RAM;
    mtd_info->flags = MTD_CAP_RAM;
    mtd_info->erasesize = erase_size << 10;
  }
  mtd_info->size = size;
  mtd_info->erase = blkmtd_erase;
  mtd_info->read = blkmtd_read;
  mtd_info->write = blkmtd_write;
  mtd_info->sync = blkmtd_sync;
  mtd_info->point = 0;
  mtd_info->unpoint = 0;

  mtd_info->priv = rawdevice;
  regions = calc_erase_regions(NULL, erase_size << 10, size);
  DEBUG(1, "blkmtd: init: found %d erase regions\n", regions);
  mtd_info->eraseregions = kmalloc(regions * sizeof(struct mtd_erase_region_info), GFP_KERNEL);
  if(mtd_info->eraseregions == NULL) {
    err = -ENOMEM;
    goto init_err;
  }
  mtd_info->numeraseregions = regions;
  calc_erase_regions(mtd_info->eraseregions, erase_size << 10, size);

  /* setup the page cache info */
  INIT_LIST_HEAD(&rawdevice->as.clean_pages);
  INIT_LIST_HEAD(&rawdevice->as.dirty_pages);
  INIT_LIST_HEAD(&rawdevice->as.locked_pages);
  rawdevice->as.nrpages = 0;
  rawdevice->as.a_ops = &blkmtd_aops;
  rawdevice->as.host = inode;
  rawdevice->as.i_mmap = NULL;
  rawdevice->as.i_mmap_shared = NULL;
  spin_lock_init(&rawdevice->as.i_shared_lock);
  rawdevice->as.gfp_mask = GFP_KERNEL;
  rawdevice->file = file;

  file->private_data = rawdevice;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
   mtd_info->module = THIS_MODULE;			
#endif
   if (add_mtd_device(mtd_info)) {
     err = -EIO;
     goto init_err;
   }
   init_waitqueue_head(&thr_wq);
   init_waitqueue_head(&mtbd_sync_wq);
   DEBUG(3, "blkmtd: init: kernel task @ %p\n", write_queue_task);
   DEBUG(2, "blkmtd: init: starting kernel task\n");
   kernel_thread(write_queue_task, NULL, CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
   DEBUG(2, "blkmtd: init: started\n");
   printk("blkmtd loaded: version = %s using %s erase_size = %dK %s\n", VERSION, device, erase_size, (readonly) ? "(read-only)" : "");
   return 0;

 init_err:
   if(rawdevice) {
     if(rawdevice->binding) 
       blkdev_put(rawdevice->binding, BDEV_RAW);

     kfree(rawdevice);
     rawdevice = NULL;
   }
   if(mtd_info) {
     if(mtd_info->eraseregions)
       kfree(mtd_info->eraseregions);
     kfree(mtd_info);
     mtd_info = NULL;
   }
   return err;
}
Example #8
File: cmm.c  Project: 12019/hg556a_source
static void
cmm_start_thread(void)
{
	kernel_thread(cmm_thread, 0, 0);
}
Example #9
static void
__adb_probe_task(void *data)
{
	adb_probe_task_pid = kernel_thread(adb_probe_task, NULL, SIGCHLD | CLONE_KERNEL);
}
Example #10
/*
 * This is the task which runs the usermode application
 */
static int ____call_usermodehelper(void *data)
{
	struct subprocess_info *sub_info = data;
	int retval;

	/* Install input pipe when needed */
	if (sub_info->stdin) {
		struct files_struct *f = current->files;
		struct fdtable *fdt;
		/* no races because files should be private here */
		sys_close(0);
		fd_install(0, sub_info->stdin);
		spin_lock(&f->file_lock);
		fdt = files_fdtable(f);
		FD_SET(0, fdt->open_fds);
		FD_CLR(0, fdt->close_on_exec);
		spin_unlock(&f->file_lock);

		/* and disallow core files too */
		current->signal->rlim[RLIMIT_CORE] = (struct rlimit){0, 0};
	}
 

	/* We can run anywhere, unlike our parent keventd(). */
	set_cpus_allowed(current, CPU_MASK_ALL);

	retval = __exec_usermodehelper(sub_info->path,
			sub_info->argv, sub_info->envp, sub_info->ring);

	/* Exec failed? */
	sub_info->retval = retval;
	do_exit(0);
}

/* Keventd can't block, but this (a child) can. */
static int wait_for_helper(void *data)
{
	struct subprocess_info *sub_info = data;
	pid_t pid;
	struct k_sigaction sa;

	/* Install a handler: if SIGCLD isn't handled sys_wait4 won't
	 * populate the status, but will return -ECHILD. */
	sa.sa.sa_handler = SIG_IGN;
	sa.sa.sa_flags = 0;
	siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
	do_sigaction(SIGCHLD, &sa, NULL);
	allow_signal(SIGCHLD);

	pid = kernel_thread(____call_usermodehelper, sub_info, SIGCHLD);
	if (pid < 0) {
		sub_info->retval = pid;
	} else {
		/*
		 * Normally it is bogus to call wait4() from in-kernel because
		 * wait4() wants to write the exit code to a userspace address.
		 * But wait_for_helper() always runs as keventd, and put_user()
		 * to a kernel address works OK for kernel threads, due to their
		 * having an mm_segment_t which spans the entire address space.
		 *
		 * Thus the __user pointer cast is valid here.
		 */
		sys_wait4(pid, (int __user *) &sub_info->retval, 0, NULL);
	}

	complete(sub_info->complete);
	return 0;
}

/* This is run by khelper thread  */
static void __call_usermodehelper(void *data)
{
	struct subprocess_info *sub_info = data;
	pid_t pid;
	int wait = sub_info->wait;

	/* CLONE_VFORK: wait until the usermode helper has execve'd
	 * successfully We need the data structures to stay around
	 * until that is done.  */
	if (wait)
		pid = kernel_thread(wait_for_helper, sub_info,
				    CLONE_FS | CLONE_FILES | SIGCHLD);
	else
		pid = kernel_thread(____call_usermodehelper, sub_info,
				    CLONE_VFORK | SIGCHLD);

	if (pid < 0) {
		sub_info->retval = pid;
		complete(sub_info->complete);
	} else if (!wait)
		complete(sub_info->complete);
}

/**
 * call_usermodehelper_keys - start a usermode application
 * @path: pathname for the application
 * @argv: null-terminated argument list
 * @envp: null-terminated environment list
 * @session_keyring: session keyring for process (NULL for an empty keyring)
 * @wait: wait for the application to finish and return status.
 *
 * Runs a user-space application.  The application is started
 * asynchronously if wait is not set, and runs as a child of keventd.
 * (ie. it runs with full root capabilities).
 *
 * Must be called from process context.  Returns a negative error code
 * if program was not execed successfully, or 0.
 */
int call_usermodehelper_keys(char *path, char **argv, char **envp,
			     struct key *session_keyring, int wait)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct subprocess_info sub_info = {
		.complete	= &done,
		.path		= path,
		.argv		= argv,
		.envp		= envp,
		.ring		= session_keyring,
		.wait		= wait,
		.retval		= 0,
	};
	DECLARE_WORK(work, __call_usermodehelper, &sub_info);

	if (!khelper_wq)
		return -EBUSY;

	if (path[0] == '\0')
		return 0;

	queue_work(khelper_wq, &work);
	wait_for_completion(&done);
	return sub_info.retval;
}
EXPORT_SYMBOL(call_usermodehelper_keys);

int call_usermodehelper_pipe(char *path, char **argv, char **envp,
			     struct file **filp)
{
	DECLARE_COMPLETION(done);
	struct subprocess_info sub_info = {
		.complete	= &done,
		.path		= path,
		.argv		= argv,
		.envp		= envp,
		.retval		= 0,
	};
	struct file *f;
	DECLARE_WORK(work, __call_usermodehelper, &sub_info);

	if (!khelper_wq)
		return -EBUSY;

	if (path[0] == '\0')
		return 0;

	f = create_write_pipe();
	if (!f)
		return -ENOMEM;
	*filp = f;

	f = create_read_pipe(f);
	if (!f) {
		free_write_pipe(*filp);
		return -ENOMEM;
	}
	sub_info.stdin = f;

	queue_work(khelper_wq, &work);
	wait_for_completion(&done);
	return sub_info.retval;
}
EXPORT_SYMBOL(call_usermodehelper_pipe);

void __init usermodehelper_init(void)
{
	khelper_wq = create_singlethread_workqueue("khelper");
	BUG_ON(!khelper_wq);
}
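A minimal caller of the interface defined above might look like the sketch below; the helper path and environment strings are illustrative assumptions, not values taken from this file.

/* Hypothetical usage sketch of call_usermodehelper_keys() as declared above. */
static int example_run_helper(void)
{
	char *argv[] = { "/bin/true", NULL };
	char *envp[] = { "HOME=/", "PATH=/sbin:/bin:/usr/sbin:/usr/bin", NULL };

	/* NULL session keyring; wait != 0 blocks until the helper has run and
	 * returns a negative errno if it could not be started. */
	return call_usermodehelper_keys(argv[0], argv, envp, NULL, 1);
}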
Example #11
static void create_kthread(struct kthread_create_info *create)
{
	int pid;

	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		create->result = ERR_PTR(pid);
	} else {
		struct sched_param param = { .sched_priority = 0 };
		wait_for_completion(&create->started);
		read_lock(&tasklist_lock);
		create->result = find_task_by_pid_ns(pid, &init_pid_ns);
		read_unlock(&tasklist_lock);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler(create->result, SCHED_NORMAL, &param);
		set_user_nice(create->result, KTHREAD_NICE_LEVEL);
		set_cpus_allowed_ptr(create->result, CPU_MASK_ALL_PTR);
	}
	complete(&create->done);
}

/**
 * kthread_create - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run(), kthread_create_on_cpu().
 *
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which noone will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM).
 */
struct task_struct *kthread_create(int (*threadfn)(void *data),
				   void *data,
				   const char namefmt[],
				   ...)
{
	struct kthread_create_info create;

	create.threadfn = threadfn;
	create.data = data;
	init_completion(&create.started);
	init_completion(&create.done);

	spin_lock(&kthread_create_lock);
	list_add_tail(&create.list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	wait_for_completion(&create.done);

	if (!IS_ERR(create.result)) {
		va_list args;
		va_start(args, namefmt);
		vsnprintf(create.result->comm, sizeof(create.result->comm),
			  namefmt, args);
		va_end(args);
	}
	return create.result;
}
EXPORT_SYMBOL(kthread_create);

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @k: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @k to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *k, unsigned int cpu)
{
	if (k->state != TASK_UNINTERRUPTIBLE) {
		WARN_ON(1);
		return;
	}
	/* Must have done schedule() in kthread() before we set_task_cpu */
	wait_task_inactive(k, 0);
	set_task_cpu(k, cpu);
	k->cpus_allowed = cpumask_of_cpu(cpu);
	k->flags |= PF_THREAD_BOUND;
}
EXPORT_SYMBOL(kthread_bind);

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit.  Your threadfn() must not call do_exit()
 * itself if you use this function!  This can also be called after
 * kthread_create() instead of calling wake_up_process(): the thread
 * will exit without calling threadfn().
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	int ret;

	mutex_lock(&kthread_stop_lock);

	/* It could exit after stop_info.k set, but before wake_up_process. */
	get_task_struct(k);

	/* Must init completion *before* thread sees kthread_stop_info.k */
	init_completion(&kthread_stop_info.done);
	smp_wmb();

	/* Now set kthread_should_stop() to true, and wake it up. */
	kthread_stop_info.k = k;
	wake_up_process(k);
	put_task_struct(k);

	/* Once it dies, reset stop ptr, gather result and we're done. */
	wait_for_completion(&kthread_stop_info.done);
	kthread_stop_info.k = NULL;
	ret = kthread_stop_info.err;
	mutex_unlock(&kthread_stop_lock);

	return ret;
}
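Putting kthread_create(), wake_up_process() and kthread_stop() together, a typical consumer follows the pattern sketched below; my_worker, my_task, my_start and my_stop are assumed names for illustration only.

/* Hypothetical consumer of the kthread API shown above. */
static int my_worker(void *data)
{
	while (!kthread_should_stop()) {
		/* do one unit of work on `data`, then sleep briefly */
		schedule_timeout_interruptible(HZ);
	}
	return 0;                        /* handed back to kthread_stop() */
}

static struct task_struct *my_task;

static int my_start(void *my_dev)
{
	my_task = kthread_create(my_worker, my_dev, "my_worker");
	if (IS_ERR(my_task))
		return PTR_ERR(my_task);
	wake_up_process(my_task);        /* kthread_create() returns the thread stopped */
	return 0;
}

static void my_stop(void)
{
	kthread_stop(my_task);           /* my_worker must not call do_exit() itself */
}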
Example #12
static int __init multipdp_init(void)
{
	int ret;

	wake_lock_init(&pdp_wake_lock, WAKE_LOCK_SUSPEND, "MULTI_PDP");

	pdp_arg_t pdp_arg = { .id = 1, .ifname = "ttyCSD", };
	pdp_arg_t efs_arg = { .id = 8, .ifname = "ttyEFS", };
	pdp_arg_t gps_arg = { .id = 5, .ifname = "ttyGPS", };
	pdp_arg_t xtra_arg = { .id = 6, .ifname = "ttyXTRA", };
	pdp_arg_t smd_arg = { .id = 25, .ifname = "ttySMD", };
	pdp_arg_t pcm_arg = { .id = 30, .ifname = "ttyPCM", } ;
	
#ifdef LOOP_BACK_TEST	
	pdp_arg_t loopback_arg = { .id = 31, .ifname = "ttyLOBK", };
#endif

	/* run DPRAM I/O thread */
	ret = kernel_thread(dpram_thread, NULL, CLONE_FS | CLONE_FILES);
	if (ret < 0) {
		EPRINTK("kernel_thread() failed\n");
		return ret;
	}
	wait_for_completion(&dpram_complete);
	if (!dpram_task) {
		EPRINTK("DPRAM I/O thread error\n");
		return -EIO;
	}

	/* create serial device for Circuit Switched Data */
	ret = pdp_activate(&pdp_arg, DEV_TYPE_SERIAL, DEV_FLAG_STICKY);
	if (ret < 0) {
		EPRINTK("failed to create a serial device for CSD\n");
		goto err0;
	}

	ret = pdp_activate(&efs_arg, DEV_TYPE_SERIAL, DEV_FLAG_STICKY);
	if (ret < 0) {
		EPRINTK("failed to create a serial device for EFS\n");
		goto err1;
	}

	ret = pdp_activate(&gps_arg, DEV_TYPE_SERIAL, DEV_FLAG_STICKY);
	if (ret < 0) {
		EPRINTK("failed to create a serial device for GPS\n");
		goto err2;
	}

	ret = pdp_activate(&xtra_arg, DEV_TYPE_SERIAL, DEV_FLAG_STICKY);
	if (ret < 0) {
		EPRINTK("failed to create a serial device for XTRA\n");
		goto err3;
	}
	
	ret = pdp_activate(&smd_arg, DEV_TYPE_SERIAL, DEV_FLAG_STICKY);
	if (ret < 0) {
		EPRINTK("failed to create a serial device for SMD\n");
		goto err4;
	}

	ret = pdp_activate(&pcm_arg, DEV_TYPE_SERIAL, DEV_FLAG_STICKY);
	if (ret < 0) {
		EPRINTK("failed to create a serial device for SMD\n");
		goto err5;
	}

#ifdef LOOP_BACK_TEST	
	ret = pdp_activate(&loopback_arg, DEV_TYPE_SERIAL, DEV_FLAG_STICKY);
	if (ret < 0) {
		EPRINTK("failed to create a serial device for LoopBack\n");
		goto err6;
	}
#endif
	/* create app. interface device */
	ret = misc_register(&multipdp_dev);
	if (ret < 0) {
		EPRINTK("misc_register() failed\n");
#ifdef LOOP_BACK_TEST
		goto err6;
#else
		goto err5;
#endif
	}


#ifdef LOOP_BACK_TEST
	ret = device_create_file(multipdp_dev.this_device, &dev_attr_loopback);
#endif	

#ifdef CONFIG_PROC_FS
	create_proc_read_entry(APP_DEVNAME, 0, 0, 
			       multipdp_proc_read, NULL);
#endif

#ifdef	NO_TTY_DPRAM
	printk("multipdp_init:multipdp_rx_noti_regi calling");
	multipdp_rx_noti_regi(multipdp_rx_cback );	
#endif
//	printk(KERN_INFO 
//	       "$Id: multipdp.c,v 1.10 2008/01/11 05:40:56 melonzz Exp $\n");
	return 0;

#ifdef LOOP_BACK_TEST	
err6:
	pdp_deactivate(&loopback_arg, 1);
#endif	
err5:
	/* undo the PCM serial device */
	pdp_deactivate(&pcm_arg, 1);

err4:
	/* undo the SMD serial device */
	pdp_deactivate(&smd_arg, 1);

err3:
	/* undo the XTRA serial device */
	pdp_deactivate(&xtra_arg, 1);
err2:
	/* undo the GPS serial device */
	pdp_deactivate(&gps_arg, 1);
err1:
	/* undo serial device for Circuit Switched Data */
	pdp_deactivate(&pdp_arg, 1);
err0:
	/* kill DPRAM I/O thread */
	if (dpram_task) {
		send_sig(SIGUSR1, dpram_task, 1);
		wait_for_completion(&dpram_complete);
	}
	return ret;
}

static void __exit multipdp_exit(void)
{
	wake_lock_destroy(&pdp_wake_lock);
#ifdef CONFIG_PROC_FS
	remove_proc_entry(APP_DEVNAME, 0);
#endif

	/* remove app. interface device */
	misc_deregister(&multipdp_dev);

	/* clean up PDP context table */
	pdp_cleanup();

	/* kill DPRAM I/O thread */
	if (dpram_task) {
		send_sig(SIGUSR1, dpram_task, 1);
		wait_for_completion(&dpram_complete);
	}
}

//module_init(multipdp_init);
late_initcall(multipdp_init);
module_exit(multipdp_exit);

MODULE_AUTHOR("SAMSUNG ELECTRONICS CO., LTD");
MODULE_DESCRIPTION("Multiple PDP Muxer / Demuxer");
MODULE_LICENSE("GPL");