int main(void) { int pipefd[2]; int exec_fd; vsyscall(); atexit(ate); make_private_tmp(); /* Reserve fd 0 for 1-byte pipe ping from child. */ close(0); if (open("/", O_RDONLY|O_DIRECTORY|O_PATH) != 0) { return 1; } exec_fd = make_exe(payload, sizeof(payload)); if (pipe(pipefd) == -1) { return 1; } if (dup2(pipefd[1], 0) != 0) { return 1; } pid = fork(); if (pid == -1) { return 1; } if (pid == 0) { sys_execveat(exec_fd, "", NULL, NULL, AT_EMPTY_PATH); return 1; } char _; if (read(pipefd[0], &_, 1) != 1) { return 1; } struct stat st; if (fstat(exec_fd, &st) == -1) { return 1; } /* Generate "head -n1 /proc/$PID/maps" */ char buf0[256]; memset(buf0, ' ', sizeof(buf0)); int len = snprintf(buf0, sizeof(buf0), "%08lx-%08lx r-xp 00000000 %02lx:%02lx %llu", VADDR, VADDR + PAGE_SIZE, MAJOR(st.st_dev), MINOR(st.st_dev), (unsigned long long)st.st_ino); buf0[len] = ' '; snprintf(buf0 + MAPS_OFFSET, sizeof(buf0) - MAPS_OFFSET, "/tmp/#%llu (deleted)\n", (unsigned long long)st.st_ino); /* Test /proc/$PID/maps */ { const size_t len = strlen(buf0) + (g_vsyscall ? 
strlen(str_vsyscall) : 0); char buf[256]; ssize_t rv; int fd; snprintf(buf, sizeof(buf), "/proc/%u/maps", pid); fd = open(buf, O_RDONLY); if (fd == -1) { return 1; } rv = read(fd, buf, sizeof(buf)); assert(rv == len); assert(memcmp(buf, buf0, strlen(buf0)) == 0); if (g_vsyscall) { assert(memcmp(buf + strlen(buf0), str_vsyscall, strlen(str_vsyscall)) == 0); } } /* Test /proc/$PID/smaps */ { char buf[4096]; ssize_t rv; int fd; snprintf(buf, sizeof(buf), "/proc/%u/smaps", pid); fd = open(buf, O_RDONLY); if (fd == -1) { return 1; } rv = read(fd, buf, sizeof(buf)); assert(0 <= rv && rv <= sizeof(buf)); assert(rv >= strlen(buf0)); assert(memcmp(buf, buf0, strlen(buf0)) == 0); #define RSS1 "Rss: 4 kB\n" #define RSS2 "Rss: 0 kB\n" #define PSS1 "Pss: 4 kB\n" #define PSS2 "Pss: 0 kB\n" assert(memmem(buf, rv, RSS1, strlen(RSS1)) || memmem(buf, rv, RSS2, strlen(RSS2))); assert(memmem(buf, rv, PSS1, strlen(PSS1)) || memmem(buf, rv, PSS2, strlen(PSS2))); static const char *S[] = { "Size: 4 kB\n", "KernelPageSize: 4 kB\n", "MMUPageSize: 4 kB\n", "Anonymous: 0 kB\n", "AnonHugePages: 0 kB\n", "Shared_Hugetlb: 0 kB\n", "Private_Hugetlb: 0 kB\n", "Locked: 0 kB\n", }; int i; for (i = 0; i < sizeof(S)/sizeof(S[0]); i++) { assert(memmem(buf, rv, S[i], strlen(S[i]))); } if (g_vsyscall) { assert(memmem(buf, rv, str_vsyscall, strlen(str_vsyscall))); } } /* Test /proc/$PID/smaps_rollup */ { char bufr[256]; memset(bufr, ' ', sizeof(bufr)); len = snprintf(bufr, sizeof(bufr), "%08lx-%08lx ---p 00000000 00:00 0", VADDR, VADDR + PAGE_SIZE); bufr[len] = ' '; snprintf(bufr + MAPS_OFFSET, sizeof(bufr) - MAPS_OFFSET, "[rollup]\n"); char buf[1024]; ssize_t rv; int fd; snprintf(buf, sizeof(buf), "/proc/%u/smaps_rollup", pid); fd = open(buf, O_RDONLY); if (fd == -1) { return 1; } rv = read(fd, buf, sizeof(buf)); assert(0 <= rv && rv <= sizeof(buf)); assert(rv >= strlen(bufr)); assert(memcmp(buf, bufr, strlen(bufr)) == 0); assert(memmem(buf, rv, RSS1, strlen(RSS1)) || memmem(buf, rv, RSS2, 
strlen(RSS2))); assert(memmem(buf, rv, PSS1, strlen(PSS1)) || memmem(buf, rv, PSS2, strlen(PSS2))); static const char *S[] = { "Anonymous: 0 kB\n", "AnonHugePages: 0 kB\n", "Shared_Hugetlb: 0 kB\n", "Private_Hugetlb: 0 kB\n", "Locked: 0 kB\n", }; int i; for (i = 0; i < sizeof(S)/sizeof(S[0]); i++) { assert(memmem(buf, rv, S[i], strlen(S[i]))); } } /* Test /proc/$PID/statm */ { char buf[64]; ssize_t rv; int fd; snprintf(buf, sizeof(buf), "/proc/%u/statm", pid); fd = open(buf, O_RDONLY); if (fd == -1) { return 1; } rv = read(fd, buf, sizeof(buf)); assert(rv == 7 * 2); assert(buf[0] == '1'); /* ->total_vm */ assert(buf[1] == ' '); assert(buf[2] == '0' || buf[2] == '1'); /* rss */ assert(buf[3] == ' '); assert(buf[4] == '0' || buf[2] == '1'); /* file rss */ assert(buf[5] == ' '); assert(buf[6] == '1'); /* ELF executable segments */ assert(buf[7] == ' '); assert(buf[8] == '0'); assert(buf[9] == ' '); assert(buf[10] == '0'); /* ->data_vm + ->stack_vm */ assert(buf[11] == ' '); assert(buf[12] == '0'); assert(buf[13] == '\n'); } return 0; }
/*
 * name_to_dev_t - resolve a textual root-device spec into a dev_t.
 *
 * Accepted forms (tried in this order):
 *   "PARTUUID=..."        partition UUID lookup (CONFIG_BLOCK only)
 *   "<maj>:<min>"         explicit major:minor pair
 *   "<hex>"               legacy hex-encoded device number
 *   "/dev/nfs", "/dev/ram" fixed pseudo devices
 *   "/dev/<name>[p]<N>"   block-device name, with partition fallbacks
 *
 * Returns 0 when the name cannot be resolved.  NOTE: mutates the local
 * copy in s[], never the caller's string (except the name += skips).
 */
dev_t name_to_dev_t(char *name)
{
	char s[32];
	char *p;
	dev_t res = 0;
	int part;

#ifdef CONFIG_BLOCK
	if (strncmp(name, "PARTUUID=", 9) == 0) {
		name += 9;
		res = devt_from_partuuid(name);
		if (!res)
			goto fail;
		goto done;
	}
#endif

	/* No "/dev/" prefix: numeric forms only. */
	if (strncmp(name, "/dev/", 5) != 0) {
		unsigned maj, min;

		if (sscanf(name, "%u:%u", &maj, &min) == 2) {
			res = MKDEV(maj, min);
			/* Round-trip check rejects out-of-range numbers. */
			if (maj != MAJOR(res) || min != MINOR(res))
				goto fail;
		} else {
			res = new_decode_dev(simple_strtoul(name, &p, 16));
			if (*p)
				goto fail;
		}
		goto done;
	}

	name += 5;
	res = Root_NFS;
	if (strcmp(name, "nfs") == 0)
		goto done;
	res = Root_RAM0;
	if (strcmp(name, "ram") == 0)
		goto done;

	if (strlen(name) > 31)
		goto fail;
	/* Work on a copy; '/' becomes '!' per sysfs block-name convention. */
	strcpy(s, name);
	for (p = s; *p; p++)
		if (*p == '/')
			*p = '!';
	res = blk_lookup_devt(s, 0);
	if (res)
		goto done;

	/*
	 * try non-existent, but valid partition, which may only exist
	 * after revalidating the disk, like partitioned md devices
	 */
	/* p is at the NUL here; back up over the trailing digit run. */
	while (p > s && isdigit(p[-1]))
		p--;
	if (p == s || !*p || *p == '0')
		goto fail;

	/* try disk name without <part number> */
	part = simple_strtoul(p, NULL, 10);
	*p = '\0';
	res = blk_lookup_devt(s, part);
	if (res)
		goto done;

	/* try disk name without p<part number> */
	if (p < s + 2 || !isdigit(p[-2]) || p[-1] != 'p')
		goto fail;
	p[-1] = '\0';
	res = blk_lookup_devt(s, part);
	if (res)
		goto done;
fail:
	return 0;
done:
	return res;
}
/*
 * tty_ioctl - terminal ioctl dispatcher (early-Linux style).
 *
 * dev: for major 5 (/dev/tty) the current process's controlling tty is
 * used; otherwise the minor number indexes tty_table directly.
 * arg is a user-space pointer or small integer depending on cmd.
 * Returns 0 on success or a negative errno; most modem/window ioctls
 * are unimplemented and return -EINVAL.
 *
 * The TCSETSF/TCSETSW/TCSETS (and TCSETAF/TCSETAW/TCSETA) cases use
 * deliberate fallthrough: flush, then drain, then set.
 */
int tty_ioctl(int dev, int cmd, int arg)
{
	struct tty_struct * tty;

	if (MAJOR(dev) == 5) {
		/* /dev/tty: resolve to the caller's controlling terminal. */
		dev=current->tty;
		if (dev<0)
			panic("tty_ioctl: dev<0");
	} else
		dev=MINOR(dev);
	tty = dev + tty_table;
	switch (cmd) {
	case TCGETS:
		return get_termios(tty,(struct termios *) arg);
	case TCSETSF:
		flush(&tty->read_q); /* fallthrough */
	case TCSETSW:
		wait_until_sent(tty); /* fallthrough */
	case TCSETS:
		return set_termios(tty,(struct termios *) arg);
	case TCGETA:
		return get_termio(tty,(struct termio *) arg);
	case TCSETAF:
		flush(&tty->read_q); /* fallthrough */
	case TCSETAW:
		wait_until_sent(tty); /* fallthrough */
	case TCSETA:
		return set_termio(tty,(struct termio *) arg);
	case TCSBRK:
		/* arg == 0: drain output, then send a break. */
		if (!arg) {
			wait_until_sent(tty);
			send_break(tty);
		}
		return 0;
	case TCXONC:
		return -EINVAL; /* not implemented */
	case TCFLSH:
		/* 0: input, 1: output, 2: both. */
		if (arg==0)
			flush(&tty->read_q);
		else if (arg==1)
			flush(&tty->write_q);
		else if (arg==2) {
			flush(&tty->read_q);
			flush(&tty->write_q);
		} else
			return -EINVAL;
		return 0;
	case TIOCEXCL:
		return -EINVAL; /* not implemented */
	case TIOCNXCL:
		return -EINVAL; /* not implemented */
	case TIOCSCTTY:
		return -EINVAL; /* set controlling term NI */
	case TIOCGPGRP:
		verify_area((void *) arg,4);
		put_fs_long(tty->pgrp,(unsigned long *) arg);
		return 0;
	case TIOCSPGRP:
		tty->pgrp=get_fs_long((unsigned long *) arg);
		return 0;
	case TIOCOUTQ:
		/* Bytes still queued for output. */
		verify_area((void *) arg,4);
		put_fs_long(CHARS(tty->write_q),(unsigned long *) arg);
		return 0;
	case TIOCSTI:
		return -EINVAL; /* not implemented */
	case TIOCGWINSZ:
		return -EINVAL; /* not implemented */
	case TIOCSWINSZ:
		return -EINVAL; /* not implemented */
	case TIOCMGET:
		return -EINVAL; /* not implemented */
	case TIOCMBIS:
		return -EINVAL; /* not implemented */
	case TIOCMBIC:
		return -EINVAL; /* not implemented */
	case TIOCMSET:
		return -EINVAL; /* not implemented */
	case TIOCGSOFTCAR:
		return -EINVAL; /* not implemented */
	case TIOCSSOFTCAR:
		return -EINVAL; /* not implemented */
	default:
		return -EINVAL;
	}
}
/*
 * cifs_mknod - create a device node / special file on a CIFS share.
 *
 * With Unix extensions the server stores the real device number via
 * SetPathInfo.  Without them, but with SFU emulation (CIFS_MOUNT_UNX_EMUL),
 * a regular file containing an "IntxCHR"/"IntxBLK" header plus major/minor
 * is written instead.  Otherwise -EPERM.
 *
 * Fix: the kmalloc-failure path used to kfree(full_path) and return
 * directly, leaking the tlink reference and skipping free_xid() ordering;
 * it now unwinds through mknod_out like every other error path.
 */
int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
	       dev_t device_number)
{
	int rc = -EPERM;
	unsigned int xid;
	int create_options = CREATE_NOT_DIR | CREATE_OPTION_SPECIAL;
	struct cifs_sb_info *cifs_sb;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;
	struct cifs_io_parms io_parms;
	char *full_path = NULL;
	struct inode *newinode = NULL;
	__u32 oplock = 0;
	struct cifs_fid fid;
	struct cifs_open_parms oparms;
	FILE_ALL_INFO *buf = NULL;
	unsigned int bytes_written;
	struct win_dev *pdev;
	struct kvec iov[2];

	if (!old_valid_dev(device_number))
		return -EINVAL;

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink))
		return PTR_ERR(tlink);

	tcon = tlink_tcon(tlink);
	xid = get_xid();

	full_path = build_path_from_dentry(direntry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto mknod_out;
	}

	if (tcon->unix_ext) {
		/* Server understands Unix extensions: set mode/device there. */
		struct cifs_unix_set_info_args args = {
			.mode	= mode & ~current_umask(),
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= device_number,
		};
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
			args.uid = current_fsuid();
			args.gid = current_fsgid();
		} else {
			args.uid = INVALID_UID; /* no change */
			args.gid = INVALID_GID; /* no change */
		}
		rc = CIFSSMBUnixSetPathInfo(xid, tcon, full_path, &args,
					    cifs_sb->local_nls,
					    cifs_remap(cifs_sb));
		if (rc)
			goto mknod_out;

		rc = cifs_get_inode_info_unix(&newinode, full_path,
					      inode->i_sb, xid);
		if (rc == 0)
			d_instantiate(direntry, newinode);
		goto mknod_out;
	}

	/* No Unix extensions and no SFU emulation: nothing we can do. */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
		goto mknod_out;

	cifs_dbg(FYI, "sfu compat create special file\n");

	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (buf == NULL) {
		/* Unwind via mknod_out so the tlink ref is dropped too. */
		rc = -ENOMEM;
		goto mknod_out;
	}

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = GENERIC_WRITE;
	oparms.create_options = create_options;
	oparms.disposition = FILE_CREATE;
	oparms.path = full_path;
	oparms.fid = &fid;
	oparms.reconnect = false;

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;
	rc = tcon->ses->server->ops->open(xid, &oparms, &oplock, buf);
	if (rc)
		goto mknod_out;

	/*
	 * BB Do not bother to decode buf since no local inode yet to put
	 * timestamps in, but we can reuse it safely.
	 */
	pdev = (struct win_dev *)buf;
	io_parms.pid = current->tgid;
	io_parms.tcon = tcon;
	io_parms.offset = 0;
	io_parms.length = sizeof(struct win_dev);
	/* iov[0] is reserved for the SMB header by sync_write. */
	iov[1].iov_base = buf;
	iov[1].iov_len = sizeof(struct win_dev);
	if (S_ISCHR(mode)) {
		memcpy(pdev->type, "IntxCHR", 8);
		pdev->major = cpu_to_le64(MAJOR(device_number));
		pdev->minor = cpu_to_le64(MINOR(device_number));
		rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
							&bytes_written, iov, 1);
	} else if (S_ISBLK(mode)) {
		memcpy(pdev->type, "IntxBLK", 8);
		pdev->major = cpu_to_le64(MAJOR(device_number));
		pdev->minor = cpu_to_le64(MINOR(device_number));
		rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
							&bytes_written, iov, 1);
	} /* else if (S_ISFIFO) */
	tcon->ses->server->ops->close(xid, tcon, &fid);
	d_drop(direntry);

	/* FIXME: add code here to set EAs */

mknod_out:
	kfree(full_path);
	kfree(buf);
	free_xid(xid);
	cifs_put_tlink(tlink);
	return rc;
}
/**
 * <Ring 1> Perform read/write syscall.
 * @param p Ptr to message.
 * @return On success, the number of bytes read is returned. Otherwise a
 *         negative error code is returned.
 */
PUBLIC int do_rdwt(MESSAGE * p)
{
	int fd = p->FD;
	struct file_desc * filp = pcaller->filp[fd];
	int rw_flag = p->type;		/* READ or WRITE */
	char * buf = p->BUF;		/* caller's buffer (in its space) */
	int src = p->source;		/* requesting process */
	int len = p->CNT;		/* byte count */
	int bytes_rdwt = 0, retval = 0;
	u64 newpos;

	if (!filp)
		return -EBADF;

	int position = filp->fd_pos;
	int flags = (mode_t)filp->fd_mode;
	struct inode * pin = filp->fd_inode;

	/* TODO: pipe goes here */
	/* if (PIPE) ... */

	int file_type = pin->i_mode & I_TYPE;
	/* TODO: read/write for block special */
	if (file_type == I_CHAR_SPECIAL) {
		/*
		 * Character special file: rewrite the message in place and
		 * forward it synchronously to the owning device driver.
		 */
		int t = p->type == READ ? DEV_READ : DEV_WRITE;
		p->type = t;

		int dev = pin->i_specdev;

		p->DEVICE	= MINOR(dev);
		p->BUF		= buf;
		p->CNT		= len;
		p->PROC_NR	= src;
		assert(dd_map[MAJOR(dev)].driver_nr != INVALID_DRIVER);
		send_recv(BOTH, dd_map[MAJOR(dev)].driver_nr, p);
		/* Driver echoes the full count back on success. */
		assert(p->CNT == len);

		return p->CNT;
	} else if (file_type == I_REGULAR) {
		/* check for O_APPEND */
		if (rw_flag == WRITE) {
			if (flags & O_APPEND)
				position = pin->i_size;
		}

		/* issue the request */
		int bytes = 0;
		retval = request_readwrite(pin->i_fs_ep, pin->i_dev, pin->i_num,
					   position, rw_flag, src,
					   buf, len, &newpos, &bytes);

		bytes_rdwt += bytes;
		position = newpos;
	} else {
		printl("VFS: do_rdwt: unknown file type: %x\n", file_type);
	}

	/* A write past EOF extends the file. */
	if (rw_flag == WRITE) {
		if (position > pin->i_size)
			pin->i_size = position;
	}

	filp->fd_pos = position;

	if (!retval) {
		return bytes_rdwt;
	}
	return retval;
}
/* * FUNCTION NAME: ppi_open * * INPUTS/OUTPUTS: * in_inode - Description of openned file. * in_filp - Description of openned file. * * RETURN * 0: Open ok. * -ENXIO No such device * * FUNCTION(S) CALLED: * * GLOBAL VARIABLES REFERENCED: ppiinfo * * GLOBAL VARIABLES MODIFIED: NIL * * DESCRIPTION: It is invoked when user call 'open' system call * to open ppi device. * * CAUTION: */ static int ppi_open(struct inode *inode, struct file *filp) { char intname[20]; unsigned long flags; int minor = MINOR(inode->i_rdev); pr_debug("ppi_open:\n"); /* PPI ? */ if (minor != PPI0_MINOR) return -ENXIO; spin_lock_irqsave(&ppifcd_lock, flags); if (ppiinfo.opened) { spin_unlock_irqrestore(&ppifcd_lock, flags); return -EMFILE; } /* Clear configuration information */ memset(&ppiinfo, 0, sizeof(ppi_device_t)); if (filp->f_flags & O_NONBLOCK) ppiinfo.nonblock = 1; ppiinfo.opened = 1; ppiinfo.done = 0; ppiinfo.dma_config = (DMA_FLOW_MODE | WNR | RESTART | DMA_WDSIZE_16 | DMA2D | DI_EN); ppiinfo.pixel_per_line = PIXEL_PER_LINE; ppiinfo.lines_per_frame = LINES_PER_FRAME; ppiinfo.bpp = 8; ppiinfo.ppi_control = POL_S | POL_C | PPI_DATA_LEN | PPI_PACKING | CFG_GP_Input_3Syncs | GP_Input_Mode; ppiinfo.ppi_status = 0; ppiinfo.ppi_delay = 0; ppiinfo.ppi_trigger_gpio = NO_TRIGGER; ppiinfo.rx_avail = &ppirxq0; strcpy(intname, PPI_INTNAME); ppiinfo.irqnum = IRQ_PPI; filp->private_data = &ppiinfo; ppifcd_reg_reset(filp->private_data); /* Request DMA channel, and pass the interrupt handler */ if (request_dma(CH_PPI, "BF533_PPI_DMA") < 0) { panic("Unable to attach BlackFin PPI DMA channel\n"); ppiinfo.opened = 0; spin_unlock_irqrestore(&ppifcd_lock, flags); return -EFAULT; } else set_dma_callback(CH_PPI, (void *)ppifcd_irq, filp->private_data); request_irq(IRQ_PPI_ERROR, (void *)ppifcd_irq_error, IRQF_DISABLED, "PPI ERROR", filp->private_data); spin_unlock_irqrestore(&ppifcd_lock, flags); pr_debug("ppi_open: return\n"); return 0; }
/**
 * software_resume - Resume from a saved hibernation image.
 *
 * This routine is called as a late initcall, when all devices have been
 * discovered and initialized already.
 *
 * The image reading code is called to see if there is a hibernation image
 * available for reading.  If that is the case, devices are quiesced and the
 * contents of memory is restored from the saved image.
 *
 * If this is successful, control reappears in the restored target kernel in
 * hibernation_snapshot() which returns to hibernate().  Otherwise, the routine
 * attempts to recover gracefully and make the kernel return to the normal mode
 * of operation.
 */
static int software_resume(void)
{
	int error, nr_calls = 0;

	/*
	 * If the user said "noresume".. bail out early.
	 */
	if (noresume || !hibernation_available())
		return 0;

	/*
	 * name_to_dev_t() below takes a sysfs buffer mutex when sysfs
	 * is configured into the kernel. Since the regular hibernate
	 * trigger path is via sysfs which takes a buffer mutex before
	 * calling hibernate functions (which take pm_mutex) this can
	 * cause lockdep to complain about a possible ABBA deadlock
	 * which cannot happen since we're in the boot code here and
	 * sysfs can't be invoked yet. Therefore, we use a subclass
	 * here to avoid lockdep complaining.
	 */
	mutex_lock_nested(&pm_mutex, SINGLE_DEPTH_NESTING);

	if (swsusp_resume_device)
		goto Check_image;

	if (!strlen(resume_file)) {
		error = -ENOENT;
		goto Unlock;
	}

	pr_debug("PM: Checking hibernation image partition %s\n", resume_file);

	if (resume_delay) {
		printk(KERN_INFO "Waiting %dsec before reading resume device...\n",
			resume_delay);
		ssleep(resume_delay);
	}

	/* Check if the device is there */
	swsusp_resume_device = name_to_dev_t(resume_file);

	/*
	 * name_to_dev_t is ineffective to verify partition if resume_file is in
	 * integer format. (e.g. major:minor)
	 */
	if (isdigit(resume_file[0]) && resume_wait) {
		int partno;
		/* Poll until the disk for that dev_t has been registered. */
		while (!get_gendisk(swsusp_resume_device, &partno))
			msleep(10);
	}

	if (!swsusp_resume_device) {
		/*
		 * Some device discovery might still be in progress; we need
		 * to wait for this to finish.
		 */
		wait_for_device_probe();

		if (resume_wait) {
			while ((swsusp_resume_device = name_to_dev_t(resume_file)) == 0)
				msleep(10);
			async_synchronize_full();
		}

		swsusp_resume_device = name_to_dev_t(resume_file);
		if (!swsusp_resume_device) {
			error = -ENODEV;
			goto Unlock;
		}
	}

 Check_image:
	pr_debug("PM: Hibernation image partition %d:%d present\n",
		MAJOR(swsusp_resume_device), MINOR(swsusp_resume_device));

	pr_debug("PM: Looking for hibernation image.\n");
	error = swsusp_check();
	if (error)
		goto Unlock;

	/* The snapshot device should not be opened while we're running */
	if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
		error = -EBUSY;
		swsusp_close(FMODE_READ);
		goto Unlock;
	}

	pm_prepare_console();
	error = __pm_notifier_call_chain(PM_RESTORE_PREPARE, -1, &nr_calls);
	if (error) {
		/* Only re-notify the callbacks that actually ran. */
		nr_calls--;
		goto Close_Finish;
	}

	pr_debug("PM: Preparing processes for restore.\n");
	error = freeze_processes();
	if (error)
		goto Close_Finish;

	error = load_image_and_restore();
	thaw_processes();
 Finish:
	__pm_notifier_call_chain(PM_POST_RESTORE, nr_calls, NULL);
	pm_restore_console();
	atomic_inc(&snapshot_device_available);
	/* For success case, the suspend path will release the lock */
 Unlock:
	mutex_unlock(&pm_mutex);
	pr_debug("PM: Hibernation image not present or could not be loaded.\n");
	return error;
 Close_Finish:
	swsusp_close(FMODE_READ);
	goto Finish;
}
/*
 * nftl_ioctl - block ioctls for the NFTL translation layer (2.2/2.4 era).
 *
 * Minor number layout: the top bits select the NFTL device, the low
 * NFTL_PARTN_BITS select the partition within it.
 *
 * Handles geometry, size queries, cache flush and partition re-read;
 * everything else is -EINVAL.
 */
static int nftl_ioctl(struct inode * inode, struct file * file, unsigned int cmd, unsigned long arg)
{
	struct NFTLrecord *nftl;
	int p;

	nftl = NFTLs[MINOR(inode->i_rdev) >> NFTL_PARTN_BITS];

	if (!nftl) return -EINVAL;

	switch (cmd) {
	case HDIO_GETGEO: {
		/* Report the synthetic CHS geometry for fdisk etc. */
		struct hd_geometry g;

		g.heads = nftl->heads;
		g.sectors = nftl->sectors;
		g.cylinders = nftl->cylinders;
		g.start = part_table[MINOR(inode->i_rdev)].start_sect;
		return copy_to_user((void *)arg, &g, sizeof g) ? -EFAULT : 0;
	}
	case BLKGETSIZE:   /* Return device size */
		return put_user(part_table[MINOR(inode->i_rdev)].nr_sects,
				(unsigned long *) arg);

#ifdef BLKGETSIZE64
	case BLKGETSIZE64:
		/* Size in bytes (sectors << 9). */
		return put_user((u64)part_table[MINOR(inode->i_rdev)].nr_sects << 9,
				(u64 *)arg);
#endif

	case BLKFLSBUF:
		if (!capable(CAP_SYS_ADMIN)) return -EACCES;
		fsync_dev(inode->i_rdev);
		invalidate_buffers(inode->i_rdev);
		if (nftl->mtd->sync)
			nftl->mtd->sync(nftl->mtd);
		return 0;

	case BLKRRPART:
		if (!capable(CAP_SYS_ADMIN)) return -EACCES;
		if (nftl->usecount > 1) return -EBUSY;
		/*
		 * We have to flush all buffers and invalidate caches,
		 * or we won't be able to re-use the partitions,
		 * if there was a change and we don't want to reboot
		 */
		/* NOTE(review): this branch reads inode->i_dev where the
		 * rest of the function uses i_rdev — confirm they are the
		 * same value on this kernel. */
		p = (1<<NFTL_PARTN_BITS) - 1;
		while (p-- > 0) {
			kdev_t devp = MKDEV(MAJOR(inode->i_dev), MINOR(inode->i_dev)+p);
			if (part_table[p].nr_sects > 0)
				invalidate_device (devp, 1);

			part_table[MINOR(inode->i_dev)+p].start_sect = 0;
			part_table[MINOR(inode->i_dev)+p].nr_sects = 0;
		}

#if LINUX_VERSION_CODE < 0x20328
		resetup_one_dev(&nftl_gendisk, MINOR(inode->i_rdev) >> NFTL_PARTN_BITS);
#else
		grok_partitions(&nftl_gendisk, MINOR(inode->i_rdev) >> NFTL_PARTN_BITS,
				1<<NFTL_PARTN_BITS, nftl->nr_sects);
#endif
		return 0;

#if (LINUX_VERSION_CODE < 0x20303)
	RO_IOCTLS(inode->i_rdev, arg);  /* ref. linux/blk.h */
#else
	case BLKROSET:
	case BLKROGET:
	case BLKSSZGET:
		return blk_ioctl(inode->i_rdev, cmd, arg);
#endif

	default:
		return -EINVAL;
	}
}
/*
 * nftl_request - block request-queue handler for NFTL (2.4-era API).
 *
 * Runs with io_request_lock held on entry to each iteration (INIT_REQUEST
 * expects it); the lock is dropped while doing the slow MTD I/O and
 * re-taken before end_request().  Per-device serialization is via
 * nftl->mutex.  Each request is served 512 bytes at a time.
 */
void nftl_request(RQFUNC_ARG)
{
	unsigned int dev, block, nsect;
	struct NFTLrecord *nftl;
	char *buffer;
	struct request *req;
	int res;

	while (1) {
		INIT_REQUEST;	/* blk.h */
		req = CURRENT;

		/* We can do this because the generic code knows not to
		   touch the request at the head of the queue */
		spin_unlock_irq(&io_request_lock);

		DEBUG(MTD_DEBUG_LEVEL2, "NFTL_request\n");
		DEBUG(MTD_DEBUG_LEVEL3,
		      "NFTL %s request, from sector 0x%04lx for 0x%04lx sectors\n",
		      (req->cmd == READ) ? "Read " : "Write",
		      req->sector, req->current_nr_sectors);

		dev = MINOR(req->rq_dev);	/* device + partition index */
		block = req->sector;
		nsect = req->current_nr_sectors;
		buffer = req->buffer;
		res = 1;			/* succeed */

		if (dev >= MAX_NFTLS * (1<<NFTL_PARTN_BITS)) {
			/* there is no such partition */
			printk("nftl: bad minor number: device = %s\n",
			       kdevname(req->rq_dev));
			res = 0;	/* fail */
			goto repeat;
		}

		nftl = NFTLs[dev / (1<<NFTL_PARTN_BITS)];
		DEBUG(MTD_DEBUG_LEVEL3, "Waiting for mutex\n");
		down(&nftl->mutex);
		DEBUG(MTD_DEBUG_LEVEL3, "Got mutex\n");

		if (block + nsect > part_table[dev].nr_sects) {
			/* access past the end of device */
			printk("nftl%c%d: bad access: block = %d, count = %d\n",
			       (MINOR(req->rq_dev)>>6)+'a', dev & 0xf, block, nsect);
			up(&nftl->mutex);
			res = 0;	/* fail */
			goto repeat;
		}

		/* Translate partition-relative block to device block. */
		block += part_table[dev].start_sect;

		if (req->cmd == READ) {
			DEBUG(MTD_DEBUG_LEVEL2,
			      "NFTL read request of 0x%x sectors @ %x "
			      "(req->nr_sectors == %lx)\n",
			      nsect, block, req->nr_sectors);

			for ( ; nsect > 0; nsect-- , block++, buffer += 512) {
				/* Read a single sector to req->buffer + (512 * i) */
				if (NFTL_readblock(nftl, block, buffer)) {
					DEBUG(MTD_DEBUG_LEVEL2,
					      "NFTL read request failed\n");
					up(&nftl->mutex);
					res = 0;
					goto repeat;
				}
			}

			DEBUG(MTD_DEBUG_LEVEL2,"NFTL read request completed OK\n");
			up(&nftl->mutex);
			goto repeat;
		} else if (req->cmd == WRITE) {
			DEBUG(MTD_DEBUG_LEVEL2,
			      "NFTL write request of 0x%x sectors @ %x "
			      "(req->nr_sectors == %lx)\n",
			      nsect, block, req->nr_sectors);
#ifdef CONFIG_NFTL_RW
			for ( ; nsect > 0; nsect-- , block++, buffer += 512) {
				/* Read a single sector to req->buffer + (512 * i) */
				if (NFTL_writeblock(nftl, block, buffer)) {
					DEBUG(MTD_DEBUG_LEVEL1,
					      "NFTL write request failed\n");
					up(&nftl->mutex);
					res = 0;
					goto repeat;
				}
			}

			DEBUG(MTD_DEBUG_LEVEL2,"NFTL write request completed OK\n");
#else
			res = 0; /* Writes always fail */
#endif /* CONFIG_NFTL_RW */
			up(&nftl->mutex);
			goto repeat;
		} else {
			DEBUG(MTD_DEBUG_LEVEL0, "NFTL unknown request\n");
			up(&nftl->mutex);
			res = 0;
			goto repeat;
		}

	repeat:
		/* Re-take the queue lock before completing the request. */
		DEBUG(MTD_DEBUG_LEVEL3, "end_request(%d)\n", res);
		spin_lock_irq(&io_request_lock);
		end_request(res);
	}
}
/*
 * Return the adjugate (classical adjoint) of this 4x4 matrix: the
 * transpose of the cofactor matrix, so that
 * this * adjoint() == determinant() * Identity.
 *
 * MINOR(m, r0,r1,r2, c0,c1,c2) is the 3x3 determinant of the sub-matrix
 * formed by those rows/columns; the alternating signs implement the
 * cofactor checkerboard, and entry (i,j) uses the minor that deletes
 * row j / column i, which performs the transpose.
 */
Matrix4 Matrix4::adjoint() const
{
	return Matrix4( MINOR(*this, 1, 2, 3, 1, 2, 3),
		-MINOR(*this, 0, 2, 3, 1, 2, 3),
		MINOR(*this, 0, 1, 3, 1, 2, 3),
		-MINOR(*this, 0, 1, 2, 1, 2, 3),

		-MINOR(*this, 1, 2, 3, 0, 2, 3),
		MINOR(*this, 0, 2, 3, 0, 2, 3),
		-MINOR(*this, 0, 1, 3, 0, 2, 3),
		MINOR(*this, 0, 1, 2, 0, 2, 3),

		MINOR(*this, 1, 2, 3, 0, 1, 3),
		-MINOR(*this, 0, 2, 3, 0, 1, 3),
		MINOR(*this, 0, 1, 3, 0, 1, 3),
		-MINOR(*this, 0, 1, 2, 0, 1, 3),

		-MINOR(*this, 1, 2, 3, 0, 1, 2),
		MINOR(*this, 0, 2, 3, 0, 1, 2),
		-MINOR(*this, 0, 1, 3, 0, 1, 2),
		MINOR(*this, 0, 1, 2, 0, 1, 2));
}
static int miyabi_sb_mount(char *dev_name, struct path *path, char *type, unsigned long flags, void *data) { static char realpath[PATH_MAX]; int r; struct block_device* bdev; unsigned char major, minor; r = _xx_realpath_from_path(path, realpath, PATH_MAX-1); if (r != 0) return r; if (strncmp(realpath, CONFIG_SECURITY_MIYABI_SYSTEM_DIR_PATH, strlen(CONFIG_SECURITY_MIYABI_SYSTEM_DIR_PATH)) == 0) { if (strcmp(realpath, CONFIG_SECURITY_MIYABI_SYSTEM_DIR_PATH) == 0) { if (strcmp(dev_name, CONFIG_SECURITY_MIYABI_SYSTEM_DEV_PATH) != 0) { printk(KERN_ERR "%s: REJECT dev_name=%s realpath=%s\n", __FUNCTION__, dev_name, realpath); return -EPERM; } else { if ((flags & MS_REMOUNT) && (!(flags & MS_RDONLY))) { printk(KERN_ERR "%s: REJECT dev_name=%s realpath=%s ro remount\n", __FUNCTION__, dev_name, realpath); return -EPERM; } if (flags & MS_BIND) { printk(KERN_ERR "%s: REJECT dev_name=%s realpath=%s loopback mount\n", __FUNCTION__, dev_name, realpath); return -EPERM; } bdev = lookup_bdev((const char*)dev_name); if( bdev == NULL ) { printk("cannot lookup\n"); return -EPERM; } major = MAJOR(bdev->bd_dev); minor = MINOR(bdev->bd_dev); bdput(bdev); if((major != CONFIG_SECURITY_MIYABI_SYSTEM_DEV_MAJOR) || (minor != CONFIG_SECURITY_MIYABI_SYSTEM_DEV_MINOR)) { printk(KERN_ERR "%s: REJECT dev_name=%s realpath=%s mismatch major or minor\n", __FUNCTION__, dev_name, realpath); return -EPERM; } } } else { printk(KERN_ERR "%s: REJECT realpath=%s\n", __FUNCTION__, realpath); return -EPERM; } } return 0; }
/*
 * software_resume - resume from a saved hibernation image (older variant).
 *
 * Late-initcall: looks up the resume partition named by resume_file (or a
 * pre-set swsusp_resume_device), waits for device probing where needed,
 * verifies an image is present, freezes processes and restores memory.
 * On successful restore control never returns here; otherwise the system
 * is thawed and the error is returned with everything unwound.
 */
static int software_resume(void)
{
	int error;
	unsigned int flags;

	/*
	 * If the user said "noresume".. bail out early.
	 */
	if (noresume)
		return 0;

	/*
	 * name_to_dev_t() below takes a sysfs buffer mutex when sysfs
	 * is configured into the kernel. Since the regular hibernate
	 * trigger path is via sysfs which takes a buffer mutex before
	 * calling hibernate functions (which take pm_mutex) this can
	 * cause lockdep to complain about a possible ABBA deadlock
	 * which cannot happen since we're in the boot code here and
	 * sysfs can't be invoked yet. Therefore, we use a subclass
	 * here to avoid lockdep complaining.
	 */
	mutex_lock_nested(&pm_mutex, SINGLE_DEPTH_NESTING);

	if (swsusp_resume_device)
		goto Check_image;

	if (!strlen(resume_file)) {
		error = -ENOENT;
		goto Unlock;
	}

	pr_debug("PM: Checking hibernation image partition %s\n", resume_file);

	/* Check if the device is there */
	swsusp_resume_device = name_to_dev_t(resume_file);
	if (!swsusp_resume_device) {
		/*
		 * Some device discovery might still be in progress; we need
		 * to wait for this to finish.
		 */
		wait_for_device_probe();
		/*
		 * We can't depend on SCSI devices being available after loading
		 * one of their modules until scsi_complete_async_scans() is
		 * called and the resume device usually is a SCSI one.
		 */
		scsi_complete_async_scans();

		swsusp_resume_device = name_to_dev_t(resume_file);
		if (!swsusp_resume_device) {
			error = -ENODEV;
			goto Unlock;
		}
	}

 Check_image:
	pr_debug("PM: Hibernation image partition %d:%d present\n",
		MAJOR(swsusp_resume_device), MINOR(swsusp_resume_device));

	pr_debug("PM: Looking for hibernation image.\n");
	error = swsusp_check();
	if (error)
		goto Unlock;

	/* The snapshot device should not be opened while we're running */
	if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
		error = -EBUSY;
		swsusp_close(FMODE_READ);
		goto Unlock;
	}

	pm_prepare_console();
	error = pm_notifier_call_chain(PM_RESTORE_PREPARE);
	if (error)
		goto close_finish;

	error = usermodehelper_disable();
	if (error)
		goto close_finish;

	error = create_basic_memory_bitmaps();
	if (error)
		goto close_finish;

	pr_debug("PM: Preparing processes for restore.\n");
	error = prepare_processes();
	if (error) {
		swsusp_close(FMODE_READ);
		goto Done;
	}

	pr_debug("PM: Loading hibernation image.\n");

	error = swsusp_read(&flags);
	swsusp_close(FMODE_READ);
	if (!error)
		/* On success this does not return. */
		hibernation_restore(flags & SF_PLATFORM_MODE);

	printk(KERN_ERR "PM: Failed to load hibernation image, recovering.\n");
	swsusp_free();
	thaw_processes();
 Done:
	free_basic_memory_bitmaps();
	usermodehelper_enable();
 Finish:
	pm_notifier_call_chain(PM_POST_RESTORE);
	pm_restore_console();
	atomic_inc(&snapshot_device_available);
	/* For success case, the suspend path will release the lock */
 Unlock:
	mutex_unlock(&pm_mutex);
	pr_debug("PM: Hibernation image not present or could not be loaded.\n");
	return error;
 close_finish:
	swsusp_close(FMODE_READ);
	goto Finish;
}
/*
 * Build tool for early-Linux boot images: concatenates 'boot' (1 sector),
 * 'setup' (padded to SETUP_SECTS sectors) and 'system' onto stdout,
 * validating each a.out/Minix header and patching the root device
 * major/minor into bytes 508/509 of the boot sector.
 *
 * Usage: build boot setup system [rootdev|FLOPPY]
 * Exits via die() on any format or I/O error.
 */
int main(int argc, char ** argv)
{
	int i,c,id;
	char buf[1024];
	char major_root, minor_root;
	struct stat sb;

	if ((argc != 4) && (argc != 5))
		usage();
	if (argc == 5) {
		/* "FLOPPY" means root dev (0,0); otherwise stat the node. */
		if (strcmp(argv[4], "FLOPPY")) {
			if (stat(argv[4], &sb)) {
				perror(argv[4]);
				die("Couldn't stat root device.");
			}
			major_root = MAJOR(sb.st_rdev);
			minor_root = MINOR(sb.st_rdev);
		} else {
			major_root = 0;
			minor_root = 0;
		}
	} else {
		major_root = DEFAULT_MAJOR_ROOT;
		minor_root = DEFAULT_MINOR_ROOT;
	}
	fprintf(stderr, "Root device is (%d, %d)\n", major_root, minor_root);
	/* Only floppy (2), hd (3) or "current" (0) majors are bootable. */
	if ((major_root != 2) && (major_root != 3) &&
	    (major_root != 0)) {
		fprintf(stderr, "Illegal root device (major = %d)\n",
			major_root);
		die("Bad root device --- major #");
	}
	for (i=0;i<sizeof buf; i++) buf[i]=0;
	if ((id=open(argv[1],O_RDONLY,0))<0)
		die("Unable to open 'boot'");
	if (read(id,buf,MINIX_HEADER) != MINIX_HEADER)
		die("Unable to read header of 'boot'");
	/* Validate the Minix a.out header fields of 'boot'. */
	if (((long *) buf)[0]!=0x04100301)
		die("Non-Minix header of 'boot'");
	if (((long *) buf)[1]!=MINIX_HEADER)
		die("Non-Minix header of 'boot'");
	if (((long *) buf)[3]!=0)
		die("Illegal data segment in 'boot'");
	if (((long *) buf)[4]!=0)
		die("Illegal bss in 'boot'");
	if (((long *) buf)[5] != 0)
		die("Non-Minix header of 'boot'");
	if (((long *) buf)[7] != 0)
		die("Illegal symbol table in 'boot'");
	i=read(id,buf,sizeof buf);
	fprintf(stderr,"Boot sector %d bytes.\n",i);
	if (i != 512)
		die("Boot block must be exactly 512 bytes");
	if ((*(unsigned short *)(buf+510)) != 0xAA55)
		die("Boot block hasn't got boot flag (0xAA55)");
	/* Patch the root device into the boot sector. */
	buf[508] = (char) minor_root;
	buf[509] = (char) major_root;
	i=write(1,buf,512);
	if (i!=512)
		die("Write call failed");
	close (id);

	if ((id=open(argv[2],O_RDONLY,0))<0)
		die("Unable to open 'setup'");
	if (read(id,buf,MINIX_HEADER) != MINIX_HEADER)
		die("Unable to read header of 'setup'");
	/* Validate the Minix a.out header fields of 'setup'. */
	if (((long *) buf)[0]!=0x04100301)
		die("Non-Minix header of 'setup'");
	if (((long *) buf)[1]!=MINIX_HEADER)
		die("Non-Minix header of 'setup'");
	if (((long *) buf)[3]!=0)
		die("Illegal data segment in 'setup'");
	if (((long *) buf)[4]!=0)
		die("Illegal bss in 'setup'");
	if (((long *) buf)[5] != 0)
		die("Non-Minix header of 'setup'");
	if (((long *) buf)[7] != 0)
		die("Illegal symbol table in 'setup'");
	for (i=0 ; (c=read(id,buf,sizeof buf))>0 ; i+=c )
		if (write(1,buf,c)!=c)
			die("Write call failed");
	close (id);
	if (i > SETUP_SECTS*512)
		die("Setup exceeds " STRINGIFY(SETUP_SECTS)
			" sectors - rewrite build/boot/setup");
	fprintf(stderr,"Setup is %d bytes.\n",i);
	/* Zero-pad 'setup' out to SETUP_SECTS full sectors. */
	for (c=0 ; c<sizeof(buf) ; c++)
		buf[c] = '\0';
	while (i<SETUP_SECTS*512) {
		c = SETUP_SECTS*512-i;
		if (c > sizeof(buf))
			c = sizeof(buf);
		if (write(1,buf,c) != c)
			die("Write call failed");
		i += c;
	}

	if ((id=open(argv[3],O_RDONLY,0))<0)
		die("Unable to open 'system'");
//	if (read(id,buf,GCC_HEADER) != GCC_HEADER)
//		die("Unable to read header of 'system'");
//	if (((long *) buf)[5] != 0)
//		die("Non-GCC header of 'system'");
	for (i=0 ; (c=read(id,buf,sizeof buf))>0 ; i+=c )
		if (write(1,buf,c)!=c)
			die("Write call failed");
	close(id);
	fprintf(stderr,"System is %d bytes.\n",i);
	if (i > SYS_SIZE*16)
		die("System is too big");
	return(0);
}
static int xordev_probe(struct pci_dev *dev, const struct pci_device_id *id) /* PCI device initializer */ { uint32_t i, j; int err = 0; struct xordev_str *xordev; dev_t devt; mutex_lock(&global_lock); /* find empty slot and initialize */ for (i = 0; xordev_data[i].devno != 0 && i < MAX_DEVICES; i++); if (i == MAX_DEVICES){ printk (KERN_NOTICE "Too many xordev devices!"); return -1; } xordev = &xordev_data[i]; memset(xordev, 0, sizeof(struct xordev_str)); /* clear everything */ xordev->devno = MKDEV(major, i * 3); mutex_init(&xordev->lock); xordev->spinlock = SPIN_LOCK_UNLOCKED; for (j = 0; j < 3; j++) { devt = MKDEV(major, i * 3 + j); /* creating char device */ cdev_init(&xordev->cdev[j], &xordev_fops); xordev->cdev[j].ops = &xordev_fops; err = cdev_add(&xordev->cdev[j], devt, 1); if (err) { printk(KERN_NOTICE "Error %d adding XorDev %d", err, i); goto fail; } /* udev device create */ xordev->dev[j] = device_create(xordev_class, 0, devt, xordev, dev_names[j], i); if (IS_ERR(xordev->dev[j])) { err = PTR_ERR(xordev->dev[j]); printk(KERN_NOTICE "Error %d creating udev device", err); goto udev_create_fail; } } pci_set_drvdata(dev, xordev); /* enable PCI device */ err = pci_enable_device(dev); if (err) { printk(KERN_NOTICE "Error %d enabling XorDev PCI device %d", err, i); goto pci_enable_fail; } err = pci_request_regions(dev, DRVNAME); if (err) { printk(KERN_NOTICE "Error %d requesting XorDev %d regions", err, i); goto pci_regions_fail; } xordev->bar = pci_iomap(dev, 0, 4096); if (NULL == xordev->bar) { printk(KERN_NOTICE "Can't map XorDev %d iomem", i); goto pci_iomap_fail; } /* enable DMA */ pci_set_master(dev); err = pci_set_dma_mask(dev, DMA_BIT_MASK(32)); if (err) { printk(KERN_NOTICE "Can't set DMA mask for XorDev %d", i); goto pci_iomap_fail; } err = pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(32)); if (err) { printk(KERN_NOTICE "Can't set consistent DMA mask for XorDev %d", i); goto pci_iomap_fail; } /* alloc buffers */ xordev->cpu_addr[0] = 
dma_alloc_coherent(&dev->dev, BUFFER_SIZE * 3, &xordev->dma_addr[0], 0); if (xordev->cpu_addr) { xordev->cpu_addr[1] = xordev->cpu_addr[0] + BUFFER_SIZE; xordev->dma_addr[1] = xordev->dma_addr[0] + BUFFER_SIZE; xordev->cpu_addr[2] = xordev->cpu_addr[1] + BUFFER_SIZE; xordev->dma_addr[2] = xordev->dma_addr[1] + BUFFER_SIZE; memset(xordev->offset, 0, 12); iowrite32(xordev->dma_addr[0], xordev->bar + BAR_SRC1); iowrite32(xordev->dma_addr[1], xordev->bar + BAR_SRC2); iowrite32(xordev->dma_addr[2], xordev->bar + BAR_DST); } else { printk(KERN_NOTICE "Can't allocate DMA memory for XorDev %d", i); goto pci_iomap_fail; } /* waiting queue */ init_waitqueue_head(&xordev->queue); /* interruptions */ err = request_irq(dev->irq, xordev_irq, IRQF_SHARED, DRVNAME, xordev); if (err) { printk(KERN_NOTICE "Can't register irq handler for XorDev %d", i); goto request_irq_fail; } mutex_unlock(&global_lock); return 0; request_irq_fail: pci_iounmap(dev, xordev->bar); pci_iomap_fail: pci_release_regions(dev); pci_regions_fail: pci_disable_device(dev); pci_enable_fail: for (i = 0; i < j; i++) device_destroy(xordev_class, MKDEV(MAJOR(xordev->devno), MINOR(xordev->devno) + i)); udev_create_fail: for (i = 0; i < j; i++) cdev_del(&xordev->cdev[j]); fail: mutex_destroy(&xordev->mutex); xordev->devno = 0; mutex_unlock(&global_lock); return err; }
static int frandom_init_module(void) { int result; /* The buffer size MUST be at least 256 bytes, because we assume that minimal length in init_rand_state(). */ if (frandom_bufsize < 256) { pr_err("frandom: Invalid frandom_bufsize: %d\n", frandom_bufsize); return -EINVAL; } if ((frandom_chunklimit != 0) && (frandom_chunklimit < 256)) { pr_err("frandom: Invalid frandom_chunklimit: %d\n", frandom_chunklimit); return -EINVAL; } erandom_state = kmalloc(sizeof(struct frandom_state), GFP_KERNEL); if (!erandom_state) return -ENOMEM; /* This specific buffer is only used for seeding, so we need 256 bytes exactly */ erandom_state->buf = kmalloc(256, GFP_KERNEL); if (!erandom_state->buf) { kfree(erandom_state); return -ENOMEM; } sema_init(&erandom_state->sem, 1); /* Init semaphore as a mutex */ erandom_seeded = 0; frandom_class = class_create(THIS_MODULE, "fastrng"); if (IS_ERR(frandom_class)) { result = PTR_ERR(frandom_class); pr_warn("frandom: Failed to register class fastrng\n"); goto error0; } /* * Register your major, and accept a dynamic number. 
This is the * first thing to do, in order to avoid releasing other module's * fops in frandom_cleanup_module() */ result = alloc_chrdev_region(&frandom_devt, 0, NR_FRANDOM_DEVS, "frandom"); if (result < 0) { pr_warn("frandom: failed to alloc frandom region\n"); goto error1; } frandom_minor = MINOR(frandom_devt); erandom_minor = frandom_minor + 1; erandom_devt = MKDEV(MAJOR(frandom_devt), erandom_minor); cdev_init(&frandom_cdev, &frandom_fops); frandom_cdev.owner = THIS_MODULE; result = cdev_add(&frandom_cdev, frandom_devt, 1); if (result) { pr_warn("frandom: Failed to add cdev for /dev/frandom\n"); goto error2; } frandom_device = device_create(frandom_class, NULL, frandom_devt, NULL, "frandom"); if (IS_ERR(frandom_device)) { pr_warn("frandom: Failed to create frandom device\n"); goto error3; } cdev_init(&erandom_cdev, &frandom_fops); erandom_cdev.owner = THIS_MODULE; result = cdev_add(&erandom_cdev, erandom_devt, 1); if (result) { pr_warn("frandom: Failed to add cdev for /dev/erandom\n"); goto error4; } erandom_device = device_create(frandom_class, NULL, erandom_devt, NULL, "erandom"); if (IS_ERR(erandom_device)) { pr_warn("frandom: Failed to create erandom device\n"); goto error5; } return 0; /* succeed */ error5: cdev_del(&erandom_cdev); error4: device_destroy(frandom_class, frandom_devt); error3: cdev_del(&frandom_cdev); error2: unregister_chrdev_region(frandom_devt, NR_FRANDOM_DEVS); error1: class_destroy(frandom_class); error0: kfree(erandom_state->buf); kfree(erandom_state); return result; }
/*
 * /proc/[pid]/mounts: emit one "<dev> <mountpoint> <type> <opts> 0 0"
 * line for a single vfsmount. Filesystems may override the device name
 * and options via s_op->show_devname / show_options.
 */
static int show_vfsmnt(struct seq_file *m, struct vfsmount *mnt)
{
	struct mount *r = real_mount(mnt);
	int err = 0;
	struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
	struct super_block *sb = mnt_path.dentry->d_sb;

	if (sb->s_op->show_devname) {
		err = sb->s_op->show_devname(m, mnt_path.dentry);
		if (err)
			goto out;
	} else {
		/* mangle() octal-escapes whitespace so the line stays parseable */
		mangle(m, r->mnt_devname ? r->mnt_devname : "none");
	}
	seq_putc(m, ' ');
	seq_path(m, &mnt_path, " \t\n\\");
	seq_putc(m, ' ');
	show_type(m, sb);
	/* per-mount ro/rw, then superblock options, then per-mount options */
	seq_puts(m, __mnt_is_readonly(mnt) ? " ro" : " rw");
	err = show_sb_opts(m, sb);
	if (err)
		goto out;
	show_mnt_opts(m, mnt);
	if (sb->s_op->show_options)
		err = sb->s_op->show_options(m, mnt_path.dentry);
	/* trailing "0 0" keeps the /etc/mtab dump/pass field format */
	seq_puts(m, " 0 0\n");
out:
	return err;
}

/*
 * /proc/[pid]/mountinfo: richer per-mount line including mount IDs,
 * st_dev major:minor, root dentry path, propagation tags and a
 * filesystem-specific trailer after the " - " separator.
 */
static int show_mountinfo(struct seq_file *m, struct vfsmount *mnt)
{
	struct proc_mounts *p = proc_mounts(m);
	struct mount *r = real_mount(mnt);
	struct super_block *sb = mnt->mnt_sb;
	struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
	struct path root = p->root;	/* opener's root, captured at open */
	int err = 0;

	seq_printf(m, "%i %i %u:%u ", r->mnt_id, r->mnt_parent->mnt_id,
		   MAJOR(sb->s_dev), MINOR(sb->s_dev));
	if (sb->s_op->show_path)
		err = sb->s_op->show_path(m, mnt->mnt_root);
	else
		seq_dentry(m, mnt->mnt_root, " \t\n\\");
	if (err)
		goto out;
	seq_putc(m, ' ');

	/* mountpoints outside of chroot jail will give SEQ_SKIP on this */
	err = seq_path_root(m, &mnt_path, &root, " \t\n\\");
	if (err)
		goto out;

	seq_puts(m, mnt->mnt_flags & MNT_READONLY ? " ro" : " rw");
	show_mnt_opts(m, mnt);

	/* Tagged fields ("foo:X" or "bar") */
	if (IS_MNT_SHARED(r))
		seq_printf(m, " shared:%i", r->mnt_group_id);
	if (IS_MNT_SLAVE(r)) {
		int master = r->mnt_master->mnt_group_id;
		int dom = get_dominating_id(r, &p->root);
		seq_printf(m, " master:%i", master);
		if (dom && dom != master)
			seq_printf(m, " propagate_from:%i", dom);
	}
	if (IS_MNT_UNBINDABLE(r))
		seq_puts(m, " unbindable");

	/* Filesystem specific data */
	seq_puts(m, " - ");
	show_type(m, sb);
	seq_putc(m, ' ');
	if (sb->s_op->show_devname)
		err = sb->s_op->show_devname(m, mnt->mnt_root);
	else
		mangle(m, r->mnt_devname ? r->mnt_devname : "none");
	if (err)
		goto out;
	/* superblock-level ro/rw here, unlike the per-mount flag above */
	seq_puts(m, sb->s_flags & MS_RDONLY ? " ro" : " rw");
	err = show_sb_opts(m, sb);
	if (err)
		goto out;
	if (sb->s_op->show_options)
		err = sb->s_op->show_options(m, mnt->mnt_root);
	seq_putc(m, '\n');
out:
	return err;
}

/*
 * /proc/[pid]/mountstats: "device X mounted on Y with fstype Z [stats]"
 * format; show_stats lets filesystems (e.g. NFS) append statistics.
 */
static int show_vfsstat(struct seq_file *m, struct vfsmount *mnt)
{
	struct mount *r = real_mount(mnt);
	struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
	struct super_block *sb = mnt_path.dentry->d_sb;
	int err = 0;

	/* device */
	if (sb->s_op->show_devname) {
		seq_puts(m, "device ");
		err = sb->s_op->show_devname(m, mnt_path.dentry);
	} else {
		if (r->mnt_devname) {
			seq_puts(m, "device ");
			mangle(m, r->mnt_devname);
		} else
			seq_puts(m, "no device");
	}

	/* mount point */
	seq_puts(m, " mounted on ");
	seq_path(m, &mnt_path, " \t\n\\");
	seq_putc(m, ' ');

	/* file system type */
	seq_puts(m, "with fstype ");
	show_type(m, sb);

	/* optional statistics */
	if (sb->s_op->show_stats) {
		seq_putc(m, ' ');
		if (!err)
			err = sb->s_op->show_stats(m, mnt_path.dentry);
	}

	seq_putc(m, '\n');
	return err;
}

/*
 * Shared open path for the three proc files. Pins the target task's
 * mount namespace and fs root (so the listing stays relative to that
 * task even after it exits), then sets up the seq_file with the chosen
 * per-mount show callback.
 *
 * Locking: task_lock() guards the nsproxy/fs reads; the ns and root
 * references taken here are dropped in mounts_release() (or on the
 * error path below, in reverse order of acquisition).
 */
static int mounts_open_common(struct inode *inode, struct file *file,
			      int (*show)(struct seq_file *, struct vfsmount *))
{
	struct task_struct *task = get_proc_task(inode);
	struct nsproxy *nsp;
	struct mnt_namespace *ns = NULL;
	struct path root;
	struct proc_mounts *p;
	int ret = -EINVAL;

	if (!task)
		goto err;

	task_lock(task);
	nsp = task->nsproxy;
	if (!nsp || !nsp->mnt_ns) {
		task_unlock(task);
		put_task_struct(task);
		goto err;
	}
	ns = nsp->mnt_ns;
	get_mnt_ns(ns);
	if (!task->fs) {
		task_unlock(task);
		put_task_struct(task);
		ret = -ENOENT;
		goto err_put_ns;
	}
	get_fs_root(task->fs, &root);
	task_unlock(task);
	put_task_struct(task);

	ret = -ENOMEM;
	p = kmalloc(sizeof(struct proc_mounts), GFP_KERNEL);
	if (!p)
		goto err_put_path;

	/* NOTE(review): private_data is pre-set so seq_open() reuses the
	 * seq_file embedded in proc_mounts instead of allocating its own —
	 * confirm this matches the seq_open() contract in this tree. */
	file->private_data = &p->m;
	ret = seq_open(file, &mounts_op);
	if (ret)
		goto err_free;

	p->ns = ns;
	p->root = root;
	/* poll support: readers detect mount-table changes via ns->event */
	p->m.poll_event = ns->event;
	p->show = show;
	/* presumably "no event cached yet" sentinel — verify against users
	 * of cached_event elsewhere in this file */
	p->cached_event = ~0ULL;

	return 0;

err_free:
	kfree(p);
err_put_path:
	path_put(&root);
err_put_ns:
	put_mnt_ns(ns);
err:
	return ret;
}

/* Drop the namespace and root references taken at open time. */
static int mounts_release(struct inode *inode, struct file *file)
{
	struct proc_mounts *p = proc_mounts(file->private_data);
	path_put(&p->root);
	put_mnt_ns(p->ns);
	return seq_release(inode, file);
}

static int mounts_open(struct inode *inode, struct file *file)
{
	return mounts_open_common(inode, file, show_vfsmnt);
}

static int mountinfo_open(struct inode *inode, struct file *file)
{
	return mounts_open_common(inode, file, show_mountinfo);
}

static int mountstats_open(struct inode *inode, struct file *file)
{
	return mounts_open_common(inode, file, show_vfsstat);
}

const struct file_operations proc_mounts_operations = {
	.open		= mounts_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= mounts_release,
	.poll		= mounts_poll,
};

const struct file_operations proc_mountinfo_operations = {
	.open		= mountinfo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= mounts_release,
	.poll		= mounts_poll,
};

/* mountstats has no poll: its content does not signal mount events. */
const struct file_operations proc_mountstats_operations = {
	.open		= mountstats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= mounts_release,
};
static int __init bt_hwctl_init(void) { int ret = -1, err = -1; BT_HWCTL_DEBUG("bt_hwctl_init\n"); platform_driver_register(&mt6622_driver); if (!(bh = kzalloc(sizeof(struct bt_hwctl), GFP_KERNEL))) { BT_HWCTL_ALERT("bt_hwctl_init allocate dev struct failed\n"); err = -ENOMEM; goto ERR_EXIT; } ret = alloc_chrdev_region(&bh->dev_t, 0, 1, BTHWCTL_NAME); if (ret) { BT_HWCTL_ALERT("alloc chrdev region failed\n"); goto ERR_EXIT; } BT_HWCTL_DEBUG("alloc %s:%d:%d\n", BTHWCTL_NAME, MAJOR(bh->dev_t), MINOR(bh->dev_t)); cdev_init(&bh->cdev, &bt_hwctl_fops); bh->cdev.owner = THIS_MODULE; bh->cdev.ops = &bt_hwctl_fops; err = cdev_add(&bh->cdev, bh->dev_t, 1); if (err) { BT_HWCTL_ALERT("add chrdev failed\n"); goto ERR_EXIT; } bh->cls = class_create(THIS_MODULE, BTHWCTL_NAME); if (IS_ERR(bh->cls)) { err = PTR_ERR(bh->cls); BT_HWCTL_ALERT("class_create failed, errno:%d\n", err); goto ERR_EXIT; } bh->dev = device_create(bh->cls, NULL, bh->dev_t, NULL, BTHWCTL_NAME); mutex_init(&bh->sem); init_waitqueue_head(&eint_wait); INIT_WORK(&mtk_wcn_bt_event_work, mtk_wcn_bt_work_fun); mtk_wcn_bt_workqueue = create_singlethread_workqueue("mtk_wcn_bt"); if (!mtk_wcn_bt_workqueue) { printk("create_singlethread_workqueue failed.\n"); err = -ESRCH; goto ERR_EXIT; } /* request gpio used by BT */ //mt_bt_gpio_init(); BT_HWCTL_DEBUG("bt_hwctl_init ok\n"); return 0; ERR_EXIT: if (err == 0) cdev_del(&bh->cdev); if (ret == 0) unregister_chrdev_region(bh->dev_t, 1); if (bh){ kfree(bh); bh = NULL; } return -1; }
/* * Print device name (in decimal, hexadecimal or symbolic) - * at present hexadecimal only. * Note: returns pointer to static data! */ char * kdevname(kdev_t dev) { static char buffer[32]; sprintf(buffer, "%02x:%02x", MAJOR(dev), MINOR(dev)); return buffer; }
static ssize_t resume_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf,"%d:%d\n", MAJOR(swsusp_resume_device), MINOR(swsusp_resume_device)); }
static int fdev_init(void) { int result = 0; char *name = "firstdev"; pr_alert("DEVICE:%s\n", name); pr_alert("The process is \"%s\" (pid %i)\n", current->comm, current->pid); pr_alert("UTS_RELEASE:%s", UTS_RELEASE); pr_alert("KERNEL_VERSION:%d", KERNEL_VERSION(2, 6, 10)); unsigned int firstminor = 0; int err; err = alloc_chrdev_region(&dev, firstminor, count, name); if (!err) { pr_alert("alloc_chrdev_region successful."); pr_alert("dev_t:%d,Major=%d,Minor=%d", dev, MAJOR(dev), MINOR(dev)); } else { pr_alert("alloc_chrdev_region failed."); } fdev_p = kmalloc_array(count, sizeof(struct fdev), GFP_KERNEL); if (!fdev_p) { result = -ENOMEM; pr_alert("kmalloc fdev_p failed."); goto fail; } else { pr_alert("kmalloc fdev_p successful."); } memset(fdev_p, 0, count * sizeof(struct fdev)); int i, major, devno; major = MAJOR(dev); for (i = 0; i < count; ++i) { struct fdev *devp = &fdev_p[i]; sema_init(&devp->sem, 1); devno = MKDEV(major, i); devp->major = major; devp->minor = i; devp->quantum_count = QUANTUM_DEFAULT; devp->qset_count = QSET_DEFAULT; cdev_init(&devp->cdev, &fops); devp->cdev.owner = THIS_MODULE; devp->cdev.ops = &fops; err = cdev_add(&devp->cdev, devno, 1); if (err) pr_alert("Error %d adding firstdev %d", err, i); else pr_alert("Successful adding firstdev %d", i); } return 0; fail: fdev_exit(); return result; }
static int __init diagchar_init(void) { dev_t dev; int error; printk(KERN_INFO "diagfwd initializing ..\n"); driver = kzalloc(sizeof(struct diagchar_dev) + 5, GFP_KERNEL); if (driver) { driver->used = 0; timer_in_progress = 0; driver->debug_flag = 1; driver->alert_count = 0; setup_timer(&drain_timer, drain_timer_func, 1234); driver->itemsize = itemsize; driver->poolsize = poolsize; driver->itemsize_hdlc = itemsize_hdlc; driver->poolsize_hdlc = poolsize_hdlc; driver->itemsize_usb_struct = itemsize_usb_struct; driver->poolsize_usb_struct = poolsize_usb_struct; driver->num_clients = max_clients; driver->logging_mode = USB_MODE; mutex_init(&driver->diagchar_mutex); init_waitqueue_head(&driver->wait_q); INIT_WORK(&(driver->diag_drain_work), diag_drain_work_fn); INIT_WORK(&(driver->diag_read_smd_work), diag_read_smd_work_fn); INIT_WORK(&(driver->diag_read_smd_qdsp_work), diag_read_smd_qdsp_work_fn); diagfwd_init(); printk(KERN_INFO "diagchar initializing ..\n"); driver->num = 1; driver->name = ((void *)driver) + sizeof(struct diagchar_dev); strlcpy(driver->name, "diag", 4); /* Get major number from kernel and initialize */ error = alloc_chrdev_region(&dev, driver->minor_start, driver->num, driver->name); if (!error) { driver->major = MAJOR(dev); driver->minor_start = MINOR(dev); } else { printk(KERN_INFO "Major number not allocated\n"); goto fail; } driver->cdev = cdev_alloc(); error = diagchar_setup_cdev(dev); if (error) goto fail; } else { printk(KERN_INFO "kzalloc failed\n"); goto fail; } printk(KERN_INFO "diagchar initialized\n"); return 0; fail: diagchar_cleanup(); diagfwd_exit(); return -1; }
static int tun_chr_open(struct inode *inode, struct file * file) { unsigned int minor = MINOR(inode->i_rdev); unsigned int index = minor & TUN_MINOR_MASK; struct tun_struct *tun = NULL; if( minor > TUN_MAX_DEV ){ DBG1(KERN_ERR "tun: Device minor is too large %d\n", minor); return -ENODEV; } DBG1(KERN_INFO "tun%d: tun_chr_open\n", index); /* Only one process is allowed to open */ if( test_and_set_bit(minor, tun_open_mask) ) return -EBUSY; DBG1(KERN_INFO "tun%d: Allocating device\n", index); if( !(tun=kmalloc(sizeof(struct tun_struct), GFP_KERNEL)) ) { clear_bit(minor, tun_open_mask); return -ENOMEM; } file->private_data = tun; memset(tun, 0, sizeof(struct tun_struct)); /* Set device type */ if( minor < TUN_TAP_MINOR ){ /* TUN device */ tun->flags |= TUN_TUN_DEV; sprintf(tun->name, "tun%d", index); } else { /* TAP device */ tun->flags |= TUN_TAP_DEV; sprintf(tun->name, "tap%d", index); } /* Initialize and register net device */ skb_queue_head_init(&tun->txq); tun->dev.name = tun->name; tun->dev.init = tun_net_init; tun->dev.base_addr = index; tun->dev.priv = tun; if( register_netdev(&tun->dev) ){ printk(KERN_ERR "%s: Can't register net device\n", tun->name); file->private_data = NULL; clear_bit(minor, tun_open_mask); kfree(tun); return -ENODEV; } MOD_INC_USE_COUNT; return 0; }
int device_is_usable(struct device *dev) { struct dm_task *dmt; struct dm_info info; const char *name, *uuid; uint64_t start, length; char *target_type = NULL; char *params, *vgname = NULL, *lvname, *layer; void *next = NULL; int r = 0; if (!(dmt = dm_task_create(DM_DEVICE_STATUS))) { log_error("Failed to allocate dm_task struct to check dev status"); return 0; } if (!dm_task_set_major_minor(dmt, MAJOR(dev->dev), MINOR(dev->dev), 1)) goto_out; if (!dm_task_run(dmt)) { log_error("Failed to get state of mapped device"); goto out; } if (!dm_task_get_info(dmt, &info)) goto_out; if (!info.exists || info.suspended) goto out; name = dm_task_get_name(dmt); uuid = dm_task_get_uuid(dmt); /* FIXME Also check for mirror block_on_error and mpath no paths */ /* For now, we exclude all mirrors */ do { next = dm_get_next_target(dmt, next, &start, &length, &target_type, ¶ms); /* Skip if target type doesn't match */ if (target_type && !strcmp(target_type, "mirror")) { log_debug("%s: Mirror device not usable.", dev_name(dev)); goto out; } } while (next); /* FIXME Also check dependencies? */ /* Check internal lvm devices */ if (uuid && !strncmp(uuid, UUID_PREFIX, sizeof(UUID_PREFIX) - 1)) { if (!(vgname = dm_strdup(name)) || !dm_split_lvm_name(NULL, NULL, &vgname, &lvname, &layer)) goto_out; if (lvname && (is_reserved_lvname(lvname) || *layer)) { log_debug("%s: Reserved internal LV device %s/%s%s%s not usable.", dev_name(dev), vgname, lvname, *layer ? "-" : "", layer); goto out; } } r = 1; out: dm_free(vgname); dm_task_destroy(dmt); return r; }
/*
 * NFSv3 LINK: create a hard link `name` in `dir` pointing at `inode`.
 * Post-op attributes for both the target inode and the directory are
 * fetched and applied to the caches.
 */
static int
nfs3_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
{
	struct nfs3_linkargs	arg = {
		.fromfh		= NFS_FH(inode),
		.tofh		= NFS_FH(dir),
		.toname		= name->name,
		.tolen		= name->len
	};
	struct nfs3_linkres	res;
	struct rpc_message msg = {
		.rpc_proc	= &nfs3_procedures[NFS3PROC_LINK],
		.rpc_argp	= &arg,
		.rpc_resp	= &res,
	};
	int status = -ENOMEM;

	dprintk("NFS call  link %s\n", name->name);
	res.fattr = nfs_alloc_fattr();
	res.dir_attr = nfs_alloc_fattr();
	if (res.fattr == NULL || res.dir_attr == NULL)
		goto out;	/* partial allocation freed below */

	status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
	/* update caches even on error: the attrs mark them invalid */
	nfs_post_op_update_inode(dir, res.dir_attr);
	nfs_post_op_update_inode(inode, res.fattr);
out:
	/* nfs_free_fattr handles NULL, so the early-exit path is safe */
	nfs_free_fattr(res.dir_attr);
	nfs_free_fattr(res.fattr);
	dprintk("NFS reply link: %d\n", status);
	return status;
}

/*
 * NFSv3 SYMLINK: create `dentry` in `dir` as a symlink whose target
 * text (of length `len`) is passed in `page`.
 */
static int
nfs3_proc_symlink(struct inode *dir, struct dentry *dentry, struct page *page,
		  unsigned int len, struct iattr *sattr)
{
	struct nfs3_createdata *data;
	int status = -ENOMEM;

	if (len > NFS3_MAXPATHLEN)
		return -ENAMETOOLONG;

	dprintk("NFS call  symlink %pd\n", dentry);

	data = nfs3_alloc_createdata();
	if (data == NULL)
		goto out;
	data->msg.rpc_proc = &nfs3_procedures[NFS3PROC_SYMLINK];
	data->arg.symlink.fromfh = NFS_FH(dir);
	data->arg.symlink.fromname = dentry->d_name.name;
	data->arg.symlink.fromlen = dentry->d_name.len;
	data->arg.symlink.pages = &page;
	data->arg.symlink.pathlen = len;
	data->arg.symlink.sattr = sattr;

	status = nfs3_do_create(dir, dentry, data);

	nfs3_free_createdata(data);
out:
	dprintk("NFS reply symlink: %d\n", status);
	return status;
}

/*
 * NFSv3 MKDIR: create directory `dentry` in `dir`, then apply the
 * default ACL derived from the pre-umask mode.
 */
static int
nfs3_proc_mkdir(struct inode *dir, struct dentry *dentry, struct iattr *sattr)
{
	struct nfs3_createdata *data;
	/* keep the caller's mode for the ACL before umask is applied */
	umode_t mode = sattr->ia_mode;
	int status = -ENOMEM;

	dprintk("NFS call  mkdir %pd\n", dentry);

	sattr->ia_mode &= ~current_umask();

	data = nfs3_alloc_createdata();
	if (data == NULL)
		goto out;

	data->msg.rpc_proc = &nfs3_procedures[NFS3PROC_MKDIR];
	data->arg.mkdir.fh = NFS_FH(dir);
	data->arg.mkdir.name = dentry->d_name.name;
	data->arg.mkdir.len = dentry->d_name.len;
	data->arg.mkdir.sattr = sattr;

	status = nfs3_do_create(dir, dentry, data);
	if (status != 0)
		goto out;

	status = nfs3_proc_set_default_acl(dir, dentry->d_inode, mode);
out:
	nfs3_free_createdata(data);
	dprintk("NFS reply mkdir: %d\n", status);
	return status;
}

/*
 * NFSv3 RMDIR: remove directory entry `name` from `dir` and refresh
 * the directory's cached attributes from the post-op reply.
 */
static int
nfs3_proc_rmdir(struct inode *dir, struct qstr *name)
{
	struct nfs_fattr	*dir_attr;
	struct nfs3_diropargs	arg = {
		.fh		= NFS_FH(dir),
		.name		= name->name,
		.len		= name->len
	};
	struct rpc_message msg = {
		.rpc_proc	= &nfs3_procedures[NFS3PROC_RMDIR],
		.rpc_argp	= &arg,
	};
	int status = -ENOMEM;

	dprintk("NFS call  rmdir %s\n", name->name);
	dir_attr = nfs_alloc_fattr();
	if (dir_attr == NULL)
		goto out;

	msg.rpc_resp = dir_attr;
	status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
	nfs_post_op_update_inode(dir, dir_attr);
	nfs_free_fattr(dir_attr);
out:
	dprintk("NFS reply rmdir: %d\n", status);
	return status;
}

/*
 * The READDIR implementation is somewhat hackish - we pass the user buffer
 * to the encode function, which installs it in the receive iovec.
 * The decode function itself doesn't perform any decoding, it just makes
 * sure the reply is syntactically correct.
 *
 * Also note that this implementation handles both plain readdir and
 * readdirplus.
 */
static int
nfs3_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
		  u64 cookie, struct page **pages, unsigned int count, int plus)
{
	struct inode		*dir = dentry->d_inode;
	__be32			*verf = NFS_I(dir)->cookieverf;
	struct nfs3_readdirargs	arg = {
		.fh		= NFS_FH(dir),
		.cookie		= cookie,
		.verf		= {verf[0], verf[1]},
		.plus		= plus,
		.count		= count,
		.pages		= pages
	};
	struct nfs3_readdirres	res = {
		.verf		= verf,
		.plus		= plus
	};
	struct rpc_message	msg = {
		.rpc_proc	= &nfs3_procedures[NFS3PROC_READDIR],
		.rpc_argp	= &arg,
		.rpc_resp	= &res,
		.rpc_cred	= cred
	};
	int status = -ENOMEM;

	/* READDIRPLUS is the same call with a different procedure number */
	if (plus)
		msg.rpc_proc = &nfs3_procedures[NFS3PROC_READDIRPLUS];

	dprintk("NFS call  readdir%s %d\n",
			plus? "plus" : "", (unsigned int) cookie);

	res.dir_attr = nfs_alloc_fattr();
	if (res.dir_attr == NULL)
		goto out;

	status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);

	/* reading a directory is treated as an atime-modifying access */
	nfs_invalidate_atime(dir);
	nfs_refresh_inode(dir, res.dir_attr);

	nfs_free_fattr(res.dir_attr);
out:
	dprintk("NFS reply readdir%s: %d\n",
			plus? "plus" : "", status);
	return status;
}

/*
 * NFSv3 MKNOD: create a special file (block/char device, fifo or
 * socket) and apply the default ACL from the pre-umask mode.
 */
static int
nfs3_proc_mknod(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
		dev_t rdev)
{
	struct nfs3_createdata *data;
	umode_t mode = sattr->ia_mode;
	int status = -ENOMEM;

	dprintk("NFS call  mknod %pd %u:%u\n", dentry,
			MAJOR(rdev), MINOR(rdev));

	sattr->ia_mode &= ~current_umask();

	data = nfs3_alloc_createdata();
	if (data == NULL)
		goto out;

	data->msg.rpc_proc = &nfs3_procedures[NFS3PROC_MKNOD];
	data->arg.mknod.fh = NFS_FH(dir);
	data->arg.mknod.name = dentry->d_name.name;
	data->arg.mknod.len = dentry->d_name.len;
	data->arg.mknod.sattr = sattr;
	data->arg.mknod.rdev = rdev;

	/* map the S_IF* file type to the on-the-wire NF3 type */
	switch (sattr->ia_mode & S_IFMT) {
	case S_IFBLK:
		data->arg.mknod.type = NF3BLK;
		break;
	case S_IFCHR:
		data->arg.mknod.type = NF3CHR;
		break;
	case S_IFIFO:
		data->arg.mknod.type = NF3FIFO;
		break;
	case S_IFSOCK:
		data->arg.mknod.type = NF3SOCK;
		break;
	default:
		status = -EINVAL;
		goto out;
	}

	status = nfs3_do_create(dir, dentry, data);
	if (status != 0)
		goto out;
	status = nfs3_proc_set_default_acl(dir, dentry->d_inode, mode);
out:
	nfs3_free_createdata(data);
	dprintk("NFS reply mknod: %d\n", status);
	return status;
}

/* NFSv3 FSSTAT: filesystem usage statistics for statfs(2). */
static int
nfs3_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
		 struct nfs_fsstat *stat)
{
	struct rpc_message msg = {
		.rpc_proc	= &nfs3_procedures[NFS3PROC_FSSTAT],
		.rpc_argp	= fhandle,
		.rpc_resp	= stat,
	};
	int	status;

	dprintk("NFS call  fsstat\n");
	nfs_fattr_init(stat->fattr);
	status = rpc_call_sync(server->client, &msg, 0);
	dprintk("NFS reply fsstat: %d\n", status);
	return status;
}

/* NFSv3 FSINFO over an explicit rpc_clnt (see nfs3_proc_fsinfo). */
static int
do_proc_fsinfo(struct rpc_clnt *client, struct nfs_fh *fhandle,
		 struct nfs_fsinfo *info)
{
	struct rpc_message msg = {
		.rpc_proc	= &nfs3_procedures[NFS3PROC_FSINFO],
		.rpc_argp	= fhandle,
		.rpc_resp	= info,
	};
	int	status;

	dprintk("NFS call  fsinfo\n");
	nfs_fattr_init(info->fattr);
	status = rpc_call_sync(client, &msg, 0);
	dprintk("NFS reply fsinfo: %d\n", status);
	return status;
}

/*
 * Bare-bones access to fsinfo: this is for nfs_get_root/nfs_get_sb via
 * nfs_create_server
 */
static int
nfs3_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
		   struct nfs_fsinfo *info)
{
	int	status;

	status = do_proc_fsinfo(server->client, fhandle, info);
	/* fall back to the client-wide rpc_clnt if the per-server one fails */
	if (status && server->nfs_client->cl_rpcclient != server->client)
		status = do_proc_fsinfo(server->nfs_client->cl_rpcclient,
				fhandle, info);
	return status;
}
static int __init pppoesta_init(void){ int err = -1; dev_t dev = 0; struct device* temp = NULL; printk(KERN_ALERT"Initializing pppoesta device.\n"); err = alloc_chrdev_region(&dev, 0, 1, PPPOESTA_DEVICE_NODE_NAME); if(err < 0) { printk(KERN_ALERT"Failed to alloc char dev region.\n"); goto fail; } pppoesta_major = MAJOR(dev); pppoesta_minor = MINOR(dev); pppoesta_dev = kmalloc(sizeof(struct pppoesta_android_dev), GFP_KERNEL); if(!pppoesta_dev) { err = -ENOMEM; printk(KERN_ALERT"Failed to alloc pppoesta_dev.\n"); goto unregister; } err = __pppoesta_setup_dev(pppoesta_dev); if(err) { printk(KERN_ALERT"Failed to setup dev: %d.\n", err); goto cleanup; } pppoesta_class = class_create(THIS_MODULE, PPPOESTA_DEVICE_CLASS_NAME); if(IS_ERR(pppoesta_class)) { err = PTR_ERR(pppoesta_class); printk(KERN_ALERT"Failed to create pppoesta class.\n"); goto destroy_cdev; } temp = device_create(pppoesta_class, NULL, dev, "%s", PPPOESTA_DEVICE_FILE_NAME); if(IS_ERR(temp)) { err = PTR_ERR(temp); printk(KERN_ALERT"Failed to create pppoesta device."); goto destroy_class; } err = device_create_file(temp, &dev_attr_val); if(err < 0) { printk(KERN_ALERT"Failed to create attribute val."); goto destroy_device; } dev_set_drvdata(temp, pppoesta_dev); printk(KERN_ALERT"Succedded to initialize pppoesta device.\n"); return 0; destroy_device: device_destroy(pppoesta_class, dev); destroy_class: class_destroy(pppoesta_class); destroy_cdev: cdev_del(&(pppoesta_dev->dev)); cleanup: kfree(pppoesta_dev); unregister: unregister_chrdev_region(MKDEV(pppoesta_major, pppoesta_minor), 1); fail: return err; }
static int module_close(struct inode * inode, struct file * file){ unsigned int dev_minor = MINOR(inode->i_rdev); sd1303_is_open = 0; printk("%s : Minor %d has been closed\n", DRIVER_NAME, dev_minor); return 0; }
int i2cdev_ioctl (struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) { struct i2c_client *client = (struct i2c_client *)file->private_data; struct i2c_rdwr_ioctl_data rdwr_arg; struct i2c_smbus_ioctl_data data_arg; union i2c_smbus_data temp; struct i2c_msg *rdwr_pa; int i,datasize,res; unsigned long funcs; #ifdef DEBUG printk("i2c-dev.o: i2c-%d ioctl, cmd: 0x%x, arg: %lx.\n", MINOR(inode->i_rdev),cmd, arg); #endif /* DEBUG */ switch ( cmd ) { case I2C_SLAVE: case I2C_SLAVE_FORCE: if ((arg > 0x3ff) || (((client->flags & I2C_M_TEN) == 0) && arg > 0x7f)) return -EINVAL; if ((cmd == I2C_SLAVE) && i2c_check_addr(client->adapter,arg)) return -EBUSY; client->addr = arg; return 0; case I2C_TENBIT: if (arg) client->flags |= I2C_M_TEN; else client->flags &= ~I2C_M_TEN; return 0; case I2C_FUNCS: funcs = i2c_get_functionality(client->adapter); return (copy_to_user((unsigned long *)arg,&funcs, sizeof(unsigned long)))?-EFAULT:0; case I2C_RDWR: if (copy_from_user(&rdwr_arg, (struct i2c_rdwr_ioctl_data *)arg, sizeof(rdwr_arg))) return -EFAULT; rdwr_pa = (struct i2c_msg *) kmalloc(rdwr_arg.nmsgs * sizeof(struct i2c_msg), GFP_KERNEL); if (rdwr_pa == NULL) return -ENOMEM; res = 0; for( i=0; i<rdwr_arg.nmsgs; i++ ) { if(copy_from_user(&(rdwr_pa[i]), &(rdwr_arg.msgs[i]), sizeof(rdwr_pa[i]))) { res = -EFAULT; break; } rdwr_pa[i].buf = kmalloc(rdwr_pa[i].len, GFP_KERNEL); if(rdwr_pa[i].buf == NULL) { res = -ENOMEM; break; } if(copy_from_user(rdwr_pa[i].buf, rdwr_arg.msgs[i].buf, rdwr_pa[i].len)) { kfree(rdwr_pa[i].buf); res = -EFAULT; break; } } if (!res) { res = i2c_transfer(client->adapter, rdwr_pa, rdwr_arg.nmsgs); } while(i-- > 0) { if( res>=0 && (rdwr_pa[i].flags & I2C_M_RD)) { if(copy_to_user( rdwr_arg.msgs[i].buf, rdwr_pa[i].buf, rdwr_pa[i].len)) { res = -EFAULT; } } kfree(rdwr_pa[i].buf); } kfree(rdwr_pa); return res; case I2C_SMBUS: if (copy_from_user(&data_arg, (struct i2c_smbus_ioctl_data *) arg, sizeof(struct i2c_smbus_ioctl_data))) return -EFAULT; 
if ((data_arg.size != I2C_SMBUS_BYTE) && (data_arg.size != I2C_SMBUS_QUICK) && (data_arg.size != I2C_SMBUS_BYTE_DATA) && (data_arg.size != I2C_SMBUS_WORD_DATA) && (data_arg.size != I2C_SMBUS_PROC_CALL) && (data_arg.size != I2C_SMBUS_BLOCK_DATA) && (data_arg.size != I2C_SMBUS_I2C_BLOCK_DATA)) { #ifdef DEBUG printk("i2c-dev.o: size out of range (%x) in ioctl I2C_SMBUS.\n", data_arg.size); #endif return -EINVAL; } /* Note that I2C_SMBUS_READ and I2C_SMBUS_WRITE are 0 and 1, so the check is valid if size==I2C_SMBUS_QUICK too. */ if ((data_arg.read_write != I2C_SMBUS_READ) && (data_arg.read_write != I2C_SMBUS_WRITE)) { #ifdef DEBUG printk("i2c-dev.o: read_write out of range (%x) in ioctl I2C_SMBUS.\n", data_arg.read_write); #endif return -EINVAL; } /* Note that command values are always valid! */ if ((data_arg.size == I2C_SMBUS_QUICK) || ((data_arg.size == I2C_SMBUS_BYTE) && (data_arg.read_write == I2C_SMBUS_WRITE))) /* These are special: we do not use data */ return i2c_smbus_xfer(client->adapter, client->addr, client->flags, data_arg.read_write, data_arg.command, data_arg.size, NULL); if (data_arg.data == NULL) { #ifdef DEBUG printk("i2c-dev.o: data is NULL pointer in ioctl I2C_SMBUS.\n"); #endif return -EINVAL; } if ((data_arg.size == I2C_SMBUS_BYTE_DATA) || (data_arg.size == I2C_SMBUS_BYTE)) datasize = sizeof(data_arg.data->byte); else if ((data_arg.size == I2C_SMBUS_WORD_DATA) || (data_arg.size == I2C_SMBUS_PROC_CALL)) datasize = sizeof(data_arg.data->word); else /* size == I2C_SMBUS_BLOCK_DATA */ datasize = sizeof(data_arg.data->block); if ((data_arg.size == I2C_SMBUS_PROC_CALL) || (data_arg.read_write == I2C_SMBUS_WRITE)) { if (copy_from_user(&temp, data_arg.data, datasize)) return -EFAULT; } res = i2c_smbus_xfer(client->adapter,client->addr,client->flags, data_arg.read_write, data_arg.command,data_arg.size,&temp); if (! 
res && ((data_arg.size == I2C_SMBUS_PROC_CALL) || (data_arg.read_write == I2C_SMBUS_READ))) { if (copy_to_user(data_arg.data, &temp, datasize)) return -EFAULT; } return res; default: return i2c_control(client,cmd,arg); } return 0; }
static int motor_add_one(unsigned int id, unsigned int *params) { int status, err; struct cdev *motor_cdev; struct platform_device *pdev; struct gpio_pwm_platform_data pdata; if ( mot_nump[id] < 4 ) { printk(KERN_INFO "stepper: nothing to register for id: %d.\n", id); return 0; } g_enable[id] = params[1]; g_dir[id] = params[2]; g_step[id] = params[3]; g_lpwr[id] = params[4]; polarity[id] = params[5]; /* sanity check */ if ( !( g_enable[id] && g_dir[id] && g_step[id])) { printk(KERN_INFO "stepper: missing parameters, exit driver.\n"); goto err_para; } /* request and set pwm channel and gpio pins */ pdev = platform_device_alloc("gpio_pwm", g_step[id]); if (!pdev) { err = -ENOMEM; goto err_para; } pdata.gpio = g_step[id]; err = platform_device_add_data(pdev, &pdata, sizeof(pdata)); if (err) goto err; err = platform_device_add(pdev); if (err) goto err; pwmc[id] = pwm_request("gpio_pwm", g_step[id], "stepper"); if (pwmc[id] != NULL) { goto err_pwm; } motor_pwm_set (pwmc[id], 0); /* set default pwm pulse */ if ( gpio_request(g_enable[id], "motor-enable") < 0 ) { goto err_gpioenable; } gpio_direction_output(g_enable[id] ,0); if ( gpio_request(g_dir[id], "motor-ccw") < 0) { goto err_gpiodir; } gpio_direction_output(g_dir[id] ,0); if (g_lpwr[id] != 0) { if ( gpio_request(g_lpwr[id], "motor-lowpwr") < 0 ) { goto err_gpiolwr; } gpio_direction_output(g_lpwr[id] ,0); } /* set to home */ steps[id] = 0; /* alloc a new device number (major: dynamic, minor: 0) */ status = alloc_chrdev_region(&motor_devno, 0, 1, "motor"); /* create a new char device */ motor_cdev = cdev_alloc(); if(motor_cdev == NULL) { status=-ENOMEM; goto err_dev; } /*save the cdev for id's */ mot_map[id] = (int) motor_cdev; motor_cdev->owner = THIS_MODULE; motor_cdev->ops = &motor_fops; status = cdev_add(motor_cdev, motor_devno, 1); if(status){ goto err_dev; } device_create(motor_class, NULL, motor_devno, NULL, "motor%d", params[0]); printk(KERN_INFO "stepper: motor%d registred on major: %u; minor: %u\n", \ 
params[0], MAJOR(motor_devno), MINOR(motor_devno)); return 0; err: printk(KERN_INFO "stepper: err\n"); err_dev: printk(KERN_INFO "stepper: err_dev\n"); err_gpiolwr: printk(KERN_INFO "stepper: err_gpiolwr\n"); err_gpiodir: printk(KERN_INFO "stepper: err_gpiodir\n"); err_gpioenable: printk(KERN_INFO "stepper: err_gpioenable\n"); err_gpiostep: printk(KERN_INFO "stepper: err_gpiostep "); err_pwm: printk(KERN_INFO "stepper: err_pwm\n"); err_para: printk(KERN_INFO "stepper: Error management not yet implemented. \ Please reboot your board %d\n",g_step[id]); return -1; }
/*
 * Top-level ioctl dispatcher for tty devices (pseudo-major 4).
 *
 * Resolves the target tty from the file's device number, then dispatches
 * on cmd.  For pty masters, termios-related operations are redirected to
 * the slave side (termios_tty / termios_dev), while queue/flow operations
 * act on the device actually opened (tty).
 *
 * Returns 0 on success or a negative errno.
 */
int tty_ioctl(struct inode * inode, struct file * file, unsigned int cmd, unsigned long arg)
{
	struct tty_struct * tty;
	struct tty_struct * other_tty;	/* the peer end, for pty pairs; else NULL */
	struct tty_struct * termios_tty;	/* where termios ops are applied */
	int pgrp;
	int dev;
	int termios_dev;
	int retval;

	if (MAJOR(file->f_rdev) != 4) {
		printk("tty_ioctl: tty pseudo-major != 4\n");
		return -EINVAL;
	}
	dev = MINOR(file->f_rdev);
	tty = TTY_TABLE(dev);
	if (!tty)
		return -EINVAL;
	if (IS_A_PTY(dev))
		other_tty = tty_table[PTY_OTHER(dev)];
	else
		other_tty = NULL;
	/* By default termios ops target the opened device itself ... */
	termios_tty = tty;
	termios_dev = dev;
	/* ... but on a pty master they are redirected to the slave side. */
	if (IS_A_PTY_MASTER(dev)) {
		termios_tty = other_tty;
		termios_dev = PTY_OTHER(dev);
	}
	switch (cmd) {
		case TCGETS:
			return get_termios(termios_tty,(struct termios *) arg);
		/*
		 * TCSETSF/TCSETSW/TCSETS deliberately cascade: F flushes
		 * input first, W waits for output to drain, then all three
		 * set the termios.
		 */
		case TCSETSF:
			flush_input(tty);
		/* fallthrough */
		case TCSETSW:
			wait_until_sent(tty);
		/* fallthrough */
		case TCSETS:
			return set_termios(termios_tty,(struct termios *) arg, termios_dev);
		case TCGETA:
			return get_termio(termios_tty,(struct termio *) arg);
		/* Same cascade as above, for the older struct termio variant. */
		case TCSETAF:
			flush_input(tty);
		/* fallthrough */
		case TCSETAW:
			wait_until_sent(tty);
		/* fallthrough */
		case TCSETA:
			return set_termio(termios_tty,(struct termio *) arg, termios_dev);
		case TCXONC:
			/* Software flow control: suspend/restart output or send STOP/START. */
			switch (arg) {
			case TCOOFF:
				tty->stopped = 1;
				TTY_WRITE_FLUSH(tty);
				return 0;
			case TCOON:
				tty->stopped = 0;
				TTY_WRITE_FLUSH(tty);
				return 0;
			case TCIOFF:
				if (STOP_CHAR(tty))
					put_tty_queue(STOP_CHAR(tty), &tty->write_q);
				return 0;
			case TCION:
				if (START_CHAR(tty))
					put_tty_queue(START_CHAR(tty), &tty->write_q);
				return 0;
			}
			return -EINVAL; /* not implemented */
		case TCFLSH:
			/* arg selects: 0 = input, 1 = output, 2 = both. */
			if (arg==0)
				flush_input(tty);
			else if (arg==1)
				flush_output(tty);
			else if (arg==2) {
				flush_input(tty);
				flush_output(tty);
			} else
				return -EINVAL;
			return 0;
		case TIOCEXCL:
			return -EINVAL; /* not implemented */
		case TIOCNXCL:
			return -EINVAL; /* not implemented */
		case TIOCSCTTY:
			/*
			 * Acquire this tty as the controlling terminal:
			 * allowed for a session leader without a tty when
			 * the tty has no session, or forced by root (arg==1).
			 */
			if ((current->leader &&
			     current->tty < 0 &&
			     tty->session == 0) ||
			    (arg == 1 && suser())) {
				current->tty = dev;
				tty->session = current->session;
				tty->pgrp = current->pgrp;
				return 0;
			}
			return -EPERM;
		case TIOCGPGRP:
			/* Copy the foreground process group out to userspace. */
			retval = verify_area(VERIFY_WRITE, (void *) arg,4);
			if (!retval)
				put_fs_long(termios_tty->pgrp,(unsigned long *) arg);
			return retval;
		case TIOCSPGRP:
			/* Only the controlling tty of the caller's session may be changed. */
			if ((current->tty < 0) ||
			    (current->tty != termios_dev) ||
			    (termios_tty->session != current->session))
				return -ENOTTY;
			pgrp=get_fs_long((unsigned long *) arg);
			if (pgrp < 0)
				return -EINVAL;
			if (session_of_pgrp(pgrp) != current->session)
				return -EPERM;
			termios_tty->pgrp = pgrp;
			return 0;
		case TIOCOUTQ:
			/* Bytes still queued for output. */
			retval = verify_area(VERIFY_WRITE, (void *) arg,4);
			if (!retval)
				put_fs_long(CHARS(&tty->write_q), (unsigned long *) arg);
			return retval;
		case TIOCINQ:
			/*
			 * Bytes available to read; in canonical mode with no
			 * complete line yet (secondary.data == 0), report 0.
			 */
			retval = verify_area(VERIFY_WRITE, (void *) arg,4);
			if (retval)
				return retval;
			if (L_CANON(tty) && !tty->secondary.data)
				put_fs_long(0, (unsigned long *) arg);
			else
				put_fs_long(CHARS(&tty->secondary), (unsigned long *) arg);
			return 0;
		case TIOCSTI:
			return -EINVAL; /* not implemented */
		case TIOCGWINSZ:
			return get_window_size(tty,(struct winsize *) arg);
		case TIOCSWINSZ:
			/* On a pty master, propagate the size to the slave too. */
			if (IS_A_PTY_MASTER(dev))
				set_window_size(other_tty,(struct winsize *) arg);
			return set_window_size(tty,(struct winsize *) arg);
		case TIOCGSOFTCAR:
			return -EINVAL; /* not implemented */
		case TIOCSSOFTCAR:
			return -EINVAL; /* not implemented */
		case TIOCLINUX:
			/* Linux-private subcommands, selected by the first byte of arg. */
			switch (get_fs_byte((char *)arg)) {
				case 0:
					return do_screendump(arg);
				case 1:
					return do_get_ps_info(arg);
				default:
					return -EINVAL;
			}
		case TIOCCONS:
			/*
			 * Redirect console output: on the console itself this
			 * clears any redirection (root only); otherwise attach
			 * the redirect to a pty end (root only, one at a time).
			 */
			if (IS_A_CONSOLE(dev)) {
				if (!suser())
					return -EPERM;
				redirect = NULL;
				return 0;
			}
			if (redirect)
				return -EBUSY;
			if (!suser())
				return -EPERM;
			if (IS_A_PTY_MASTER(dev))
				redirect = other_tty;
			else if (IS_A_PTY_SLAVE(dev))
				redirect = tty;
			else
				return -EINVAL;
			return 0;
		case FIONBIO:
			/* Toggle non-blocking mode on the open file. */
			arg = get_fs_long((unsigned long *) arg);
			if (arg)
				file->f_flags |= O_NONBLOCK;
			else
				file->f_flags &= ~O_NONBLOCK;
			return 0;
		case TIOCNOTTY:
			/*
			 * Detach from the controlling tty; a session leader
			 * also hangs up the foreground process group.
			 */
			if (MINOR(file->f_rdev) != current->tty)
				return -EINVAL;
			current->tty = -1;
			if (current->leader) {
				if (tty->pgrp > 0)
					kill_pg(tty->pgrp, SIGHUP, 0);
				tty->pgrp = -1;
				tty->session = 0;
			}
			return 0;
		case TIOCGETD:
			/* Report the current line discipline number. */
			retval = verify_area(VERIFY_WRITE, (void *) arg,4);
			if (!retval)
				put_fs_long(tty->disc, (unsigned long *) arg);
			return retval;
		case TIOCSETD:
			arg = get_fs_long((unsigned long *) arg);
			return tty_set_ldisc(tty, arg);
		case TIOCPKT:
		{
			/* Enable/disable packet mode; pty masters only. */
			int on;
			if (!IS_A_PTY_MASTER(dev))
				return -EINVAL;
			retval = verify_area(VERIFY_READ, (unsigned long *)arg, sizeof (int));
			if (retval)
				return retval;
			on=get_fs_long ((unsigned long *)arg);
			if (on )
				tty->packet = 1;
			else
				tty->packet = 0;
			return (0);
		}
		default:
			/*
			 * Unknown cmd: give the driver's own ioctl handler a
			 * chance first, then the line discipline's.
			 */
			if (tty->ioctl) {
				retval = (tty->ioctl)(tty, file, cmd, arg);
				if (retval != -EINVAL)
					return retval;
			}
			if (ldiscs[tty->disc].ioctl) {
				retval = (ldiscs[tty->disc].ioctl)
					(tty, file, cmd, arg);
				return retval;
			}
			return -EINVAL;
	}
}
/*
 * Emit one /proc/PID/maps (or smaps header) line for a VMA:
 *   start-end perms offset major:minor inode [name]
 * followed by the mapping's name, when one applies (file path, [heap],
 * [stack], [vdso], arch- or driver-provided name, or an anon-vma name).
 *
 * The exact column layout is ABI: userspace parses these lines, so the
 * format strings and padding below must not change.
 */
static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
{
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	struct proc_maps_private *priv = m->private;
	vm_flags_t flags = vma->vm_flags;
	unsigned long ino = 0;
	unsigned long long pgoff = 0;
	unsigned long start, end;
	dev_t dev = 0;
	const char *name = NULL;

	/* File-backed mapping: report the backing inode's device/inode/offset. */
	if (file) {
		struct inode *inode = file_inode(vma->vm_file);
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
	}

	/* We don't show the stack guard page in /proc/maps */
	start = vma->vm_start;
	if (stack_guard_page_start(vma, start))
		start += PAGE_SIZE;
	end = vma->vm_end;
	if (stack_guard_page_end(vma, end))
		end -= PAGE_SIZE;

	/* Fixed field width so the name column lines up across entries. */
	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
			start,
			end,
			flags & VM_READ ? 'r' : '-',
			flags & VM_WRITE ? 'w' : '-',
			flags & VM_EXEC ? 'x' : '-',
			flags & VM_MAYSHARE ? 's' : 'p',
			pgoff,
			MAJOR(dev), MINOR(dev), ino);

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (file) {
		seq_pad(m, ' ');
		seq_path(m, &file->f_path, "\n");
		goto done;
	}

	/* Driver-supplied name (vm_ops->name) takes priority over the rest. */
	if (vma->vm_ops && vma->vm_ops->name) {
		name = vma->vm_ops->name(vma);
		if (name)
			goto done;
	}

	name = arch_vma_name(vma);
	if (!name) {
		pid_t tid;

		/* No mm: a kernel-installed special mapping such as the vdso. */
		if (!mm) {
			name = "[vdso]";
			goto done;
		}

		if (vma->vm_start <= mm->brk &&
		    vma->vm_end >= mm->start_brk) {
			name = "[heap]";
			goto done;
		}

		tid = pid_of_stack(priv, vma, is_pid);
		if (tid != 0) {
			/*
			 * Thread stack in /proc/PID/task/TID/maps or
			 * the main process stack.
			 */
			if (!is_pid || (vma->vm_start <= mm->start_stack &&
			    vma->vm_end >= mm->start_stack)) {
				name = "[stack]";
			} else {
				/* Thread stack in /proc/PID/maps */
				seq_pad(m, ' ');
				seq_printf(m, "[stack:%d]", tid);
			}
			goto done;
		}

		/* Userspace-assigned anon-vma name (printed directly, no label). */
		if (vma_get_anon_name(vma)) {
			seq_pad(m, ' ');
			seq_print_vma_name(m, vma);
		}
	}

done:
	if (name) {
		seq_pad(m, ' ');
		seq_puts(m, name);
	}
	seq_putc(m, '\n');
}