/*
 * proc_bus_pci_write() - write to a PCI device's configuration space
 * through its /proc/bus/pci entry.
 *
 * The transfer is split into byte/word/dword config accesses so every
 * write is naturally aligned: a leading byte and word bring the offset
 * up to 4-byte alignment, dwords do the bulk, and a trailing word/byte
 * mop up the remainder.
 *
 * Returns the number of bytes written, 0 at/past the end of config
 * space, or -EINVAL when the user buffer is not readable.
 */
static ssize_t proc_bus_pci_write(struct file *file, const char __user *buf,
				  size_t nbytes, loff_t *ppos)
{
	struct inode *ino = file_inode(file);
	struct pci_dev *dev = PDE_DATA(ino);	/* pci_dev stashed in the proc entry */
	int pos = *ppos;
	int size = dev->cfg_size;
	int cnt;

	/* Clamp the request so [pos, pos+nbytes) stays inside config space. */
	if (pos >= size)
		return 0;
	if (nbytes >= size)
		nbytes = size;
	if (pos + nbytes > size)
		nbytes = size - pos;
	cnt = nbytes;

	if (!access_ok(VERIFY_READ, buf, cnt))
		return -EINVAL;

	/* Keep the device powered up while poking its config space. */
	pci_config_pm_runtime_get(dev);

	/* Leading unaligned byte. */
	if ((pos & 1) && cnt) {
		unsigned char val;
		__get_user(val, buf);
		pci_user_write_config_byte(dev, pos, val);
		buf++;
		pos++;
		cnt--;
	}

	/* Leading unaligned word (only when at least 3 bytes remain). */
	if ((pos & 3) && cnt > 2) {
		__le16 val;
		__get_user(val, (__le16 __user *) buf);
		pci_user_write_config_word(dev, pos, le16_to_cpu(val));
		buf += 2;
		pos += 2;
		cnt -= 2;
	}

	/* Aligned bulk transfer, one dword at a time. */
	while (cnt >= 4) {
		__le32 val;
		__get_user(val, (__le32 __user *) buf);
		pci_user_write_config_dword(dev, pos, le32_to_cpu(val));
		buf += 4;
		pos += 4;
		cnt -= 4;
	}

	/* Trailing word, then trailing byte. */
	if (cnt >= 2) {
		__le16 val;
		__get_user(val, (__le16 __user *) buf);
		pci_user_write_config_word(dev, pos, le16_to_cpu(val));
		buf += 2;
		pos += 2;
		cnt -= 2;
	}

	if (cnt) {
		unsigned char val;
		__get_user(val, buf);
		pci_user_write_config_byte(dev, pos, val);
		buf++;
		pos++;
		cnt--;
	}

	pci_config_pm_runtime_put(dev);

	*ppos = pos;
	i_size_write(ino, dev->cfg_size);
	return nbytes;
}
/*
 * sycamore_write() - copy user data into the device's memory-mapped window.
 *
 * Bounces the user buffer through a GFP_DMA kernel buffer and pushes it to
 * the I/O window with memcpy_toio(), then advances the per-open offset.
 *
 * Fixes over the previous version:
 *  - dev->current_resource was used to index base_size[] *before* the
 *    "< 0" validity check; validate first.
 *  - kmalloc() result was compared with "<= 0" (pointer vs. integer) and
 *    failure returned 0, which makes userspace loop forever; return -ENOMEM.
 *  - access_ok() failure also returned 0; return -EFAULT.
 *  - the old 4-byte get_user() loop silently dropped the last 1-3 bytes of
 *    any transfer not a multiple of 4 while still advancing the offset by
 *    the full amount; use copy_from_user() for the whole range instead.
 *
 * Returns the number of bytes written, 0 at end of the window, or a
 * negative errno.
 */
ssize_t sycamore_write(struct file *filp, const char *buf, size_t count, loff_t *offset_out)
{
	struct sycamore_dev *dev;
	unsigned long current_address;
	unsigned long actual_count;
	unsigned long offset;
	int resource_num;
	unsigned long size;
	int *kern_buf;

	dev = filp->private_data;

	/* Validate the resource index before using it to look anything up. */
	if (dev->current_resource < 0)
		return -ENODEV;

	resource_num = dev->current_resource;
	size = dev->base_size[resource_num];
	offset = dev->offset;
	current_address = dev->page_addr + dev->base_page_offset + dev->offset;

	/* At the end of the window: nothing more can be written. */
	if (offset == size)
		return 0;

	/* Trim the count to fit within the window. */
	if ((offset + count) > size)
		actual_count = size - offset;
	else
		actual_count = count;

	if (!access_ok(VERIFY_READ, buf, actual_count))
		return -EFAULT;

	kern_buf = kmalloc(actual_count, GFP_KERNEL | GFP_DMA);
	if (!kern_buf)
		return -ENOMEM;

	/* Copy the whole range, including any non-multiple-of-4 tail. */
	if (copy_from_user(kern_buf, buf, actual_count)) {
		kfree(kern_buf);
		return -EFAULT;
	}

	memcpy_toio((void __iomem *)current_address, kern_buf, actual_count);
	kfree(kern_buf);

	dev->offset += actual_count;
	*offset_out = dev->offset;

	return actual_count;
}
static void ad1889_update_ptr(ad1889_dev_t *dev, int wake) { ad1889_state_t *state; struct dmabuf *dmabuf; unsigned long hwptr; int diff; /* check ADC first */ state = &dev->adc_state; dmabuf = &state->dmabuf; if (dmabuf->enable & ADC_RUNNING) { hwptr = ad1889_get_dma_addr(state); diff = (dmabuf->dmasize + hwptr - dmabuf->hwptr) % dmabuf->dmasize; dmabuf->hwptr = hwptr; dmabuf->total_bytes += diff; dmabuf->count += diff; if (dmabuf->count > dmabuf->dmasize) dmabuf->count = dmabuf->dmasize; if (dmabuf->mapped) { if (wake & dmabuf->count >= dmabuf->fragsize) wake_up(&dmabuf->wait); } else { if (wake & dmabuf->count > 0) wake_up(&dmabuf->wait); } } /* check DAC */ state = &dev->wav_state; dmabuf = &state->dmabuf; if (dmabuf->enable & DAC_RUNNING) { XXX } #endif /************************* /dev/dsp interfaces ************************* */ static ssize_t ad1889_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) { return 0; } static ssize_t ad1889_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { ad1889_dev_t *dev = (ad1889_dev_t *)file->private_data; ad1889_state_t *state = &dev->state[AD_WAV_STATE]; volatile struct dmabuf *dmabuf = &state->dmabuf; ssize_t ret = 0; DECLARE_WAITQUEUE(wait, current); down(&state->sem); #if 0 if (dmabuf->mapped) { ret = -ENXIO; goto err1; } #endif if (!access_ok(VERIFY_READ, buffer, count)) { ret = -EFAULT; goto err1; } add_wait_queue(&state->dmabuf.wait, &wait); /* start filling dma buffer.... 
*/ while (count > 0) { long rem; long cnt = count; unsigned long flags; for (;;) { long used_bytes; long timeout; /* max time for DMA in jiffies */ /* buffer is full if wr catches up to rd */ spin_lock_irqsave(&state->card->lock, flags); used_bytes = dmabuf->wr_ptr - dmabuf->rd_ptr; timeout = (dmabuf->dma_len * HZ) / dmabuf->rate; spin_unlock_irqrestore(&state->card->lock, flags); /* adjust for buffer wrap around */ used_bytes = (used_bytes + DMA_SIZE) & (DMA_SIZE - 1); /* If at least one page unused */ if (used_bytes < (DMA_SIZE - 0x1000)) break; /* dma buffer full */ if (file->f_flags & O_NONBLOCK) { ret = -EAGAIN; goto err2; } set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(timeout + 1); if (signal_pending(current)) { ret = -ERESTARTSYS; goto err2; } } /* watch out for wrapping around static buffer */ spin_lock_irqsave(&state->card->lock, flags); rem = DMA_SIZE - dmabuf->wr_ptr; if (cnt > rem) cnt = rem; rem = dmabuf->wr_ptr; /* update dma pointers */ dmabuf->wr_ptr += cnt; dmabuf->wr_ptr &= DMA_SIZE - 1; /* wrap ptr if necessary */ spin_unlock_irqrestore(&state->card->lock, flags); /* transfer unwrapped chunk */ if (copy_from_user(dmabuf->rawbuf + rem, buffer, cnt)) { ret = -EFAULT; goto err2; } DBG("Writing 0x%lx bytes to +0x%lx\n", cnt, rem); /* update counters */ count -= cnt; buffer += cnt; ret += cnt; /* we have something to play - go play it! */ ad1889_trigger_playback(dev); } err2: remove_wait_queue(&state->dmabuf.wait, &wait); err1: up(&state->sem); return ret; }
long ia64_rt_sigreturn (struct sigscratch *scr) { extern char ia64_strace_leave_kernel, ia64_leave_kernel; struct sigcontext *sc; struct siginfo si; sigset_t set; long retval; sc = &((struct sigframe *) (scr->pt.r12 + 16))->sc; /* * When we return to the previously executing context, r8 and r10 have already * been setup the way we want them. Indeed, if the signal wasn't delivered while * in a system call, we must not touch r8 or r10 as otherwise user-level state * could be corrupted. */ retval = (long) &ia64_leave_kernel; if (current->ptrace & PT_TRACESYS) /* * strace expects to be notified after sigreturn returns even though the * context to which we return may not be in the middle of a syscall. * Thus, the return-value that strace displays for sigreturn is * meaningless. */ retval = (long) &ia64_strace_leave_kernel; if (!access_ok(VERIFY_READ, sc, sizeof(*sc))) goto give_sigsegv; if (GET_SIGSET(&set, &sc->sc_mask)) goto give_sigsegv; sigdelsetmask(&set, ~_BLOCKABLE); spin_lock_irq(¤t->sigmask_lock); { current->blocked = set; recalc_sigpending(current); } spin_unlock_irq(¤t->sigmask_lock); if (restore_sigcontext(sc, scr)) goto give_sigsegv; #if DEBUG_SIG printk("SIG return (%s:%d): sp=%lx ip=%lx\n", current->comm, current->pid, scr->pt.r12, scr->pt.cr_iip); #endif /* * It is more difficult to avoid calling this function than to * call it and ignore errors. */ do_sigaltstack(&sc->sc_stack, 0, scr->pt.r12); return retval; give_sigsegv: si.si_signo = SIGSEGV; si.si_errno = 0; si.si_code = SI_KERNEL; si.si_pid = current->pid; si.si_uid = current->uid; si.si_addr = sc; force_sig_info(SIGSEGV, &si, current); return retval; }
/*
 * proc_dodebug() - sysctl handler for the sunrpc debug flags.
 *
 * On write: skips leading whitespace, parses an unsigned decimal value
 * from the user buffer and stores it into table->data; writing to
 * "rpc_debug" additionally dumps the RPC task list.  On read: formats
 * the current value (plus a trailing newline when room allows) into the
 * user buffer.
 *
 * "left" tracks how many of the caller's bytes remain unconsumed; the
 * difference is reported back through *lenp and *ppos.
 */
static int proc_dodebug(ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos)
{
	char tmpbuf[20], c, *s;
	char __user *p;
	unsigned int value;
	size_t left, len;

	/* A re-read past offset 0, or a zero-length request: nothing to do. */
	if ((*ppos && !write) || !*lenp) {
		*lenp = 0;
		return 0;
	}

	left = *lenp;

	if (write) {
		if (!access_ok(VERIFY_READ, buffer, left))
			return -EFAULT;
		p = buffer;
		/* Skip leading whitespace in the user buffer. */
		while (left && __get_user(c, p) >= 0 && isspace(c))
			left--, p++;
		if (!left)
			goto done;

		/* Value must fit in tmpbuf including the terminator. */
		if (left > sizeof(tmpbuf) - 1)
			return -EINVAL;
		if (copy_from_user(tmpbuf, p, left))
			return -EFAULT;
		tmpbuf[left] = '\0';

		/* Parse an unsigned decimal number. */
		for (s = tmpbuf, value = 0; '0' <= *s && *s <= '9'; s++, left--)
			value = 10 * value + (*s - '0');
		/* Only whitespace may follow the digits. */
		if (*s && !isspace(*s))
			return -EINVAL;
		while (left && isspace(*s))
			left--, s++;
		*(unsigned int *) table->data = value;
		/* Display the RPC tasks on writing to rpc_debug */
		if (strcmp(table->procname, "rpc_debug") == 0)
			rpc_show_tasks();
	} else {
		if (!access_ok(VERIFY_WRITE, buffer, left))
			return -EFAULT;
		len = sprintf(tmpbuf, "%d", *(unsigned int *) table->data);
		if (len > left)
			len = left;
		if (__copy_to_user(buffer, tmpbuf, len))
			return -EFAULT;
		/* Append a newline when there is room for it. */
		if ((left -= len) > 0) {
			if (put_user('\n', (char __user *)buffer + len))
				return -EFAULT;
			left--;
		}
	}

done:
	/* Report how much of the buffer was actually consumed/produced. */
	*lenp -= left;
	*ppos += *lenp;
	return 0;
}
/*
 * put_compat_timespec() - copy a native timespec into a compat (32-bit
 * layout) timespec in user space.
 *
 * Returns 0 on success or -EFAULT when the destination is not writable
 * or either field copy faults.
 */
int put_compat_timespec(const struct timespec *ts, struct compat_timespec __user *cts)
{
	if (!access_ok(VERIFY_WRITE, cts, sizeof(*cts)))
		return -EFAULT;
	if (__put_user(ts->tv_sec, &cts->tv_sec))
		return -EFAULT;
	if (__put_user(ts->tv_nsec, &cts->tv_nsec))
		return -EFAULT;
	return 0;
}
//static int adxl345_ioctl(struct inode *inode, struct file *file, unsigned int cmd, // unsigned long arg) static long tpd_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { //struct i2c_client *client = (struct i2c_client*)file->private_data; //struct adxl345_i2c_data *obj = (struct adxl345_i2c_data*)i2c_get_clientdata(client); //char strbuf[256]; void __user *data; long err = 0; if(_IOC_DIR(cmd) & _IOC_READ) { err = !access_ok(VERIFY_WRITE, (void __user *)arg, _IOC_SIZE(cmd)); } else if(_IOC_DIR(cmd) & _IOC_WRITE) { err = !access_ok(VERIFY_READ, (void __user *)arg, _IOC_SIZE(cmd)); } if(err) { printk("tpd: access error: %08X, (%2d, %2d)\n", cmd, _IOC_DIR(cmd), _IOC_SIZE(cmd)); return -EFAULT; } switch(cmd) { case TPD_GET_VELOCITY_CUSTOM_X: data = (void __user *) arg; if(data == NULL) { err = -EINVAL; break; } if(copy_to_user(data, &g_v_magnify_x, sizeof(g_v_magnify_x))) { err = -EFAULT; break; } break; case TPD_GET_VELOCITY_CUSTOM_Y: data = (void __user *) arg; if(data == NULL) { err = -EINVAL; break; } if(copy_to_user(data, &g_v_magnify_y, sizeof(g_v_magnify_y))) { err = -EFAULT; break; } break; default: printk("tpd: unknown IOCTL: 0x%08x\n", cmd); err = -ENOIOCTLCMD; break; } return err; }
/*
 * setup_rt_frame() - build an rt signal frame on the user stack (SuperH).
 *
 * Writes siginfo, a ucontext (flags, link, altstack description, machine
 * context, and the saved signal mask) into the frame, arranges the return
 * path (SA_RESTORER stub, vDSO sigreturn, or an inline trampoline in
 * frame->retcode), and finally points the registers at the handler.
 *
 * Returns 0 on success; on any fault the task is forcibly sent SIGSEGV
 * and -EFAULT is returned.
 */
static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;
	int err = 0;
	int signal;

	frame = get_sigframe(ka, regs->regs[15], sizeof(*frame));

	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		goto give_sigsegv;

	/* Translate the signal number through the exec domain, if any. */
	signal = current_thread_info()->exec_domain && current_thread_info()->exec_domain->signal_invmap && sig < 32 ? current_thread_info()->exec_domain->signal_invmap[sig] : sig;

	err |= copy_siginfo_to_user(&frame->info, info);

	/* Create the ucontext. */
	err |= __put_user(0, &frame->uc.uc_flags);
	err |= __put_user(NULL, &frame->uc.uc_link);
	err |= __put_user((void *)current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
	err |= __put_user(sas_ss_flags(regs->regs[15]), &frame->uc.uc_stack.ss_flags);
	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
	err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0]);
	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));

	/* Set up to return from userspace. If provided, use a stub already in userspace. */
	if (ka->sa.sa_flags & SA_RESTORER) {
		regs->pr = (unsigned long) ka->sa.sa_restorer;
#ifdef CONFIG_VSYSCALL
	} else if (likely(current->mm->context.vdso)) {
		regs->pr = VDSO_SYM(&__kernel_rt_sigreturn);
#endif
	} else {
		/* Generate return code (system call to rt_sigreturn) */
		err |= __put_user(MOVW(7), &frame->retcode[0]);
		err |= __put_user(TRAP_NOARG, &frame->retcode[1]);
		err |= __put_user(OR_R0_R0, &frame->retcode[2]);
		err |= __put_user(OR_R0_R0, &frame->retcode[3]);
		err |= __put_user(OR_R0_R0, &frame->retcode[4]);
		err |= __put_user(OR_R0_R0, &frame->retcode[5]);
		err |= __put_user(OR_R0_R0, &frame->retcode[6]);
		err |= __put_user((__NR_rt_sigreturn), &frame->retcode[7]);
		regs->pr = (unsigned long) frame->retcode;
	}

	if (err)
		goto give_sigsegv;

	/* Set up registers for signal handler */
	regs->regs[15] = (unsigned long) frame;
	regs->regs[4] = signal;	/* Arg for signal handler */
	regs->regs[5] = (unsigned long) &frame->info;
	regs->regs[6] = (unsigned long) &frame->uc;

	if (current->personality & FDPIC_FUNCPTRS) {
		/* FDPIC: the handler "pointer" is a function descriptor. */
		struct fdpic_func_descriptor __user *funcptr = (struct fdpic_func_descriptor __user *)ka->sa.sa_handler;
		/* NOTE(review): the __get_user() return values are ignored
		 * here; a faulting descriptor silently leaves pc/r12
		 * unset -- confirm this is acceptable. */
		__get_user(regs->pc, &funcptr->text);
		__get_user(regs->regs[12], &funcptr->GOT);
	} else
		regs->pc = (unsigned long)ka->sa.sa_handler;

	set_fs(USER_DS);

	pr_debug("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n",
		 current->comm, task_pid_nr(current), frame, regs->pc, regs->pr);

	/* The trampoline is executable data: keep the icache coherent. */
	flush_icache_range(regs->pr, regs->pr + sizeof(frame->retcode));

	return 0;

give_sigsegv:
	force_sigsegv(sig, current);
	return -EFAULT;
}
static int lcd_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) { struct lcd_display button_display; unsigned long address, a; int index; switch (cmd) { case LCD_On: udelay(150); BusyCheck(); LCDWriteInst(0x0F); break; case LCD_Off: udelay(150); BusyCheck(); LCDWriteInst(0x08); break; case LCD_Reset: udelay(150); LCDWriteInst(0x3F); udelay(150); LCDWriteInst(0x3F); udelay(150); LCDWriteInst(0x3F); udelay(150); LCDWriteInst(0x3F); udelay(150); LCDWriteInst(0x01); udelay(150); LCDWriteInst(0x06); break; case LCD_Clear: udelay(150); BusyCheck(); LCDWriteInst(0x01); break; case LCD_Cursor_Left: udelay(150); BusyCheck(); LCDWriteInst(0x10); break; case LCD_Cursor_Right: udelay(150); BusyCheck(); LCDWriteInst(0x14); break; case LCD_Cursor_Off: udelay(150); BusyCheck(); LCDWriteInst(0x0C); break; case LCD_Cursor_On: udelay(150); BusyCheck(); LCDWriteInst(0x0F); break; case LCD_Blink_Off: udelay(150); BusyCheck(); LCDWriteInst(0x0E); break; case LCD_Get_Cursor_Pos:{ struct lcd_display display; udelay(150); BusyCheck(); display.cursor_address = ( LCDReadInst ); display.cursor_address = ( display.cursor_address & 0x07F ); if(copy_to_user((struct lcd_display*)arg, &display, sizeof(struct lcd_display))) return -EFAULT; break; } case LCD_Set_Cursor_Pos: { struct lcd_display display; if(copy_from_user(&display, (struct lcd_display*)arg, sizeof(struct lcd_display))) return -EFAULT; a = (display.cursor_address | kLCD_Addr ); udelay(150); BusyCheck(); LCDWriteInst( a ); break; } case LCD_Get_Cursor: { struct lcd_display display; udelay(150); BusyCheck(); display.character = LCDReadData; if(copy_to_user((struct lcd_display*)arg, &display, sizeof(struct lcd_display))) return -EFAULT; udelay(150); BusyCheck(); LCDWriteInst(0x10); break; } case LCD_Set_Cursor:{ struct lcd_display display; if(copy_from_user(&display, (struct lcd_display*)arg, sizeof(struct lcd_display))) return -EFAULT; udelay(150); BusyCheck(); LCDWriteData( display.character ); 
udelay(150); BusyCheck(); LCDWriteInst(0x10); break; } case LCD_Disp_Left: udelay(150); BusyCheck(); LCDWriteInst(0x18); break; case LCD_Disp_Right: udelay(150); BusyCheck(); LCDWriteInst(0x1C); break; case LCD_Home: udelay(150); BusyCheck(); LCDWriteInst(0x02); break; case LCD_Write: { struct lcd_display display; if(copy_from_user(&display, (struct lcd_display*)arg, sizeof(struct lcd_display))) return -EFAULT; udelay(150); BusyCheck(); LCDWriteInst(0x80); udelay(150); BusyCheck(); for (index = 0; index < (display.size1); index++) { udelay(150); BusyCheck(); LCDWriteData( display.line1[index]); BusyCheck(); } udelay(150); BusyCheck(); LCDWriteInst(0xC0); udelay(150); BusyCheck(); for (index = 0; index < (display.size2); index++) { udelay(150); BusyCheck(); LCDWriteData( display.line2[index]); } break; } case LCD_Read: { struct lcd_display display; BusyCheck(); for (address = kDD_R00; address <= kDD_R01; address++) { a = (address | kLCD_Addr ); udelay(150); BusyCheck(); LCDWriteInst( a ); udelay(150); BusyCheck(); display.line1[address] = LCDReadData; } display.line1[ 0x27 ] = '\0'; for (address = kDD_R10; address <= kDD_R11; address++) { a = (address | kLCD_Addr ); udelay(150); BusyCheck(); LCDWriteInst( a ); udelay(150); BusyCheck(); display.line2[address - 0x40 ] = LCDReadData; } display.line2[ 0x27 ] = '\0'; if(copy_to_user((struct lcd_display*)arg, &display, sizeof(struct lcd_display))) return -EFAULT; break; } // set all GPIO leds to led_display.leds case LED_Set: { struct lcd_display led_display; if(copy_from_user(&led_display, (struct lcd_display*)arg, sizeof(struct lcd_display))) return -EFAULT; led_state = led_display.leds; LEDSet(led_state); break; } // set only bit led_display.leds case LED_Bit_Set: { int i; int bit=1; struct lcd_display led_display; if(copy_from_user(&led_display, (struct lcd_display*)arg, sizeof(struct lcd_display))) return -EFAULT; for (i=0;i<(int)led_display.leds;i++) { bit = 2*bit; } led_state = led_state | bit; LEDSet(led_state); 
break; } // clear only bit led_display.leds case LED_Bit_Clear: { int i; int bit=1; struct lcd_display led_display; if(copy_from_user(&led_display, (struct lcd_display*)arg, sizeof(struct lcd_display))) return -EFAULT; for (i=0;i<(int)led_display.leds;i++) { bit = 2*bit; } led_state = led_state & ~bit; LEDSet(led_state); break; } case BUTTON_Read: { button_display.buttons = GPIRead; if(copy_to_user((struct lcd_display*)arg, &button_display, sizeof(struct lcd_display))) return -EFAULT; break; } case LINK_Check: { button_display.buttons = *((volatile unsigned long *) (0xB0100060) ); if(copy_to_user((struct lcd_display*)arg, &button_display, sizeof(struct lcd_display))) return -EFAULT; break; } case LINK_Check_2: { int iface_num; /* panel-utils should pass in the desired interface status is wanted for * in "buttons" of the structure. We will set this to non-zero if the * link is in fact up for the requested interface. --DaveM */ if(copy_from_user(&button_display, (struct lcd_display *)arg, sizeof(button_display))) return -EFAULT; iface_num = button_display.buttons; if (iface_num >= 0 && iface_num < MAX_INTERFACES && linkcheck_callbacks[iface_num] != NULL) { button_display.buttons = linkcheck_callbacks[iface_num](linkcheck_cookies[iface_num]); } else { button_display.buttons = 0; } if(__copy_to_user((struct lcd_display*)arg, &button_display, sizeof(struct lcd_display))) return -EFAULT; break; } // Erase the flash case FLASH_Erase: { int ctr=0; // Chip Erase Sequence WRITE_FLASH( kFlash_Addr1, kFlash_Data1 ); WRITE_FLASH( kFlash_Addr2, kFlash_Data2 ); WRITE_FLASH( kFlash_Addr1, kFlash_Erase3 ); WRITE_FLASH( kFlash_Addr1, kFlash_Data1 ); WRITE_FLASH( kFlash_Addr2, kFlash_Data2 ); WRITE_FLASH( kFlash_Addr1, kFlash_Erase6 ); printk( "Erasing Flash.\n"); while ( (!dqpoll(0x00000000,0xFF)) && (!timeout(0x00000000)) ) { ctr++; } printk("\n"); printk("\n"); printk("\n"); if (READ_FLASH(0x07FFF0)==0xFF) { printk("Erase Successful\r\n"); } else if (timeout) { printk("Erase Timed 
Out\r\n"); } break; } // burn the flash case FLASH_Burn: { volatile unsigned long burn_addr; unsigned long flags; int i; unsigned char *rom; struct lcd_display display; if(copy_from_user(&display, (struct lcd_display*)arg, sizeof(struct lcd_display))) return -EFAULT; rom = (unsigned char *) kmalloc((128),GFP_ATOMIC); if ( rom == NULL ) { printk ("broken\n"); return 1; } printk("Churning and Burning -"); save_flags(flags); for (i=0; i<FLASH_SIZE; i=i+128) { if(copy_from_user(rom, display.RomImage + i, 128)) return -EFAULT; burn_addr = kFlashBase + i; cli(); for ( index = 0; index < ( 128 ) ; index++ ) { WRITE_FLASH( kFlash_Addr1, kFlash_Data1 ); WRITE_FLASH( kFlash_Addr2, kFlash_Data2 ); WRITE_FLASH( kFlash_Addr1, kFlash_Prog ); *((volatile unsigned char *)burn_addr) = (volatile unsigned char) rom[index]; while ( (!dqpoll(burn_addr,(volatile unsigned char) rom[index])) && (!timeout(burn_addr)) ) { } burn_addr++; } restore_flags(flags); if ( *((volatile unsigned char *)(burn_addr-1)) == (volatile unsigned char) rom[index-1] ) { } else if (timeout) { printk("Program timed out\r\n"); } } kfree(rom); break; } // read the flash all at once case FLASH_Read: { unsigned char *user_bytes; volatile unsigned long read_addr; int i; user_bytes = &(((struct lcd_display *)arg)->RomImage[0]); if(!access_ok(VERIFY_WRITE, user_bytes, FLASH_SIZE)) return -EFAULT; printk("Reading Flash"); for (i=0; i<FLASH_SIZE; i++) { unsigned char tmp_byte; read_addr = kFlashBase + i; tmp_byte = *((volatile unsigned char *)read_addr); if(__put_user (tmp_byte, &user_bytes[i])) return -EFAULT; } break; } default: return 0; break; } return 0; }
// // IOCTL // Control teensy by sending cmd to INTR endpoint // // Currently this function is unused. We leave it here // for possible future use. (It is replaced by vote_write()). // int vote_ioctl( struct inode * i_node, struct file * file, unsigned int cmd, unsigned long arg ) { struct usb_vote *vote_dev = file->private_data; int err = 0; int rc = 0; struct urb *vote_urb; // usb request char command; // g char *buf; printk(KERN_DEBUG "vote: process %i (%s) vote_ioctl cmd=%d\n", current->pid, current->comm, cmd); // // Extract the type and number bitfields, and don't decode // wrong cmds: return ENOTTY (inappropriate ioctl) before access_ok() // if (_IOC_TYPE(cmd) != VOTE_IOCTL_MAGIC) { printk(KERN_NOTICE "vote_ioctl: !vote_IOC_MAGIC\n"); return -ENOTTY; } if (_IOC_NR(cmd) > VOTE_IOCTL_MAX) { printk(KERN_NOTICE "vote_ioctl: > vote_IOC_MAXNR\n"); return -ENOTTY; } // // TURNED THIS OFF: If not root/sysadmin, go away // // if (! capable (CAP_SYS_ADMIN)) return -EPERM; // // The direction is a bitmask, // and VERIFY_WRITE catches R/W transfers. // `Type' is user-oriented, // while access_ok is kernel-oriented, // so the concept of "read" and "write" is reversed. 
// if (_IOC_DIR(cmd) & _IOC_READ) { err = !access_ok(VERIFY_WRITE, (void __user *)arg, _IOC_SIZE(cmd)); } else if (_IOC_DIR(cmd) & _IOC_WRITE) { err = !access_ok(VERIFY_READ, (void __user *)arg, _IOC_SIZE(cmd)); } if (err) { printk(KERN_NOTICE "vote_ioctl: access !ok\n"); return -EFAULT; } switch(cmd) { case VOTE_IOCTL_INIT: printk(KERN_NOTICE "VOTE_IOCTL_INIT\n"); command = 'g'; break; default: printk( KERN_NOTICE "%s: Not a known command: %x\n", __FUNCTION__, cmd ); return -EINVAL; } // switch(cmd) vote_urb = usb_alloc_urb(0, GFP_KERNEL); if (!vote_urb) { return -ENOMEM; } buf = usb_buffer_alloc( vote_dev->udev, 64, GFP_KERNEL, &vote_urb->transfer_dma ); if (!buf) { printk (KERN_NOTICE "%s: usb_buffer_alloc failed\n", __FUNCTION__); usb_buffer_free( vote_dev->udev, vote_urb->transfer_buffer_length, vote_urb->transfer_buffer, vote_urb->transfer_dma ); return -ENOMEM; } buf[0] = command; usb_fill_int_urb( vote_urb, vote_dev->udev, usb_sndintpipe(vote_dev->udev, vote_dev->intr_out_endpointAddr), buf, 64, (usb_complete_t)vote_write_intr_callback, vote_dev, 250 ); vote_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; if ((rc = usb_submit_urb(vote_urb, GFP_KERNEL))) { err("%s - failed submitting write urb, error %d", __FUNCTION__, rc); } return 0; } // vote_ioctl()
// read // // wait until the teensy returns data // ssize_t vote_read (struct file *file, char __user *buf, size_t count, loff_t *pos) { struct usb_vote *vote_dev = file->private_data; int rc = 0; struct urb *vote_urb; // usb request int wqrc = 0; // return from wait queue int bytes_notcopied = 0; int copycount = 0; int actual_count; char *usbbuf; char vote_from_teensy; // single character vote from teensy printk(KERN_NOTICE "In %s\n", __FUNCTION__); if (!access_ok(VERIFY_WRITE, buf, count)) return -EINVAL; vote_urb = usb_alloc_urb(0, GFP_KERNEL); if (!vote_urb) return -ENOMEM; usbbuf = usb_buffer_alloc( vote_dev->udev, 64, GFP_KERNEL, &vote_urb->transfer_dma ); if (!usbbuf) { printk (KERN_NOTICE "%s: usb_buffer_alloc failed\n", __FUNCTION__); usb_buffer_free( vote_dev->udev, vote_urb->transfer_buffer_length, vote_urb->transfer_buffer, vote_urb->transfer_dma ); return -ENOMEM; } usb_fill_int_urb( vote_urb, vote_dev->udev, usb_rcvintpipe(vote_dev->udev, vote_dev->intr_in_endpointAddr), usbbuf, 64, (usb_complete_t) vote_read_intr_callback, vote_dev, 250 ); vote_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; // vote_dev->data_from_teensy = false; data_from_teensy = false; if ((rc = usb_submit_urb(vote_urb, GFP_KERNEL))) { err("%s - failed submitting write urb, error %d", __FUNCTION__, rc); } // // Wait until teensy returns data // We will be awoken by our callback function, // vote_read_intr_callback(). // printk(KERN_DEBUG "%s: process %i (%s) going to sleep\n", __FUNCTION__, current->pid, current->comm); wqrc = wait_event_interruptible( // vote_dev->wait4voter, // vote_dev->data_from_teensy == true wait4voter, data_from_teensy == true ); if (wqrc == -ERESTARTSYS) { // Got a signal usb_buffer_free( vote_urb->dev, vote_urb->transfer_buffer_length, vote_urb->transfer_buffer, vote_urb->transfer_dma ); printk(KERN_DEBUG "%s got signal while waiting: %i (%s):\n", __FUNCTION__, current->pid, current->comm); // What needs to be deallocated here??? 
kgb return -EINTR; } printk(KERN_DEBUG "%s awoken %i (%s):\n", __FUNCTION__, current->pid, current->comm); // the actual amount of data from the URB actual_count = vote_urb->actual_length; printk(KERN_NOTICE "%s: Actual Count in the URB = %d\n", __FUNCTION__, actual_count); // We are expecting exactly 1 byte from the teensy. if (actual_count == 1) { vote_from_teensy = ((char *) vote_urb->transfer_buffer)[0]; // Set the vote in vote_dev->votecast // to reflect the voter's choice. // This is for viewing in sysfs. vote_dev->votecast = vote_from_teensy; } // actual_count is 64 bytes -- not the amount of data sent from teensy // kgb: Since we know its just one byte vote_from_teensy = ((char *) vote_urb->transfer_buffer)[0]; vote_dev->votecast = vote_from_teensy; printk(KERN_NOTICE "%s: Vote from teensy. [%c]\n", __FUNCTION__, vote_dev->votecast); if(count <= actual_count) copycount = count; else copycount = actual_count; bytes_notcopied = copy_to_user( buf, (char *) vote_urb->transfer_buffer, copycount ); if (bytes_notcopied != 0) { printk(KERN_NOTICE "%s: copy_to_user failed. \n", __FUNCTION__); } printk(KERN_NOTICE "%s: after copy_to_user. bytes_not_copied=%d \n", __FUNCTION__, bytes_notcopied); printk(KERN_NOTICE "%s: after copy_to_user. %c\n", __FUNCTION__, ((char *) vote_urb->transfer_buffer)[0]); // dealloc the URB usb_buffer_free( vote_urb->dev, vote_urb->transfer_buffer_length, vote_urb->transfer_buffer, vote_urb->transfer_dma ); // return the number of bytes read return copycount; } // vote_read()
//
// write
//
// Copy up to TMAX user bytes into a DMA-consistent buffer and send them
// to the teensy on the interrupt-OUT endpoint.
//
// Fixes over the previous version:
//  - copy_from_user() failure leaked both the transfer buffer and the
//    urb and returned -EPROTO; now both are freed and -EFAULT returned.
//  - usb_submit_urb() failure leaked both as well; now freed.
//  - "count=%d" used %d for a size_t (undefined behaviour); now %zu.
//
ssize_t vote_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
	unsigned long not_copied;	// return from copy_from_user()
	struct urb *wrt_urb;		// usb request: write
	char *tbuf;			// usb transfer buffer
	int rc;
	struct usb_vote *vote_dev = file->private_data;

	printk(KERN_NOTICE "In %s\n", __FUNCTION__);

	if (!access_ok(VERIFY_READ, buf, count))
		return -EINVAL;

	if (count > TMAX) {
		printk(KERN_NOTICE "In %s with count=%zu > TMAX\n", __FUNCTION__, count);
		return -EINVAL;
	}

	//
	// Set up the urb
	//
	wrt_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!wrt_urb)
		return -ENOMEM;

	tbuf = usb_buffer_alloc(vote_dev->udev, 64, GFP_KERNEL, &wrt_urb->transfer_dma);
	if (!tbuf) {
		printk(KERN_NOTICE "%s: usb_buffer_alloc failed\n", __FUNCTION__);
		usb_free_urb(wrt_urb);
		return -ENOMEM;
	}

	// Copy user data to URB transfer buffer.
	// NOTE(review): the buffer is 64 bytes while count may be up to
	// TMAX -- assumes TMAX <= 64; confirm the definition of TMAX.
	not_copied = copy_from_user(tbuf, buf, count);
	if (not_copied != 0) {
		usb_buffer_free(vote_dev->udev, 64, tbuf, wrt_urb->transfer_dma);
		usb_free_urb(wrt_urb);
		return -EFAULT;
	}

	usb_fill_int_urb(
		wrt_urb, vote_dev->udev,
		usb_sndintpipe(vote_dev->udev, vote_dev->intr_out_endpointAddr),
		tbuf, 64,
		(usb_complete_t)vote_write_intr_callback,
		vote_dev, 250
	);
	wrt_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

	if ((rc = usb_submit_urb(wrt_urb, GFP_KERNEL))) {
		err("%s - failed submitting write urb, error %d", __FUNCTION__, rc);
		usb_buffer_free(vote_dev->udev, 64, tbuf, wrt_urb->transfer_dma);
		usb_free_urb(wrt_urb);
		return rc;
	}

	// NOTE(review): on success the urb/buffer are presumably released
	// by vote_write_intr_callback() -- confirm; otherwise they leak.

	// Success
	return count;
}	// vote_write()
/*
 * do_syslog() - implement the syslog(2)/klogctl(3) action multiplexer.
 *
 * Dispatches on @type: blocking/non-destructive reads, read-all /
 * read-and-clear of the ring buffer, clearing, console log-level
 * control, and size queries.  @buf/@len are only used by the read and
 * level actions.  logbuf_lock protects the ring buffer indices; it is
 * dropped around every __put_user() because user access may sleep.
 *
 * Returns bytes transferred / a queried size / 0, or a negative errno.
 */
int do_syslog(int type, char __user *buf, int len, bool from_file)
{
	unsigned i, j, limit, count;
	int do_clear = 0;
	char c;
	int error;

	error = check_syslog_permissions(type, from_file);
	if (error)
		goto out;

	error = security_syslog(type);
	if (error)
		return error;

	switch (type) {
	case SYSLOG_ACTION_CLOSE:	/* Close log */
		break;
	case SYSLOG_ACTION_OPEN:	/* Open log */
		break;
	case SYSLOG_ACTION_READ:	/* Read from log */
		error = -EINVAL;
		if (!buf || len < 0)
			goto out;
		error = 0;
		if (!len)
			goto out;
		if (!access_ok(VERIFY_WRITE, buf, len)) {
			error = -EFAULT;
			goto out;
		}
		/* Block until at least one unread character is available. */
		error = wait_event_interruptible(log_wait, (log_start - log_end));
		if (error)
			goto out;
		i = 0;
		spin_lock_irq(&logbuf_lock);
		while (!error && (log_start != log_end) && i < len) {
			c = LOG_BUF(log_start);
			log_start++;
			/* __put_user() may sleep: drop the lock around it. */
			spin_unlock_irq(&logbuf_lock);
			error = __put_user(c,buf);
			buf++;
			i++;
			cond_resched();
			spin_lock_irq(&logbuf_lock);
		}
		spin_unlock_irq(&logbuf_lock);
		if (!error)
			error = i;
		break;
	/* Read/clear last kernel messages */
	case SYSLOG_ACTION_READ_CLEAR:
		do_clear = 1;
		/* FALL THRU */
	/* Read last kernel messages */
	case SYSLOG_ACTION_READ_ALL:
		error = -EINVAL;
		if (!buf || len < 0)
			goto out;
		error = 0;
		if (!len)
			goto out;
		if (!access_ok(VERIFY_WRITE, buf, len)) {
			error = -EFAULT;
			goto out;
		}
		count = len;
		if (count > log_buf_len)
			count = log_buf_len;
		spin_lock_irq(&logbuf_lock);
		if (count > logged_chars)
			count = logged_chars;
		if (do_clear)
			logged_chars = 0;
		limit = log_end;
		/*
		 * __put_user() could sleep, and while we sleep
		 * printk() could overwrite the messages
		 * we try to copy to user space. Therefore
		 * the messages are copied in reverse. <manfreds>
		 */
		for (i = 0; i < count && !error; i++) {
			j = limit-1-i;
			/* Stop if printk() has lapped the character we want. */
			if (j + log_buf_len < log_end)
				break;
			c = LOG_BUF(j);
			spin_unlock_irq(&logbuf_lock);
			error = __put_user(c,&buf[count-1-i]);
			cond_resched();
			spin_lock_irq(&logbuf_lock);
		}
		spin_unlock_irq(&logbuf_lock);
		if (error)
			break;
		error = i;
		if (i != count) {
			int offset = count-error;
			/* buffer overflow during copy, correct user buffer. */
			for (i = 0; i < error; i++) {
				if (__get_user(c,&buf[i+offset]) || __put_user(c,&buf[i])) {
					error = -EFAULT;
					break;
				}
				cond_resched();
			}
		}
		break;
	/* Clear ring buffer */
	case SYSLOG_ACTION_CLEAR:
		logged_chars = 0;
		break;
	/* Disable logging to console */
	case SYSLOG_ACTION_CONSOLE_OFF:
		if (saved_console_loglevel == -1)
			saved_console_loglevel = console_loglevel;
		console_loglevel = minimum_console_loglevel;
		break;
	/* Enable logging to console */
	case SYSLOG_ACTION_CONSOLE_ON:
		if (saved_console_loglevel != -1) {
			console_loglevel = saved_console_loglevel;
			saved_console_loglevel = -1;
		}
		break;
	/* Set level of messages printed to console */
	case SYSLOG_ACTION_CONSOLE_LEVEL:
		error = -EINVAL;
		if (len < 1 || len > 8)
			goto out;
		if (len < minimum_console_loglevel)
			len = minimum_console_loglevel;
		console_loglevel = len;
		/* Implicitly re-enable logging to console */
		saved_console_loglevel = -1;
		error = 0;
		break;
	/* Number of chars in the log buffer */
	case SYSLOG_ACTION_SIZE_UNREAD:
		error = log_end - log_start;
		break;
	/* Size of the log buffer */
	case SYSLOG_ACTION_SIZE_BUFFER:
		error = log_buf_len;
		break;
	default:
		error = -EINVAL;
		break;
	}
out:
	return error;
}
/*
 * proc_bus_pci_read() - read a PCI device's configuration space through
 * its /proc/bus/pci entry.
 *
 * Like proc_bus_pci_write(), the transfer is decomposed into naturally
 * aligned byte/word/dword config accesses.  Values are presented to
 * userspace in little-endian order regardless of host endianness.
 *
 * Returns the number of bytes read, 0 at/past the readable limit, or
 * -EINVAL when the user buffer is not writable.
 */
static ssize_t proc_bus_pci_read(struct file *file, char __user *buf,
				 size_t nbytes, loff_t *ppos)
{
	struct pci_dev *dev = PDE_DATA(file_inode(file));
	unsigned int pos = *ppos;
	unsigned int cnt, size;

	/*
	 * Normal users can read only the standardized portion of the
	 * configuration space as several chips lock up when trying to read
	 * undefined locations (think of Intel PIIX4 as a typical example).
	 */
	if (capable(CAP_SYS_ADMIN))
		size = dev->cfg_size;
	else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
		size = 128;
	else
		size = 64;

	/* Clamp the request so [pos, pos+nbytes) stays inside the limit. */
	if (pos >= size)
		return 0;
	if (nbytes >= size)
		nbytes = size;
	if (pos + nbytes > size)
		nbytes = size - pos;
	cnt = nbytes;

	if (!access_ok(VERIFY_WRITE, buf, cnt))
		return -EINVAL;

	/* Keep the device powered up while reading its config space. */
	pci_config_pm_runtime_get(dev);

	/* Leading unaligned byte. */
	if ((pos & 1) && cnt) {
		unsigned char val;
		pci_user_read_config_byte(dev, pos, &val);
		__put_user(val, buf);
		buf++;
		pos++;
		cnt--;
	}

	/* Leading unaligned word (only when at least 3 bytes remain). */
	if ((pos & 3) && cnt > 2) {
		unsigned short val;
		pci_user_read_config_word(dev, pos, &val);
		__put_user(cpu_to_le16(val), (__le16 __user *) buf);
		buf += 2;
		pos += 2;
		cnt -= 2;
	}

	/* Aligned bulk transfer, one dword at a time. */
	while (cnt >= 4) {
		unsigned int val;
		pci_user_read_config_dword(dev, pos, &val);
		__put_user(cpu_to_le32(val), (__le32 __user *) buf);
		buf += 4;
		pos += 4;
		cnt -= 4;
	}

	/* Trailing word, then trailing byte. */
	if (cnt >= 2) {
		unsigned short val;
		pci_user_read_config_word(dev, pos, &val);
		__put_user(cpu_to_le16(val), (__le16 __user *) buf);
		buf += 2;
		pos += 2;
		cnt -= 2;
	}

	if (cnt) {
		unsigned char val;
		pci_user_read_config_byte(dev, pos, &val);
		__put_user(val, buf);
		buf++;
		pos++;
		cnt--;
	}

	pci_config_pm_runtime_put(dev);

	*ppos = pos;
	return nbytes;
}
/*
 * compat_mc_getsockopt - getsockopt(MCAST_MSFILTER) translation for 32-bit
 * userland.
 *
 * Builds a native struct group_filter in a compat_alloc_user_space() scratch
 * area from the compat layout at @optval, invokes the real @getsockopt, then
 * translates the result (header fields and source list) back into the compat
 * layout.  All other options pass straight through.  Returns 0 or -errno.
 */
int compat_mc_getsockopt(struct sock *sock, int level, int optname,
	char __user *optval, int __user *optlen,
	int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
{
	struct compat_group_filter __user *gf32 = (void *)optval;
	struct group_filter __user *kgf;
	int __user *koptlen;
	u32 interface, fmode, numsrc;
	int klen, ulen, err;

	if (optname != MCAST_MSFILTER)
		return getsockopt(sock, level, optname, optval, optlen);

	koptlen = compat_alloc_user_space(sizeof(*koptlen));
	if (!access_ok(VERIFY_READ, optlen, sizeof(*optlen)) ||
	    __get_user(ulen, optlen))
		return -EFAULT;

	/* adjust len for pad */
	klen = ulen + sizeof(*kgf) - sizeof(*gf32);

	if (klen < GROUP_FILTER_SIZE(0))
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, koptlen, sizeof(*koptlen)) ||
	    __put_user(klen, koptlen))
		return -EFAULT;

	/* have to allow space for previous compat_alloc_user_space, too */
	kgf = compat_alloc_user_space(klen + sizeof(*optlen));

	/* copy the compat header fields into the native buffer */
	if (!access_ok(VERIFY_READ, gf32, __COMPAT_GF0_SIZE) ||
	    __get_user(interface, &gf32->gf_interface) ||
	    __get_user(fmode, &gf32->gf_fmode) ||
	    __get_user(numsrc, &gf32->gf_numsrc) ||
	    __put_user(interface, &kgf->gf_interface) ||
	    __put_user(fmode, &kgf->gf_fmode) ||
	    __put_user(numsrc, &kgf->gf_numsrc) ||
	    copy_in_user(&kgf->gf_group, &gf32->gf_group, sizeof(kgf->gf_group)))
		return -EFAULT;

	err = getsockopt(sock, level, optname, (char __user *)kgf, koptlen);
	if (err)
		return err;

	/* read back the (possibly updated) native length */
	if (!access_ok(VERIFY_READ, koptlen, sizeof(*koptlen)) ||
	    __get_user(klen, koptlen))
		return -EFAULT;

	/* convert native length back to the compat layout's length */
	ulen = klen - (sizeof(*kgf) - sizeof(*gf32));

	if (!access_ok(VERIFY_WRITE, optlen, sizeof(*optlen)) ||
	    __put_user(ulen, optlen))
		return -EFAULT;

	/* copy the result header back into the compat structure */
	if (!access_ok(VERIFY_READ, kgf, klen) ||
	    !access_ok(VERIFY_WRITE, gf32, ulen) ||
	    __get_user(interface, &kgf->gf_interface) ||
	    __get_user(fmode, &kgf->gf_fmode) ||
	    __get_user(numsrc, &kgf->gf_numsrc) ||
	    __put_user(interface, &gf32->gf_interface) ||
	    __put_user(fmode, &gf32->gf_fmode) ||
	    __put_user(numsrc, &gf32->gf_numsrc))
		return -EFAULT;

	if (numsrc) {
		int copylen;

		klen -= GROUP_FILTER_SIZE(0);
		copylen = numsrc * sizeof(gf32->gf_slist[0]);
		/* never copy more source entries than the native buffer holds */
		if (copylen > klen)
			copylen = klen;
		if (copy_in_user(gf32->gf_slist, kgf->gf_slist, copylen))
			return -EFAULT;
	}
	return err;
}
/*
 * do_sync_write - emulate a blocking ->write() on top of ->aio_write().
 *
 * Wraps @buf/@len in a single iovec and a synchronous kiocb, retries the
 * aio_write while it reports -EIOCBRETRY, and waits for completion if the
 * request was queued (-EIOCBQUEUED).  Updates *@ppos from the kiocb on
 * return.  Returns bytes written or a negative errno.
 */
ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
{
	struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = *ppos;
	kiocb.ki_left = len;
	kiocb.ki_nbytes = len;

	for (;;) {
		ret = filp->f_op->aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
		if (ret != -EIOCBRETRY)
			break;
		wait_on_retry_sync_kiocb(&kiocb);
	}

	if (-EIOCBQUEUED == ret)
		ret = wait_on_sync_kiocb(&kiocb);
	*ppos = kiocb.ki_pos;
	return ret;
}

EXPORT_SYMBOL(do_sync_write);

/*
 * vfs_write - VFS entry point for write(2).
 *
 * Validates the file mode, the presence of a write method, and the user
 * buffer, then dispatches to ->write() or the do_sync_write() aio fallback.
 * On success posts fsnotify/accounting events.  Returns bytes written or a
 * negative errno.
 *
 * NOTE(review): the leading infocoll block is out-of-tree instrumentation
 * that sends write metadata (inode, count, offset, size) over netlink when
 * the file lives on the watched filesystem; presumably best-effort — its
 * return value is ignored.  TODO confirm infocoll_send cannot sleep/fail
 * in a way callers care about.
 */
ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
	ssize_t ret;

	if (infocoll_data.fs == file->f_vfsmnt->mnt_root) {
		char data[40] = {0};
		loff_t offset = pos ? *pos : 0;
		ulong inode = file->f_dentry->d_inode->i_ino;
		ulong size = file->f_dentry->d_inode->i_size;

		/* pack inode/count/offset/size at fixed 8-byte slots */
		infocoll_write_to_buff(data, inode);
		infocoll_write_to_buff(data + 8, count);
		infocoll_write_to_buff(data + 16, offset);
		infocoll_write_to_buff(data + 24, size);

		infocoll_send(INFOCOLL_WRITE, data, NLMSG_DONE);
	}

	if (!(file->f_mode & FMODE_WRITE))
		return -EBADF;
	if (!file->f_op || (!file->f_op->write && !file->f_op->aio_write))
		return -EINVAL;
	if (unlikely(!access_ok(VERIFY_READ, buf, count)))
		return -EFAULT;

	ret = rw_verify_area(WRITE, file, pos, count);
	if (ret >= 0) {
		/* rw_verify_area may shrink the allowed byte count */
		count = ret;
		if (file->f_op->write)
			ret = file->f_op->write(file, buf, count, pos);
		else
			ret = do_sync_write(file, buf, count, pos);
		if (ret > 0) {
			fsnotify_modify(file);
			add_wchar(current, ret);
		}
		inc_syscw(current);
	}

	return ret;
}
/*
 * get_compat_timespec - fetch a compat timespec from user space.
 *
 * Validates the user pointer and copies the two fields into the native
 * struct timespec.  Returns 0 on success, -EFAULT on any failure.
 */
int get_compat_timespec(struct timespec *ts, const struct compat_timespec __user *cts)
{
	if (!access_ok(VERIFY_READ, cts, sizeof(*cts)))
		return -EFAULT;
	if (__get_user(ts->tv_sec, &cts->tv_sec))
		return -EFAULT;
	if (__get_user(ts->tv_nsec, &cts->tv_nsec))
		return -EFAULT;
	return 0;
}
/*
 * rw_copy_check_uvector - copy and validate a user iovec array.
 *
 * Copies @nr_segs iovecs from @uvector into kernel memory (using the caller's
 * @fast_pointer stack array when it is large enough, otherwise a kmalloc'd
 * array), validates each segment's length and — if @check_access — its user
 * address range, and caps the total at MAX_RW_COUNT.
 *
 * On every exit path *@ret_pointer is set to the array used, so the CALLER
 * owns and must free it (when it differs from @fast_pointer), even on error.
 * Returns the total byte count or a negative errno.
 */
ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
				unsigned long nr_segs, unsigned long fast_segs,
				struct iovec *fast_pointer,
				struct iovec **ret_pointer,
				int check_access)
{
	unsigned long seg;
	ssize_t ret;
	struct iovec *iov = fast_pointer;

	/*
	 * SuS says "The readv() function *may* fail if the iovcnt argument
	 * was less than or equal to 0, or greater than {IOV_MAX}.  Linux has
	 * traditionally returned zero for zero segments, so...
	 */
	if (nr_segs == 0) {
		ret = 0;
		goto out;
	}

	/*
	 * First get the "struct iovec" from user memory and
	 * verify all the pointers
	 */
	if (nr_segs > UIO_MAXIOV) {
		ret = -EINVAL;
		goto out;
	}
	if (nr_segs > fast_segs) {
		/* too many segments for the stack array: fall back to the heap */
		iov = kmalloc(nr_segs*sizeof(struct iovec), GFP_KERNEL);
		if (iov == NULL) {
			ret = -ENOMEM;
			goto out;
		}
	}
	if (copy_from_user(iov, uvector, nr_segs*sizeof(*uvector))) {
		ret = -EFAULT;
		goto out;
	}

	/*
	 * According to the Single Unix Specification we should return EINVAL
	 * if an element length is < 0 when cast to ssize_t or if the
	 * total length would overflow the ssize_t return value of the
	 * system call.
	 *
	 * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
	 * overflow case.
	 */
	ret = 0;
	for (seg = 0; seg < nr_segs; seg++) {
		void __user *buf = iov[seg].iov_base;
		ssize_t len = (ssize_t)iov[seg].iov_len;

		/* see if we we're about to use an invalid len or if
		 * it's about to overflow ssize_t */
		if (len < 0) {
			ret = -EINVAL;
			goto out;
		}
		if (check_access
		    && unlikely(!access_ok(vrfy_dir(type), buf, len))) {
			ret = -EFAULT;
			goto out;
		}
		if (len > MAX_RW_COUNT - ret) {
			/* clip the final segment so the total never exceeds the cap */
			len = MAX_RW_COUNT - ret;
			iov[seg].iov_len = len;
		}
		ret += len;
	}
out:
	*ret_pointer = iov;
	return ret;
}
/*
 * setup_rt_frame - build an rt signal frame on the user stack (SuperH).
 *
 * Writes siginfo and a ucontext into the frame, points the return address
 * (pr) at the restorer — SA_RESTORER stub, vDSO sigreturn, or an inline
 * trampoline written into frame->retcode — and loads the argument registers
 * for the handler.  Returns 0 on success, -EFAULT on any user-copy failure.
 */
static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;
	int err = 0, sig = ksig->sig;

	frame = get_sigframe(&ksig->ka, regs->regs[15], sizeof(*frame));

	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		return -EFAULT;

	err |= copy_siginfo_to_user(&frame->info, &ksig->info);

	/* Create the ucontext.  */
	err |= __put_user(0, &frame->uc.uc_flags);
	err |= __put_user(NULL, &frame->uc.uc_link);
	err |= __save_altstack(&frame->uc.uc_stack, regs->regs[15]);
	err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0]);
	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));

	/* Set up to return from userspace.  If provided, use a stub
	   already in userspace.  */
	if (ksig->ka.sa.sa_flags & SA_RESTORER) {
		regs->pr = (unsigned long) ksig->ka.sa.sa_restorer;
#ifdef CONFIG_VSYSCALL
	} else if (likely(current->mm->context.vdso)) {
		regs->pr = VDSO_SYM(&__kernel_rt_sigreturn);
#endif
	} else {
		/* Generate return code (system call to rt_sigreturn) */
		err |= __put_user(MOVW(7), &frame->retcode[0]);
		err |= __put_user(TRAP_NOARG, &frame->retcode[1]);
		err |= __put_user(OR_R0_R0, &frame->retcode[2]);
		err |= __put_user(OR_R0_R0, &frame->retcode[3]);
		err |= __put_user(OR_R0_R0, &frame->retcode[4]);
		err |= __put_user(OR_R0_R0, &frame->retcode[5]);
		err |= __put_user(OR_R0_R0, &frame->retcode[6]);
		err |= __put_user((__NR_rt_sigreturn), &frame->retcode[7]);
		regs->pr = (unsigned long) frame->retcode;
		/* the trampoline is freshly written code: flush I-cache */
		flush_icache_range(regs->pr, regs->pr + sizeof(frame->retcode));
	}

	if (err)
		return -EFAULT;

	/* Set up registers for signal handler */
	regs->regs[15] = (unsigned long) frame;
	regs->regs[4] = sig; /* Arg for signal handler */
	regs->regs[5] = (unsigned long) &frame->info;
	regs->regs[6] = (unsigned long) &frame->uc;

	if (current->personality & FDPIC_FUNCPTRS) {
		/* FDPIC: the handler "pointer" is a descriptor (entry + GOT) */
		struct fdpic_func_descriptor __user *funcptr =
			(struct fdpic_func_descriptor __user *)ksig->ka.sa.sa_handler;

		err |= __get_user(regs->pc, &funcptr->text);
		err |= __get_user(regs->regs[12], &funcptr->GOT);
	} else
		regs->pc = (unsigned long)ksig->ka.sa.sa_handler;

	if (err)
		return -EFAULT;

	pr_debug("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n",
		 current->comm, task_pid_nr(current), frame,
		 regs->pc, regs->pr);

	return 0;
}
/*
 * put_tv32 - store a native timeval into a compat (32-bit) timeval.
 *
 * Both field stores are always attempted once the range check passes (the
 * original combined them with a bitwise OR, not short-circuit).  Returns 0
 * on success, non-zero (1) on failure.
 */
static inline long put_tv32(struct compat_timeval __user *o, struct timeval *i)
{
	long failed;

	if (!access_ok(VERIFY_WRITE, o, sizeof(*o)))
		return 1;

	/* deliberately no short-circuit: attempt both stores */
	failed = __put_user(i->tv_sec, &o->tv_sec);
	failed |= __put_user(i->tv_usec, &o->tv_usec);

	return failed != 0;
}
/*
 * adb_write - submit an ADB request written by user space.
 *
 * Copies the request bytes, then dispatches on the first byte: ADB_QUERY goes
 * to the driver itself, ADB_PACKET+ADB_BUSRESET resets the bus, everything
 * else is handed to the controller.  adb_probe_mutex serializes against
 * probing.  Returns @count on success or a negative errno.
 *
 * Ownership: @req is freed here on every error path (and on the bus-reset
 * path); on successful submission it is owned by the completion machinery
 * (req->done = adb_write_done).
 */
static ssize_t adb_write(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	int ret/*, i*/;
	struct adbdev_state *state = file->private_data;
	struct adb_request *req;

	if (count < 2 || count > sizeof(req->data))
		return -EINVAL;
	if (adb_controller == NULL)
		return -ENXIO;
	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;

	req = kmalloc(sizeof(struct adb_request), GFP_KERNEL);
	if (req == NULL)
		return -ENOMEM;

	req->nbytes = count;
	req->done = adb_write_done;
	req->arg = (void *) state;
	req->complete = 0;

	ret = -EFAULT;
	if (copy_from_user(req->data, buf, count))
		goto out;

	atomic_inc(&state->n_pending);

	/* If a probe is in progress or we are sleeping, wait for it to complete */
	down(&adb_probe_mutex);

	/* Queries are special requests sent to the ADB driver itself */
	if (req->data[0] == ADB_QUERY) {
		if (count > 1)
			ret = do_adb_query(req);
		else
			ret = -EINVAL;
		up(&adb_probe_mutex);
	}
	/* Special case for ADB_BUSRESET request, all others are sent to
	   the controller */
	else if ((req->data[0] == ADB_PACKET) && (count > 1)
		 && (req->data[1] == ADB_BUSRESET)) {
		ret = do_adb_reset_bus();
		up(&adb_probe_mutex);
		atomic_dec(&state->n_pending);
		if (ret == 0)
			ret = count;
		goto out;
	} else {
		/* commands with low nibble 0xc expect a reply packet */
		req->reply_expected = ((req->data[1] & 0xc) == 0xc);

		if (adb_controller && adb_controller->send_request)
			ret = adb_controller->send_request(req, 0);
		else
			ret = -ENXIO;
		up(&adb_probe_mutex);
	}

	if (ret != 0) {
		atomic_dec(&state->n_pending);
		goto out;
	}
	return count;

out:
	kfree(req);
	return ret;
}
/*
 * mmc_wr_pack_stats_read - debugfs read handler dumping write-packing
 * statistics for an MMC card.
 *
 * Formats each statistic into @temp_buf and appends it to the caller's
 * buffer, returning the resulting string length on the first read after
 * print_in_read was armed, and 0 thereafter.
 *
 * NOTE(review): this handler calls strlcat()/strnlen() (and pr_info) directly
 * on the __user pointer @ubuf instead of building the text in kernel memory
 * and using copy_to_user(), and it never zero-initializes @ubuf before the
 * first strlcat — both look unsafe for a user-supplied buffer.  Also,
 * access_ok()/!card failures return @cnt (claiming success) rather than an
 * errno.  Kept byte-identical here; fixing requires restructuring around a
 * kernel bounce buffer — TODO confirm against the debugfs caller.
 */
static ssize_t mmc_wr_pack_stats_read(struct file *filp, char __user *ubuf,
				      size_t cnt, loff_t *ppos)
{
	struct mmc_card *card = filp->private_data;
	struct mmc_wr_pack_stats *pack_stats;
	int i;
	int max_num_of_packed_reqs = 0;
	char *temp_buf;

	if (!card)
		return cnt;

	if (!access_ok(VERIFY_WRITE, ubuf, cnt))
		return cnt;

	/* only report once per arming of print_in_read */
	if (!card->wr_pack_stats.print_in_read)
		return 0;

	if (!card->wr_pack_stats.enabled) {
		pr_info("%s: write packing statistics are disabled\n",
			mmc_hostname(card->host));
		goto exit;
	}

	pack_stats = &card->wr_pack_stats;

	if (!pack_stats->packing_events) {
		pr_info("%s: NULL packing_events\n", mmc_hostname(card->host));
		goto exit;
	}

	max_num_of_packed_reqs = card->ext_csd.max_packed_writes;

	temp_buf = kmalloc(TEMP_BUF_SIZE, GFP_KERNEL);
	if (!temp_buf)
		goto exit;

	spin_lock(&pack_stats->lock);

	snprintf(temp_buf, TEMP_BUF_SIZE, "%s: write packing statistics:\n",
		mmc_hostname(card->host));
	strlcat(ubuf, temp_buf, cnt);

	/* histogram: how often i requests were packed together */
	for (i = 1 ; i <= max_num_of_packed_reqs ; ++i) {
		if (pack_stats->packing_events[i]) {
			snprintf(temp_buf, TEMP_BUF_SIZE,
				 "%s: Packed %d reqs - %d times\n",
				mmc_hostname(card->host), i,
				pack_stats->packing_events[i]);
			strlcat(ubuf, temp_buf, cnt);
		}
	}

	/* per-reason counters for why packing was stopped */
	snprintf(temp_buf, TEMP_BUF_SIZE,
		 "%s: stopped packing due to the following reasons:\n",
		 mmc_hostname(card->host));
	strlcat(ubuf, temp_buf, cnt);

	if (pack_stats->pack_stop_reason[EXCEEDS_SEGMENTS]) {
		snprintf(temp_buf, TEMP_BUF_SIZE,
			 "%s: %d times: exceed max num of segments\n",
			 mmc_hostname(card->host),
			 pack_stats->pack_stop_reason[EXCEEDS_SEGMENTS]);
		strlcat(ubuf, temp_buf, cnt);
	}
	if (pack_stats->pack_stop_reason[EXCEEDS_SECTORS]) {
		snprintf(temp_buf, TEMP_BUF_SIZE,
			 "%s: %d times: exceed max num of sectors\n",
			mmc_hostname(card->host),
			pack_stats->pack_stop_reason[EXCEEDS_SECTORS]);
		strlcat(ubuf, temp_buf, cnt);
	}
	if (pack_stats->pack_stop_reason[WRONG_DATA_DIR]) {
		snprintf(temp_buf, TEMP_BUF_SIZE,
			 "%s: %d times: wrong data direction\n",
			mmc_hostname(card->host),
			pack_stats->pack_stop_reason[WRONG_DATA_DIR]);
		strlcat(ubuf, temp_buf, cnt);
	}
	if (pack_stats->pack_stop_reason[FLUSH_OR_DISCARD]) {
		snprintf(temp_buf, TEMP_BUF_SIZE,
			 "%s: %d times: flush or discard\n",
			mmc_hostname(card->host),
			pack_stats->pack_stop_reason[FLUSH_OR_DISCARD]);
		strlcat(ubuf, temp_buf, cnt);
	}
	if (pack_stats->pack_stop_reason[EMPTY_QUEUE]) {
		snprintf(temp_buf, TEMP_BUF_SIZE,
			 "%s: %d times: empty queue\n",
			mmc_hostname(card->host),
			pack_stats->pack_stop_reason[EMPTY_QUEUE]);
		strlcat(ubuf, temp_buf, cnt);
	}
	if (pack_stats->pack_stop_reason[REL_WRITE]) {
		snprintf(temp_buf, TEMP_BUF_SIZE,
			 "%s: %d times: rel write\n",
			mmc_hostname(card->host),
			pack_stats->pack_stop_reason[REL_WRITE]);
		strlcat(ubuf, temp_buf, cnt);
	}
	if (pack_stats->pack_stop_reason[THRESHOLD]) {
		snprintf(temp_buf, TEMP_BUF_SIZE,
			 "%s: %d times: Threshold\n",
			mmc_hostname(card->host),
			pack_stats->pack_stop_reason[THRESHOLD]);
		strlcat(ubuf, temp_buf, cnt);
	}
	if (pack_stats->pack_stop_reason[LARGE_SEC_ALIGN]) {
		snprintf(temp_buf, TEMP_BUF_SIZE,
			 "%s: %d times: Large sector alignment\n",
			mmc_hostname(card->host),
			pack_stats->pack_stop_reason[LARGE_SEC_ALIGN]);
		strlcat(ubuf, temp_buf, cnt);
	}
	if (pack_stats->pack_stop_reason[RANDOM]) {
		snprintf(temp_buf, TEMP_BUF_SIZE,
			 "%s: %d times: random request\n",
			mmc_hostname(card->host),
			pack_stats->pack_stop_reason[RANDOM]);
		strlcat(ubuf, temp_buf, cnt);
	}
	if (pack_stats->pack_stop_reason[FUA]) {
		snprintf(temp_buf, TEMP_BUF_SIZE,
			 "%s: %d times: fua request\n",
			mmc_hostname(card->host),
			pack_stats->pack_stop_reason[FUA]);
		strlcat(ubuf, temp_buf, cnt);
	}

	spin_unlock(&pack_stats->lock);

	kfree(temp_buf);

	pr_info("%s", ubuf);

exit:
	if (card->wr_pack_stats.print_in_read == 1) {
		card->wr_pack_stats.print_in_read = 0;
		return strnlen(ubuf, cnt);
	}

	return 0;
}
/*
 * setup_frame - build a signal frame on the user stack (ia64).
 *
 * Chooses the frame location (alternate signal stack if requested and not
 * already on it), fills in handler arguments, siginfo and sigcontext, then
 * redirects the interrupted context to the gate-page trampoline.  Returns 1
 * on success; on failure forces SIGSEGV on the task and returns 0.
 */
static long setup_frame (int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set,
	     struct sigscratch *scr)
{
	extern char ia64_sigtramp[], __start_gate_section[];
	unsigned long tramp_addr, new_rbs = 0;
	struct sigframe *frame;
	struct siginfo si;
	long err;

	frame = (void *) scr->pt.r12;
	tramp_addr = GATE_ADDR + (ia64_sigtramp - __start_gate_section);
	if ((ka->sa.sa_flags & SA_ONSTACK) && sas_ss_flags((unsigned long) frame) == 0) {
		frame = (void *) ((current->sas_ss_sp + current->sas_ss_size)
				  & ~(STACK_ALIGN - 1));
		/*
		 * We need to check for the register stack being on the signal stack
		 * separately, because it's switched separately (memory stack is switched
		 * in the kernel, register stack is switched in the signal trampoline).
		 */
		if (!rbs_on_sig_stack(scr->pt.ar_bspstore))
			new_rbs = (current->sas_ss_sp + sizeof(long) - 1) & ~(sizeof(long) - 1);
	}
	frame = (void *) frame - ((sizeof(*frame) + STACK_ALIGN - 1) & ~(STACK_ALIGN - 1));

	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		goto give_sigsegv;

	err = __put_user(sig, &frame->arg0);
	err |= __put_user(&frame->info, &frame->arg1);
	err |= __put_user(&frame->sc, &frame->arg2);
	err |= __put_user(new_rbs, &frame->sc.sc_rbs_base);
	err |= __put_user(0, &frame->sc.sc_loadrs);	/* initialize to zero */
	err |= __put_user(ka->sa.sa_handler, &frame->handler);

	err |= copy_siginfo_to_user(&frame->info, info);

	err |= __put_user(current->sas_ss_sp, &frame->sc.sc_stack.ss_sp);
	err |= __put_user(current->sas_ss_size, &frame->sc.sc_stack.ss_size);
	err |= __put_user(sas_ss_flags(scr->pt.r12), &frame->sc.sc_stack.ss_flags);
	err |= setup_sigcontext(&frame->sc, set, scr);

	if (err)
		goto give_sigsegv;

	scr->pt.r12 = (unsigned long) frame - 16;	/* new stack pointer */
	scr->pt.ar_fpsr = FPSR_DEFAULT;			/* reset fpsr for signal handler */
	scr->pt.cr_iip = tramp_addr;
	ia64_psr(&scr->pt)->ri = 0;			/* start executing in first slot */
	/*
	 * Force the interruption function mask to zero.  This has no effect when a
	 * system-call got interrupted by a signal (since, in that case, scr->pt_cr_ifs is
	 * ignored), but it has the desirable effect of making it possible to deliver a
	 * signal with an incomplete register frame (which happens when a mandatory RSE
	 * load faults).  Furthermore, it has no negative effect on the getting the user's
	 * dirty partition preserved, because that's governed by scr->pt.loadrs.
	 */
	scr->pt.cr_ifs = (1UL << 63);

	/*
	 * Note: this affects only the NaT bits of the scratch regs (the ones saved in
	 * pt_regs), which is exactly what we want.
	 */
	scr->scratch_unat = 0; /* ensure NaT bits of r12 is clear */

#if DEBUG_SIG
	printk("SIG deliver (%s:%d): sig=%d sp=%lx ip=%lx handler=%lx\n",
	       current->comm, current->pid, sig, scr->pt.r12, scr->pt.cr_iip, scr->pt.r3);
#endif
	return 1;

give_sigsegv:
	/* could not deliver: escalate to SIGSEGV (default action if it WAS SIGSEGV) */
	if (sig == SIGSEGV)
		ka->sa.sa_handler = SIG_DFL;
	si.si_signo = SIGSEGV;
	si.si_errno = 0;
	si.si_code = SI_KERNEL;
	si.si_pid = current->pid;
	si.si_uid = current->uid;
	si.si_addr = frame;
	force_sig_info(SIGSEGV, &si, current);
	return 0;
}
/*
 * mmc_bkops_stats_read - debugfs read handler dumping BKOPS statistics.
 *
 * Formats per-severity-level counters plus HPI/suspend counts into
 * @temp_buf and appends them to the caller's buffer; returns the string
 * length on the first read after print_stats was armed, 0 thereafter.
 *
 * NOTE(review): like mmc_wr_pack_stats_read, this calls memset()/strlcat()/
 * strnlen()/pr_info directly on the __user pointer @ubuf rather than using a
 * kernel bounce buffer + copy_to_user(), and failure paths return @cnt
 * instead of an errno.  Kept byte-identical; needs a restructure to fix —
 * TODO confirm with the debugfs caller.
 */
static ssize_t mmc_bkops_stats_read(struct file *filp, char __user *ubuf,
				     size_t cnt, loff_t *ppos)
{
	struct mmc_card *card = filp->private_data;
	struct mmc_bkops_stats *bkops_stats;
	int i;
	char *temp_buf;

	if (!card)
		return cnt;

	if (!access_ok(VERIFY_WRITE, ubuf, cnt))
		return cnt;

	bkops_stats = &card->bkops_info.bkops_stats;

	/* only report once per arming of print_stats */
	if (!bkops_stats->print_stats)
		return 0;

	if (!bkops_stats->enabled) {
		pr_info("%s: bkops statistics are disabled\n",
			mmc_hostname(card->host));
		goto exit;
	}

	temp_buf = kmalloc(TEMP_BUF_SIZE, GFP_KERNEL);
	if (!temp_buf)
		goto exit;

	spin_lock(&bkops_stats->lock);

	memset(ubuf, 0, cnt);

	snprintf(temp_buf, TEMP_BUF_SIZE, "%s: bkops statistics:\n",
		mmc_hostname(card->host));
	strlcat(ubuf, temp_buf, cnt);

	for (i = 0 ; i < BKOPS_NUM_OF_SEVERITY_LEVELS ; ++i) {
		snprintf(temp_buf, TEMP_BUF_SIZE,
			 "%s: BKOPS: due to level %d: %u\n",
			 mmc_hostname(card->host), i, bkops_stats->bkops_level[i]);
		strlcat(ubuf, temp_buf, cnt);
	}

	snprintf(temp_buf, TEMP_BUF_SIZE,
		 "%s: BKOPS: stopped due to HPI: %u\n",
		 mmc_hostname(card->host), bkops_stats->hpi);
	strlcat(ubuf, temp_buf, cnt);

	snprintf(temp_buf, TEMP_BUF_SIZE,
		 "%s: BKOPS: how many time host was suspended: %u\n",
		 mmc_hostname(card->host), bkops_stats->suspend);
	strlcat(ubuf, temp_buf, cnt);

	spin_unlock(&bkops_stats->lock);

	kfree(temp_buf);

	pr_info("%s", ubuf);

exit:
	if (bkops_stats->print_stats == 1) {
		bkops_stats->print_stats = 0;
		return strnlen(ubuf, cnt);
	}

	return 0;
}
/*
 * sycamore_read - read from the currently selected memory-mapped resource.
 *
 * Copies up to @count bytes starting at the device's current offset into the
 * user buffer @buf via a kernel bounce buffer, then advances dev->offset and
 * mirrors it into *@offset_out.
 *
 * Returns the number of bytes copied, 0 at end-of-resource, or a negative
 * errno (-ENODEV invalid resource, -EFAULT bad user buffer or failed copy,
 * -ENOMEM allocation failure).
 *
 * Fixes vs. the previous version:
 *  - dev->base_size[] was indexed with resource_num BEFORE the
 *    resource_num < 0 check (negative array index is undefined behavior);
 *  - kern_buf was tested with "<= 0" (invalid pointer comparison) instead
 *    of a NULL check, and failures returned 0 instead of an errno;
 *  - the 32-bit put_user() loop silently dropped a trailing 1-3 bytes yet
 *    still reported actual_count; copy_to_user() handles the full length
 *    and reports faults.
 */
ssize_t sycamore_read(struct file *filp, char *buf, size_t count, loff_t *offset_out)
{
	struct sycamore_dev *dev;
	unsigned long current_address;
	unsigned long actual_count;
	unsigned long offset;
	int resource_num;
	unsigned int *kern_buf;
	unsigned long size;

	/* the pointer to the device is the private data */
	dev = filp->private_data;

	/* validate the resource before using it as an array index */
	resource_num = dev->current_resource;
	if (resource_num < 0)
		return -ENODEV;

	offset = dev->offset;
	size = dev->base_size[resource_num];
	current_address = dev->page_addr + dev->base_page_offset + offset;

	/* at (or past) the end of the resource: nothing to read */
	if (offset >= size)
		return 0;

	/* clip the request so it stays within the resource */
	if ((offset + count) > size)
		actual_count = size - offset;
	else
		actual_count = count;

	/* verify the destination is writable user memory */
	if (!access_ok(VERIFY_WRITE, buf, actual_count))
		return -EFAULT;

	kern_buf = kmalloc(actual_count, GFP_KERNEL | GFP_DMA);
	if (!kern_buf)
		return -ENOMEM;

	/* copy the data from the IO region into the kernel bounce buffer */
	memcpy_fromio(kern_buf, current_address, actual_count);

	/*
	 * Copy the whole buffer out in one call: unlike the old 32-bit word
	 * loop this also transfers a trailing 1-3 bytes and reports faults.
	 */
	if (copy_to_user(buf, kern_buf, actual_count)) {
		kfree(kern_buf);
		return -EFAULT;
	}

	kfree(kern_buf);

	dev->offset += actual_count;
	*offset_out = dev->offset;

	return actual_count;
}
/*
 * setup_rt_frame_n32 - build an rt signal frame on the user stack (MIPS N32).
 *
 * Writes a two-instruction sigreturn trampoline into the frame, fills in
 * siginfo and the ucontext, and loads the MIPS argument/return registers so
 * the handler runs with $29 at the frame and $31 at the trampoline.  On any
 * user-copy failure the signal is escalated to SIGSEGV.
 */
void setup_rt_frame_n32(struct k_sigaction * ka,
	struct pt_regs *regs, int signr, sigset_t *set, siginfo_t *info)
{
	struct rt_sigframe_n32 *frame;
	int err = 0;
	s32 sp;

	frame = get_sigframe(ka, regs, sizeof(*frame));
	if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
		goto give_sigsegv;

	/*
	 * Set up the return code ...
	 *
	 *         li      v0, __NR_rt_sigreturn
	 *         syscall
	 */
	err |= __put_user(0x24020000 + __NR_N32_rt_sigreturn, frame->rs_code + 0);
	err |= __put_user(0x0000000c, frame->rs_code + 1);
	/* the trampoline is freshly written code: keep caches coherent */
	flush_cache_sigtramp((unsigned long) frame->rs_code);

	/* Create siginfo.  */
	err |= copy_siginfo_to_user(&frame->rs_info, info);

	/* Create the ucontext.  */
	err |= __put_user(0, &frame->rs_uc.uc_flags);
	err |= __put_user(0, &frame->rs_uc.uc_link);
	sp = (int) (long) current->sas_ss_sp;	/* N32 ABI: 32-bit stack pointer */
	err |= __put_user(sp, &frame->rs_uc.uc_stack.ss_sp);
	err |= __put_user(sas_ss_flags(regs->regs[29]), &frame->rs_uc.uc_stack.ss_flags);
	err |= __put_user(current->sas_ss_size, &frame->rs_uc.uc_stack.ss_size);
	err |= setup_sigcontext(regs, &frame->rs_uc.uc_mcontext);
	err |= __copy_to_user(&frame->rs_uc.uc_sigmask, set, sizeof(*set));

	if (err)
		goto give_sigsegv;

	/*
	 * Arguments to signal handler:
	 *
	 *   a0 = signal number
	 *   a1 = 0 (should be cause)
	 *   a2 = pointer to ucontext
	 *
	 * $25 and c0_epc point to the signal handler, $29 points to
	 * the struct rt_sigframe.
	 */
	regs->regs[ 4] = signr;
	regs->regs[ 5] = (unsigned long) &frame->rs_info;
	regs->regs[ 6] = (unsigned long) &frame->rs_uc;
	regs->regs[29] = (unsigned long) frame;
	regs->regs[31] = (unsigned long) frame->rs_code;
	regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler;

#if DEBUG_SIG
	printk("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%p\n",
	       current->comm, current->pid,
	       frame, regs->cp0_epc, regs->regs[31]);
#endif
	return;

give_sigsegv:
	/* could not deliver: fall back to default SIGSEGV behavior */
	if (signr == SIGSEGV)
		ka->sa.sa_handler = SIG_DFL;
	force_sig(SIGSEGV, current);
}
/*
 * setup_rt_frame - build an rt signal frame on the user stack (microblaze).
 *
 * Writes siginfo and a ucontext into the frame, plants a two-instruction
 * sigreturn trampoline in frame->tramp, flushes the instruction cache for
 * those bytes (walking the page tables on MMU configs to get the physical
 * address), and loads the handler's argument registers.  Returns 0 on
 * success; on failure forces SIGSEGV and returns -EFAULT.
 */
static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
			sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;
	int err = 0;
	int signal;
	unsigned long address = 0;
#ifdef CONFIG_MMU
	pmd_t *pmdp;
	pte_t *ptep;
#endif

	frame = get_sigframe(ka, regs, sizeof(*frame));

	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		goto give_sigsegv;

	/* translate the signal number through the exec domain, if any */
	signal = current_thread_info()->exec_domain
		&& current_thread_info()->exec_domain->signal_invmap
		&& sig < 32
		? current_thread_info()->exec_domain->signal_invmap[sig]
		: sig;

	if (info)
		err |= copy_siginfo_to_user(&frame->info, info);

	/* Create the ucontext. */
	err |= __put_user(0, &frame->uc.uc_flags);
	err |= __put_user(NULL, &frame->uc.uc_link);
	err |= __save_altstack(&frame->uc.uc_stack, regs->r1);
	err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0]);
	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));

	/* Set up to return from userspace. If provided, use a stub
	   already in userspace. */
	/* minus 8 is offset to cater for "rtsd r15,8" */
	/* addi r12, r0, __NR_sigreturn */
	err |= __put_user(0x31800000 | __NR_rt_sigreturn , frame->tramp + 0);
	/* brki r14, 0x8 */
	err |= __put_user(0xb9cc0008, frame->tramp + 1);

	/* Return from sighandler will jump to the tramp.
	   Negative 8 offset because return is rtsd r15, 8 */
	regs->r15 = ((unsigned long)frame->tramp)-8;

	address = ((unsigned long)frame->tramp);
#ifdef CONFIG_MMU
	/* walk the page tables to flush the freshly written trampoline */
	pmdp = pmd_offset(pud_offset(
			pgd_offset(current->mm, address),
					address), address);

	preempt_disable();
	ptep = pte_offset_map(pmdp, address);
	if (pte_present(*ptep)) {
		address = (unsigned long) page_address(pte_page(*ptep));
		/* MS: I need add offset in page */
		address += ((unsigned long)frame->tramp) & ~PAGE_MASK;
		/* MS address is virtual */
		address = __virt_to_phys(address);
		invalidate_icache_range(address, address + 8);
		flush_dcache_range(address, address + 8);
	}
	pte_unmap(ptep);
	preempt_enable();
#else
	flush_icache_range(address, address + 8);
	flush_dcache_range(address, address + 8);
#endif
	if (err)
		goto give_sigsegv;

	/* Set up registers for signal handler */
	regs->r1 = (unsigned long) frame;

	/* Signal handler args: */
	regs->r5 = signal; /* arg 0: signum */
	regs->r6 = (unsigned long) &frame->info; /* arg 1: siginfo */
	regs->r7 = (unsigned long) &frame->uc; /* arg2: ucontext */
	/* Offset to handle microblaze rtid r14, 0 */
	regs->pc = (unsigned long)ka->sa.sa_handler;

	set_fs(USER_DS);

#ifdef DEBUG_SIG
	pr_info("SIG deliver (%s:%d): sp=%p pc=%08lx\n",
		current->comm, current->pid, frame, regs->pc);
#endif

	return 0;

give_sigsegv:
	force_sigsegv(sig, current);
	return -EFAULT;
}
/*
 * compat_mc_setsockopt - translate 32-bit multicast setsockopt layouts.
 *
 * For the MCAST_* options whose compat structure layout differs from the
 * native one, rebuilds the option value as a native structure in a
 * compat_alloc_user_space() scratch area and forwards it to @setsockopt;
 * all other options pass through unchanged.  Returns the setsockopt result
 * or -EFAULT/-EINVAL on translation failure.
 */
int compat_mc_setsockopt(struct sock *sock, int level, int optname,
	char __user *optval, unsigned int optlen,
	int (*setsockopt)(struct sock *, int, int, char __user *, unsigned int))
{
	char __user *koptval = optval;
	int koptlen = optlen;

	switch (optname) {
	case MCAST_JOIN_GROUP:
	case MCAST_LEAVE_GROUP:
	{
		struct compat_group_req __user *gr32 = (void *)optval;
		struct group_req __user *kgr =
			compat_alloc_user_space(sizeof(struct group_req));
		u32 interface;

		if (!access_ok(VERIFY_READ, gr32, sizeof(*gr32)) ||
		    !access_ok(VERIFY_WRITE, kgr, sizeof(struct group_req)) ||
		    __get_user(interface, &gr32->gr_interface) ||
		    __put_user(interface, &kgr->gr_interface) ||
		    copy_in_user(&kgr->gr_group, &gr32->gr_group,
				 sizeof(kgr->gr_group)))
			return -EFAULT;
		koptval = (char __user *)kgr;
		koptlen = sizeof(struct group_req);
		break;
	}
	case MCAST_JOIN_SOURCE_GROUP:
	case MCAST_LEAVE_SOURCE_GROUP:
	case MCAST_BLOCK_SOURCE:
	case MCAST_UNBLOCK_SOURCE:
	{
		struct compat_group_source_req __user *gsr32 = (void *)optval;
		struct group_source_req __user *kgsr = compat_alloc_user_space(
			sizeof(struct group_source_req));
		u32 interface;

		if (!access_ok(VERIFY_READ, gsr32, sizeof(*gsr32)) ||
		    !access_ok(VERIFY_WRITE, kgsr,
			       sizeof(struct group_source_req)) ||
		    __get_user(interface, &gsr32->gsr_interface) ||
		    __put_user(interface, &kgsr->gsr_interface) ||
		    copy_in_user(&kgsr->gsr_group, &gsr32->gsr_group,
				 sizeof(kgsr->gsr_group)) ||
		    copy_in_user(&kgsr->gsr_source, &gsr32->gsr_source,
				 sizeof(kgsr->gsr_source)))
			return -EFAULT;
		koptval = (char __user *)kgsr;
		koptlen = sizeof(struct group_source_req);
		break;
	}
	case MCAST_MSFILTER:
	{
		struct compat_group_filter __user *gf32 = (void *)optval;
		struct group_filter __user *kgf;
		u32 interface, fmode, numsrc;

		if (!access_ok(VERIFY_READ, gf32, __COMPAT_GF0_SIZE) ||
		    __get_user(interface, &gf32->gf_interface) ||
		    __get_user(fmode, &gf32->gf_fmode) ||
		    __get_user(numsrc, &gf32->gf_numsrc))
			return -EFAULT;
		/* native struct is larger than the compat one: adjust len */
		koptlen = optlen + sizeof(struct group_filter) -
			  sizeof(struct compat_group_filter);
		if (koptlen < GROUP_FILTER_SIZE(numsrc))
			return -EINVAL;
		kgf = compat_alloc_user_space(koptlen);
		if (!access_ok(VERIFY_WRITE, kgf, koptlen) ||
		    __put_user(interface, &kgf->gf_interface) ||
		    __put_user(fmode, &kgf->gf_fmode) ||
		    __put_user(numsrc, &kgf->gf_numsrc) ||
		    copy_in_user(&kgf->gf_group, &gf32->gf_group,
				 sizeof(kgf->gf_group)) ||
		    (numsrc && copy_in_user(kgf->gf_slist, gf32->gf_slist,
				 numsrc * sizeof(kgf->gf_slist[0]))))
			return -EFAULT;
		koptval = (char __user *)kgf;
		break;
	}
	default:
		break;
	}
	return setsockopt(sock, level, optname, koptval, koptlen);
}
/*
 * setup_rt_frame - build an rt signal frame on the user stack (tile).
 *
 * Writes siginfo (full copy only for SA_SIGINFO handlers) and a ucontext
 * into the frame, then points pc at the handler, lr at the restorer (vDSO
 * or SA_RESTORER stub), and loads the argument registers.  Returns 0 on
 * success; on failure forces SIGSEGV and returns -EFAULT.
 */
static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
			  sigset_t *set, struct pt_regs *regs)
{
	unsigned long restorer;
	struct rt_sigframe __user *frame;
	int err = 0;
	int usig;

	frame = get_sigframe(ka, regs, sizeof(*frame));

	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		goto give_sigsegv;

	/* translate the signal number through the exec domain, if any */
	usig = current_thread_info()->exec_domain
		&& current_thread_info()->exec_domain->signal_invmap
		&& sig < 32
		? current_thread_info()->exec_domain->signal_invmap[sig]
		: sig;

	/* Always write at least the signal number for the stack backtracer. */
	if (ka->sa.sa_flags & SA_SIGINFO) {
		/* At sigreturn time, restore the callee-save registers too. */
		err |= copy_siginfo_to_user(&frame->info, info);
		regs->flags |= PT_FLAGS_RESTORE_REGS;
	} else {
		err |= __put_user(info->si_signo, &frame->info.si_signo);
	}

	/* Create the ucontext. */
	err |= __clear_user(&frame->save_area, sizeof(frame->save_area));
	err |= __put_user(0, &frame->uc.uc_flags);
	err |= __put_user(NULL, &frame->uc.uc_link);
	err |= __put_user((void __user *)(current->sas_ss_sp),
			  &frame->uc.uc_stack.ss_sp);
	err |= __put_user(sas_ss_flags(regs->sp),
			  &frame->uc.uc_stack.ss_flags);
	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
	err |= setup_sigcontext(&frame->uc.uc_mcontext, regs);
	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
	if (err)
		goto give_sigsegv;

	restorer = VDSO_BASE;
	if (ka->sa.sa_flags & SA_RESTORER)
		restorer = (unsigned long) ka->sa.sa_restorer;

	/*
	 * Set up registers for signal handler.
	 * Registers that we don't modify keep the value they had from
	 * user-space at the time we took the signal.
	 * We always pass siginfo and mcontext, regardless of SA_SIGINFO,
	 * since some things rely on this (e.g. glibc's debug/segfault.c).
	 */
	regs->pc = (unsigned long) ka->sa.sa_handler;
	regs->ex1 = PL_ICS_EX1(USER_PL, 1); /* set crit sec in handler */
	regs->sp = (unsigned long) frame;
	regs->lr = restorer;
	regs->regs[0] = (unsigned long) usig;
	regs->regs[1] = (unsigned long) &frame->info;
	regs->regs[2] = (unsigned long) &frame->uc;
	regs->flags |= PT_FLAGS_CALLER_SAVES;

	/*
	 * Notify any tracer that was single-stepping it.
	 * The tracer may want to single-step inside the
	 * handler too.
	 */
	if (test_thread_flag(TIF_SINGLESTEP))
		ptrace_notify(SIGTRAP);

	return 0;

give_sigsegv:
	force_sigsegv(sig, current);
	return -EFAULT;
}
/*
 * Commands to do_syslog:
 *
 * 	0 -- Close the log.  Currently a NOP.
 * 	1 -- Open the log. Currently a NOP.
 * 	2 -- Read from the log.
 * 	3 -- Read all messages remaining in the ring buffer.
 * 	4 -- Read and clear all messages remaining in the ring buffer
 * 	5 -- Clear ring buffer.
 * 	6 -- Disable printk's to console
 * 	7 -- Enable printk's to console
 *	8 -- Set level of messages printed to console
 *	9 -- Return number of unread characters in the log buffer
 *     10 -- Return size of the log buffer
 */
int do_syslog(int type, char __user *buf, int len)
{
	unsigned i, j, limit, count;
	int do_clear = 0;
	char c;
	int error = 0;

	/* security hook may veto the operation entirely */
	error = security_syslog(type);
	if (error)
		return error;

	switch (type) {
	case 0:		/* Close log */
		break;
	case 1:		/* Open log */
		break;
	case 2:		/* Read from log */
		error = -EINVAL;
		if (!buf || len < 0)
			goto out;
		error = 0;
		if (!len)
			goto out;
		if (!access_ok(VERIFY_WRITE, buf, len)) {
			error = -EFAULT;
			goto out;
		}
		/* block until at least one character is available */
		error = wait_event_interruptible(log_wait,
							(log_start - log_end));
		if (error)
			goto out;
		i = 0;
		spin_lock_irq(&logbuf_lock);
		/* drop the lock around each __put_user: it may fault/sleep */
		while (!error && (log_start != log_end) && i < len) {
			c = LOG_BUF(log_start);
			log_start++;
			spin_unlock_irq(&logbuf_lock);
			error = __put_user(c,buf);
			buf++;
			i++;
			cond_resched();
			spin_lock_irq(&logbuf_lock);
		}
		spin_unlock_irq(&logbuf_lock);
		if (!error)
			error = i;
		break;
	case 4:		/* Read/clear last kernel messages */
		do_clear = 1;
		/* FALL THRU */
	case 3:		/* Read last kernel messages */
		error = -EINVAL;
		if (!buf || len < 0)
			goto out;
		error = 0;
		if (!len)
			goto out;
		if (!access_ok(VERIFY_WRITE, buf, len)) {
			error = -EFAULT;
			goto out;
		}
		count = len;
		if (count > log_buf_len)
			count = log_buf_len;
		spin_lock_irq(&logbuf_lock);
		if (count > logged_chars)
			count = logged_chars;
		if (do_clear)
			logged_chars = 0;
		limit = log_end;
		/*
		 * __put_user() could sleep, and while we sleep
		 * printk() could overwrite the messages
		 * we try to copy to user space. Therefore
		 * the messages are copied in reverse. <manfreds>
		 */
		for (i = 0; i < count && !error; i++) {
			j = limit-1-i;
			/* a wrap past log_end means printk overwrote this char */
			if (j + log_buf_len < log_end)
				break;
			c = LOG_BUF(j);
			spin_unlock_irq(&logbuf_lock);
			error = __put_user(c,&buf[count-1-i]);
			cond_resched();
			spin_lock_irq(&logbuf_lock);
		}
		spin_unlock_irq(&logbuf_lock);
		if (error)
			break;
		error = i;
		if (i != count) {
			int offset = count-error;
			/* buffer overflow during copy, correct user buffer. */
			for (i = 0; i < error; i++) {
				if (__get_user(c,&buf[i+offset]) ||
				    __put_user(c,&buf[i])) {
					error = -EFAULT;
					break;
				}
				cond_resched();
			}
		}
		break;
	case 5:		/* Clear ring buffer */
		logged_chars = 0;
		break;
	case 6:		/* Disable logging to console */
		console_loglevel = minimum_console_loglevel;
		break;
	case 7:		/* Enable logging to console */
		console_loglevel = default_console_loglevel;
		break;
	case 8:		/* Set level of messages printed to console */
		error = -EINVAL;
		if (len < 1 || len > 8)
			goto out;
		if (len < minimum_console_loglevel)
			len = minimum_console_loglevel;
		console_loglevel = len;
		error = 0;
		break;
	case 9:		/* Number of chars in the log buffer */
		error = log_end - log_start;
		break;
	case 10:	/* Size of the log buffer */
		error = log_buf_len;
		break;
	default:
		error = -EINVAL;
		break;
	}
out:
	return error;
}