static int qlcnic_set_led(struct net_device *dev, enum ethtool_phys_id_state state) { struct qlcnic_adapter *adapter = netdev_priv(dev); int max_sds_rings = adapter->max_sds_rings; int err = -EIO, active = 1; if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) { netdev_warn(dev, "LED test not supported for non-privileged functions\n"); return -EOPNOTSUPP; } switch (state) { case ETHTOOL_ID_ACTIVE: if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state)) return -EBUSY; if (test_bit(__QLCNIC_RESETTING, &adapter->state)) break; if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) { if (qlcnic_diag_alloc_res(dev, QLCNIC_LED_TEST)) break; set_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state); } if (adapter->nic_ops->config_led(adapter, 1, 0xf) == 0) { err = 0; break; } dev_err(&adapter->pdev->dev, "Failed to set LED blink state.\n"); break; case ETHTOOL_ID_INACTIVE: active = 0; if (test_bit(__QLCNIC_RESETTING, &adapter->state)) break; if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) { if (qlcnic_diag_alloc_res(dev, QLCNIC_LED_TEST)) break; set_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state); } if (adapter->nic_ops->config_led(adapter, 0, 0xf)) dev_err(&adapter->pdev->dev, "Failed to reset LED blink state.\n"); break; default: return -EINVAL; } if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state)) qlcnic_diag_free_res(dev, max_sds_rings); if (!active || err) clear_bit(__QLCNIC_LED_ENABLE, &adapter->state); return err; }
void __destroy_context(unsigned long ctx) { clear_bit(ctx, context_map); }
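/*
 * __destroy_context() above is the release half of a bitmap ID allocator:
 * clear_bit() marks the context number as free again.  Below is a minimal,
 * self-contained userspace sketch of the matching allocate/free pair;
 * ctx_map, MAX_CTX and the helper names are illustrative, not part of the
 * code above, and unlike the kernel's atomic set_bit()/clear_bit() this
 * version is not thread-safe.
 */
#include <limits.h>

#define MAX_CTX   256
#define LONG_BITS (sizeof(unsigned long) * CHAR_BIT)

static unsigned long ctx_map[MAX_CTX / LONG_BITS];

static int alloc_context(void)
{
	for (int ctx = 0; ctx < MAX_CTX; ctx++) {
		unsigned long mask = 1UL << (ctx % LONG_BITS);
		if (!(ctx_map[ctx / LONG_BITS] & mask)) {
			ctx_map[ctx / LONG_BITS] |= mask;	/* set_bit */
			return ctx;
		}
	}
	return -1;	/* all context numbers in use */
}

static void destroy_context(int ctx)
{
	/* clear_bit: hand the context number back to the allocator */
	ctx_map[ctx / LONG_BITS] &= ~(1UL << (ctx % LONG_BITS));
}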
static void enable_lguest_irq(unsigned int irq) { clear_bit(irq, lguest_data.blocked_interrupts); }
/*************************************** * bt_close - close handle to the device ***************************************/ static int bt_close(struct hci_dev *hdev) { PRIN_LOG("HCI PAL: bt_close - enter\n"); clear_bit(HCI_RUNNING, &hdev->flags); return 0; }
static int start_usb_playback(struct ua101 *ua) { unsigned int i, frames; struct urb *urb; int err = 0; if (test_bit(DISCONNECTED, &ua->states)) return -ENODEV; if (test_bit(USB_PLAYBACK_RUNNING, &ua->states)) return 0; kill_stream_urbs(&ua->playback); tasklet_kill(&ua->playback_tasklet); err = enable_iso_interface(ua, INTF_PLAYBACK); if (err < 0) return err; clear_bit(PLAYBACK_URB_COMPLETED, &ua->states); ua->playback.urbs[0]->urb.complete = first_playback_urb_complete; spin_lock_irq(&ua->lock); INIT_LIST_HEAD(&ua->ready_playback_urbs); spin_unlock_irq(&ua->lock); /* * We submit the initial URBs all at once, so we have to wait for the * packet size FIFO to be full. */ wait_event(ua->rate_feedback_wait, ua->rate_feedback_count >= ua->playback.queue_length || !test_bit(USB_CAPTURE_RUNNING, &ua->states) || test_bit(DISCONNECTED, &ua->states)); if (test_bit(DISCONNECTED, &ua->states)) { stop_usb_playback(ua); return -ENODEV; } if (!test_bit(USB_CAPTURE_RUNNING, &ua->states)) { stop_usb_playback(ua); return -EIO; } for (i = 0; i < ua->playback.queue_length; ++i) { /* all initial URBs contain silence */ spin_lock_irq(&ua->lock); frames = ua->rate_feedback[ua->rate_feedback_start]; add_with_wraparound(ua, &ua->rate_feedback_start, 1); ua->rate_feedback_count--; spin_unlock_irq(&ua->lock); urb = &ua->playback.urbs[i]->urb; urb->iso_frame_desc[0].length = frames * ua->playback.frame_bytes; memset(urb->transfer_buffer, 0, urb->iso_frame_desc[0].length); } set_bit(USB_PLAYBACK_RUNNING, &ua->states); err = submit_stream_urbs(ua, &ua->playback); if (err < 0) stop_usb_playback(ua); return err; }
/** * Return the data stream that will be used */ int select_data_stream(FF_PKT *ff_pkt, bool compatible) { int stream; /* This is a plugin special restore object */ if (ff_pkt->type == FT_RESTORE_FIRST) { clear_all_bits(FO_MAX, ff_pkt->flags); return STREAM_FILE_DATA; } /* * Fix all incompatible options */ /** * No sparse option for encrypted data */ if (bit_is_set(FO_ENCRYPT, ff_pkt->flags)) { clear_bit(FO_SPARSE, ff_pkt->flags); } /* * Note, no sparse option for win32_data */ if (!is_portable_backup(&ff_pkt->bfd)) { stream = STREAM_WIN32_DATA; clear_bit(FO_SPARSE, ff_pkt->flags); } else if (bit_is_set(FO_SPARSE, ff_pkt->flags)) { stream = STREAM_SPARSE_DATA; } else { stream = STREAM_FILE_DATA; } if (bit_is_set(FO_OFFSETS, ff_pkt->flags)) { stream = STREAM_SPARSE_DATA; } /* * Encryption is only supported for file data */ if (stream != STREAM_FILE_DATA && stream != STREAM_WIN32_DATA && stream != STREAM_MACOS_FORK_DATA) { clear_bit(FO_ENCRYPT, ff_pkt->flags); } /* * Compression is not supported for Mac fork data */ if (stream == STREAM_MACOS_FORK_DATA) { clear_bit(FO_COMPRESS, ff_pkt->flags); } /* * Handle compression and encryption options */ if (bit_is_set(FO_COMPRESS, ff_pkt->flags)) { if (compatible && ff_pkt->Compress_algo == COMPRESS_GZIP) { switch (stream) { case STREAM_WIN32_DATA: stream = STREAM_WIN32_GZIP_DATA; break; case STREAM_SPARSE_DATA: stream = STREAM_SPARSE_GZIP_DATA; break; case STREAM_FILE_DATA: stream = STREAM_GZIP_DATA; break; default: /** * All stream types that do not support compression should clear out * FO_COMPRESS above, and this code block should be unreachable. */ ASSERT(!bit_is_set(FO_COMPRESS, ff_pkt->flags)); return STREAM_NONE; } } else { switch (stream) { case STREAM_WIN32_DATA: stream = STREAM_WIN32_COMPRESSED_DATA; break; case STREAM_SPARSE_DATA: stream = STREAM_SPARSE_COMPRESSED_DATA; break; case STREAM_FILE_DATA: stream = STREAM_COMPRESSED_DATA; break; default: /* * All stream types that do not support compression should clear out * FO_COMPRESS above, and this code block should be unreachable. */ ASSERT(!bit_is_set(FO_COMPRESS, ff_pkt->flags)); return STREAM_NONE; } } } #ifdef HAVE_CRYPTO if (bit_is_set(FO_ENCRYPT, ff_pkt->flags)) { switch (stream) { case STREAM_WIN32_DATA: stream = STREAM_ENCRYPTED_WIN32_DATA; break; case STREAM_WIN32_GZIP_DATA: stream = STREAM_ENCRYPTED_WIN32_GZIP_DATA; break; case STREAM_WIN32_COMPRESSED_DATA: stream = STREAM_ENCRYPTED_WIN32_COMPRESSED_DATA; break; case STREAM_FILE_DATA: stream = STREAM_ENCRYPTED_FILE_DATA; break; case STREAM_GZIP_DATA: stream = STREAM_ENCRYPTED_FILE_GZIP_DATA; break; case STREAM_COMPRESSED_DATA: stream = STREAM_ENCRYPTED_FILE_COMPRESSED_DATA; break; default: /* * All stream types that do not support encryption should clear out * FO_ENCRYPT above, and this code block should be unreachable. */ ASSERT(!bit_is_set(FO_ENCRYPT, ff_pkt->flags)); return STREAM_NONE; } } #endif return stream; }
static int startup(struct async_struct *info) { unsigned long flags; int retval=0; irqreturn_t (*handler)(int, void *, struct pt_regs *); struct serial_state *state= info->state; unsigned long page; page = get_zeroed_page(GFP_KERNEL); if (!page) return -ENOMEM; local_irq_save(flags); if (info->flags & ASYNC_INITIALIZED) { free_page(page); goto errout; } if (!state->port || !state->type) { if (info->tty) set_bit(TTY_IO_ERROR, &info->tty->flags); free_page(page); goto errout; } if (info->xmit.buf) free_page(page); else info->xmit.buf = (unsigned char *) page; #ifdef SIMSERIAL_DEBUG printk("startup: ttys%d (irq %d)...", info->line, state->irq); #endif /* * Allocate the IRQ if necessary */ if (state->irq && (!IRQ_ports[state->irq] || !IRQ_ports[state->irq]->next_port)) { if (IRQ_ports[state->irq]) { retval = -EBUSY; goto errout; } else handler = rs_interrupt_single; retval = request_irq(state->irq, handler, IRQ_T(info), "simserial", NULL); if (retval) { if (capable(CAP_SYS_ADMIN)) { if (info->tty) set_bit(TTY_IO_ERROR, &info->tty->flags); retval = 0; } goto errout; } } /* * Insert serial port into IRQ chain. */ info->prev_port = 0; info->next_port = IRQ_ports[state->irq]; if (info->next_port) info->next_port->prev_port = info; IRQ_ports[state->irq] = info; if (info->tty) clear_bit(TTY_IO_ERROR, &info->tty->flags); info->xmit.head = info->xmit.tail = 0; #if 0 /* * Set up serial timers... */ timer_table[RS_TIMER].expires = jiffies + 2*HZ/100; timer_active |= 1 << RS_TIMER; #endif /* * Set up the tty->alt_speed kludge */ if (info->tty) { if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI) info->tty->alt_speed = 57600; if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI) info->tty->alt_speed = 115200; if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI) info->tty->alt_speed = 230400; if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP) info->tty->alt_speed = 460800; } info->flags |= ASYNC_INITIALIZED; local_irq_restore(flags); return 0; errout: local_irq_restore(flags); return retval; }
/* * run through the list of inodes in the FS that need * defragging */ int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info) { struct inode_defrag *defrag; struct btrfs_root *inode_root; struct inode *inode; struct rb_node *n; struct btrfs_key key; struct btrfs_ioctl_defrag_range_args range; u64 first_ino = 0; u64 root_objectid = 0; int num_defrag; int defrag_batch = 1024; memset(&range, 0, sizeof(range)); range.len = (u64)-1; atomic_inc(&fs_info->defrag_running); spin_lock(&fs_info->defrag_inodes_lock); while (1) { n = NULL; /* find an inode to defrag */ defrag = btrfs_find_defrag_inode(fs_info, root_objectid, first_ino, &n); if (!defrag) { if (n) { defrag = rb_entry(n, struct inode_defrag, rb_node); } else if (root_objectid || first_ino) { root_objectid = 0; first_ino = 0; continue; } else { break; } } /* remove it from the rbtree */ first_ino = defrag->ino + 1; root_objectid = defrag->root; rb_erase(&defrag->rb_node, &fs_info->defrag_inodes); if (btrfs_fs_closing(fs_info)) goto next_free; spin_unlock(&fs_info->defrag_inodes_lock); /* get the inode */ key.objectid = defrag->root; btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY); key.offset = (u64)-1; inode_root = btrfs_read_fs_root_no_name(fs_info, &key); if (IS_ERR(inode_root)) goto next; key.objectid = defrag->ino; btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY); key.offset = 0; inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL); if (IS_ERR(inode)) goto next; /* do a chunk of defrag */ clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags); range.start = defrag->last_offset; num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid, defrag_batch); /* * if we filled the whole defrag batch, there * must be more work to do. Queue this defrag * again */ if (num_defrag == defrag_batch) { defrag->last_offset = range.start; __btrfs_add_inode_defrag(inode, defrag); /* * we don't want to kfree defrag, we added it back to * the rbtree */ defrag = NULL; } else if (defrag->last_offset && !defrag->cycled) { /* * we didn't fill our defrag batch, but * we didn't start at zero. Make sure we loop * around to the start of the file. */ defrag->last_offset = 0; defrag->cycled = 1; __btrfs_add_inode_defrag(inode, defrag); defrag = NULL; } iput(inode); next: spin_lock(&fs_info->defrag_inodes_lock); next_free: kfree(defrag); } spin_unlock(&fs_info->defrag_inodes_lock); atomic_dec(&fs_info->defrag_running); /* during unmount, the transaction_wait queue is used to wait for the defragger to stop */ wake_up(&fs_info->transaction_wait); return 0; }
static int clamp_thread(void *arg) { int cpunr = (unsigned long)arg; DEFINE_TIMER(wakeup_timer, noop_timer, 0, 0); static const struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO/2, }; unsigned int count = 0; unsigned int target_ratio; set_bit(cpunr, cpu_clamping_mask); set_freezable(); init_timer_on_stack(&wakeup_timer); sched_setscheduler(current, SCHED_FIFO, &param); while (true == clamping && !kthread_should_stop() && cpu_online(cpunr)) { int sleeptime; unsigned long target_jiffies; unsigned int guard; unsigned int compensation = 0; int interval; /* jiffies to sleep for each attempt */ unsigned int duration_jiffies = msecs_to_jiffies(duration); unsigned int window_size_now; try_to_freeze(); /* * make sure user selected ratio does not take effect until * the next round. adjust target_ratio if user has changed * target such that we can converge quickly. */ target_ratio = set_target_ratio; guard = 1 + target_ratio/20; window_size_now = window_size; count++; /* * systems may have different ability to enter package level * c-states, thus we need to compensate the injected idle ratio * to achieve the actual target reported by the HW. */ compensation = get_compensation(target_ratio); interval = duration_jiffies*100/(target_ratio+compensation); /* align idle time */ target_jiffies = roundup(jiffies, interval); sleeptime = target_jiffies - jiffies; if (sleeptime <= 0) sleeptime = 1; schedule_timeout_interruptible(sleeptime); /* * only elected controlling cpu can collect stats and update * control parameters. */ if (cpunr == control_cpu && !(count%window_size_now)) { should_skip = powerclamp_adjust_controls(target_ratio, guard, window_size_now); smp_mb(); } if (should_skip) continue; target_jiffies = jiffies + duration_jiffies; mod_timer(&wakeup_timer, target_jiffies); if (unlikely(local_softirq_pending())) continue; /* * stop tick sched during idle time, interrupts are still * allowed. thus jiffies are updated properly. */ preempt_disable(); tick_nohz_idle_enter(); /* mwait until target jiffies is reached */ while (time_before(jiffies, target_jiffies)) { unsigned long ecx = 1; unsigned long eax = target_mwait; /* * REVISIT: may call enter_idle() to notify drivers who * can save power during cpu idle. same for exit_idle() */ local_touch_nmi(); stop_critical_timings(); mwait_idle_with_hints(eax, ecx); start_critical_timings(); atomic_inc(&idle_wakeup_counter); } tick_nohz_idle_exit(); preempt_enable_no_resched(); } del_timer_sync(&wakeup_timer); clear_bit(cpunr, cpu_clamping_mask); return 0; } /* * 1 HZ polling while clamping is active, useful for userspace * to monitor actual idle ratio.
*/ static void poll_pkg_cstate(struct work_struct *dummy); static DECLARE_DELAYED_WORK(poll_pkg_cstate_work, poll_pkg_cstate); static void poll_pkg_cstate(struct work_struct *dummy) { static u64 msr_last; static u64 tsc_last; static unsigned long jiffies_last; u64 msr_now; unsigned long jiffies_now; u64 tsc_now; u64 val64; msr_now = pkg_state_counter(); rdtscll(tsc_now); jiffies_now = jiffies; /* calculate pkg cstate vs tsc ratio */ if (!msr_last || !tsc_last) pkg_cstate_ratio_cur = 1; else { if (tsc_now - tsc_last) { val64 = 100 * (msr_now - msr_last); do_div(val64, (tsc_now - tsc_last)); pkg_cstate_ratio_cur = val64; } } /* update record */ msr_last = msr_now; jiffies_last = jiffies_now; tsc_last = tsc_now; if (true == clamping) schedule_delayed_work(&poll_pkg_cstate_work, HZ); } static int start_power_clamp(void) { unsigned long cpu; struct task_struct *thread; /* check if pkg cstate counter is completely 0, abort in this case */ if (!has_pkg_state_counter()) { pr_err("pkg cstate counter not functional, abort\n"); return -EINVAL; } set_target_ratio = clamp(set_target_ratio, 0U, MAX_TARGET_RATIO - 1); /* prevent cpu hotplug */ get_online_cpus(); /* prefer BSP */ control_cpu = 0; if (!cpu_online(control_cpu)) control_cpu = smp_processor_id(); clamping = true; schedule_delayed_work(&poll_pkg_cstate_work, 0); /* start one thread per online cpu */ for_each_online_cpu(cpu) { struct task_struct **p = per_cpu_ptr(powerclamp_thread, cpu); thread = kthread_create_on_node(clamp_thread, (void *) cpu, cpu_to_node(cpu), "kidle_inject/%ld", cpu); /* bind to cpu here */ if (likely(!IS_ERR(thread))) { kthread_bind(thread, cpu); wake_up_process(thread); *p = thread; } } put_online_cpus(); return 0; }
int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd) { int cmd_idx; int ret; BUG_ON(cmd->flags & CMD_ASYNC); /* A synchronous command can not have a callback set. */ BUG_ON(cmd->callback); IWL_DEBUG_INFO(priv, "Attempting to send sync command %s\n", get_cmd_string(cmd->id)); mutex_lock(&priv->sync_cmd_mutex); set_bit(STATUS_HCMD_ACTIVE, &priv->status); IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s\n", get_cmd_string(cmd->id)); cmd_idx = iwl_enqueue_hcmd(priv, cmd); if (cmd_idx < 0) { ret = cmd_idx; IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n", get_cmd_string(cmd->id), ret); goto out; } ret = wait_event_interruptible_timeout(priv->wait_command_queue, !test_bit(STATUS_HCMD_ACTIVE, &priv->status), HOST_COMPLETE_TIMEOUT); if (!ret) { if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) { IWL_ERR(priv, "Error sending %s: time out after %dms.\n", get_cmd_string(cmd->id), jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); clear_bit(STATUS_HCMD_ACTIVE, &priv->status); IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n", get_cmd_string(cmd->id)); ret = -ETIMEDOUT; goto cancel; } } if (test_bit(STATUS_RF_KILL_HW, &priv->status)) { IWL_ERR(priv, "Command %s aborted: RF KILL Switch\n", get_cmd_string(cmd->id)); ret = -ECANCELED; goto fail; } if (test_bit(STATUS_FW_ERROR, &priv->status)) { IWL_ERR(priv, "Command %s failed: FW Error\n", get_cmd_string(cmd->id)); ret = -EIO; goto fail; } if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) { IWL_ERR(priv, "Error: Response NULL in '%s'\n", get_cmd_string(cmd->id)); ret = -EIO; goto cancel; } ret = 0; goto out; cancel: if (cmd->flags & CMD_WANT_SKB) { /* * Cancel the CMD_WANT_SKB flag for the cmd in the * TX cmd queue. Otherwise in case the cmd comes * in later, it will possibly set an invalid * address (cmd->meta.source). */ priv->txq[IWL_CMD_QUEUE_NUM].meta[cmd_idx].flags &= ~CMD_WANT_SKB; } fail: if (cmd->reply_page) { iwl_free_pages(priv, cmd->reply_page); cmd->reply_page = 0; } out: mutex_unlock(&priv->sync_cmd_mutex); return ret; }
static long wdog_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { void __user *argp = (void __user *)arg; int __user *p = argp; int new_heartbeat; int status; int options; uint32_t remaining; struct watchdog_info ident = { .options = WDIOF_SETTIMEOUT| WDIOF_MAGICCLOSE| WDIOF_KEEPALIVEPING, .firmware_version = 1, .identity = "BCM2708", }; switch (cmd) { case WDIOC_GETSUPPORT: return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0; case WDIOC_GETSTATUS: status = wdog_get_status(); return put_user(status, p); case WDIOC_GETBOOTSTATUS: return put_user(0, p); case WDIOC_KEEPALIVE: wdog_ping(); return 0; case WDIOC_SETTIMEOUT: if (get_user(new_heartbeat, p)) return -EFAULT; if (wdog_set_heartbeat(new_heartbeat)) return -EINVAL; wdog_ping(); /* Fall */ case WDIOC_GETTIMEOUT: return put_user(heartbeat, p); case WDIOC_GETTIMELEFT: remaining = WDOG_TICKS_TO_SECS(wdog_get_remaining()); return put_user(remaining, p); case WDIOC_SETOPTIONS: if (get_user(options, p)) return -EFAULT; if (options & WDIOS_DISABLECARD) wdog_stop(); if (options & WDIOS_ENABLECARD) wdog_start(wdog_ticks); return 0; default: return -ENOTTY; } } /** * @inode: inode of device * @file: file handle to device * * The watchdog device has been opened. The watchdog device is single * open and on opening we load the counters. */ static int wdog_open(struct inode *inode, struct file *file) { if (test_and_set_bit(0, &wdog_is_open)) return -EBUSY; /* * Activate */ wdog_start(wdog_ticks); return nonseekable_open(inode, file); } /** * @inode: inode to board * @file: file handle to board * * The watchdog has a configurable API. There is a religious dispute * between people who want their watchdog to be able to shut down and * those who want to be sure if the watchdog manager dies the machine * reboots. In the former case we disable the counters, in the latter * case you have to open it again very soon. */ static int wdog_release(struct inode *inode, struct file *file) { if (expect_close == 42) { wdog_stop(); } else { printk(KERN_CRIT "wdt: WDT device closed unexpectedly. WDT will not stop!\n"); wdog_ping(); } clear_bit(0, &wdog_is_open); expect_close = 0; return 0; } /** * @this: our notifier block * @code: the event being reported * @unused: unused * * Our notifier is called on system shutdowns. Turn the watchdog * off so that it does not fire during the next reboot. */ static int wdog_notify_sys(struct notifier_block *this, unsigned long code, void *unused) { if (code == SYS_DOWN || code == SYS_HALT) wdog_stop(); return NOTIFY_DONE; } /* * Kernel Interfaces */ static const struct file_operations wdog_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .write = wdog_write, .unlocked_ioctl = wdog_ioctl, .open = wdog_open, .release = wdog_release, }; static struct miscdevice wdog_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &wdog_fops, }; /* * The WDT card needs to learn about soft shutdowns in order to * turn the timebomb registers off. */ static struct notifier_block wdog_notifier = { .notifier_call = wdog_notify_sys, }; /** * cleanup_module: * * Unload the watchdog. You cannot do this with any file handles open. * If your watchdog is set to continue ticking on close and you unload * it, well it keeps ticking. We won't get the interrupt but the board * will not touch PC memory so all is fine. You just have to load a new * module in 60 seconds or reboot. 
*/ static void __exit wdog_exit(void) { misc_deregister(&wdog_miscdev); unregister_reboot_notifier(&wdog_notifier); } static int __init wdog_init(void) { int ret; /* Check that the heartbeat value is within its range; if not, reset to the default */ if (wdog_set_heartbeat(heartbeat)) { wdog_set_heartbeat(WD_TIMO); printk(KERN_INFO "bcm2708_wdog: heartbeat value must be " "0 < heartbeat < %d, using %d\n", WDOG_TICKS_TO_SECS(PM_WDOG_TIME_SET), WD_TIMO); } ret = register_reboot_notifier(&wdog_notifier); if (ret) { printk(KERN_ERR "wdt: cannot register reboot notifier (err=%d)\n", ret); goto out_reboot; } ret = misc_register(&wdog_miscdev); if (ret) { printk(KERN_ERR "wdt: cannot register miscdev on minor=%d (err=%d)\n", WATCHDOG_MINOR, ret); goto out_misc; } printk(KERN_INFO "bcm2708 watchdog, heartbeat=%d sec (nowayout=%d)\n", heartbeat, nowayout); return 0; out_misc: unregister_reboot_notifier(&wdog_notifier); out_reboot: return ret; }
static int maxiradio_exclusive_release(struct file *file) { clear_bit(0, &in_use); return 0; }
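/*
 * maxiradio_exclusive_release() and the watchdog open/close pairs in this
 * collection all use the same exclusive-open idiom: test_and_set_bit() on
 * open, clear_bit() on release.  A hedged userspace analogue using GCC's
 * __atomic builtins (device_in_use and both function names are
 * illustrative, not from any of the drivers above):
 */
static unsigned long device_in_use;

static int device_open(void)
{
	/* atomically set bit 0 and test its previous value; a nonzero
	 * result means somebody already has the device open */
	if (__atomic_fetch_or(&device_in_use, 1UL, __ATOMIC_ACQUIRE) & 1UL)
		return -1;	/* the drivers above return -EBUSY here */
	return 0;
}

static void device_release(void)
{
	/* clear bit 0 so that the next open succeeds */
	__atomic_fetch_and(&device_in_use, ~1UL, __ATOMIC_RELEASE);
}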
static int wafwdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) { int new_timeout; void __user *argp = (void __user *)arg; int __user *p = argp; static struct watchdog_info ident = { .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE, .firmware_version = 1, .identity = "Wafer 5823 WDT", }; switch (cmd) { case WDIOC_GETSUPPORT: if (copy_to_user(argp, &ident, sizeof (ident))) return -EFAULT; break; case WDIOC_GETSTATUS: case WDIOC_GETBOOTSTATUS: return put_user(0, p); case WDIOC_KEEPALIVE: wafwdt_ping(); break; case WDIOC_SETTIMEOUT: if (get_user(new_timeout, p)) return -EFAULT; if ((new_timeout < 1) || (new_timeout > 255)) return -EINVAL; timeout = new_timeout; wafwdt_stop(); wafwdt_start(); /* Fall */ case WDIOC_GETTIMEOUT: return put_user(timeout, p); case WDIOC_SETOPTIONS: { int options, retval = -EINVAL; if (get_user(options, p)) return -EFAULT; if (options & WDIOS_DISABLECARD) { wafwdt_stop(); retval = 0; } if (options & WDIOS_ENABLECARD) { wafwdt_start(); retval = 0; } return retval; } default: return -ENOIOCTLCMD; } return 0; } static int wafwdt_open(struct inode *inode, struct file *file) { if (test_and_set_bit(0, &wafwdt_is_open)) return -EBUSY; /* * Activate */ wafwdt_start(); return nonseekable_open(inode, file); } static int wafwdt_close(struct inode *inode, struct file *file) { if (expect_close == 42) { wafwdt_stop(); } else { printk(KERN_CRIT PFX "WDT device closed unexpectedly. WDT will not stop!\n"); wafwdt_ping(); } clear_bit(0, &wafwdt_is_open); expect_close = 0; return 0; } /* * Notifier for system down */ static int wafwdt_notify_sys(struct notifier_block *this, unsigned long code, void *unused) { if (code == SYS_DOWN || code == SYS_HALT) { /* Turn the WDT off */ wafwdt_stop(); } return NOTIFY_DONE; } /* * Kernel Interfaces */ static struct file_operations wafwdt_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .write = wafwdt_write, .ioctl = wafwdt_ioctl, .open = wafwdt_open, .release = wafwdt_close, }; static struct miscdevice wafwdt_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &wafwdt_fops, }; /* * The WDT needs to learn about soft shutdowns in order to * turn the timebomb registers off. */ static struct notifier_block wafwdt_notifier = { .notifier_call = wafwdt_notify_sys, }; static int __init wafwdt_init(void) { int ret; printk(KERN_INFO "WDT driver for Wafer 5823 single board computer initialising.\n"); spin_lock_init(&wafwdt_lock); if (timeout < 1 || timeout > 255) { timeout = WD_TIMO; printk (KERN_INFO PFX "timeout value must be 1<=x<=255, using %d\n", timeout); } if (wdt_stop != wdt_start) { if(!request_region(wdt_stop, 1, "Wafer 5823 WDT")) { printk (KERN_ERR PFX "I/O address 0x%04x already in use\n", wdt_stop); ret = -EIO; goto error; } } if(!request_region(wdt_start, 1, "Wafer 5823 WDT")) { printk (KERN_ERR PFX "I/O address 0x%04x already in use\n", wdt_start); ret = -EIO; goto error2; } ret = register_reboot_notifier(&wafwdt_notifier); if (ret != 0) { printk (KERN_ERR PFX "cannot register reboot notifier (err=%d)\n", ret); goto error3; } ret = misc_register(&wafwdt_miscdev); if (ret != 0) { printk (KERN_ERR PFX "cannot register miscdev on minor=%d (err=%d)\n", WATCHDOG_MINOR, ret); goto error4; } printk (KERN_INFO PFX "initialized. timeout=%d sec (nowayout=%d)\n", timeout, nowayout); return ret; error4: unregister_reboot_notifier(&wafwdt_notifier); error3: release_region(wdt_start, 1); error2: if (wdt_stop != wdt_start) release_region(wdt_stop, 1); error: return ret; } static void __exit wafwdt_exit(void) { misc_deregister(&wafwdt_miscdev); unregister_reboot_notifier(&wafwdt_notifier); if(wdt_stop != wdt_start) release_region(wdt_stop, 1); release_region(wdt_start, 1); }
static int qlcnic_loopback_test(struct net_device *netdev, u8 mode) { struct qlcnic_adapter *adapter = netdev_priv(netdev); int max_sds_rings = adapter->max_sds_rings; struct qlcnic_host_sds_ring *sds_ring; int loop = 0; int ret; if (!(adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK)) { netdev_info(netdev, "Firmware is not loopback test capable\n"); return -EOPNOTSUPP; } QLCDB(adapter, DRV, "%s loopback test in progress\n", mode == QLCNIC_ILB_MODE ? "internal" : "external"); if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) { netdev_warn(netdev, "Loopback test not supported for non-privileged functions\n"); return 0; } if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) return -EBUSY; ret = qlcnic_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST); if (ret) goto clear_it; sds_ring = &adapter->recv_ctx->sds_rings[0]; ret = qlcnic_set_lb_mode(adapter, mode); if (ret) goto free_res; adapter->ahw->diag_cnt = 0; do { msleep(500); qlcnic_process_rcv_ring_diag(sds_ring); if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) { netdev_info(netdev, "firmware didn't respond to loopback configure request\n"); ret = -QLCNIC_FW_NOT_RESPOND; goto free_res; } else if (adapter->ahw->diag_cnt) { ret = adapter->ahw->diag_cnt; goto free_res; } } while (!QLCNIC_IS_LB_CONFIGURED(adapter->ahw->loopback_state)); ret = qlcnic_do_lb_test(adapter, mode); qlcnic_clear_lb_mode(adapter); free_res: qlcnic_diag_free_res(netdev, max_sds_rings); clear_it: adapter->max_sds_rings = max_sds_rings; clear_bit(__QLCNIC_RESETTING, &adapter->state); return ret; }
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) { int nr = kvmppc_get_gpr(vcpu, 11); int r; unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3); unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4); unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5); unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6); unsigned long r2 = 0; if (!(kvmppc_get_msr(vcpu) & MSR_SF)) { /* 32 bit mode */ param1 &= 0xffffffff; param2 &= 0xffffffff; param3 &= 0xffffffff; param4 &= 0xffffffff; } switch (nr) { case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE): { #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE) /* Book3S can be little endian, find it out here */ int shared_big_endian = true; if (vcpu->arch.intr_msr & MSR_LE) shared_big_endian = false; if (shared_big_endian != vcpu->arch.shared_big_endian) kvmppc_swab_shared(vcpu); vcpu->arch.shared_big_endian = shared_big_endian; #endif if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) { /* * Older versions of the Linux magic page code had * a bug where they would map their trampoline code * NX. If that's the case, remove !PR NX capability. */ vcpu->arch.disable_kernel_nx = true; kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); } vcpu->arch.magic_page_pa = param1 & ~0xfffULL; vcpu->arch.magic_page_ea = param2 & ~0xfffULL; #ifdef CONFIG_PPC_64K_PAGES /* * Make sure our 4k magic page is in the same window of a 64k * page within the guest and within the host's page. */ if ((vcpu->arch.magic_page_pa & 0xf000) != ((ulong)vcpu->arch.shared & 0xf000)) { void *old_shared = vcpu->arch.shared; ulong shared = (ulong)vcpu->arch.shared; void *new_shared; shared &= PAGE_MASK; shared |= vcpu->arch.magic_page_pa & 0xf000; new_shared = (void*)shared; memcpy(new_shared, old_shared, 0x1000); vcpu->arch.shared = new_shared; } #endif r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7; r = EV_SUCCESS; break; } case KVM_HCALL_TOKEN(KVM_HC_FEATURES): r = EV_SUCCESS; #if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2) r2 |= (1 << KVM_FEATURE_MAGIC_PAGE); #endif /* Second return value is in r4 */ break; case EV_HCALL_TOKEN(EV_IDLE): r = EV_SUCCESS; kvm_vcpu_block(vcpu); clear_bit(KVM_REQ_UNHALT, &vcpu->requests); break; default: r = EV_UNIMPLEMENTED; break; } kvmppc_set_gpr(vcpu, 4, r2); return r; }
/* * Legacy thermal throttling * 1) Avoid NIC destruction due to high temperatures * Chip will identify dangerously high temperatures that can * harm the device and will power down * 2) Avoid the NIC power down due to high temperature * Throttle early enough to lower the power consumption before * drastic steps are needed */ static void iwl_legacy_tt_handler(struct iwl_priv *priv, s32 temp, bool force) { struct iwl_tt_mgmt *tt = &priv->thermal_throttle; enum iwl_tt_state old_state; #ifdef CONFIG_IWLWIFI_DEBUG if ((tt->tt_previous_temp) && (temp > tt->tt_previous_temp) && ((temp - tt->tt_previous_temp) > IWL_TT_INCREASE_MARGIN)) { IWL_DEBUG_TEMP(priv, "Temperature increase %d degree Celsius\n", (temp - tt->tt_previous_temp)); } #endif old_state = tt->state; /* in Celsius */ if (temp >= IWL_MINIMAL_POWER_THRESHOLD) tt->state = IWL_TI_CT_KILL; else if (temp >= IWL_REDUCED_PERFORMANCE_THRESHOLD_2) tt->state = IWL_TI_2; else if (temp >= IWL_REDUCED_PERFORMANCE_THRESHOLD_1) tt->state = IWL_TI_1; else tt->state = IWL_TI_0; #ifdef CONFIG_IWLWIFI_DEBUG tt->tt_previous_temp = temp; #endif /* stop ct_kill_waiting_tm timer */ del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm); if (tt->state != old_state) { switch (tt->state) { case IWL_TI_0: /* * When the system is ready to go back to IWL_TI_0 * we only have to call iwl_power_update_mode() to * do so. */ break; case IWL_TI_1: tt->tt_power_mode = IWL_POWER_INDEX_3; break; case IWL_TI_2: tt->tt_power_mode = IWL_POWER_INDEX_4; break; default: tt->tt_power_mode = IWL_POWER_INDEX_5; break; } mutex_lock(&priv->mutex); if (old_state == IWL_TI_CT_KILL) clear_bit(STATUS_CT_KILL, &priv->status); if (tt->state != IWL_TI_CT_KILL && iwl_power_update_mode(priv, true)) { /* TT state not updated * try again during next temperature read */ if (old_state == IWL_TI_CT_KILL) set_bit(STATUS_CT_KILL, &priv->status); tt->state = old_state; IWL_ERR(priv, "Cannot update power mode, " "TT state not updated\n"); } else { if (tt->state == IWL_TI_CT_KILL) { if (force) { set_bit(STATUS_CT_KILL, &priv->status); iwl_perform_ct_kill_task(priv, true); } else { iwl_prepare_ct_kill_task(priv); tt->state = old_state; } } else if (old_state == IWL_TI_CT_KILL && tt->state != IWL_TI_CT_KILL) iwl_perform_ct_kill_task(priv, false); IWL_DEBUG_TEMP(priv, "Temperature state changed %u\n", tt->state); IWL_DEBUG_TEMP(priv, "Power Index change to %u\n", tt->tt_power_mode); } mutex_unlock(&priv->mutex); } }
int ctp_simulate_ps_operate(void* self, uint32_t command, void* buff_in, int size_in, void* buff_out, int size_out, int* actualout) { int err = 0; int value; hwm_sensor_data* sensor_data; struct ctp_simulate_priv *obj = (struct ctp_simulate_priv *)self; switch (command) { case SENSOR_DELAY: CTP_PS_LOG("SENSOR_DELAY \n"); break; case SENSOR_ENABLE: CTP_PS_LOG("[FT_PS] SENSOR_ENABLE \n"); if((buff_in == NULL) || (size_in < sizeof(int))) { CTP_PS_LOG("Enable sensor parameter error!\n"); err = -EINVAL; } else { value = *(int *)buff_in; if(value) { err = pls_enable(); if(err) { CTP_PS_LOG("enable ps fail: %d\n", err); return -1; } set_bit(CMC_BIT_PS, &obj->enable); } else { err = pls_disable(); if(err) { CTP_PS_LOG("disable ps fail: %d\n", err); return -1; } clear_bit(CMC_BIT_PS, &obj->enable); } } break; case SENSOR_GET_DATA: /* printk("[FT_PS] SENSOR_GET_DATA \n"); */ if((buff_out == NULL) || (size_out < sizeof(hwm_sensor_data))) { CTP_PS_LOG("get sensor data parameter error!\n"); err = -EINVAL; } else { sensor_data = (hwm_sensor_data *)buff_out; CTP_PS_LOG("SENSOR_GET_DATA"); /* mdelay(160); printk("[FT_PS] ps_operate ps data=%d!\n", get_data()); */ sensor_data->values[0] = get_data(); sensor_data->value_divide = 1; sensor_data->status = SENSOR_STATUS_ACCURACY_MEDIUM; } break; default: break; } return err; }
void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb, int handler_status) { struct iwl_rx_packet *pkt = rxb_addr(rxb); u16 sequence = le16_to_cpu(pkt->hdr.sequence); int txq_id = SEQ_TO_QUEUE(sequence); int index = SEQ_TO_INDEX(sequence); int cmd_index; struct iwl_device_cmd *cmd; struct iwl_cmd_meta *meta; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; if (WARN(txq_id != trans_pcie->cmd_queue, "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n", txq_id, trans_pcie->cmd_queue, sequence, trans_pcie->txq[trans_pcie->cmd_queue].q.read_ptr, trans_pcie->txq[trans_pcie->cmd_queue].q.write_ptr)) { iwl_print_hex_error(trans, pkt, 32); return; } spin_lock(&txq->lock); cmd_index = get_cmd_index(&txq->q, index); cmd = txq->cmd[cmd_index]; meta = &txq->meta[cmd_index]; txq->time_stamp = jiffies; iwlagn_unmap_tfd(trans, meta, &txq->tfds[index], DMA_BIDIRECTIONAL); if (meta->flags & CMD_WANT_SKB) { struct page *p = rxb_steal_page(rxb); meta->source->resp_pkt = pkt; meta->source->_rx_page_addr = (unsigned long)page_address(p); meta->source->_rx_page_order = hw_params(trans).rx_page_order; meta->source->handler_status = handler_status; } iwl_hcmd_queue_reclaim(trans, txq_id, index); if (!(meta->flags & CMD_ASYNC)) { if (!test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status)) { IWL_WARN(trans, "HCMD_ACTIVE already clear for command %s\n", get_cmd_string(cmd->hdr.cmd)); } clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status); IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", get_cmd_string(cmd->hdr.cmd)); wake_up(&trans->wait_command_queue); } meta->flags = 0; spin_unlock(&txq->lock); }
static void fimc_is_irq_handler_general(struct fimc_is_dev *dev) { /* Read ISSR10 ~ ISSR15 */ dev->i2h_cmd.cmd = readl(dev->regs + ISSR10); switch (dev->i2h_cmd.cmd) { case IHC_GET_SENSOR_NUMBER: dbg("IHC_GET_SENSOR_NUMBER\n"); fimc_is_hw_get_param(dev, 1); dbg("ISP - FW version - %d\n", dev->i2h_cmd.arg[0]); dev->fw.ver = dev->i2h_cmd.arg[0]; fimc_is_hw_wait_intmsr0_intmsd0(dev); fimc_is_hw_set_sensor_num(dev); break; case IHC_SET_SHOT_MARK: fimc_is_hw_get_param(dev, 3); break; case IHC_SET_FACE_MARK: fimc_is_hw_get_param(dev, 2); break; case IHC_FRAME_DONE: fimc_is_hw_get_param(dev, 2); break; case IHC_NOT_READY: break; case IHC_AA_DONE: fimc_is_hw_get_param(dev, 3); break; #ifdef CONFIG_MACH_STUTTGART case IHC_FLASH_READY: fimc_is_hw_get_param(dev, 2); break; case IHC_ISP_ADJUST_DONE: fimc_is_hw_get_param(dev, 4); break; case IHC_ISP_ISO_DONE: fimc_is_hw_get_param(dev, 4); break; #endif case ISR_DONE: fimc_is_hw_get_param(dev, 3); break; case ISR_NDONE: fimc_is_hw_get_param(dev, 4); break; } /* Just clear the interrupt pending bits. */ fimc_is_fw_clear_irq1(dev, INTR_GENERAL); switch (dev->i2h_cmd.cmd) { case IHC_GET_SENSOR_NUMBER: fimc_is_hw_set_intgr0_gd0(dev); set_bit(IS_ST_FW_LOADED, &dev->state); break; case IHC_SET_SHOT_MARK: break; case IHC_SET_FACE_MARK: dev->fd_header.count = dev->i2h_cmd.arg[0]; dev->fd_header.index = dev->i2h_cmd.arg[1]; { if (dev->faceinfo_array->number < MAX_FRAME_COUNT) { int i = 0; u32 idx; dev->faceinfo_array->faceinfo[dev->faceinfo_array->write].count = dev->fd_header.count; while (i < dev->fd_header.count) { idx = (dev->fd_header.index + i) % MAX_FACE_COUNT; dev->faceinfo_array->faceinfo[dev->faceinfo_array->write].face[i] = dev->is_p_region->face[idx]; i++; } dev->faceinfo_array->write = (dev->faceinfo_array->write + 1) % MAX_FRAME_COUNT; dev->faceinfo_array->number++; } else { printk(KERN_WARNING "faceinfo lost\n"); } } break; case IHC_FRAME_DONE: break; case IHC_AA_DONE: printk("AA_DONE - %d, %d, %d\n", dev->i2h_cmd.arg[0], dev->i2h_cmd.arg[1], dev->i2h_cmd.arg[2]); switch (dev->i2h_cmd.arg[0]) { /* SEARCH: Occurs when search is requested at continuous AF */ case 2: dev->af.af_lost_state = FIMC_IS_AF_SEARCH; wake_up(&dev->aflost_queue); if(dev->af.mode == IS_FOCUS_MODE_TOUCH){ schedule_work(&fimc_is_af_wq); } break; /* INFOCUS: Occurs when focus is found. */ case 3: if (dev->af.af_state == FIMC_IS_AF_RUNNING) dev->af.af_state = FIMC_IS_AF_LOCK; dev->af.af_lock_state = 0x2; dev->af.af_lost_state = FIMC_IS_AF_INFOCUS; wake_up(&dev->aflost_queue); break; /* OUTOFFOCUS: Occurs when focus is not found. */ case 4: if (dev->af.af_state == FIMC_IS_AF_RUNNING) dev->af.af_state = FIMC_IS_AF_LOCK; dev->af.af_lock_state = 0x1; dev->af.af_lost_state = FIMC_IS_AF_OUTOFFOCUS; wake_up(&dev->aflost_queue); break; } break; #ifdef CONFIG_MACH_STUTTGART case IHC_FLASH_READY: set_bit(IS_ST_FLASH_READY, &dev->state); dev->flash.led_on = dev->i2h_cmd.arg[1]; dbg("IS_ST_FLASH_READY : flash_on : %d", dev->flash.led_on); break; case IHC_ISP_ADJUST_DONE: { /* u32 uParam1: ISP_AdjustCommandEnum value (Contrast, Saturation, etc.); u32 uParam2: the actual control value (e.g. -4 ~ +4, -128 ~ +128); u32 uParam3: frame counter when the A5 received the ISP adjust command; u32 uParam4: frame counter when the adjust command was applied.
*/ struct is_adjust_info *infor = NULL; switch (dev->i2h_cmd.arg[0]) { case ISP_ADJUST_COMMAND_MANUAL_ALL: case ISP_ADJUST_COMMAND_AUTO: memset(&dev->adjust, 0, sizeof(struct is_adjust)); break; case ISP_ADJUST_COMMAND_MANUAL_CONTRAST: case ISP_ADJUST_COMMAND_MANUAL_SATURATION: case ISP_ADJUST_COMMAND_MANUAL_SHARPNESS: break; case ISP_ADJUST_COMMAND_MANUAL_EXPOSURE: infor = &dev->adjust.exposure; break; case ISP_ADJUST_COMMAND_MANUAL_BRIGHTNESS: case ISP_ADJUST_COMMAND_MANUAL_HUE: case ISP_ADJUST_COMMAND_MANUAL_HOTPIXEL: case ISP_ADJUST_COMMAND_MANUAL_SHADING: break; default: break; } if (infor) { infor->command = dev->i2h_cmd.arg[0]; infor->frame_start = dev->i2h_cmd.arg[2]; infor->frame_end = dev->i2h_cmd.arg[3]; if (infor->frame_end < 5) infor->old_value = infor->value = 0; infor->old_value = infor->value; infor->value = dev->i2h_cmd.arg[1]; } } /* printk("====>[MMKIM]IHC_ISP_ADJUST_DONE(%d, %d, %d, %d)\n", dev->i2h_cmd.arg[0], dev->i2h_cmd.arg[1], dev->i2h_cmd.arg[2], dev->i2h_cmd.arg[3]); */ break; case IHC_ISP_ISO_DONE: /* printk("====>[MMKIM]IHC_ISP_ISO_DONE(%d, %d, %d, %d)\n", dev->i2h_cmd.arg[0], dev->i2h_cmd.arg[1], dev->i2h_cmd.arg[2], dev->i2h_cmd.arg[3]); */ break; #endif case IHC_NOT_READY: err("Init Sequence Error - IS will be turned off!!"); break; case ISR_DONE: dbg("ISR_DONE - %d\n", dev->i2h_cmd.arg[0]); switch (dev->i2h_cmd.arg[0]) { case HIC_PREVIEW_STILL: case HIC_PREVIEW_VIDEO: case HIC_CAPTURE_STILL: case HIC_CAPTURE_VIDEO: set_bit(IS_ST_CHANGE_MODE, &dev->state); /* Get CAC margin */ dev->sensor.offset_x = dev->i2h_cmd.arg[1]; dev->sensor.offset_y = dev->i2h_cmd.arg[2]; break; case HIC_STREAM_ON: clear_bit(IS_ST_STREAM_OFF, &dev->state); set_bit(IS_ST_STREAM_ON, &dev->state); break; case HIC_STREAM_OFF: clear_bit(IS_ST_STREAM_ON, &dev->state); set_bit(IS_ST_STREAM_OFF, &dev->state); break; case HIC_SET_PARAMETER: dev->p_region_index1 = 0; dev->p_region_index2 = 0; atomic_set(&dev->p_region_num, 0); set_bit(IS_ST_BLOCK_CMD_CLEARED, &dev->state); if (dev->af.af_state == FIMC_IS_AF_SETCONFIG) dev->af.af_state = FIMC_IS_AF_RUNNING; else if (dev->af.af_state == FIMC_IS_AF_ABORT) dev->af.af_state = FIMC_IS_AF_IDLE; break; case HIC_GET_PARAMETER: break; case HIC_SET_TUNE: break; case HIC_GET_STATUS: break; case HIC_OPEN_SENSOR: set_bit(IS_ST_OPEN_SENSOR, &dev->state); printk(KERN_INFO "FIMC-IS Lane= %d, Settle line= %d\n", dev->i2h_cmd.arg[2], dev->i2h_cmd.arg[1]); break; case HIC_CLOSE_SENSOR: clear_bit(IS_ST_OPEN_SENSOR, &dev->state); dev->sensor.id = 0; break; case HIC_MSG_TEST: dbg("Config MSG level was done\n"); break; case HIC_POWER_DOWN: set_bit(IS_PWR_SUB_IP_POWER_OFF, &dev->power); break; case HIC_GET_SET_FILE_ADDR: dev->setfile.base = dev->i2h_cmd.arg[1]; set_bit(IS_ST_SETFILE_LOADED, &dev->state); break; case HIC_LOAD_SET_FILE: set_bit(IS_ST_SETFILE_LOADED, &dev->state); break; } break; case ISR_NDONE: err("ISR_NDONE - %d: 0x%08x\n", dev->i2h_cmd.arg[0], dev->i2h_cmd.arg[1]); fimc_is_print_err_number(dev->i2h_cmd.arg[1]); switch (dev->i2h_cmd.arg[1]) { case IS_ERROR_SET_PARAMETER: fimc_is_mem_cache_inv((void *)dev->is_p_region, IS_PARAM_SIZE); fimc_is_param_err_checker(dev); break; } } }
static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int cmd_idx; int ret; IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", get_cmd_string(cmd->id)); if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) { IWL_ERR(trans, "Command %s failed: FW Error\n", get_cmd_string(cmd->id)); return -EIO; } if (WARN_ON(test_and_set_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status))) { IWL_ERR(trans, "Command %s: a command is already active!\n", get_cmd_string(cmd->id)); return -EIO; } IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", get_cmd_string(cmd->id)); cmd_idx = iwl_enqueue_hcmd(trans, cmd); if (cmd_idx < 0) { ret = cmd_idx; clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status); IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n", get_cmd_string(cmd->id), ret); return ret; } ret = wait_event_timeout(trans->wait_command_queue, !test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status), HOST_COMPLETE_TIMEOUT); if (!ret) { if (test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status)) { struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; struct iwl_queue *q = &txq->q; IWL_ERR(trans, "Error sending %s: time out after %dms.\n", get_cmd_string(cmd->id), jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n", q->read_ptr, q->write_ptr); clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status); IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", get_cmd_string(cmd->id)); ret = -ETIMEDOUT; goto cancel; } } if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) { IWL_ERR(trans, "Error: Response NULL in '%s'\n", get_cmd_string(cmd->id)); ret = -EIO; goto cancel; } return 0; cancel: if (cmd->flags & CMD_WANT_SKB) { trans_pcie->txq[trans_pcie->cmd_queue].meta[cmd_idx].flags &= ~CMD_WANT_SKB; } if (cmd->resp_pkt) { iwl_free_resp(cmd); cmd->resp_pkt = NULL; } return ret; }
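/*
 * Both iwl_send_cmd_sync() variants above implement the same handshake:
 * the submitter sets an ACTIVE bit, enqueues the command, and sleeps until
 * the completion path (see iwl_tx_cmd_complete() above) clears the bit,
 * with a timeout as a backstop.  A minimal pthread sketch of that
 * protocol; every name in it is illustrative, not from the driver.
 */
#include <pthread.h>
#include <stdbool.h>
#include <time.h>

static pthread_mutex_t cmd_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cmd_done = PTHREAD_COND_INITIALIZER;
static bool hcmd_active;

static int send_cmd_sync(void (*enqueue)(void), int timeout_sec)
{
	struct timespec deadline;
	int rc = 0;

	pthread_mutex_lock(&cmd_lock);
	hcmd_active = true;		/* set_bit(STATUS_HCMD_ACTIVE) */
	enqueue();			/* hand the command to the "hardware" */

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += timeout_sec;
	while (hcmd_active && rc == 0)	/* wait_event_timeout() */
		rc = pthread_cond_timedwait(&cmd_done, &cmd_lock, &deadline);

	if (hcmd_active) {		/* timed out: clean up ourselves */
		hcmd_active = false;	/* clear_bit(), as the drivers do */
		rc = -1;		/* stands in for -ETIMEDOUT */
	} else {
		rc = 0;
	}
	pthread_mutex_unlock(&cmd_lock);
	return rc;
}

/* completion side, the analogue of iwl_tx_cmd_complete() */
static void cmd_complete(void)
{
	pthread_mutex_lock(&cmd_lock);
	hcmd_active = false;		/* clear_bit(STATUS_HCMD_ACTIVE) */
	pthread_cond_signal(&cmd_done);	/* wake_up(&wait_command_queue) */
	pthread_mutex_unlock(&cmd_lock);
}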
static void stm_channel_free(uint32_t ch) { struct stm_drvdata *drvdata = stmdrvdata; clear_bit(ch, drvdata->chs.bitmap); }
void adc_sleep(void) { /* Wait for the conversion to complete */ while (test_bit(ADCSRA, ADSC)); /* Turn off the ADC */ clear_bit(ADCSRA, ADEN); }
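/*
 * A plausible counterpart to adc_sleep(), assuming the same register-bit
 * helpers and AVR register names (ADCSRA, ADEN, ADSC, ADC); this is a
 * sketch, not code from the same project.  On AVR parts the first
 * conversion after re-enabling the ADC takes extra cycles, so it is
 * started and discarded here.
 */
void adc_wake(void)
{
	/* Power the ADC back up */
	set_bit(ADCSRA, ADEN);
	/* Run one throwaway conversion to re-initialize the ADC */
	set_bit(ADCSRA, ADSC);
	while (test_bit(ADCSRA, ADSC));
	(void)ADC;	/* discard the first result */
}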
/* Mark device node number vdev->num as unused */ static inline void devnode_clear(struct video_device *vdev) { clear_bit(vdev->num, devnode_bits(vdev->vfl_type)); }
static long pcwd_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int rv; int status; int temperature; int new_heartbeat; int __user *argp = (int __user *)arg; static struct watchdog_info ident = { .options = WDIOF_OVERHEAT | WDIOF_CARDRESET | WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE, .firmware_version = 1, .identity = "PCWD", }; switch (cmd) { case WDIOC_GETSUPPORT: if (copy_to_user(argp, &ident, sizeof(ident))) return -EFAULT; return 0; case WDIOC_GETSTATUS: pcwd_get_status(&status); return put_user(status, argp); case WDIOC_GETBOOTSTATUS: return put_user(pcwd_private.boot_status, argp); case WDIOC_GETTEMP: if (pcwd_get_temperature(&temperature)) return -EFAULT; return put_user(temperature, argp); case WDIOC_SETOPTIONS: if (pcwd_private.revision == PCWD_REVISION_C) { if (get_user(rv, argp)) return -EFAULT; if (rv & WDIOS_DISABLECARD) { status = pcwd_stop(); if (status < 0) return status; } if (rv & WDIOS_ENABLECARD) { status = pcwd_start(); if (status < 0) return status; } if (rv & WDIOS_TEMPPANIC) temp_panic = 1; } return -EINVAL; case WDIOC_KEEPALIVE: pcwd_keepalive(); return 0; case WDIOC_SETTIMEOUT: if (get_user(new_heartbeat, argp)) return -EFAULT; if (pcwd_set_heartbeat(new_heartbeat)) return -EINVAL; pcwd_keepalive(); /* Fall */ case WDIOC_GETTIMEOUT: return put_user(heartbeat, argp); default: return -ENOTTY; } return 0; } static ssize_t pcwd_write(struct file *file, const char __user *buf, size_t len, loff_t *ppos) { if (len) { if (!nowayout) { size_t i; /* In case it was set long ago */ expect_close = 0; for (i = 0; i != len; i++) { char c; if (get_user(c, buf + i)) return -EFAULT; if (c == 'V') expect_close = 42; } } pcwd_keepalive(); } return len; } static int pcwd_open(struct inode *inode, struct file *file) { if (test_and_set_bit(0, &open_allowed)) return -EBUSY; if (nowayout) __module_get(THIS_MODULE); /* Activate */ pcwd_start(); pcwd_keepalive(); return nonseekable_open(inode, file); } static int pcwd_close(struct inode *inode, struct file *file) { if (expect_close == 42) pcwd_stop(); else { printk(KERN_CRIT PFX "Unexpected close, not stopping watchdog!\n"); pcwd_keepalive(); } expect_close = 0; clear_bit(0, &open_allowed); return 0; } /* * /dev/temperature handling */ static ssize_t pcwd_temp_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { int temperature; if (pcwd_get_temperature(&temperature)) return -EFAULT; if (copy_to_user(buf, &temperature, 1)) return -EFAULT; return 1; }
/*** * rt_socket_common_ioctl */ int rt_socket_common_ioctl(struct rtdm_dev_context *context, rtdm_user_info_t *user_info, int request, void *arg) { struct rtsocket *sock = (struct rtsocket *)&context->dev_private; int ret = 0; struct rtnet_callback *callback = arg; unsigned int rtskbs; unsigned long flags; switch (request) { case RTNET_RTIOC_XMITPARAMS: sock->priority = *(unsigned int *)arg; break; case RTNET_RTIOC_TIMEOUT: sock->timeout = *(nanosecs_t *)arg; break; case RTNET_RTIOC_CALLBACK: if (user_info) return -EACCES; rtos_spin_lock_irqsave(&sock->param_lock, flags); sock->callback_func = callback->func; sock->callback_arg = callback->arg; rtos_spin_unlock_irqrestore(&sock->param_lock, flags); break; case RTNET_RTIOC_NONBLOCK: if (*(unsigned int *)arg != 0) set_bit(RT_SOCK_NONBLOCK, &context->context_flags); else clear_bit(RT_SOCK_NONBLOCK, &context->context_flags); break; case RTNET_RTIOC_EXTPOOL: rtskbs = *(unsigned int *)arg; rtos_spin_lock_irqsave(&sock->param_lock, flags); if (test_bit(SKB_POOL_CLOSED, &context->context_flags)) { rtos_spin_unlock_irqrestore(&sock->param_lock, flags); return -EBADF; } atomic_add(rtskbs, &sock->pool_size); rtos_spin_unlock_irqrestore(&sock->param_lock, flags); if (test_bit(RTDM_CREATED_IN_NRT, &context->context_flags)) { if (rtdm_in_rt_context()) return -EACCES; ret = rtskb_pool_extend(&sock->skb_pool, rtskbs); } else ret = rtskb_pool_extend_rt(&sock->skb_pool, rtskbs); atomic_sub(rtskbs-ret, &sock->pool_size); break; case RTNET_RTIOC_SHRPOOL: rtskbs = *(unsigned int *)arg; rtos_spin_lock_irqsave(&sock->param_lock, flags); if (test_bit(SKB_POOL_CLOSED, &context->context_flags)) { rtos_spin_unlock_irqrestore(&sock->param_lock, flags); return -EBADF; } atomic_sub(rtskbs, &sock->pool_size); rtos_spin_unlock_irqrestore(&sock->param_lock, flags); if (test_bit(RTDM_CREATED_IN_NRT, &context->context_flags)) { if (rtdm_in_rt_context()) return -EACCES; ret = rtskb_pool_shrink(&sock->skb_pool, *(unsigned int *)arg); } else ret = rtskb_pool_shrink_rt(&sock->skb_pool, *(unsigned int *)arg); atomic_add(rtskbs-ret, &sock->pool_size); break; default: ret = -EOPNOTSUPP; break; } return ret; }
/* work that cannot be done in interrupt context uses keventd. * * NOTE: with 2.5 we could do more of this using completion callbacks, * especially now that control transfers can be queued. */ static void kevent (struct work_struct *work) { struct usbnet *dev = container_of(work, struct usbnet, kevent); int status; /* usb_clear_halt() needs a thread context */ if (test_bit (EVENT_TX_HALT, &dev->flags)) { unlink_urbs (dev, &dev->txq); status = usb_autopm_get_interface(dev->intf); if (status < 0) goto fail_pipe; status = usb_clear_halt (dev->udev, dev->out); usb_autopm_put_interface(dev->intf); if (status < 0 && status != -EPIPE && status != -ESHUTDOWN) { if (netif_msg_tx_err (dev)) fail_pipe: netdev_err(dev->net, "can't clear tx halt, status %d\n", status); } else { clear_bit (EVENT_TX_HALT, &dev->flags); if (status != -ESHUTDOWN) netif_wake_queue (dev->net); } } if (test_bit (EVENT_RX_HALT, &dev->flags)) { unlink_urbs (dev, &dev->rxq); status = usb_autopm_get_interface(dev->intf); if (status < 0) goto fail_halt; status = usb_clear_halt (dev->udev, dev->in); usb_autopm_put_interface(dev->intf); if (status < 0 && status != -EPIPE && status != -ESHUTDOWN) { if (netif_msg_rx_err (dev)) fail_halt: netdev_err(dev->net, "can't clear rx halt, status %d\n", status); } else { clear_bit (EVENT_RX_HALT, &dev->flags); tasklet_schedule (&dev->bh); } } /* tasklet could resubmit itself forever if memory is tight */ if (test_bit (EVENT_RX_MEMORY, &dev->flags)) { struct urb *urb = NULL; int resched = 1; if (netif_running (dev->net)) urb = usb_alloc_urb (0, GFP_KERNEL); else clear_bit (EVENT_RX_MEMORY, &dev->flags); if (urb != NULL) { clear_bit (EVENT_RX_MEMORY, &dev->flags); status = usb_autopm_get_interface(dev->intf); if (status < 0) { usb_free_urb(urb); goto fail_lowmem; } if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK) resched = 0; usb_autopm_put_interface(dev->intf); fail_lowmem: if (resched) tasklet_schedule (&dev->bh); } } if (test_bit (EVENT_LINK_RESET, &dev->flags)) { struct driver_info *info = dev->driver_info; int retval = 0; clear_bit (EVENT_LINK_RESET, &dev->flags); status = usb_autopm_get_interface(dev->intf); if (status < 0) goto skip_reset; if(info->link_reset && (retval = info->link_reset(dev)) < 0) { usb_autopm_put_interface(dev->intf); skip_reset: netdev_info(dev->net, "link reset failed (%d) usbnet usb-%s-%s, %s\n", retval, dev->udev->bus->bus_name, dev->udev->devpath, info->description); } else { usb_autopm_put_interface(dev->intf); } } if (dev->flags) netdev_dbg(dev->net, "kevent done, flags = 0x%lx\n", dev->flags); }
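/*
 * kevent() above is a flag-driven bottom half: urgent contexts set an
 * EVENT_* bit and schedule the work item, and the worker clears each bit
 * only once the slow recovery step has succeeded, so a failed attempt is
 * retried on the next kick.  A compressed, single-threaded sketch of that
 * control flow (all names illustrative):
 */
#include <stdio.h>

enum { EVENT_TX_HALT, EVENT_RX_HALT, EVENT_LINK_RESET };

static unsigned long pending_events;

/* "interrupt" side: record the event; a driver would also kick the worker */
static void raise_event(int bit)
{
	pending_events |= 1UL << bit;
}

/* stand-in for the slow fix-up (usb_clear_halt() etc.); always succeeds */
static int recover(int bit)
{
	printf("recovering event %d\n", bit);
	return 0;
}

static void worker(void)
{
	for (int bit = EVENT_TX_HALT; bit <= EVENT_LINK_RESET; bit++) {
		unsigned long mask = 1UL << bit;
		/* clear the bit only after recovery succeeded, just as
		 * kevent() leaves EVENT_TX_HALT set when usb_clear_halt()
		 * fails */
		if ((pending_events & mask) && recover(bit) == 0)
			pending_events &= ~mask;
	}
}

int main(void)
{
	raise_event(EVENT_RX_HALT);
	worker();
	return 0;
}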
/* * Set attributes, and at the same time refresh them. * * Truncation is slightly complicated, because the 'truncate' request * may fail, in which case we don't want to touch the mapping. * vmtruncate() doesn't allow for this case, so do the rlimit checking * and the actual truncation by hand. */ static int fuse_do_setattr(struct dentry *entry, struct iattr *attr, struct file *file) { struct inode *inode = entry->d_inode; struct fuse_conn *fc = get_fuse_conn(inode); struct fuse_inode *fi = get_fuse_inode(inode); struct fuse_req *req; struct fuse_setattr_in inarg; struct fuse_attr_out outarg; bool is_truncate = false; loff_t oldsize; int err; if (!fuse_allow_task(fc, current)) return -EACCES; if (!(fc->flags & FUSE_DEFAULT_PERMISSIONS)) attr->ia_valid |= ATTR_FORCE; err = inode_change_ok(inode, attr); if (err) return err; if (attr->ia_valid & ATTR_OPEN) { if (fc->atomic_o_trunc) return 0; file = NULL; } if (attr->ia_valid & ATTR_SIZE) is_truncate = true; req = fuse_get_req(fc); if (IS_ERR(req)) return PTR_ERR(req); if (is_truncate) { fuse_set_nowrite(inode); set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state); } memset(&inarg, 0, sizeof(inarg)); memset(&outarg, 0, sizeof(outarg)); iattr_to_fattr(attr, &inarg); if (file) { struct fuse_file *ff = file->private_data; inarg.valid |= FATTR_FH; inarg.fh = ff->fh; } if (attr->ia_valid & ATTR_SIZE) { /* For mandatory locking in truncate */ inarg.valid |= FATTR_LOCKOWNER; inarg.lock_owner = fuse_lock_owner_id(fc, current->files); } req->in.h.opcode = FUSE_SETATTR; req->in.h.nodeid = get_node_id(inode); req->in.numargs = 1; req->in.args[0].size = sizeof(inarg); req->in.args[0].value = &inarg; req->out.numargs = 1; if (fc->minor < 9) req->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE; else req->out.args[0].size = sizeof(outarg); req->out.args[0].value = &outarg; fuse_request_send(fc, req); err = req->out.h.error; fuse_put_request(fc, req); if (err) { if (err == -EINTR) fuse_invalidate_attr(inode); goto error; } if ((inode->i_mode ^ outarg.attr.mode) & S_IFMT) { make_bad_inode(inode); err = -EIO; goto error; } spin_lock(&fc->lock); fuse_change_attributes_common(inode, &outarg.attr, attr_timeout(&outarg)); oldsize = inode->i_size; i_size_write(inode, outarg.attr.size); if (is_truncate) { /* NOTE: this may release/reacquire fc->lock */ __fuse_release_nowrite(inode); } spin_unlock(&fc->lock); /* * Only call invalidate_inode_pages2() after removing * FUSE_NOWRITE, otherwise fuse_launder_page() would deadlock. */ if (S_ISREG(inode->i_mode) && oldsize != outarg.attr.size) { truncate_pagecache(inode, oldsize, outarg.attr.size); invalidate_inode_pages2(inode->i_mapping); } clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state); return 0; error: if (is_truncate) fuse_release_nowrite(inode); clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state); return err; }
static inline void idset_del(struct idset *set, int ssid, int id) { clear_bit(ssid * set->num_id + id, set->bitmap); }
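/*
 * idset_del() above addresses a two-dimensional (ssid, id) space as a
 * flat bitmap, with bit index ssid * num_id + id.  The same layout in
 * plain C; the sizes and the add/contains helpers are illustrative:
 */
#include <limits.h>
#include <stdbool.h>

#define NUM_SSID  4
#define NUM_ID    64
#define LONG_BITS (sizeof(unsigned long) * CHAR_BIT)

static unsigned long id_bitmap[(NUM_SSID * NUM_ID + LONG_BITS - 1) / LONG_BITS];

static void idset_add(int ssid, int id)
{
	int bit = ssid * NUM_ID + id;	/* flatten (ssid, id) to one index */
	id_bitmap[bit / LONG_BITS] |= 1UL << (bit % LONG_BITS);
}

static void idset_remove(int ssid, int id)
{
	int bit = ssid * NUM_ID + id;	/* same flattening as idset_del() */
	id_bitmap[bit / LONG_BITS] &= ~(1UL << (bit % LONG_BITS));
}

static bool idset_contains(int ssid, int id)
{
	int bit = ssid * NUM_ID + id;
	return id_bitmap[bit / LONG_BITS] & (1UL << (bit % LONG_BITS));
}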
static long asr_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { static const struct watchdog_info ident = { .options = WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, .identity = "IBM ASR", }; void __user *argp = (void __user *)arg; int __user *p = argp; int heartbeat; switch (cmd) { case WDIOC_GETSUPPORT: return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0; case WDIOC_GETSTATUS: case WDIOC_GETBOOTSTATUS: return put_user(0, p); case WDIOC_SETOPTIONS: { int new_options, retval = -EINVAL; if (get_user(new_options, p)) return -EFAULT; if (new_options & WDIOS_DISABLECARD) { asr_disable(); retval = 0; } if (new_options & WDIOS_ENABLECARD) { asr_enable(); asr_toggle(); retval = 0; } return retval; } case WDIOC_KEEPALIVE: asr_toggle(); return 0; /* the timeout is fixed in hardware, so there is no WDIOC_SETTIMEOUT; WDIOC_GETTIMEOUT reports the fixed 256-second period */ case WDIOC_GETTIMEOUT: heartbeat = 256; return put_user(heartbeat, p); default: return -ENOTTY; } } static int asr_open(struct inode *inode, struct file *file) { if (test_and_set_bit(0, &asr_is_open)) return -EBUSY; asr_toggle(); asr_enable(); return nonseekable_open(inode, file); } static int asr_release(struct inode *inode, struct file *file) { if (asr_expect_close == 42) asr_disable(); else { pr_crit("unexpected close, not stopping watchdog!\n"); asr_toggle(); } clear_bit(0, &asr_is_open); asr_expect_close = 0; return 0; } static const struct file_operations asr_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .write = asr_write, .unlocked_ioctl = asr_ioctl, .open = asr_open, .release = asr_release, }; static struct miscdevice asr_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &asr_fops, }; struct ibmasr_id { const char *desc; int type; }; static struct ibmasr_id __initdata ibmasr_id_table[] = { { "IBM Automatic Server Restart - eserver xSeries 220", ASMTYPE_TOPAZ }, { "IBM Automatic Server Restart - Machine Type 8673", ASMTYPE_PEARL }, { "IBM Automatic Server Restart - Machine Type 8480", ASMTYPE_JASPER }, { "IBM Automatic Server Restart - Machine Type 8482", ASMTYPE_JUNIPER }, { "IBM Automatic Server Restart - Machine Type 8648", ASMTYPE_SPRUCE }, { NULL } }; static int __init ibmasr_init(void) { struct ibmasr_id *id; int rc; for (id = ibmasr_id_table; id->desc; id++) { if (dmi_find_device(DMI_DEV_TYPE_OTHER, id->desc, NULL)) { asr_type = id->type; break; } } if (!asr_type) return -ENODEV; rc = asr_get_base_address(); if (rc) return rc; rc = misc_register(&asr_miscdev); if (rc < 0) { release_region(asr_base, asr_length); pr_err("failed to register misc device\n"); return rc; } return 0; } static void __exit ibmasr_exit(void) { if (!nowayout) asr_disable(); misc_deregister(&asr_miscdev); release_region(asr_base, asr_length); } module_init(ibmasr_init); module_exit(ibmasr_exit); module_param(nowayout, bool, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); MODULE_DESCRIPTION("IBM Automatic Server Restart driver"); MODULE_AUTHOR("Andrey Panin"); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
void rt2x00lib_dmadone(struct queue_entry *entry) { set_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags); clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags); rt2x00queue_index_inc(entry, Q_INDEX_DMA_DONE); }
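/*
 * rt2x00lib_dmadone() sets the "status pending" bit before clearing the
 * "device owns this entry" bit, presumably so that a concurrent observer
 * never sees an entry that is neither device-owned nor pending.  The same
 * two-step hand-off with C11 atomics; the flag layout and names are
 * illustrative:
 */
#include <stdatomic.h>

#define ENTRY_OWNER_DEVICE_DATA   (1u << 0)
#define ENTRY_DATA_STATUS_PENDING (1u << 1)

static _Atomic unsigned int entry_flags = ENTRY_OWNER_DEVICE_DATA;

static void dma_done(void)
{
	/* publish "pending" first... */
	atomic_fetch_or_explicit(&entry_flags, ENTRY_DATA_STATUS_PENDING,
				 memory_order_release);
	/* ...then give up device ownership */
	atomic_fetch_and_explicit(&entry_flags, ~ENTRY_OWNER_DEVICE_DATA,
				  memory_order_release);
}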