/* Recursively free the tree: post-order, children before the node itself. */
void HuffmanCodec::__release(HuffmanNode *root)
{
    if (!root)
        return;
    __release(root->getLeft());
    __release(root->getRight());
    delete root;
}
/*
 * It works on the following logic:
 *
 * For enabling clock, enable = 1
 *   set2dis = 1 -> clear bit -> set = 0
 *   set2dis = 0 -> set bit   -> set = 1
 *
 * For disabling clock, enable = 0
 *   set2dis = 1 -> set bit   -> set = 1
 *   set2dis = 0 -> clear bit -> set = 0
 *
 * So the result is always: enable XOR set2dis.
 */
static void clk_gate_endisable(struct clk_hw *hw, int enable)
{
        struct clk_gate *gate = to_clk_gate(hw);
        int set = gate->flags & CLK_GATE_SET_TO_DISABLE ? 1 : 0;
        unsigned long uninitialized_var(flags);
        u32 reg;

        set ^= enable;

        if (gate->lock)
                spin_lock_irqsave(gate->lock, flags);
        else
                __acquire(gate->lock);

        if (gate->flags & CLK_GATE_HIWORD_MASK) {
                reg = BIT(gate->bit_idx + 16);
                if (set)
                        reg |= BIT(gate->bit_idx);
        } else {
                reg = clk_readl(gate->reg);

                if (set)
                        reg |= BIT(gate->bit_idx);
                else
                        reg &= ~BIT(gate->bit_idx);
        }

        clk_writel(reg, gate->reg);

        if (gate->lock)
                spin_unlock_irqrestore(gate->lock, flags);
        else
                __release(gate->lock);
}
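/*
 * A minimal sketch (hypothetical helper, not part of the driver above) of
 * the "hiword mask" register convention behind CLK_GATE_HIWORD_MASK: the
 * upper 16 bits of the register select which of the lower 16 bits the
 * write may change, so the gate bit can be updated without the
 * read-modify-write cycle of the else-branch. BIT() is the kernel macro.
 */
static u32 hiword_gate_val(unsigned int bit_idx, int set)
{
        u32 reg = BIT(bit_idx + 16);    /* write-enable for bit_idx */

        if (set)
                reg |= BIT(bit_idx);    /* desired new value of the bit */

        /* bits whose write-enable is 0 keep their old value in hardware */
        return reg;
}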
void lkdtm_SPINLOCKUP(void)
{
        /* Must be called twice to trigger. */
        spin_lock(&lock_me_up);
        /* Let sparse know we intended to exit holding the lock. */
        __release(&lock_me_up);
}
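/*
 * Context for the annotation above: under sparse (__CHECKER__),
 * __acquire()/__release() adjust the checker's lock-context counter and
 * compile to nothing with a real compiler, roughly as defined in the
 * kernel's include/linux/compiler.h of this era:
 */
#ifdef __CHECKER__
# define __acquire(x)           __context__(x, 1)
# define __release(x)           __context__(x, -1)
# define __cond_lock(x, c)      ((c) ? ({ __acquire(x); 1; }) : 0)
#else
# define __acquire(x)           (void)0
# define __release(x)           (void)0
# define __cond_lock(x, c)      (c)
#endif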
static int mpll_set_rate(struct clk_hw *hw,
                         unsigned long rate,
                         unsigned long parent_rate)
{
        struct clk_regmap *clk = to_clk_regmap(hw);
        struct meson_clk_mpll_data *mpll = meson_clk_mpll_data(clk);
        unsigned int sdm, n2;
        unsigned long flags = 0;

        params_from_rate(rate, parent_rate, &sdm, &n2, mpll->flags);

        if (mpll->lock)
                spin_lock_irqsave(mpll->lock, flags);
        else
                __acquire(mpll->lock);

        /* Set the fractional part */
        meson_parm_write(clk->map, &mpll->sdm, sdm);

        /* Set the integer divider part */
        meson_parm_write(clk->map, &mpll->n2, n2);

        if (mpll->lock)
                spin_unlock_irqrestore(mpll->lock, flags);
        else
                __release(mpll->lock);

        return 0;
}
static int clk_fd_set_rate(struct clk_hw *hw, unsigned long rate,
                           unsigned long parent_rate)
{
        struct clk_fractional_divider *fd = to_clk_fd(hw);
        unsigned long flags = 0;
        unsigned long m, n;
        u32 val;

        rational_best_approximation(rate, parent_rate,
                        GENMASK(fd->mwidth - 1, 0), GENMASK(fd->nwidth - 1, 0),
                        &m, &n);

        if (fd->lock)
                spin_lock_irqsave(fd->lock, flags);
#if 0
        else
                __acquire(fd->lock);
#endif

        val = clk_readl(fd->reg);
        val &= ~(fd->mmask | fd->nmask);
        val |= (m << fd->mshift) | (n << fd->nshift);
        clk_writel(val, fd->reg);

        if (fd->lock)
                spin_unlock_irqrestore(fd->lock, flags);
#if 0
        else
                __release(fd->lock);
#endif

        return 0;
}
static unsigned long clk_fd_recalc_rate(struct clk_hw *hw,
                                        unsigned long parent_rate)
{
        struct clk_fractional_divider *fd = to_clk_fd(hw);
        unsigned long flags = 0;
        u32 val, m, n;
        u64 ret;

        if (fd->lock)
                spin_lock_irqsave(fd->lock, flags);
        else
                __acquire(fd->lock);

        val = clk_readl(fd->reg);

        if (fd->lock)
                spin_unlock_irqrestore(fd->lock, flags);
        else
                __release(fd->lock);

        m = (val & fd->mmask) >> fd->mshift;
        n = (val & fd->nmask) >> fd->nshift;

        if (!n || !m)
                return parent_rate;

        ret = (u64)parent_rate * m;
        do_div(ret, n);

        return ret;
}
static int clk_fd_set_rate(struct clk_hw *hw, unsigned long rate,
                           unsigned long parent_rate)
{
        struct clk_fractional_divider *fd = to_clk_fd(hw);
        unsigned long flags = 0;
        unsigned long div;
        unsigned n, m;
        u32 val;

        div = gcd(parent_rate, rate);
        m = rate / div;
        n = parent_rate / div;

        if (fd->lock)
                spin_lock_irqsave(fd->lock, flags);
        else
                __acquire(fd->lock);

        val = clk_readl(fd->reg);
        val &= ~(fd->mmask | fd->nmask);
        val |= (m << fd->mshift) | (n << fd->nshift);
        clk_writel(val, fd->reg);

        if (fd->lock)
                spin_unlock_irqrestore(fd->lock, flags);
        else
                __release(fd->lock);

        return 0;
}
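/*
 * Standalone, illustrative round trip of the two directions above, for a
 * hypothetical 19.2 MHz parent and an 8 MHz target: gcd() reduces
 * rate/parent to m/n exactly (set_rate direction), and parent * m / n
 * recovers the rate (recalc_rate direction, do_div() in the kernel).
 * Values and helper names here are examples only.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned long gcd_ul(unsigned long a, unsigned long b)
{
        while (b) {
                unsigned long t = a % b;
                a = b;
                b = t;
        }
        return a;
}

int main(void)
{
        unsigned long parent = 19200000, rate = 8000000;

        /* set_rate direction: reduce rate/parent to m/n */
        unsigned long div = gcd_ul(parent, rate);
        unsigned long m = rate / div;    /* 5  */
        unsigned long n = parent / div;  /* 12 */

        /* recalc_rate direction: rate = parent * m / n */
        uint64_t back = (uint64_t)parent * m / n;

        printf("m=%lu n=%lu rate=%llu\n", m, n,
               (unsigned long long)back);  /* m=5 n=12 rate=8000000 */
        return 0;
}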
static int clk_fd_set_rate(struct clk_hw *hw, unsigned long rate,
                           unsigned long parent_rate)
{
        struct clk_fractional_divider *fd = to_clk_fd(hw);
        unsigned long flags = 0;
        unsigned long m, n;
        u32 val;

        rational_best_approximation(rate, parent_rate,
                        GENMASK(fd->mwidth - 1, 0), GENMASK(fd->nwidth - 1, 0),
                        &m, &n);

        if (fd->flags & CLK_FRAC_DIVIDER_ZERO_BASED) {
                m--;
                n--;
        }

        if (fd->lock)
                spin_lock_irqsave(fd->lock, flags);
        else
                __acquire(fd->lock);

        val = clk_fd_readl(fd);
        val &= ~(fd->mmask | fd->nmask);
        val |= (m << fd->mshift) | (n << fd->nshift);
        clk_fd_writel(fd, val);

        if (fd->lock)
                spin_unlock_irqrestore(fd->lock, flags);
        else
                __release(fd->lock);

        return 0;
}
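/*
 * A minimal sketch of what rational_best_approximation() does for the
 * snippet above: approximate num/den by m/n with both limited to the
 * register field widths, walking continued-fraction convergents. The
 * kernel's lib/rational.c is more thorough (it also considers
 * semiconvergents); this simplified standalone version is illustrative
 * only, and best_ratio() is a hypothetical name.
 */
#include <stdio.h>

static void best_ratio(unsigned long num, unsigned long den,
                       unsigned long max_num, unsigned long max_den,
                       unsigned long *best_num, unsigned long *best_den)
{
        /* convergent recurrence: p(k) = a(k)*p(k-1) + p(k-2) */
        unsigned long p0 = 0, q0 = 1;   /* p(-2)/q(-2) */
        unsigned long p1 = 1, q1 = 0;   /* p(-1)/q(-1) */

        while (den) {
                unsigned long a = num / den;
                unsigned long r = num % den;
                unsigned long p2 = a * p1 + p0;
                unsigned long q2 = a * q1 + q0;

                if (p2 > max_num || q2 > max_den)
                        break;          /* next convergent no longer fits */

                p0 = p1; q0 = q1;
                p1 = p2; q1 = q2;
                num = den;
                den = r;
        }
        /* q1 == 0 only if even the integer part overflows the field */
        *best_num = q1 ? p1 : max_num;
        *best_den = q1 ? q1 : 1;
}

int main(void)
{
        unsigned long m, n;

        /* approximate 3.14159265 with 8-bit numerator/denominator */
        best_ratio(314159265, 100000000, 255, 255, &m, &n);
        printf("%lu/%lu\n", m, n);      /* 355/113 */
        return 0;
}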
static void *mevent_start_base_entry(void *arg)
{
        int rv;
        struct timespec ts;
        struct queue_entry *q;
        struct event_entry *e = (struct event_entry *)arg;
        int *mypos = _tls_get_mypos();

        *mypos = e->cur_thread;

        mtc_dbg("I'm %s %dnd thread", e->name, *mypos);

        for (;;) {
                /* Condition waits are specified with absolute timeouts; see
                 * pthread_cond_timedwait()'s SUSv3 specification for more
                 * information. We need to recalculate it each time.
                 * We sleep for 1 sec. There's no real need for it to be too
                 * fast (it's only used so that stop detection doesn't take
                 * long), but we don't want it to be too slow either. */
                mutil_utc_time(&ts);
                ts.tv_sec += 1;

                rv = 0;
                queue_lock(e->op_queue[*mypos]);
                while (queue_isempty(e->op_queue[*mypos]) && rv == 0) {
                        rv = queue_timedwait(e->op_queue[*mypos], &ts);
                }
                if (rv != 0 && rv != ETIMEDOUT) {
                        errlog("Error in queue_timedwait()");
                        /* When the timedwait fails the lock is released, so
                         * we need to properly annotate this case. */
                        __release(e->op_queue[*mypos]->lock);
                        continue;
                }

                q = queue_get(e->op_queue[*mypos]);
                queue_unlock(e->op_queue[*mypos]);

                if (q == NULL) {
                        if (e->loop_should_stop)
                                break;
                        else
                                continue;
                }

                mtc_dbg("%s %d process", e->name, *mypos);

                e->process_driver(e, q, *mypos);

                /* Free the entry that was allocated when tipc queued the
                 * operation. This also frees its components. */
                queue_entry_free(q);
        }

        return NULL;
}
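/*
 * Minimal standalone sketch of the absolute-timeout pattern used in the
 * loop above (names here are illustrative, not from the original code):
 * pthread_cond_timedwait() takes an absolute deadline, so "now + 1s" must
 * be recomputed before every wait. On timeout it returns ETIMEDOUT with
 * the mutex re-acquired.
 */
#include <errno.h>
#include <pthread.h>
#include <time.h>

static int wait_for_work(pthread_mutex_t *mu, pthread_cond_t *cv,
                         const int *have_work)
{
        struct timespec ts;
        int rv = 0;

        clock_gettime(CLOCK_REALTIME, &ts);     /* default condvar clock */
        ts.tv_sec += 1;                         /* deadline = now + 1s */

        pthread_mutex_lock(mu);
        while (!*have_work && rv == 0)
                rv = pthread_cond_timedwait(cv, mu, &ts);
        pthread_mutex_unlock(mu);

        return rv;      /* 0 if woken with work, ETIMEDOUT otherwise */
}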
static void __releases(agg_queue)
ieee80211_wake_queue_agg(struct ieee80211_local *local, int tid)
{
        int queue = ieee80211_ac_from_tid(tid);

        if (atomic_dec_return(&local->agg_queue_stop[queue]) == 0)
                ieee80211_wake_queue_by_reason(
                        &local->hw, queue,
                        IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
        __release(agg_queue);
}
static void __releases(agg_queue)
ieee80211_wake_queue_agg(struct ieee80211_sub_if_data *sdata, int tid)
{
        int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];

        if (atomic_dec_return(&sdata->local->agg_queue_stop[queue]) == 0)
                ieee80211_wake_queue_by_reason(
                        &sdata->local->hw, queue,
                        IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
        __release(agg_queue);
}
//! copy ctor
explicit map(const map &other) :
    itmax(other.size()),
    slots(htable::compute_slots_for(itmax)),
    klist(),
    kpool(0, 0),
    hslot(0),
    hpool(0, 0),
    wlen(0),
    wksp(0),
    hash(),
    hmem()
{
    __init();
    try {
        other.__duplicate_into(*this);
    } catch (...) {
        __release();
        throw;
    }
}
/*
 * Destructor
 */
HuffmanCodec::~HuffmanCodec()
{
    __release(tree);
}
static void lkdtm_do_action(enum ctype which)
{
        switch (which) {
        case CT_PANIC:
                panic("dumptest");
                break;
        case CT_BUG:
                BUG();
                break;
        case CT_WARNING:
                WARN_ON(1);
                break;
        case CT_EXCEPTION:
                *((int *) 0) = 0;
                break;
        case CT_LOOP:
                for (;;)
                        ;
                break;
        case CT_OVERFLOW:
                (void) recursive_loop(recur_count);
                break;
        case CT_CORRUPT_STACK:
                corrupt_stack();
                break;
        case CT_UNALIGNED_LOAD_STORE_WRITE: {
                static u8 data[5] __attribute__((aligned(4))) = {1, 2, 3, 4, 5};
                u32 *p;
                u32 val = 0x12345678;

                p = (u32 *)(data + 1);
                if (*p == 0)
                        val = 0x87654321;
                *p = val;
                break;
        }
        case CT_OVERWRITE_ALLOCATION: {
                size_t len = 1020;
                u32 *data = kmalloc(len, GFP_KERNEL);

                data[1024 / sizeof(u32)] = 0x12345678;
                kfree(data);
                break;
        }
        case CT_WRITE_AFTER_FREE: {
                size_t len = 1024;
                u32 *data = kmalloc(len, GFP_KERNEL);

                kfree(data);
                schedule();
                memset(data, 0x78, len);
                break;
        }
        case CT_SOFTLOCKUP:
                preempt_disable();
                for (;;)
                        cpu_relax();
                break;
        case CT_HARDLOCKUP:
                local_irq_disable();
                for (;;)
                        cpu_relax();
                break;
        case CT_SPINLOCKUP:
                /* Must be called twice to trigger. */
                spin_lock(&lock_me_up);
                /* Let sparse know we intended to exit holding the lock. */
                __release(&lock_me_up);
                break;
        case CT_HUNG_TASK:
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule();
                break;
        case CT_EXEC_DATA:
                execute_location(data_area);
                break;
        case CT_EXEC_STACK: {
                u8 stack_area[EXEC_SIZE];
                execute_location(stack_area);
                break;
        }
        case CT_EXEC_KMALLOC: {
                u32 *kmalloc_area = kmalloc(EXEC_SIZE, GFP_KERNEL);
                execute_location(kmalloc_area);
                kfree(kmalloc_area);
                break;
        }
        case CT_EXEC_VMALLOC: {
                u32 *vmalloc_area = vmalloc(EXEC_SIZE);
                execute_location(vmalloc_area);
                vfree(vmalloc_area);
                break;
        }
        case CT_EXEC_USERSPACE: {
                unsigned long user_addr;

                user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
                                    PROT_READ | PROT_WRITE | PROT_EXEC,
                                    MAP_ANONYMOUS | MAP_PRIVATE, 0);
                if (user_addr >= TASK_SIZE) {
                        pr_warn("Failed to allocate user memory\n");
                        return;
                }
                execute_user_location((void *)user_addr);
                vm_munmap(user_addr, PAGE_SIZE);
                break;
        }
        case CT_ACCESS_USERSPACE: {
                unsigned long user_addr, tmp;
                unsigned long *ptr;

                user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
                                    PROT_READ | PROT_WRITE | PROT_EXEC,
                                    MAP_ANONYMOUS | MAP_PRIVATE, 0);
                if (user_addr >= TASK_SIZE) {
                        pr_warn("Failed to allocate user memory\n");
                        return;
                }

                ptr = (unsigned long *)user_addr;

                pr_info("attempting bad read at %p\n", ptr);
                tmp = *ptr;
                tmp += 0xc0dec0de;

                pr_info("attempting bad write at %p\n", ptr);
                *ptr = tmp;

                vm_munmap(user_addr, PAGE_SIZE);
                break;
        }
        case CT_WRITE_RO: {
                unsigned long *ptr;

                ptr = (unsigned long *)&rodata;

                pr_info("attempting bad write at %p\n", ptr);
                *ptr ^= 0xabcd1234;
                break;
        }
        case CT_WRITE_KERN: {
                size_t size;
                unsigned char *ptr;

                size = (unsigned long)do_overwritten -
                       (unsigned long)do_nothing;
                ptr = (unsigned char *)do_overwritten;

                pr_info("attempting bad %zu byte write at %p\n", size, ptr);
                memcpy(ptr, (unsigned char *)do_nothing, size);
                flush_icache_range((unsigned long)ptr,
                                   (unsigned long)(ptr + size));

                do_overwritten();
                break;
        }
        case CT_NONE:
        default:
                break;
        }
}
virtual void release() throw()
{
    __release();
}
virtual ~map() throw()
{
    __release();
}