/*
 * To disconnect a channel, and reflect it back to all who may be waiting.
 *
 * An OPEN is not allowed until XPC_C_DISCONNECTING is cleared by
 * xpc_process_disconnect(), and if set, XPC_C_WDISCONNECT is cleared by
 * xpc_disconnect_wait().
 *
 * THE CHANNEL IS TO BE LOCKED BY THE CALLER AND WILL REMAIN LOCKED UPON RETURN.
 */
void
xpc_disconnect_channel(const int line, struct xpc_channel *ch,
		       enum xp_retval reason, unsigned long *irq_flags)
{
	u32 channel_was_connected = (ch->flags & XPC_C_CONNECTED);

	DBUG_ON(!spin_is_locked(&ch->lock));

	if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED))
		return;

	DBUG_ON(!(ch->flags & (XPC_C_CONNECTING | XPC_C_CONNECTED)));

	dev_dbg(xpc_chan, "reason=%d, line=%d, partid=%d, channel=%d\n",
		reason, line, ch->partid, ch->number);

	XPC_SET_REASON(ch, reason, line);

	ch->flags |= (XPC_C_CLOSEREQUEST | XPC_C_DISCONNECTING);
	/* some of these may not have been set */
	ch->flags &= ~(XPC_C_OPENREQUEST | XPC_C_OPENREPLY |
		       XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
		       XPC_C_CONNECTING | XPC_C_CONNECTED);

	xpc_arch_ops.send_chctl_closerequest(ch, irq_flags);

	if (channel_was_connected)
		ch->flags |= XPC_C_WASCONNECTED;

	spin_unlock_irqrestore(&ch->lock, *irq_flags);

	/* wake all idle kthreads so they can exit */
	if (atomic_read(&ch->kthreads_idle) > 0) {
		wake_up_all(&ch->idle_wq);

	} else if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
		   !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
		/* start a kthread that will do the xpDisconnecting callout */
		xpc_create_kthreads(ch, 1, 1);
	}

	/* wake those waiting to allocate an entry from the local msg queue */
	if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
		wake_up(&ch->msg_allocate_wq);

	spin_lock_irqsave(&ch->lock, *irq_flags);
}
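/*
 * Illustrative only: a minimal sketch of the caller-side locking contract
 * stated above. The channel lock is taken with spin_lock_irqsave(), the
 * function is entered (and returns) with it held, and the caller releases
 * it afterwards. The wrapper name below is hypothetical; upstream XPC
 * reaches this path through the XPC_DISCONNECT_CHANNEL() macro, which
 * supplies __LINE__ the same way.
 */
static void
example_force_disconnect(struct xpc_channel *ch, enum xp_retval reason)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&ch->lock, irq_flags);
	/* lock held on entry and on return, per the contract above */
	xpc_disconnect_channel(__LINE__, ch, reason, &irq_flags);
	spin_unlock_irqrestore(&ch->lock, irq_flags);
}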
static void
xfs_perag_clear_reclaim_tag(
	struct xfs_perag	*pag)
{
	struct xfs_mount	*mp = pag->pag_mount;

	ASSERT(spin_is_locked(&pag->pag_ici_lock));
	if (--pag->pag_ici_reclaimable)
		return;

	/* clear the reclaim tag from the perag radix tree */
	spin_lock(&mp->m_perag_lock);
	radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno,
			     XFS_ICI_RECLAIM_TAG);
	spin_unlock(&mp->m_perag_lock);
	trace_xfs_perag_clear_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
}
NDAS_SAL_API
void sal_spinlock_destroy(sal_spinlock m)
{
	struct _sal_spinlock *sm = (struct _sal_spinlock *)m;

	sal_assert(m != SAL_INVALID_MUTEX);
#ifdef MAGIC
	/* the handle is opaque; check the magic via the cast pointer */
	sal_assert(sm->magic == SAL_MUTEX_MAGIC);
#endif
#ifdef DEBUG
	/* check if locked and print debugging info */
	dbgl_salsync(5, "%s %p", sm->desc, sm);
	sal_assert(!spin_is_locked(&sm->mutex));
#endif
	kfree(sm);
}
/*
 * Free up msg_slots and clear other stuff that were setup for the specified
 * channel.
 */
static void
xpc_teardown_msg_structures_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(!spin_is_locked(&ch->lock));

	kfree(ch_uv->cached_notify_gru_mq_desc);
	ch_uv->cached_notify_gru_mq_desc = NULL;

	if (ch->flags & XPC_C_SETUP) {
		xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		kfree(ch_uv->send_msg_slots);
		xpc_init_fifo_uv(&ch_uv->recv_msg_list);
		kfree(ch_uv->recv_msg_slots);
	}
}
/*
 * Allocate and initialise an xfs_inode.
 */
struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	/*
	 * if this didn't occur in transactions, we could use
	 * KM_MAYFAIL and return NULL here on ENOMEM. Set the
	 * code up to do this anyway.
	 */
	ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
	if (!ip)
		return NULL;
	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_zone_free(xfs_inode_zone, ip);
		return NULL;
	}

	/* VFS doesn't initialise i_mode! */
	VFS_I(ip)->i_mode = 0;

	XFS_STATS_INC(mp, vn_active);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(!xfs_isiflocked(ip));
	ASSERT(ip->i_ino == 0);

	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);

	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_afp = NULL;
	ip->i_cowfp = NULL;
	ip->i_cnextents = 0;
	ip->i_cformat = XFS_DINODE_FMT_EXTENTS;
	memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
	ip->i_flags = 0;
	ip->i_delayed_blks = 0;
	memset(&ip->i_d, 0, sizeof(ip->i_d));

	return ip;
}
/*
 * caller must hold spinlock
 */
static void ctx_unhash_pf(struct ptlrpc_cli_ctx *ctx,
			  struct hlist_head *freelist)
{
	LASSERT(spin_is_locked(&ctx->cc_sec->ps_lock));
	LASSERT(atomic_read(&ctx->cc_refcount) > 0);
	LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
	LASSERT(!hlist_unhashed(&ctx->cc_cache));

	clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);

	if (atomic_dec_and_test(&ctx->cc_refcount)) {
		__hlist_del(&ctx->cc_cache);
		hlist_add_head(&ctx->cc_cache, freelist);
	} else {
		hlist_del_init(&ctx->cc_cache);
	}
}
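/*
 * Illustrative only: a minimal sketch of one way a caller could satisfy the
 * "caller must hold spinlock" contract of ctx_unhash_pf() above. Contexts
 * are unhashed onto a local freelist while ps_lock is held, and the final
 * release happens only after the lock is dropped so nothing heavy runs
 * under the spinlock. The names example_flush_one and example_release_ctx
 * are hypothetical.
 */
static void example_release_ctx(struct ptlrpc_cli_ctx *ctx);	/* hypothetical */

static void example_flush_one(struct ptlrpc_sec *sec,
			      struct ptlrpc_cli_ctx *ctx)
{
	HLIST_HEAD(freelist);
	struct ptlrpc_cli_ctx *pos;

	spin_lock(&sec->ps_lock);
	ctx_unhash_pf(ctx, &freelist);		/* lock held, as asserted */
	spin_unlock(&sec->ps_lock);

	/* free outside the spinlock */
	while (!hlist_empty(&freelist)) {
		pos = hlist_entry(freelist.first, struct ptlrpc_cli_ctx,
				  cc_cache);
		hlist_del_init(&pos->cc_cache);
		example_release_ctx(pos);
	}
}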
/*
 * Allocate and initialise an xfs_inode.
 */
STATIC struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	/*
	 * if this didn't occur in transactions, we could use
	 * KM_MAYFAIL and return NULL here on ENOMEM. Set the
	 * code up to do this anyway.
	 */
	ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
	if (!ip)
		return NULL;
	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_zone_free(xfs_inode_zone, ip);
		return NULL;
	}

	ASSERT(atomic_read(&ip->i_iocount) == 0);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(completion_done(&ip->i_flush));
	ASSERT(ip->i_ino == 0);

	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
	lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
				   &xfs_iolock_active, "xfs_iolock_active");

	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_afp = NULL;
	memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
	ip->i_flags = 0;
	ip->i_update_core = 0;
	ip->i_delayed_blks = 0;
	memset(&ip->i_d, 0, sizeof(xfs_icdinode_t));
	ip->i_size = 0;
	ip->i_new_size = 0;

	return ip;
}
void aee_sram_fiq_save_bin(const char *msg, size_t len)
{
	int delay = 100;
	char bin_buffer[4];
	struct ram_console_buffer *buffer = ram_console_buffer;

	if (FIQ_log_size + len > CONFIG_MTK_RAM_CONSOLE_SIZE)
		return;
	if (len > 0xffff)
		return;
	/* the record is saved in 4-byte units; drop any unaligned tail */
	if (len % 4 != 0)
		len -= len % 4;

	if (!atomic_read(&rc_in_fiq))
		atomic_set(&rc_in_fiq, 1);

	/* best effort: briefly wait for the console lock to be released */
	while ((delay > 0) && (spin_is_locked(&ram_console_lock))) {
		udelay(1);
		delay--;
	}

	/* bin buffer flag 00ff */
	bin_buffer[0] = 0x00;
	bin_buffer[1] = 0xff;
	/* bin buffer size */
	bin_buffer[2] = len / 255;
	bin_buffer[3] = len % 255;

	sram_log_save(bin_buffer, 4);
	sram_log_save(msg, len);
	FIQ_log_size = FIQ_log_size + len + 4;
	buffer->bin_log_count += len;
}
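/*
 * Illustrative only: a hypothetical decoder for the 4-byte record header
 * written by aee_sram_fiq_save_bin() above. A record starts with the
 * 0x00 0xff flag pair, followed by the payload length encoded as len/255
 * and len%255 (note: base 255, mirroring the writer above, not base 256).
 * Returns the payload length, or -1 if the bytes do not start a binary
 * record.
 */
static int example_parse_bin_header(const unsigned char *p)
{
	if (p[0] != 0x00 || p[1] != 0xff)
		return -1;		/* not a binary record */
	return p[2] * 255 + p[3];	/* inverts len/255, len%255 */
}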
/*
 * Process a connect message from a remote partition.
 *
 * Note: xpc_process_connect() is expecting to be called with the
 * spin_lock_irqsave held and will leave it locked upon return.
 */
static void
xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
{
	enum xp_retval ret;

	DBUG_ON(!spin_is_locked(&ch->lock));

	if (!(ch->flags & XPC_C_OPENREQUEST) ||
	    !(ch->flags & XPC_C_ROPENREQUEST)) {
		/* nothing more to do for now */
		return;
	}
	DBUG_ON(!(ch->flags & XPC_C_CONNECTING));

	if (!(ch->flags & XPC_C_SETUP)) {
		spin_unlock_irqrestore(&ch->lock, *irq_flags);
		ret = xpc_setup_msg_structures(ch);
		spin_lock_irqsave(&ch->lock, *irq_flags);

		if (ret != xpSuccess)
			XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags);

		ch->flags |= XPC_C_SETUP;

		if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING))
			return;
	}

	if (!(ch->flags & XPC_C_OPENREPLY)) {
		ch->flags |= XPC_C_OPENREPLY;
		xpc_send_chctl_openreply(ch, irq_flags);
	}

	if (!(ch->flags & XPC_C_ROPENREPLY))
		return;

	ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP);	/* clear all else */

	dev_info(xpc_chan, "channel %d to partition %d connected\n",
		 ch->number, ch->partid);

	spin_unlock_irqrestore(&ch->lock, *irq_flags);
	xpc_create_kthreads(ch, 1, 0);
	spin_lock_irqsave(&ch->lock, *irq_flags);
}
/* used by __split_huge_page_refcount() */
void lru_add_page_tail(struct zone *zone,
		       struct page *page, struct page *page_tail)
{
	int active;
	enum lru_list lru;
	const int file = 0;

	VM_BUG_ON(!PageHead(page));
	VM_BUG_ON(PageCompound(page_tail));
	VM_BUG_ON(PageLRU(page_tail));
	VM_BUG_ON(!spin_is_locked(&zone->lru_lock));

	SetPageLRU(page_tail);

	if (page_evictable(page_tail, NULL)) {
		if (PageActive(page)) {
			SetPageActive(page_tail);
			active = 1;
			lru = LRU_ACTIVE_ANON;
		} else {
			active = 0;
			lru = LRU_INACTIVE_ANON;
		}
		update_page_reclaim_stat(zone, page_tail, file, active);
	} else {
		SetPageUnevictable(page_tail);
		lru = LRU_UNEVICTABLE;
	}

	if (likely(PageLRU(page)))
		list_add_tail(&page_tail->lru, &page->lru);
	else {
		struct list_head *list_head;
		/*
		 * Head page has not yet been counted, as an hpage,
		 * so we must account for each subpage individually.
		 *
		 * Use the standard add function to put page_tail on the list,
		 * but then correct its position so they all end up in order.
		 */
		add_page_to_lru_list(zone, page_tail, lru);
		list_head = page_tail->lru.prev;
		list_move_tail(&page_tail->lru, list_head);
	}
}
void
xfs_inode_free(
	struct xfs_inode	*ip)
{
	switch (ip->i_d.di_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
		break;
	}

	if (ip->i_afp)
		xfs_idestroy_fork(ip, XFS_ATTR_FORK);

	if (ip->i_itemp) {
		/*
		 * Only if we are shutting down the fs will we see an
		 * inode still in the AIL. If it is there, we should remove
		 * it to prevent a use-after-free from occurring.
		 */
		xfs_log_item_t	*lip = &ip->i_itemp->ili_item;
		struct xfs_ail	*ailp = lip->li_ailp;

		ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
		       XFS_FORCED_SHUTDOWN(ip->i_mount));
		if (lip->li_flags & XFS_LI_IN_AIL) {
			spin_lock(&ailp->xa_lock);
			if (lip->li_flags & XFS_LI_IN_AIL)
				xfs_trans_ail_delete(ailp, lip);
			else
				spin_unlock(&ailp->xa_lock);
		}
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}

	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_iocount) == 0);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(completion_done(&ip->i_flush));

	kmem_zone_free(xfs_inode_zone, ip);
}
static void rtc_update_irq(RTCState *s)
{
    ASSERT(spin_is_locked(&s->lock));

    if ( rtc_mode_is(s, strict) && (s->hw.cmos_data[RTC_REG_C] & RTC_IRQF) )
        return;

    /* IRQ is raised if any source is both raised & enabled */
    if ( !(s->hw.cmos_data[RTC_REG_B] &
           s->hw.cmos_data[RTC_REG_C] &
           (RTC_PF | RTC_AF | RTC_UF)) )
        return;

    s->hw.cmos_data[RTC_REG_C] |= RTC_IRQF;
    if ( rtc_mode_is(s, no_ack) )
        hvm_isa_irq_deassert(vrtc_domain(s), RTC_IRQ);
    hvm_isa_irq_assert(vrtc_domain(s), RTC_IRQ);
}
void
xfs_inode_free(
	struct xfs_inode	*ip)
{
	switch (ip->i_d.di_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
		break;
	}

	if (ip->i_afp)
		xfs_idestroy_fork(ip, XFS_ATTR_FORK);

	if (ip->i_itemp) {
		xfs_log_item_t	*lip = &ip->i_itemp->ili_item;
		struct xfs_ail	*ailp = lip->li_ailp;

		ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
		       XFS_FORCED_SHUTDOWN(ip->i_mount));
		if (lip->li_flags & XFS_LI_IN_AIL) {
			spin_lock(&ailp->xa_lock);
			if (lip->li_flags & XFS_LI_IN_AIL)
				xfs_trans_ail_delete(ailp, lip);
			else
				spin_unlock(&ailp->xa_lock);
		}
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}

	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(!xfs_isiflocked(ip));

	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
}
static int __ccif_v1_de_init(ccif_t *ccif)
{
	/*
	 * Do not disable the ccif irq here: the kernel warns about freeing
	 * an already-free irq when free_irq() runs below.
	 */
	//ccif->ccif_dis_intr(ccif);

	/* Wait until the TOP half is no longer running */
	while (test_bit(CCIF_TOP_HALF_RUNNING, &ccif->m_status))
		yield();
	WARN_ON(spin_is_locked(&ccif->m_lock));
	/* Un-register irq */
	free_irq(ccif->m_irq_id, ccif);
	/* Free memory */
	kfree(ccif);
	return 0;
}
static void __domain_finalise_shutdown(struct domain *d)
{
    struct vcpu *v;

    BUG_ON(!spin_is_locked(&d->shutdown_lock));

    if ( d->is_shut_down )
        return;

    for_each_vcpu ( d, v )
        if ( !v->paused_for_shutdown )
            return;

    d->is_shut_down = 1;
    if ( (d->shutdown_code == SHUTDOWN_suspend) && d->suspend_evtchn )
        evtchn_send(d, d->suspend_evtchn);
    else
        send_global_virq(VIRQ_DOM_EXC);
}
static inline void vcpu_runstate_change(
    struct vcpu *v, int new_state, s_time_t new_entry_time)
{
    s_time_t delta;

    ASSERT(v->runstate.state != new_state);
    ASSERT(spin_is_locked(
        &per_cpu(schedule_data, v->processor).schedule_lock));

    trace_runstate_change(v, new_state);

    delta = new_entry_time - v->runstate.state_entry_time;
    if ( delta > 0 )
    {
        v->runstate.time[v->runstate.state] += delta;
        v->runstate.state_entry_time = new_entry_time;
    }

    v->runstate.state = new_state;
}
void aee_sram_fiq_log(const char *msg)
{
	unsigned int count = strlen(msg);
	int delay = 100;

	if (FIQ_log_size + count > ram_console_buffer_size)
		return;

	atomic_set(&rc_in_fiq, 1);

	while ((delay > 0) && (spin_is_locked(&ram_console_lock))) {
		udelay(1);
		delay--;
	}

	sram_log_save(msg, count);
	FIQ_log_size += count;
}
/* handle update-ended timer */
static void check_update_timer(RTCState *s)
{
    uint64_t next_update_time, expire_time;
    uint64_t guest_usec;
    struct domain *d = vrtc_domain(s);

    stop_timer(&s->update_timer);
    stop_timer(&s->update_timer2);

    ASSERT(spin_is_locked(&s->lock));

    if ( !(s->hw.cmos_data[RTC_REG_C] & RTC_UF) &&
         !(s->hw.cmos_data[RTC_REG_B] & RTC_SET) )
    {
        s->use_timer = 1;
        guest_usec = get_localtime_us(d) % USEC_PER_SEC;
        if ( guest_usec >= (USEC_PER_SEC - 244) )
        {
            /* RTC is in update cycle */
            s->hw.cmos_data[RTC_REG_A] |= RTC_UIP;
            next_update_time = (USEC_PER_SEC - guest_usec) * NS_PER_USEC;
            expire_time = NOW() + next_update_time;
            /* release lock before set timer */
            spin_unlock(&s->lock);
            set_timer(&s->update_timer2, expire_time);
            /* fetch lock again */
            spin_lock(&s->lock);
        }
        else
        {
            next_update_time = (USEC_PER_SEC - guest_usec - 244) * NS_PER_USEC;
            expire_time = NOW() + next_update_time;
            s->next_update_time = expire_time;
            /* release lock before set timer */
            spin_unlock(&s->lock);
            set_timer(&s->update_timer, expire_time);
            /* fetch lock again */
            spin_lock(&s->lock);
        }
    }
    else
        s->use_timer = 0;
}
/* Set the correct value in the timer, accounting for time elapsed
 * since the last time we did that. */
static void pmt_update_time(PMTState *s)
{
    uint64_t curr_gtime;
    uint32_t msb = s->pm.tmr_val & TMR_VAL_MSB;

    ASSERT(spin_is_locked(&s->lock));

    /* Update the timer */
    curr_gtime = hvm_get_guest_time(s->vcpu);
    s->pm.tmr_val += ((curr_gtime - s->last_gtime) * s->scale) >> 32;
    s->pm.tmr_val &= TMR_VAL_MASK;
    s->last_gtime = curr_gtime;

    /* If the counter's MSB has changed, set the status bit */
    if ( (s->pm.tmr_val & TMR_VAL_MSB) != msb )
    {
        s->pm.pm1a_sts |= TMR_STS;
        pmt_update_sci(s);
    }
}
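/*
 * Illustrative only: the `s->scale` factor used above is a 32.32
 * fixed-point ratio, so `(delta_ticks * scale) >> 32` converts guest-time
 * ticks into PM-timer ticks without floating point. A hypothetical,
 * standalone sketch of how such a factor could be built and applied (the
 * frequency parameters are assumptions, not Xen's actual fields, and
 * out_hz must fit in 32 bits to avoid overflowing the shift):
 */
#include <stdint.h>

static uint64_t example_make_scale(uint64_t out_hz, uint64_t in_hz)
{
    /* 32.32 fixed-point ratio out_hz / in_hz */
    return (out_hz << 32) / in_hz;
}

static uint64_t example_convert(uint64_t delta_in_ticks, uint64_t scale)
{
    /* same shape as the tmr_val update in pmt_update_time() above */
    return (delta_in_ticks * scale) >> 32;
}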
static void rtc_update_irq(RTCState *s)
{
    struct domain *d = vrtc_domain(s);
    uint8_t irqf;

    ASSERT(spin_is_locked(&s->lock));

    /* IRQ is raised if any source is both raised & enabled */
    irqf = (s->hw.cmos_data[RTC_REG_B] &
            s->hw.cmos_data[RTC_REG_C] &
            (RTC_PF | RTC_AF | RTC_UF))
        ? RTC_IRQF : 0;

    s->hw.cmos_data[RTC_REG_C] &= ~RTC_IRQF;
    s->hw.cmos_data[RTC_REG_C] |= irqf;

    hvm_isa_irq_deassert(d, RTC_IRQ);
    if ( irqf )
        hvm_isa_irq_assert(d, RTC_IRQ);
}
/* Mark specified intr remap entry as free */
static void free_remap_entry(struct iommu *iommu, int index)
{
    struct iremap_entry *iremap_entry = NULL, *iremap_entries;
    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);

    if ( index < 0 || index > IREMAP_ENTRY_NR - 1 )
        return;

    ASSERT( spin_is_locked(&ir_ctrl->iremap_lock) );

    GET_IREMAP_ENTRY(ir_ctrl->iremap_maddr, index,
                     iremap_entries, iremap_entry);

    memset(iremap_entry, 0, sizeof(struct iremap_entry));
    iommu_flush_cache_entry(iremap_entry, sizeof(struct iremap_entry));
    iommu_flush_iec_index(iommu, 0, index);

    unmap_vtd_domain_page(iremap_entries);
    ir_ctrl->iremap_num--;
}
/* Enable/configure/disable the periodic timer based on the RTC_PIE and
 * RTC_RATE_SELECT settings */
static void rtc_timer_update(RTCState *s)
{
    int period_code, period, delta;
    struct vcpu *v = vrtc_vcpu(s);

    ASSERT(spin_is_locked(&s->lock));

    s->pt_dead_ticks = 0;

    period_code = s->hw.cmos_data[RTC_REG_A] & RTC_RATE_SELECT;
    switch ( s->hw.cmos_data[RTC_REG_A] & RTC_DIV_CTL )
    {
    case RTC_REF_CLCK_32KHZ:
        if ( (period_code != 0) && (period_code <= 2) )
            period_code += 7;
        /* fall through */
    case RTC_REF_CLCK_1MHZ:
    case RTC_REF_CLCK_4MHZ:
        if ( period_code != 0 )
        {
            if ( period_code != s->pt_code )
            {
                s->pt_code = period_code;
                period = 1 << (period_code - 1); /* period in 32 Khz cycles */
                period = DIV_ROUND(period * 1000000000ULL, 32768); /* in ns */
                if ( v->domain->arch.hvm_domain.params[HVM_PARAM_VPT_ALIGN] )
                    delta = 0;
                else
                    delta = period - ((NOW() - s->start_time) % period);
                create_periodic_time(v, &s->pt, delta, period,
                                     RTC_IRQ, NULL, s);
            }
            break;
        }
        /* fall through */
    default:
        destroy_periodic_time(&s->pt);
        s->pt_code = 0;
        break;
    }
}
static void
xfs_perag_set_reclaim_tag(
	struct xfs_perag	*pag)
{
	struct xfs_mount	*mp = pag->pag_mount;

	ASSERT(spin_is_locked(&pag->pag_ici_lock));
	if (pag->pag_ici_reclaimable++)
		return;

	/* propagate the reclaim tag up into the perag radix tree */
	spin_lock(&mp->m_perag_lock);
	radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno,
			   XFS_ICI_RECLAIM_TAG);
	spin_unlock(&mp->m_perag_lock);

	/* schedule periodic background inode reclaim */
	xfs_reclaim_work_queue(mp);

	trace_xfs_perag_set_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
}
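/*
 * Illustrative only: xfs_perag_set_reclaim_tag() above and its counterpart
 * xfs_perag_clear_reclaim_tag() earlier rely on a post-increment /
 * pre-decrement counter so the expensive radix-tree update happens only on
 * the 0 -> 1 and 1 -> 0 transitions. A minimal standalone sketch of the
 * same idiom; all names are hypothetical.
 */
struct example_tagged {
	unsigned int	nr_reclaimable;	/* protected by the caller's lock */
};

static void example_set_tag(struct example_tagged *t)
{
	if (t->nr_reclaimable++)	/* only act on the 0 -> 1 transition */
		return;
	/* propagate the tag to the parent structure here */
}

static void example_clear_tag(struct example_tagged *t)
{
	if (--t->nr_reclaimable)	/* only act on the 1 -> 0 transition */
		return;
	/* clear the tag from the parent structure here */
}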
STATIC void
xfs_inode_free(
	struct xfs_inode	*ip)
{
	switch (ip->i_d.di_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
		break;
	}

	if (ip->i_afp)
		xfs_idestroy_fork(ip, XFS_ATTR_FORK);

	if (ip->i_itemp) {
		ASSERT(!(ip->i_itemp->ili_item.li_flags & XFS_LI_IN_AIL));
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}

	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(!xfs_isiflocked(ip));

	/*
	 * Because we use RCU freeing we need to ensure the inode always
	 * appears to be reclaimed with an invalid inode number when in the
	 * free state. The ip->i_flags_lock provides the barrier against lookup
	 * races.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
}
/* Enable/configure/disable the periodic timer based on the RTC_PIE and
 * RTC_RATE_SELECT settings */
static void rtc_timer_update(RTCState *s)
{
    int period_code, period;
    struct vcpu *v = vrtc_vcpu(s);

    ASSERT(spin_is_locked(&s->lock));

    period_code = s->hw.cmos_data[RTC_REG_A] & RTC_RATE_SELECT;
    if ( (period_code != 0) && (s->hw.cmos_data[RTC_REG_B] & RTC_PIE) )
    {
        if ( period_code <= 2 )
            period_code += 7;

        period = 1 << (period_code - 1); /* period in 32 Khz cycles */
        period = DIV_ROUND((period * 1000000000ULL), 32768); /* period in ns */
        create_periodic_time(v, &s->pt, period, period, RTC_IRQ,
                             rtc_periodic_cb, s);
    }
    else
    {
        destroy_periodic_time(&s->pt);
    }
}
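/*
 * Illustrative only: a worked instance of the period computation shared by
 * both rtc_timer_update() variants above. For the common 1024 Hz setting
 * the guest programs rate-select code 6, so:
 *
 *   period = 1 << (6 - 1) = 32 cycles of the 32768 Hz reference clock
 *   period = 32 * 10^9 / 32768 = 976562.5 ns, which DIV_ROUND() rounds
 *            to 976563 ns (~1024 Hz)
 *
 * The sketch below restates that arithmetic as standalone C.
 */
#include <stdint.h>

static uint64_t example_rtc_period_ns(int period_code)
{
    uint64_t cycles = 1ULL << (period_code - 1); /* 32768 Hz cycles */
    /* round-to-nearest division, as DIV_ROUND() does above */
    return (cycles * 1000000000ULL + 32768 / 2) / 32768;
}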
void aee_sram_fiq_log(const char *msg)
{
	unsigned int count = strlen(msg);
	int delay = 100;

	if (FIQ_log_size + count > CONFIG_MTK_RAM_CONSOLE_SIZE)
		return;

	if (!atomic_read(&rc_in_fiq))
		atomic_set(&rc_in_fiq, 1);

	while ((delay > 0) && (spin_is_locked(&ram_console_lock))) {
		udelay(1);
		delay--;
	}

	sram_log_save(msg, count);
	FIQ_log_size += count;
}
/* used by __split_huge_page_refcount() */
void lru_add_page_tail(struct page *page, struct page *page_tail,
		       struct lruvec *lruvec, struct list_head *list)
{
	const int file = 0;

	VM_BUG_ON(!PageHead(page));
	VM_BUG_ON(PageCompound(page_tail));
	VM_BUG_ON(PageLRU(page_tail));
	VM_BUG_ON(NR_CPUS != 1 &&
		  !spin_is_locked(&lruvec_zone(lruvec)->lru_lock));

	if (!list)
		SetPageLRU(page_tail);

	if (likely(PageLRU(page)))
		list_add_tail(&page_tail->lru, &page->lru);
	else if (list) {
		/* page reclaim is reclaiming a huge page */
		get_page(page_tail);
		list_add_tail(&page_tail->lru, list);
	} else {
		struct list_head *list_head;
		/*
		 * Head page has not yet been counted, as an hpage,
		 * so we must account for each subpage individually.
		 *
		 * Use the standard add function to put page_tail on the list,
		 * but then correct its position so they all end up in order.
		 */
		add_page_to_lru_list(page_tail, lruvec, page_lru(page_tail));
		list_head = page_tail->lru.prev;
		list_move_tail(&page_tail->lru, list_head);
	}

	if (!PageUnevictable(page))
		update_page_reclaim_stat(lruvec, file, PageActive(page_tail));
}
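/*
 * Illustrative only: the NR_CPUS != 1 guard above exists because
 * spin_is_locked() always returns 0 on uniprocessor (!CONFIG_SMP) builds,
 * where spinlocks compile away, so a bare VM_BUG_ON(!spin_is_locked(...))
 * would fire spuriously on UP kernels. A sketch of the lockdep-based
 * alternative (from <linux/lockdep.h>) that later kernels favour, which
 * behaves correctly on both UP and SMP:
 */
static void example_assert_lru_held(spinlock_t *lru_lock)
{
	/* a no-op unless CONFIG_LOCKDEP is enabled; never misfires on UP */
	lockdep_assert_held(lru_lock);
}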
/*
 * Dequeue and move all pending jobs that match the index for this slot to
 * the list returned.
 */
struct pending_job *
flashcache_deq_pending(struct cache_c *dmc, int index)
{
	struct pending_job *node, *next, *movelist = NULL;
	int moved = 0;
	struct pending_job **head;

	VERIFY(spin_is_locked(&dmc->cache_spin_lock));
	head = &dmc->pending_job_hashbuckets[FLASHCACHE_PENDING_JOB_HASH(index)];
	for (node = *head; node != NULL; node = next) {
		next = node->next;
		if (node->index == index) {
			/*
			 * Remove pending job from the global list of
			 * jobs and move it to the private list for freeing
			 */
			if (node->prev == NULL) {
				*head = node->next;
				if (node->next)
					node->next->prev = NULL;
			} else
				node->prev->next = node->next;
			if (node->next == NULL) {
				if (node->prev)
					node->prev->next = NULL;
			} else
				node->next->prev = node->prev;
			node->prev = NULL;
			node->next = movelist;
			movelist = node;
			moved++;
		}
	}
	VERIFY(dmc->pending_jobs_count >= moved);
	dmc->pending_jobs_count -= moved;
	return movelist;
}
/*
 * Look for a free intr remap entry (or a contiguous set thereof).
 * The caller must hold iremap_lock, and must set up the returned entries
 * before releasing the lock.
 */
static unsigned int alloc_remap_entry(struct iommu *iommu, unsigned int nr)
{
    struct iremap_entry *iremap_entries = NULL;
    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);
    unsigned int i, found;

    ASSERT( spin_is_locked(&ir_ctrl->iremap_lock) );

    for ( found = i = 0; i < IREMAP_ENTRY_NR; i++ )
    {
        struct iremap_entry *p;

        if ( i % (1 << IREMAP_ENTRY_ORDER) == 0 )
        {
            /* This entry crosses a page boundary */
            if ( iremap_entries )
                unmap_vtd_domain_page(iremap_entries);
            GET_IREMAP_ENTRY(ir_ctrl->iremap_maddr, i,
                             iremap_entries, p);
        }
        else
            p = &iremap_entries[i % (1 << IREMAP_ENTRY_ORDER)];

        if ( p->lo_val || p->hi_val ) /* not a free entry */
            found = 0;
        else if ( ++found == nr )
            break;
    }

    if ( iremap_entries )
        unmap_vtd_domain_page(iremap_entries);

    if ( i < IREMAP_ENTRY_NR )
        ir_ctrl->iremap_num += nr;

    return i;
}
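/*
 * Illustrative only: a standalone restatement of the contiguous-run scan
 * inside alloc_remap_entry() above, stripped of the page-mapping details.
 * `found` counts the current run of free slots and resets to zero on any
 * used slot; the index of the run's last slot is returned, or `total` when
 * no run of length `nr` exists. All names are hypothetical.
 */
static unsigned int example_find_free_run(const unsigned char *used,
                                          unsigned int total, unsigned int nr)
{
    unsigned int i, found;

    for ( found = i = 0; i < total; i++ )
    {
        if ( used[i] )              /* not a free slot: restart the run */
            found = 0;
        else if ( ++found == nr )
            break;                  /* run complete; i is its last index */
    }
    return i;
}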
int saa7134_buffer_queue(struct saa7134_dev *dev,
			 struct saa7134_dmaqueue *q,
			 struct saa7134_buf *buf)
{
	struct saa7134_buf *next = NULL;

#ifdef DEBUG_SPINLOCKS
	BUG_ON(!spin_is_locked(&dev->slock));
#endif
	dprintk("buffer_queue %p\n", buf);
	if (NULL == q->curr) {
		if (!q->need_two) {
			q->curr = buf;
			buf->activate(dev, buf, NULL);
		} else if (list_empty(&q->queue)) {
			list_add_tail(&buf->vb.queue, &q->queue);
			buf->vb.state = STATE_QUEUED;
		} else {
			next = list_entry(q->queue.next, struct saa7134_buf,
					  vb.queue);
			q->curr = buf;
			buf->activate(dev, buf, next);
		}
	} else {
		/*
		 * A transfer is already active. The snippet was truncated
		 * here; by symmetry with the branch above, the buffer is
		 * presumably appended to the wait queue in the QUEUED state.
		 */
		list_add_tail(&buf->vb.queue, &q->queue);
		buf->vb.state = STATE_QUEUED;
	}
	return 0;
}