bool spk2mt(Speakers spk, CMediaType &mt, int i)
{
  if (spk.format == FORMAT_SPDIF)
  {
    // SPDIF media types
    if (i < 0 || i >= 2) return false;

    std::auto_ptr<WAVEFORMATEX> wfe(spk2wfe(spk, 0));
    if (!wfe.get()) return false;

    mt.SetType(&MEDIATYPE_Audio);
    mt.SetSubtype(i == 0? &MEDIASUBTYPE_DOLBY_AC3_SPDIF: &MEDIASUBTYPE_PCM);
    mt.SetFormatType(&FORMAT_WaveFormatEx);
    mt.SetFormat((BYTE*)wfe.get(), sizeof(WAVEFORMATEX) + wfe->cbSize);
    return true;
  }
  else if (FORMAT_MASK(spk.format) & FORMAT_CLASS_PCM)
  {
    // PCM media types
    std::auto_ptr<WAVEFORMATEX> wfe(spk2wfe(spk, i));
    if (!wfe.get()) return false;

    mt.SetType(&MEDIATYPE_Audio);
    mt.SetSubtype(&MEDIASUBTYPE_PCM);
    mt.SetFormatType(&FORMAT_WaveFormatEx);
    mt.SetFormat((BYTE*)wfe.get(), sizeof(WAVEFORMATEX) + wfe->cbSize);
    return true;
  }
  else
    return false;
}
/*
 * Acquire bakery lock
 *
 * Contending CPUs must first obtain a non-zero ticket and then calculate
 * their priority value. A contending CPU iterates over all other CPUs in the
 * platform, which may be contending for the same lock, in the order of their
 * ordinal position (CPU0, CPU1 and so on). A non-contending CPU has a ticket
 * (and priority) value of 0. The contending CPU compares its priority with
 * that of the others. The CPU with the highest priority (lowest numerical
 * value) acquires the lock.
 */
void bakery_lock_get(unsigned long mpidr, bakery_lock_t *bakery)
{
    unsigned int they, me;
    unsigned int my_ticket, my_prio, their_ticket;

    me = platform_get_core_pos(mpidr);

    assert_bakery_entry_valid(me, bakery);

    /* Prevent recursive acquisition */
    assert(bakery->owner != me);

    /* Get a ticket */
    my_ticket = bakery_get_ticket(bakery, me);

    /*
     * Now that we have our ticket, compute our priority value, then compare
     * it with that of the others, and proceed to acquire the lock.
     */
    my_prio = PRIORITY(my_ticket, me);
    for (they = 0; they < BAKERY_LOCK_MAX_CPUS; they++) {
        if (me == they)
            continue;

        /* Wait for the contender to get their ticket */
        while (bakery->entering[they])
            wfe();

        /*
         * If the other party is a contender, they'll have a non-zero
         * (valid) ticket value. If they do, compare priorities.
         */
        their_ticket = bakery->number[they];
        if (their_ticket && (PRIORITY(their_ticket, they) < my_prio)) {
            /*
             * They have higher priority (lower value). Wait for
             * their ticket value to change (either they release the
             * lock and it drops to 0, or they drop it and contend
             * again for the same lock with an even higher value).
             */
            do {
                wfe();
            } while (their_ticket == bakery->number[they]);
        }
    }
    /* Lock acquired */
    bakery->owner = me;
}
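/*
 * The acquire path above parks contenders in wfe(); they are woken by an
 * explicit sev() from the owner. A minimal sketch of the matching release,
 * assuming the same owner/number[] fields used above; the NO_OWNER constant
 * and the dsb() barrier before sev() are assumptions, not taken verbatim
 * from the original source.
 */
void bakery_lock_release(unsigned long mpidr, bakery_lock_t *bakery)
{
    unsigned int me = platform_get_core_pos(mpidr);

    assert_bakery_entry_valid(me, bakery);
    assert(bakery->owner == me);

    /* Give up ownership and drop our ticket back to 0 ... */
    bakery->owner = NO_OWNER;
    bakery->number[me] = 0;

    /* ... then make the update visible and wake any CPU waiting in wfe() */
    dsb();
    sev();
}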
void bakery_lock_get(unsigned int id, unsigned int offset)
{
    unsigned int they, me, is_cached;
    unsigned int my_ticket, my_prio, their_ticket;
    bakery_info_t *their_bakery_info;
    unsigned int their_bakery_data;

    me = plat_my_core_pos();

    is_cached = read_sctlr_el3() & SCTLR_C_BIT;

    /* Get a ticket */
    my_ticket = bakery_get_ticket(id, offset, me, is_cached);

    /*
     * Now that we have our ticket, compute our priority value, then compare
     * it with that of the others, and proceed to acquire the lock.
     */
    my_prio = PRIORITY(my_ticket, me);
    for (they = 0; they < BAKERY_LOCK_MAX_CPUS; they++) {
        if (me == they)
            continue;

        /*
         * Get a reference to the other contender's bakery info and
         * ensure that a stale copy is not read.
         */
        their_bakery_info = get_bakery_info_by_index(offset, id, they);
        assert(their_bakery_info);

        /* Wait for the contender to get their ticket */
        do {
            read_cache_op(their_bakery_info, is_cached);
            their_bakery_data = their_bakery_info->lock_data;
        } while (bakery_is_choosing(their_bakery_data));

        /*
         * If the other party is a contender, they'll have a non-zero
         * (valid) ticket value. If they do, compare priorities.
         */
        their_ticket = bakery_ticket_number(their_bakery_data);
        if (their_ticket && (PRIORITY(their_ticket, they) < my_prio)) {
            /*
             * They have higher priority (lower value). Wait for
             * their ticket value to change (either they release the
             * lock and it drops to 0, or they drop it and contend
             * again for the same lock with an even higher value).
             */
            do {
                wfe();
                read_cache_op(their_bakery_info, is_cached);
            } while (their_ticket ==
                     bakery_ticket_number(their_bakery_info->lock_data));
        }
    }
    /* Lock acquired */
}
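/*
 * Sketch of the helpers the coherency-agnostic variant above relies on. The
 * exact bit layout of lock_data (choosing flag in bit 0, ticket in the
 * remaining bits) and the priority encoding (ticket in the high byte, core
 * position as tie-breaker) are assumptions, chosen only to be consistent with
 * how the values are compared in the loop above.
 */
#define PRIORITY(t, pos)            (((t) << 8) | (pos))

#define bakery_is_choosing(info)    ((info) & 0x1U)
#define bakery_ticket_number(info)  (((info) >> 1) & 0x7FFFU)
#define make_bakery_data(choosing, number) \
        ((((choosing) & 0x1U) | ((number) << 1)) & 0xFFFFU)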
/*
 * __mcpm_outbound_enter_critical: Enter the cluster teardown critical section.
 * This function should be called by the last man, after local CPU teardown
 * is complete. CPU cache expected to be active.
 *
 * Returns:
 *     false: the critical section was not entered because an inbound CPU was
 *         observed, or the cluster is already being set up;
 *     true: the critical section was entered: it is now safe to tear down the
 *         cluster.
 */
bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster)
{
    unsigned int i;
    struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster];

    /* Warn inbound CPUs that the cluster is being torn down: */
    c->cluster = CLUSTER_GOING_DOWN;
    sync_cache_w(&c->cluster);

    /* Back out if the inbound cluster is already in the critical region: */
    sync_cache_r(&c->inbound);
    if (c->inbound == INBOUND_COMING_UP)
        goto abort;

    /*
     * Wait for all CPUs to get out of the GOING_DOWN state, so that local
     * teardown is complete on each CPU before tearing down the cluster.
     *
     * If any CPU has been woken up again from the DOWN state, then we
     * shouldn't be taking the cluster down at all: abort in that case.
     */
    sync_cache_r(&c->cpus);
    for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++) {
        int cpustate;

        if (i == cpu)
            continue;

        while (1) {
            cpustate = c->cpus[i].cpu;
            if (cpustate != CPU_GOING_DOWN)
                break;

            wfe();
            sync_cache_r(&c->cpus[i].cpu);
        }

        switch (cpustate) {
        case CPU_DOWN:
            continue;

        default:
            goto abort;
        }
    }

    return true;

abort:
    __mcpm_outbound_leave_critical(cluster, CLUSTER_UP);
    return false;
}
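/*
 * The wfe() loop above is released by the sev() each individual CPU issues
 * once its local teardown is done and it moves from CPU_GOING_DOWN to
 * CPU_DOWN. A sketch of that per-CPU side, mirroring the explicit
 * cache-maintenance discipline used above (the dmb() placement is an
 * assumption):
 */
void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster)
{
    /* Order all prior teardown writes before publishing the new state */
    dmb();
    mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN;
    sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);

    /* Wake the last man polling this CPU's state in wfe() */
    sev();
}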
/*
 * Each active core apart from the one changing the DDR frequency will
 * execute this function. The rest of the cores have to remain in WFE
 * state until the frequency is changed.
 */
irqreturn_t wait_in_wfe_irq(int irq, void *dev_id)
{
    u32 me = smp_processor_id();

    *((char *)(&cpus_in_wfe) + (u8)me) = 0xff;

    while (wait_for_ddr_freq_update)
        wfe();

    *((char *)(&cpus_in_wfe) + (u8)me) = 0;

    return IRQ_HANDLED;
}
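/*
 * Sketch of the coordinating CPU's side (the helper name is illustrative),
 * under the assumption that the core changing the DDR frequency simply clears
 * the flag and issues sev() so the other cores fall out of wfe() in
 * wait_in_wfe_irq(). Raising the IPI that drives the other cores into that
 * handler, and polling cpus_in_wfe before the frequency change, are omitted.
 */
static void release_cores_after_ddr_freq_update(void)
{
    wait_for_ddr_freq_update = 0;
    dsb();    /* make the flag update visible before waking the waiters */
    sev();
}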
static void __smp_boot_secondary(int cpu, secondary_entry_fn entry)
{
    int ret;

    secondary_data.stack = thread_stack_alloc();
    secondary_data.entry = entry;
    mmu_mark_disabled(cpu);
    ret = cpu_psci_cpu_boot(cpu);
    assert(ret == 0);

    while (!cpu_online(cpu))
        wfe();
}
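/*
 * Sketch of the secondary CPU's half of this handshake (the function name and
 * the set_cpu_online() helper are assumptions): once the secondary has set up
 * its own state it marks itself online and issues sev(), which releases the
 * boot CPU spinning in the wfe() loop above.
 */
static void __smp_secondary_online(int cpu, secondary_entry_fn entry)
{
    /* per-CPU setup (stack switch, MMU, exception vectors, ...) happens
     * before this point */
    set_cpu_online(cpu, true);
    sev();

    entry();
}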
static struct cpu_action *wait_for_action(struct cpu_action_queue *q,
                                          struct cpu_action *local)
{
    struct cpu_action *action;

    while (action_queue_empty(q))
        wfe();

    /*
     * Keep the original address, but use a local copy for async processing.
     */
    do {
        action = load_acquire_exclusive(&q->todo);
        *local = *action;
    } while (!store_release_exclusive(&q->todo, NULL));

    return action;
}
void LPR_Ram(void) inram
#endif
#ifdef _IAR_
#pragma location="MY_RAM_FUNC"
void LPR_Ram(void)
#endif
{
    uint8_t i = 0;

    /* To reduce consumption to the minimum, switch off the Flash */
    FLASH->CR1 = 0x08;
    while (((CLK->REGCSR) & 0x80) == 0x80);

    /* Switch off the regulator */
    CLK->REGCSR = 0x02;
    while (((CLK->REGCSR) & 0x01) == 0x01);

    /* Set trigger on GPIOE pin 6 */
    WFE->CR2 = 0x04;
    GPIOE->CR2 = 0x44;

    for (i = 0; i < 100; i++);

    /* To start the counter on a falling edge */
    GPIO_LOW(CTN_GPIO_PORT, CTN_CNTEN_GPIO_PIN);

    /* Wait for end of counter */
    wfe();

    EXTI->SR1 |= 0x40;
    WFE->CR2 = 0x00;

    /* Switch on the regulator */
    CLK->REGCSR = 0x00;
    while (((CLK->REGCSR) & 0x1) != 0x1);
}
/*
 * Power off the whole system; save user data to flash before calling this
 * function.
 * BE SURE: all hardware has been closed and its PRCM config set back to the
 * default values.
 * @type: 0: shutdown, 1: reboot
 */
static void pm_power_off(pm_operate_t type)
{
    __record_dbg_status(PM_POWEROFF | 0);

    if (type == PM_REBOOT) {
        PM_REBOOT(); /* never return */
    }

#ifdef __CONFIG_ARCH_APP_CORE
    /* steps 1 & 2 have been done when the wlan sys was powered off */

    /* step 3: writel(0x0f, GPRCM_SYS1_WAKEUP_CTRL) to tell the PMU to turn on
     * SW1, SW2, SRSW1, LDO before releasing the application system reset
     * signal. */
    HAL_PRCM_SetSys1WakeupPowerFlags(0x0f);
    __record_dbg_status(PM_POWEROFF | 5);

    /* step 4: writel(0x0f, GPRCM_SYS1_SLEEP_CTRL) to tell the PMU to turn off
     * SW1, SW3, SRSW1, LDO after pulling down the application system reset
     * signal. */
    HAL_PRCM_SetSys1SleepPowerFlags(0x0f);
    __record_dbg_status(PM_POWEROFF | 7);

    /* step 5: switch to HOSC, close SYS1_CLK. */
    PM_SystemDeinit();
    __record_dbg_status(PM_POWEROFF | 9);

    /* step 6: set the NVIC deepsleep flag, and enter wfe. */
    SCB->SCR = 0x14;
    PM_SetCPUBootFlag(0);

    __disable_fault_irq();
    __disable_irq();

    if (check_wakeup_irqs()) {
        PM_REBOOT();
    }
    wfe();
    if (check_wakeup_irqs()) {
        PM_REBOOT();
    }
    wfe();
    /* some irq was generated during the second wfe */
    PM_REBOOT();

    __record_dbg_status(PM_POWEROFF | 0x0ff);
#else /* net cpu */
    /* check: has wifi been closed by the app? */
    PM_WARN_ON(HAL_PRCM_IsSys3Release());

    /* step 1: switch the cpu to HOSC */
    HAL_PRCM_SetCPUNClk(PRCM_CPU_CLK_SRC_HFCLK, PRCM_SYS_CLK_FACTOR_80M);
    __record_dbg_status(PM_POWEROFF | 1);

    /* step 2: turn off SYSCLK2. */
    HAL_PRCM_DisableSysClk2(PRCM_SYS_CLK_FACTOR_80M);
    __record_dbg_status(PM_POWEROFF | 3);

    PM_SystemDeinit();

    /* step 3: enter the WFI state */
    arch_suspend_disable_irqs();
    while (1)
        wfi();

    __record_dbg_status(PM_POWEROFF | 0x0ff);
#endif
}
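/*
 * A note on the magic value above, as a sketch using CMSIS names (the exact
 * device header is an assumption): 0x14 written to SCB->SCR is
 * SLEEPDEEP | SEVONPEND. WFE returns immediately if the core's event register
 * is already set, which is why the code issues wfe() twice: the first call
 * consumes any stale event, the second one actually enters deep sleep.
 */
#include "core_cm4.h"    /* CMSIS core header; assumed for SCB and __WFE() */

static void enter_deepsleep_wfe(void)
{
    SCB->SCR = SCB_SCR_SLEEPDEEP_Msk | SCB_SCR_SEVONPEND_Msk;  /* == 0x14 */
    __WFE();    /* clear a possibly pending event */
    __WFE();    /* now really sleep until an event or (SEVONPEND) pending IRQ */
}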
static void wait_for_action_complete(struct cpu_action_queue *q,
                                     struct cpu_action *a)
{
    while (!action_completed(q, a))
        wfe();
}
static inline void wait_for_action_queue_slot(struct cpu_action_queue *q)
{
    while (!action_queue_empty(q))
        wfe();
}
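/*
 * Sketch of the producer side of this action queue (the helper name and the
 * store_release()/sev() pairing are assumptions): publish the action once the
 * slot is free, then signal an event so a consumer parked in wfe() inside
 * wait_for_action() re-checks the queue.
 */
static void queue_action(struct cpu_action_queue *q, struct cpu_action *action)
{
    /* Spin (in wfe()) until q->todo has been drained by the consumer */
    wait_for_action_queue_slot(q);

    /* Publish the action with release semantics, then wake the waiters */
    store_release(&q->todo, action);
    sev();
}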