void
ml_arm_sleep(void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	if (cpu_data_ptr == &BootCpuData) {
		cpu_data_t *target_cdp;
		unsigned int cpu;

		/* Wait for every secondary core to reach the sleep path. */
		for (cpu = 0; cpu < MAX_CPUS; cpu++) {
			target_cdp = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;
			if (target_cdp == (cpu_data_t *)NULL)
				break;
			if (target_cdp == cpu_data_ptr)
				continue;
			while (target_cdp->cpu_sleep_token != ARM_CPU_ON_SLEEP_PATH)
				;
		}

		/*
		 * Now that the other cores have entered the sleep path, set
		 * the abstime fixup we'll use when we resume.
		 */
		rtclock_base_abstime = ml_get_timebase();
		wake_abstime = rtclock_base_abstime;
	} else {
		platform_cache_disable();
		CleanPoU_Dcache();
	}

	cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH;

#if __ARM_SMP__ && defined(ARMA7)
	cpu_data_ptr->cpu_CLWFlush_req = 0;
	cpu_data_ptr->cpu_CLWClean_req = 0;
	/* Make the cleared requests visible before marking CLW inactive. */
	__builtin_arm_dmb(DMB_ISH);
	cpu_data_ptr->cpu_CLW_active = 0;
#endif

	if (cpu_data_ptr == &BootCpuData) {
		platform_cache_disable();
		platform_cache_shutdown();
		bcopy((const void *)suspend_signature, (void *)(IOS_STATE), IOS_STATE_SIZE);
	} else {
		CleanPoC_DcacheRegion((vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t));
	}

	/* Wait for all prior stores and cache maintenance to complete. */
	__builtin_arm_dsb(DSB_SY);

	while (TRUE) {
#if __ARM_ENABLE_WFE_
		__builtin_arm_wfe();
#endif
	} /* Spin */
}
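The DMB/DSB pairing in ml_arm_sleep is the standard publish-then-wait idiom: a DMB orders the data stores before the flag store, and a DSB then waits for everything, including cache maintenance, to complete before the core parks. Below is a minimal standalone sketch of the same idiom, assuming clang's __builtin_arm_dmb/__builtin_arm_dsb/__builtin_arm_wfe builtins and the architectural barrier option encodings (ISH = 0xB, SY = 0xF); the DMB_ISH/DSB_SY macros and the payload/ready variables are illustrative stand-ins, not XNU's definitions.

/* Illustrative stand-ins for XNU's barrier constants; the numeric
 * encodings are architectural (DMB/DSB 4-bit option field). */
#define DMB_ISH 0xB /* data memory barrier, inner-shareable domain */
#define DSB_SY  0xF /* data synchronization barrier, full system   */

static volatile int payload; /* data being published (illustrative) */
static volatile int ready;   /* publication flag (illustrative)     */

void
publish_then_park(int value)
{
	payload = value;            /* 1. write the data                       */
	__builtin_arm_dmb(DMB_ISH); /* 2. order the data store before the
	                             *    flag: any observer that sees
	                             *    ready == 1 also sees the payload    */
	ready = 1;                  /* 3. publish                              */

	__builtin_arm_dsb(DSB_SY);  /* 4. unlike DMB, DSB also *waits* for
	                             *    prior accesses to complete, which
	                             *    is why ml_arm_sleep issues one
	                             *    before spinning in WFE              */
	while (1) {
		__builtin_arm_wfe();    /* park until an event or interrupt */
	}
}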
void test_barrier() {
	__builtin_arm_dmb(1); // CHECK: call {{.*}} @llvm.arm.dmb(i32 1)
	__builtin_arm_dsb(2); // CHECK: call {{.*}} @llvm.arm.dsb(i32 2)
	__builtin_arm_isb(3); // CHECK: call {{.*}} @llvm.arm.isb(i32 3)
}
void barriers() {
	__builtin_arm_dmb(1); // CHECK: call {{.*}} @llvm.aarch64.dmb(i32 1)
	__builtin_arm_dsb(2); // CHECK: call {{.*}} @llvm.aarch64.dsb(i32 2)
	__builtin_arm_isb(3); // CHECK: call {{.*}} @llvm.aarch64.isb(i32 3)
}
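The raw integers 1, 2, 3 in the two tests above are just sample values for the barriers' 4-bit option field; the same builtins lower to the target-specific dmb/dsb/isb intrinsics on 32-bit ARM and AArch64 respectively. A small sketch with the architectural option names spelled out as constants (the enumerator names are ours; only the numeric values are fixed by the architecture):

/* 4-bit barrier option encodings from the ARM architecture manual.
 * The enumerator names are illustrative; the values are architectural. */
enum arm_barrier_option {
	BARRIER_OSHST = 0x2, BARRIER_OSH = 0x3, /* outer shareable */
	BARRIER_NSHST = 0x6, BARRIER_NSH = 0x7, /* non-shareable   */
	BARRIER_ISHST = 0xA, BARRIER_ISH = 0xB, /* inner shareable */
	BARRIER_ST    = 0xE, BARRIER_SY  = 0xF, /* full system     */
};

void named_barriers(void)
{
	__builtin_arm_dmb(BARRIER_ISHST); /* order stores, inner-shareable    */
	__builtin_arm_dsb(BARRIER_SY);    /* wait for all accesses            */
	__builtin_arm_isb(BARRIER_SY);    /* flush the pipeline; SY (0xF) is
	                                   * the only meaningful ISB option   */
}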
void test3() {
	__builtin_arm_dsb(16); // expected-error {{argument should be a value from 0 to 15}}
	__builtin_arm_dmb(17); // expected-error {{argument should be a value from 0 to 15}}
	__builtin_arm_isb(18); // expected-error {{argument should be a value from 0 to 15}}
}
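The range check exercised above is one of two Sema constraints on these builtins: the argument must also be an integer constant expression, since it is encoded directly as an immediate field of the instruction. A sketch of both sides, with illustrative function names (the exact diagnostic text for the non-constant case may vary across clang versions):

void barrier_args_ok(void)
{
	__builtin_arm_dmb(15); /* accepted: constant in [0, 15] */
}

void barrier_args_bad(int n)
{
	__builtin_arm_dmb(n);  /* rejected: clang requires a constant
	                        * integer argument, because the option
	                        * is an immediate field of the DMB
	                        * instruction */
}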