void threads_initialize(void) { int i; struct thread *t; u8 *stack_top; struct cpu_info *ci; u8 *thread_stacks; thread_stacks = arch_get_thread_stackbase(); /* Initialize the BSP thread first. The cpu_info structure is assumed * to be just under the top of the stack. */ t = &all_threads[0]; ci = cpu_info(); ci->thread = t; t->stack_orig = (uintptr_t)ci; t->id = 0; stack_top = &thread_stacks[CONFIG_STACK_SIZE] - sizeof(struct cpu_info); for (i = 1; i < TOTAL_NUM_THREADS; i++) { t = &all_threads[i]; t->stack_orig = (uintptr_t)stack_top; t->id = i; stack_top += CONFIG_STACK_SIZE; free_thread(t); } idle_thread_init(); }
/**
 * Bare-metal C entry point (called from the boot stub).
 *
 * Brings up a console (framebuffer or UART, depending on the
 * ENABLE_FRAMEBUFFER build switch), prints memory and CPU information,
 * then queries and reprograms VideoCore-managed clock rates and prints
 * the board MAC address via the mailbox property interface.
 *
 * @param boot_dev   boot device id from the boot stub (unused here)
 * @param arm_m_type ARM machine type from the boot stub (unused here)
 * @param atags      ATAG list address from the boot stub (unused here)
 * @return 0 when the demo sequence completes
 */
int notmain(uint32_t boot_dev, uint32_t arm_m_type, uint32_t atags)
{
#ifdef ENABLE_FRAMEBUFFER
	fb_init();
#else
	bcm2835_uart_begin();
#endif
	printf("Compiled on %s at %s\n\n", __DATE__, __TIME__);

	mem_info();
	cpu_info();

	/* Report current clock rates as seen by the VideoCore firmware. */
	printf("\n");
	printf("EMMC Clock rate (Hz): %ld\n", bcm2835_vc_get_clock_rate(BCM2835_VC_CLOCK_ID_EMMC));
	printf("UART Clock rate (Hz): %ld\n", bcm2835_vc_get_clock_rate(BCM2835_VC_CLOCK_ID_UART));
	printf("ARM Clock rate (Hz): %ld\n", bcm2835_vc_get_clock_rate(BCM2835_VC_CLOCK_ID_ARM));
	printf("CORE Clock rate (Hz): %ld\n", bcm2835_vc_get_clock_rate(BCM2835_VC_CLOCK_ID_CORE));
	printf("\n");

	/* Reprogram the UART clock, then read it back to confirm the change. */
	printf("Set UART Clock rate 4000000 Hz: %ld\n", bcm2835_vc_set_clock_rate(BCM2835_VC_CLOCK_ID_UART, 4000000));
	printf("UART Clock rate (Hz): %ld\n", bcm2835_vc_get_clock_rate(BCM2835_VC_CLOCK_ID_UART));
	printf("\n");

	uint8_t mac_address[6];
	bcm2835_vc_get_board_mac_address(mac_address);
	printf("MAC address : %.2X:%.2X:%.2X:%.2X:%.2X:%.2X\n", mac_address[0],mac_address[1],mac_address[2],mac_address[3],mac_address[4],mac_address[5]);

	printf("\nProgram ending...\n");

	return 0;
}
static void secmon_start(void *arg) { uint32_t scr; secmon_entry_t entry; struct secmon_params *p; struct secmon_runit *r = arg; entry = r->entry; p = &r->params; /* Obtain secondary entry point for non-BSP CPUs. */ if (!cpu_is_bsp()) entry = secondary_entry_point(entry); printk(BIOS_DEBUG, "CPU%x entering secure monitor %p.\n", cpu_info()->id, entry); /* We want to enforce the following policies: * NS bit is set for lower EL */ scr = raw_read_scr_el3(); scr |= SCR_NS; raw_write_scr_el3(scr); entry(p); }
/*
 * Entry point run by a secondary (non-BSP) CPU after startup: publish
 * the CPU as online, then park in the action-queue service loop
 * (arch_cpu_wait_for_action() loops forever, so this never returns).
 */
void arch_secondary_cpu_init(void)
{
	/* Mark this CPU online. */
	cpu_mark_online(cpu_info());

	arch_cpu_wait_for_action();
}
int main() { auto info = *ctop::system_query(); // Print global information. cc::println(info); cc::println(info.cpu_info()); for (const auto& cache : info.cpu_info().caches()) { cc::println(cache); } // Print information local to each NUMA node. for (const auto& node : info.available_numa_nodes()) { auto& cpu = node.cpu_info(); cc::println(node); cc::println(cpu); for (const auto& thread : cpu.available_threads()) { cc::println(thread); } } }
/*
 * Kernel C entry point: announce start-up on the console, print CPU
 * identification, dump an MSR, kick off multiprocessor init, then spin
 * forever (this function never returns).
 */
void cstart()
{
	disp_str("cstart begin\n");

	/* Descriptor-table / PIC setup currently disabled. */
	// init_gdt();
	// init_8259A();
	// init_idt();

	cpu_info();
	cpuid(1);
	/* MSR 0x1B is IA32_APIC_BASE on x86; dump it for debugging. */
	disp_int(rdmsr(0x1B));disp_str("\n");
	init_mp();

	disp_str("cstart finish\n");

	/* Nothing left to do — halt here. */
	while(1);
}
/*
 * Derive the CPU clock frequency (Hz) from the "cpu MHz" field of
 * /proc/cpuinfo.
 *
 * The whole file is tokenized on whitespace; the literal token "MHz" is
 * located and the value follows two tokens later ("cpu MHz : 2400.000").
 *
 * Throws std::runtime_error if the field is missing or truncated, and
 * whatever std::stod throws if the value token is not numeric.
 */
static uint64_t clock()
{
	std::ifstream cpu_info("/proc/cpuinfo");
	std::vector<std::string> tokens((std::istream_iterator<std::string>(cpu_info)),
					(std::istream_iterator<std::string>()));

	auto it = std::find(tokens.begin(), tokens.end(), "MHz");
	// Guard the two-token lookahead as well: the previous version
	// dereferenced std::next(it, 2) unconditionally, which is undefined
	// behavior when "MHz" is one of the last two tokens in the file.
	if (it == tokens.end() || std::distance(it, tokens.end()) <= 2)
		throw std::runtime_error("TimeStampCounter::clock");

	auto mhz = std::stod(*std::next(it, 2));
	return static_cast<uint64_t>(mhz) * 1000000;
}
/*
 * Dispatch ACTION to every CPU except the caller.  SYNC selects
 * synchronous vs. asynchronous delivery (passed through unchanged).
 * Always returns 0.
 */
static int __arch_run_on_all_cpus_but_self(struct cpu_action *action,
					   int sync)
{
	struct cpu_info *self = cpu_info();
	int idx;

	for (idx = 0; idx < CONFIG_MAX_CPUS; idx++) {
		struct cpu_info *target = cpu_info_for_cpu(idx);

		/* Never send the action back to the calling CPU. */
		if (target == self)
			continue;
		action_run_on_cpu(target, action, sync);
	}

	return 0;
}
void arch_cpu_wait_for_action(void) { struct cpu_info *ci = cpu_info(); struct cpu_action_queue *q = &ci->action_queue; while (1) { struct cpu_action *orig; struct cpu_action action; orig = wait_for_action(q, &action); action_run(&action); action_queue_complete(q, orig); } }
/* Give some information about the simulator.  With no argument, report
   CPU state and pending interrupts; with a " name" or "-name" argument,
   forward the request to the named hardware device model.  */
static void sim_get_info (SIM_DESC sd, char *cmd)
{
  sim_cpu *cpu;

  cpu = STATE_CPU (sd, 0);

  /* A leading ' ' or '-' introduces a device name: look it up in the
     arch-specific device table and hand the request to the hw model.  */
  if (cmd != 0 && (cmd[0] == ' ' || cmd[0] == '-'))
    {
      int i;
      struct hw *hw_dev;
      struct sim_info_list *dev_list;
      const struct bfd_arch_info *arch;

      arch = STATE_ARCHITECTURE (sd);
      cmd++;

      /* Pick the device table that matches the simulated chip.  */
      if (arch->arch == bfd_arch_m68hc11)
	dev_list = dev_list_68hc11;
      else
	dev_list = dev_list_68hc12;

      /* Linear scan over the NULL-name-terminated table.  */
      for (i = 0; dev_list[i].name; i++)
	if (strcmp (cmd, dev_list[i].name) == 0)
	  break;

      if (dev_list[i].name == 0)
	{
	  sim_io_eprintf (sd, "Device '%s' not found.\n", cmd);
	  sim_io_eprintf (sd, "Valid devices: cpu timer sio eeprom\n");
	  return;
	}
      hw_dev = sim_hw_parse (sd, dev_list[i].device);
      if (hw_dev == 0)
	{
	  sim_io_eprintf (sd, "Device '%s' not found\n", dev_list[i].device);
	  return;
	}
      /* NOTE(review): 23 appears to be a device-specific ioctl request
	 code -- confirm against the hw model before changing.  */
      hw_ioctl (hw_dev, 23, 0);
      return;
    }

  /* No device argument: report CPU state and pending interrupts.  */
  cpu_info (sd, cpu);
  interrupts_info (sd, &cpu->cpu_interrupts);
}
/*
 * Poll memory, CPU, and process statistics forever, printing an
 * incrementing iteration counter and sleeping 50 ms between rounds.
 * Failures are reported to stderr but do not stop the loop.
 */
int main()
{
	const char *err;
	int iteration = 0;

	for (;;) {
		if (!mem_info(&err))
			fprintf(stderr, "Mem info failed: %s\n", err);
		if (!cpu_info(&err))
			fprintf(stderr, "CPU info failed: %s\n", err);
		if (!read_procs(&err))
			fprintf(stderr, "Process info failed: %s\n", err);

		printf("%d\n", iteration++);
		usleep(50000);
	}
}
/*
 * Fill in CPU->vendor/family/model/stepping from the CPUID leaf-1
 * signature returned by cpu_info(), applying the extended family/model
 * adjustments defined by the x86 CPUID encoding.
 */
static void set_cpuid(struct pt_cpu *cpu)
{
	uint32_t signature;
	uint16_t base_family;

	cpu->vendor = cpu_vendor();

	signature = cpu_info();

	base_family = (signature >> 8) & 0xf;
	cpu->family = base_family;
	/* The extended family field only contributes for family 0xf. */
	if (base_family == 0xf)
		cpu->family += (signature >> 20) & 0xf;

	cpu->model = (signature >> 4) & 0xf;
	/* Families 0x6 and 0xf carry extra model bits in bits 19:16. */
	if (base_family == 0x6 || base_family == 0xf)
		cpu->model += (signature >> 12) & 0xf0;

	cpu->stepping = (signature >> 0) & 0xf;
}
static inline struct thread *get_free_thread(void) { struct thread *t; struct cpu_info *ci; struct cpu_info *new_ci; if (thread_list_empty(&free_threads)) return NULL; t = pop_thread(&free_threads); ci = cpu_info(); /* Initialize the cpu_info structure on the new stack. */ new_ci = thread_cpu_info(t); *new_ci = *ci; new_ci->thread = t; /* Reset the current stack value to the original. */ t->stack_current = t->stack_orig; return t; }
/*
 * Populate CPU with the running processor's vendor and CPUID leaf-1
 * identification (family/model/stepping), applying the extended
 * family/model adjustments from the x86 CPUID encoding.
 *
 * Returns 0 on success, -pte_invalid if CPU is NULL.
 */
int pt_cpu_read(struct pt_cpu *cpu)
{
	uint32_t signature;
	uint16_t base_family;

	if (!cpu)
		return -pte_invalid;

	cpu->vendor = cpu_vendor();

	signature = cpu_info();

	base_family = (signature >> 8) & 0xf;
	cpu->family = base_family;
	/* The extended family field only contributes for family 0xf. */
	if (base_family == 0xf)
		cpu->family += (signature >> 20) & 0xf;

	cpu->model = (signature >> 4) & 0xf;
	/* Families 0x6 and 0xf carry extra model bits in bits 19:16. */
	if (base_family == 0x6 || base_family == 0xf)
		cpu->model += (signature >> 12) & 0xf0;

	cpu->stepping = (signature >> 0) & 0xf;

	return 0;
}
/*
 * Bring up the BSP and all application processors on CPU_BUS.
 *
 * Resolves the boot CPU's device-tree node (by APIC id when LAPIC init
 * is needed, by fixed CPU id 0 otherwise), then sequences AP startup and
 * SMM initialization.  Two Kconfig knobs steer the ordering:
 * CONFIG_PARALLEL_CPU_INIT (start APs before or after the BSP's own
 * cpu_initialize(0)) and CONFIG_SERIALIZED_SMM_INITIALIZATION (run
 * smm_init() up front, or defer it until all APs sleep and then wake
 * them one at a time via smm_other_cpus()).
 */
void initialize_cpus(struct bus *cpu_bus)
{
	struct device_path cpu_path;
	struct cpu_info *info;

	/* Find the info struct for this CPU */
	info = cpu_info();

	if (need_lapic_init()) {
		/* Ensure the local APIC is enabled */
		enable_lapic();
		/* Get the device path of the boot CPU */
		cpu_path.type = DEVICE_PATH_APIC;
		cpu_path.apic.apic_id = lapicid();
	} else {
		/* Get the device path of the boot CPU */
		cpu_path.type = DEVICE_PATH_CPU;
		cpu_path.cpu.id = 0;
	}

	/* Find the device structure for the boot CPU */
	info->cpu = alloc_find_dev(cpu_bus, &cpu_path);

	// why here? In case some day we can start core1 in amd_sibling_init
	if (is_smp_boot())
		copy_secondary_start_to_lowest_1M();

	/* Non-serialized flow: SMM is set up before any AP runs. */
	if (!IS_ENABLED(CONFIG_SERIALIZED_SMM_INITIALIZATION))
		smm_init();

	/* start all aps at first, so we can init ECC all together */
	if (is_smp_boot() && IS_ENABLED(CONFIG_PARALLEL_CPU_INIT))
		start_other_cpus(cpu_bus, info->cpu);

	/* Initialize the bootstrap processor */
	cpu_initialize(0);

	if (is_smp_boot() && !IS_ENABLED(CONFIG_PARALLEL_CPU_INIT)) {
		start_other_cpus(cpu_bus, info->cpu);

		/* Now wait the rest of the cpus stop*/
		wait_other_cpus_stop(cpu_bus);
	}

	if (IS_ENABLED(CONFIG_SERIALIZED_SMM_INITIALIZATION)) {
		/* At this point, all APs are sleeping:
		 * smm_init() will queue a pending SMI on all cpus
		 * and smm_other_cpus() will start them one by one */
		smm_init();

		if (is_smp_boot()) {
			last_cpu_index = 0;
			smm_other_cpus(cpu_bus, info->cpu);
		}
	}

	smm_init_completion();

	/* Restore the low-memory region that held the AP trampoline. */
	if (is_smp_boot())
		recover_lowest_1M();
}
/*
 * Bring up every CPU under the CPU_CLUSTER device and run its init.
 *
 * Flow: validate the cluster device, run el3_init() (must precede
 * prepare_secondary_cpu_startup(), see comment below), mark the BSP
 * online, populate the per-CPU cpu_info structures, then for each CPU
 * present in the device tree and enabled: start it via
 * CNTRL_OPS->start_cpu() if it is not already online, wait up to 1
 * second for it to come online, and finally dispatch the init_this_cpu
 * action to it through arch_run_on_cpu().
 */
void arch_initialize_cpus(device_t cluster, struct cpu_control_ops *cntrl_ops)
{
	size_t max_cpus;
	size_t i;
	struct cpu_info *ci;
	void (*entry)(void);
	struct bus *bus;

	if (cluster->path.type != DEVICE_PATH_CPU_CLUSTER) {
		printk(BIOS_ERR,
			"CPU init failed. Device is not a CPU_CLUSTER: %s\n",
			dev_path(cluster));
		return;
	}

	bus = cluster->link_list;

	/* Check if no children under this device. */
	if (bus == NULL)
		return;

	/*
	 * el3_init must be performed prior to prepare_secondary_cpu_startup.
	 * This is important since el3_init initializes SCR values on BSP CPU
	 * and then prepare_secondary_cpu_startup reads the initialized SCR
	 * value and saves it for use by non-BSP CPUs.
	 */
	el3_init();
	/* Mark current cpu online. */
	cpu_mark_online(cpu_info());
	entry = prepare_secondary_cpu_startup();

	/* Initialize the cpu_info structures. */
	init_cpu_info(bus);

	/* Clamp the reported CPU count to what we can track. */
	max_cpus = cntrl_ops->total_cpus();
	if (max_cpus > CONFIG_MAX_CPUS) {
		printk(BIOS_WARNING,
			"max_cpus (%zu) exceeds CONFIG_MAX_CPUS (%zu).\n",
			max_cpus, (size_t)CONFIG_MAX_CPUS);
		max_cpus = CONFIG_MAX_CPUS;
	}

	for (i = 0; i < max_cpus; i++) {
		device_t dev;
		struct cpu_action action;
		struct stopwatch sw;

		ci = cpu_info_for_cpu(i);
		dev = ci->cpu;

		/* Disregard CPUs not in device tree. */
		if (dev == NULL)
			continue;

		/* Skip disabled CPUs. */
		if (!dev->enabled)
			continue;

		if (!cpu_online(ci)) {
			/* Start the CPU. */
			printk(BIOS_DEBUG, "Starting CPU%x\n", ci->id);
			if (cntrl_ops->start_cpu(ci->id, entry)) {
				printk(BIOS_ERR,
					"Failed to start CPU%x\n", ci->id);
				continue;
			}
			/* Allow the CPU up to 1 second to come online. */
			stopwatch_init_msecs_expire(&sw, 1000);
			/* Wait for CPU to come online. */
			while (!stopwatch_expired(&sw)) {
				if (!cpu_online(ci))
					continue;
				printk(BIOS_DEBUG,
					"CPU%x online in %ld usecs.\n",
					ci->id, stopwatch_duration_usecs(&sw));
				break;
			}
		}

		/* NOTE(review): this branch reads 'sw', which is only
		 * initialized when the start path above ran; it appears
		 * unreachable for a CPU that was already online -- confirm
		 * an online CPU can never report offline here. */
		if (!cpu_online(ci)) {
			printk(BIOS_DEBUG,
				"CPU%x failed to come online in %ld usecs.\n",
				ci->id, stopwatch_duration_usecs(&sw));
			continue;
		}

		/* Send it the init action. */
		action.run = init_this_cpu;
		action.arg = ci;
		arch_run_on_cpu(ci->id, &action);
	}
}
/*
 * Return information about PROCESSOR for the requested FLAVOR.
 *
 * PROCESSOR_BASIC_INFO fills type/subtype/running/slot/is_master;
 * PROCESSOR_CPU_LOAD_INFO reconstructs user/system/idle tick counts from
 * the per-processor timers using deliberately racy double-sampling (see
 * comments below); any other flavor is delegated to cpu_info().  On
 * success *host is set to &realhost and *count to the flavor's count.
 *
 * Returns KERN_INVALID_ARGUMENT for PROCESSOR_NULL, KERN_FAILURE when
 * the caller's buffer (*count) is too small, else the flavor's result.
 */
kern_return_t
processor_info(
	processor_t		processor,
	processor_flavor_t	flavor,
	host_t			*host,
	processor_info_t	info,
	mach_msg_type_number_t	*count)
{
	int	cpu_id, state;
	kern_return_t	result;

	if (processor == PROCESSOR_NULL)
		return (KERN_INVALID_ARGUMENT);

	cpu_id = processor->cpu_id;

	switch (flavor) {

	case PROCESSOR_BASIC_INFO:
	{
		processor_basic_info_t	basic_info;

		if (*count < PROCESSOR_BASIC_INFO_COUNT)
			return (KERN_FAILURE);

		basic_info = (processor_basic_info_t) info;
		basic_info->cpu_type = slot_type(cpu_id);
		basic_info->cpu_subtype = slot_subtype(cpu_id);
		state = processor->state;
		/* Any state other than OFF_LINE counts as running. */
		if (state == PROCESSOR_OFF_LINE)
			basic_info->running = FALSE;
		else
			basic_info->running = TRUE;
		basic_info->slot_num = cpu_id;
		if (processor == master_processor)
			basic_info->is_master = TRUE;
		else
			basic_info->is_master = FALSE;

		*count = PROCESSOR_BASIC_INFO_COUNT;
		*host = &realhost;

		return (KERN_SUCCESS);
	}

	case PROCESSOR_CPU_LOAD_INFO:
	{
		processor_cpu_load_info_t	cpu_load_info;
		timer_t		idle_state;
		uint64_t	idle_time_snapshot1, idle_time_snapshot2;
		uint64_t	idle_time_tstamp1, idle_time_tstamp2;

		/*
		 * We capture the accumulated idle time twice over
		 * the course of this function, as well as the timestamps
		 * when each were last updated. Since these are
		 * all done using non-atomic racy mechanisms, the
		 * most we can infer is whether values are stable.
		 * timer_grab() is the only function that can be
		 * used reliably on another processor's per-processor
		 * data.
		 */

		if (*count < PROCESSOR_CPU_LOAD_INFO_COUNT)
			return (KERN_FAILURE);

		cpu_load_info = (processor_cpu_load_info_t) info;
		if (precise_user_kernel_time) {
			cpu_load_info->cpu_ticks[CPU_STATE_USER] =
				(uint32_t)(timer_grab(&PROCESSOR_DATA(processor, user_state)) / hz_tick_interval);
			cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] =
				(uint32_t)(timer_grab(&PROCESSOR_DATA(processor, system_state)) / hz_tick_interval);
		} else {
			/* Without precise accounting, user and system time are
			 * indistinguishable: report the sum as user time. */
			uint64_t tval = timer_grab(&PROCESSOR_DATA(processor, user_state)) +
				timer_grab(&PROCESSOR_DATA(processor, system_state));

			cpu_load_info->cpu_ticks[CPU_STATE_USER] = (uint32_t)(tval / hz_tick_interval);
			cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] = 0;
		}

		/* First idle-timer snapshot (value + update timestamp). */
		idle_state = &PROCESSOR_DATA(processor, idle_state);
		idle_time_snapshot1 = timer_grab(idle_state);
		idle_time_tstamp1 = idle_state->tstamp;

		/*
		 * Idle processors are not continually updating their
		 * per-processor idle timer, so it may be extremely
		 * out of date, resulting in an over-representation
		 * of non-idle time between two measurement
		 * intervals by e.g. top(1). If we are non-idle, or
		 * have evidence that the timer is being updated
		 * concurrently, we consider its value up-to-date.
		 */
		if (PROCESSOR_DATA(processor, current_state) != idle_state) {
			cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
				(uint32_t)(idle_time_snapshot1 / hz_tick_interval);
		} else if ((idle_time_snapshot1 != (idle_time_snapshot2 = timer_grab(idle_state))) ||
			   (idle_time_tstamp1 != (idle_time_tstamp2 = idle_state->tstamp))){
			/* Idle timer is being updated concurrently, second stamp is good enough */
			cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
				(uint32_t)(idle_time_snapshot2 / hz_tick_interval);
		} else {
			/*
			 * Idle timer may be very stale. Fortunately we have established
			 * that idle_time_snapshot1 and idle_time_tstamp1 are unchanging:
			 * extrapolate the idle time up to the present.
			 */
			idle_time_snapshot1 += mach_absolute_time() - idle_time_tstamp1;

			cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
				(uint32_t)(idle_time_snapshot1 / hz_tick_interval);
		}

		cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0;

		*count = PROCESSOR_CPU_LOAD_INFO_COUNT;
		*host = &realhost;

		return (KERN_SUCCESS);
	}

	default:
		/* Machine-dependent flavors are handled by cpu_info(). */
		result = cpu_info(flavor, cpu_id, info, count);
		if (result == KERN_SUCCESS)
			*host = &realhost;

		return (result);
	}
}
/*
 * Per-core init for this AMD CPU model ("Model 15"): program the fixed
 * MTRRs (with the AMD RdDram/WrDram extension bits temporarily exposed),
 * clear machine-check status, enable the local APIC, expose sibling
 * cores when CONFIG_LOGICAL_CPUS is set, relocate SMM per-CPU, and
 * finally lock SMM space.  Order matters throughout: MSR writes are
 * performed with caches disabled and re-enabled afterwards.
 */
static void model_15_init(device_t dev)
{
	printk(BIOS_DEBUG, "Model 15 Init.\n");

	u8 i;
	msr_t msr;
	int msrno;
	unsigned int cpu_idx;
#if IS_ENABLED(CONFIG_LOGICAL_CPUS)
	u32 siblings;
#endif

	//x86_enable_cache();
	//amd_setup_mtrrs();
	//x86_mtrr_check();

	disable_cache ();
	/* Enable access to AMD RdDram and WrDram extension bits */
	msr = rdmsr(SYSCFG_MSR);
	msr.lo |= SYSCFG_MSR_MtrrFixDramModEn;
	msr.lo &= ~SYSCFG_MSR_MtrrFixDramEn;
	wrmsr(SYSCFG_MSR, msr);

	// BSP: make a0000-bffff UC, c0000-fffff WB, same as OntarioApMtrrSettingsList for APs
	/* NOTE(review): 0x250/0x258/0x259/0x268-0x26f are the x86 fixed-range
	 * MTRR MSRs; 0x1e per byte selects the WB memory type here. */
	msr.lo = msr.hi = 0;
	wrmsr (0x259, msr);
	msr.lo = msr.hi = 0x1e1e1e1e;
	wrmsr(0x250, msr);
	wrmsr(0x258, msr);
	for (msrno = 0x268; msrno <= 0x26f; msrno++)
		wrmsr (msrno, msr);

	/* Hide the RdDram/WrDram bits again and re-enable fixed DRAM MTRRs. */
	msr = rdmsr(SYSCFG_MSR);
	msr.lo &= ~SYSCFG_MSR_MtrrFixDramModEn;
	msr.lo |= SYSCFG_MSR_MtrrFixDramEn;
	wrmsr(SYSCFG_MSR, msr);

	/* On S3 resume, restore the MTRR set saved at suspend. */
	if (acpi_is_wakeup())
		restore_mtrr();

	x86_mtrr_check();
	x86_enable_cache();

	/* zero the machine check error status registers */
	msr.lo = 0;
	msr.hi = 0;
	for (i = 0; i < 6; i++) {
		wrmsr(MCI_STATUS + (i * 4), msr);
	}

	/* Enable the local cpu apics */
	setup_lapic();

#if IS_ENABLED(CONFIG_LOGICAL_CPUS)
	/* CPUID 0x80000008 ECX[7:0] = number of cores - 1. */
	siblings = cpuid_ecx(0x80000008) & 0xff;

	if (siblings > 0) {
		msr = rdmsr_amd(CPU_ID_FEATURES_MSR);
		msr.lo |= 1 << 28;
		wrmsr_amd(CPU_ID_FEATURES_MSR, msr);

		msr = rdmsr_amd(CPU_ID_EXT_FEATURES_MSR);
		msr.hi |= 1 << (33 - 32);
		wrmsr_amd(CPU_ID_EXT_FEATURES_MSR, msr);
	}
	printk(BIOS_DEBUG, "siblings = %02d, ", siblings);
#endif

	/* DisableCf8ExtCfg */
	msr = rdmsr(NB_CFG_MSR);
	msr.hi &= ~(1 << (46 - 32));
	wrmsr(NB_CFG_MSR, msr);

	if (IS_ENABLED(CONFIG_HAVE_SMI_HANDLER)) {
		cpu_idx = cpu_info()->index;
		printk(BIOS_INFO, "Initializing SMM for CPU %u\n", cpu_idx);

		/* Set SMM base address for this CPU: each CPU gets its own
		 * 0x400-byte save-state slot below SMM_BASE. */
		msr = rdmsr(MSR_SMM_BASE);
		msr.lo = SMM_BASE - (cpu_idx * 0x400);
		wrmsr(MSR_SMM_BASE, msr);

		/* Enable the SMM memory window */
		msr = rdmsr(MSR_SMM_MASK);
		msr.lo |= (1 << 0); /* Enable ASEG SMRAM Range */
		wrmsr(MSR_SMM_MASK, msr);
	}

	/* Write protect SMM space with SMMLOCK. */
	msr = rdmsr(HWCR_MSR);
	msr.lo |= (1 << 0);
	wrmsr(HWCR_MSR, msr);
}
/*
 * Return information about PROCESSOR for the requested FLAVOR
 * (older variant: CPU load ticks are read directly, without the
 * staleness heuristics of later kernels; 'register' is legacy).
 *
 * On success, *host is set to &realhost and *count to the flavor's
 * count constant.  Returns KERN_INVALID_ARGUMENT for PROCESSOR_NULL,
 * KERN_FAILURE when *count is too small, else the flavor's result.
 */
kern_return_t
processor_info(
	register processor_t	processor,
	processor_flavor_t	flavor,
	host_t			*host,
	processor_info_t	info,
	mach_msg_type_number_t	*count)
{
	register int	cpu_id, state;
	kern_return_t	result;

	if (processor == PROCESSOR_NULL)
		return (KERN_INVALID_ARGUMENT);

	cpu_id = processor->cpu_id;

	switch (flavor) {

	case PROCESSOR_BASIC_INFO:
	{
		register processor_basic_info_t	basic_info;

		if (*count < PROCESSOR_BASIC_INFO_COUNT)
			return (KERN_FAILURE);

		basic_info = (processor_basic_info_t) info;
		basic_info->cpu_type = slot_type(cpu_id);
		basic_info->cpu_subtype = slot_subtype(cpu_id);
		state = processor->state;
		/* Any state other than OFF_LINE counts as running. */
		if (state == PROCESSOR_OFF_LINE)
			basic_info->running = FALSE;
		else
			basic_info->running = TRUE;
		basic_info->slot_num = cpu_id;
		if (processor == master_processor)
			basic_info->is_master = TRUE;
		else
			basic_info->is_master = FALSE;

		*count = PROCESSOR_BASIC_INFO_COUNT;
		*host = &realhost;

		return (KERN_SUCCESS);
	}

	case PROCESSOR_CPU_LOAD_INFO:
	{
		register processor_cpu_load_info_t	cpu_load_info;

		if (*count < PROCESSOR_CPU_LOAD_INFO_COUNT)
			return (KERN_FAILURE);

		/* Convert each accumulated per-processor timer to ticks. */
		cpu_load_info = (processor_cpu_load_info_t) info;
		cpu_load_info->cpu_ticks[CPU_STATE_USER] =
			(uint32_t)(timer_grab(&PROCESSOR_DATA(processor, user_state)) / hz_tick_interval);
		cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] =
			(uint32_t)(timer_grab(&PROCESSOR_DATA(processor, system_state)) / hz_tick_interval);
		cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
			(uint32_t)(timer_grab(&PROCESSOR_DATA(processor, idle_state)) / hz_tick_interval);
		cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0;

		*count = PROCESSOR_CPU_LOAD_INFO_COUNT;
		*host = &realhost;

		return (KERN_SUCCESS);
	}

	default:
		/* Machine-dependent flavors are handled by cpu_info(). */
		result = cpu_info(flavor, cpu_id, info, count);
		if (result == KERN_SUCCESS)
			*host = &realhost;

		return (result);
	}
}
void init_all(int argc, char **argv) { cpu_info(); char *config = 0, *bpx = 0, *labels = 0; char opt; while ((opt = getopt(argc, argv, "i:l:b:")) != EOF) switch (opt) { case 'i': config = optarg; break; case 'b': bpx = optarg; break; case 'l': labels = optarg; } temp.Minimized = false; init_z80tables(); init_ie_help(); load_config(config); //make_samples(); #ifdef MOD_GS init_gs(); #endif init_leds(); init_tape(); init_hdd_cd(); start_dx(); init_debug(); applyconfig(); main_reset(); autoload(); init_bpx(bpx); init_labels(labels); temp.Gdiplus = GdiplusStartup(); if (!temp.Gdiplus) { color(CONSCLR_WARNING); printf("warning: gdiplus.dll was not loaded, only SCR and BMP screenshots available\n"); } if (comp.ts.vdac2) { if (!vdac2::open_ft8xx()) { color(CONSCLR_WARNING); printf("warning: ft8xx library was not loaded\n"); comp.ts.vdac2 = false; } } load_errors = 0; trd_toload = 0; *(DWORD*)trd_loaded = 0; // clear loaded flags, don't see autoload'ed images for (; optind < argc; optind++) { char fname[0x200], *temp; GetFullPathName(argv[optind], sizeof fname, fname, &temp); trd_toload = DefaultDrive; // auto-select if (!loadsnap(fname)) errmsg("error loading <%s>", argv[optind]), load_errors = 1; } if (load_errors) { int code = MessageBox(wnd, "Some files, specified in\r\ncommand line, failed to load\r\n\r\nContinue emulation?", "File loading error", MB_YESNO | MB_ICONWARNING); if (code != IDYES) exit(); } SetCurrentDirectory(conf.workdir); // timeBeginPeriod(1); InitializeCriticalSection(&tsu_toggle_cr); }
/* Return the thread currently executing on this CPU, derived from the
 * CPU's cpu_info structure. */
static inline struct thread *current_thread(void)
{
	return cpu_info_to_thread(cpu_info());
}
//==================================== Old Functions ==================================== void FMTPSender::SendFileBufferedIO(const char* file_name) { PerformanceCounter cpu_info(50); cpu_info.SetCPUFlag(true); cpu_info.Start(); ResetSessionStatistics(); AccessCPUCounter(&cpu_counter.hi, &cpu_counter.lo); struct stat file_status; stat(file_name, &file_status); ulong file_size = file_status.st_size; ulong remained_size = file_size; // Send a notification to all receivers before starting the memory transfer struct FmtpSenderMessage msg; msg.session_id = cur_session_id; msg.msg_type = FILE_TRANSFER_START; msg.data_len = file_size; strcpy(msg.text, file_name); retrans_tcp_server->SendToAll(&msg, sizeof(msg)); //cout << "Start file transferring..." << endl; // Transfer the file using memory mapped I/O int fd = open(file_name, O_RDWR); if (fd < 0) { SysError("FMTPSender()::SendFile(): File open error!"); } char* buffer = (char *)malloc(FMTP_DATA_LEN); off_t offset = 0; while (remained_size > 0) { uint read_size = remained_size < FMTP_DATA_LEN ? remained_size : FMTP_DATA_LEN; ssize_t res = read(fd, buffer, read_size); if (res < 0) { SysError("FMTPSender::SendFileBufferedIO()::read() error"); } DoMemoryTransfer(buffer, read_size, offset); offset += read_size; remained_size -= read_size; } free(buffer); // Record memory data multicast time send_stats.session_trans_time = GetElapsedSeconds(cpu_counter); AccessCPUCounter(&cpu_counter.hi, &cpu_counter.lo); // Send a notification to all receivers to start retransmission msg.msg_type = FILE_TRANSFER_FINISH; retrans_tcp_server->SendToAll(&msg, sizeof(msg)); //cout << "File transfer finished. Start retransmission..." 
<< endl; if (retrans_scheme == RETRANS_SERIAL) DoFileRetransmissionSerial(fd); else if (retrans_scheme == RETRANS_SERIAL_RR) DoFileRetransmissionSerialRR(fd); else if (retrans_scheme == RETRANS_PARALLEL) DoFileRetransmissionParallel(file_name); close(fd); // collect experiment results from receivers CollectExpResults(); // Record total transfer and retransmission time send_stats.session_retrans_time = GetElapsedSeconds(cpu_counter); //send_stats.session_total_time - send_stats.session_trans_time; send_stats.session_total_time = send_stats.session_trans_time + send_stats.session_retrans_time; //GetElapsedSeconds(cpu_counter); send_stats.session_retrans_percentage = send_stats.session_retrans_packets * 1.0 / (send_stats.session_sent_packets + send_stats.session_retrans_packets); // Increase the session id for the next transfer cur_session_id++; SendSessionStatistics(); cpu_info.Stop(); }
void FMTPSender::TcpSendFile(const char* file_name) { AccessCPUCounter(&cpu_counter.hi, &cpu_counter.lo); struct stat file_status; stat(file_name, &file_status); ulong file_size = file_status.st_size; ulong remained_size = file_size; // Send a notification to all receivers before starting the memory transfer char msg_packet[500]; FmtpHeader* header = (FmtpHeader*)msg_packet; header->session_id = cur_session_id; header->seq_number = 0; header->data_len = sizeof(FmtpSenderMessage); header->flags = FMTP_SENDER_MSG_EXP; FmtpSenderMessage* msg = (FmtpSenderMessage*)(msg_packet + FMTP_HLEN); msg->msg_type = TCP_FILE_TRANSFER_START; msg->session_id = cur_session_id; msg->data_len = file_size; strcpy(msg->text, file_name); retrans_tcp_server->SendToAll(&msg_packet, FMTP_HLEN + sizeof(FmtpSenderMessage)); PerformanceCounter cpu_info(100); cpu_info.SetCPUFlag(true); cpu_info.Start(); cout << "Start TCP file transferring..." << endl; list<int> sock_list = retrans_tcp_server->GetSocketList(); list<TcpThreadInfo*> thread_info_list; list<pthread_t*> thread_list; int file_name_len = strlen(file_name); for (list<int>::iterator it = sock_list.begin(); it != sock_list.end(); it++) { TcpThreadInfo* info = new TcpThreadInfo(); info->ptr = this; info->sock_fd = *it; memcpy(info->file_name, file_name, file_name_len); thread_info_list.push_back(info); pthread_t * t = new pthread_t(); pthread_create(t, NULL, &FMTPSender::StartTcpSendThread, info); thread_list.push_back(t); } for (list<pthread_t*>::iterator it = thread_list.begin(); it != thread_list.end(); it++) { pthread_join(**it, NULL); } for (list<pthread_t*>::iterator it = thread_list.begin(); it != thread_list.end(); it++) { delete (*it); } for (list<TcpThreadInfo*>::iterator it = thread_info_list.begin(); it != thread_info_list.end(); it++) { delete (*it); } cpu_info.Stop(); int cpu_usage = cpu_info.GetAverageCpuUsage(); // Record memory data multicast time double trans_time = GetElapsedSeconds(cpu_counter); double send_rate = 
file_size / 1024.0 / 1024.0 * 8.0 * 1514.0 / 1460.0 / trans_time; char str[256]; sprintf(str, "***** TCP Send Info *****\nTotal transfer time: %.2f seconds\nThroughput: %.2f Mbps\nAvg. CPU Usage: %d\%\n", trans_time, send_rate, cpu_usage); status_proxy->SendMessageLocal(INFORMATIONAL, str); cur_session_id++; }