void smp_switch_to_ipl_cpu(void (*func)(void *), void *data) { struct _lowcore *lc, *current_lc; struct stack_frame *sf; struct pt_regs *regs; unsigned long sp; if (smp_processor_id() == 0) func(data); __load_psw_mask(PSW_BASE_BITS | PSW_DEFAULT_KEY); /* Disable lowcore protection */ __ctl_clear_bit(0, 28); current_lc = lowcore_ptr[smp_processor_id()]; lc = lowcore_ptr[0]; if (!lc) lc = current_lc; lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; lc->restart_psw.addr = PSW_ADDR_AMODE | (unsigned long) smp_restart_cpu; if (!cpu_online(0)) smp_switch_to_cpu(func, data, 0, stap(), __cpu_logical_map[0]); while (sigp(0, sigp_stop_and_store_status) == sigp_busy) cpu_relax(); sp = lc->panic_stack; sp -= sizeof(struct pt_regs); regs = (struct pt_regs *) sp; memcpy(®s->gprs, ¤t_lc->gpregs_save_area, sizeof(regs->gprs)); regs->psw = lc->psw_save_area; sp -= STACK_FRAME_OVERHEAD; sf = (struct stack_frame *) sp; sf->back_chain = regs->gprs[15]; smp_switch_to_cpu(func, data, sp, stap(), __cpu_logical_map[0]); }
/*
 * Initialize CPU ELF notes
 *
 * Walks all online CPUs: the first online CPU reuses the already-saved
 * boot-CPU status (when a prefix register save area exists); every other
 * CPU has its status stored via store_status_next() before its ELF note
 * is added. Finally the dump CPU's save area is copied to absolute zero.
 *
 * NOTE(review): SOURCE contains several variants of setup_regs(); only
 * one definition can be linked into a single build — confirm which one
 * this file is meant to keep.
 */
void setup_regs(void)
{
	unsigned long sa = S390_lowcore.prefixreg_save_area + SAVE_AREA_BASE;
	int cpu, this_cpu, phys_cpu = 0, first = 1;

	this_cpu = stap();
	/* No boot-CPU save area -> treat no online CPU as the "first" one */
	if (!S390_lowcore.prefixreg_save_area)
		first = 0;
	for_each_online_cpu(cpu) {
		if (first) {
			/* Boot CPU: status already stored, just add its notes */
			add_elf_notes(cpu);
			first = 0;
			continue;
		}
		/* Store status of the next physical CPU (skipping ourselves) */
		phys_cpu = store_status_next(phys_cpu, this_cpu);
		if (phys_cpu == -1)
			break;	/* no more physical CPUs found */
		add_elf_notes(cpu);
		phys_cpu++;
	}
	/* Copy dump CPU store status info to absolute zero */
	memcpy((void *) SAVE_AREA_BASE, (void *) sa, sizeof(struct save_area_s390x));
}
/*
 * Start kdump: create a LGR log entry, store status of all CPUs and
 * branch to __do_machine_kdump.
 *
 * This function does not return: the final store_status() tail-calls
 * __do_machine_kdump with 'image'.
 */
static noinline void __machine_kdump(void *image)
{
	int this_cpu, cpu;

	lgr_info_log();
	/* Get status of the other CPUs */
	this_cpu = smp_find_processor_id(stap());
	for_each_online_cpu(cpu) {
		if (cpu == this_cpu)
			continue;
		if (smp_store_status(cpu))
			continue;	/* status could not be stored; skip CPU */
	}
	/* Store status of the boot CPU */
	if (MACHINE_HAS_VX)
		save_vx_regs((void *) &S390_lowcore.vector_save_area);
	/*
	 * To create a good backchain for this CPU in the dump store_status
	 * is passed the address of a function. The address is saved into
	 * the PSW save area of the boot CPU and the function is invoked as
	 * a tail call of store_status. The backchain in the dump will look
	 * like this:
	 *   restart_int_handler -> __machine_kexec -> __do_machine_kdump
	 * The call to store_status() will not return.
	 */
	store_status(__do_machine_kdump, image);
}
/*
 * Ensure that PSW restart is done on an online CPU
 *
 * If the current CPU is online, DAT is re-enabled and we return.
 * Otherwise the restart is forwarded to another CPU via sigp and the
 * current CPU stops itself, spinning forever as a last resort.
 */
void smp_restart_with_online_cpu(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		if (stap() == __cpu_logical_map[cpu]) {
			/* We are online: Enable DAT again and return */
			__load_psw_mask(psw_kernel_bits | PSW_MASK_DAT);
			return;
		}
	}
	/*
	 * We are not online: Do PSW restart on an online CPU.
	 * NOTE(review): 'cpu' here holds whatever value the iterator left
	 * after the loop completed without matching — confirm that sigp()
	 * with this value actually targets an online CPU as intended.
	 */
	while (sigp(cpu, sigp_restart) == sigp_busy)
		cpu_relax();
	/* And stop ourself */
	while (raw_sigp(stap(), sigp_stop) == sigp_busy)
		cpu_relax();
	for (;;);
}
/*
 * Initialize CPU ELF notes
 *
 * Adds ELF notes for the CPU running this code first, then stores the
 * status of every other online CPU (CPUs whose status cannot be stored
 * are skipped) and adds their notes. Finally the dump CPU's save area
 * is copied to absolute zero.
 *
 * NOTE(review): SOURCE contains several variants of setup_regs(); only
 * one definition can be linked into a single build.
 */
void setup_regs(void)
{
	unsigned long sa = S390_lowcore.prefixreg_save_area + SAVE_AREA_BASE;
	int cpu, this_cpu;

	this_cpu = smp_find_processor_id(stap());
	add_elf_notes(this_cpu);
	for_each_online_cpu(cpu) {
		if (cpu == this_cpu)
			continue;
		if (smp_store_status(cpu))
			continue;	/* store failed: no notes for this CPU */
		add_elf_notes(cpu);
	}
	/* Copy dump CPU store status info to absolute zero */
	memcpy((void *) SAVE_AREA_BASE, (void *) sa, sizeof(struct save_area));
}
/*
 * Initialize CPU ELF notes
 *
 * Variant that additionally saves the vector registers: the lowcore
 * pointer is recovered from the prefix register stored at absolute
 * zero, and save_vx_regs_safe() is called when the machine has the
 * vector facility.
 */
static void setup_regs(void)
{
	unsigned long sa = S390_lowcore.prefixreg_save_area + SAVE_AREA_BASE;
	struct _lowcore *lc;
	int cpu, this_cpu;

	/* Get lowcore pointer from store status of this CPU (absolute zero) */
	lc = (struct _lowcore *)(unsigned long)S390_lowcore.prefixreg_save_area;
	this_cpu = smp_find_processor_id(stap());
	add_elf_notes(this_cpu);
	for_each_online_cpu(cpu) {
		if (cpu == this_cpu)
			continue;
		if (smp_store_status(cpu))
			continue;	/* store failed: no notes for this CPU */
		add_elf_notes(cpu);
	}
	/* Save vector registers if the machine has the vector facility */
	if (MACHINE_HAS_VX)
		save_vx_regs_safe((void *) lc->vector_save_area_addr);
	/* Copy dump CPU store status info to absolute zero */
	memcpy((void *) SAVE_AREA_BASE, (void *) sa, sizeof(struct save_area));
}
// For English version:
/*
 * step - single forwarding call to stap().
 *
 * Fix: use an explicit (void) parameter list; in C, empty parentheses
 * declare a function with an unspecified argument list (a deprecated
 * feature, removed in C23), not a zero-argument function.
 */
void step(void)
{
	stap();
}
/*
 * stappen - call stap() s times.
 * @s: number of calls; values <= 0 now result in no calls.
 *
 * Fix: the original "while (s--)" treats a negative @s as truthy and
 * keeps decrementing past zero, looping until signed wraparound (which
 * is itself undefined behavior). Guard the decrement with an explicit
 * "> 0" comparison so non-positive counts are a no-op.
 */
void stappen(int s)
{
	while (s-- > 0)
		stap();
}
/*
 * Handle one packet of the session-takeover protocol, driven by the
 * current takeover state (m_eTakeoveState):
 *
 *   STS_NONE                          <- SessionTakeoverRequest (from master)
 *   STS_SENT_TAKEOVER_REQUEST         <- SessionTakeoverAck (we are master)
 *   STS_SENT_TAKEOVER_ACK             <- SessionFlushed / SessionReconnectRequest
 *   STS_SENT_SESSION_RECONNECT_REQUEST<- SessionReconnectAck (from new master)
 *
 * Returns true when the packet was accepted and handled for the current
 * state, false otherwise (the UT_return_val_if_fail guards reject
 * packets of the wrong type or from the wrong peer).
 */
bool AbiCollab::_handleSessionTakeover(AbstractSessionTakeoverPacket* pPacket, BuddyPtr collaborator)
{
	UT_DEBUGMSG(("AbiCollab::_handleSessionTakeover()\n"));
	UT_return_val_if_fail(pPacket, false);
	UT_return_val_if_fail(collaborator, false);

	AbiCollabSessionManager* pManager = AbiCollabSessionManager::getManager();
	UT_return_val_if_fail(pManager, false);

	switch (m_eTakeoveState)
	{
		case STS_NONE:
			{
				// we only accept a SessionTakeoverRequest or MasterChangeRequest packet
				UT_return_val_if_fail(pPacket->getClassType() == PCT_SessionTakeoverRequestPacket, false);
				// we can only allow such a packet from the controller
				UT_return_val_if_fail(m_pController == collaborator, false);

				// handle the SessionTakeoverRequestPacket packet
				m_pProposedController = BuddyPtr();
				m_vApprovedReconnectBuddies.clear();
				SessionTakeoverRequestPacket* strp = static_cast<SessionTakeoverRequestPacket*>(pPacket);
				m_bProposedController = strp->promote();
				if (m_bProposedController)
				{
					// we are promoted to master: remember which buddies may reconnect
					for (std::vector<std::string>::const_iterator cit = strp->getBuddyIdentifiers().begin(); cit != strp->getBuddyIdentifiers().end(); cit++)
						m_vApprovedReconnectBuddies[*cit] = false;
				}
				else
				{
					// another buddy is promoted: resolve it from its identifier
					UT_return_val_if_fail(strp->getBuddyIdentifiers().size() == 1, false);
					BuddyPtr pBuddy = pManager->constructBuddy(strp->getBuddyIdentifiers()[0], collaborator);
					UT_return_val_if_fail(pBuddy, false);
					m_pProposedController = pBuddy;
				}

				// inform the master that we received the takeover request
				SessionTakeoverAckPacket stap(m_sId, m_pDoc->getDocUUIDString());
				collaborator->getHandler()->send(&stap, collaborator);

				m_eTakeoveState = STS_SENT_TAKEOVER_ACK;
				return true;
			}
			return false;
		case STS_SENT_TAKEOVER_REQUEST:
			{
				// we only accept SessionTakeoverAck packets
				UT_return_val_if_fail(pPacket->getClassType() == PCT_SessionTakeoverAckPacket, false);
				// we can only receive SessionTakeoverAck packets when we are the master
				UT_return_val_if_fail(!m_pController, false);
				// we should have a proposed master
				UT_return_val_if_fail(m_pProposedController, false);
				// a slave should only ack once
				UT_return_val_if_fail(!_hasAckedSessionTakeover(collaborator), false);

				// handle the SessionTakeoverAck packet
				m_mAckedSessionTakeoverBuddies[collaborator] = true;

				// check if every slave has acknowledged the session takeover
				// TODO: handle dropouts
				if (m_vCollaborators.size() == 1 ||
					m_mAckedSessionTakeoverBuddies.size() == m_vCollaborators.size())
				{
					// ... our tour of duty is done
					_shutdownAsMaster();
					m_eTakeoveState = STS_NONE;
					return true;
				}
			}
			return true;
		case STS_SENT_TAKEOVER_ACK:
			// we only accept a SessionFlushed or SessionReconnectRequest packet
			UT_return_val_if_fail(
					pPacket->getClassType() == PCT_SessionFlushedPacket ||
					pPacket->getClassType() == PCT_SessionReconnectRequestPacket,
					false
				);
			if (pPacket->getClassType() == PCT_SessionReconnectRequestPacket)
			{
				// we only accept a SessionReconnectRequest when we are the proposed master
				UT_return_val_if_fail(m_bProposedController, false);
				// we only allow an incoming SessionReconnectRequest packet from a buddy
				// that is in the buddy list we received from the master, and we didn't receive
				// such a packet from him before
				bool allow = false;
				for (std::map<std::string, bool>::iterator it = m_vApprovedReconnectBuddies.begin(); it != m_vApprovedReconnectBuddies.end(); it++)
				{
					// TODO: is it a good idea to compare descriptors with full session information?
					if ((*it).first == collaborator->getDescriptor(true) && (*it).second == false)
					{
						(*it).second = true;
						allow = true;
						break;
					}
				}
				UT_return_val_if_fail(allow, false);

				// handle the SessionReconnectRequest packet
				addCollaborator(collaborator);
				_checkRestartAsMaster();
				return true;
			}
			else if (pPacket->getClassType() == PCT_SessionFlushedPacket)
			{
				// we can only allow a SessionFlushed packet from the controller
				UT_return_val_if_fail(m_pController == collaborator, false);

				// handle the SessionFlushed packet
				m_bSessionFlushed = true;
				if (m_bProposedController)
				{
					// as far we we're concerned now, the old master is dead
					_becomeMaster();
					_checkRestartAsMaster();
					return true;
				}
				else
				{
					// as far we we're concerned now, the old master is dead
					_switchMaster();

					// inform the new master that we want to rejoin the session
					SessionReconnectRequestPacket srrp(m_sId, m_pDoc->getDocUUIDString());
					m_pProposedController->getHandler()->send(&srrp, m_pProposedController);

					m_eTakeoveState = STS_SENT_SESSION_RECONNECT_REQUEST;
				}
				return true;
			}
			return false;
		case STS_SENT_SESSION_RECONNECT_REQUEST:
			{
				// we only accept a SessionReconnectAck packet
				UT_return_val_if_fail(pPacket->getClassType() == PCT_SessionReconnectAckPacket, false);
				// we only accept said packet when we are a slave
				UT_return_val_if_fail(m_pController, false);
				// we only accept said packet when we are not the proposed master
				UT_return_val_if_fail(!m_bProposedController, false);
				// we only accept said packet from the proposed master
				UT_return_val_if_fail(m_pProposedController == collaborator, false);

				// handle the SessionReconnectAck packet
				SessionReconnectAckPacket* srap = static_cast<SessionReconnectAckPacket*>(pPacket);
				// Nuke the current collaboration state, and restart with the
				// given revision from the proposed master
				UT_return_val_if_fail(_restartAsSlave(srap->getDocUUID(), srap->getRev()), false);
			}
			return true;
		default:
			UT_ASSERT_HARMLESS(UT_SHOULD_NOT_HAPPEN);
			break;
	}

	return false;
}
/*
 * Read SCP info via SCLP and decode it into the global 'sclp' facility
 * structure, machine flags, and IPL information. Each has_* flag is a
 * single bit in one of the sccb facility bytes.
 */
static void __init sclp_early_facilities_detect(struct read_info_sccb *sccb)
{
	struct sclp_core_entry *cpue;
	u16 boot_cpu_address, cpu;

	if (sclp_early_read_info(sccb))
		return;

	/* Decode facility bits */
	sclp.facilities = sccb->facilities;
	sclp.has_sprp = !!(sccb->fac84 & 0x02);
	sclp.has_core_type = !!(sccb->fac84 & 0x01);
	sclp.has_gsls = !!(sccb->fac85 & 0x80);
	sclp.has_64bscao = !!(sccb->fac116 & 0x80);
	sclp.has_cmma = !!(sccb->fac116 & 0x40);
	sclp.has_esca = !!(sccb->fac116 & 0x08);
	sclp.has_pfmfi = !!(sccb->fac117 & 0x40);
	sclp.has_ibs = !!(sccb->fac117 & 0x20);
	sclp.has_gisaf = !!(sccb->fac118 & 0x08);
	sclp.has_hvs = !!(sccb->fac119 & 0x80);
	sclp.has_kss = !!(sccb->fac98 & 0x01);
	if (sccb->fac85 & 0x02)
		S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP;
	if (sccb->fac91 & 0x40)
		S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_GUEST;

	/* Memory layout: prefer the primary fields, fall back to *2 fields */
	sclp.rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
	sclp.rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
	sclp.rzm <<= 20;
	sclp.ibc = sccb->ibc;

	/* Maximum address: 2^hamaxpow - 1, or unlimited when not reported */
	if (sccb->hamaxpow && sccb->hamaxpow < 64)
		sclp.hamax = (1UL << sccb->hamaxpow) - 1;
	else
		sclp.hamax = U64_MAX;

	/* Maximum number of cores */
	if (!sccb->hcpua) {
		if (MACHINE_IS_VM)
			sclp.max_cores = 64;
		else
			sclp.max_cores = sccb->ncpurl;
	} else {
		sclp.max_cores = sccb->hcpua + 1;
	}

	/* Find the CPU entry of the boot CPU and pick up its capabilities */
	boot_cpu_address = stap();
	cpue = (void *)sccb + sccb->cpuoff;
	for (cpu = 0; cpu < sccb->ncpurl; cpue++, cpu++) {
		if (boot_cpu_address != cpue->core_id)
			continue;
		sclp.has_siif = cpue->siif;
		sclp.has_sigpif = cpue->sigpif;
		sclp.has_sief2 = cpue->sief2;
		sclp.has_gpere = cpue->gpere;
		sclp.has_ib = cpue->ib;
		sclp.has_cei = cpue->cei;
		sclp.has_skey = cpue->skey;
		break;
	}

	/* Save IPL information */
	sclp_ipl_info.is_valid = 1;
	if (sccb->fac91 & 0x2)
		sclp_ipl_info.has_dump = 1;
	memcpy(&sclp_ipl_info.loadparm, &sccb->loadparm, LOADPARM_LEN);

	/* Multi-threading IDs are only valid when fac42 bit 0x80 is set */
	sclp.mtid = (sccb->fac42 & 0x80) ? (sccb->fac42 & 31) : 0;
	sclp.mtid_cp = (sccb->fac42 & 0x80) ? (sccb->fac43 & 31) : 0;
	sclp.mtid_prev = (sccb->fac42 & 0x80) ? (sccb->fac66 & 31) : 0;
	sclp.hmfai = sccb->hmfai;
}
/*
 * Architecture-specific boot-time setup for s390. Announces the machine
 * environment, wires up the kernel command line and init_mm layout,
 * selects the uaccess implementation, and runs the memory / lowcore /
 * CPU / paging / console initialization in the required order.
 */
void __init setup_arch(char **cmdline_p)
{
	/*
	 * print what head.S has found out about the machine
	 */
#ifndef CONFIG_64BIT
	if (MACHINE_IS_VM)
		pr_info("Linux is running as a z/VM "
			"guest operating system in 31-bit mode\n");
	else
		pr_info("Linux is running natively in 31-bit mode\n");
	if (MACHINE_HAS_IEEE)
		pr_info("The hardware system has IEEE compatible "
			"floating point units\n");
	else
		pr_info("The hardware system has no IEEE compatible "
			"floating point units\n");
#else /* CONFIG_64BIT */
	if (MACHINE_IS_VM)
		pr_info("Linux is running as a z/VM "
			"guest operating system in 64-bit mode\n");
	else if (MACHINE_IS_KVM)
		pr_info("Linux is running under KVM in 64-bit mode\n");
	else
		pr_info("Linux is running natively in 64-bit mode\n");
#endif /* CONFIG_64BIT */

	/* Have one command line that is parsed and saved in /proc/cmdline */
	/* boot_command_line has been already set up in early.c */
	*cmdline_p = boot_command_line;

	ROOT_DEV = Root_RAM0;

	/* Kernel image layout for init_mm, from linker-script symbols */
	init_mm.start_code = PAGE_OFFSET;
	init_mm.end_code = (unsigned long) &_etext;
	init_mm.end_data = (unsigned long) &_edata;
	init_mm.brk = (unsigned long) &_end;

	/* Pick the MVCOS-based uaccess primitives when available */
	if (MACHINE_HAS_MVCOS)
		memcpy(&uaccess, &uaccess_mvcos, sizeof(uaccess));
	else
		memcpy(&uaccess, &uaccess_std, sizeof(uaccess));

	parse_early_param();

	setup_ipl();
	setup_memory_end();
	setup_addressing_mode();
	setup_memory();
	setup_resources();
	setup_lowcore();

	cpu_init();
	/* Record the boot CPU's physical address as logical CPU 0 */
	__cpu_logical_map[0] = stap();
	s390_init_cpu_topology();

	/*
	 * Setup capabilities (ELF_HWCAP & ELF_PLATFORM).
	 */
	setup_hwcaps();

	/*
	 * Create kernel page tables and switch to virtual addressing.
	 */
	paging_init();

	/* Setup default console */
	conmode_default();
	set_preferred_console();

	/* Setup zfcpdump support */
	setup_zfcpdump(console_devno);
}