int64_t ibm_fsp_cec_power_down(uint64_t request)
{
	/* Request is:
	 *
	 * 0 = normal
	 * 1 = immediate
	 * (we do not allow 2 for "pci cfg reset" just yet)
	 */
	if (request != 0 && request != 1)
		return OPAL_PARAMETER;

	if (!fsp_present())
		return OPAL_UNSUPPORTED;

	/* Flash new firmware */
	if (fsp_flash_term_hook)
		fsp_flash_term_hook();

	printf("FSP: Sending shutdown command to FSP...\n");

	if (fsp_sync_msg(fsp_mkmsg(FSP_CMD_POWERDOWN_NORM, 1, request), true))
		return OPAL_BUSY_EVENT;

	fsp_reset_links();
	return OPAL_SUCCESS;
}
static bool send_response_to_fsp(u32 cmd_sub_mod)
{
	struct fsp_msg *rsp;
	int rc = -ENOMEM;

	rsp = fsp_mkmsg(cmd_sub_mod, 0);
	if (rsp)
		rc = fsp_queue_msg(rsp, fsp_freemsg);
	if (rc) {
		/* Only free the message if the allocation actually succeeded */
		if (rsp)
			fsp_freemsg(rsp);
		/* XXX Generate error logs */
		prerror("Error %d queueing FSP memory error reply\n", rc);
		return false;
	}
	return true;
}
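/*
 * Usage sketch (illustrative, not taken from the original file): a memory
 * error handler would acknowledge an FSP notification by queueing the
 * matching response sub-command. The FSP_RSP_MEM_RES constant and the
 * err_status parameter are assumptions made purely for this example.
 */
static void example_ack_mem_err(uint32_t err_status)
{
	if (!send_response_to_fsp(FSP_RSP_MEM_RES | err_status))
		prerror("Failed to queue memory error response to FSP\n");
}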
static bool fsp_chiptod_update_topology(uint32_t cmd_sub_mod,
					struct fsp_msg *msg)
{
	struct fsp_msg *resp;
	enum chiptod_topology topo;
	bool action;
	uint8_t status = 0;

	switch (cmd_sub_mod) {
	case FSP_CMD_TOPO_ENABLE_DISABLE:
		/*
		 * Action Values: 0x00 = Disable, 0x01 = Enable
		 * Topology Values: 0x00 = Primary, 0x01 = Secondary
		 */
		action = !!msg->data.bytes[2];
		topo = msg->data.bytes[3];
		prlog(PR_DEBUG, "Topology update event:\n");
		prlog(PR_DEBUG, " Action = %s, Topology = %s\n",
		      action ? "Enable" : "Disable",
		      topo ? "Secondary" : "Primary");

		if (!chiptod_adjust_topology(topo, action))
			status = FSP_STATUS_TOPO_IN_USE;
		else
			status = 0x00;

		resp = fsp_mkmsg(FSP_RSP_TOPO_ENABLE_DISABLE | status, 0);
		if (!resp) {
			prerror("Response allocation failed\n");
			return false;
		}
		if (fsp_queue_msg(resp, fsp_freemsg)) {
			fsp_freemsg(resp);
			prerror("Failed to queue response msg\n");
			return false;
		}
		return true;
	default:
		prlog(PR_DEBUG, "Unhandled sub cmd: %06x\n", cmd_sub_mod);
		break;
	}
	return false;
}
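/*
 * Registration sketch (an assumption about the surrounding code, not part
 * of this excerpt): fsp_chiptod_update_topology() has the shape of an FSP
 * message callback, so it would typically be wired up through a struct
 * fsp_client registered for the relevant message class. The
 * FSP_MCLASS_HW_MAINT class used here is an assumption for illustration.
 */
static struct fsp_client fsp_chiptod_client = {
	.message = fsp_chiptod_update_topology,
};

void fsp_chiptod_init(void)
{
	/* Register for topology update notifications from the FSP */
	fsp_register_client(&fsp_chiptod_client, FSP_MCLASS_HW_MAINT);
}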
int64_t ibm_fsp_cec_reboot(void)
{
	uint32_t cmd = FSP_CMD_REBOOT;

	if (!fsp_present())
		return OPAL_UNSUPPORTED;

	/* Flash new firmware */
	if (fsp_flash_term_hook &&
	    fsp_flash_term_hook() == OPAL_SUCCESS)
		cmd = FSP_CMD_DEEP_REBOOT;

	printf("FSP: Sending 0x%02x reboot command to FSP...\n", cmd);

	/* Send the reboot command to the FSP */
	if (fsp_sync_msg(fsp_mkmsg(cmd, 0), true))
		return OPAL_BUSY_EVENT;

	return OPAL_SUCCESS;
}
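/*
 * Wiring sketch (an assumption about the surrounding platform code, not
 * part of this excerpt): ibm_fsp_cec_power_down() and ibm_fsp_cec_reboot()
 * are intended to be exposed as platform hooks so the generic OPAL layer
 * can invoke them. A declaration along these lines is assumed; the
 * "example_fsp" platform name is a placeholder.
 */
DECLARE_PLATFORM(example_fsp) = {
	.name			= "example-fsp",		/* placeholder */
	.cec_power_down		= ibm_fsp_cec_power_down,
	.cec_reboot		= ibm_fsp_cec_reboot,
};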
/* Process captured EPOW event notification */
static void fsp_process_epow(struct fsp_msg *msg, int epow_type)
{
	struct fsp_msg *resp;
	u8 epow[8];

	/* Basic EPOW signature */
	if (msg->data.bytes[0] != 0xF2) {
		prlog(PR_ERR, "Signature mismatch\n");
		return;
	}

	/* Common to all EPOW event types */
	epow[0] = msg->data.bytes[0];
	epow[1] = msg->data.bytes[1];
	epow[2] = msg->data.bytes[2];
	epow[3] = msg->data.bytes[3];

	/*
	 * After receiving the FSP async message, the HV needs to ask for
	 * the detailed panel status through the corresponding mbox command.
	 * The HV need not use the received detailed status, as it contains
	 * nothing more or newer than what came along with the original FSP
	 * async message. But requesting the detailed panel status is still
	 * necessary, as it forms a kind of handshake with the FSP. Without
	 * this step, the FSP won't send any new panel status messages.
	 */
	switch (epow_type) {
	case EPOW_NORMAL:
		resp = fsp_mkmsg(FSP_CMD_STATUS_REQ, 0);
		if (resp == NULL) {
			prerror("%s : Message allocation failed\n", __func__);
			break;
		}
		if (fsp_queue_msg(resp, fsp_freemsg)) {
			fsp_freemsg(resp);
			prerror("%s : Failed to queue response message\n",
				__func__);
		}
		break;
	case EPOW_EX1:
		/* EPOW_EX1 specific extra event data */
		epow[4] = msg->data.bytes[4];
		resp = fsp_mkmsg(FSP_CMD_STATUS_EX1_REQ, 0);
		if (resp == NULL) {
			prerror("%s : Message allocation failed\n", __func__);
			break;
		}
		if (fsp_queue_msg(resp, fsp_freemsg)) {
			fsp_freemsg(resp);
			prerror("%s : Failed to queue response message\n",
				__func__);
		}
		break;
	case EPOW_EX2:
		resp = fsp_mkmsg(FSP_CMD_STATUS_EX2_REQ, 0);
		if (resp == NULL) {
			prerror("%s : Message allocation failed\n", __func__);
			break;
		}
		if (fsp_queue_msg(resp, fsp_freemsg)) {
			fsp_freemsg(resp);
			prerror("%s : Failed to queue response message\n",
				__func__);
		}
		break;
	default:
		prlog(PR_WARNING, "Unknown EPOW event notification\n");
		return;
	}

	fsp_epow_update(epow, epow_type);
}
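/*
 * Dispatch sketch (an assumption about how fsp_process_epow() is reached,
 * not part of this excerpt): the FSP message callback would route the
 * three panel status sub-commands to fsp_process_epow() with the matching
 * EPOW type. The FSP_CMD_PANELSTATUS* constants are assumed here.
 */
static bool fsp_epow_message(uint32_t cmd_sub_mod, struct fsp_msg *msg)
{
	switch (cmd_sub_mod) {
	case FSP_CMD_PANELSTATUS:
		fsp_process_epow(msg, EPOW_NORMAL);
		return true;
	case FSP_CMD_PANELSTATUS_EX1:
		fsp_process_epow(msg, EPOW_EX1);
		return true;
	case FSP_CMD_PANELSTATUS_EX2:
		fsp_process_epow(msg, EPOW_EX2);
		return true;
	}
	return false;
}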
static void firenze_send_pci_inventory(void)
{
	uint64_t base, abase, end, aend, offset;
	int64_t rc;

	if (!fsp_pcie_inv)
		return;

	prlog(PR_DEBUG, "PLAT: Sending PCI inventory to FSP, table has"
	      " %d entries\n", fsp_pcie_inv->num_entries);

	{
		unsigned int i;

		prlog(PR_DEBUG, "HWP SLT VDID DVID SVID SDID\n");
		prlog(PR_DEBUG, "---------------------------\n");

		for (i = 0; i < fsp_pcie_inv->num_entries; i++) {
			struct fsp_pcie_entry *e = &fsp_pcie_inv->entries[i];

			prlog(PR_DEBUG, "%03d %03d %04x %04x %04x %04x\n",
			      e->hw_proc_id, e->slot_idx, e->vendor_id,
			      e->device_id, e->subsys_vendor_id,
			      e->subsys_device_id);
		}
	}

	/*
	 * Get the location of the table in a form we can send
	 * to the FSP
	 */
	base = (uint64_t)fsp_pcie_inv;
	end = base + sizeof(struct fsp_pcie_inventory) +
		fsp_pcie_inv->num_entries * fsp_pcie_inv->entry_size;
	abase = base & ~0xffful;
	aend = (end + 0xffful) & ~0xffful;
	offset = PSI_DMA_PCIE_INVENTORY + (base & 0xfff);

	/* We can only accommodate so many entries in the PSI map */
	if ((aend - abase) > PSI_DMA_PCIE_INVENTORY_SIZE) {
		prerror("PLAT: PCIe inventory too large (%lld bytes)\n",
			aend - abase);
		goto bail;
	}

	/* Map this in the TCEs */
	fsp_tce_map(PSI_DMA_PCIE_INVENTORY, (void *)abase, aend - abase);

	/* Send FSP message */
	rc = fsp_sync_msg(fsp_mkmsg(FSP_CMD_PCI_POWER_CONF, 3,
				    hi32(offset), lo32(offset),
				    end - base), true);
	if (rc)
		prerror("PLAT: FSP error %lld sending inventory\n", rc);

	/* Unmap */
	fsp_tce_unmap(PSI_DMA_PCIE_INVENTORY, aend - abase);
bail:
	/*
	 * We free the inventory. We'll have to redo that on hotplug
	 * when we support it but that isn't the case yet
	 */
	free(fsp_pcie_inv);
	fsp_pcie_inv = NULL;
}
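/*
 * Layout sketch (derived only from how firenze_send_pci_inventory() walks
 * the table, not copied from a header): fsp_pcie_inventory carries an
 * entry count, a per-entry size and a flexible array of fsp_pcie_entry
 * records. The exact field widths, ordering and any reserved/padding
 * fields are assumptions made for illustration.
 */
struct fsp_pcie_entry {
	uint32_t	hw_proc_id;
	uint16_t	slot_idx;
	uint16_t	vendor_id;
	uint16_t	device_id;
	uint16_t	subsys_vendor_id;
	uint16_t	subsys_device_id;
};

struct fsp_pcie_inventory {
	uint16_t	num_entries;
	uint16_t	entry_size;
	struct fsp_pcie_entry	entries[];
};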