/*
 * mrsas_update_load_balance_params:  Refresh per-LD load-balance state
 *
 * Inputs:
 *   sc      - driver softc instance
 *   drv_map - driver RAID map
 *   lbInfo  - load balance info array, indexed by target id
 *
 * Marks each LD as eligible for read load balancing only when it is an
 * optimal RAID-1; every other LD (including unmapped target ids) has its
 * flag cleared.  Also clamps the outstanding-command threshold back to
 * its default when it has drifted out of the valid [1, 128] range.
 */
void
mrsas_update_load_balance_params(struct mrsas_softc *sc,
    MR_DRV_RAID_MAP_ALL * drv_map, PLD_LOAD_BALANCE_INFO lbInfo)
{
	int tgt;
	u_int16_t ldIdx;
	MR_LD_RAID *ldRaid;

	/* Restore the default threshold if the tunable is out of range. */
	if (sc->lb_pending_cmds > 128 || sc->lb_pending_cmds < 1)
		sc->lb_pending_cmds = LB_PENDING_CMDS_DEFAULT;

	for (tgt = 0; tgt < MAX_LOGICAL_DRIVES_EXT; tgt++) {
		ldIdx = MR_TargetIdToLdGet(tgt, drv_map);

		/* Target id not mapped to an LD: no load balancing. */
		if (ldIdx >= MAX_LOGICAL_DRIVES_EXT) {
			lbInfo[tgt].loadBalanceFlag = 0;
			continue;
		}

		ldRaid = MR_LdRaidGet(ldIdx, drv_map);
		/* Only an optimal RAID-1 LD qualifies. */
		if (ldRaid->level == 1 &&
		    ldRaid->ldState == MR_LD_STATE_OPTIMAL)
			lbInfo[tgt].loadBalanceFlag = 1;
		else
			lbInfo[tgt].loadBalanceFlag = 0;
	}
}
/*
 * MR_GetSpanBlock - locate the span and starting block for a logical row.
 *
 * @ld:       logical drive index
 * @row:      logical row to translate
 * @span_blk: out (may be NULL) - absolute block of the row within the span,
 *            i.e. (quad offset + offsetInSpan) shifted into block units
 * @map:      firmware RAID map (little-endian fields)
 *
 * Walks each span's quad table looking for the quad whose
 * [logStart, logEnd] range contains @row with the right stride (diff).
 * Returns the span number, or SPAN_INVALID when no quad matches or a
 * quad advertises a zero stride (which would otherwise divide by zero).
 *
 * Fix: dropped the dead local `debugBlk`, which was assigned but never
 * read.
 */
u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
		struct MR_FW_RAID_MAP_ALL *map)
{
	struct MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map);
	struct MR_QUAD_ELEMENT    *quad;
	struct MR_LD_RAID         *raid = MR_LdRaidGet(ld, map);
	u32                        span, j;

	for (span = 0; span < raid->spanDepth; span++, pSpanBlock++) {
		for (j = 0;
		     j < le32_to_cpu(pSpanBlock->block_span_info.noElements);
		     j++) {
			quad = &pSpanBlock->block_span_info.quad[j];

			/* Zero stride would make the mod/div below UB. */
			if (le32_to_cpu(quad->diff) == 0)
				return SPAN_INVALID;

			if (le64_to_cpu(quad->logStart) <= row &&
			    row <= le64_to_cpu(quad->logEnd) &&
			    (mega_mod64(row - le64_to_cpu(quad->logStart),
					le32_to_cpu(quad->diff))) == 0) {
				if (span_blk != NULL) {
					u64 blk;

					blk = mega_div64_32(
						(row -
						 le64_to_cpu(quad->logStart)),
						le32_to_cpu(quad->diff));
					blk = (blk +
					       le64_to_cpu(quad->offsetInSpan))
						<< raid->stripeShift;
					*span_blk = blk;
				}
				return span;
			}
		}
	}
	return SPAN_INVALID;
}
/*
 * MR_GetSpanBlock - resolve the span and absolute start block for @row.
 *
 * @ld:        logical drive index
 * @row:       logical row to translate
 * @span_blk:  out (may be NULL) - block address of the row in the span
 * @map:       firmware RAID map
 * @div_error: out - set to 1 when a quad has a zero stride (diff)
 *
 * Scans the quads of each span for one covering @row at the advertised
 * stride.  Returns the matching span index.  On a zero stride, sets
 * *div_error and returns the span reached so far; when no quad matches,
 * returns raid->spanDepth (the final loop index) — callers treat an
 * out-of-range span as a miss.
 */
U32
MR_GetSpanBlock(U32 ld, U64 row, U64 *span_blk, MR_FW_RAID_MAP_ALL *map,
    int *div_error)
{
	MR_SPAN_BLOCK_INFO	*sbi = MR_LdSpanInfoGet(ld, map);
	MR_LD_RAID		*ldRaid = MR_LdRaidGet(ld, map);
	MR_QUAD_ELEMENT		*qe;
	U32			spanIdx, quadIdx;

	for (spanIdx = 0; spanIdx < ldRaid->spanDepth; spanIdx++, sbi++) {
		for (quadIdx = 0;
		    quadIdx < sbi->block_span_info.noElements; quadIdx++) {
			qe = &sbi->block_span_info.quads[quadIdx];

			/* Bail out rather than divide by a zero stride. */
			if (qe->diff == 0) {
				*div_error = 1;
				return (spanIdx);
			}

			if (row < qe->logStart || qe->logEnd < row)
				continue;
			if (((row - qe->logStart) % qe->diff) != 0)
				continue;

			if (span_blk != NULL) {
				U64 absBlk;

				absBlk = (row - qe->logStart) / qe->diff;
				absBlk = (absBlk + qe->offsetInSpan)
				    << ldRaid->stripeShift;
				*span_blk = absBlk;
			}
			return (spanIdx);
		}
	}
	/* No quad matched: spanIdx == ldRaid->spanDepth here. */
	return (spanIdx);
}
/*
 * mr_spanset_get_phy_params - arm/span/block calculation for uneven spans.
 *
 * Calculates the physical arm, span and absolute block for the given
 * stripe and reference-in-stripe, using the span-set tables (uneven-span
 * LDs).  The row and span are taken from io_info (already computed by
 * the caller via get_row_from_strip()/mr_spanset_get_span_block()).
 *
 * Inputs:
 *   sc            - HBA instance
 *   ld            - logical drive number
 *   stripRow      - stripe number
 *   stripRef      - reference in stripe
 *   io_info       - carries start_row/start_span in, devHandle/pdBlock out
 *   pRAID_Context - RAID context; regLockFlags/spanArm are written here
 *   map           - driver RAID map
 *
 * Outputs: io_info->devHandle, io_info->pdBlock, io_info->span_arm,
 *          pRAID_Context->spanArm.  Returns TRUE.
 */
static u_int8_t
mr_spanset_get_phy_params(struct mrsas_softc *sc, u_int32_t ld,
    u_int64_t stripRow, u_int16_t stripRef, struct IO_REQUEST_INFO *io_info,
    RAID_CONTEXT * pRAID_Context, MR_DRV_RAID_MAP_ALL * map)
{
	MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	u_int32_t pd, arRef;
	u_int8_t physArm, span;
	u_int64_t row;
	u_int8_t retval = TRUE;
	u_int64_t *pdBlock = &io_info->pdBlock;
	u_int16_t *pDevHandle = &io_info->devHandle;
	u_int32_t logArm, rowMod, armQ, arm;
	u_int8_t do_invader = 0;

	/* Invader/Fury controllers take a different region-lock path below. */
	if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY))
		do_invader = 1;

	/* Get row and span from io_info for Uneven Span IO. */
	row = io_info->start_row;
	span = io_info->start_span;

	if (raid->level == 6) {
		/*
		 * RAID 6: rotate past the Q arm — data logically follows Q,
		 * wrapping within the span's row size.
		 */
		logArm = get_arm_from_strip(sc, ld, stripRow, map);
		rowMod = mega_mod64(row, SPAN_ROW_SIZE(map, ld, span));
		armQ = SPAN_ROW_SIZE(map, ld, span) - 1 - rowMod;
		arm = armQ + 1 + logArm;
		if (arm >= SPAN_ROW_SIZE(map, ld, span))
			arm -= SPAN_ROW_SIZE(map, ld, span);
		physArm = (u_int8_t)arm;
	} else
		/* Calculate the arm */
		physArm = get_arm(sc, ld, span, stripRow, map);

	/* Array holding this span, then the PD at the computed arm. */
	arRef = MR_LdSpanArrayGet(ld, span, map);
	pd = MR_ArPdGet(arRef, physArm, map);

	if (pd != MR_PD_INVALID)
		*pDevHandle = MR_PdDevHandleGet(pd, map);
	else {
		/* PD missing: parity LDs need an exclusive region lock... */
		*pDevHandle = MR_PD_INVALID;
		if ((raid->level >= 5) && ((!do_invader) || (do_invader &&
		    raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))
			pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
		else if (raid->level == 1) {
			/* ...while RAID 1 can fall back to the mirror arm. */
			pd = MR_ArPdGet(arRef, physArm + 1, map);
			if (pd != MR_PD_INVALID)
				*pDevHandle = MR_PdDevHandleGet(pd, map);
		}
	}

	/* Absolute block = span start + row offset (already in *pdBlock). */
	*pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk;
	pRAID_Context->spanArm =
	    (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
	io_info->span_arm = pRAID_Context->spanArm;
	return retval;
}
/*
 * get_row_from_strip - translate a logical strip to its row number using
 * the per-LD span-set tables (uneven-span support).
 *
 * Finds the span set covering @strip, derives the strip's offset within
 * the span row and the row count contributed by earlier rows of the set,
 * then counts how many spans precede the strip's span to land on the
 * final row.  Returns the row, or -1LLU when no span set covers @strip.
 *
 * Fix: le32_to_cpu() was wrapped around the whole comparison
 * `noElements >= info+1`, byte-swapping the boolean result instead of
 * the little-endian field; it now converts only `noElements`.
 */
static u64 get_row_from_strip(struct megasas_instance *instance,
	u32 ld, u64 strip, struct MR_FW_RAID_MAP_ALL *map)
{
	struct fusion_context *fusion = instance->ctrl_context;
	struct MR_LD_RAID	*raid = MR_LdRaidGet(ld, map);
	LD_SPAN_SET	*span_set;
	PLD_SPAN_INFO	ldSpanInfo = fusion->log_to_span;
	u32		info, strip_offset, span, span_offset;
	u64		span_set_Strip, span_set_Row, retval;

	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
		span_set = &(ldSpanInfo[ld].span_set[info]);

		/* An empty width terminates the span-set table. */
		if (span_set->span_row_data_width == 0)
			break;
		if (strip > span_set->data_strip_end)
			continue;

		/* Offset of the strip inside this span set. */
		span_set_Strip = strip - span_set->data_strip_start;
		strip_offset = mega_mod64(span_set_Strip,
				span_set->span_row_data_width);
		/* Rows contributed by complete span rows before this one. */
		span_set_Row = mega_div64_32(span_set_Strip,
				span_set->span_row_data_width) * span_set->diff;
		/* Count spans whose strips precede this strip offset. */
		for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
			if (le32_to_cpu(map->raidMap.ldSpanMap[ld]
				.spanBlock[span]
				.block_span_info.noElements) >= info + 1) {
				if (strip_offset >=
					span_set->strip_offset[span])
					span_offset++;
				else
					break;
			}
#if SPAN_DEBUG
		dev_info(&instance->pdev->dev, "Strip 0x%llx,"
			"span_set_Strip 0x%llx, span_set_Row 0x%llx"
			"data width 0x%llx span offset 0x%x\n", strip,
			(unsigned long long)span_set_Strip,
			(unsigned long long)span_set_Row,
			(unsigned long long)span_set->span_row_data_width,
			span_offset);
		dev_info(&instance->pdev->dev, "For strip 0x%llx"
			"row is 0x%llx\n", strip,
			(unsigned long long)span_set->data_row_start +
			(unsigned long long)span_set_Row + (span_offset - 1));
#endif
		retval = (span_set->data_row_start + span_set_Row +
				(span_offset - 1));
		return retval;
	}
	/* No span set covers this strip. */
	return -1LLU;
}
/*
 * mr_spanset_get_span_block - find the span and start block for @row
 * on an uneven-span LD, using the span-set tables.
 *
 * @sc:        HBA instance
 * @ld:        logical drive number
 * @row:       logical row to translate
 * @span_blk:  out (may be NULL) - absolute block of the row in the span
 * @map:       driver RAID map
 * @div_error: out - set to 1 when a quad has a zero stride (diff)
 *
 * Returns the span number, or SPAN_INVALID when no quad covers @row.
 */
u_int32_t
mr_spanset_get_span_block(struct mrsas_softc *sc, u_int32_t ld,
    u_int64_t row, u_int64_t *span_blk, MR_DRV_RAID_MAP_ALL * map,
    int *div_error)
{
	MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	LD_SPAN_SET *span_set;
	MR_QUAD_ELEMENT *quad;
	u_int32_t span, info;
	PLD_SPAN_INFO ldSpanInfo = sc->log_to_span;

	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
		span_set = &(ldSpanInfo[ld].span_set[info]);

		/* An empty width terminates the span-set table. */
		if (span_set->span_row_data_width == 0)
			break;
		if (row > span_set->data_row_end)
			continue;

		for (span = 0; span < raid->spanDepth; span++)
			if (map->raidMap.ldSpanMap[ld].spanBlock[span].
			    block_span_info.noElements >= info + 1) {
				quad = &map->raidMap.ldSpanMap[ld].
				    spanBlock[span].
				    block_span_info.quad[info];
				/* Zero stride would divide by zero below. */
				if (quad->diff == 0) {
					*div_error = 1;
					return span;
				}
				/* Row must fall in the quad at its stride. */
				if (quad->logStart <= row &&
				    row <= quad->logEnd &&
				    (mega_mod64(row - quad->logStart,
				    quad->diff)) == 0) {
					if (span_blk != NULL) {
						u_int64_t blk;

						blk = mega_div64_32
						    ((row - quad->logStart),
						    quad->diff);
						blk = (blk +
						    quad->offsetInSpan)
						    << raid->stripeShift;
						*span_blk = blk;
					}
					return span;
				}
			}
	}
	return SPAN_INVALID;
}
/*
 * MR_CheckDIF - report whether the LD behind @ldTgtId uses protection
 * information (ldPiMode 0x8).  Returns FALSE for unmapped target ids.
 */
U16
MR_CheckDIF(U32 ldTgtId, MR_FW_RAID_MAP_ALL *map)
{
	U32		ldIdx;
	MR_LD_RAID	*ldRaid;

	ldIdx = MR_TargetIdToLdGet(ldTgtId, map);

	/* Target id not backed by a logical drive. */
	if (ldIdx >= MAX_LOGICAL_DRIVES)
		return (FALSE);

	ldRaid = MR_LdRaidGet(ldIdx, map);
	return (ldRaid->capability.ldPiMode == 0x8);
}
/*
 * mr_update_load_balance_params - refresh per-LD load-balance state.
 *
 * For every target id, enables read load balancing only for a two-drive
 * optimal RAID 1 (level 1, rowSize 2, single span) and caches the device
 * handles of both mirror arms; every other LD has its flag cleared.
 */
void
mr_update_load_balance_params(MR_FW_RAID_MAP_ALL *map,
    PLD_LOAD_BALANCE_INFO lbInfo)
{
	int		ldCount;
	U16		ld;
	MR_LD_RAID	*raid;

	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) {
		ld = MR_TargetIdToLdGet(ldCount, map);

		/* Target id not mapped to an LD. */
		if (ld >= MAX_LOGICAL_DRIVES) {
			con_log(CL_ANN1,
			    (CE_NOTE, "mrsas: ld=%d Invalid ld \n", ld));
			continue;
		}

		raid = MR_LdRaidGet(ld, map);

		/* Two drive Optimal RAID 1 */
		if ((raid->level == 1) && (raid->rowSize == 2) &&
		    (raid->spanDepth == 1) &&
		    raid->ldState == MR_LD_STATE_OPTIMAL) {
			U32 pd, arRef;

			lbInfo[ldCount].loadBalanceFlag = 1;

			/* Get the array on which this span is present. */
			arRef = MR_LdSpanArrayGet(ld, 0, map);

			pd = MR_ArPdGet(arRef, 0, map);	/* Get the Pd. */
			/* Get dev handle from Pd. */
			lbInfo[ldCount].raid1DevHandle[0] =
			    MR_PdDevHandleGet(pd, map);

			pd = MR_ArPdGet(arRef, 1, map);	/* Get the Pd. */
			/* Get dev handle from Pd. */
			lbInfo[ldCount].raid1DevHandle[1] =
			    MR_PdDevHandleGet(pd, map);
			con_log(CL_ANN1, (CE_NOTE,
			    "mrsas: ld=%d load balancing enabled \n",
			    ldCount));
		} else {
			lbInfo[ldCount].loadBalanceFlag = 0;
		}
	}
}
/*
 * mr_spanset_get_span_block - find the span and start block for @row on
 * an uneven-span LD, using the span-set tables.
 *
 * @instance: adapter instance
 * @ld:       logical drive number
 * @row:      logical row to translate
 * @span_blk: out (may be NULL) - absolute block of the row in the span
 * @map:      firmware RAID map (little-endian fields)
 *
 * Returns the span number, or SPAN_INVALID when no quad covers @row or
 * a quad advertises a zero stride.
 *
 * Fix: the zero-stride test read `le32_to_cpu(quad->diff == 0)`, i.e.
 * it byte-swapped the comparison result (making the test always false
 * whenever diff != 0 little-endian... and wrong on big-endian hosts);
 * the conversion now applies to the field itself.
 */
u32 mr_spanset_get_span_block(struct megasas_instance *instance,
		u32 ld, u64 row, u64 *span_blk, struct MR_FW_RAID_MAP_ALL *map)
{
	struct fusion_context *fusion = instance->ctrl_context;
	struct MR_LD_RAID         *raid = MR_LdRaidGet(ld, map);
	LD_SPAN_SET *span_set;
	struct MR_QUAD_ELEMENT    *quad;
	u32    span, info;
	PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;

	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
		span_set = &(ldSpanInfo[ld].span_set[info]);

		/* An empty width terminates the span-set table. */
		if (span_set->span_row_data_width == 0)
			break;
		if (row > span_set->data_row_end)
			continue;

		for (span = 0; span < raid->spanDepth; span++)
			if (le32_to_cpu(map->raidMap.ldSpanMap[ld]
				.spanBlock[span]
				.block_span_info.noElements) >= info + 1) {
				quad = &map->raidMap.ldSpanMap[ld].
					spanBlock[span].
					block_span_info.quad[info];
				/* Zero stride would divide by zero below. */
				if (le32_to_cpu(quad->diff) == 0)
					return SPAN_INVALID;
				/* Row must fall in the quad at its stride. */
				if (le64_to_cpu(quad->logStart) <= row &&
					row <= le64_to_cpu(quad->logEnd) &&
					(mega_mod64(row -
						le64_to_cpu(quad->logStart),
						le32_to_cpu(quad->diff))) == 0) {
					if (span_blk != NULL) {
						u64  blk;
						blk = mega_div64_32
						    ((row -
						      le64_to_cpu(quad->logStart)),
						     le32_to_cpu(quad->diff));
						blk = (blk +
						       le64_to_cpu(quad->offsetInSpan))
							<< raid->stripeShift;
						*span_blk = blk;
					}
					return span;
				}
			}
	}
	return SPAN_INVALID;
}
/*
 * get_arm_from_strip - logical arm for @strip on an uneven-span LD.
 *
 * Locates the span set covering @strip, computes the strip's offset
 * inside a span row, and subtracts the base strip offset of the span the
 * strip belongs to, yielding the arm index within that span.  Returns
 * -1 (as u32) when no span set covers @strip.
 */
static u32 get_arm_from_strip(struct megasas_instance *instance,
	u32 ld, u64 strip, struct MR_FW_RAID_MAP_ALL *map)
{
	struct fusion_context *fusion = instance->ctrl_context;
	struct MR_LD_RAID  *raid = MR_LdRaidGet(ld, map);
	LD_SPAN_SET *span_set;
	PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
	u32    info, strip_offset, span, span_offset, retval;

	for (info = 0 ; info < MAX_QUAD_DEPTH; info++) {
		span_set = &(ldSpanInfo[ld].span_set[info]);

		/* An empty width terminates the span-set table. */
		if (span_set->span_row_data_width == 0)
			break;
		if (strip > span_set->data_strip_end)
			continue;

		/* Offset of the strip within one span row. */
		strip_offset = (uint)mega_mod64
				((strip - span_set->data_strip_start),
				span_set->span_row_data_width);

		/*
		 * Find the base strip offset of the span containing this
		 * strip: the last span whose strip_offset does not exceed
		 * strip_offset.
		 */
		for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
			if (le32_to_cpu(map->raidMap.ldSpanMap[ld]
				.spanBlock[span]
				.block_span_info.noElements) >= info+1) {
				if (strip_offset >=
					span_set->strip_offset[span])
					span_offset =
						span_set->strip_offset[span];
				else
					break;
			}
#if SPAN_DEBUG
		dev_info(&instance->pdev->dev, "get_arm_from_strip:"
			"for ld=0x%x strip=0x%lx arm is 0x%x\n", ld,
			(long unsigned int)strip,
			(strip_offset - span_offset));
#endif
		retval = (strip_offset - span_offset);
		return retval;
	}

	dev_err(&instance->pdev->dev, "get_arm_from_strip"
		"returns invalid arm for ld=%x strip=%lx\n",
		ld, (long unsigned int)strip);

	return -1;
}
/*
 * get_row_from_strip - translate a logical strip to its row number using
 * the per-LD span-set tables (uneven-span support).
 *
 * Finds the span set covering @strip, derives the strip's offset within
 * the span row and the rows contributed by earlier rows of the set, then
 * counts how many spans precede the strip's span to land on the final
 * row.  Returns the row, or -1LLU when no span set covers @strip.
 */
static u_int64_t
get_row_from_strip(struct mrsas_softc *sc,
    u_int32_t ld, u_int64_t strip, MR_DRV_RAID_MAP_ALL * map)
{
	MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	LD_SPAN_SET *span_set;
	PLD_SPAN_INFO ldSpanInfo = sc->log_to_span;
	u_int32_t info, strip_offset, span, span_offset;
	u_int64_t span_set_Strip, span_set_Row;

	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
		span_set = &(ldSpanInfo[ld].span_set[info]);

		/* An empty width terminates the span-set table. */
		if (span_set->span_row_data_width == 0)
			break;
		if (strip > span_set->data_strip_end)
			continue;

		/* Offset of the strip inside this span set. */
		span_set_Strip = strip - span_set->data_strip_start;
		strip_offset = mega_mod64(span_set_Strip,
		    span_set->span_row_data_width);
		/* Rows contributed by complete span rows before this one. */
		span_set_Row = mega_div64_32(span_set_Strip,
		    span_set->span_row_data_width) * span_set->diff;
		/* Count spans whose strips precede this strip offset. */
		for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
			if (map->raidMap.ldSpanMap[ld].spanBlock[span].
			    block_span_info.noElements >= info + 1) {
				if (strip_offset >=
				    span_set->strip_offset[span])
					span_offset++;
				else
					break;
			}
		mrsas_dprint(sc, MRSAS_PRL11,
		    "LSI Debug : Strip 0x%llx, span_set_Strip 0x%llx, span_set_Row 0x%llx "
		    "data width 0x%llx span offset 0x%llx\n",
		    (unsigned long long)strip,
		    (unsigned long long)span_set_Strip,
		    (unsigned long long)span_set_Row,
		    (unsigned long long)span_set->span_row_data_width,
		    (unsigned long long)span_offset);
		mrsas_dprint(sc, MRSAS_PRL11,
		    "LSI Debug : For strip 0x%llx row is 0x%llx\n",
		    (unsigned long long)strip,
		    (unsigned long long)span_set->data_row_start +
		    (unsigned long long)span_set_Row + (span_offset - 1));
		return (span_set->data_row_start + span_set_Row +
		    (span_offset - 1));
	}
	/* No span set covers this strip. */
	return -1LLU;
}
/*
 * get_strip_from_row - inverse of get_row_from_strip(): first logical
 * strip of @row within its span, using the span-set tables.
 *
 * Finds the span set and quad covering @row, converts the row back to
 * complete span rows, then adds the set's starting strip and the span's
 * strip offset.  Returns the strip, or -1 (as u64) on no match.
 */
static u64 get_strip_from_row(struct megasas_instance *instance,
	u32 ld, u64 row, struct MR_FW_RAID_MAP_ALL *map)
{
	struct fusion_context *fusion = instance->ctrl_context;
	struct MR_LD_RAID	 *raid = MR_LdRaidGet(ld, map);
	LD_SPAN_SET *span_set;
	struct MR_QUAD_ELEMENT    *quad;
	PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
	u32    span, info;
	u64  strip;

	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
		span_set = &(ldSpanInfo[ld].span_set[info]);

		/* An empty width terminates the span-set table. */
		if (span_set->span_row_data_width == 0)
			break;
		if (row > span_set->data_row_end)
			continue;

		for (span = 0; span < raid->spanDepth; span++)
			if (le32_to_cpu(map->raidMap.ldSpanMap[ld]
				.spanBlock[span]
				.block_span_info.noElements) >= info+1) {
				quad = &map->raidMap.ldSpanMap[ld].
					spanBlock[span].block_span_info.quad[info];
				/* Row must fall in the quad at its stride. */
				if (le64_to_cpu(quad->logStart) <= row &&
					row <= le64_to_cpu(quad->logEnd) &&
					mega_mod64((row - le64_to_cpu(quad->logStart)),
					le32_to_cpu(quad->diff)) == 0) {
					strip = mega_div64_32
						(((row - span_set->data_row_start)
							- le64_to_cpu(quad->logStart)),
						le32_to_cpu(quad->diff));
					strip *= span_set->span_row_data_width;
					strip += span_set->data_strip_start;
					strip += span_set->strip_offset[span];
					return strip;
				}
			}
	}
	dev_err(&instance->pdev->dev, "get_strip_from_row"
		"returns invalid strip for ld=%x, row=%lx\n",
		ld, (long unsigned int)row);
	return -1;
}
void mr_update_load_balance_params(struct MR_FW_RAID_MAP_ALL *map, struct LD_LOAD_BALANCE_INFO *lbInfo) { int ldCount; u16 ld; struct MR_LD_RAID *raid; for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) { ld = MR_TargetIdToLdGet(ldCount, map); if (ld >= MAX_LOGICAL_DRIVES) { lbInfo[ldCount].loadBalanceFlag = 0; continue; } raid = MR_LdRaidGet(ld, map); /* Two drive Optimal RAID 1 */ if ((raid->level == 1) && (raid->rowSize == 2) && (raid->spanDepth == 1) && raid->ldState == MR_LD_STATE_OPTIMAL) { u32 pd, arRef; lbInfo[ldCount].loadBalanceFlag = 1; /* Get the array on which this span is present */ arRef = MR_LdSpanArrayGet(ld, 0, map); /* Get the Pd */ pd = MR_ArPdGet(arRef, 0, map); /* Get dev handle from Pd */ lbInfo[ldCount].raid1DevHandle[0] = MR_PdDevHandleGet(pd, map); /* Get the Pd */ pd = MR_ArPdGet(arRef, 1, map); /* Get the dev handle from Pd */ lbInfo[ldCount].raid1DevHandle[1] = MR_PdDevHandleGet(pd, map); } else lbInfo[ldCount].loadBalanceFlag = 0; } }
/*
 * get_strip_from_row - inverse of get_row_from_strip(): first logical
 * strip of @row within its span, using the span-set tables.
 *
 * Finds the span set and quad covering @row, converts the row back to
 * complete span rows, then adds the set's starting strip and the span's
 * strip offset.  Returns the strip, or -1 (as u_int64_t) on no match.
 */
static u_int64_t
get_strip_from_row(struct mrsas_softc *sc,
    u_int32_t ld, u_int64_t row, MR_DRV_RAID_MAP_ALL * map)
{
	MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	LD_SPAN_SET *span_set;
	MR_QUAD_ELEMENT *quad;
	PLD_SPAN_INFO ldSpanInfo = sc->log_to_span;
	u_int32_t span, info;
	u_int64_t strip;

	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
		span_set = &(ldSpanInfo[ld].span_set[info]);

		/* An empty width terminates the span-set table. */
		if (span_set->span_row_data_width == 0)
			break;
		if (row > span_set->data_row_end)
			continue;

		for (span = 0; span < raid->spanDepth; span++)
			if (map->raidMap.ldSpanMap[ld].spanBlock[span].
			    block_span_info.noElements >= info + 1) {
				quad = &map->raidMap.ldSpanMap[ld].
				    spanBlock[span].block_span_info.quad[info];
				/* Row must fall in the quad at its stride. */
				if (quad->logStart <= row &&
				    row <= quad->logEnd &&
				    mega_mod64((row - quad->logStart),
				    quad->diff) == 0) {
					strip = mega_div64_32
					    (((row - span_set->data_row_start)
					    - quad->logStart),
					    quad->diff);
					strip *= span_set->span_row_data_width;
					strip += span_set->data_strip_start;
					strip += span_set->strip_offset[span];
					return strip;
				}
			}
	}
	mrsas_dprint(sc, MRSAS_PRL11,
	    "LSI Debug - get_strip_from_row: returns invalid "
	    "strip for ld=%x, row=%lx\n", ld, (long unsigned int)row);
	return -1;
}
/*
 * MR_ValidateMapInfo - validate the RAID map received from firmware.
 *
 * Checks that the map's advertised totalSize matches the fixed header
 * plus one span map per LD.  On success, rebuilds the span-set tables
 * (when uneven spans are supported), refreshes the load-balance state,
 * and converts each LD's capability bits to host byte order in place.
 *
 * Returns 1 when the map is usable, 0 when the size check fails.
 */
u8 MR_ValidateMapInfo(struct megasas_instance *instance)
{
	struct fusion_context *fusion = instance->ctrl_context;
	/* Maps are double-buffered; map_id selects the active copy. */
	struct MR_FW_RAID_MAP_ALL *map = fusion->ld_map[(instance->map_id & 1)];
	struct LD_LOAD_BALANCE_INFO *lbInfo = fusion->load_balance_info;
	PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
	struct MR_FW_RAID_MAP *pFwRaidMap = &map->raidMap;
	struct MR_LD_RAID *raid;
	int ldCount, num_lds;
	u16 ld;

	/* totalSize must equal the fixed header + one span map per LD. */
	if (le32_to_cpu(pFwRaidMap->totalSize) !=
	    (sizeof(struct MR_FW_RAID_MAP) -
	     sizeof(struct MR_LD_SPAN_MAP) +
	     (sizeof(struct MR_LD_SPAN_MAP) *
	      le32_to_cpu(pFwRaidMap->ldCount)))) {
		printk(KERN_ERR "megasas: map info structure size 0x%x is not matching with ld count\n",
		       (unsigned int)((sizeof(struct MR_FW_RAID_MAP) -
				       sizeof(struct MR_LD_SPAN_MAP)) +
				      (sizeof(struct MR_LD_SPAN_MAP) *
				       le32_to_cpu(pFwRaidMap->ldCount))));
		printk(KERN_ERR "megasas: span map %x, pFwRaidMap->totalSize "
		       ": %x\n", (unsigned int)sizeof(struct MR_LD_SPAN_MAP),
		       le32_to_cpu(pFwRaidMap->totalSize));
		return 0;
	}

	/* Rebuild the uneven-span lookup tables from the new map. */
	if (instance->UnevenSpanSupport)
		mr_update_span_set(map, ldSpanInfo);

	mr_update_load_balance_params(map, lbInfo);

	num_lds = le32_to_cpu(map->raidMap.ldCount);

	/*Convert Raid capability values to CPU arch */
	for (ldCount = 0; ldCount < num_lds; ldCount++) {
		ld = MR_TargetIdToLdGet(ldCount, map);
		raid = MR_LdRaidGet(ld, map);
		le32_to_cpus((u32 *)&raid->capability);
	}
	return 1;
}
/*
 * MR_LdBlockSizeGet - logical block size of the LD behind @ldTgtId.
 *
 * Falls back to MRSAS_SCSIBLOCKSIZE when the target id is no longer
 * mapped to an LD (drive removed) or the map reports a zero length.
 */
u_int32_t
MR_LdBlockSizeGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map)
{
	MR_LD_RAID *ldRaid;
	u_int32_t ldIdx;
	u_int32_t blkSize;

	ldIdx = MR_TargetIdToLdGet(ldTgtId, map);

	/* Logical drive was removed: use the default block size. */
	if (ldIdx >= MAX_LOGICAL_DRIVES)
		return (MRSAS_SCSIBLOCKSIZE);

	ldRaid = MR_LdRaidGet(ldIdx, map);
	blkSize = ldRaid->logicalBlockLength;

	return (blkSize ? blkSize : MRSAS_SCSIBLOCKSIZE);
}
/*
 * get_arm_from_strip - logical arm for @strip on an uneven-span LD.
 *
 * Locates the span set covering @strip, computes the strip's offset
 * inside a span row, and subtracts the base strip offset of the span the
 * strip belongs to, yielding the arm index within that span.  Returns
 * -1 (as u_int32_t) when no span set covers @strip.
 */
static u_int32_t
get_arm_from_strip(struct mrsas_softc *sc,
    u_int32_t ld, u_int64_t strip, MR_DRV_RAID_MAP_ALL * map)
{
	MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	LD_SPAN_SET *span_set;
	PLD_SPAN_INFO ldSpanInfo = sc->log_to_span;
	u_int32_t info, strip_offset, span, span_offset;

	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
		span_set = &(ldSpanInfo[ld].span_set[info]);

		/* An empty width terminates the span-set table. */
		if (span_set->span_row_data_width == 0)
			break;
		if (strip > span_set->data_strip_end)
			continue;

		/* Offset of the strip within one span row. */
		strip_offset = (u_int32_t)mega_mod64
		    ((strip - span_set->data_strip_start),
		    span_set->span_row_data_width);

		/*
		 * Find the base strip offset of the span containing this
		 * strip: the last span whose strip_offset does not exceed
		 * strip_offset.
		 */
		for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
			if (map->raidMap.ldSpanMap[ld].spanBlock[span].
			    block_span_info.noElements >= info + 1) {
				if (strip_offset >=
				    span_set->strip_offset[span])
					span_offset =
					    span_set->strip_offset[span];
				else
					break;
			}
		mrsas_dprint(sc, MRSAS_PRL11,
		    "LSI PRL11: get_arm_from_strip: "
		    "for ld=0x%x strip=0x%lx arm is 0x%x\n", ld,
		    (long unsigned int)strip, (strip_offset - span_offset));
		return (strip_offset - span_offset);
	}

	mrsas_dprint(sc, MRSAS_PRL11,
	    "LSI Debug: - get_arm_from_strip: returns invalid arm"
	    " for ld=%x strip=%lx\n", ld, (long unsigned int)strip);

	return -1;
}
/*
 * get_arm - physical arm for @stripe on @ld/@span.
 *
 * RAID 0/5/6: arm is the stripe modulo the span's row size.
 * RAID 1: the logical arm from the strip, doubled to step over the
 * mirror copies; the -1 error sentinel from get_arm_from_strip() is
 * passed through unscaled.  Any other level yields 0.
 */
u8 get_arm(struct megasas_instance *instance, u32 ld, u8 span, u64 stripe,
	struct MR_FW_RAID_MAP_ALL *map)
{
	struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	/* Need to check correct default value */
	u32 result = 0;

	if (raid->level == 1) {
		/* start with logical arm */
		result = get_arm_from_strip(instance, ld, stripe, map);
		if (result != -1U)
			result *= 2;
	} else if (raid->level == 0 || raid->level == 5 ||
		   raid->level == 6) {
		result = mega_mod64(stripe, SPAN_ROW_SIZE(map, ld, span));
	}

	return result;
}
/*
 * get_arm - physical arm for @stripe on @ld/@span.
 *
 * RAID 0/5/6: arm is the stripe modulo the span's row size.
 * RAID 1: the logical arm from the strip, doubled to step over the
 * mirror copies.  Any other level yields 0.
 *
 * Fix: get_arm_from_strip() returns -1 (as u_int32_t) on failure; the
 * sentinel is now passed through unscaled instead of being doubled,
 * matching the Linux megaraid_sas variant of this routine.
 */
u_int8_t
get_arm(struct mrsas_softc *sc, u_int32_t ld, u_int8_t span,
    u_int64_t stripe, MR_DRV_RAID_MAP_ALL * map)
{
	MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	/* Need to check correct default value */
	u_int32_t arm = 0;

	switch (raid->level) {
	case 0:
	case 5:
	case 6:
		arm = mega_mod64(stripe, SPAN_ROW_SIZE(map, ld, span));
		break;
	case 1:
		/* start with logical arm */
		arm = get_arm_from_strip(sc, ld, stripe, map);
		/* Don't scale the -1 error sentinel. */
		if (arm != -1U)
			arm *= 2;
		break;
	}

	return arm;
}
/*
 ******************************************************************************
 *
 * MR_BuildRaidContext function
 *
 * This function will initiate command processing.  The start/end row and
 * strip information is calculated, then the region lock parameters are
 * filled in and (for fast-path-capable IO) the physical parameters are
 * resolved via MR_GetPhyParams()/mr_spanset_get_phy_params().
 *
 * Returns FALSE when the IO cannot be mapped (bad row/span); otherwise
 * the per-strip retval for fast-path IO, or TRUE.
 */
u8
MR_BuildRaidContext(struct megasas_instance *instance,
		    struct IO_REQUEST_INFO *io_info,
		    struct RAID_CONTEXT *pRAID_Context,
		    struct MR_FW_RAID_MAP_ALL *map, u8 **raidLUN)
{
	struct MR_LD_RAID *raid;
	u32 ld, stripSize, stripe_mask;
	u64 endLba, endStrip, endRow, start_row, start_strip;
	u64 regStart;
	u32 regSize;
	u8 num_strips, numRows;
	u16 ref_in_start_stripe, ref_in_end_stripe;
	u64 ldStartBlock;
	u32 numBlocks, ldTgtId;
	u8 isRead;
	u8 retval = 0;
	u8 startlba_span = SPAN_INVALID;
	u64 *pdBlock = &io_info->pdBlock;

	ldStartBlock = io_info->ldStartBlock;
	numBlocks = io_info->numBlocks;
	ldTgtId = io_info->ldTgtId;
	isRead = io_info->isRead;
	io_info->IoforUnevenSpan = 0;
	io_info->start_span = SPAN_INVALID;

	ld = MR_TargetIdToLdGet(ldTgtId, map);
	raid = MR_LdRaidGet(ld, map);

	/*
	 * if rowDataSize @RAID map and spanRowDataSize @SPAN INFO are zero
	 * return FALSE
	 */
	if (raid->rowDataSize == 0) {
		if (MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize == 0)
			return FALSE;
		else if (instance->UnevenSpanSupport) {
			io_info->IoforUnevenSpan = 1;
		} else {
			dev_info(&instance->pdev->dev,
				"raid->rowDataSize is 0, but has SPAN[0]"
				"rowDataSize = 0x%0x,"
				"but there is _NO_ UnevenSpanSupport\n",
				MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize);
			return FALSE;
		}
	}

	stripSize = 1 << raid->stripeShift;
	stripe_mask = stripSize - 1;

	/*
	 * calculate starting row and stripe, and number of strips and rows
	 */
	start_strip = ldStartBlock >> raid->stripeShift;
	ref_in_start_stripe = (u16)(ldStartBlock & stripe_mask);
	endLba = ldStartBlock + numBlocks - 1;
	ref_in_end_stripe = (u16)(endLba & stripe_mask);
	endStrip = endLba >> raid->stripeShift;
	num_strips = (u8)(endStrip - start_strip + 1);	/* End strip */

	if (io_info->IoforUnevenSpan) {
		/* Uneven spans: rows come from the span-set tables. */
		start_row = get_row_from_strip(instance, ld, start_strip, map);
		endRow = get_row_from_strip(instance, ld, endStrip, map);
		if (start_row == -1ULL || endRow == -1ULL) {
			dev_info(&instance->pdev->dev, "return from %s %d."
				"Send IO w/o region lock.\n",
				__func__, __LINE__);
			return FALSE;
		}

		if (raid->spanDepth == 1) {
			startlba_span = 0;
			*pdBlock = start_row << raid->stripeShift;
		} else
			startlba_span = (u8)mr_spanset_get_span_block(instance,
				ld, start_row, pdBlock, map);
		if (startlba_span == SPAN_INVALID) {
			dev_info(&instance->pdev->dev, "return from %s %d"
				"for row 0x%llx,start strip %llx"
				"endSrip %llx\n", __func__, __LINE__,
				(unsigned long long)start_row,
				(unsigned long long)start_strip,
				(unsigned long long)endStrip);
			return FALSE;
		}
		io_info->start_span = startlba_span;
		io_info->start_row = start_row;
#if SPAN_DEBUG
		dev_dbg(&instance->pdev->dev, "Check Span number from %s %d"
			"for row 0x%llx, start strip 0x%llx end strip 0x%llx"
			" span 0x%x\n", __func__, __LINE__,
			(unsigned long long)start_row,
			(unsigned long long)start_strip,
			(unsigned long long)endStrip, startlba_span);
		dev_dbg(&instance->pdev->dev,
			"start_row 0x%llx endRow 0x%llx"
			"Start span 0x%x\n",
			(unsigned long long)start_row,
			(unsigned long long)endRow, startlba_span);
#endif
	} else {
		/* Even spans: a row is rowDataSize strips. */
		start_row = mega_div64_32(start_strip, raid->rowDataSize);
		endRow = mega_div64_32(endStrip, raid->rowDataSize);
	}
	numRows = (u8)(endRow - start_row + 1);

	/*
	 * calculate region info.
	 */
	/* assume region is at the start of the first row */
	regStart = start_row << raid->stripeShift;
	/* assume this IO needs the full row - we'll adjust if not true */
	regSize = stripSize;

	/* Check if we can send this I/O via FastPath */
	if (raid->capability.fpCapable) {
		if (isRead)
			io_info->fpOkForIo = (raid->capability.fpReadCapable &&
					      ((num_strips == 1) ||
					       raid->capability.
					       fpReadAcrossStripe));
		else
			io_info->fpOkForIo = (raid->capability.fpWriteCapable &&
					      ((num_strips == 1) ||
					       raid->capability.
					       fpWriteAcrossStripe));
	} else
		io_info->fpOkForIo = FALSE;

	if (numRows == 1) {
		/* single-strip IOs can always lock only the data needed */
		if (num_strips == 1) {
			regStart += ref_in_start_stripe;
			regSize = numBlocks;
		}
		/* multi-strip IOs always need to full stripe locked */
	} else if (io_info->IoforUnevenSpan == 0) {
		/*
		 * For Even span region lock optimization.
		 * If the start strip is the last in the start row
		 */
		if (start_strip == (start_row + 1) * raid->rowDataSize - 1) {
			regStart += ref_in_start_stripe;
			/*
			 * initialize count to sectors from startref to end
			 * of strip
			 */
			regSize = stripSize - ref_in_start_stripe;
		}

		/* add complete rows in the middle of the transfer */
		if (numRows > 2)
			regSize += (numRows - 2) << raid->stripeShift;

		/* if IO ends within first strip of last row */
		if (endStrip == endRow * raid->rowDataSize)
			regSize += ref_in_end_stripe + 1;
		else
			regSize += stripSize;
	} else {
		/*
		 * For Uneven span region lock optimization.
		 * If the start strip is the last in the start row
		 */
		if (start_strip == (get_strip_from_row(instance, ld, start_row, map) +
				SPAN_ROW_DATA_SIZE(map, ld, startlba_span) - 1)) {
			regStart += ref_in_start_stripe;
			/*
			 * initialize count to sectors from
			 * startRef to end of strip
			 */
			regSize = stripSize - ref_in_start_stripe;
		}

		/* Add complete rows in the middle of the transfer */
		if (numRows > 2)
			/* Add complete rows in the middle of the transfer */
			regSize += (numRows - 2) << raid->stripeShift;

		/* if IO ends within first strip of last row */
		if (endStrip == get_strip_from_row(instance, ld, endRow, map))
			regSize += ref_in_end_stripe + 1;
		else
			regSize += stripSize;
	}

	pRAID_Context->timeoutValue =
		cpu_to_le16(map->raidMap.fpPdIoTimeoutSec);
	/* Invader/Fury get the firmware-specified lock type directly. */
	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
	    (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
		pRAID_Context->regLockFlags = (isRead) ?
			raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
	else
		pRAID_Context->regLockFlags = (isRead) ?
			REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
	pRAID_Context->VirtualDiskTgtId = raid->targetId;
	pRAID_Context->regLockRowLBA = cpu_to_le64(regStart);
	pRAID_Context->regLockLength = cpu_to_le32(regSize);
	pRAID_Context->configSeqNum = raid->seqNum;
	/* save pointer to raid->LUN array */
	*raidLUN = raid->LUN;

	/*
	 * Get Phy Params only if FP capable, or else leave it to MR firmware
	 * to do the calculation.
	 */
	if (io_info->fpOkForIo) {
		retval = io_info->IoforUnevenSpan ?
			mr_spanset_get_phy_params(instance, ld,
				start_strip, ref_in_start_stripe,
				io_info, pRAID_Context, map) :
			MR_GetPhyParams(instance, ld, start_strip,
				ref_in_start_stripe, io_info,
				pRAID_Context, map);
		/* If IO on an invalid Pd, then FP is not possible. */
		if (io_info->devHandle == MR_PD_INVALID)
			io_info->fpOkForIo = FALSE;
		return retval;
	} else if (isRead) {
		uint stripIdx;

		/* Resolve phy params for every strip of the read. */
		for (stripIdx = 0; stripIdx < num_strips; stripIdx++) {
			retval = io_info->IoforUnevenSpan ?
				mr_spanset_get_phy_params(instance, ld,
					start_strip + stripIdx,
					ref_in_start_stripe, io_info,
					pRAID_Context, map) :
				MR_GetPhyParams(instance, ld,
					start_strip + stripIdx,
					ref_in_start_stripe, io_info,
					pRAID_Context, map);
			if (!retval)
				return TRUE;
		}
	}

#if SPAN_DEBUG
	/* Just for testing what arm we get for strip. */
	if (io_info->IoforUnevenSpan)
		get_arm_from_strip(instance, ld, start_strip, map);
#endif
	return TRUE;
}
/*
 ******************************************************************************
 *
 * This routine calculates the arm, span and block for the specified
 * stripe and reference in stripe (even-span LDs).
 *
 * Inputs :
 *
 *    ld       - Logical drive number
 *    stripRow - Stripe number
 *    stripRef - Reference in stripe
 *
 * Outputs :
 *
 *    span          - Span number
 *    block         - Absolute Block number in the physical disk
 *    io_info       - devHandle and pdBlock are written here
 *    pRAID_Context - regLockFlags/spanArm may be written
 *
 * Returns FALSE on a bad map (zero rowSize/modFactor, invalid span),
 * TRUE otherwise.
 */
u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
		u16 stripRef, struct IO_REQUEST_INFO *io_info,
		struct RAID_CONTEXT *pRAID_Context,
		struct MR_FW_RAID_MAP_ALL *map)
{
	struct MR_LD_RAID  *raid = MR_LdRaidGet(ld, map);
	u32         pd, arRef;
	u8          physArm, span;
	u64         row;
	u8	    retval = TRUE;
	u8          do_invader = 0;
	u64	    *pdBlock = &io_info->pdBlock;
	u16	    *pDevHandle = &io_info->devHandle;

	/* Invader/Fury controllers take a different region-lock path. */
	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER ||
		instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
		do_invader = 1;

	row =  mega_div64_32(stripRow, raid->rowDataSize);

	if (raid->level == 6) {
		/* logical arm within row */
		u32 logArm =  mega_mod64(stripRow, raid->rowDataSize);
		u32 rowMod, armQ, arm;

		/* Guard the mod below against a corrupt map. */
		if (raid->rowSize == 0)
			return FALSE;
		/* get logical row mod */
		rowMod = mega_mod64(row, raid->rowSize);
		armQ = raid->rowSize - 1 - rowMod; /* index of Q drive */
		arm = armQ + 1 + logArm; /* data always logically follows Q */
		if (arm >= raid->rowSize) /* handle wrap condition */
			arm -= raid->rowSize;
		physArm = (u8)arm;
	} else  {
		/* Guard the mod below against a corrupt map. */
		if (raid->modFactor == 0)
			return FALSE;
		physArm = MR_LdDataArmGet(ld, mega_mod64(stripRow,
							 raid->modFactor),
					  map);
	}

	if (raid->spanDepth == 1) {
		span = 0;
		*pdBlock = row << raid->stripeShift;
	} else {
		span = (u8)MR_GetSpanBlock(ld, row, pdBlock, map);
		if (span == SPAN_INVALID)
			return FALSE;
	}

	/* Get the array on which this span is present */
	arRef = MR_LdSpanArrayGet(ld, span, map);
	pd = MR_ArPdGet(arRef, physArm, map); /* Get the pd */

	if (pd != MR_PD_INVALID)
		/* Get dev handle from Pd. */
		*pDevHandle = MR_PdDevHandleGet(pd, map);
	else {
		*pDevHandle = MR_PD_INVALID; /* set dev handle as invalid. */
		/* PD missing: parity LDs need an exclusive region lock... */
		if ((raid->level >= 5) &&
		    (!do_invader || (do_invader &&
		    (raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
			pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
		else if (raid->level == 1) {
			/* ...while RAID 1 falls back to the mirror arm. */
			/* Get alternate Pd. */
			pd = MR_ArPdGet(arRef, physArm + 1, map);
			if (pd != MR_PD_INVALID)
				/* Get dev handle from Pd */
				*pDevHandle = MR_PdDevHandleGet(pd, map);
		}
	}

	/* Absolute block = span start + offset already in *pdBlock. */
	*pdBlock += stripRef +
		le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
	pRAID_Context->spanArm =
		(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
	return retval;
}
/*
 ******************************************************************************
 *
 * This routine calculates the arm, span and block for the specified
 * stripe and reference in stripe using spanset (uneven-span LDs).
 *
 * Inputs :
 *
 *    ld       - Logical drive number
 *    stripRow - Stripe number
 *    stripRef - Reference in stripe
 *
 * Outputs :
 *
 *    span          - Span number
 *    block         - Absolute Block number in the physical disk
 *    io_info       - carries start_row/start_span in; devHandle and
 *                    pdBlock are written here
 *    pRAID_Context - regLockFlags/spanArm may be written
 *
 * Returns FALSE when the arm cannot be resolved, TRUE otherwise.
 */
static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
		u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info,
		struct RAID_CONTEXT *pRAID_Context,
		struct MR_FW_RAID_MAP_ALL *map)
{
	struct MR_LD_RAID  *raid = MR_LdRaidGet(ld, map);
	u32     pd, arRef;
	u8      physArm, span;
	u64     row;
	u8	retval = TRUE;
	u8	do_invader = 0;
	u64	*pdBlock = &io_info->pdBlock;
	u16	*pDevHandle = &io_info->devHandle;
	u32	logArm, rowMod, armQ, arm;

	/* Invader/Fury controllers take a different region-lock path. */
	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER ||
		instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
		do_invader = 1;

	/*Get row and span from io_info for Uneven Span IO.*/
	row	    = io_info->start_row;
	span	    = io_info->start_span;

	if (raid->level == 6) {
		/*
		 * RAID 6: rotate past the Q arm — data logically follows Q,
		 * wrapping within the span's row size.
		 */
		logArm = get_arm_from_strip(instance, ld, stripRow, map);
		if (logArm == -1U)
			return FALSE;
		rowMod = mega_mod64(row, SPAN_ROW_SIZE(map, ld, span));
		armQ = SPAN_ROW_SIZE(map, ld, span) - 1 - rowMod;
		arm = armQ + 1 + logArm;
		if (arm >= SPAN_ROW_SIZE(map, ld, span))
			arm -= SPAN_ROW_SIZE(map, ld, span);
		physArm = (u8)arm;
	} else
		/* Calculate the arm */
		physArm = get_arm(instance, ld, span, stripRow, map);
	if (physArm == 0xFF)
		return FALSE;

	/* Array holding this span, then the PD at the computed arm. */
	arRef = MR_LdSpanArrayGet(ld, span, map);
	pd = MR_ArPdGet(arRef, physArm, map);

	if (pd != MR_PD_INVALID)
		*pDevHandle = MR_PdDevHandleGet(pd, map);
	else {
		/* PD missing: parity LDs need an exclusive region lock... */
		*pDevHandle = MR_PD_INVALID;
		if ((raid->level >= 5) &&
		    (!do_invader || (do_invader &&
		    (raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
			pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
		else if (raid->level == 1) {
			/* ...while RAID 1 falls back to the mirror arm. */
			pd = MR_ArPdGet(arRef, physArm + 1, map);
			if (pd != MR_PD_INVALID)
				*pDevHandle = MR_PdDevHandleGet(pd, map);
		}
	}

	/* Absolute block = span start + offset already in *pdBlock. */
	*pdBlock += stripRef +
		le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
	pRAID_Context->spanArm =
		(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
	return retval;
}
/*
 * MR_BuildRaidContext: Set up Fast path RAID context
 *
 * Calculates the start/end row and strip for the I/O described by io_info,
 * fills in the region-lock fields of pRAID_Context, and — when the I/O is
 * fast-path capable — resolves the physical parameters (device handle,
 * PD block) via MR_GetPhyParams()/mr_spanset_get_phy_params().
 *
 * Returns FALSE on any resolution failure; otherwise returns the phy-params
 * result for fast-path I/O, or TRUE when the I/O is left to the firmware.
 * (The historical comment "return 0 if region lock was acquired OR return
 * num strips" does not match the code below — the function only ever
 * returns TRUE/FALSE/retval.)
 */
u_int8_t
MR_BuildRaidContext(struct mrsas_softc *sc, struct IO_REQUEST_INFO *io_info,
    RAID_CONTEXT * pRAID_Context, MR_DRV_RAID_MAP_ALL * map)
{
	MR_LD_RAID *raid;
	u_int32_t ld, stripSize, stripe_mask;
	u_int64_t endLba, endStrip, endRow, start_row, start_strip;
	REGION_KEY regStart;
	REGION_LEN regSize;
	u_int8_t num_strips, numRows;
	u_int16_t ref_in_start_stripe, ref_in_end_stripe;
	u_int64_t ldStartBlock;
	u_int32_t numBlocks, ldTgtId;
	u_int8_t isRead, stripIdx;
	u_int8_t retval = 0;
	u_int8_t startlba_span = SPAN_INVALID;
	u_int64_t *pdBlock = &io_info->pdBlock;
	int error_code = 0;

	ldStartBlock = io_info->ldStartBlock;
	numBlocks = io_info->numBlocks;
	ldTgtId = io_info->ldTgtId;
	isRead = io_info->isRead;

	io_info->IoforUnevenSpan = 0;
	io_info->start_span = SPAN_INVALID;

	ld = MR_TargetIdToLdGet(ldTgtId, map);
	raid = MR_LdRaidGet(ld, map);

	/*
	 * rowDataSize == 0 means the per-LD row size is not usable; the I/O
	 * can only proceed through the uneven-span path, and only when the
	 * controller advertises UnevenSpanSupport.
	 */
	if (raid->rowDataSize == 0) {
		if (MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize == 0)
			return FALSE;
		else if (sc->UnevenSpanSupport) {
			io_info->IoforUnevenSpan = 1;
		} else {
			mrsas_dprint(sc, MRSAS_PRL11,
			    "LSI Debug: raid->rowDataSize is 0, but has SPAN[0] rowDataSize = 0x%0x,"
			    " but there is _NO_ UnevenSpanSupport\n",
			    MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize);
			return FALSE;
		}
	}
	stripSize = 1 << raid->stripeShift;
	stripe_mask = stripSize - 1;
	/*
	 * calculate starting row and stripe, and number of strips and rows
	 */
	start_strip = ldStartBlock >> raid->stripeShift;
	ref_in_start_stripe = (u_int16_t)(ldStartBlock & stripe_mask);
	endLba = ldStartBlock + numBlocks - 1;
	ref_in_end_stripe = (u_int16_t)(endLba & stripe_mask);
	endStrip = endLba >> raid->stripeShift;
	num_strips = (u_int8_t)(endStrip - start_strip + 1);	/* End strip */
	if (io_info->IoforUnevenSpan) {
		/* Uneven span: rows must be resolved via the span sets. */
		start_row = get_row_from_strip(sc, ld, start_strip, map);
		endRow = get_row_from_strip(sc, ld, endStrip, map);
		if (raid->spanDepth == 1) {
			/* Single span: the PD block is directly derivable. */
			startlba_span = 0;
			*pdBlock = start_row << raid->stripeShift;
		} else {
			startlba_span = (u_int8_t)mr_spanset_get_span_block(sc,
			    ld, start_row, pdBlock, map, &error_code);
			if (error_code == 1) {
				mrsas_dprint(sc, MRSAS_PRL11,
				    "LSI Debug: return from %s %d. Send IO w/o region lock.\n",
				    __func__, __LINE__);
				return FALSE;
			}
		}
		if (startlba_span == SPAN_INVALID) {
			mrsas_dprint(sc, MRSAS_PRL11,
			    "LSI Debug: return from %s %d for row 0x%llx,"
			    "start strip %llx endSrip %llx\n",
			    __func__, __LINE__,
			    (unsigned long long)start_row,
			    (unsigned long long)start_strip,
			    (unsigned long long)endStrip);
			return FALSE;
		}
		io_info->start_span = startlba_span;
		io_info->start_row = start_row;
		mrsas_dprint(sc, MRSAS_PRL11,
		    "LSI Debug: Check Span number from %s %d for row 0x%llx, "
		    " start strip 0x%llx endSrip 0x%llx span 0x%x\n",
		    __func__, __LINE__,
		    (unsigned long long)start_row,
		    (unsigned long long)start_strip,
		    (unsigned long long)endStrip, startlba_span);
		mrsas_dprint(sc, MRSAS_PRL11,
		    "LSI Debug : 1. start_row 0x%llx endRow 0x%llx Start span 0x%x\n",
		    (unsigned long long)start_row,
		    (unsigned long long)endRow, startlba_span);
	} else {
		/* Even span: simple division by the per-LD row data size. */
		start_row = mega_div64_32(start_strip, raid->rowDataSize);
		endRow = mega_div64_32(endStrip, raid->rowDataSize);
	}

	numRows = (u_int8_t)(endRow - start_row + 1);	/* get the row count */

	/*
	 * Calculate region info. (Assume region at start of first row, and
	 * assume this IO needs the full row - will adjust if not true.)
	 */
	regStart = start_row << raid->stripeShift;
	regSize = stripSize;

	/* Check if we can send this I/O via FastPath */
	if (raid->capability.fpCapable) {
		if (isRead)
			io_info->fpOkForIo = (raid->capability.fpReadCapable &&
			    ((num_strips == 1) ||
			    raid->capability.fpReadAcrossStripe));
		else
			io_info->fpOkForIo = (raid->capability.fpWriteCapable &&
			    ((num_strips == 1) ||
			    raid->capability.fpWriteAcrossStripe));
	} else
		io_info->fpOkForIo = FALSE;

	if (numRows == 1) {
		/* Single-row I/O: shrink the region to the exact extent. */
		if (num_strips == 1) {
			regStart += ref_in_start_stripe;
			regSize = numBlocks;
		}
	} else if (io_info->IoforUnevenSpan == 0) {
		/*
		 * For Even span region lock optimization. If the start strip
		 * is the last in the start row
		 */
		if (start_strip == (start_row + 1) * raid->rowDataSize - 1) {
			regStart += ref_in_start_stripe;
			/*
			 * initialize count to sectors from startRef to end
			 * of strip
			 */
			regSize = stripSize - ref_in_start_stripe;
		}
		/* add complete rows in the middle of the transfer */
		if (numRows > 2)
			regSize += (numRows - 2) << raid->stripeShift;

		/* if IO ends within first strip of last row */
		if (endStrip == endRow * raid->rowDataSize)
			regSize += ref_in_end_stripe + 1;
		else
			regSize += stripSize;
	} else {
		/* Uneven-span variant of the same region sizing. */
		if (start_strip == (get_strip_from_row(sc, ld, start_row, map) +
		    SPAN_ROW_DATA_SIZE(map, ld, startlba_span) - 1)) {
			regStart += ref_in_start_stripe;
			/*
			 * initialize count to sectors from startRef to end
			 * of strip
			 */
			regSize = stripSize - ref_in_start_stripe;
		}
		/* add complete rows in the middle of the transfer */
		if (numRows > 2)
			regSize += (numRows - 2) << raid->stripeShift;

		/* if IO ends within first strip of last row */
		if (endStrip == get_strip_from_row(sc, ld, endRow, map))
			regSize += ref_in_end_stripe + 1;
		else
			regSize += stripSize;
	}

	pRAID_Context->timeoutValue = map->raidMap.fpPdIoTimeoutSec;
	/* Invader/Fury take the firmware-advertised lock type even on reads. */
	if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY))
		pRAID_Context->regLockFlags = (isRead) ?
		    raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
	else
		pRAID_Context->regLockFlags = (isRead) ?
		    REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
	pRAID_Context->VirtualDiskTgtId = raid->targetId;
	pRAID_Context->regLockRowLBA = regStart;
	pRAID_Context->regLockLength = regSize;
	pRAID_Context->configSeqNum = raid->seqNum;

	/*
	 * Get Phy Params only if FP capable, or else leave it to MR firmware
	 * to do the calculation.
	 */
	if (io_info->fpOkForIo) {
		retval = io_info->IoforUnevenSpan ?
		    mr_spanset_get_phy_params(sc, ld, start_strip,
		    ref_in_start_stripe, io_info, pRAID_Context, map) :
		    MR_GetPhyParams(sc, ld, start_strip,
		    ref_in_start_stripe, io_info, pRAID_Context, map);
		/* If IO on an invalid Pd, then FP is not possible */
		if (io_info->devHandle == MR_PD_INVALID)
			io_info->fpOkForIo = FALSE;
		return retval;
	} else if (isRead) {
		/*
		 * Non-FP read: resolve each strip; abort (still returning
		 * TRUE so firmware handles it) on the first failure.
		 */
		for (stripIdx = 0; stripIdx < num_strips; stripIdx++) {
			retval = io_info->IoforUnevenSpan ?
			    mr_spanset_get_phy_params(sc, ld,
			    start_strip + stripIdx, ref_in_start_stripe,
			    io_info, pRAID_Context, map) :
			    MR_GetPhyParams(sc, ld,
			    start_strip + stripIdx, ref_in_start_stripe,
			    io_info, pRAID_Context, map);
			if (!retval)
				return TRUE;
		}
	}
#if SPAN_DEBUG
	/* Just for testing what arm we get for strip. */
	get_arm_from_strip(sc, ld, start_strip, map);
#endif
	return TRUE;
}
/*
 * mr_update_span_set
 *
 * This routine prepares spanset info from a valid RAID map and stores it
 * into the local copy of ldSpanInfo (per-instance data structure).
 *
 * For every LD and every quad depth ("element"), the first span that has a
 * quad at that depth supplies the quad used to size the span set; the inner
 * count loop accumulates the combined row width across all spans that reach
 * that depth.  Each span set's LBA/row/strip ranges are chained off the
 * previous element's end values.
 *
 * Inputs :
 *   map        - LD map
 *   ldSpanInfo - per-HBA-instance span info to populate
 */
void
mr_update_span_set(MR_DRV_RAID_MAP_ALL * map, PLD_SPAN_INFO ldSpanInfo)
{
	u_int8_t span, count;
	u_int32_t element, span_row_width;
	u_int64_t span_row;
	MR_LD_RAID *raid;
	LD_SPAN_SET *span_set, *span_set_prev;
	MR_QUAD_ELEMENT *quad;
	int ldCount;
	u_int16_t ld;

	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) {
		ld = MR_TargetIdToLdGet(ldCount, map);
		if (ld >= MAX_LOGICAL_DRIVES)
			continue;
		raid = MR_LdRaidGet(ld, map);
		for (element = 0; element < MAX_QUAD_DEPTH; element++) {
			/* Find the first span that has a quad at this depth. */
			for (span = 0; span < raid->spanDepth; span++) {
				if (map->raidMap.ldSpanMap[ld].spanBlock[span].
				    block_span_info.noElements < element + 1)
					continue;
				/* TO-DO */
				span_set = &(ldSpanInfo[ld].span_set[element]);
				quad = &map->raidMap.ldSpanMap[ld].
				    spanBlock[span].block_span_info.quad[element];

				span_set->diff = quad->diff;

				/*
				 * Sum row data sizes of every span that
				 * reaches this depth; record each span's
				 * strip offset within the combined row.
				 */
				for (count = 0, span_row_width = 0;
				    count < raid->spanDepth; count++) {
					if (map->raidMap.ldSpanMap[ld].spanBlock[count].
					    block_span_info.noElements >= element + 1) {
						span_set->strip_offset[count] = span_row_width;
						span_row_width +=
						    MR_LdSpanPtrGet(ld, count, map)->spanRowDataSize;
#if SPAN_DEBUG
						printf("LSI Debug span %x rowDataSize %x\n",
						    count,
						    MR_LdSpanPtrGet(ld, count, map)->spanRowDataSize);
#endif
					}
				}

				span_set->span_row_data_width = span_row_width;
				/* Number of rows covered by this quad (round up). */
				span_row = mega_div64_32(((quad->logEnd -
				    quad->logStart) + quad->diff), quad->diff);

				if (element == 0) {
					/* First element: ranges start at zero. */
					span_set->log_start_lba = 0;
					span_set->log_end_lba =
					    ((span_row << raid->stripeShift) * span_row_width) - 1;
					span_set->span_row_start = 0;
					span_set->span_row_end = span_row - 1;
					span_set->data_strip_start = 0;
					span_set->data_strip_end =
					    (span_row * span_row_width) - 1;
					span_set->data_row_start = 0;
					span_set->data_row_end =
					    (span_row * quad->diff) - 1;
				} else {
					/* Chain ranges off the previous element. */
					span_set_prev =
					    &(ldSpanInfo[ld].span_set[element - 1]);
					span_set->log_start_lba =
					    span_set_prev->log_end_lba + 1;
					span_set->log_end_lba =
					    span_set->log_start_lba +
					    ((span_row << raid->stripeShift) * span_row_width) - 1;
					span_set->span_row_start =
					    span_set_prev->span_row_end + 1;
					span_set->span_row_end =
					    span_set->span_row_start + span_row - 1;
					span_set->data_strip_start =
					    span_set_prev->data_strip_end + 1;
					span_set->data_strip_end =
					    span_set->data_strip_start +
					    (span_row * span_row_width) - 1;
					span_set->data_row_start =
					    span_set_prev->data_row_end + 1;
					span_set->data_row_end =
					    span_set->data_row_start +
					    (span_row * quad->diff) - 1;
				}
				break;
			}
			if (span == raid->spanDepth)
				break;	/* no quads remain */
		}
	}
#if SPAN_DEBUG
	getSpanInfo(map, ldSpanInfo);	/* to get span set info */
#endif
}
/* * ************************************************************* * * This routine calculates the arm, span and block for * the specified stripe and reference in stripe. * * Inputs : * * ld - Logical drive number * stripRow - Stripe number * stripRef - Reference in stripe * * Outputs : * * span - Span number * block - Absolute Block number in the physical disk */ U8 MR_GetPhyParams(struct mrsas_instance *instance, U32 ld, U64 stripRow, U16 stripRef, U64 *pdBlock, U16 *pDevHandle, MPI2_SCSI_IO_VENDOR_UNIQUE *pRAID_Context, MR_FW_RAID_MAP_ALL *map) { MR_LD_RAID *raid = MR_LdRaidGet(ld, map); U32 pd, arRef; U8 physArm, span; U64 row; int error_code = 0; U8 retval = TRUE; U32 rowMod; U32 armQ; U32 arm; U16 devid = instance->device_id; ASSERT(raid->rowDataSize != 0); row = (stripRow / raid->rowDataSize); if (raid->level == 6) { U32 logArm = (stripRow % (raid->rowDataSize)); if (raid->rowSize == 0) { return (FALSE); } rowMod = (row % (raid->rowSize)); armQ = raid->rowSize-1-rowMod; arm = armQ + 1 + logArm; if (arm >= raid->rowSize) arm -= raid->rowSize; physArm = (U8)arm; } else { if (raid->modFactor == 0) return (FALSE); physArm = MR_LdDataArmGet(ld, (stripRow % (raid->modFactor)), map); } if (raid->spanDepth == 1) { span = 0; *pdBlock = row << raid->stripeShift; } else span = (U8)MR_GetSpanBlock(ld, row, pdBlock, map, &error_code); if (error_code == 1) return (FALSE); /* Get the array on which this span is present. */ arRef = MR_LdSpanArrayGet(ld, span, map); /* Get the Pd. */ pd = MR_ArPdGet(arRef, physArm, map); /* Get dev handle from Pd. */ if (pd != MR_PD_INVALID) { *pDevHandle = MR_PdDevHandleGet(pd, map); } else { *pDevHandle = MR_PD_INVALID; /* set dev handle as invalid. 
*/ if ((raid->level >= 5) && ((devid != PCI_DEVICE_ID_LSI_INVADER) || ((devid == PCI_DEVICE_ID_LSI_INVADER || (devid == PCI_DEVICE_ID_LSI_FURY)) && raid->regTypeReqOnRead != REGION_TYPE_UNUSED))) { pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE; } else if (raid->level == 1) { /* Get Alternate Pd. */ pd = MR_ArPdGet(arRef, physArm + 1, map); /* Get dev handle from Pd. */ if (pd != MR_PD_INVALID) *pDevHandle = MR_PdDevHandleGet(pd, map); } } *pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk; pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm; return (retval); }
/*
 * getSpanInfo - debug dump of the computed span-set bookkeeping.
 *
 * Walks every LD in the firmware RAID map and prints its span depth, the
 * quad count per span, every populated span_set entry in ldSpanInfo, and
 * the raw quad elements backing each entry.  Always returns 0.
 *
 * NOTE(review): this function references `instance` but has no such
 * parameter and none is visible in this file's scope — presumably it only
 * compiles when guarded out (e.g. under SPAN_DEBUG).  Confirm against the
 * caller / build configuration before enabling it.
 */
static int getSpanInfo(struct MR_FW_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo)
{
	u8 span;
	u32 element;
	struct MR_LD_RAID *raid;
	LD_SPAN_SET *span_set;
	struct MR_QUAD_ELEMENT *quad;
	int ldCount;
	u16 ld;

	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) {
		ld = MR_TargetIdToLdGet(ldCount, map);
		if (ld >= MAX_LOGICAL_DRIVES)
			continue;
		raid = MR_LdRaidGet(ld, map);
		dev_dbg(&instance->pdev->dev, "LD %x: span_depth=%x\n",
			ld, raid->spanDepth);
		for (span = 0; span < raid->spanDepth; span++)
			dev_dbg(&instance->pdev->dev, "Span=%x,"
			" number of quads=%x\n", span,
			le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
			block_span_info.noElements));
		for (element = 0; element < MAX_QUAD_DEPTH; element++) {
			span_set = &(ldSpanInfo[ld].span_set[element]);
			/* Zero width marks the first unused span-set slot. */
			if (span_set->span_row_data_width == 0)
				break;

			dev_dbg(&instance->pdev->dev, "Span Set %x:"
				"width=%x, diff=%x\n", element,
				(unsigned int)span_set->span_row_data_width,
				(unsigned int)span_set->diff);
			dev_dbg(&instance->pdev->dev, "logical LBA"
				"start=0x%08lx, end=0x%08lx\n",
				(long unsigned int)span_set->log_start_lba,
				(long unsigned int)span_set->log_end_lba);
			dev_dbg(&instance->pdev->dev, "span row start=0x%08lx,"
				" end=0x%08lx\n",
				(long unsigned int)span_set->span_row_start,
				(long unsigned int)span_set->span_row_end);
			dev_dbg(&instance->pdev->dev, "data row start=0x%08lx,"
				" end=0x%08lx\n",
				(long unsigned int)span_set->data_row_start,
				(long unsigned int)span_set->data_row_end);
			dev_dbg(&instance->pdev->dev, "data strip start=0x%08lx,"
				" end=0x%08lx\n",
				(long unsigned int)span_set->data_strip_start,
				(long unsigned int)span_set->data_strip_end);

			/* Dump the raw quad behind this element in each span. */
			for (span = 0; span < raid->spanDepth; span++) {
				if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
					block_span_info.noElements) >=
					element + 1) {
					quad = &map->raidMap.ldSpanMap[ld].
						spanBlock[span].block_span_info.
						quad[element];
					dev_dbg(&instance->pdev->dev, "Span=%x,"
						"Quad=%x, diff=%x\n", span,
						element, le32_to_cpu(quad->diff));
					dev_dbg(&instance->pdev->dev,
						"offset_in_span=0x%08lx\n",
						(long unsigned int)le64_to_cpu(quad->offsetInSpan));
					dev_dbg(&instance->pdev->dev,
						"logical start=0x%08lx, end=0x%08lx\n",
						(long unsigned int)le64_to_cpu(quad->logStart),
						(long unsigned int)le64_to_cpu(quad->logEnd));
				}
			}
		}
	}
	return 0;
}
/*
 * mrsas_set_pd_lba: Sets PD LBA
 * input:	io_request pointer
 *		CDB length
 *		io_info pointer
 *		Pointer to CCB
 *		Local RAID map pointer
 *		Start block of IO
 *		Block Size
 *
 * Used to set the PD logical block address in CDB for FP IOs.  When the LD
 * runs with controller-managed T10 protection information (DIF), the CDB is
 * rebuilt as a 32-byte variable-length READ(32)/WRITE(32) with EEDP fields.
 *
 * NOTE(review): this definition is truncated in this chunk — it ends at the
 * opening of the non-DIF `else` branch; the remainder is not visible here.
 */
void
mrsas_set_pd_lba(MRSAS_RAID_SCSI_IO_REQUEST * io_request, u_int8_t cdb_len,
    struct IO_REQUEST_INFO *io_info, union ccb *ccb,
    MR_DRV_RAID_MAP_ALL * local_map_ptr, u_int32_t ref_tag,
    u_int32_t ld_block_size)
{
	MR_LD_RAID *raid;
	u_int32_t ld;
	u_int64_t start_blk = io_info->pdBlock;
	u_int8_t *cdb = io_request->CDB.CDB32;
	u_int32_t num_blocks = io_info->numBlocks;
	u_int8_t opcode = 0, flagvals = 0, groupnum = 0, control = 0;
	struct ccb_hdr *ccb_h = &(ccb->ccb_h);

	/* Check if T10 PI (DIF) is enabled for this LD */
	ld = MR_TargetIdToLdGet(io_info->ldTgtId, local_map_ptr);
	raid = MR_LdRaidGet(ld, local_map_ptr);
	if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) {
		memset(cdb, 0, sizeof(io_request->CDB.CDB32));
		cdb[0] = MRSAS_SCSI_VARIABLE_LENGTH_CMD;
		cdb[7] = MRSAS_SCSI_ADDL_CDB_LEN;

		/*
		 * NOTE(review): CAM_DIR_OUT (host-to-device, i.e. a write)
		 * selects the READ32 service action here, while the EEDP
		 * flags below treat CAM_DIR_OUT as the checked/write
		 * direction — confirm this mapping is intentional.
		 */
		if (ccb_h->flags == CAM_DIR_OUT)
			cdb[9] = MRSAS_SCSI_SERVICE_ACTION_READ32;
		else
			cdb[9] = MRSAS_SCSI_SERVICE_ACTION_WRITE32;
		cdb[10] = MRSAS_RD_WR_PROTECT_CHECK_ALL;

		/* LBA (64-bit, big-endian in the CDB) */
		cdb[12] = (u_int8_t)((start_blk >> 56) & 0xff);
		cdb[13] = (u_int8_t)((start_blk >> 48) & 0xff);
		cdb[14] = (u_int8_t)((start_blk >> 40) & 0xff);
		cdb[15] = (u_int8_t)((start_blk >> 32) & 0xff);
		cdb[16] = (u_int8_t)((start_blk >> 24) & 0xff);
		cdb[17] = (u_int8_t)((start_blk >> 16) & 0xff);
		cdb[18] = (u_int8_t)((start_blk >> 8) & 0xff);
		cdb[19] = (u_int8_t)(start_blk & 0xff);

		/* Logical block reference tag */
		io_request->CDB.EEDP32.PrimaryReferenceTag = swap32(ref_tag);
		io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0xffff;
		io_request->IoFlags = 32;	/* Specify 32-byte cdb */

		/* Transfer length (32-bit, big-endian in the CDB) */
		cdb[28] = (u_int8_t)((num_blocks >> 24) & 0xff);
		cdb[29] = (u_int8_t)((num_blocks >> 16) & 0xff);
		cdb[30] = (u_int8_t)((num_blocks >> 8) & 0xff);
		cdb[31] = (u_int8_t)(num_blocks & 0xff);

		/* set SCSI IO EEDP Flags */
		if (ccb_h->flags == CAM_DIR_OUT) {
			io_request->EEDPFlags =
			    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
		} else {
			io_request->EEDPFlags =
			    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
		}
		io_request->Control |= (0x4 << 26);
		io_request->EEDPBlockSize = ld_block_size;
	} else {
/*
 * MR_BuildRaidContext
 *
 * Set up the fast-path RAID context for the I/O described by io_info:
 * compute start/end row and strip, size the region lock, fill in
 * pRAID_Context, and — when the I/O is fast-path capable — resolve the
 * physical parameters via MR_GetPhyParams().
 *
 * Returns FALSE when the map is absent, the LD index is out of range, or a
 * division guard trips; otherwise the MR_GetPhyParams() result for
 * fast-path I/O, or TRUE when the I/O is left to the firmware.
 */
U8
MR_BuildRaidContext(struct mrsas_instance *instance,
    struct IO_REQUEST_INFO *io_info,
    MPI2_SCSI_IO_VENDOR_UNIQUE *pRAID_Context, MR_FW_RAID_MAP_ALL *map)
{
	MR_LD_RAID *raid;
	U32 ld, stripSize, stripe_mask;
	U64 endLba, endStrip, endRow;
	U64 start_row, start_strip;
	REGION_KEY regStart;
	REGION_LEN regSize;
	U8 num_strips, numRows;
	U16 ref_in_start_stripe;
	U16 ref_in_end_stripe;
	U64 ldStartBlock;
	U32 numBlocks, ldTgtId;
	U8 isRead;
	U8 retval = 0;

	ldStartBlock = io_info->ldStartBlock;
	numBlocks = io_info->numBlocks;
	ldTgtId = io_info->ldTgtId;
	isRead = io_info->isRead;

	/* No RAID map yet: fast path impossible. */
	if (map == NULL) {
		io_info->fpOkForIo = FALSE;
		return (FALSE);
	}

	ld = MR_TargetIdToLdGet(ldTgtId, map);

	/* Guard the map array bounds before dereferencing the LD. */
	if (ld >= MAX_LOGICAL_DRIVES) {
		io_info->fpOkForIo = FALSE;
		return (FALSE);
	}

	raid = MR_LdRaidGet(ld, map);

	stripSize = 1 << raid->stripeShift;
	stripe_mask = stripSize-1;
	/*
	 * calculate starting row and stripe, and number of strips and rows
	 */
	start_strip = ldStartBlock >> raid->stripeShift;
	ref_in_start_stripe = (U16)(ldStartBlock & stripe_mask);
	endLba = ldStartBlock + numBlocks - 1;
	ref_in_end_stripe = (U16)(endLba & stripe_mask);
	endStrip = endLba >> raid->stripeShift;
	num_strips = (U8)(endStrip - start_strip + 1);

	/* Check to make sure is not dividing by zero */
	if (raid->rowDataSize == 0)
		return (FALSE);

	start_row = (start_strip / raid->rowDataSize);
	endRow = (endStrip / raid->rowDataSize);
	/* get the row count */
	numRows = (U8)(endRow - start_row + 1);

	/*
	 * calculate region info.  (Assume region at the start of the first
	 * row and the full row width; adjusted below when not true.)
	 */
	regStart = start_row << raid->stripeShift;
	regSize = stripSize;

	/* Check if we can send this I/O via FastPath */
	if (raid->capability.fpCapable) {
		if (isRead) {
			io_info->fpOkForIo = (raid->capability.fpReadCapable &&
			    ((num_strips == 1) ||
			    raid->capability.fpReadAcrossStripe));
		} else {
			io_info->fpOkForIo =
			    (raid->capability.fpWriteCapable &&
			    ((num_strips == 1) ||
			    raid->capability.fpWriteAcrossStripe));
		}
	} else
		io_info->fpOkForIo = FALSE;

	/*
	 * Check for DIF support
	 */
	if (!raid->capability.ldPiMode) {
		io_info->ldPI = FALSE;
	} else {
		io_info->ldPI = TRUE;
	}

	if (numRows == 1) {
		/* Single-row I/O: shrink the region to the exact extent. */
		if (num_strips == 1) {
			regStart += ref_in_start_stripe;
			regSize = numBlocks;
		}
	} else {
		/* If the start strip is the last in the start row. */
		if (start_strip == (start_row + 1) * raid->rowDataSize - 1) {
			regStart += ref_in_start_stripe;
			/* sectors from startRef to the end of the strip */
			regSize = stripSize - ref_in_start_stripe;
		}

		/* add complete rows in the middle of the transfer */
		if (numRows > 2) {
			regSize += (numRows - 2) << raid->stripeShift;
		}

		/* if IO ends within first strip of last row */
		if (endStrip == endRow * raid->rowDataSize) {
			regSize += ref_in_end_stripe + 1;
		} else {
			regSize += stripSize;
		}
	}

	pRAID_Context->timeoutValue = map->raidMap.fpPdIoTimeoutSec;

	/* Invader/Fury take the firmware lock type even on reads. */
	if ((instance->device_id == PCI_DEVICE_ID_LSI_INVADER) ||
	    (instance->device_id == PCI_DEVICE_ID_LSI_FURY)) {
		pRAID_Context->regLockFlags = (isRead) ?
		    raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
	} else {
		pRAID_Context->regLockFlags = (isRead) ?
		    REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
	}

	pRAID_Context->ldTargetId = raid->targetId;
	pRAID_Context->regLockRowLBA = regStart;
	pRAID_Context->regLockLength = regSize;
	pRAID_Context->configSeqNum = raid->seqNum;

	/*
	 * Get Phy Params only if FP capable,
	 * or else leave it to MR firmware to do the calculation.
	 */
	if (io_info->fpOkForIo) {
		/* if fast path possible then get the physical parameters */
		retval = MR_GetPhyParams(instance, ld, start_strip,
		    ref_in_start_stripe, &io_info->pdBlock,
		    &io_info->devHandle, pRAID_Context, map);

		/* If IO on an invalid Pd, then FP is not possible. */
		if (io_info->devHandle == MR_PD_INVALID)
			io_info->fpOkForIo = FALSE;

		return (retval);
	} else if (isRead) {
		/*
		 * Non-FP read: resolve each strip; stop at the first
		 * failure (still TRUE so firmware handles the I/O).
		 */
		uint_t stripIdx;

		for (stripIdx = 0; stripIdx < num_strips; stripIdx++) {
			if (!MR_GetPhyParams(instance, ld,
			    start_strip + stripIdx, ref_in_start_stripe,
			    &io_info->pdBlock, &io_info->devHandle,
			    pRAID_Context, map)) {
				return (TRUE);
			}
		}
	}
	return (TRUE);
}
/*
 ******************************************************************************
 *
 * mr_update_span_set
 *
 * This routine prepares spanset info from a valid RAID map and stores it
 * into the local copy of ldSpanInfo (per-instance data structure).
 *
 * For every LD and every quad depth ("element"), the first span holding a
 * quad at that depth supplies the quad used to size the span set; the inner
 * count loop accumulates the combined row width across all spans reaching
 * that depth.  Each span set's LBA/row/strip ranges chain off the previous
 * element's end values.  All map fields are little-endian on the wire,
 * hence the le32/le64 conversions.
 *
 * Inputs :
 *	map        - LD map
 *	ldSpanInfo - ldSpanInfo per HBA instance
 *
 */
void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map,
	PLD_SPAN_INFO ldSpanInfo)
{
	u8   span, count;
	u32  element, span_row_width;
	u64  span_row;
	struct MR_LD_RAID *raid;
	LD_SPAN_SET *span_set, *span_set_prev;
	struct MR_QUAD_ELEMENT    *quad;
	int ldCount;
	u16 ld;

	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) {
		ld = MR_TargetIdToLdGet(ldCount, map);
		if (ld >= MAX_LOGICAL_DRIVES)
			continue;
		raid = MR_LdRaidGet(ld, map);
		for (element = 0; element < MAX_QUAD_DEPTH; element++) {
			/* First span that has a quad at this depth wins. */
			for (span = 0; span < raid->spanDepth; span++) {
				if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
					block_span_info.noElements) <
					element + 1)
					continue;
				span_set = &(ldSpanInfo[ld].span_set[element]);
				quad = &map->raidMap.ldSpanMap[ld].
					spanBlock[span].block_span_info.
					quad[element];

				span_set->diff = le32_to_cpu(quad->diff);

				/*
				 * Sum the row data sizes of every span that
				 * reaches this depth; record each span's
				 * strip offset inside the combined row.
				 */
				for (count = 0, span_row_width = 0;
					count < raid->spanDepth; count++) {
					if (le32_to_cpu(map->raidMap.ldSpanMap[ld].
						spanBlock[count].
						block_span_info.
						noElements) >= element + 1) {
						span_set->strip_offset[count] =
							span_row_width;
						span_row_width +=
							MR_LdSpanPtrGet
							(ld, count, map)->spanRowDataSize;
						printk(KERN_INFO "megasas:"
							"span %x rowDataSize %x\n",
							count, MR_LdSpanPtrGet
							(ld, count, map)->spanRowDataSize);
					}
				}

				span_set->span_row_data_width = span_row_width;
				/* Rows covered by this quad (rounded up). */
				span_row = mega_div64_32(((le64_to_cpu(quad->logEnd) -
					le64_to_cpu(quad->logStart)) +
					le32_to_cpu(quad->diff)),
					le32_to_cpu(quad->diff));

				if (element == 0) {
					/* First element: ranges start at 0. */
					span_set->log_start_lba = 0;
					span_set->log_end_lba =
						((span_row << raid->stripeShift)
						* span_row_width) - 1;

					span_set->span_row_start = 0;
					span_set->span_row_end = span_row - 1;

					span_set->data_strip_start = 0;
					span_set->data_strip_end =
						(span_row * span_row_width) - 1;

					span_set->data_row_start = 0;
					span_set->data_row_end =
						(span_row * le32_to_cpu(quad->diff)) - 1;
				} else {
					/* Chain off the previous element. */
					span_set_prev = &(ldSpanInfo[ld].
							span_set[element - 1]);
					span_set->log_start_lba =
						span_set_prev->log_end_lba + 1;
					span_set->log_end_lba =
						span_set->log_start_lba +
						((span_row << raid->stripeShift)
						* span_row_width) - 1;

					span_set->span_row_start =
						span_set_prev->span_row_end + 1;
					span_set->span_row_end =
					span_set->span_row_start + span_row - 1;

					span_set->data_strip_start =
					span_set_prev->data_strip_end + 1;
					span_set->data_strip_end =
						span_set->data_strip_start +
						(span_row * span_row_width) - 1;

					span_set->data_row_start =
						span_set_prev->data_row_end + 1;
					span_set->data_row_end =
						span_set->data_row_start +
						(span_row * le32_to_cpu(quad->diff)) - 1;
				}
				break;
			}
			if (span == raid->spanDepth)
				break;	/* no quads at this depth — done */
		}
	}
#if SPAN_DEBUG
	getSpanInfo(map, ldSpanInfo);
#endif

}
/*
 * getSpanInfo: dump the computed span-set bookkeeping for every LD.
 *
 * Debug helper: walks each logical drive in the driver RAID map, printing
 * its span depth, the quad count per span, every populated LD_SPAN_SET
 * entry from ldSpanInfo, and the raw quad elements backing each entry.
 * Always returns 0.
 */
static int
getSpanInfo(MR_DRV_RAID_MAP_ALL * map, PLD_SPAN_INFO ldSpanInfo)
{
	MR_LD_RAID *raid;
	LD_SPAN_SET *sset;
	MR_QUAD_ELEMENT *q;
	u_int32_t setIdx;
	u_int8_t spanIdx;
	u_int16_t ld;
	int tgt;

	for (tgt = 0; tgt < MAX_LOGICAL_DRIVES; tgt++) {
		ld = MR_TargetIdToLdGet(tgt, map);
		if (ld >= MAX_LOGICAL_DRIVES)
			continue;

		raid = MR_LdRaidGet(ld, map);
		printf("LD %x: span_depth=%x\n", ld, raid->spanDepth);
		for (spanIdx = 0; spanIdx < raid->spanDepth; spanIdx++)
			printf("Span=%x, number of quads=%x\n", spanIdx,
			    map->raidMap.ldSpanMap[ld].spanBlock[spanIdx].
			    block_span_info.noElements);

		for (setIdx = 0; setIdx < MAX_QUAD_DEPTH; setIdx++) {
			sset = &(ldSpanInfo[ld].span_set[setIdx]);
			/* Zero width marks the first unused slot. */
			if (sset->span_row_data_width == 0)
				break;

			printf("Span Set %x: width=%x, diff=%x\n", setIdx,
			    (unsigned int)sset->span_row_data_width,
			    (unsigned int)sset->diff);
			printf("logical LBA start=0x%08lx, end=0x%08lx\n",
			    (long unsigned int)sset->log_start_lba,
			    (long unsigned int)sset->log_end_lba);
			printf("span row start=0x%08lx, end=0x%08lx\n",
			    (long unsigned int)sset->span_row_start,
			    (long unsigned int)sset->span_row_end);
			printf("data row start=0x%08lx, end=0x%08lx\n",
			    (long unsigned int)sset->data_row_start,
			    (long unsigned int)sset->data_row_end);
			printf("data strip start=0x%08lx, end=0x%08lx\n",
			    (long unsigned int)sset->data_strip_start,
			    (long unsigned int)sset->data_strip_end);

			/* Dump the raw quad behind this element per span. */
			for (spanIdx = 0; spanIdx < raid->spanDepth;
			    spanIdx++) {
				if (map->raidMap.ldSpanMap[ld].
				    spanBlock[spanIdx].block_span_info.
				    noElements < setIdx + 1)
					continue;
				q = &map->raidMap.ldSpanMap[ld].
				    spanBlock[spanIdx].block_span_info.
				    quad[setIdx];
				printf("Span=%x, Quad=%x, diff=%x\n",
				    spanIdx, setIdx, q->diff);
				printf("offset_in_span=0x%08lx\n",
				    (long unsigned int)q->offsetInSpan);
				printf("logical start=0x%08lx, end=0x%08lx\n",
				    (long unsigned int)q->logStart,
				    (long unsigned int)q->logEnd);
			}
		}
	}
	return 0;
}