int bcm_assertlog_get(void *outbuf, int iobuf_len) { bcm_assert_info_t *g_assert_hdl = &g_assert_info; assertlog_results_t *results = (assertlog_results_t *)outbuf; int iobuf_allowed_num = 0; uint8 num = 0; uint8 last_idx; uint8 cur_idx; assert_record_t *log_record = &results->logs[0]; int i; if (g_assert_hdl->ref_cnt == 0) return -1; ASSERT_LOCK(g_assert_hdl); iobuf_allowed_num = IOBUF_ALLOWED_NUM_OF_LOGREC(assert_record_t, iobuf_len); cur_idx = g_assert_hdl->cur_idx; if (g_assert_hdl->seq_num < MAX_ASSERT_NUM) { last_idx = 0; num = cur_idx; } else { last_idx = MODINC(cur_idx, MAX_ASSERT_NUM); num = MAX_ASSERT_NUM - 1; } num = MIN(iobuf_allowed_num, num); results->num = num; results->version = ASSERTLOG_CUR_VER; results->record_len = sizeof(assert_record_t); for (i = 0; i < num; i++) { memcpy(&log_record[i], &g_assert_hdl->assert_table[last_idx], sizeof(assert_record_t)); last_idx = MODINC(last_idx, MAX_ASSERT_NUM); } ASSERT_UNLOCK(g_assert_hdl); return 0; }
void BCMATTACHFN(bcm_assertlog_deinit)(void) { bcm_assert_info_t *g_assert_hdl = &g_assert_info; if (g_assert_hdl->ref_cnt) { ASSERT_LOCK(g_assert_hdl); g_assert_hdl->ref_cnt--; ASSERT_UNLOCK(g_assert_hdl); } if (g_assert_hdl->ref_cnt == 0) { ASSERT_FREE_LOCK(g_assert_hdl); memset(g_assert_hdl, 0, sizeof(bcm_assert_info_t)); } return; }
/* * Function: scsi_end_request() * * Purpose: Post-processing of completed commands called from interrupt * handler or a bottom-half handler. * * Arguments: SCpnt - command that is complete. * uptodate - 1 if I/O indicates success, 0 for I/O error. * sectors - number of sectors we want to mark. * requeue - indicates whether we should requeue leftovers. * frequeue - indicates that if we release the command block * that the queue request function should be called. * * Lock status: Assumed that lock is not held upon entry. * * Returns: Nothing * * Notes: This is called for block device requests in order to * mark some number of sectors as complete. * * We are guaranteeing that the request queue will be goosed * at some point during this call. */ static Scsi_Cmnd *__scsi_end_request(Scsi_Cmnd * SCpnt, int uptodate, int sectors, int requeue, int frequeue) { request_queue_t *q = &SCpnt->device->request_queue; struct request *req; struct buffer_head *bh; unsigned long flags; int nsect; ASSERT_LOCK(&io_request_lock, 0); req = &SCpnt->request; req->errors = 0; if (!uptodate) { printk(" I/O error: dev %s, sector %lu\n", kdevname(req->rq_dev), req->sector); } do { if ((bh = req->bh) != NULL) { nsect = bh->b_size >> 9; blk_finished_io(nsect); blk_finished_sectors(req, nsect); req->bh = bh->b_reqnext; bh->b_reqnext = NULL; sectors -= nsect; bh->b_end_io(bh, uptodate); if ((bh = req->bh) != NULL) { req->hard_sector += nsect; req->hard_nr_sectors -= nsect; req->sector += nsect; req->nr_sectors -= nsect; req->current_nr_sectors = bh->b_size >> 9; req->hard_cur_sectors = req->current_nr_sectors; if (req->nr_sectors < req->current_nr_sectors) { req->nr_sectors = req->current_nr_sectors; printk("scsi_end_request: buffer-list destroyed\n"); } } }
/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize SCpnt fields related to error handling.
 *
 * Arguments:   SCpnt - command that is ready to be queued.
 *
 * Returns:     1 (always).
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.  Typically this will
 *              be called once for each command, as required.  It also
 *              saves "old" copies of several fields so the error
 *              handler can run substitute commands and later restore
 *              the originals before re-issuing the real command.
 */
int scsi_init_cmd_errh(Scsi_Cmnd * SCpnt)
{
	/* Caller must not hold io_request_lock here. */
	ASSERT_LOCK(&io_request_lock, 0);

	SCpnt->owner = SCSI_OWNER_MIDLEVEL;
	SCpnt->reset_chain = NULL;
	SCpnt->serial_number = 0;
	SCpnt->serial_number_at_timeout = 0;
	SCpnt->flags = 0;
	SCpnt->retries = 0;
	SCpnt->abort_reason = 0;

	memset((void *) SCpnt->sense_buffer, 0, sizeof SCpnt->sense_buffer);

	/* Derive the CDB length from the opcode if the caller left it unset. */
	if (SCpnt->cmd_len == 0)
		SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]);

	/*
	 * We need saved copies of a number of fields - this is because
	 * error handling may need to overwrite these with different values
	 * to run different commands, and once error handling is complete,
	 * we will need to restore these values prior to running the actual
	 * command.
	 */
	SCpnt->old_use_sg = SCpnt->use_sg;
	SCpnt->old_cmd_len = SCpnt->cmd_len;
	SCpnt->sc_old_data_direction = SCpnt->sc_data_direction;
	SCpnt->old_underflow = SCpnt->underflow;
	memcpy((void *) SCpnt->data_cmnd,
	       (const void *) SCpnt->cmnd, sizeof(SCpnt->cmnd));

	SCpnt->buffer = SCpnt->request_buffer;
	SCpnt->bufflen = SCpnt->request_bufflen;

	/*
	 * NOTE(review): the original code assigned reset_chain = NULL and
	 * abort_reason = 0 a second time here; those duplicate assignments
	 * were dead and have been removed (both fields are set above).
	 */
	SCpnt->internal_timeout = NORMAL_TIMEOUT;

	return 1;
}
/*
 * Function:    scsi_queue_next_request()
 *
 * Purpose:     Handle post-processing of completed commands.
 *
 * Arguments:   q     - request queue to goose.
 *              SCpnt - command that may need to be requeued, or NULL.
 *
 * Returns:     Nothing
 *
 * Notes:       After command completion, there may be blocks left
 *              over which weren't finished by the previous command
 *              this can be for a number of reasons - the main one is
 *              that a medium error occurred, and the sectors after
 *              the bad block need to be re-read.
 *
 *              If SCpnt is NULL, it means that the previous command
 *              was completely finished, and we should simply start
 *              a new command, if possible.
 *
 *              This is where a lot of special case code has begun to
 *              accumulate.  It doesn't really affect readability or
 *              anything, but it might be considered architecturally
 *              inelegant.  If more of these special cases start to
 *              accumulate, I am thinking along the lines of implementing
 *              an atexit() like technology that gets run when commands
 *              complete.  I am not convinced that it is worth the
 *              added overhead, however.  Right now as things stand,
 *              there are simple conditional checks, and most hosts
 *              would skip past.
 *
 *              Another possible solution would be to tailor different
 *              handler functions, sort of like what we did in scsi_merge.c.
 *              This is probably a better solution, but the number of different
 *              permutations grows as 2**N, and if too many more special cases
 *              get added, we start to get screwed.
 */
void scsi_queue_next_request(request_queue_t * q, Scsi_Cmnd * SCpnt)
{
	int all_clear;
	unsigned long flags;
	Scsi_Device *SDpnt;
	struct Scsi_Host *SHpnt;

	/* Must be entered WITHOUT io_request_lock held; taken below. */
	ASSERT_LOCK(&io_request_lock, 0);

	spin_lock_irqsave(&io_request_lock, flags);
	if (SCpnt != NULL) {
		/*
		 * For some reason, we are not done with this request.
		 * This happens for I/O errors in the middle of the request,
		 * in which case we need to request the blocks that come after
		 * the bad sector.  Re-queue at the head so it runs next.
		 */
		SCpnt->request.special = (void *) SCpnt;
		list_add(&SCpnt->request.queue, &q->queue_head);
	}

	/*
	 * Just hit the requeue function for the queue.
	 */
	q->request_fn(q);

	SDpnt = (Scsi_Device *) q->queuedata;
	SHpnt = SDpnt->host;

	/*
	 * If this is a single-lun device, and we are currently finished
	 * with this device, then see if we need to get another device
	 * started.  FIXME(eric) - if this function gets too cluttered
	 * with special case code, then spin off separate versions and
	 * use function pointers to pick the right one.
	 */
	if (SDpnt->single_lun
	    && list_empty(&q->queue_head)
	    && SDpnt->device_busy == 0) {
		request_queue_t *q;	/* NOTE(review): shadows the parameter q */
		for (SDpnt = SHpnt->host_queue; SDpnt; SDpnt = SDpnt->next) {
			/* Stop if the host is saturated or blocked. */
			if (((SHpnt->can_queue > 0)
			     && (SHpnt->host_busy >= SHpnt->can_queue))
			    || (SHpnt->host_blocked)
			    || (SHpnt->host_self_blocked)
			    || (SDpnt->device_blocked)) {
				break;
			}
			q = &SDpnt->request_queue;
			q->request_fn(q);
		}
	}

	/*
	 * Now see whether there are other devices on the bus which
	 * might be starved.  If so, hit the request function.  If we
	 * don't find any, then it is safe to reset the flag.  If we
	 * find any device that it is starved, it isn't safe to reset the
	 * flag as the queue function releases the lock and thus some
	 * other device might have become starved along the way.
	 */
	all_clear = 1;
	if (SHpnt->some_device_starved) {
		for (SDpnt = SHpnt->host_queue; SDpnt; SDpnt = SDpnt->next) {
			request_queue_t *q;	/* NOTE(review): shadows the parameter q */
			if ((SHpnt->can_queue > 0
			     && (SHpnt->host_busy >= SHpnt->can_queue))
			    || (SHpnt->host_blocked)
			    || (SHpnt->host_self_blocked)) {
				break;
			}
			if (SDpnt->device_blocked || !SDpnt->starved) {
				continue;
			}
			q = &SDpnt->request_queue;
			q->request_fn(q);
			all_clear = 0;
		}
		/*
		 * Only clear the flag if we walked the whole list
		 * (SDpnt == NULL) without goosing any starved device.
		 */
		if (SDpnt == NULL && all_clear) {
			SHpnt->some_device_starved = 0;
		}
	}
	spin_unlock_irqrestore(&io_request_lock, flags);
}