Example #1
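/* cachedev_idlework_callback: idle-time callback for the cache device.
 * When both the backing (real) device queue and the cache device queue are
 * empty, it locates a run of dirty cache blocks, reserves buffer space for
 * them, and issues a read of those blocks from the cache device as the
 * first step of an idle-time destage (flush). */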
static void cachedev_idlework_callback (void *idleworkparam, int idledevno)
{

   struct cache_dev *cache = (struct cache_dev *)idleworkparam;
   struct cache_dev_event *flushdesc;
   ioreq_event *flushreq;
   int blkno, bcount;
   struct ioq *queue;

   ASSERT (idledevno == cache->real_devno);

#ifdef DEBUG_CACHEDEV
   fprintf(outputfile, "*** %f: Entered cachedev::cachedev_idlework_callback\n", simtime );
   fflush(outputfile);
#endif

   queue = (*cache->queuefind)(cache->queuefindparam, cache->real_devno);
   if (ioqueue_get_number_in_queue (queue) != 0) {
      return;
   }

   queue = (*cache->queuefind)(cache->queuefindparam, cache->cache_devno);
   if (ioqueue_get_number_in_queue (queue) != 0) {
      return;
   }

   if (cachedev_find_dirty_cache_blocks (cache, &blkno, &bcount) == 0) {
      return;
   }

   /* Just assume that bufferspace is available */
   cache->bufferspace += bcount;
   if (cache->bufferspace > cache->stat.maxbufferspace) {
      cache->stat.maxbufferspace = cache->bufferspace;
   }

   flushdesc = (struct cache_dev_event *) getfromextraq();
   flushdesc->type = CACHE_EVENT_IDLEFLUSH_READ;
   flushreq = (ioreq_event *) getfromextraq();
   flushreq->buf = flushdesc;
   flushreq->devno = cache->cache_devno;
   flushreq->blkno = blkno;
   flushreq->bcount = bcount;
   flushreq->type = IO_ACCESS_ARRIVE;
   flushreq->flags = READ;
   (*cache->issuefunc)(cache->issueparam, flushreq);
   cache->stat.destagereads++;
   cache->stat.destagereadblocks += bcount;
}
Example #2
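/* logorg_shadowed_get_short_queue: for a shadowed (mirrored) organization,
 * select the copy of the requested block whose device queue is shortest.
 * Ties are broken either by a uniform random draw (def == 1) or by handing
 * the tied candidates to logorg_shadowed_get_short_dist (def == 2). */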
static int logorg_shadowed_get_short_queue (logorg *currlogorg, ioreq_event *curr, int def)
{
   int i, j;
   int len;
   int shortdev = -1;
   int shortlen = 999999999;
   int ties[MAXCOPIES];

   j = -1;
   for (i = 0; i < currlogorg->copies; i++) {
      len = ioqueue_get_number_in_queue(currlogorg->devs[(curr->devno + (i * currlogorg->numdisks))].queue);
      if (len >= 999999999) {
	 fprintf(stderr, "Haven't allowed for large enough 'dist's in logorg_shadowed_get_short_queue - %d\n", len);
	 exit(1);
      }
      if (len == shortlen) {
	 j++;
	 if (j >= MAXCOPIES) {
	    fprintf(stderr, "Haven't allowed for enough ties in logorg_shadowed_get_short_queue - %d\n", j);
	    exit(1);
	 }
	 ties[j] = i;
      }
      if (len < shortlen) {
	 shortlen = len;
	 shortdev = i;
	 j = -1;
      }
   }
   if (shortdev == -1) {
      fprintf(stderr, "Illegal condition in logorg_shadowed_get_short_queue\n");
      exit(1);
   } else if (j != -1) {
      if (def == 1) {
         i = (int) (DISKSIM_drand48() * (double) (j+2));
         if (i != 0) {
	    shortdev = ties[(i-1)];
         }
      } else if (def == 2) {
	 j++;
	 ties[j] = shortdev;
	 shortdev = logorg_shadowed_get_short_dist(currlogorg, curr, (j+1), ties);
      } else {
	 fprintf(stderr, "Unknown default tie breaker in logorg_shadowed_get_short_queue - %d\n", def);
	 exit(1);
      }
   }
   return(shortdev);
}
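For reference, the def == 1 branch above breaks ties with a single uniform draw over the j+2 candidates: the current shortdev plus the j+1 copies recorded in ties[0..j]. A minimal standalone sketch of that draw is shown below; pick_tied_copy is a hypothetical helper, not part of DiskSim, and plain drand48() stands in for DISKSIM_drand48().

#include <stdlib.h>

/* Uniformly choose among the current best copy and the recorded ties.
 * drand48() returns a value in [0,1), so i is uniform over 0 .. j+1. */
static int pick_tied_copy (int shortdev, int *ties, int j)
{
   int i = (int) (drand48() * (double) (j + 2));
   return (i == 0) ? shortdev : ties[i - 1];
}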
Example #3
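/* iodriver_request: entry point for a newly arriving I/O request.  The
 * request is logged, added to the overall tracking queue, mapped through
 * the logical organizations, and each resulting sub-request is handed to
 * the per-device queueing layer; for STANDALONE drivers, sub-requests that
 * become ready are posted directly to the internal event queue. */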
event * iodriver_request (int iodriverno, ioreq_event *curr)
{
   ioreq_event *temp = NULL;
   ioreq_event *ret = NULL;
   ioreq_event *retlist = NULL;
   int numreqs;
/*
   printf ("Entered iodriver_request - simtime %f, devno %d, blkno %lld, cause %d\n", simtime, curr->devno, curr->blkno, curr->cause);
   fprintf (outputfile, "Entered iodriver_request - simtime %f, devno %d, blkno %lld, cause %d\n", simtime, curr->devno, curr->blkno, curr->cause);
   fprintf (stderr, "Entered iodriver_request - simtime %f, devno %d, blkno %lld, cause %d\n", simtime, curr->devno, curr->blkno, curr->cause);
*/
   if (NULL != OUTIOS) {
      fprintf(OUTIOS, "%.6f,%d,%lld,%d,%x,%d,%p\n", simtime, curr->devno, curr->blkno, curr->bcount, curr->flags, OVERALLQUEUE->base.listlen + 1, curr );
      fflush( OUTIOS );
   }

#if 0
   fprintf (stderr, "Entered iodriver_request - simtime %f, devno %d, blkno %lld, cause %d\n", simtime, curr->devno, curr->blkno, curr->cause);
#endif

   /* add to the overall queue to start tracking */
   ret = ioreq_copy (curr);
   ioqueue_add_new_request (OVERALLQUEUE, ret);
   ret = NULL;
 
   disksim->totalreqs++;
   if ((disksim->checkpoint_iocnt > 0) && ((disksim->totalreqs % disksim->checkpoint_iocnt) == 0)) {
      disksim_register_checkpoint (simtime);
   }
   if (disksim->totalreqs == disksim->warmup_iocnt) {
      warmuptime = simtime;
      resetstats();
   }
   numreqs = logorg_maprequest(sysorgs, numsysorgs, curr);
   temp = curr->next;
   for (; numreqs>0; numreqs--) {
          /* Request list size must match numreqs */
      ASSERT(curr != NULL);
      curr->next = NULL;
      if ((iodrivers[iodriverno]->consttime == IODRIVER_TRACED_QUEUE_TIMES) || (iodrivers[iodriverno]->consttime == IODRIVER_TRACED_BOTH_TIMES)) {
         ret = ioreq_copy(curr);
         ret->time = simtime + (double) ret->tempint1 / (double) 1000;
         ret->type = IO_TRACE_REQUEST_START;
         addtointq((event *) ret);
         ret = NULL;
         if ((curr->slotno == 1) && (ioqueue_get_number_in_queue(iodrivers[iodriverno]->devices[(curr->devno)].queue) == 0)) {
            iodrivers[(iodriverno)]->devices[(curr->devno)].flag = 2;
            iodrivers[(iodriverno)]->devices[(curr->devno)].lastevent = simtime;
         }
      }
      ret = handle_new_request(iodrivers[iodriverno], curr);

      if ((ret) && (iodrivers[iodriverno]->type == STANDALONE) && (ret->time == 0.0)) {
         ret->type = IO_ACCESS_ARRIVE;
         ret->time = simtime;
         iodriver_schedule(iodriverno, ret);
      } else if (ret) {
         ret->type = IO_ACCESS_ARRIVE;
         ret->next = retlist;
         ret->prev = NULL;
         retlist = ret;
      }
      curr = temp;
      temp = (temp) ? temp->next : NULL;
   }
   if (iodrivers[iodriverno]->type == STANDALONE) {
      while (retlist) {
         ret = retlist;
         retlist = ret->next;
         ret->next = NULL;
         ret->time += simtime;
         addtointq((event *) ret);
      }
   }
/*
fprintf (outputfile, "leaving iodriver_request: retlist %p\n", retlist);
*/
   return((event *) retlist);
}
Example #4
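/* iodriver_access_complete: handles completion of a physical device access.
 * It matches the completion against any oversized (split) request tracked
 * by the controller, updates per-driver and overall statistics, notifies
 * upper layers of fully completed logical requests, and schedules the next
 * request waiting in the device queue. */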
void iodriver_access_complete (int iodriverno, intr_event *intrp)
{
   int i;
   int numreqs;
   ioreq_event *tmp;
   ioreq_event *del;
   ioreq_event *req;
   int devno;
   int skip = 0;
   ctlr *ctl = NULL;
   time_t now;

   if (iodrivers[iodriverno]->type == STANDALONE) {
      req = ioreq_copy((ioreq_event *) intrp->infoptr);
   } 
   else {
      req = (ioreq_event *) intrp->infoptr;
   }

#ifdef DEBUG_IODRIVER
   fprintf (outputfile, "*** %f: iodriver_access_complete - devno %d, blkno %lld, bcount %d, read %d\n", simtime, req->devno, req->blkno, req->bcount, (req->flags & READ));
   fflush(outputfile);
#endif

   time( & now );
   disksim_exectrace( "Request completion: simtime %f, devno %d, blkno %lld, bcount %d, flags %X, time %s\n", simtime, req->devno, req->blkno, req->bcount, req->flags, asctime( localtime(& now)) );

   if (iodrivers[iodriverno]->devices[(req->devno)].queuectlr != -1) 
   {
      int ctlrno = iodrivers[iodriverno]->devices[(req->devno)].queuectlr;
      ctl = &iodrivers[iodriverno]->ctlrs[ctlrno];
      tmp = ctl->oversized;
      numreqs = 1;


      while (((numreqs) || (tmp != ctl->oversized)) 
	     && (tmp) 
	     && (tmp->next) 
	     && ((tmp->next->devno != req->devno) 
		 || (tmp->next->opid != req->opid) 
		 || (req->blkno < tmp->next->blkno) 
		 || (req->blkno >= (tmp->next->blkno + tmp->next->bcount)))) 
      {
	
	// fprintf (outputfile, "oversized request in list: opid %d, blkno %lld, bcount %d\n", tmp->opid, tmp->blkno, tmp->bcount);
	
	numreqs = 0;
	tmp = tmp->next;
      }


      if ((tmp) 
	  && (tmp->next) 
	  && (tmp->next->devno == req->devno) 
	  && (tmp->next->opid == req->opid) 
	  && (req->blkno >= tmp->next->blkno) 
	  && (req->blkno < (tmp->next->blkno + tmp->next->bcount))) 
      {
	fprintf (outputfile, "%f, part of oversized request completed: opid %d, blkno %lld, bcount %d, maxreqsize %d\n", simtime, req->opid, req->blkno, req->bcount, ctl->maxreqsize);

	if ((req->blkno + ctl->maxreqsize) < (tmp->next->blkno + tmp->next->bcount)) 
	{
	  fprintf (outputfile, "more to go\n");
	  req->blkno += ctl->maxreqsize;
	  req->bcount = min(ctl->maxreqsize, 
			    (tmp->next->blkno + tmp->next->bcount - req->blkno));
	  goto schedule_next;
	} 
	else {
	  fprintf (outputfile, "done for real\n");

	  addtoextraq((event *) req);
	  req = tmp->next;
	  tmp->next = tmp->next->next;

	  if (ctl->oversized == req) {
	    ctl->oversized = (req != req->next) ? req->next : NULL;
	  }

	  req->next = NULL;
	}
      }
   }

   devno = req->devno;
   req = ioqueue_physical_access_done(iodrivers[iodriverno]->devices[devno].queue, req);

   if (ctl) {
      ctl->numoutstanding--;
   }

   // special case for validate:
   if (disksim->traceformat == VALIDATE) {
      tmp = (ioreq_event *) getfromextraq();
      io_validate_do_stats1();
      tmp = iotrace_validate_get_ioreq_event(disksim->iotracefile, tmp);
      if (tmp) {
         io_validate_do_stats2(tmp);
         tmp->type = IO_REQUEST_ARRIVE;
         addtointq((event *) tmp);

	 disksim_exectrace("Request issue: simtime %f, devno %d, blkno %lld, time %f\n",
			   simtime, tmp->devno, tmp->blkno, tmp->time);
      } 
      else {
         disksim_simstop();
      }
  } 
   else if (disksim->closedios) {
      tmp = (ioreq_event *) io_get_next_external_event(disksim->iotracefile);
      if (tmp) {
         io_using_external_event ((event *)tmp);
         tmp->time = simtime + disksim->closedthinktime;
         tmp->type = IO_REQUEST_ARRIVE;
         addtointq((event *) tmp);
      } else {
         disksim_simstop();
      }
   }

   while (req) {
      tmp = req;
      req = req->next;
      tmp->next = NULL;
      update_iodriver_statistics();
      if ((numreqs = logorg_mapcomplete(sysorgs, numsysorgs, tmp)) == COMPLETE) {

	/* update the overall I/O system stats for this completed request */
	ioreq_event *temp = ioqueue_get_specific_request (OVERALLQUEUE, tmp);
	ioreq_event *temp2 = ioqueue_physical_access_done (OVERALLQUEUE, temp);
	ASSERT (temp2 != NULL);
	addtoextraq((event *)temp);
	temp = NULL;
	
	if (iodrivers[iodriverno]->type != STANDALONE) {
	  iodriver_add_to_intrp_eventlist(intrp, 
					  io_done_notify(tmp), 
					  iodrivers[iodriverno]->scale);
         } else {
            io_done_notify (tmp);
         }
      } 
      else if (numreqs > 0) {
	for (i = 0; i < numreqs; i++) {
	  del = tmp->next;
	  tmp->next = del->next;
	  del->next = NULL;
	  del->type = IO_REQUEST_ARRIVE;
	  del->flags |= MAPPED;
	  skip |= (del->devno == devno);

	  if (iodrivers[iodriverno]->type == STANDALONE) 
	  {
	    del->time += simtime + 0.0000000001; /* tiny offset to effect an ordering */
	    addtointq((event *) del);
	  } 
	  else {
	    iodriver_add_to_intrp_eventlist(intrp, 
					    (event *) del, 
					    iodrivers[iodriverno]->scale);
	  }
	}
      }
      addtoextraq((event *) tmp);
   }

   if ((iodrivers[iodriverno]->consttime == IODRIVER_TRACED_QUEUE_TIMES) 
       || (iodrivers[iodriverno]->consttime == IODRIVER_TRACED_BOTH_TIMES)) 
   {
     if (ioqueue_get_number_in_queue(iodrivers[iodriverno]->devices[devno].queue) > 0) 
     {
       iodrivers[iodriverno]->devices[devno].flag = 1;
       iodrivers[iodriverno]->devices[devno].lastevent = simtime;
     }
     return;
   }
   if (skip) {
     return;
   }

   // fprintf(outputfile, "iodriver_access_complete::  calling ioqueue_get_next_request\n");
   req = ioqueue_get_next_request(iodrivers[iodriverno]->devices[devno].queue);
   // fprintf (outputfile, "next scheduled: req %p, req->blkno %d, req->flags %x\n", req, ((req) ? req->blkno : 0), ((req) ? req->flags : 0));

schedule_next:
   if (req) {
      req->type = IO_ACCESS_ARRIVE;
      req->next = NULL;

      if (ctl) {
         ctl->numoutstanding++;
      }

      if (iodrivers[iodriverno]->type == STANDALONE) {
         req->time = simtime;
         addtointq((event *) req);
      } 
      else {
         iodriver_add_to_intrp_eventlist(intrp, 
					 (event *) req, 
					 iodrivers[iodriverno]->scale);
      }
   }
}
Example #5
File: ssd.c Project: vishnu89/gijoe
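/* ssd_activate_elem: activates one flash element of the SSD.  If the
 * element's media is idle, it optionally invokes cleaning, then drains
 * pending requests from the element queue into separate read and write
 * batches (up to max_reqs each), computes their access times (reads first,
 * then writes), and posts a DEVICE_ACCESS_COMPLETE event for each. */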
static void ssd_activate_elem(ssd_t *currdisk, int elem_num)
{
    ioreq_event *req;
    ssd_req **read_reqs;
    ssd_req **write_reqs;
    int i;
    int read_total = 0;
    int write_total = 0;
    double schtime = 0;
    int max_reqs;
    int tot_reqs_issued;
    double max_time_taken = 0;


    ssd_element *elem = &currdisk->elements[elem_num];

    // if the media is busy, we can't do anything, so return
    if (elem->media_busy == TRUE) {
        return;
    }

    ASSERT(ioqueue_get_reqoutstanding(elem->queue) == 0);

    // we can invoke cleaning in the background whether or not
    // there are requests waiting
    if (currdisk->params.cleaning_in_background) {
        // if cleaning was invoked, wait until
        // it is over ...
        if (ssd_invoke_element_cleaning(elem_num, currdisk)) {
            return;
        }
    }

    ASSERT(elem->metadata.reqs_waiting == ioqueue_get_number_in_queue(elem->queue));

    if (elem->metadata.reqs_waiting > 0) {

        // invoke cleaning in foreground when there are requests waiting
        if (!currdisk->params.cleaning_in_background) {
            // if cleaning was invoked, wait until
            // it is over ...
            if (ssd_invoke_element_cleaning(elem_num, currdisk)) {
                return;
            }
        }

        // how many reqs can we issue at once
        if (currdisk->params.copy_back == SSD_COPY_BACK_DISABLE) {
            max_reqs = 1;
        } else {
            if (currdisk->params.num_parunits == 1) {
                max_reqs = 1;
            } else {
                max_reqs = MAX_REQS_ELEM_QUEUE;
            }
        }

        // ideally, we should issue one req per plane, overlapping them all.
        // in order to simplify the overlapping strategy, let's issue
        // requests of the same type together.

        read_reqs = (ssd_req **) malloc(max_reqs * sizeof(ssd_req *));
        write_reqs = (ssd_req **) malloc(max_reqs * sizeof(ssd_req *));

        // collect the requests
        while ((req = ioqueue_get_next_request(elem->queue)) != NULL) {
            int found = 0;

            elem->metadata.reqs_waiting --;

            // see if we already have the same request in the list.
            // this usually doesn't happen -- but on synthetic traces
            // this weird case can occur.
            if (req->flags & READ) {
                found = ssd_already_present(read_reqs, read_total, req);
            } else {
                found = ssd_already_present(write_reqs, write_total, req);
            }

            if (!found) {
                // this is a valid request
                ssd_req *r = malloc(sizeof(ssd_req));
                r->blk = req->blkno;
                r->count = req->bcount;
                r->is_read = req->flags & READ;
                r->org_req = req;
                r->plane_num = -1; // we don't yet know which plane this req will be directed to

                if (req->flags & READ) {
                    read_reqs[read_total] = r;
                    read_total ++;
                } else {
                    write_reqs[write_total] = r;
                    write_total ++;
                }

                // if we have more reqs than we can handle, quit
                if ((read_total >= max_reqs) ||
                    (write_total >= max_reqs)) {
                    break;
                }
            } else {
                // duplicate request -- complete it immediately rather than issuing it again
                stat_update (&currdisk->stat.acctimestats, 0);
                req->time = simtime;
                req->ssd_elem_num = elem_num;
                req->type = DEVICE_ACCESS_COMPLETE;
                addtointq ((event *)req);
            }
        }

        if (read_total > 0) {
            // first issue all the read requests (it doesn't matter which type
            // goes first); reads are chosen because they are mostly synchronous.
            // find the time taken to serve these requests.
            ssd_compute_access_time(currdisk, elem_num, read_reqs, read_total);

            // add an event for each request completion
            for (i = 0; i < read_total; i ++) {
              elem->media_busy = TRUE;

              // find the maximum time taken by a request
              if (schtime < read_reqs[i]->schtime) {
                  schtime = read_reqs[i]->schtime;
              }

              stat_update (&currdisk->stat.acctimestats, read_reqs[i]->acctime);
              read_reqs[i]->org_req->time = simtime + read_reqs[i]->schtime;
              read_reqs[i]->org_req->ssd_elem_num = elem_num;
              read_reqs[i]->org_req->type = DEVICE_ACCESS_COMPLETE;

              //printf("R: blk %d elem %d acctime %f simtime %f\n", read_reqs[i]->blk,
                //  elem_num, read_reqs[i]->acctime, read_reqs[i]->org_req->time);

              addtointq ((event *)read_reqs[i]->org_req);
              free(read_reqs[i]);
            }
        }

        free(read_reqs);

        max_time_taken = schtime;

        if (write_total > 0) {
            // next issue the write requests
            ssd_compute_access_time(currdisk, elem_num, write_reqs, write_total);

            // add an event for each request completion.
            // note that we can issue the writes only after all the reads above
            // are over, so include the maximum read time when creating the event.
            for (i = 0; i < write_total; i ++) {
              elem->media_busy = TRUE;

              stat_update (&currdisk->stat.acctimestats, write_reqs[i]->acctime);
              write_reqs[i]->org_req->time = simtime + schtime + write_reqs[i]->schtime;
              //printf("blk %d elem %d acc time %f\n", write_reqs[i]->blk, elem_num, write_reqs[i]->acctime);

              if (max_time_taken < (schtime+write_reqs[i]->schtime)) {
                  max_time_taken = (schtime+write_reqs[i]->schtime);
              }

              write_reqs[i]->org_req->ssd_elem_num = elem_num;
              write_reqs[i]->org_req->type = DEVICE_ACCESS_COMPLETE;
              //printf("W: blk %d elem %d acctime %f simtime %f\n", write_reqs[i]->blk,
                //  elem_num, write_reqs[i]->acctime, write_reqs[i]->org_req->time);

              addtointq ((event *)write_reqs[i]->org_req);
              free(write_reqs[i]);
            }
        }

        free(write_reqs);

        // statistics
        tot_reqs_issued = read_total + write_total;
        ASSERT(tot_reqs_issued > 0);
        currdisk->elements[elem_num].stat.tot_reqs_issued += tot_reqs_issued;
        currdisk->elements[elem_num].stat.tot_time_taken += max_time_taken;
    }
}