Example #1
void iodriver_trace_request_start (int iodriverno, ioreq_event *curr)
{
   ioreq_event *tmp;
   device *currdev = &iodrivers[iodriverno]->devices[(curr->devno)];
   double tdiff = simtime - currdev->lastevent;

   if (currdev->flag == 1) {
      stat_update(&initiatenextstats, tdiff);
   } else if (currdev->flag == 2) {
      stat_update(&emptyqueuestats, tdiff);
   }
   currdev->flag = 0;

   tmp = ioqueue_get_specific_request(currdev->queue, curr);
   addtoextraq((event *) curr);
   ASSERT(tmp != NULL);

   schedule_disk_access(iodrivers[iodriverno], tmp);
   tmp->time = simtime;
   tmp->type = IO_ACCESS_ARRIVE;
   tmp->slotno = 0;
   if (tmp->time == simtime) {
      iodriver_schedule(iodriverno, tmp);
   } else {
      addtointq((event *) tmp);
   }
}
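
Across the DiskSim-derived examples on this page, stat_update takes a pointer to a statistics object and one double-valued sample (a queueing delay, an access time, and so on) and folds it into running statistics. As a rough illustration of that calling convention only, here is a minimal accumulator; the struct, its fields, and the helper name below are hypothetical and are not the actual DiskSim statgen implementation.

/* Hypothetical sketch: an accumulator in the spirit of the two-argument
 * stat_update(statgen *, double) calls in the DiskSim examples.  None of
 * these names come from DiskSim itself. */
typedef struct {
   long   count;
   double sum;
   double min;
   double max;
} simple_stat;

static void simple_stat_update (simple_stat *s, double value)
{
   if (s->count == 0 || value < s->min) s->min = value;
   if (s->count == 0 || value > s->max) s->max = value;
   s->sum += value;
   s->count++;
}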
Example #2
static int
kserver_connection_drop(struct sock *sk)
{
	stat_update(READ_SZ);

	return 0;
}
Example #3
static void simpledisk_bustransfer_complete (ioreq_event *curr)
{
   simpledisk_t *currdisk;

   // fprintf (outputfile, "Entering simpledisk_bustransfer_complete for disk %d: %12.6f\n", curr->devno, simtime);

   currdisk = getsimpledisk (curr->devno);

   if (curr->flags & READ) {
      simpledisk_request_complete (curr);

   } else {
      simpledisk_t *currdisk = getsimpledisk (curr->devno);

      if (currdisk->neverdisconnect == FALSE) {
         /* disconnect from bus */
         ioreq_event *tmp = ioreq_copy (curr);
         tmp->type = IO_INTERRUPT_ARRIVE;
         tmp->cause = DISCONNECT;
         simpledisk_send_event_up_path (tmp, currdisk->bus_transaction_latency);
      }

      /* do media access */
      currdisk->media_busy = TRUE;
      stat_update (&currdisk->stat.acctimestats, currdisk->acctime);
      curr->time = simtime + currdisk->acctime;
      curr->type = DEVICE_ACCESS_COMPLETE;
      addtointq ((event *) curr);
   }
}
Example #4
static void trystart(void)
{
	pid_t pid;
fprintf(stderr, "start\n"); // FIXME path

	pid = fork();
	if(pid < 0) {
		fprintf(stderr, "can't fork child process, sleeping 60 seconds: %s\n", strerror(errno)); // FIXME path
		sleep(60);
		return;
	} else if(pid == 0) {
		sig_uncatch(SIGCHLD);
		sig_unblock(SIGCHLD);

		if(opt_func) {
			(*opt_func)(opt_func_data);
			exit(0);
		} else {
			execvp(opt_argv[0], opt_argv);
			fprintf(stderr, "can't execute %s: %s\n", opt_argv[0], strerror(errno)); // FIXME path
			exit(127);
		}
	}

	sleep(1);

	g_pid = pid;
	g_flagpaused = 0;

	stat_pidchange();
	stat_update();
}
Example #5
static void simpledisk_completion_done (ioreq_event *curr)
{
   simpledisk_t *currdisk = getsimpledisk (curr->devno);

   // fprintf (outputfile, "Entering simpledisk_completion for disk %d: %12.6f\n", currdisk->devno, simtime);

   addtoextraq((event *) curr);

   if (currdisk->busowned != -1) {
      bus_ownership_release(currdisk->busowned);
      currdisk->busowned = -1;
   }

   /* check for and start next queued request, if any */
   curr = ioqueue_get_next_request(currdisk->queue);
   if (curr != NULL) {
      ASSERT (currdisk->media_busy == FALSE);
      if (curr->flags & READ) {
         currdisk->media_busy = TRUE;
         stat_update (&currdisk->stat.acctimestats, currdisk->acctime);
         curr->time = simtime + currdisk->acctime;
         curr->type = DEVICE_ACCESS_COMPLETE;
         addtointq ((event *)curr);

      } else {
         curr->type = IO_INTERRUPT_ARRIVE;
         curr->cause = RECONNECT;
         simpledisk_send_event_up_path (curr, currdisk->bus_transaction_latency);
         currdisk->reconnect_reason = IO_INTERRUPT_ARRIVE;
      }
   }
}
Example #6
void io_map_trace_request (ioreq_event *temp)
{
   int i;

   for (i=0; i<tracemappings; i++) {
      if (temp->devno == tracemap[i]) {
#ifdef DEBUG_IOSIM
		 fprintf (outputfile, "*** %f: io_map_trace_request mapping devno number %d to %d\n", simtime, temp->devno, tracemap1[i] );
#endif
		 temp->devno = tracemap1[i];
		 if (tracemap2[i]) {
			if (tracemap2[i] < 1) {
			   temp->blkno *= -tracemap2[i];
			} else {
			   if (temp->blkno % tracemap2[i]) {
				  fprintf(stderr, "Small sector size disk using odd sector number: %lld\n", temp->blkno);
				  exit(1);
			   }
#ifdef DEBUG_IOSIM
			   fprintf (outputfile, "*** %f: io_map_trace_request mapping block number %lld to %lld\n", simtime, temp->blkno, (temp->blkno / tracemap2[i]));
#endif
			   temp->blkno /= tracemap2[i];
			}
		 }
		 temp->bcount *= tracemap3[i];
		 temp->blkno += tracemap4[i];
		 if (tracestats) {
			stat_update(&tracestats[i], ((double) temp->tempint1 / (double) 1000));
			stat_update(&tracestats1[i],((double) (temp->tempint1 + temp->tempint2) / (double) 1000));
			stat_update(&tracestats2[i],((double) temp->tempint2 / (double) 1000));
			stat_update(&tracestats3[i], (double) temp->slotno);
			if (temp->slotno == 1) {
			   stat_update(&tracestats4[i], ((double) temp->tempint1 / (double) 1000));
			}
		 }
		 return;
	  }
   }
/*
   fprintf(stderr, "Requested device not mapped - %x\n", temp->devno);
   exit(1);
*/
}
Example #7
void io_validate_do_stats2 (ioreq_event *new_event)
{
   stat_update(tracestats2, validate_lastserv);
   if (new_event->flags == WRITE) {
      stat_update(tracestats4, validate_lastserv);
   }
   if (strcmp(validate_buffaction, "Doub") == 0) {
      validatebuf[0]++;
   } else if (strcmp(validate_buffaction, "Trip") == 0) {
      validatebuf[1]++;
   } else if (strcmp(validate_buffaction, "Miss") == 0) {
      validatebuf[2]++;
   } else if (strcmp(validate_buffaction, "Hit") == 0) {
      validatebuf[3]++;
   } else {
      fprintf(stderr, "Unrecognized buffaction in validate trace: %s\n", validate_buffaction);
      exit(1);
   }
}
Example #8
/*
 * Just do some useless work.
 */
static int
kserver_read(struct sock *sk, unsigned char *data, size_t len)
{
	int i;
	for (i = 0; i < len / 4; ++i)
		g_counter += data[i];

	stat_update(len);

	return 0;
}
Example #9
static void simpledisk_request_arrive (ioreq_event *curr)
{
   ioreq_event *intrp;
   simpledisk_t *currdisk;

#ifdef DEBUG_SIMPLEDISK
   fprintf (outputfile, "*** %f: simpledisk_request_arrive - devno %d, blkno %d, bcount %d, flags 0x%x\n", simtime, curr->devno, curr->blkno, curr->bcount, curr->flags );
#endif

   currdisk = getsimpledisk(curr->devno);

   /* verify that request is valid. */
   if ((curr->blkno < 0) || (curr->bcount <= 0) ||
       ((curr->blkno + curr->bcount) > currdisk->numblocks)) {
      fprintf(stderr, "Invalid set of blocks requested from simpledisk - blkno %lld, bcount %d, numblocks %lld\n", curr->blkno, curr->bcount, currdisk->numblocks);
      exit(1);
   }

   /* create a new request, set it up for initial interrupt */
   currdisk->busowned = simpledisk_get_busno(curr);

   if (ioqueue_get_reqoutstanding (currdisk->queue) == 0) {
      ioqueue_add_new_request(currdisk->queue, curr);
      curr = ioqueue_get_next_request (currdisk->queue);
      intrp = curr;

      /* initiate media access if request is a READ */
      if (curr->flags & READ) {
         ioreq_event *tmp = ioreq_copy (curr);
         currdisk->media_busy = TRUE;
         stat_update (&currdisk->stat.acctimestats, currdisk->acctime);
         tmp->time = simtime + currdisk->acctime;
         tmp->type = DEVICE_ACCESS_COMPLETE;
         addtointq ((event *)tmp);
      }

      /* if not disconnecting, then the READY_TO_TRANSFER is like a RECONNECT */
      currdisk->reconnect_reason = IO_INTERRUPT_ARRIVE;
      if (curr->flags & READ) {
         intrp->cause = (currdisk->neverdisconnect) ? READY_TO_TRANSFER : DISCONNECT;
      } else {
         intrp->cause = READY_TO_TRANSFER;
      }

   } else {
      intrp = ioreq_copy(curr);
      ioqueue_add_new_request(currdisk->queue, curr);
      intrp->cause = DISCONNECT;
   }

   intrp->type = IO_INTERRUPT_ARRIVE;
   simpledisk_send_event_up_path(intrp, currdisk->bus_transaction_latency);
}
Example #10
File: ssd_gang.c Project: vishnu89/gijoe
static int ssd_invoke_gang_cleaning(int gang_num, ssd_t *s)
{
    int i;
    int elem_num;
    double max_cost = 0;
    double elem_clean_cost;
    int cleaning_invoked = 0;
    gang_metadata *g = &s->gang_meta[gang_num];

    // all the elements in the gang must be free
    ASSERT(g->busy == FALSE);
    ASSERT(g->cleaning == FALSE);

    // invoke cleaning on all the elements
    for (i = 0; i < s->params.elements_per_gang; i ++) {
        elem_num = gang_num * s->params.elements_per_gang + i;
        elem_clean_cost = _ssd_invoke_element_cleaning(elem_num, s);

        // stat
        s->elements[elem_num].stat.tot_clean_time += max_cost;

        if (max_cost < elem_clean_cost) {
            max_cost = elem_clean_cost;
        }
    }

    // cleaning was invoked on all the elements. we can start
    // the next operation on this gang only after the cleaning
    // gets over on all the elements.
    if (max_cost > 0) {
        ioreq_event *tmp;

        g->busy = 1;
        g->cleaning = 1;
        cleaning_invoked = 1;

        // we use the 'blkno' field to store the gang number
        tmp = (ioreq_event *)getfromextraq();
        tmp->devno = s->devno;
        tmp->time = simtime + max_cost;
        tmp->blkno = gang_num;
        tmp->ssd_gang_num = gang_num;
        tmp->type = SSD_CLEAN_GANG;
        tmp->flags = SSD_CLEAN_GANG;
        tmp->busno = -1;
        tmp->bcount = -1;
        stat_update (&s->stat.acctimestats, max_cost);
        addtointq ((event *)tmp);
    }

    return cleaning_invoked;
}
Example #11
void io_validate_do_stats1 ()
{
   int i;

   if (tracestats2 == NULL) {
      tracestats2 = (statgen *)DISKSIM_malloc(sizeof(statgen));
      tracestats3 = (statgen *)DISKSIM_malloc(sizeof(statgen));
      tracestats4 = (statgen *)DISKSIM_malloc(sizeof(statgen));
      tracestats5 = (statgen *)DISKSIM_malloc(sizeof(statgen));

      stat_initialize(statdeffile, statdesc_traceaccstats, tracestats2);
      stat_initialize(statdeffile, statdesc_traceaccdiffstats, tracestats3);
      stat_initialize(statdeffile, statdesc_traceaccwritestats, tracestats4);
      stat_initialize(statdeffile, statdesc_traceaccdiffwritestats, tracestats5);
      for (i=0; i<10; i++) {
	 validatebuf[i] = 0;
      }
   } else {
      stat_update(tracestats3, (validate_lastserv - disksim->lastphystime));
      if (!validate_lastread) {
         stat_update(tracestats5, (validate_lastserv - disksim->lastphystime));
      }
   }
}
Example #12
File: ssd.c Project: ESOS-Lab/EnergySim
static int ssd_invoke_element_cleaning(int elem_num, ssd_t *s)
{
    double max_cost = 0;
    int cleaning_invoked = 0;
    ssd_element *elem = &s->elements[elem_num];

    // element must be free
    ASSERT(elem->media_busy == FALSE);

    max_cost = _ssd_invoke_element_cleaning(elem_num, s);

    // cleaning was invoked on this element. we can start
    // the next operation on this elem only after the cleaning
    // gets over.
    if (max_cost > 0) {
        ioreq_event *tmp;

        elem->media_busy = 1;
        cleaning_invoked = 1;

        // we use the 'blkno' field to store the element number
        tmp = (ioreq_event *)getfromextraq();
        tmp->devno = s->devno;
        tmp->time = simtime + max_cost;
        tmp->blkno = elem_num;
        tmp->ssd_elem_num = elem_num;
        tmp->type = SSD_CLEAN_ELEMENT;
        tmp->flags = SSD_CLEAN_ELEMENT;
        tmp->busno = -1;
        tmp->bcount = -1;
        stat_update (&s->stat.acctimestats, max_cost);
        addtointq ((event *)tmp);

        // stat
        elem->stat.tot_clean_time += max_cost;
        elem->power_stat.acc_time += max_cost;
        ssd_dpower(s, max_cost);
    }

    return cleaning_invoked;
}
Example #13
static int
kserver_connection_new(struct sock *sock)
{
	int ci;

	BUG_ON(!sock->sk_user_data);

	/* TODO Typically we should allocate a new connection here. */

	/* Write the socket to free it as module exit. */
	ci = atomic_inc_return(&conn_i);
	if (ci < MAX_CONN) {
		conn[ci] = sock;
	} else {
		printk(KERN_ERR "Too many connections!\n");
	}

	stat_update(READ_SZ);

	return 0;
}
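
Examples #2, #8, and #13 come from a kernel-module benchmark in which stat_update takes a single byte count (READ_SZ or len) rather than a statistics object. Purely as a sketch of that second calling convention, the snippet below just accumulates calls and bytes; the structure, the global, and the function name are assumptions, not the module's real code.

/* Hypothetical sketch of a size-argument stat_update(): count calls and
 * total bytes.  Names are illustrative only; a real kernel module would
 * likely use per-CPU or atomic counters instead of a plain global. */
#include <stddef.h>

struct byte_stat {
	unsigned long calls;
	unsigned long long bytes;
};

static struct byte_stat g_byte_stat;

static void byte_stat_update(size_t len)
{
	g_byte_stat.calls++;
	g_byte_stat.bytes += len;
}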
Example #14
void ssd_invoke_element_refresh_fcfs(int elem_num,listnode *blocks_to_refresh,ssd_t *currdisk)
{
  int block_len = 0;
  int i;
  double cost = 0;

  block_len = ll_get_size(blocks_to_refresh);
  block_metadata *currblock = 0;
  listnode *currnode = 0;
  //for all blocks in every element.

  for(i=0;i<block_len;i++) {
    currnode = ll_get_nth_node(blocks_to_refresh,i);
    currblock = (block_metadata*)currnode->data;
    ASSERT(currblock->elem_num  == elem_num);
    cost+= ssd_refresh_block(currblock,currdisk); //sum every cost, because we are not applying refresh in batches
  }

  ssd_element *elem = &currdisk->elements[elem_num];
  if (cost > 0) {
      ioreq_event *tmp;
      elem->media_busy = TRUE;

      // we use the 'blkno' field to store the element number
      tmp = (ioreq_event *)getfromextraq();
      tmp->devno = currdisk->devno;
      tmp->time = simtime + cost;
      tmp->blkno = elem_num;
      tmp->ssd_elem_num = elem_num;
      tmp->type = SSD_REFRESH_ELEMENT;
      tmp->flags = SSD_REFRESH_ELEMENT;
      tmp->busno = -1;
      tmp->bcount = -1;
      stat_update (&currdisk->stat.acctimestats, cost);
      addtointq ((event *)tmp);
      // stat
      elem->stat.tot_refresh_time += cost;
  }
}
Example #15
File: ssd_gang.c Project: vishnu89/gijoe
/*
 * collects 1 request from each chip in the gang
 */
static void ssd_collect_req_in_gang
(ssd_t *s, int gang_num, ssd_req ***rd_q, ssd_req ***wr_q, int *rd_total, int *wr_total)
{
    int i;
    int start;
    gang_metadata *g;

    g = &s->gang_meta[gang_num];

    // start from the first element of the gang
    start = gang_num * s->params.elements_per_gang;
    i = start;

    *rd_total = 0;
    *wr_total = 0;

    do {
        ssd_element *elem;
        ioreq_event *req;
        int tot_rd_reqs;
        int tot_wr_reqs;
        int j;

        elem = &s->elements[i];
        ASSERT(ioqueue_get_reqoutstanding(elem->queue) == 0);
        j = i % s->params.elements_per_gang;

        // collect the requests
        tot_rd_reqs = 0;
        tot_wr_reqs = 0;
        if ((req = ioqueue_get_next_request(elem->queue)) != NULL) {
            int found;

            if (req->flags & READ) {
                found = ssd_already_present(rd_q[j], tot_rd_reqs, req);
            } else {
                found = ssd_already_present(wr_q[j], tot_wr_reqs, req);
            }

            if (!found) {
                // this is a valid request
                ssd_req *r = malloc(sizeof(ssd_req));
                r->blk = req->blkno;
                r->count = req->bcount;
                r->is_read = req->flags & READ;
                r->org_req = req;
                r->plane_num = -1; // we don't know to which plane this req will be directed at

                if (req->flags & READ) {
                    rd_q[j][tot_rd_reqs] = r;
                    tot_rd_reqs ++;
                } else {
                    wr_q[j][tot_wr_reqs] = r;
                    tot_wr_reqs ++;
                }
            } else {
                // throw this request -- it doesn't make sense
                stat_update (&s->stat.acctimestats, 0);
                req->time = simtime;
                req->ssd_elem_num = i;
                req->ssd_gang_num = gang_num;

                req->type = DEVICE_ACCESS_COMPLETE;
                addtointq ((event *)req);
            }

            ASSERT((tot_rd_reqs < MAX_REQS) && (tot_wr_reqs < MAX_REQS))
        }

        *rd_total = *rd_total + tot_rd_reqs;
        *wr_total = *wr_total + tot_wr_reqs;

        // go to the next element
        i = ssd_next_elem_in_gang(s, gang_num, i);
    } while (i != start);
}
Example #16
int stat_to_mom(

  char             *job_id,
  struct stat_cntl *cntl)  /* M */

  {
  struct batch_request *newrq;
  int                   rc = PBSE_NONE;
  unsigned long         addr;
  char                  log_buf[LOCAL_LOG_BUF_SIZE+1];
  struct pbsnode       *node;
  int                   handle = -1;
  unsigned long         job_momaddr = -1;
  unsigned short        job_momport = -1;
  char                 *job_momname = NULL;
  job                  *pjob = NULL;

  if ((pjob = svr_find_job(job_id, FALSE)) == NULL)
    return(PBSE_JOBNOTFOUND);

  mutex_mgr job_mutex(pjob->ji_mutex, true);

  if ((pjob->ji_qs.ji_un.ji_exect.ji_momaddr == 0) || 
      (!pjob->ji_wattr[JOB_ATR_exec_host].at_val.at_str))
    {
    job_mutex.unlock();
    snprintf(log_buf, sizeof(log_buf),
      "Job %s missing MOM's information. Skipping statting on this job", pjob->ji_qs.ji_jobid);
    log_record(PBSEVENT_JOB, PBS_EVENTCLASS_JOB, __func__, log_buf);
    return PBSE_BAD_PARAMETER;
    }

  job_momaddr = pjob->ji_qs.ji_un.ji_exect.ji_momaddr;
  job_momport = pjob->ji_qs.ji_un.ji_exect.ji_momport;
  job_momname = strdup(pjob->ji_wattr[JOB_ATR_exec_host].at_val.at_str);
  job_mutex.unlock();

  if (job_momname == NULL)
    return PBSE_MEM_MALLOC;

  if ((newrq = alloc_br(PBS_BATCH_StatusJob)) == NULL)
    {
    free(job_momname);
    return PBSE_MEM_MALLOC;
    }

  if (cntl->sc_type == 1)
    snprintf(newrq->rq_ind.rq_status.rq_id, sizeof(newrq->rq_ind.rq_status.rq_id), "%s", job_id);
  else
    newrq->rq_ind.rq_status.rq_id[0] = '\0';  /* get stat of all */

  CLEAR_HEAD(newrq->rq_ind.rq_status.rq_attr);

  /* if MOM is down just return stale information */
  addr = job_momaddr;

  node = tfind_addr(addr,job_momport,job_momname);
  free(job_momname);

  if (node == NULL)
    return PBSE_UNKNODE;
  if ((node->nd_state & INUSE_DOWN)||(node->nd_power_state != POWER_STATE_RUNNING))
    {
    if (LOGLEVEL >= 6)
      {
      snprintf(log_buf, LOCAL_LOG_BUF_SIZE,
          "node '%s' is allocated to job but in state 'down'",
          node->nd_name);

      log_event(PBSEVENT_SYSTEM,PBS_EVENTCLASS_JOB,job_id,log_buf);
      }

    unlock_node(node, __func__, "no rely mom", LOGLEVEL);
    free_br(newrq);

    return PBSE_NORELYMOM;
    }

  /* get connection to MOM */
  unlock_node(node, __func__, "before svr_connect", LOGLEVEL);
  handle = svr_connect(job_momaddr, job_momport, &rc, NULL, NULL);

  if (handle >= 0)
    {
    if ((rc = issue_Drequest(handle, newrq, true)) == PBSE_NONE)
      {
      stat_update(newrq, cntl);
      }
    }
  else
    rc = PBSE_CONNECT;

  if (rc == PBSE_SYSTEM)
    rc = PBSE_MEM_MALLOC;

  free_br(newrq);

  return(rc);
  }  /* END stat_to_mom() */
Example #17
int supervise_run(void)
{
	g_pid = 0;
	g_flagexit = 0;
	g_flagwant = 1;
	g_flagwantup = opt_auto_start;
	g_flagpaused = 0;

	sig_block(SIGCHLD);
	sig_catch(SIGCHLD, sigchild_handler);

	stat_pidchange();
	stat_update();

	if(g_flagwant && g_flagwantup) {
		trystart();
	}

	while(1) {
		char c;
		ssize_t rl;

		if(g_flagexit && !g_pid) { return 0; }

printf("waiting pid pid=%d\n",g_pid);
		while(1) {
			int stat;
			int r = waitpid(-1, &stat, WNOHANG);
			if(r == 0) { break; }
			if(r < 0 && errno != EAGAIN && errno != EINTR) { break; }
			if(r == g_pid) {
				g_pid = 0;
				stat_pidchange();
				stat_update();
				if(g_flagexit) { return 0; }
				if(g_flagwant && g_flagwantup) {
					trystart();
					break;
				}
			}
		}

printf("reading... pid=%d\n",g_pid);
		sig_unblock(SIGCHLD);
		rl = read(g_ctl_rfd, &c, 1);
		if(rl <= 0) {
			if(errno == EAGAIN || errno == EINTR) {
				continue;
			}
			return -1;
		}
		sig_block(SIGCHLD);

		switch(c) {
		case 'd':  /* down */
printf("down %d\n",g_pid);
			g_flagwant = 1;
			g_flagwantup = 0;
			if(g_pid) {
				kill(g_pid, SIGTERM);
				kill(g_pid, SIGCONT);
				g_flagpaused = 0;
			}
			stat_update();
			break;

		case 'u':  /* up */
printf("up %d\n",g_pid);
			g_flagwant = 1;
			g_flagwantup = 1;
			if(!g_pid) { trystart(); }
			stat_update();
			break;

		case 'o':  /* once */
printf("once %d\n",g_pid);
			g_flagwant = 0;
			if(!g_pid) { trystart(); }
			stat_update();
			break;

		case 'x':  /* exit */
printf("exit %d\n",g_pid);
			g_flagexit = 1;
			stat_update();
			break;

		case 'p':  /* pause */
printf("pause %d\n",g_pid);
			g_flagpaused = 1;
			if(g_pid) { kill(g_pid, SIGSTOP); }
			stat_update();
			break;

		case 'c':  /* continue */
printf("continue %d\n",g_pid);
			g_flagpaused = 0;
			if(g_pid) { kill(g_pid, SIGCONT); }
			stat_update();
			break;

		case 'h':  /* hup */
printf("hup %d\n",g_pid);
			if(g_pid) { kill(g_pid, SIGHUP); }
			break;

		case 'a':  /* alarm */
printf("alarm %d\n",g_pid);
			if(g_pid) { kill(g_pid, SIGALRM); }
			break;

		case 'i':  /* interrupt */
printf("interrupt %d\n",g_pid);
			if(g_pid) { kill(g_pid, SIGINT); }
			break;

		case 't':  /* term */
printf("term %d\n",g_pid);
			if(g_pid) { kill(g_pid, SIGTERM); }
			break;

		case 'k':  /* kill */
printf("kill %d\n",g_pid);
			if(g_pid) { kill(g_pid, SIGKILL); }
			break;

		case '1':  /* usr1 */
printf("usr1 %d\n",g_pid);
			if(g_pid) { kill(g_pid, SIGUSR1); }
			break;

		case '2':  /* usr2 */
printf("usr2 %d\n",g_pid);
			if(g_pid) { kill(g_pid, SIGUSR2); }
			break;

		case 's':  /* sigchld */
printf("sigchld %d\n",g_pid);
			if(!opt_auto_restart) {
				g_flagwant = 0;
				stat_update();
			}
			break;

		case ' ': /* ping */
			// FIXME touch status file
			break;
		}
	}
}
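
In the supervise-style examples (trystart and supervise_run), stat_update takes no arguments and is called whenever the supervised child's state changes, which suggests it publishes the current flags somewhere the control interface can read them. The sketch below is only a guess at that idea: the file names, the line format, and the helper name are assumptions, not the project's code, and it takes the state as parameters where the real function presumably reads globals such as g_pid and g_flagpaused directly.

/* Illustrative sketch only: one way a stat_update()-style helper could
 * publish supervisor state.  Paths, format, and name are assumed. */
#include <stdio.h>
#include <sys/types.h>

static void publish_status_sketch(pid_t pid, int paused, int want, int wantup)
{
	FILE *f = fopen("status.tmp", "w");      /* assumed temporary path */
	if (f == NULL)
		return;
	fprintf(f, "pid=%d paused=%d want=%d wantup=%d\n",
			(int)pid, paused, want, wantup);
	fclose(f);
	rename("status.tmp", "status");          /* replace the published file */
}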
Example #18
int
NdbIndexStat::records_in_range(const NdbDictionary::Index* index, NdbIndexScanOperation* op, Uint64 table_rows, Uint64* count, int flags)
{
    DBUG_ENTER("NdbIndexStat::records_in_range");
    Uint64 rows;
    Uint32 key1[1000], keylen1;
    Uint32 key2[1000], keylen2;

    if (m_cache == NULL)
        flags |= RR_UseDb | RR_NoUpdate;
    else if (m_area[0].m_entries == 0 || m_area[1].m_entries == 0)
        flags |= RR_UseDb;

    if ((flags & (RR_UseDb | RR_NoUpdate)) != (RR_UseDb | RR_NoUpdate)) {
        // get start and end key - assume bound is ordered, wellformed
        Uint32 bound[1000];
        Uint32 boundlen = op->getKeyFromSCANTABREQ(bound, 1000);

        keylen1 = keylen2 = 0;
        Uint32 n = 0;
        while (n < boundlen) {
            Uint32 t = bound[n];
            AttributeHeader ah(bound[n + 1]);
            Uint32 sz = 2 + ah.getDataSize();
            t &= 0xFFFF;      // may contain length
            assert(t <= 4);
            bound[n] = t;
            if (t == 0 || t == 1 || t == 4) {
                memcpy(&key1[keylen1], &bound[n], sz << 2);
                keylen1 += sz;
            }
            if (t == 2 || t == 3 || t == 4) {
                memcpy(&key2[keylen2], &bound[n], sz << 2);
                keylen2 += sz;
            }
            n += sz;
        }
    }

    if (flags & RR_UseDb) {
        Uint32 out[4] = { 0, 0, 0, 0 };  // rows, in, before, after
        float tot[4] = { 0, 0, 0, 0 };   // totals of above
        int cnt, ret;
        bool forceSend = true;
        NdbTransaction* trans = op->m_transConnection;
        if (op->interpret_exit_last_row() == -1 ||
                op->getValue(NdbDictionary::Column::RECORDS_IN_RANGE, (char*)out) == 0) {
            m_error = op->getNdbError();
            DBUG_PRINT("error", ("op:%d", op->getNdbError().code));
            DBUG_RETURN(-1);
        }
        if (trans->execute(NdbTransaction::NoCommit,
                           NdbOperation::AbortOnError, forceSend) == -1) {
            m_error = trans->getNdbError();
            DBUG_PRINT("error", ("trans:%d op:%d", trans->getNdbError().code,
                                 op->getNdbError().code));
            DBUG_RETURN(-1);
        }
        cnt = 0;
        while ((ret = op->nextResult(true, forceSend)) == 0) {
            DBUG_PRINT("info", ("frag rows=%u in=%u before=%u after=%u [error=%d]",
                                out[0], out[1], out[2], out[3],
                                (int)(out[1] + out[2] + out[3]) - (int)out[0]));
            unsigned i;
            for (i = 0; i < 4; i++)
                tot[i] += (float)out[i];
            cnt++;
        }
        if (ret == -1) {
            m_error = op->getNdbError();
            DBUG_PRINT("error", ("trans:%d op:%d", trans->getNdbError().code,
                                 op->getNdbError().code));
            DBUG_RETURN(-1);
        }
        op->close(forceSend);
        rows = (Uint64)tot[1];
        if (cnt != 0 && ! (flags & RR_NoUpdate)) {
            float pct[2];
            pct[0] = 100 * tot[2] / tot[0];
            pct[1] = 100 * tot[3] / tot[0];
            DBUG_PRINT("info", ("update stat pct"
                                " before=%.2f after=%.2f",
                                pct[0], pct[1]));
            stat_update(key1, keylen1, key2, keylen2, pct);
        }
    } else {
        float pct[2];
        stat_select(key1, keylen1, key2, keylen2, pct);
        float diff = 100.0 - (pct[0] + pct[1]);
        float trows = (float)table_rows;
        DBUG_PRINT("info", ("select stat pct"
                            " before=%.2f after=%.2f in=%.2f table_rows=%.2f",
                            pct[0], pct[1], diff, trows));
        rows = 0;
        if (diff >= 0)
            rows = (Uint64)(diff * trows / 100);
        if (rows == 0)
            rows = 1;
    }

    *count = rows;
    DBUG_PRINT("value", ("rows=%llu flags=%o", rows, flags));
    DBUG_RETURN(0);
}
Example #19
File: ssd.c Project: vishnu89/gijoe
static void ssd_activate_elem(ssd_t *currdisk, int elem_num)
{
    ioreq_event *req;
    ssd_req **read_reqs;
    ssd_req **write_reqs;
    int i;
    int read_total = 0;
    int write_total = 0;
    double schtime = 0;
    int max_reqs;
    int tot_reqs_issued;
    double max_time_taken = 0;


    ssd_element *elem = &currdisk->elements[elem_num];

    // if the media is busy, we can't do anything, so return
    if (elem->media_busy == TRUE) {
        return;
    }

    ASSERT(ioqueue_get_reqoutstanding(elem->queue) == 0);

    // we can invoke cleaning in the background whether there
    // is request waiting or not
    if (currdisk->params.cleaning_in_background) {
        // if cleaning was invoked, wait until
        // it is over ...
        if (ssd_invoke_element_cleaning(elem_num, currdisk)) {
            return;
        }
    }

    ASSERT(elem->metadata.reqs_waiting == ioqueue_get_number_in_queue(elem->queue));

    if (elem->metadata.reqs_waiting > 0) {

        // invoke cleaning in foreground when there are requests waiting
        if (!currdisk->params.cleaning_in_background) {
            // if cleaning was invoked, wait until
            // it is over ...
            if (ssd_invoke_element_cleaning(elem_num, currdisk)) {
                return;
            }
        }

        // how many reqs can we issue at once
        if (currdisk->params.copy_back == SSD_COPY_BACK_DISABLE) {
            max_reqs = 1;
        } else {
            if (currdisk->params.num_parunits == 1) {
                max_reqs = 1;
            } else {
                max_reqs = MAX_REQS_ELEM_QUEUE;
            }
        }

        // ideally, we should issue one req per plane, overlapping them all.
        // in order to simplify the overlapping strategy, let's issue
        // requests of the same type together.

        read_reqs = (ssd_req **) malloc(max_reqs * sizeof(ssd_req *));
        write_reqs = (ssd_req **) malloc(max_reqs * sizeof(ssd_req *));

        // collect the requests
        while ((req = ioqueue_get_next_request(elem->queue)) != NULL) {
            int found = 0;

            elem->metadata.reqs_waiting --;

            // see if we already have the same request in the list.
            // this usually doesn't happen -- but on synthetic traces
            // this weird case can occur.
            if (req->flags & READ) {
                found = ssd_already_present(read_reqs, read_total, req);
            } else {
                found = ssd_already_present(write_reqs, write_total, req);
            }

            if (!found) {
                // this is a valid request
                ssd_req *r = malloc(sizeof(ssd_req));
                r->blk = req->blkno;
                r->count = req->bcount;
                r->is_read = req->flags & READ;
                r->org_req = req;
                r->plane_num = -1; // we don't know to which plane this req will be directed at

                if (req->flags & READ) {
                    read_reqs[read_total] = r;
                    read_total ++;
                } else {
                    write_reqs[write_total] = r;
                    write_total ++;
                }

                // if we have more reqs than we can handle, quit
                if ((read_total >= max_reqs) ||
                    (write_total >= max_reqs)) {
                    break;
                }
            } else {
                // throw this request -- it doesn't make sense
                stat_update (&currdisk->stat.acctimestats, 0);
                req->time = simtime;
                req->ssd_elem_num = elem_num;
                req->type = DEVICE_ACCESS_COMPLETE;
                addtointq ((event *)req);
            }
        }

        if (read_total > 0) {
            // first issue all the read requests (it doesn't matter what we
            // issue first). i chose read because reads are mostly synchronous.
            // find the time taken to serve these requests.
            ssd_compute_access_time(currdisk, elem_num, read_reqs, read_total);

            // add an event for each request completion
            for (i = 0; i < read_total; i ++) {
              elem->media_busy = TRUE;

              // find the maximum time taken by a request
              if (schtime < read_reqs[i]->schtime) {
                  schtime = read_reqs[i]->schtime;
              }

              stat_update (&currdisk->stat.acctimestats, read_reqs[i]->acctime);
              read_reqs[i]->org_req->time = simtime + read_reqs[i]->schtime;
              read_reqs[i]->org_req->ssd_elem_num = elem_num;
              read_reqs[i]->org_req->type = DEVICE_ACCESS_COMPLETE;

              //printf("R: blk %d elem %d acctime %f simtime %f\n", read_reqs[i]->blk,
                //  elem_num, read_reqs[i]->acctime, read_reqs[i]->org_req->time);

              addtointq ((event *)read_reqs[i]->org_req);
              free(read_reqs[i]);
            }
        }

        free(read_reqs);

        max_time_taken = schtime;

        if (write_total > 0) {
            // next issue the write requests
            ssd_compute_access_time(currdisk, elem_num, write_reqs, write_total);

            // add an event for each request completion.
            // note that we can issue the writes only after all the reads above are
            // over. so, include the maximum read time when creating the event.
            for (i = 0; i < write_total; i ++) {
              elem->media_busy = TRUE;

              stat_update (&currdisk->stat.acctimestats, write_reqs[i]->acctime);
              write_reqs[i]->org_req->time = simtime + schtime + write_reqs[i]->schtime;
              //printf("blk %d elem %d acc time %f\n", write_reqs[i]->blk, elem_num, write_reqs[i]->acctime);

              if (max_time_taken < (schtime+write_reqs[i]->schtime)) {
                  max_time_taken = (schtime+write_reqs[i]->schtime);
              }

              write_reqs[i]->org_req->ssd_elem_num = elem_num;
              write_reqs[i]->org_req->type = DEVICE_ACCESS_COMPLETE;
              //printf("W: blk %d elem %d acctime %f simtime %f\n", write_reqs[i]->blk,
                //  elem_num, write_reqs[i]->acctime, write_reqs[i]->org_req->time);

              addtointq ((event *)write_reqs[i]->org_req);
              free(write_reqs[i]);
            }
        }

        free(write_reqs);

        // statistics
        tot_reqs_issued = read_total + write_total;
        ASSERT(tot_reqs_issued > 0);
        currdisk->elements[elem_num].stat.tot_reqs_issued += tot_reqs_issued;
        currdisk->elements[elem_num].stat.tot_time_taken += max_time_taken;
    }
}
Example #20
int stat_to_mom(

  char             *job_id,
  struct stat_cntl *cntl)  /* M */

  {
  struct batch_request *newrq;
  int                   rc = PBSE_NONE;
  unsigned long         addr;
  char                  log_buf[LOCAL_LOG_BUF_SIZE+1];
  struct pbsnode       *node;
  int handle = -1;
  unsigned long job_momaddr = -1;
  unsigned short job_momport = -1;
  char *job_momname = NULL;
  job *pjob = NULL;

  if ((pjob = svr_find_job(job_id, FALSE)) == NULL)
    return PBSE_JOBNOTFOUND;

  job_momaddr = pjob->ji_qs.ji_un.ji_exect.ji_momaddr;
  job_momport = pjob->ji_qs.ji_un.ji_exect.ji_momport;
  job_momname = strdup(pjob->ji_wattr[JOB_ATR_exec_host].at_val.at_str);
  unlock_ji_mutex(pjob, __func__, "1", LOGLEVEL);

  if (job_momname == NULL)
    return PBSE_MEM_MALLOC;

  if ((newrq = alloc_br(PBS_BATCH_StatusJob)) == NULL)
    {
    free(job_momname);
    return PBSE_MEM_MALLOC;
    }

  if (cntl->sc_type == 1)
    strcpy(newrq->rq_ind.rq_status.rq_id, job_id);
  else
    newrq->rq_ind.rq_status.rq_id[0] = '\0';  /* get stat of all */

  CLEAR_HEAD(newrq->rq_ind.rq_status.rq_attr);

  /* if MOM is down just return stale information */
  addr = job_momaddr;

  node = tfind_addr(addr,job_momport,job_momname);
  free(job_momname);

  if (node == NULL)
    return PBSE_UNKNODE;
  if (node->nd_state & INUSE_DOWN)
    {
    if (LOGLEVEL >= 6)
      {
      snprintf(log_buf, LOCAL_LOG_BUF_SIZE,
          "node '%s' is allocated to job but in state 'down'",
          node->nd_name);

      log_event(PBSEVENT_SYSTEM,PBS_EVENTCLASS_JOB,job_id,log_buf);
      }

    unlock_node(node, __func__, "no rely mom", LOGLEVEL);
    free_br(newrq);

    return PBSE_NORELYMOM;
    }

  /* get connection to MOM */
  unlock_node(node, __func__, "before svr_connect", LOGLEVEL);
  handle = svr_connect(job_momaddr, job_momport, &rc, NULL, NULL, ToServerDIS);

  /* Unlock job here */
  if (handle >= 0)
    {
    if ((rc = issue_Drequest(handle, newrq)) == PBSE_NONE)
      {
      stat_update(newrq, cntl);
      }
    }
  else
    rc = PBSE_CONNECT;

  if (rc == PBSE_SYSTEM)
    rc = PBSE_MEM_MALLOC;

  free_br(newrq);

  return rc;
  }  /* END stat_to_mom() */
Example #21
static void on_packet_received(void)
{
    if (receive_crc_ok())
        stat_update(&g_report_packet);
    receive_start();
}
Example #22
/*
 * Fetch a file
 */
static int
fetch(char *URL, const char *path)
{
	struct url *url;
	struct url_stat us;
	struct stat sb, nsb;
	struct xferstat xs;
	FILE *f, *of;
	size_t size, readcnt, wr;
	off_t count;
	char flags[8];
	const char *slash;
	char *tmppath;
	int r;
	unsigned timeout;
	char *ptr;

	f = of = NULL;
	tmppath = NULL;

	timeout = 0;
	*flags = 0;
	count = 0;

	/* set verbosity level */
	if (v_level > 1)
		strcat(flags, "v");
	if (v_level > 2)
		fetchDebug = 1;

	/* parse URL */
	if ((url = fetchParseURL(URL)) == NULL) {
		warnx("%s: parse error", URL);
		goto failure;
	}

	/* if no scheme was specified, take a guess */
	if (*url->scheme == 0) {
		if (*url->host == 0)
			strcpy(url->scheme, SCHEME_FILE);
		else if (strncasecmp(url->host, "ftp.", 4) == 0)
			strcpy(url->scheme, SCHEME_FTP);
		else if (strncasecmp(url->host, "www.", 4) == 0)
			strcpy(url->scheme, SCHEME_HTTP);
	}

	/* common flags */
	switch (family) {
	case PF_INET:
		strcat(flags, "4");
		break;
	case PF_INET6:
		strcat(flags, "6");
		break;
	}

	/* FTP specific flags */
	if (strcmp(url->scheme, SCHEME_FTP) == 0) {
		if (p_flag)
			strcat(flags, "p");
		if (d_flag)
			strcat(flags, "d");
		if (U_flag)
			strcat(flags, "l");
		timeout = T_secs ? T_secs : ftp_timeout;
	}

	/* HTTP specific flags */
	if (strcmp(url->scheme, SCHEME_HTTP) == 0 ||
	    strcmp(url->scheme, SCHEME_HTTPS) == 0) {
		if (d_flag)
			strcat(flags, "d");
		if (A_flag)
			strcat(flags, "A");
		timeout = T_secs ? T_secs : http_timeout;
		if (i_flag) {
			if (stat(i_filename, &sb)) {
				warn("%s: stat()", i_filename);
				goto failure;
			}
			url->ims_time = sb.st_mtime;
			strcat(flags, "i");
		}
	}

	/* set the protocol timeout. */
	fetchTimeout = timeout;

	/* just print size */
	if (s_flag) {
		if (timeout)
			alarm(timeout);
		r = fetchStat(url, &us, flags);
		if (timeout)
			alarm(0);
		if (sigalrm || sigint)
			goto signal;
		if (r == -1) {
			warnx("%s", fetchLastErrString);
			goto failure;
		}
		if (us.size == -1)
			printf("Unknown\n");
		else
			printf("%jd\n", (intmax_t)us.size);
		goto success;
	}

	/*
	 * If the -r flag was specified, we have to compare the local
	 * and remote files, so we should really do a fetchStat()
	 * first, but I know of at least one HTTP server that only
	 * sends the content size in response to GET requests, and
	 * leaves it out of replies to HEAD requests.  Also, in the
	 * (frequent) case that the local and remote files match but
	 * the local file is truncated, we have sufficient information
	 * before the compare to issue a correct request.  Therefore,
	 * we always issue a GET request as if we were sure the local
	 * file was a truncated copy of the remote file; we can drop
	 * the connection later if we change our minds.
	 */
	sb.st_size = -1;
	if (!o_stdout) {
		r = stat(path, &sb);
		if (r == 0 && r_flag && S_ISREG(sb.st_mode)) {
			url->offset = sb.st_size;
		} else if (r == -1 || !S_ISREG(sb.st_mode)) {
			/*
			 * Whatever value sb.st_size has now is either
			 * wrong (if stat(2) failed) or irrelevant (if the
			 * path does not refer to a regular file)
			 */
			sb.st_size = -1;
		}
		if (r == -1 && errno != ENOENT) {
			warnx("%s: stat()", path);
			goto failure;
		}
	}

	/* start the transfer */
	if (timeout)
		alarm(timeout);
	f = fetchXGet(url, &us, flags);
	if (timeout)
		alarm(0);
	if (sigalrm || sigint)
		goto signal;
	if (f == NULL) {
		warnx("%s: %s", URL, fetchLastErrString);
		if (i_flag && strcmp(url->scheme, SCHEME_HTTP) == 0
		    && fetchLastErrCode == FETCH_OK
		    && strcmp(fetchLastErrString, "Not Modified") == 0) {
			/* HTTP Not Modified Response, return OK. */
			r = 0;
			goto done;
		} else
			goto failure;
	}
	if (sigint)
		goto signal;

	/* check that size is as expected */
	if (S_size) {
		if (us.size == -1) {
			warnx("%s: size unknown", URL);
		} else if (us.size != S_size) {
			warnx("%s: size mismatch: expected %jd, actual %jd",
			    URL, (intmax_t)S_size, (intmax_t)us.size);
			goto failure;
		}
	}

	/* symlink instead of copy */
	if (l_flag && strcmp(url->scheme, "file") == 0 && !o_stdout) {
		if (symlink(url->doc, path) == -1) {
			warn("%s: symlink()", path);
			goto failure;
		}
		goto success;
	}

	if (us.size == -1 && !o_stdout && v_level > 0)
		warnx("%s: size of remote file is not known", URL);
	if (v_level > 1) {
		if (sb.st_size != -1)
			fprintf(stderr, "local size / mtime: %jd / %ld\n",
			    (intmax_t)sb.st_size, (long)sb.st_mtime);
		if (us.size != -1)
			fprintf(stderr, "remote size / mtime: %jd / %ld\n",
			    (intmax_t)us.size, (long)us.mtime);
	}

	/* open output file */
	if (o_stdout) {
		/* output to stdout */
		of = stdout;
	} else if (r_flag && sb.st_size != -1) {
		/* resume mode, local file exists */
		if (!F_flag && us.mtime && sb.st_mtime != us.mtime) {
			/* no match! have to refetch */
			fclose(f);
			/* if precious, warn the user and give up */
			if (R_flag) {
				warnx("%s: local modification time "
				    "does not match remote", path);
				goto failure_keep;
			}
		} else if (us.size != -1) {
			if (us.size == sb.st_size)
				/* nothing to do */
				goto success;
			if (sb.st_size > us.size) {
				/* local file too long! */
				warnx("%s: local file (%jd bytes) is longer "
				    "than remote file (%jd bytes)", path,
				    (intmax_t)sb.st_size, (intmax_t)us.size);
				goto failure;
			}
			/* we got it, open local file */
			if ((of = fopen(path, "a")) == NULL) {
				warn("%s: fopen()", path);
				goto failure;
			}
			/* check that it didn't move under our feet */
			if (fstat(fileno(of), &nsb) == -1) {
				/* can't happen! */
				warn("%s: fstat()", path);
				goto failure;
			}
			if (nsb.st_dev != sb.st_dev ||
			    nsb.st_ino != sb.st_ino ||
			    nsb.st_size != sb.st_size) {
				warnx("%s: file has changed", URL);
				fclose(of);
				of = NULL;
				sb = nsb;
			}
		}
	} else if (m_flag && sb.st_size != -1) {
		/* mirror mode, local file exists */
		if (sb.st_size == us.size && sb.st_mtime == us.mtime)
			goto success;
	}

	if (of == NULL) {
		/*
		 * We don't yet have an output file; either this is a
		 * vanilla run with no special flags, or the local and
		 * remote files didn't match.
		 */

		if (url->offset > 0) {
			/*
			 * We tried to restart a transfer, but for
			 * some reason gave up - so we have to restart
			 * from scratch if we want the whole file
			 */
			url->offset = 0;
			if ((f = fetchXGet(url, &us, flags)) == NULL) {
				warnx("%s: %s", URL, fetchLastErrString);
				goto failure;
			}
			if (sigint)
				goto signal;
		}

		/* construct a temporary file name */
		if (sb.st_size != -1 && S_ISREG(sb.st_mode)) {
			if ((slash = strrchr(path, '/')) == NULL)
				slash = path;
			else
				++slash;
			asprintf(&tmppath, "%.*s.fetch.XXXXXX.%s",
			    (int)(slash - path), path, slash);
			if (tmppath != NULL) {
				if (mkstemps(tmppath, strlen(slash)+1) == -1) {
					warn("%s: mkstemps()", path);
					goto failure;
				}

				of = fopen(tmppath, "w");
				chown(tmppath, sb.st_uid, sb.st_gid);
				chmod(tmppath, sb.st_mode & ALLPERMS);
			}
		}

		if (of == NULL)
			if ((of = fopen(path, "w")) == NULL) {
				warn("%s: fopen()", path);
			goto failure;
		}
	}
	count = url->offset;

	/* start the counter */
	stat_start(&xs, path, us.size, count);

	sigalrm = siginfo = sigint = 0;

	/* suck in the data */
	signal(SIGINFO, sig_handler);
	while (!sigint) {
		if (us.size != -1 && us.size - count < B_size &&
		    us.size - count >= 0)
			size = us.size - count;
		else
			size = B_size;
		if (siginfo) {
			stat_end(&xs);
			siginfo = 0;
		}

		if (size == 0)
			break;

		if ((readcnt = fread(buf, 1, size, f)) < size) {
			if (ferror(f) && errno == EINTR && !sigint)
				clearerr(f);
			else if (readcnt == 0)
				break;
		}

		stat_update(&xs, count += readcnt);
		for (ptr = buf; readcnt > 0; ptr += wr, readcnt -= wr)
			if ((wr = fwrite(ptr, 1, readcnt, of)) < readcnt) {
				if (ferror(of) && errno == EINTR && !sigint)
					clearerr(of);
				else
					break;
			}
		if (readcnt != 0)
			break;
	}
	if (!sigalrm)
		sigalrm = ferror(f) && errno == ETIMEDOUT;
	signal(SIGINFO, SIG_DFL);

	stat_end(&xs);

	/*
	 * If the transfer timed out or was interrupted, we still want to
	 * set the mtime in case the file is not removed (-r or -R) and
	 * the user later restarts the transfer.
	 */
 signal:
	/* set mtime of local file */
	if (!n_flag && us.mtime && !o_stdout && of != NULL &&
	    (stat(path, &sb) != -1) && sb.st_mode & S_IFREG) {
		struct timeval tv[2];

		fflush(of);
		tv[0].tv_sec = (long)(us.atime ? us.atime : us.mtime);
		tv[1].tv_sec = (long)us.mtime;
		tv[0].tv_usec = tv[1].tv_usec = 0;
		if (utimes(tmppath ? tmppath : path, tv))
			warn("%s: utimes()", tmppath ? tmppath : path);
	}

	/* timed out or interrupted? */
	if (sigalrm)
		warnx("transfer timed out");
	if (sigint) {
		warnx("transfer interrupted");
		goto failure;
	}

	/* timeout / interrupt before connection completely established? */
	if (f == NULL)
		goto failure;

	if (!sigalrm) {
		/* check the status of our files */
		if (ferror(f))
			warn("%s", URL);
		if (ferror(of))
			warn("%s", path);
		if (ferror(f) || ferror(of))
			goto failure;
	}

	/* did the transfer complete normally? */
	if (us.size != -1 && count < us.size) {
		warnx("%s appears to be truncated: %jd/%jd bytes",
		    path, (intmax_t)count, (intmax_t)us.size);
		goto failure_keep;
	}

	/*
	 * If the transfer timed out and we didn't know how much to
	 * expect, assume the worst (i.e. we didn't get all of it)
	 */
	if (sigalrm && us.size == -1) {
		warnx("%s may be truncated", path);
		goto failure_keep;
	}

 success:
	r = 0;
	if (tmppath != NULL && rename(tmppath, path) == -1) {
		warn("%s: rename()", path);
		goto failure_keep;
	}
	goto done;
 failure:
	if (of && of != stdout && !R_flag && !r_flag)
		if (stat(path, &sb) != -1 && (sb.st_mode & S_IFREG))
			unlink(tmppath ? tmppath : path);
	if (R_flag && tmppath != NULL && sb.st_size == -1)
		rename(tmppath, path); /* ignore errors here */
 failure_keep:
	r = -1;
	goto done;
 done:
	if (f)
		fclose(f);
	if (of && of != stdout)
		fclose(of);
	if (url)
		fetchFreeURL(url);
	if (tmppath != NULL)
		free(tmppath);
	return (r);
}