Example #1
int log_fatal(const char *name, const char *message, ...) {
	va_list va;
	va_start(va, message);
	LogLogVA(ZLOG_FATAL, name, message, va);
	va_end(va);
	va_start(va, message);
	vsyslog(LOG_MAKEPRI(LOG_USER, LOG_CRIT), message, va);
	va_end(va);

	exit(EXIT_FAILURE);
}
Example #2
int log_error(const char *name, const char *message, ...) {
	va_list va;
	va_start(va, message);
	int ret = LogLogVA(ZLOG_ERROR, name, message, va);
	va_end(va);

	va_start(va, message);
	vsyslog(LOG_MAKEPRI(LOG_USER, LOG_ERR), message, va);
	va_end(va);

	return ret;
}
Example #3
void fg_debug(const char *fmt, ...)
{
    va_list argp;
    va_start(argp, fmt);
#ifdef DEBUG
    vfprintf(stdout, fmt, argp);
    printf("\n");
#else
    vsyslog(LOG_MAKEPRI(LOG_USER, LOG_DEBUG), fmt, argp);
#endif
    va_end(argp);
}
Example #4
void fg_debug_error(const char *fmt, ...)
{
    va_list argp;
    va_start(argp, fmt);
#ifdef DEBUG
    vfprintf(stderr, fmt, argp);
    fprintf(stderr, ": %s\n", strerror(errno));
#else
    vsyslog(LOG_MAKEPRI(LOG_USER, LOG_ERR), fmt, argp);
#endif
    va_end(argp);
}
Example #5
int main(int argc, char * argv[])
{
  struct namenode_state state;
  uint16_t port;
  int res;

  if (argc < 3)
  {
    fprintf(stderr, "need namenode host & namenode port as first two arguments\n");
    return 1;
  }

  port = atoi(argv[2]);
  memset(&state, 0, sizeof(state));
  state.clientname = "hadoop_fuse";
  pthread_mutex_init(&state.connection.mutex, NULL);

  res = hadoop_rpc_connect_namenode(&state, argv[1], port);
  if(res < 0)
  {
    fprintf(stderr, "connection to hdfs://%s:%d failed: %s\n", argv[1], port, strerror(-res));
    return 1;
  }

#ifndef NDEBUG
  openlog (state.clientname, LOG_CONS | LOG_PID | LOG_NDELAY | LOG_PERROR, LOG_USER);
  signal(SIGSEGV, dump_trace);
  syslog(
    LOG_MAKEPRI(LOG_USER, LOG_DEBUG),
    "connected to hdfs://%s:%d, defaults: packetsize=%u, blocksize=%llu, replication=%u, bytesperchecksum=%u, checksumtype=%u",
    argv[1],
    port,
    state.packetsize,
    state.blocksize,
    state.replication,
    state.bytesperchecksum,
    state.checksumtype);
#endif

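  // shift the remaining arguments down so fuse_main() sees only the options meant for FUSE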
  for(int i = 3; i < argc; ++i)
  {
    argv[i - 2] = argv[i];
  }
  res = fuse_main(argc - 2, argv, &hello_oper, &state);

#ifndef NDEBUG
  closelog();
#endif

  return res;
}
Example #6
// helper for fatal errors
void bomb(int ecode, const char *msg)
{
  char fmsg[255];  // formatted msg
  snprintf(fmsg, sizeof(fmsg), "ERROR: %s", msg);

  // print to stderr if we haven't daemonized
  if (0 == daemonized)
    fprintf(stderr, "%s\n", fmsg);

  // log to syslog
  syslog(LOG_MAKEPRI(LOG_DAEMON, LOG_NOTICE), "%s", fmsg);  // "%s" avoids treating fmsg as a format string

  exit(ecode);
}
Example #7
int print_trace(int mode, const char * fmt, ...) {
    va_list ap;

    if (log_file == NULL) return 0;
    if (mode != LOG_ALWAYS && (log_mode & mode) == 0) return 0;

    va_start(ap, fmt);
    if (is_daemon()) {
#if defined(WIN32)
#elif defined(_WRS_KERNEL)
#elif defined(__SYMBIAN32__)
#else
        vsyslog(LOG_MAKEPRI(LOG_DAEMON, LOG_INFO), fmt, ap);
#endif
    }
    else {
        struct timespec timenow;

        if (clock_gettime(CLOCK_REALTIME, &timenow)) {
            perror("clock_gettime");
            exit(1);
        }

        if ((errno = pthread_mutex_lock(&mutex)) != 0) {
            perror("pthread_mutex_lock");
            exit(1);
        }

        fprintf(log_file, "TCF %02d:%02d.%03d: ",
            (int)(timenow.tv_sec / 60 % 60),
            (int)(timenow.tv_sec % 60),
            (int)(timenow.tv_nsec / 1000000));
        vfprintf(log_file, fmt, ap);
        fprintf(log_file, "\n");
        fflush(log_file);

        if ((errno = pthread_mutex_unlock(&mutex)) != 0) {
            perror("pthread_mutex_unlock");
            exit(1);
        }
    }
    va_end(ap);
    return 1;
}
Example #8
EXPORT BOOL OSAL_EventLogger_GenericLog(int loglevel, int eventid, ...)
{
#ifndef _WIMAX_SDK_
	char logmsg[MAX_EVENTLOG_STRING] = "\0";

	va_list marker;
	va_start(marker, eventid);

	switch (eventid) {
	case MSG_APPSRV_EVENT:
		{
			vsnprintf(logmsg, sizeof(logmsg), LOGMSG_APPSRV_EVENT, marker);
			break;
		}
	case MSG_VERSION_ERROR:
		{
			vsnprintf(logmsg, sizeof(logmsg), LOGMSG_VERSION_ERROR, marker);
			break;
		}
	case MSG_BAD_COMMAND:
		{
			sprintf(logmsg, LOGMSG_BAD_COMMAND);
			break;
		}
	default:
		{
			sprintf(logmsg, LOGMSG_UNKNOWN_LOG);
		}
	}
	va_end(marker);

	// log the message
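	// NB: with "#if 1" the priority is hardcoded to LOG_USER/LOG_INFO and the caller-supplied loglevel is ignored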
#if 1
	syslog(LOG_MAKEPRI(LOG_USER, LOG_INFO), "%s", logmsg);
#else
	syslog(loglevel, "%s", logmsg);
#endif 


#endif

	return TRUE;	
}
Example #9
int log_info(const char *name, const char *message, ...) {
	va_list va;
	va_start(va, message);
	int ret = LogLogVA(ZLOG_INFO, name, message, va);
	va_end(va);

	char *prefixed = xmalloc(strlen(name) + strlen(message) + 3);
	strcpy(prefixed, name);
	strcat(prefixed, ": ");
	strcat(prefixed, message);

	va_start(va, message);
	vsyslog(LOG_MAKEPRI(LOG_USER, LOG_INFO), prefixed, va);
	va_end(va);

	free(prefixed);

	return ret;
}
Example #10
void main(void)
{
	LogEntry loginfo;
	int status;
	char buf[200];

	/*
	 * This call is necessary to initialize target communications
	 * between the DeviceMate and the target processor.
	 */
	targetproc_init();

	/*
	 * Initialize the TCP/IP stack and the web server.
	 */
	sock_init();
	http_init();

	/*
	 * The following improves interactive performance of the web server.
	 */
	tcp_reserveport(80);

	/*
	 * Log an initial entry.
	 */
#define LOG_TEST_STRING "~~~{ Started test run. }~~~"
	status = log_put(LOG_MAKEPRI(2,LOG_INFO), 0, LOG_TEST_STRING, strlen(LOG_TEST_STRING));
	if (status != 0) {
		printf("Failed to add 1st message: %d\n", status);
	}
	
	/*
	 * Drive the target communications and the web server continuously.
	 * This is all that is necessary as the main part of the program.
	 */
	for (;;) {
		targetproc_tick();
		http_handler();
	}
}
Example #11
static
int hadoop_fuse_lock(
  const char * src,
  Hadoop__Hdfs__ExtendedBlockProto ** last)
{
  int res;
  Hadoop__Hdfs__AppendRequestProto appendrequest = HADOOP__HDFS__APPEND_REQUEST_PROTO__INIT;
  Hadoop__Hdfs__AppendResponseProto * appendresponse = NULL;

  // hadoop semantics means we must "append" to a file, even if we're
  // going to write in the middle of it. This makes us take the lease.

  appendrequest.src = (char *) src;
  appendrequest.clientname = hadoop_fuse_client_name();

  res = CALL_NN("append", appendrequest, appendresponse);
  if(res < 0)
  {
    goto end;
  }

  if(appendresponse->block)
  {
    hadoop_fuse_clone_block(appendresponse->block->b, last);
  }

  res = 0;
  hadoop__hdfs__append_response_proto__free_unpacked(appendresponse, NULL);

end:

#ifndef NDEBUG
  syslog(
    LOG_MAKEPRI(LOG_USER, LOG_DEBUG),
    "hadoop_fuse_lock %s => %d",
    src,
    res);
#endif

  return res;
}
Example #12
void
log_info(const char *fmt, ...)
{
    va_list ap;
    va_start(ap, fmt);
    if (rpcapd_opt.use_syslog) {
#ifdef WIN32
        char msg[1024];
        char *pmsg = msg;
        vsnprintf(msg, sizeof(msg), fmt, ap);
        ReportEvent(event_source, EVENTLOG_INFORMATION_TYPE, 0, MSG_INFO, NULL,
                    1, 0, &pmsg, NULL);
#else
        vsyslog(LOG_MAKEPRI(LOG_DAEMON, LOG_INFO), fmt, ap);
#endif
    }
    else {
        vfprintf(stderr, fmt, ap);
        fprintf(stderr, "\n");
    }
    va_end(ap);
}
Example #13
static
void unpack_filestatus(Hadoop__Hdfs__HdfsFileStatusProto * fs, struct stat * stbuf)
{
  assert(fs);
  assert(stbuf);

  stbuf->st_size = fs->length;
  switch(fs->filetype)
  {
  case HADOOP__HDFS__HDFS_FILE_STATUS_PROTO__FILE_TYPE__IS_DIR:
  {
    stbuf->st_mode = S_IFDIR;
    break;
  }
  case HADOOP__HDFS__HDFS_FILE_STATUS_PROTO__FILE_TYPE__IS_FILE:
  {
    stbuf->st_mode = S_IFREG;
    break;
  }
  case HADOOP__HDFS__HDFS_FILE_STATUS_PROTO__FILE_TYPE__IS_SYMLINK:
  {
    stbuf->st_mode = S_IFLNK;
    break;
  }
  }
  if(fs->permission)
  {
    stbuf->st_mode |= fs->permission->perm;
  }
  if(fs->has_blocksize)
  {
    stbuf->st_blksize = fs->blocksize;
  }
  else
  {
    stbuf->st_blksize = hadoop_fuse_namenode_state()->blocksize;
  }
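  // HDFS reports times in milliseconds; struct stat wants seconds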
  stbuf->st_mtime = fs->modification_time / 1000;
  stbuf->st_atime = fs->access_time / 1000;

  if(fs->owner)
  {
    struct passwd * o = getpwnam(fs->owner);
    if(o)
    {
      stbuf->st_uid = o->pw_uid;
    }
  }
  if(fs->group)
  {
    struct group * g = getgrnam(fs->group);
    if(g)
    {
      stbuf->st_gid = g->gr_gid;
    }
  }

#ifndef NDEBUG
  syslog(
    LOG_MAKEPRI(LOG_USER, LOG_DEBUG),
    "unpack_filestatus, length=%llu blocksize=%zd",
    stbuf->st_size,
    stbuf->st_blksize);
#endif
}
Example #14
/**
 * Assumes file is locked, and offsetintofile is in the (under construction)
 * last block of the file. offsetintofile must also be contiguous, i.e. at most
 * current file length + 1
 */
static
int hadoop_fuse_do_write(
  const char * src,
  struct Hadoop_Fuse_Buffer_Pos * bufferpos,
  const uint64_t offsetintofile,
  const struct Hadoop_Fuse_FileHandle * fh,
  const Hadoop__Hdfs__LocatedBlockProto * lastlocation,
  Hadoop__Hdfs__ExtendedBlockProto ** last)
{
  int res = 0;
  Hadoop__Hdfs__ChecksumProto checksum = HADOOP__HDFS__CHECKSUM_PROTO__INIT;

  assert(fh);
  assert(fh->blocksize);
  assert(!lastlocation || lastlocation->offset <= offsetintofile);
  assert(!lastlocation || lastlocation->b->has_numbytes); // not sure where to get this from otherwise
  assert(bufferpos);
  assert(bufferpos->bufferoffset == 0);

  // set up the checksum algorithm we'll use to transfer the data
  checksum.type = hadoop_fuse_namenode_state()->checksumtype;
  checksum.bytesperchecksum = hadoop_fuse_namenode_state()->bytesperchecksum;

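  // if the write starts inside the (still under-construction) last block, update that block in place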
  if(lastlocation && offsetintofile < lastlocation->offset + fh->blocksize && offsetintofile >= lastlocation->offset)
  {
    Hadoop__Hdfs__UpdateBlockForPipelineRequestProto updateblockrequest = HADOOP__HDFS__UPDATE_BLOCK_FOR_PIPELINE_REQUEST_PROTO__INIT;
    Hadoop__Hdfs__UpdateBlockForPipelineResponseProto * updateblockresponse = NULL;
    Hadoop__Hdfs__UpdatePipelineRequestProto updaterequest = HADOOP__HDFS__UPDATE_PIPELINE_REQUEST_PROTO__INIT;
    Hadoop__Hdfs__UpdatePipelineResponseProto * updateresponse = NULL;
    Hadoop__Hdfs__DatanodeIDProto ** newnodes = NULL;
    uint64_t blockoffset = offsetintofile - lastlocation->offset;
    uint64_t len = hadoop_fuse_bytestowrite(bufferpos, blockoffset, fh);

    updateblockrequest.clientname = hadoop_fuse_client_name();
    updateblockrequest.block = lastlocation->b;

    // since we're updating a block, we need to get a new "generation stamp" (version)
    // from the NN.
    res = CALL_NN("updateBlockForPipeline", updateblockrequest, updateblockresponse);
    if(res < 0)
    {
      goto endwrite;
    }

    res = hadoop_fuse_write_block(
      bufferpos,
      len,
      blockoffset,
      &checksum,
      lastlocation,
      updateblockresponse->block);
    if(res < 0)
    {
      goto endwrite;
    }

    // tell the NN we've updated the block

    newnodes = alloca(lastlocation->n_locs * sizeof(Hadoop__Hdfs__DatanodeIDProto *));
    for(size_t n = 0; n < lastlocation->n_locs; ++n)
    {
      newnodes[n] = lastlocation->locs[n]->id;
    }

    updaterequest.clientname = hadoop_fuse_client_name();
    updaterequest.oldblock = lastlocation->b;
    updaterequest.newblock = updateblockresponse->block->b;
    updaterequest.n_newnodes = lastlocation->n_locs;
    updaterequest.newnodes = newnodes;
    updaterequest.n_storageids = lastlocation->n_storageids;
    updaterequest.storageids = lastlocation->storageids;

    res = CALL_NN("updatePipeline", updaterequest, updateresponse);
    if(res < 0)
    {
      goto endwrite;
    }

    hadoop_fuse_clone_block(updateblockresponse->block->b, last);

endwrite:
#ifndef NDEBUG
    syslog(
      LOG_MAKEPRI(LOG_USER, LOG_DEBUG),
      "hadoop_fuse_do_write wrote new version %llu for block %s blk_%llu_%llu on %zd node(s) writing %llu bytes to block offset %llu, file offset %llu, block is %llu-%llu => %d",
      updateblockresponse->block->b->generationstamp,
      lastlocation->b->poolid,
      lastlocation->b->blockid,
      lastlocation->b->generationstamp,
      lastlocation->n_locs,
      len,
      offsetintofile - lastlocation->offset,
      offsetintofile,
      lastlocation->offset,
      lastlocation->offset + fh->blocksize,
      res);
#endif
    if(updateblockresponse)
    {
      hadoop__hdfs__update_block_for_pipeline_response_proto__free_unpacked(updateblockresponse, NULL);
    }
    if(updateresponse)
    {
      hadoop__hdfs__update_pipeline_response_proto__free_unpacked(updateresponse, NULL);
    }
    if(res < 0)
    {
      return res;   // stop writing blocks
    }
  }

  // TODO: fill nulls if offset after last byte.

  // (4) if we've still got some more to write, keep tacking on blocks
  //     and filling them
  while(bufferpos->bufferoffset < bufferpos->len && res >= 0)
  {
    Hadoop__Hdfs__AddBlockRequestProto block_request = HADOOP__HDFS__ADD_BLOCK_REQUEST_PROTO__INIT;
    Hadoop__Hdfs__AddBlockResponseProto * block_response = NULL;
    uint64_t len =  hadoop_fuse_bytestowrite(bufferpos, 0, fh);

    block_request.src = (char *) src;
    block_request.clientname = hadoop_fuse_client_name();
    block_request.previous = *last;
    block_request.has_fileid = true;
    block_request.fileid = fh->fileid;
    block_request.n_excludenodes = 0;
    block_request.n_favorednodes = 0;

    res = CALL_NN("addBlock", block_request, block_response);
    if(res < 0)
    {
      return res;
    }

    res = hadoop_fuse_write_block(
      bufferpos,
      len,
      0, // block offset
      &checksum,
      block_response->block,
      block_response->block);
    if(res < 0)
    {
      // we failed to write data into our new block. We'll try and keep HDFS
      // consistent, so we'll tell the NN to throw away this failed block.
      // This may well work, as "hadoop_fuse_write_block" only talks to a DN, so
      // even if that fails there's every chance the NN will still be responsive.
      Hadoop__Hdfs__AbandonBlockRequestProto abandon_request = HADOOP__HDFS__ABANDON_BLOCK_REQUEST_PROTO__INIT;
      Hadoop__Hdfs__AbandonBlockResponseProto * abandon_response = NULL;

      abandon_request.src = (char *) src;
      abandon_request.holder = hadoop_fuse_client_name();
      abandon_request.b = block_response->block->b;

      CALL_NN("abandonBlock", abandon_request, abandon_response); // ignore return value.
      if(abandon_response)
      {
        hadoop__hdfs__abandon_block_response_proto__free_unpacked(abandon_response, NULL);
      }
    }
    else
    {
#ifndef NDEBUG
      syslog(
        LOG_MAKEPRI(LOG_USER, LOG_DEBUG),
        "hadoop_fuse_do_write added new block %s blk_%llu_%llu on %zd node(s) after %s blk_%llu_%llu & now written %zu bytes => %d",
        block_response->block->b->poolid,
        block_response->block->b->blockid,
        block_response->block->b->generationstamp,
        block_response->block->n_locs,
        *last ? (*last)->poolid : "",
        *last ? (*last)->blockid : 0,
        *last ? (*last)->generationstamp : 0,
        bufferpos->bufferoffset,
        res);
#endif

      hadoop_fuse_clone_block(block_response->block->b, last);
    }

    hadoop__hdfs__add_block_response_proto__free_unpacked(block_response, NULL);
  }

  return res;
}
Example #15
static
int hadoop_fuse_write_block(
  struct Hadoop_Fuse_Buffer_Pos * bufferpos,
  const uint64_t len, // bytes to write
  const off_t blockoffset, // offset into block
  const Hadoop__Hdfs__ChecksumProto * checksum,
  const Hadoop__Hdfs__LocatedBlockProto * block,
  Hadoop__Hdfs__LocatedBlockProto * newblock)
{
  int res;
  Hadoop__Hdfs__ClientOperationHeaderProto clientheader = HADOOP__HDFS__CLIENT_OPERATION_HEADER_PROTO__INIT;
  Hadoop__Hdfs__BaseHeaderProto baseheader = HADOOP__HDFS__BASE_HEADER_PROTO__INIT;
  Hadoop__Hdfs__OpWriteBlockProto oprequest = HADOOP__HDFS__OP_WRITE_BLOCK_PROTO__INIT;
  Hadoop__Hdfs__BlockOpResponseProto * opresponse = NULL;
  bool written = false;
  uint64_t newblocklen = max(newblock->b->has_numbytes ? newblock->b->numbytes : 0, blockoffset + len);

  assert(bufferpos->len > 0);
  assert(bufferpos->bufferoffset >= 0);
  assert(block->n_locs > 0);

  clientheader.clientname = hadoop_fuse_client_name();
  baseheader.block = block->b;
  baseheader.token = newblock->blocktoken;
  clientheader.baseheader = &baseheader;
  oprequest.header = &clientheader;
  oprequest.pipelinesize = block->n_locs; // not actually used?
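  // equal generation stamps mean this is a freshly-allocated block (pipeline create); a bumped stamp means we're appending to an existing block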
  if(newblock->b->generationstamp == block->b->generationstamp)
  {
    oprequest.stage = HADOOP__HDFS__OP_WRITE_BLOCK_PROTO__BLOCK_CONSTRUCTION_STAGE__PIPELINE_SETUP_CREATE;
  }
  else
  {
    oprequest.stage = HADOOP__HDFS__OP_WRITE_BLOCK_PROTO__BLOCK_CONSTRUCTION_STAGE__PIPELINE_SETUP_APPEND;
  }
  oprequest.latestgenerationstamp = newblock->b->generationstamp;
  oprequest.minbytesrcvd = newblock->b->numbytes;
  oprequest.maxbytesrcvd = newblocklen;
  oprequest.requestedchecksum = (Hadoop__Hdfs__ChecksumProto *) checksum;

  // targets are the other DNs in the pipeline that the DN we send our block
  // to will mirror the block to. Clearly this can't include the DN we're sending
  // to, as that would create a loop!
  oprequest.n_targets = block->n_locs - 1;
  oprequest.targets = alloca(oprequest.n_targets * sizeof(Hadoop__Hdfs__DatanodeInfoProto *));

  // for now, don't care about which location we send to, but we should probably
  // choose the "closest".
  for (size_t l = 0; l < block->n_locs && !written; ++l)
  {
    const Hadoop__Hdfs__DatanodeInfoProto * location = block->locs[l];
    struct connection_state dn_state;

    // build the target list without this location
    for(uint32_t t_idx = 0, l_idx = 0; l_idx < block->n_locs; ++l_idx)
    {
      if(l_idx != l)
      {
        oprequest.targets[t_idx++] = block->locs[l_idx];
      }
    }

    memset(&dn_state, 0, sizeof(dn_state));
    res = hadoop_rpc_connect_datanode(&dn_state, location->id->ipaddr, location->id->xferport);
    if(res < 0)
    {
      continue;
    }

    res = hadoop_rpc_call_datanode(&dn_state, 80, (const ProtobufCMessage *) &oprequest, &opresponse);
    if(res == 0)
    {
      hadoop__hdfs__block_op_response_proto__free_unpacked(opresponse, NULL);
      res = hadoop_rpc_send_packets(
        &dn_state,
        bufferpos,
        len,
        blockoffset,
        hadoop_fuse_namenode_state()->packetsize,
        checksum);
      written = res == 0;
    }
    hadoop_rpc_disconnect(&dn_state);

#ifndef NDEBUG
    syslog(
      LOG_MAKEPRI(LOG_USER, LOG_DEBUG),
      "hadoop_fuse_write_block, %s %llu bytes to block offset %llu of block %s blk_%llu_%llu (was: %llu, now: %llu) to DN %s:%d (%zd of %zd) => %d",
      (written ? "written" : "NOT written"),
      len,
      blockoffset,
      block->b->poolid,
      block->b->blockid,
      newblock->b->generationstamp,
      newblock->b->numbytes,
      newblocklen,
      location->id->ipaddr,
      location->id->xferport,
      l + 1,
      block->n_locs,
      res);
#endif
  }

  if(written)
  {
    newblock->b->has_numbytes = true;
    newblock->b->numbytes = newblocklen;
    return len;
  }
  else
  {
    // ensure we return a sensible error code
    return res < 0 ? res : -EIO;
  }
}
Example #16
static
int hadoop_fuse_complete(const char * src, uint64_t fileid, Hadoop__Hdfs__ExtendedBlockProto * last)
{
  int res;
  Hadoop__Hdfs__CompleteRequestProto request = HADOOP__HDFS__COMPLETE_REQUEST_PROTO__INIT;
  Hadoop__Hdfs__CompleteResponseProto * response = NULL;

  request.src = (char *) src;
  request.clientname = hadoop_fuse_client_name();
  request.has_fileid = fileid > 0;
  request.fileid = fileid;
  request.last = last;

  assert(src);

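  // "complete" may legitimately return false (e.g. while the last block reaches minimum replication), so poll until the NN accepts it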
  while(true)
  {
    bool complete;

    res = CALL_NN("complete", request, response);
    if(res < 0)
    {
      return res;
    }
    complete = response->result;
    hadoop__hdfs__complete_response_proto__free_unpacked(response, NULL);
    if(complete)
    {
      break;
    }
    else
    {
      sleep(1);
    }
  }

#ifndef NDEBUG
  if(last)
  {
    syslog(
      LOG_MAKEPRI(LOG_USER, LOG_DEBUG),
      "hadoop_fuse_complete %s (fileid %llu) last block %s blk_%llu_%llu (length = %llu) => %d",
      src,
      fileid,
      last->poolid,
      last->blockid,
      last->generationstamp,
      last->numbytes,
      res);
  }
  else
  {
    syslog(
      LOG_MAKEPRI(LOG_USER, LOG_DEBUG),
      "hadoop_fuse_complete %s (fileid %llu) with NO last block => %d",
      src,
      fileid,
      res);
  }
#endif

  return res;
}
Example #17
int main()
{
	LogEntry loginfo;
	int status;
	char buf[200];


#ifdef LOG_USE_FS2
	#define DEST_IN_USE		LOG_DEST_FS2
	printf("Initializing file system.  Please wait...\n");
	status = fs_init(0,0);
	if (status) {
		printf("Could not initialize filesystem, error number %d\n", errno);
		exit(2);
	}
	printf("...done.  Using LX#%d for logging\n", (int)LOG_FS2_DATALX(0));
#else
	#define DEST_IN_USE		LOG_DEST_XMEM
#endif

#ifdef RESET_ALL_LOGS
	log_open(LOG_DEST_ALL, 1);
#endif

	targetproc_init();

reprint:

#ifdef READ_BACKWARDS
	if (!log_seek(DEST_IN_USE, 1)) {
		printf("Scanning previous log entries, most recent first...\n");
		for (;;) {
			if (log_prev(DEST_IN_USE, &loginfo) < 0)
				break;
			printf("%s\n", log_format(&loginfo, buf, sizeof(buf), 1));
		}
		printf("End of messages.\n");
	}
#else
	if (!log_seek(DEST_IN_USE, 0)) {
		printf("Scanning previous log entries, oldest first...\n");
		for (;;) {
			if (log_next(DEST_IN_USE, &loginfo) < 0)
				break;
			printf("%s\n", log_format(&loginfo, buf, sizeof(buf), 1));
		}
		printf("End of messages.\n");
	}
#endif

#define LOG_TEST_STRING "~~~{ Started test run. }~~~"
	status = log_put(LOG_MAKEPRI(2,LOG_INFO), 0, LOG_TEST_STRING, strlen(LOG_TEST_STRING));
	if (status != 0)
		printf("Failed to add 1st message: %d\n", status);

	for (;;) {
		targetproc_tick();
		if (kbhit()) {
			gets(buf);	// ignore it
			goto reprint;
		}
	}

}
Example #18
static
int hadoop_fuse_ftruncate(const char * path, off_t offset, struct fuse_file_info * fi)
{
  int res;
  Hadoop__Hdfs__GetBlockLocationsRequestProto request = HADOOP__HDFS__GET_BLOCK_LOCATIONS_REQUEST_PROTO__INIT;
  Hadoop__Hdfs__GetBlockLocationsResponseProto * response = NULL;
  Hadoop__Hdfs__ExtendedBlockProto * last = NULL;
  struct Hadoop_Fuse_FileHandle * fh = (struct Hadoop_Fuse_FileHandle *) fi->fh;
  uint64_t oldlength;
  uint64_t newlength = offset;
  uint32_t n_blocks;

  assert(offset >= 0);

  request.src = (char *) path;
  request.offset = 0;
  request.length = 0x7FFFFFFFFFFFFFFF; // not really a uint64_t due to Java
  res = CALL_NN("getBlockLocations", request, response);
  if(res < 0)
  {
    return res;
  }
  if(!response->locations)
  {
    res = -ENOENT;
    goto end;
  }
  oldlength = response->locations->filelength;
  n_blocks = response->locations ? response->locations->n_blocks : 0;

  if(newlength > oldlength)
  {
    // extend the current file by appending NULLs

#ifndef NDEBUG
    syslog(
      LOG_MAKEPRI(LOG_USER, LOG_DEBUG),
      "hadoop_fuse_ftruncate, writing %llu bytes to %s to ftruncate size up to %llu (from %llu)",
      newlength - oldlength,
      path,
      newlength,
      oldlength);
#endif

    res = hadoop_fuse_write(
      path,
      NULL, // append \0s
      newlength - oldlength,
      oldlength,
      fi);
    if(res < 0)
    {
      goto end;
    }
  }
  else if(newlength == oldlength)
  {
#ifndef NDEBUG
    syslog(
      LOG_MAKEPRI(LOG_USER, LOG_DEBUG),
      "hadoop_fuse_ftruncate making no changes to %s (size %llu)",
      path,
      oldlength);
#endif
    goto end;
  }
  else
  {
    struct Hadoop_Fuse_Buffer byteskept = {
      .data = NULL,
      .len = 0
    };
    uint64_t bytesoffset = 0;
    uint32_t lastkeptblock = n_blocks;

    for(uint32_t b = 0; b < n_blocks; ++b)
    {
      Hadoop__Hdfs__LocatedBlockProto * block = response->locations->blocks[b];
      uint64_t blockstart = block->offset;
      uint64_t blockend = blockstart + block->b->numbytes;

      if(blockend <= newlength)
      {
        // we need the whole of this block...
        lastkeptblock = b;
        continue;
      }

      if(blockstart < newlength)
      {
        // we need some bits of this block
        byteskept.len = newlength - blockstart;

        // we need a bit of this block (this block of code should
        // only be executed for one of the blocks in the file). However,
        // HDFS blocks are append-only so we have to get the data we want
        // to preserve, abandon the old (longer) block, add a new (shorter)
        // block and put the data back.
        byteskept.data = malloc(byteskept.len);
        if(!byteskept.data)
        {
          res = -ENOMEM;
          goto end;
        }

        res = hadoop_fuse_read(path, byteskept.data, byteskept.len, block->offset, fi);
        if(res < 0)
        {
          goto end;
        }
        bytesoffset = blockstart; // where to put them back
      }

      break;
    }

    if(lastkeptblock == n_blocks)
    {
      // we're dropping all of them
      Hadoop__Hdfs__GetFileInfoRequestProto filerequest = HADOOP__HDFS__GET_FILE_INFO_REQUEST_PROTO__INIT;
      Hadoop__Hdfs__GetFileInfoResponseProto * fileresponse = NULL;
      Hadoop__Hdfs__CreateRequestProto overwriterequest = HADOOP__HDFS__CREATE_REQUEST_PROTO__INIT;
      Hadoop__Hdfs__CreateResponseProto * overwriteresponse = NULL;

      filerequest.src = (char *) path;
      res = CALL_NN("getFileInfo", filerequest, fileresponse);
      if(res < 0)
      {
        return res;
      }

      overwriterequest.src = (char *) path;
      overwriterequest.clientname = hadoop_fuse_client_name();
      overwriterequest.createparent = false;
      overwriterequest.masked = fileresponse->fs->permission;
      overwriterequest.createflag = HADOOP__HDFS__CREATE_FLAG_PROTO__OVERWRITE; // must already exist
      overwriterequest.replication = fileresponse->fs->block_replication;
      overwriterequest.blocksize = fileresponse->fs->blocksize;

#ifndef NDEBUG
      syslog(
        LOG_MAKEPRI(LOG_USER, LOG_DEBUG),
        "hadoop_fuse_ftruncate, overwriting %s",
        path);
#endif

      res = CALL_NN("create", overwriterequest, overwriteresponse);
      hadoop__hdfs__get_file_info_response_proto__free_unpacked(fileresponse, NULL);
      if(res < 0)
      {
        return res;
      }
      if(overwriteresponse->fs->has_fileid)
      {
        fh->fileid = overwriteresponse->fs->fileid;
      }
      hadoop__hdfs__create_response_proto__free_unpacked(overwriteresponse, NULL);

      hadoop_fuse_clone_block(NULL, &last);
    }
    else
    {
      // put the abandon block messages here as we may make more than one call
      Hadoop__Hdfs__AbandonBlockRequestProto abandon_request = HADOOP__HDFS__ABANDON_BLOCK_REQUEST_PROTO__INIT;
      Hadoop__Hdfs__AbandonBlockResponseProto * abandon_response = NULL;

      abandon_request.src = (char *) path;
      abandon_request.holder = hadoop_fuse_client_name();

      // we need to lock the file to drop blocks, and we'll always drop
      // at least one block as the new size is strictly less than the old
      // size.
      res = hadoop_fuse_lock(path, &last);
      if(res < 0)
      {
        goto end;
      }

      for(uint32_t b = n_blocks - 1; b > lastkeptblock; --b)
      {
        Hadoop__Hdfs__LocatedBlockProto * block = response->locations->blocks[b];

        abandon_request.b = block->b;

#ifndef NDEBUG
        syslog(
          LOG_MAKEPRI(LOG_USER, LOG_DEBUG),
          "hadoop_fuse_ftruncate, dropping block %s blk_%llu_%llu of %s as start %llu + len %llu >= new len %llu (keeping %zd bytes)",
          block->b->poolid,
          block->b->blockid,
          block->b->generationstamp,
          path,
          block->offset,
          block->b->numbytes,
          newlength,
          byteskept.len);
#endif

        res = CALL_NN("abandonBlock", abandon_request, abandon_response);
        if(res < 0)
        {
          goto end;
        }
        hadoop__hdfs__abandon_block_response_proto__free_unpacked(abandon_response, NULL);
      }

      hadoop_fuse_clone_block(response->locations->blocks[lastkeptblock]->b, &last);
    }

    if(byteskept.data)
    {
      struct Hadoop_Fuse_Buffer_Pos bufferpos = {
        .buffers = &byteskept,
        .n_buffers = 1,
        .bufferoffset = 0,
        .len = byteskept.len
      };
      res = hadoop_fuse_do_write(
        path,
        &bufferpos,
        bytesoffset,
        (struct Hadoop_Fuse_FileHandle *) fi->fh,
        NULL, // we don't care about the old locations as we're appending a new block.
        &last);
      free(byteskept.data);
      if(res < 0)
      {
        goto end;
      }
    }

    res = hadoop_fuse_complete(path, fh->fileid, last);
    if(res < 0)
    {
      goto end;
    }
  }

  res = 0;

end:
  hadoop__hdfs__get_block_locations_response_proto__free_unpacked(response, NULL);
  if(last)
  {
    hadoop__hdfs__extended_block_proto__free_unpacked(last, NULL);
  }
  return res;
}

static
int hadoop_fuse_truncate(const char * path, off_t offset)
{
  return hadoop_fuse_ftruncate(path, offset, NULL);
}

enum Hadoop_Fuse_Write_Buffers {
  TRUNCATE = 0,
  NULLPADDING = 1,
  THEDATA = 2,
  TRAILINGDATA = 3
};
#define Hadoop_Fuse_Write_Buffers_Count 4

static
int hadoop_fuse_write(
  const char * src,
  const char * const data,
  const size_t len,
  const off_t offset,
  struct fuse_file_info * fi)
{
  int res;
  Hadoop__Hdfs__GetBlockLocationsRequestProto request = HADOOP__HDFS__GET_BLOCK_LOCATIONS_REQUEST_PROTO__INIT;
  Hadoop__Hdfs__GetBlockLocationsResponseProto * response = NULL;
  Hadoop__Hdfs__ExtendedBlockProto * last = NULL;
  struct Hadoop_Fuse_FileHandle * fh = (struct Hadoop_Fuse_FileHandle *) fi->fh;
  uint64_t offsetintofile = offset;
  uint64_t lastblockoffset;
  uint32_t n_blocks;

  // at most 4: truncated head bytes, null padding, the data itself and any trailing bytes.
  struct Hadoop_Fuse_Buffer buffers[Hadoop_Fuse_Write_Buffers_Count];
  memset(&buffers[0], 0, Hadoop_Fuse_Write_Buffers_Count * sizeof(struct Hadoop_Fuse_Buffer));
  struct Hadoop_Fuse_Buffer_Pos bufferpos = {
    .buffers = &buffers[0],
    .n_buffers = Hadoop_Fuse_Write_Buffers_Count,
    .bufferoffset = 0,
    .len = 0
  };

  request.src = (char *) src;
  request.offset = 0;
  request.length = 0x7FFFFFFFFFFFFFFF; // not really a uint64_t due to Java
  res = CALL_NN("getBlockLocations", request, response);
  if(res < 0)
  {
    return res;
  }
  if(!response->locations)
  {
    res = -ENOENT;
    goto end;
  }
  n_blocks = response->locations->n_blocks;
  lastblockoffset = n_blocks == 0 ? 0 : response->locations->blocks[n_blocks - 1]->offset;

  if(offsetintofile > response->locations->filelength)
  {
    // we should pad the end of the current file with nulls
    buffers[NULLPADDING].len = offsetintofile - response->locations->filelength; // pad from the old EOF up to the write offset
    offsetintofile = response->locations->filelength;
    bufferpos.len += buffers[NULLPADDING].len;
  }

  if(n_blocks > 0 && offsetintofile < lastblockoffset)
  {
    uint64_t truncateto = 0; // if even the first block is cut, truncate the file to zero

    buffers[TRAILINGDATA].len = response->locations->filelength - min(response->locations->filelength, offsetintofile + len);
    bufferpos.len += buffers[TRAILINGDATA].len;

    if(buffers[TRAILINGDATA].len > 0)
    {
#ifndef NDEBUG
      syslog(
        LOG_MAKEPRI(LOG_USER, LOG_DEBUG),
        "hadoop_fuse_write %s stashing %zd bytes to append after write as offset %llu + len %zd < last block offset %llu",
        src,
        buffers[TRAILINGDATA].len,
        offsetintofile,
        len,
        lastblockoffset);
#endif

      buffers[TRAILINGDATA].data = malloc(buffers[TRAILINGDATA].len);
      if(!buffers[TRAILINGDATA].data)
      {
        res = -ENOMEM;
        goto end;
      }
      res = hadoop_fuse_read(
        src,
        buffers[TRAILINGDATA].data,
        buffers[TRAILINGDATA].len,
        offsetintofile + len,
        fi);
      if(res < 0)
      {
        goto end;
      }
    }

    // since we can modify the last block, we only need to throw
    // away blocks (that we've just saved) until we are the last one.
    // However, HDFS semantics mean we can't then overwrite this newly-
    // uncovered last block as it's no longer "under construction". We'll
    // therefore keep the part of that block before our write and drop
    // (and so recreate) the whole thing.

    for(uint32_t b = 0; b < n_blocks; ++b)
    {
      Hadoop__Hdfs__LocatedBlockProto * block = response->locations->blocks[b];
      uint64_t blockend = block->offset + block->b->numbytes;

      if(blockend < offsetintofile)
      {
        truncateto = blockend;
      }
      else
      {
        // we're in the first block we'll truncate
        buffers[TRUNCATE].len = offsetintofile - block->offset;
        bufferpos.len += buffers[TRUNCATE].len;

        if(buffers[TRUNCATE].len > 0)
        {
          buffers[TRUNCATE].data = malloc(buffers[TRUNCATE].len);
          if(!buffers[TRUNCATE].data)
          {
            res = -ENOMEM;
            goto end;
          }
          res = hadoop_fuse_read(
            src,
            buffers[TRUNCATE].data,
            buffers[TRUNCATE].len,
            block->offset,
            fi);
          if(res < 0)
          {
            goto end;
          }
          offsetintofile -= buffers[TRUNCATE].len;
        }

        // keep our response in sync
        n_blocks = b;
        if(b == 0)
        {
          hadoop_fuse_clone_block(NULL, &last);
        }
        else
        {
          hadoop_fuse_clone_block(response->locations->blocks[b - 1]->b, &last);
        }

        break;
      }
    }

#ifndef NDEBUG
    syslog(
      LOG_MAKEPRI(LOG_USER, LOG_DEBUG),
      "hadoop_fuse_write %s truncating down to %llu bytes",
      src,
      truncateto);
#endif
    res = hadoop_fuse_ftruncate(src, truncateto, fi);
    if(res < 0)
    {
      goto end;
    }
  }

  res = hadoop_fuse_lock(src, &last);
  if(res < 0)
  {
    goto end;
  }

  buffers[THEDATA].data = (char *) data;
  buffers[THEDATA].len = len;
  bufferpos.len += len;

  res = hadoop_fuse_do_write(
    src,
    &bufferpos,
    offsetintofile,
    fh,
    n_blocks == 0 ? NULL : response->locations->blocks[n_blocks - 1],
    &last);

  // persist the data we've just written, & release the lease we have on "src"
  {
    int complete = hadoop_fuse_complete(src, fh->fileid, last);
    if(res >= 0 && complete < 0)
    {
      res = complete; // use this error if we don't have another
    }
  }

end:
  free(buffers[TRUNCATE].data);
  free(buffers[TRAILINGDATA].data);
  if(response)
  {
    hadoop__hdfs__get_block_locations_response_proto__free_unpacked(response, NULL);
  }
  if(last)
  {
    // these are clones, so we have to free them
    hadoop__hdfs__extended_block_proto__free_unpacked(last, NULL);
  }
  return res < 0 ? res : len;
}

static
int hadoop_fuse_read(const char * src, char * buf, size_t size, off_t offset, struct fuse_file_info * fi)
{
  (void) fi;
  int res;
  Hadoop__Hdfs__GetBlockLocationsRequestProto request = HADOOP__HDFS__GET_BLOCK_LOCATIONS_REQUEST_PROTO__INIT;
  Hadoop__Hdfs__GetBlockLocationsResponseProto * response = NULL;
  Hadoop__Hdfs__ClientOperationHeaderProto clientheader = HADOOP__HDFS__CLIENT_OPERATION_HEADER_PROTO__INIT;
  Hadoop__Hdfs__BaseHeaderProto baseheader = HADOOP__HDFS__BASE_HEADER_PROTO__INIT;
  Hadoop__Hdfs__OpReadBlockProto op = HADOOP__HDFS__OP_READ_BLOCK_PROTO__INIT;

  request.src = (char *) src;
  request.offset = offset;
  request.length = size;
  res = CALL_NN("getBlockLocations", request, response);
  if(res < 0)
  {
    return res;
  }
  if(!response->locations)
  {
    res = -ENOENT;
    goto end;
  }
  if(response->locations->underconstruction)
  {
    // This means that some (other?) client has the lease on this
    // file. Try again later - maybe they'll have released it?
    res = -EAGAIN;
    goto end;
  }

  clientheader.clientname = hadoop_fuse_client_name();

  for (size_t b = 0; b < response->locations->n_blocks; ++b)
  {
    Hadoop__Hdfs__LocatedBlockProto * block = response->locations->blocks[b];
    Hadoop__Hdfs__BlockOpResponseProto * opresponse = NULL;
    bool read = false;

    if(block->corrupt)
    {
      res = -EBADMSG;
      continue;
    }

    baseheader.block = block->b;
    clientheader.baseheader = &baseheader;
    op.header = &clientheader;
    op.has_sendchecksums = true;
    op.sendchecksums = false;
    op.offset = offset > block->offset ? offset - block->offset : 0;     // offset into file -> offset into block
    op.len = min(block->b->numbytes - op.offset, size);

    // for now, don't care about which location we get it from
    for (size_t l = 0; l < block->n_locs && !read; ++l)
    {
      Hadoop__Hdfs__DatanodeInfoProto * location = block->locs[l];
      struct connection_state dn_state;
      uint64_t skipbytes;

      memset(&dn_state, 0, sizeof(dn_state));
      res = hadoop_rpc_connect_datanode(&dn_state, location->id->ipaddr, location->id->xferport);
      if(res < 0)
      {
        continue;
      }

      res = hadoop_rpc_call_datanode(&dn_state, 81, (const ProtobufCMessage *) &op, &opresponse);
      if(res < 0)
      {
        hadoop_rpc_disconnect(&dn_state);
        continue;
      }
      if(opresponse->readopchecksuminfo)
      {
        skipbytes =  op.offset - opresponse->readopchecksuminfo->chunkoffset;
      }
      else
      {
        skipbytes = 0;
      }
      hadoop__hdfs__block_op_response_proto__free_unpacked(opresponse, NULL);

      res = hadoop_rpc_receive_packets(
        &dn_state,
        skipbytes,
        op.len,
        (uint8_t *) buf);
      if(res < 0)
      {
        hadoop_rpc_disconnect(&dn_state);
        continue;
      }

      read = true;
      hadoop_rpc_disconnect(&dn_state);
    }

#ifndef NDEBUG
    syslog(
      LOG_MAKEPRI(LOG_USER, LOG_DEBUG),
      "hadoop_fuse_read, %s %llu (of %llu) bytes at offset %llu of block %s blk_%llu_%llu => %d",
      (read ? "read" : "NOT read"),
      op.len,
      block->b->numbytes,
      op.offset,
      block->b->poolid,
      block->b->blockid,
      block->b->generationstamp,
      res);
#endif

    if(!read)
    {
      res = res < 0 ? res : -EIO; // use existing error as more specific
      goto end;
    }
  }

  // FUSE semantics: return the number of bytes read on success
  res = size;

end:
#ifndef NDEBUG
  syslog(
    LOG_MAKEPRI(LOG_USER, LOG_DEBUG),
    "hadoop_fuse_read, read %zd bytes from %s (offset %zd) => %d",
    size,
    src,
    offset,
    res);
#endif
  hadoop__hdfs__get_block_locations_response_proto__free_unpacked(response, NULL);
  return res;
}

static
void * hadoop_fuse_init(struct fuse_conn_info * conn)
{
  conn->want |= FUSE_CAP_ATOMIC_O_TRUNC;
  conn->capable |= FUSE_CAP_ATOMIC_O_TRUNC;
  return fuse_get_context()->private_data;
}
Example #19
inline void LoggerOutputSyslog::Output(const int level, const std::string& msg) {
	syslog(LOG_MAKEPRI(LOG_DAEMON, level), "%s", msg.c_str());
}
Example #20
static gpg_error_t
internal_log_write (log_handle_t handle, log_level_t level,
		    const char *fmt, va_list ap)
{
  gpg_error_t err = 0;

  assert (handle->backend != LOG_BACKEND_NONE);

  /* FIXME: shall we do error checking here?  And what if an error
     occurs?  */

  if (level < handle->min_level)
    /* User does not want to receive messages for level smaller than
       min_level. */
    return 0;

  if (handle->backend == LOG_BACKEND_SYSLOG)
    {
      int syslog_priority;

      switch (level)
	{
	case LOG_LEVEL_DEBUG:
	  syslog_priority = LOG_DEBUG;
	  break;

	case LOG_LEVEL_INFO:
	  syslog_priority = LOG_INFO;
	  break;

	case LOG_LEVEL_ERROR:
	  syslog_priority = LOG_ERR;
	  break;

	case LOG_LEVEL_FATAL:
	  syslog_priority = LOG_ALERT;
	  break;

	default:
	  /* FIXME: what to do when the user passes an invalid log
	     level? -mo */
	  syslog_priority = LOG_ERR;
	  break;
	}
	  
      vsyslog (LOG_MAKEPRI (LOG_AUTH, syslog_priority), fmt, ap);
      err = 0;
    }
  else if (handle->backend == LOG_BACKEND_STREAM
	   || handle->backend == LOG_BACKEND_FILE)
    {
      FILE *stream = handle->stream;

      assert (stream);

      if ((handle->flags & LOG_FLAG_WITH_PREFIX) && (*handle->prefix != 0))
	fprintf (stream, "%s ", handle->prefix);

      if (handle->flags & LOG_FLAG_WITH_TIME)
	{
	  struct tm *tp;
	  time_t atime = time (NULL);
          
	  tp = localtime (&atime);
	  fprintf (stream, "%04d-%02d-%02d %02d:%02d:%02d ",
		   1900+tp->tm_year, tp->tm_mon+1, tp->tm_mday,
		   tp->tm_hour, tp->tm_min, tp->tm_sec);
	}

      if (handle->flags & LOG_FLAG_WITH_PID)
	fprintf (stream, "[%u] ", (unsigned int) getpid ());

      switch (level)
	{
	case LOG_LEVEL_ERROR:
	case LOG_LEVEL_FATAL:
	  fprintf (stream, "error: ");
	  break;

	case LOG_LEVEL_DEBUG:
	  fprintf (stream, "debug: ");
	  break;

	case LOG_LEVEL_INFO:
	  break;
	}

      vfprintf (stream, fmt, ap);
      putc ('\n', stream);

      err = 0;
    }

  return err;
}
Example #21
void main(void)
{
	long lxsize;
	LogEntry loginfo;
	int status;
	char buf[200];

	/* File system setup and partitioning */
	fs_ext = fs_get_flash_lx();
	if (fs_ext == 0) {
		printf("No flash available!\n");
		exit(1);
	}

	/*
	 * Get the size of the entire flash with the given sector size
	 */
	lxsize = fs_get_lx_size(fs_ext, 1, MY_LS_SHIFT);
	/*
	 * Partition the filesystem - always give 1/8 to the backup partition, as we
	 * have room to spare.
	 */
	backup_ext = fs_setup(fs_ext, MY_LS_SHIFT, 0, NULL, FS_PARTITION_FRACTION,
	                      0x2000, MY_LS_SHIFT, 0, NULL);
	if (backup_ext == 0) {
		printf("Could not create backup extent!\n");
		exit(2);
	}

	lxsize = fs_get_lx_size(fs_ext, 1, MY_LS_SHIFT);
	lxsize = fs_get_lx_size(backup_ext, 1, MY_LS_SHIFT);

	if (fs_init(0, 0) != 0) {
		printf("Filesystem failed to initialize!\n");
		exit(3);
	}
#ifdef FORMAT
	if (lx_format(fs_ext, 0) != 0) {
		printf("Filesystem failed to format!\n");
		exit(4);
	}
	if (lx_format(backup_ext, 0) != 0) {
		printf("Backup area failed to format!\n");
		exit(5);
	}
#endif

	fs_set_lx(fs_ext, fs_ext);

	/*
	 * Reset all logs if requested.
	 */
#ifdef RESET_ALL_LOGS
	log_open(LOG_DEST_ALL, 1);
#endif

	/*
	 * This call is necessary to initialize target communications
	 * between the DeviceMate and the target processor.
	 */
	targetproc_init();

	/*
	 * Initialize the TCP/IP stack and the web server.
	 */
	sock_init();
	http_init();

	/*
	 * The following improves interactive performance of the web server.
	 */
	tcp_reserveport(80);
	
	/*
	 * Print out previous log entries
	 */
	if (!log_seek(LOG_DEST_FS2, 0)) {
		printf("Scanning previous log entries, oldest first...\n");
		for (;;) {
			if (log_next(LOG_DEST_FS2, &loginfo) < 0)
				break;
			printf("%s\n", log_format(&loginfo, buf, sizeof(buf), 1));
		}
		printf("End of messages.\n");
	}

	/*
	 * Log an initial entry.
	 */
#define LOG_TEST_STRING "~~~{ Started test run. }~~~"
	status = log_put(LOG_MAKEPRI(2,LOG_INFO), 0, LOG_TEST_STRING, strlen(LOG_TEST_STRING));
	if (status != 0) {
		printf("Failed to add 1st message: %d\n", status);
	}

	/*
	 * Drive the target communications and the web server continuously.
	 * This is all that is necessary as the main part of the program.
	 */
	for (;;) {
		targetproc_tick();
		http_handler();
	}
}