// Poll one key from ncurses and update menu selection / delegate to the
// selected entry, then repaint the whole menu with the current entry
// highlighted.  Requires mConsoleDisplay and mMenuEntries to be non-null.
void update() {
      AT_ASSERT(ConsoleMenu, mConsoleDisplay, "mConsoleDisplay is 0!");
      AT_ASSERT(ConsoleMenu, mMenuEntries, "mMenuEntries is 0!");
      
      // Menu handling
      int key = getch();

      switch(key) {
      case KEY_UP:
	// Move selection up, skipping separator entries (empty title).
	while (mPosition > 0) {
	  mPosition--;
	  bool isSeparator =  ! strcmp(mMenuEntries->at(mPosition)->getTitle(), "");
	  if (! isSeparator) { break; }
	}
	break;
      
      case KEY_DOWN:
	// Move selection down, skipping separators.
	// NOTE(review): if mMenuEntries is empty, size() - 1 underflows the
	// unsigned type and the loop indexes out of range — confirm callers
	// guarantee a non-empty menu.
	while (mPosition < mMenuEntries->size() - 1) {
	  mPosition++;
	  bool isSeparator =  ! strcmp(mMenuEntries->at(mPosition)->getTitle(), "");
	  if (! isSeparator) { break; }
	}
	break;
    
      case 9: /* TAB */
	// Cycle forward with wrap-around, skipping separators.
	// NOTE(review): an all-separator (or empty) menu would loop forever
	// (or divide by zero in the modulo) — presumably excluded by design.
	bool isSeparator;
	do {
	  mPosition++;
	  mPosition = mPosition % mMenuEntries->size();
	
	  isSeparator =  ! strcmp(mMenuEntries->at(mPosition)->getTitle(), "");
	} while (isSeparator);
	break;

      case 'q':
	// Request menu-loop exit; presumably checked by the caller's loop.
	mWantExit = true;
	break;
	
      default:
	// Any other key is forwarded to the currently selected entry.
	mMenuEntries->at(mPosition)->update(key);

	break;
      } // end switch

      // Update menu display, highlight selected menu entry
      for (std::vector<MenuEntryT *>::const_iterator it = mMenuEntries->begin(); it != mMenuEntries->end(); ++it) {
	const MenuEntryT * entry = *it;
	size_t i = std::distance(mMenuEntries->begin(), it);
	// NOTE(review): i is size_t; comparison assumes mPosition is
	// non-negative / compatible in signedness — confirm its type.
	if (i == mPosition) { attron(A_STANDOUT); }
	mConsoleDisplay->print(cLeftMenuBorder, cTopMenuBorder + i, "%s", entry->getTitle());
	if (i == mPosition) { attroff(A_STANDOUT); }
	// Print corresponding value...
	mConsoleDisplay->print(cLeftMenuBorder + cLeftMenuWidth, cTopMenuBorder + i, "%s", entry->getValueStr().c_str());
      }
    }
示例#2
0
// Run one training epoch: forward, NLL loss, backward, optimizer step,
// printing progress every kLogInterval batches.
//
// @param epoch        1-based epoch number (display only).
// @param model        network to train (switched to train mode here).
// @param device       device the batch tensors are moved to.
// @param data_loader  yields batches with .data and .target tensors.
// @param optimizer    optimizer whose gradients are zeroed/stepped per batch.
// @param dataset_size total sample count, for the progress display.
void train(
    int32_t epoch,
    Net& model,
    torch::Device device,
    DataLoader& data_loader,
    torch::optim::Optimizer& optimizer,
    size_t dataset_size) {
  model.train();
  size_t batch_idx = 0;
  for (auto& batch : data_loader) {
    auto data = batch.data.to(device), targets = batch.target.to(device);
    optimizer.zero_grad();
    auto output = model.forward(data);
    auto loss = torch::nll_loss(output, targets);
    // Abort early on divergence rather than training on NaN loss.
    AT_ASSERT(!std::isnan(loss.template item<float>()));
    loss.backward();
    optimizer.step();

    if (batch_idx++ % kLogInterval == 0) {
      // Format specifiers must match argument types exactly: epoch is
      // int32_t (%d), the counts are size_t (%zu).  The old "%ld" for these
      // was undefined behavior on platforms where long != the actual type.
      std::printf(
          "\rTrain Epoch: %d [%5zu/%5zu] Loss: %.4f",
          epoch,
          static_cast<size_t>(batch_idx * batch.data.size(0)),
          dataset_size,
          loss.template item<float>());
    }
  }
}
示例#3
0
// Classify whether a strided tensor may alias itself in memory.
// Returns NO for contiguous tensors, YES when any stride is zero
// (distinct indices then address the same element), and TOO_HARD
// for every other striding pattern this check does not analyze.
MemOverlap has_internal_overlap(TensorImpl* t) {
  AT_ASSERT(t->layout() == kStrided);

  // Contiguous layout maps every index to a distinct element.
  if (t->is_contiguous()) {
    return MemOverlap::NO;
  }

  // Any zero stride is a definite self-overlap.
  for (const int64_t stride : t->strides()) {
    if (stride == 0) {
      return MemOverlap::YES;
    }
  }

  return MemOverlap::TOO_HARD;
}
示例#4
0
/*
 * Open the debug-message log file, rotating it when it grows too large.
 *
 * Opens 'filename' in append mode so earlier messages are preserved; if
 * the file already exceeds 512 KiB it is truncated and reopened fresh.
 * Stores the handle in the file-scope s_pf, which must be NULL on entry.
 *
 * Returns 0 on success, -1 if the file could not be opened.
 * (The original version always returned 0, hiding open failures.)
 */
int dbgMsg2FileOpen( const char * filename )
{
    AT_ASSERT( NULL == s_pf );

    s_pf = fopen(filename, "a+");
    if( NULL != s_pf ) {
        /* Rotate: truncate the log once it passes 512 KiB. */
        fseek(s_pf, 0, SEEK_END);
        long size = ftell(s_pf);
        if( size > 512 * 1024 ) {
            fclose(s_pf);
            s_pf = fopen(filename, "w");
        }
    }
    return (NULL != s_pf) ? 0 : -1;
}
示例#5
0
/*
 * Self-test of the T-Flash card: write a known pattern to a temp file,
 * read it back and compare.  The temp file is removed afterwards.
 * Returns 0 on success, or a negative code identifying the failing step.
 */
int tcardRWTest( void )
{
    FILE * file = fopen(TCARD_FILE_NAME, "w");
    if( NULL == file ) {
        ERRMSG("open '%s'(rw) fail: %s(%d)\n", TCARD_FILE_NAME, strerror(errno), errno);
        return -1;
    }

    /* Write the whole pattern, including its trailing NUL. */
    size_t written = fwrite(TCARD_TEST_CONTENT, 1, sizeof(TCARD_TEST_CONTENT), file);
    if( written != sizeof(TCARD_TEST_CONTENT) ) {
        ERRMSG("write '%s' fail: %s(%d)\n", TCARD_FILE_NAME, strerror(errno), errno);
        fclose(file);
        return -2;
    }
    fclose(file);

    file = fopen(TCARD_FILE_NAME, "r");
    if( NULL == file ) {
        ERRMSG("open '%s'(ronly) fail: %s(%d)\n", TCARD_FILE_NAME, strerror(errno), errno);
        return -3;
    }

    /* The read buffer below must be able to hold the whole pattern. */
    AT_ASSERT( sizeof(TCARD_TEST_CONTENT) < 128 );

    char readback[128];
    size_t got = fread(readback, 1, sizeof(TCARD_TEST_CONTENT), file);
    if( got != sizeof(TCARD_TEST_CONTENT) ) {
        ERRMSG("read '%s' fail: %s(%d)\n", TCARD_FILE_NAME, strerror(errno), errno);
        fclose(file);
        return -4;
    }
    fclose(file);

    /* Clean up before comparing so the file is removed on every outcome. */
    unlink(TCARD_FILE_NAME);

    if( strncmp(readback, TCARD_TEST_CONTENT, sizeof(TCARD_TEST_CONTENT) - 1) ) {
        ERRMSG("read = %s, dst = %s\n", readback, TCARD_TEST_CONTENT);
        return -5;
    }

    INFMSG("TFlash Card rw OK.\n");
    return 0;
}
示例#6
0
/*
 * Read up to 'size' bytes from the channel descriptor 'fd' into 'buf'.
 *
 * Returns the read() result (bytes read, 0, or -1 with errno set), or
 * -1 immediately if 'fd' is invalid.
 *
 * NOTE: 'timeout' is currently ignored.  The driver does not support the
 * poll() readiness mechanism, so the poll-based wait that once guarded
 * this read was removed and the call is a plain (possibly blocking)
 * read().  The parameter is kept for interface compatibility.
 */
int chlRead( int fd, uchar * buf, int size, int timeout )
{
    if( fd < 0 ) {
        return -1;
    }
    DBGMSG(" %s enter\n", __FUNCTION__);

    AT_ASSERT( buf != NULL && size > 0 );

    (void)timeout;  /* unused until the driver supports poll() */

    int ret = read(fd, buf, size);

    DBGMSG(" %s exit\n", __FUNCTION__);
    return ret;
}
示例#7
0
// Allocate a fresh quantized CPU tensor of the given sizes.
//
// @param sizes      requested tensor dimensions (each must be non-negative).
// @param options    must specify a CPU device and a quantized integer dtype.
// @param quantizer  quantizer attached to the resulting QTensorImpl.
// @return a new contiguous quantized tensor backed by one flat allocation.
inline Tensor new_qtensor_cpu(
    IntArrayRef sizes,
    const TensorOptions& options,
    QuantizerPtr quantizer) {
  // This helper only builds CPU tensors.
  AT_ASSERT(options.device().is_cpu());

  native::check_size_nonnegative(sizes);
  auto* allocator = at::getCPUAllocator();
  int64_t nelements = at::prod_intlist(sizes);
  auto dtype = options.dtype();
  // Only quantized integer scalar types are valid here.
  AT_CHECK(isQIntType(typeMetaToScalarType(dtype)),
           "ScalarType is not supported in new_qtensor_cpu.");
  // One flat buffer of nelements * itemsize bytes backs the whole tensor.
  auto storage = c10::make_intrusive<StorageImpl>(
      dtype,
      nelements,
      allocator->allocate(nelements * dtype.itemsize()),
      allocator,
      /*resizable=*/true);
  auto tensor = detail::make_tensor<QTensorImpl>(
      storage, at::QuantizedCPUTensorId(), quantizer);
  // Record the sizes with contiguous striding.
  get_qtensorimpl(tensor)->set_sizes_contiguous(sizes);
  return tensor;
}