#ifdef SEM_DEBUG
void
semaphore_leave_dbg (int ln, char *file, semaphore_t *sem)
#else
void
semaphore_leave (semaphore_t *sem)
#endif
{
  thread_t *thr;
  int rc;

  rc = pthread_mutex_lock ((pthread_mutex_t*) sem->sem_handle);
  CKRET (rc);

#ifdef SEM_DEBUG
    {
      int inx;
      /* Debug trap wired to one specific call site (line 304 in the caller). */
      if (304 == ln && sem->sem_entry_count)
	GPF_T1 ("should have 0 count when signalling clrg_wait");
      for (inx = MAX_SEM_ENT - 1; inx > 0; inx--)
	{
	  sem->sem_last_left_line[inx] = sem->sem_last_left_line[inx - 1];
	  sem->sem_last_left_file[inx] = sem->sem_last_left_file[inx - 1];
	}
      sem->sem_last_left_line[0] = ln;
      sem->sem_last_left_file[0] = file;
    }
#endif
  if (sem->sem_entry_count)
    sem->sem_entry_count++;
  else
    {
#ifndef SEM_NO_ORDER
      thr = thread_queue_from (&sem->sem_waiting);
      if (thr)
	{
	  _thread_num_wait--;
	  assert (thr->thr_status == WAITSEM);
	  thr->thr_status = RUNNING;
	  pthread_cond_signal ((pthread_cond_t *) thr->thr_cv);
	}
      else
	sem->sem_entry_count++;
#else
      if (sem->sem_waiting.thq_count)
	{
	  _thread_num_wait--;
	  sem->sem_n_signalled++;	/* semaphore_enter () waits for this counter to move */
	  pthread_cond_signal ((pthread_cond_t *) sem->sem_cv);
	}
      else
	sem->sem_entry_count++;
#endif
    }

  rc = pthread_mutex_unlock ((pthread_mutex_t*) sem->sem_handle);
  CKRET (rc);
  return;

failed:
  GPF_T1 ("semaphore_leave() failed");
}
dk_mutex_t *
mutex_allocate_typed (int type)
{
  int rc;
  static int is_initialized = 0;
  NEW_VARZ (dk_mutex_t, mtx);
  mtx->mtx_type = type;
#if HAVE_SPINLOCK
  if (MUTEX_TYPE_SPIN == type)
    {
      pthread_spin_init (&mtx->l.spinl, 0);
    }
  else
#endif
    {
      memset ((void *) &mtx->mtx_mtx, 0, sizeof (pthread_mutex_t));
#ifndef OLD_PTHREADS
      if (!is_initialized)
	{
	  pthread_mutexattr_init (&_mutex_attr);
#if defined (PTHREAD_PROCESS_PRIVATE) && !defined(oldlinux) && !defined (__FreeBSD__)	  
	  rc = pthread_mutexattr_setpshared (&_mutex_attr, PTHREAD_PROCESS_PRIVATE);
	  CKRET (rc);
#endif

#ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
	  rc = pthread_mutexattr_settype (&_mutex_attr, PTHREAD_MUTEX_ADAPTIVE_NP);
	  CKRET (rc);
#endif
	  is_initialized = 1;
	}
      rc = pthread_mutex_init (&mtx->mtx_mtx, &_mutex_attr);
#else
      rc = pthread_mutex_init (&mtx->mtx_mtx, _mutex_attr);
#endif
      CKRET (rc);
    }
#ifdef MTX_DEBUG
  mtx->mtx_owner = NULL;
#endif
#ifdef MTX_METER
  if (all_mtxs_mtx)
    mutex_enter (all_mtxs_mtx);
  dk_set_push (&all_mtxs, (void*)mtx);
  if (all_mtxs_mtx)
    mutex_leave (all_mtxs_mtx);
#endif
  return mtx;

failed:
  dk_free (mtx, sizeof (dk_mutex_t));
  return NULL;
}
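/*
 *  Illustrative sketch (not compiled): guarding a shared counter with a
 *  mutex from mutex_allocate_typed () above.  MUTEX_TYPE_SPIN is taken
 *  from the spinlock branch of that function; mutex_leave () is the
 *  counterpart of mutex_enter () used elsewhere in this file.
 */
#if 0
static dk_mutex_t *counter_mtx;	/* created once at startup */
static long counter;

static void
example_count (void)
{
  mutex_enter (counter_mtx);
  counter++;
  mutex_leave (counter_mtx);
}

static void
example_counter_init (void)
{
  counter_mtx = mutex_allocate_typed (MUTEX_TYPE_SPIN);
}
#endif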
int
semaphore_enter (semaphore_t * sem)
{
  thread_t *thr = current_thread;
  int rc;

  rc = pthread_mutex_lock ((pthread_mutex_t*) sem->sem_handle);
  CKRET (rc);

  if (sem->sem_entry_count)
    sem->sem_entry_count--;
  else
    {
#ifndef SEM_NO_ORDER
      thread_queue_to (&sem->sem_waiting, thr);
      _thread_num_wait++;
      thr->thr_status = WAITSEM;
      do
	{
	  rc = pthread_cond_wait ((pthread_cond_t *) thr->thr_cv, (pthread_mutex_t*) sem->sem_handle);
	  CKRET (rc);
	} while (thr->thr_status == WAITSEM);
#else      
      thread_queue_to (&sem->sem_waiting, thr);
      _thread_num_wait++;
      thr->thr_status = WAITSEM;
      do 
	{
	  rc = pthread_cond_wait ((pthread_cond_t *) sem->sem_cv, (pthread_mutex_t*) sem->sem_handle);
	  CKRET (rc);
	}
      while (sem->sem_n_signalled == sem->sem_last_signalled); 
      sem->sem_n_signalled --; /* this one is signalled */
      sem->sem_last_signalled = sem->sem_n_signalled;
      thr->thr_status = RUNNING;
      thread_queue_remove (&sem->sem_waiting, thr);
      if (sem->sem_n_signalled < 0) GPF_T1 ("The semaphore counter went wrong");
#endif
    }

  pthread_mutex_unlock ((pthread_mutex_t*) sem->sem_handle);

  return 0;

failed:
  GPF_T1 ("semaphore_enter() failed");
  return -1;
}
semaphore_t *
semaphore_allocate (int entry_count)
{
  NEW_VAR (pthread_mutex_t, ptm);
  NEW_VAR (semaphore_t, sem);
  int rc;

  memset ((void *) ptm, 0, sizeof (pthread_mutex_t));
#ifndef OLD_PTHREADS
  rc = pthread_mutex_init (ptm, &_mutex_attr);
#else
  rc = pthread_mutex_init (ptm, _mutex_attr);
#endif
  CKRET (rc);

  sem->sem_entry_count = entry_count;
  sem->sem_handle = (void *) ptm;
#ifdef SEM_NO_ORDER
  sem->sem_cv = _alloc_cv ();
  if (!sem->sem_cv) goto failed;
  sem->sem_n_signalled = 0;
  sem->sem_last_signalled = 0;
#endif
  thread_queue_init (&sem->sem_waiting);
  return sem;

failed:
  dk_free ((void *) ptm, sizeof (pthread_mutex_t));
  dk_free (sem, sizeof (semaphore_t));
  return NULL;
}
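/*
 *  Illustrative sketch (not compiled): a simple handoff between two
 *  threads.  The consumer blocks in semaphore_enter () until the producer
 *  posts a count with semaphore_leave ().  All three functions are
 *  defined in this file; the work queue itself is a placeholder.
 */
#if 0
static semaphore_t *work_sem;

static void
example_handoff_init (void)
{
  work_sem = semaphore_allocate (0);	/* start with no pending items */
}

static void
producer_post (void)
{
  /* ... enqueue a work item ... */
  semaphore_leave (work_sem);	/* wake one waiting consumer */
}

static int
consumer_loop (void *arg)
{
  for (;;)
    {
      semaphore_enter (work_sem);	/* block until an item is posted */
      /* ... dequeue and process the item ... */
    }
  return 0;
}
#endif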
static void *
_thread_boot (void *arg)
{
  thread_t *thr = (thread_t *) arg;
  int rc;

  rc = pthread_setspecific (_key_current, thr);
  CKRET (rc);

  /* Store the context so we can easily restart a dead thread */
  setjmp (thr->thr_init_context);

  thr->thr_status = RUNNING;
  _thread_init_attributes (thr);
  thr->thr_stack_base = (void *) &arg;

  rc = (*thr->thr_initial_function) (thr->thr_initial_argument);

  /* thread died, put it on the dead queue */
  thread_exit (rc);

  /* We should never come here */
  GPF_T;

failed:
  return (void *) 1L;
}
/*
 *  Wait for an event to happen.
 *
 *  If holds != NULL, the caller holds the mutex, which will be released
 *  before going to sleep. The thread calling thread_signal_cond *must* hold
 *  the same mutex.
 *
 *  The holds mutex is reacquired after wakeup.
 */
int
thread_wait_cond (void *event, dk_mutex_t *holds, TVAL timeout)
{
  thread_t *thr = current_thread;
  dk_mutex_t *mtx;
  int ok;

  thr->thr_status = WAITEVENT;
  thr->thr_event = event ? event : &_ev_never;
  thr->thr_event_pipe = -1;

  mtx = holds ? holds : _q_lock;

  Q_LOCK ();
  do
    {
      thread_queue_to (&_waitq, thr);
      _thread_num_wait++;

      if (holds)
	Q_UNLOCK ();

      if (timeout == TV_INFINITE)
	ok = pthread_cond_wait (thr->thr_cv, &mtx->mtx_mtx);
      else
	{
	  struct timespec to;
	  struct timeval now;
	  gettimeofday (&now, NULL);
	  to.tv_sec = now.tv_sec + timeout / 1000;
	  /* tv_usec and the millisecond remainder must both be scaled to
	   * nanoseconds; the sum stays below 2 seconds, so one carry is enough */
	  to.tv_nsec = 1000 * (now.tv_usec + 1000 * (timeout % 1000));
	  if (to.tv_nsec >= 1000000000)
	    {
	      to.tv_nsec -= 1000000000;
	      to.tv_sec++;
	    }
	  ok = pthread_cond_timedwait (thr->thr_cv, &mtx->mtx_mtx, &to);
	}
      if (holds)
	Q_LOCK ();
      thread_queue_remove (&_waitq, thr);
      _thread_num_wait--;
    } while (ok == 0 && thr->thr_event);
  Q_UNLOCK ();
  CKRET (ok);

failed:
  thr->thr_status = RUNNING;
  return thr->thr_event == NULL ? 0 : -1;
}
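/*
 *  Illustrative sketch (not compiled): waiting on an event while holding a
 *  mutex, per the contract in the comment above.  thread_signal_cond () is
 *  named in that comment; its exact signature is assumed here to be
 *  thread_signal_cond (void *event).  state_mtx is assumed to be allocated
 *  elsewhere with mutex_allocate_typed ().
 */
#if 0
static dk_mutex_t *state_mtx;
static int state_ready;

static void
example_wait_ready (void)
{
  mutex_enter (state_mtx);
  while (!state_ready)
    {
      /* Releases state_mtx while asleep, reacquires it on wakeup. */
      thread_wait_cond ((void *) &state_ready, state_mtx, TV_INFINITE);
    }
  mutex_leave (state_mtx);
}

static void
example_signal_ready (void)
{
  mutex_enter (state_mtx);	/* the signaller must hold the same mutex */
  state_ready = 1;
  thread_signal_cond ((void *) &state_ready);
  mutex_leave (state_mtx);
}
#endif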
int
semaphore_try_enter (semaphore_t *sem)
{
  int rc;

  rc = pthread_mutex_lock ((pthread_mutex_t*) sem->sem_handle);
  CKRET (rc);

  if (sem->sem_entry_count)
    {
      sem->sem_entry_count--;	/* IvAn: this decrement was added. */
      pthread_mutex_unlock ((pthread_mutex_t*) sem->sem_handle);
      return 1;
    }

  pthread_mutex_unlock ((pthread_mutex_t*) sem->sem_handle);

failed:
  return 0;
}
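/*
 *  Illustrative sketch (not compiled): non-blocking acquisition.
 *  semaphore_try_enter () returns 1 and consumes one count on success,
 *  0 when the semaphore is empty.
 */
#if 0
static void
example_try (semaphore_t *sem)
{
  if (semaphore_try_enter (sem))
    {
      /* ... got a slot, do the work ... */
      semaphore_leave (sem);	/* give the slot back */
    }
  else
    {
      /* ... busy: skip, or fall back to a blocking semaphore_enter (sem) ... */
    }
}
#endif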
/*
 *  Allocates a condition variable
 */
static void *
_alloc_cv (void)
{
  NEW_VAR (pthread_cond_t, cv);
  int rc;

  memset ((void *) cv, 0, sizeof (pthread_cond_t));
#ifndef OLD_PTHREADS
  rc = pthread_cond_init (cv, NULL);
#else
  rc = pthread_cond_init (cv, pthread_condattr_default);
#endif
  CKRET (rc);

  return (void *) cv;

failed:
  dk_free ((void *) cv, sizeof (pthread_cond_t));
  return NULL;
}
int
thread_release_dead_threads (int leave_count)
{
  thread_t *thr;
  int rc;
  long thread_killed = 0;
  thread_queue_t term;

  Q_LOCK ();
  if (_deadq.thq_count <= leave_count)
    {
      Q_UNLOCK ();
      return 0;
    }
  thread_queue_init (&term);
  while (_deadq.thq_count > leave_count)
    {
      thr = thread_queue_from (&_deadq);
      if (!thr)
	break;
      _thread_num_dead--;
      thread_queue_to (&term, thr);
    }
  Q_UNLOCK ();

  while (NULL != (thr = thread_queue_from (&term)))
    {
      thr->thr_status = TERMINATE;
      rc = pthread_cond_signal ((pthread_cond_t *) thr->thr_cv);
      CKRET (rc);
      thread_killed++;
    }
#if 0
  if (thread_killed)
    log_info ("%ld OS threads released", thread_killed);
#endif
  return thread_killed;
failed:
  GPF_T1("Thread restart failed");
  return 0;
}
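/*
 *  Illustrative sketch (not compiled): trimming the dead-thread cache,
 *  e.g. from a periodic housekeeping task.  Up to 10 parked threads are
 *  kept for reuse by thread_create (); the rest are told to terminate.
 */
#if 0
static void
example_housekeeping (void)
{
  int released = thread_release_dead_threads (10);
  /* released is the number of OS threads signalled to terminate */
}
#endif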
thread_t *
thread_attach (void)
{
  thread_t *thr;
  int rc;

  thr = thread_alloc ();
  thr->thr_stack_size = (unsigned long) -1;
  thr->thr_attached = 1;
  if (thr->thr_cv == NULL)
    goto failed;

  *((pthread_t *) thr->thr_handle) = pthread_self ();

  rc = pthread_setspecific (_key_current, thr);
  CKRET (rc);

  /* Store the context so we can easily restart a dead thread */
  setjmp (thr->thr_init_context);

  thr->thr_status = RUNNING;
  _thread_init_attributes (thr);
  thr->thr_stack_base = 0;

  return thr;

failed:
  if (thr->thr_sem)
    semaphore_free (thr->thr_sem);
  if (thr->thr_schedule_sem)
    semaphore_free (thr->thr_schedule_sem);
  if (thr->thr_handle)
    dk_free (thr->thr_handle, sizeof (pthread_t));
  dk_free (thr, sizeof (thread_t));
  return NULL;
}
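/*
 *  Illustrative sketch (not compiled): a thread created outside this
 *  library (e.g. by a host application or a driver callback) must attach
 *  itself before using current_thread or the wait primitives above.
 */
#if 0
static void *
foreign_entry (void *arg)
{
  thread_t *thr = thread_attach ();
  if (!thr)
    return NULL;
  /* ... may now use current_thread, semaphores, thread_wait_cond () ... */
  return NULL;
}
#endif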
void
thread_exit (int n)
{
  thread_t *thr = current_thread;
  volatile int is_attached = thr->thr_attached;

  if (thr == _main_thread)
    {
      call_exit (n);
    }

  thr->thr_retcode = n;
  thr->thr_status = DEAD;

  if (is_attached)
    {
      thr->thr_status = TERMINATE;
      goto terminate;
    }

  Q_LOCK ();
  thread_queue_to (&_deadq, thr);
  _thread_num_dead++;

  do
    {
      int rc = pthread_cond_wait ((pthread_cond_t *) thr->thr_cv, (pthread_mutex_t*) &_q_lock->mtx_mtx);
      CKRET (rc);
    } while (thr->thr_status == DEAD);
  Q_UNLOCK ();

  if (thr->thr_status == TERMINATE)
    goto terminate;
  /* Jumps back into _thread_boot */
  longjmp (thr->thr_init_context, 1);

failed:
  thread_queue_remove (&_deadq, thr);
  _thread_num_dead--;
  Q_UNLOCK ();
terminate:
  if (thr->thr_status == TERMINATE)
    {
#ifndef OLD_PTHREADS
      pthread_detach (* (pthread_t *)thr->thr_handle);
#else
      pthread_detach ( (pthread_t *)thr->thr_handle);
#endif
      _thread_free_attributes (thr);
      dk_free ((void *) thr->thr_cv, sizeof (pthread_cond_t));
      semaphore_free (thr->thr_sem);
      semaphore_free (thr->thr_schedule_sem);
      dk_free (thr->thr_handle, sizeof (pthread_t));
      thr_free_alloc_cache (thr);
      dk_free (thr, sizeof (thread_t));
    }
  if (!is_attached)
    {
      _thread_num_total--;
      pthread_exit ((void *) 1L);
    }
}
thread_t *
thread_create (
    thread_init_func initial_function,
    unsigned long stack_size,
    void *initial_argument)
{
  thread_t *thr;
  int rc;

  assert (_main_thread != NULL);

  if (stack_size == 0)
    stack_size = THREAD_STACK_SIZE;

#if (SIZEOF_VOID_P == 8)
  stack_size *= 2;
#endif
#if defined (__x86_64 ) && defined (SOLARIS)
  /*GK: the LDAP on that platform requires that */
  stack_size *= 2;
#endif
#ifdef HPUX_ITANIUM64
  stack_size += 8 * 8192;
#endif

  stack_size = ((stack_size / 8192) + 1) * 8192;

#if defined (PTHREAD_STACK_MIN)
  if (stack_size < PTHREAD_STACK_MIN)
    {
      stack_size = PTHREAD_STACK_MIN;
    }
#endif
  /* Any free threads with the right stack size? */
  Q_LOCK ();
  for (thr = (thread_t *) _deadq.thq_head.thr_next;
       thr != (thread_t *) &_deadq.thq_head;
       thr = (thread_t *) thr->thr_hdr.thr_next)
    {
      /* The stack-size match is disabled: the first parked thread is reused. */
      /* if (thr->thr_stack_size >= stack_size) */
	break;
    }
  Q_UNLOCK ();

  /* No free threads, create a new one */
  if (thr == (thread_t *) &_deadq.thq_head)
    {
#ifndef OLD_PTHREADS
      size_t os_stack_size = stack_size;
#endif
      thr = thread_alloc ();
      thr->thr_initial_function = initial_function;
      thr->thr_initial_argument = initial_argument;
      thr->thr_stack_size = stack_size;
      if (thr->thr_cv == NULL)
	goto failed;

#ifdef HPUX_ITANIUM64
      if (stack_size > PTHREAD_STACK_MIN)
        {
	  size_t s, rses;
          pthread_attr_getstacksize (&_thread_attr, &s);
	  pthread_attr_getrsestacksize_np (&_thread_attr, &rses);
	  log_error ("default rses=%d stack=%d : %m", rses,s);
	}
#endif


#ifndef OLD_PTHREADS
# if  defined(HAVE_PTHREAD_ATTR_SETSTACKSIZE)
      rc = pthread_attr_setstacksize (&_thread_attr, stack_size);
      if (rc)
	{
          log_error ("Failed setting the OS thread stack size to %d : %m", stack_size);
	}
# endif

#if defined(HAVE_PTHREAD_ATTR_GETSTACKSIZE)
      if (0 == pthread_attr_getstacksize (&_thread_attr, &os_stack_size))
	{
	  if (os_stack_size > 4 * 8192)
	    stack_size = thr->thr_stack_size = ((unsigned long) os_stack_size) - 4 * 8192;
	}
#endif
#ifdef HPUX_ITANIUM64
      if (stack_size > PTHREAD_STACK_MIN)
        {
	  size_t rsestack_size = stack_size / 2;
          rc = pthread_attr_setrsestacksize_np (&_thread_attr, rsestack_size);
	  if (rc)
	    {
	      log_error ("Failed setting the OS thread 'rse' stack size to %d (plain stack size set to %d) : %m", rsestack_size, stack_size);
	    }
	  thr->thr_stack_size /= 2;
	}
#endif

      rc = pthread_create ((pthread_t *) thr->thr_handle, &_thread_attr,
	  _thread_boot, thr);
      CKRET (rc);

      /* rc = pthread_detach (*(pthread_t *) thr->thr_handle); */
      /* CKRET (rc); */

#else /* OLD_PTHREADS */
      rc = pthread_attr_setstacksize (&_thread_attr, stack_size);
      CKRET (rc);

      rc = pthread_create ((pthread_t *) thr->thr_handle, _thread_attr,
	  _thread_boot, thr);
      CKRET (rc);

      /* rc = pthread_detach ((pthread_t *) thr->thr_handle); */
      /* CKRET (rc); */
#endif

      _thread_num_total++;
#if 0
      if (DO_LOG(LOG_THR))
	log_info ("THRD_0 OS threads create (%i)", _thread_num_total);
#endif
      thread_set_priority (thr, NORMAL_PRIORITY);
    }
  else
    {
      Q_LOCK ();
      thread_queue_remove (&_deadq, thr);
      _thread_num_dead--;
      Q_UNLOCK ();
      assert (thr->thr_status == DEAD);
      /* Set new context for the thread and resume it */
      thr->thr_initial_function = initial_function;
      thr->thr_initial_argument = initial_argument;
      thr->thr_status = RUNNABLE;
      rc = pthread_cond_signal ((pthread_cond_t *) thr->thr_cv);
      CKRET (rc);
/*    if (DO_LOG(LOG_THR))
	log_info ("THRD_3 OS threads reuse. Info threads - total (%ld) wait (%ld) dead (%ld)",
            _thread_num_total, _thread_num_wait, _thread_num_dead);*/
    }

  return thr;

failed:
  if (thr->thr_status == RUNNABLE)
    {
      _thread_free_attributes (thr);
      dk_free (thr, sizeof (thread_t));
    }
  return NULL;
}
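/*
 *  Illustrative sketch (not compiled): starting a worker thread.  A stack
 *  size of 0 selects THREAD_STACK_SIZE; the initial function runs on the
 *  new thread via _thread_boot () above.
 */
#if 0
static int
worker_main (void *arg)
{
  /* ... thread body ... */
  return 0;
}

static void
example_start_worker (void)
{
  thread_t *thr = thread_create (worker_main, 0, NULL);
  if (!thr)
    {
      /* creation failed: thread_create () returned NULL */
    }
}
#endif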
/*
 *  The main thread must call this function to convert itself into a thread.
 */
thread_t *
thread_initial (unsigned long stack_size)
{
  int rc;
  thread_t *thr = NULL;

  if (_main_thread)
    return _main_thread;

  /*
   *  Initialize pthread key
   */
#ifndef OLD_PTHREADS
  rc = pthread_key_create (&_key_current, NULL);
#else
  rc = pthread_keycreate (&_key_current, NULL);
#endif
  CKRET (rc);

  /*
   *  Start off with a value of NULL
   */
  rc = pthread_setspecific (_key_current, NULL);
  CKRET (rc);

  /*
   *  Initialize default thread/mutex attributes
   */
#ifndef OLD_PTHREADS
  /* attribute for thread creation */
  rc = pthread_attr_init (&_thread_attr);
  CKRET (rc);

  /* attribute for mutex creation */
  rc = pthread_mutexattr_init (&_mutex_attr);
  CKRET (rc);
#else
  rc = pthread_attr_create (&_thread_attr);
  CKRET (rc);

  rc = pthread_mutexattr_create (&_mutex_attr);
  CKRET (rc);
#endif

#if defined (PTHREAD_PROCESS_PRIVATE) && !defined(oldlinux) && !defined(__FreeBSD__)
  rc = pthread_mutexattr_setpshared (&_mutex_attr, PTHREAD_PROCESS_PRIVATE);
  CKRET (rc);
#endif

#if defined (MUTEX_FAST_NP) && !defined (_AIX)
  rc = pthread_mutexattr_setkind_np (&_mutex_attr, MUTEX_FAST_NP);
  CKRET (rc);
#endif

#ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
  rc = pthread_mutexattr_settype (&_mutex_attr, PTHREAD_MUTEX_ADAPTIVE_NP);
  CKRET (rc);
#endif

  /*
   *  Allocate a thread structure
   */
  thr = (thread_t *) dk_alloc (sizeof (thread_t));
  memset (thr, 0, sizeof (thread_t));

  assert (_main_thread == NULL);
  _main_thread = thr;

  _sched_init ();

  if (stack_size == 0)
    stack_size = MAIN_STACK_SIZE;

#if (SIZEOF_VOID_P == 8)
  stack_size *= 2;
#endif
#if defined (__x86_64 ) && defined (SOLARIS)
  /*GK: the LDAP on that platform requires that */
  stack_size *= 2;
#endif


  stack_size = ((stack_size / 8192) + 1) * 8192;

  thr->thr_stack_size = stack_size;
  thr->thr_status = RUNNING;
  thr->thr_cv = _alloc_cv ();
  thr->thr_sem = semaphore_allocate (0);
  thr->thr_schedule_sem = semaphore_allocate (0);
  if (thr->thr_cv == NULL)
    goto failed;
  _thread_init_attributes (thr);
  thread_set_priority (thr, NORMAL_PRIORITY);

  rc = pthread_setspecific (_key_current, thr);
  CKRET (rc);

  return thr;

failed:
  if (thr)
    {
      _thread_free_attributes (thr);
      dk_free (thr, sizeof (thread_t));
    }
  return NULL;
}
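/*
 *  Illustrative sketch (not compiled): per the comment above, the main
 *  thread must convert itself with thread_initial () before any other
 *  call into this library.
 */
#if 0
int
example_main (int argc, char **argv)
{
  if (!thread_initial (0))	/* 0 selects MAIN_STACK_SIZE */
    return 1;
  /* ... now safe to call thread_create (), mutex_allocate_typed (), ... */
  return 0;
}
#endif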
#ifdef MTX_DEBUG
int
mutex_enter_dbg (const char *file, int line, dk_mutex_t *mtx)
#else
int
mutex_enter (dk_mutex_t *mtx)
#endif
{
#ifdef MTX_DEBUG
  du_thread_t * self = thread_current ();
#endif
  int rc;

#ifdef MTX_DEBUG
  assert (mtx->mtx_owner != self || !self);
  if (mtx->mtx_entry_check
      && !mtx->mtx_entry_check (mtx, self, mtx->mtx_entry_check_cd))
    GPF_T1 ("Mtx entry check fail");
#endif
#ifdef MTX_METER
#if HAVE_SPINLOCK
  if (MUTEX_TYPE_SPIN == mtx->mtx_type)
    rc = pthread_spin_trylock (&mtx->l.spinl);
  else 
#endif
    rc = pthread_mutex_trylock ((pthread_mutex_t*) &mtx->mtx_mtx);
  if (TRYLOCK_SUCCESS != rc)
    {
      long long wait_ts = rdtsc ();
      static int unnamed_waits;
#if HAVE_SPINLOCK
      if (MUTEX_TYPE_SPIN == mtx->mtx_type)
	rc = pthread_spin_lock (&mtx->l.spinl);
      else
#endif
	rc = pthread_mutex_lock ((pthread_mutex_t*) &mtx->mtx_mtx);
      mtx->mtx_wait_clocks += rdtsc () - wait_ts;
      mtx->mtx_waits++;
      if (!mtx->mtx_name)
	unnamed_waits++; /*for dbg breakpoint */
      mtx->mtx_enters++;
    }
  else
    mtx->mtx_enters++;
#else
#if HAVE_SPINLOCK
  if (MUTEX_TYPE_SPIN == mtx->mtx_type)
    rc = pthread_spin_lock (&mtx->l.spinl);
  else
#endif
    rc = pthread_mutex_lock ((pthread_mutex_t*) &mtx->mtx_mtx);
#endif
  CKRET (rc);
#ifdef MTX_DEBUG
  assert (mtx->mtx_owner == NULL);
  mtx->mtx_owner = self;
  mtx->mtx_entry_file = (char *) file;
  mtx->mtx_entry_line = line;
#endif
  return 0;

failed:
  GPF_T1 ("mutex_enter() failed");
  return -1;
}
int main(int argc, char* argv[])
{
  ns_nfm_ret_t ret;
  unsigned int i, k, t, min, max;
  unsigned int slot, netmod, iface, nm_intfs;
  uint32_t nm_mask;
  uint8_t nm_type;
  const char *nm_name;
  int c;

  ns_log_init(NS_LOG_COLOR | NS_LOG_CONSOLE);
  ns_log_lvl_set(NS_LOG_LVL_INFO);

  while ((c = getopt_long(argc, argv, "l:d:h", __long_options, NULL)) != -1) {
    switch (c) {
    case 'l':
      ns_log_lvl_set((unsigned int)strtoul(optarg,0,0));
      break;
    case 'h':
    default:
      print_usage(argv[0]);
    }
  }

  if (optind != argc)
    print_usage(argv[0]);

  ret = nfm_get_nfes_present(&k);
  CKRET(ret, "Error getting # of NFEs present");
  printf("%u NFEs present\n", k);

  ret = nfm_get_nmsbs_present(&k);
  CKRET(ret, "Error getting # of NMSBs present");
  printf("%u NMSBs present\n", k);
  if (k < 1) { // No NMSB present: Geryon-style system
      min = max = 1;
      ret = nfm_get_external_min_port(&min);
      CKRET(ret, "Error getting external minimum port");
      
      ret = nfm_get_external_max_port(&max);
      CKRET(ret, "Error getting external maximum port");
      
      printf("Port list\n");
      
      printf("%16s%16s\n", "Port", "Hardware ID");
      for (i = min; i <= max; i++) {
	  ret = nfm_get_internal_port_from_external(i, &k);
	  CKRET(ret, "Error converting external port # to internal");
	  
	  printf("%16u%16u\n", i, k);
	  /* sanity check */
	  ret = nfm_get_external_port_from_internal(k, &t);
	  CKRET(ret, "Error converting internal port # to external");
	  if (t != i)
	      NS_LOG_ERROR("Error conversion ext -> int -> ext "
			   "produced %u -> %u -> %u", i, k, t);
      }
  }
  else { // NMSB: cayenne
      ret = nfm_get_netmods_present(&nm_mask);
      CKRET(ret, "Error getting # of netmods present");
      ret = nfm_get_min_netmod(&min);
      CKRET(ret, "Error getting minimum netmod");
      ret = nfm_get_max_netmod(&max);
      CKRET(ret, "Error getting maximum netmod");

      /* Enumerate the Installed Netmods */
      printf("netmods present (mask=%#x, %u - %u):\n", nm_mask, min, max);
      printf("%16s%16s%16s%16s\n",
	     "Netmod", "Slot ID", "Netmod Type", "# Interfaces");
      while (1) {
	  slot = ffs(nm_mask);
	  if (!slot)
	      break;
	  slot = slot - 1; /* ffs() counts bits starting at 1; we count from 0 */
	  nm_mask &= ~(1 << slot);
	  ret = nfm_get_nmsb_attached_card(slot, &nm_type);
	  CKRET(ret, "Error getting card type");
	  ret = nfm_get_nmsb_card_intfs(slot, &nm_intfs);
	  CKRET(ret, "Error getting number of interfaces");
	  ret = nfm_get_nmsb_card_name(slot, &nm_name);
	  CKRET(ret, "Error getting card name");
	  printf("%16s%16u%16u%16u\n", nm_name, slot, nm_type, nm_intfs);
      }
      printf("\n");

      /* Enumerate the ports */
      ret = nfm_get_external_min_port(&min);
      CKRET(ret, "Error getting external minimum port");
      ret = nfm_get_external_max_port(&max);
      CKRET(ret, "Error getting external maximum port");

      printf("Port list\n");
      printf("%16s%16s%16s%16s\n", "Port", "Hardware ID", "Netmod", "NM Interface");
      for (i = min; i <= max; i++) {
	  ret = nfm_get_internal_port_from_external(i, &k);
	  CKRET(ret, "Error converting external port # to internal");
	  ret = nfm_get_slot_from_external(i, &netmod, &iface);
	  CKRET(ret, "Error converting external port # to netmod pair");
	  ret = nfm_get_nmsb_card_name(netmod, &nm_name);
	  CKRET(ret, "Error getting card name");
	  printf("%16u%16u%16s%16u\n", i, k, nm_name, iface);
	  /* sanity check */
	  ret = nfm_get_external_port_from_internal(k, &t);
	  CKRET(ret, "Error converting internal port # to external");
	  if (t != i)
	      NS_LOG_ERROR("Error conversion ext -> int -> ext "
			   "produced %u -> %u -> %u", i, k, t);
      }
  }
  printf("\n");

  return 0;
}