Example #1
int dump_hbuf_stats (void * pio)
{
   u_long hb_local [HBUF_NUM_STATS];
   u_long heap_curr_mem_local;
   u_long heap_curr_mem_hi_watermark_local;
#ifdef HEAPBUFS_DEBUG
   PHLEP phlep;
   u_short count = 0;
   u_long max_alloc = 0;
   u_long min_alloc = 0xFFFFFFFF;
   u_long total_alloc = 0;
#endif

   LOCK_NET_RESOURCE(FREEQ_RESID);

   ENTER_CRIT_SECTION(&hbufstats);
   MEMCPY (&hb_local, &hbufstats, sizeof(hbufstats));
   EXIT_CRIT_SECTION(&hbufstats);
   
   ENTER_CRIT_SECTION(&heap_curr_mem);
   heap_curr_mem_local = heap_curr_mem;
   heap_curr_mem_hi_watermark_local = heap_curr_mem_hi_watermark;
   EXIT_CRIT_SECTION(&heap_curr_mem);

   UNLOCK_NET_RESOURCE(FREEQ_RESID);

   ns_printf(pio, "Heap buffer error and other statistics:\n");
   ns_printf(pio, "Current total allocation %lu, high watermark %lu\n",heap_curr_mem_local,heap_curr_mem_hi_watermark_local);
   ns_printf(pio, "Successful allocations %lu, size netbuf %u, size PHLE %u\n",hb_local[HB_ALLOC_SUCC],(unsigned)sizeof(struct netbuf),(unsigned)sizeof(PHLE));
   ns_printf(pio, "Bad request size failures %lu, Max heap allocation exceeded failures %lu\n",hb_local[TOOBIG_ALLOC_ERR],hb_local[LIMIT_EXCEEDED_ERR]);
   ns_printf(pio, "netbuf allocation failures %lu, data buffer allocation failures %lu\n",hb_local[NB_ALLOCFAIL_ERR],hb_local[DB_ALLOCFAIL_ERR]);
   ns_printf(pio, "Inconsistent fields %lu, Guard band violations %lu\n",hb_local[INCONSISTENT_HBUF_LEN_ERR],hb_local[HB_GUARD_BAND_VIOLATED_ERR]);
#ifdef HEAPBUFS_DEBUG
   ns_printf(pio, "Private heapbuf list element allocation failures %lu, missing private heapbuf list element entry %lu\n",hb_local[PHLEB_ALLOCFAIL_ERR],hb_local[NO_PHLE_ERR]);
   
   LOCK_NET_RESOURCE(FREEQ_RESID);
   ENTER_CRIT_SECTION(&phlq);
   for (phlep=(PHLEP)phlq.q_head; phlep; phlep = phlep->next)
   {
      if (max_alloc < phlep->length) max_alloc = phlep->length;
      if (min_alloc > phlep->length) min_alloc = phlep->length;

      total_alloc += phlep->length;
      ++count; 
   }
   EXIT_CRIT_SECTION(&phlq);
   UNLOCK_NET_RESOURCE(FREEQ_RESID);

   if (count == 0)
      ns_printf (pio, "No heap buffers currently allocated\n");
   else
      ns_printf(pio, "Number of heap buffers currently allocated %u, min length %lu, max length %lu, total allocation %lu\n",
       count, min_alloc, max_alloc, total_alloc);
#endif /* HEAPBUFS_DEBUG */

   return 0;
}
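
The function above shows the reader side of the shared-counter discipline: take the FREEQ lock, snapshot the counters inside a critical section, then print from the local copy with no locks held. The INCR_SHARED_VAR()/DECR_SHARED_VAR() macros used by Examples #14, #15 and #18 are the writer side; a minimal sketch of how they could be built on the same primitives (the real definitions are port-specific and may differ):

#define INCR_SHARED_VAR(stats, stat, amount)  \
   do {                                       \
      ENTER_CRIT_SECTION(&(stats));           \
      (stats)[stat] += (amount);              \
      EXIT_CRIT_SECTION(&(stats));            \
   } while (0)

#define DECR_SHARED_VAR(var, amount)          \
   do {                                       \
      ENTER_CRIT_SECTION(&(var));             \
      (var) -= (amount);                      \
      EXIT_CRIT_SECTION(&(var));              \
   } while (0)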
Example #2
void *
getq(queue * q)
{
   q_elt   temp;        /* temp for result */


   ENTER_CRIT_SECTION();      /* shut off ints, save old state */   

   LOCKNET_CHECK(q);          /* make sure queue is protected */

   if ((temp = q->q_head) != (q_elt)NULL)    /* queue empty? */
   {
      q->q_head = temp->qe_next;       /* else unlink */
      temp->qe_next = (q_elt)NULL;     /* avoid dangling pointers */
      if (q->q_head == (q_elt)NULL)    /* queue empty? */
         q->q_tail = (q_elt)NULL;      /* yes, update tail pointer too */
      q->q_len--;                      /* update queue length */
      if (q->q_len < q->q_min)
         q->q_min = q->q_len;

      QUEUE_CHECK(q);            /* make sure queue is not corrupted */
   }

   EXIT_CRIT_SECTION();       /* restore caller's int state */

   return ((void*)temp);
}
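
For orientation, the queue and q_elt shapes implied by getq() here, and by putq() and qdel() below, look roughly like this reconstruction; the field names are taken from the code, but the stack's real header may carry more fields:

/* reconstruction for illustration only, not the stack's header */
typedef struct q_elt_s
{
   struct q_elt_s *qe_next;      /* next element, NULL at the tail */
} *q_elt, *qp;

typedef struct queue_s
{
   q_elt   q_head;   /* first element, NULL if the queue is empty */
   q_elt   q_tail;   /* last element, NULL if the queue is empty */
   int     q_len;    /* current element count */
   int     q_min;    /* low watermark of q_len */
} queue;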
Example #3
/* FUNCTION: tk_mutex_free()
 *
 * Delete a mutex and wakeup any pending tasks.
 *
 * PARAM1: IN_MUTEX *         ptr to mutex
 *
 * RETURN: none
 */
void
tk_mutex_free(IN_MUTEX *mutex)
{
   if (mutex)
   {
      TASK *tk;

      ENTER_CRIT_SECTION();

      tk = mutex->tk_waitq;   /* read the wait queue inside the critical section */

#ifdef DEBUG_TASK
      if (mutex->tk_tag != MUTEX_TAG)
         dtrap();
#endif

      while (tk)
      {
         TASK *tk_next = tk->tk_waitq;

         tk->tk_waitq = (TASK *)NULL;
         tk->tk_event = NULL;
         TK_RESUME(tk);
         tk = tk_next;
      }

      npfree(mutex);

      EXIT_CRIT_SECTION();
   }
}
Example #4
/* FUNCTION: tk_sem_free()
 *
 * Delete a semaphore and wakeup any pending tasks.
 *
 * PARAM1: IN_SEM *           ptr to semaphore
 *
 * RETURN: none
 */
void
tk_sem_free(IN_SEM *sem)
{
   if (sem)
   {
      TASK *tk;

#ifdef DEBUG_TASK
      if (sem->tk_tag != SEMA_TAG)
         dtrap();
#endif
      ENTER_CRIT_SECTION();

      tk = sem->tk_waitq;     /* read the wait queue inside the critical section */

      while (tk)
      {
         TASK *tk_next = tk->tk_waitq;

         tk->tk_waitq = (TASK *)NULL;  /* avoid dangling pointers, as in tk_mutex_free() */
         tk->tk_event = NULL;
         TK_RESUME(tk);
         tk = tk_next;
      }

      npfree(sem);

      EXIT_CRIT_SECTION();
   }
}
Example #5
BOOL GetNextTcpInfo(HTCPINFO* p_pHTcp, T_TCPINFO* p_pTcpInfo, TcpState p_nTcpState)
{
	if((p_pHTcp == NULL) || (*p_pHTcp == NULL) || (p_pTcpInfo == NULL))
		return FALSE;

	struct socket *   tmpso = ((struct socket *)(*p_pHTcp))->next;
	*p_pHTcp = NULL;

	LOCK_NET_RESOURCE(NET_RESID);
	ENTER_CRIT_SECTION();
	//locknet_check(&soq);  /* make sure queue is protected */

	for(;tmpso != NULL;tmpso=tmpso->next)
	{
		if((tmpso->so_domain == AF_INET6) && (tmpso->so_type==SOCK_STREAM)) //just IPv6 TCP connections
		{
			int t_state = ((struct tcpcb *)tmpso->so_pcb->inp_ppcb)->t_state;

			//match the requested state: LISTEN, or ESTABLISHED ("active")
			if(((p_nTcpState == TCP_STATE_LISTEN) && (t_state == TCPS_LISTEN)) ||
			   ((p_nTcpState == TCP_STATE_ACTIVE) && (t_state == TCPS_ESTABLISHED)))
			{
				strcpy(p_pTcpInfo->pcLocalAddr,FormatHex((void*)tmpso->so_pcb->ip6_laddr.addr,IPV6_ADDR_LEN,-1));
				p_pTcpInfo->shLocalPort = ntohs(tmpso->so_pcb->inp_lport);

				strcpy(p_pTcpInfo->pcForeignAddr,FormatHex((void*)tmpso->so_pcb->ip6_faddr.addr,IPV6_ADDR_LEN,-1));
				p_pTcpInfo->shForeignPort= ntohs(tmpso->so_pcb->inp_fport);

				*p_pHTcp = tmpso;
				break;
			}
		}
	}

	//queue_check(&soq);         /* make sure queue is not corrupted */
	EXIT_CRIT_SECTION();   /* restore int state */
	UNLOCK_NET_RESOURCE(NET_RESID);

	if(*p_pHTcp == NULL)
		return FALSE;

	return TRUE;
}
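
A hypothetical walk over every listening IPv6 TCP socket using GetNextTcpInfo(); how the caller obtains the initial handle (the head of the stack's socket list) is left implicit here, as it is in the example above:

void ListListeners(HTCPINFO hFirst)   /* hFirst: head of the socket list (assumed) */
{
	HTCPINFO hCur = hFirst;
	T_TCPINFO info;

	while (GetNextTcpInfo(&hCur, &info, TCP_STATE_LISTEN))
	{
		/* info.pcLocalAddr / info.shLocalPort describe one listener;
		 * hCur now refers to that socket, so the next call resumes
		 * the scan from its successor. */
	}
}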
Example #6
void
putq(
   queue   *   q,       /* the queue */
   void *   elt)        /* element to enqueue */
{
   ENTER_CRIT_SECTION();
   LOCKNET_CHECK(q);       /* make sure queue is protected */
   q_addt(q, (qp)elt);     /* use macro to do work */
   QUEUE_CHECK(q);         /* make sure queue is not corrupted */
   EXIT_CRIT_SECTION();   /* restore int state */
}
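
putq() and getq() (Example #2) together give a protected FIFO. A hypothetical producer/consumer pairing; rcvdq is an invented queue, and the LOCK_NET_RESOURCE(NET_RESID) pairing is assumed since LOCKNET_CHECK() expects the net resource to be held:

queue rcvdq;   /* invented queue for illustration */

void producer(void *pkt)
{
   LOCK_NET_RESOURCE(NET_RESID);
   putq(&rcvdq, pkt);      /* putq() handles its own critical section */
   UNLOCK_NET_RESOURCE(NET_RESID);
}

void consumer(void)
{
   void *pkt;

   LOCK_NET_RESOURCE(NET_RESID);
   while ((pkt = getq(&rcvdq)) != NULL)
   {
      /* process one dequeued element here */
   }
   UNLOCK_NET_RESOURCE(NET_RESID);
}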
Example #7
/* FUNCTION: tk_sem_post()
 *
 * Increment a semaphore count. If the count is greater than 0
 * and a task is waiting for the semaphore, decrement the semaphore
 * count, remove the task from the wait queue, and mark the task "ready".
 * If the semaphore ptr is NULL, use the task's semaphore.
 *
 * PARAM1: TASK *             task (NULL == current task)
 * PARAM2: IN_SEM *           semaphore (NULL == task's semaphore)
 *
 * RETURN: int                SUCCESS or error code
 */
int
tk_sem_post(TASK *task, IN_SEM *sem)
{
   int  err = SUCCESS;

   ENTER_CRIT_SECTION();

   if (task == (TASK *)NULL)
      task = tk_cur;          /* default to current task */
   if (sem == (IN_SEM *)NULL)
      sem = task->tk_semaphore;

   TK_VALIDATE(task);

   if (!sem)
   {
      dtrap();                /* no semaphore! */
   }
   else
   {
#ifdef DEBUG_TASK
      if (sem->tk_tag != SEMA_TAG)
         dtrap();
#endif

      if (++sem->tk_count > sem->tk_maxcnt)
         sem->tk_count = sem->tk_maxcnt;

      /* set one or more (probably one) tasks "ready" */
#ifdef TCP_DEBUG
      if (sem->tk_count > 1)
         dprintf("tcp_wake: tk_count = %d\n", sem->tk_count);
#endif

      while ((sem->tk_count > 0) && ((task = sem->tk_waitq) != (TASK *)NULL))
      {
         if ((sem->tk_waitq = task->tk_waitq) != (TASK *)NULL)
            sem->tk_count--;
         else
            sem->tk_count = 0;

         task->tk_waitq = (TASK *)NULL;
         task->tk_event = NULL;
         TK_RESUME(task);
      }
   }

   EXIT_CRIT_SECTION();

   return (err);
}
Example #8
qp
qdel(queue * q, void * elt)
{
   qp qptr;
   qp qlast;

   /* search queue for element passed */
   ENTER_CRIT_SECTION();
   qptr = q->q_head;
   qlast = NULL;
   while (qptr)
   {
      if (qptr == (qp)elt)
      {
         /* found our item; dequeue it */
         if (qlast)
            qlast->qe_next = qptr->qe_next;
         else     /* item was at head of queue */
            q->q_head = qptr->qe_next;

         /* fix queue tail pointer if needed */
         if (q->q_tail == (qp)elt)
            q->q_tail = qlast;

         /* fix queue counters */
         q->q_len--;
         if (q->q_len < q->q_min)
            q->q_min = q->q_len;
         EXIT_CRIT_SECTION();   /* restore int state */
         return (qp)elt;   /* success exit point */
      }
      qlast = qptr;
      qptr = qptr->qe_next;
   }
   EXIT_CRIT_SECTION();   /* restore int state */
   return NULL;   /* item not found in queue */
}
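
qdel() removes an element from any position and does its own ENTER/EXIT_CRIT_SECTION, which is how Example #18 uses it to unlink a PHLE entry. A hypothetical caller; struct tmr and timerq are invented, and the qe_next link must be the element's first field for the (qp) cast to line up:

struct tmr
{
   struct tmr *qe_next;    /* first field doubles as the queue link */
   long        fire_tick;
};

void cancel_timer(queue *timerq, struct tmr *t)
{
   if (qdel(timerq, t) == NULL)
      dprintf("cancel_timer: %p not queued\n", (void *)t);
}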
Example #9
int dump_buf_estats (void * pio)
{
   u_long mlocal [MEMERR_NUM_STATS];

   LOCK_NET_RESOURCE(FREEQ_RESID);
   ENTER_CRIT_SECTION(&memestats);
   MEMCPY (&mlocal, &memestats, sizeof(memestats));
   EXIT_CRIT_SECTION(&memestats);
   UNLOCK_NET_RESOURCE(FREEQ_RESID);

   ns_printf(pio, "Regular buffer error statistics:\n");
   ns_printf(pio, "Bad buffer length %lu, Guard band violations %lu\n",mlocal[BAD_REGULAR_BUF_LEN_ERR],mlocal[GUARD_BAND_VIOLATED_ERR]);
   ns_printf(pio, "Multiple frees %lu, Inconsistent location %lu\n",mlocal[MULTIPLE_FREE_ERR],mlocal[INCONSISTENT_LOCATION_ERR]);

   return 0;
}
Example #10
void
in_delmulti(struct in_multi * inm)
{
   struct in_multi * p;
   NET         netp = inm->inm_netp;
   int error;

   ENTER_CRIT_SECTION(inm);
   if (--inm->inm_refcount == 0) 
   {
      /* Unlink from list.  */
      for (p = netp->mc_list; p; p = p->inm_next)
      {
         if(p == inm)   /* inm is first in mc_list */
         {
            netp->mc_list = p->inm_next;  /* unlink */
            break;
         }
         else if(p->inm_next == inm)   /* inm is next */
         {
            p->inm_next = inm->inm_next;  /* unlink */
            break;
         }
      }

      /*
       * If net has a multicast address registration routine then ask
       * the network driver to update its multicast reception
       * filter appropriately for the deleted address.
       */
      if(netp->n_mcastlist)
         error = netp->n_mcastlist(inm);
      else
         error = 0;
#if defined (IGMP_V2)
      /*
       * No remaining claims to this record; let IGMP know that
       * we are leaving the multicast group.
       */
      if (inm->inm_addr) igmp_leavegroup(inm);
#endif      

      IM_FREE(inm);
   }
   EXIT_CRIT_SECTION(inm);
   USE_ARG(error);   /* quiet "set but not used" warnings, as in in_addmulti() */
}
Example #11
/* FUNCTION: tk_timeout()
 *
 * Mark the task as having timed-out, and make the task runnable.
 *
 * PARAM1: TASK *             task
 *
 * RETURN: int                SUCCESS or error code
 */
STATIC int
tk_timeout(TASK *task)
{
   int  err = SUCCESS;

   ENTER_CRIT_SECTION();

   /* make sure we are in a timed-wait */
   if (task && (task->tk_flags & TF_TIMER))
   {
      task->tk_flags |= TF_TIMEOUT;    /* mark the task as having timed-out */
      TK_RESUME(task);                 /* make the task runnable */
   }

   EXIT_CRIT_SECTION();

   return (err);
}
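
tk_timeout() is the wake-up half of the timed waits in Examples #13 and #20, which set tk_waketick and TF_TIMER before suspending. A hypothetical clock-tick sweep that drives it; the tk_list/tk_next traversal is invented for illustration:

void tk_sweep_timeouts(void)
{
   TASK *task;

   for (task = tk_list; task; task = task->tk_next)   /* tk_list/tk_next assumed */
   {
      if ((task->tk_flags & TF_TIMER) &&
          ((long)(CTICKS - task->tk_waketick) >= 0))  /* wraparound-safe expiry test */
      {
         tk_timeout(task);   /* sets TF_TIMEOUT and makes the task runnable */
      }
   }
}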
Example #12
/* FUNCTION: tk_mutex_post
 *
 * Free a currently held mutex.
 *
 * PARAM1: IN_MUTEX *         mutex
 *
 * RETURN: int                SUCCESS or error code
 */
int
tk_mutex_post(IN_MUTEX *mutex)
{
   int  err = SUCCESS;

   ENTER_CRIT_SECTION();

#ifdef DEBUG_TASK
   if (mutex && (mutex->tk_tag != MUTEX_TAG))
      dtrap();
#endif

   if ((mutex) && (mutex->tk_owner == tk_cur))
   {
      if (--mutex->tk_nesting <= 0)    /* done with mutex */
      {
         TASK *task = mutex->tk_waitq; /* next waiting task */

         /* is another task waiting for the mutex? */
         if ((mutex->tk_owner = task) != (TASK *)NULL)
         {
            mutex->tk_waitq = task->tk_waitq;
            mutex->tk_nesting = 1;
            task->tk_waitq = (TASK *)NULL;
            task->tk_event = NULL;
            TK_RESUME(task);
         }
      }
   }
#ifdef DEBUG_TASK
   else
      dtrap();
#endif

   EXIT_CRIT_SECTION();    /* restore the caller's int state on all paths */

   return (err);
}
Example #13
/* FUNCTION: tk_mutex_pend()
 *
 * Wait for the availability of a mutex.
 *
 * PARAM1: IN_MUTEX *         mutex
 * PARAM2: int32_t            > 0 == wait time (ticks)
 *                            INFINITE_DELAY == wait forever
 *                            <= 0 == don't wait
 *
 * RETURN: int                SUCCESS or error code
 */
int
tk_mutex_pend(IN_MUTEX *mutex, int32_t timeout)
{
   int  err = SUCCESS;

   if (!mutex)
   {
      dtrap();
   }
   else
   {
      ENTER_CRIT_SECTION();

#ifdef DEBUG_TASK
      if (mutex->tk_tag != MUTEX_TAG)
         dtrap();
#endif

      if (mutex->tk_owner == (TASK *)NULL)   /* mutex is available */
      {
         mutex->tk_owner = tk_cur;
         mutex->tk_nesting = 1;
      }
      else if (mutex->tk_owner == tk_cur)    /* already own it */
      {
         mutex->tk_nesting++;
      }
      else if (timeout > 0)  /* wait until mutex is available */
      {
         /* append to the end of the wait queue */
         if (mutex->tk_waitq == (TASK *)NULL)
            mutex->tk_waitq = tk_cur;
         else
         {
            TASK *task = mutex->tk_waitq;

#ifdef DEBUG_TASK
            if ((task == tk_cur) || (tk_cur->tk_waitq != (TASK *)NULL))
               dtrap();
#endif
            while (task->tk_waitq)
            {
#ifdef DEBUG_TASK
               if (task == tk_cur)
                  dtrap();
#endif
               task = task->tk_waitq;
            }
            task->tk_waitq = tk_cur;
         } 

         tk_cur->tk_event = (void *)mutex;
         tk_cur->tk_waitq = (TASK *)NULL;
         tk_cur->tk_flags |= TF_MUTEX;
         if (timeout != INFINITE_DELAY)
         {
            tk_cur->tk_waketick = TIME_ADD(CTICKS, timeout);
            tk_cur->tk_flags |= TF_TIMER;
         }

         /* wait for something to happen */
         EXIT_CRIT_SECTION();
         TK_SUSPEND(TK_THIS);
         ENTER_CRIT_SECTION();

         /* clean up and continue */
         err = (tk_cur->tk_flags & TF_TIMEOUT) ? TK_TIMEOUT : 0;
         tk_cur->tk_flags &= ~(TF_MUTEX | TF_TIMER | TF_TIMEOUT);
         tk_cur->tk_waitq = (TASK *)NULL;
         tk_cur->tk_event = NULL;
         tk_cur->tk_waketick = 0;
      }
      else                    /* not available and not waiting */
      {
         err = TK_TIMEOUT;
      }

      EXIT_CRIT_SECTION();
   }

   return (err);
}
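
A hypothetical caller pairing tk_mutex_pend() with tk_mutex_post() (Example #12); cfg_mutex is an invented, already-created mutex, and INFINITE_DELAY requests an unbounded wait as in the code above:

int update_shared_config(IN_MUTEX *cfg_mutex)
{
   int err = tk_mutex_pend(cfg_mutex, INFINITE_DELAY);

   if (err != SUCCESS)
      return (err);

   /* ... modify the shared state here; a nested tk_mutex_pend() by the
    * owning task just bumps tk_nesting instead of deadlocking ... */

   return (tk_mutex_post(cfg_mutex));
}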
Example #14
int pk_validate(PACKET pkt)   /* check if pk_free() can free the pkt */
{
   PACKET   p;
#ifdef NPDEBUG
   int      j;
#endif

   /* If the packet's link is non-zero, then this packet is part of
    * a chain, and deleting it would break the chain and leak memory
    * for the subsequent pkts.
    * Note that heapbufs do not use the 'next' field at all.
    */
   if ((pkt->next) && (pkt->inuse >= 1))
   {
      INCR_SHARED_VAR (memestats, INCONSISTENT_LOCATION_ERR, 1);   
      return -1;
   }

#ifdef HEAPBUFS
   if (pkt->flags & PKF_HEAPBUF) /* check private heapbuf list queue */
   {
      return (pk_validate_heapbuf (pkt));
   }
   else  
#endif /* HEAPBUFS */
   {
      /* check if the packet is already in a freeq */
      if (pkt->nb_blen == bigbufsiz)  /* check in bigfreeq */
      {
         ENTER_CRIT_SECTION(&bigfreeq);
         for (p=(PACKET)bigfreeq.q_head; p; p = p->next)
            if (p == pkt)
            {
               dprintf("pk_free: buffer %p already in bigfreeq\n", pkt);
               EXIT_CRIT_SECTION(&bigfreeq);
               INCR_SHARED_VAR (memestats, MULTIPLE_FREE_ERR, 1);
               return -1;
            }
         EXIT_CRIT_SECTION(&bigfreeq);
      }
      else if (pkt->nb_blen == lilbufsiz)  /* check in lilfreeq */
      {
         ENTER_CRIT_SECTION(&lilfreeq);
         for (p=(PACKET)lilfreeq.q_head; p; p = p->next)
            if (p == pkt)
            {
               dprintf("pk_free: buffer %p already in lilfreeq\n", pkt);
               EXIT_CRIT_SECTION(&lilfreeq);
               INCR_SHARED_VAR (memestats, MULTIPLE_FREE_ERR, 1);
               return -1;
            }
         EXIT_CRIT_SECTION(&lilfreeq);
      }
      else
      {
         /* log an error */
         INCR_SHARED_VAR (memestats, BAD_REGULAR_BUF_LEN_ERR, 1);
         return -1;
      }
   }

#ifdef NPDEBUG
   /* check for corruption of memory markers (the guard bands are only
    * present when NPDEBUG is defined) */
   for (j = ALIGN_TYPE; j > 0; j--)
   {
      if (*(pkt->nb_buff - j) != 'M')
      {
         INCR_SHARED_VAR (memestats, GUARD_BAND_VIOLATED_ERR, 1);
         return -1;
      }
   }
   if (*(pkt->nb_buff + pkt->nb_blen) != 'M')
   {
      INCR_SHARED_VAR (memestats, GUARD_BAND_VIOLATED_ERR, 1);
      return -1;
   }
#endif /* NPDEBUG */

   return 0;
}
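
pk_validate() is written as a gate for pk_free(), per the comment on its first line. A hypothetical caller shape; the real pk_free() also returns the buffer to its free queue, which is elided here:

void pk_free(PACKET pkt)   /* sketch only */
{
   if (pk_validate(pkt) != 0)
      return;                 /* invalid; the error counter was already bumped */

#ifdef HEAPBUFS
   if (pkt->flags & PKF_HEAPBUF)
   {
      pk_free_heapbuf(pkt);   /* heap-backed buffer: see Example #18 */
      return;
   }
#endif
   /* ... otherwise requeue the buffer on lilfreeq or bigfreeq ... */
}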
Example #15
PACKET pk_alloc_heapbuf (unsigned len)
{
   u_long increment;
   u_char limit_exceeded = PKTALLOC_FALSE;
   PACKET p;
   u_char num_guard_bytes;
#ifdef HEAPBUFS_DEBUG
   PHLEP phlep;
#endif
#ifdef NPDEBUG
   u_char i;
   char * bufp;
#endif

   /* check to see if the caller is requesting more than the maximum 
    * allowed individual allocation */
   if (len > MAX_INDIVIDUAL_HEAP_ALLOC)
   {
      INCR_SHARED_VAR (hbufstats, TOOBIG_ALLOC_ERR, 1);
      return(NULL);
   }

#ifdef NPDEBUG
   num_guard_bytes = ALIGN_TYPE + 1;
#else
   num_guard_bytes = 0;
#endif

   /* check to make sure that this allocation will not cause us to
    * exceed the maximum total allocation allowed from the heap.  First
    * compute the increment. */
   increment = sizeof (struct netbuf) + (len + num_guard_bytes);
#ifdef HEAPBUFS_DEBUG
   /* also account for the size of the debug structure if HEAPBUFS_DEBUG
    * is enabled */
   increment += sizeof (PHLE);
#endif
   ENTER_CRIT_SECTION(&heap_curr_mem);
   heap_curr_mem += increment;
   if (heap_curr_mem > MAX_TOTAL_HEAP_ALLOC)
   {
      limit_exceeded = PKTALLOC_TRUE;
   }
   EXIT_CRIT_SECTION(&heap_curr_mem);
   if (limit_exceeded)
   {
      INCR_SHARED_VAR (hbufstats, LIMIT_EXCEEDED_ERR, 1);
      DECR_SHARED_VAR (heap_curr_mem, increment);
      return(NULL);
   }

   if (heap_type == HEAP_ACCESS_BLOCKING) UNLOCK_NET_RESOURCE (FREEQ_RESID);
      
   /* attempt to allocate a buffer for struct netbuf from the heap */
   if ((p = ((struct netbuf *) HB_ALLOC (sizeof (struct netbuf)))) == 0)
   {
      /* restore state that existed prior to call into pk_alloc () */
      if (heap_type == HEAP_ACCESS_BLOCKING) LOCK_NET_RESOURCE (FREEQ_RESID);
      INCR_SHARED_VAR (hbufstats, NB_ALLOCFAIL_ERR, 1);
      DECR_SHARED_VAR (heap_curr_mem, increment);
      return(NULL);
   }
   /* attempt to allocate data buffer from heap */
   if ((p->nb_buff = HB_ALLOC (len + num_guard_bytes)) == 0)
   {
      HB_FREE (p);
      if (heap_type == HEAP_ACCESS_BLOCKING) LOCK_NET_RESOURCE (FREEQ_RESID);
      INCR_SHARED_VAR (hbufstats, DB_ALLOCFAIL_ERR, 1);
      DECR_SHARED_VAR (heap_curr_mem, increment);
      return(NULL);
   }
#ifdef HEAPBUFS_DEBUG
   /* obtain storage for private heapbuf list element to help keep track of the heapbuf allocation */
   if ((phlep = ((PHLEP) HB_ALLOC (sizeof(PHLE)))) == 0)
   {
      HB_FREE (p->nb_buff);
      HB_FREE (p);
      if (heap_type == HEAP_ACCESS_BLOCKING) LOCK_NET_RESOURCE (FREEQ_RESID);
      INCR_SHARED_VAR (hbufstats, PHLEB_ALLOCFAIL_ERR, 1);
      DECR_SHARED_VAR (heap_curr_mem, increment);
      return(NULL);
   }
   else
   {
      phlep->netbufp = p;
      phlep->databufp = p->nb_buff;
      phlep->length = len + num_guard_bytes;
   }
#endif

   p->next = 0;
   p->nb_tstamp = 0L;
   /* mark buffer as being from heap and not interrupt-safe */
   p->flags = (PKF_HEAPBUF | PKF_INTRUNSAFE);
#ifdef NPDEBUG
   /* Add memory markers at start and end of block (to help detect memory corruption) */
   bufp = p->nb_buff;
   for (i = 0; i < ALIGN_TYPE; i++)
       *(bufp + i) = 'M';
   *(bufp + len + ALIGN_TYPE) = 'M';
   p->nb_buff += ALIGN_TYPE;   /* increment buffer's start pointer past guard band */
#endif
   p->nb_blen = len;

   if (heap_type == HEAP_ACCESS_BLOCKING) LOCK_NET_RESOURCE (FREEQ_RESID);
#ifdef HEAPBUFS_DEBUG
   /* add element describing current allocation into the private heapbuf list.  This
    * manipulation is already protected via ENTER_CRIT_SECTION () and EXIT_CRIT_SECTION
    * macros. */
   q_add(&phlq, (qp)phlep);
#endif
   /* increment the count of successful allocations */
   INCR_SHARED_VAR (hbufstats, HB_ALLOC_SUCC, 1);

   /* update the high watermark if appropriate */
   ENTER_CRIT_SECTION(&heap_curr_mem);
   if (heap_curr_mem > heap_curr_mem_hi_watermark)
   {
      heap_curr_mem_hi_watermark = heap_curr_mem;
   }
   EXIT_CRIT_SECTION(&heap_curr_mem);

   return p;
}
Example #16
struct in_multi * 
in_addmulti(ip_addr *ap, struct net *netp, int addrtype)
{
   struct in_multi *inm = (struct in_multi *)NULL;
   int error;

   /* check for good addr. */
   if ((ap == (ip_addr *)NULL) || (*ap == 0))
      return ((struct in_multi *)NULL);  

   ENTER_CRIT_SECTION(netp);

   /* See if address already in list. */
#ifdef IP_V6
   if(addrtype == 6)
      inm = v6_lookup_mcast((ip6_addr*)ap, netp);
#endif
#ifdef IP_V4
   if(addrtype != 6)
      inm = lookup_mcast(*ap, netp);
#endif

   if (inm != (struct in_multi *)NULL) 
   {
      /* Found it; just increment the reference count. */
      ++inm->inm_refcount;
   }
   else
   {
      /*
       * New address; allocate a new multicast record
       * and link it into the interface's multicast list.
       */
      inm = (struct in_multi *)INM_ALLOC(sizeof(*inm));

      if (inm == (struct in_multi *)NULL) 
      {
         EXIT_CRIT_SECTION(netp);
         return ((struct in_multi *)NULL);
      }
#ifdef IP_V6
      if(addrtype == 6)
         IP6CPY(&inm->ip6addr, (struct in6_addr *)ap);
#endif
#ifdef IP_V4
      if(addrtype != 6)
         inm->inm_addr = *ap;
#endif
      inm->inm_netp = netp;
      inm->inm_refcount = 1;
      inm->inm_next = netp->mc_list;
      netp->mc_list = inm;

      /*
       * If net has a multicast address registration routine then ask
       * the network driver to update its multicast reception
       * filter appropriately for the new address.
       */
      if(netp->n_mcastlist)
         error = netp->n_mcastlist(inm);
      else
         error = 0;
#if defined (IGMP_V1) || defined (IGMP_V2)
      /*
       * Let IGMP know that we have joined a new IP multicast group.
       */
      if (inm->inm_addr) igmp_joingroup(inm);
#endif      
   }

   EXIT_CRIT_SECTION(netp);
   USE_ARG(error);

   return (inm);
}
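
A hypothetical join/leave pairing of in_addmulti() with in_delmulti() (Example #10): the record is reference-counted, so the driver's n_mcastlist() update and the IGMP leave happen only when the last reference is released.

void mcast_session(struct net *netp, ip_addr group)
{
   struct in_multi *inm = in_addmulti(&group, netp, 4);  /* 4 == IPv4 join */

   if (inm == (struct in_multi *)NULL)
      return;             /* bad address or allocation failure */

   /* ... receive traffic sent to "group" here ... */

   in_delmulti(inm);      /* drop the refcount; unlinks and frees on last release */
}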
Example #17
int pk_validate_heapbuf (PACKET pkt)
{
#ifdef HEAPBUFS_DEBUG
   PHLEP phlep;
#endif
   u_char start_offset;
   u_char num_guard_bytes;
#ifdef NPDEBUG
   int j;
#endif

#ifdef NPDEBUG
   start_offset = ALIGN_TYPE;
   num_guard_bytes = ALIGN_TYPE + 1;
#else
   start_offset = num_guard_bytes = 0;
#endif

   /* check for consistency with the nb_blen field */
   if (pkt->nb_blen <= bigbufsiz)  
   {
      INCR_SHARED_VAR (hbufstats, INCONSISTENT_HBUF_LEN_ERR, 1);
      return -1;
   }

#ifdef HEAPBUFS_DEBUG
   ENTER_CRIT_SECTION(&phlq);
   for (phlep=(PHLEP)phlq.q_head; phlep; phlep = phlep->next)
   {
       if ((phlep->netbufp == pkt) && (phlep->databufp == (pkt->nb_buff - start_offset)))
       {
          /* found a matching entry; perform consistency check */
          if (phlep->length != (pkt->nb_blen + num_guard_bytes))
          {
             EXIT_CRIT_SECTION(&phlq);
             INCR_SHARED_VAR (hbufstats, INCONSISTENT_HBUF_LEN_ERR, 1);
             return -1;
          }
          else
             break;
       }
   }
   EXIT_CRIT_SECTION(&phlq);

   if (phlep == 0)
   {
      /* since we don't have a record of this allocation in the private
       * heapbuf list, return an error */
      INCR_SHARED_VAR (hbufstats, NO_PHLE_ERR, 1);
      return -1;
   }
#endif /* HEAPBUFS_DEBUG */

#ifdef NPDEBUG
   /* check for corruption of memory markers (the guard bands are only
    * present when NPDEBUG is defined) */
   for (j = ALIGN_TYPE; j > 0; j--)
   {
      if (*(pkt->nb_buff - j) != 'M')
      {
         INCR_SHARED_VAR (hbufstats, HB_GUARD_BAND_VIOLATED_ERR, 1);
         return -1;
      }
   }
   if (*(pkt->nb_buff + pkt->nb_blen) != 'M')
   {
      INCR_SHARED_VAR (hbufstats, HB_GUARD_BAND_VIOLATED_ERR, 1);
      return -1;
   }
#endif /* NPDEBUG */

   /* packet has passed the validation checks */
   return 0;
}
Example #18
void pk_free_heapbuf (PACKET pkt)
{
   u_char start_offset;
   u_char num_guard_bytes;
   char * bufp;
#ifdef HEAPBUFS_DEBUG
   PHLEP phlep;
#endif
   u_long len;
   u_long decrement;

#ifdef NPDEBUG
   start_offset = ALIGN_TYPE;
   num_guard_bytes = ALIGN_TYPE + 1;
#else
   start_offset = num_guard_bytes = 0;
#endif

   bufp = pkt->nb_buff - start_offset;
   len = pkt->nb_blen + num_guard_bytes;
#ifdef HEAPBUFS_DEBUG
   /* update private heapbuf list - remove element */
   ENTER_CRIT_SECTION(&phlq);
   for (phlep=(PHLEP)phlq.q_head; phlep; phlep = phlep->next)
   { 
      if ((phlep->netbufp == pkt) && (phlep->databufp == bufp))
      {
         /* found a matching entry; remove it... */
         break;
      }
   }
   EXIT_CRIT_SECTION(&phlq);

   /* did we find the private heapbuf list entry corresponding to this allocation? */
   if (phlep == 0)
   {
      /* No; we were able to validate this PACKET earlier, 
       * but the corresponding PHL entry has now disappeared! */
      INCR_SHARED_VAR (hbufstats, NO_PHLE_ERR, 1);
      return;
   }
   else
   {
      /* delete PHLE entry from its queue.  This deletion is protected via 
       * ENTER_CRIT_SECTION () and EXIT_CRIT_SECTION () macros. */
      qdel(&phlq, phlep);
   }
#endif /* HEAPBUFS_DEBUG */

   if (heap_type == HEAP_ACCESS_BLOCKING) UNLOCK_NET_RESOURCE (FREEQ_RESID);
   
   HB_FREE (pkt);
   HB_FREE (bufp);
#ifdef HEAPBUFS_DEBUG
   HB_FREE (phlep);
#endif /* HEAPBUFS_DEBUG */
   
   if (heap_type == HEAP_ACCESS_BLOCKING) LOCK_NET_RESOURCE (FREEQ_RESID);
   /* decrement the global counter that is used to keep track of the total
    * heap allocation */
   decrement = sizeof (struct netbuf) + len;
#ifdef HEAPBUFS_DEBUG
   /* also account for the size of the debug structure if HEAPBUFS_DEBUG
    * is enabled */
   decrement += sizeof (PHLE);
#endif
   DECR_SHARED_VAR (heap_curr_mem, decrement);

   return;
}
Example #19
int
tcp_output(struct tcpcb * tp)
{
   struct socket *   so =  tp->t_inpcb->inp_socket;
   int   len;
   long  win;
   int   off,  flags,   error;
   struct mbuf *  m;
   struct tcpiphdr * ti;
   unsigned optlen = 0;
   int   idle, sendalot;
   struct mbuf *  sendm;   /* mbuf which contains data to send */
   struct mbuf * tcp_mbuf; /* mbuf containing TCP header */
   int   bufoff;           /* offset of data in sendm->m_data */

#ifdef TCP_SACK
   int   sack_resend;
   int   sack_hole = 0;    /* next sack hole to fill */

   if(tp->t_flags & TF_SACKREPLY)
   {
      /* we are resending based on a received SACK header */
      sack_resend = TRUE;
      tp->t_flags &= ~TF_SACKREPLY;    /* clear flag */
   }
   else
      sack_resend = FALSE;
#endif /* TCP_SACK */
   
   /*
    * Determine length of data that should be transmitted,
    * and flags that will be used.
    * If there is some data or critical controls (SYN, RST)
    * to send, then transmit; otherwise, investigate further.
    */
   idle = (tp->snd_max == tp->snd_una);

again:
   sendalot = 0;
   off = (int)(tp->snd_nxt - tp->snd_una);
   win = (long)tp->snd_wnd;   /* set basic send window */
   if (win > (long)tp->snd_cwnd) /* see if we need congestion control */
   {
      win = (int)(tp->snd_cwnd & ~(ALIGN_TYPE-1)); /* keep data aligned */
   }

   /*
    * If in persist timeout with window of 0, send 1 byte.
    * Otherwise, if window is small but nonzero
    * and timer expired, we will send what we can
    * and go to transmit state.
    */
   if (tp->t_force) 
   {
      if (win == 0)
         win = 1;
      else 
      {
         tp->t_timer[TCPT_PERSIST] = 0;
         tp->t_rxtshift = 0;
      }
   }

#ifdef TCP_SACK
   /* See if we need to adjust the offset for a sack resend */
   if(sack_resend)
   {
      off = (int)(tp->sack_hole_start[sack_hole] - tp->snd_una);
      /* if this hole's already been acked then punt and move to next hole */
      if(off < 0)
      {
         /* clear out the acked hole */
         tp->sack_hole_start[sack_hole] = tp->sack_hole_end[sack_hole] = 0;
         /* see if we're done with SACK hole list (2 tests) */
         if(++sack_hole >= SACK_BLOCKS)
            return 0;
         if(tp->sack_hole_start[sack_hole] == tp->sack_hole_end[sack_hole])
            return 0;
         goto again;
      }
      tp->snd_nxt = tp->sack_hole_start[sack_hole];
      len = (int)(tp->sack_hole_end[sack_hole] - tp->sack_hole_start[sack_hole]);
      len = (int)MIN(len, (int)win);
   }
   else
#endif /* TCP_SACK */
   {
      /* set length of packets which are not sack resends */
      len = (int)MIN(so->so_snd.sb_cc, (unsigned)win) - off;
   }

   flags = tcp_outflags[tp->t_state];


   /* See if we need to build TCP options field. This test should be fast. */

#if (defined(TCP_TIMESTAMP) || defined(TCP_SACK))
   if((flags & TH_SYN) ||
/*   !!!???   (so->so_options & SO_TIMESTAMP) ||  */
      (tp->t_flags & TF_SACKNOW))
   {
      optlen = bld_options(tp, &tcp_optionbuf[optlen], flags, so);
   }
#else
   /* If the other options are not defined in this build, don't bother
    * to call bld_options() except on SYN packets.
    */
   if(flags & TH_SYN)
   {
      optlen = bld_options(tp, &tcp_optionbuf[optlen], flags, so);
   }
#endif

   if (len < 0)
   {
      /*
       * If FIN has been sent but not acked,
       * but we haven't been called to retransmit,
       * len will be -1.  Otherwise, window shrank
       * after we sent into it.  If window shrank to 0,
       * cancel pending retransmit and pull snd_nxt
       * back to (closed) window.  We will enter persist
       * state below.  If the window didn't close completely,
       * just wait for an ACK.
       */
      len = 0;
      if (win == 0) 
      {
         tp->t_timer[TCPT_REXMT] = 0;
         tp->snd_nxt = tp->snd_una;
      }
   }

   if (len > (int)tp->t_maxseg)
   {
      len = tp->t_maxseg;
      sendalot = 1;
   }

#ifdef IP_V4
#ifdef IP_PMTU
   {
      int pmtu = tp->t_inpcb->inp_pmtu - 40;   /* path MTU less IP/TCP headers */

      if (len > pmtu)
      {
         len = pmtu;    /* clamp the segment to what fits in the path MTU */
         sendalot = 1;
      }
   }
#endif /* IP_PMTU */
   /* We don't need a pmtu test for IPv6. The v6 code limits t_maxseg to
    * the Path MTU, so the t_maxseg test above covers us.
    */
#endif /* IP_V4 */

   if (SEQ_LT(tp->snd_nxt + len, tp->snd_una + so->so_snd.sb_cc))
      flags &= ~TH_FIN;
   win = (long)(sbspace(&so->so_rcv));

   /*
    * If our state indicates that FIN should be sent
    * and we have not yet done so, or we're retransmitting the FIN,
    * then we need to send.
    */
   if ((flags & TH_FIN) &&
       (so->so_snd.sb_cc == 0) &&
       ((tp->t_flags & TF_SENTFIN) == 0 || tp->snd_nxt == tp->snd_una))
   {
      goto send;
   }
   /*
    * Send if we owe peer an ACK.
    */
   if (tp->t_flags & TF_ACKNOW)
      goto send;
   if (flags & (TH_SYN|TH_RST))
      goto send;
   if (SEQ_GT(tp->snd_up, tp->snd_una))
      goto send;

   /*
    * Sender silly window avoidance. If the connection is idle and we
    * can send all the data, a maximum segment, or at least a maximum
    * default-size segment, do it, or do it if we are forced; otherwise
    * don't bother. If the peer's buffer is tiny, send when the window
    * is at least half open. If retransmitting (possibly after the
    * persist timer forced us to send into a small window), we must
    * resend.
    */
   if (len)
   {
      if (len == (int)tp->t_maxseg)
         goto send;
      if ((idle || tp->t_flags & TF_NODELAY) &&
          len + off >= (int)so->so_snd.sb_cc)
      {
         goto send;
      }
      if (tp->t_force)
         goto send;
      if (len >= (int)(tp->max_sndwnd / 2))
         goto send;
      if (SEQ_LT(tp->snd_nxt, tp->snd_max))
         goto send;
   }

   /*
    * Compare available window to amount of window
    * known to peer (as advertised window less
    * next expected input).  If the difference is at least two
    * max size segments or at least 35% of the maximum possible
    * window, then want to send a window update to peer.
    */
   if (win > 0)
   {
      int   adv   =  (int)win -  (int)(tp->rcv_adv -  tp->rcv_nxt);

      if (so->so_rcv.sb_cc == 0 && adv >= (int)(tp->t_maxseg * 2))
         goto send;
      if (100 * (u_int)adv / so->so_rcv.sb_hiwat >= 35)
         goto send;
   }

   /*
    * TCP window updates are not reliable, rather a polling protocol
    * using ``persist'' packets is used to insure receipt of window
    * updates.  The three ``states'' for the output side are:
    *   idle         not doing retransmits or persists
    *   persisting      to move a small or zero window
    *   (re)transmitting   and thereby not persisting
    *
    * tp->t_timer[TCPT_PERSIST]
    *   is set when we are in persist state.
    * tp->t_force
    *   is set when we are called to send a persist packet.
    * tp->t_timer[TCPT_REXMT]
    *   is set when we are retransmitting
    * The output side is idle when both timers are zero.
    *
    * If send window is too small, there is data to transmit, and no
    * retransmit or persist is pending, then go to persist state.
    * If nothing happens soon, send when timer expires:
    * if window is nonzero, transmit what we can,
    * otherwise force out a byte.
    */
   if (so->so_snd.sb_cc && tp->t_timer[TCPT_REXMT] == 0 &&
       tp->t_timer[TCPT_PERSIST] == 0) 
   {
      tp->t_rxtshift = 0;
      tcp_setpersist(tp);
   }

   /*
    * No reason to send a segment, just return.
    */
   return (0);

send:
   ENTER_CRIT_SECTION(tp);

   /* Limit send length to the current buffer so as to
    * avoid doing the "mbuf shuffle" in m_copy().
    */
   bufoff = off;
   sendm = so->so_snd.sb_mb;
   if (len)
   {
      /* find mbuf containing data to send (at "off") */
      while (sendm)  /* loop through socket send list */
      {
         bufoff -= sendm->m_len;
         if (bufoff < 0)   /* if off is in this buffer, break */
            break;
         sendm = sendm->m_next;
      }
      if (!sendm) { dtrap();  /* shouldn't happen */ }
      bufoff += sendm->m_len; /* index to next data to send in msend */

      /* if socket has multiple unsent mbufs, set flag for send to loop */
      if ((sendm->m_next) && (len > (int)sendm->m_len))
      {
         flags &= ~TH_FIN; /* don't FIN on segment prior to last */
         sendalot = 1;     /* set to send more segments */
      }
      if((flags & TH_FIN) && (so->so_snd.sb_cc > (unsigned)len))
      {
         /* This can happen on slow links (PPP) which retry the last 
          * segment - the one with the FIN bit attached to data.
          */
         flags &= ~TH_FIN; /* don't FIN on segment prior to last */
      }

      /* only send the rest of msend */
      len = min(len, (int)sendm->m_len);

      /* if we're not sending starting at sendm->m_data (in which 
       * case bufoff != 0), then we will copy the data; else we would 
       * write IP/TCP headers over sent but un-ack'ed data in sendm. 
       * Similarly, if sendm->m_data is not aligned with respect to 
       * sendm->m_base and ALIGN_TYPE, we will copy the data to 
       * ensure that it (and the then-prepended IP/TCP headers) will 
       * be aligned according to ALIGN_TYPE. 
       */
      if ((bufoff != 0) ||       /* data not front aligned in send mbuf? */
          (((sendm->m_data - sendm->m_base) & (ALIGN_TYPE - 1)) != 0))
      {
         len = min(len, (int)(sendm->m_len - bufoff));   /* limit len again */

         /* One more test - if this data is not aligned with the front
          * of the m_data buffer then we can't use it in place, else we
          * might write the IP/TCP header over data that has not yet
          * been acked. In this case we must make sure our send
          * fits into a little buffer and send what we can.
          */
         if ((len > (int)(lilbufsiz - HDRSLEN)) && /* length bigger than a small buffer? */
             (bigfreeq.q_len < 2))      /* and we are low on big buffers */
         {
            len = lilbufsiz - HDRSLEN;
         }
      }
   }

   /* if send data is sufficiently aligned in packet, prepend TCP/IP header
    * in the space provided. 
    */
   if (len && (bufoff == 0) && 
       (sendm->pkt->inuse == 1) &&
       (((sendm->m_data - sendm->m_base) & (ALIGN_TYPE - 1)) == 0) && 
       (optlen == 0))
   {
      /* get an empty mbuf to "clone" the data */
      m = m_getnbuf(MT_TXDATA, 0);
      if (!m)
      {
         EXIT_CRIT_SECTION(tp);
         return (ENOBUFS);
      }
      m->pkt = sendm->pkt; /* copy packet location in new mbuf */
      m->pkt->inuse++;     /* bump packet's use count */
      m->m_base = sendm->m_base; /* clone mbuf members */
      m->m_memsz = sendm->m_memsz;
      m->m_len = len + TCPIPHDRSZ;  /* adjust clone for header */
      m->m_data = sendm->m_data - TCPIPHDRSZ;
   }
   else  /* either no data or data is not front aligned in mbuf */
   {
      /* Grab a header mbuf, attaching a copy of data to be 
       * transmitted, and initialize the header from 
       * the template for sends on this connection.
       */
      m = m_getwithdata (MT_HEADER, IFNETHDR_SIZE + TCPIPHDRSZ);
      if (m ==(struct mbuf *)NULL)
      {
         EXIT_CRIT_SECTION(tp);
         return ENOBUFS;
      }

      m->m_len = TCPIPHDRSZ;
      m->m_data += IFNETHDR_SIZE;   /* leave room for the link-layer header
                                     * (e.g. 14 bytes for Ethernet) */

      if (len) /* attach any data to send */
      {
         m->m_next = m_copy(so->so_snd.sb_mb, off, (int) len);
         if (m->m_next == 0)
         {
            m_freem(m);
            EXIT_CRIT_SECTION(tp);
            return ENOBUFS;
         }
      }
   }
   EXIT_CRIT_SECTION(tp);

   if (len) 
   {
      if (tp->t_force && len == 1)
         tcpstat.tcps_sndprobe++;
      else if (SEQ_LT(tp->snd_nxt, tp->snd_max)) 
      {
         tcpstat.tcps_sndrexmitpack++;
         tcpstat.tcps_sndrexmitbyte += len;
#ifdef TCP_SACK
         if(sack_resend)
            tcpstat.tcps_sackresend++;
#endif
      } 
      else 
      {
         tcpstat.tcps_sndpack++;
         tcpstat.tcps_sndbyte += len;
      }
   }
   else if (tp->t_flags & TF_ACKNOW)
   {
      tcpstat.tcps_sndacks++;
   }
   else if (flags & (TH_SYN|TH_FIN|TH_RST))
      tcpstat.tcps_sndctrl++;
   else if (SEQ_GT(tp->snd_up, tp->snd_una))
      tcpstat.tcps_sndurg++;
   else
      tcpstat.tcps_sndwinup++;

   ti = (struct tcpiphdr *)(m->m_data+sizeof(struct ip)-sizeof(struct ipovly));
   if ((char *)ti < m->pkt->nb_buff)
   {
      panic("tcp_out- packet ptr underflow\n");
   }
   tcp_mbuf = m;        /* flag TCP header mbuf */

#ifdef IP_V6  /* Dual mode code */
   if(so->so_domain == AF_INET6)
   {
      m = mbuf_prepend(m, sizeof(struct ipv6));
      if(m == NULL)
      {
         /* this can happen when we run out of mbufs or pkt buffers
          * That is, mfreeq is empty or (lilfreeq, bigfreeq) are empty.
          * One solution is to find out which one is getting full and
          * then increase them.
          */
         dtrap();             /* This is really rare... */
         m_freem(tcp_mbuf);   /* Free TCP/data chain */
         return ENOBUFS;
      }

      /* strip overlay from front of TCP header */
      tcp_mbuf->m_data += sizeof(struct ipovly);
      tcp_mbuf->m_len -= sizeof(struct ipovly);
   }
#endif   /* end IP_V6 */

   if (tp->t_template == 0)
      panic("tcp_output");

   MEMCPY((char*)ti, (char*)tp->t_template, sizeof(struct tcpiphdr));

   /*
    * Fill in fields, remembering maximum advertised
    * window for use in delaying messages about window sizes.
    * If resending a FIN, be sure not to use a new sequence number.
    */
   if (flags & TH_FIN && tp->t_flags & TF_SENTFIN && 
       tp->snd_nxt == tp->snd_max)
   {
      tp->snd_nxt--;
   }

   ti->ti_seq = htonl(tp->snd_nxt);
   ti->ti_ack = htonl(tp->rcv_nxt);

   /*
    * If we're sending a SYN, check the IP address of the interface
    * that we will (likely) use to send the IP datagram -- if it's
    * changed from what is in the template (as it might if this is
    * a retransmission, and the original SYN caused PPP to start
    * bringing the interface up, and PPP has got a new IP address
    * via IPCP), update the template and the inpcb with the new 
    * address.
    */
   if (flags & TH_SYN)
   {
      struct inpcb * inp;
      inp = (struct inpcb *)so->so_pcb;

      switch(so->so_domain)
      {
#ifdef IP_V4
      case AF_INET:
      {
         ip_addr src;

#ifdef INCLUDE_PPP

         if(((flags & TH_ACK) == 0) && /* SYN only, not SYN/ACK */
            (inp->ifp) &&              /* Make sure we have iface */
            (inp->ifp->mib.ifType == PPP))   /* only PPP type */
         {
            dtrap(); /* remove after confirmed to work in PPP */
            src = ip_mymach(ti->ti_dst.s_addr);

            if (src != ti->ti_src.s_addr)
            {
               ti->ti_src.s_addr = src;
               tp->t_template->ti_src.s_addr = src;
               tp->t_inpcb->inp_laddr.s_addr = src;
            }
         }
#endif   /* INCLUDE_PPP */

         /* If this is a SYN (not a SYN/ACK) then set the pmtu */
         if((flags & TH_ACK) == 0)
         {
#ifdef IP_PMTU
            inp->inp_pmtu = pmtucache_get(inp->inp_faddr.s_addr);
#else    /* not compiled for pathmtu, guess based on iface */
            {
               NET ifp;
               /* find iface for route. Pass "src" as nexthop return */
               ifp = iproute(ti->ti_dst.s_addr, &src);
               if(ifp)
                  inp->inp_pmtu = ifp->n_mtu - (ifp->n_lnh + 40);
               else
                  inp->inp_pmtu = 580;  /* Ugh. */
            }
#endif   /* IP_PMTU */
         }
         break;
      }
#endif   /* IP_V4 */

#ifdef IP_V6
      case AF_INET6:
      {
         struct ip6_inaddr * local;
         
         local = ip6_myaddr(&tp->t_inpcb->ip6_faddr, inp->ifp);

         /* If we got a local address & it's not the one in the pcb, then
          * we assume it changed at the iface and fix it in the pcb. Unlike
          * v4, we don't have an IP header yet, nor do we have a template
          * to worry about.
          */
         if((local) && 
            (!IP6EQ(&local->addr, &tp->t_inpcb->ip6_laddr)))
         {
            IP6CPY(&tp->t_inpcb->ip6_laddr, &local->addr);
         }
         /* If this is a SYN (not a SYN/ACK) then set the pmtu */
         if((flags & TH_ACK) == 0)
         {
            inp->inp_pmtu = ip6_pmtulookup(&inp->ip6_laddr, inp->ifp);
         }
         break;
      }
#endif   /* IP_V6 */
      default:
         dtrap();    /* bad domain setting */
      }
   }

   /* fill in options if any are set */
   if (optlen)
   {
      struct mbuf * mopt;

      mopt = m_getwithdata(MT_TXDATA, MAXOPTLEN);
      if (mopt == NULL) 
      {
         m_freem(m);
         return (ENOBUFS);
      }

      /* insert options mbuf after tcp_mbuf */
      mopt->m_next = tcp_mbuf->m_next;
      tcp_mbuf->m_next = mopt;

      /* extend options to aligned address */
      while(optlen & 0x03)
         tcp_optionbuf[optlen++] = TCPOPT_EOL;

      MEMCPY(mtod(mopt, char *), tcp_optionbuf, optlen);
      mopt->m_len = optlen;
      /* use portable macro to set tcp data offset bits */
      SET_TH_OFF(ti->ti_t, ((sizeof (struct tcphdr) + optlen) >> 2));
   }

   ti->ti_flags = (u_char)flags;
   /*
    * Calculate receive window. Don't shrink window,
    * but avoid silly window syndrome.
    */
   if (win < (long)(so->so_rcv.sb_hiwat / 4) && win < (long)tp->t_maxseg)
      win = 0;
   if (win < (long)(tp->rcv_adv - tp->rcv_nxt))
      win = (long)(tp->rcv_adv - tp->rcv_nxt);

   /* do check for Iniche buffer limits -JB- */
   if (bigfreeq.q_len == 0)   /* If queue length is 0, set window to 0 */
   {
      win = 0;
   }
   else if(win > (((long)bigfreeq.q_len - 1) * (long)bigbufsiz))
   {
      win = ((long)bigfreeq.q_len - 1) * bigbufsiz;
   }

#ifdef TCP_WIN_SCALE
   if(tp->t_flags & TF_WINSCALE)
   {
      ti->ti_win = htons((u_short)(win >> tp->rcv_wind_scale)); /* apply scale */
   }
Example #20
/* FUNCTION: tk_sem_pend()
 *
 * If the semaphore count is greater than 0, decrement the semaphore
 * count and continue. Otherwise, add the task to the end of the 
 * semaphore wait queue, and mark the task "waiting".
 *
 * PARAM1: TASK *             task (NULL == current task)
 * PARAM2: IN_SEM *           semaphore (NULL == task's semaphore)
 * PARAM3: int32_t            > 0 == wait time (ticks)
 *                            INFINITE_DELAY == wait forever
 *                            <= 0 == don't wait
 *
 * RETURN: int                SUCCESS or error code
 */
int
tk_sem_pend(TASK *task, IN_SEM *sem, int32_t timeout)
{
   int  err = SUCCESS;

   ENTER_CRIT_SECTION();

   if (task == (TASK *)NULL)
      task = tk_cur;          /* default to current task */
   if (sem == (IN_SEM *)NULL)
      sem = task->tk_semaphore;

   TK_VALIDATE(task);

   if (!sem)
   {
      dtrap();                /* no semaphore! */
   }
   else
   {
#ifdef DEBUG_TASK
      if ((sem->tk_tag != SEMA_TAG) || (task->tk_waitq))
         dtrap();
#endif
      if (sem->tk_count > 0)     /* semaphore is available */
      {
         sem->tk_count--;
      }
      else if (timeout > 0)      /* wait until semaphore is available */
      {
         TASK *tk;

         /* append task onto the end of the wait queue */
         if ((tk = sem->tk_waitq) == (TASK *)NULL)
         {
            sem->tk_waitq = task;
         }
         else
         {
#ifdef DEBUG_TASK
            if (tk == task)
               dtrap();
#endif
            while (tk->tk_waitq)
            {
#ifdef DEBUG_TASK
               if (tk == task)
                  dtrap();
#endif
               tk = tk->tk_waitq;
            }
            tk->tk_waitq = task;
         } 

         task->tk_event = (void *)sem;
         task->tk_waitq = (TASK *)NULL;
         task->tk_flags |= TF_SEMAPHORE;
         if (timeout != INFINITE_DELAY)
         {
            task->tk_waketick = TIME_ADD(CTICKS, timeout);
            task->tk_flags |= TF_TIMER;
         }

         /* wait for something to happen */
         EXIT_CRIT_SECTION();
         TK_SUSPEND(TK_THIS);
         ENTER_CRIT_SECTION();

         /* clean up and continue */
         err = (task->tk_flags & TF_TIMEOUT) ? TK_TIMEOUT : 0;
         task->tk_flags &= ~(TF_SEMAPHORE | TF_TIMER | TF_TIMEOUT);
         task->tk_waitq = (TASK *)NULL;
         task->tk_event = NULL;
         task->tk_waketick = 0;
      }
      else           /* not available and not waiting */
      {
         err = TK_TIMEOUT;
      }
   }

   EXIT_CRIT_SECTION();

   return (err);
}
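
A hypothetical pend/post pairing built from Examples #7 and #20; rx_sem is an invented, already-initialized counting semaphore, and TPS (clock ticks per second) is assumed:

IN_SEM rx_sem;          /* invented; assumed initialized elsewhere */

void rx_task(void)
{
   for (;;)
   {
      /* block for up to roughly one second of ticks */
      if (tk_sem_pend(NULL, &rx_sem, TPS) == TK_TIMEOUT)
         continue;                     /* nothing arrived; poll again */

      /* a post occurred: service the received work here */
   }
}

void rx_notify(void)    /* producer side: wake the waiter */
{
   tk_sem_post(NULL, &rx_sem);
}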