Example #1
IASolverInt::IASolverInt(const IAData * ia_data_ptr, IASolution *relaxed_solution_ptr, 
  const bool set_silent) 
  : IASolverToolInt(ia_data_ptr, relaxed_solution_ptr, true), 
  silent(false),
//  silent(set_silent), 
  debugging(true)
{ 
  ip_data(new IPData);
  // initialize() copies the relaxed solution, so relaxed_solution_ptr can be overwritten afterwards
  ip_data()->initialize(relaxed_solution_ptr->x_solution); 
}
Example #2
int
tcp_rcv(PACKET pkt)     /* NOTE: pkt has nb_prot pointing to IP header */
{
   struct mbuf *  m_in;
   struct ip * bip;  /* IP header, berkeley version */
   struct tcphdr *   tcpp;
   unshort  len;  /* scratch length holder */

   /* For TCP, the netport IP layer is modified to set nb_prot to the 
    * start of the IP header (not TCP). We need to do some further
    * mods which the BSD code expects:
    */
   bip = (struct ip *)pkt->nb_prot;    /* get ip header */
   len = ntohs(bip->ip_len);  /* get length in local endian */

   /* verify checksum of received packet */

   tcpp = (struct tcphdr *)ip_data(bip);
   if (tcp_cksum(bip) != tcpp->th_sum)
   {
      TCP_MIB_INC(tcpInErrs);    /* keep MIB stats */
      tcpstat.tcps_rcvbadsum++;  /* keep BSD stats */
      LOCK_NET_RESOURCE(FREEQ_RESID);
      pk_free(pkt);  /* punt packet */
      UNLOCK_NET_RESOURCE(FREEQ_RESID);
      return ENP_BAD_HEADER;
   }

   m_in = m_getnbuf(MT_RXDATA, 0);
   if (!m_in){
      LOCK_NET_RESOURCE(FREEQ_RESID);
      pk_free(pkt);
      UNLOCK_NET_RESOURCE(FREEQ_RESID);
      return ENP_RESOURCE;  
   }

   IN_PROFILER(PF_TCP, PF_ENTRY);      /* measure time in TCP */

   /* subtract IP header length from total IP packet length */
   len -= ((unshort)(bip->ip_ver_ihl & 0x0f) << 2);
   bip->ip_len = len;   /* put TCP length in struct for TCP code to use */

   /* set mbuf to point to start of IP header (not TCP) */
   m_in->pkt = pkt;
   m_in->m_data = pkt->nb_prot;
   m_in->m_len = pkt->nb_plen;
   m_in->m_base = pkt->nb_buff;     /* ??? */
   m_in->m_memsz = pkt->nb_blen;    /* ??? */

   tcp_input(m_in, pkt->net);

   IN_PROFILER(PF_TCP, PF_EXIT);      /* measure time in TCP */

   return 0;
}
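A note on the `ip_data()` used throughout the networking examples: it resolves the start of the transport-layer message (TCP or IGMP) from a pointer to the IP header. Its definition is not shown on this page; a minimal sketch, assuming the BSD-style `ip_ver_ihl` byte whose low nibble is the header length in 32-bit words (the same arithmetic tcp_rcv() uses above), might look like this:

/* Hypothetical sketch of the IP header helpers assumed by these examples.
 * ip_hlen(): IP header length in bytes (low nibble of ip_ver_ihl is the
 * header length in 32-bit words, so shift left by 2 to get bytes).
 * ip_data(): first byte past the IP header, i.e. the start of the
 * TCP or IGMP message. */
#define ip_hlen(pip)   (((pip)->ip_ver_ihl & 0x0f) << 2)
#define ip_data(pip)   ((char *)(pip) + ip_hlen(pip))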
Example #3
int igmpv2_input (PACKET p)
{
   struct igmp * igmp;
   struct ip * pip;
   int igmplen;
   u_char type;
   int rc;

   pip = ip_head (p);    
   /* compute length of IGMP packet (after accounting for IP header, 
    * including the IP Router Alert option (if present)) */   
   igmplen = p->nb_plen - ip_hlen (pip);
   igmp = (struct igmp *) (ip_data (pip));   
   /* extract the IGMP packet type from received packet */
   type = igmp->igmp_type;

   switch (type) 
   {
      case IGMP_HOST_MEMBERSHIP_QUERY:
         rc = igmpv2_process_query (p);
         break;

      case IGMP_HOST_MEMBERSHIP_REPORT:
      case IGMPv2_MEMBERSHIP_REPORT:
         rc = igmpv2_process_report (p);
         break;
         
      case IGMPv2_LEAVE_GROUP:
         /* Leave messages are typically addressed to the all-routers 
          * multicast address group (224.0.0.2).  We do not implement 
          * multicast router functionality, and therefore, do not 
          * expect to receive such messages.  However, according to
          * RFC 2236, some implementations of an older version of the 
          * IGMPv2 specification send leave messages to the group 
          * being left.  If we do receive such a message, we will 
          * drop it. */       
         ++igmpstats.igmpv2mode_v2_leave_msgs_rcvd;
         rc = IGMP_OK;
         break;               

      default:     
         ++igmpstats.igmpv2mode_unknown_pkttype;
         rc = IGMP_ERR;         
         break;
   } /* end SWITCH */

   /* we're done processing the received packet; return packet buffer 
    * back to free pool */
   LOCK_NET_RESOURCE(FREEQ_RESID);
   pk_free(p);
   UNLOCK_NET_RESOURCE(FREEQ_RESID);
   
   return rc;
}
Example #4
int igmp_validate (PACKET p)
{
    struct ip * pip;
    int igmplen;
    struct igmp * igmp;
    u_short osum;
    u_short xsum;
    u_char type;
    ip_addr mcgrp_addr;
    u_char resp_time;

    pip = ip_head (p);

    /* compute length of IGMP packet (after accounting for IP header,
     * including the IP Router Alert option (if present)) */
    igmplen = p->nb_plen - ip_hlen (pip);

    /* validate length (IGMP_MINLEN is 8 bytes) */
    if (igmplen != IGMP_MINLEN)
    {
        ++igmpstats.igmp_badlen_rcvd;
        return ENP_BAD_HEADER;
    }

    /* validate checksum */
    igmp = (struct igmp *) (ip_data (pip));
    osum = igmp->igmp_cksum;
    igmp->igmp_cksum = 0;
    xsum = ~cksum(igmp, igmplen>>1);
    if (xsum != osum)
    {
        igmp->igmp_cksum = osum;
        ++igmpstats.igmp_badsum_rcvd;
        return ENP_BAD_HEADER;
    }

    /* extract the IGMP packet type, Group Address, and Max Response Time
     * (unused for IGMPv1) fields from received packet */
    type = igmp->igmp_type;
    mcgrp_addr = ntohl(igmp->igmp_group);
    resp_time = igmp->igmp_code;

    if (type == IGMP_HOST_MEMBERSHIP_QUERY)
    {
        if ((resp_time == 0) || /* IGMPv1 Query */
                ((resp_time > 0) && (mcgrp_addr == 0))) /* IGMPv2 General Query */
        {
            /* if this is a IGMPv1 Host Membership Query or a IGMPv2
             * General Query, it must be addressed to the all-hosts
             * group */
            if (pip->ip_dest != igmp_all_hosts_group)
            {
                ++igmpstats.igmp_bad_queries_rcvd;
                return ENP_BAD_HEADER;
            }
        }

        if ((resp_time > 0) && (mcgrp_addr != 0))
        {
            /* this is a IGMPv2 Group-Specific Query. */
            if (p->net->igmp_oper_mode == IGMP_MODE_V1)
            {
                /* IGMPv1 code does not understand a IGMPv2 Group-
                 * Specific Query */
                return ENP_BAD_HEADER;
            }

            /* check to make sure that the group address field carries
             * a valid multicast address; if it doesn't, we
             * drop the packet.  Also drop packets that
             * carry the multicast address for the all-hosts
             * group. */
            if ((!IN_MULTICAST(mcgrp_addr)) ||
                    /* igmp_all_hosts_group is already in network byte order */
                    (igmp->igmp_group == igmp_all_hosts_group))
            {
                ++igmpstats.igmpv2mode_v2_bad_grp_specific_queries_rcvd;
                /* caller will free received packet */
                return ENP_BAD_HEADER;
            }
        }
    }

    /* check to ensure that a received IGMPv1 or v2 Report has the
     * same IP host group address in its IP destination field and
     * its IGMP group address field, and that the group address is
     * a valid multicast address */
    if ((type == IGMP_HOST_MEMBERSHIP_REPORT) ||
            (type == IGMPv2_MEMBERSHIP_REPORT))
    {
        if ((igmp->igmp_group != pip->ip_dest) ||
                (!IN_MULTICAST(mcgrp_addr)))
        {
            ++igmpstats.igmp_bad_reports_rcvd;
            return ENP_BAD_HEADER;
        }
    }

    /*
     * After receiving an IGMP packet, our IGMP module will check for the
     * presence of the Router Alert option in the following types of
     * packets.  Packets that do not have the option will be discarded.
     *
     * (a) Version 2 General Query (0x11) (differentiated from a Version 1
     *     Host Membership Query by having a Max Response Time > 0)
     * (b) Version 2 Group-Specific Query (0x11)
     * (c) Version 2 Membership Report (0x16)
     * (d) Version 2 Leave Group (0x17)
     *
     * Version 1 Host Membership Reports and Version 1 Host Membership Query
     * packets will not be checked for the IP Router Alert option.
     */
#ifdef IGMP_V2
    if ((type == IGMPv2_LEAVE_GROUP) ||
            (type == IGMPv2_MEMBERSHIP_REPORT) ||
            ((type == IGMP_HOST_MEMBERSHIP_QUERY) && (igmp->igmp_code > 0)))

    {
        if (!igmpv2_chk4_rtr_alert_opt (pip))
        {
            ++igmpstats.igmpv2mode_v2_rtr_alert_missing;
            return ENP_BAD_HEADER;
        }
    }
#endif

    /* validation successful */
    return IGMP_OK;
}
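The checksum test above relies on cksum() returning the one's-complement sum of igmplen >> 1 16-bit words; the code zeroes the checksum field, complements the sum, and compares the result with the value the sender stored. The stack's own cksum() is not shown here; a minimal sketch with that calling convention, assuming 16-bit-aligned data and an even length, could be:

/* Hypothetical sketch (not the stack's implementation): one's-complement
 * sum of 'words' 16-bit words. The caller, as in igmp_validate() above,
 * complements the result and compares it against the checksum field,
 * which it zeroed before summing. */
static unsigned short cksum_sketch(void * buf, unsigned words)
{
    unsigned short * w = (unsigned short *)buf;
    unsigned long sum = 0;

    while (words--)
        sum += *w++;

    /* fold any carries out of the low 16 bits back in */
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);

    return (unsigned short)sum;
}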
Example #5
int igmpv2_process_query (PACKET p)
{
   struct igmp * igmp;
   struct ip * pip;
   NET netp;
   u_short max_resp_time;
   u_char process_all;
   struct in_multi * inm;
   ip_addr mcgrp_addr;

   netp = p->net;
   pip = ip_head (p);
   igmp = (struct igmp *) (ip_data (pip));
   mcgrp_addr = ntohl(igmp->igmp_group);

   if (igmp->igmp_code == 0)
   {
      /* this is a IGMPv1 Host Membership Query */
      netp->igmpv1_rtr_present = IGMP_TRUE;
      netp->igmpv1_query_rcvd_time = cticks;      
      ++igmpstats.igmpv2mode_v1_queries_rcvd;
      /* set maximum time to respond to the equivalent of 10 
       * seconds worth of "ticks" (the timeout routine is
       * intended to be invoked PR_FASTHZ (5) times a second,
       * so each tick is equal to 200 ms) */
      max_resp_time = IGMP_MAX_HOST_REPORT_DELAY * PR_FASTHZ;
      process_all = IGMP_TRUE;
   }
   else
   {
      /* this is either a IGMPv2 General Query or 
       * a IGMPv2 Group-Specific Query */
      if (igmp->igmp_group == 0)
      {
         /* this is a IGMPv2 General Query */
         ++igmpstats.igmpv2mode_v2_general_queries_rcvd;
         process_all = IGMP_TRUE;
      }
      else
      {
         /* this is a IGMPv2 Group-Specific Query */       
         ++igmpstats.igmpv2mode_v2_grp_specific_queries_rcvd;
         process_all = IGMP_FALSE;
      }
      
      /* irrespective of whether received message is a 
       * IGMPv2 General Query or a IGMPv2 Group-Specific Query,
       * set maximum time to respond to value extracted 
       * from received message. The value in the message
       * is in tenths of a second.  max_resp_time is in
       * units of ticks (where one tick is 200 ms) */
      max_resp_time = (igmp->igmp_code * PR_FASTHZ) / 10;
   }
   
   /* process all entries in a link's multicast address linked
    * list (pointed to by mc_list) as part of the response to
    * the received IGMPv1 Host Membership Query or IGMPv2 General
    * Query message */
   if (process_all)
   {
      for (inm = netp->mc_list; inm; inm = inm->inm_next)
      {
         /* skip all IPv6 entries - they are indicated by 
          * an IPv4 address field of 0 */
         if (!(inm->inm_addr)) continue;
         /* skip IPv4 multicast address of 224.0.0.1 (note that
          * the IPv4 address stored in inm_addr is in network 
          * byte order) */
         if (inm->inm_addr != igmp_all_hosts_group)
            igmpv2_chk_set_timer (inm, max_resp_time);
      } /* end FOR (iterate thru' mc_list on net) */
   } /* end IF (process all) */
   else
   {
      /* process one (for IGMPv2 Group-Specific Query) entry (the 
       * one that corresponds to the address listed in the received 
       * query) - it should be present in the link's multicast
       * address list */
      inm = lookup_mcast(igmp->igmp_group, netp);
      if (inm != NULL)
         igmpv2_chk_set_timer (inm, max_resp_time);
      else ++igmpstats.igmpv2mode_v2_unknown_grp_specific_queries_rcvd;
   } /* end ELSE (process ALL) */
   
   /* return success; the caller will return the received packet back
    * to the free pool */
   return IGMP_OK;
}
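The delay arithmetic above juggles three units: the Max Response Time field of an IGMPv2 query (tenths of a second), the timer tick (200 ms, since the timeout routine runs PR_FASTHZ = 5 times per second), and seconds. A short worked illustration, assuming PR_FASTHZ is 5 and IGMP_MAX_HOST_REPORT_DELAY is 10 as the comments above imply:

/* Illustration only (assumed values: PR_FASTHZ == 5,
 * IGMP_MAX_HOST_REPORT_DELAY == 10 seconds):
 *
 *   IGMPv1 query:                 max_resp_time = 10 * 5         = 50 ticks = 10.0 s
 *   IGMPv2 query, igmp_code 100:  max_resp_time = (100 * 5) / 10 = 50 ticks = 10.0 s
 *   IGMPv2 query, igmp_code 25:   max_resp_time = (25 * 5) / 10  = 12 ticks =  2.4 s
 *
 * Integer division rounds the last case down from 2.5 s to 2.4 s. */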
Example #6
int igmpv2_process_report (PACKET p)
{
   struct igmp * igmp;
   struct ip * pip;
   NET netp;
   struct in_multi * inm;

   netp = p->net;
   pip = ip_head (p);
   igmp = (struct igmp *) (ip_data (pip));   
   
   /* If we receive a IGMP (v1 or v2) report for a multicast group 
    * that we have a timer running for, the IGMPv2 specification 
    * requires that we stop the timer (and thereby cancel the 
    * future transmission of our report).
    
    * However, we will use the following table to guide our actions
    * instead.  Scenario #4 causes us to not cancel the timer, 
    * since we have received a IGMPv2 report, but we believe that 
    * the querier on the network is running IGMPv1, and therefore 
    * will not be able to understand the IGMPv2 report.  As a 
    * result, we let our timer run, and if it expires, we will 
    * send out a report.

    * The type of a received report can be determined by examining 
    * the first byte of the IGMP message.  This byte is 0x12 for a 
    * IGMPv1 Host Membership Report, and 0x16 for a Version 2 
    * Membership Report.
    
    * Scenario# igmpv1_rtr_present Type of rcvd report Cancel timer
    * =============================================================
    * 1         No                 IGMPv1              Yes 
    * 2         No                 IGMPv2              Yes
    * 3         Yes                IGMPv1              Yes
    * 4         Yes                IGMPv2              No

    * In scenario #1 and #2, we have a IGMPv2-capable router that 
    * can understand both IGMPv1 and IGMPv2 reports.  In scenario 
    * #3, we have a IGMPv1-capable router that can understand IGMPv1 
    * reports. In scenario #4, we have a IGMPv1-capable router that
    * cannot understand a IGMPv2 report.  It is possible that the 
    * IGMPv1-capable router in scenario #4 is also capable of 
    * processing IGMPv2 packets (it has "downgraded" itself because
    * there are IGMPv1 routers on that network); however, we do not
    * know that, and hence we don't cancel our timer (for the 
    * subsequent transmission of a IGMPv1 report).
    */
   inm = lookup_mcast(igmp->igmp_group, netp);
   if (inm != NULL) 
   {
      if (inm->inm_timer != 0)
      {
         /* we have a timer running */
         if (!(netp->igmpv1_rtr_present && 
             igmp->igmp_type == IGMPv2_MEMBERSHIP_REPORT))
         {
            /* cancel timer */
            inm->inm_timer = 0;
            /* decrement the count of running timers */
            --igmp_timers_are_running;
            /* indicate that we are not the last host to send a 
             * report for this group */
            inm->last2send_report = IGMP_FALSE;
            ++igmpstats.igmpv2mode_v12_reports_rcvd_canceled_timer;
         }
      }
      else
      {
         /* we don't have a timer running; perhaps the source
          * host has just joined the group, and has sent an
          * unsolicited report */
         ++igmpstats.igmpv2mode_v12_reports_rcvd_no_timer;   
      }
   }
   else 
   {
      /* since the Reports are sent to the group address, we should
       * never receive them unless we are a member of that group
       * on that interface.  Even if imperfect filtering at the 
       * device level causes reports for unregistered groups to 
       * be passed up to the IP module, ip_rcv_phase2 () is 
       * responsible for dropping them, and so we should never
       * receive such packets. */
      ++igmpstats.igmpv2mode_v12_unknown_grp_reports_rcvd;
   }
   
   return IGMP_OK;   
}
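The if (!(netp->igmpv1_rtr_present && igmp->igmp_type == IGMPv2_MEMBERSHIP_REPORT)) test is exactly the last column of the scenario table in the comment. Restated as a hypothetical helper (not part of the stack), the rule is:

/* Hypothetical restatement of the table above: cancel our pending report
 * timer unless an IGMPv1 querier is present on the link AND the report we
 * just heard is an IGMPv2 report (scenario #4), because a v1 querier would
 * not have understood that report. */
static int igmp_cancel_timer_on_report(int v1_rtr_present, unsigned char report_type)
{
   return !(v1_rtr_present && (report_type == IGMPv2_MEMBERSHIP_REPORT));
}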
Example #7
int
ip_output(struct mbuf * data, struct ip_socopts * so_optsPack) /* mbuf chain with data to send */
{
   struct ip * bip;
   struct tcphdr *   tcpp;
   PACKET pkt;
   struct mbuf *  m1, * m2, * mtmp; /* temp mbuf pointers */
   int   e;    /* error holder */
   int   total;

   /* reassemble mbufs into a contiguous packet. Do this with as
    * little copying as possible. Typically the mbufs will be either
    * 1) a single mbuf with IP/TCP header info only (e.g. a TCP ACK
    * packet), 2) an IP/TCP header with a data mbuf chained to it, or
    * 3) case #2 with a tiny option-data mbuf between header and data.
    */
   if ((data->m_next))
   {
      m1 = data;
      m2 = data->m_next;

      /* If m2 is small (e.g. options), copy it to m1 and free it */
      while (m2 && (m2->m_len < 10))
      {
         pkt = m1->pkt;
         if ((pkt->nb_buff + pkt->nb_blen) > /* make sure m2 will fit in m1 */
             (m1->m_data + m1->m_len + m2->m_len))
         {
            MEMCPY((m1->m_data + m1->m_len), m2->m_data, m2->m_len);
            m1->m_len += m2->m_len;
            m1->m_next = m2->m_next;
            m_free(m2);    /* free this m2.... */
            m2 = m1->m_next;  /* ...and thread the next one */
            tcpstat.tcps_oappends++;
         }
         else     /* if won't fit, fall to next copy */
            break;
      }

      while (m2)  /* If we still have two or more buffers, more copying: */
      {
         /* try prepending m1 to m2, first see if it fits: */
         e = m2->m_data - m2->pkt->nb_buff;  /* e is prepend space */
         if (e < MaxLnh)
         { 
#ifdef NPDEBUG
            dprintf("nptcp: MaxLnh:%d, e:%d\n", MaxLnh, e);
#endif
            panic("tcp_out:mbuf-nbuf");   /* sanity check */
         }

         if ((m1->m_len < (unsigned)(e - MaxLnh))  /* leave room for MAC */
             && ((m1->m_len & (ALIGN_TYPE - 1)) == 0)  /* and stay aligned */
             && ((m2->m_data - m2->pkt->nb_buff) == HDRSLEN))   /* be at start */
         {
            MEMCPY((m2->m_data - m1->m_len), m1->m_data, m1->m_len);
            m2->m_data -= m1->m_len;   /* fix target to reflect prepend */
            m2->m_len += m1->m_len;
            m_free(m1);    /* free head (copied) mbuf */
            data = m1 = m2;   /* move other mbufs up the chain */
            m2 = m2->m_next;  /* loop to while(m2) test */
            tcpstat.tcps_oprepends++;
         }
         else     /* if won't fit, fall to next copy */
            break;
      }

      if (m2)  /* If all else fails, brute force copy: */
      {
         total = 0;
         for (mtmp = m1; mtmp; mtmp = mtmp->m_next)
            total += mtmp->m_len;
         LOCK_NET_RESOURCE(FREEQ_RESID);
         pkt = pk_alloc(total + HDRSLEN);
         UNLOCK_NET_RESOURCE(FREEQ_RESID);
         if (!pkt)
            return ENOBUFS;
         pkt->nb_prot = pkt->nb_buff + MaxLnh;

         mtmp = m1;
         while (mtmp)
         {
            MEMCPY(pkt->nb_prot, mtmp->m_data, mtmp->m_len);
            pkt->nb_prot += mtmp->m_len;
            pkt->nb_plen += mtmp->m_len;
            m2 = mtmp;
            mtmp = mtmp->m_next;
            if (m2 != data)   /* save original head */
               m_free(m2);
            tcpstat.tcps_ocopies++;
         }
         pkt->nb_prot -= total;     /* fix data pointer */

         /* release the original mbuf's packet and install the new one */
         LOCK_NET_RESOURCE(FREEQ_RESID);
         pk_free(data->pkt);
         UNLOCK_NET_RESOURCE(FREEQ_RESID);
         data->pkt = pkt;
         data->m_len = pkt->nb_plen;
         data->m_next = NULL;
         data->m_data = pkt->nb_prot;
         data->m_len = total;
      }
   }

   if ((data->m_data < (data->pkt->nb_buff + MaxLnh)))
      panic("ip_output: overflow");

   pkt = data->pkt;

   /* do we have options? */
   if (so_optsPack)
      pkt->soxopts = so_optsPack;   /* yup */
#ifdef IP6_ROUTING
   else
   {
      panic("ip_output: no so_optsPack for the IPv6 scope");     
   }
#endif

   /* fill in dest host for IP layer */
   bip = (struct ip *)data->m_data;
   pkt->fhost = bip->ip_dest;

   /* make enough IP header for cksum calculation */
   bip->ip_ver_ihl = 0x45;
   bip->ip_len = htons(bip->ip_len);   /* make net endian for calculation */
   tcpp = (struct tcphdr *)ip_data(bip);
#ifdef CSUM_DEMO
   if (!(tcpp->th_flags & TH_SYN))
      tcpp->th_flags |= TH_PUSH;     /* force the PSH flag in TCP hdr */
#endif
   tcpp->th_sum = tcp_cksum(bip);

   pkt->nb_prot = (char*)(bip + 1);    /* point past IP header */
   pkt->nb_plen = data->m_len - sizeof(struct ip);

   e = ip_write(IPPROTO_TCP, pkt);

   /* ip_write() is now responsible for data->pkt, so... */
   data->pkt = NULL;
   m_freem(data);

   if (e < 0)
   {
      /* don't report dropped sends; it causes socket applications to
       * bail when a TCP retry will fix the problem */
      if (e == SEND_DROPPED)
         return 0;
      return e;
   }
   else
      return 0;
}
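The pointer checks in ip_output() (prepend space computed as m_data - nb_buff, the panic when that space is smaller than MaxLnh, and nb_prot starting at nb_buff + MaxLnh in the brute-force copy) all assume one buffer layout. A sketch of that layout, inferred from this code rather than taken from the stack's headers:

/* Assumed buffer layout behind the pointer arithmetic above (a sketch
 * inferred from ip_output(), not the stack's actual declarations):
 *
 *   nb_buff                                       nb_buff + nb_blen
 *   |<-- MaxLnh reserve (link/MAC header) -->|<--- protocol data --->|
 *                                            ^
 *                                            m_data, m_len bytes valid
 *
 * Prepend room for an mbuf is (m_data - nb_buff); the coalescing loops
 * refuse to prepend into the MaxLnh reserve so the driver can still add
 * the MAC header in front of the IP header when the packet is sent. */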
Example #8
/** default destructor */
IASolverInt::~IASolverInt() 
{
  delete ip_data();
}