Example #1
void Yap_ReleaseAtom(Atom atom) { /* Releases an atom from the hash chain */
  register Int hash;
  register const unsigned char *p;
  AtomEntry *inChain;
  AtomEntry *ap = RepAtom(atom);
  unsigned char *name = ap->UStrOfAE;

  /* compute hash */
  p = name;
  hash = HashFunction(p) % AtomHashTableSize;
  WRITE_LOCK(HashChain[hash].AERWLock);
  if (HashChain[hash].Entry == atom) {
    NOfAtoms--;
    HashChain[hash].Entry = ap->NextOfAE;
    WRITE_UNLOCK(HashChain[hash].AERWLock);
    return;
  }
  /* else */
  inChain = RepAtom(HashChain[hash].Entry);
  while (inChain->NextOfAE != atom)
    inChain = RepAtom(inChain->NextOfAE);
  WRITE_LOCK(inChain->ARWLock);
  inChain->NextOfAE = ap->NextOfAE;
  WRITE_UNLOCK(inChain->ARWLock);
  WRITE_UNLOCK(HashChain[hash].AERWLock);
}
Example #2
void *modifierThread(void *arg)
{
	node_t *x_ptr, *y_ptr;
	while (1) {
		printf("%s\n", __func__);

		WRITE_LOCK(list1Lock);
		x_ptr = listUnLink(&gList_1);
		LEAVE_LOCK(list1Lock);

		WRITE_LOCK(freeListLock);
		y_ptr = listUnLink(&gFreeList);
		LEAVE_LOCK(freeListLock);

		use_block_x_to_pro_y(x_ptr, y_ptr);

		WRITE_LOCK(freeListLock);
		listLink(x_ptr, &gFreeList);
		LEAVE_LOCK(freeListLock);

		WRITE_LOCK(list2Lock);
		listLink(y_ptr, &gList_2);
		LEAVE_LOCK(list2Lock);
	}

	return 0;
}
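The WRITE_LOCK/LEAVE_LOCK pair used in this snippet (and in examples #24 and #26 below) are project-local macros whose definitions are not shown here. A minimal sketch of one plausible implementation, assuming one pthread_rwlock_t per list; the macro names and lock variables mirror the snippet, everything else is an assumption for illustration only:

#include <pthread.h>

/* Hypothetical definitions, not taken from the snippets on this page. */
static pthread_rwlock_t list1Lock    = PTHREAD_RWLOCK_INITIALIZER;
static pthread_rwlock_t list2Lock    = PTHREAD_RWLOCK_INITIALIZER;
static pthread_rwlock_t freeListLock = PTHREAD_RWLOCK_INITIALIZER;

#define WRITE_LOCK(l)  pthread_rwlock_wrlock(&(l))   /* take exclusive ownership */
#define LEAVE_LOCK(l)  pthread_rwlock_unlock(&(l))   /* release the lock         */

With definitions like these, each list operation in the thread loop above runs under exclusive ownership of that list's lock, while the three lists can still be worked on concurrently because each has its own lock.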
Example #3
File: init.c  Project: jfmc/yap-6.3
static int
OpDec(int p, const char *type, Atom a, Term m)
{
  int             i;
  AtomEntry      *ae = RepAtom(a);
  OpEntry        *info;

  if (m == TermProlog)
    m = PROLOG_MODULE;
  else if (m == USER_MODULE)
    m = PROLOG_MODULE;
  for (i = 1; i <= 7; ++i)
    if (strcmp(type, optypes[i]) == 0)
      break;
  if (i > 7) {
    Yap_Error(DOMAIN_ERROR_OPERATOR_SPECIFIER,MkAtomTerm(Yap_LookupAtom(type)),"op/3");
    return(FALSE);
  }
  if (p) {
    if (i == 1 || i == 2 || i == 4)
      p |= DcrlpFlag;
    if (i == 1 || i == 3 || i == 6)
      p |= DcrrpFlag;
  }
  WRITE_LOCK(ae->ARWLock);
  info = Yap_GetOpPropForAModuleHavingALock(ae, m);
  if (EndOfPAEntr(info)) {
    info = (OpEntry *) Yap_AllocAtomSpace(sizeof(OpEntry));
    info->KindOfPE = Ord(OpProperty);
    info->OpModule = m;
    info->OpName = a;
    //LOCK(OpListLock);
    info->OpNext = OpList;
    OpList = info;
    //UNLOCK(OpListLock);
    AddPropToAtom(ae, (PropEntry *)info);
    INIT_RWLOCK(info->OpRWLock);
    WRITE_LOCK(info->OpRWLock);
    WRITE_UNLOCK(ae->ARWLock);
    info->Prefix = info->Infix = info->Posfix = 0;
  } else {
    WRITE_LOCK(info->OpRWLock);
    WRITE_UNLOCK(ae->ARWLock);
  }
  if (i <= 3) {
    GET_LD
    if (truePrologFlag(PLFLAG_ISO) &&
	info->Posfix != 0) /* there is a posfix operator */ {
      /* ISO dictates */
      WRITE_UNLOCK(info->OpRWLock);
      Yap_Error(PERMISSION_ERROR_CREATE_OPERATOR,MkAtomTerm(a),"op/3");
      return FALSE;
    }
    info->Infix = p;
  } else if (i <= 5) {
Example #4
static void alloc_request_p(u_int32_t xid, u_int16_t proto, u_int32_t ip,
		     u_int16_t port)
{
	struct request_p *req_p;
        
	/* Verifies if entry already exists */
	WRITE_LOCK(&ipct_rpc_udp_lock);
	req_p = LIST_FIND(&request_p_list_udp, request_p_cmp,
		struct request_p *, xid, ip, port);

	if (req_p) {
		/* Refresh timeout */
		if (del_timer(&req_p->timeout)) {
			req_p->timeout.expires = jiffies + EXP;
			add_timer(&req_p->timeout);	
		} 
		WRITE_UNLOCK(&ipct_rpc_udp_lock);
		return;	

	}
	WRITE_UNLOCK(&ipct_rpc_udp_lock);
	
	/* Allocate new request_p */
	req_p = (struct request_p *) kmalloc(sizeof(struct request_p), GFP_ATOMIC);
	if (!req_p) {
 		DEBUGP("can't allocate request_p\n");
		return;			
	}

        req_p->list.next = NULL;
        req_p->list.prev = NULL;
        req_p->xid = xid;
        req_p->ip = ip;
        req_p->port = port;
        req_p->proto = proto;
      
	/* Initialize timer */
	init_timer(&req_p->timeout);
        req_p->timeout.expires = jiffies + EXP;
	req_p->timeout.data = (unsigned long)req_p;
	req_p->timeout.function = delete_request_p;
	add_timer(&req_p->timeout); 

	/* Put in list */
	WRITE_LOCK(&ipct_rpc_udp_lock);
	list_prepend(&request_p_list_udp, req_p);
	WRITE_UNLOCK(&ipct_rpc_udp_lock); 
	return; 

}
Example #5
File: bb.c  Project: davidvaz/yap-udi
static BBProp 
PutBBProp(AtomEntry *ae, Term mod USES_REGS)		/* get BBentry for at; */
{
  Prop          p0;
  BBProp        p;

  WRITE_LOCK(ae->ARWLock);
  p = RepBBProp(p0 = ae->PropsOfAE);
  while (p0 != NIL && (!IsBBProperty(p->KindOfPE) ||
		(p->ModuleOfBB != mod))) {
    p = RepBBProp(p0 = p->NextOfPE);
  }
  if (p0 == NIL) {
    p = (BBProp)Yap_AllocAtomSpace(sizeof(*p));
    if (p == NULL) {
      WRITE_UNLOCK(ae->ARWLock);
      Yap_Error(OUT_OF_HEAP_ERROR,ARG1,"could not allocate space in bb_put/2");
      return(NULL);
    }
    AddPropToAtom(ae, (PropEntry *)p);
    p->ModuleOfBB = mod;
    p->Element = 0L;
    p->KeyOfBB = AbsAtom(ae);
    p->KindOfPE = BBProperty;
    INIT_RWLOCK(p->BBRWLock);    
  }
  WRITE_UNLOCK(ae->ARWLock);
  return (p);
}
Example #6
// Called when a process, which already opened the dev file, attempts to read from it
static ssize_t dev_device_read(struct file *filp, char *buffer, size_t length, loff_t *offset)
{
    char msg[100];
    int bytes_read = 0;

    WRITE_LOCK(g_ser_device_lock);
        sprintf(msg,"SEREADMO char device registered and open %d times", g_device_counter);
    WRITE_UNLOCK(g_ser_device_lock);


    bytes_read = strlen(msg);
    if(*offset >= bytes_read)
        // no more bytes to read
        return 0;

    bytes_read -= *offset; // bytes left to read

    if(bytes_read > length)
        bytes_read = length;


    if(copy_to_user(buffer,msg+(*offset),bytes_read) != 0)
    {
        MSG_FAILED("SEREADMO copy_to_user failed\n");
        bytes_read = 0;
    }

    *offset += bytes_read;
    return bytes_read;
}
Example #7
/* Returns verdict for packet, or -1 for invalid. */
static int tcp_packet(struct ip_conntrack *conntrack,
		      struct iphdr *iph, size_t len,
		      enum ip_conntrack_info ctinfo)
{
	enum tcp_conntrack newconntrack, oldtcpstate;
	struct tcphdr *tcph = (struct tcphdr *)((u_int32_t *)iph + iph->ihl);

	/* We're guaranteed to have the base header, but maybe not the
           options. */
	if (len < (iph->ihl + tcph->doff) * 4) {
		DEBUGP("ip_conntrack_tcp: Truncated packet.\n");
		return -1;
	}

	WRITE_LOCK(&tcp_lock);
	oldtcpstate = conntrack->proto.tcp.state;
	newconntrack
		= tcp_conntracks
		[CTINFO2DIR(ctinfo)]
		[get_conntrack_index(tcph)][oldtcpstate];

	/* Invalid */
	if (newconntrack == TCP_CONNTRACK_MAX) {
		DEBUGP("ip_conntrack_tcp: Invalid dir=%i index=%u conntrack=%u\n",
		       CTINFO2DIR(ctinfo), get_conntrack_index(tcph),
		       conntrack->proto.tcp.state);
		WRITE_UNLOCK(&tcp_lock);
		return -1;
	}

	conntrack->proto.tcp.state = newconntrack;

	/* Poor man's window tracking: record SYN/ACK for handshake check */
	if (oldtcpstate == TCP_CONNTRACK_SYN_SENT
	    && CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY
	    && tcph->syn && tcph->ack)
		conntrack->proto.tcp.handshake_ack
			= htonl(ntohl(tcph->seq) + 1);
	WRITE_UNLOCK(&tcp_lock);

	/* If only reply is a RST, we can consider ourselves not to
	   have an established connection: this is a fairly common
	   problem case, so we can delete the conntrack
	   immediately.  --RR */
	if (!(conntrack->status & IPS_SEEN_REPLY) && tcph->rst) {
		if (del_timer(&conntrack->timeout))
			conntrack->timeout.function((unsigned long)conntrack);
	} else {
		/* Set ASSURED if we see valid ack in ESTABLISHED after SYN_RECV */
		if (oldtcpstate == TCP_CONNTRACK_SYN_RECV
		    && CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL
		    && tcph->ack && !tcph->syn
		    && tcph->ack_seq == conntrack->proto.tcp.handshake_ack)
			set_bit(IPS_ASSURED_BIT, &conntrack->status);

		ip_ct_refresh(conntrack, tcp_timeouts[newconntrack]);
	}

	return NF_ACCEPT;
}
Example #8
/* Update sender->td_end after NAT successfully mangled the packet */
int ip_conntrack_tcp_update(struct sk_buff *skb,
			    struct ip_conntrack *conntrack, 
			    int dir)
{
	struct iphdr *iph = skb->nh.iph;
	struct tcphdr *tcph = (void *)skb->nh.iph + skb->nh.iph->ihl*4;
	__u32 end;
#ifdef DEBUGP_VARS
	struct ip_ct_tcp_state *sender = &conntrack->proto.tcp.seen[dir];
	struct ip_ct_tcp_state *receiver = &conntrack->proto.tcp.seen[!dir];
#endif

	end = segment_seq_plus_len(ntohl(tcph->seq), skb->len, iph, tcph);
	
	WRITE_LOCK(&tcp_lock);
	/*
	 * We have to worry for the ack in the reply packet only...
	 */
	if (after(end, conntrack->proto.tcp.seen[dir].td_end))
		conntrack->proto.tcp.seen[dir].td_end = end;
	conntrack->proto.tcp.last_end = end;
	WRITE_UNLOCK(&tcp_lock);
	DEBUGP("tcp_update: sender end=%u maxend=%u maxwin=%u scale=%i "
	       "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
		sender->td_end, sender->td_maxend, sender->td_maxwin,
		sender->td_scale, 
		receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
		receiver->td_scale);
		
	return 1;
}
Example #9
int
int_dict_del(
    int_dict_t         *dict,
    intkey_t            key)
{
    int_dict_node_t target;
    int_dict_node_t *node;

    assert(dict);
    assert(dict->tree);

    target.key = key;

    WRITE_LOCK(&dict->mutex);
    node = (int_dict_node_t *)rbdelete(&target, dict->tree);
    RW_MUTEX_UNLOCK(&dict->mutex);

    if (node == NULL) {
        return 1;
    }
    dict->count--;

    free(node);
    return 0;
}
Example #10
		rux::uint8 GroupAnimation::get_IsStarted( void )
		{
			WRITE_LOCK( _cs_animation_schedule_index );
			rux::uint8 res = _animation_schedule_index != SIZE_MAX ? 1 : 0;
			_cs_animation_schedule_index.WriteUnlock();
			return res;
		};
Example #11
void
Yap_InitConstExps(void)
{
  unsigned int    i;
  ExpEntry       *p;

  for (i = 0; i < sizeof(InitConstTab)/sizeof(InitConstEntry); ++i) {
    AtomEntry *ae = RepAtom(Yap_LookupAtom(InitConstTab[i].OpName));
    if (ae == NULL) {
      Yap_EvalError(RESOURCE_ERROR_HEAP,TermNil,"at InitConstExps");
      return;
    }
    WRITE_LOCK(ae->ARWLock);
    if (Yap_GetExpPropHavingLock(ae, 0)) {
      WRITE_UNLOCK(ae->ARWLock);
      break;
    }
    p = (ExpEntry *) Yap_AllocAtomSpace(sizeof(ExpEntry));
    p->KindOfPE = ExpProperty;
    p->ArityOfEE = 0;
    p->ENoOfEE = 0;
    p->FOfEE = InitConstTab[i].f;
    AddPropToAtom(ae, (PropEntry *)p);
    WRITE_UNLOCK(ae->ARWLock);
  }
}
Example #12
void Yap_LookupAtomWithAddress(const char *atom,
                               AtomEntry *ae) { /* lookup atom in atom table */
  register CELL hash;
  register const unsigned char *p;
  Atom a;

  /* compute hash */
  p = (const unsigned char *)atom;
  hash = HashFunction(p) % AtomHashTableSize;
  /* ask for a WRITE lock because it is highly unlikely we shall find anything
   */
  WRITE_LOCK(HashChain[hash].AERWLock);
  a = HashChain[hash].Entry;
  /* search atom in chain */
  if (SearchAtom(p, a) != NIL) {
    Yap_Error(SYSTEM_ERROR_INTERNAL, TermNil,
              "repeated initialization for atom %s", ae);
    WRITE_UNLOCK(HashChain[hash].AERWLock);
    return;
  }
  /* add new atom to start of chain */
  NOfAtoms++;
  ae->NextOfAE = a;
  HashChain[hash].Entry = AbsAtom(ae);
  ae->PropsOfAE = NIL;
  strcpy((char *)ae->StrOfAE, (char *)atom);
  INIT_RWLOCK(ae->ARWLock);
  WRITE_UNLOCK(HashChain[hash].AERWLock);
}
Example #13
		void GroupAnimation::AddAnimation( const XAnimation& animation )
		{
			WRITE_LOCK( _cs_animations );
			_duration = 0;
			_animations.Add( animation );
			_cs_animations.WriteUnlock();
		};
Example #14
static Atom
LookupAtom(const unsigned char *atom) { /* lookup atom in atom table */
  uint64_t hash;
  const unsigned char *p;
  Atom a, na;
  AtomEntry *ae;
  size_t sz = AtomHashTableSize;

  /* compute hash */
  p = atom;

  hash = HashFunction(p);
  hash = hash % sz ;

  /* we'll start by holding a read lock in order to avoid contention */
  READ_LOCK(HashChain[hash].AERWLock);
  a = HashChain[hash].Entry;
  /* search atom in chain */
  na = SearchAtom(atom, a);
  if (na != NIL) {
    READ_UNLOCK(HashChain[hash].AERWLock);
    return (na);
  }
  READ_UNLOCK(HashChain[hash].AERWLock);
  /* we need a write lock */
  WRITE_LOCK(HashChain[hash].AERWLock);
/* concurrent version of Yap, need to take care */
#if defined(YAPOR) || defined(THREADS)
  if (a != HashChain[hash].Entry) {
    a = HashChain[hash].Entry;
    na = SearchAtom(atom, a);
    if (na != NIL) {
      WRITE_UNLOCK(HashChain[hash].AERWLock);
      return (na);
    }
  }
#endif
  /* add new atom to start of chain */
  ae = (AtomEntry *)Yap_AllocAtomSpace((sizeof *ae) +
                                       strlen((const char *)atom) + 1);
  if (ae == NULL) {
    WRITE_UNLOCK(HashChain[hash].AERWLock);
    return NIL;
  }
  NOfAtoms++;
  na = AbsAtom(ae);
  ae->PropsOfAE = NIL;
  if (ae->UStrOfAE != atom)
    strcpy((char *)ae->StrOfAE, (const char *)atom);
  ae->NextOfAE = a;
  HashChain[hash].Entry = na;
  INIT_RWLOCK(ae->ARWLock);
  WRITE_UNLOCK(HashChain[hash].AERWLock);

  if (NOfAtoms > 2 * AtomHashTableSize) {
    Yap_signal(YAP_CDOVF_SIGNAL);
  }
  return na;
}
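Example #14 illustrates a common optimistic locking idiom: probe the bucket under a cheap READ_LOCK first, and only fall back to a WRITE_LOCK when an insertion is actually needed, re-checking the chain because another thread may have inserted the same atom between the two locks. A stand-alone sketch of the same idiom using pthread_rwlock_t (all names here are illustrative, not taken from YAP):

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct node { struct node *next; char name[32]; };

static pthread_rwlock_t bucket_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct node *bucket_head;                 /* one hash bucket */

static struct node *find(const char *name) {
    for (struct node *n = bucket_head; n; n = n->next)
        if (strcmp(n->name, name) == 0)
            return n;
    return NULL;
}

/* Look up name, inserting it if absent: optimistic read lock first,
   then write lock with a re-check before inserting. */
struct node *lookup_or_insert(const char *name) {
    pthread_rwlock_rdlock(&bucket_lock);
    struct node *n = find(name);
    pthread_rwlock_unlock(&bucket_lock);
    if (n)
        return n;

    pthread_rwlock_wrlock(&bucket_lock);
    n = find(name);                              /* another thread may have won the race */
    if (!n && (n = calloc(1, sizeof *n)) != NULL) {
        strncpy(n->name, name, sizeof n->name - 1);
        n->next = bucket_head;
        bucket_head = n;
    }
    pthread_rwlock_unlock(&bucket_lock);
    return n;
}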
Example #15
// Called when a process tries to open the device file, like "cat /dev/sereadmo"
static int dev_device_open(struct inode *inode, struct file *file)
{
    WRITE_LOCK(g_ser_device_lock);
    g_device_counter++;
    WRITE_UNLOCK(g_ser_device_lock);

    return 0;
}
Example #16
// Called when a process closes the device file
static int dev_device_release(struct inode *inode, struct file *file)
{
    WRITE_LOCK(g_ser_device_lock);
    g_device_counter--;
    WRITE_UNLOCK(g_ser_device_lock);

    return 0;
}
Example #17
		void GroupAnimation::Clear( void )
		{			
			Stop();
			WRITE_LOCK( _cs_animations );
			_duration = 0;
			_on_completed = NULL;
			_animations.Clear();
			_cs_animations.WriteUnlock();
		};
Example #18
		void GroupAnimation::AddAnimationsAfterCompleted( const ::rux::XArray< ::rux::gui::XAnimation >& animations )
		{			
			WRITE_LOCK( _cs_animations );
			if( _animations.Count() > 0 )
				_after_completed_animations.Add( animations );
			else
				_animations.AddRange( animations );
			_cs_animations.WriteUnlock();
		};
Example #19
		void GroupAnimation::Stop( void )
		{
			WRITE_LOCK( _cs_animation_schedule_index );
			size_t animation_schedule_index = _animation_schedule_index;
			_animation_schedule_index = SIZE_MAX;
			_cs_animation_schedule_index.WriteUnlock();
			if( animation_schedule_index != SIZE_MAX )
				::rux::gui::application::remove_schedule( animation_schedule_index );
		};
Example #20
/* No one stores the protocol anywhere; simply delete it. */
void ip_nat_protocol_unregister(struct ip_nat_protocol *proto)
{
	WRITE_LOCK(&ip_nat_lock);
	ip_nat_protos[proto->protonum] = &ip_nat_unknown_protocol;
	WRITE_UNLOCK(&ip_nat_lock);

	/* Someone could be still looking at the proto in a bh. */
	synchronize_net();
}
Example #21
static void req_cl(struct request_p * r)
{
	WRITE_LOCK(&ipct_rpc_udp_lock);
	del_timer(&r->timeout);
	LIST_DELETE(&request_p_list_udp, r);
	WRITE_UNLOCK(&ipct_rpc_udp_lock);
	kfree(r);
	return;
}
Example #22
/* vsc: We must guarantee that IsVarTerm(functor) returns true! */
Functor Yap_MkFunctor(Atom ap, unsigned int arity) {
  AtomEntry *ae = RepAtom(ap);
  Functor f;

  WRITE_LOCK(ae->ARWLock);
  f = InlinedUnlockedMkFunctor(ae, arity);
  WRITE_UNLOCK(ae->ARWLock);
  return (f);
}
Example #23
static void delete_request_p(unsigned long request_p_ul)
{
	struct request_p *p = (void *)request_p_ul;
	
	WRITE_LOCK(&ipct_rpc_udp_lock);
	LIST_DELETE(&request_p_list_udp, p);
	WRITE_UNLOCK(&ipct_rpc_udp_lock);
	kfree(p);
	return;
}
Example #24
void *consumerThread(void *arg)
{
	node_t *c_ptr;
	while (1) {
		printf("%s\n", __func__);

		WRITE_LOCK(list2Lock);
		c_ptr = listUnLink(&gList_2);
		LEAVE_LOCK(list2Lock);

		consume_info_in_block(c_ptr);

		WRITE_LOCK(freeListLock);
		listLink(c_ptr, &gFreeList);
		LEAVE_LOCK(freeListLock);
	}

	return 0;
}
Example #25
/* vsc: We must guarantee that IsVarTerm(functor) returns true! */
void Yap_MkFunctorWithAddress(Atom ap, unsigned int arity, FunctorEntry *p) {
  AtomEntry *ae = RepAtom(ap);

  WRITE_LOCK(ae->ARWLock);
  p->KindOfPE = FunctorProperty;
  p->NameOfFE = ap;
  p->ArityOfFE = arity;
  AddPropToAtom(ae, (PropEntry *)p);
  WRITE_UNLOCK(ae->ARWLock);
}
Example #26
void *producerThread(void *arg)
{
	node_t *n_ptr;

	while (1) {
		printf("%s\n", __func__);

		WRITE_LOCK(freeListLock);
		n_ptr = listUnLink(&gFreeList);
		LEAVE_LOCK(freeListLock);

		produce_info(n_ptr);

		WRITE_LOCK(list1Lock);
		listLink(n_ptr, &gList_1);
		LEAVE_LOCK(list1Lock);
	}

	return 0;
}
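Examples #2, #24 and #26 together form a three-stage pipeline: the producer takes a node from gFreeList and links it onto gList_1, the modifier moves work from gList_1 to gList_2 while recycling a node back to the free list, and the consumer drains gList_2 back into gFreeList. Each list has its own lock and each thread holds only one lock at a time, which avoids lock-ordering deadlocks. A hedged sketch of how the three threads might be started, assuming POSIX threads and that the lists and locks are initialized elsewhere:

#include <pthread.h>
#include <stdio.h>

/* The worker functions are the ones shown in examples #26, #2 and #24. */
void *producerThread(void *);
void *modifierThread(void *);
void *consumerThread(void *);

int main(void)
{
    pthread_t producer, modifier, consumer;

    if (pthread_create(&producer, NULL, producerThread, NULL) != 0 ||
        pthread_create(&modifier, NULL, modifierThread, NULL) != 0 ||
        pthread_create(&consumer, NULL, consumerThread, NULL) != 0) {
        perror("pthread_create");
        return 1;
    }

    /* The worker loops never return, so these joins simply block. */
    pthread_join(producer, NULL);
    pthread_join(modifier, NULL);
    pthread_join(consumer, NULL);
    return 0;
}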
Example #27
/* No one stores the protocol anywhere; simply delete it. */
void ip_nat_protocol_unregister(struct ip_nat_protocol *proto)
{
	WRITE_LOCK(&ip_nat_lock);
	LIST_DELETE(&protos, proto);
	WRITE_UNLOCK(&ip_nat_lock);

	/* Someone could be still looking at the proto in a bh. */
	br_write_lock_bh(BR_NETPROTO_LOCK);
	br_write_unlock_bh(BR_NETPROTO_LOCK);

	MOD_DEC_USE_COUNT;
}
Example #28
void ip_conntrack_protocol_unregister(struct ip_conntrack_protocol *proto)
{
	WRITE_LOCK(&ip_conntrack_lock);
	ip_ct_protos[proto->proto] = &ip_conntrack_generic_protocol;
	WRITE_UNLOCK(&ip_conntrack_lock);
	
	/* Somebody could be still looking at the proto in bh. */
	synchronize_net();

	/* Remove all contrack entries for this protocol */
	ip_ct_selective_cleanup(kill_proto, &proto->proto);
}
Example #29
PUBLIC int
avl_tree_insert (avl_tree_t *tree,
        void *data_to_be_inserted,
        void **data_already_present)
{
    int rv;

    WRITE_LOCK(tree);
    rv = thread_unsafe_avl_tree_insert(tree,
            data_to_be_inserted, data_already_present);
    WRITE_UNLOCK(tree);
    return rv;
}
Example #30
PUBLIC int
avl_tree_remove (avl_tree_t *tree,
        void *data_to_be_removed,
        void **data_actually_removed)
{
    int rv;

    WRITE_LOCK(tree);
    rv = thread_unsafe_avl_tree_remove(tree,
                data_to_be_removed, data_actually_removed);
    WRITE_UNLOCK(tree);
    return rv;
}
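Examples #29 and #30 wrap a thread-unsafe AVL core behind WRITE_LOCK/WRITE_UNLOCK on the tree. A read-only operation would presumably take the read side of the same lock instead; the sketch below follows that pattern, but READ_LOCK, READ_UNLOCK and thread_unsafe_avl_tree_search are assumptions made for illustration, not confirmed parts of this library's API:

/* Hypothetical read-side counterpart to avl_tree_insert/avl_tree_remove.
   READ_LOCK/READ_UNLOCK and thread_unsafe_avl_tree_search are assumed names. */
PUBLIC int
avl_tree_search (avl_tree_t *tree,
        void *data_to_be_searched,
        void **data_found)
{
    int rv;

    READ_LOCK(tree);
    rv = thread_unsafe_avl_tree_search(tree,
            data_to_be_searched, data_found);
    READ_UNLOCK(tree);
    return rv;
}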