Example #1
void
BDirectMessageTarget::Acquire()
{
	atomic_add(&fReferenceCount, 1);
}
Example #2
status_t
load_image(char const* name, image_type type, const char* rpath,
	image_t** _image)
{
	int32 pheaderSize, sheaderSize;
	char path[PATH_MAX];
	ssize_t length;
	char pheaderBuffer[4096];
	int32 numRegions;
	image_t* found;
	image_t* image;
	status_t status;
	int fd;

	struct Elf32_Ehdr eheader;

	// Have we already loaded that image? Don't check for add-ons -- we always
	// reload them.
	if (type != B_ADD_ON_IMAGE) {
		found = find_loaded_image_by_name(name, APP_OR_LIBRARY_TYPE);

		if (found == NULL && type != B_APP_IMAGE && gProgramImage != NULL) {
			// Special case for add-ons that link against the application
			// executable, with the executable not having a soname set.
			if (const char* lastSlash = strrchr(name, '/')) {
				if (strcmp(gProgramImage->name, lastSlash + 1) == 0)
					found = gProgramImage;
			}
		}

		if (found) {
			atomic_add(&found->ref_count, 1);
			*_image = found;
			KTRACE("rld: load_container(\"%s\", type: %d, rpath: \"%s\") "
				"already loaded", name, type, rpath);
			return B_OK;
		}
	}

	KTRACE("rld: load_container(\"%s\", type: %d, rpath: \"%s\")", name, type,
		rpath);

	strlcpy(path, name, sizeof(path));

	// find and open the file
	fd = open_executable(path, type, rpath, get_program_path(),
		sSearchPathSubDir);
	if (fd < 0) {
		FATAL("Cannot open file %s: %s\n", name, strerror(fd));
		KTRACE("rld: load_container(\"%s\"): failed to open file", name);
		return fd;
	}

	// normalize the image path
	status = _kern_normalize_path(path, true, path);
	if (status != B_OK)
		goto err1;

	// Test again if this image has been registered already - this time we
	// can check the full path, not just its name.
	// Otherwise, with symbolic links, you could end up loading the same image twice.
	if (type != B_ADD_ON_IMAGE) {
		found = find_loaded_image_by_name(path, APP_OR_LIBRARY_TYPE);
		if (found) {
			atomic_add(&found->ref_count, 1);
			*_image = found;
			_kern_close(fd);
			KTRACE("rld: load_container(\"%s\"): already loaded after all",
				name);
			return B_OK;
		}
	}

	length = _kern_read(fd, 0, &eheader, sizeof(eheader));
	if (length != sizeof(eheader)) {
		status = B_NOT_AN_EXECUTABLE;
		FATAL("%s: Troubles reading ELF header\n", path);
		goto err1;
	}

	status = parse_elf_header(&eheader, &pheaderSize, &sheaderSize);
	if (status < B_OK) {
		FATAL("%s: Incorrect ELF header\n", path);
		goto err1;
	}

	// ToDo: what to do about this restriction??
	if (pheaderSize > (int)sizeof(pheaderBuffer)) {
		FATAL("%s: Cannot handle program headers bigger than %lu\n",
			path, sizeof(pheaderBuffer));
		status = B_UNSUPPORTED;
		goto err1;
	}

	length = _kern_read(fd, eheader.e_phoff, pheaderBuffer, pheaderSize);
	if (length != pheaderSize) {
		FATAL("%s: Could not read program headers: %s\n", path,
			strerror(length));
		status = B_BAD_DATA;
		goto err1;
	}

	numRegions = count_regions(path, pheaderBuffer, eheader.e_phnum,
		eheader.e_phentsize);
	if (numRegions <= 0) {
		FATAL("%s: Troubles parsing Program headers, numRegions = %ld\n",
			path, numRegions);
		status = B_BAD_DATA;
		goto err1;
	}

	image = create_image(name, path, numRegions);
	if (image == NULL) {
		FATAL("%s: Failed to allocate image_t object\n", path);
		status = B_NO_MEMORY;
		goto err1;
	}

	status = parse_program_headers(image, pheaderBuffer, eheader.e_phnum,
		eheader.e_phentsize);
	if (status < B_OK)
		goto err2;

	if (!assert_dynamic_loadable(image)) {
		FATAL("%s: Dynamic segment must be loadable (implementation "
			"restriction)\n", image->path);
		status = B_UNSUPPORTED;
		goto err2;
	}

	status = map_image(fd, path, image, type == B_APP_IMAGE);
	if (status < B_OK) {
		FATAL("%s: Could not map image: %s\n", image->path, strerror(status));
		status = B_ERROR;
		goto err2;
	}

	if (!parse_dynamic_segment(image)) {
		FATAL("%s: Troubles handling dynamic section\n", image->path);
		status = B_BAD_DATA;
		goto err3;
	}

	if (eheader.e_entry != 0)
		image->entry_point = eheader.e_entry + image->regions[0].delta;

	analyze_image_haiku_version_and_abi(fd, image, eheader, sheaderSize,
		pheaderBuffer, sizeof(pheaderBuffer));

	// If this is the executable image, we init the search path
	// subdir, if the compiler version doesn't match ours.
	if (type == B_APP_IMAGE) {
		#if __GNUC__ == 2
			if ((image->abi & B_HAIKU_ABI_MAJOR) == B_HAIKU_ABI_GCC_4)
				sSearchPathSubDir = "gcc4";
		#elif __GNUC__ == 4
			if ((image->abi & B_HAIKU_ABI_MAJOR) == B_HAIKU_ABI_GCC_2)
				sSearchPathSubDir = "gcc2";
		#endif
	}

	// init gcc version dependent image flags
	// symbol resolution strategy
	if (image->abi == B_HAIKU_ABI_GCC_2_ANCIENT)
		image->find_undefined_symbol = find_undefined_symbol_beos;

	// init version infos
	status = init_image_version_infos(image);

	image->type = type;
	register_image(image, fd, path);
	image_event(image, IMAGE_EVENT_LOADED);

	_kern_close(fd);

	enqueue_loaded_image(image);

	*_image = image;

	KTRACE("rld: load_container(\"%s\"): done: id: %ld (ABI: %#lx)", name,
		image->id, image->abi);

	return B_OK;

err3:
	unmap_image(image);
err2:
	delete_image_struct(image);
err1:
	_kern_close(fd);

	KTRACE("rld: load_container(\"%s\"): failed: %s", name,
		strerror(status));

	return status;
}
Example #3
/*
 *	Check if this packet is complete.
 *	Returns NULL on failure for any reason, otherwise a pointer
 *	to the current nexthdr field in the reassembled frame.
 *
 *	It is called with locked fq, and caller must check that
 *	queue is eligible for reassembly i.e. it is not COMPLETE,
 *	the last and the first frames arrived and all the bits are here.
 */
static struct sk_buff *
nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
{
	struct sk_buff *fp, *op, *head = fq->fragments;
	int    payload_len;

	fq_kill(fq);

	BUG_TRAP(head != NULL);
	BUG_TRAP(NFCT_FRAG6_CB(head)->offset == 0);

	/* Unfragmented part is taken from the first segment. */
	payload_len = (head->data - head->nh.raw) - sizeof(struct ipv6hdr) + fq->len - sizeof(struct frag_hdr);
	if (payload_len > IPV6_MAXPLEN) {
		DEBUGP("payload len is too large.\n");
		goto out_oversize;
	}

	/* Head of list must not be cloned. */
	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC)) {
		DEBUGP("skb is cloned but can't expand head");
		goto out_oom;
	}

	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments. */
	if (skb_shinfo(head)->frag_list) {
		struct sk_buff *clone;
		int i, plen = 0;

		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL) {
			DEBUGP("Can't alloc skb\n");
			goto out_oom;
		}
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_shinfo(head)->frag_list = NULL;
		for (i=0; i<skb_shinfo(head)->nr_frags; i++)
			plen += skb_shinfo(head)->frags[i].size;
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;

		NFCT_FRAG6_CB(clone)->orig = NULL;
		atomic_add(clone->truesize, &nf_ct_frag6_mem);
	}

	/* We have to remove fragment header from datagram and to relocate
	 * header in order to calculate ICV correctly. */
	head->nh.raw[fq->nhoffset] = head->h.raw[0];
	memmove(head->head + sizeof(struct frag_hdr), head->head, 
		(head->data - head->head) - sizeof(struct frag_hdr));
	head->mac.raw += sizeof(struct frag_hdr);
	head->nh.raw += sizeof(struct frag_hdr);

	skb_shinfo(head)->frag_list = head->next;
	head->h.raw = head->data;
	skb_push(head, head->data - head->nh.raw);
	atomic_sub(head->truesize, &nf_ct_frag6_mem);

	for (fp=head->next; fp; fp = fp->next) {
		head->data_len += fp->len;
		head->len += fp->len;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_HW)
			head->csum = csum_add(head->csum, fp->csum);
		head->truesize += fp->truesize;
		atomic_sub(fp->truesize, &nf_ct_frag6_mem);
	}

	head->next = NULL;
	head->dev = dev;
	skb_set_timestamp(head, &fq->stamp);
	head->nh.ipv6h->payload_len = htons(payload_len);

	/* Yes, and fold redundant checksum back. 8) */
	if (head->ip_summed == CHECKSUM_HW)
		head->csum = csum_partial(head->nh.raw, head->h.raw-head->nh.raw, head->csum);

	fq->fragments = NULL;

	/* all original skbs are linked into the NFCT_FRAG6_CB(head).orig */
	fp = skb_shinfo(head)->frag_list;
	if (NFCT_FRAG6_CB(fp)->orig == NULL)
		/* at above code, head skb is divided into two skbs. */
		fp = fp->next;

	op = NFCT_FRAG6_CB(head)->orig;
	for (; fp; fp = fp->next) {
		struct sk_buff *orig = NFCT_FRAG6_CB(fp)->orig;

		op->next = orig;
		op = orig;
		NFCT_FRAG6_CB(fp)->orig = NULL;
	}

	return head;

out_oversize:
	if (net_ratelimit())
		printk(KERN_DEBUG "nf_ct_frag6_reasm: payload len = %d\n", payload_len);
	goto out_fail;
out_oom:
	if (net_ratelimit())
		printk(KERN_DEBUG "nf_ct_frag6_reasm: no memory for reassembly\n");
out_fail:
	return NULL;
}
Example #4
asmlinkage long sys_msgsnd (int msqid, struct msgbuf *msgp, size_t msgsz, int msgflg)
{
	struct msg_queue *msq;
	struct msg_msg *msg;
	long mtype;
	int err;
	
	if (msgsz > msg_ctlmax || (long) msgsz < 0 || msqid < 0)
		return -EINVAL;
	if (get_user(mtype, &msgp->mtype))
		return -EFAULT; 
	if (mtype < 1)
		return -EINVAL;

	msg = load_msg(msgp->mtext, msgsz);
	if(IS_ERR(msg))
		return PTR_ERR(msg);

	msg->m_type = mtype;
	msg->m_ts = msgsz;

	msq = msg_lock(msqid);
	err=-EINVAL;
	if(msq==NULL)
		goto out_free;
retry:
	err= -EIDRM;
	if (msg_checkid(msq,msqid))
		goto out_unlock_free;

	err=-EACCES;
	if (ipcperms(&msq->q_perm, S_IWUGO)) 
		goto out_unlock_free;

	if(msgsz + msq->q_cbytes > msq->q_qbytes ||
		1 + msq->q_qnum > msq->q_qbytes) {
		struct msg_sender s;

		if(msgflg&IPC_NOWAIT) {
			err=-EAGAIN;
			goto out_unlock_free;
		}
		ss_add(msq, &s);
		msg_unlock(msqid);
		schedule();
		current->state= TASK_RUNNING;

		msq = msg_lock(msqid);
		err = -EIDRM;
		if(msq==NULL)
			goto out_free;
		ss_del(&s);
		
		if (signal_pending(current)) {
			err=-EINTR;
			goto out_unlock_free;
		}
		goto retry;
	}

	if(!pipelined_send(msq,msg)) {
		/* no one is waiting for this message, enqueue it */
		list_add_tail(&msg->m_list,&msq->q_messages);
		msq->q_cbytes += msgsz;
		msq->q_qnum++;
		atomic_add(msgsz,&msg_bytes);
		atomic_inc(&msg_hdrs);
	}
	
	err = 0;
	msg = NULL;
	msq->q_lspid = current->pid;
	msq->q_stime = CURRENT_TIME;

out_unlock_free:
	msg_unlock(msqid);
out_free:
	if(msg!=NULL)
		free_msg(msg);
	return err;
}
Example #5
//BrowserWindow::BrowserWindow( BRect cFrame, std::vector<HistoryEntry*>* pcHistory, bool bLoadPos ) :
BrowserWindow::BrowserWindow( BRect cFrame, bool /*bLoadPos*/ )
	:BWindow( cFrame, "ABrowse", B_TITLED_WINDOW,  0 )
	,m_pcHTMLPart( NULL )
	,m_pcStatusBar( NULL )
	,m_pcURLView( NULL )
{
	atomic_add( &s_nWndCount, 1 );

//	BAutolock al( GlobalMutex::GetMutex() );
//	GlobalMutex::Lock();
	RootView *pcRoot =  new RootView( Bounds() );

	AddChild( pcRoot );

//	AddChild( pcRoot );

//	AddTimer( this, 1, 100000, false );
//	GlobalMutex::Unlock();

//--------------------------------------------------------------------------
#if 0
	LayoutView* pcFrame = new LayoutView( cWndBounds, "frame_view" );

	
	VLayoutNode* pcRoot = new VLayoutNode( "root" );

	FrameView* pcNavBarFrame = new FrameView( Rect(0,0,0,0), "nav_bar_frame", "" );
	HLayoutNode* pcNavBar = new HLayoutNode( "nav_bar" );


	m_pcURLView = new TextView( Rect(0,0,0,0), "url_view", ""/*g_pzDefaultURL*/ );

	m_pcURLView->SetEventMask( os::TextView::EI_ENTER_PRESSED | os::TextView::EI_ESC_PRESSED );
	m_pcURLView->SetMultiLine( false );
	m_pcURLView->SetMessage( new Message( ID_URL_CHANGED ) );

	m_pcToolBar   = new ToolBar( Rect(0,0,0,0), "tool_bar" );
	m_pcStatusBar = new StatusBar( Rect(), "status_bar" );

	
	m_pcToolBar->AddButton( "hi16-action-back.png", os::Message( ID_PREV_URL ) );
	m_pcToolBar->AddButton( "hi16-action-forward.png", os::Message( ID_NEXT_URL ) );
	m_pcToolBar->AddButton( "hi16-action-gohome.png", os::Message( ID_GOHOME ) );
	m_pcToolBar->AddButton( "hi16-action-reload.png", os::Message( ID_RELOAD ) );
	m_pcToolBar->AddButton( "hi16-action-find.png", os::Message( ID_FIND ) );
	m_pcToolBar->AddButton( "hi16-action-editcopy.png", os::Message( ID_COPY ) );

	pcNavBar->AddChild( m_pcToolBar )->SetBorders( Rect( 4, 2, 10, 2 ) );
	pcNavBar->AddChild( m_pcURLView );

	pcNavBarFrame->SetRoot( pcNavBar );
	
	pcRoot->AddChild( pcNavBarFrame );
	
	
	QWidget* pcTopView = new TopView;
	pcTopView->show();
	
	pcTopView->SetResizeMask( CF_FOLLOW_ALL );
	pcTopView->SetFrame( cWndBounds.Bounds() );
	pcRoot->AddChild( pcTopView );
	pcRoot->AddChild( m_pcStatusBar );
	pcFrame->SetRoot( pcRoot );

   
	m_pcHTMLPart = new KHTMLPart( pcTopView, "khtmlpart" ); // ### frame name
	m_pcHTMLPart->browserExtension()->SetCallback( this );

	AddChild( pcFrame );
	
	KHTMLView* pcView = m_pcHTMLPart->view();

	pcView->SetResizeMask( CF_FOLLOW_ALL );

	pcView->SetFrame( pcTopView->GetBounds() );

	pcView->MakeFocus();

	pcView->SetMsgTarget( Messenger( this ) );

	pcView->show();
	
	m_pcURLView->SetTarget( this );
	m_pcToolBar->SetTarget( Messenger( this ) );
	
	m_pcToolBar->EnableButton( BI_BACK, false );
	m_pcToolBar->EnableButton( BI_FORWARD, false );
	m_pcToolBar->EnableButton( BI_COPY, false );
	
	char*	pzHome = getenv( "HOME" );

	if ( bLoadPos && NULL != pzHome ) {
		FILE*	hFile;
		char	zPath[ 256 ];
				
		strcpy( zPath, pzHome );
		strcat( zPath, "/config/abrowse.cfg" );
	
		hFile = fopen( zPath, "rb" );
	
		if ( NULL != hFile ) {
			Rect cNewFrame;
					
			fread( &cNewFrame, sizeof( cNewFrame ), 1, hFile );
			fclose( hFile );
			SetFrame( cNewFrame );
		}
	}
	if ( pcHistory != NULL ) {
		for ( uint i = 0 ; i < pcHistory->size() ; ++i ) {
			HistoryEntry* pcEntry = new HistoryEntry;
	//		*pcEntry = *(*pcHistory)[i];
			pcEntry->m_cURL = (*pcHistory)[i]->m_cURL;
			pcEntry->m_cState = (*pcHistory)[i]->m_cState;
			m_cHistory.push_back( pcEntry );
		}
		m_nCurHistoryPos = m_cHistory.size() - 1;
		m_pcToolBar->EnableButton( BI_BACK, true );
	}
//	AddTimer( this, 1, 100000, false );
	GlobalMutex::PopLooper();
//	GlobalMutex::Unlock();
#endif
}
Example #6
/**
 * The main polling loop
 *
 * This routine does the polling and despatches of IO events
 * to the DCB's. It may be called either directly or as the entry point
 * of a polling thread within the gateway.
 *
 * The routine will loop as long as the variable "shutdown" is set to zero;
 * setting it to a non-zero value will cause the polling loop to return.
 *
 * There are two options for the polling. The debug option is only useful if
 * you have a single thread; it blocks in epoll_wait until an event occurs.
 *
 * The non-debug option does an epoll with a timeout. This allows the shutdown
 * value to be checked in all threads. The algorithm for polling in this mode is
 * to do a poll with no-wait; if no events are detected then the poll is repeated
 * with a timeout. This allows for a quick check before making the call with a
 * timeout. The call with the timeout differs in that the Linux scheduler may
 * deschedule a process if a timeout is included, but will not do so if a 0 timeout
 * value is given. This improves performance when the gateway is under heavy load.
 *
 * In order to provide a fairer means of sharing the threads between the different
 * DCB's the poll mechanism has been decoupled from the processing of the events.
 * The events are now received via the epoll_wait call; a queue of DCB's that have
 * events pending is maintained and as new events arrive the DCB is added to the end
 * of this queue. If an event arrives for a DCB already in the queue, then the event
 * bits are added to the DCB but the DCB maintains the same point in the queue unless
 * the original events are already being processed. If they are being processed then
 * the DCB is moved to the back of the queue; this means that a DCB that is receiving
 * events at a high rate will not block the execution of events for other DCB's and
 * should result in a fairer polling strategy.
 *
 * The introduction of the ability to inject "fake" write events into the event queue meant
 * that there was a possibility to "starve" new events since the polling loop would
 * consume the event queue before looking for new events. If the DCB that injected
 * the fake event then injected another fake event as a result of the first, it meant
 * that new events did not get added to the queue. The strategy has been updated to
 * not consume the entire event queue, but process one event before doing a non-blocking
 * call to add any new events before processing any more events. A blocking call to
 * collect events is only made if there are no pending events to be processed on the
 * event queue.
 *
 * Also introduced is a "timeout bias" mechanism. This mechanism controls the length
 * of the timeout passed to epoll_wait in blocking calls based on previous behaviour.
 * The initial call will block for 10% of the defined timeout period; this will be
 * increased in increments of 10% until the full timeout value is used. If at any
 * point there is an event to be processed then the value will be reduced to 10% again
 * for the next blocking call.
 *
 * @param arg	The thread ID passed as a void * to satisfy the threading package
 */
void
poll_waitevents(void *arg)
{
struct epoll_event events[MAX_EVENTS];
int		   i, nfds, timeout_bias = 1;
int		   thread_id = (int)arg;
DCB                *zombies = NULL;
int		   poll_spins = 0;

	/** Add this thread to the bitmask of running polling threads */
	bitmask_set(&poll_mask, thread_id);
	if (thread_data)
	{
		thread_data[thread_id].state = THREAD_IDLE;
	}

	/** Init mysql thread context for use with a mysql handle and a parser */
	mysql_thread_init();
	
	while (1)
	{
		if (pollStats.evq_pending == 0 && timeout_bias < 10)
		{
			timeout_bias++;
		}

		atomic_add(&n_waiting, 1);
#if BLOCKINGPOLL
		nfds = epoll_wait(epoll_fd, events, MAX_EVENTS, -1);
		atomic_add(&n_waiting, -1);
#else /* BLOCKINGPOLL */
#if MUTEX_EPOLL
                simple_mutex_lock(&epoll_wait_mutex, TRUE);
#endif
		if (thread_data)
		{
			thread_data[thread_id].state = THREAD_POLLING;
		}
                
		atomic_add(&pollStats.n_polls, 1);
		if ((nfds = epoll_wait(epoll_fd, events, MAX_EVENTS, 0)) == -1)
		{
			atomic_add(&n_waiting, -1);
                        int eno = errno;
                        errno = 0;
                        LOGIF(LD, (skygw_log_write(
                                LOGFILE_DEBUG,
                                "%lu [poll_waitevents] epoll_wait returned "
                                "%d, errno %d",
                                pthread_self(),
                                nfds,
                                eno)));
			atomic_add(&n_waiting, -1);
		}
		/*
		 * If there are no new descriptors from the non-blocking call
		 * and nothing to process on the event queue then do a
		 * blocking call to epoll_wait.
		 *
		 * We calculate a timeout bias to alter the length of the blocking
		 * call based on the time since we last received an event to process
		 */
		else if (nfds == 0 && pollStats.evq_pending == 0 && poll_spins++ > number_poll_spins)
		{
			atomic_add(&pollStats.blockingpolls, 1);
			nfds = epoll_wait(epoll_fd,
                                                  events,
                                                  MAX_EVENTS,
                                                  (max_poll_sleep * timeout_bias) / 10);
			if (nfds == 0 && pollStats.evq_pending)
			{
				atomic_add(&pollStats.wake_evqpending, 1);
				poll_spins = 0;
			}
		}
		else
		{
			atomic_add(&n_waiting, -1);
		}

		if (n_waiting == 0)
			atomic_add(&pollStats.n_nothreads, 1);
#if MUTEX_EPOLL
                simple_mutex_unlock(&epoll_wait_mutex);
#endif
#endif /* BLOCKINGPOLL */
		if (nfds > 0)
		{
			timeout_bias = 1;
			if (poll_spins <= number_poll_spins + 1)
				atomic_add(&pollStats.n_nbpollev, 1);
			poll_spins = 0;
                        LOGIF(LD, (skygw_log_write(
                                LOGFILE_DEBUG,
                                "%lu [poll_waitevents] epoll_wait found %d fds",
                                pthread_self(),
                                nfds)));
			atomic_add(&pollStats.n_pollev, 1);
			if (thread_data)
			{
				thread_data[thread_id].n_fds = nfds;
				thread_data[thread_id].cur_dcb = NULL;
				thread_data[thread_id].event = 0;
				thread_data[thread_id].state = THREAD_PROCESSING;
			}

			pollStats.n_fds[(nfds < MAXNFDS ? (nfds - 1) : MAXNFDS - 1)]++;

			load_average = (load_average * load_samples + nfds)
						/ (load_samples + 1);
			atomic_add(&load_samples, 1);
			atomic_add(&load_nfds, nfds);

			/*
			 * Process every DCB that has a new event and add
			 * it to the poll queue.
			 * If the DCB is currently being processed then we
			 * OR in the new event bits to the pending event bits
			 * and leave it in the queue.
			 * If the DCB was not already in the queue then it was
			 * idle and is added to the queue to process after
			 * setting the event bits.
			 */
			for (i = 0; i < nfds; i++)
			{
				DCB 	*dcb = (DCB *)events[i].data.ptr;
				__uint32_t	ev = events[i].events;

				spinlock_acquire(&pollqlock);
				if (DCB_POLL_BUSY(dcb))
				{
					if (dcb->evq.pending_events == 0)
					{
						pollStats.evq_pending++;
						dcb->evq.inserted = hkheartbeat;
					}
					dcb->evq.pending_events |= ev;
				}
				else
				{
					dcb->evq.pending_events = ev;
					if (eventq)
					{
						dcb->evq.prev = eventq->evq.prev;
						eventq->evq.prev->evq.next = dcb;
						eventq->evq.prev = dcb;
						dcb->evq.next = eventq;
					}
					else
					{
						eventq = dcb;
						dcb->evq.prev = dcb;
						dcb->evq.next = dcb;
					}
					pollStats.evq_length++;
					pollStats.evq_pending++;
					dcb->evq.inserted = hkheartbeat;
					if (pollStats.evq_length > pollStats.evq_max)
					{
						pollStats.evq_max = pollStats.evq_length;
					}
				}
				spinlock_release(&pollqlock);
			}
		}

		/*
		 * Process the queue of waiting requests.
		 * This is done without checking the evq_pending count as a
		 * precautionary measure to avoid issues if the house keeping
		 * of the count goes wrong.
		 */
		if (process_pollq(thread_id))
			timeout_bias = 1;

		if (thread_data)
			thread_data[thread_id].state = THREAD_ZPROCESSING;
		zombies = dcb_process_zombies(thread_id);
		if (thread_data)
			thread_data[thread_id].state = THREAD_IDLE;

		if (do_shutdown)
		{
                        /*<
                         * Remove the thread from the bitmask of running
                         * polling threads.
                         */
			if (thread_data)
			{
				thread_data[thread_id].state = THREAD_STOPPED;
			}
			bitmask_clear(&poll_mask, thread_id);
			/** Release mysql thread context */
			mysql_thread_end();
			return;
		}
		if (thread_data)
		{
			thread_data[thread_id].state = THREAD_IDLE;
		}
	} /*< while(1) */
}
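
The "timeout bias" described in the header comment is a simple linear ramp: the blocking epoll_wait timeout starts at 10% of max_poll_sleep, grows by 10% for each idle iteration, and snaps back to 10% as soon as an event is processed. A minimal sketch that isolates just that calculation (illustrative only, not MaxScale code; the variable names follow the function above):

// Illustrative restatement of the timeout-bias ramp used above; max_poll_sleep
// is assumed to be in the same unit as in poll_waitevents().
static int
biased_timeout(int max_poll_sleep, int *timeout_bias, bool had_work)
{
	if (had_work)
		*timeout_bias = 1;			/* event processed: back to 10% */
	else if (*timeout_bias < 10)
		(*timeout_bias)++;			/* idle pass: ramp up another 10% */

	return (max_poll_sleep * *timeout_bias) / 10;	/* 10% .. 100% of the period */
}
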
Example #7
static int host_cmd_motion_sense(struct host_cmd_handler_args *args)
{
	const struct ec_params_motion_sense *in = args->params;
	struct ec_response_motion_sense *out = args->response;
	struct motion_sensor_t *sensor;
	int i, ret = EC_RES_INVALID_PARAM, reported;

	switch (in->cmd) {
	case MOTIONSENSE_CMD_DUMP:
		out->dump.module_flags =
			(*(host_get_memmap(EC_MEMMAP_ACC_STATUS)) &
			 EC_MEMMAP_ACC_STATUS_PRESENCE_BIT) ?
			MOTIONSENSE_MODULE_FLAG_ACTIVE : 0;
		out->dump.sensor_count = motion_sensor_count;
		args->response_size = sizeof(out->dump);
		reported = MIN(motion_sensor_count, in->dump.max_sensor_count);
		mutex_lock(&g_sensor_mutex);
		for (i = 0; i < reported; i++) {
			sensor = &motion_sensors[i];
			out->dump.sensor[i].flags =
				MOTIONSENSE_SENSOR_FLAG_PRESENT;
			/* casting from int to s16 */
			out->dump.sensor[i].data[X] = sensor->xyz[X];
			out->dump.sensor[i].data[Y] = sensor->xyz[Y];
			out->dump.sensor[i].data[Z] = sensor->xyz[Z];
		}
		mutex_unlock(&g_sensor_mutex);
		args->response_size += reported *
			sizeof(struct ec_response_motion_sensor_data);
		break;

	case MOTIONSENSE_CMD_DATA:
		sensor = host_sensor_id_to_motion_sensor(
				in->sensor_odr.sensor_num);
		if (sensor == NULL)
			return EC_RES_INVALID_PARAM;

		out->data.flags = 0;

		mutex_lock(&g_sensor_mutex);
		out->data.data[X] = sensor->xyz[X];
		out->data.data[Y] = sensor->xyz[Y];
		out->data.data[Z] = sensor->xyz[Z];
		mutex_unlock(&g_sensor_mutex);
		args->response_size = sizeof(out->data);
		break;

	case MOTIONSENSE_CMD_INFO:
		sensor = host_sensor_id_to_motion_sensor(
				in->sensor_odr.sensor_num);
		if (sensor == NULL)
			return EC_RES_INVALID_PARAM;

		out->info.type = sensor->type;
		out->info.location = sensor->location;
		out->info.chip = sensor->chip;

		args->response_size = sizeof(out->info);
		break;

	case MOTIONSENSE_CMD_EC_RATE:
		sensor = host_sensor_id_to_motion_sensor(
				in->sensor_odr.sensor_num);
		if (sensor == NULL)
			return EC_RES_INVALID_PARAM;

		/*
		 * Set new sensor sampling rate when AP is on, if the data arg
		 * has a value.
		 */
		if (in->ec_rate.data != EC_MOTION_SENSE_NO_VALUE) {
			if (in->ec_rate.data == 0)
				sensor->config[SENSOR_CONFIG_AP].ec_rate = 0;
			else
				sensor->config[SENSOR_CONFIG_AP].ec_rate =
					MAX(in->ec_rate.data,
					    MIN_MOTION_SENSE_WAIT_TIME / MSEC);

			/* Bound the new sampling rate. */
			motion_sense_set_accel_interval();
		}

		out->ec_rate.ret = motion_sense_ec_rate(sensor) / MSEC;

		args->response_size = sizeof(out->ec_rate);
		break;

	case MOTIONSENSE_CMD_SENSOR_ODR:
		/* Verify sensor number is valid. */
		sensor = host_sensor_id_to_motion_sensor(
				in->sensor_odr.sensor_num);
		if (sensor == NULL)
			return EC_RES_INVALID_PARAM;

		/* Set new data rate if the data arg has a value. */
		if (in->sensor_odr.data != EC_MOTION_SENSE_NO_VALUE) {
			sensor->config[SENSOR_CONFIG_AP].odr =
				in->sensor_odr.data |
				(in->sensor_odr.roundup ? ROUND_UP_FLAG : 0);

			ret = motion_sense_set_data_rate(sensor);
			if (ret != EC_SUCCESS)
				return EC_RES_INVALID_PARAM;

			/*
			 * To be sure timestamps are calculated properly,
			 * send an event to have a timestamp inserted in the
			 * FIFO.
			 */
			task_set_event(TASK_ID_MOTIONSENSE,
					TASK_EVENT_MOTION_ODR_CHANGE, 0);
			/*
			 * If the sensor was suspended before, or now
			 * suspended, we have to recalculate the EC sampling
			 * rate
			 */
			motion_sense_set_accel_interval();
		}

		out->sensor_odr.ret = sensor->drv->get_data_rate(sensor);

		args->response_size = sizeof(out->sensor_odr);

		break;

	case MOTIONSENSE_CMD_SENSOR_RANGE:
		/* Verify sensor number is valid. */
		sensor = host_sensor_id_to_motion_sensor(
				in->sensor_range.sensor_num);
		if (sensor == NULL)
			return EC_RES_INVALID_PARAM;

		/* Set new range if the data arg has a value. */
		if (in->sensor_range.data != EC_MOTION_SENSE_NO_VALUE) {
			if (sensor->drv->set_range(sensor,
						in->sensor_range.data,
						in->sensor_range.roundup)
					!= EC_SUCCESS) {
				return EC_RES_INVALID_PARAM;
			}
		}

		out->sensor_range.ret = sensor->drv->get_range(sensor);
		args->response_size = sizeof(out->sensor_range);
		break;

	case MOTIONSENSE_CMD_SENSOR_OFFSET:
		/* Verify sensor number is valid. */
		sensor = host_sensor_id_to_motion_sensor(
				in->sensor_offset.sensor_num);
		if (sensor == NULL)
			return EC_RES_INVALID_PARAM;

		/* Set new offset if the set-offset flag is given. */
		if (in->sensor_offset.flags & MOTION_SENSE_SET_OFFSET) {
			ret = sensor->drv->set_offset(sensor,
						in->sensor_offset.offset,
						in->sensor_offset.temp);
			if (ret != EC_SUCCESS)
				return ret;
		}

		ret = sensor->drv->get_offset(sensor, out->sensor_offset.offset,
				&out->sensor_offset.temp);
		if (ret != EC_SUCCESS)
			return ret;
		args->response_size = sizeof(out->sensor_offset);
		break;

	case MOTIONSENSE_CMD_PERFORM_CALIB:
		/* Verify sensor number is valid. */
		sensor = host_sensor_id_to_motion_sensor(
				in->sensor_offset.sensor_num);
		if (sensor == NULL)
			return EC_RES_INVALID_PARAM;
		if (!sensor->drv->perform_calib)
			return EC_RES_INVALID_COMMAND;

		ret = sensor->drv->perform_calib(sensor);
		if (ret != EC_SUCCESS)
			return ret;
		ret = sensor->drv->get_offset(sensor, out->sensor_offset.offset,
				&out->sensor_offset.temp);
		if (ret != EC_SUCCESS)
			return ret;
		args->response_size = sizeof(out->sensor_offset);
		break;

#ifdef CONFIG_ACCEL_FIFO
	case MOTIONSENSE_CMD_FIFO_FLUSH:
		sensor = host_sensor_id_to_motion_sensor(
				in->sensor_odr.sensor_num);
		if (sensor == NULL)
			return EC_RES_INVALID_PARAM;

		atomic_add(&sensor->flush_pending, 1);

		task_set_event(TASK_ID_MOTIONSENSE,
			       TASK_EVENT_MOTION_FLUSH_PENDING, 0);
		/* passthrough */
	case MOTIONSENSE_CMD_FIFO_INFO:
		motion_sense_get_fifo_info(&out->fifo_info);
		for (i = 0; i < motion_sensor_count; i++) {
			out->fifo_info.lost[i] = motion_sensors[i].lost;
			motion_sensors[i].lost = 0;
		}
		motion_sense_fifo_lost = 0;
		args->response_size = sizeof(out->fifo_info) +
			sizeof(uint16_t) * motion_sensor_count;
		break;

	case MOTIONSENSE_CMD_FIFO_READ:
		mutex_lock(&g_sensor_mutex);
		reported = MIN((args->response_max - sizeof(out->fifo_read)) /
			       motion_sense_fifo.unit_bytes,
			       MIN(queue_count(&motion_sense_fifo),
				   in->fifo_read.max_data_vector));
		reported = queue_remove_units(&motion_sense_fifo,
				out->fifo_read.data, reported);
		mutex_unlock(&g_sensor_mutex);
		out->fifo_read.number_data = reported;
		args->response_size = sizeof(out->fifo_read) + reported *
			motion_sense_fifo.unit_bytes;
		break;
#else
	case MOTIONSENSE_CMD_FIFO_INFO:
		/* Only support the INFO command, to tell there is no FIFO. */
		memset(&out->fifo_info, 0, sizeof(out->fifo_info));
		args->response_size = sizeof(out->fifo_info);
		break;
#endif
	default:
		/* Call other users of the motion task */
#ifdef CONFIG_LID_ANGLE
		if (ret == EC_RES_INVALID_PARAM)
			ret = host_cmd_motion_lid(args);
#endif
		return ret;
	}

	return EC_RES_SUCCESS;
}
Example #8
 INLINE friend T operator++ (AtomicInternal& value) { return atomic_add(&value.data,  1) + 1; }
Example #9
 INLINE friend T operator-- (AtomicInternal& value, int) { return atomic_add(&value.data, -1); }
Example #10
inline void ATOMIC_ADD(ATOMIC_T *v, int i)
{
	atomic_add(i, v);
}
Example #11
 INLINE friend T operator+= (AtomicInternal& value, T input) { return atomic_add(&value.data, input) + input; }
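
Examples #8, #9 and #11 are friend operators on an AtomicInternal wrapper; because atomic_add() returns the value held before the addition, the prefix and += forms add the delta back to produce the new value, while the postfix form returns the old value unchanged. A minimal sketch of a wrapper these operators could sit in, with std::atomic standing in for the wrapper's atomic_add() (an assumption for illustration, not the library's actual definition):

#include <atomic>

// Hypothetical shim with the same convention as the atomic_add() used above:
// it returns the value held *before* the addition.
template <typename T>
T atomic_add(std::atomic<T> *value, T delta) { return value->fetch_add(delta); }

// Hypothetical minimal wrapper consistent with the friend operators above.
template <typename T>
struct AtomicInternal {
	std::atomic<T> data;

	// prefix ++ must yield the *new* value, so the old value needs "+ 1"
	friend T operator++ (AtomicInternal& value) { return atomic_add(&value.data, (T)1) + 1; }
	// postfix -- must yield the *old* value, which atomic_add() already returns
	friend T operator-- (AtomicInternal& value, int) { return atomic_add(&value.data, (T)-1); }
	// += must yield the *new* value, so the input is added back on
	friend T operator+= (AtomicInternal& value, T input) { return atomic_add(&value.data, input) + input; }
};
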
Example #12
/* Test various atomic.h macros.  */
static int
do_test (void)
{
  atomic_t mem;
  int ret = 0;

#ifdef atomic_compare_and_exchange_val_acq
  mem = 24;
  if (atomic_compare_and_exchange_val_acq (&mem, 35, 24) != 24
      || mem != 35)
    {
      puts ("atomic_compare_and_exchange_val_acq test 1 failed");
      ret = 1;
    }

  mem = 12;
  if (atomic_compare_and_exchange_val_acq (&mem, 10, 15) != 12
      || mem != 12)
    {
      puts ("atomic_compare_and_exchange_val_acq test 2 failed");
      ret = 1;
    }

  mem = -15;
  if (atomic_compare_and_exchange_val_acq (&mem, -56, -15) != -15
      || mem != -56)
    {
      puts ("atomic_compare_and_exchange_val_acq test 3 failed");
      ret = 1;
    }

  mem = -1;
  if (atomic_compare_and_exchange_val_acq (&mem, 17, 0) != -1
      || mem != -1)
    {
      puts ("atomic_compare_and_exchange_val_acq test 4 failed");
      ret = 1;
    }
#endif

  mem = 24;
  if (atomic_compare_and_exchange_bool_acq (&mem, 35, 24)
      || mem != 35)
    {
      puts ("atomic_compare_and_exchange_bool_acq test 1 failed");
      ret = 1;
    }

  mem = 12;
  if (! atomic_compare_and_exchange_bool_acq (&mem, 10, 15)
      || mem != 12)
    {
      puts ("atomic_compare_and_exchange_bool_acq test 2 failed");
      ret = 1;
    }

  mem = -15;
  if (atomic_compare_and_exchange_bool_acq (&mem, -56, -15)
      || mem != -56)
    {
      puts ("atomic_compare_and_exchange_bool_acq test 3 failed");
      ret = 1;
    }

  mem = -1;
  if (! atomic_compare_and_exchange_bool_acq (&mem, 17, 0)
      || mem != -1)
    {
      puts ("atomic_compare_and_exchange_bool_acq test 4 failed");
      ret = 1;
    }

  mem = 64;
  if (atomic_exchange_acq (&mem, 31) != 64
      || mem != 31)
    {
      puts ("atomic_exchange_acq test failed");
      ret = 1;
    }

  mem = 2;
  if (atomic_exchange_and_add (&mem, 11) != 2
      || mem != 13)
    {
      puts ("atomic_exchange_and_add test failed");
      ret = 1;
    }

  mem = -21;
  atomic_add (&mem, 22);
  if (mem != 1)
    {
      puts ("atomic_add test failed");
      ret = 1;
    }

  mem = -1;
  atomic_increment (&mem);
  if (mem != 0)
    {
      puts ("atomic_increment test failed");
      ret = 1;
    }

  mem = 2;
  if (atomic_increment_val (&mem) != 3)
    {
      puts ("atomic_increment_val test failed");
      ret = 1;
    }

  mem = 0;
  if (atomic_increment_and_test (&mem)
      || mem != 1)
    {
      puts ("atomic_increment_and_test test 1 failed");
      ret = 1;
    }

  mem = 35;
  if (atomic_increment_and_test (&mem)
      || mem != 36)
    {
      puts ("atomic_increment_and_test test 2 failed");
      ret = 1;
    }

  mem = -1;
  if (! atomic_increment_and_test (&mem)
      || mem != 0)
    {
      puts ("atomic_increment_and_test test 3 failed");
      ret = 1;
    }

  mem = 17;
  atomic_decrement (&mem);
  if (mem != 16)
    {
      puts ("atomic_decrement test failed");
      ret = 1;
    }

  if (atomic_decrement_val (&mem) != 15)
    {
      puts ("atomic_decrement_val test failed");
      ret = 1;
    }

  mem = 0;
  if (atomic_decrement_and_test (&mem)
      || mem != -1)
    {
      puts ("atomic_decrement_and_test test 1 failed");
      ret = 1;
    }

  mem = 15;
  if (atomic_decrement_and_test (&mem)
      || mem != 14)
    {
      puts ("atomic_decrement_and_test test 2 failed");
      ret = 1;
    }

  mem = 1;
  if (! atomic_decrement_and_test (&mem)
      || mem != 0)
    {
      puts ("atomic_decrement_and_test test 3 failed");
      ret = 1;
    }

  mem = 1;
  if (atomic_decrement_if_positive (&mem) != 1
      || mem != 0)
    {
      puts ("atomic_decrement_if_positive test 1 failed");
      ret = 1;
    }

  mem = 0;
  if (atomic_decrement_if_positive (&mem) != 0
      || mem != 0)
    {
      puts ("atomic_decrement_if_positive test 2 failed");
      ret = 1;
    }

  mem = -1;
  if (atomic_decrement_if_positive (&mem) != -1
      || mem != -1)
    {
      puts ("atomic_decrement_if_positive test 3 failed");
      ret = 1;
    }

  mem = -12;
  if (! atomic_add_negative (&mem, 10)
      || mem != -2)
    {
      puts ("atomic_add_negative test 1 failed");
      ret = 1;
    }

  mem = 0;
  if (atomic_add_negative (&mem, 100)
      || mem != 100)
    {
      puts ("atomic_add_negative test 2 failed");
      ret = 1;
    }

  mem = 15;
  if (atomic_add_negative (&mem, -10)
      || mem != 5)
    {
      puts ("atomic_add_negative test 3 failed");
      ret = 1;
    }

  mem = -12;
  if (atomic_add_negative (&mem, 14)
      || mem != 2)
    {
      puts ("atomic_add_negative test 4 failed");
      ret = 1;
    }

  mem = 0;
  if (! atomic_add_negative (&mem, -1)
      || mem != -1)
    {
      puts ("atomic_add_negative test 5 failed");
      ret = 1;
    }

  mem = -31;
  if (atomic_add_negative (&mem, 31)
      || mem != 0)
    {
      puts ("atomic_add_negative test 6 failed");
      ret = 1;
    }

  mem = -34;
  if (atomic_add_zero (&mem, 31)
      || mem != -3)
    {
      puts ("atomic_add_zero test 1 failed");
      ret = 1;
    }

  mem = -36;
  if (! atomic_add_zero (&mem, 36)
      || mem != 0)
    {
      puts ("atomic_add_zero test 2 failed");
      ret = 1;
    }

  mem = 113;
  if (atomic_add_zero (&mem, -13)
      || mem != 100)
    {
      puts ("atomic_add_zero test 3 failed");
      ret = 1;
    }

  mem = -18;
  if (atomic_add_zero (&mem, 20)
      || mem != 2)
    {
      puts ("atomic_add_zero test 4 failed");
      ret = 1;
    }

  mem = 10;
  if (atomic_add_zero (&mem, -20)
      || mem != -10)
    {
      puts ("atomic_add_zero test 5 failed");
      ret = 1;
    }

  mem = 10;
  if (! atomic_add_zero (&mem, -10)
      || mem != 0)
    {
      puts ("atomic_add_zero test 6 failed");
      ret = 1;
    }

  mem = 0;
  atomic_bit_set (&mem, 1);
  if (mem != 2)
    {
      puts ("atomic_bit_set test 1 failed");
      ret = 1;
    }

  mem = 8;
  atomic_bit_set (&mem, 3);
  if (mem != 8)
    {
      puts ("atomic_bit_set test 2 failed");
      ret = 1;
    }

#ifdef TEST_ATOMIC64
  mem = 16;
  atomic_bit_set (&mem, 35);
  if (mem != 0x800000010LL)
    {
      puts ("atomic_bit_set test 3 failed");
      ret = 1;
    }
#endif

  mem = 0;
  if (atomic_bit_test_set (&mem, 1)
      || mem != 2)
    {
      puts ("atomic_bit_test_set test 1 failed");
      ret = 1;
    }

  mem = 8;
  if (! atomic_bit_test_set (&mem, 3)
      || mem != 8)
    {
      puts ("atomic_bit_test_set test 2 failed");
      ret = 1;
    }

#ifdef TEST_ATOMIC64
  mem = 16;
  if (atomic_bit_test_set (&mem, 35)
      || mem != 0x800000010LL)
    {
      puts ("atomic_bit_test_set test 3 failed");
      ret = 1;
    }

  mem = 0x100000000LL;
  if (! atomic_bit_test_set (&mem, 32)
      || mem != 0x100000000LL)
    {
      puts ("atomic_bit_test_set test 4 failed");
      ret = 1;
    }
#endif

#ifdef catomic_compare_and_exchange_val_acq
  mem = 24;
  if (catomic_compare_and_exchange_val_acq (&mem, 35, 24) != 24
      || mem != 35)
    {
      puts ("catomic_compare_and_exchange_val_acq test 1 failed");
      ret = 1;
    }

  mem = 12;
  if (catomic_compare_and_exchange_val_acq (&mem, 10, 15) != 12
      || mem != 12)
    {
      puts ("catomic_compare_and_exchange_val_acq test 2 failed");
      ret = 1;
    }

  mem = -15;
  if (catomic_compare_and_exchange_val_acq (&mem, -56, -15) != -15
      || mem != -56)
    {
      puts ("catomic_compare_and_exchange_val_acq test 3 failed");
      ret = 1;
    }

  mem = -1;
  if (catomic_compare_and_exchange_val_acq (&mem, 17, 0) != -1
      || mem != -1)
    {
      puts ("catomic_compare_and_exchange_val_acq test 4 failed");
      ret = 1;
    }
#endif

  mem = 24;
  if (catomic_compare_and_exchange_bool_acq (&mem, 35, 24)
      || mem != 35)
    {
      puts ("catomic_compare_and_exchange_bool_acq test 1 failed");
      ret = 1;
    }

  mem = 12;
  if (! catomic_compare_and_exchange_bool_acq (&mem, 10, 15)
      || mem != 12)
    {
      puts ("catomic_compare_and_exchange_bool_acq test 2 failed");
      ret = 1;
    }

  mem = -15;
  if (catomic_compare_and_exchange_bool_acq (&mem, -56, -15)
      || mem != -56)
    {
      puts ("catomic_compare_and_exchange_bool_acq test 3 failed");
      ret = 1;
    }

  mem = -1;
  if (! catomic_compare_and_exchange_bool_acq (&mem, 17, 0)
      || mem != -1)
    {
      puts ("catomic_compare_and_exchange_bool_acq test 4 failed");
      ret = 1;
    }

  mem = 2;
  if (catomic_exchange_and_add (&mem, 11) != 2
      || mem != 13)
    {
      puts ("catomic_exchange_and_add test failed");
      ret = 1;
    }

  mem = -21;
  catomic_add (&mem, 22);
  if (mem != 1)
    {
      puts ("catomic_add test failed");
      ret = 1;
    }

  mem = -1;
  catomic_increment (&mem);
  if (mem != 0)
    {
      puts ("catomic_increment test failed");
      ret = 1;
    }

  mem = 2;
  if (catomic_increment_val (&mem) != 3)
    {
      puts ("catomic_increment_val test failed");
      ret = 1;
    }

  mem = 17;
  catomic_decrement (&mem);
  if (mem != 16)
    {
      puts ("catomic_decrement test failed");
      ret = 1;
    }

  if (catomic_decrement_val (&mem) != 15)
    {
      puts ("catomic_decrement_val test failed");
      ret = 1;
    }

  return ret;
}
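
The test above exercises glibc's atomic.h macro family; on most targets those macros ultimately expand to compiler builtins. A rough, hypothetical approximation of a few of the macros used here, written with GCC/Clang __atomic builtins, shows the semantics the test depends on (old-value return for atomic_exchange_and_add, zero-result test for the *_and_test forms); it is not glibc's real implementation, and the memory orders are guesses:

/* Rough, hypothetical approximations of a few glibc atomic.h macros using
 * GCC/Clang __atomic builtins (GNU extensions); not glibc's actual code. */
#define atomic_exchange_and_add(mem, value) \
	__atomic_fetch_add((mem), (value), __ATOMIC_ACQUIRE)	/* returns the old value */
#define atomic_add(mem, value) \
	((void) __atomic_add_fetch((mem), (value), __ATOMIC_ACQ_REL))
#define atomic_increment_and_test(mem) \
	(__atomic_add_fetch((mem), 1, __ATOMIC_ACQ_REL) == 0)	/* true iff result is 0 */
#define atomic_decrement_and_test(mem) \
	(__atomic_sub_fetch((mem), 1, __ATOMIC_ACQ_REL) == 0)
#define atomic_compare_and_exchange_bool_acq(mem, newval, oldval)		\
	({ __typeof__(*(mem)) __expected = (oldval);				\
	   /* glibc convention: 0 on success, non-zero if *mem != oldval */	\
	   !__atomic_compare_exchange_n((mem), &__expected, (newval), 0,	\
					__ATOMIC_ACQUIRE, __ATOMIC_RELAXED); })
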
Example #13
static int camera_v4l2_open(struct file *filep)
{
	int rc = 0;
	struct v4l2_event event;
	struct msm_video_device *pvdev = video_drvdata(filep);
	struct task_struct *qdaemon_task;
	BUG_ON(!pvdev);

	rc = camera_v4l2_fh_open(filep);
	if (rc < 0) {
	    pr_err("%s : camera_v4l2_fh_open", __FUNCTION__);
	    goto fh_open_fail;
	}

	/* every stream has a vb2 queue */
	rc = camera_v4l2_vb2_q_init(filep);
	if (rc < 0) {
	    pr_err("%s : camera_v4l2_vb2_q_init", __FUNCTION__);
	    goto vb2_q_fail;
	}

	if (!atomic_read(&pvdev->opened)) {

		/* create a new session when first opened */
		rc = msm_create_session(pvdev->vdev->num, pvdev->vdev);
		if (rc < 0) {
		    pr_err("%s : msm_create_session", __func__);
		    goto session_fail;
		}

		rc = msm_create_command_ack_q(pvdev->vdev->num, 0);
		if (rc < 0) {
		    pr_err("%s : msm_create_command_ack_q", __FUNCTION__);
		    goto command_ack_q_fail;
		}

		camera_pack_event(filep, MSM_CAMERA_NEW_SESSION, 0, -1, &event);
		rc = msm_post_event(&event, MSM_POST_EVT_TIMEOUT);
		if (rc < 0) {
		    pr_err("%s, __dbg: post fail \n",__FUNCTION__);
		    pr_err("%s, pid: %d, tgid: %d\n",__FUNCTION__, qdaemon_pid, qdaemon_tgid);
		    qdaemon_task = pid_task(find_get_pid(qdaemon_tgid), PIDTYPE_PID);
		    if (qdaemon_task) {
			if (!strncmp(qdaemon_task->comm, QDAEMON, strlen(QDAEMON))) {
			    pr_err("%s, kill daemon", __func__);
			    send_sig(SIGKILL, qdaemon_task, 0);
			    pr_err("%s, kill this", __func__);
			    send_sig(SIGKILL, current, 0);
			} else
			    pr_err("%s, now (%s : %d)", __func__,
				    qdaemon_task->comm, task_pid_nr(qdaemon_task));
		    } else
			pr_err("error!! can't look for daemon");
		    goto post_fail;
		}
		rc = camera_check_event_status(&event);
		if (rc < 0) {
		    pr_err("%s : camera_check_event_status", __FUNCTION__);
		    goto post_fail;
		}
	} else {
		rc = msm_create_command_ack_q(pvdev->vdev->num,
			atomic_read(&pvdev->opened));
		if (rc < 0) {
		    pr_err("%s : msm_create_command_ack_q", __FUNCTION__);
		    goto session_fail;
		}
	}

	atomic_add(1, &pvdev->opened);
	return rc;

post_fail:
	msm_delete_command_ack_q(pvdev->vdev->num, 0);
command_ack_q_fail:
	msm_destroy_session(pvdev->vdev->num);
session_fail:
	camera_v4l2_vb2_q_release(filep);
vb2_q_fail:
	camera_v4l2_fh_release(filep);
fh_open_fail:
	return rc;
}
Example #14
void
BDirectMessageTarget::Release()
{
	if (atomic_add(&fReferenceCount, -1) == 1)
		delete this;
}
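
Examples #1 and #14 are the two halves of Haiku's intrusive reference count: atomic_add() returns the value the counter held before the addition, so Release() deletes the object exactly when the count drops from 1 to 0. A minimal sketch of the same pattern with std::atomic standing in for atomic_add() (a hypothetical RefCounted type, not the actual BDirectMessageTarget class):

#include <atomic>
#include <cstdint>

// Hypothetical stand-in for the Haiku class; std::atomic::fetch_add, like
// Haiku's atomic_add(), returns the value the counter held *before* the add.
struct RefCounted {
	std::atomic<int32_t> fReferenceCount{1};	// creator starts with one reference

	void Acquire() { fReferenceCount.fetch_add(1); }

	void Release()
	{
		if (fReferenceCount.fetch_add(-1) == 1)	// old value 1 -> now 0: last owner
			delete this;
	}

	virtual ~RefCounted() {}
};
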
Example #15
static int send_via_shortcut(struct sk_buff *skb, struct mpoa_client *mpc)
{
	in_cache_entry *entry;
	struct iphdr *iph;
	char *buff;
	__be32 ipaddr = 0;

	static struct {
		struct llc_snap_hdr hdr;
		__be32 tag;
	} tagged_llc_snap_hdr = {
		{0xaa, 0xaa, 0x03, {0x00, 0x00, 0x00}, {0x88, 0x4c}},
		0
	};

	buff = skb->data + mpc->dev->hard_header_len;
	iph = (struct iphdr *)buff;
	ipaddr = iph->daddr;

	ddprintk("(%s) ipaddr 0x%x\n",
		 mpc->dev->name, ipaddr);

	entry = mpc->in_ops->get(ipaddr, mpc);
	if (entry == NULL) {
		entry = mpc->in_ops->add_entry(ipaddr, mpc);
		if (entry != NULL)
			mpc->in_ops->put(entry);
		return 1;
	}
	/* threshold not exceeded or VCC not ready */
	if (mpc->in_ops->cache_hit(entry, mpc) != OPEN) {
		ddprintk("(%s) cache_hit: returns != OPEN\n",
			 mpc->dev->name);
		mpc->in_ops->put(entry);
		return 1;
	}

	ddprintk("(%s) using shortcut\n",
		 mpc->dev->name);
	/* MPOA spec A.1.4, MPOA client must decrement IP ttl at least by one */
	if (iph->ttl <= 1) {
		ddprintk("(%s) IP ttl = %u, using LANE\n",
			 mpc->dev->name, iph->ttl);
		mpc->in_ops->put(entry);
		return 1;
	}
	iph->ttl--;
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);

	if (entry->ctrl_info.tag != 0) {
		ddprintk("(%s) adding tag 0x%x\n",
			 mpc->dev->name, entry->ctrl_info.tag);
		tagged_llc_snap_hdr.tag = entry->ctrl_info.tag;
		skb_pull(skb, ETH_HLEN);	/* get rid of Eth header */
		skb_push(skb, sizeof(tagged_llc_snap_hdr));
						/* add LLC/SNAP header   */
		skb_copy_to_linear_data(skb, &tagged_llc_snap_hdr,
					sizeof(tagged_llc_snap_hdr));
	} else {
		skb_pull(skb, ETH_HLEN);	/* get rid of Eth header */
		skb_push(skb, sizeof(struct llc_snap_hdr));
						/* add LLC/SNAP header + tag  */
		skb_copy_to_linear_data(skb, &llc_snap_mpoa_data,
					sizeof(struct llc_snap_hdr));
	}

	atomic_add(skb->truesize, &sk_atm(entry->shortcut)->sk_wmem_alloc);
	ATM_SKB(skb)->atm_options = entry->shortcut->atm_options;
	entry->shortcut->send(entry->shortcut, skb);
	entry->packets_fwded++;
	mpc->in_ops->put(entry);

	return 0;
}
Example #16
/**
 * Release a previously obtained readlock.
 *
 * Simply decrement the n_readers value for the hash table
 *
 * @param table		The hash table to unlock
 */
static void
hashtable_read_unlock(HASHTABLE *table)
{
	atomic_add(&table->n_readers, -1);
}
Example #17
static int __ip6_append_data(struct sock *sk,
			     struct flowi6 *fl6,
			     struct sk_buff_head *queue,
			     struct inet_cork *cork,
			     struct inet6_cork *v6_cork,
			     struct page_frag *pfrag,
			     int getfrag(void *from, char *to, int offset,
					 int len, int odd, struct sk_buff *skb),
			     void *from, int length, int transhdrlen,
			     unsigned int flags, struct ipcm6_cookie *ipc6,
			     const struct sockcm_cookie *sockc)
{
	struct sk_buff *skb, *skb_prev = NULL;
	unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu;
	int exthdrlen = 0;
	int dst_exthdrlen = 0;
	int hh_len;
	int copy;
	int err;
	int offset = 0;
	__u8 tx_flags = 0;
	u32 tskey = 0;
	struct rt6_info *rt = (struct rt6_info *)cork->dst;
	struct ipv6_txoptions *opt = v6_cork->opt;
	int csummode = CHECKSUM_NONE;
	unsigned int maxnonfragsize, headersize;

	skb = skb_peek_tail(queue);
	if (!skb) {
		exthdrlen = opt ? opt->opt_flen : 0;
		dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
	}

	mtu = cork->fragsize;
	orig_mtu = mtu;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);

	fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
			(opt ? opt->opt_nflen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
		     sizeof(struct frag_hdr);

	headersize = sizeof(struct ipv6hdr) +
		     (opt ? opt->opt_flen + opt->opt_nflen : 0) +
		     (dst_allfrag(&rt->dst) ?
		      sizeof(struct frag_hdr) : 0) +
		     rt->rt6i_nfheader_len;

	if (cork->length + length > mtu - headersize && ipc6->dontfrag &&
	    (sk->sk_protocol == IPPROTO_UDP ||
	     sk->sk_protocol == IPPROTO_RAW)) {
		ipv6_local_rxpmtu(sk, fl6, mtu - headersize +
				sizeof(struct ipv6hdr));
		goto emsgsize;
	}

	if (ip6_sk_ignore_df(sk))
		maxnonfragsize = sizeof(struct ipv6hdr) + IPV6_MAXPLEN;
	else
		maxnonfragsize = mtu;

	if (cork->length + length > maxnonfragsize - headersize) {
emsgsize:
		ipv6_local_error(sk, EMSGSIZE, fl6,
				 mtu - headersize +
				 sizeof(struct ipv6hdr));
		return -EMSGSIZE;
	}

	/* CHECKSUM_PARTIAL only with no extension headers and when
	 * we are not going to fragment
	 */
	if (transhdrlen && sk->sk_protocol == IPPROTO_UDP &&
	    headersize == sizeof(struct ipv6hdr) &&
	    length < mtu - headersize &&
	    !(flags & MSG_MORE) &&
	    rt->dst.dev->features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
		csummode = CHECKSUM_PARTIAL;

	if (sk->sk_type == SOCK_DGRAM || sk->sk_type == SOCK_RAW) {
		sock_tx_timestamp(sk, sockc->tsflags, &tx_flags);
		if (tx_flags & SKBTX_ANY_SW_TSTAMP &&
		    sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
			tskey = sk->sk_tskey++;
	}

	/*
	 * Let's try using as much space as possible.
	 * Use MTU if total length of the message fits into the MTU.
	 * Otherwise, we need to reserve fragment header and
	 * fragment alignment (= 8-15 octets, in total).
	 *
	 * Note that we may need to "move" the data from the tail
	 * of the buffer to the new fragment when we split
	 * the message.
	 *
	 * FIXME: It may be fragmented into multiple chunks
	 *        at once if non-fragmentable extension headers
	 *        are too large.
	 * --yoshfuji
	 */

	cork->length += length;
	if (((length > mtu) ||
	     (skb && skb_is_gso(skb))) &&
	    (sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
	    (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) {
		err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
					  hh_len, fragheaderlen, exthdrlen,
					  transhdrlen, mtu, flags, fl6);
		if (err)
			goto error;
		return 0;
	}

	if (!skb)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;

		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
alloc_new_skb:
			/* There's no room in the current skb */
			if (skb)
				fraggap = skb->len - maxfraglen;
			else
				fraggap = 0;
			/* update mtu and maxfraglen if necessary */
			if (!skb || !skb_prev)
				ip6_append_data_mtu(&mtu, &maxfraglen,
						    fragheaderlen, skb, rt,
						    orig_mtu);

			skb_prev = skb;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;

			if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
				datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
			if ((flags & MSG_MORE) &&
			    !(rt->dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			alloclen += dst_exthdrlen;

			if (datalen != length + fraggap) {
				/*
				 * this is not the last fragment, the trailer
				 * space is regarded as data space.
				 */
				datalen += rt->dst.trailer_len;
			}

			alloclen += rt->dst.trailer_len;
			fraglen = datalen + fragheaderlen;

			/*
			 * We just reserve space for fragment header.
			 * Note: this may be overallocation if the message
			 * (without MSG_MORE) fits into the MTU.
			 */
			alloclen += sizeof(struct frag_hdr);

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len, 1,
							   sk->sk_allocation);
				if (unlikely(!skb))
					err = -ENOBUFS;
			}
			if (!skb)
				goto error;
			/*
			 *	Fill in the control structures
			 */
			skb->protocol = htons(ETH_P_IPV6);
			skb->ip_summed = csummode;
			skb->csum = 0;
			/* reserve for fragmentation and ipsec header */
			skb_reserve(skb, hh_len + sizeof(struct frag_hdr) +
				    dst_exthdrlen);

			/* Only the initial fragment is time stamped */
			skb_shinfo(skb)->tx_flags = tx_flags;
			tx_flags = 0;
			skb_shinfo(skb)->tskey = tskey;
			tskey = 0;

			/*
			 *	Find where to start putting bytes
			 */
			data = skb_put(skb, fraglen);
			skb_set_network_header(skb, exthdrlen);
			data += fragheaderlen;
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}
			copy = datalen - transhdrlen - fraggap;

			if (copy < 0) {
				err = -EINVAL;
				kfree_skb(skb);
				goto error;
			} else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			dst_exthdrlen = 0;

			/*
			 * Put the packet on the pending queue
			 */
			__skb_queue_tail(queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
						offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;

			err = -ENOMEM;
			if (!sk_page_frag_refill(sk, pfrag))
				goto error;

			if (!skb_can_coalesce(skb, i, pfrag->page,
					      pfrag->offset)) {
				err = -EMSGSIZE;
				if (i == MAX_SKB_FRAGS)
					goto error;

				__skb_fill_page_desc(skb, i, pfrag->page,
						     pfrag->offset, 0);
				skb_shinfo(skb)->nr_frags = ++i;
				get_page(pfrag->page);
			}
			copy = min_t(int, copy, pfrag->size - pfrag->offset);
			if (getfrag(from,
				    page_address(pfrag->page) + pfrag->offset,
				    offset, copy, skb->len, skb) < 0)
				goto error_efault;

			pfrag->offset += copy;
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			atomic_add(copy, &sk->sk_wmem_alloc);
		}
		offset += copy;
		length -= copy;
	}

	return 0;

error_efault:
	err = -EFAULT;
error:
	cork->length -= length;
	IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	return err;
}
Example #18
/**
 * Release the write lock on the hash table.
 *
 * @param table The hash table to unlock
 */
static void
hashtable_write_unlock(HASHTABLE *table)
{
	atomic_add(&table->writelock, -1);
}
Example #19
void zfs_do_throttle(zfs_throttle_t *zt, bool is_write, uint64_t size) {
	int64_t _wait;
	uint64_t _now;
	uint64_t data;
	struct semaphore *sem;

	if (zt->is_enabled) {
		if (is_write) {
			if (zt->z_prop_write_bytes > 0) {
				sem = zt->z_sem_real_write;
				down(sem);
				atomic_add(size, &(zt->z_real_write_bytes));
				data = atomic_read(&(zt->z_real_write_bytes));
				//dprintf("------- write bytes throttle 0: %llu\n", data);
				if (data > zt->z_prop_write_bytes) {
					if (zt->z_write_timestamp > 0) {
						_now = now();
						_wait = ((data * 1000000) / zt->z_prop_write_bytes)
								- ((_now - zt->z_write_timestamp) / 1000);
						//dprintf("------- write bytes throttle 1: %llu %llu %llu %llu %lld\n", data, zt->z_prop_write_bytes, _now, zt->z_write_timestamp, _wait);
						if (_wait > 0) {
							usleep_range(_wait, _wait);
						}
					}
					atomic_set(&(zt->z_real_write_bytes), 0);
					zt->z_write_timestamp = now();
				}
				up(sem);
			} else {
				if (zt->z_prop_write_iops > 0) {
					sem = zt->z_sem_real_write;
					down(sem);
					atomic_add(1, &(zt->z_real_write_iops));
					data = atomic_read(&(zt->z_real_write_iops));
					//dprintf("------- write iops throttle 0: %llu\n", data);
					if (data > zt->z_prop_write_iops) {
						if (zt->z_write_timestamp > 0) {
							_now = now();
							_wait = ((data * 1000000) / zt->z_prop_write_iops)
									- ((_now - zt->z_write_timestamp) / 1000);
							//dprintf("------- write iops throttle 1: %llu %llu %llu %llu %lld\n", data, zt->z_prop_write_iops, _now, zt->z_write_timestamp, _wait);
							if (_wait > 0) {
								usleep_range(_wait, _wait);
							}
						}
						atomic_set(&(zt->z_real_write_iops), 0);
						zt->z_write_timestamp = now();
					}
					up(sem);
				}
			}
		} else {
			if (zt->z_prop_read_bytes > 0) {
				sem = zt->z_sem_real_read;
				down(sem);
				atomic_add(size, &(zt->z_real_read_bytes));
				data = atomic_read(&(zt->z_real_read_bytes));
				//dprintf("------- read bytes throttle 0: %llu\n", data);
				if (data > zt->z_prop_read_bytes) {
					if (zt->z_read_timestamp > 0) {
						_now = now();
						_wait = ((data * 1000000) / zt->z_prop_read_bytes)
								- ((_now - zt->z_read_timestamp) / 1000);
						//dprintf("------- read bytes throttle 1: %llu %llu %llu %llu %lld\n", data, zt->z_prop_read_bytes, _now, zt->z_read_timestamp, _wait);
						if (_wait > 0) {
							usleep_range(_wait, _wait);
						}
					}
					atomic_set(&(zt->z_real_read_bytes), 0);
					zt->z_read_timestamp = now();
				}
				up(sem);
			} else {
				if (zt->z_prop_read_iops > 0) {
					sem = zt->z_sem_real_read;
					down(sem);
					atomic_add(1, &(zt->z_real_read_iops));
					data = atomic_read(&(zt->z_real_read_iops));
					//dprintf("------- read iops throttle 0: %llu\n", data);
					if (data > zt->z_prop_read_iops) {
						if (zt->z_read_timestamp > 0) {
							_now = now();
							_wait = ((data * 1000000) / zt->z_prop_read_iops)
									- ((_now - zt->z_read_timestamp) / 1000);
							//dprintf("------- read iops throttle 1: %llu %llu %llu %llu %lld\n", data, zt->z_prop_read_iops, _now, zt->z_read_timestamp, _wait);
							if (_wait > 0) {
								usleep_range(_wait, _wait);
							}
						}
						atomic_set(&(zt->z_real_read_iops), 0);
						zt->z_read_timestamp = now();
					}
					up(sem);
				}
			}
		}
	}
	// update counters
	if (is_write) {
		zt->z_io_serviced->write_bytes.value.ui64 += size;
		zt->z_io_serviced->write_iops.value.ui64++;
	} else {
		zt->z_io_serviced->read_bytes.value.ui64 += size;
		zt->z_io_serviced->read_iops.value.ui64++;
	}
}
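
The delay computation above compares how long the accumulated bytes (or IOPS) should have taken at the configured limit with how long they actually took, and sleeps for the shortfall. A worked restatement as a standalone helper (hypothetical, not ZFS code; timestamps are assumed to be in nanoseconds, as the /1000 scaling in the original suggests):

#include <cstdint>

// Hypothetical helper restating the delay math from zfs_do_throttle():
// `limit` is bytes (or ops) per second, `data` is the amount accumulated since
// `then_ns`; nanosecond timestamps give a result in microseconds, ready for
// usleep_range(). Sleep only when the result is positive.
static int64_t
throttle_delay_us(uint64_t data, uint64_t limit, uint64_t now_ns, uint64_t then_ns)
{
	int64_t ideal_us   = (int64_t)((data * 1000000) / limit);	// time `data` should have taken
	int64_t elapsed_us = (int64_t)((now_ns - then_ns) / 1000);	// time it actually took
	return ideal_us - elapsed_us;
}
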
Example #20
/*
 * Decrements the reference-counter and
 * tests whether it became zero.
 *
 * @return 1 reference-counter became zero
 * @return 0 reference-counter didn't become zero
 */
int
ieee80211_node_dectestref(struct ieee80211_node* ni)
{
	// atomic_add returns old value
	return atomic_add((vint32*)&ni->ni_refcnt, -1) == 1;
}
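A typical caller only reclaims the node when this helper reports that the last reference was dropped. The sketch below assumes a hypothetical cleanup function; it is not taken from the surrounding driver:

/* Hypothetical usage sketch: drop one reference and reclaim the node
 * only if this was the last one. free_node_storage() is assumed. */
static void
release_node(struct ieee80211_node* ni)
{
	if (ieee80211_node_dectestref(ni))
		free_node_storage(ni);	/* assumed cleanup helper */
}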
Example #21
0
long do_msgsnd(int msqid, long mtype, void __user *mtext,
		size_t msgsz, int msgflg)
{
	struct msg_queue *msq;
	struct msg_msg *msg;
	int err;
	struct ipc_namespace *ns;

	ns = current->nsproxy->ipc_ns;

	if (msgsz > ns->msg_ctlmax || (long) msgsz < 0 || msqid < 0)
		return -EINVAL;
	if (mtype < 1)
		return -EINVAL;

	msg = load_msg(mtext, msgsz);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->m_type = mtype;
	msg->m_ts = msgsz;

	msq = msg_lock_check(ns, msqid);
	if (IS_ERR(msq)) {
		err = PTR_ERR(msq);
		goto out_free;
	}

	for (;;) {
		struct msg_sender s;

		err = -EACCES;
		if (ipcperms(ns, &msq->q_perm, S_IWUGO))
			goto out_unlock_free;

		err = security_msg_queue_msgsnd(msq, msg, msgflg);
		if (err)
			goto out_unlock_free;

		if (msgsz + msq->q_cbytes <= msq->q_qbytes &&
				1 + msq->q_qnum <= msq->q_qbytes) {
			break;
		}

		/* queue full, wait: */
		if (msgflg & IPC_NOWAIT) {
			err = -EAGAIN;
			goto out_unlock_free;
		}
		ss_add(msq, &s);
		ipc_rcu_getref(msq);
		msg_unlock(msq);
		schedule();

		ipc_lock_by_ptr(&msq->q_perm);
		ipc_rcu_putref(msq);
		if (msq->q_perm.deleted) {
			err = -EIDRM;
			goto out_unlock_free;
		}
		ss_del(&s);

		if (signal_pending(current)) {
			err = -ERESTARTNOHAND;
			goto out_unlock_free;
		}
	}

	msq->q_lspid = task_tgid_vnr(current);
	msq->q_stime = get_seconds();

	if (!pipelined_send(msq, msg)) {
		/* no one is waiting for this message, enqueue it */
		list_add_tail(&msg->m_list, &msq->q_messages);
		msq->q_cbytes += msgsz;
		msq->q_qnum++;
		atomic_add(msgsz, &ns->msg_bytes);
		atomic_inc(&ns->msg_hdrs);
	}

	err = 0;
	msg = NULL;

out_unlock_free:
	msg_unlock(msq);
out_free:
	if (msg != NULL)
		free_msg(msg);
	return err;
}
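do_msgsnd() is the kernel side of the System V msgsnd(2) call. From user space the same path is exercised with the standard SysV IPC API (queue key, permissions, and message layout below are arbitrary example choices):

#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/msg.h>

struct my_msg {
	long mtype;		/* must be > 0, as do_msgsnd() checks */
	char mtext[64];		/* payload; msgsz covers only this part */
};

int main(void)
{
	int msqid = msgget(IPC_PRIVATE, IPC_CREAT | 0600);
	struct my_msg msg = { .mtype = 1 };

	if (msqid == -1) {
		perror("msgget");
		return 1;
	}
	strcpy(msg.mtext, "hello");
	/* With IPC_NOWAIT a full queue returns -1/EAGAIN instead of
	 * blocking in the for (;;) loop seen in do_msgsnd(). */
	if (msgsnd(msqid, &msg, sizeof(msg.mtext), IPC_NOWAIT) == -1)
		perror("msgsnd");
	return 0;
}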
Example #22
0
static int clip_start_xmit(struct sk_buff *skb,struct net_device *dev)
{
	struct clip_priv *clip_priv = PRIV(dev);
	struct atmarp_entry *entry;
	struct atm_vcc *vcc;
	int old;
	unsigned long flags;

	DPRINTK("clip_start_xmit (skb %p)\n",skb);
	if (!skb->dst) {
		printk(KERN_ERR "clip_start_xmit: skb->dst == NULL\n");
		dev_kfree_skb(skb);
		clip_priv->stats.tx_dropped++;
		return 0;
	}
	if (!skb->dst->neighbour) {
#if 0
		skb->dst->neighbour = clip_find_neighbour(skb->dst,1);
		if (!skb->dst->neighbour) {
			dev_kfree_skb(skb); /* lost that one */
			clip_priv->stats.tx_dropped++;
			return 0;
		}
#endif
		printk(KERN_ERR "clip_start_xmit: NO NEIGHBOUR !\n");
		dev_kfree_skb(skb);
		clip_priv->stats.tx_dropped++;
		return 0;
	}
	entry = NEIGH2ENTRY(skb->dst->neighbour);
	if (!entry->vccs) {
		if (time_after(jiffies, entry->expires)) {
			/* should be resolved */
			entry->expires = jiffies+ATMARP_RETRY_DELAY*HZ;
			to_atmarpd(act_need,PRIV(dev)->number,entry->ip);
		}
		if (entry->neigh->arp_queue.qlen < ATMARP_MAX_UNRES_PACKETS)
			skb_queue_tail(&entry->neigh->arp_queue,skb);
		else {
			dev_kfree_skb(skb);
			clip_priv->stats.tx_dropped++;
		}
		return 0;
	}
	DPRINTK("neigh %p, vccs %p\n",entry,entry->vccs);
	ATM_SKB(skb)->vcc = vcc = entry->vccs->vcc;
	DPRINTK("using neighbour %p, vcc %p\n",skb->dst->neighbour,vcc);
	if (entry->vccs->encap) {
		void *here;

		here = skb_push(skb,RFC1483LLC_LEN);
		memcpy(here,llc_oui,sizeof(llc_oui));
		((u16 *) here)[3] = skb->protocol;
	}
	atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
	ATM_SKB(skb)->atm_options = vcc->atm_options;
	entry->vccs->last_use = jiffies;
	DPRINTK("atm_skb(%p)->vcc(%p)->dev(%p)\n",skb,vcc,vcc->dev);
	old = xchg(&entry->vccs->xoff,1); /* assume XOFF ... */
	if (old) {
		printk(KERN_WARNING "clip_start_xmit: XOFF->XOFF transition\n");
		return 0;
	}
	clip_priv->stats.tx_packets++;
	clip_priv->stats.tx_bytes += skb->len;
	(void) vcc->send(vcc,skb);
	if (atm_may_send(vcc,0)) {
		entry->vccs->xoff = 0;
		return 0;
	}
	spin_lock_irqsave(&clip_priv->xoff_lock,flags);
	netif_stop_queue(dev); /* XOFF -> throttle immediately */
	barrier();
	if (!entry->vccs->xoff)
		netif_start_queue(dev);
		/* Oh, we just raced with clip_pop. netif_start_queue should be
		   good enough, because nothing should really be asleep because
		   of the brief netif_stop_queue. If this isn't true or if it
		   changes, use netif_wake_queue instead. */
	spin_unlock_irqrestore(&clip_priv->xoff_lock,flags);
	return 0;
}
Example #23
0
static int clip_start_xmit(struct sk_buff *skb, knet_netdev_t *dev)
{
    struct clip_priv *clip_priv = PRIV(dev);
    struct atmarp_entry *entry;
    struct atm_vcc *vcc;
    int old;
    unsigned long flags;

    DPRINTK("clip_start_xmit (skb %p)\n", skb);
    if (!skb->dst)
    {
	printk(KERN_ERR "clip_start_xmit: skb->dst == NULL\n");
	dev_kfree_skb(skb);
	clip_priv->stats.tx_dropped++;
	return 0;
    }
    if (!skb->dst->neighbour)
    {
	printk(KERN_ERR "clip_start_xmit: NO NEIGHBOUR!\n");
	dev_kfree_skb(skb);
	clip_priv->stats.tx_dropped++;
	return 0;
    }
    entry = NEIGH2ENTRY(skb->dst->neighbour);
    if (!entry->vccs)
    {
	if (time_after(jiffies, entry->expires))
	{
	    /* should be resolved */
	    entry->expires = jiffies + ATMARP_RETRY_DELAY * HZ;
	} 
	if (entry->neigh->arp_queue.qlen < ATMARP_MAX_UNRES_PACKETS)
	    skb_queue_tail(&entry->neigh->arp_queue, skb);
	else
	{
	    dev_kfree_skb(skb);
	    clip_priv->stats.tx_dropped++;
	}

	/* If a vcc has not been resolved for a long time, it sends an InARP
	 * packet every 5 minutes. But if the other side has connected by now,
	 * we do not want to wait.
	 */
	all_clip_vccs_start_resolving();
	return 0;
    }
    DPRINTK("neigh %p, vccs %p\n", entry, entry->vccs);
    ATM_SKB(skb)->vcc = vcc = entry->vccs->vcc;
    DPRINTK("using neighbour %p, vcc %p\n", skb->dst->neighbour, vcc);
    if (entry->vccs->encap)
    {
	void *here;

	here = skb_push(skb, RFC1483LLC_LEN);
	memcpy(here, llc_oui, sizeof(llc_oui));
	((u16 *) here)[3] = skb->protocol;
    }
    atomic_add(skb->truesize, &vcc->tx_inuse);
    ATM_SKB(skb)->iovcnt = 0;
    ATM_SKB(skb)->atm_options = vcc->atm_options;
    entry->vccs->last_use = jiffies;
    DPRINTK("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, vcc,vcc->dev);
    old = xchg(&entry->vccs->xoff, 1); /* assume XOFF ... */
    if (old)
    {
	printk(KERN_WARNING "clip_start_xmit: XOFF->XOFF transition\n");
	return 0;
    }
    clip_priv->stats.tx_packets++;
    clip_priv->stats.tx_bytes += skb->len;
    vcc->dev->ops->send(vcc, skb);
    if (atm_may_send(vcc, 0))
    {
	entry->vccs->xoff = 0;
	return 0;
    }
    spin_lock_irqsave(&clip_priv->xoff_lock, flags);
    knet_netdev_stop_queue(dev); /* XOFF -> throttle immediately */
    barrier();
    if (!entry->vccs->xoff)
	knet_netdev_start_queue(dev);
    /* Oh, we just raced with clip_pop. netif_start_queue should be
       good enough, because nothing should really be asleep because
       of the brief netif_stop_queue. If this isn't true or if it
       changes, use netif_wake_queue instead. */
    spin_unlock_irqrestore(&clip_priv->xoff_lock, flags);
    return 0;
}
Example #24
0
/* Add new segment to existing queue. */
static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	int flags, offset;
	int ihl, end;
	int err = -ENOENT;
	u8 ecn;

	if (qp->q.last_in & INET_FRAG_COMPLETE)
		goto err;

	if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) &&
	    unlikely(ip_frag_too_far(qp)) &&
	    unlikely(err = ip_frag_reinit(qp))) {
		ipq_kill(qp);
		goto err;
	}

	ecn = ip4_frag_ecn(ip_hdr(skb)->tos);
	offset = ntohs(ip_hdr(skb)->frag_off);
	flags = offset & ~IP_OFFSET;
	offset &= IP_OFFSET;
	offset <<= 3;		/* offset is in 8-byte chunks */
	ihl = ip_hdrlen(skb);

	/* Determine the position of this fragment. */
	end = offset + skb->len - ihl;
	err = -EINVAL;

	/* Is this the final fragment? */
	if ((flags & IP_MF) == 0) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < qp->q.len ||
		    ((qp->q.last_in & INET_FRAG_LAST_IN) && end != qp->q.len))
			goto err;
		qp->q.last_in |= INET_FRAG_LAST_IN;
		qp->q.len = end;
	} else {
		if (end&7) {
			end &= ~7;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
		if (end > qp->q.len) {
			/* Some bits beyond end -> corruption. */
			if (qp->q.last_in & INET_FRAG_LAST_IN)
				goto err;
			qp->q.len = end;
		}
	}
	if (end == offset)
		goto err;

	err = -ENOMEM;
	if (pskb_pull(skb, ihl) == NULL)
		goto err;

	err = pskb_trim_rcsum(skb, end - offset);
	if (err)
		goto err;

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = qp->q.fragments_tail;
	if (!prev || FRAG_CB(prev)->offset < offset) {
		next = NULL;
		goto found;
	}
	prev = NULL;
	for (next = qp->q.fragments; next != NULL; next = next->next) {
		if (FRAG_CB(next)->offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

found:
	/* We found where to put this one.  Check for overlap with
	 * preceding fragment, and, if needed, align things so that
	 * any overlaps are eliminated.
	 */
	if (prev) {
		int i = (FRAG_CB(prev)->offset + prev->len) - offset;

		if (i > 0) {
			offset += i;
			err = -EINVAL;
			if (end <= offset)
				goto err;
			err = -ENOMEM;
			if (!pskb_pull(skb, i))
				goto err;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
	}

	err = -ENOMEM;

	while (next && FRAG_CB(next)->offset < end) {
		int i = end - FRAG_CB(next)->offset; /* overlap is 'i' bytes */

		if (i < next->len) {
			/* Eat head of the next overlapped fragment
			 * and leave the loop. The next ones cannot overlap.
			 */
			if (!pskb_pull(next, i))
				goto err;
			FRAG_CB(next)->offset += i;
			qp->q.meat -= i;
			if (next->ip_summed != CHECKSUM_UNNECESSARY)
				next->ip_summed = CHECKSUM_NONE;
			break;
		} else {
			struct sk_buff *free_it = next;

			/* Old fragment is completely overridden by the
			 * new one; drop it.
			 */
			next = next->next;

			if (prev)
				prev->next = next;
			else
				qp->q.fragments = next;

			qp->q.meat -= free_it->len;
			frag_kfree_skb(qp->q.net, free_it);
		}
	}

	FRAG_CB(skb)->offset = offset;

	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (!next)
		qp->q.fragments_tail = skb;
	if (prev)
		prev->next = skb;
	else
		qp->q.fragments = skb;

	dev = skb->dev;
	if (dev) {
		qp->iif = dev->ifindex;
		skb->dev = NULL;
	}
	qp->q.stamp = skb->tstamp;
	qp->q.meat += skb->len;
	qp->ecn |= ecn;
	atomic_add(skb->truesize, &qp->q.net->mem);
	if (offset == 0)
		qp->q.last_in |= INET_FRAG_FIRST_IN;

	if (qp->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    qp->q.meat == qp->q.len)
		return ip_frag_reasm(qp, prev, dev);

	write_lock(&ip4_frags.lock);
	list_move_tail(&qp->q.lru_list, &qp->q.net->lru_list);
	write_unlock(&ip4_frags.lock);
	return -EINPROGRESS;

err:
	kfree_skb(skb);
	return err;
}
Example #25
0
int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
	int offset, int len, int odd, struct sk_buff *skb),
	void *from, int length, int transhdrlen,
	int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6,
	struct rt6_info *rt, unsigned int flags, int dontfrag)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct inet_cork *cork;
	struct sk_buff *skb, *skb_prev = NULL;
	unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu;
	int exthdrlen;
	int dst_exthdrlen;
	int hh_len;
	int copy;
	int err;
	int offset = 0;
	__u8 tx_flags = 0;

	if (flags&MSG_PROBE)
		return 0;
	cork = &inet->cork.base;
	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking
		 */
		if (opt) {
			if (WARN_ON(np->cork.opt))
				return -EINVAL;

			np->cork.opt = kzalloc(opt->tot_len, sk->sk_allocation);
			if (unlikely(np->cork.opt == NULL))
				return -ENOBUFS;

			np->cork.opt->tot_len = opt->tot_len;
			np->cork.opt->opt_flen = opt->opt_flen;
			np->cork.opt->opt_nflen = opt->opt_nflen;

			np->cork.opt->dst0opt = ip6_opt_dup(opt->dst0opt,
							    sk->sk_allocation);
			if (opt->dst0opt && !np->cork.opt->dst0opt)
				return -ENOBUFS;

			np->cork.opt->dst1opt = ip6_opt_dup(opt->dst1opt,
							    sk->sk_allocation);
			if (opt->dst1opt && !np->cork.opt->dst1opt)
				return -ENOBUFS;

			np->cork.opt->hopopt = ip6_opt_dup(opt->hopopt,
							   sk->sk_allocation);
			if (opt->hopopt && !np->cork.opt->hopopt)
				return -ENOBUFS;

			np->cork.opt->srcrt = ip6_rthdr_dup(opt->srcrt,
							    sk->sk_allocation);
			if (opt->srcrt && !np->cork.opt->srcrt)
				return -ENOBUFS;

			/* need source address above miyazawa*/
		}
		dst_hold(&rt->dst);
		cork->dst = &rt->dst;
		inet->cork.fl.u.ip6 = *fl6;
		np->cork.hop_limit = hlimit;
		np->cork.tclass = tclass;
		if (rt->dst.flags & DST_XFRM_TUNNEL)
			mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
			      rt->dst.dev->mtu : dst_mtu(&rt->dst);
		else
			mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
			      rt->dst.dev->mtu : dst_mtu(rt->dst.path);
		if (np->frag_size < mtu) {
			if (np->frag_size)
				mtu = np->frag_size;
		}
		cork->fragsize = mtu;
		if (dst_allfrag(rt->dst.path))
			cork->flags |= IPCORK_ALLFRAG;
		cork->length = 0;
		exthdrlen = (opt ? opt->opt_flen : 0);
		length += exthdrlen;
		transhdrlen += exthdrlen;
		dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
	} else {
		rt = (struct rt6_info *)cork->dst;
		fl6 = &inet->cork.fl.u.ip6;
		opt = np->cork.opt;
		transhdrlen = 0;
		exthdrlen = 0;
		dst_exthdrlen = 0;
		mtu = cork->fragsize;
	}
	orig_mtu = mtu;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);

	fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
			(opt ? opt->opt_nflen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
		     sizeof(struct frag_hdr);

	if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
		unsigned int maxnonfragsize, headersize;

		headersize = sizeof(struct ipv6hdr) +
			     (opt ? opt->tot_len : 0) +
			     (dst_allfrag(&rt->dst) ?
			      sizeof(struct frag_hdr) : 0) +
			     rt->rt6i_nfheader_len;

		if (ip6_sk_local_df(sk))
			maxnonfragsize = sizeof(struct ipv6hdr) + IPV6_MAXPLEN;
		else
			maxnonfragsize = mtu;

		/* dontfrag active */
		if ((cork->length + length > mtu - headersize) && dontfrag &&
		    (sk->sk_protocol == IPPROTO_UDP ||
		     sk->sk_protocol == IPPROTO_RAW)) {
			ipv6_local_rxpmtu(sk, fl6, mtu - headersize +
						   sizeof(struct ipv6hdr));
			goto emsgsize;
		}

		if (cork->length + length > maxnonfragsize - headersize) {
emsgsize:
			ipv6_local_error(sk, EMSGSIZE, fl6,
					 mtu - headersize +
					 sizeof(struct ipv6hdr));
			return -EMSGSIZE;
		}
	}

	/* For UDP, check if TX timestamp is enabled */
	if (sk->sk_type == SOCK_DGRAM)
		sock_tx_timestamp(sk, &tx_flags);

	/*
	 * Let's try using as much space as possible.
	 * Use MTU if total length of the message fits into the MTU.
	 * Otherwise, we need to reserve fragment header and
	 * fragment alignment (= 8-15 octets, in total).
	 *
	 * Note that we may need to "move" the data from the tail of
	 * the buffer to the new fragment when we split
	 * the message.
	 *
	 * FIXME: It may be fragmented into multiple chunks
	 *        at once if non-fragmentable extension headers
	 *        are too large.
	 * --yoshfuji
	 */

	skb = skb_peek_tail(&sk->sk_write_queue);
	cork->length += length;
	if (((length > mtu) ||
	     (skb && skb_is_gso(skb))) &&
	    (sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->dst.dev->features & NETIF_F_UFO)) {
		err = ip6_ufo_append_data(sk, getfrag, from, length,
					  hh_len, fragheaderlen,
					  transhdrlen, mtu, flags, rt);
		if (err)
			goto error;
		return 0;
	}

	if (!skb)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;

		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
alloc_new_skb:
			/* There's no room in the current skb */
			if (skb)
				fraggap = skb->len - maxfraglen;
			else
				fraggap = 0;
			/* update mtu and maxfraglen if necessary */
			if (skb == NULL || skb_prev == NULL)
				ip6_append_data_mtu(&mtu, &maxfraglen,
						    fragheaderlen, skb, rt,
						    orig_mtu);

			skb_prev = skb;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;

			if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
				datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
			if ((flags & MSG_MORE) &&
			    !(rt->dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			alloclen += dst_exthdrlen;

			if (datalen != length + fraggap) {
				/*
				 * this is not the last fragment, the trailer
				 * space is regarded as data space.
				 */
				datalen += rt->dst.trailer_len;
			}

			alloclen += rt->dst.trailer_len;
			fraglen = datalen + fragheaderlen;

			/*
			 * We just reserve space for fragment header.
			 * Note: this may be overallocation if the message
			 * (without MSG_MORE) fits into the MTU.
			 */
			alloclen += sizeof(struct frag_hdr);

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
				else {
					/* Only the initial fragment
					 * is time stamped.
					 */
					tx_flags = 0;
				}
			}
			if (skb == NULL)
				goto error;
			/*
			 *	Fill in the control structures
			 */
			skb->protocol = htons(ETH_P_IPV6);
			skb->ip_summed = CHECKSUM_NONE;
			skb->csum = 0;
			/* reserve for fragmentation and ipsec header */
			skb_reserve(skb, hh_len + sizeof(struct frag_hdr) +
				    dst_exthdrlen);

			if (sk->sk_type == SOCK_DGRAM)
				skb_shinfo(skb)->tx_flags = tx_flags;

			/*
			 *	Find where to start putting bytes
			 */
			data = skb_put(skb, fraglen);
			skb_set_network_header(skb, exthdrlen);
			data += fragheaderlen;
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}
			copy = datalen - transhdrlen - fraggap;

			if (copy < 0) {
				err = -EINVAL;
				kfree_skb(skb);
				goto error;
			} else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			dst_exthdrlen = 0;

			/*
			 * Put the packet on the pending queue
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
						offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			struct page_frag *pfrag = sk_page_frag(sk);

			err = -ENOMEM;
			if (!sk_page_frag_refill(sk, pfrag))
				goto error;

			if (!skb_can_coalesce(skb, i, pfrag->page,
					      pfrag->offset)) {
				err = -EMSGSIZE;
				if (i == MAX_SKB_FRAGS)
					goto error;

				__skb_fill_page_desc(skb, i, pfrag->page,
						     pfrag->offset, 0);
				skb_shinfo(skb)->nr_frags = ++i;
				get_page(pfrag->page);
			}
			copy = min_t(int, copy, pfrag->size - pfrag->offset);
			if (getfrag(from,
				    page_address(pfrag->page) + pfrag->offset,
				    offset, copy, skb->len, skb) < 0)
				goto error_efault;

			pfrag->offset += copy;
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			atomic_add(copy, &sk->sk_wmem_alloc);
		}
		offset += copy;
		length -= copy;
	}

	return 0;

error_efault:
	err = -EFAULT;
error:
	cork->length -= length;
	IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	return err;
}
Example #26
0
static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
			 struct net_device *dev)
{
	struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
	struct iphdr *iph;
	struct sk_buff *fp, *head = qp->q.fragments;
	int len;
	int ihlen;
	int err;
	u8 ecn;

	ipq_kill(qp);

	ecn = ip4_frag_ecn_table[qp->ecn];
	if (unlikely(ecn == 0xff)) {
		err = -EINVAL;
		goto out_fail;
	}
	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);
		if (!fp)
			goto out_nomem;

		fp->next = head->next;
		if (!fp->next)
			qp->q.fragments_tail = fp;
		prev->next = fp;

		skb_morph(head, qp->q.fragments);
		head->next = qp->q.fragments->next;

		kfree_skb(qp->q.fragments);
		qp->q.fragments = head;
	}

	WARN_ON(head == NULL);
	WARN_ON(FRAG_CB(head)->offset != 0);

	/* Allocate a new buffer for the datagram. */
	ihlen = ip_hdrlen(head);
	len = ihlen + qp->q.len;

	err = -E2BIG;
	if (len > 65535)
		goto out_oversize;

	/* Head of list must not be cloned. */
	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
		goto out_nomem;

	/* If the first fragment is fragmented itself, we split
	 * it into two chunks: the first with data and paged part
	 * and the second, holding only fragments. */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
			goto out_nomem;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i=0; i<skb_shinfo(head)->nr_frags; i++)
			plen += skb_shinfo(head)->frags[i].size;
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		atomic_add(clone->truesize, &qp->q.net->mem);
	}

	skb_shinfo(head)->frag_list = head->next;
	skb_push(head, head->data - skb_network_header(head));

	for (fp=head->next; fp; fp = fp->next) {
		head->data_len += fp->len;
		head->len += fp->len;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);
		head->truesize += fp->truesize;
	}
	atomic_sub(head->truesize, &qp->q.net->mem);

	head->next = NULL;
	head->dev = dev;
	head->tstamp = qp->q.stamp;

	iph = ip_hdr(head);
	iph->frag_off = 0;
	iph->tot_len = htons(len);
	iph->tos |= ecn;
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
	qp->q.fragments = NULL;
	qp->q.fragments_tail = NULL;
	return 0;

out_nomem:
	LIMIT_NETDEBUG(KERN_ERR "IP: queue_glue: no memory for gluing "
			      "queue %p\n", qp);
	err = -ENOMEM;
	goto out_fail;
out_oversize:
	if (net_ratelimit())
		printk(KERN_INFO "Oversized IP packet from %pI4.\n",
			&qp->saddr);
out_fail:
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
	return err;
}
Example #27
0
static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
			     struct frag_hdr *fhdr, int nhoff)
{
	struct sk_buff *prev, *next;
	int offset, end;

	if (fq->last_in & COMPLETE) {
		DEBUGP("Allready completed\n");
		goto err;
	}

	offset = ntohs(fhdr->frag_off) & ~0x7;
	end = offset + (ntohs(skb->nh.ipv6h->payload_len) -
			((u8 *) (fhdr + 1) - (u8 *) (skb->nh.ipv6h + 1)));

	if ((unsigned int)end > IPV6_MAXPLEN) {
		DEBUGP("offset is too large.\n");
 		return -1;
	}

 	if (skb->ip_summed == CHECKSUM_HW)
 		skb->csum = csum_sub(skb->csum,
 				     csum_partial(skb->nh.raw,
						  (u8*)(fhdr + 1) - skb->nh.raw,
						  0));

	/* Is this the final fragment? */
	if (!(fhdr->frag_off & htons(IP6_MF))) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < fq->len ||
		    ((fq->last_in & LAST_IN) && end != fq->len)) {
			DEBUGP("already received last fragment\n");
			goto err;
		}
		fq->last_in |= LAST_IN;
		fq->len = end;
	} else {
		/* Check if the fragment is rounded to 8 bytes.
		 * Required by the RFC.
		 */
		if (end & 0x7) {
			/* RFC2460 says always send parameter problem in
			 * this case. -DaveM
			 */
			DEBUGP("the end of this fragment is not rounded to 8 bytes.\n");
			return -1;
		}
		if (end > fq->len) {
			/* Some bits beyond end -> corruption. */
			if (fq->last_in & LAST_IN) {
				DEBUGP("last packet already reached.\n");
				goto err;
			}
			fq->len = end;
		}
	}

	if (end == offset)
		goto err;

	/* Point into the IP datagram 'data' part. */
	if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data)) {
		DEBUGP("queue: message is too short.\n");
		goto err;
	}
	if (end-offset < skb->len) {
		if (pskb_trim(skb, end - offset)) {
			DEBUGP("Can't trim\n");
			goto err;
		}
		if (skb->ip_summed != CHECKSUM_UNNECESSARY)
			skb->ip_summed = CHECKSUM_NONE;
	}

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = NULL;
	for (next = fq->fragments; next != NULL; next = next->next) {
		if (NFCT_FRAG6_CB(next)->offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

	/* We found where to put this one.  Check for overlap with
	 * preceding fragment, and, if needed, align things so that
	 * any overlaps are eliminated.
	 */
	if (prev) {
		int i = (NFCT_FRAG6_CB(prev)->offset + prev->len) - offset;

		if (i > 0) {
			offset += i;
			if (end <= offset) {
				DEBUGP("overlap\n");
				goto err;
			}
			if (!pskb_pull(skb, i)) {
				DEBUGP("Can't pull\n");
				goto err;
			}
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
	}

	/* Look for overlap with succeeding segments.
	 * If we can merge fragments, do it.
	 */
	while (next && NFCT_FRAG6_CB(next)->offset < end) {
		/* overlap is 'i' bytes */
		int i = end - NFCT_FRAG6_CB(next)->offset;

		if (i < next->len) {
			/* Eat head of the next overlapped fragment
			 * and leave the loop. The next ones cannot overlap.
			 */
			DEBUGP("Eat head of the overlapped parts.: %d", i);
			if (!pskb_pull(next, i))
				goto err;

			/* next fragment */
			NFCT_FRAG6_CB(next)->offset += i;
			fq->meat -= i;
			if (next->ip_summed != CHECKSUM_UNNECESSARY)
				next->ip_summed = CHECKSUM_NONE;
			break;
		} else {
			struct sk_buff *free_it = next;

			/* Old fragment is completely overridden by the
			 * new one; drop it.
			 */
			next = next->next;

			if (prev)
				prev->next = next;
			else
				fq->fragments = next;

			fq->meat -= free_it->len;
			frag_kfree_skb(free_it, NULL);
		}
	}

	NFCT_FRAG6_CB(skb)->offset = offset;

	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (prev)
		prev->next = skb;
	else
		fq->fragments = skb;

	skb->dev = NULL;
	skb_get_timestamp(skb, &fq->stamp);
	fq->meat += skb->len;
	atomic_add(skb->truesize, &nf_ct_frag6_mem);

	/* The first fragment.
	 * nhoffset is obtained from the first fragment, of course.
	 */
	if (offset == 0) {
		fq->nhoffset = nhoff;
		fq->last_in |= FIRST_IN;
	}
	write_lock(&nf_ct_frag6_lock);
	list_move_tail(&fq->lru_list, &nf_ct_frag6_lru_list);
	write_unlock(&nf_ct_frag6_lock);
	return 0;

err:
	return -1;
}
Example #28
0
/* replenish the buffers for a pool.  note that we don't need to
 * skb_reserve these since they are used for incoming...
 */
static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
					  struct ibmveth_buff_pool *pool)
{
	u32 i;
	u32 count = pool->size - atomic_read(&pool->available);
	u32 buffers_added = 0;
	struct sk_buff *skb;
	unsigned int free_index, index;
	u64 correlator;
	unsigned long lpar_rc;
	dma_addr_t dma_addr;

	mb();

	for (i = 0; i < count; ++i) {
		union ibmveth_buf_desc desc;

		skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);

		if (!skb) {
			netdev_dbg(adapter->netdev,
				   "replenish: unable to allocate skb\n");
			adapter->replenish_no_mem++;
			break;
		}

		free_index = pool->consumer_index;
		pool->consumer_index++;
		if (pool->consumer_index >= pool->size)
			pool->consumer_index = 0;
		index = pool->free_map[free_index];

		BUG_ON(index == IBM_VETH_INVALID_MAP);
		BUG_ON(pool->skbuff[index] != NULL);

		dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
				pool->buff_size, DMA_FROM_DEVICE);

		if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
			goto failure;

		pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
		pool->dma_addr[index] = dma_addr;
		pool->skbuff[index] = skb;

		correlator = ((u64)pool->index << 32) | index;
		*(u64 *)skb->data = correlator;

		desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
		desc.fields.address = dma_addr;

		if (rx_flush) {
			unsigned int len = min(pool->buff_size,
						adapter->netdev->mtu +
						IBMVETH_BUFF_OH);
			ibmveth_flush_buffer(skb->data, len);
		}
		lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address,
						   desc.desc);

		if (lpar_rc != H_SUCCESS) {
			goto failure;
		} else {
			buffers_added++;
			adapter->replenish_add_buff_success++;
		}
	}

	mb();
	atomic_add(buffers_added, &(pool->available));
	return;

failure:
	pool->free_map[free_index] = index;
	pool->skbuff[index] = NULL;
	if (pool->consumer_index == 0)
		pool->consumer_index = pool->size - 1;
	else
		pool->consumer_index--;
	if (!dma_mapping_error(&adapter->vdev->dev, dma_addr))
		dma_unmap_single(&adapter->vdev->dev,
		                 pool->dma_addr[index], pool->buff_size,
		                 DMA_FROM_DEVICE);
	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;

	mb();
	atomic_add(buffers_added, &(pool->available));
}
Example #29
0
int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
                    int offset, int len, int odd, struct sk_buff *skb),
                    void *from, int length, int transhdrlen,
                    int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi *fl,
                    struct rt6_info *rt, unsigned int flags)
{
    struct inet_sock *inet = inet_sk(sk);
    struct ipv6_pinfo *np = inet6_sk(sk);
    struct sk_buff *skb;
    unsigned int maxfraglen, fragheaderlen;
    int exthdrlen;
    int hh_len;
    int mtu;
    int copy;
    int err;
    int offset = 0;
    int csummode = CHECKSUM_NONE;

    if (flags&MSG_PROBE)
        return 0;
    if (skb_queue_empty(&sk->sk_write_queue)) {
        /*
         * setup for corking
         */
        if (opt) {
            if (WARN_ON(np->cork.opt))
                return -EINVAL;

            np->cork.opt = kmalloc(opt->tot_len, sk->sk_allocation);
            if (unlikely(np->cork.opt == NULL))
                return -ENOBUFS;

            np->cork.opt->tot_len = opt->tot_len;
            np->cork.opt->opt_flen = opt->opt_flen;
            np->cork.opt->opt_nflen = opt->opt_nflen;

            np->cork.opt->dst0opt = ip6_opt_dup(opt->dst0opt,
                                                sk->sk_allocation);
            if (opt->dst0opt && !np->cork.opt->dst0opt)
                return -ENOBUFS;

            np->cork.opt->dst1opt = ip6_opt_dup(opt->dst1opt,
                                                sk->sk_allocation);
            if (opt->dst1opt && !np->cork.opt->dst1opt)
                return -ENOBUFS;

            np->cork.opt->hopopt = ip6_opt_dup(opt->hopopt,
                                               sk->sk_allocation);
            if (opt->hopopt && !np->cork.opt->hopopt)
                return -ENOBUFS;

            np->cork.opt->srcrt = ip6_rthdr_dup(opt->srcrt,
                                                sk->sk_allocation);
            if (opt->srcrt && !np->cork.opt->srcrt)
                return -ENOBUFS;

            /* need source address above miyazawa*/
        }
        dst_hold(&rt->u.dst);
        inet->cork.dst = &rt->u.dst;
        inet->cork.fl = *fl;
        np->cork.hop_limit = hlimit;
        np->cork.tclass = tclass;
        mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
              rt->u.dst.dev->mtu : dst_mtu(rt->u.dst.path);
        if (np->frag_size < mtu) {
            if (np->frag_size)
                mtu = np->frag_size;
        }
        inet->cork.fragsize = mtu;
        if (dst_allfrag(rt->u.dst.path))
            inet->cork.flags |= IPCORK_ALLFRAG;
        inet->cork.length = 0;
        sk->sk_sndmsg_page = NULL;
        sk->sk_sndmsg_off = 0;
        exthdrlen = rt->u.dst.header_len + (opt ? opt->opt_flen : 0) -
                    rt->rt6i_nfheader_len;
        length += exthdrlen;
        transhdrlen += exthdrlen;
    } else {
        rt = (struct rt6_info *)inet->cork.dst;
        fl = &inet->cork.fl;
        opt = np->cork.opt;
        transhdrlen = 0;
        exthdrlen = 0;
        mtu = inet->cork.fragsize;
    }

    hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

    fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
                    (opt ? opt->opt_nflen : 0);
    maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr);

    if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
        if (inet->cork.length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) {
            ipv6_local_error(sk, EMSGSIZE, fl, mtu-exthdrlen);
            return -EMSGSIZE;
        }
    }

    /*
     * Let's try using as much space as possible.
     * Use MTU if total length of the message fits into the MTU.
     * Otherwise, we need to reserve fragment header and
     * fragment alignment (= 8-15 octets, in total).
     *
     * Note that we may need to "move" the data from the tail of
     * the buffer to the new fragment when we split
     * the message.
     *
     * FIXME: It may be fragmented into multiple chunks
     *        at once if non-fragmentable extension headers
     *        are too large.
     * --yoshfuji
     */

    inet->cork.length += length;
    if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) &&
            (rt->u.dst.dev->features & NETIF_F_UFO)) {

        err = ip6_ufo_append_data(sk, getfrag, from, length, hh_len,
                                  fragheaderlen, transhdrlen, mtu,
                                  flags, rt);
        if (err)
            goto error;
        return 0;
    }

    if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
        goto alloc_new_skb;

    while (length > 0) {
        /* Check if the remaining data fits into current packet. */
        copy = (inet->cork.length <= mtu && !(inet->cork.flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
        if (copy < length)
            copy = maxfraglen - skb->len;

        if (copy <= 0) {
            char *data;
            unsigned int datalen;
            unsigned int fraglen;
            unsigned int fraggap;
            unsigned int alloclen;
            struct sk_buff *skb_prev;
alloc_new_skb:
            skb_prev = skb;

            /* There's no room in the current skb */
            if (skb_prev)
                fraggap = skb_prev->len - maxfraglen;
            else
                fraggap = 0;

            /*
             * If remaining data exceeds the mtu,
             * we know we need more fragment(s).
             */
            datalen = length + fraggap;
            if (datalen > (inet->cork.length <= mtu && !(inet->cork.flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
                datalen = maxfraglen - fragheaderlen;

            fraglen = datalen + fragheaderlen;
            if ((flags & MSG_MORE) &&
                    !(rt->u.dst.dev->features&NETIF_F_SG))
                alloclen = mtu;
            else
                alloclen = datalen + fragheaderlen;

            /*
             * The last fragment gets additional space at tail.
             * Note: we overallocate on fragments with MSG_MORE
             * because we have no idea if we're the last one.
             */
            if (datalen == length + fraggap)
                alloclen += rt->u.dst.trailer_len;

            /*
             * We just reserve space for fragment header.
             * Note: this may be overallocation if the message
             * (without MSG_MORE) fits into the MTU.
             */
            alloclen += sizeof(struct frag_hdr);

            if (transhdrlen) {
                skb = sock_alloc_send_skb(sk,
                                          alloclen + hh_len,
                                          (flags & MSG_DONTWAIT), &err);
            } else {
                skb = NULL;
                if (atomic_read(&sk->sk_wmem_alloc) <=
                        2 * sk->sk_sndbuf)
                    skb = sock_wmalloc(sk,
                                       alloclen + hh_len, 1,
                                       sk->sk_allocation);
                if (unlikely(skb == NULL))
                    err = -ENOBUFS;
            }
            if (skb == NULL)
                goto error;
            /*
             *	Fill in the control structures
             */
            skb->ip_summed = csummode;
            skb->csum = 0;
            /* reserve for fragmentation */
            skb_reserve(skb, hh_len+sizeof(struct frag_hdr));

            /*
             *	Find where to start putting bytes
             */
            data = skb_put(skb, fraglen);
            skb_set_network_header(skb, exthdrlen);
            data += fragheaderlen;
            skb->transport_header = (skb->network_header +
                                     fragheaderlen);
            if (fraggap) {
                skb->csum = skb_copy_and_csum_bits(
                                skb_prev, maxfraglen,
                                data + transhdrlen, fraggap, 0);
                skb_prev->csum = csum_sub(skb_prev->csum,
                                          skb->csum);
                data += fraggap;
                pskb_trim_unique(skb_prev, maxfraglen);
            }
            copy = datalen - transhdrlen - fraggap;
            if (copy < 0) {
                err = -EINVAL;
                kfree_skb(skb);
                goto error;
            } else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
                err = -EFAULT;
                kfree_skb(skb);
                goto error;
            }

            offset += copy;
            length -= datalen - fraggap;
            transhdrlen = 0;
            exthdrlen = 0;
            csummode = CHECKSUM_NONE;

            /*
             * Put the packet on the pending queue
             */
            __skb_queue_tail(&sk->sk_write_queue, skb);
            continue;
        }

        if (copy > length)
            copy = length;

        if (!(rt->u.dst.dev->features&NETIF_F_SG)) {
            unsigned int off;

            off = skb->len;
            if (getfrag(from, skb_put(skb, copy),
                        offset, copy, off, skb) < 0) {
                __skb_trim(skb, off);
                err = -EFAULT;
                goto error;
            }
        } else {
            int i = skb_shinfo(skb)->nr_frags;
            skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
            struct page *page = sk->sk_sndmsg_page;
            int off = sk->sk_sndmsg_off;
            unsigned int left;

            if (page && (left = PAGE_SIZE - off) > 0) {
                if (copy >= left)
                    copy = left;
                if (page != frag->page) {
                    if (i == MAX_SKB_FRAGS) {
                        err = -EMSGSIZE;
                        goto error;
                    }
                    get_page(page);
                    skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
                    frag = &skb_shinfo(skb)->frags[i];
                }
            } else if(i < MAX_SKB_FRAGS) {
                if (copy > PAGE_SIZE)
                    copy = PAGE_SIZE;
                page = alloc_pages(sk->sk_allocation, 0);
                if (page == NULL) {
                    err = -ENOMEM;
                    goto error;
                }
                sk->sk_sndmsg_page = page;
                sk->sk_sndmsg_off = 0;

                skb_fill_page_desc(skb, i, page, 0, 0);
                frag = &skb_shinfo(skb)->frags[i];
            } else {
                err = -EMSGSIZE;
                goto error;
            }
            if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
                err = -EFAULT;
                goto error;
            }
            sk->sk_sndmsg_off += copy;
            frag->size += copy;
            skb->len += copy;
            skb->data_len += copy;
            skb->truesize += copy;
            atomic_add(copy, &sk->sk_wmem_alloc);
        }
        offset += copy;
        length -= copy;
    }
    return 0;
error:
    inet->cork.length -= length;
    IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
    return err;
}
Example #30
0
static inline int unlock_ben(struct benaphore *ben)
{
        if (atomic_add(&ben->atom, -1) > 1)
                return release_sem(ben->sem);
        return B_OK;
}
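For completeness, the acquire side of a benaphore (sketched here under the assumption of the usual BeOS/Haiku layout, struct benaphore { vint32 atom; sem_id sem; } with atom starting at 0; atomic_add() returns the previous value) only falls back to the kernel semaphore when there is contention:

static inline int lock_ben(struct benaphore *ben)
{
        if (atomic_add(&ben->atom, 1) > 0)
                return acquire_sem(ben->sem);   /* contended: block on the semaphore */
        return B_OK;                            /* uncontended fast path */
}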