Example No. 1
void tprintf(char *msg, ...)
{
	int ret;
	va_list vl;

	va_start(vl, msg);

	spinlock_lock(&buffer_lock);
	ret = vsnprintf(buffer + buffer_use,
			sizeof(buffer) - buffer_use,
			msg, vl);
	if (ret < 0)
		/* Something screwed up! Unexpected. */
		goto out;
	if (ret >= sizeof(buffer) - buffer_use) {
		/* flush must not retake buffer_lock, which is
		 * already held here */
		tprintf_flush();
		/* Restart the va_list: reusing it after vsnprintf
		 * has consumed it is undefined behavior. */
		va_end(vl);
		va_start(vl, msg);
		/* Rewrite the buffer */
		ret = vsnprintf(buffer + buffer_use,
				sizeof(buffer) - buffer_use,
				msg, vl);

		/* Again, we've failed! This shouldn't happen! So
		 * switch to vfprintf temporarily :-( */
		if (ret >= sizeof(buffer) - buffer_use) {
			va_end(vl);
			va_start(vl, msg);
			fprintf(stderr, "BUG (buffer too large) -->\n");
			vfprintf(stdout, msg, vl);
			fprintf(stderr, " <--\n");
			goto out;
		}
	}
	}

	if (ret < sizeof(buffer) - buffer_use)
		buffer_use += ret;
out:
	spinlock_unlock(&buffer_lock);

	va_end(vl);
}
Example No. 2
void
free (void *m)
{
	int *p, *q, len;

	spinlock_lock (&lock);
	if (!initialized)
		prepare ();
	TST ("free enter");
	/* Walk the block headers downward from the top of the heap:
	 * a positive header marks an allocated block, a negative one
	 * a free block; q remembers the nearest free block above p so
	 * the newly freed block can be coalesced into it. */
	p = heap + heaplen - 1;
	q = NULL;
	while (*p) {
		if (*p < 0) {
			if (!q)
				q = p;
			p += *p - 1;
		} else if (p - *p == m) {
			goto found;
		} else {
			q = NULL;
			p -= *p + 1;
		}
	}
	panic ("freeing not allocated memory %p", m);
found:
	len = -*p;
	while (p[len - 1] < 0)
		len += p[len - 1] - 1;
	*p = len;
	if (q) {
		len = *q;
		while (q[len - 1] < 0)
			len += q[len - 1] - 1;
		*q = len;
	}
	TST ("free exit");
	spinlock_unlock (&lock);
}
Example No. 3
static struct nvme_request *
get_request (struct nvme_host *host,
	     struct nvme_request_hub *hub,
	     u16 subm_queue_id,
	     u16 cmd_id)
{
	spinlock_lock (&hub->lock);

	struct nvme_subm_slot *subm_slot;
	subm_slot = nvme_get_subm_slot (host, subm_queue_id);

	struct nvme_request *req = subm_slot->req_slot[cmd_id];

	if (!req)
		goto end;

	subm_slot->req_slot[cmd_id] = NULL;
	subm_slot->n_slots_used--;
end:
	spinlock_unlock (&hub->lock);

	return req;
}
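
The lookup above clears the slot it returns; the registration side would claim the slot under the same hub lock. A minimal sketch of such a counterpart, reusing the req_slot/n_slots_used fields above (the function name and the ASSERT-on-occupied policy are assumptions, not part of the original driver):

static void
register_request (struct nvme_host *host,
		  struct nvme_request_hub *hub,
		  struct nvme_request *req,
		  u16 subm_queue_id,
		  u16 cmd_id)
{
	spinlock_lock (&hub->lock);

	struct nvme_subm_slot *subm_slot;
	subm_slot = nvme_get_subm_slot (host, subm_queue_id);

	/* the slot must be free before a new request is parked in it */
	ASSERT (!subm_slot->req_slot[cmd_id]);

	subm_slot->req_slot[cmd_id] = req;
	subm_slot->n_slots_used++;

	spinlock_unlock (&hub->lock);
}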
Example No. 4
/* Kernel debug logging facility */
void dbglog(int level, const char *fmt, ...) {
    va_list args;

    /* If this log level is blocked out, don't even bother */
    if(level > dbglog_level)
        return;

    /* We only try to lock if the message isn't urgent */
    if(level >= DBG_ERROR && !irq_inside_int())
        spinlock_lock(&mutex);

    va_start(args, fmt);
    /* NOTE: vsprintf() is unbounded; printf_buf must be large enough
       for the longest possible message. vsnprintf() would be safer. */
    (void)vsprintf(printf_buf, fmt, args);
    va_end(args);

    if(irq_inside_int())
        dbgio_write_str(printf_buf);
    else
        fs_write(1, printf_buf, strlen(printf_buf));

    if(level >= DBG_ERROR && !irq_inside_int())
        spinlock_unlock(&mutex);
}
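
Call sites pass a severity level plus printf-style arguments; the level both filters the message against dbglog_level and decides whether the lock is taken. A hypothetical call (assuming DBG_WARNING is one of the usual level constants):

    dbglog(DBG_WARNING, "timer %d fired late by %d ms\n", timer_id, delay_ms);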
Example No. 5
/*
 * oneshot_attach()
 *	Attach a timer to the list.
 */
void oneshot_attach(struct oneshot *timer)
{
	spinlock_lock(&oneshot_lock);

	if (DEBUG) {
		if (timer->os_next) {
			debug_stop();
			while (1) {
				debug_print_pstr("\fAttach timer: ");
				debug_print16((addr_t)timer);
				debug_print_pstr(" - is already");
				debug_wait_button();
				debug_stack_trace();
			}
		}
	}
	
	timer->os_next = oneshot_list;
	oneshot_list = timer;
	oneshot_ref(timer);

	spinlock_unlock(&oneshot_lock);
}
Example No. 6
void tprintf(char *msg, ...)
{
    ssize_t ret;
    ssize_t avail;
    va_list vl;

    spinlock_lock(&buffer_lock);

    avail = sizeof(buffer) - buffer_use;
    bug_on(avail < 0);

    va_start(vl, msg);
    ret = vsnprintf(buffer + buffer_use, avail, msg, vl);
    va_end(vl);

    if (ret < 0)
        panic("vsnprintf screwed up in tprintf!\n");
    if ((size_t) ret > sizeof(buffer))
        panic("No mem in tprintf left!\n");
    if (ret >= avail) {
        __tprintf_flush();

        avail = sizeof(buffer) - buffer_use;
        bug_on(avail < 0);

        va_start(vl, msg);
        ret = vsnprintf(buffer + buffer_use, avail, msg, vl);
        va_end(vl);

        if (ret < 0)
            panic("vsnprintf screwed up in tprintf!\n");
    }

    buffer_use += ret;

    spinlock_unlock(&buffer_lock);
}
Example No. 7
void semaphore_wait(semaphore_t* semaphore)
{
	// lock semaphore
	spinlock_lock(&semaphore->lock);

	int wait = (--semaphore->value<0);

	// if blocked, add the current tasks to the "waiting tasks" list
	if (wait) {
		task_list_t* list = kmalloc(sizeof(task_list_t));
		list->head = current_task;
		list->tail = semaphore->waiting_tasks;
		semaphore->waiting_tasks = list;
	}

	// unlock semaphore
	spinlock_unlock(&semaphore->lock);

	// if blocked, stop current thread
	if (wait) {
		sched_unschedule(current_task, TASK_WAITING);
		sched_yield();
	}
}
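
The wait above pairs with a signal that pops one waiting task under the same lock and wakes it after the lock is dropped. A minimal sketch of that counterpart, assuming the cons-cell task_list_t layout used above; task_t, kfree(), and sched_schedule() are assumed names, not taken from the original:

void semaphore_signal(semaphore_t* semaphore)
{
	task_t* task = NULL;

	// lock semaphore
	spinlock_lock(&semaphore->lock);

	// a waiter exists whenever the value was negative
	if (semaphore->value++ < 0) {
		task_list_t* list = semaphore->waiting_tasks;
		task = list->head;
		semaphore->waiting_tasks = list->tail;
		kfree(list);
	}

	// unlock semaphore
	spinlock_unlock(&semaphore->lock);

	// wake the task outside the lock, mirroring semaphore_wait()
	if (task)
		sched_schedule(task, TASK_RUNNING);
}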
Example No. 8
struct module *module_query(const char *name) {
	struct module *module = _query(name);
	if (module) return module;
	spinlock_lock(&M.lock);
	module = _query(name);
	if (!module && M.count < MODULE_SIZE) {
		void *dl = _module_open(name);
		if (dl) {
			module = &M.modules[M.count];
			module->name = (char *)name;
			module->dl = dl;
			if (_module_sym(module) == 0) {
				char *_name = (char *)pixel_alloc(0, strlen(name)+1);
				strcpy(_name, name);
				module->name = _name;
				M.count++;
			} else {
				/* symbol lookup failed; note that dl is
				 * left open on this path */
				module = 0;
			}
		}
	}
	spinlock_unlock(&M.lock);
	return module;
}
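
Usage is a single call that either returns the cached module, loads and registers a new one, or fails; a minimal sketch (the module name is made up):

	struct module *mod = module_query("logger");
	if (!mod) {
		/* not cached, and loading failed or the table is full */
		return NULL;
	}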
Example No. 9
int main(int argc, char const *argv[]) {
	int m, i;
	data data;
	cthread *thr;
	data.cq = clist_create();
	data.lock = SL_UNLOCK;
	cthr_pool *pool = cthr_pool_create(10); 
	cthr_pool_destroy(pool);
	pool = cthr_pool_create(100); 
	for(m = 0; m < 100; m++) {
		for(i = 0; i < 10; i++){
			spinlock_lock(&data.lock);
			clist_lpush(data.cq, NULL);
			spinlock_unlock(&data.lock);
		}
		cthr_pool_run_task(pool, print_test, &data);
	}
	/* inspect thread states before destroying the pool; touching
	 * pool after cthr_pool_destroy() would be a use-after-free */
	for(i = 0; i < pool->size; i++) {
		thr = pool->thrs + i;
		printf("%d\n", thr->state);
	}
	cthr_pool_destroy(pool);
	return 0;
}
Example No. 10
static int pcap_mmap_prepare_reading_pcap(int fd)
{
	int ret;
	struct stat sb;

	spinlock_lock(&lock);
	ret = fstat(fd, &sb);
	if (ret < 0)
		panic("Cannot fstat pcap file!\n");
	if (!S_ISREG (sb.st_mode))
		panic("pcap dump file is not a regular file!\n");
	map_size = sb.st_size;
	pstart = mmap(0, map_size, PROT_READ, MAP_SHARED
		      /*| MAP_HUGETLB*/, fd, 0);
	if (pstart == MAP_FAILED)
		puke_and_die(EXIT_FAILURE, "mmap of file failed!");
	ret = madvise(pstart, map_size, MADV_SEQUENTIAL);
	if (ret < 0)
		panic("Failed to give kernel mmap advise!\n");
	pcurr = pstart + sizeof(struct pcap_filehdr);
	spinlock_unlock(&lock);

	return 0;
}
Example No. 11
void retranslators_destroy_reference(REFERENCE *pReference)
{	
	RETRANSLATOR *pRetranslator = pReference->pRetranslator;

	spinlock_lock(&spinlock);

	pRetranslator->reference_count--;

	if (pRetranslator->reference_count == 0) {

		if (pRetranslator->sock != -1) {
			closesocket(pRetranslator->sock);
			pRetranslator->sock = -1;
		}
	
		retranslators.remove(pRetranslator);

		delete pRetranslator;
	}

	spinlock_unlock(&spinlock);

	delete pReference;
}
Example No. 12
void io_libraryRetain(io_library_t *library)
{
	spinlock_lock(&library->lock);
	library->refCount ++;
	spinlock_unlock(&library->lock);
}
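
The retain above implies a release that drops the count and tears the library down once it reaches zero. A minimal sketch, assuming the refCount field above; io_libraryDestroy() is a hypothetical teardown helper:

void io_libraryRelease(io_library_t *library)
{
	spinlock_lock(&library->lock);
	int dead = (--library->refCount == 0);
	spinlock_unlock(&library->lock);

	// tear down outside the lock; nobody else can hold a
	// reference once the count has dropped to zero
	if(dead)
		io_libraryDestroy(library);
}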
Example No. 13
int main()
{
	int arp_sockfd, arp_getlen, i;
	int send_count=0, file_num=0;
	struct sockaddr_in router_addr, device_addr;
	char router_ipaddr[17], router_mac[17], buffer[512];
	unsigned char scan_ipaddr[4]; // scan ip
	FILE *fp_ip;
	fd_set rfds;
        ARP_HEADER * arp_ptr;
        struct timeval tv1, tv2, arp_timeout;
	int shm_client_detail_info_id;

        FILE *fp = fopen("/var/run/networkmap.pid", "w");
        if(fp != NULL){
                fprintf(fp, "%d", getpid());
                fclose(fp);
        }
	#ifdef DEBUG
		eval("rm", "/var/client*");
	#endif

	//Initialize client tables
	spinlock_lock(SPINLOCK_Networkmap);
	shm_client_detail_info_id = shmget((key_t)1001, sizeof(CLIENT_DETAIL_INFO_TABLE), 0666|IPC_CREAT);
        if (shm_client_detail_info_id == -1){
    	    fprintf(stderr,"shmget failed\n");
            exit(1);
    	}

	CLIENT_DETAIL_INFO_TABLE *p_client_detail_info_tab = (P_CLIENT_DETAIL_INFO_TABLE)shmat(shm_client_detail_info_id,(void *) 0,0);
	//Reset shared memory
	memset(p_client_detail_info_tab, 0x00, sizeof(CLIENT_DETAIL_INFO_TABLE));
	p_client_detail_info_tab->ip_mac_num = 0;
	p_client_detail_info_tab->detail_info_num = 0;
	spinlock_unlock(SPINLOCK_Networkmap);	

	//Get Router's IP/Mac
	strcpy(router_ipaddr, nvram_safe_get("lan_ipaddr"));
	strcpy(router_mac, nvram_safe_get("et0macaddr"));
        inet_aton(router_ipaddr, &router_addr.sin_addr);
        memcpy(my_ipaddr,  &router_addr.sin_addr, 4);

	//Prepare scan 
        memset(scan_ipaddr, 0x00, 4);
        memcpy(scan_ipaddr, &router_addr.sin_addr, 3);
	networkmap_fullscan = 1;
	nvram_set("networkmap_fullscan", "1");

	if (strlen(router_mac)!=0) ether_atoe(router_mac, my_hwaddr);

	signal(SIGUSR1, refresh_sig); //catch UI refresh signal

	// create a packet socket and bind to "br0" to receive ARP packets
	arp_sockfd = create_socket(INTERFACE);

        if(arp_sockfd < 0)
                perror("create socket ERR:");
	else {
	        arp_timeout.tv_sec = 0;
        	arp_timeout.tv_usec = 10000;
		setsockopt(arp_sockfd, SOL_SOCKET, SO_RCVTIMEO, &arp_timeout, sizeof(arp_timeout));//set receive timeout
		dst_sockll = src_sockll; //Copy sockaddr info to dst
		memset(dst_sockll.sll_addr, -1, sizeof(dst_sockll.sll_addr)); // set dmac= FF:FF:FF:FF:FF:FF
	}

        while(1)//main while loop
        {
	    while(1) { //full scan and flush the recv buffer
		fullscan:
                if(networkmap_fullscan == 1) { //Scan all IP address in the subnetwork
		    if(scan_count == 0) { 
	                arp_timeout.tv_sec = 0;
        	        arp_timeout.tv_usec = 10000;
                	setsockopt(arp_sockfd, SOL_SOCKET, SO_RCVTIMEO, &arp_timeout, sizeof(arp_timeout));//set receive timeout
			NMP_DEBUG("Starting full scan!\n");
			
                        //reset client tables
			spinlock_lock(SPINLOCK_Networkmap);
        		memset(p_client_detail_info_tab, 0x00, sizeof(CLIENT_DETAIL_INFO_TABLE));
        		p_client_detail_info_tab->detail_info_num = 0;
			spinlock_unlock(SPINLOCK_Networkmap);
		    }
		    scan_count++;
		    scan_ipaddr[3]++;

		    if( scan_count<255 && memcmp(scan_ipaddr, my_ipaddr, 4) ) {
                        sent_arppacket(arp_sockfd, scan_ipaddr);
		    }         
		    else if(scan_count>255) { //Scan completed
                	arp_timeout.tv_sec = 1;
                	arp_timeout.tv_usec = 500000; //Longer timeout in monitor state to reduce CPU load
                	setsockopt(arp_sockfd, SOL_SOCKET, SO_RCVTIMEO, &arp_timeout, sizeof(arp_timeout));//set receive timeout
			networkmap_fullscan = 0;
			//scan_count = 0;
			nvram_set("networkmap_fullscan", "0");
			NMP_DEBUG("Finish full scan!\n");
		    }
                }// End of full scan

		arp_getlen=recvfrom(arp_sockfd, buffer, 512, 0, NULL, NULL);

	   	if(arp_getlen == -1) {
			if( scan_count<255)
				goto fullscan;
			else
				break;
		}
		else {
		    arp_ptr = (ARP_HEADER*)(buffer);
                    NMP_DEBUG("*Receive an ARP Packet from: %d.%d.%d.%d, len:%d\n",
				(int *)arp_ptr->source_ipaddr[0],(int *)arp_ptr->source_ipaddr[1],
				(int *)arp_ptr->source_ipaddr[2],(int *)arp_ptr->source_ipaddr[3],
				arp_getlen);

		    //Check ARP packet if source ip and router ip at the same network
                    if( !memcmp(my_ipaddr, arp_ptr->source_ipaddr, 3) ) {

			swapbytes16(arp_ptr->message_type);

			//ARP Response packet to router
			if( arp_ptr->message_type == 0x02 &&   		       	// ARP response
                       	    memcmp(arp_ptr->dest_ipaddr, my_ipaddr, 4) == 0 && 	// dest IP
                       	    memcmp(arp_ptr->dest_hwaddr, my_hwaddr, 6) == 0) 	// dest MAC
			{
			    //NMP_DEBUG("   It's an ARP Response to Router!\n");
                            for(i=0; i<p_client_detail_info_tab->ip_mac_num; i++) {
                            	if( !memcmp(p_client_detail_info_tab->ip_addr[i], arp_ptr->source_ipaddr, 4) ) 
                                    break;
                            }
			    //i==ip_mac_num: the table is empty or no
			    //matching IP was found, so record a new entry.
			    if(i==p_client_detail_info_tab->ip_mac_num){
				spinlock_lock(SPINLOCK_Networkmap);
				memcpy(p_client_detail_info_tab->ip_addr[p_client_detail_info_tab->ip_mac_num], 
					arp_ptr->source_ipaddr, 4);
                                memcpy(p_client_detail_info_tab->mac_addr[p_client_detail_info_tab->ip_mac_num], 
					arp_ptr->source_hwaddr, 6);
                                p_client_detail_info_tab->ip_mac_num++;
				spinlock_unlock(SPINLOCK_Networkmap);

			    #ifdef DEBUG  //Write client info to file
                		fp_ip=fopen("/var/client_ip_mac.txt", "a");
                		if (fp_ip==NULL) {
                    			printf("File Open Error!\n");
                		}
                		else {
                        		printf("Fill: %d-> %d.%d", i,p_client_detail_info_tab->ip_addr[i][2],p_client_detail_info_tab->ip_addr[i][3]);

                        		fprintf(fp_ip, "%d.%d.%d.%d,%02X:%02X:%02X:%02X:%02X:%02X\n",
                      	 		    p_client_detail_info_tab->ip_addr[i][0],p_client_detail_info_tab->ip_addr[i][1],
                       	 		    p_client_detail_info_tab->ip_addr[i][2],p_client_detail_info_tab->ip_addr[i][3],
                        		    p_client_detail_info_tab->mac_addr[i][0],p_client_detail_info_tab->mac_addr[i][1],
                            		    p_client_detail_info_tab->mac_addr[i][2],p_client_detail_info_tab->mac_addr[i][3],
                       			    p_client_detail_info_tab->mac_addr[i][4],p_client_detail_info_tab->mac_addr[i][5]);
                    			/* only close the file when it was actually opened */
                    			fclose(fp_ip);
                		}
			    #endif
                	    }
			}
			else { //Normal ARP packet or ARP response to another IP
        	                //Compare IP and IP buffer if not exist
                        	for(i=0; i<p_client_detail_info_tab->ip_mac_num; i++) {
                                        if( !memcmp(p_client_detail_info_tab->ip_addr[i], arp_ptr->source_ipaddr, 4) ) {
                                              	NMP_DEBUG_M("Find the same IP at the table!\n");
                	                        break;
                        	        }
                        	}
                        	if( i==p_client_detail_info_tab->ip_mac_num ) //Find a new IP or table is empty! Send an ARP request.
				{
					NMP_DEBUG("New IP\n");
					if(memcmp(my_ipaddr, arp_ptr->source_ipaddr, 4))
                                		sent_arppacket(arp_sockfd, arp_ptr->source_ipaddr);
					else
						NMP_DEBUG("New IP is the same as Router IP! Ignore it!\n");
				}
			}//End of normal ARP packet
		    }//Source IP in the same subnetwork
		}//End of arp_getlen != -1
	    } // End of while for flush buffer

	    //Find All Application of clients
	    if(p_client_detail_info_tab->detail_info_num < p_client_detail_info_tab->ip_mac_num) {
		FindAllApp(my_ipaddr, p_client_detail_info_tab);
		#ifdef DEBUG //Fill client detail info table
                fp_ip=fopen("/var/client_detail_info.txt", "a");
                if (fp_ip==NULL) {
                        printf("File Open Error!\n");
                }
                else {
                        fprintf(fp_ip, "%s,%d,%d,%d,%d\n",
                                p_client_detail_info_tab->device_name[p_client_detail_info_tab->detail_info_num], 
				p_client_detail_info_tab->type[p_client_detail_info_tab->detail_info_num], 
				p_client_detail_info_tab->http[p_client_detail_info_tab->detail_info_num],
                                p_client_detail_info_tab->printer[p_client_detail_info_tab->detail_info_num], 
				p_client_detail_info_tab->itune[p_client_detail_info_tab->detail_info_num]);
                        fclose(fp_ip);
                }
		#endif
		p_client_detail_info_tab->detail_info_num++;
	    }

	} //End of main while loop
	close(arp_sockfd);
	return 0;
}
Example No. 14
/**
 * @brief check for urb advance and process completions
 * @param usbhc struct usb_host
 */
int
uhci_check_advance(struct usb_host *usbhc) 
{
	struct uhci_host *host = (struct uhci_host *)usbhc->private;
	struct usb_request_block *urb, *nexturb;
	int advance = 0;
	int ucfn = -1;

	if (cmpxchgl(&host->incheck, 0U, 1U))
		return 0;

#if 0
	in16(host->iobase + UHCI_REG_USBSTS, &usbsts);
	if (usbsts)
		dprintft(2, "%04x: %s: usbsts = %04x\n", 
			host->iobase, __FUNCTION__, usbsts);
#endif /* 0 */
	spinlock_lock(&host->lock_hfl);
recheck:
	for (urb = LIST4_HEAD (host->inproc_urbs, list); urb;
	     urb = nexturb) {
		urb->prevent_del = true;
		spinlock_unlock(&host->lock_hfl);

		/* update urb->status */
		if (urb->status == URB_STATUS_RUN) {
			if (ucfn < 0)
				ucfn = uhci_current_frame_number (host);
			uhci_check_urb_advance_sub (host, ucfn, host->hc, urb);
		}

		switch (urb->status) {
		default: /* errors */
			dprintft(2, "%04x: %s: got some errors(%s) "
				 "for urb(%p).\n", host->iobase, 
				 __FUNCTION__, 
				 uhci_error_status_string(urb->status), urb);
			/* through */
		case URB_STATUS_ADVANCED:
			if (urb->callback)
				(urb->callback) (host->hc, urb, urb->cb_arg);
			advance++;
			break;
		case URB_STATUS_NAK:
			dprintft(2, "%04x: %s: got an NAK for urb(%p).\n",
				 host->iobase, __FUNCTION__, urb);
			urb->status = URB_STATUS_RUN;
		case URB_STATUS_RUN:
		case URB_STATUS_FINALIZED:
		case URB_STATUS_UNLINKED:
			break;
		} 
		spinlock_lock(&host->lock_hfl);
		nexturb = LIST4_NEXT (urb, list);
		urb->prevent_del = false;
		if (urb->deferred_del) {
			urb->deferred_del = false;
			spinlock_unlock(&host->lock_hfl);
			uhci_deactivate_urb(host->hc, urb);
			spinlock_lock(&host->lock_hfl);
			goto recheck;
		}
	}
	spinlock_unlock(&host->lock_hfl);

#if 0
	if (advance) {
		dprintft(3, "%s: USBSTS register cleared.\n", 
			__FUNCTION__);
		out16(host->iobase + UHCI_REG_USBSTS, usbsts);
	}
#endif

	host->incheck = 0U;

	return advance;
}
Example No. 15
/**
 * @brief submit an asynchronous urb
 * @param host struct uhci_host
 * @param device struct usb_device
 * @param epdesc struct usb_endpoint_descriptor
 * @param data void *
 * @param size u16
 * @param callback int (*)(struct usb_host *, struct usb_request_block *, void *)
 * @param arg void *
 * @param ioc int
 */
static struct usb_request_block *
uhci_submit_async(struct uhci_host *host, struct usb_device *device,
		  struct usb_endpoint_descriptor *epdesc, 
		  void *data, u16 size,
		  int (*callback)(struct usb_host *,
				  struct usb_request_block *, void *), 
		  void *arg, int ioc)
{
	struct usb_request_block *urb;
	size_t pktsize;
	u32 lospeed = 0;

	urb = uhci_create_urb(host);
	if (!urb)
		return (struct usb_request_block *)NULL;
	if (device) {
		spinlock_lock(&device->lock_dev);
		init_urb(urb, device->devnum, epdesc, callback, arg);
		spinlock_unlock(&device->lock_dev);
	}

	/* determine if we are dealing with a low speed device or not */
	if (device) {
		if (device->speed == UD_SPEED_UNDEF) {
			u16 portsc;
			ASSERT (device->portno <= UHCI_NUM_PORTS_HC);
			portsc = host->portsc[(device->portno-1)];
			device->speed = (portsc & UHCI_PORTSC_LOSPEED) ? 
				UD_SPEED_LOW : UD_SPEED_FULL;
		}
		lospeed = (device->speed == UD_SPEED_LOW) ? UHCI_TD_STAT_LS : 0;
	}

	/* create a QH */
	URB_UHCI(urb)->qh = uhci_alloc_qh(host, &URB_UHCI(urb)->qh_phys);
	if (!URB_UHCI(urb)->qh)
		goto fail_submit_async;

	URB_UHCI(urb)->qh->link = UHCI_QH_LINK_TE;

	pktsize = epdesc->wMaxPacketSize;

	/* buffer and TD */
	if (size > 0) {
		struct usb_buffer_list *b;

		b = zalloc_usb_buffer_list();
		b->pid = USB_PID_IN;
		b->len = size;
		b->vadr = (virt_t)alloc2_aligned(b->len, &b->padr);
		if (!b->vadr) {
			free(b);
			goto fail_submit_async;
		}

		/* copy data if OUT direction */
		if (!USB_EP_DIRECT(epdesc)) {
			b->pid = USB_PID_OUT;
			memcpy((void *)b->vadr, data, b->len);
		}

		urb->buffers = b;
	}

	if (device) {
		spinlock_lock(&device->lock_dev);
		URB_UHCI(urb)->tdm_head = 
			prepare_buffer_tds(host, (urb->buffers) ? 
					   (phys32_t)urb->buffers->padr : 0U,
					   size, device->devnum, epdesc,
					   (size_t)pktsize,
					   UHCI_TD_STAT_AC | 
					   UHCI_TD_STAT_SP | 
					   lospeed | 
					   uhci_td_maxerr(3));
		spinlock_unlock(&device->lock_dev);
	}

	if (!URB_UHCI(urb)->tdm_head)
		goto fail_submit_async;

	/* link the TDs into the QH */
	URB_UHCI(urb)->qh->element = URB_UHCI(urb)->tdm_head->td_phys;
	
	/* set IOC */
	if (ioc)
		URB_UHCI(urb)->tdm_head->td->status |= UHCI_TD_STAT_IC;

	/* set up toggles in TDs */
	epdesc->toggle = uhci_fixup_toggles(URB_UHCI(urb)->tdm_head, 
					    epdesc->toggle);

	/* link the QH into the frame list */
	if (uhci_activate_urb(host, urb) != URB_STATUS_RUN)
		goto fail_submit_async;

	URB_UHCI(urb)->tdm_acttail = NULL;

	return urb;
fail_submit_async:
	uhci_destroy_urb(host, urb);
	return (struct usb_request_block *)NULL;
}
Example No. 16
/**
 * @brief submit the control message
 * @param usbhc struct usb_host
 * @param device struct usb_device
 * @param endpoint u8
 * @param pktsz u16
 * @param csetup struct usb_ctrl_setup
 * @param callback int (*)(struct usb_host *, struct usb_request_block *, void *)
 * @param arg void *
 * @param ioc int
 */
struct usb_request_block *
uhci_submit_control(struct usb_host *usbhc, struct usb_device *device, 
		    u8 endpoint, u16 pktsz, struct usb_ctrl_setup *csetup,
		    int (*callback)(struct usb_host *,
				    struct usb_request_block *, void *), 
		    void *arg, int ioc)
{
	struct uhci_host *host = (struct uhci_host *)usbhc->private;
	struct usb_request_block *urb;
	struct usb_endpoint_descriptor *epdesc;
	struct uhci_td_meta *tdm;
	struct usb_buffer_list *b;
	u32 lospeed = 0;

	epdesc = get_edesc_by_address(device, endpoint);
	if (!epdesc) {
		if (endpoint != 0) {
			dprintft(2, "%04x: %s: no endpoint(%d) found.\n",
				 host->iobase, __FUNCTION__, endpoint);

			return (struct usb_request_block *)NULL;
		}
		
		/* use the default endpoint */
		epdesc = &default_ep0;
	}

	/* determine if we are dealing with a low speed device or not */
	if (device) {
		if (device->speed == UD_SPEED_UNDEF) {
			u16 portsc;
			ASSERT (device->portno <= UHCI_NUM_PORTS_HC);
			portsc = host->portsc[(device->portno-1)];
			device->speed = (portsc & UHCI_PORTSC_LOSPEED) ? 
				UD_SPEED_LOW : UD_SPEED_FULL;
		}
		lospeed = (device->speed == UD_SPEED_LOW) ? UHCI_TD_STAT_LS : 0;
	}

	dprintft(5, "%s: epdesc->wMaxPacketSize = %d\n", 
		__FUNCTION__, epdesc->wMaxPacketSize);

	urb = uhci_create_urb(host);
	if (!urb)
		return (struct usb_request_block *)NULL;
	if (device) {
		spinlock_lock(&device->lock_dev);
		init_urb(urb, device->devnum, epdesc, callback, arg);
		spinlock_unlock(&device->lock_dev);
	}
	/* create a QH */
	URB_UHCI(urb)->qh = uhci_alloc_qh(host, &URB_UHCI(urb)->qh_phys);
	if (!URB_UHCI(urb)->qh)
		goto fail_submit_control;

	URB_UHCI(urb)->qh->link = UHCI_QH_LINK_TE;

	if (pktsz == 0)
		pktsz = epdesc->wMaxPacketSize;

	/* SETUP TD */
	URB_UHCI(urb)->tdm_head = tdm = uhci_new_td_meta(host, NULL);
	if (!tdm)
		goto fail_submit_control;
	URB_UHCI(urb)->qh->element = 
		URB_UHCI(urb)->qh_element_copy = tdm->td_phys;
	b = zalloc_usb_buffer_list();
	b->pid = USB_PID_SETUP;
	b->len = sizeof(*csetup);
	b->vadr = (virt_t)alloc2_aligned(b->len, &b->padr);
		
	if (!b->vadr) {
		free(b);
		goto fail_submit_control;
	}
	urb->buffers = b;
	memcpy((void *)b->vadr, (void *)csetup, b->len);

	tdm->td->status = tdm->status_copy =
		UHCI_TD_STAT_AC | lospeed | uhci_td_maxerr(3);
	if (device) {
		spinlock_lock(&device->lock_dev);
		tdm->td->token = tdm->token_copy =
			uhci_td_explen(sizeof(*csetup)) |
			UHCI_TD_TOKEN_ENDPOINT(epdesc->bEndpointAddress) | 
			UHCI_TD_TOKEN_DEVADDRESS(device->devnum) |
			UHCI_TD_TOKEN_PID_SETUP;
		spinlock_unlock(&device->lock_dev);
	}
	tdm->td->buffer = (phys32_t)b->padr;

	if (csetup->wLength > 0) {
		b = zalloc_usb_buffer_list();
		b->pid = USB_PID_IN;
		b->len = csetup->wLength;
		b->vadr = (virt_t)alloc2_aligned(b->len, &b->padr);

		if (!b->vadr) {
			free(b);
			goto fail_submit_control;
		}

		b->next = urb->buffers;
		urb->buffers = b;
		if (device) {
			spinlock_lock(&device->lock_dev);
			tdm = prepare_buffer_tds(host, (phys32_t)b->padr, 
						 b->len,
						 device->devnum, epdesc, 
						 (size_t)pktsz,
						 UHCI_TD_STAT_AC | 
						 UHCI_TD_STAT_SP | 
						 lospeed |
						 uhci_td_maxerr(3));
			spinlock_unlock(&device->lock_dev);
		}
		if (!tdm)
			goto fail_submit_control;
		dprintft(5, "%s: tdm->td_phys = %llx\n", 
			__FUNCTION__, tdm->td_phys);
		URB_UHCI(urb)->tdm_head->next = tdm;
		URB_UHCI(urb)->tdm_head->td->link = tdm->td_phys;

	}

	/* The 1st toggle for SETUP must be 0. */
	uhci_fixup_toggles(URB_UHCI(urb)->tdm_head, epdesc->toggle);

	/* append one more TD for the status stage */
	for (tdm = URB_UHCI(urb)->tdm_head; tdm->next; tdm = tdm->next);
	tdm->next = uhci_new_td_meta(host, NULL);
	if (!tdm->next)
		goto fail_submit_control;
	
        tdm->next->td->link = UHCI_TD_LINK_TE;
        tdm->next->td->status = UHCI_TD_STAT_AC | lospeed | uhci_td_maxerr(3);
	if (ioc) 
		tdm->next->td->status |= UHCI_TD_STAT_IC;
	if (device) {
		spinlock_lock(&device->lock_dev);
		tdm->next->td->token = uhci_td_explen(0) |
			UHCI_TD_TOKEN_ENDPOINT(epdesc->bEndpointAddress) | 
			UHCI_TD_TOKEN_DEVADDRESS(device->devnum) |
			UHCI_TD_TOKEN_DT1_TOGGLE;
		spinlock_unlock(&device->lock_dev);
	}
	tdm->next->td->token |= (csetup->wLength > 0) ? 
		UHCI_TD_TOKEN_PID_OUT : UHCI_TD_TOKEN_PID_IN;
        tdm->next->td->buffer = 0U;
	tdm->td->link = (phys32_t)tdm->next->td_phys;

	/* link the QH into the frame list */
	if (uhci_activate_urb(host, urb) != URB_STATUS_RUN)
		goto fail_submit_control;

	URB_UHCI(urb)->tdm_acttail = NULL;

	return urb;
fail_submit_control:
	uhci_destroy_urb(host, urb);
	return (struct usb_request_block *)NULL;
}
Example No. 17
static inline void parwork_unlock(parwork_t* w)
{
    spinlock_unlock(&w->lock);
}
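
Its lock-side twin is symmetric; a minimal sketch, assuming the same parwork_t layout:

static inline void parwork_lock(parwork_t* w)
{
    spinlock_lock(&w->lock);
}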
Example No. 18
/**
 * @brief initialize host frame list
 * @param host struct uhci_host
 */
int
init_hframelist(struct uhci_host *host) 
{
	struct usb_request_block *urb;
	u32 frid;
	phys32_t *frame_p;
	virt_t framelist_virt;
	phys_t framelist_phys;
	int n_skels;

	/* allocate a page for frame list */
	alloc_page((void *)&framelist_virt, &framelist_phys);
	if (!framelist_phys)
		return -1;
	host->hframelist = framelist_phys;
	host->hframelist_virt = (phys32_t *)framelist_virt;

	spinlock_lock(&host->lock_hfl);

	/* create a TD for termination */
	host->term_tdm = uhci_new_td_meta(host, NULL);
	if (!host->term_tdm) {
		/* don't leak the lock on the failure path */
		spinlock_unlock(&host->lock_hfl);
		return -1;
	}
	host->term_tdm->td->link = UHCI_TD_LINK_TE;
	host->term_tdm->td->status = 0U;
	host->term_tdm->td->token = 
		UHCI_TD_TOKEN_DEVADDRESS(0x7f) | UHCI_TD_TOKEN_ENDPOINT(0) | 
		UHCI_TD_TOKEN_PID_IN | uhci_td_explen(0);
	host->term_tdm->td->buffer = 0U;

	/* create skelton QHs */
	for (n_skels = 0; n_skels<UHCI_NUM_SKELTYPES; n_skels++) {
		urb = uhci_create_urb(host);
		if (!urb)
			break;
		urb->address = URB_ADDRESS_SKELTON;
		URB_UHCI(urb)->qh = 
			uhci_alloc_qh(host, &URB_UHCI(urb)->qh_phys);
		if (!URB_UHCI(urb)->qh)
			break;
		if (n_skels == 0) {
			URB_UHCI(urb)->tdm_head = host->term_tdm;
			URB_UHCI(urb)->qh->element = (phys32_t)
				URB_UHCI(urb)->tdm_head->td_phys;
			URB_UHCI(urb)->qh->link = UHCI_QH_LINK_TE;
		} else {
			URB_UHCI(urb)->qh->element = UHCI_QH_LINK_TE;
			URB_UHCI(urb)->qh->link = (phys32_t)
				URB_UHCI(host->host_skelton
					 [n_skels - 1])->qh_phys | 
				UHCI_QH_LINK_QH;
			urb->link_next = host->host_skelton[n_skels - 1];
		}
		
		host->host_skelton[n_skels] = urb;
	}


	/* make link to a QH in each frame list entry 
	   according to intervals */
	for (frid = 0U; frid < UHCI_NUM_FRAMES; frid++) {
		frame_p = (phys32_t *)
			(framelist_virt + frid * sizeof(phys32_t));
		n_skels = __ffs((frid + 1) | 
				(1 << (UHCI_NUM_SKELTYPES - 1)));
		*frame_p = (phys32_t)
			URB_UHCI(host->host_skelton[n_skels])->qh_phys | 
			UHCI_FRAME_LINK_QH;
	}

	for (n_skels = 0; n_skels < 2; n_skels++)
		host->tailurb[n_skels] = host->host_skelton[0];

	spinlock_unlock(&host->lock_hfl);

	return 0;
}
Example No. 19
void __newlib_lock_release(__newlib_lock_t * lock) {
    spinlock_unlock(lock);
}
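
The matching acquire wrapper is the mirror image; a minimal sketch, assuming the acquire entry point follows the same naming scheme (the exact name is an assumption):

void __newlib_lock_acquire(__newlib_lock_t * lock) {
    spinlock_lock(lock);
}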
Example No. 20
void
nvme_process_all_comp_queues (struct nvme_host *host)
{
	spinlock_lock (&host->lock);
	if (!host->enable) {
		spinlock_unlock (&host->lock);
		return;
	}
	host->handling_comp++;
	spinlock_unlock (&host->lock);

	struct nvme_queue_info *h_comp_queue_info, *g_comp_queue_info;

	/* i = 0 means starting from admin completion queue */
	uint i;
	for (i = 0; i <= host->g_queue.max_n_comp_queues; i++) {
		h_comp_queue_info = host->h_queue.comp_queue_info[i];
		g_comp_queue_info = host->g_queue.comp_queue_info[i];

		/* max_n_comp_queues and queue buffer are set separately */
		if (!g_comp_queue_info)
			continue;

		nvme_lock_comp_queue (host, i);
		process_comp_queue (host,
				    i,
				    h_comp_queue_info,
				    g_comp_queue_info);
		nvme_unlock_comp_queue (host, i);
	}

	/*
	 * Submission queues are fetched in a round-robin manner.
	 * We try at most #max_n_subm_queues times.
	 */
	spinlock_lock (&host->lock);
	uint round, queue = host->queue_to_fetch;
	for (round = 0; round < host->g_queue.max_n_subm_queues; round++) {
		uint count = 0;

		/* max_n_subm_queues and queue buffer are set separately */
		if (host->g_queue.subm_queue_info[queue]) {
			nvme_lock_subm_queue (host, queue);
			count = nvme_try_process_requests (host, queue);
			nvme_unlock_subm_queue (host, queue);
		}

		/* Pick next queue in the next round */
		queue++;
		if (queue > host->g_queue.max_n_subm_queues)
			queue = 1;

		if (count > 0) {
			host->queue_to_fetch = queue;
			break;
		}
	}
	spinlock_unlock (&host->lock);

	/*
	 * Process Admin Commands only if no other completion handler is
	 * running. This is to prevent unexpected errors when the guest
	 * wants to remove a queue.
	 */
	nvme_lock_subm_queue (host, 0);
	spinlock_lock (&host->lock);
	ASSERT (host->handling_comp > 0);
	host->handling_comp--;
	if (!host->handling_comp)
		nvme_try_process_requests (host, 0);
	spinlock_unlock (&host->lock);
	nvme_unlock_subm_queue (host, 0);
}
Example No. 21
static void
process_comp_queue (struct nvme_host *host,
		    u16 comp_queue_id,
		    struct nvme_queue_info *h_comp_queue_info,
		    struct nvme_queue_info *g_comp_queue_info)
{
	struct nvme_request_hub *hub;
	hub = host->h_queue.request_hub[comp_queue_id];

	u16 h_cur_head = h_comp_queue_info->cur_pos.head;
	u16 g_cur_head = g_comp_queue_info->cur_pos.head;

	struct nvme_comp first_h_comp = {0}, *first_g_comp = NULL;

	struct nvme_comp *h_comp, *g_comp;
	for (h_comp = nvme_comp_queue_at_idx (h_comp_queue_info, h_cur_head),
	     g_comp = nvme_comp_queue_at_idx (g_comp_queue_info, g_cur_head);
	     NVME_COMP_GET_PHASE (h_comp) == h_comp_queue_info->phase;
	     h_comp = nvme_comp_queue_at_idx (h_comp_queue_info, h_cur_head),
	     g_comp = nvme_comp_queue_at_idx (g_comp_queue_info, g_cur_head)) {

		/* This queue ID is submission queue ID */
		u16 subm_queue_id = h_comp->queue_id;

		struct nvme_request *req;
		req = get_request (host, hub, subm_queue_id, h_comp->cmd_id);

		ASSERT (req);

		u64 time_taken = get_time () - req->submit_time;
		if (time_taken > NVME_TIME_TAKEN_WATERMARK) {
			printf ("Long time controller response: %llu\n",
				time_taken);
			printf ("Submission Queue ID: %u opcode: %u\n",
				subm_queue_id, req->cmd.std.opcode);
		}

		if (subm_queue_id == 0)
			process_admin_comp (host, h_comp, req);
		else
			process_io_comp (host, h_comp, req);

		h_cur_head++;

		if (h_cur_head >= h_comp_queue_info->n_entries) {
			h_comp_queue_info->phase ^= 1;
			h_cur_head = 0;
		}

		if (!req->is_h_req) {
			struct nvme_comp comp = *h_comp;
			comp.cmd_id = req->orig_cmd_id;
			comp.status &= ~0x1;
			comp.status |= g_comp_queue_info->phase;

			/*
			 * Replace with the host value instead of the
			 * value reported by the controller. This is necessary
			 * if we mix guest commands and host commands to share
			 * queues.
			 */
			comp.queue_head = g_subm_cur_tail (host,
							   subm_queue_id);

			if (first_g_comp) {
				*g_comp = comp;
			} else {
				/* Copy the first completion entry later */
				first_g_comp = g_comp;
				first_h_comp = comp;
			}

			g_cur_head++;
			if (g_cur_head >= g_comp_queue_info->n_entries) {
				g_comp_queue_info->phase ^= 1;
				g_cur_head = 0;
			}

			spinlock_lock (&hub->lock);
			g_comp_queue_info->cur_pos.head = g_cur_head;
			h_comp_queue_info->cur_pos.head = h_cur_head;
			spinlock_unlock (&hub->lock);
		} else {
			spinlock_lock (&hub->lock);
			nvme_write_comp_db (host, comp_queue_id, h_cur_head);
			hub->n_not_ack_h_reqs--;
			h_comp_queue_info->cur_pos.head = h_cur_head;
			spinlock_unlock (&hub->lock);
		}

		nvme_free_request (hub, req);
	}

	if (first_g_comp) {
		first_g_comp->cmd_specific = first_h_comp.cmd_specific;
		first_g_comp->rsvd = first_h_comp.rsvd;
		first_g_comp->queue_head = first_h_comp.queue_head;
		first_g_comp->queue_id = first_h_comp.queue_id;
		first_g_comp->cmd_id = first_h_comp.cmd_id;
		/*
		 * Make sure everything is stored in memory properly
		 * before we copy the status field. This is to avoid
		 * data corruption.
		 */
		cpu_sfence ();
		first_g_comp->status = first_h_comp.status;
	}
}
Example No. 22
static void pcap_mmap_fsync_pcap(int fd)
{
	spinlock_lock(&lock);
	msync(pstart, (unsigned long) (pcurr - pstart), MS_ASYNC);
	spinlock_unlock(&lock);
}
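
A teardown counterpart would flush synchronously and drop the mapping under the same lock; a minimal sketch, assuming the same lock/pstart/pcurr/map_size globals (the function name is hypothetical):

static void pcap_mmap_cleanup_pcap(int fd)
{
	spinlock_lock(&lock);
	/* flush dirty pages synchronously, then drop the mapping */
	msync(pstart, (unsigned long) (pcurr - pstart), MS_SYNC);
	munmap(pstart, map_size);
	pstart = pcurr = NULL;
	spinlock_unlock(&lock);
}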
Example No. 23
int sys_fork(uint_t flags, uint_t cpu_gid)
{
	fork_info_t info;
	struct dqdt_attr_s attr;
	struct thread_s *this_thread;
	struct task_s *this_task;
	struct thread_s *child_thread;
	struct task_s *child_task;
	uint_t irq_state;
	uint_t cpu_lid;
	uint_t cid;
	error_t err;
	uint_t tm_start;
	uint_t tm_end;
	uint_t tm_bRemote;
	uint_t tm_aRemote;

	tm_start = cpu_time_stamp();

	fork_dmsg(1, "%s: cpu %d, started [%d]\n",
		  __FUNCTION__, 
		  cpu_get_id(),
		  tm_start);

	this_thread = current_thread;
	this_task   = this_thread->task;
	info.current_clstr = current_cluster;

	err = atomic_add(&this_task->childs_nr, 1);
  
	if(err >= CONFIG_TASK_CHILDS_MAX_NR)
	{
		err = EAGAIN;
		goto fail_childs_nr;
	}

	fork_dmsg(1, "%s: task of pid %d can fork a child [%d]\n",
		  __FUNCTION__, 
		  this_task->pid,
		  cpu_time_stamp());

	info.isDone      = false;
	info.this_thread = this_thread;
	info.this_task   = this_task;
	info.flags       = flags;

	/* Save the FPU context with IRQs disabled so the FPU owner
	 * cannot change under our feet. */
	cpu_disable_all_irq(&irq_state);

	if(current_cpu->fpu_owner == this_thread)
	{
		fork_dmsg(1, "%s: going to save FPU\n", __FUNCTION__);
		cpu_fpu_context_save(&this_thread->uzone);
	}

	cpu_restore_irq(irq_state);

	if(flags & PT_FORK_USE_TARGET_CPU)
	{
		cpu_gid       = cpu_gid % arch_onln_cpu_nr();
		cpu_lid       = arch_cpu_lid(cpu_gid);
		cid           = arch_cpu_cid(cpu_gid);
		attr.cid      = cid;
		attr.cpu_id   = arch_cpu_lid(cpu_gid);
		info.isPinned = true;
	}
	else
	{
		info.isPinned = false;
		dqdt_attr_init(&attr, NULL);
		err = dqdt_task_placement(dqdt_root, &attr);
	}

	info.cpu = cpu_lid2ptr(attr.cpu_id);
	info.cid_exec = attr.cid_exec;

	/* Keep the first two processes on the current cluster. This is used by
	 * cluster zero to keep the "sh" process on this cluster. Init is forced
	 * on current_cluster in the task_load_init() function. */
	if(this_task->pid < PID_MIN_GLOBAL+2)
		info.cid_exec = current_cid;

	fork_dmsg(1, "%s: new task will be placed on cluster %d, cpu %d. Task will be moved on cluster %u on exec()\n", \
                        __FUNCTION__, attr.cid, attr.cpu_id, info.cid_exec);

	tm_bRemote = cpu_time_stamp();
	err = do_fork(&info);
	tm_aRemote = cpu_time_stamp();

	if(err)
		goto fail_do_fork;

	child_thread = info.child_thread;
	child_task   = info.child_task;

	spinlock_lock(&this_task->lock);

	list_add(&this_task->children, &child_task->list);
	spinlock_unlock(&this_task->lock);

	fork_dmsg(1, "%s: childs (task & thread) have been registered in their parents lists [%d]\n", 
		  __FUNCTION__, 
		  cpu_time_stamp());
  
	fork_dmsg(1, "%s: going to add child to target scheduler\n", __FUNCTION__);
	sched_add_created(child_thread);
	tm_end = cpu_time_stamp();
    
	fork_dmsg(1, "%s: cpu %d, pid %d, done [s:%u, bR:%u, aR:%u, e:%u, d:%u, t:%u, r:%u]\n",
	       __FUNCTION__,
	       cpu_get_id(),
	       this_task->pid,
	       tm_start,
	       tm_bRemote,
	       tm_aRemote,
	       tm_end,
	       attr.tm_request,
	       tm_end - tm_start,
	       info.tm_event);

	return child_task->pid;

fail_do_fork:
fail_childs_nr:
	atomic_add(&this_task->childs_nr, -1);
	this_thread->info.errno = err;
	return -1;
}
Example No. 24
/**
 * @brief deactivates the urb
 * @param usbhc struct usb_host
 * @param urb struct usb_request_block
 */
u8
uhci_deactivate_urb(struct usb_host *usbhc, struct usb_request_block *urb)
{
	struct uhci_host *host = (struct uhci_host *)usbhc->private;
	u8 status, type;

	/* nothing to do if already unlinked */
	if (urb->status == URB_STATUS_UNLINKED)
		return urb->status;

	dprintft(5, "%s: The urb link is %p <- %p -> %p.\n", 
		__FUNCTION__, urb->link_prev, urb, urb->link_next);

	spinlock_lock(&host->lock_hfl);

	if (urb->prevent_del) {
		urb->deferred_del = true;
		spinlock_unlock(&host->lock_hfl);
		return 0U;
	}

	/* urb link */
	if ((urb == host->fsbr_loop_head) && 
	    (urb == host->fsbr_loop_tail)) {
		dprintft(2, "%04x: %s: FSBR unlooped \n",
			 host->iobase, __FUNCTION__);
		host->fsbr = 0;
		host->fsbr_loop_head = host->fsbr_loop_tail = 
			(struct usb_request_block *)NULL;
		/* qh */
		URB_UHCI(urb->link_prev)->qh->link = UHCI_QH_LINK_TE;
	} else if (urb == host->fsbr_loop_tail) {
		/* tail of a FSBR loopback */
		dprintft(2, "%04x: %s: the tail of a FSBR loopback\n",
			 host->iobase, __FUNCTION__);
		host->fsbr_loop_tail = urb->link_prev;
		/* qh */
		URB_UHCI(host->fsbr_loop_tail)->qh->link = 
			(phys32_t)URB_UHCI(host->fsbr_loop_head)->qh_phys |
			UHCI_QH_LINK_QH;
	} else if (host->fsbr_loop_head == urb) {
		/* head of a FSBR loopback */
		dprintft(2, "%04x: %s: the head of a FSBR loopback\n",
			 host->iobase, __FUNCTION__);
		host->fsbr_loop_head = urb->link_next;
		/* qh */
		URB_UHCI(host->fsbr_loop_tail)->qh->link = 
			(phys32_t)URB_UHCI(host->fsbr_loop_head)->qh_phys |
			UHCI_QH_LINK_QH;
		URB_UHCI(urb->link_prev)->qh->link = URB_UHCI(urb)->qh->link;
	} else {
		/* qh */
		URB_UHCI(urb->link_prev)->qh->link = URB_UHCI(urb)->qh->link;
	}
	URB_UHCI(urb)->qh->link = UHCI_QH_LINK_TE;

	/* MEMO: urb->link_prev must exist
	   because of the skelton. */
	urb->link_prev->link_next = urb->link_next;
	if (urb->link_next)
		urb->link_next->link_prev = urb->link_prev;

	urb->status = URB_STATUS_UNLINKED;

	type = (urb->endpoint) ? 
		USB_EP_TRANSTYPE(urb->endpoint) : USB_ENDPOINT_TYPE_CONTROL;

	switch (type) {
	case USB_ENDPOINT_TYPE_INTERRUPT:
		/* through */
	case USB_ENDPOINT_TYPE_CONTROL:
		if (host->tailurb[URB_TAIL_CONTROL] == urb)
			host->tailurb[URB_TAIL_CONTROL] = urb->link_prev;
		/* through */
	case USB_ENDPOINT_TYPE_BULK:
		if (host->tailurb[URB_TAIL_BULK] == urb)
			host->tailurb[URB_TAIL_BULK] = urb->link_prev;
		break;
	case USB_ENDPOINT_TYPE_ISOCHRONOUS:
	default:
		printf("%s: transfer type(%02x) unsupported.\n",
		       __FUNCTION__, type);
	}

	status = urb->status;
	LIST4_DEL (host->inproc_urbs, list, urb);
	LIST4_ADD (host->unlinked_urbs[host->unlinked_urbs_index], list, urb);
	spinlock_unlock(&host->lock_hfl);

	return status;
}
Example No. 25
thread_t *thread_createKernel(process_t *process, thread_entry_t entry, size_t UNUSED(stackSize), uint32_t argCount, va_list args)
{
	thread_t *thread = thread_createVoid();
	if(thread)
	{
		//size_t stackPages = 1; //MAX(1, stackSize / 4096);
		uint8_t *kernelStack = (uint8_t *)pm_alloc(1);

		if(!kernelStack)
		{
			hfree(NULL, thread);
			return NULL;
		}

		thread->entry   = entry;
		thread->process = process;

		// Create the kernel stack
		thread->kernelStack     = kernelStack;
		thread->kernelStackVirt = (uint8_t *)vm_allocLimit(process->pdirectory, (uintptr_t)kernelStack, THREAD_STACK_LIMIT, 1, VM_FLAGS_KERNEL);

		uint32_t *stack = ((uint32_t *)(thread->kernelStackVirt + VM_PAGE_SIZE)) - argCount;
		memset(thread->kernelStackVirt, 0, 1 * VM_PAGE_SIZE);

		// Push the arguments for the thread on its stack
		thread->arguments = NULL;
		thread->argumentCount = argCount;

		if(argCount > 0)
		{
			thread->arguments = (uintptr_t **)halloc(NULL, argCount * sizeof(uintptr_t *));

			for(uint32_t i=0; i<argCount; i++)
			{
				uintptr_t *val = va_arg(args, uintptr_t *);
				thread->arguments[i] = val;
			}
		}

		// Forge initial kernel stackframe
		*(-- stack) = 0x10; // ss
		*(-- stack) = 0x0; // esp, kernel threads use the TSS
		*(-- stack) = 0x0200; // eflags
		*(-- stack) = 0x8; // cs
		*(-- stack) = (uint32_t)entry; // eip

		// Interrupt number and error code
		*(-- stack) = 0x0;
		*(-- stack) = 0x0;

		// General purpose register
		*(-- stack) = 0x0;
		*(-- stack) = 0x0;
		*(-- stack) = 0x0;
		*(-- stack) = 0x0;
		*(-- stack) = 0x0;
		*(-- stack) = 0x0;
		*(-- stack) = 0x0;
		*(-- stack) = 0x0;

		// Segment registers
		*(-- stack) = 0x10;
		*(-- stack) = 0x10;
		*(-- stack) = 0x10;
		*(-- stack) = 0x10;

		// Update the threads
		thread->esp = (uint32_t)stack;

		// Attach the thread to the process;
		spinlock_lock(&process->threadLock); // Acquire the process' thread lock so we don't end up doing bad things

		thread->id = _thread_getUniqueID(process);
		if(process->mainThread)
		{
			thread_t *mthread = process->mainThread;

			// Attach the new thread next to the main thread
			thread->next  = mthread->next;
			mthread->next = thread;
		}
		else
		{
			process->mainThread 		= thread;
			process->scheduledThread 	= thread;
		}

		spinlock_unlock(&process->threadLock);
	}

	return thread;
}
Example No. 26
/**
 * @brief activate urb
 * @param host struct uhci_host
 * @param urb struct usb_request_block
 */
u8
uhci_activate_urb(struct uhci_host *host, struct usb_request_block *urb)
{
	u8 status, type;
	int n;

	type = (urb->endpoint) ? 
		USB_EP_TRANSTYPE(urb->endpoint) : USB_ENDPOINT_TYPE_CONTROL;

	spinlock_lock(&host->lock_hfl);

	switch (type) {
	case USB_ENDPOINT_TYPE_INTERRUPT:
		n = __ffs(urb->endpoint->bInterval | 
			  (1 << (UHCI_NUM_SKELTYPES - 1)));
		/* MEMO: a new interrupt urb must be 
		   inserted just after a skelton anytime. */
		urb->link_prev = host->host_skelton[n];
		if (host->host_skelton[n] ==
		    host->tailurb[URB_TAIL_CONTROL])
			host->tailurb[URB_TAIL_CONTROL] = urb;
		if (host->host_skelton[n] ==
		    host->tailurb[URB_TAIL_BULK])
			host->tailurb[URB_TAIL_BULK] = urb;
		break;
	case USB_ENDPOINT_TYPE_CONTROL:
		urb->link_prev = host->tailurb[URB_TAIL_CONTROL];
		if (host->tailurb[URB_TAIL_CONTROL] == 
		    host->tailurb[URB_TAIL_BULK])
			host->tailurb[URB_TAIL_BULK] = urb;
		host->tailurb[URB_TAIL_CONTROL] = urb;
		break;
	case USB_ENDPOINT_TYPE_BULK:
		urb->link_prev = host->tailurb[URB_TAIL_BULK];
		host->tailurb[URB_TAIL_BULK] = urb;
		break;
	case USB_ENDPOINT_TYPE_ISOCHRONOUS:
	default:
		printf("%s: transfer type(%02x) unsupported.\n",
		       __FUNCTION__, type);
		status = urb->status;
		/* release the lock before bailing out */
		spinlock_unlock(&host->lock_hfl);
		return status;
	}

	/* initialize qh_element_copy for detecting advance after NAK */
	URB_UHCI(urb)->qh_element_copy = URB_UHCI(urb)->qh->element;

	/* urb link */
	urb->link_next = urb->link_prev->link_next;
	urb->link_prev->link_next = urb;
	if (urb->link_next) {
		/* make a backward pointer */
		urb->link_next->link_prev = urb;
	} else if (type == USB_ENDPOINT_TYPE_BULK) {
		if (host->fsbr) {
			dprintft(2, "%04x: %s: append it to the "
				 "FSBR loopback.\n",
				 host->iobase, __FUNCTION__);
			host->fsbr_loop_tail = urb;
		} else {
			dprintft(2, "%04x: %s: make a FSBR loopback.\n",
				 host->iobase, __FUNCTION__);
			host->fsbr = 1;
			host->fsbr_loop_head = urb;
			host->fsbr_loop_tail = urb;
		}
	}

	/* record the current frame number */
	URB_UHCI(urb)->frnum_issued = uhci_current_frame_number(host);

	/* qh link */
	URB_UHCI(urb)->qh->link = URB_UHCI(urb->link_prev)->qh->link;
	if (host->fsbr_loop_tail)
		URB_UHCI(host->fsbr_loop_tail)->qh->link = (phys32_t)
			URB_UHCI(host->fsbr_loop_head)->qh_phys | 
			UHCI_QH_LINK_QH;
	URB_UHCI(urb->link_prev)->qh->link = 
		URB_UHCI(urb)->qh_phys | UHCI_QH_LINK_QH;

	urb->status = URB_STATUS_RUN;

	dprintft(3, "%s: The urb link is %p <- %p -> %p.\n", 
		__FUNCTION__, urb->link_prev, urb, urb->link_next);

	status = urb->status;
	LIST4_PUSH (host->inproc_urbs, list, urb);
	spinlock_unlock(&host->lock_hfl);

	return status;
}
Example No. 27
void tprintf_flush(void)
{
    spinlock_lock(&buffer_lock);
    __tprintf_flush();
    spinlock_unlock(&buffer_lock);
}
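
The double-underscore worker is the unlocked body, which lets callers that already hold buffer_lock (such as tprintf in Example No. 6) flush without recursing on the lock. A minimal sketch of what such a worker might look like, assuming the buffer/buffer_use globals from the earlier examples; the actual output path is an assumption:

static void __tprintf_flush(void)
{
    /* caller must hold buffer_lock */
    fwrite(buffer, 1, buffer_use, stdout);
    fflush(stdout);
    buffer_use = 0;
}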
Example No. 28
static void tty_ctrl_input_thread(int argc, void *argv[])
{
    (void) argc;

    int ret;
    struct input_event event;
    struct tty_ctrl *ctrl = argv[0];

    for (;;) {
        ret = read(ctrl->kbd_fd, &event, sizeof (struct input_event));
        if (ret < 0)
            continue;

        if (event.value == EV_KEY_PRESSED)
        {
            switch (event.code) {
                case KEY_LEFTSHIFT:
                case KEY_RIGHTSHIFT:
                    ctrl->input.shift = 1;
                    break;
                case KEY_LEFTCTRL:
                    ctrl->input.ctrl = 1;
                    break;
                default:
                    if (ctrl->input.shift)
                        tty_ctrl_input_push(ctrl, keymap[event.code][2]);
                    else if (ctrl->input.ctrl)
                        tty_ctrl_input_push(ctrl, keymap[event.code][1]);
                    else
                        tty_ctrl_input_push(ctrl, keymap[event.code][0]);
                    break;
            }
        } else if (event.value == EV_KEY_RELEASED) {
            switch (event.code) {
                case KEY_LEFTSHIFT:
                case KEY_RIGHTSHIFT:
                    ctrl->input.shift = 0;
                    break;
                case KEY_LEFTCTRL:
                    ctrl->input.ctrl = 0;
                    break;
            }
        }

        spinlock_lock(&ctrl->input.lock);

        if (ctrl->slaves[ctrl->nb_slave].slave_id >= 0) {
            struct resp_rdwr resp;
            struct req_rdwr *req = &ctrl->slaves[ctrl->nb_slave].req;

            resp.size = 0;
            resp.ret = 0;

            while (req->size && ctrl->input.size > 0) {
                *((char *)(req->data++)) = tty_ctrl_input_pop(ctrl);
                --req->size;
                ++resp.size;
            }

            resp.hdr.slave_id = ctrl->slaves[ctrl->nb_slave].slave_id;

            write(ctrl->driver.channel_fd, &resp, sizeof (resp));

            ctrl->slaves[ctrl->nb_slave].slave_id = -1;
        }

        spinlock_unlock(&ctrl->input.lock);
    }
}
Example No. 29
void __rwsem_down_read(rwsem_t *rwsem, wqueue_insop_t iop)
{
  spinlock_lock(&rwsem->sem_lock);
  __rwsem_down_read_core(rwsem, iop);
  spinlock_unlock(&rwsem->sem_lock);
}
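
The write-side entry point would follow the same wrapper shape, delegating the real work to a core routine while sem_lock is held; a minimal sketch, assuming a __rwsem_down_write_core() counterpart exists:

void __rwsem_down_write(rwsem_t *rwsem, wqueue_insop_t iop)
{
  spinlock_lock(&rwsem->sem_lock);
  __rwsem_down_write_core(rwsem, iop);
  spinlock_unlock(&rwsem->sem_lock);
}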
Example No. 30
/* TODO: reintroduce barrier's ops to deal with case-specific treatment */
error_t barrier_wait(struct barrier_s *barrier)
{
	register uint_t ticket;
	register uint_t index;
	register uint_t wqdbsz;
	register wqdb_t *wqdb;
	register bool_t isShared;
	struct thread_s *this;
	uint_t tm_now;

	tm_now   = cpu_time_stamp();
	this     = current_thread;
	index    = this->info.order;
	ticket   = 0;
	isShared = (barrier->owner == NULL) ? true : false;

	if((barrier->signature != BARRIER_ID) || ((isShared == false) && (barrier->owner != this->task)))
		return EINVAL;

	wqdbsz = PMM_PAGE_SIZE / sizeof(wqdb_record_t);

	if(isShared)
	{
		spinlock_lock(&barrier->lock);
		index  = barrier->index ++;
		ticket = barrier->count - index;
	}

	wqdb   = barrier->wqdb_tbl[index / wqdbsz];

#if CONFIG_USE_SCHED_LOCKS
	wqdb->tbl[index % wqdbsz].listner = (void*)this;
#else
	uint_t irq_state;
	cpu_disable_all_irq(&irq_state); /* To prevent against any scheduler intervention */
	wqdb->tbl[index % wqdbsz].event   = sched_event_make (this, SCHED_OP_WAKEUP);
	wqdb->tbl[index % wqdbsz].listner = sched_get_listner(this, SCHED_OP_WAKEUP);
#endif

	if(isShared == false)
		ticket = atomic_add(&barrier->waiting, -1);

	if(ticket == 1)
	{
#if !(CONFIG_USE_SCHED_LOCKS)
		cpu_restore_irq(irq_state);
#endif
		barrier->tm_last = tm_now;
		wqdb->tbl[index % wqdbsz].listner = NULL;

		if(isShared)
		{
			barrier->index = 0;
			spinlock_unlock(&barrier->lock);
		}
		else
			atomic_init(&barrier->waiting, barrier->count);

		barrier_do_broadcast(barrier);
		return PTHREAD_BARRIER_SERIAL_THREAD;
	}

	if(ticket == barrier->count)
		barrier->tm_first = tm_now;

	/* the lock was only taken on the shared path above */
	if(isShared)
		spinlock_unlock_nosched(&barrier->lock);

	sched_sleep(this);

#if !(CONFIG_USE_SCHED_LOCKS)
	cpu_restore_irq(irq_state);
#endif
	return 0;
}