/*
 * mt_usb_set_vbus - drive VBUS on or off for USB OTG host mode.
 * @musb:  MUSB controller instance (unused here; kept for the callback
 *         signature expected by the musb core).
 * @is_on: non-zero to source VBUS (host mode), zero to stop sourcing it.
 *
 * VBUS is sourced either by a charger IC's boost converter (one of the
 * FAN5405/BQ24261/BQ24296/BQ24196/NCP1854 paths, selected at build time)
 * or, when no charger IC is configured, by toggling a dedicated GPIO
 * (legacy GPIO API or pinctrl, depending on kernel config).
 * The whole body is compiled out on FPGA platforms, which have no VBUS
 * switch to control.
 */
void mt_usb_set_vbus(struct musb *musb, int is_on)
{
	DBG(0, "mt65xx_usb20_vbus++,is_on=%d\r\n", is_on);
#ifndef FPGA_PLATFORM
	if (is_on) {
		/* power on VBUS, implement later... */
#ifdef CONFIG_MTK_FAN5405_SUPPORT
		fan5405_set_opa_mode(1);
		fan5405_set_otg_pl(1);
		fan5405_set_otg_en(1);
#elif defined(CONFIG_MTK_BQ24261_SUPPORT)
		bq24261_set_en_boost(1);
#elif defined(CONFIG_MTK_BQ24296_SUPPORT)
		bq24296_set_otg_config(0x1);	/* OTG */
		bq24296_set_boostv(0x7);	/* boost voltage 4.998V */
		bq24296_set_boost_lim(0x1);	/* 1.5A on VBUS */
		bq24296_set_en_hiz(0x0);
#elif defined(CONFIG_MTK_BQ24196_SUPPORT)
		bq24196_set_otg_config(0x01);	/* OTG */
		bq24196_set_boost_lim(0x01);	/* 1.3A on VBUS */
#elif defined(CONFIG_MTK_NCP1854_SUPPORT)
		/* NCP1854: force OTG off and charging off before enabling
		 * OTG, so the part switches modes from a known state. */
		ncp1854_set_otg_en(0);
		ncp1854_set_chg_en(0);
		ncp1854_set_otg_en(1);
#else
		/* No charger IC: VBUS is switched by a GPIO. */
#ifdef CONFIG_OF
#if defined(CONFIG_MTK_LEGACY)
		mt_set_gpio_mode(drvvbus_pin, drvvbus_pin_mode);
		mt_set_gpio_out(drvvbus_pin, GPIO_OUT_ONE);
#else
		pr_debug("****%s:%d Drive VBUS HIGH KS!!!!!\n", __func__, __LINE__);
		pinctrl_select_state(pinctrl, pinctrl_drvvbus_high);
#endif
#else
		mt_set_gpio_mode(GPIO_OTG_DRVVBUS_PIN, GPIO_OTG_DRVVBUS_PIN_M_GPIO);
		mt_set_gpio_out(GPIO_OTG_DRVVBUS_PIN, GPIO_OUT_ONE);
#endif
#endif
	} else {
		/* power off VBUS, implement later... */
#ifdef CONFIG_MTK_FAN5405_SUPPORT
		/* Raw register writes restore the FAN5405 default
		 * (non-boost) configuration. */
		fan5405_reg_config_interface(0x01, 0x30);
		fan5405_reg_config_interface(0x02, 0x8e);
#elif defined(CONFIG_MTK_BQ24261_SUPPORT)
		bq24261_set_en_boost(0);
#elif defined(CONFIG_MTK_BQ24296_SUPPORT)
		bq24296_set_otg_config(0);
#elif defined(CONFIG_MTK_BQ24196_SUPPORT)
		bq24196_set_otg_config(0x0);	/* OTG disabled */
#elif defined(CONFIG_MTK_NCP1854_SUPPORT)
		ncp1854_set_otg_en(0x0);
#else
#ifdef CONFIG_OF
#if defined(CONFIG_MTK_LEGACY)
		mt_set_gpio_mode(drvvbus_pin, drvvbus_pin_mode);
		mt_set_gpio_out(drvvbus_pin, GPIO_OUT_ZERO);
#else
		pr_debug("****%s:%d Drive VBUS LOW KS!!!!!\n", __func__, __LINE__);
		pinctrl_select_state(pinctrl, pinctrl_drvvbus_low);
#endif
#else
		mt_set_gpio_mode(GPIO_OTG_DRVVBUS_PIN, GPIO_OTG_DRVVBUS_PIN_M_GPIO);
		mt_set_gpio_out(GPIO_OTG_DRVVBUS_PIN, GPIO_OUT_ZERO);
#endif
#endif
	}
#endif
}
static void cleanup_devices(void)
{
	/*
	 * Bring down any running interface that ConnMan is supposed to
	 * manage, so the kernel flushes its stale routes and addresses
	 * before the rtnl/detect code activates interface watches.
	 * Interfaces whose address configuration was provisioned from a
	 * config file are left untouched.
	 *
	 * ConnMan will bring the interfaces back UP on its own later,
	 * so only the DOWN transition is done here.
	 */
	char **ifnames;
	int pos;

	ifnames = __connman_inet_get_running_interfaces();
	if (!ifnames)
		return;

	for (pos = 0; ifnames[pos]; pos++) {
		struct sockaddr_in sin_addr, sin_mask;
		int ifindex;

		if (__connman_device_isfiltered(ifnames[pos]))
			continue;

		ifindex = connman_inet_ifindex(ifnames[pos]);
		if (ifindex < 0)
			continue;

		if (!__connman_inet_get_address_netmask(ifindex, &sin_addr,
							&sin_mask)) {
			char *address = g_strdup(inet_ntoa(sin_addr.sin_addr));
			char *netmask = g_strdup(inet_ntoa(sin_mask.sin_addr));
			bool provisioned =
				__connman_config_address_provisioned(address,
								netmask);

			if (provisioned)
				DBG("Skip %s which is already provisioned "
					"with %s/%s", ifnames[pos],
					address, netmask);

			g_free(address);
			g_free(netmask);

			if (provisioned)
				continue;
		}

		DBG("cleaning up %s index %d", ifnames[pos], ifindex);

		connman_inet_ifdown(ifindex);
	}

	g_strfreev(ifnames);
}
/*
 * tx_iso_complete - USB completion handler for an outgoing isochronous URB.
 *
 * Refills the just-completed URB with the next slice of the FIFO's pending
 * skb (at most 14 payload bytes per ISO packet, plus one status byte),
 * maintains the "bit_line" flow-control budget to avoid overflowing the
 * hardware FIFO, signals PH_DATA|CONFIRM upstream when a frame has been
 * fully queued, and resubmits the URB.  On errors the URB is simply not
 * resubmitted, stopping the ISO stream for this FIFO.
 */
static void tx_iso_complete(struct urb *urb)
{
	iso_urb_struct *context_iso_urb = (iso_urb_struct *) urb->context;
	usb_fifo *fifo = context_iso_urb->owner_fifo;
	hfcusb_data *hfc = fifo->hfc;
	int k, tx_offset, num_isoc_packets, sink, len, current_len,
	    errcode;
	int frame_complete, transp_mode, fifon, status;
	__u8 threshbit;

	fifon = fifo->fifonum;
	status = urb->status;

	tx_offset = 0;

	/* ISO transfer only partially completed,
	   look at individual frame status for details */
	if (status == -EXDEV) {
		DBG(HFCUSB_DBG_VERBOSE_USB, "HFC-S USB: tx_iso_complete with -EXDEV"
		    ", urb->status %d, fifonum %d\n", status, fifon);
		for (k = 0; k < iso_packets[fifon]; ++k) {
			errcode = urb->iso_frame_desc[k].status;
			if (errcode)
				DBG(HFCUSB_DBG_VERBOSE_USB, "HFC-S USB: tx_iso_complete "
				    "packet %i, status: %i\n", k, errcode);
		}
		/* clear status, so go on with ISO transfers */
		status = 0;
	}

	if (fifo->active && !status) {
		/* B channels (fifon < 4) may run transparent (non-HDLC) mode */
		transp_mode = 0;
		if (fifon < 4 && hfc->b_mode[fifon / 2] == L1_MODE_TRANS)
			transp_mode = 1;

		/* is FifoFull-threshold set for our channel? */
		threshbit = (hfc->threshold_mask & (1 << fifon));
		num_isoc_packets = iso_packets[fifon];

		/* predict dataflow to avoid fifo overflow */
		if (fifon >= HFCUSB_D_TX) {
			sink = (threshbit) ? SINK_DMIN : SINK_DMAX;
		} else {
			sink = (threshbit) ? SINK_MIN : SINK_MAX;
		}

		/* rearm this URB for the next round before filling it */
		fill_isoc_urb(urb, fifo->hfc->dev, fifo->pipe,
			      context_iso_urb->buffer, num_isoc_packets,
			      fifo->usb_packet_maxlen, fifo->intervall,
			      tx_iso_complete, urb->context);
		memset(context_iso_urb->buffer, 0,
		       sizeof(context_iso_urb->buffer));
		frame_complete = 0;

		/* Generate next ISO Packets */
		for (k = 0; k < num_isoc_packets; ++k) {
			if (fifo->skbuff) {
				len = fifo->skbuff->len;

				/* we lower data margin every msec */
				fifo->bit_line -= sink;
				current_len = (0 - fifo->bit_line) / 8;
				/* maximum 15 byte for every ISO packet makes our life easier */
				if (current_len > 14)
					current_len = 14;
				current_len = (len <= current_len) ? len : current_len;

				/* how much bit do we put on the line? */
				fifo->bit_line += current_len * 8;

				/* first buffer byte: frame-completion marker */
				context_iso_urb->buffer[tx_offset] = 0;
				if (current_len == len) {
					if (!transp_mode) {
						/* here frame completion */
						context_iso_urb->buffer[tx_offset] = 1;
						/* add 2 byte flags and 16bit CRC at end of ISDN frame */
						fifo->bit_line += 32;
					}
					frame_complete = 1;
				}

				memcpy(context_iso_urb->buffer + tx_offset + 1,
				       fifo->skbuff->data, current_len);
				skb_pull(fifo->skbuff, current_len);

				/* define packet delimeters within the URB buffer */
				urb->iso_frame_desc[k].offset = tx_offset;
				urb->iso_frame_desc[k].length = current_len + 1;

				tx_offset += (current_len + 1);
			} else {
				/* no pending data: send a 1-byte idle packet */
				urb->iso_frame_desc[k].offset = tx_offset++;
				urb->iso_frame_desc[k].length = 1;
				/* we lower data margin every msec */
				fifo->bit_line -= sink;
				if (fifo->bit_line < BITLINE_INF) {
					fifo->bit_line = BITLINE_INF;
				}
			}

			if (frame_complete) {
				/* confirm the frame upstream and release the skb */
				fifo->delete_flg = 1;
				fifo->hif->l1l2(fifo->hif, PH_DATA | CONFIRM,
						(void *) (unsigned long) fifo->skbuff->truesize);
				if (fifo->skbuff && fifo->delete_flg) {
					dev_kfree_skb_any(fifo->skbuff);
					fifo->skbuff = NULL;
					fifo->delete_flg = 0;
				}
				frame_complete = 0;
			}
		}

		errcode = usb_submit_urb(urb, GFP_ATOMIC);
		if (errcode < 0) {
			printk(KERN_INFO
			       "HFC-S USB: error submitting ISO URB: %d\n",
			       errcode);
		}
	} else {
		/* inactive FIFO or hard error: report unless disconnecting */
		if (status && !hfc->disc_flag) {
			printk(KERN_INFO
			       "HFC-S USB: tx_iso_complete: error(%i): '%s', fifonum=%d\n",
			       status, symbolic(urb_errlist, status), fifon);
		}
	}
}
/**
 * Resolve name using DNS
 *
 * @v resolv		Name resolution interface
 * @v name		Name to resolve
 * @v sa		Socket address to fill in (port etc. are preserved;
 *			only the address is filled in on completion)
 * @ret rc		Return status code
 *
 * On success the newly created request owns itself (the caller's
 * reference is dropped via ref_put() after plugging the interfaces);
 * on failure the same ref_put() destroys the half-built request.
 */
static int dns_resolv ( struct resolv_interface *resolv,
			const char *name, struct sockaddr *sa ) {
	struct dns_request *dns;
	char *fqdn;
	int rc;

	/* Fail immediately if no DNS servers */
	if ( ! nameserver.st_family ) {
		DBG ( "DNS not attempting to resolve \"%s\": "
		      "no DNS servers\n", name );
		rc = -ENXIO;
		goto err_no_nameserver;
	}

	/* Ensure fully-qualified domain name if DHCP option was given */
	fqdn = dns_qualify_name ( name );
	if ( ! fqdn ) {
		rc = -ENOMEM;
		goto err_qualify_name;
	}

	/* Allocate DNS structure */
	dns = zalloc ( sizeof ( *dns ) );
	if ( ! dns ) {
		rc = -ENOMEM;
		goto err_alloc_dns;
	}
	ref_init ( &dns->refcnt, NULL );
	resolv_init ( &dns->resolv, &null_resolv_ops, &dns->refcnt );
	xfer_init ( &dns->socket, &dns_socket_operations, &dns->refcnt );
	timer_init ( &dns->timer, dns_timer_expired );
	memcpy ( &dns->sa, sa, sizeof ( dns->sa ) );

	/* Create query */
	dns->query.dns.flags = htons ( DNS_FLAG_QUERY | DNS_FLAG_OPCODE_QUERY |
				       DNS_FLAG_RD );
	dns->query.dns.qdcount = htons ( 1 );
	/* NOTE(review): dns_make_name's result is not checked here;
	 * presumably it cannot fail for a name that fit in the payload
	 * buffer — confirm against dns_make_name's contract. */
	dns->qinfo = ( void * ) dns_make_name ( fqdn, dns->query.payload );
	dns->qinfo->qtype = htons ( DNS_TYPE_A );
	dns->qinfo->qclass = htons ( DNS_CLASS_IN );

	/* Open UDP connection */
	if ( ( rc = xfer_open_socket ( &dns->socket, SOCK_DGRAM,
				       ( struct sockaddr * ) &nameserver,
				       NULL ) ) != 0 ) {
		DBGC ( dns, "DNS %p could not open socket: %s\n",
		       dns, strerror ( rc ) );
		goto err_open_socket;
	}

	/* Send first DNS packet */
	dns_send_packet ( dns );

	/* Attach parent interface, mortalise self, and return */
	resolv_plug_plug ( &dns->resolv, resolv );
	ref_put ( &dns->refcnt );
	free ( fqdn );
	return 0;

 err_open_socket:
 err_alloc_dns:
	/* Dropping the last reference frees the request */
	ref_put ( &dns->refcnt );
 err_qualify_name:
	free ( fqdn );
 err_no_nameserver:
	return rc;
}
/* Two-parameter variant of the print fixup: log the raw parameter
 * value, then delegate to the common fixup implementation. */
static int print_fixup_f_2(void **param, int param_no)
{
	const char *value = (char*)*param;

	DBG("print: print_fixup_f_2('%s')\n", value);

	return print_fixup_f(param, param_no);
}
static int8 recv_handler(void) { uint8 *cur, *end; uint8 recv_ip[4], opt_len, msg_type; int32 recv_len; uint16 recv_port; recv_len = GetSocketRxRecvBufferSize(di.sock); if(recv_len == 0) return RET_NOK; else memset(&dm, 0, sizeof(struct dhcp_msg)); recv_len = UDPRecv(di.sock, (int8*)&dm, sizeof(struct dhcp_msg), recv_ip, &recv_port); if(recv_len < 0) { ERRA("UDPRecv fail - ret(%d)", recv_len); return RET_NOK; } DBGA("DHCP_SIP:%d.%d.%d.%d",di.srv_ip[0],di.srv_ip[1],di.srv_ip[2],di.srv_ip[3]); DBGA("DHCP_RIP:%d.%d.%d.%d",di.srv_ip_real[0],di.srv_ip_real[1],di.srv_ip_real[2],di.srv_ip_real[3]); DBGA("recv_ip:%d.%d.%d.%d",recv_ip[0],recv_ip[1],recv_ip[2],recv_ip[3]); if(dm.op != DHCP_BOOTREPLY || recv_port != DHCP_SERVER_PORT) { if(dm.op != DHCP_BOOTREPLY) DBG("DHCP : NO DHCP MSG"); if(recv_port != DHCP_SERVER_PORT) DBG("DHCP : WRONG PORT"); return RET_NOK; } if(memcmp(dm.chaddr, storage.Mac, 6) != 0 || dm.xid != htonl(di.xid)) { DBG("No My DHCP Message. This message is ignored."); DBGA("SRC_MAC_ADDR(%02X:%02X:%02X:%02X:%02X:%02X)", storage.Mac[0], storage.Mac[1], storage.Mac[2], storage.Mac[3], storage.Mac[4], storage.Mac[5]); DBGA("chaddr(%02X:%02X:%02X:%02X:%02X:%02X)", dm.chaddr[0], dm.chaddr[1], dm.chaddr[2], dm.chaddr[3], dm.chaddr[4], dm.chaddr[5]); DBGA("DHCP_XID(%08lX), xid(%08lX), yiaddr(%d.%d.%d.%d)", htonl(di.xid), dm.xid, dm.yiaddr[0], dm.yiaddr[1], dm.yiaddr[2], dm.yiaddr[3]); return RET_NOK; } if( *((uint32*)di.srv_ip) != 0x00000000 ) { if( *((uint32*)di.srv_ip_real) != *((uint32*)recv_ip) && *((uint32*)di.srv_ip) != *((uint32*)recv_ip) ) { DBG("Another DHCP sever send a response message. 
This is ignored."); DBGA("IP:%d.%d.%d.%d",recv_ip[0],recv_ip[1],recv_ip[2],recv_ip[3]); return RET_NOK; } } memcpy(workinfo.IP, dm.yiaddr, 4); DBG("DHCP MSG received.."); DBGA("yiaddr : %d.%d.%d.%d",workinfo.IP[0],workinfo.IP[1],workinfo.IP[2],workinfo.IP[3]); msg_type = 0; cur = (uint8 *)(&dm.op); cur = cur + 240; end = cur + (recv_len - 240); //printf("cur : 0x%08X end : 0x%08X recv_len : %d\r\n", cur, end, recv_len); while ( cur < end ) { switch ( *cur++ ) { case padOption: break; case endOption: return msg_type; case dhcpMessageType: opt_len = *cur++; msg_type = *cur; DBGA("dhcpMessageType : %x", msg_type); break; case subnetMask: opt_len =* cur++; memcpy(workinfo.SN,cur,4); DBGA("subnetMask : %d.%d.%d.%d", workinfo.SN[0],workinfo.SN[1],workinfo.SN[2],workinfo.SN[3]); break; case routersOnSubnet: opt_len = *cur++; memcpy(workinfo.GW,cur,4); DBGA("routersOnSubnet : %d.%d.%d.%d", workinfo.GW[0],workinfo.GW[1],workinfo.GW[2],workinfo.GW[3]); break; case dns: opt_len = *cur++; memcpy(workinfo.DNS,cur,4); break; case dhcpIPaddrLeaseTime: opt_len = *cur++; di.lease_time = ntohl(*((uint32*)cur)); di.renew_time = di.lease_time / 2; // 0.5 di.rebind_time = di.lease_time / 8 * 7; // 0.875 DBGA("lease(%d), renew(%d), rebind(%d)", di.lease_time, di.renew_time, di.rebind_time); break; case dhcpServerIdentifier: opt_len = *cur++; DBGA("DHCP_SIP : %d.%d.%d.%d", di.srv_ip[0], di.srv_ip[1], di.srv_ip[2], di.srv_ip[3]); if( *((uint32*)di.srv_ip) == 0 || *((uint32*)di.srv_ip_real) == *((uint32*)recv_ip) || *((uint32*)di.srv_ip) == *((uint32*)recv_ip) ) { memcpy(di.srv_ip,cur,4); memcpy(di.srv_ip_real,recv_ip,4); // Copy the real ip address of my DHCP server DBGA("My dhcpServerIdentifier : %d.%d.%d.%d", di.srv_ip[0], di.srv_ip[1], di.srv_ip[2], di.srv_ip[3]); DBGA("My DHCP server real IP address : %d.%d.%d.%d", di.srv_ip_real[0], di.srv_ip_real[1], di.srv_ip_real[2], di.srv_ip_real[3]); } else { DBGA("Another dhcpServerIdentifier : MY(%d.%d.%d.%d)", di.srv_ip[0], di.srv_ip[1], 
di.srv_ip[2], di.srv_ip[3]); DBGA("Another(%d.%d.%d.%d)", recv_ip[0], recv_ip[1], recv_ip[2], recv_ip[3]); } break; default: opt_len = *cur++; DBGA("opt_len : %d", opt_len); break; } // switch cur+=opt_len; } // while return RET_NOK; }
/* GPU timestamp queries are not supported by this driver yet;
 * log the fact and report a zero timestamp. */
static uint64_t etna_screen_get_timestamp(struct pipe_screen *screen)
{
   DBG("unimplemented etna_screen_get_timestamp");

   return (uint64_t)0;
}
/* Set IRQ flags: writing a bit to the "set" register raises the
 * corresponding mailbox IRQ flag without touching the others. */
static void set_spi_irq_flags(struct spi_mailbox *mailbox, uint8_t flagValue)
{
  DBG("Setting mailbox flags \n");

  XIo_Out32(REGISTER_ADDRESS(mailbox, SPI_IRQ_FLAGS_SET_REG),
            flagValue);
}
/* Clear IRQ flags: writing a bit to the "clear" register lowers the
 * corresponding mailbox IRQ flag without touching the others. */
static void clear_spi_irq_flags(struct spi_mailbox *mailbox, uint8_t flagValue)
{
  DBG("Clearing mailbox flags \n");

  XIo_Out32(REGISTER_ADDRESS(mailbox, SPI_IRQ_FLAGS_CLEAR_REG),
            flagValue);
}
/*
 * IOCTL handler
 *
 * Handles interface address/flag/MTU changes, media queries and the
 * ATM-specific VCC operations.  The softc mutex protects the running
 * state and the VCC table; it is taken per-command rather than around
 * the whole switch.
 */
int hatm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct hatm_softc *sc = ifp->if_softc;
	struct atmio_vcctable *vtab;
	int error = 0;

	switch (cmd) {

	  case SIOCSIFADDR:
		/* Setting an address implies bringing the interface up */
		mtx_lock(&sc->mtx);
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
			hatm_initialize(sc);
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		  case AF_INET:
		  case AF_INET6:
			ifa->ifa_rtrequest = atm_rtrequest;
			break;
#endif
		  default:
			break;
		}
		mtx_unlock(&sc->mtx);
		break;

	  case SIOCSIFFLAGS:
		/* Start or stop the hardware to track IFF_UP */
		mtx_lock(&sc->mtx);
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				hatm_initialize(sc);
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				hatm_stop(sc);
			}
		}
		mtx_unlock(&sc->mtx);
		break;

	  case SIOCGIFMEDIA:
	  case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->media, cmd);
		break;

	  case SIOCSIFMTU:
		/*
		 * Set the interface MTU.
		 */
		if (ifr->ifr_mtu > ATMMTU)
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;

	  case SIOCATMGVCCS:
		/* return vcc table */
		/* last argument 1 = waitok: allocation cannot fail here */
		vtab = atm_getvccs((struct atmio_vcc **)sc->vccs,
		    HE_MAX_VCCS, sc->open_vccs, &sc->mtx, 1);
		error = copyout(vtab, ifr->ifr_data, sizeof(*vtab) +
		    vtab->count * sizeof(vtab->vccs[0]));
		free(vtab, M_DEVBUF);
		break;

	  case SIOCATMGETVCCS:	/* netgraph internal use */
		/* non-sleeping variant: allocation may fail */
		vtab = atm_getvccs((struct atmio_vcc **)sc->vccs,
		    HE_MAX_VCCS, sc->open_vccs, &sc->mtx, 0);
		if (vtab == NULL) {
			error = ENOMEM;
			break;
		}
		*(void **)data = vtab;
		break;

	  case SIOCATMOPENVCC:		/* kernel internal use */
		error = hatm_open_vcc(sc, (struct atmio_openvcc *)data);
		break;

	  case SIOCATMCLOSEVCC:		/* kernel internal use */
		error = hatm_close_vcc(sc, (struct atmio_closevcc *)data);
		break;

	  default:
		DBG(sc, IOCTL, ("cmd=%08lx arg=%p", cmd, data));
		error = EINVAL;
		break;
	}
	return (error);
}
/*
 * Try to open the given VCC.
 *
 * Validates the VPI/VCI, flags, AAL and traffic class, allocates a
 * per-VCC structure and installs it in the softc's VCC table under the
 * softc mutex.  Returns 0 on success or a standard errno.  On any
 * failure the allocated structure is freed again (vcc is set to NULL on
 * the success path so the common exit won't free it).
 */
static int hatm_open_vcc(struct hatm_softc *sc, struct atmio_openvcc *arg)
{
	u_int cid;
	struct hevcc *vcc;
	int error = 0;

	DBG(sc, VCC, ("Open VCC: %u.%u flags=%#x", arg->param.vpi,
	    arg->param.vci, arg->param.flags));

	/* VPI/VCI must be in range; VCI 0 is reserved */
	if ((arg->param.vpi & ~HE_VPI_MASK) ||
	    (arg->param.vci & ~HE_VCI_MASK) ||
	    (arg->param.vci == 0))
		return (EINVAL);
	cid = HE_CID(arg->param.vpi, arg->param.vci);

	/* a VCC with neither TX nor RX makes no sense */
	if ((arg->param.flags & ATMIO_FLAG_NOTX) &&
	    (arg->param.flags & ATMIO_FLAG_NORX))
		return (EINVAL);

	/* allocate before taking the mutex (M_NOWAIT may still fail) */
	vcc = uma_zalloc(sc->vcc_zone, M_NOWAIT | M_ZERO);
	if (vcc == NULL)
		return (ENOMEM);

	mtx_lock(&sc->mtx);
	if (!(sc->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		error = EIO;
		goto done;
	}
	if (sc->vccs[cid] != NULL) {
		error = EBUSY;
		goto done;
	}
	vcc->param = arg->param;
	vcc->rxhand = arg->rxhand;
	switch (vcc->param.aal) {
	  case ATMIO_AAL_0:
	  case ATMIO_AAL_5:
	  case ATMIO_AAL_RAW:
		break;
	  default:
		error = EINVAL;
		goto done;
	}
	switch (vcc->param.traffic) {
	  case ATMIO_TRAFFIC_UBR:
	  case ATMIO_TRAFFIC_CBR:
	  case ATMIO_TRAFFIC_ABR:
		break;
	  default:
		error = EINVAL;
		goto done;
	}
	vcc->ntpds = 0;
	vcc->chain = vcc->last = NULL;
	vcc->ibytes = vcc->ipackets = 0;
	vcc->obytes = vcc->opackets = 0;

	/* check TX resources unless this is an RX-only VCC */
	if (!(vcc->param.flags & ATMIO_FLAG_NOTX) &&
	    (error = hatm_tx_vcc_can_open(sc, cid, vcc)) != 0)
		goto done;

	/* ok - go ahead */
	sc->vccs[cid] = vcc;
	hatm_load_vc(sc, cid, 0);

	/* don't free below */
	vcc = NULL;

	sc->open_vccs++;

  done:
	mtx_unlock(&sc->mtx);
	if (vcc != NULL)
		uma_zfree(sc->vcc_zone, vcc);
	return (error);
}
//1 static irqreturn_t headset_interrupt(int irq, void *dev_id) { struct rk_headset_pdata *pdata = headset_info->pdata; static unsigned int old_status = 0; int i,level = 0; int adc_value = 0; wake_lock(&headset_info->headset_on_wake); if(headset_info->heatset_irq_working == BUSY || headset_info->heatset_irq_working == WAIT) return IRQ_HANDLED; DBG("In the headset_interrupt for read headset level wake_lock headset_on_wake\n"); headset_info->heatset_irq_working = BUSY; msleep(150); for(i=0; i<3; i++) { level = gpio_get_value(pdata->Headset_gpio); if(level < 0) { printk("%s:get pin level again,pin=%d,i=%d\n",__FUNCTION__,pdata->Headset_gpio,i); msleep(1); continue; } else break; } if(level < 0) { printk("%s:get pin level err!\n",__FUNCTION__); goto out; } old_status = headset_info->headset_status; switch(pdata->headset_in_type) { case HEADSET_IN_HIGH: if(level > 0) headset_info->headset_status = HEADSET_IN; else if(level == 0) headset_info->headset_status = HEADSET_OUT; break; case HEADSET_IN_LOW: if(level == 0) headset_info->headset_status = HEADSET_IN; else if(level > 0) headset_info->headset_status = HEADSET_OUT; break; default: DBG("---- ERROR: on headset headset_in_type error -----\n"); break; } if(old_status == headset_info->headset_status) { DBG("Read Headset IO level old status == now status\n"); goto out; } DBG("(headset in is %s)headset status is %s\n", pdata->headset_in_type?"high level":"low level", headset_info->headset_status?"in":"out"); if(headset_info->headset_status == HEADSET_IN) { #if 0 while(1) { if(adc_sync_read(headset_info->client) > HOOK_DEFAULT_VAL || adc_sync_read(headset_info->client) < 0) { printk("headset is showly inside\n"); } else break; msleep(50); if(pdata->headset_in_type == HEADSET_IN_HIGH) old_status = headset_info->headset_status = gpio_get_value(pdata->Headset_gpio)?HEADSET_IN:HEADSET_OUT; else old_status = headset_info->headset_status = gpio_get_value(pdata->Headset_gpio)?HEADSET_OUT:HEADSET_IN; if(headset_info->headset_status 
== HEADSET_OUT) goto out1; msleep(5); } #endif if(pdata->Hook_adc_chn>=0 && 3>=pdata->Hook_adc_chn) { // wait for find Hook key //#ifdef CONFIG_SND_SOC_RT5625 CHECK_AGAIN: //headset_info->isMic = rt5625_headset_mic_detect(true); #ifdef CONFIG_SND_SOC_WM8994 wm8994_headset_mic_detect(true); #endif #if defined (CONFIG_SND_SOC_RT3261) || defined (CONFIG_SND_SOC_RT3224) rt3261_headset_mic_detect(true); #endif #ifdef CONFIG_SND_SOC_RT5631_PHONE rt5631_headset_mic_detect(true); #endif //mdelay(400); adc_value = adc_sync_read(headset_info->client); if(adc_value >= 0 && adc_value < HOOK_LEVEL_LOW) { headset_info->isMic= 0;//No microphone #ifdef CONFIG_SND_SOC_WM8994 wm8994_headset_mic_detect(false); #endif #if defined (CONFIG_SND_SOC_RT3261) || defined (CONFIG_SND_SOC_RT3224) rt3261_headset_mic_detect(false); #endif #ifdef CONFIG_SND_SOC_RT5631_PHONE rt5631_headset_mic_detect(false); #endif } else if(adc_value >= HOOK_LEVEL_HIGH) headset_info->isMic = 1;//have mic if(headset_info->isMic < 0) { printk("codec is error\n"); headset_info->heatset_irq_working = WAIT; if(pdata->headset_in_type == HEADSET_IN_HIGH) irq_set_irq_type(headset_info->irq[HEADSET],IRQF_TRIGGER_LOW|IRQF_ONESHOT); else irq_set_irq_type(headset_info->irq[HEADSET],IRQF_TRIGGER_HIGH|IRQF_ONESHOT); schedule_delayed_work(&headset_info->h_delayed_work[HEADSET], msecs_to_jiffies(0)); wake_unlock(&headset_info->headset_on_wake); return IRQ_HANDLED; } //adc_value = adc_sync_read(headset_info->client); printk("headset adc value = %d\n",adc_value); if(headset_info->isMic) { if(adc_value > HOOK_DEFAULT_VAL || adc_value < HOOK_LEVEL_HIGH) goto CHECK_AGAIN; mod_timer(&headset_info->hook_timer, jiffies + msecs_to_jiffies(1000)); } //#endif headset_info->cur_headset_status = headset_info->isMic ? 
BIT_HEADSET:BIT_HEADSET_NO_MIC; } else { headset_info->isMic= 0;//No microphone headset_info->cur_headset_status = BIT_HEADSET_NO_MIC; } printk("headset->isMic = %d\n",headset_info->isMic); if(pdata->headset_in_type == HEADSET_IN_HIGH) irq_set_irq_type(headset_info->irq[HEADSET],IRQF_TRIGGER_FALLING); else irq_set_irq_type(headset_info->irq[HEADSET],IRQF_TRIGGER_RISING); } else if(headset_info->headset_status == HEADSET_OUT) { headset_info->cur_headset_status = ~(BIT_HEADSET|BIT_HEADSET_NO_MIC); del_timer(&headset_info->hook_timer); if(headset_info->isMic) { headset_info->hook_status = HOOK_UP; #ifdef CONFIG_SND_SOC_WM8994 //rt5625_headset_mic_detect(false); wm8994_headset_mic_detect(false); #endif #if defined (CONFIG_SND_SOC_RT3261) || defined (CONFIG_SND_SOC_RT3224) rt3261_headset_mic_detect(false); #endif #ifdef CONFIG_SND_SOC_RT5631_PHONE rt5631_headset_mic_detect(false); #endif } if(pdata->headset_in_type == HEADSET_IN_HIGH) irq_set_irq_type(headset_info->irq[HEADSET],IRQF_TRIGGER_RISING); else irq_set_irq_type(headset_info->irq[HEADSET],IRQF_TRIGGER_FALLING); } rk28_send_wakeup_key(); switch_set_state(&headset_info->sdev, headset_info->cur_headset_status); DBG("headset notice android headset status = %d\n",headset_info->cur_headset_status); // schedule_delayed_work(&headset_info->h_delayed_work[HEADSET], msecs_to_jiffies(0)); out: headset_info->heatset_irq_working = IDLE; wake_unlock(&headset_info->headset_on_wake); return IRQ_HANDLED; }
void SMPTE::SampleToTime() { // // make a temporary copy of the sample number // ulong tmp_sample = sample_number; // // keep track of the actual rates in use in doubles. // double the_smpte_rate = smpte_smpte_rates[ smpte_rate ]; double the_sample_rate = smpte_sample_rates[ sample_rate ]; // // keep track of the maximum frame number for this smpte format. // uchar max_frame = smpte_max_frames[ smpte_rate ]; // // Calculate the number of samples per frame. // double samples_per_frame = smpte_sample_rates[ sample_rate ] / smpte_smpte_rates[ smpte_rate ]; // // if the smpte rate is a drop frame type, calculate the number // of frames that must be dropped. // if ( smpte_rate == SMPTE_RATE_30DF || smpte_rate == SMPTE_RATE_2997DF ) { // // Calculate number of minutes that have gone by // // short num_minutes = (short)((double)tmp_sample/(smpte_sample_rates[sample_rate]))/60; int num_minutes = tmp_sample / ( 48000 * 60 ); DBG ( printf ( "num_minutes=%d\n", ( int ) num_minutes ) ); // // Calculate the number of tens of minutes that have gone by, including minute 00 // int ten_minutes = num_minutes / 10; DBG ( printf ( "ten_minutes=%d\n", ( int ) ten_minutes ) ); // // Calculate the number of frames that are dropped by this // time. // int drops = ( num_minutes - ten_minutes ) * 2; DBG ( printf ( "drops=%d\n", ( int ) drops ) ); // // Offset the tmp_sample number by this amount of frames. 
// DBG ( printf ( "tmp_sample before drops=%ld\n", ( long ) tmp_sample ) ); tmp_sample += ( ulong ) ( drops * samples_per_frame ); DBG ( printf ( "tmp_sample after drops=%ld\n", ( long ) tmp_sample ) ); } // // Calculate the time in sub frames, frames, seconds, minutes, hours // ulong rounded_sub_frames = ( ulong ) ( ( tmp_sample * the_smpte_rate * 100 ) / the_sample_rate + .5 ); DBG ( printf ( "rounded_sub_frames = %ld\n", rounded_sub_frames ) ); sub_frames = ( uchar ) ( ( rounded_sub_frames ) % 100 ); frames = ( uchar ) ( ( rounded_sub_frames / 100 ) % max_frame ); seconds = ( uchar ) ( ( rounded_sub_frames / ( 100L * max_frame ) ) % 60 ); minutes = ( uchar ) ( ( rounded_sub_frames / ( 100L * 60L * max_frame ) ) % 60 ); hours = ( uchar ) ( ( rounded_sub_frames / ( 100L * 60L * 24L * max_frame ) ) % 24 ); }
/*
 * cops_list_cb - parse a +COPS=? (operator list) response.
 *
 * First pass over the result counts the response tokens to size the
 * operator array (this over-counts, which is harmless since the array
 * is only an upper bound).  Second pass walks each parenthesised
 * operator entry: (status, long name, short name, numeric MCC+MNC
 * [, access technology [, plmn]]).  The long alphanumeric name is
 * preferred over the short one; missing technology defaults to GSM.
 * Finally the parsed list is handed to the oFono callback and freed.
 */
static void cops_list_cb(gboolean ok, GAtResult *result, gpointer user_data)
{
	struct cb_data *cbd = user_data;
	ofono_netreg_operator_list_cb_t cb = cbd->cb;
	struct ofono_network_operator *list;
	GAtResultIter iter;
	int num = 0;
	struct ofono_error error;

	decode_at_error(&error, g_at_result_final_response(result));

	if (!ok) {
		cb(&error, 0, NULL, cbd->data);
		return;
	}

	/* pass 1: count tokens to get an upper bound on the entry count */
	g_at_result_iter_init(&iter, result);

	while (g_at_result_iter_next(&iter, "+COPS:")) {
		while (g_at_result_iter_skip_next(&iter))
			num += 1;
	}

	DBG("Got %d elements", num);

	list = g_try_new0(struct ofono_network_operator, num);
	if (list == NULL) {
		CALLBACK_WITH_FAILURE(cb, 0, NULL, cbd->data);
		return;
	}

	/* pass 2: parse each parenthesised operator entry */
	num = 0;
	g_at_result_iter_init(&iter, result);

	while (g_at_result_iter_next(&iter, "+COPS:")) {
		int status, tech, plmn;
		const char *l, *s, *n;
		gboolean have_long = FALSE;

		while (1) {
			if (!g_at_result_iter_open_list(&iter))
				break;

			if (!g_at_result_iter_next_number(&iter, &status))
				break;

			list[num].status = status;

			if (!g_at_result_iter_next_string(&iter, &l))
				break;

			if (strlen(l) > 0) {
				have_long = TRUE;
				strncpy(list[num].name, l,
					OFONO_MAX_OPERATOR_NAME_LENGTH);
			}

			if (!g_at_result_iter_next_string(&iter, &s))
				break;

			/* fall back to the short name when no long name */
			if (strlen(s) > 0 && !have_long)
				strncpy(list[num].name, s,
					OFONO_MAX_OPERATOR_NAME_LENGTH);

			/* name[] is LENGTH+1 bytes; guarantee termination
			 * after the non-terminating strncpy above */
			list[num].name[OFONO_MAX_OPERATOR_NAME_LENGTH] = '\0';

			if (!g_at_result_iter_next_string(&iter, &n))
				break;

			extract_mcc_mnc(n, list[num].mcc, list[num].mnc);

			/* technology field is optional; default to GSM */
			if (!g_at_result_iter_next_number(&iter, &tech))
				tech = ACCESS_TECHNOLOGY_GSM;

			list[num].tech = tech;

			/* optional trailing PLMN field is read and ignored */
			if (!g_at_result_iter_next_number(&iter, &plmn))
				plmn = 0;

			if (!g_at_result_iter_close_list(&iter))
				break;

			num += 1;
		}
	}

	DBG("Got %d operators", num);

	{
		int i = 0;

		for (; i < num; i++) {
			DBG("Operator: %s, %s, %s, status: %d, %d",
				list[i].name, list[i].mcc, list[i].mnc,
				list[i].status, list[i].tech);
		}
	}

	cb(&error, num, list, cbd->data);
	g_free(list);
}
static void tx_complete(struct usb_ep *ep, struct usb_request *req) { struct sk_buff *skb = req->context; struct eth_dev *dev = ep->driver_data; struct net_device *net = dev->net; struct usb_request *new_req; struct usb_ep *in; int length; int retval; switch (req->status) { default: dev->net->stats.tx_errors++; VDBG(dev, "tx err %d\n", req->status); /* FALLTHROUGH */ case -ECONNRESET: /* unlink */ case -ESHUTDOWN: /* disconnect etc */ break; case 0: if (!req->zero) dev->net->stats.tx_bytes += req->length-1; else dev->net->stats.tx_bytes += req->length; } dev->net->stats.tx_packets++; spin_lock(&dev->req_lock); list_add_tail(&req->list, &dev->tx_reqs); if (dev->port_usb->multi_pkt_xfer) { dev->no_tx_req_used--; req->length = 0; in = dev->port_usb->in_ep; if (!list_empty(&dev->tx_reqs)) { new_req = container_of(dev->tx_reqs.next, struct usb_request, list); list_del(&new_req->list); spin_unlock(&dev->req_lock); if (new_req->length > 0) { length = new_req->length; /* NCM requires no zlp if transfer is * dwNtbInMaxSize */ if (dev->port_usb->is_fixed && length == dev->port_usb->fixed_in_len && (length % in->maxpacket) == 0) new_req->zero = 0; else new_req->zero = 1; /* use zlp framing on tx for strict CDC-Ether * conformance, though any robust network rx * path ignores extra padding. and some hardware * doesn't like to write zlps. */ if (new_req->zero && !dev->zlp && (length % in->maxpacket) == 0) { new_req->zero = 0; length++; } new_req->length = length; retval = usb_ep_queue(in, new_req, GFP_ATOMIC); switch (retval) { default: DBG(dev, "tx queue err %d\n", retval); break; case 0: spin_lock(&dev->req_lock); dev->no_tx_req_used++; spin_unlock(&dev->req_lock); net->trans_start = jiffies; } } else { spin_lock(&dev->req_lock); list_add(&new_req->list, &dev->tx_reqs); spin_unlock(&dev->req_lock); } } else {
/* Set IRQ mask: program which mailbox interrupt sources are enabled. */
static void set_spi_irq_mask(struct spi_mailbox *mailbox, uint8_t maskValue)
{
  DBG("Setting mailbox mask \n");

  XIo_Out32(REGISTER_ADDRESS(mailbox, SPI_IRQ_MASK_REG),
            maskValue);
}
/*
 * dhcp_run - advance the DHCP client state machine one step.
 *
 * Drives the INIT -> SEARCHING -> SELECTING -> REQUESTING -> IP_CHECK
 * -> BOUND progression: (re)opens the UDP socket if needed, sends
 * DISCOVER/REQUEST with retry counting, polls recv_handler() for
 * OFFER/ACK/NAK within the retry window, and commits the lease on
 * success.  Timing is driven by dhcp_run_tick/dhcp_run_cnt; unless a
 * branch returns explicitly ("alarm set is not needed"), the function
 * re-arms itself via the RET_ALARM label when the alarm mechanism is
 * in use.
 */
static void dhcp_run(void)
{
	/* remembers a failed UDPOpen so we back off before retrying */
	static bool udp_open_fail = FALSE;

	if(di.state == DHCP_STATE_INIT && di.action != DHCP_ACT_START) {
		DBG("wrong attempt");
		return;
	} else if(GetUDPSocketStatus(di.sock) == SOCKSTAT_CLOSED) {
		/* socket closed: (re)open it, with back-off after failure */
		if(udp_open_fail == TRUE &&
		   !IS_TIME_PASSED(dhcp_run_tick, DHCP_RETRY_DELAY))
			goto RET_ALARM;
		if(UDPOpen(di.sock, DHCP_CLIENT_PORT) == RET_OK) {
			if(dhcp_async) sockwatch_open(di.sock, dhcp_async_cb);
			udp_open_fail = FALSE;
			dhcp_run_tick = wizpf_get_systick();
			dhcp_run_cnt = 0;
		} else {
			ERR("UDPOpen fail");
			udp_open_fail = TRUE;
			dhcp_run_tick = wizpf_get_systick();
			goto RET_ALARM;
		}
	}

	switch(di.state) {
	case DHCP_STATE_INIT:
		/* small settle delay after opening the socket */
		if(dhcp_run_cnt==0 && !IS_TIME_PASSED(dhcp_run_tick, DHCP_OPEN_DELAY))
			goto RET_ALARM;
		if(dhcp_run_cnt < DHCP_SEND_RETRY_COUNT) {
			dhcp_run_cnt++;
			if(send_discover() == RET_OK) {	/* Discover ok */
				if(dhcp_async) {
					DBG("DHCP Discovery Send Async");
					sockwatch_set(di.sock, WATCH_SOCK_UDP_SEND);
					return;	/* alarm set is not needed */
				} else {
					DBG("DHCP Discovery Sent");
					SET_STATE(DHCP_STATE_SEARCHING);
					dhcp_run_tick = wizpf_get_systick();
				}
			} else {
				ERRA("DHCP Discovery SEND fail - (%d)times", dhcp_run_cnt);
				dhcp_run_tick = wizpf_get_systick();
			}
		} else {
			/* out of retries: give up and report failure */
			ERRA("DHCP Discovery SEND fail - (%d)times", dhcp_run_cnt);
			dhcp_run_cnt = 0;
			UDPClose(di.sock);
			if(dhcp_async) sockwatch_close(di.sock);
			dhcp_fail();
			return;	/* alarm set is not needed */
		}
		break;
	case DHCP_STATE_SEARCHING:
		/* wait for an OFFER within the retry window */
		if(!IS_TIME_PASSED(dhcp_run_tick, DHCP_RETRY_DELAY)) {
			int8 ret = recv_handler();
			if(ret == DHCP_MSG_OFFER) {
				SET_STATE(DHCP_STATE_SELECTING);
				dhcp_run_tick = wizpf_get_systick();
				dhcp_run_cnt = 0;
			} else if(ret != RET_NOK)
				DBGCRTCA(TRUE, "recv wrong packet(%d)", ret);
		} else {
			/* timed out: restart from INIT */
			ERRA("DHCP Offer RECV fail - for (%d)msec", DHCP_RETRY_DELAY);
			SET_STATE(DHCP_STATE_INIT);
			dhcp_run_tick = wizpf_get_systick();
		}
		break;
	case DHCP_STATE_SELECTING:
		if(dhcp_run_cnt < DHCP_SEND_RETRY_COUNT) {
			dhcp_run_cnt++;
			if(send_request() == RET_OK) {	/* Request ok */
				if(dhcp_async) {
					DBG("DHCP Request Send Async");
					sockwatch_set(di.sock, WATCH_SOCK_UDP_SEND);
					return;	/* alarm set is not needed */
				} else {
					DBG("DHCP Request Sent");
					SET_STATE(DHCP_STATE_REQUESTING);
					dhcp_run_tick = wizpf_get_systick();
				}
			} else {
				ERRA("DHCP Request SEND fail - (%d)times", dhcp_run_cnt);
				dhcp_run_tick = wizpf_get_systick();
			}
		} else {
			/* out of retries: give up and report failure */
			ERRA("DHCP Request SEND fail - (%d)times", dhcp_run_cnt);
			dhcp_run_cnt = 0;
			UDPClose(di.sock);
			if(dhcp_async) sockwatch_close(di.sock);
			dhcp_fail();
			return;	/* alarm set is not needed */
		}
		break;
	case DHCP_STATE_REQUESTING:
		/* wait for ACK/NAK within the retry window */
		if(!IS_TIME_PASSED(dhcp_run_tick, DHCP_RETRY_DELAY)) {
			int8 ret = recv_handler();
			if(ret == DHCP_MSG_ACK) {	/* Recv ACK */
				LOG("DHCP Success");
				SET_STATE(DHCP_STATE_IP_CHECK);
				dhcp_run_tick = wizpf_get_systick();
				dhcp_run_cnt = 0;
			} else if(ret == DHCP_MSG_NAK) {	/* Recv NAK */
				/* on NAK: restart if acquiring, otherwise
				 * (renew/rebind) keep the current lease */
				if(di.action == DHCP_ACT_START) {
					SET_STATE(DHCP_STATE_INIT);
					dhcp_run_tick = wizpf_get_systick();
				} else {
					SET_STATE(DHCP_STATE_BOUND);
				}
				dhcp_run_cnt = 0;
			} else if(ret != RET_NOK)
				DBGCRTCA(TRUE, "recv wrong packet(%d)", ret);
		} else {
			ERRA("DHCP ACK RECV fail - for (%d)msec", DHCP_RETRY_DELAY);
			if(di.action == DHCP_ACT_START) {
				SET_STATE(DHCP_STATE_INIT);
				dhcp_run_tick = wizpf_get_systick();
			} else {
				SET_STATE(DHCP_STATE_BOUND);
			}
		}
		break;
	case DHCP_STATE_IP_CHECK:
		/* address-conflict probing is currently disabled; the
		 * offered lease is committed unconditionally */
		//if(send_checker() == RET_OK) {
			SET_STATE(DHCP_STATE_BOUND);
			workinfo.DHCP = NETINFO_DHCP_STABLE;
			SetNetInfo(&workinfo);
			if(di.ip_update) di.ip_update();
			LOGA("DHCP ok - New IP (%d.%d.%d.%d)",
				workinfo.IP[0], workinfo.IP[1],
				workinfo.IP[2], workinfo.IP[3]);
			//bound_tick = wizpf_get_systick();
			UDPClose(di.sock);
			if(dhcp_async) sockwatch_close(di.sock);
		//} else {
		//	SET_STATE(DHCP_STATE_INIT);
		//	ERR("IP Addr conflicted - IP(%d.%d.%d.%d)", workinfo.IP[0], workinfo.IP[1], workinfo.IP[2], workinfo.IP[3]);
		//	send_rel_dec(DHCP_MSG_DECLINE);
		//	if(di.ip_conflict) (*di.ip_conflict)();
		//}
		break;
	case DHCP_STATE_BOUND:
		return;	/* alarm set is not needed */
	default:
		ERRA("wrong state(%d)", di.state);
		return;	/* alarm set is not needed */
	}

RET_ALARM:
	/* re-arm ourselves so the state machine keeps running */
	if(dhcp_alarm) alarm_set(10, dhcp_alarm_cb, 0);
}
/* Function containing the "meat" of the probe mechanism - this is used by * the OpenFirmware probe as well as the standard platform device mechanism. * @param name - Name of the instance * @param pdev - Platform device structure * @param addressRange - Resource describing the hardware's I/O range * @param irq - Resource describing the hardware's IRQ */ static int spi_mailbox_probe(const char *name, struct platform_device *pdev, struct resource *addressRange, struct resource *irq) { struct spi_mailbox *mailbox; int returnValue; /* Create and populate a device structure */ mailbox = (struct spi_mailbox*) kmalloc(sizeof(struct spi_mailbox), GFP_KERNEL); if(!mailbox) return(-ENOMEM); /* Request and map the device's I/O memory region into uncacheable space */ mailbox->physicalAddress = addressRange->start; mailbox->addressRangeSize = ((addressRange->end - addressRange->start) + 1); snprintf(mailbox->name, NAME_MAX_SIZE, "%s", name); mailbox->name[NAME_MAX_SIZE - 1] = '\0'; if(request_mem_region(mailbox->physicalAddress, mailbox->addressRangeSize, mailbox->name) == NULL) { returnValue = -ENOMEM; goto free; } mailbox->virtualAddress = (void*) ioremap_nocache(mailbox->physicalAddress, mailbox->addressRangeSize); if(!mailbox->virtualAddress) { returnValue = -ENOMEM; goto release; } /* Ensure that the mailbox and its interrupts are disabled */ disable_mailbox(mailbox); XIo_Out32(REGISTER_ADDRESS(mailbox, IRQ_MASK_REG), NO_IRQS); /* Retain the IRQ and register our handler, if an IRQ resource was supplied. 
*/ if(irq != NULL) { mailbox->irq = irq->start; returnValue = request_irq(mailbox->irq, &biamp_spi_mailbox_interrupt, IRQF_DISABLED, mailbox->name, mailbox); if (returnValue) { printk(KERN_ERR "%s : Could not allocate Biamp SPI Mailbox interrupt (%d).\n", mailbox->name, mailbox->irq); goto unmap; } } else mailbox->irq = NO_IRQ_SUPPLIED; /* Announce the device */ printk(KERN_INFO "%s: Found Biamp mailbox at 0x%08X, ", mailbox->name, (uint32_t)mailbox->physicalAddress); if(mailbox->irq == NO_IRQ_SUPPLIED) { printk("polled interlocks\n"); } else { printk("IRQ %d\n", mailbox->irq); } /* Initialize other resources */ spin_lock_init(&mailbox->mutex); mailbox->opened = false; /* Provide navigation between the device structures */ platform_set_drvdata(pdev, mailbox); mailbox->pdev = pdev; /* Reset the state of the mailbox */ reset_mailbox(mailbox); /* Add as a character device to make the instance available for use */ cdev_init(&mailbox->cdev, &spi_mailbox_fops); mailbox->cdev.owner = THIS_MODULE; kobject_set_name(&mailbox->cdev.kobj, "%s%d", mailbox->name, mailbox->instanceNumber); mailbox->instanceNumber = instanceCount++; cdev_add(&mailbox->cdev, MKDEV(DRIVER_MAJOR, mailbox->instanceNumber), 1); /* Initialize the waitqueue used for synchronized writes */ init_waitqueue_head(&(mailbox->messageReadQueue)); /* Now that the device is configured, enable interrupts if they are to be used */ if(mailbox->irq != NO_IRQ_SUPPLIED) { XIo_Out32(REGISTER_ADDRESS(mailbox, IRQ_FLAGS_REG), ALL_IRQS); XIo_Out32(REGISTER_ADDRESS(mailbox, IRQ_MASK_REG), IRQ_S2H_MSG_RX); } DBG("Mailbox initialized\n"); /* Return success */ return(0); unmap: iounmap(mailbox->virtualAddress); release: release_mem_region(mailbox->physicalAddress, mailbox->addressRangeSize); free: kfree(mailbox); return(returnValue); }
/* Gallium pipe_screen::get_shader_param implementation for etnaviv.
 * Reports per-shader-stage capabilities; only vertex and fragment shaders
 * are supported by this hardware. Unknown stages/params return 0.
 */
static int etna_screen_get_shader_param( struct pipe_screen *screen, unsigned shader, enum pipe_shader_cap param )
{
    struct etna_screen *priv = etna_screen(screen);
    switch(shader)
    {
    case PIPE_SHADER_FRAGMENT:
    case PIPE_SHADER_VERTEX:
        break; /* supported stages: fall through to the param switch */
    case PIPE_SHADER_COMPUTE:
    case PIPE_SHADER_GEOMETRY:
        /* maybe we could emulate.. */
        return 0;
    default:
        DBG("unknown shader type %d", shader);
        return 0;
    }
    switch (param) {
    case PIPE_SHADER_CAP_MAX_INSTRUCTIONS:
    case PIPE_SHADER_CAP_MAX_ALU_INSTRUCTIONS:
    case PIPE_SHADER_CAP_MAX_TEX_INSTRUCTIONS:
    case PIPE_SHADER_CAP_MAX_TEX_INDIRECTIONS:
        return ETNA_MAX_TOKENS;
    case PIPE_SHADER_CAP_MAX_CONTROL_FLOW_DEPTH:
        return ETNA_MAX_DEPTH; /* XXX */
    case PIPE_SHADER_CAP_MAX_INPUTS:
        return 16; /* XXX this amount is reserved */
    case PIPE_SHADER_CAP_MAX_TEMPS:
        return 64; /* Max native temporaries. */
    case PIPE_SHADER_CAP_MAX_ADDRS:
        return 1; /* Max native address registers */
    case PIPE_SHADER_CAP_MAX_CONSTS:
        /* Absolute maximum on ideal hardware is 256 (as that's how much register space is reserved);
         * immediates are included in here, so actual space available for constants will always be less.
         * Also the amount of registers really available depends on the hw.
         * XXX see also: viv_specs.num_constants, if this is 0 we need to come up with some default value.
         */
        return 256;
    case PIPE_SHADER_CAP_MAX_CONST_BUFFERS:
        return 1;
    case PIPE_SHADER_CAP_MAX_PREDS:
        return 0; /* nothing uses this */
    case PIPE_SHADER_CAP_TGSI_CONT_SUPPORTED:
        return 1;
    case PIPE_SHADER_CAP_INDIRECT_INPUT_ADDR:
    case PIPE_SHADER_CAP_INDIRECT_OUTPUT_ADDR:
    case PIPE_SHADER_CAP_INDIRECT_TEMP_ADDR:
    case PIPE_SHADER_CAP_INDIRECT_CONST_ADDR:
        return 1;
    case PIPE_SHADER_CAP_SUBROUTINES:
        return 0;
    case PIPE_SHADER_CAP_TGSI_SQRT_SUPPORTED:
        /* sqrt/trig only when the chip advertises the feature bit */
        return VIV_FEATURE(priv->dev, chipMinorFeatures0, HAS_SQRT_TRIG);
    case PIPE_SHADER_CAP_TGSI_POW_SUPPORTED:
        return false;
    case PIPE_SHADER_CAP_TGSI_LRP_SUPPORTED:
        return false;
    case PIPE_SHADER_CAP_INTEGERS: /* XXX supported on gc2000 but not yet implemented */
        return 0;
    case PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS:
        /* separate sampler pools for the two supported stages */
        return shader==PIPE_SHADER_FRAGMENT ? priv->specs.fragment_sampler_count :
                                              priv->specs.vertex_sampler_count;
    case PIPE_SHADER_CAP_PREFERRED_IR:
        return PIPE_SHADER_IR_TGSI;
    case PIPE_SHADER_CAP_MAX_CONST_BUFFER_SIZE:
        return 4096;
    default:
        DBG("unknown shader param %d", param);
        return 0;
    }
    return 0;
}
static int tun_set_iff(struct file *file, struct ifreq *ifr) { struct tun_struct *tun; struct net_device *dev; int err; dev = __dev_get_by_name(ifr->ifr_name); if (dev) { /* Device exist */ tun = dev->priv; if (dev->init != tun_net_init || tun->attached) return -EBUSY; /* Check permissions */ if (tun->owner != -1) if (current->euid != tun->owner && !capable(CAP_NET_ADMIN)) return -EPERM; } else { char *name; /* Allocate new device */ if (!(tun = kmalloc(sizeof(struct tun_struct), GFP_KERNEL)) ) return -ENOMEM; memset(tun, 0, sizeof(struct tun_struct)); skb_queue_head_init(&tun->readq); init_waitqueue_head(&tun->read_wait); tun->owner = -1; tun->dev.init = tun_net_init; tun->dev.priv = tun; err = -EINVAL; /* Set dev type */ if (ifr->ifr_flags & IFF_TUN) { /* TUN device */ tun->flags |= TUN_TUN_DEV; name = "tun%d"; } else if (ifr->ifr_flags & IFF_TAP) { /* TAP device */ tun->flags |= TUN_TAP_DEV; name = "tap%d"; } else goto failed; if (*ifr->ifr_name) name = ifr->ifr_name; if ((err = dev_alloc_name(&tun->dev, name)) < 0) goto failed; if ((err = register_netdevice(&tun->dev))) goto failed; MOD_INC_USE_COUNT; tun->name = tun->dev.name; } DBG(KERN_INFO "%s: tun_set_iff\n", tun->name); if (ifr->ifr_flags & IFF_NO_PI) tun->flags |= TUN_NO_PI; if (ifr->ifr_flags & IFF_ONE_QUEUE) tun->flags |= TUN_ONE_QUEUE; file->private_data = tun; tun->attached = 1; strcpy(ifr->ifr_name, tun->name); return 0; failed: kfree(tun); return err; }
/* Gallium pipe_screen::get_param implementation for etnaviv.
 * One big capability table: boolean and integer caps for the Vivante GPU.
 * Unknown params are logged and report 0.
 */
static int etna_screen_get_param( struct pipe_screen *screen, enum pipe_cap param )
{
    struct etna_screen *priv = etna_screen(screen);
    switch (param) {
    /* Supported features (boolean caps). */
    case PIPE_CAP_TWO_SIDED_STENCIL:
    case PIPE_CAP_ANISOTROPIC_FILTER:
    case PIPE_CAP_POINT_SPRITE:
    case PIPE_CAP_TEXTURE_SHADOW_MAP:
    case PIPE_CAP_BLEND_EQUATION_SEPARATE:
    case PIPE_CAP_TGSI_FS_COORD_ORIGIN_UPPER_LEFT: /* FS coordinates start in upper left */
    case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER: /* Pixel center on 0.5 */
    case PIPE_CAP_SM3:
    case PIPE_CAP_SEAMLESS_CUBE_MAP: /* ??? */
    case PIPE_CAP_TEXTURE_BARRIER:
    case PIPE_CAP_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION:
    case PIPE_CAP_VERTEX_BUFFER_OFFSET_4BYTE_ALIGNED_ONLY:
    case PIPE_CAP_VERTEX_BUFFER_STRIDE_4BYTE_ALIGNED_ONLY:
    case PIPE_CAP_VERTEX_ELEMENT_SRC_OFFSET_4BYTE_ALIGNED_ONLY:
    case PIPE_CAP_USER_CONSTANT_BUFFERS: /* constant buffers can be user buffers; they end up in command stream anyway */
    case PIPE_CAP_TGSI_TEXCOORD: /* explicit TEXCOORD and POINTCOORD semantics */
        return 1;

    /* Memory */
    case PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT:
        return 256;
    case PIPE_CAP_MIN_MAP_BUFFER_ALIGNMENT:
        return 4; /* XXX could easily be supported */
    case PIPE_CAP_GLSL_FEATURE_LEVEL:
        return 120;

    case PIPE_CAP_NPOT_TEXTURES:
        /* MUST be supported with GLES 2.0: what the capability specifies is filtering support */
        return true; /* VIV_FEATURE(priv->dev, chipMinorFeatures1, NON_POWER_OF_TWO); */

    case PIPE_CAP_MAX_VERTEX_BUFFERS:
        return priv->specs.stream_count;

    case PIPE_CAP_ENDIANNESS:
        return PIPE_ENDIAN_LITTLE; /* on most Viv hw this is configurable (feature ENDIANNESS_CONFIG) */

    /* Unsupported features. */
    case PIPE_CAP_TEXTURE_SWIZZLE: /* XXX supported on gc2000 */
    case PIPE_CAP_COMPUTE: /* XXX supported on gc2000 */
    case PIPE_CAP_MIXED_COLORBUFFER_FORMATS: /* only one colorbuffer supported, so mixing makes no sense */
    case PIPE_CAP_PRIMITIVE_RESTART: /* primitive restart index AFAIK not supported */
    case PIPE_CAP_VERTEX_COLOR_UNCLAMPED: /* no floating point buffer support */
    case PIPE_CAP_CONDITIONAL_RENDER: /* no occlusion queries */
    case PIPE_CAP_TGSI_INSTANCEID: /* no idea, really */
    case PIPE_CAP_START_INSTANCE: /* instancing not supported AFAIK */
    case PIPE_CAP_VERTEX_ELEMENT_INSTANCE_DIVISOR: /* instancing not supported AFAIK */
    case PIPE_CAP_SHADER_STENCIL_EXPORT: /* Fragment shader cannot export stencil value */
    case PIPE_CAP_MAX_DUAL_SOURCE_RENDER_TARGETS: /* no dual-source supported */
    case PIPE_CAP_TEXTURE_MULTISAMPLE: /* no texture multisample */
    case PIPE_CAP_TEXTURE_MIRROR_CLAMP: /* only mirrored repeat */
    case PIPE_CAP_INDEP_BLEND_ENABLE:
    case PIPE_CAP_INDEP_BLEND_FUNC:
    case PIPE_CAP_DEPTH_CLIP_DISABLE:
    case PIPE_CAP_SEAMLESS_CUBE_MAP_PER_TEXTURE:
    case PIPE_CAP_TGSI_FS_COORD_ORIGIN_LOWER_LEFT:
    case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER:
    case PIPE_CAP_SCALED_RESOLVE: /* Should be possible to support */
    case PIPE_CAP_TGSI_CAN_COMPACT_CONSTANTS: /* Don't skip strict max uniform limit check */
    case PIPE_CAP_FRAGMENT_COLOR_CLAMPED:
    case PIPE_CAP_VERTEX_COLOR_CLAMPED:
    case PIPE_CAP_USER_VERTEX_BUFFERS:
    case PIPE_CAP_USER_INDEX_BUFFERS:
    case PIPE_CAP_TEXTURE_BUFFER_OBJECTS:
        return 0;

    /* Stream output. */
    case PIPE_CAP_MAX_STREAM_OUTPUT_BUFFERS:
    case PIPE_CAP_STREAM_OUTPUT_PAUSE_RESUME:
    case PIPE_CAP_MAX_STREAM_OUTPUT_SEPARATE_COMPONENTS:
    case PIPE_CAP_MAX_STREAM_OUTPUT_INTERLEAVED_COMPONENTS:
        return 0;

    /* Texturing. */
    case PIPE_CAP_MAX_TEXTURE_2D_LEVELS:
    case PIPE_CAP_MAX_TEXTURE_CUBE_LEVELS:
        return 14; /* i.e. max texture dimension 8192 */
    case PIPE_CAP_MAX_TEXTURE_3D_LEVELS: /* 3D textures not supported */
        return 0;
    case PIPE_CAP_MAX_TEXTURE_ARRAY_LAYERS:
        return 0;
    case PIPE_CAP_MAX_COMBINED_SAMPLERS:
        return priv->specs.fragment_sampler_count + priv->specs.vertex_sampler_count;
    case PIPE_CAP_CUBE_MAP_ARRAY:
        return 0;
    case PIPE_CAP_MIN_TEXEL_OFFSET:
        return -8;
    case PIPE_CAP_MAX_TEXEL_OFFSET:
        return 7;
    case PIPE_CAP_TEXTURE_BORDER_COLOR_QUIRK:
        return 0;
    case PIPE_CAP_MAX_TEXTURE_BUFFER_SIZE:
        return 65536;

    /* Render targets. */
    case PIPE_CAP_MAX_RENDER_TARGETS:
        return 1;

    /* Viewports and scissors. */
    case PIPE_CAP_MAX_VIEWPORTS:
        return 1;

    /* Timer queries. */
    case PIPE_CAP_QUERY_TIME_ELAPSED:
    case PIPE_CAP_OCCLUSION_QUERY:
    case PIPE_CAP_QUERY_TIMESTAMP:
        return 0;
    case PIPE_CAP_QUERY_PIPELINE_STATISTICS:
        return 0;

    /* Preferences */
    case PIPE_CAP_PREFER_BLIT_BASED_TEXTURE_TRANSFER:
        return 0;

    default:
        DBG("unknown param %d", param);
        return 0;
    }
}
static int tun_chr_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) { struct tun_struct *tun = (struct tun_struct *)file->private_data; if (cmd == TUNSETIFF && !tun) { struct ifreq ifr; int err; if (copy_from_user(&ifr, (void *)arg, sizeof(ifr))) return -EFAULT; ifr.ifr_name[IFNAMSIZ-1] = '\0'; rtnl_lock(); err = tun_set_iff(file, &ifr); rtnl_unlock(); if (err) return err; copy_to_user((void *)arg, &ifr, sizeof(ifr)); return 0; } if (!tun) return -EBADFD; DBG(KERN_INFO "%s: tun_chr_ioctl cmd %d\n", tun->name, cmd); switch (cmd) { case TUNSETNOCSUM: /* Disable/Enable checksum */ if (arg) tun->flags |= TUN_NOCHECKSUM; else tun->flags &= ~TUN_NOCHECKSUM; DBG(KERN_INFO "%s: checksum %s\n", tun->name, arg ? "disabled" : "enabled"); break; case TUNSETPERSIST: /* Disable/Enable persist mode */ if (arg) tun->flags |= TUN_PERSIST; else tun->flags &= ~TUN_PERSIST; DBG(KERN_INFO "%s: persist %s\n", tun->name, arg ? "disabled" : "enabled"); break; case TUNSETOWNER: /* Set owner of the device */ tun->owner = (uid_t) arg; DBG(KERN_INFO "%s: owner set to %d\n", tun->owner); break; #ifdef TUN_DEBUG case TUNSETDEBUG: tun->debug = arg; break; #endif default: return -EINVAL; }; return 0; }
/*
 * Handle a pppd "notify" D-Bus message for an L2TP VPN connection.
 * First message argument is the reason string; on "connect" the following
 * dict of key/value strings carries the negotiated IPv4 configuration.
 * Returns one of the VPN_STATE_* codes.
 * Ownership: all g_strdup'ed locals are freed here; ipaddress is freed
 * after being handed to connman_provider_set_ipaddress().
 */
static int l2tp_notify(DBusMessage *msg, struct connman_provider *provider)
{
	DBusMessageIter iter, dict;
	const char *reason, *key, *value;
	char *addressv4 = NULL, *netmask = NULL, *gateway = NULL;
	char *ifname = NULL, *nameservers = NULL;
	struct connman_ipaddress *ipaddress = NULL;

	dbus_message_iter_init(msg, &iter);
	dbus_message_iter_get_basic(&iter, &reason);
	dbus_message_iter_next(&iter);

	if (!provider) {
		connman_error("No provider found");
		return VPN_STATE_FAILURE;
	}

	if (strcmp(reason, "auth failed") == 0)
		return VPN_STATE_AUTH_FAILURE;

	/* anything other than "connect" is treated as a disconnect */
	if (strcmp(reason, "connect"))
		return VPN_STATE_DISCONNECT;

	dbus_message_iter_recurse(&iter, &dict);

	/* walk the {string: string} dict of pppd environment values */
	while (dbus_message_iter_get_arg_type(&dict) == DBUS_TYPE_DICT_ENTRY) {
		DBusMessageIter entry;

		dbus_message_iter_recurse(&dict, &entry);
		dbus_message_iter_get_basic(&entry, &key);
		dbus_message_iter_next(&entry);
		dbus_message_iter_get_basic(&entry, &value);

		DBG("%s = %s", key, value);

		/* NOTE(review): a duplicated key would leak the earlier
		 * g_strdup'ed value — presumably pppd never repeats keys. */
		if (!strcmp(key, "INTERNAL_IP4_ADDRESS")) {
			connman_provider_set_string(provider, "Address", value);
			addressv4 = g_strdup(value);
		}

		if (!strcmp(key, "INTERNAL_IP4_NETMASK")) {
			connman_provider_set_string(provider, "Netmask", value);
			netmask = g_strdup(value);
		}

		if (!strcmp(key, "INTERNAL_IP4_DNS")) {
			connman_provider_set_string(provider, "DNS", value);
			nameservers = g_strdup(value);
		}

		if (!strcmp(key, "INTERNAL_IFNAME"))
			ifname = g_strdup(value);

		dbus_message_iter_next(&dict);
	}

	if (vpn_set_ifname(provider, ifname) < 0) {
		g_free(ifname);
		g_free(addressv4);
		g_free(netmask);
		g_free(nameservers);
		return VPN_STATE_FAILURE;
	}

	if (addressv4 != NULL)
		ipaddress = connman_ipaddress_alloc(AF_INET);

	g_free(ifname);

	/* NULL here means either no INTERNAL_IP4_ADDRESS or alloc failure */
	if (ipaddress == NULL) {
		connman_error("No IP address for provider");
		g_free(addressv4);
		g_free(netmask);
		g_free(nameservers);
		return VPN_STATE_FAILURE;
	}

	/* use the VPN server address as the gateway, if known */
	value = connman_provider_get_string(provider, "HostIP");
	if (value != NULL) {
		connman_provider_set_string(provider, "Gateway", value);
		gateway = g_strdup(value);
	}

	if (addressv4 != NULL)
		connman_ipaddress_set_ipv4(ipaddress, addressv4, netmask, gateway);

	connman_provider_set_ipaddress(provider, ipaddress);
	connman_provider_set_nameservers(provider, nameservers);

	g_free(addressv4);
	g_free(netmask);
	g_free(gateway);
	g_free(nameservers);
	connman_ipaddress_free(ipaddress);

	return VPN_STATE_CONNECT;
}
/*
 * Parse a bouquets XML file and populate the bouquet list.
 * @param fname  path to the XML file
 * @param bUser  true when parsing user-defined bouquets (unknown channels
 *               are then created as NOT_FOUND placeholders)
 * Expected layout: <Bouquet name=... bqID=... hidden=... locked=... epg=...>
 * containing <S .../> service entries.
 */
void CBouquetManager::parseBouquetsXml(const char *fname, bool bUser)
{
	xmlDocPtr parser;

	parser = parseXmlFile(fname);
	if (parser == NULL)
		return;

	xmlNodePtr root = xmlDocGetRootElement(parser);
	xmlNodePtr search = root->xmlChildrenNode;
	xmlNodePtr channel_node;

	if (search) {
		t_original_network_id original_network_id;
		t_service_id service_id;
		t_transport_stream_id transport_stream_id;
		int16_t satellitePosition;
		freq_id_t freq = 0;

		INFO("reading bouquets from %s", fname);
		while ((search = xmlGetNextOccurence(search, "Bouquet")) != NULL) {
			const char * name = xmlGetAttribute(search, "name");
			if(name == NULL)
				name = const_cast<char*>("Unknown");

			CZapitBouquet* newBouquet = addBouquet(name, bUser);
			// per default in contructor: newBouquet->BqID = 0; //set to default, override if bqID exists
			GET_ATTR(search, "bqID", SCANF_BOUQUET_ID_TYPE, newBouquet->BqID);
			const char* hidden = xmlGetAttribute(search, "hidden");
			const char* locked = xmlGetAttribute(search, "locked");
			const char* scanepg = xmlGetAttribute(search, "epg");
			// missing attributes default to false
			newBouquet->bHidden = hidden ? (strcmp(hidden, "1") == 0) : false;
			newBouquet->bLocked = locked ? (strcmp(locked, "1") == 0) : false;
			newBouquet->bFav = (strcmp(name, "favorites") == 0);
			newBouquet->bScanEpg = scanepg ? (strcmp(scanepg, "1") == 0) : false;
			channel_node = search->xmlChildrenNode;
			// iterate the bouquet's <S> service entries
			while ((channel_node = xmlGetNextOccurence(channel_node, "S")) != NULL) {
				std::string name2;
				name = xmlGetAttribute(channel_node, "n");
				if (name)
					name2 = name;
				std::string uname;
				const char *uName = xmlGetAttribute(channel_node, "un");
				if (uName)
					uname = uName;
				const char *url = xmlGetAttribute(channel_node, "u");
				GET_ATTR(channel_node, "i", SCANF_SERVICE_ID_TYPE, service_id);
				GET_ATTR(channel_node, "on", SCANF_ORIGINAL_NETWORK_ID_TYPE, original_network_id);
				GET_ATTR(channel_node, "s", SCANF_SATELLITE_POSITION_TYPE, satellitePosition);
				GET_ATTR(channel_node, "t", SCANF_TRANSPORT_STREAM_ID_TYPE, transport_stream_id);
				GET_ATTR(channel_node, "frq", SCANF_SATELLITE_POSITION_TYPE, freq);
				bool clock = xmlGetNumericAttribute(channel_node, "l", 10);
				// normalize kHz frequencies to MHz
				if(freq > 20000)
					freq = freq/1000;

				CZapitChannel* chan;
				t_channel_id chid = create_channel_id64(service_id, original_network_id, transport_stream_id, satellitePosition, freq, url);
				/* FIXME to load old cable settings with new cable "positions" started from 0xF00 */
				if(!url && (bUser || CFEManager::getInstance()->cableOnly()))
					chan = CServiceManager::getInstance()->FindChannelFuzzy(chid, satellitePosition, freq);
				else
					chan = CServiceManager::getInstance()->FindChannel(chid);
				if (chan != NULL) {
					// known channel: attach it to this bouquet
					DBG("%04x %04x %04x %s\n", transport_stream_id, original_network_id, service_id, xmlGetAttribute(channel_node, "n"));
					if(bUser && !(uname.empty()))
						chan->setUserName(uname);
					if(!bUser)
						chan->pname = (char *) newBouquet->Name.c_str();
					chan->bLocked = clock;
					newBouquet->addService(chan);
				} else if (bUser) {
					// user bouquet referencing an unknown service:
					// create a placeholder channel flagged NOT_FOUND
					if (url) {
						chid = create_channel_id64(0, 0, 0, 0, 0, url);
						chan = new CZapitChannel(name2.c_str(), chid, url, NULL);
					}
					else
						chan = new CZapitChannel(name2, CREATE_CHANNEL_ID64, 1 /*service_type*/, satellitePosition, freq);
					CServiceManager::getInstance()->AddChannel(chan);
					chan->flags = CZapitChannel::NOT_FOUND;
					chan->bLocked = clock;
					if(!(uname.empty()))
						chan->setUserName(uname);
					newBouquet->addService(chan);
					CServiceManager::getInstance()->SetServicesChanged(false);
				}
				channel_node = channel_node->xmlNextNode;
				if(!bUser) {
					/* set satellite position for provider bouquets. reset position to 0, if position not match - means mixed bouquet */
					if (newBouquet->satellitePosition == INVALID_SAT_POSITION)
						newBouquet->satellitePosition = satellitePosition;
					else if (newBouquet->satellitePosition != satellitePosition)
						newBouquet->satellitePosition = 0;
				}
			}
			if(!bUser)
				newBouquet->sortBouquet();
			search = search->xmlNextNode;
		}
		INFO("total: %d bouquets", (int)Bouquets.size());
	}
	xmlFreeDoc(parser);
}
int MakeDir( char * fullpath ) { char subdir[4096]; char *sep, *xpath=(char *)fullpath; int rc = 0; sep = (char *) fullpath; sep = strchr(sep, '/'); while( sep ) { BPTR dlock; int len; len = sep - xpath; CopyMem( xpath, subdir, len); subdir[len] = 0; if((dlock = CreateDir((STRPTR) subdir ))) UnLock( dlock ); else { if((rc = IoErr()) == ERROR_OBJECT_EXISTS) { dlock = Lock((STRPTR) subdir, SHARED_LOCK ); if( !dlock ) { /* this can't happend!, I think.. */ rc = -1; } else { struct FileInfoBlock fib; if(Examine(dlock,&fib) == DOSFALSE) { rc = IoErr(); } else { if(fib.fib_DirEntryType > 0) rc = 0; #ifdef DEBUG if((rc != 0) || fib.fib_DirEntryType == ST_SOFTLINK) { DBG("\aDirectory Name exists and %spoint to a file !!!!\n", ((fib.fib_DirEntryType == ST_SOFTLINK) ? "MAY ":"")); } #endif } UnLock( dlock ); } } if(rc != 0) break; } sep = strchr(sep+1, '/'); } return(rc); }
static int rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags) { struct sk_buff *skb; int retval = -ENOMEM; size_t size = 0; struct usb_ep *out; unsigned long flags; spin_lock_irqsave(&dev->lock, flags); if (dev->port_usb) out = dev->port_usb->out_ep; else out = NULL; spin_unlock_irqrestore(&dev->lock, flags); if (!out) return -ENOTCONN; /* Padding up to RX_EXTRA handles minor disagreements with host. * Normally we use the USB "terminate on short read" convention; * so allow up to (N*maxpacket), since that memory is normally * already allocated. Some hardware doesn't deal well with short * reads (e.g. DMA must be N*maxpacket), so for now don't trim a * byte off the end (to force hardware errors on overflow). * * RNDIS uses internal framing, and explicitly allows senders to * pad to end-of-packet. That's potentially nice for speed, but * means receivers can't recover lost synch on their own (because * new packets don't only start after a short RX). */ size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA; size += dev->port_usb->header_len; size += out->maxpacket - 1; size -= size % out->maxpacket; if (dev->port_usb->is_fixed) size = max_t(size_t, size, dev->port_usb->fixed_out_len); skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags); if (skb == NULL) { DBG(dev, "no rx skb\n"); goto enomem; } /* Some platforms perform better when IP packets are aligned, * but on at least one, checksumming fails otherwise. Note: * RNDIS headers involve variable numbers of LE32 values. */ skb_reserve(skb, NET_IP_ALIGN); req->buf = skb->data; req->length = size; req->complete = rx_complete; req->context = skb; retval = usb_ep_queue(out, req, gfp_flags); if (retval == -ENOMEM) enomem: defer_kevent(dev, WORK_RX_MEMORY); if (retval) { DBG(dev, "rx submit --> %d\n", retval); if (skb) dev_kfree_skb_any(skb); } return retval; }
intptr_t RenderServer::main() { RenderThreadsSet threads; while(1) { SocketStream *stream = m_listenSock->accept(); if (!stream) { fprintf(stderr,"Error accepting connection, aborting\n"); break; } unsigned int clientFlags; if (!stream->readFully(&clientFlags, sizeof(unsigned int))) { fprintf(stderr,"Error reading clientFlags\n"); delete stream; continue; } DBG("RenderServer: Got new stream!\n"); // check if we have been requested to exit while waiting on accept if ((clientFlags & IOSTREAM_CLIENT_EXIT_SERVER) != 0) { m_exiting = true; break; } RenderThread *rt = RenderThread::create(stream, &m_lock); if (!rt) { fprintf(stderr,"Failed to create RenderThread\n"); delete stream; stream = NULL; } else if (!rt->start()) { fprintf(stderr,"Failed to start RenderThread\n"); delete rt; rt = NULL; } // // remove from the threads list threads which are // no longer running // for (RenderThreadsSet::iterator n,t = threads.begin(); t != threads.end(); t = n) { // first find next iterator n = t; n++; // delete and erase the current iterator // if thread is no longer running if ((*t)->isFinished()) { delete (*t); threads.erase(t); } } // if the thread has been created and started, insert it to the list if (rt) { threads.insert(rt); DBG("Started new RenderThread\n"); } } // // Wait for all threads to finish // for (RenderThreadsSet::iterator t = threads.begin(); t != threads.end(); t++) { (*t)->wait(NULL); delete (*t); } threads.clear(); // // de-initialize the FrameBuffer object // FrameBuffer::finalize(); return 0; }
static void rx_complete(struct usb_ep *ep, struct usb_request *req) { struct sk_buff *skb = req->context; struct eth_dev *dev = ep->driver_data; int status = req->status; bool queue = 0; switch (status) { /* normal completion */ case 0: skb_put(skb, req->actual); if (dev->unwrap) { unsigned long flags; spin_lock_irqsave(&dev->lock, flags); if (dev->port_usb) { status = dev->unwrap(dev->port_usb, skb, &dev->rx_frames); if (status == -EINVAL) dev->net->stats.rx_errors++; else if (status == -EOVERFLOW) dev->net->stats.rx_over_errors++; } else { dev_kfree_skb_any(skb); status = -ENOTCONN; } spin_unlock_irqrestore(&dev->lock, flags); } else { skb_queue_tail(&dev->rx_frames, skb); } if (!status) queue = 1; break; /* software-driven interface shutdown */ case -ECONNRESET: /* unlink */ case -ESHUTDOWN: /* disconnect etc */ VDBG(dev, "rx shutdown, code %d\n", status); goto quiesce; /* for hardware automagic (such as pxa) */ case -ECONNABORTED: /* endpoint reset */ DBG(dev, "rx %s reset\n", ep->name); defer_kevent(dev, WORK_RX_MEMORY); quiesce: dev_kfree_skb_any(skb); goto clean; /* data overrun */ case -EOVERFLOW: dev->net->stats.rx_over_errors++; /* FALLTHROUGH */ default: queue = 1; dev_kfree_skb_any(skb); dev->net->stats.rx_errors++; DBG(dev, "rx status %d\n", status); break; } clean: spin_lock(&dev->req_lock); list_add(&req->list, &dev->rx_reqs); spin_unlock(&dev->req_lock); if (queue) queue_work(uether_wq, &dev->rx_work); }
static void rx_iso_complete(struct urb *urb) { iso_urb_struct *context_iso_urb = (iso_urb_struct *) urb->context; usb_fifo *fifo = context_iso_urb->owner_fifo; hfcusb_data *hfc = fifo->hfc; int k, len, errcode, offset, num_isoc_packets, fifon, maxlen, status; unsigned int iso_status; __u8 *buf; static __u8 eof[8]; fifon = fifo->fifonum; status = urb->status; if (urb->status == -EOVERFLOW) { DBG(HFCUSB_DBG_VERBOSE_USB, "HFC-USB: ignoring USB DATAOVERRUN fifo(%i)", fifon); status = 0; } /* ISO transfer only partially completed, look at individual frame status for details */ if (status == -EXDEV) { DBG(HFCUSB_DBG_VERBOSE_USB, "HFC-S USB: rx_iso_complete with -EXDEV " "urb->status %d, fifonum %d\n", status, fifon); status = 0; } if (fifo->active && !status) { num_isoc_packets = iso_packets[fifon]; maxlen = fifo->usb_packet_maxlen; for (k = 0; k < num_isoc_packets; ++k) { len = urb->iso_frame_desc[k].actual_length; offset = urb->iso_frame_desc[k].offset; buf = context_iso_urb->buffer + offset; iso_status = urb->iso_frame_desc[k].status; if (iso_status && !hfc->disc_flag) DBG(HFCUSB_DBG_VERBOSE_USB, "HFC-S USB: rx_iso_complete " "ISO packet %i, status: %i\n", k, iso_status); if (fifon == HFCUSB_D_RX) { DBG(HFCUSB_DBG_VERBOSE_USB, "HFC-S USB: ISO-D-RX lst_urblen:%2d " "act_urblen:%2d max-urblen:%2d EOF:0x%0x", fifo->last_urblen, len, maxlen, eof[5]); DBG_PACKET(HFCUSB_DBG_VERBOSE_USB, buf, len); } if (fifo->last_urblen != maxlen) { /* the threshold mask is in the 2nd status byte */ hfc->threshold_mask = buf[1]; /* care for L1 state only for D-Channel to avoid overlapped iso completions */ if (fifon == HFCUSB_D_RX) { /* the S0 state is in the upper half of the 1st status byte */ s0_state_handler(hfc, buf[0] >> 4); } eof[fifon] = buf[0] & 1; if (len > 2) collect_rx_frame(fifo, buf + 2, len - 2, (len < maxlen) ? eof[fifon] : 0); } else {
/*
 * Deferred (workqueue) handler for a USB ID-pin change: re-reads the ID
 * state and switches the MTK MUSB controller between host and device
 * roles, including VBUS control and the raw PHY register sequence that
 * forces the MAC/PHY into the matching mode.
 * Serialized by mtk_musb->musb_lock; exits early in power-off-charging
 * boot modes and while IPO-off is in effect.
 */
static void musb_id_pin_work(struct work_struct *data)
{
	u8 devctl = 0;
	unsigned long flags;

	/* quiesce the controller before switching roles */
	spin_lock_irqsave(&mtk_musb->lock, flags);
	musb_generic_disable(mtk_musb);
	spin_unlock_irqrestore(&mtk_musb->lock, flags);

	down(&mtk_musb->musb_lock);
	DBG(0, "work start, is_host=%d, boot mode(%d)\n", mtk_musb->is_host, get_boot_mode());

#ifdef CONFIG_MTK_KERNEL_POWER_OFF_CHARGING
	if (get_boot_mode() == KERNEL_POWER_OFF_CHARGING_BOOT || get_boot_mode() == LOW_POWER_OFF_CHARGING_BOOT) {
		DBG(0, "do nothing due to in power off charging\n");
		goto out;
	}
#endif
	if (mtk_musb->in_ipo_off) {
		DBG(0, "do nothing due to in_ipo_off\n");
		goto out;
	}

	/* sample the ID pin and publish the new role via the switch device */
	mtk_musb->is_host = musb_is_host();
	DBG(0, "musb is as %s\n", mtk_musb->is_host?"host":"device");
	switch_set_state((struct switch_dev *)&otg_state, mtk_musb->is_host);

	if (mtk_musb->is_host) {
		/* setup fifo for host mode */
		ep_config_from_table_for_host(mtk_musb);
		wake_lock(&mtk_musb->usb_lock);
		musb_platform_set_vbus(mtk_musb, 1);

		/* for no VBUS sensing IP*/
#if 1
		/* wait VBUS ready */
		msleep(100);
		/* clear session*/
		devctl = musb_readb(mtk_musb->mregs, MUSB_DEVCTL);
		musb_writeb(mtk_musb->mregs, MUSB_DEVCTL, (devctl&(~MUSB_DEVCTL_SESSION)));
		/* USB MAC OFF*/
		/* VBUSVALID=0, AVALID=0, BVALID=0, SESSEND=1, IDDIG=X, IDPULLUP=1 */
		USBPHY_SET8(0x6c, 0x11);
		USBPHY_CLR8(0x6c, 0x2e);
		USBPHY_SET8(0x6d, 0x3f);
		DBG(0, "force PHY to idle, 0x6d=%x, 0x6c=%x\n", USBPHY_READ8(0x6d), USBPHY_READ8(0x6c));
		/* wait */
		mdelay(5);
		/* restart session */
		devctl = musb_readb(mtk_musb->mregs, MUSB_DEVCTL);
		musb_writeb(mtk_musb->mregs, MUSB_DEVCTL, (devctl | MUSB_DEVCTL_SESSION));
		/* USB MAC ONand Host Mode*/
		/* VBUSVALID=1, AVALID=1, BVALID=1, SESSEND=0, IDDIG=0, IDPULLUP=1 */
		USBPHY_CLR8(0x6c, 0x10);
		USBPHY_SET8(0x6c, 0x2d);
		USBPHY_SET8(0x6d, 0x3f);
		DBG(0, "force PHY to host mode, 0x6d=%x, 0x6c=%x\n", USBPHY_READ8(0x6d), USBPHY_READ8(0x6c));
#endif

		musb_start(mtk_musb);
		MUSB_HST_MODE(mtk_musb);
		/* re-arm ID interrupt to detect the next cable removal */
		switch_int_to_device(mtk_musb);
	} else {
		DBG(0, "devctl is %x\n", musb_readb(mtk_musb->mregs, MUSB_DEVCTL));
		musb_writeb(mtk_musb->mregs, MUSB_DEVCTL, 0);
		if (wake_lock_active(&mtk_musb->usb_lock))
			wake_unlock(&mtk_musb->usb_lock);
		musb_platform_set_vbus(mtk_musb, 0);
		/* for no VBUS sensing IP */
#if 1
		/* USB MAC OFF*/
		/* VBUSVALID=0, AVALID=0, BVALID=0, SESSEND=1, IDDIG=X, IDPULLUP=1 */
		USBPHY_SET8(0x6c, 0x11);
		USBPHY_CLR8(0x6c, 0x2e);
		USBPHY_SET8(0x6d, 0x3f);
		DBG(0, "force PHY to idle, 0x6d=%x, 0x6c=%x\n", USBPHY_READ8(0x6d), USBPHY_READ8(0x6c));
#endif
		musb_stop(mtk_musb);
		mtk_musb->xceiv->state = OTG_STATE_B_IDLE;
		MUSB_DEV_MODE(mtk_musb);
		/* re-arm ID interrupt to detect the next host attach */
		switch_int_to_host(mtk_musb);
	}
out:
	DBG(0, "work end, is_host=%d\n", mtk_musb->is_host);
	up(&mtk_musb->musb_lock);
}