void hw_sp_dmawrite(void) { dword from,to,cnt; to =WSP[0]; from=WSP[1]; cnt =WSP[2]; if(!cart.first_rcp) { print("note: first rcp access\n"); cart.first_rcp=1; } LOGH("hw-sp: dma %08X->%08X,%-4X ",from,to,cnt); if((to&0xffff)==0x0FC0) { // detected load if OSTask structure sptaskpos=from; mem_readrangeraw(from,sizeof(sptask),(char *)&sptask); sploaded=1; LOGH(" Taskload (type=%i, flags=%i) ",sptask.type,sptask.flags); } LOGH("\n"); WSP[0]=0; WSP[1]=0; WSP[2]=0; }
void hw_sp_yield(void) { if(!spgfxexecuting) { LOGH("\n"); warning("hw: spYield with no executing task"); // hack // os_event(OS_EVENT_SP); } else { // this is a YIELD request if(spgfxexecuting) { spstatus|=(1<<8); // still executing, raise signal 1 } // sp task done os_event(OS_EVENT_SP); } // remove signal 0 spstatus&=~(1<<7); // mark that task has halted spstatus|=1; RSP[4]=spstatus; }
/** * rmnet_config_notify_cb() - Callback for netdevice notifier chain * @nb: Notifier block data * @event: Netdevice notifier event ID * @data: Contains a net device for which we are getting notified * * Return: * - result of NOTIFY_DONE() */ int rmnet_config_notify_cb(struct notifier_block *nb, unsigned long event, void *data) { struct net_device *dev = (struct net_device *)data; if (!dev) BUG(); LOGL("(..., %lu, %s)", event, dev->name); switch (event) { case NETDEV_UNREGISTER_FINAL: case NETDEV_UNREGISTER: trace_rmnet_unregister_cb_entry(dev); if (_rmnet_is_physical_endpoint_associated(dev)) { LOGH("Kernel is trying to unregister %s", dev->name); rmnet_force_unassociate_device(dev); } trace_rmnet_unregister_cb_exit(dev); break; default: trace_rmnet_unregister_cb_unhandled(dev); LOGD("Unhandeled event [%lu]", event); break; } return NOTIFY_DONE; }
/*
 * mm_app_take_picture() - Capture one (or a burst of) snapshot(s).
 * @test_obj:      camera test object carrying channel/stream state
 * @is_burst_mode: non-zero requests a 6-frame burst instead of a single shot
 *
 * Stops the preview, starts capture, waits for every expected snapshot
 * callback, stops capture and restarts the preview.  Returns MM_CAMERA_OK
 * on success or the first failing call's error code.
 */
int mm_app_take_picture(mm_camera_test_obj_t *test_obj, uint8_t is_burst_mode)
{
    /* Fix: original passed "%s" with no matching argument (undefined
     * behavior in printf-style formatting); supply __func__. */
    LOGH("\nEnter %s!!\n", __func__);
    int rc = MM_CAMERA_OK;
    uint8_t num_snapshot = 1;
    int num_rcvd_snapshot = 0;

    if (is_burst_mode)
        num_snapshot = 6;

    /* stop preview before starting capture */
    rc = mm_app_stop_preview(test_obj);
    if (rc != MM_CAMERA_OK) {
        LOGE(" stop preview failed before capture!!, err=%d\n", rc);
        return rc;
    }

    rc = mm_app_start_capture(test_obj, num_snapshot);
    if (rc != MM_CAMERA_OK) {
        LOGE(" mm_app_start_capture(), err=%d\n", rc);
        return rc;
    }

    /* Wait until every expected snapshot has been delivered. */
    while (num_rcvd_snapshot < num_snapshot) {
        LOGH("\nWaiting mm_camera_app_wait !!\n");
        mm_camera_app_wait();
        num_rcvd_snapshot++;
    }

    rc = mm_app_stop_capture(test_obj);
    if (rc != MM_CAMERA_OK) {
        LOGE(" mm_app_stop_capture(), err=%d\n", rc);
        return rc;
    }

    /* start preview after capture */
    rc = mm_app_start_preview(test_obj);
    if (rc != MM_CAMERA_OK) {
        LOGE(" start preview failed after capture!!, err=%d\n", rc);
    }
    return rc;
}
static int rmnet_vnd_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct rmnet_vnd_private_s *dev_conf; int rc; rc = 0; dev_conf = (struct rmnet_vnd_private_s *) netdev_priv(dev); rc = _rmnet_vnd_do_qos_ioctl(dev, ifr, cmd); if (rc != -EINVAL) return rc; rc = 0; switch (cmd) { case RMNET_IOCTL_OPEN: LOGM("%s(): RMNET_IOCTL_OPEN on %s (ignored)\n", __func__, dev->name); break; case RMNET_IOCTL_CLOSE: LOGM("%s(): RMNET_IOCTL_CLOSE on %s (ignored)\n", __func__, dev->name); break; case RMNET_IOCTL_SET_LLP_ETHERNET: LOGM("%s(): RMNET_IOCTL_SET_LLP_ETHERNET on %s (no support)\n", __func__, dev->name); rc = -EINVAL; break; case RMNET_IOCTL_SET_LLP_IP: LOGM("%s(): RMNET_IOCTL_SET_LLP_IP on %s (ignored)\n", __func__, dev->name); break; case RMNET_IOCTL_GET_LLP: LOGM("%s(): RMNET_IOCTL_GET_LLP on %s\n", __func__, dev->name); ifr->ifr_ifru.ifru_data = (void *)(RMNET_MODE_LLP_IP); break; default: LOGH("%s(): Unkown IOCTL 0x%08X\n", __func__, cmd); rc = -EINVAL; } return rc; }
/** * rmnet_config_netlink_msg_handler() - Netlink message handler callback * @skb: Packet containing netlink messages * * Standard kernel-expected format for a netlink message handler. Processes SKBs * which contain RmNet data specific netlink messages. */ void rmnet_config_netlink_msg_handler(struct sk_buff *skb) { struct nlmsghdr *nlmsg_header, *resp_nlmsg; struct rmnet_nl_msg_s *rmnet_header, *resp_rmnet; int return_pid, response_data_length; struct sk_buff *skb_response; response_data_length = 0; nlmsg_header = (struct nlmsghdr *) skb->data; rmnet_header = (struct rmnet_nl_msg_s *) nlmsg_data(nlmsg_header); LOGL("%s(): Netlink message pid=%d, seq=%d, length=%d, rmnet_type=%d\n", __func__, nlmsg_header->nlmsg_pid, nlmsg_header->nlmsg_seq, nlmsg_header->nlmsg_len, rmnet_header->message_type); return_pid = nlmsg_header->nlmsg_pid; skb_response = nlmsg_new(sizeof(struct nlmsghdr) + sizeof(struct rmnet_nl_msg_s), GFP_KERNEL); if (!skb_response) { LOGH("%s(): Failed to allocate response buffer\n", __func__); return; } resp_nlmsg = nlmsg_put(skb_response, 0, nlmsg_header->nlmsg_seq, NLMSG_DONE, sizeof(struct rmnet_nl_msg_s), 0); resp_rmnet = nlmsg_data(resp_nlmsg); if (!resp_rmnet) BUG(); resp_rmnet->message_type = rmnet_header->message_type; rtnl_lock(); switch (rmnet_header->message_type) { case RMNET_NETLINK_ASSOCIATE_NETWORK_DEVICE: _rmnet_netlink_associate_network_device (rmnet_header, resp_rmnet); break; case RMNET_NETLINK_UNASSOCIATE_NETWORK_DEVICE: _rmnet_netlink_unassociate_network_device (rmnet_header, resp_rmnet); break; case RMNET_NETLINK_SET_LINK_EGRESS_DATA_FORMAT: _rmnet_netlink_set_link_egress_data_format (rmnet_header, resp_rmnet); break; case RMNET_NETLINK_GET_LINK_EGRESS_DATA_FORMAT: _rmnet_netlink_get_link_egress_data_format (rmnet_header, resp_rmnet); break; case RMNET_NETLINK_SET_LINK_INGRESS_DATA_FORMAT: _rmnet_netlink_set_link_ingress_data_format (rmnet_header, resp_rmnet); break; case RMNET_NETLINK_GET_LINK_INGRESS_DATA_FORMAT: 
_rmnet_netlink_get_link_ingress_data_format (rmnet_header, resp_rmnet); break; case RMNET_NETLINK_SET_LOGICAL_EP_CONFIG: _rmnet_netlink_set_logical_ep_config(rmnet_header, resp_rmnet); break; case RMNET_NETLINK_NEW_VND: resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE; resp_rmnet->return_code = rmnet_create_vnd(rmnet_header->vnd.id); break; default: resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE; resp_rmnet->return_code = RMNET_CONFIG_UNKNOWN_MESSAGE; break; } rtnl_unlock(); nlmsg_unicast(nl_socket_handle, skb_response, return_pid); }
void hw_sp_statuswrite(void) { dword status; int go; status=WSP[4]; WSP[4]=0; if(status && status!=0x70707070) { go=0; LOGH("hw-sp: command %08X ",status,spstatus); if(status&(1<<0)) { // clear halt, start exec LOGH("go! "); if(sploaded) go=1; else { LOGH("\n"); warning("hw: spGo without loaded task"); os_event(OS_EVENT_SP); } } if(status&(1<<1)) { // set halt, stop exec warning("hw: spHalt\n"); } if(status&(1<<2)) spstatus&=~(1<<1); /* if(status&(1<<3)) ; // clear intr if(status&(1<<4)) ; // set intr if(status&(1<<5)) ; // clear sstop if(status&(1<<6)) ; // set sstep if(status&(1<<7)) ; // clear intr-on-break if(status&(1<<8)) ; // set intr-on-break */ if(status&(1<< 9)) { spstatus&=~(1<<7); // clear signal 0 } if(status&(1<<10)) { spstatus|= (1<<7); // set signal 0 LOGH("yield! "); hw_sp_yield(); } if(status&(1<<11)) spstatus&=~(1<<8); // clear signal 1 if(status&(1<<12)) spstatus|= (1<<8); // set signal 1 if(status&(1<<13)) spstatus&=~(1<<9); // clear signal 2 if(status&(1<<14)) spstatus|= (1<<9); // set signal 2 LOGH(" status=%08X\n",spstatus); if(go) { hw_sp_start(); } } if(spgfxexecuting) spstatus&=~1; else spstatus|=1; RSP[4]=spstatus; }
/** * rmnet_config_netlink_msg_handler() - Netlink message handler callback * @skb: Packet containing netlink messages * * Standard kernel-expected format for a netlink message handler. Processes SKBs * which contain RmNet data specific netlink messages. */ void rmnet_config_netlink_msg_handler(struct sk_buff *skb) { struct nlmsghdr *nlmsg_header, *resp_nlmsg; struct rmnet_nl_msg_s *rmnet_header, *resp_rmnet; int return_pid, response_data_length; struct sk_buff *skb_response; response_data_length = 0; nlmsg_header = (struct nlmsghdr *) skb->data; rmnet_header = (struct rmnet_nl_msg_s *) nlmsg_data(nlmsg_header); LOGL("Netlink message pid=%d, seq=%d, length=%d, rmnet_type=%d", nlmsg_header->nlmsg_pid, nlmsg_header->nlmsg_seq, nlmsg_header->nlmsg_len, rmnet_header->message_type); return_pid = nlmsg_header->nlmsg_pid; skb_response = nlmsg_new(sizeof(struct nlmsghdr) + sizeof(struct rmnet_nl_msg_s), GFP_KERNEL); if (!skb_response) { LOGH("%s", "Failed to allocate response buffer"); return; } resp_nlmsg = nlmsg_put(skb_response, 0, nlmsg_header->nlmsg_seq, NLMSG_DONE, sizeof(struct rmnet_nl_msg_s), 0); resp_rmnet = nlmsg_data(resp_nlmsg); if (!resp_rmnet) BUG(); resp_rmnet->message_type = rmnet_header->message_type; rtnl_lock(); switch (rmnet_header->message_type) { case RMNET_NETLINK_ASSOCIATE_NETWORK_DEVICE: _rmnet_netlink_associate_network_device (rmnet_header, resp_rmnet); break; case RMNET_NETLINK_UNASSOCIATE_NETWORK_DEVICE: _rmnet_netlink_unassociate_network_device (rmnet_header, resp_rmnet); break; case RMNET_NETLINK_GET_NETWORK_DEVICE_ASSOCIATED: _rmnet_netlink_get_network_device_associated (rmnet_header, resp_rmnet); break; case RMNET_NETLINK_SET_LINK_EGRESS_DATA_FORMAT: _rmnet_netlink_set_link_egress_data_format (rmnet_header, resp_rmnet); break; case RMNET_NETLINK_GET_LINK_EGRESS_DATA_FORMAT: _rmnet_netlink_get_link_egress_data_format (rmnet_header, resp_rmnet); break; case RMNET_NETLINK_SET_LINK_INGRESS_DATA_FORMAT: 
_rmnet_netlink_set_link_ingress_data_format (rmnet_header, resp_rmnet); break; case RMNET_NETLINK_GET_LINK_INGRESS_DATA_FORMAT: _rmnet_netlink_get_link_ingress_data_format (rmnet_header, resp_rmnet); break; case RMNET_NETLINK_SET_LOGICAL_EP_CONFIG: _rmnet_netlink_set_logical_ep_config(rmnet_header, resp_rmnet); break; case RMNET_NETLINK_UNSET_LOGICAL_EP_CONFIG: _rmnet_netlink_unset_logical_ep_config(rmnet_header, resp_rmnet); break; case RMNET_NETLINK_GET_LOGICAL_EP_CONFIG: _rmnet_netlink_get_logical_ep_config(rmnet_header, resp_rmnet); break; case RMNET_NETLINK_NEW_VND: resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE; resp_rmnet->return_code = rmnet_create_vnd(rmnet_header->vnd.id); break; case RMNET_NETLINK_NEW_VND_WITH_PREFIX: resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE; resp_rmnet->return_code = rmnet_create_vnd_prefix( rmnet_header->vnd.id, rmnet_header->vnd.vnd_name); break; case RMNET_NETLINK_FREE_VND: resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE; /* Please check rmnet_vnd_free_dev documentation regarding the below locking sequence */ rtnl_unlock(); resp_rmnet->return_code = rmnet_free_vnd(rmnet_header->vnd.id); rtnl_lock(); break; case RMNET_NETLINK_GET_VND_NAME: _rmnet_netlink_get_vnd_name(rmnet_header, resp_rmnet); break; case RMNET_NETLINK_DEL_VND_TC_FLOW: case RMNET_NETLINK_ADD_VND_TC_FLOW: _rmnet_netlink_add_del_vnd_tc_flow(rmnet_header->message_type, rmnet_header, resp_rmnet); break; default: resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE; resp_rmnet->return_code = RMNET_CONFIG_UNKNOWN_MESSAGE; break; } rtnl_unlock(); nlmsg_unicast(nl_socket_handle, skb_response, return_pid); LOGD("%s", "Done processing command"); }
/** * rmnet_force_unassociate_device() - Force a device to unassociate * @dev: Device to unassociate * * Return: * - void */ static void rmnet_force_unassociate_device(struct net_device *dev) { int i, j; struct net_device *vndev; struct rmnet_logical_ep_conf_s *cfg; struct rmnet_free_vnd_work *vnd_work; ASSERT_RTNL(); if (!dev) BUG(); if (!_rmnet_is_physical_endpoint_associated(dev)) { LOGM("%s", "Called on unassociated device, skipping"); return; } trace_rmnet_unregister_cb_clear_vnds(dev); vnd_work = (struct rmnet_free_vnd_work *) kmalloc(sizeof(struct rmnet_free_vnd_work), GFP_KERNEL); if (!vnd_work) { LOGH("%s", "Out of Memory"); return; } INIT_WORK(&vnd_work->work, _rmnet_free_vnd_later); vnd_work->count = 0; /* Check the VNDs for offending mappings */ for (i = 0, j = 0; i < RMNET_DATA_MAX_VND && j < RMNET_DATA_MAX_VND; i++) { vndev = rmnet_vnd_get_by_id(i); if (!vndev) { LOGL("VND %d not in use; skipping", i); continue; } cfg = rmnet_vnd_get_le_config(vndev); if (!cfg) { LOGH("Got NULL config from VND %d", i); BUG(); continue; } if (cfg->refcount && (cfg->egress_dev == dev)) { rmnet_unset_logical_endpoint_config(vndev, RMNET_LOCAL_LOGICAL_ENDPOINT); vnd_work->vnd_id[j] = i; j++; } } if (j > 0) { vnd_work->count = j; schedule_work(&vnd_work->work); } else { kfree(vnd_work); } /* Clear the mappings on the phys ep */ trace_rmnet_unregister_cb_clear_lepcs(dev); rmnet_unset_logical_endpoint_config(dev, RMNET_LOCAL_LOGICAL_ENDPOINT); for (i = 0; i < RMNET_DATA_MAX_LOGICAL_EP; i++) rmnet_unset_logical_endpoint_config(dev, i); rmnet_unassociate_network_device(dev); }