// Invoked when a DeviceBuildTask for the given UDN has finished (successfully
// or not). Publishes the built device when the control point is still running,
// then always disposes of the task entry.
void HControlPointPrivate::deviceModelBuildDone(const Herqq::Upnp::HUdn& udn)
{
    HLOG2(H_AT, H_FUN, m_loggingIdentifier);

    DeviceBuildTask* task = m_deviceBuildTasks.get(udn);
    Q_ASSERT(task);

    // The state is checked because the user may have called
    // HControlPoint::quit() before this event was delivered; in that case
    // the result is discarded and only the task bookkeeping is cleaned up.
    if (m_state == Initialized)
    {
        if (task->completionValue() != 0)
        {
            HLOG_WARN(QString("Device model for [%1] could not be built: %2.").arg(
                udn.toString(), task->errorString()));
        }
        else
        {
            HLOG_INFO(QString("Device model for [%1] built successfully.").arg(
                udn.toString()));

            HDefaultClientDevice* builtDevice = task->createdDevice();
            Q_ASSERT(builtDevice);

            // Attach every location from which this device was advertised.
            const qint32 locationCount = task->m_locations.size();
            for (qint32 idx = 0; idx < locationCount; ++idx)
            {
                builtDevice->addLocation(task->m_locations[idx]);
            }

            processDeviceOnline(builtDevice, true);
        }
    }

    m_deviceBuildTasks.remove(udn);
}
int hlfs_take_snapshot(struct hlfs_ctrl *ctrl, const char *ssname) { //HLOG_DEBUG("enter func %s", __func__); //HLOG_DEBUG("create ssname is %s", ssname); if(ctrl == NULL || ssname ==NULL){ HLOG_ERROR("parameter error!"); return -1; } int ret = 0; if ((strlen(ssname) + 1) > HLFS_FILE_NAME_MAX) { HLOG_ERROR("error, snapshot name beyond max length!"); return -1; } g_mutex_lock(ctrl->hlfs_access_mutex); if(ctrl->rw_inode_flag == 0){ HLOG_ERROR("error, snapshot can not take when readonly"); g_mutex_unlock (ctrl->hlfs_access_mutex); return -1; } g_mutex_unlock (ctrl->hlfs_access_mutex); struct snapshot *_ss = NULL; if (0 == (ret=load_snapshot_by_name(ctrl->storage,SNAPSHOT_FILE,&_ss,ssname))){ HLOG_ERROR("snapshot %s is exist, use another snapshot name", ssname); return -1; }else{ HLOG_DEBUG("snapshot %s is not exist , create it ", ssname); } struct snapshot ss; memset(&ss, 0, sizeof(struct snapshot)); ss.timestamp = get_current_time(); g_strlcpy(ss.sname, ssname, strlen(ssname) + 1); g_mutex_lock(ctrl->hlfs_access_mutex); sprintf(ss.up_sname, "%s", ctrl->alive_ss_name); ss.inode_addr = ctrl->imap_entry.inode_addr; memset(ctrl->alive_ss_name, 0, MAX_FILE_NAME_LEN); sprintf(ctrl->alive_ss_name, "%s", ss.sname); g_mutex_unlock (ctrl->hlfs_access_mutex); ret = dump_alive_snapshot(ctrl->storage,ALIVE_SNAPSHOT_FILE,&ss); if(ret!=0){ HLOG_ERROR("dump snapshot alive error!"); return -1; } ret = dump_snapshot(ctrl->storage,SNAPSHOT_FILE,&ss); if(ret!=0){ HLOG_ERROR("dump snapshot error!"); return -1; } HLOG_INFO("Take Snapshot Succ- snapshot_name:%s,last_segno:%d,last_offset:%d", ssname, ctrl->last_segno, ctrl->last_offset); return ret; }
bool HControlPointPrivate::processDeviceOffline( const HResourceUnavailable& msg, const HEndpoint& /*source*/, HControlPointSsdpHandler* /*origin*/) { HLOG2(H_AT, H_FUN, m_loggingIdentifier); Q_ASSERT(thread() == QThread::currentThread()); HDefaultClientDevice* device = static_cast<HDefaultClientDevice*>( m_deviceStorage.searchDeviceByUdn(msg.usn().udn(), AllDevices)); if (!device) { // the device is not known by us. // note that even service announcements contain the "UDN", which identifies // the device that contains them. return true; } if (device->deviceStatus()->online()) { HLOG_INFO(QString("Resource [%1] is unavailable.").arg( msg.usn().resourceType().toString())); // according to the UDA v1.1 specification, if a bye bye message of any kind // is received, the control point can assume that nothing in that // device tree is available anymore HDefaultClientDevice* root = static_cast<HDefaultClientDevice*>(device->rootDevice()); Q_ASSERT(root); root->deviceStatus()->setOnline(false); m_eventSubscriber->remove(root, true); root->clearLocations(); root->stopStatusNotifier(HDefaultClientDevice::All); emit q_ptr->rootDeviceOffline(root); } return true; }
int hlfs_read(struct hlfs_ctrl *ctrl, char* read_buf, uint32_t read_len, uint64_t pos) { if((NULL == read_buf) || (NULL == ctrl) || (0 == read_len)){ HLOG_ERROR("Params Error"); return -1; } if(ctrl->sb.max_fs_size *1024 *1024< pos+read_len){ HLOG_ERROR("your config only allow write beyond :%llu",ctrl->sb.max_fs_size); //g_mutex_unlock (ctrl->hlfs_access_mutex); return -1; } //g_mutex_lock (ctrl->hlfs_access_mutex); HLOG_INFO("Hlfs Read Req pos:%llu,read_len:%d,last_segno:%d,last_offset:%d,cur_file_len:%llu", pos, read_len, ctrl->last_segno, ctrl->last_offset, ctrl->inode.length); guint32 BLOCKSIZE = ctrl->sb.block_size; HLOG_DEBUG("read offset:%llu,read len:%d", pos,read_len); int ret = 0; int start_db = 0; if(pos/BLOCKSIZE == (pos+read_len-1)/BLOCKSIZE){ HLOG_DEBUG("only need to read one block: %llu", pos / BLOCKSIZE); char *block = (char*)alloca(BLOCKSIZE); //g_mutex_lock (ctrl->hlfs_access_mutex); ret=load_block_by_addr_fast(ctrl,pos,block); //g_mutex_unlock (ctrl->hlfs_access_mutex); if(-1 == ret ){ HLOG_ERROR("fail to load block for addr %llu", pos); //g_mutex_unlock (ctrl->hlfs_access_mutex); return -1; }else if(1==ret){ //HLOG_DEBUG("fail to load block for not write yet"); memset(block,0,BLOCKSIZE); } memcpy(read_buf,block + pos%BLOCKSIZE,read_len); //g_free(block); //g_mutex_unlock (ctrl->hlfs_access_mutex); HLOG_DEBUG("read len %u", read_len); return read_len; } HLOG_DEBUG("need to read muti block", __func__); uint32_t offset=0; if( pos % BLOCKSIZE != 0 ){ HLOG_DEBUG("need to read first block", __func__); char *first_block = (char*)alloca(BLOCKSIZE); //g_mutex_lock (ctrl->hlfs_access_mutex); ret=load_block_by_addr_fast(ctrl,pos,first_block); //g_mutex_unlock (ctrl->hlfs_access_mutex); if(-1 == ret){ HLOG_ERROR("fail to load block for addr %llu", pos); //g_mutex_unlock (ctrl->hlfs_access_mutex); return -1; }else if(1 == ret){ //HLOG_DEBUG("fail to load block for not write yet"); memset(first_block,0,BLOCKSIZE); } memcpy(read_buf,first_block + 
pos%BLOCKSIZE, BLOCKSIZE - pos%BLOCKSIZE); offset += BLOCKSIZE - pos%BLOCKSIZE; HLOG_DEBUG("fist offset:%u", offset); //g_free(block); start_db = (pos + BLOCKSIZE)/BLOCKSIZE; }else{ start_db = pos/BLOCKSIZE; } int end_db = (pos+read_len)/BLOCKSIZE; HLOG_DEBUG("start db: %d end db: %d", start_db, end_db); int i; //char *block = (char*)alloca(BLOCKSIZE); for(i = start_db; i < end_db;i++){ //g_mutex_lock (ctrl->hlfs_access_mutex); char *block = read_buf+offset; ret=load_block_by_no_fast(ctrl,i,block); //g_mutex_unlock (ctrl->hlfs_access_mutex); if(-1 == ret){ HLOG_ERROR("fail to load block for no %d", i); //g_mutex_unlock (ctrl->hlfs_access_mutex); return -1; }else if(1==ret){ //HLOG_DEBUG("fail to load block for not write yet"); memset(block,0,BLOCKSIZE); } //memcpy(read_buf+offset,block,BLOCKSIZE); offset +=BLOCKSIZE; HLOG_DEBUG("offset: %u", offset); //g_free(block); } if((pos + read_len)% BLOCKSIZE != 0 ){ HLOG_DEBUG("need to read last block", __func__); char *last_block = (char*)alloca(BLOCKSIZE); //g_mutex_lock (ctrl->hlfs_access_mutex); ret=load_block_by_addr_fast(ctrl,pos+read_len,last_block); //g_mutex_unlock (ctrl->hlfs_access_mutex); if(-1 == ret){ HLOG_ERROR("fail to load block for addr %llu", pos + read_len); //g_mutex_unlock (ctrl->hlfs_access_mutex); return -1; }else if (1==ret){ //HLOG_DEBUG("fail to load block for not write yet"); memset(last_block,0,BLOCKSIZE); } memcpy(read_buf + offset , last_block , (pos + read_len)%BLOCKSIZE ); offset +=(pos+read_len)%BLOCKSIZE; //g_free(block); } //g_mutex_unlock (ctrl->hlfs_access_mutex); //ctrl->last_access_timestamp = get_current_time(); ctrl->last_read_timestamp = get_current_time(); HLOG_DEBUG("leave func %s", __func__); return offset; }
// Initializes the control point: creates the event subscription manager, the
// HTTP server used for eventing callbacks, and one SSDP handler per configured
// network address. Returns false (with error set) on any failure.
// NOTE(review): this definition continues past the visible chunk - the "end:"
// label targeted by the goto statements below is not shown here.
bool HControlPoint::init()
{
    HLOG2(H_AT, H_FUN, h_ptr->m_loggingIdentifier);

    // Initialization must happen in the object's own thread.
    Q_ASSERT_X(
        thread() == QThread::currentThread(), H_AT,
        "The control point has to be initialized in the thread in which it is "
        "currently located.");

    // Calling init() twice is reported to the caller rather than asserted.
    if (h_ptr->m_state == HControlPointPrivate::Initialized)
    {
        setError(
            AlreadyInitializedError,
            "The control point is already initialized");
        return false;
    }

    Q_ASSERT(h_ptr->m_state == HControlPointPrivate::Uninitialized);

    bool ok = true;
    const QList<QHostAddress> addrs =
        h_ptr->m_configuration->networkAddressesToUse();

    h_ptr->m_state = HControlPointPrivate::Initializing;

    HLOG_INFO("ControlPoint initializing.");

    h_ptr->m_eventSubscriber = new HEventSubscriptionManager(h_ptr);

    // Forward subscription outcomes to the public signals.
    // NOTE(review): the result of this first connect() is overwritten by the
    // next assignment without an intervening Q_ASSERT(ok) - presumably an
    // oversight; confirm and add the assert.
    ok = connect(
        h_ptr->m_eventSubscriber,
        SIGNAL(subscribed(Herqq::Upnp::HClientService*)),
        this,
        SIGNAL(subscriptionSucceeded(Herqq::Upnp::HClientService*)));

    ok = connect(
        h_ptr->m_eventSubscriber,
        SIGNAL(subscriptionFailed(Herqq::Upnp::HClientService*)),
        this,
        SIGNAL(subscriptionFailed(Herqq::Upnp::HClientService*)));

    Q_ASSERT(ok);

    ok = connect(
        h_ptr->m_eventSubscriber,
        SIGNAL(unsubscribed(Herqq::Upnp::HClientService*)),
        h_ptr,
        SLOT(unsubscribed(Herqq::Upnp::HClientService*)));

    Q_ASSERT(ok);

    h_ptr->m_server = new ControlPointHttpServer(h_ptr);

    if (!doInit())
    {
        // it is assumed that the derived class filled the error and
        // error description
        ok = false;
        goto end;
    }

    if (!h_ptr->m_server->init(convertHostAddressesToEndpoints(addrs)))
    {
        setError(CommunicationsError, "Failed to start HTTP server");
        ok = false;
        goto end;
    }

    // Start one SSDP listener per configured local address; any single
    // failure aborts the whole initialization.
    foreach(const QHostAddress& ha, addrs)
    {
        quint32 netwAddr;
        ok = HSysInfo::instance().localNetwork(ha, &netwAddr);
        Q_ASSERT(ok);

        HControlPointSsdpHandler* ssdp = new HControlPointSsdpHandler(h_ptr);
        if (!ssdp->init(ha))
        {
            delete ssdp;
            setError(CommunicationsError, "Failed to start SSDP");
            ok = false;
            goto end;
        }

        h_ptr->m_ssdps.append(qMakePair(netwAddr, ssdp));
    }
// Handles an SSDP discovery/alive advertisement. If the advertised device is
// already known, its status timeouts are refreshed and a new location may be
// recorded; otherwise a background DeviceBuildTask is started to fetch the
// description and build the device model.
// NOTE(review): "Msg" looks like a template parameter whose template header
// is on a line above this chunk - not visible here; confirm before editing.
bool HControlPointPrivate::processDeviceDiscovery(
    const Msg& msg, const HEndpoint& source, HControlPointSsdpHandler*)
{
    HLOG2(H_AT, H_FUN, m_loggingIdentifier);

    const HUdn& resourceUdn = msg.usn().udn();

    HDefaultClientDevice* device = static_cast<HDefaultClientDevice*>(
        m_deviceStorage.searchDeviceByUdn(msg.usn().udn(), AllDevices));

    if (device)
    {
        // according to the UDA v1.1 spec, if a control point receives an
        // alive announcement of any type for a device tree, the control point
        // can assume that all devices and services are available.
        // ==> reset timeouts for entire device tree and all services.
        device = static_cast<HDefaultClientDevice*>(device->rootDevice());
        device->startStatusNotifier(HDefaultClientDevice::All);

        // it cannot be that only some embedded device is available at certain
        // interface, since the device description is always fetched from the
        // the location that the root device specifies ==> the entire device
        // tree has to be available at that location.
        if (device->addLocation(msg.location()))
        {
            HLOG_DBG(QString("Existing device [%1] now available at [%2]").arg(
                resourceUdn.toString(), msg.location().toString()));
        }

        if (!device->deviceStatus()->online())
        {
            // A previously-offline tree came back: mark it online and notify.
            device->deviceStatus()->setOnline(true);
            emit q_ptr->rootDeviceOnline(device);
            processDeviceOnline(device, false);
        }

        return true;
    }

    // it does not matter if the device is an embedded device, since the
    // location of the device always points to the root device's description
    // and the internal device model is built of that. Hence, any advertisement
    // will do to build the entire model correctly.
    DeviceBuildTask* dbp = m_deviceBuildTasks.get(msg);
    if (dbp)
    {
        // A build for this device is already in flight; just remember the
        // additional advertised location.
        if (!dbp->m_locations.contains(msg.location()))
        {
            dbp->m_locations.push_back(msg.location());
        }

        return true;
    }

    // Give the user a chance to filter out unwanted resources.
    if (!q_ptr->acceptResource(msg.usn(), source))
    {
        HLOG_DBG(QString("Resource advertisement [%1] rejected").arg(
            msg.usn().toString()));

        return true;
    }

    // Start an asynchronous build of the device model; autoDelete is disabled
    // because the task is owned and removed via m_deviceBuildTasks.
    DeviceBuildTask* newBuildTask = new DeviceBuildTask(this, msg);
    newBuildTask->setAutoDelete(false);

    m_deviceBuildTasks.add(newBuildTask);

    bool ok = connect(
        newBuildTask, SIGNAL(done(Herqq::Upnp::HUdn)),
        this, SLOT(deviceModelBuildDone(Herqq::Upnp::HUdn)));
    Q_ASSERT(ok); Q_UNUSED(ok)

    HLOG_INFO(QString(
        "New resource [%1] is available @ [%2]. "
        "Attempting to build the device model.").arg(
            msg.usn().toString(), msg.location().toString()));

    m_threadPool->start(newBuildTask);

    return true;
}
/*
 * Background flush worker for the block cache.
 *
 * Wakes up either when flush_waken_cond is signalled or when flush_interval
 * seconds elapse, then repeatedly drains runs of contiguous dirty blocks
 * through the registered write callback until the cache has enough free space
 * (or, on a timeout wakeup, until it is fully drained).
 *
 * data - the CACHE_CTRL to service. Returns 0 when asked to exit.
 */
int flush_work(gpointer data)
{
    int ret = 0;
    CACHE_CTRL *cctrl = (CACHE_CTRL *)data;
    GTimeVal expired;
    /* One reusable staging buffer sized for the largest flush batch. */
    char *tmp_buf = (char *)g_malloc0(cctrl->block_size * cctrl->flush_once_size);
    g_assert(tmp_buf);

    while (!cctrl->flush_worker_should_exit) {
        HLOG_DEBUG("-- flush worker doing --");
        g_get_current_time(&expired);
        g_time_val_add(&expired, cctrl->flush_interval * 1000 * 1000);
        g_mutex_lock(cctrl->cache_mutex);
        /* res == TRUE when explicitly signalled, FALSE on timeout. */
        gboolean res = g_cond_timed_wait(cctrl->flush_waken_cond,
                                         cctrl->cache_mutex, &expired);
        g_mutex_unlock(cctrl->cache_mutex);
        HLOG_DEBUG(" time wait res for cond is :%d !", res);
        if (cctrl->flush_worker_should_exit) {
            HLOG_INFO("-- flush worker should exit --");
            break;
        }
        do {
            GSList *continue_blocks = NULL;
            ret = get_continues_blocks(cctrl, &continue_blocks);
            g_assert(ret == 0);
            uint32_t blocks_count = g_slist_length(continue_blocks);
            uint32_t buff_len = blocks_count * cctrl->block_size;
            HLOG_DEBUG("--blocks_count:%d, buff_len:%d--", blocks_count, buff_len);
            if (res == TRUE && buff_len == 0) {
                /* A signal always implies pending dirty blocks. */
                HLOG_ERROR("Never reach here");
                g_assert(0);
            }
            if (buff_len == 0) {
                HLOG_DEBUG("do not need flush now");
                g_slist_free(continue_blocks); /* FIX: list leaked on break */
                break;
            }
            if (NULL == cctrl->write_callback_func) {
                HLOG_WARN("--not given flush callback func--");
                g_slist_free(continue_blocks); /* FIX: list leaked on break */
                break;
            }
            /* Copy the run into the staging buffer, remembering the block
             * number range. FIX: iterate the list directly instead of
             * g_slist_nth_data per index (that was O(n^2)); start_no/end_no
             * now initialized to silence may-be-uninitialized warnings. */
            uint32_t start_no = 0;
            uint32_t end_no = 0;
            uint32_t i = 0;
            GSList *node;
            for (node = continue_blocks; node != NULL; node = node->next, i++) {
                block_t *block = (block_t *)node->data;
                if (i == 0) {
                    start_no = block->block_no;
                }
                if (i == blocks_count - 1) {
                    end_no = block->block_no;
                }
                memcpy(tmp_buf + i * cctrl->block_size,
                       block->block, cctrl->block_size);
            }
            ret = cctrl->write_callback_func(cctrl->write_callback_user_param,
                                             tmp_buf, start_no, end_no);
            g_assert(ret >= 0);
            if (ret >= 0) {
                /* Return the flushed blocks to the cache and wake a writer
                 * that may be waiting for free space. */
                HLOG_DEBUG("--signal write thread--");
                g_mutex_lock(cctrl->cache_mutex);
                __free_from_cache(cctrl, continue_blocks);
                g_cond_signal(cctrl->writer_waken_cond);
                g_mutex_unlock(cctrl->cache_mutex);
                g_slist_free(continue_blocks);
            }
        } while (get_cache_free_size(cctrl) <
                     cctrl->flush_trigger_level * cctrl->cache_size / 100 ||
                 (res == 0 && get_cache_free_size(cctrl) != 0));
    }
    g_free(tmp_buf);
    HLOG_INFO("--flush worker exit--");
    return 0;
}