/*
 * Delayed-work handler that moves the pen booster into its second stage.
 * Boost levels 1 and 3 end their boost period here and drop every QoS
 * request; any other level is stepped down to the stage-2 frequencies.
 */
void wacom_change_dvfs_lock(struct work_struct *work)
{
	struct wacom_i2c *wac_i2c =
		container_of(work, struct wacom_i2c, dvfs_chg_work.work);

	mutex_lock(&wac_i2c->dvfs_lock);

	if (wac_i2c->boost_level != WACOM_BOOSTER_LEVEL1 &&
	    wac_i2c->boost_level != WACOM_BOOSTER_LEVEL3) {
		/* Keep boosting, but at the lower stage-2 operating points. */
		set_qos(&wac_i2c->cpu_qos, PM_QOS_CPU_FREQ_MIN, WACOM_BOOSTER_CPU_FREQ2);
		set_qos(&wac_i2c->mif_qos, PM_QOS_BUS_THROUGHPUT, WACOM_BOOSTER_MIF_FREQ2);
		set_qos(&wac_i2c->int_qos, PM_QOS_DEVICE_THROUGHPUT, WACOM_BOOSTER_INT_FREQ2);

		printk(KERN_DEBUG"epen:change_mif_dvfs_lock\n");
	} else {
		/* Levels 1 and 3 finish boosting at the change step. */
		remove_qos(&wac_i2c->cpu_qos);
		remove_qos(&wac_i2c->mif_qos);
		remove_qos(&wac_i2c->int_qos);

		wac_i2c->dvfs_lock_status = false;
		printk(KERN_DEBUG"epen:DVFS Off, %d\n", wac_i2c->boost_level);
	}

	mutex_unlock(&wac_i2c->dvfs_lock);
}
示例#2
0
/*
 * Handle a secure-world core migration request.
 *
 * @arg: CMD_USE_CLUSTER_BIG to acquire one reference on the big-cluster
 *       max-frequency QoS request, CMD_USE_CLUSTER_DEFAULT to release one.
 *
 * Requests are reference counted under tzdev_core_migration_lock: the QoS
 * request is raised on the first BIG user and dropped back to 0 when the
 * last user releases it.
 *
 * Returns 0 on success, -EINVAL on an unbalanced release or an unknown
 * command.
 */
int tzdev_migration_request(unsigned long arg)
{
    int ret = 0;

    mutex_lock(&tzdev_core_migration_lock);
    switch (arg) {
    case CMD_USE_CLUSTER_BIG:
        if (nr_big_cluster_requests == 0) {
            set_qos(&tz_qos, PM_QOS_CLUSTER1_FREQ_MIN, PM_QOS_CLUSTER1_FREQ_MAX_DEFAULT_VALUE);
            pr_notice("Set QoS request: max frequency for big cluster.\n");
        }
        nr_big_cluster_requests++;
        break;
    case CMD_USE_CLUSTER_DEFAULT:
        if (nr_big_cluster_requests == 0) {
            /* Release without a matching acquire. Note the separating
             * spaces in the concatenated literals below: without them the
             * log line read "requestsquantity ... migration:nr_big...". */
            pr_err("Unexpected value of migration requests "
                   "quantity while processing CORE_LITTLE migration: "
                   "nr_big_cluster_requests = %d\n", nr_big_cluster_requests);
            ret = -EINVAL;
        } else {
            nr_big_cluster_requests--;
            if (nr_big_cluster_requests == 0) {
                pr_notice("Set QoS request: min frequency for big cluster.\n");
                set_qos(&tz_qos, PM_QOS_CLUSTER1_FREQ_MIN, 0);
            }
        }
        break;
    default:
        pr_err("Wrong request: %lu\n", arg);
        ret = -EINVAL;
        break;
    }
    mutex_unlock(&tzdev_core_migration_lock);
    return ret;
}
/* Pen */
DECLARE_DVFS_DELAYED_WORK_FUNC(CHG, PEN)
{
	struct input_booster *data =
		container_of(work, struct input_booster, dvfses[BOOSTER_DEVICE_PEN].dvfs_chg_work.work);
	struct booster_dvfs *dvfs = &data->dvfses[BOOSTER_DEVICE_PEN];

	mutex_lock(&dvfs->lock);

	switch (dvfs->level) {
	case BOOSTER_LEVEL0:
	case BOOSTER_LEVEL1:
	case BOOSTER_LEVEL3:
		remove_qos(&dvfs->cpu_qos);
		remove_qos(&dvfs->mif_qos);
		remove_qos(&dvfs->int_qos);
		dvfs->lock_status = false;
		DVFS_DEV_DBG(DBG_DVFS, data->dev, "%s : DVFS OFF\n", __func__);
	break;
	case BOOSTER_LEVEL2:
		set_qos(&dvfs->cpu_qos, PM_QOS_CPU_FREQ_MIN, dvfs->freqs[BOOSTER_LEVEL2].cpu_freq);
		set_qos(&dvfs->mif_qos, PM_QOS_BUS_THROUGHPUT, dvfs->freqs[BOOSTER_LEVEL2].mif_freq);
		set_qos(&dvfs->int_qos, PM_QOS_DEVICE_THROUGHPUT, dvfs->freqs[BOOSTER_LEVEL2].int_freq);
		DVFS_DEV_DBG(DBG_DVFS, data->dev, "%s : DVFS CHANGED [level %d]\n", __func__, dvfs->level);
	break;
	default:
		dev_err(data->dev, "%s : Undefined type passed[%d]\n", __func__, dvfs->level);
	break;
	}

	mutex_unlock(&dvfs->lock);
}
/*
 * Apply an updated QoS definition to this Transceiver's DDS entities.
 *
 * NOTE(review): the `qos` argument is only read by the first assert;
 * set_qos() (no arguments) presumably refreshes the cached
 * m_writer_qos / m_reader_qos members from state stored elsewhere —
 * confirm against its definition.  The refreshed values are then pushed
 * to the writer and reader, and m_config_number is bumped so that
 * observers notice the configuration change.
 */
void
Transceiver::update_qos(const transceiverQos& qos)
{
  // cout << "Transceiver::update_qos(" << partition_id() << ", " << m_def.transceiver_id << ")" << endl;

  // The update must be addressed to this transceiver, and all three
  // worker threads must already be running.
  assert(m_def.transceiver_id == qos.transceiver_id);
  assert(m_writer_active);
  assert(m_reader_active);
  assert(m_report_active);

  set_qos();

  if (m_writer)			// REVISIT - locking
    {
      ReturnCode_t retcode = m_writer->set_qos(m_writer_qos);
      m_writer.check(retcode,
		     "latency_messageDataWriter::set_qos");
    }

  if (m_reader)			// REVISIT - locking
    {
      ReturnCode_t retcode = m_reader->set_qos(m_reader_qos);
      m_reader.check(retcode,
		     "latency_messageDataReader::set_qos");
    }

  // Signal the configuration change to whoever watches m_config_number.
  m_config_number++;
}
/*
 * Delayed-work handler that raises the key-press DVFS locks.
 * Pins CPU, MIF and INT to the level-1 key booster frequencies and
 * marks the booster active, all under key_dvfs_lock.
 */
static void gpio_key_change_dvfs_lock(struct work_struct *work)
{
	struct gpio_button_data *bdata =
			container_of(work,
				struct gpio_button_data, key_work_dvfs_chg.work);

	mutex_lock(&bdata->key_dvfs_lock);

	set_qos(&bdata->cpu_qos, PM_QOS_CPU_FREQ_MIN, KEY_BOOSTER_CPU_FREQ1);
	set_qos(&bdata->mif_qos, PM_QOS_BUS_THROUGHPUT, KEY_BOOSTER_MIF_FREQ1);
	set_qos(&bdata->int_qos, PM_QOS_DEVICE_THROUGHPUT, KEY_BOOSTER_INT_FREQ1);

	bdata->key_dvfs_lock_status = true;	/* booster is now active */
	/* dvfs_signal: presumably the pending-boost request flag, consumed
	 * here — confirm against the enqueueing code. */
	bdata->dvfs_signal = false;
	printk(KERN_DEBUG"keys:DVFS On\n");

	mutex_unlock(&bdata->key_dvfs_lock);
}
/* Key */
/* Key */
/*
 * Stage-change work for the key booster: unconditionally applies the
 * level-1 frequency table entry and marks the booster active.
 * `work` comes from the DECLARE_DVFS_DELAYED_WORK_FUNC() expansion.
 */
DECLARE_DVFS_DELAYED_WORK_FUNC(CHG, KEY)
{
	struct input_booster *data =
		container_of(work, struct input_booster, dvfses[BOOSTER_DEVICE_KEY].dvfs_chg_work.work);
	struct booster_dvfs *dvfs = &data->dvfses[BOOSTER_DEVICE_KEY];

	mutex_lock(&dvfs->lock);

	set_qos(&dvfs->cpu_qos, PM_QOS_CPU_FREQ_MIN, dvfs->freqs[BOOSTER_LEVEL1].cpu_freq);
	set_qos(&dvfs->mif_qos, PM_QOS_BUS_THROUGHPUT, dvfs->freqs[BOOSTER_LEVEL1].mif_freq);
	set_qos(&dvfs->int_qos, PM_QOS_DEVICE_THROUGHPUT, dvfs->freqs[BOOSTER_LEVEL1].int_freq);

	dvfs->lock_status = true;	/* booster is now active */
	/* short_press: presumably a pending short-press indicator consumed
	 * here — confirm against the key event handler. */
	dvfs->short_press = false;

	DVFS_DEV_DBG(DBG_DVFS, data->dev, "%s : DVFS ON [level %d]\n", __func__, dvfs->level);

	mutex_unlock(&dvfs->lock);
}
示例#7
0
// Dispatch a named QoS (name, value) pair to the configuration object
// for the requested entity level.  Delegates to the per-entity set_qos()
// overloads and returns their status (0 on success, non-zero on
// failure).  An unrecognized level is now reported as a failure instead
// of silently returning success.
int
QosSettings::set_qos(QosLevel level, const char* name, const char* value)
{
  int status = 0;
  switch (level) {
    case publisher:
      status = set_qos(publisher_qos_, name, value);
      break;
    case subscriber:
      status = set_qos(subscriber_qos_, name, value);
      break;
    case datawriter:
      status = set_qos(datawriter_qos_, name, value);
      break;
    case datareader:
      status = set_qos(datareader_qos_, name, value);
      break;
    default:
      // Previously an out-of-range enum value fell through with
      // status == 0 (success); treat it as an error instead.
      status = 1;
      break;
  }
  return status;
}
/*
 * SET handler for the pen booster: the entry point that turns boosting
 * on or off.  `booster_data` and `booster_mode` are supplied by the
 * DECLARE_DVFS_WORK_FUNC() macro expansion.
 */
DECLARE_DVFS_WORK_FUNC(SET, PEN)
{
	struct input_booster *data = (struct input_booster *)booster_data;
	struct booster_dvfs *dvfs = &data->dvfses[BOOSTER_DEVICE_PEN];

	mutex_lock(&dvfs->lock);

	/* Level 0 means the booster is administratively disabled. */
	if (!dvfs->level) {
		dev_err(data->dev, "%s : Skip to set booster due to level 0\n", __func__);
		goto out;
	}

	switch (booster_mode) {
	case BOOSTER_MODE_ON:
		/* Restart the boost window: drop any pending off/change work
		 * before applying the initial frequencies. */
		cancel_delayed_work(&dvfs->dvfs_off_work);
		cancel_delayed_work(&dvfs->dvfs_chg_work);
		switch (dvfs->level) {
		case BOOSTER_LEVEL1:
		case BOOSTER_LEVEL2:
			/* Both levels start from the level-1 table entry; they
			 * diverge later in the CHG work handler. */
			set_qos(&dvfs->cpu_qos, PM_QOS_CPU_FREQ_MIN, dvfs->freqs[BOOSTER_LEVEL1].cpu_freq);
			set_qos(&dvfs->mif_qos, PM_QOS_BUS_THROUGHPUT, dvfs->freqs[BOOSTER_LEVEL1].mif_freq);
			set_qos(&dvfs->int_qos, PM_QOS_DEVICE_THROUGHPUT, dvfs->freqs[BOOSTER_LEVEL1].int_freq);
		break;
		case BOOSTER_LEVEL3:
			set_qos(&dvfs->cpu_qos, PM_QOS_CPU_FREQ_MIN, dvfs->freqs[BOOSTER_LEVEL3].cpu_freq);
			set_qos(&dvfs->mif_qos, PM_QOS_BUS_THROUGHPUT, dvfs->freqs[BOOSTER_LEVEL3].mif_freq);
			set_qos(&dvfs->int_qos, PM_QOS_DEVICE_THROUGHPUT, dvfs->freqs[BOOSTER_LEVEL3].int_freq);
		break;
		default:
			dev_err(data->dev, "%s : Undefined type passed[%d]\n", __func__, dvfs->level);
		break;
		}
		DVFS_DEV_DBG(DBG_DVFS, data->dev, "%s : DVFS ON [level %d]\n",	__func__, dvfs->level);
		/* Schedule the second-stage transition (see the CHG handler). */
		schedule_delayed_work(&dvfs->dvfs_chg_work,
						msecs_to_jiffies(dvfs->msec_chg_time));
		dvfs->lock_status = true;
	break;
	case BOOSTER_MODE_OFF:
		/* Lazy release: let the off work run after msec_off_time. */
		if (dvfs->lock_status)
			schedule_delayed_work(&dvfs->dvfs_off_work,
								msecs_to_jiffies(dvfs->msec_off_time));
	break;
	case BOOSTER_MODE_FORCE_OFF:
		/* Immediate release: cancel pending work, run off work now. */
		if (dvfs->lock_status) {
			cancel_delayed_work(&dvfs->dvfs_chg_work);
			cancel_delayed_work(&dvfs->dvfs_off_work);
			schedule_work(&dvfs->dvfs_off_work.work);
		}
	break;
	default:
	break;
	}

out:
	mutex_unlock(&dvfs->lock);
	return;
}
/*
 * Apply an updated Transmitter definition.
 *
 * If the change touches scheduling class, thread priority, or the topic
 * (kind/id), the writer thread must be stopped before the update and
 * restarted afterwards; a topic change additionally rebuilds the topic
 * and QoS via set_topic()/set_qos().  Any other change is applied in
 * place.  In every path m_config_number is bumped so observers notice
 * the new configuration.
 */
void
Transmitter::update_def(const transmitterDef& def)
{
  // cout << "Transmitter::update_def(" << partition_id() << ", " << m_def.transmitter_id << ")" << endl;

  assert(m_def.transmitter_id == def.transmitter_id);
  assert(m_active);

  // check for changes requiring thread restart
  if (m_def.scheduling_class != def.scheduling_class ||
      m_def.thread_priority != def.thread_priority ||
      m_def.topic_kind != def.topic_kind ||
      m_def.topic_id != def.topic_id)
    {
      // stop writer thread
      // NOTE(review): clearing m_active appears to be the thread's stop
      // signal — join() then waits for it to exit; confirm against the
      // writer_thread loop.
      m_active = false;
      m_writer_thread.join();

      // check for topic change
      if (m_def.topic_kind != def.topic_kind ||
	  m_def.topic_id != def.topic_id)
	{
	  m_def = def;
	  set_topic();
	  set_qos();
	}
      else
	{
	  m_def = def;
	}

      // finish updating
      m_config_number++;
      
      // restart writer thread
      m_active = true;
      m_writer_thread.create(&Transmitter::writer_thread);
    }
  else
    {
      // update without stopping and restarting thread
      m_def = def;
      m_config_number++;
    }
}
/*
 * Arm, re-arm or tear down the pen DVFS booster.
 * @on: 0 = schedule a delayed release, 1 = (re)start boosting,
 *      2 = force an immediate release.  Other values are ignored.
 * Does nothing when boosting is disabled for this device.
 */
void wacom_set_dvfs_lock(struct wacom_i2c *wac_i2c,
	uint32_t on)
{
	if (WACOM_BOOSTER_DISABLE == wac_i2c->boost_level)
		return;

	mutex_lock(&wac_i2c->dvfs_lock);

	switch (on) {
	case 0:
		/* Delayed release: only meaningful while a boost is active. */
		if (wac_i2c->dvfs_lock_status)
			schedule_delayed_work(&wac_i2c->dvfs_off_work,
				msecs_to_jiffies(WACOM_BOOSTER_OFF_TIME));
		break;
	case 1:
		/* Restart the boost window from scratch. */
		cancel_delayed_work(&wac_i2c->dvfs_off_work);
		cancel_delayed_work(&wac_i2c->dvfs_chg_work);

		if (wac_i2c->boost_level != WACOM_BOOSTER_LEVEL3) {
			set_qos(&wac_i2c->cpu_qos, PM_QOS_CPU_FREQ_MIN, WACOM_BOOSTER_CPU_FREQ1);
			set_qos(&wac_i2c->mif_qos, PM_QOS_BUS_THROUGHPUT, WACOM_BOOSTER_MIF_FREQ1);
			set_qos(&wac_i2c->int_qos, PM_QOS_DEVICE_THROUGHPUT, WACOM_BOOSTER_INT_FREQ1);
		} else {
			set_qos(&wac_i2c->cpu_qos, PM_QOS_CPU_FREQ_MIN, WACOM_BOOSTER_CPU_FREQ3);
			set_qos(&wac_i2c->mif_qos, PM_QOS_BUS_THROUGHPUT, WACOM_BOOSTER_MIF_FREQ3);
			set_qos(&wac_i2c->int_qos, PM_QOS_DEVICE_THROUGHPUT, WACOM_BOOSTER_INT_FREQ3);
		}

		/* Second-stage transition runs after the change delay. */
		schedule_delayed_work(&wac_i2c->dvfs_chg_work,
			msecs_to_jiffies(WACOM_BOOSTER_CHG_TIME));

		wac_i2c->dvfs_lock_status = true;
		printk(KERN_DEBUG"epen:DVFS On, %d\n", wac_i2c->boost_level);
		break;
	case 2:
		/* Immediate release. */
		if (wac_i2c->dvfs_lock_status) {
			cancel_delayed_work(&wac_i2c->dvfs_off_work);
			cancel_delayed_work(&wac_i2c->dvfs_chg_work);
			schedule_work(&wac_i2c->dvfs_off_work.work);
		}
		break;
	default:
		break;
	}

	mutex_unlock(&wac_i2c->dvfs_lock);
}
示例#11
0
/*! \brief Apply handler for transports */
static int transport_apply(const struct ast_sorcery *sorcery, void *obj)
{
	struct ast_sip_transport *transport = obj;
	RAII_VAR(struct ast_sip_transport *, existing, ast_sorcery_retrieve_by_id(sorcery, "transport", ast_sorcery_object_get_id(obj)), ao2_cleanup);
	pj_status_t res = -1;

	if (!existing || !existing->state) {
		if (!(transport->state = ao2_alloc(sizeof(*transport->state), transport_state_destroy))) {
			ast_log(LOG_ERROR, "Transport state for '%s' could not be allocated\n", ast_sorcery_object_get_id(obj));
			return -1;
		}
	} else {
		transport->state = existing->state;
		ao2_ref(transport->state, +1);
	}

	/* Once active a transport can not be reconfigured */
	if (transport->state->transport || transport->state->factory) {
		return -1;
	}

	if (transport->host.addr.sa_family != PJ_AF_INET && transport->host.addr.sa_family != PJ_AF_INET6) {
		ast_log(LOG_ERROR, "Transport '%s' could not be started as binding not specified\n", ast_sorcery_object_get_id(obj));
		return -1;
	}

	/* Set default port if not present */
	if (!pj_sockaddr_get_port(&transport->host)) {
		pj_sockaddr_set_port(&transport->host, (transport->type == AST_TRANSPORT_TLS) ? 5061 : 5060);
	}

	/* Now that we know what address family we can set up a dnsmgr refresh for the external media address if present */
	if (!ast_strlen_zero(transport->external_signaling_address)) {
		if (transport->host.addr.sa_family == pj_AF_INET()) {
			transport->external_address.ss.ss_family = AF_INET;
		} else if (transport->host.addr.sa_family == pj_AF_INET6()) {
			transport->external_address.ss.ss_family = AF_INET6;
		} else {
			ast_log(LOG_ERROR, "Unknown address family for transport '%s', could not get external signaling address\n",
					ast_sorcery_object_get_id(obj));
			return -1;
		}

		if (ast_dnsmgr_lookup(transport->external_signaling_address, &transport->external_address, &transport->external_address_refresher, NULL) < 0) {
			ast_log(LOG_ERROR, "Could not create dnsmgr for external signaling address on '%s'\n", ast_sorcery_object_get_id(obj));
			return -1;
		}
	}

	if (transport->type == AST_TRANSPORT_UDP) {
		if (transport->host.addr.sa_family == pj_AF_INET()) {
			res = pjsip_udp_transport_start(ast_sip_get_pjsip_endpoint(), &transport->host.ipv4, NULL, transport->async_operations, &transport->state->transport);
		} else if (transport->host.addr.sa_family == pj_AF_INET6()) {
			res = pjsip_udp_transport_start6(ast_sip_get_pjsip_endpoint(), &transport->host.ipv6, NULL, transport->async_operations, &transport->state->transport);
		}

		if (res == PJ_SUCCESS && (transport->tos || transport->cos)) {
			pj_sock_t sock;
			pj_qos_params qos_params;

			sock = pjsip_udp_transport_get_socket(transport->state->transport);
			pj_sock_get_qos_params(sock, &qos_params);
			set_qos(transport, &qos_params);
			pj_sock_set_qos_params(sock, &qos_params);
		}
	} else if (transport->type == AST_TRANSPORT_TCP) {
		pjsip_tcp_transport_cfg cfg;

		pjsip_tcp_transport_cfg_default(&cfg, transport->host.addr.sa_family);
		cfg.bind_addr = transport->host;
		cfg.async_cnt = transport->async_operations;
		set_qos(transport, &cfg.qos_params);

		res = pjsip_tcp_transport_start3(ast_sip_get_pjsip_endpoint(), &cfg, &transport->state->factory);
	} else if (transport->type == AST_TRANSPORT_TLS) {
		transport->tls.ca_list_file = pj_str((char*)transport->ca_list_file);
		transport->tls.cert_file = pj_str((char*)transport->cert_file);
		transport->tls.privkey_file = pj_str((char*)transport->privkey_file);
		transport->tls.password = pj_str((char*)transport->password);
		set_qos(transport, &transport->tls.qos_params);

		res = pjsip_tls_transport_start2(ast_sip_get_pjsip_endpoint(), &transport->tls, &transport->host, NULL, transport->async_operations, &transport->state->factory);
	} else if ((transport->type == AST_TRANSPORT_WS) || (transport->type == AST_TRANSPORT_WSS)) {
		if (transport->cos || transport->tos) {
			ast_log(LOG_WARNING, "TOS and COS values ignored for websocket transport\n");
		}
		res = PJ_SUCCESS;
	}

	if (res != PJ_SUCCESS) {
		char msg[PJ_ERR_MSG_SIZE];

		pjsip_strerror(res, msg, sizeof(msg));
		ast_log(LOG_ERROR, "Transport '%s' could not be started: %s\n", ast_sorcery_object_get_id(obj), msg);
		return -1;
	}
	return 0;
}
示例#12
0
/*!
 * \brief Create, configure and enable the DDS entities backing this Datalink.
 *
 * Sets up the DomainParticipantFactory (disabling auto-enable so builtin
 * readers can be configured before the participant goes live), creates the
 * participant with customized QoS, raises the UDPv4 transport limits,
 * tunes the builtin participant reader's liveliness, attaches listeners to
 * the builtin topics, creates publisher and subscriber, enables the
 * participant, and finally makes the participant ignore itself.
 * Throws datalink_error on any unrecoverable DDS failure.
 */
void DatalinkPrivate::initializeDatalink()
{
    Q_Q(Datalink);
    Q_ASSERT(q);

    DDS_ReturnCode_t            retcode;

    Node* parentNode = q->parent();

    {   /** create the participant...
         *
         * \internal
         * The code related to setting up the "spies" via the built-in subscriber is
         * based upon an example from the RTI knowledge base:
         * http://community.rti.com/kb/how-can-i-detect-new-datawriters-and-datareaders-joining-domain
         */

        {   /** \internal Factory */

            DDS_DomainParticipantFactoryQos   factory_qos;

            // To conserve memory, builtin Subscribers and DataReaders are created
            // only if and when you look them up. Therefore, to avoid missing
            // builtin data, you should look up the builtin readers before the
            // DomainParticipant is enabled.

            // Modify the Factory's QoS so that DomainParticipants are not automatically enabled when created
            DDSTheParticipantFactory->get_qos(factory_qos);


            factory_qos.entity_factory.autoenable_created_entities = DDS_BOOLEAN_FALSE;


            if(false) // DEBUG
            {   /** \internal Increasing the verbosity of DDS internal logging.
                 * copied from http://community.rti.com/comment/1979#comment-1979 */
                factory_qos.logging.category = NDDS_CONFIG_LOG_CATEGORY_ENTITIES ;
                factory_qos.logging.verbosity = NDDS_CONFIG_LOG_VERBOSITY_STATUS_LOCAL;
            }

            DDSTheParticipantFactory->set_qos(factory_qos);

        }





        // Modify participant_qos settings here, if necessary
        retcode = DDSTheParticipantFactory->get_default_participant_qos(participant_qos);
        if (retcode != DDS_RETCODE_OK) {
            qCritical("get default participant qos error %d\n", retcode);
            throw datalink_error("Get default participant QOS error.",retcode);
            return ;
        }

        // Increase type code serialized length from 256 (default) to 64K (maximum).
        // This ensures we can receive more involved type codes
        participant_qos.resource_limits.type_code_max_serialized_length = 0xffff; //FIXME: Magic Numbers

        // Increase receiver buffer size. The UDPv4 transport resource settings
        // will be increased after participant is created.
        participant_qos.receiver_pool.buffer_size = 65535; //FIXME: Magic Numbers

        {   /** transfer the name of the node to the DDS participant
             *
             *\note This is a one time thing, i.e. later updates of the name of the
             * parent node will (and actually can) \e not be propagated to the DDS
             * participant!
             *
             */
            DDS_String_replace(&participant_qos.participant_name.name,
                               parentNode->objectName().toStdString().c_str());
        }

        {   /// transfer the type of the node to a DDS participant property
            QHash<QString,QString> props;

            props["type"]=enumToQString(parentNode->type());
            qDebug() << q->qualifiedName() << ": Parent Node is of type" << props["type"];

            int hFov = 0;
            if( parentNode->settings()->camera().present() )
            {
                hFov = parentNode->settings()->camera().get().hFov();
            }
            props["hFov"]=QString("%1").arg(hFov);

#ifdef WITH_RTI_MONITORING
            {
                /** \internal Enabling monitoring functionality for this participant
                 * (using the RTI Monitor Library)
                 * NOTE: Compare RTI_Monitoring_Library_GettingStarted.pdf */
                props["rti.monitor.library"]="rtimonitoring";

                char buffer[17];
                sprintf(buffer,"%p",RTIDefaultMonitor_create);
                props["rti.monitor.create_function_ptr"] = buffer;
            }
#endif // WITH RTI_MONITORING

            auto old_props = participant_qos.property.value;

            DDS::appendHashToProperty(props, participant_qos.property.value);

            auto new_props = participant_qos.property.value;

            //NOTE: don't free the memory pointed to by type.name and value, as
            // in that case the newly added property has a "\null" name and type. :(
            //
//       DDS_String_free(type.name); //TODO: who would do the clean up of this?
//       DDS_String_free(type.value); //TODO: who would do the clean up of this?
        }



        {   /// set discovery and liveliness parameters

            /** \internal
             * Some related articles on the RTI community forum / knowledge base:
             * - http://community.rti.com/kb/why-does-my-subscriber-report-multiple-liveliness-change-events-when-i-kill-my-publisher
             * - http://community.rti.com/forum-topic/how-can-i-react-participant-going-stale
             */


            // The time (in seconds) this node has had to been silent before other participants
            // declare it "stale" or "dead"
            //FIXME: I never ever got this to work... :/
            participant_qos.discovery_config.participant_liveliness_lease_duration.sec = 5; //NOTE: the default is 100

            // The time intervall (in seconds) in which the participant will assert
            // its liveliness (i.e. it will send out an liveliness asserting message)
            participant_qos.discovery_config.participant_liveliness_assert_period.sec  = 3; //NOTE: the default is 30

        }





        participant_listener = new NodePrivate::Participant_Listener(parentNode);
        DDS_StatusMask mask = NodePrivate::Participant_Listener::implementedCallbacks;


        // Create the DomainParticipant (which will start out disabled)
        participant  = DDSDomainParticipantFactory::get_instance()->
                       create_participant(
                           domainId              // Domain ID
                           ,participant_qos      // QoS
                           ,participant_listener // Listener
                           ,mask                 // Listener Mask
                       );
        Q_ASSERT(participant);

        if (participant == nullptr) {
            // NOTE(review): retcode still holds the result of the earlier
            // get_default_participant_qos() call here; create_participant()
            // reports failure only via the null return value.
            qCritical("create_participant error %d\n", retcode);
            throw datalink_error("Create participant error.",retcode);
            return;
        }
    }

    {   /** change the default UPDv4 socket layer settings.
         *
         * \internal
         * From the knowledge base entry related to the "spies":
         * "A a side note not related to builtin topics but related to the example
         * attached to this solution, we increase the typecode serialized length
         * from 256 (the default) to 64K (the maximum) to make sure we can receive
         * more involved typecodes. The typecode data size varies from data type to
         * data type (the more complex the structure, the larger the typecode). The
         * DomainParticipant's resource_limits.type_code_max_serialized_length
         * controls the maximum typecode size that can be sent and received by the
         * participant. RTI Connext’s default value is 256, which is adequate for
         * only very simple data structures (for instance, a simple struct with no
         * more than 8 primitive members)."
         */

        NDDS_Transport_UDPv4_Property_t property = NDDS_TRANSPORT_UDPV4_PROPERTY_DEFAULT;
        NDDS_Transport_Property_t& propref = reinterpret_cast<NDDS_Transport_Property_t&>(property); //FIXME: reinterpret_cast!

        // The default maximum data size is 8K. This is determined by the 9K maximum
        // socket buffer size and message_size_max, as defined in the UDPv4 builtin
        // transport.
        // To change the properties of the UDPv4 builtin transport, refer to the
        // online documentation (Programming How-To's ->Transport Use Cases).
        //      1. Turn off auto-enable of newly created participants (see above)
        //      2. Get the UDPv4 property
        //      3. Modify the property
        //      4. Set the property
        //      5. Enable the participant



        retcode = NDDSTransportSupport::get_builtin_transport_property(
                      participant,
                      DDS_TRANSPORTBUILTIN_UDPv4,
                      propref);
        if (retcode != DDS_RETCODE_OK)
        {
            qCritical() << q->qualifiedName()
                        << ": DDS error getting builtin transport property:" << retcode;
        }

        /* Increase the UDPv4 maximum message size to 64K (large messages) */
        property.parent.message_size_max = 65535; //FIXME: Magic Numbers
        property.recv_socket_buffer_size = 65535; //FIXME: Magic Numbers
        property.send_socket_buffer_size = 65535; //FIXME: Magic Numbers

        retcode = NDDSTransportSupport::set_builtin_transport_property(
                      participant
                      ,DDS_TRANSPORTBUILTIN_UDPv4
                      ,propref);
        if (retcode != DDS_RETCODE_OK)
        {
            qCritical() << q->qualifiedName()
                        << ": DDS error seting builtin transport property:" << retcode;
        }
    }

    {   /** \internal set the liveliness duration for the builtin reader for the topic
         *  DDS_PARTICIPANT_TOPIC_NAME */

        auto builtin_subscriber = participant->get_builtin_subscriber();
        Q_ASSERT( builtin_subscriber );


        auto builtin_sub_reader = builtin_subscriber->lookup_datareader(DDS_PARTICIPANT_TOPIC_NAME);
        Q_ASSERT( builtin_sub_reader );

        DDS_DataReaderQos defReaderQoS;
        builtin_sub_reader->get_qos(defReaderQoS);

        defReaderQoS.liveliness.kind = DDS_AUTOMATIC_LIVELINESS_QOS; ///NOTE: Default is DDS_AUTOMATIC_LIVELINESS_QOS
        defReaderQoS.liveliness.lease_duration = DDS_Duration_t::from_millis(1250); ///FIXME: Magic numbers: BuiltIn Reader Liveliness Duration 1.25s

        builtin_sub_reader->set_qos(defReaderQoS);
    }



    {   /// attach listeners to the built-in topics
        builtInTopic_Publications_Listener = new NodePrivate::BuiltInTopic_Publications_Listener(parentNode);
        attachToBuiltInTopic(builtInTopic_Publications_Listener, DDS_PUBLICATION_TOPIC_NAME
                             /*listener mask = DDS_DATA_AVAILABLE_STATUS */
                            );

        builtInTopic_Subscriptions_Listener = new NodePrivate::BuiltInTopic_Subscriptions_Listener(parentNode);
        attachToBuiltInTopic(builtInTopic_Subscriptions_Listener, DDS_SUBSCRIPTION_TOPIC_NAME
                             /*listener mask = DDS_DATA_AVAILABLE_STATUS */
                            );

        DDS_StatusMask participantListenerMask =
            NodePrivate::BuiltInTopic_Participant_Listener::implementedCallbacks;

        builtInTopic_Participant_Listener = new NodePrivate::BuiltInTopic_Participant_Listener(parentNode);
        attachToBuiltInTopic(builtInTopic_Participant_Listener, DDS_PARTICIPANT_TOPIC_NAME,
                             /*listener mask */ participantListenerMask
//       /*listener mask */ DDS_LIVELINESS_CHANGED_STATUS
                            );
    }

    {   /// create the publisher ...



        //NOTE: To customize publisher QoS, use the configuration file USER_QOS_PROFILES.xml
        publisher = participant->create_publisher( DDS_PUBLISHER_QOS_DEFAULT,
                    nullptr,  // listener
                    DDS_STATUS_MASK_NONE
                                                 );
        Q_ASSERT(publisher);
    }

    {   /// create the subscriber ...
        subscriberListener = new SubscriberListener;
//     DDS_StatusMask mask =   DDS_DATA_ON_READERS_STATUS
//                           | DDS_DATA_AVAILABLE_STATUS ;
        DDS_StatusMask mask = subscriberListener->implementedCallbacks;


        //NOTE: To customize the subscriber QoS, use the configuration file USER_QOS_PROFILES.xml
        subscriber = participant->create_subscriber( DDS_SUBSCRIBER_QOS_DEFAULT
                     , subscriberListener         // Listener
                     , mask // Listener Mask
                                                   );
        Q_ASSERT(subscriber);
    }


    // enable the factory entities in DDS.
    // NOTE: The participant is disabled as the auto-enable QoS of the factory has
    // ben set to false. However, that setting is still at it's default "true" for
    // the participant itself, meaning that the subscriber and publisher should
    // auto-enable once the participant is enabled. Hence it should be sufficient
    // to just enable the participant...

    retcode = participant->enable();
    if(retcode != DDS_RETCODE_OK)
    {
        qCritical("\"%s\" : Initialization failed, couldn't enable the participant. RETCODE = %s.",
                  q->qualifiedName().toStdString().c_str(),
                  DDS::enumToQString(retcode).toStdString().c_str() );
        throw datalink_error("Participant couldn't be enabled", retcode);
        return;
    }

    {   /** Configure the participant to ignore itself,
        * i.e. don't let dataReaders receive matching publications from
        * dataWriters of the same participant, i.e. the same Node.
        * This effectively means that a Node will only "receive" (well, process,
        * I guess) data from other Nodes
        * \note From the RTI API manual: "There is no way to reverse this
        * operation."
        */
        retcode = participant->ignore_participant(participant->get_instance_handle());

        if( retcode != DDS_RETCODE_OK )
        {
            qCritical("ignore_participant error %d\n", retcode);
            throw datalink_error("Ignore participant error.", retcode);
            return;
        }
    }

    {   // GUID Debug output
        qDebug() << q->qualifiedName() << "Ownship Instance Handle:" << participant->get_instance_handle();

        participant->get_qos(participant_qos);
        qDebug() << "RTPS Host ID        :"
                 << participant_qos.wire_protocol.rtps_host_id << ","
                 << QString::number(participant_qos.wire_protocol.rtps_host_id,16).toUpper().prepend("0x");
        qDebug() << "RTPS App ID         :"
                 << participant_qos.wire_protocol.rtps_app_id << ","
                 << QString::number(participant_qos.wire_protocol.rtps_app_id,16).toUpper().prepend("0x");
        qDebug() << "RTPS Instance ID    :"
                 << participant_qos.wire_protocol.rtps_instance_id << ","
                 << QString::number(participant_qos.wire_protocol.rtps_instance_id,16).toUpper().prepend("0x");

        /** \internal From RTI Api documentation ([local]/RTI/ndds.5.1.0/doc/html/api_cpp/classDDSDomainParticipantFactory.html#af7c7137eccd41ffa9351d577fb088417):
         * "If you want to create multiple participants on a given host in the same
         * domain, make sure each one has a different participant index (set in the
         * DDS_WireProtocolQosPolicy). This in turn will ensure each participant
         * uses a different port number (since the unicast port numbers are
         * calculated from the participant index and the domain ID)." */
        qDebug() << "RTPS Participant ID :"
                 << participant_qos.wire_protocol.participant_id << ","
                 << QString::number(participant_qos.wire_protocol.participant_id,16).toUpper().prepend("0x");
        qDebug() << "DDS Domain ID :"
                 << participant->get_domain_id() << ","
                 << QString::number(participant->get_domain_id(),16).toUpper().prepend("0x");
    }


    // get an update version of the participant QoS settings now that it has been
    // enabled.
    retcode = participant->get_qos(participant_qos);
    Q_ASSERT( retcode == DDS_RETCODE_OK );

}
示例#13
0
/*
 * Initialize this Transceiver from its definition.
 *
 * Builds a QueryCondition keyed on transceiver_id, sets up the topics,
 * then reads this transceiver's QoS instance: if none exists yet the QoS
 * is initialized to defaults and written out; otherwise the existing
 * sample is adopted.  Finally the writer/reader/report worker threads
 * are started.
 */
void
Transceiver::create(const transceiverDef& def)
{
  m_def = def;
  // cout << "Transceiver::create(" << partition_id() << ", " << m_def.transceiver_id << ")" << endl;

  // setup QueryCondition for reading this Transceiver's Qos

  stringstream id;
  id << m_def.transceiver_id;
  StringSeq params;
  params.length(1);
  params[0] = id.str().c_str();
  m_qos_query.create(ANY_SAMPLE_STATE,
		     ANY_VIEW_STATE,
		     ANY_INSTANCE_STATE,
		     "transceiver_id = %0",
		     params);

  // setup Transceiver topic

  set_topics();

  // read initial Qos

  transceiverQosSeq qoss;
  SampleInfoSeq infos;
  // REVISIT - read or take?
  ReturnCode_t retcode = qos_reader()->read_w_condition(qoss,
							infos,
							1,
							m_qos_query);
  if (retcode == RETCODE_NO_DATA)
    {
      // no Qos instance to read, so initialize and write
      m_qos.group_id = m_def.group_id;
      m_qos.transceiver_id = m_def.transceiver_id;
      m_qos.partition_id = m_def.partition_id;
      m_qos.writer_qos.latency_budget.duration.sec = 0;
      m_qos.writer_qos.latency_budget.duration.nanosec = 0;
      m_qos.writer_qos.transport_priority.value = 0;
      m_qos.reader_qos.history.depth = 1;
      m_qos.reader_qos.latency_budget.duration.sec = 0;
      m_qos.reader_qos.latency_budget.duration.nanosec = 0;

      retcode = qos_writer()->write(m_qos,
				    0);
      qos_writer().check(retcode,
			 "transceiverQosDataWriter::write");
    }
  else
    {
      // Any status other than NO_DATA must be success; check() verifies.
      qos_reader().check(retcode,
			 "transceiverQosDataReader::read_w_condition");

      // The query condition keys on transceiver_id, so exactly one
      // matching sample is expected.
      assert(qoss.length() == 1);
      assert(infos.length() == 1);
      m_qos = qoss[0];
      assert(m_qos.group_id == m_def.group_id);
      assert(m_qos.transceiver_id == m_def.transceiver_id);
      assert(m_qos.partition_id == m_def.partition_id);
    }
  qos_reader()->return_loan(qoss,
			    infos);

  set_qos();

  // start threads

  m_writer_active = true;
  m_writer_thread.create(&Transceiver::writer_thread);

  m_reader_active = true;
  m_reader_thread.create(&Transceiver::reader_thread);

  m_report_active = true;
  m_report_thread.create(&Transceiver::report_thread);
}
示例#14
0
/*! \brief Apply handler for transports
 *
 * Called by sorcery whenever a transport object is loaded/retrieved.
 * Builds a fresh internal state for the transport, compares it against any
 * previously-applied state, and (re)starts the underlying PJSIP transport
 * (UDP/TCP/TLS/WS/WSS) only when the configuration actually changed and
 * reloads are allowed.
 *
 * \retval 0 on success (or when the existing transport is kept as-is)
 * \retval -1 on allocation, validation, or bind failure
 */
static int transport_apply(const struct ast_sorcery *sorcery, void *obj)
{
	struct ast_sip_transport *transport = obj;
	const char *transport_id = ast_sorcery_object_get_id(obj);
	/* NOTE(review): several early returns below happen while 'states' is
	 * still write-locked; this relies on states_cleanup() (the RAII
	 * destructor) releasing that lock — confirm against its definition. */
	RAII_VAR(struct ao2_container *, states, transport_states, states_cleanup);
	RAII_VAR(struct internal_state *, temp_state, NULL, ao2_cleanup);
	RAII_VAR(struct internal_state *, perm_state, NULL, ao2_cleanup);
	RAII_VAR(struct ast_variable *, changes, NULL, ast_variables_destroy);
	/* Stays != PJ_SUCCESS until one of the bind attempts succeeds. */
	pj_status_t res = -1;
	int i;
#define BIND_TRIES 3
#define BIND_DELAY_US 100000

	if (!states) {
		return -1;
	}

	/*
	 * transport_apply gets called for EVERY retrieval of a transport when using realtime.
	 * We need to prevent multiple threads from trying to mess with underlying transports
	 * at the same time.  The container is the only thing we have to lock on.
	 */
	ao2_wrlock(states);

	temp_state = internal_state_alloc(transport);
	if (!temp_state) {
		ast_log(LOG_ERROR, "Transport '%s' failed to allocate memory\n", transport_id);
		return -1;
	}

	/* If this transport was applied before, keep the running instance when
	 * neither the sorcery config nor the derived state has changed. */
	perm_state = find_internal_state_by_transport(transport);
	if (perm_state) {
		ast_sorcery_diff(sorcery, perm_state->transport, transport, &changes);
		if (!changes && !has_state_changed(perm_state->state, temp_state->state)) {
			/* In case someone is using the deprecated fields, reset them */
			transport->state = perm_state->state;
			copy_state_to_transport(transport);
			ao2_replace(perm_state->transport, transport);
			return 0;
		}

		/* Config changed, but this transport is pinned: warn once and
		 * keep the previously applied values. */
		if (!transport->allow_reload) {
			if (!perm_state->change_detected) {
				perm_state->change_detected = 1;
				ast_log(LOG_WARNING, "Transport '%s' is not reloadable, maintaining previous values\n", transport_id);
			}
			/* In case someone is using the deprecated fields, reset them */
			transport->state = perm_state->state;
			copy_state_to_transport(transport);
			ao2_replace(perm_state->transport, transport);
			return 0;
		}
	}

	/* A usable bind address (IPv4 or IPv6) is mandatory. */
	if (temp_state->state->host.addr.sa_family != PJ_AF_INET && temp_state->state->host.addr.sa_family != PJ_AF_INET6) {
		ast_log(LOG_ERROR, "Transport '%s' could not be started as binding not specified\n", transport_id);
		return -1;
	}

	/* Set default port if not present */
	if (!pj_sockaddr_get_port(&temp_state->state->host)) {
		pj_sockaddr_set_port(&temp_state->state->host, (transport->type == AST_TRANSPORT_TLS) ? 5061 : 5060);
	}

	/* Now that we know what address family we can set up a dnsmgr refresh for the external media address if present */
	if (!ast_strlen_zero(transport->external_signaling_address)) {
		if (temp_state->state->host.addr.sa_family == pj_AF_INET()) {
			temp_state->state->external_address.ss.ss_family = AF_INET;
		} else if (temp_state->state->host.addr.sa_family == pj_AF_INET6()) {
			temp_state->state->external_address.ss.ss_family = AF_INET6;
		} else {
			ast_log(LOG_ERROR, "Unknown address family for transport '%s', could not get external signaling address\n",
					transport_id);
			return -1;
		}

		if (ast_dnsmgr_lookup(transport->external_signaling_address, &temp_state->state->external_address, &temp_state->state->external_address_refresher, NULL) < 0) {
			ast_log(LOG_ERROR, "Could not create dnsmgr for external signaling address on '%s'\n", transport_id);
			return -1;
		}
	}

	if (transport->type == AST_TRANSPORT_UDP) {

		/* Rebinding to the same port may transiently fail while the old
		 * socket is torn down, so pause the old transport and retry the
		 * bind up to BIND_TRIES times with a short delay. */
		for (i = 0; i < BIND_TRIES && res != PJ_SUCCESS; i++) {
			if (perm_state && perm_state->state && perm_state->state->transport) {
				pjsip_udp_transport_pause(perm_state->state->transport,
					PJSIP_UDP_TRANSPORT_DESTROY_SOCKET);
				usleep(BIND_DELAY_US);
			}

			if (temp_state->state->host.addr.sa_family == pj_AF_INET()) {
				res = pjsip_udp_transport_start(ast_sip_get_pjsip_endpoint(),
					&temp_state->state->host.ipv4, NULL, transport->async_operations,
					&temp_state->state->transport);
			} else if (temp_state->state->host.addr.sa_family == pj_AF_INET6()) {
				res = pjsip_udp_transport_start6(ast_sip_get_pjsip_endpoint(),
					&temp_state->state->host.ipv6, NULL, transport->async_operations,
					&temp_state->state->transport);
			}
		}

		/* Apply TOS/COS on the already-bound socket via PJ QoS params. */
		if (res == PJ_SUCCESS && (transport->tos || transport->cos)) {
			pj_sock_t sock;
			pj_qos_params qos_params;
			sock = pjsip_udp_transport_get_socket(temp_state->state->transport);
			pj_sock_get_qos_params(sock, &qos_params);
			set_qos(transport, &qos_params);
			pj_sock_set_qos_params(sock, &qos_params);
		}
	} else if (transport->type == AST_TRANSPORT_TCP) {
		pjsip_tcp_transport_cfg cfg;

		pjsip_tcp_transport_cfg_default(&cfg, temp_state->state->host.addr.sa_family);
		cfg.bind_addr = temp_state->state->host;
		cfg.async_cnt = transport->async_operations;
		/* TOS/COS go into the factory config before the listener starts. */
		set_qos(transport, &cfg.qos_params);

		/* Same retry dance as UDP: destroy the old listener factory
		 * first, then try to bind the new one. */
		for (i = 0; i < BIND_TRIES && res != PJ_SUCCESS; i++) {
			if (perm_state && perm_state->state && perm_state->state->factory
				&& perm_state->state->factory->destroy) {
				perm_state->state->factory->destroy(perm_state->state->factory);
				usleep(BIND_DELAY_US);
			}

			res = pjsip_tcp_transport_start3(ast_sip_get_pjsip_endpoint(), &cfg,
				&temp_state->state->factory);
		}
	} else if (transport->type == AST_TRANSPORT_TLS) {
		if (transport->async_operations > 1 && ast_compare_versions(pj_get_version(), "2.5.0") < 0) {
			ast_log(LOG_ERROR, "Transport: %s: When protocol=tls and pjproject version < 2.5.0, async_operations can't be > 1\n",
					ast_sorcery_object_get_id(obj));
			return -1;
		}

		temp_state->state->tls.password = pj_str((char*)transport->password);
		set_qos(transport, &temp_state->state->tls.qos_params);

		for (i = 0; i < BIND_TRIES && res != PJ_SUCCESS; i++) {
			if (perm_state && perm_state->state && perm_state->state->factory
				&& perm_state->state->factory->destroy) {
				perm_state->state->factory->destroy(perm_state->state->factory);
				usleep(BIND_DELAY_US);
			}

			res = pjsip_tls_transport_start2(ast_sip_get_pjsip_endpoint(), &temp_state->state->tls,
				&temp_state->state->host, NULL, transport->async_operations,
				&temp_state->state->factory);
		}
	} else if ((transport->type == AST_TRANSPORT_WS) || (transport->type == AST_TRANSPORT_WSS)) {
		/* Websocket transports ride on the HTTP server; nothing to bind here. */
		if (transport->cos || transport->tos) {
			ast_log(LOG_WARNING, "TOS and COS values ignored for websocket transport\n");
		}
		res = PJ_SUCCESS;
	}

	if (res != PJ_SUCCESS) {
		char msg[PJ_ERR_MSG_SIZE];

		pj_strerror(res, msg, sizeof(msg));
		ast_log(LOG_ERROR, "Transport '%s' could not be started: %s\n", ast_sorcery_object_get_id(obj), msg);
		return -1;
	}

	/* Success: publish the new state, replacing any previous one in the
	 * container (container is still locked, hence OBJ_NOLOCK). */
	copy_state_to_transport(transport);
	if (perm_state) {
		ao2_unlink_flags(states, perm_state, OBJ_NOLOCK);
	}
	ao2_link_flags(states, temp_state, OBJ_NOLOCK);

	return 0;
}
/* Touch */
/*
 * Delayed "change" work for the touch booster: after the initial boost head
 * time expires, either drop all QoS requests (boost off) or step the QoS
 * requests down/over to the level's secondary frequencies.
 */
DECLARE_DVFS_DELAYED_WORK_FUNC(CHG, TOUCH)
{
	struct booster_dvfs *bst =
		container_of(work, struct booster_dvfs, dvfs_chg_work.work);
	struct input_booster *drv = dev_get_drvdata(bst->parent_dev);
	int lvl;

	mutex_lock(&bst->lock);

	/* Levels without a phase time never run a second phase. */
	if (!bst->times[bst->level].phase_time)
		bst->phase_excuted = false;

	lvl = bst->level;

	if (lvl == BOOSTER_LEVEL0 || lvl == BOOSTER_LEVEL1 || lvl == BOOSTER_LEVEL3) {
		/* Boost window is over: release every QoS request. */
		remove_qos(&bst->cpu_qos);
		remove_qos(&bst->kfc_qos);
		remove_qos(&bst->mif_qos);
		remove_qos(&bst->int_qos);
		SET_HMP(drv, BOOSTER_DEVICE_TOUCH, false);
		bst->lock_status = false;

		DVFS_DEV_DBG(DBG_DVFS, drv->dev, "%s : DVFS OFF\n", __func__);
	} else if (lvl == BOOSTER_LEVEL2) {
		/* Step down: keep kfc/int/mif pinned, free the cpu request. */
		set_qos(&bst->kfc_qos, PM_QOS_KFC_FREQ_MIN, bst->freqs[BOOSTER_LEVEL2].kfc_freq);
		set_qos(&bst->int_qos, PM_QOS_DEVICE_THROUGHPUT, bst->freqs[BOOSTER_LEVEL2].int_freq);
		set_qos(&bst->mif_qos, PM_QOS_BUS_THROUGHPUT, bst->freqs[BOOSTER_LEVEL2].mif_freq);
		remove_qos(&bst->cpu_qos);
		SET_HMP(drv, BOOSTER_DEVICE_TOUCH, false);

		DVFS_DEV_DBG(DBG_DVFS, drv->dev, "%s : DVFS CHANGED [level %d]\n", __func__, bst->level);
	} else if (lvl == BOOSTER_LEVEL4) {
		/* Nothing to change; just report. */
		DVFS_DEV_DBG(DBG_DVFS, drv->dev, "%s : DVFS CHANGED [level %d]\n", __func__, bst->level);
	} else if (lvl == BOOSTER_LEVEL5) {
		if (bst->phase_excuted) {
			/* Second phase already ran: turn everything off. */
			remove_qos(&bst->cpu_qos);
			remove_qos(&bst->kfc_qos);
			remove_qos(&bst->mif_qos);
			remove_qos(&bst->int_qos);
			SET_HMP(drv, BOOSTER_DEVICE_TOUCH, false);
			bst->lock_status = false;
			bst->phase_excuted = false;

			DVFS_DEV_DBG(DBG_DVFS, drv->dev, "%s : DVFS OFF\n", __func__);
		} else {
			/* First phase: pin cpu and re-arm this work for the
			 * remainder of the head time. */
			set_qos(&bst->cpu_qos, PM_QOS_CPU_FREQ_MIN, bst->freqs[BOOSTER_LEVEL5].cpu_freq);
			schedule_delayed_work(&bst->dvfs_chg_work,
				msecs_to_jiffies(bst->times[BOOSTER_LEVEL5].head_time - bst->times[BOOSTER_LEVEL5].phase_time));
			bst->phase_excuted = true;
			DVFS_DEV_DBG(DBG_DVFS, drv->dev, "%s : DVFS CHANGED [level %d, start time[%d]]\n",
				__func__, bst->level, bst->times[BOOSTER_LEVEL5].head_time - bst->times[BOOSTER_LEVEL5].phase_time);
		}
	} else if (lvl == BOOSTER_LEVEL9) {
		/* Level 9 switches to its dedicated "CHG" frequency table. */
		SET_HMP(drv, BOOSTER_DEVICE_TOUCH, bst->freqs[BOOSTER_LEVEL9_CHG].hmp_boost);
		set_qos(&bst->kfc_qos, PM_QOS_KFC_FREQ_MIN, bst->freqs[BOOSTER_LEVEL9_CHG].kfc_freq);
		set_qos(&bst->int_qos, PM_QOS_DEVICE_THROUGHPUT, bst->freqs[BOOSTER_LEVEL9_CHG].int_freq);
		set_qos(&bst->cpu_qos, PM_QOS_CPU_FREQ_MIN, bst->freqs[BOOSTER_LEVEL9_CHG].cpu_freq);
		set_qos(&bst->mif_qos, PM_QOS_BUS_THROUGHPUT, bst->freqs[BOOSTER_LEVEL9_CHG].mif_freq);

		DVFS_DEV_DBG(DBG_DVFS, drv->dev, "%s : DVFS CHANGED [level %d]\n", __func__, bst->level);
	} else {
		dev_err(drv->dev, "%s : Undefined type passed[%d]\n", __func__, bst->level);
	}

	/* Once the boost is off there is no pending phase. */
	if (!bst->lock_status)
		bst->phase_excuted = false;

	mutex_unlock(&bst->lock);
}
/*
 * Entry point for touch booster mode changes.  BOOSTER_MODE_ON applies the
 * configured level's initial QoS requests and arms the delayed "change" work;
 * BOOSTER_MODE_OFF schedules a deferred release; BOOSTER_MODE_FORCE_OFF
 * releases immediately.
 */
DECLARE_DVFS_WORK_FUNC(SET, TOUCH)
{
	struct input_booster *drv = (struct input_booster *)booster_data;
	struct booster_dvfs *bst = drv->dvfses[BOOSTER_DEVICE_TOUCH];
	int lvl;

	if (!bst || !bst->initialized) {
		dev_err(drv->dev, "%s: Dvfs is not initialized\n", __func__);
		return;
	}

	mutex_lock(&bst->lock);

	/* Level 0 means the booster is disabled. */
	if (!bst->level) {
		dev_err(drv->dev, "%s : Skip to set booster due to level 0\n", __func__);
		goto out;
	}

	lvl = bst->level;

	if (booster_mode == BOOSTER_MODE_ON) {
		/* Restart the boost window from scratch. */
		cancel_delayed_work(&bst->dvfs_off_work);
		cancel_delayed_work(&bst->dvfs_chg_work);

		if (lvl == BOOSTER_LEVEL1 || lvl == BOOSTER_LEVEL2) {
			/* Both levels begin with the LEVEL1 frequency set. */
			SET_HMP(drv, BOOSTER_DEVICE_TOUCH, bst->freqs[BOOSTER_LEVEL1].hmp_boost);
			set_qos(&bst->kfc_qos, PM_QOS_KFC_FREQ_MIN, bst->freqs[BOOSTER_LEVEL1].kfc_freq);
			set_qos(&bst->int_qos, PM_QOS_DEVICE_THROUGHPUT, bst->freqs[BOOSTER_LEVEL1].int_freq);
			set_qos(&bst->cpu_qos, PM_QOS_CPU_FREQ_MIN, bst->freqs[BOOSTER_LEVEL1].cpu_freq);
			set_qos(&bst->mif_qos, PM_QOS_BUS_THROUGHPUT, bst->freqs[BOOSTER_LEVEL1].mif_freq);
		} else if (lvl == BOOSTER_LEVEL3) {
			set_qos(&bst->kfc_qos, PM_QOS_KFC_FREQ_MIN, bst->freqs[BOOSTER_LEVEL3].kfc_freq);
			set_qos(&bst->int_qos, PM_QOS_DEVICE_THROUGHPUT, bst->freqs[BOOSTER_LEVEL3].int_freq);
			set_qos(&bst->mif_qos, PM_QOS_BUS_THROUGHPUT, bst->freqs[BOOSTER_LEVEL3].mif_freq);
			remove_qos(&bst->cpu_qos);
			SET_HMP(drv, BOOSTER_DEVICE_TOUCH, bst->freqs[BOOSTER_LEVEL3].hmp_boost);
		} else if (lvl == BOOSTER_LEVEL4) {
			set_qos(&bst->kfc_qos, PM_QOS_KFC_FREQ_MIN, bst->freqs[BOOSTER_LEVEL4].kfc_freq);
			set_qos(&bst->int_qos, PM_QOS_DEVICE_THROUGHPUT, bst->freqs[BOOSTER_LEVEL4].int_freq);
			set_qos(&bst->mif_qos, PM_QOS_BUS_THROUGHPUT, bst->freqs[BOOSTER_LEVEL4].mif_freq);
			remove_qos(&bst->cpu_qos);
			SET_HMP(drv, BOOSTER_DEVICE_TOUCH, bst->freqs[BOOSTER_LEVEL4].hmp_boost);
		} else if (lvl == BOOSTER_LEVEL5) {
			SET_HMP(drv, BOOSTER_DEVICE_TOUCH, bst->freqs[BOOSTER_LEVEL5].hmp_boost);
			set_qos(&bst->kfc_qos, PM_QOS_KFC_FREQ_MIN, bst->freqs[BOOSTER_LEVEL5].kfc_freq);
			set_qos(&bst->int_qos, PM_QOS_DEVICE_THROUGHPUT, bst->freqs[BOOSTER_LEVEL5].int_freq);
			set_qos(&bst->mif_qos, PM_QOS_BUS_THROUGHPUT, bst->freqs[BOOSTER_LEVEL5].mif_freq);
		} else if (lvl == BOOSTER_LEVEL9) {
			SET_HMP(drv, BOOSTER_DEVICE_TOUCH, bst->freqs[BOOSTER_LEVEL9].hmp_boost);
			set_qos(&bst->kfc_qos, PM_QOS_KFC_FREQ_MIN, bst->freqs[BOOSTER_LEVEL9].kfc_freq);
			set_qos(&bst->int_qos, PM_QOS_DEVICE_THROUGHPUT, bst->freqs[BOOSTER_LEVEL9].int_freq);
			set_qos(&bst->cpu_qos, PM_QOS_CPU_FREQ_MIN, bst->freqs[BOOSTER_LEVEL9].cpu_freq);
			set_qos(&bst->mif_qos, PM_QOS_BUS_THROUGHPUT, bst->freqs[BOOSTER_LEVEL9].mif_freq);
		} else {
			dev_err(drv->dev, "%s : Undefined type passed[%d]\n", __func__, bst->level);
		}

		/* Arm the change work: after phase_time (two-phase levels) or
		 * head_time (single-phase levels). */
		if (bst->times[bst->level].phase_time) {
			schedule_delayed_work(&bst->dvfs_chg_work,
								msecs_to_jiffies(bst->times[bst->level].phase_time));
			if (bst->phase_excuted)
				bst->phase_excuted = false;
		} else {
			schedule_delayed_work(&bst->dvfs_chg_work,
								msecs_to_jiffies(bst->times[bst->level].head_time));
		}
		bst->lock_status = true;

		DVFS_DEV_DBG(DBG_DVFS, drv->dev, "%s : DVFS ON [level %d][start time [%d]]\n",
			__func__, bst->level, bst->times[bst->level].phase_time ?: bst->times[bst->level].head_time);
	} else if (booster_mode == BOOSTER_MODE_OFF) {
		/* Deferred release after the level's tail time. */
		if (bst->lock_status)
			schedule_delayed_work(&bst->dvfs_off_work,
						msecs_to_jiffies(bst->times[bst->level].tail_time));
	} else if (booster_mode == BOOSTER_MODE_FORCE_OFF) {
		/* Immediate release: drop pending work and run off work now. */
		if (bst->lock_status) {
			cancel_delayed_work(&bst->dvfs_chg_work);
			cancel_delayed_work(&bst->dvfs_off_work);
			schedule_work(&bst->dvfs_off_work.work);
		}
	}

out:
	mutex_unlock(&bst->lock);
}