void UdpClient::do_link_auth_req( char *p_ip, unsigned short us_port )
{
    MSAuthReq_S *p_auth_req = new MSAuthReq_S();
    p_auth_req->magic_type = MSG_HEAD_MAGIC;
    p_auth_req->msg_type = IR_MONITOR_LINK_AUTH_REQ;
    p_auth_req->seq_id = 0;
    p_auth_req->total_len = sizeof( MSAuthReq_S );

    UINT32 ui_sec = ::time(NULL);
    std::string str_time_of_day;
    GetLocalTimeDay( ui_sec, str_time_of_day );
    std::string str_user_name = MSADMIN_USER;
    std::string str_digest =  str_user_name + str_time_of_day;

    UINT8 auc_digest_digest[16] = {0};
    UINT8 auc_degist_tmp[16] = {0};

    const int i_str_len = 128;
    INT8 *pc_str_tmp = new INT8[i_str_len]();
    memset( pc_str_tmp, 0, i_str_len );
    snprintf( pc_str_tmp, i_str_len, "%s", str_digest.c_str() );
    get_md5( pc_str_tmp, strlen( pc_str_tmp), NULL, auc_digest_digest );

    INT8  ac_md5[33] = {0};
    INT32  i_rand_tmp=0;
    UINT32 ui_time_stamp_tmp=0;

    Md5Val_S st_md5;
    memcpy(&st_md5, auc_digest_digest, sizeof(st_md5));
    snprintf(ac_md5, sizeof(ac_md5), "%08x%08x%08x%08x", htonl(st_md5.n_a), htonl(st_md5.n_b), htonl(st_md5.n_c), htonl(st_md5.n_d));

    INT8  *pc_str = new INT8[i_str_len]();
    memset(pc_str, 0, i_str_len);
    snprintf(pc_str, i_str_len, "%s%s%urand=%d", str_user_name.c_str(), ac_md5, ui_time_stamp_tmp, i_rand_tmp);
    get_md5( pc_str, strlen(pc_str), NULL, auc_degist_tmp );

    p_auth_req->body.ui_client_type = CLIENT_MONITOR;
    p_auth_req->body.ui_time_stamp = ui_time_stamp_tmp;
    p_auth_req->body.ui_rand = i_rand_tmp;
    strcpy( p_auth_req->body.ac_login_id, str_user_name.c_str() );
    memcpy( p_auth_req->body.auc_digest, auc_degist_tmp, sizeof(auc_degist_tmp) );
    //send_packet( get_link(), p_auth_req, 1 );
    send_packet_to( mh_link, p_auth_req, p_ip, us_port, UDP_FLAG_NONE );

    delete[] pc_str_tmp;   // release scratch buffers to avoid leaking them
    delete[] pc_str;
    delete p_auth_req;
    p_auth_req = NULL;
}
Example #2
//head insert
void insert (list * head, char *cm, FILE * f)
{
	list tmp=(list)malloc(sizeof(struct md5_file));
	tmp->name = malloc(strlen(cm) + 1);
	strcpy(tmp->name, cm);	/* copy the name; assigning the pointer would leak the buffer just allocated */
	get_md5(tmp->hash,f);
	//strcpy(tmp->hash, md);
	tmp->next= *head;
	*head=tmp;
}
Example #3
void _get_md5_str (char *out_str, size_t outlen,
                   const uint8_t *input, int n)
{
        uint8_t out[MD5_DIGEST_LEN] = {0};
        int     j = 0;

        GF_ASSERT (outlen >= (2*MD5_DIGEST_LEN + 1));
        get_md5 (out, input, n);
        for (j = 0; j < MD5_DIGEST_LEN; j++)
                snprintf(out_str + j * 2, outlen-j*2, "%02x", out[j]);

}
Example #4
// program start
int _tmain(int argc, TCHAR** argv) {
	//SET_UNICODE_MODE;
	STL_SETLOCALE_JAPAN;

	kjm::optionInfoList opts;
	opts.push_back(kjm::optionInfo(_T("binary"), _T("b"), kjm::no_argument));
	opts.push_back(kjm::optionInfo(_T("check"), _T("c"), kjm::no_argument));
	opts.push_back(kjm::optionInfo(_T("text"), _T("t"), kjm::no_argument));
	opts.push_back(kjm::optionInfo(_T("output"), _T("o"), kjm::required_argument));
	opts.push_back(kjm::optionInfo(_T("quiet"), _T(""), kjm::no_argument));
	opts.push_back(kjm::optionInfo(_T("status"), _T(""), kjm::no_argument));
	opts.push_back(kjm::optionInfo(_T("warn"), _T("w"), kjm::no_argument));
	opts.push_back(kjm::optionInfo(_T("help"), _T(""), kjm::no_argument));
	opts.push_back(kjm::optionInfo(_T("version"), _T(""), kjm::no_argument));

	kjm::cmdLine cmd;
	cmd.parse(argc, argv, opts);

	int exit_code = 0;

	if (cmd.hasOption(_T("version"))) {
		version();	// Print the version and exit successfully.
	}

	if (cmd.hasOption(_T("help"))) {
		usage( EXIT_SUCCESS );	// Print usage and exit successfully.
	}

	for (int i = 0; i < cmd.get_argCount(); i++) {
		kjm::_tstrings files = kjm::directory::glob(cmd.get_argument(i));

		for (size_t j = 0; j < files.size(); j++) {
			if (cmd.hasOption(_T("check"))) {
				if (check_md5(files[j], cmd) != 0) {
					exit_code = 1;
				}

				if (exit_code != 0 && cmd.hasOption(_T("status")) != false) {
					break;
				}
			} else {
				get_md5(files[j], cmd);
			}
		}
	}

	return exit_code;
}
Example #5
static int send_passwd(EAP_FRAME *requestframe)
{
    u_char str[MAX_MD5_STR];
    int len;

    EAP_MD5_VALUE *final_key = (EAP_MD5_VALUE*)(Eap_response_md5->ExtenData.Data);
    EAP_MD5_VALUE *attach_key = (EAP_MD5_VALUE*)requestframe->ExtenData.Data;
    printf("length of challenge value: %d\n",attach_key->Size);

    final_key->Size = attach_key->Size;

    memcpy(str,&requestframe->ExtenData.Id,sizeof(u_char));
    len = sizeof(u_char);

    memcpy(str+len,password,password_len);
    len += password_len;

    memcpy(str+len,attach_key->value,attach_key->Size);
    len += attach_key->Size;

    memcpy(final_key->value,get_md5(str,len),final_key->Size);


    memcpy(Eap_response_md5->DestMac,Nearest_mac,MAC_ADDRESS_LEN);

    Eap_response_md5->Length = username_len+EAP_EXTENDATA_BASE_LEN+sizeof(EAP_MD5_VALUE);
    swapbyte((u_char*)&Eap_response_md5->Length);

    Eap_response_md5->ExtenData.Length = username_len+EAP_EXTENDATA_BASE_LEN+sizeof(EAP_MD5_VALUE);
    swapbyte((u_char*)&Eap_response_md5->ExtenData.Length);

    Eap_response_md5->ExtenData.Id = requestframe->ExtenData.Id;

    memcpy(Eap_response_md5->ExtenData.Data+sizeof(EAP_MD5_VALUE),username,username_len);

    printf("size %d\n",sizeof(*Eap_response_md5));
    if (pcap_sendpacket(handle, (u_char*)Eap_response_md5, EAP_MESSAGE_LEN) != 0)
    {
        printf("Error Sending the packet: %s\n", pcap_geterr(handle));
        return ERROR;
    }
    printf("EAP-Response_MD5_Challenge sending...\n");
    //exit(0);
    return DONE;
}
Example #6
void WriteTask::draw() {
    SkString md5;
    {
        SkAutoLockPixels lock(fBitmap);
        md5 = fData ? get_md5(fData)
                    : get_md5(fBitmap.getPixels(), fBitmap.getSize());
    }

    SkASSERT(fSuffixes.count() > 0);
    SkString config = fSuffixes.back();
    SkString mode("direct");
    if (fSuffixes.count() > 1) {
        mode = fSuffixes.fromBack(1);
    }

    JsonData entry = { fBaseName, config, mode, fSourceType, md5 };
    {
        SkAutoMutexAcquire lock(&gJsonDataLock);
        gJsonData.push_back(entry);
    }

    SkString dir(FLAGS_writePath[0]);
#if defined(SK_BUILD_FOR_IOS)
    if (dir.equals("@")) {
        dir.set(FLAGS_resourcePath[0]);
    }
#endif
    this->makeDirOrFail(dir);

    SkString path;
    if (FLAGS_nameByHash) {
        // Flat directory of hash-named files.
        path = SkOSPath::Join(dir.c_str(), md5.c_str());
        path.append(fExtension);
        // We're content-addressed, so it's possible two threads race to write
        // this file.  We let the first one win.  This also means we won't
        // overwrite identical files from previous runs.
        if (sk_exists(path.c_str())) {
            return;
        }
    } else {
        // Nested by mode, config, etc.
        for (int i = 0; i < fSuffixes.count(); i++) {
            dir = SkOSPath::Join(dir.c_str(), fSuffixes[i].c_str());
            this->makeDirOrFail(dir);
        }
        path = SkOSPath::Join(dir.c_str(), fBaseName.c_str());
        path.append(fExtension);
        // The path is unique, so two threads can't both write to the same file.
        // If already present we overwrite here, since the content may have changed.
    }

    SkFILEWStream file(path.c_str());
    if (!file.isValid()) {
        return this->fail("Can't open file.");
    }

    bool ok = fData ? write_asset(fData, &file)
                    : SkImageEncoder::EncodeStream(&file, fBitmap, SkImageEncoder::kPNG_Type, 100);
    if (!ok) {
        return this->fail("Can't write to file.");
    }
}
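
The comments in WriteTask::draw() describe the content-addressed output mode: with hash-based naming the file name is the MD5 of the content, so losing a race to another writer is harmless and identical files from earlier runs can be skipped. A minimal standalone sketch of that pattern, assuming POSIX stat(); file_exists() and write_if_absent() are hypothetical helpers, not Skia APIs:

// Hedged sketch of the content-addressed write pattern described above.
#include <fstream>
#include <string>
#include <sys/stat.h>

static bool file_exists(const std::string& path) {
    struct stat st;
    return ::stat(path.c_str(), &st) == 0;
}

// Returns true if the file already existed or was written successfully.
// Because the name is derived from the content hash, losing the race to
// another writer is harmless: the winner wrote identical bytes.
static bool write_if_absent(const std::string& dir,
                            const std::string& md5_hex,
                            const std::string& ext,
                            const std::string& bytes) {
    const std::string path = dir + "/" + md5_hex + ext;
    if (file_exists(path)) {
        return true;                 // first writer already won
    }
    std::ofstream out(path, std::ios::binary);
    if (!out) {
        return false;
    }
    out.write(bytes.data(), static_cast<std::streamsize>(bytes.size()));
    return static_cast<bool>(out);
}
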
Example #7
/**
 * Calculate and store into SUM a strong MD4 checksum of the file
 * blocks seen so far.
 *
 * In plain rsync, the checksum is perturbed by a seed value.  This is
 * used when retrying a failed transmission: we've discovered that the
 * hashes collided at some point, so we're going to try again with
 * different hashes to see if we can get it right.  (Check tridge's
 * thesis for details and to see if that's correct.)
 *
 * Since we can't retry a web transaction I'm not sure if it's very
 * useful in rproxy.
 */
void rs_calc_strong_sum(void const *buf, size_t len, rs_strong_sum_t *sum)
{
    //rs_mdfour((unsigned char *) sum, buf, len);
	get_md5((unsigned char *) sum, buf, len);
}
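
The comment above explains that plain rsync perturbs the strong checksum with a seed when a transfer is retried after a hash collision; this variant drops the seed and swaps MD4 for get_md5(). A hedged sketch of how a seed could be folded back in, assuming the get_md5(out, buf, len) signature used in this example (rs_seeded_strong_sum() is a hypothetical helper, not part of librsync):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Hedged sketch, not the library's API: perturb the strong sum by hashing a
 * 4-byte seed followed by the block.  Assumes get_md5(out, buf, len) writes
 * a 16-byte digest into the rs_strong_sum_t, as above. */
void rs_seeded_strong_sum(void const *buf, size_t len, uint32_t seed,
                          rs_strong_sum_t *sum)
{
    unsigned char *tmp = (unsigned char *) malloc(len + sizeof(seed));
    if (!tmp)
        return;                           /* out of memory: leave *sum untouched */
    memcpy(tmp, &seed, sizeof(seed));     /* the seed perturbs the hash input */
    memcpy(tmp + sizeof(seed), buf, len);
    get_md5((unsigned char *) sum, tmp, len + sizeof(seed));
    free(tmp);
}
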
int main(int argc, char** argv)
{
    boost::log::trivial::severity_level log_level;
    boost::program_options::options_description desc("options");
    desc.add_options()
        ("help", "produce help message")
        ("topic", boost::program_options::value<std::string>(), "topic")
        ("broker", boost::program_options::value<std::string>(), "broker")
        ("partition", boost::program_options::value<std::string>(), "partition")
        ("key_schema_id", boost::program_options::value<std::string>(), "key_schema_id")
        ("write,w", boost::program_options::bool_switch()->default_value(false), "write to kafka")
        ("log_level", boost::program_options::value<boost::log::trivial::severity_level>(&log_level)->default_value(boost::log::trivial::info), "log level to output");

    boost::program_options::variables_map vm;
    try
    {
        boost::program_options::store(boost::program_options::parse_command_line(argc, argv, desc), vm);
    }
    catch (std::exception& e)
    {
        std::cout << "bad command line: " << e.what() << std::endl;
        return 0;
    }

    boost::program_options::notify(vm);

    boost::log::core::get()->set_filter(boost::log::trivial::severity >= log_level);
    BOOST_LOG_TRIVIAL(info) << "loglevel " << log_level;

    if (vm.count("help"))
    {
        std::cout << desc << std::endl;
        return 0;
    }

    int32_t kafka_port = 9092;
    std::vector<csi::kafka::broker_address> brokers;
    if (vm.count("broker"))
    {
        std::string s = vm["broker"].as<std::string>();
        size_t last_colon = s.find_last_of(':');
        if (last_colon != std::string::npos)
            kafka_port = atoi(s.substr(last_colon + 1).c_str());
        s = s.substr(0, last_colon);

        // now find the brokers...
        size_t last_separator = s.find_last_of(',');
        while (last_separator != std::string::npos)
        {
            std::string host = s.substr(last_separator + 1);
            brokers.push_back(csi::kafka::broker_address(host, kafka_port));
            s = s.substr(0, last_separator);
            last_separator = s.find_last_of(',');
        }
        brokers.push_back(csi::kafka::broker_address(s, kafka_port));
    }
    else
    {
        std::cout << "--broker must be specified" << std::endl;
        return 0;
    }

    int32_t schema_registry_port = 8081;
    std::vector<csi::kafka::broker_address> schema_registrys;
    std::string used_schema_registry;

    std::string topic;
    if (vm.count("topic"))
    {
        topic = vm["topic"].as<std::string>();
    }
    else
    {
        std::cout << "--topic must be specified" << std::endl;
        return -1;
    }


    std::vector<int> key_schemas;
    bool delete_all = false;
    if (vm.count("key_schema_id"))
    {
        std::string s = vm["key_schema_id"].as<std::string>();

        //special case *
        if (s == "*")
        {
            delete_all = true;
        }
        else
        {
            // now find the brokers...
            size_t last_separator = s.find_last_of(',');
            while (last_separator != std::string::npos)
            {
                std::string token = s.substr(last_separator + 1);
                key_schemas.push_back(atoi(token.c_str()));
                s = s.substr(0, last_separator);
                last_separator = s.find_last_of(',');
            }
            key_schemas.push_back(atoi(s.c_str()));
        }
    }
    else
    {
        std::cout << "--key_schema_id must be specified" << std::endl;
        return 0;
    }

    std::vector<int> partition_mask;
    if (vm.count("partition"))
    {
        std::string s = vm["partition"].as<std::string>();

        //special case *
        if (s == "*")
        {
        }
        else
        {
            // now find the brokers...
            size_t last_separator = s.find_last_of(',');
            while (last_separator != std::string::npos)
            {
                std::string token = s.substr(last_separator + 1);
                partition_mask.push_back(atoi(token.c_str()));
                s = s.substr(0, last_separator);
                last_separator = s.find_last_of(',');
            }
            partition_mask.push_back(atoi(s.c_str()));
        }
    }

    bool dry_run = true;
    if (vm["write"].as<bool>())
        dry_run = false;

    boost::asio::io_service io_service;
    std::unique_ptr<boost::asio::io_service::work> work(new boost::asio::io_service::work(io_service));
    boost::thread bt(boost::bind(&boost::asio::io_service::run, &io_service));

    csi::kafka::highlevel_consumer consumer(io_service, topic, partition_mask, 500, 1000000);
    csi::kafka::highlevel_producer producer(io_service, topic, -1, 500, 1000000);

    consumer.connect(brokers);
    //std::vector<int64_t> result = consumer.get_offsets();

    consumer.connect_forever(brokers);

    {
        producer.connect(brokers);
        BOOST_LOG_TRIVIAL(info) << "connected to kafka";
        producer.connect_forever(brokers);
    }


    std::map<int, int64_t> highwater_mark_offset;
    consumer.set_offset(csi::kafka::latest_offsets);
    // this is assuming too much - what if anything goes wrong...
    auto r = consumer.fetch();
    for (std::vector<csi::kafka::rpc_result<csi::kafka::fetch_response>>::const_iterator i = r.begin(); i != r.end(); ++i)
    {
        if (i->ec)
            continue; // or die??

        for (std::vector<csi::kafka::fetch_response::topic_data>::const_iterator j = (*i)->topics.begin(); j != (*i)->topics.end(); ++j)
        {
            for (std::vector<std::shared_ptr<csi::kafka::fetch_response::topic_data::partition_data>>::const_iterator k = j->partitions.begin(); k != j->partitions.end(); ++k)
            {
                if ((*k)->error_code)
                    continue; // or die??

                highwater_mark_offset[(*k)->partition_id] = (*k)->highwater_mark_offset;
            }
        }
    }

    consumer.set_offset(csi::kafka::earliest_available_offset);

    std::map<int, int64_t> last_offset;
    int64_t _remaining_records = 1;

    std::map<boost::uuids::uuid, std::shared_ptr<csi::kafka::basic_message>> _to_delete;

    consumer.stream_async([delete_all, key_schemas, &last_offset, &highwater_mark_offset, &_remaining_records, &_to_delete](const boost::system::error_code& ec1, csi::kafka::error_codes ec2, std::shared_ptr<csi::kafka::fetch_response::topic_data::partition_data> response)
    {
        if (ec1 || ec2)
        {
            BOOST_LOG_TRIVIAL(error) << "stream failed ec1::" << ec1 << " ec2" << csi::kafka::to_string(ec2);
            return;
        }

        if (response->error_code)
        {
            BOOST_LOG_TRIVIAL(error) << "stream failed for partition: " << response->partition_id << " ec:" << csi::kafka::to_string((csi::kafka::error_codes) response->error_code);
            return;
        }
        int partition_id = response->partition_id;
        int64_t lo = -1;
        for (std::vector<std::shared_ptr<csi::kafka::basic_message>>::const_iterator i = response->messages.begin(); i != response->messages.end(); ++i)
        {
            if ((*i)->key.is_null())
            {
                //BOOST_LOG_TRIVIAL(warning) << "got key==NULL";
                continue;
            }

            if ((*i)->key.size() < 4)
            {
                BOOST_LOG_TRIVIAL(warning) << "got keysize==" << (*i)->key.size();
                continue;
            }

            int32_t be;
            memcpy(&be, (*i)->key.data(), 4);
            int32_t key_schema_id = boost::endian::big_to_native<int32_t>(be);

            // should we kill this schema?
            if (delete_all || std::find(std::begin(key_schemas), std::end(key_schemas), key_schema_id) != std::end(key_schemas))
            {
                boost::uuids::uuid key = get_md5((*i)->key.data(), (*i)->key.size());

                //not already dead
                if (!(*i)->value.is_null())
                {
                    std::map<boost::uuids::uuid, std::shared_ptr<csi::kafka::basic_message>>::iterator item = _to_delete.find(key);
                    if (item == _to_delete.end())
                    {
                        std::shared_ptr<csi::kafka::basic_message> msg(new csi::kafka::basic_message());
                        msg->key = (*i)->key;
                        msg->value.set_null(true);
                        msg->partition = partition_id; // make sure we write to the same partition that we got the message from...
                        _to_delete[key] = msg;
                    }
                }
                else
                {
                    //we must search the map to see if we should refrain from deleting the item, since a delete marker is already on kafka (the previous message has not yet been removed by compaction)
                    std::map<boost::uuids::uuid, std::shared_ptr<csi::kafka::basic_message>>::iterator item = _to_delete.find(key);
                    if (item != _to_delete.end())
                    {
                        _to_delete.erase(item);
                    }
                }
            }
            lo = (*i)->offset;
        }
        if (lo >= 0)
            last_offset[partition_id] = lo;

        int64_t remaining_records = 0;
        for (std::map<int, int64_t>::const_iterator i = highwater_mark_offset.begin(); i != highwater_mark_offset.end(); ++i)
            remaining_records += ((int64_t)(i->second - 1)) - (int64_t)last_offset[i->first];
        _remaining_records = remaining_records;
    });

    while (true)
    {
        boost::this_thread::sleep(boost::posix_time::seconds(1));
        BOOST_LOG_TRIVIAL(info) << " to be deleted: " << _to_delete.size() << ", remaining: " << _remaining_records;
        if (_remaining_records <= 0)
            break;
    }
    BOOST_LOG_TRIVIAL(info) << "consumer finished";
    consumer.close();
    BOOST_LOG_TRIVIAL(info) << "consumer closed";

    BOOST_LOG_TRIVIAL(info) << "sending delete messages";

    std::vector<std::shared_ptr<csi::kafka::basic_message>> messages;
    for (std::map<boost::uuids::uuid, std::shared_ptr<csi::kafka::basic_message>>::iterator i = _to_delete.begin(); i != _to_delete.end(); ++i)
    {
        messages.push_back(i->second);
    }
    if (messages.size())
    {
        if (!dry_run)
        {
            producer.send_sync(messages);
        }
        else
        {
            std::cout << "dry run - should write " << messages.size() << " add -w to delete from kafka" << std::endl;
        }
    }
    else
    {
        std::cout << "uptodate - nothing to do" << std::endl;
    }
    BOOST_LOG_TRIVIAL(info) << "producer finished";
    producer.close();
    BOOST_LOG_TRIVIAL(info) << "producer closed";
    boost::this_thread::sleep(boost::posix_time::seconds(5));
    BOOST_LOG_TRIVIAL(info) << "done";
    work.reset();
    io_service.stop();
    return EXIT_SUCCESS;
}
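
The comments in the stream callback above rely on log compaction: a record in a compacted topic is deleted by producing a tombstone, i.e. a message with the same key, a null value, and the same partition. A minimal sketch of building such a message with the csi::kafka::basic_message fields used above (make_tombstone() is a hypothetical helper, not part of the library):

#include <memory>

// Hedged sketch: build a compaction tombstone for an existing record, using
// only the basic_message fields seen above (key, value.set_null(), partition).
std::shared_ptr<csi::kafka::basic_message>
make_tombstone(const csi::kafka::basic_message& original, int partition_id)
{
    auto msg = std::make_shared<csi::kafka::basic_message>();
    msg->key = original.key;        // same key, so compaction drops the old record
    msg->value.set_null(true);      // a null value marks the record for deletion
    msg->partition = partition_id;  // write back to the partition it came from
    return msg;
}
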
void*
daemon_request_list (void *arg) {
    struct daemon_request   *r;
    char                    answer[512];
    DIR                     *dir;
    struct dirent           *entry;
    char                    entry_full_path[256];
    struct stat             entry_stat;
    char                    *key;

    /* OKAY, let's say all options/args are silently ignored */

    r = (struct daemon_request *)arg;
    if (!r)
        return NULL;

    dir = opendir (prefs->shared_folder);
    if (dir == NULL) {
        log_failure (log_file,
                    "daemon_request_list (): Unable to opendir %s",
                    prefs->shared_folder);
        return NULL;
    }
    // Browsing my own files
    for (entry = readdir (dir); entry != NULL; entry = readdir (dir)) {
        // Listing all regular files
        if (entry->d_type == DT_REG) {
            snprintf (entry_full_path, sizeof (entry_full_path),
                    "%s/%s",
                    prefs->shared_folder,
                    entry->d_name);
            if (stat (entry_full_path, &entry_stat) < 0) {
                log_failure (log_file,
                            "daemon_request_list (): can't stat file %s",
                            entry_full_path);
                continue;
            }

            key = get_md5 (entry_full_path);
            if (!key)
                continue;

            snprintf (answer, sizeof (answer), "file %s %s %d %s:%d\n",
                    entry->d_name,
                    key,
                    (int) entry_stat.st_size,
                    my_ip,
                    prefs->daemon_port);

            if (daemon_send (r->daemon, answer) < 0) {
                log_failure (log_file,
                    "daemon_request_list (): failed to send data to daemon");
            }

            free (key);
            key = NULL;
        }
    }

    if (closedir (dir) < 0) {
        log_failure (log_file,
                    "daemon_request_list () : can't close shared directory");
        return NULL;
    }

    return NULL;
}
Example #10
static void gpfs_calc_attr2(struct rsync_gpfs_attr *a, struct rsync_gpfs_attr2 *a2)
{
	get_md5(a2->md5_digest, (uchar *)a->buf, (int)a->size);
	a2->crc32 = crc32(0, (unsigned char *)a->buf, a->size);
	a2->next_similar = -1;
}