コード例 #1
0
//////////////////////////////////////////////////////////////////////////
// II Full encryption
// Pads the plaintext up to a multiple of the AES block size, encrypts it,
// and returns a newly allocated buffer (caller must delete[] it).
//
// pBuffer - plaintext input (read-only; `size` bytes are consumed)
// size    - length of pBuffer in bytes
// outLen  - [out] length of the returned padded/encrypted buffer
// Returns NULL if the AES context has not been initialised.
char* CRijndael_Utils::FullEncryptAES(char* pBuffer, size_t size, size_t& outLen)
{
	// Encryption requires an initialised AES context.
	if (AES == NULL) return NULL;

	outLen = padded_size(size);
	size_t padding_bytes = outLen - size;

	// PKCS#7-style padding: every pad byte holds the pad count itself.
	// BUG FIX: the original computed (padding_bytes % DEFAULT_BLOCK_SIZE),
	// which writes 0 whenever a whole extra block of padding is added,
	// making the padding length unrecoverable on decryption. Store the
	// actual count instead (identical when padding_bytes < block size).
	const char pad = static_cast<char>(padding_bytes);

	char* tmp = new char[outLen];
	memcpy(tmp, pBuffer, size);
	memset(tmp + size, pad, padding_bytes);
	// In-place encryption of the padded buffer.
	AES->Encrypt(tmp, tmp, outLen);
	return tmp;
}
コード例 #2
0
ファイル: part2.cpp プロジェクト: tim36272/Fourier-Operations
int main(int argc, char* argv[]) {
	//if OpenGL is enabled open an OpenGL window, otherwise just a regular window
	cv::namedWindow("magnitude",cv::WINDOW_AUTOSIZE);
	if(argc != 2) {
		std::cout<<"Usage: ./part1.cpp image"<<std::endl;
		return 0;
	}

	cv::Mat input_image = cv::imread(argv[1],CV_LOAD_IMAGE_GRAYSCALE);

	if(input_image.empty()) {
		char appended[100];
		appended[0] = '\0';
		strcat(appended,"../");
		strcat(appended,argv[1]);
		cv::Mat input_image = cv::imread(appended,CV_LOAD_IMAGE_GRAYSCALE);
		if(input_image.empty()) {
			std::cout<<argv[1]<<" was invalid"<<std::endl;
			std::cout<<" also tried: "<<appended<<std::endl;
			return 0;
		}
	}

	//the image dimensions must be a power of two
	cv::Mat padded_image;
	cv::Size padded_size(
			cv::getOptimalDFTSize(input_image.cols),
			cv::getOptimalDFTSize(input_image.rows));

	//pad the input image
	cv::copyMakeBorder(input_image, //input image
			padded_image, //output image
			0, //pad the top with..
			padded_size.height-input_image.rows, //pad the bottom with...
			0, //pad the left with...
			padded_size.width-input_image.cols, //pad the right with...
			cv::BORDER_CONSTANT, //make the border constant (as opposed to a copy of the data
			cv::Scalar::all(0)); //make the border black

	/*
	 * The DFT function expects a two-channel image, so let's make two planes
	 * and then merge them
	 */

	cv::Mat planes[] = {cv::Mat_<float>(padded_image),cv::Mat::zeros(padded_size,CV_32F)};

	//now make a single complex (two-channel) image
	cv::Mat complex_image;
	cv::merge(planes,2,complex_image);

	//get the dft of the image
	cv::dft(complex_image,complex_image);

	//generate the first mask
	cv::Mat first_mask(padded_size,complex_image.type(),cv::Scalar::all(0));

	//get roi in the center of the mask
	cv::Rect first_roi(first_mask.cols/2-1,first_mask.rows/2-1,3,3);
	std::cout<<"roi: "<<first_roi<<std::endl;
	cv::Mat first_mask_center(first_mask,first_roi);
	first_mask_center.at<cv::Vec2f>(0,0)[0] = -1;
	first_mask_center.at<cv::Vec2f>(1,0)[0] = -2;
	first_mask_center.at<cv::Vec2f>(2,0)[0] = -1;
	first_mask_center.at<cv::Vec2f>(0,2)[0] = 1;
	first_mask_center.at<cv::Vec2f>(1,2)[0] = 2;
	first_mask_center.at<cv::Vec2f>(2,2)[0] = 1;

	//transform the mask to the fourier domain
	cv::dft(first_mask,first_mask);

	//filter the image
	cv::Mat first_filtered_image = multiplyInTimeDomain(complex_image,first_mask);

	//generate the second mask
	cv::Mat second_mask(padded_size,complex_image.type(),cv::Scalar::all(0));

	//get roi in the center of the mask
	cv::Rect second_roi(second_mask.cols/2-1,second_mask.rows/2-1,3,3);
	std::cout<<"roi: "<<second_roi<<std::endl;
	cv::Mat second_mask_center(second_mask,second_roi);
	second_mask_center.at<cv::Vec2f>(0,0)[0] = -1;
	second_mask_center.at<cv::Vec2f>(0,1)[0] = -2;
	second_mask_center.at<cv::Vec2f>(0,2)[0] = -1;
	second_mask_center.at<cv::Vec2f>(2,0)[0] = 1;
	second_mask_center.at<cv::Vec2f>(2,1)[0] = 2;
	second_mask_center.at<cv::Vec2f>(2,2)[0] = 1;
	std::cout<<second_mask_center<<std::endl;

	//transform the mask to the fourier domain
	cv::dft(second_mask,second_mask);

	//filter the image
	cv::Mat second_filtered_image = multiplyInTimeDomain(complex_image,second_mask);

	//convert filtered image back to spatial domain
    cv::dft(second_filtered_image,second_filtered_image,cv::DFT_INVERSE|cv::DFT_REAL_OUTPUT);
    cv::dft(first_filtered_image,first_filtered_image,cv::DFT_INVERSE|cv::DFT_REAL_OUTPUT);

	rearrangeQuadrants(&second_filtered_image);
	rearrangeQuadrants(&first_filtered_image);

    //normalize(first_filtered_image, first_filtered_image, 0, 1, CV_MINMAX);
    //normalize(second_filtered_image, second_filtered_image, 0, 1, CV_MINMAX);

    //compute magnitude

    cv::Mat result(first_filtered_image.rows,first_filtered_image.cols,CV_32FC1,cv::Scalar::all(0));
	for(int y=0;y<result.rows;y++) {
		for(int x=0;x<result.cols;x++) {
			float first = first_filtered_image.at<float>(y,x);
			float second = second_filtered_image.at<float>(y,x);
			result.at<float>(y,x) = sqrt(first*first+second*second);
		}
	}

    normalize(result, result, 0, 1, CV_MINMAX);
    cv::imshow("first filtered image",first_filtered_image);
    cv::imshow("second filtered image",second_filtered_image);
    cv::imshow("result",result);


    //show opencv sobel
    cv::Sobel(input_image,input_image,-1,1,0);
    imshow("opencv sobel",input_image);
   cv::waitKey(0);
    return 0;
}
コード例 #3
0
/*
 * data_path_tx_func - drain the TX packet queue into the shared-memory
 * ring buffer towards the remote processor ("cp" — presumably the cellular
 * communication processor; confirm against the driver docs).
 *
 * Packets are packed back-to-back into one ring-buffer slot (piggyback)
 * until the slot's remaining space cannot hold the next padded packet; the
 * slot is then flushed from the data cache and published via ap_wptr.
 * At most max_tx_shots slots are consumed per invocation, after which the
 * work is rescheduled so one queue cannot monopolize the context.
 *
 * arg is the struct data_path * cast to unsigned long (typical tasklet
 * calling convention).
 */
static void data_path_tx_func(unsigned long arg)
{
	struct data_path *dp = (struct data_path *)arg;
	struct shm_rbctl *rbctl = dp->rbctl;
	struct shm_skctl *skctl = rbctl->skctl_va;
	struct shm_psd_skhdr *skhdr;
	struct sk_buff *packet;
	int slot = 0;
	int pending_slot;	/* slot being filled but not yet published; -1 = none */
	int free_slots;
	int prio;
	int remain_bytes;	/* payload space left in the pending slot */
	int used_bytes;		/* payload bytes already packed into the pending slot */
	int consumed_slot = 0;
	int consumed_packets = 0;
	int start_q_len;
	int max_tx_shots = dp->max_tx_shots;

	pending_slot = -1;
	/* usable payload per slot = slot size minus its header */
	remain_bytes = rbctl->tx_skbuf_size - sizeof(struct shm_psd_skhdr);
	used_bytes = 0;

	start_q_len = tx_q_length(dp);

	dp->stat.tx_sched_cnt++;

	while (consumed_slot < max_tx_shots) {
		/* link down: drop everything queued and stop */
		if (!cp_is_synced) {
			tx_q_clean(dp);
			break;
		}

		free_slots = shm_free_tx_skbuf(rbctl);
		if (free_slots == 0) {
			/*
			 * notify cp only if we still have packets in queue
			 * otherwise, simply break
			 * also check current fc status, if tx_stopped is
			 * already sent to cp, do not try to interrupt cp again
			 * it is useless, and just make cp busier
			 * BTW:
			 * this may have race condition here, but as cp side
			 * have a watermark for resume interrupt,
			 * we can assume it is safe
			 */
			if (tx_q_length(dp) && !rbctl->is_ap_xmit_stopped) {
				shm_notify_ap_tx_stopped(rbctl);
				acipc_notify_ap_psd_tx_stopped();
			}
			break;
		} else if (free_slots == 1 && pending_slot != -1) {
			/*
			 * the only left slot is our pending slot
			 * check if we still have enough space in this
			 * pending slot
			 */
			packet = tx_q_peek(dp, NULL);
			if (!packet)
				break;

			/* packet is too large, notify cp and break */
			if (padded_size(packet->len) > remain_bytes &&
				!rbctl->is_ap_xmit_stopped) {
				shm_notify_ap_tx_stopped(rbctl);
				acipc_notify_ap_psd_tx_stopped();
				break;
			}
		}

		packet = tx_q_dequeue(dp, &prio);

		if (!packet)
			break;

		/* push to ring buffer */

		/* we have one slot pending */
		if (pending_slot != -1) {
			/*
			 * the packet is too large for the pending slot
			 * send out the pending slot firstly
			 */
			if (padded_size(packet->len) > remain_bytes) {
				/* flush cached slot contents before cp reads them */
				shm_flush_dcache(rbctl,
						SHM_PACKET_PTR(rbctl->tx_va,
							pending_slot,
							rbctl->tx_skbuf_size),
						used_bytes + sizeof(struct shm_psd_skhdr));
				/* publishing ap_wptr hands the slot to cp */
				skctl->ap_wptr = pending_slot;
				pending_slot = -1;
				consumed_slot++;
				dp->stat.tx_slots++;
				dp->stat.tx_free_bytes += remain_bytes;
				dp->stat.tx_used_bytes += used_bytes;
			} else
				slot = pending_slot;
		}

		/*
		 * each priority has one hard limit to guarantee higher priority
		 * packet is not affected by lower priority packet
		 * if we reach this limit, we can only send higher priority
		 * packets
		 * but in the other hand, if this packet can be filled into our
		 * pending slot, allow it anyway
		 */
		if (!has_enough_free_tx_slot(dp, free_slots, prio) &&
			((pending_slot == -1) || !dp->enable_piggyback)) {
			/* push back the packets and schedule delayed tx */
			tx_q_queue_head(dp, packet, prio);
			__data_path_schedule_tx(dp, true);
			dp->stat.tx_force_sched_cnt++;
			break;
		}

		/* get a new slot from ring buffer */
		if (pending_slot == -1) {
			slot = shm_get_next_tx_slot(dp->rbctl, skctl->ap_wptr);

			/* fresh slot: reset space accounting to full payload size */
			remain_bytes =
				rbctl->tx_skbuf_size
				- sizeof(struct shm_psd_skhdr);
			used_bytes = 0;

			pending_slot = slot;
		}

		consumed_packets++;

		dp->stat.tx_packets[prio]++;
		dp->stat.tx_bytes += packet->len;

		skhdr = (struct shm_psd_skhdr *)
			SHM_PACKET_PTR(rbctl->tx_va,
				slot,
				rbctl->tx_skbuf_size);

		/* we are sure our remains is enough for current packet */
		/* header length covers everything packed so far, padded */
		skhdr->length = used_bytes + padded_size(packet->len);
		memcpy((unsigned char *)(skhdr + 1) + used_bytes,
			packet->data, packet->len);

		used_bytes += padded_size(packet->len);
		remain_bytes -= padded_size(packet->len);

		trace_psd_xmit(packet, slot);

		/* accumulate per-priority queueing delay for statistics */
		dp->stat.tx_packets_delay[prio] +=
			ktime_to_ns(net_timedelta(skb_get_ktime(packet)));

		dev_kfree_skb_any(packet);
	}

	/* send out the pending slot */
	if (pending_slot != -1) {
		shm_flush_dcache(rbctl, SHM_PACKET_PTR(rbctl->tx_va,
				pending_slot,
				rbctl->tx_skbuf_size),
			used_bytes + sizeof(struct shm_psd_skhdr));
		skctl->ap_wptr = pending_slot;
		pending_slot = -1;
		consumed_slot++;
		dp->stat.tx_slots++;
		dp->stat.tx_free_bytes += remain_bytes;
		dp->stat.tx_used_bytes += used_bytes;
	}

	/* interrupt cp once per invocation, not once per slot */
	if (consumed_slot > 0) {
		trace_psd_xmit_irq(consumed_slot);
		acipc_notify_psd_packet_sent();
		dp->stat.tx_interrupts++;
		dp->stat.tx_sched_q_len += start_q_len;
	}

	/* budget exhausted: reschedule ourselves to continue draining */
	if (consumed_slot >= max_tx_shots) {
		data_path_schedule_tx(dp);
		dp->stat.tx_resched_cnt++;
	}

	/*
	 * ring buffer is stopped, just notify upper layer
	 * do not need to check is_tx_stopped here, as we need to handle
	 * following situation:
	 * a new on-demand PDP is activated after tx_stop is called
	 */
	if (rbctl->is_ap_xmit_stopped) {
		if (!dp->is_tx_stopped)
			pr_err("%s tx stop\n", __func__);

		dp->is_tx_stopped = true;

		/* notify upper layer tx stopped */
		if (dp->cbs->tx_stop)
			dp->cbs->tx_stop();

		/* reschedule tx to polling the ring buffer */
		if (tx_q_length(dp))
			__data_path_schedule_tx(dp, true);
	}

	/*
	 * ring buffer is resumed and the remain packets
	 * in queue is also sent out
	 */
	if (!rbctl->is_ap_xmit_stopped && dp->is_tx_stopped
		&& tx_q_length(dp) == 0) {
		pr_err("%s tx resume\n", __func__);

		/* notify upper layer tx resumed */
		if (dp->cbs->tx_resume)
			dp->cbs->tx_resume();

		dp->is_tx_stopped = false;
	}
}
コード例 #4
0
ファイル: changeset.hpp プロジェクト: 7890/osrm-backend
 /// Const iterator pointing one past the end of the padded data block.
 const_iterator cend() const {
     const auto past_end = data() + padded_size();
     return const_iterator{past_end};
 }
コード例 #5
0
ファイル: changeset.hpp プロジェクト: 7890/osrm-backend
 /// Iterator pointing one past the end of the padded data block.
 iterator end() {
     const auto past_end = data() + padded_size();
     return iterator{past_end};
 }