/** **************************************************************************
 * check-request-limits SAF
 *
 * See top of this file for the pb parameters this SAF can consume and the
 * defaults for each.
 *
 * This SAF counts requests per interval for the given monitor/bucket. If
 * the request/sec in an interval exceeds the max-rps given then it returns
 * REQ_ABORTED for this and all subsequent matching requests in the current
 * interval. After the next interval request rate recomputation the request
 * limiting may be discontinued if the conditions of 'continue' are met.
 *
 * Separately, concurrent requests for the same bucket may be limited to
 * the given number if max-connections is given.
 *
 * On the next request after purge_timeout, a purge sweep of the buckets is
 * done, deleting any entries which have not seen any recomputes in the 
 * purge interval (unless timeout disabled by setting it to zero).
 *
 * For more information, refer to the WS7.0 security functional spec at:
 * http://sac.eng/arc/WSARC/2004/076/
 *
 * For params: http://docs.sun.com/source/817-1835-10/npgmysaf.html#wp14889
 * For returns: http://docs.sun.com/source/817-1835-10/npgmysaf.html#wp14969
 *
 * This returns:
 *      REQ_NOACTION: If the request can go on.
 *      REQ_ABORTED: If the request has hit limits and is not to be processed.
 *          Return code is set to 'error' param (default 503).
 *
 */
/*
 * check_request_limits - SAF entry point (see header comment above).
 *
 * Returns REQ_NOACTION if the request may proceed, REQ_ABORTED if it is
 * rejected by either the request-rate (max-rps) or concurrency
 * (max-connections) limit; on rejection the HTTP status is set from the
 * 'error' param (default DEFAULT_ERROR).
 */
int check_request_limits(pblock *pb, Session *sn, Request *rq)
{
    const char * param;
    int response = REQ_NOACTION;

    if (rq->rq_attr.req_restarted) {
        // Do not count restarted requests as new requests for the
        // purpose of reqlimit accounting as it is just one client
        // request.  (This is particularly important for
        // max-connections since processing restarts would cause the
        // conc counter to increase more than once for a given request
        // but only decrease once at the end).
        return response;
    }

    time_t time_now = rq->req_start;
    // time_t is an arithmetic type, not a pointer: compare against 0,
    // not NULL (comparing to NULL is a type error where NULL is
    // ((void*)0)).
    assert (time_now != 0);

    // Get max-rps (0 means "not configured")

    int max_rps = 0;
    param = pblock_findval(MAXRPS_PARAM, pb);
    if (param) {
        max_rps = atoi(param);
    }

    // Get max-connections (0 means "not configured")

    int conc = 0;
    param = pblock_findval(CONC_PARAM, pb);
    if (param) {
        conc = atoi(param);
    }

    // We must have at least max-rps or max-connections, otherwise can't
    // do anything meaningful here

    if (!max_rps && !conc) {
        log_error(LOG_MISCONFIG, "check-request-limits", sn, rq,
                  XP_GetAdminStr(DBT_reqlimitCantWork));
        return response;
    }

    // Decide bucket name; if none, we use the anonymous bucket anon_bucket

    bucket_info * bucket = NULL;
    const char * bucket_name = pblock_findval(MONITOR_PARAM, pb);
    if (!bucket_name) {
        bucket = &anon_bucket;
    }

    // interval (in seconds), or use default

    int interval = DEFAULT_INTERVAL;
    param = pblock_findval(INTERVAL_PARAM, pb);
    if (param) {
        interval = atoi(param);
    }

    // Check continue, or use default
    int cont = DEFAULT_CONTINUE;
    param = pblock_findval(CONTINUE_PARAM, pb);
    if (param) {
        if (!strcmp(CONT_THRESHOLD_VAL, param)) { cont = CONT_THRESHOLD; }
        else if (!strcmp(CONT_SILENCE_VAL, param)) { cont = CONT_SILENCE; }
        else {
            // Log config error but continue since we have default
            log_error(LOG_MISCONFIG, "check-request-limits", sn, rq,
                      XP_GetAdminStr(DBT_reqlimitBadContinue));
        }
    }

    //----- START_CRIT ------------------------------

    crit_enter(reqlimit_crit);

    if (purge_timeout && (time_now > next_timeout)) { // run purge if needed
        handle_purge_timeout(time_now);
    }

    // If using anon bucket we already have reference to it, otherwise need
    // to go find it in hashtable (and if not found, create one)

    if (!bucket) {

        bucket = (bucket_info *)PL_HashTableLookup(hashtable, bucket_name);

        if (!bucket) {
            // Need to create new entry for this one
            log_error(LOG_VERBOSE, "check-request-limits", sn, rq,
                      "creating new entry for [%s]", bucket_name);
            bucket = (bucket_info *)PERM_MALLOC(sizeof(bucket_info));
            if (!bucket) {
                // Allocation failed; fail open (allow the request)
                // rather than dereference NULL inside the critical
                // section.
                log_error(LOG_WARN, "check-request-limits", sn, rq,
                          "out of memory creating entry for [%s]",
                          bucket_name);
                crit_exit(reqlimit_crit);
                return response;
            }
            bucket->count = 1;
            bucket->time = time_now + interval;
            bucket->state = REQ_NOACTION;
            bucket->conc = 0;   // handle conc case on initial?
            PL_HashTableAdd(hashtable, 
                            (const void *)PERM_STRDUP(bucket_name), 
                            (void *)bucket);
            // Since it is the first request, no need to check more
            crit_exit(reqlimit_crit);
            return response;
        }
    }

    // If we are doing max-rps limiting then handle it otherwise don't bother

    if (max_rps) {

        bucket->count++;

        if (time_now > bucket->time) { 
            // Interval or more has passed, time to recompute and recheck

            int time_interval = time_now - bucket->time + interval;
            int rps = bucket->count / time_interval;

            log_error(LOG_VERBOSE, "check-request-limits", sn, rq,
                      "bucket [%s] %d req/s (%d req in %d sec)",
                      bucket_name ? bucket_name: "", 
                      rps, bucket->count, time_interval);

            if (rps > max_rps) {
                // Start limiting
                bucket->state = REQ_ABORTED;
                log_error(LOG_WARN,  "check-request-limits", sn, rq,
                          XP_GetAdminStr(DBT_reqlimitAboveMaxRPS),
                          rps, max_rps,
                          bucket_name ? bucket_name: "");

            } else {
                // Reset state if we're under threshold or if this is first 
                // hit (which means an interval with zero hits has already 
                // passed)
                if ((cont == CONT_THRESHOLD) || (bucket->count == 1)) {
                    bucket->state = REQ_NOACTION;
                }
            }

            // Prepare for next interval by resetting count and recompute time
            bucket->count = 0;
            bucket->time = time_now + interval;
        }

        response = bucket->state;
    }

    // If decision to reject already done, no need to check or increase conc
    // since this is not getting processed anyway. Otherwise, do it if needed.
 
    if (conc && response != REQ_ABORTED) {

        if (bucket->conc >= conc) {
            // Note that this reject is based on conditions at this instant
            // instead of over an interval, so is independent of bucket->state
            response = REQ_ABORTED;

        } else {
            bucket->conc++;
            // This queues up a call to fn associated with req_cleanup
            // (here, reqlimit_conc_done) to be called after request is done
            request_set_data(rq, req_cleanup, bucket);
        }
    }

    crit_exit(reqlimit_crit);

    //----- END_CRIT ------------------------------

    if (response == REQ_NOACTION) {
        return REQ_NOACTION;
    }

    // abort this request

    int err = DEFAULT_ERROR;
    param = pblock_findval(ERROR_PARAM, pb);
    if (param) {
        err = atoi(param);
    }
    protocol_status(sn, rq, err, NULL);

    log_error(LOG_VERBOSE, "check-request-limits", sn, rq,
              "Rejecting request matching bucket [%s] with status %d",
              bucket_name ? bucket_name: "", err);
              
    return response;
}
/*
 * do_server_request - serve one client request on connfd, using the
 * shared file cache when possible.
 *
 * Flow: parse the request (request_init fills data->file_name), probe
 * the cache; on a hit serve the cached copy, on a miss read the file
 * from disk, optionally insert it into the cache (evicting as needed),
 * then send it.  All cache accesses are serialized with lock2.
 *
 * Fixes vs. previous version:
 *  - 'ret' was read uninitialized on the cache-hit path
 *    ("if (!ret) goto out;" ran even when request_readfile was never
 *    called) -- undefined behavior.
 *  - a failed request_readfile was still size-checked and inserted
 *    into the cache before bailing out; the failure check now comes
 *    before any caching.
 */
static void
do_server_request(struct server *sv, int connfd)
{
	int ret;
	struct request *rq;
	struct file_data *data;

	data = file_data_init();

	/* fills data->file_name with name of the file being requested */
	rq = request_init(connfd, data);
	if (!rq) {
		file_data_free(data);
		return;
	}

	cache *temp;
	int hash_index = hash(data->file_name);

	/* Probe the cache under lock2; release before doing any I/O. */
	pthread_mutex_lock(&lock2);
	temp = cache_lookup(data->file_name, hash_index);
	pthread_mutex_unlock(&lock2);

	if (temp != NULL) {
		/* Cache hit: attach the cached copy to this request. */
		request_set_data(rq, temp->data);
	} else {
		ret = request_readfile(rq);
		if (!ret) {
			/* Read failed: nothing valid to cache or send. */
			goto out;
		}

		/* Files larger than the entire cache are sent uncached. */
		if (data->file_size > sv->max_cache_size) {
			request_sendfile(rq);
			goto out;
		}

		pthread_mutex_lock(&lock2);
		/* Re-check under the lock: another thread may have cached
		 * this file while we were reading it; only the race winner
		 * inserts. */
		if (cache_lookup(data->file_name, hash_index) == NULL) {
			if ((sv->max_cache_size - cache_size) < data->file_size) {
				/* Evict just enough to make room. */
				int amount = data->file_size
					- (sv->max_cache_size - cache_size);
				cache_evict(amount);
			}
			cache_insert(data, hash_index);
		}
		pthread_mutex_unlock(&lock2);
	}

	/* sends file to client */
	request_sendfile(rq);
out:
	/* NOTE(review): data is not freed here; presumably request_destroy
	 * (or the cache, once inserted) owns it -- the commented-out
	 * file_data_free in the original suggests this was deliberate.
	 * Confirm ownership to rule out a leak on the uncached paths. */
	request_destroy(rq);
}