Example #1
static rc_t TableWriterSeq_WriteStatistics(TableWriterSeq const *cself, KMDataNode *node)
{
    pb_t pb;
    rc_t rc;
    KDataBuffer buf;
    
    rc = KDataBufferMake(&buf, 8 * sizeof(pb.stats[0]), cself->statsCount); /* element size is given in bits */
    if (rc) return rc;
    
    pb.stats = buf.base;
    pb.i = 0;
    rc = KVectorVisitU64(cself->stats, 0, stats_cb, &pb);
    if (rc == 0) {
        unsigned i;
        unsigned const n = cself->statsCount < 126 ? cself->statsCount : 126; /* keep at most 126 entries */
        uint64_t *const distance = buf.base; /* aliases pb.stats; compacted in place below */
        
        /* order by count to find the most frequent distances,
         * then restore distance order among the retained entries */
        ksort(pb.stats, cself->statsCount, sizeof(pb.stats[0]), stats_cmp_count, NULL);
        ksort(pb.stats, n, sizeof(pb.stats[0]), stats_cmp_distance, NULL);
        for (i = 0; i != n; ++i) {
            distance[i] = pb.stats[i].distance; /* safe: the write offset never passes the read offset */
        }
        rc = KMDataNodeWrite(node, distance, n * sizeof(distance[0]));
    }
    }
    KDataBufferWhack(&buf);
    return rc;
}
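The callbacks this example hands to KVectorVisitU64 and ksort are not shown in the listing. A minimal sketch of what they plausibly look like, assuming pb_t collects (distance, count) pairs and a klib-style three-argument comparator with a context pointer; the pair layout and field names are assumptions, not the original source:

/* hypothetical pair layout matching "8 * sizeof(pb.stats[0])" bits per element */
typedef struct stat_pair { uint64_t distance, count; } stat_pair;
typedef struct pb_t { stat_pair *stats; unsigned i; } pb_t;

/* KVector visitor: append each (key = distance, value = count) pair */
static rc_t stats_cb(uint64_t key, uint64_t value, void *user_data)
{
    pb_t *const pb = user_data;
    pb->stats[pb->i].distance = key;
    pb->stats[pb->i].count = value;
    ++pb->i;
    return 0;
}

/* most frequent first */
static int64_t stats_cmp_count(const void *A, const void *B, void *ignore)
{
    uint64_t const a = ((stat_pair const *)A)->count;
    uint64_t const b = ((stat_pair const *)B)->count;
    return a < b ? 1 : a > b ? -1 : 0;
}

/* ascending by distance, applied to the retained top entries */
static int64_t stats_cmp_distance(const void *A, const void *B, void *ignore)
{
    uint64_t const a = ((stat_pair const *)A)->distance;
    uint64_t const b = ((stat_pair const *)B)->distance;
    return a < b ? -1 : a > b ? 1 : 0;
}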
Example #2
/* recursive in-place quicksort of a[l..h-1]; h is an exclusive upper bound */
void ksort(int l, int h, int a[])
{
    if(h < l + 2)       /* fewer than two elements: nothing to do */
        return;
    int e = h, p = l;   /* e = exclusive end, p = pivot index (pivot value is a[p]) */
    while(l < h) {
        while(++l < e && a[l] <= a[p]); /* scan right for an element > pivot */
        while(--h > p && a[h] >= a[p]); /* scan left for an element < pivot */
        if(l < h)
            _swap_(a[l], a[h]);
    }
    _swap_(a[h], a[p]); /* move the pivot into its final slot */
    ksort(p, h, a);     /* sort the left partition (elements <= pivot) */
    ksort(l, e, a);     /* sort the right partition (elements >= pivot) */
}
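_swap_ is not defined in this excerpt; presumably it is a plain exchange macro along these lines (the name is kept, the body is an assumption):

/* assumed definition -- exchange two int lvalues */
#define _swap_(x, y) do { int _t = (x); (x) = (y); (y) = _t; } while (0)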
Example #3
static
rc_t rgn_read_complete_table( regions *rgn )
{
    rc_t rc;
    uint32_t rowcount = rgn->hdf5_regions.extents[ 0 ];
    uint32_t rowsize = sizeof( int32_t ) * RGN_COLUMN_COUNT;

    rgn->complete_table = malloc( rowcount * rowsize );
    if ( rgn->complete_table == NULL )
        rc = RC( rcExe, rcNoTarg, rcLoading, rcMemory, rcExhausted );
    else
    {
        rgn->table_index = malloc( sizeof( uint32_t ) * rowcount );
        if ( rgn->table_index == NULL )
        {
            free( rgn->complete_table );
            rgn->complete_table = NULL;
            rc = RC( rcExe, rcNoTarg, rcLoading, rcMemory, rcExhausted );
        }
        else
        {
            uint64_t n_read = 0;

            /* now let's read the whole table... */
            rc = array_file_read_dim2( &(rgn->hdf5_regions), 0, rgn->complete_table,
                                       rowcount, RGN_COLUMN_COUNT, &n_read );
            if ( rc == 0 )
            {
                uint32_t idx, first_spot_id;

                first_spot_id = rgn->complete_table[ pacbio_idx_spot_id ];
                if ( first_spot_id != 0 )
                {
                    /* in case the file we are loading is part of a multi-file submission */
                    for ( idx = 0; idx < rowcount; ++idx )
                        rgn->complete_table[ ( idx * RGN_COLUMN_COUNT ) + pacbio_idx_spot_id ] -= first_spot_id;
                }
                
                /* first, fill the index with ascending row-ids */
                for ( idx = 0; idx < rowcount; ++idx )
                    rgn->table_index[ idx ] = idx;

                /* now sort the index-array by the content's spot-id's */
                ksort ( rgn->table_index, rowcount, sizeof( uint32_t ),
                        rgn_sort_callback, rgn );
                
                /* left here to print a debug-output of the sorted table-index */
                /*
                for ( idx = rowcount - 128; idx < rowcount; ++idx )
                    OUTMSG(( "idx[%i] = %i -> %i\n", 
                             idx, rgn->table_index[ idx ], 
                             rgn->complete_table[ rgn->table_index[ idx ] * RGN_COLUMN_COUNT ] ));
                */

                /* the table and the index are now ready to use... */
            }
        }
    }
    return rc;
}
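rgn_sort_callback is not part of the excerpt. Since the index array holds row numbers and the comment says the key is the spot-id stored in the table, a plausible sketch (the real callback may differ; this assumes a comparator that receives the regions pointer as its context):

/* hypothetical: compare two index entries by the spot-id of the rows
   they refer to, reaching the table through the context pointer */
static int64_t rgn_sort_callback( const void *A, const void *B, void *data )
{
    regions const *rgn = data;
    uint32_t const row_a = *( const uint32_t * )A;
    uint32_t const row_b = *( const uint32_t * )B;
    int32_t const spot_a = rgn->complete_table[ row_a * RGN_COLUMN_COUNT + pacbio_idx_spot_id ];
    int32_t const spot_b = rgn->complete_table[ row_b * RGN_COLUMN_COUNT + pacbio_idx_spot_id ];
    return ( int64_t )spot_a - ( int64_t )spot_b;
}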
Example #4
static
void cluster_mates(TMappingsData *const data)
{
    unsigned index[CG_MAPPINGS_MAX];
    unsigned i;
    unsigned j;
    
    /* guard: with zero mappings the loop over j below would start at
       j == 1 and never reach its termination condition */
    if (data->map_qty < 2)
        return;
    
    for (i = 0; i != data->map_qty; ++i)
        index[i] = i;
    
    ksort(index, data->map_qty, sizeof(index[0]), clustering_sort_cb, data);
    for (i = 0, j = 1; j != data->map_qty; ++j) {
        unsigned const ii = index[i];
        unsigned const ij = index[j];
        TMappingsData_map *const a = &data->map[ij];
        TMappingsData_map const *const b = &data->map[ii];
        
        if (check_in_cluster(a, b)) {
            unsigned const a_mate = a->mate;
            unsigned const b_mate = b->mate;
            
            if (   a_mate == ij /** remove singletons **/
                || a_mate == b_mate) /** or cluster originator has the same mate **/
            {
                a->saved = true;
                DEBUG_MSG(10, ("mapping %u was dropped as a part of cluster at mapping %u\n", ij, ii));
            }
        }
        else
            i = j;
    }
}
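clustering_sort_cb is likewise not shown. Whatever the real sort key is, it must place mappings that can belong to the same cluster next to each other in the sorted index; a hypothetical sketch, with the position fields invented purely for illustration:

/* hypothetical comparator: group candidate cluster members together;
   chr and offset are illustrative, not the real TMappingsData_map fields */
static int64_t clustering_sort_cb(const void *A, const void *B, void *ctx)
{
    TMappingsData const *const data = ctx;
    TMappingsData_map const *const a = &data->map[*(const unsigned *)A];
    TMappingsData_map const *const b = &data->map[*(const unsigned *)B];
    
    if (a->chr != b->chr)
        return (int64_t)a->chr - (int64_t)b->chr;
    return (int64_t)a->offset - (int64_t)b->offset;
}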
Example #5
int main()
{
    freopen("1018_sort.txt","r",stdin);
    scanf("%d", &N);
    for(int i=0; i<N; i++){
        scanf("%d",&a[i]);
    }
    ksort(0, N, a);
    print();
    return 0;
}
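The driver above relies on globals and a print helper that the listing omits; a minimal sketch that makes it compile (the capacity and the output format are assumptions):

#include <stdio.h>

#define MAX_N 1000000   /* assumed capacity; the actual bound is not shown */

int N;
int a[MAX_N];

void ksort(int l, int h, int a[]);  /* Example #2 */

/* assumed helper: emit the sorted numbers one per line */
void print(void)
{
    for(int i = 0; i < N; i++)
        printf("%d\n", a[i]);
}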
Example #6
/*
 * smb_fsacl_to_vsa
 *
 * Converts given acl_t structure to a vsecattr_t structure.
 *
 * IMPORTANT:
 * Upon successful return the memory allocated for vsa_aclentp
 * should be freed by calling kmem_free(). The size is returned
 * in aclbsize.
 */
int
smb_fsacl_to_vsa(acl_t *acl_info, vsecattr_t *vsecattr, int *aclbsize)
{
	int		error = 0;
	int		numacls;
	aclent_t	*aclp;

	ASSERT(acl_info);
	ASSERT(vsecattr);
	ASSERT(aclbsize);

	bzero(vsecattr, sizeof (vsecattr_t));
	*aclbsize = 0;

	switch (acl_info->acl_type) {
	case ACLENT_T:
		numacls = acl_info->acl_cnt;
		/*
		 * Minimum ACL size is three entries so might as well
		 * bail out here.  Also limit request size to prevent user
		 * from allocating too much kernel memory.  Maximum size
		 * is MAX_ACL_ENTRIES for the ACL part and MAX_ACL_ENTRIES
		 * for the default ACL part.
		 */
		if (numacls < 3 || numacls > (MAX_ACL_ENTRIES * 2)) {
			error = EINVAL;
			break;
		}

		vsecattr->vsa_mask = VSA_ACL;

		vsecattr->vsa_aclcnt = numacls;
		*aclbsize = numacls * sizeof (aclent_t);
		vsecattr->vsa_aclentp = kmem_alloc(*aclbsize, KM_SLEEP);
		(void) memcpy(vsecattr->vsa_aclentp, acl_info->acl_aclp,
		    *aclbsize);

		/* Sort the acl list */
		ksort((caddr_t)vsecattr->vsa_aclentp,
		    vsecattr->vsa_aclcnt, sizeof (aclent_t), cmp2acls);

		/* Break into acl and default acl lists */
		for (numacls = 0, aclp = vsecattr->vsa_aclentp;
		    numacls < vsecattr->vsa_aclcnt;
		    aclp++, numacls++) {
			if (aclp->a_type & ACL_DEFAULT)
				break;
		}

		/* Find where defaults start (if any) */
		if (numacls < vsecattr->vsa_aclcnt) {
			vsecattr->vsa_mask |= VSA_DFACL;
			vsecattr->vsa_dfaclcnt = vsecattr->vsa_aclcnt - numacls;
			vsecattr->vsa_dfaclentp = aclp;
			vsecattr->vsa_aclcnt = numacls;
		}

		/* Adjust if they're all defaults */
		if (vsecattr->vsa_aclcnt == 0) {
			vsecattr->vsa_mask &= ~VSA_ACL;
			vsecattr->vsa_aclentp = NULL;
		}

		/* Only directories can have defaults */
		if (vsecattr->vsa_dfaclcnt &&
		    !(acl_info->acl_flags & ACL_IS_DIR)) {
			error = ENOTDIR;
		}

		break;

	case ACE_T:
		if (acl_info->acl_cnt < 1 ||
		    acl_info->acl_cnt > MAX_ACL_ENTRIES) {
			error = EINVAL;
			break;
		}

		vsecattr->vsa_mask = VSA_ACE | VSA_ACE_ACLFLAGS;
		vsecattr->vsa_aclcnt = acl_info->acl_cnt;
		vsecattr->vsa_aclflags = acl_info->acl_flags & ACL_FLAGS_ALL;
		*aclbsize = vsecattr->vsa_aclcnt * sizeof (ace_t);
		vsecattr->vsa_aclentsz = *aclbsize;
		vsecattr->vsa_aclentp = kmem_alloc(*aclbsize, KM_SLEEP);
		(void) memcpy(vsecattr->vsa_aclentp, acl_info->acl_aclp,
		    *aclbsize);

		break;

	default:
		error = EINVAL;
	}

	return (error);
}
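cmp2acls is defined elsewhere in the common ACL code; the split into regular and default lists above works because it orders entries by a_type, and the ACL_DEFAULT type values sort after the regular ones. A sketch of that ordering (simplified, not verbatim):

/*
 * Order aclent_t entries by type, then by id, so that all
 * ACL_DEFAULT entries end up at the tail of the array.
 */
int
cmp2acls(void *a, void *b)
{
	aclent_t *x = (aclent_t *)a;
	aclent_t *y = (aclent_t *)b;

	if (x->a_type != y->a_type)
		return (x->a_type - y->a_type);
	return (x->a_id - y->a_id);
}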
Example #7
static rc_t qual_stats(const Params* prm, const VDatabase* db) {
    rc_t rc = 0;
    const char tblName[] = "SEQUENCE";
    const VTable* tbl = NULL;
    const KMetadata* meta = NULL;
    const KMDataNode* node = NULL;
    assert(prm && db);
    if (rc == 0) {
        rc = VDatabaseOpenTableRead(db, &tbl, tblName);
        DISP_RC2(rc, tblName, "while calling VDatabaseOpenTableRead");
    }
    if (rc == 0) {
        rc = VTableOpenMetadataRead(tbl, &meta);
        DISP_RC2(rc, tblName, "while calling VTableOpenMetadataRead");
    }
    if (rc == 0) {
        bool found = false;
        const char path[] = "STATS/QUALITY";
        rc = KMetadataOpenNodeRead(meta, &node, path);
        if (rc == 0)
        {   found = true; }
        else if (GetRCState(rc) == rcNotFound)
        {   rc = 0; }
        DISP_RC2(rc, path, "while calling KMetadataOpenNodeRead");
        if (found) {
            uint32_t i = 0;
            int nbr = 0;
            uint32_t count = 0;
            KNamelist* names = NULL;
            int* quals = NULL;
            if (rc == 0) {
                rc = KMDataNodeListChild(node, &names);
                DISP_RC2(rc, path, "while calling KMDataNodeListChild");
            }
            if (rc == 0) {
                rc = KNamelistCount(names, &count);
                DISP_RC2(rc, path, "while calling KNamelistCount");
                if (rc == 0 && count > 0) {
                    quals = calloc(count, sizeof *quals);
                    if (quals == NULL) {
                        rc = RC(rcExe,
                            rcStorage, rcAllocating, rcMemory, rcExhausted);
                    }
                }
            }
            for (i = 0; i < count && rc == 0; ++i) {
             /* uint64_t u = 0;
                const KMDataNode* n = NULL; */
                const char* nodeName = NULL;
                const char* name = NULL;
                rc = KNamelistGet(names, i, &nodeName);
                DISP_RC2(rc, path, "while calling KNamelistGet");
                if (rc)
                {   break; }
                name = nodeName;
             /* rc = KMDataNodeOpenNodeRead(node, &n, name);
                DISP_RC(rc, name);
                if (rc == 0) {
                    rc = KMDataNodeReadAsU64(n, &u);
                    DISP_RC(rc, name);
                } */
                if (rc == 0) {
                    char* c = strchr(name, '_');
                    if (c != NULL && *(c + 1) != '\0') {
                        name = c + 1;
                        if (sscanf(name, "%d", &quals[i]) != 1) {
                            rc = RC(rcExe,
                                rcNode, rcParsing, rcName, rcUnexpected);
                            PLOGERR(klogInt,
                                (klogInt, rc, "$(name)", "name=%s", nodeName));
                        }
                    }
                    /* OUTMSG(("QUALITY %s %lu\n", name, u)); */
                }
             /* DESTRUCT(KMDataNode, n); */
            }
            if (rc == 0 && count > 0)
            {   ksort(quals, count, sizeof *quals, sort_callback, NULL); }
            if (rc == 0) {
                OUTMSG(("%s", prm->dbPath));
            }
            for (i = 0, nbr = 0; i < count && rc == 0; ++i, ++nbr) {
                uint64_t u = 0;
                char name[64];
                const KMDataNode* n = NULL;
                sprintf(name, "PHRED_%d", quals[i]);
                rc = KMDataNodeOpenNodeRead(node, &n, name);
                DISP_RC(rc, name);
                if (rc == 0) {
                    rc = KMDataNodeReadAsU64(n, &u);
                    DISP_RC(rc, name);
                    if (rc == 0) {
                        while (nbr < quals[i]) {
                            OUTMSG(("\t0"));
                            ++nbr;
                        }
                        OUTMSG(("\t%lu", u));
                    /*  OUTMSG(("QUALITY %d %lu\n", quals[i], u)); */
                    }
                }
                DESTRUCT(KMDataNode, n);
            }
            while (rc == 0 && nbr <= 40) {
                OUTMSG(("\t0"));
                nbr++;
            }
            if (rc == 0) {
                OUTMSG(("\n"));
            }
            DESTRUCT(KNamelist, names);
        }
    }
    DESTRUCT(KMDataNode, node);
    DESTRUCT(KMetadata, meta);
    DESTRUCT(VTable, tbl);
    return rc;
}
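sort_callback above just needs to leave the parsed PHRED values in ascending order so the columns print from low to high; a minimal sketch, assuming the same comparator-with-context signature the other klib examples use:

/* ascending int comparator; the context pointer is unused */
static int64_t sort_callback(const void *A, const void *B, void *ignore)
{
    return (int64_t)*(const int*)A - (int64_t)*(const int*)B;
}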
Example #8
static int
convert_aent_to_ace(aclent_t *aclentp, int aclcnt, int isdir,
    ace_t **retacep, int *retacecnt)
{
	ace_t *acep;
	ace_t *dfacep;
	int acecnt = 0;
	int dfacecnt = 0;
	int dfaclstart = 0;
	int dfaclcnt = 0;
	aclent_t *aclp;
	int i;
	int error;
	int acesz, dfacesz;

	ksort((caddr_t)aclentp, aclcnt, sizeof (aclent_t), cmp2acls);

	for (i = 0, aclp = aclentp; i < aclcnt; aclp++, i++) {
		if (aclp->a_type & ACL_DEFAULT)
			break;
	}

	if (i < aclcnt) {
		dfaclstart = i;
		dfaclcnt = aclcnt - i;
	}

	if (dfaclcnt && isdir == 0) {
		return (EINVAL);
	}

	error = ln_aent_to_ace(aclentp, i,  &acep, &acecnt, isdir);
	if (error)
		return (error);

	if (dfaclcnt) {
		error = ln_aent_to_ace(&aclentp[dfaclstart], dfaclcnt,
		    &dfacep, &dfacecnt, isdir);
		if (error) {
			if (acep) {
				cacl_free(acep, acecnt * sizeof (ace_t));
			}
			return (error);
		}
	}

	if (dfacecnt != 0) {
		acesz = sizeof (ace_t) * acecnt;
		dfacesz = sizeof (ace_t) * dfacecnt;
		acep = cacl_realloc(acep, acesz, acesz + dfacesz);
		if (acep == NULL) {
			/* don't leak the default-ACL buffer on failure */
			if (dfaclcnt)
				cacl_free(dfacep, dfacecnt * sizeof (ace_t));
			return (ENOMEM);
		}
		if (dfaclcnt) {
			(void) memcpy(acep + acecnt, dfacep, dfacesz);
		}
	}
	if (dfaclcnt)
		cacl_free(dfacep, dfacecnt * sizeof (ace_t));

	*retacecnt = acecnt + dfacecnt;
	*retacep = acep;
	return (0);
}
Example #9
/*
 * Convert an array of aclent_t into an array of nfsace entries,
 * following POSIX draft -> nfsv4 conversion semantics as outlined in
 * the IETF draft.
 */
static int
ln_aent_to_ace(aclent_t *aclent, int n, ace_t **acepp, int *rescount, int isdir)
{
	int error = 0;
	mode_t mask;
	int numuser, numgroup, needsort;
	int resultsize = 0;
	int i, groupi = 0, skip;
	ace_t *acep, *result = NULL;
	int hasmask;

	error = ln_aent_preprocess(aclent, n, &hasmask, &mask,
	    &numuser, &numgroup, &needsort);
	if (error != 0)
		goto out;

	/* allow + deny for each aclent */
	resultsize = n * 2;
	if (hasmask) {
		/*
		 * stick extra deny on the group_obj and on each
		 * user|group for the mask (the group_obj was added
		 * into the count for numgroup)
		 */
		resultsize += numuser + numgroup;
		/* ... and don't count the mask itself */
		resultsize -= 2;
	}

	/* sort the source if necessary */
	if (needsort)
		ksort((caddr_t)aclent, n, sizeof (aclent_t), cmp2acls);

	if (cacl_malloc((void **)&result, resultsize * sizeof (ace_t)) != 0) {
		error = ENOMEM;	/* report the allocation failure to the caller */
		goto out;
	}

	acep = result;

	for (i = 0; i < n; i++) {
		/*
		 * don't process CLASS_OBJ (mask); mask was grabbed in
		 * ln_aent_preprocess()
		 */
		if (aclent[i].a_type & CLASS_OBJ)
			continue;

		/* If we need an ACL_MASK emulator, prepend it now */
		if ((hasmask) &&
		    (aclent[i].a_type & (USER | GROUP | GROUP_OBJ))) {
			acep->a_type = ACE_ACCESS_DENIED_ACE_TYPE;
			acep->a_flags = 0;
			if (aclent[i].a_type & GROUP_OBJ) {
				acep->a_who = (uid_t)-1;
				acep->a_flags |=
				    (ACE_IDENTIFIER_GROUP|ACE_GROUP);
			} else if (aclent[i].a_type & USER) {
				acep->a_who = aclent[i].a_id;
			} else {
				acep->a_who = aclent[i].a_id;
				acep->a_flags |= ACE_IDENTIFIER_GROUP;
			}
			if (aclent[i].a_type & ACL_DEFAULT) {
				acep->a_flags |= ACE_INHERIT_ONLY_ACE |
				    ACE_FILE_INHERIT_ACE |
				    ACE_DIRECTORY_INHERIT_ACE;
			}
			/*
			 * Set the access mask for the prepended deny
			 * ace.  To do this, we invert the mask (found
			 * in ln_aent_preprocess()) then convert it to a
			 * DENY ace access_mask.
			 */
			acep->a_access_mask = mode_to_ace_access((mask ^ 07),
			    isdir, 0, 0);
			acep += 1;
		}

		/* handle a_perm -> access_mask */
		acep->a_access_mask = mode_to_ace_access(aclent[i].a_perm,
		    isdir, aclent[i].a_type & USER_OBJ, 1);

		/* emulate a default aclent */
		if (aclent[i].a_type & ACL_DEFAULT) {
			acep->a_flags |= ACE_INHERIT_ONLY_ACE |
			    ACE_FILE_INHERIT_ACE |
			    ACE_DIRECTORY_INHERIT_ACE;
		}

		/*
		 * handle a_perm and a_id
		 *
		 * this must be done last, since it involves the
		 * corresponding deny aces, which are handled
		 * differently for each different a_type.
		 */
		if (aclent[i].a_type & USER_OBJ) {
			acep->a_who = (uid_t)-1;
			acep->a_flags |= ACE_OWNER;
			ace_make_deny(acep, acep + 1, isdir, B_TRUE);
			acep += 2;
		} else if (aclent[i].a_type & USER) {
			acep->a_who = aclent[i].a_id;
			ace_make_deny(acep, acep + 1, isdir, B_FALSE);
			acep += 2;
		} else if (aclent[i].a_type & (GROUP_OBJ | GROUP)) {
			if (aclent[i].a_type & GROUP_OBJ) {
				acep->a_who = (uid_t)-1;
				acep->a_flags |= ACE_GROUP;
			} else {
				acep->a_who = aclent[i].a_id;
			}
			acep->a_flags |= ACE_IDENTIFIER_GROUP;
			/*
			 * Set the corresponding deny for the group ace.
			 *
			 * The deny aces go after all of the groups, unlike
			 * everything else, where they immediately follow
			 * the allow ace.
			 *
			 * We calculate "skip", the number of slots to
			 * skip ahead for the deny ace, here.
			 *
			 * The pattern is:
			 * MD1 A1 MD2 A2 MD3 A3 D1 D2 D3
			 * thus, skip is
			 * (2 * numgroup) - 1 - groupi
			 * (2 * numgroup) to account for MD + A
			 * - 1 to account for the fact that we're on the
			 * access (A), not the mask (MD)
			 * - groupi to account for the fact that we have
			 * passed up groupi number of MD's.
			 */
			skip = (2 * numgroup) - 1 - groupi;
			ace_make_deny(acep, acep + skip, isdir, B_FALSE);
			/*
			 * If we just did the last group, skip acep past
			 * all of the denies; else, just move ahead one.
			 */
			if (++groupi >= numgroup)
				acep += numgroup + 1;
			else
				acep += 1;
		} else if (aclent[i].a_type & OTHER_OBJ) {
			acep->a_who = (uid_t)-1;
			acep->a_flags |= ACE_EVERYONE;
			ace_make_deny(acep, acep + 1, isdir, B_FALSE);
			acep += 2;
		} else {
			error = EINVAL;
			goto out;
		}
	}

	*acepp = result;
	*rescount = resultsize;

out:
	if (error != 0) {
		if ((result != NULL) && (resultsize > 0)) {
			cacl_free(result, resultsize * sizeof (ace_t));
		}
	}

	return (error);
}
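To make the skip arithmetic concrete: with numgroup = 3 the slots are laid out MD1 A1 MD2 A2 MD3 A3 D1 D2 D3 (indices 0-8). Standing on A1 (index 1, groupi = 0), the matching deny D1 sits at index 6, i.e. skip = 2*3 - 1 - 0 = 5; on A2 (index 3, groupi = 1) the distance to D2 at index 7 is 2*3 - 1 - 1 = 4; on A3 (index 5, groupi = 2) it is 3. After the last group, acep += numgroup + 1 moves the cursor from A3 past D1..D3 to index 9, just past the deny block.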
Example #10
rc_t KTocEntryNewChunked ( KTocEntry ** new_entry,
					 const char * name,
					 size_t name_size,
					 KTime_t mtime,
					 uint32_t access,
					 uint64_t size,
					 const KTocChunk * chunks,
					 uint32_t num_chunks )
{
    rc_t	rc;
    KTocChunk * chunkp;
    size_t	nsize;
    size_t	csize;

    /* -----
     * This is a bit ugly...
     *
     * first (compile-time optimization does much of the heavy lifting) figure out
     * the extra malloc amount:
     *
     * Take the size of a generic entry minus the size of the union part, but add
     * back the size of the chunked-file part.
     *
     * Add to that the size of a 64-bit integer.  This is 8 bytes more than the
     * header alone needs.
     * 
     * Mask that against the bitwise inverse of one less than the size of a 64-bit
     * integer.
     *
     * Now you have the size of the header plus the padding needed to reach the
     * next 8-byte-aligned address.  This is possibly more than is needed, as
     * 8-byte quantities can often be read from 4-byte boundaries.
     *
     * Then add to that the size in bytes of the chunked data (all 64 bit numbers).
     */
    nsize = ~( ( size_t ) sizeof(uint64_t)-1) & 
	(sizeof(KTocEntry)
	 - sizeof(union KTocEntryUnion)
	 + sizeof(struct KTocEntryChunkFile)
	 + sizeof(uint64_t));
    csize = sizeof(KTocChunk) * num_chunks;

    if ((rc = KTocEntryNew (new_entry, name, name_size, mtime, access, 
			    nsize + csize))
	!= 0)
    {
	return rc;
    }

    chunkp = (KTocChunk*)((char*)*new_entry + nsize);
    (*new_entry)->type = ktocentrytype_chunked;
    (*new_entry)->u.chunked_file.file_size = size;
    (*new_entry)->u.chunked_file.chunks = chunkp;
    (*new_entry)->u.chunked_file.num_chunks = num_chunks;
    memmove(chunkp, chunks, csize);
    ksort (chunkp, num_chunks, sizeof(KTocChunk), chunkcmp, NULL);
    /* -----
     * TODO: We currently do no validation of the chunks.
     * We accept that after the sort (which is probably superfluous)
     * that for each chunk 
     *
     *	chunkp[N].logical_position + chunkp[N].size <= chunkp[N+1].logical_position
     *
     * We should probably verify this.
     */
    return 0;
}
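The check the TODO asks for is cheap once the chunks are sorted; a possible sketch (the rc constants are illustrative, not taken from the toc code):

/* possible validation of the invariant named above: after the sort,
 * each chunk must end at or before the next one begins */
static rc_t KTocChunksValidate ( const KTocChunk * chunkp, uint32_t num_chunks )
{
    uint32_t ix;

    for ( ix = 0; ix + 1 < num_chunks; ++ix )
    {
	if ( chunkp[ix].logical_position + chunkp[ix].size
	     > chunkp[ix+1].logical_position )
	    return RC ( rcFS, rcToc, rcConstructing, rcParam, rcInvalid );
    }
    return 0;
}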