void CardTableModRefBSForCTRS::non_clean_card_iterate_possibly_parallel(
  Space* sp,
  MemRegion mr,
  OopsInGenClosure* cl,
  CardTableRS* ct,
  uint n_threads)
{
  if (!mr.is_empty()) {
    if (n_threads > 0) {
#if INCLUDE_ALL_GCS
      non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads);
#else  // INCLUDE_ALL_GCS
      fatal("Parallel gc not supported here.");
#endif // INCLUDE_ALL_GCS
    } else {
      // This is the single-threaded version used by DefNew.
      // clear_cl finds contiguous dirty ranges of cards to process and clear.
      const bool parallel = false;

      DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(), cl->gen_boundary(), parallel);
      ClearNoncleanCardWrapper clear_cl(dcto_cl, ct, parallel);

      clear_cl.do_MemRegion(mr);
    }
  }
}
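
Aside: the serial path above reduces to a scan for contiguous dirty card ranges. The following standalone sketch (not HotSpot code; kCardSize and the helper names are illustrative stand-ins) models what the dcto_cl/clear_cl pair conceptually does: find each maximal run of dirty cards, clear it, and hand the covered byte range to a closure.

// Standalone sketch of the dirty-range scan (not HotSpot code; card values,
// kCardSize, and for_each_dirty_range are illustrative stand-ins).
#include <cstddef>
#include <cstdio>

const unsigned char kCleanCard = 0;
const unsigned char kDirtyCard = 1;
const size_t kCardSize = 512;  // bytes covered by one card, as in HotSpot

// Call process(offset, length) once per maximal run of dirty cards,
// clearing the cards first -- the shape of what clear_cl does above.
template <typename Closure>
void for_each_dirty_range(unsigned char* cards, size_t n_cards, Closure process) {
  size_t i = 0;
  while (i < n_cards) {
    if (cards[i] != kDirtyCard) { ++i; continue; }
    size_t run_start = i;
    while (i < n_cards && cards[i] == kDirtyCard) {
      cards[i] = kCleanCard;  // clear as we scan
      ++i;
    }
    process(run_start * kCardSize, (i - run_start) * kCardSize);
  }
}

int main() {
  unsigned char cards[8] = {0, 1, 1, 0, 0, 1, 0, 1};
  for_each_dirty_range(cards, 8, [](size_t off, size_t len) {
    std::printf("dirty range: offset %zu, %zu bytes\n", off, len);
  });
}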
Example #2
void CardTableRS::younger_refs_in_space_iterate(Space* sp,
                                                OopsInGenClosure* cl) {
  DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, _ct_bs.precision(),
                                                   cl->gen_boundary());
  ClearNoncleanCardWrapper clear_cl(dcto_cl, this);

  _ct_bs.non_clean_card_iterate(sp, sp->used_region_at_save_marks(),
                                dcto_cl, &clear_cl, false);
}
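
process_stride below leans on the card-table address arithmetic: byte_for maps a heap address to its card entry, addr_for inverts that, and addr_to_chunk_index groups cards into stride chunks. Here is a minimal model of that mapping, assuming HotSpot's conventional 512-byte cards; the chunk size and all names are illustrative stand-ins, not the HotSpot API.

// Illustrative model of the card-table address arithmetic used below.
#include <cstdint>
#include <cstdio>

const uintptr_t kCardShift     = 9;    // 512-byte cards, as in HotSpot
const uintptr_t kCardsPerChunk = 256;  // stand-in for ParGCCardsPerStrideChunk

static uint8_t   card_table[1u << 20]; // one byte per card
static uintptr_t heap_base = 0;        // pretend the heap starts at address 0

// byte_for: heap address -> card table entry covering it
uint8_t* byte_for(uintptr_t addr) {
  return &card_table[(addr - heap_base) >> kCardShift];
}

// addr_for: card table entry -> first heap address that card covers
uintptr_t addr_for(uint8_t* card) {
  return heap_base + ((uintptr_t)(card - card_table) << kCardShift);
}

// addr_to_chunk_index: heap address -> index of its stride chunk
uintptr_t addr_to_chunk_index(uintptr_t addr) {
  return ((addr - heap_base) >> kCardShift) / kCardsPerChunk;
}

int main() {
  uintptr_t addr = 5 * 512 + 100;  // lands in card 5, chunk 0
  std::printf("card index: %ld\n", (long)(byte_for(addr) - card_table));
  std::printf("card start: %lu\n", (unsigned long)addr_for(byte_for(addr)));
  std::printf("chunk index: %lu\n", (unsigned long)addr_to_chunk_index(addr));
}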
void
CardTableModRefBS::
process_stride(Space* sp,
               MemRegion used,
               jint stride, int n_strides,
               OopsInGenClosure* cl,
               CardTableRS* ct,
               jbyte** lowest_non_clean,
               uintptr_t lowest_non_clean_base_chunk_index,
               size_t    lowest_non_clean_chunk_size) {
  // We go from lower to higher addresses here; going the other way wouldn't
  // help that much because of the strided parallelism pattern used here.

  // Find the first card address of the first chunk in the stride that is
  // at least "bottom" of the used region.
  jbyte*    start_card  = byte_for(used.start());
  jbyte*    end_card    = byte_after(used.last());
  uintptr_t start_chunk = addr_to_chunk_index(used.start());
  uintptr_t start_chunk_stride_num = start_chunk % n_strides;
  jbyte* chunk_card_start;

  if ((uintptr_t)stride >= start_chunk_stride_num) {
    chunk_card_start = (jbyte*)(start_card +
                                (stride - start_chunk_stride_num) *
                                ParGCCardsPerStrideChunk);
  } else {
    // Go ahead to the next chunk group boundary, then to the requested stride.
    chunk_card_start = (jbyte*)(start_card +
                                (n_strides - start_chunk_stride_num + stride) *
                                ParGCCardsPerStrideChunk);
  }

  while (chunk_card_start < end_card) {
    // Even though we go from lower to higher addresses below, the
    // strided parallelism can interleave the actual processing of the
    // dirty pages in various ways. For a specific chunk within this
    // stride, we take care to avoid double scanning or missing a card
    // by suitably initializing the "min_done" field in process_chunk_boundaries()
    // below, together with the dirty region extension accomplished in
    // DirtyCardToOopClosure::do_MemRegion().
    jbyte*    chunk_card_end = chunk_card_start + ParGCCardsPerStrideChunk;
    // Invariant: chunk_mr should be fully contained within the "used" region.
    MemRegion chunk_mr       = MemRegion(addr_for(chunk_card_start),
                                         chunk_card_end >= end_card ?
                                           used.end() : addr_for(chunk_card_end));
    assert(chunk_mr.word_size() > 0, "[chunk_card_start > used_end)");
    assert(used.contains(chunk_mr), "chunk_mr should be subset of used");

    DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(),
                                                     cl->gen_boundary());
    ClearNoncleanCardWrapper clear_cl(dcto_cl, ct);

    // Process the chunk.
    process_chunk_boundaries(sp,
                             dcto_cl,
                             chunk_mr,
                             used,
                             lowest_non_clean,
                             lowest_non_clean_base_chunk_index,
                             lowest_non_clean_chunk_size);

    // We want the LNC array updates above in process_chunk_boundaries
    // to be visible before any of the card table value changes as a
    // result of the dirty card iteration below.
    OrderAccess::storestore();

    // We do not call the non_clean_card_iterate_serial() version because
    // we want to clear the cards: clear_cl here does the work of finding
    // contiguous dirty ranges of cards to process and clear.
    clear_cl.do_MemRegion(chunk_mr);

    // Find the next chunk of the stride.
    chunk_card_start += ParGCCardsPerStrideChunk * n_strides;
  }
}
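
The chunk_card_start computation at the top of process_stride is easiest to see with concrete numbers. In the illustrative sketch below, with n_strides = 4 and a used region whose first chunk has stride number 2, strides 2 and 3 start 0 and 256 cards into the region, while strides 0 and 1 must skip ahead past the next chunk group boundary (512 and 768 cards in).

// Toy rendering of the chunk_card_start computation (values illustrative).
#include <cstdio>

int main() {
  const int cards_per_chunk = 256;      // stand-in for ParGCCardsPerStrideChunk
  const int n_strides = 4;
  const int start_chunk_stride_num = 2; // stride number of the region's first chunk

  for (int stride = 0; stride < n_strides; ++stride) {
    int offset_cards;
    if (stride >= start_chunk_stride_num) {
      // The requested stride falls at or after the first chunk of the region.
      offset_cards = (stride - start_chunk_stride_num) * cards_per_chunk;
    } else {
      // Skip to the next chunk group boundary, then to the requested stride.
      offset_cards = (n_strides - start_chunk_stride_num + stride) * cards_per_chunk;
    }
    std::printf("stride %d: first chunk starts %d cards past start_card\n",
                stride, offset_cards);
  }
}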
Example #4
/* Cross-check the mode page item tables: report pages/subpages that are out
 * of order, duplicate acronyms, and overlapping bit fields.  Overlaps are
 * tracked per (byte, pdt, mask) via the clear_cl()/check_cl()/set_cl()
 * clash-list helpers. */
static void check(const struct sdparm_mode_page_item * mpi,
                  const struct sdparm_mode_page_item * mpi_b)
{
    unsigned char mask;
    const struct sdparm_mode_page_item * kp = mpi;
    const struct sdparm_mode_page_item * jp = mpi;
    const char * acron;
    int res, prev_mp, prev_msp, prev_pdt, sbyte, sbit, nbits;
    int second_k = 0;
    int second_j = 0;

    clear_cl();
    for (prev_mp = 0, prev_msp = 0, prev_pdt = -1; ; ++kp) {
        if (NULL == kp->acron) {
            if ((NULL == mpi_b) || second_k)
                break;
            prev_mp = 0;
            prev_msp = 0;
            kp = mpi_b;
            second_k = 1;
        }
        acron = kp->acron ? kp->acron : "?";
        if ((prev_mp != kp->page_num) || (prev_msp != kp->subpage_num)) {
            if (prev_mp > kp->page_num)
                printf("  mode page 0x%x,0x%x out of order\n", kp->page_num,
                        kp->subpage_num);
            if ((prev_mp == kp->page_num) && (prev_msp > kp->subpage_num))
                printf("  mode subpage 0x%x,0x%x out of order, previous msp "
                       "was 0x%x\n", kp->page_num, kp->subpage_num, prev_msp);
            prev_mp = kp->page_num;
            prev_msp = kp->subpage_num;
            prev_pdt = kp->pdt;
            clear_cl();
        } else if ((prev_pdt >= 0) && (prev_pdt != kp->pdt)) {
            if (prev_pdt > kp->pdt)
                printf("  mode page 0x%x,0x%x pdt out of order, pdt was "
                       "%d, now %d\n", kp->page_num, kp->subpage_num,
                       prev_pdt, kp->pdt);
            prev_pdt = kp->pdt;
        }
        for (jp = kp + 1, second_j = second_k; ; ++jp) {
            if (NULL == jp->acron) {
                if ((NULL == mpi_b) || second_j)
                    break;
                jp = mpi_b;
                second_j = 1;
            }
            if ((0 == strcmp(acron, jp->acron)) &&
                (! (jp->flags & MF_CLASH_OK)))
                printf("  acronym '%s' with this description: '%s'\n    "
                       "clashes with '%s'\n", acron, kp->description,
                       jp->description);
        }
        sbyte = kp->start_byte;
        if ((unsigned)sbyte + 8 > MAX_MP_LEN) {
            printf("  acronym: %s  start byte too large: %d\n", kp->acron,
                   sbyte);
            continue;
        }
        sbit = kp->start_bit;
        if ((unsigned)sbit > 7) {
            printf("  acronym: %s  start bit too large: %d\n", kp->acron,
                   sbit);
            continue;
        }
        nbits = kp->num_bits;
        if (nbits > 64) {
            printf("  acronym: %s  number of bits too large: %d\n",
                   kp->acron, nbits);
            continue;
        }
        if (nbits < 1) {
            printf("  acronym: %s  number of bits too small: %d\n",
                   kp->acron, nbits);
            continue;
        }
        mask = (1 << (sbit + 1)) - 1;
        if ((nbits - 1) < sbit)
            mask &= ~((1 << (sbit + 1 - nbits)) - 1);
        res = check_cl(sbyte, kp->pdt, mask);
        if (res) {
            if (1 == res)
                printf("  0x%x,0x%x: clash at start_byte: %d, bit: %d "
                       "[latest acron: %s, this pdt]\n", prev_mp, prev_msp,
                       sbyte, sbit, acron);
            else if (2 == res)
                printf("  0x%x,0x%x: clash at start_byte: %d, bit: %d "
                       "[latest acron: %s, another pdt]\n", prev_mp,
                       prev_msp, sbyte, sbit, acron);
            else
                printf("  0x%x,0x%x: clash, bad data at start_byte: %d, "
                       "bit: %d [latest acron: %s]\n", prev_mp,
                       prev_msp, sbyte, sbit, acron);
        }
        set_cl(sbyte, kp->pdt, mask);
        if ((nbits - 1) > sbit) {
            nbits -= (sbit + 1);
            if ((nbits > 7) && (0 != (nbits % 8)))
                printf("  0x%x,0x%x: check nbits: %d, start_byte: %d, bit: "
                       "%d [acron: %s]\n", prev_mp, prev_msp, kp->num_bits,
                       sbyte, sbit, acron);
            do {
                ++sbyte;
                mask = 0xff;
                if (nbits > 7)
                    nbits -= 8;
                else {
                    mask &= ~((1 << (8 - nbits)) - 1);
                    nbits = 0;
                }
                res = check_cl(sbyte, kp->pdt, mask);
                if (res) {
                    if (1 == res)
                        printf("   0x%x,0x%x: clash at start_byte: %d, "
                               "bit: %d [latest acron: %s, this pdt]\n",
                               prev_mp, prev_msp, sbyte, sbit, acron);
                    else if (2 == res)
                        printf("   0x%x,0x%x: clash at start_byte: %d, "
                               "bit: %d [latest acron: %s, another pdt]\n",
                               prev_mp, prev_msp, sbyte, sbit, acron);
                    else
                        printf("   0x%x,0x%x: clash, bad at start_byte: "
                               "%d, bit: %d [latest acron: %s]\n",
                               prev_mp, prev_msp, sbyte, sbit, acron);
                }
                set_cl(sbyte, kp->pdt, mask);
            } while (nbits > 0);
        }
    }
}
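
The subtlest part of check() is the mask arithmetic for fields that span bytes: the first byte contributes bits sbit..0 (trimmed if the field ends inside that byte), and continuation bytes contribute 0xff or a top-aligned remainder. The sketch below re-derives the same per-byte masks; field_masks is an illustrative helper, not part of sdparm.

// Re-derivation of the per-byte overlap masks computed in check() above
// (SCSI mode-page fields are MSB-first within a byte).
#include <cstdio>

// Print one mask per byte for a field at (start_byte, sbit) spanning nbits,
// using the same arithmetic as the clash checks above.
static void field_masks(int start_byte, int sbit, int nbits) {
  unsigned char mask = (1 << (sbit + 1)) - 1;  // bits sbit..0 of the first byte
  if ((nbits - 1) < sbit)                      // field ends inside the first byte
    mask &= ~((1 << (sbit + 1 - nbits)) - 1);
  std::printf("byte %d: mask 0x%02x\n", start_byte, mask);
  if ((nbits - 1) > sbit) {                    // field continues into later bytes
    nbits -= (sbit + 1);
    int sbyte = start_byte;
    do {
      ++sbyte;
      mask = 0xff;
      if (nbits > 7)
        nbits -= 8;                            // a full continuation byte
      else {
        mask &= ~((1 << (8 - nbits)) - 1);     // top nbits of the last byte
        nbits = 0;
      }
      std::printf("byte %d: mask 0x%02x\n", sbyte, mask);
    } while (nbits > 0);
  }
}

int main() {
  field_masks(2, 3, 12);  // a 12-bit field starting at byte 2, bit 3
}

Running it for a 12-bit field at byte 2, bit 3 prints mask 0x0f for byte 2 and 0xff for byte 3, matching what the set_cl() calls above would record for such a field.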