Example No. 1
TEST(Test_TensorFlow, Mask_RCNN)
{
    std::string proto = findDataFile("dnn/mask_rcnn_inception_v2_coco_2018_01_28.pbtxt", false);
    std::string model = findDataFile("dnn/mask_rcnn_inception_v2_coco_2018_01_28.pb", false);

    Net net = readNetFromTensorflow(model, proto);
    Mat img = imread(findDataFile("dnn/street.png", false));
    Mat refDetections = blobFromNPY(path("mask_rcnn_inception_v2_coco_2018_01_28.detection_out.npy"));
    Mat refMasks = blobFromNPY(path("mask_rcnn_inception_v2_coco_2018_01_28.detection_masks.npy"));
    Mat blob = blobFromImage(img, 1.0f, Size(800, 800), Scalar(), true, false);

    net.setPreferableBackend(DNN_BACKEND_OPENCV);

    net.setInput(blob);

    // Mask-RCNN predicts bounding boxes and segmentation masks.
    std::vector<String> outNames(2);
    outNames[0] = "detection_out_final";
    outNames[1] = "detection_masks";

    std::vector<Mat> outs;
    net.forward(outs, outNames);

    Mat outDetections = outs[0];
    Mat outMasks = outs[1];
    normAssertDetections(refDetections, outDetections, "", /*threshold for zero confidence*/1e-5);

    // Output size of masks is NxCxHxW where
    // N - number of detected boxes
    // C - number of classes (excluding background)
    // HxW - segmentation shape
    const int numDetections = outDetections.size[2];

    int masksSize[] = {1, numDetections, outMasks.size[2], outMasks.size[3]};
    Mat masks(4, &masksSize[0], CV_32F);

    std::vector<cv::Range> srcRanges(4, cv::Range::all());
    std::vector<cv::Range> dstRanges(4, cv::Range::all());

    outDetections = outDetections.reshape(1, outDetections.total() / 7);
    for (int i = 0; i < numDetections; ++i)
    {
        // Get a class id for this bounding box and copy mask only for that class.
        int classId = static_cast<int>(outDetections.at<float>(i, 1));
        srcRanges[0] = dstRanges[1] = cv::Range(i, i + 1);
        srcRanges[1] = cv::Range(classId, classId + 1);
        outMasks(srcRanges).copyTo(masks(dstRanges));
    }
    cv::Range topRefMasks[] = {Range::all(), Range(0, numDetections), Range::all(), Range::all()};
    normAssert(masks, refMasks(&topRefMasks[0]));
}
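
The 4-D masks blob assembled above has shape 1 x numDetections x H x W, one low-resolution probability map per detected box. Below is a minimal sketch of how one of those maps could be binarized and resized for visualization; the function name, the box size arguments, and the 0.5 threshold are illustrative assumptions, not part of the test.

// Sketch only: view the (0, i, :, :) slice of the masks blob built above,
// scale it to an assumed box size and threshold it. boxW/boxH and the 0.5
// cutoff are illustrative values.
void visualizeMask(const cv::Mat& masks, int i, int boxW, int boxH, cv::Mat& binMask)
{
    const int h = masks.size[2], w = masks.size[3];
    // Wrap the slice as a 2-D float image without copying.
    cv::Mat prob(h, w, CV_32F, const_cast<float*>(masks.ptr<float>(0, i)));
    cv::resize(prob, binMask, cv::Size(boxW, boxH));
    binMask = binMask > 0.5f;   // CV_8U, 255 where the mask fires
}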
Example No. 2
em2d::Images get_projections(const ParticlesTemp &ps,
                             const RegistrationResults &registration_values,
                             int rows, int cols,
                             const ProjectingOptions &options, Strings names) {
  IMP_LOG_VERBOSE("Generating projections from registration results"
                  << std::endl);

  if (options.save_images && (names.size() < registration_values.size())) {
    IMP_THROW("get_projections: Insufficient number of image names provided",
              IOException);
  }

  unsigned long n_projs = registration_values.size();
  em2d::Images projections(n_projs);
  // Precomputation of all the possible projection masks for the particles
  MasksManagerPtr masks(
      new MasksManager(options.resolution, options.pixel_size));
  masks->create_masks(ps);
  for (unsigned long i = 0; i < n_projs; ++i) {
    IMP_NEW(em2d::Image, img, ());
    img->set_size(rows, cols);
    img->set_was_used(true);
    String name = "";
    if (options.save_images) name = names[i];
    get_projection(img, ps, registration_values[i], options, masks, name);
    projections[i] = img;
  }
  return projections;
}
Example No. 3
    int maxProduct(vector<string> & words)
    {
        using TMask = uint32_t;

        size_t const n = words.size();

        vector<TMask> masks(n, 0);
        for (size_t i = 0; i < n; ++i)
        {
            TMask & mask = masks[i];
            for (auto const & c : words[i]) mask |= 1 << (c - 'a'); // 26 bits
        }

        size_t res = 0;
        for (size_t i = 0; i < n; ++i)
        {
            size_t maxl = 0;
            for (size_t j = i + 1; j < n; ++j)
            {
                if ((masks[i] & masks[j]) == 0 && words[j].size() > maxl)
                {
                    size_t t = words[i].size() * words[j].size();
                    if (t > res)
                    {
                        maxl = words[j].size();
                        res = t;
                    }
                }
            }
        }
        return res;
    }
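
A quick usage sketch for the routine above, assuming it is a member of a class named Solution (as is typical for this kind of exercise). The sample input is the standard one for the problem: "abcw" and "xtfn" share no letters, giving 4 * 4 = 16.

#include <cassert>
#include <string>
#include <vector>

int main()
{
    // Assumes the maxProduct method above is declared in a class named Solution.
    std::vector<std::string> words = {"abcw", "baz", "foo", "bar", "xtfn", "abcdef"};
    Solution s;
    assert(s.maxProduct(words) == 16);  // "abcw" x "xtfn": 4 * 4
    return 0;
}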
Example No. 4
  void cg_set_masks(ConnectionGeneratorDatum& cg, RangeSet& sources, RangeSet& targets)
  {
    std::vector<ConnectionGenerator::Mask> masks(Communicator::get_num_processes());
    cg_create_masks(&masks, sources, targets);

    cg->setMask(masks, Communicator::get_rank());
  }
Example No. 5
/**
 * Set the masks on the ConnectionGenerator cg. This function also
 * creates the masks from the given RangeSets sources and targets.
 *
 * \param cg The ConnectionGenerator to set the masks on
 * \param sources The source ranges to create the source masks from
 * \param targets The target ranges to create the target masks from
 */
void
cg_set_masks( ConnectionGeneratorDatum& cg, RangeSet& sources, RangeSet& targets )
{
  long np = kernel().mpi_manager.get_num_processes();
  std::vector< ConnectionGenerator::Mask > masks( np, ConnectionGenerator::Mask( 1, np ) );

  cg_create_masks( &masks, sources, targets );
  cg->setMask( masks, kernel().mpi_manager.get_rank() );
}
Example No. 6
void Solver580D::run()
{
    int64_t totalDishes, dishesToEat, nRules;
    cin >> totalDishes >> dishesToEat >> nRules;

    vector<int64_t> tastiness(totalDishes);
    for (auto& t : tastiness)
        cin >> t;

    vector<vector<int64_t>> rules(totalDishes, vector<int64_t>(totalDishes, 0));
    for (int64_t i = 0; i < nRules; ++i)
    {
        int64_t from, to, goodness;
        cin >> from >> to >> goodness;
        rules[from - 1][to - 1] = goodness;
    }
    
    std::vector<std::vector<int64_t>> masks(totalDishes + 1);
    int64_t maxMask = 1 << totalDishes;
    for (int64_t mask = 0; mask < maxMask; ++mask)
        masks[bitsSet(mask)].push_back(mask);

    std::vector<std::vector<int64_t>> dp(totalDishes, std::vector<int64_t>(maxMask, 0));
    
    for (int64_t currDishes = dishesToEat - 2; currDishes >= 0; --currDishes)
    {
        for (int64_t iDish = 0; iDish < totalDishes; ++iDish)
        {
            int64_t iDishMask = 1 << iDish;
            for (auto& m : masks[currDishes])
            {
                for (int64_t dishToAdd = 0; dishToAdd < totalDishes; ++dishToAdd)
                {
                    int64_t dishToAddMask = 1 << dishToAdd;
                    if ((m & iDishMask) == 0 && (iDish != dishToAdd) && (m & dishToAddMask) == 0)
                        if (dp[dishToAdd][m | iDishMask] + tastiness[dishToAdd] + rules[iDish][dishToAdd] > dp[iDish][m])
                            dp[iDish][m] = dp[dishToAdd][m | iDishMask] + tastiness[dishToAdd] + rules[iDish][dishToAdd];
                }
            }
        }
    }
    

    int64_t result = 0;
    for (int64_t iFirstDish = 0; iFirstDish < totalDishes; ++iFirstDish)
        result = max(result, dp[iFirstDish][0] + tastiness[iFirstDish]);

    cout << result;
}
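
The bitsSet helper used to bucket masks by popcount is not shown. A minimal sketch, assuming it simply counts the set bits of its argument:

// Assumed implementation of the bitsSet helper referenced above: counts set bits.
int64_t bitsSet(int64_t mask)
{
    int64_t count = 0;
    for (; mask != 0; mask &= mask - 1)  // clears the lowest set bit each pass
        ++count;
    return count;
}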
Example No. 7
    static void shiftMapInpaint(const Mat &src, const Mat &mask, Mat &dst,
        const int nTransform = 60, const int psize = 8)
    {
        /** Preparing input **/
        cv::Mat img;
        src.convertTo( img, CV_32F );
        img.setTo(0, 255 - mask);

        /** ANNF computation **/
        std::vector <Matx33f> transforms( nTransform );
        xphotoInternal::dominantTransforms(img,
                    transforms, nTransform, psize);

        /** Warping **/
        std::vector <Mat> images( nTransform + 1 ); // source image transformed with transforms
        std::vector <Mat> masks( nTransform + 1 );  // definition domain for current shift

        Mat_<uchar> invMask = 255 - mask;
        dilate(invMask, invMask, Mat(), Point(-1,-1), 2);

        img.copyTo( images[0] );
        mask.copyTo( masks[0] );

        for (int i = 0; i < nTransform; ++i)
        {
            warpPerspective( images[0], images[i + 1], transforms[i],
                             images[0].size(), INTER_LINEAR );

            warpPerspective( masks[0], masks[i + 1], transforms[i],
                             masks[0].size(), INTER_NEAREST);
            masks[i + 1] &= invMask;
        }

        /** Stitching **/
        Mat photomontageResult;
        xphotoInternal::Photomontage < cv::Vec <float, cn> >( images, masks )
            .assignResImage(photomontageResult);

        /** Writing result **/
        photomontageResult.convertTo( dst, dst.type() );
    }
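
shiftMapInpaint above is an internal worker; callers normally reach it through the public xphoto inpainting entry point. A minimal usage sketch, assuming opencv_contrib's xphoto module and its INPAINT_SHIFTMAP flag; the file names are placeholders.

// Usage sketch (assumes opencv_contrib's xphoto module; file names are placeholders).
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/xphoto.hpp>

int main()
{
    cv::Mat src  = cv::imread("damaged.png");                     // 8UC3 input
    cv::Mat mask = cv::imread("mask.png", cv::IMREAD_GRAYSCALE);  // 8UC1, zero = pixels to fill
    cv::Mat dst(src.size(), src.type());

    cv::xphoto::inpaint(src, mask, dst, cv::xphoto::INPAINT_SHIFTMAP);
    cv::imwrite("restored.png", dst);
    return 0;
}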
Example No. 8
  void bootstrap_and_calc_adj_matrix_helper<covariance_pairwise_complete_obs_S>(
      gsl_matrix * A,
      std::list<gsl_matrix *> & boot_matrix,
      detail::write_matrix_3d & write_boot_matrix,
      covariance_pairwise_complete_obs_S filter,
      boost::mt19937 & rng,
      void (* interaction_matrix_generator_fnc)(gsl_matrix *, gsl_matrix *, covariance_pairwise_complete_obs_S),
      bool use_tmp_file
      )
  {
    size_t num_elems = A->size1,
           num_obs = A->size2;

    gsl_matrix * A_shuffled = gsl_matrix_calloc(num_elems, num_obs);
    matrix_mask A_shuffled_mask(num_elems, num_obs);
    covariance_pairwise_complete_obs_S A_shuffled_filter(A_shuffled_mask, filter.min_num_complete_obs);

    two_matrix_mask masks(filter.mask, A_shuffled_filter.mask);

    if (filter.shuffle_by_column)
      shuffle_by_column(A, A_shuffled, rng);
    else
      shuffle(A, A_shuffled, masks, rng);

    gsl_matrix * shuffled_interaction_matrix = gsl_matrix_calloc(num_elems, num_elems);
    interaction_matrix_generator_fnc(A_shuffled, shuffled_interaction_matrix, A_shuffled_filter);

    gsl_matrix_free(A_shuffled);

    if (use_tmp_file)
    {
      write_boot_matrix.write(shuffled_interaction_matrix);
      gsl_matrix_free(shuffled_interaction_matrix);
    }
    else
    {
      boot_matrix.push_back(shuffled_interaction_matrix);
    }

    return;
  }
Example No. 9
Stitcher::Status Stitcher::composePanorama(InputArray images, OutputArray pano)
{
    LOGLN("Warping images (auxiliary)... ");

    vector<Mat> imgs;
    images.getMatVector(imgs);
    if (!imgs.empty())
    {
        CV_Assert(imgs.size() == imgs_.size());

        Mat img;
        seam_est_imgs_.resize(imgs.size());

        for (size_t i = 0; i < imgs.size(); ++i)
        {
            imgs_[i] = imgs[i];
            resize(imgs[i], img, Size(), seam_scale_, seam_scale_);
            seam_est_imgs_[i] = img.clone();
        }

        vector<Mat> seam_est_imgs_subset;
        vector<Mat> imgs_subset;

        for (size_t i = 0; i < indices_.size(); ++i)
        {
            imgs_subset.push_back(imgs_[indices_[i]]);
            seam_est_imgs_subset.push_back(seam_est_imgs_[indices_[i]]);
        }

        seam_est_imgs_ = seam_est_imgs_subset;
        imgs_ = imgs_subset;
    }

    Mat &pano_ = pano.getMatRef();

    int64 t = getTickCount();

    vector<Point> corners(imgs_.size());
    vector<Mat> masks_warped(imgs_.size());
    vector<Mat> images_warped(imgs_.size());
    vector<Size> sizes(imgs_.size());
    vector<Mat> masks(imgs_.size());

    // Prepare image masks
    for (size_t i = 0; i < imgs_.size(); ++i)
    {
        masks[i].create(seam_est_imgs_[i].size(), CV_8U);
        masks[i].setTo(Scalar::all(255));
    }

    // Warp images and their masks
    Ptr<detail::RotationWarper> w = warper_->create(float(warped_image_scale_ * seam_work_aspect_));
    for (size_t i = 0; i < imgs_.size(); ++i)
    {
        Mat_<float> K;
        cameras_[i].K().convertTo(K, CV_32F);
        K(0,0) *= (float)seam_work_aspect_;
        K(0,2) *= (float)seam_work_aspect_;
        K(1,1) *= (float)seam_work_aspect_;
        K(1,2) *= (float)seam_work_aspect_;

        corners[i] = w->warp(seam_est_imgs_[i], K, cameras_[i].R, INTER_LINEAR, BORDER_REFLECT, images_warped[i]);
        sizes[i] = images_warped[i].size();

        w->warp(masks[i], K, cameras_[i].R, INTER_NEAREST, BORDER_CONSTANT, masks_warped[i]);
    }

    vector<Mat> images_warped_f(imgs_.size());
    for (size_t i = 0; i < imgs_.size(); ++i)
        images_warped[i].convertTo(images_warped_f[i], CV_32F);

    LOGLN("Warping images, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");

    // Find seams
    exposure_comp_->feed(corners, images_warped, masks_warped);
    seam_finder_->find(images_warped_f, corners, masks_warped);

    // Release unused memory
    seam_est_imgs_.clear();
    images_warped.clear();
    images_warped_f.clear();
    masks.clear();

    LOGLN("Compositing...");
    t = getTickCount();

    Mat img_warped, img_warped_s;
    Mat dilated_mask, seam_mask, mask, mask_warped;

    //double compose_seam_aspect = 1;
    double compose_work_aspect = 1;
    bool is_blender_prepared = false;

    double compose_scale = 1;
    bool is_compose_scale_set = false;

    Mat full_img, img;
    for (size_t img_idx = 0; img_idx < imgs_.size(); ++img_idx)
    {
        LOGLN("Compositing image #" << indices_[img_idx] + 1);

        // Read image and resize it if necessary
        full_img = imgs_[img_idx];
        if (!is_compose_scale_set)
        {
            if (compose_resol_ > 0)
                compose_scale = min(1.0, sqrt(compose_resol_ * 1e6 / full_img.size().area()));
            is_compose_scale_set = true;

            // Compute relative scales
            //compose_seam_aspect = compose_scale / seam_scale_;
            compose_work_aspect = compose_scale / work_scale_;

            // Update warped image scale
            warped_image_scale_ *= static_cast<float>(compose_work_aspect);
            w = warper_->create((float)warped_image_scale_);

            // Update corners and sizes
            for (size_t i = 0; i < imgs_.size(); ++i)
            {
                // Update intrinsics
                cameras_[i].focal *= compose_work_aspect;
                cameras_[i].ppx *= compose_work_aspect;
                cameras_[i].ppy *= compose_work_aspect;

                // Update corner and size
                Size sz = full_img_sizes_[i];
                if (std::abs(compose_scale - 1) > 1e-1)
                {
                    sz.width = cvRound(full_img_sizes_[i].width * compose_scale);
                    sz.height = cvRound(full_img_sizes_[i].height * compose_scale);
                }

                Mat K;
                cameras_[i].K().convertTo(K, CV_32F);
                Rect roi = w->warpRoi(sz, K, cameras_[i].R);
                corners[i] = roi.tl();
                sizes[i] = roi.size();
            }
        }
        if (std::abs(compose_scale - 1) > 1e-1)
            resize(full_img, img, Size(), compose_scale, compose_scale);
        else
            img = full_img;
        full_img.release();
        Size img_size = img.size();

        Mat K;
        cameras_[img_idx].K().convertTo(K, CV_32F);

        // Warp the current image
        w->warp(img, K, cameras_[img_idx].R, INTER_LINEAR, BORDER_REFLECT, img_warped);

        // Warp the current image mask
        mask.create(img_size, CV_8U);
        mask.setTo(Scalar::all(255));
        w->warp(mask, K, cameras_[img_idx].R, INTER_NEAREST, BORDER_CONSTANT, mask_warped);

        // Compensate exposure
        exposure_comp_->apply((int)img_idx, corners[img_idx], img_warped, mask_warped);

        img_warped.convertTo(img_warped_s, CV_16S);
        img_warped.release();
        img.release();
        mask.release();

        // Make sure seam mask has proper size
        dilate(masks_warped[img_idx], dilated_mask, Mat());
        resize(dilated_mask, seam_mask, mask_warped.size());

        mask_warped = seam_mask & mask_warped;

        if (!is_blender_prepared)
        {
            blender_->prepare(corners, sizes);
            is_blender_prepared = true;
        }

        // Blend the current image
        blender_->feed(img_warped_s, mask_warped, corners[img_idx]);
    }

    Mat result, result_mask;
    blender_->blend(result, result_mask);

    LOGLN("Compositing, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");

    // Preliminary result is in CV_16SC3 format, but all values are in [0,255] range,
    // so convert it to avoid user confusing
    result.convertTo(pano_, CV_8U);

    return OK;
}
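
composePanorama() is normally reached through the high-level stitching entry point rather than called directly. A minimal usage sketch against the same era of the API, where stitch() runs estimateTransform() followed by composePanorama() internally; the image file names are placeholders.

// Usage sketch (OpenCV 2.x-style stitching API; file names are placeholders).
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/stitching/stitcher.hpp>
#include <vector>

int main()
{
    std::vector<cv::Mat> imgs;
    imgs.push_back(cv::imread("left.jpg"));
    imgs.push_back(cv::imread("right.jpg"));

    cv::Mat pano;
    cv::Stitcher stitcher = cv::Stitcher::createDefault(/*try_use_gpu=*/false);
    cv::Stitcher::Status status = stitcher.stitch(imgs, pano);
    if (status != cv::Stitcher::OK)
        return 1;
    cv::imwrite("pano.jpg", pano);
    return 0;
}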
Example No. 10
 void onemasksamepathdifferentcase() const {
     std::vector<std::string> masks(1, "sRc/");
     PathMatch match(masks, false);
     ASSERT(match.Match("srC/"));
 }
Example No. 11
 void filemaskdifferentcase() const {
     std::vector<std::string> masks(1, "foo.cPp");
     PathMatch match(masks, false);
     ASSERT(match.Match("fOo.cpp"));
 }
Example No. 12
const char* file_dialog_show(GtkWidget* parent, bool open, const char* title, const char* path, const char* pattern)
{
  filetype_t type;

  if(pattern == 0)
  {
    pattern = "*";
  }

  FileTypeList typelist;
  GlobalFiletypes().getTypeList(pattern, &typelist);

  GTKMasks masks(typelist);

  if (title == 0)
    title = open ? "Open File" : "Save File";
    
  GtkWidget* dialog;
  if (open)
  {
    dialog = gtk_file_chooser_dialog_new(title,
                                        GTK_WINDOW(parent),
                                        GTK_FILE_CHOOSER_ACTION_OPEN,
                                        GTK_STOCK_CANCEL, GTK_RESPONSE_CANCEL,
                                        GTK_STOCK_OPEN, GTK_RESPONSE_ACCEPT,
                                        NULL);
  }
  else
  {
    dialog = gtk_file_chooser_dialog_new(title,
                                        GTK_WINDOW(parent),
                                        GTK_FILE_CHOOSER_ACTION_SAVE,
                                        GTK_STOCK_CANCEL, GTK_RESPONSE_CANCEL,
                                        GTK_STOCK_SAVE, GTK_RESPONSE_ACCEPT,
                                        NULL);
    gtk_file_chooser_set_current_name(GTK_FILE_CHOOSER(dialog), "unnamed");
  }

  gtk_window_set_modal(GTK_WINDOW(dialog), TRUE);
  gtk_window_set_position(GTK_WINDOW(dialog), GTK_WIN_POS_CENTER_ON_PARENT);

  // we expect an actual path below, if the path is 0 we might crash
  if (path != 0 && !string_empty(path))
  {
    ASSERT_MESSAGE(path_is_absolute(path), "file_dialog_show: path not absolute: " << makeQuoted(path));

    Array<char> new_path(strlen(path)+1);

    // copy path, replacing dir separators as appropriate
    Array<char>::iterator w = new_path.begin();
    for(const char* r = path; *r != '\0'; ++r)
    {
      *w++ = (*r == '/') ? G_DIR_SEPARATOR : *r;
    }
    // remove separator from end of path if required
    if(*(w-1) == G_DIR_SEPARATOR)
    {
      --w;
    }
    // terminate string
    *w = '\0';

    gtk_file_chooser_set_current_folder(GTK_FILE_CHOOSER(dialog), new_path.data());
  }

  // we should add all important paths as shortcut folder...
  // gtk_file_chooser_add_shortcut_folder(GTK_FILE_CHOOSER(dialog), "/tmp/", NULL);

  
  for(std::size_t i = 0; i < masks.m_filters.size(); ++i)
  {
    GtkFileFilter* filter = gtk_file_filter_new();
    gtk_file_filter_add_pattern(filter, masks.m_filters[i].c_str());
    gtk_file_filter_set_name(filter, masks.m_masks[i].c_str());
    gtk_file_chooser_add_filter(GTK_FILE_CHOOSER(dialog), filter);
  }

  if(gtk_dialog_run(GTK_DIALOG(dialog)) == GTK_RESPONSE_ACCEPT)
  {
    strcpy(g_file_dialog_file, gtk_file_chooser_get_filename(GTK_FILE_CHOOSER(dialog)));

    if(!string_equal(pattern, "*"))
    {
      GtkFileFilter* filter = gtk_file_chooser_get_filter(GTK_FILE_CHOOSER(dialog));
      if(filter != 0) // no filter set? some file-chooser implementations may allow the user to set no filter, which we treat as 'all files'
      {
        type = masks.GetTypeForGTKMask(gtk_file_filter_get_name(filter)).m_type;
        // last ext separator
        const char* extension = path_get_extension(g_file_dialog_file);
        // no extension
        if(string_empty(extension))
        {
          strcat(g_file_dialog_file, type.pattern+1);
        }
        else
        {
          strcpy(g_file_dialog_file + (extension - g_file_dialog_file), type.pattern+2);
        }
      }
    }

    // convert back to unix format
    for(char* w = g_file_dialog_file; *w!='\0'; w++)
    {
      if(*w=='\\') 
      {
        *w = '/';
      }
    }
  }
  else
  {
    g_file_dialog_file[0] = '\0';
  }

  gtk_widget_destroy(dialog);

  // don't return an empty filename
  if(g_file_dialog_file[0] == '\0') return NULL;

  return g_file_dialog_file;
}
Example No. 13
/*
 * Read enough of the stream to initialize the SkBmpCodec. Returns a bool
 * representing success or failure. If it returned true, and codecOut was
 * not nullptr, it will be set to a new SkBmpCodec.
 * Does *not* take ownership of the passed in SkStream.
 */
bool SkBmpCodec::ReadHeader(SkStream* stream, bool inIco, SkCodec** codecOut) {
    // Header size constants
    static const uint32_t kBmpHeaderBytes = 14;
    static const uint32_t kBmpHeaderBytesPlusFour = kBmpHeaderBytes + 4;
    static const uint32_t kBmpOS2V1Bytes = 12;
    static const uint32_t kBmpOS2V2Bytes = 64;
    static const uint32_t kBmpInfoBaseBytes = 16;
    static const uint32_t kBmpInfoV1Bytes = 40;
    static const uint32_t kBmpInfoV2Bytes = 52;
    static const uint32_t kBmpInfoV3Bytes = 56;
    static const uint32_t kBmpInfoV4Bytes = 108;
    static const uint32_t kBmpInfoV5Bytes = 124;
    static const uint32_t kBmpMaskBytes = 12;

    // The total bytes in the bmp file
    // We only need to use this value for RLE decoding, so we will only
    // check that it is valid in the RLE case.
    uint32_t totalBytes;
    // The offset from the start of the file where the pixel data begins
    uint32_t offset;
    // The size of the second (info) header in bytes
    uint32_t infoBytes;

    // Bmps embedded in Icos skip the first Bmp header
    if (!inIco) {
        // Read the first header and the size of the second header
        SkAutoTDeleteArray<uint8_t> hBuffer(new uint8_t[kBmpHeaderBytesPlusFour]);
        if (stream->read(hBuffer.get(), kBmpHeaderBytesPlusFour) !=
                kBmpHeaderBytesPlusFour) {
            SkCodecPrintf("Error: unable to read first bitmap header.\n");
            return false;
        }

        totalBytes = get_int(hBuffer.get(), 2);
        offset = get_int(hBuffer.get(), 10);
        if (offset < kBmpHeaderBytes + kBmpOS2V1Bytes) {
            SkCodecPrintf("Error: invalid starting location for pixel data\n");
            return false;
        }

        // The size of the second (info) header in bytes
        // The size is the first field of the second header, so we have already
        // read the first four infoBytes.
        infoBytes = get_int(hBuffer.get(), 14);
        if (infoBytes < kBmpOS2V1Bytes) {
            SkCodecPrintf("Error: invalid second header size.\n");
            return false;
        }
    } else {
        // This value is only used by RLE compression.  Bmp in Ico files do not
        // use RLE.  If the compression field is incorrectly signaled as RLE,
        // we will catch this and signal an error below.
        totalBytes = 0;

        // Bmps in Ico cannot specify an offset.  We will always assume that
        // pixel data begins immediately after the color table.  This value
        // will be corrected below.
        offset = 0;

        // Read the size of the second header
        SkAutoTDeleteArray<uint8_t> hBuffer(new uint8_t[4]);
        if (stream->read(hBuffer.get(), 4) != 4) {
            SkCodecPrintf("Error: unable to read size of second bitmap header.\n");
            return false;
        }
        infoBytes = get_int(hBuffer.get(), 0);
        if (infoBytes < kBmpOS2V1Bytes) {
            SkCodecPrintf("Error: invalid second header size.\n");
            return false;
        }
    }

    // We already read the first four bytes of the info header to get the size
    const uint32_t infoBytesRemaining = infoBytes - 4;

    // Read the second header
    SkAutoTDeleteArray<uint8_t> iBuffer(new uint8_t[infoBytesRemaining]);
    if (stream->read(iBuffer.get(), infoBytesRemaining) != infoBytesRemaining) {
        SkCodecPrintf("Error: unable to read second bitmap header.\n");
        return false;
    }

    // The number of bits used per pixel in the pixel data
    uint16_t bitsPerPixel;

    // The compression method for the pixel data
    uint32_t compression = kNone_BmpCompressionMethod;

    // Number of colors in the color table, defaults to 0 or max (see below)
    uint32_t numColors = 0;

    // Bytes per color in the color table, early versions use 3, most use 4
    uint32_t bytesPerColor;

    // The image width and height
    int width, height;

    // Determine image information depending on second header format
    BmpHeaderType headerType;
    if (infoBytes >= kBmpInfoBaseBytes) {
        // Check the version of the header
        switch (infoBytes) {
            case kBmpInfoV1Bytes:
                headerType = kInfoV1_BmpHeaderType;
                break;
            case kBmpInfoV2Bytes:
                headerType = kInfoV2_BmpHeaderType;
                break;
            case kBmpInfoV3Bytes:
                headerType = kInfoV3_BmpHeaderType;
                break;
            case kBmpInfoV4Bytes:
                headerType = kInfoV4_BmpHeaderType;
                break;
            case kBmpInfoV5Bytes:
                headerType = kInfoV5_BmpHeaderType;
                break;
            case 16:
            case 20:
            case 24:
            case 28:
            case 32:
            case 36:
            case 42:
            case 46:
            case 48:
            case 60:
            case kBmpOS2V2Bytes:
                headerType = kOS2VX_BmpHeaderType;
                break;
            default:
                // We do not signal an error here because there is the
                // possibility of new or undocumented bmp header types.  Most
                // of the newer versions of bmp headers are similar to and
                // build off of the older versions, so we may still be able to
                // decode the bmp.
                SkCodecPrintf("Warning: unknown bmp header format.\n");
                headerType = kUnknown_BmpHeaderType;
                break;
        }
        // We check the size of the header before entering the if statement.
        // We should not reach this point unless the size is large enough for
        // these required fields.
        SkASSERT(infoBytesRemaining >= 12);
        width = get_int(iBuffer.get(), 0);
        height = get_int(iBuffer.get(), 4);
        bitsPerPixel = get_short(iBuffer.get(), 10);

        // Some versions do not have these fields, so we check before
        // overwriting the default value.
        if (infoBytesRemaining >= 16) {
            compression = get_int(iBuffer.get(), 12);
            if (infoBytesRemaining >= 32) {
                numColors = get_int(iBuffer.get(), 28);
            }
        }

        // All of the headers that reach this point, store color table entries
        // using 4 bytes per pixel.
        bytesPerColor = 4;
    } else if (infoBytes >= kBmpOS2V1Bytes) {
        // The OS2V1 is treated separately because it has a unique format
        headerType = kOS2V1_BmpHeaderType;
        width = (int) get_short(iBuffer.get(), 0);
        height = (int) get_short(iBuffer.get(), 2);
        bitsPerPixel = get_short(iBuffer.get(), 6);
        bytesPerColor = 3;
    } else {
        // There are no valid bmp headers
        SkCodecPrintf("Error: second bitmap header size is invalid.\n");
        return false;
    }

    // Check for valid dimensions from header
    SkScanlineDecoder::SkScanlineOrder rowOrder = SkScanlineDecoder::kBottomUp_SkScanlineOrder;
    if (height < 0) {
        height = -height;
        rowOrder = SkScanlineDecoder::kTopDown_SkScanlineOrder;
    }
    // The height field for bmp in ico is double the actual height because they
    // contain an XOR mask followed by an AND mask
    if (inIco) {
        height /= 2;
    }
    if (width <= 0 || height <= 0) {
        // TODO: Decide if we want to disable really large bmps as well.
        // https://code.google.com/p/skia/issues/detail?id=3617
        SkCodecPrintf("Error: invalid bitmap dimensions.\n");
        return false;
    }

    // Create mask struct
    SkMasks::InputMasks inputMasks;
    memset(&inputMasks, 0, sizeof(SkMasks::InputMasks));

    // Determine the input compression format and set bit masks if necessary
    uint32_t maskBytes = 0;
    BmpInputFormat inputFormat = kUnknown_BmpInputFormat;
    switch (compression) {
        case kNone_BmpCompressionMethod:
            inputFormat = kStandard_BmpInputFormat;
            break;
        case k8BitRLE_BmpCompressionMethod:
            if (bitsPerPixel != 8) {
                SkCodecPrintf("Warning: correcting invalid bitmap format.\n");
                bitsPerPixel = 8;
            }
            inputFormat = kRLE_BmpInputFormat;
            break;
        case k4BitRLE_BmpCompressionMethod:
            if (bitsPerPixel != 4) {
                SkCodecPrintf("Warning: correcting invalid bitmap format.\n");
                bitsPerPixel = 4;
            }
            inputFormat = kRLE_BmpInputFormat;
            break;
        case kAlphaBitMasks_BmpCompressionMethod:
        case kBitMasks_BmpCompressionMethod:
            // Load the masks
            inputFormat = kBitMask_BmpInputFormat;
            switch (headerType) {
                case kInfoV1_BmpHeaderType: {
                    // The V1 header stores the bit masks after the header
                    SkAutoTDeleteArray<uint8_t> mBuffer(new uint8_t[kBmpMaskBytes]);
                    if (stream->read(mBuffer.get(), kBmpMaskBytes) !=
                            kBmpMaskBytes) {
                        SkCodecPrintf("Error: unable to read bit inputMasks.\n");
                        return false;
                    }
                    maskBytes = kBmpMaskBytes;
                    inputMasks.red = get_int(mBuffer.get(), 0);
                    inputMasks.green = get_int(mBuffer.get(), 4);
                    inputMasks.blue = get_int(mBuffer.get(), 8);
                    break;
                }
                case kInfoV2_BmpHeaderType:
                case kInfoV3_BmpHeaderType:
                case kInfoV4_BmpHeaderType:
                case kInfoV5_BmpHeaderType:
                    // Header types are matched based on size.  If the header
                    // is V2+, we are guaranteed to be able to read at least
                    // this size.
                    SkASSERT(infoBytesRemaining >= 48);
                    inputMasks.red = get_int(iBuffer.get(), 36);
                    inputMasks.green = get_int(iBuffer.get(), 40);
                    inputMasks.blue = get_int(iBuffer.get(), 44);
                    break;
                case kOS2VX_BmpHeaderType:
                    // TODO: Decide if we intend to support this.
                    //       It is unsupported in the previous version and
                    //       in chromium.  I have not come across a test case
                    //       that uses this format.
                    SkCodecPrintf("Error: huffman format unsupported.\n");
                    return false;
                default:
                   SkCodecPrintf("Error: invalid bmp bit masks header.\n");
                   return false;
            }
            break;
        case kJpeg_BmpCompressionMethod:
            if (24 == bitsPerPixel) {
                inputFormat = kRLE_BmpInputFormat;
                break;
            }
            // Fall through
        case kPng_BmpCompressionMethod:
            // TODO: Decide if we intend to support this.
            //       It is unsupported in the previous version and
            //       in chromium.  I think it is used mostly for printers.
            SkCodecPrintf("Error: compression format not supported.\n");
            return false;
        case kCMYK_BmpCompressionMethod:
        case kCMYK8BitRLE_BmpCompressionMethod:
        case kCMYK4BitRLE_BmpCompressionMethod:
            // TODO: Same as above.
            SkCodecPrintf("Error: CMYK not supported for bitmap decoding.\n");
            return false;
        default:
            SkCodecPrintf("Error: invalid format for bitmap decoding.\n");
            return false;
    }

    // Most versions of bmps should be rendered as opaque.  Either they do
    // not have an alpha channel, or they expect the alpha channel to be
    // ignored.  V3+ bmp files introduce an alpha mask and allow the creator
    // of the image to use the alpha channels.  However, many of these images
    // leave the alpha channel blank and expect to be rendered as opaque.  This
    // is the case for almost all V3 images, so we render these as opaque.  For
    // V4+, we will use the alpha channel, and fix the image later if it turns
    // out to be fully transparent.
    // As an exception, V3 bmp-in-ico may use an alpha mask.
    SkAlphaType alphaType = kOpaque_SkAlphaType;
    if ((kInfoV3_BmpHeaderType == headerType && inIco) ||
            kInfoV4_BmpHeaderType == headerType ||
            kInfoV5_BmpHeaderType == headerType) {
        // Header types are matched based on size.  If the header is
        // V3+, we are guaranteed to be able to read at least this size.
        SkASSERT(infoBytesRemaining > 52);
        inputMasks.alpha = get_int(iBuffer.get(), 48);
        if (inputMasks.alpha != 0) {
            alphaType = kUnpremul_SkAlphaType;
        }
    }
    iBuffer.free();

    // Additionally, 32 bit bmp-in-icos use the alpha channel.
    // FIXME (msarett): Don't all bmp-in-icos use the alpha channel?
    // And, RLE inputs may skip pixels, leaving them as transparent.  This
    // is uncommon, but we cannot be certain that an RLE bmp will be opaque.
    if ((inIco && 32 == bitsPerPixel) || (kRLE_BmpInputFormat == inputFormat)) {
        alphaType = kUnpremul_SkAlphaType;
    }

    // Check for valid bits per pixel.
    // At the same time, use this information to choose a suggested color type
    // and to set default masks.
    SkColorType colorType = kN32_SkColorType;
    switch (bitsPerPixel) {
        // In addition to more standard pixel compression formats, bmp supports
        // the use of bit masks to determine pixel components.  The standard
        // format for representing 16-bit colors is 555 (XRRRRRGGGGGBBBBB),
        // which does not map well to any Skia color formats.  For this reason,
        // we will always enable mask mode with 16 bits per pixel.
        case 16:
            if (kBitMask_BmpInputFormat != inputFormat) {
                inputMasks.red = 0x7C00;
                inputMasks.green = 0x03E0;
                inputMasks.blue = 0x001F;
                inputFormat = kBitMask_BmpInputFormat;
            }
            break;
        // We want to decode to kIndex_8 for input formats that are already
        // designed in index format.
        case 1:
        case 2:
        case 4:
        case 8:
            // However, we cannot in RLE format since we may need to leave some
            // pixels as transparent.  Similarly, we also cannot for ICO images
            // since we may need to apply a transparent mask.
            if (kRLE_BmpInputFormat != inputFormat && !inIco) {
                colorType = kIndex_8_SkColorType;
            }
        case 24:
        case 32:
            break;
        default:
            SkCodecPrintf("Error: invalid input value for bits per pixel.\n");
            return false;
    }

    // Check that input bit masks are valid and create the masks object
    SkAutoTDelete<SkMasks>
            masks(SkMasks::CreateMasks(inputMasks, bitsPerPixel));
    if (nullptr == masks) {
        SkCodecPrintf("Error: invalid input masks.\n");
        return false;
    }

    // Check for a valid number of total bytes when in RLE mode
    if (totalBytes <= offset && kRLE_BmpInputFormat == inputFormat) {
        SkCodecPrintf("Error: RLE requires valid input size.\n");
        return false;
    }
    const size_t RLEBytes = totalBytes - offset;

    // Calculate the number of bytes read so far
    const uint32_t bytesRead = kBmpHeaderBytes + infoBytes + maskBytes;
    if (!inIco && offset < bytesRead) {
        // TODO (msarett): Do we really want to fail if the offset in the header is invalid?
        //                 Seems like we can just assume that the offset is zero and try to decode?
        //                 Maybe we don't want to try to decode corrupt images?
        SkCodecPrintf("Error: pixel data offset less than header size.\n");
        return false;
    }

    // Skip to the start of the pixel array.
    // We can do this here because there is no color table to read
    // in bit mask mode.
    if (!inIco && kBitMask_BmpInputFormat == inputFormat) {
        if (stream->skip(offset - bytesRead) != offset - bytesRead) {
            SkCodecPrintf("Error: unable to skip to image data.\n");
            return false;
        }
    }

    if (codecOut) {
        // Set the image info
        const SkImageInfo& imageInfo = SkImageInfo::Make(width, height,
                colorType, alphaType);

        // Return the codec
        switch (inputFormat) {
            case kStandard_BmpInputFormat:
                *codecOut = new SkBmpStandardCodec(imageInfo, stream, bitsPerPixel, numColors,
                        bytesPerColor, offset - bytesRead, rowOrder, inIco);
                return true;
            case kBitMask_BmpInputFormat:
                // Bmp-in-Ico must be standard mode
                if (inIco) {
                    SkCodecPrintf("Error: Icos may not use bit mask format.\n");
                    return false;
                }

                *codecOut = new SkBmpMaskCodec(imageInfo, stream, bitsPerPixel, masks.detach(),
                        rowOrder);
                return true;
            case kRLE_BmpInputFormat:
                // Bmp-in-Ico must be standard mode
                // When inIco is true, this line cannot be reached, since we
                // require that RLE Bmps have a valid number of totalBytes, and
                // Icos skip the header that contains totalBytes.
                SkASSERT(!inIco);
                *codecOut = new SkBmpRLECodec(imageInfo, stream, bitsPerPixel, numColors,
                        bytesPerColor, offset - bytesRead, rowOrder, RLEBytes);
                return true;
            default:
                SkASSERT(false);
                return false;
        }
    }

    return true;
}
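
ReadHeader is written to be shared by the sniffing and decoding paths, so its usual caller is a small factory. Below is a sketch of such a caller, modeled loosely on the NewFromStream pattern in this era of Skia; the wrapper name is hypothetical, access to the (normally private) helper is assumed, and stream ownership is simplified.

// Hypothetical caller sketch: on success with a non-null codecOut, ReadHeader
// has allocated the codec; on failure it returns false. Ownership of the
// stream is intentionally not handled here.
SkCodec* NewBmpCodecFromStream(SkStream* stream, bool inIco) {
    SkCodec* codec = nullptr;
    if (SkBmpCodec::ReadHeader(stream, inIco, &codec)) {
        return codec;
    }
    return nullptr;
}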