Code example #1
void QueryWindow::onNickMessage(Event *pEvent)
{
    Message msg = DCAST(MessageEvent, pEvent)->getMessage();

    // Will print a nick change message to the PM window
    // if we get a NICK message, which will only be if we're in
    // a channel with the person (or if the nick being changed is ours).
    QString oldNick = parseMsgPrefix(msg.m_prefix, MsgPrefixName);
    QString textToPrint = GET_STRING("message.nick")
                          .arg(oldNick)
                          .arg(msg.m_params[0]);
    if(m_pSession->isMyNick(oldNick))
    {
        printOutput(textToPrint, MESSAGE_IRC_NICK);
    }
    else
    {
        // If the target nick has changed and there isn't another query with that name
        // already open, then we can safely change the target's nick.
        bool queryWindowExists = DCAST(StatusWindow, m_pManager->getParentWindow(this))->childIrcWindowExists(msg.m_params[0]);
        if(isTargetNick(oldNick) && !queryWindowExists)
        {
            setTargetNick(msg.m_params[0]);
            printOutput(textToPrint, MESSAGE_IRC_NICK);
        }
    }
}
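
Note: every example on this page goes through a DCAST macro to downcast a base pointer (Event*, PandaNode*, ...) to a concrete type. Purely as an illustration of the idea, a checked downcast in the same spirit could look like the sketch below; the real macros in these projects may behave differently, e.g. skipping the runtime check entirely in release builds.

// Minimal sketch of a DCAST-style checked downcast (illustrative, not the real macro).
#include <cassert>

template <typename Derived, typename Base>
Derived* checked_cast(Base* basePtr)
{
    Derived* derivedPtr = dynamic_cast<Derived*>(basePtr);
    assert(derivedPtr && "checked_cast: unexpected runtime type");
    return derivedPtr;
}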
Code example #2
/**
 * @brief Constructs a new TagStateManager
 * @details This constructs a new TagStateManager. The #main_cam_node should
 *   refer to the main scene camera, and will most likely be base.cam.
 *   It is necessary to pass the camera because the C++ code does not have
 *   access to the showbase.
 *
 * @param main_cam_node The main scene camera
 */
TagStateManager::TagStateManager(NodePath main_cam_node) {
    nassertv(!main_cam_node.is_empty());
    nassertv(DCAST(Camera, main_cam_node.node()) != NULL);
    _main_cam_node = main_cam_node;

    // Set default camera mask
    DCAST(Camera, _main_cam_node.node())->set_camera_mask(BitMask32::bit(1));

    // Init containers
    _containers["shadow"]   = StateContainer("Shadows",  2, false);
    _containers["voxelize"] = StateContainer("Voxelize", 3, false);
    _containers["envmap"]   = StateContainer("Envmap",   4, true);
    _containers["forward"]  = StateContainer("Forward",  5, true);
}
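
A hedged usage sketch for this constructor, assuming a plain PandaFramework application; the header name tagStateManager.h and the way the main Camera NodePath is located are assumptions, not taken from the project.

#include "pandaFramework.h"
#include "windowFramework.h"
#include "tagStateManager.h"   // assumed header name

int main(int argc, char* argv[]) {
    PandaFramework framework;
    framework.open_framework(argc, argv);
    WindowFramework* window = framework.open_window();

    // Locate the NodePath of the default Camera node; this plays the role of
    // base.cam mentioned in the docstring above.
    NodePath main_cam = window->get_camera_group().find("**/+Camera");

    TagStateManager tag_mgr(main_cam);

    framework.main_loop();
    framework.close_framework();
    return 0;
}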
Code example #3
File: world.cpp  Project: drivird/drunken-octo-robot
void World::start()
   {
   // The maze model also has a locator in it for where to start the ball
   // To access it we use the find command
   LPoint3f startPos = m_mazeNp.find("**/start").get_pos();
   // Set the ball in the starting position
   m_ballRootNp.set_pos(startPos);
   // Initial velocity is 0
   m_ballV = LVector3f(0,0,0);
   // Initial acceleration is 0
   m_accelV = LVector3f(0,0,0);

   // For a traverser to actually do collisions, you need to call
   // traverser.traverse() on a part of the scene. Fortunately, base has a
   // task that does this for the entire scene once a frame. This sets up our
   // traverser as the one to be called automatically
   // Note: have to do it manually in C++
   PT(GenericAsyncTask) traverserTaskPtr = new GenericAsyncTask("traverser", call_traverse, this);
   if(traverserTaskPtr != NULL)
      {
      AsyncTaskManager::get_global_ptr()->add(traverserTaskPtr);
      }

   // Create the movement task, but first make sure it is not already running
   PT(GenericAsyncTask) rollTaskPtr = DCAST(GenericAsyncTask, AsyncTaskManager::get_global_ptr()->find_task("rollTask"));
   if(rollTaskPtr == NULL)
      {
      rollTaskPtr = new GenericAsyncTask("rollTask", call_roll, this);
      if(rollTaskPtr != NULL)
         {
         AsyncTaskManager::get_global_ptr()->add(rollTaskPtr);
         }
      }
   m_last = 0;
   }
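
The note about having to traverse manually in C++ refers to a static trampoline like the one sketched below. This is only a guess based on the member names visible elsewhere in this listing (m_cTrav, m_windowFrameworkPtr); the project's actual callback may differ.

// Sketch of the call_traverse callback registered above. GenericAsyncTask
// callbacks receive the task plus the user-data pointer passed at creation
// time (here the World instance).
AsyncTask::DoneStatus World::call_traverse(GenericAsyncTask* taskPtr, void* dataPtr)
   {
   World* worldPtr = static_cast<World*>(dataPtr);
   // Run the collision pass over the whole scene once per frame
   worldPtr->m_cTrav.traverse(worldPtr->m_windowFrameworkPtr->get_render());
   return AsyncTask::DS_cont;
   }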
Code example #4
File: world.cpp  Project: drivird/drunken-octo-robot
// This is the task that deals with making everything interactive
AsyncTask::DoneStatus World::roll(GenericAsyncTask* taskPtr)
   {
   // Standard technique for finding the amount of time since the last frame
   double dt = taskPtr->get_elapsed_time() - m_last;
   m_last = taskPtr->get_elapsed_time();

   // If dt is large, then there has been a hiccup that could cause the ball
   // to leave the field if this function runs, so ignore the frame
   if(dt > 0.2) { return AsyncTask::DS_cont; }

   // The collision handler collects the collisions. We dispatch which function
   // to handle the collision based on the name of what was collided into
   for(int i = 0; i < m_cHandlerPtr->get_num_entries(); ++i)
      {
      PT(CollisionEntry) entryPtr = m_cHandlerPtr->get_entry(i);
      const string& name = entryPtr->get_into_node()->get_name();
      if(name == "wall_collide")        { wall_collide_handler(*entryPtr);   }
      else if(name == "ground_collide") { ground_collide_handler(*entryPtr); }
      else if(name == "loseTrigger")    { lose_game(*entryPtr);              }
      }

   // Read the mouse position and tilt the maze accordingly
   PT(MouseWatcher) mouseWatcherPtr = DCAST(MouseWatcher, m_windowFrameworkPtr->get_mouse().node());
   if(mouseWatcherPtr->has_mouse())
      {
      // get the mouse position
      const LPoint2f& mpos = mouseWatcherPtr->get_mouse();
      m_mazeNp.set_p(mpos.get_y() * -10);
      m_mazeNp.set_r(mpos.get_x() * 10);
      }

   // Finally, we move the ball
   // Update the velocity based on acceleration
   m_ballV += m_accelV * dt * ACCEL;
   // Clamp the velocity to the maximum speed
   if(m_ballV.length_squared() > MAX_SPEED_SQ)
      {
      m_ballV.normalize();
      m_ballV *= MAX_SPEED;
      }
   // Update the position based on the velocity
   m_ballRootNp.set_pos(m_ballRootNp.get_pos() + (m_ballV * dt));

   // This block of code rotates the ball. It uses something called a quaternion
   // to rotate the ball around an arbitrary axis. That axis is perpendicular to
   // the ball's motion, and the amount has to do with the size of the ball.
   // This is multiplied onto the previous rotation to incrementally turn it.
   LRotationf prevRot(m_ballNp.get_quat());
   LVector3f axis = UP.cross(m_ballV);
   LRotationf newRot(axis, 45.5 * dt * m_ballV.length());
   m_ballNp.set_quat(prevRot * newRot);

   // Continue the task indefinitely
   return AsyncTask::DS_cont;
   }
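
roll() uses a few constants that do not appear in this listing: ACCEL, MAX_SPEED, MAX_SPEED_SQ and UP. UP is a member initialized to (0, 0, 1) in the constructor shown in code example #16; plausible definitions for the others, mirroring the original ball-in-maze tutorial, would be the following (this port's actual values may differ):

// Assumed values, following the original tutorial; not taken from this project.
static const float ACCEL        = 70.0f;                  // acceleration in ft/sec/sec
static const float MAX_SPEED    = 5.0f;                   // maximum ball speed in ft/sec
static const float MAX_SPEED_SQ = MAX_SPEED * MAX_SPEED;  // squared, saves a sqrt per frame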
Code example #5
/**
 * @brief Cleans up all registered states.
 * @details This cleans up all states which were registered to the TagStateManager.
 *   It also calls Camera::clear_tag_states() on the main_cam_node and all attached
 *   cameras.
 */
void TagStateManager::cleanup_states() {
    if (tagstatemgr_cat.is_info()) {
        tagstatemgr_cat.info() << "cleaning up states" << endl;
    }

    // Clear all tag states of the main camera
    DCAST(Camera, _main_cam_node.node())->clear_tag_states();

    // Clear the containers
    // XXX: Just iterate over the _container map
    cleanup_container_states(_containers["shadow"]);
    cleanup_container_states(_containers["voxelize"]);
    cleanup_container_states(_containers["envmap"]);
    cleanup_container_states(_containers["forward"]);
}
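
The XXX note above asks for iterating the container map directly; assuming _containers is a std::map keyed by std::string (as the assignments in the constructor suggest), the four explicit calls could be replaced by a loop roughly like this:

    // Iterate over every registered container instead of naming each one.
    typedef std::map<std::string, StateContainer> ContainerMap;   // assumed container type
    for (ContainerMap::iterator it = _containers.begin(); it != _containers.end(); ++it) {
        cleanup_container_states(it->second);
    }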
Code example #6
void QueryWindow::onOutput(Event *pEvent)
{
    OutputEvent *pOutputEvt = DCAST(OutputEvent, pEvent);
    QRegExp regex(OutputWindow::s_invalidNickPrefix
                + QRegExp::escape(m_targetNick)
                + OutputWindow::s_invalidNickSuffix);
    regex.setCaseSensitivity(Qt::CaseInsensitive);
    int lastIdx = 0, idx;
    while((idx = regex.indexIn(pOutputEvt->getText(), lastIdx)) >= 0)
    {
        idx += regex.capturedTexts()[1].length();
        lastIdx = idx + m_targetNick.length() - 1;
        pOutputEvt->addLinkInfo(idx, lastIdx);
    }
}
Code example #7
void QueryWindow::onPrivmsgMessage(Event *pEvent)
{
    Message msg = DCAST(MessageEvent, pEvent)->getMessage();
    if(m_pSession->isMyNick(msg.m_params[0]))
    {
        QString fromNick = parseMsgPrefix(msg.m_prefix, MsgPrefixName);
        if(isTargetNick(fromNick))
        {
            QString textToPrint;
            bool shouldHighlight = false;
            OutputMessageType msgType = MESSAGE_CUSTOM;

            CtcpRequestType requestType = getCtcpRequestType(msg);
            if(requestType != RequestTypeInvalid)
            {
                // ACTION is /me, so handle it accordingly.
                if(requestType == RequestTypeAction)
                {
                    QString action = msg.m_params[1];

                    // Action is in the format of "\1ACTION <action>\1", so
                    // the first 8 and last 1 characters will be excluded.
                    msgType = MESSAGE_IRC_ACTION;
                    QString msgText = action.mid(8, action.size()-9);
                    shouldHighlight = containsNick(msgText);
                    textToPrint = GET_STRING("message.action")
                                  .arg(fromNick)
                                  .arg(msgText);
                }
            }
            else
            {
                msgType = MESSAGE_IRC_SAY;
                shouldHighlight = containsNick(msg.m_params[1]);
                textToPrint = GET_STRING("message.say")
                              .arg(fromNick)
                              .arg(msg.m_params[1]);
            }

            if(!hasFocus())
            {
                QApplication::alert(this);
            }

            printOutput(textToPrint, msgType, shouldHighlight ? COLOR_HIGHLIGHT : COLOR_NONE);
        }
    }
}
Code example #8
void QueryWindow::onNumericMessage(Event *pEvent)
{
    Message msg = DCAST(MessageEvent, pEvent)->getMessage();
    switch(msg.m_command)
    {
        case 401:   // ERR_NOSUCHNICK
        case 404:   // ERR_CANNOTSENDTOCHAN
        {
            // msg.m_params[0]: my nick
            // msg.m_params[1]: nick/channel
            // msg.m_params[2]: "No such nick/channel"
            if(msg.m_params[1].compare(getWindowName(), Qt::CaseInsensitive) == 0)
                printOutput(getNumericText(msg), MESSAGE_IRC_NUMERIC);
        }
    }
}
Code example #9
File: world.cpp  Project: drivird/drunken-octo-robot
// This task gets the position of the mouse each frame and rotates the neck based
// on it.
void World::turn_head()
   {
   // Check to make sure the mouse is readable
   PT(MouseWatcher) mouseWatcherPtr = DCAST(MouseWatcher, m_windowFrameworkPtr->get_mouse().node());
   if(mouseWatcherPtr->has_mouse())
      {
      // Get the mouse position as a Vec2. The values for each axis are from -1 to
      // 1. The bottom-left is (-1,-1), the top-right is (1,1)
      const LPoint2f& mpos = mouseWatcherPtr->get_mouse();
      // Here we multiply the values to get the amount of degrees to turn
      // Restrain is used to make sure the values returned by getMouse are in the
      // valid range. If this particular model were to turn more than this,
      // significant tearing would be visible
      m_eveNeckNp.set_p(restrain(mpos.get_x()) * 50);
      m_eveNeckNp.set_h(restrain(mpos.get_y()) * 20);
      }
   }
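
restrain() is not shown in this listing. In the original tutorial it simply clamps the mouse coordinate so the neck never over-rotates; a sketch with the tutorial's default limits (this port's version may differ):

#include <algorithm>

// Clamp a mouse coordinate to [mn, mx] so the neck joint never over-rotates.
static float restrain(float i, float mn = -0.5f, float mx = 0.5f)
   {
   return std::min(std::max(i, mn), mx);
   }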
Code example #10
File: cCommonFilters.cpp  Project: Adrasl/OX
// Updates the shader inputs that need to be updated every frame.
// Normally you shouldn't call this yourself; it's called from a task.
void CCommonFilters::update()
   {
   if(m_configuration["VolumetricLighting"] != NULL)
      {
      NodePath caster =
            static_cast<VolumetricLightingConfiguration*>
            (m_configuration["VolumetricLighting"])->caster;
      LPoint2f casterpos;
      NodePath cameraNp = m_manager.m_camera;
      PT(Camera) camera = DCAST(Camera, cameraNp.node());
      camera->get_lens()->project(caster.get_pos(cameraNp), casterpos);
      m_finalQuad.set_shader_input("casterpos",
                                   LVector4f(casterpos.get_x() * 0.5 + 0.5,
                                              casterpos.get_y() * 0.5 + 0.5,
                                              0,
                                              0));
      }
   }
Code example #11
/**
 * @brief Updates the ShadowManager
 * @details This updates the ShadowManager, processing all shadow sources which
 *   need to get updated.
 *
 *   This first collects all sources which require an update, sorts them by priority,
 *   and then processes the first <max_updates> ShadowSources.
 *
 *   This must not be called before ShadowManager::init, or an assertion will be
 *   triggered.
 */
void ShadowManager::update() {
    nassertv(_atlas != NULL);                         // ShadowManager::init not called yet
    nassertv(_queued_updates.size() <= _max_updates); // Internal error, should not happen

    // Disable all cameras and regions which will not be used
    for (size_t i = _queued_updates.size(); i < _max_updates; ++i) {
        _cameras[i]->set_active(false);
        _display_regions[i]->set_active(false);
    }

    // Iterate over all queued updates
    for (size_t i = 0; i < _queued_updates.size(); ++i) {
        const ShadowSource* source = _queued_updates[i];

        // Enable the camera and display region, so they perform a render
        _cameras[i]->set_active(true);
        _display_regions[i]->set_active(true);

        // Set the view projection matrix
        DCAST(MatrixLens, _cameras[i]->get_lens())->set_user_mat(source->get_mvp());

        // Optional: Show the camera frustum for debugging
        // _cameras[i]->show_frustum();

        // Set the correct dimensions on the display region
        const LVecBase4f& uv = source->get_uv_region();
        _display_regions[i]->set_dimensions(
            uv.get_x(),              // left
            uv.get_x() + uv.get_z(), // right
            uv.get_y(),              // bottom
            uv.get_y() + uv.get_w()  // top
        );
    }

    // Clear the update list
    _queued_updates.clear();
    _queued_updates.reserve(_max_updates);
}
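
A hedged sketch of how update() could be driven once per frame with a GenericAsyncTask; the task name, the update_shadow_manager function and the shadow_mgr pointer are illustrative, not part of the ShadowManager API.

// Call ShadowManager::update() every frame via the global task manager.
AsyncTask::DoneStatus update_shadow_manager(GenericAsyncTask* task, void* data) {
    static_cast<ShadowManager*>(data)->update();
    return AsyncTask::DS_cont;
}

// Registration (shadow_mgr is a ShadowManager* owned elsewhere):
// AsyncTaskManager::get_global_ptr()->add(
//     new GenericAsyncTask("updateShadowManager", update_shadow_manager, shadow_mgr));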
Code example #12
File: world.cpp  Project: drivird/drunken-octo-robot
World::World(WindowFramework* windowFramework)
   : m_windowFramework(windowFramework),
     m_title(),
     m_escapeEventText(),
     m_onekeyEventText(),
     m_twokeyEventText(),
     m_duckPlane(),
     m_duckTexs(),
     m_duckTask(),
     m_fps(36),
     m_expPlane(),
     m_expTask(),
     m_orientPlane(),
     m_orientTex(),
     m_trackball()
   {
   // Standard initialization stuff
   // Standard title that's on screen in every tutorial
   COnscreenText title("title");
   title.set_text("Panda3D: Tutorial - Texture \"Movies\"");
   title.set_fg(Colorf(1, 1, 1, 1));
   title.set_pos(0.7, -0.95);
   title.set_scale(0.07);
   const NodePath& aspect2d = m_windowFramework->get_aspect_2d();
   title.reparent_to(aspect2d);
   m_title = title.generate();

   // Text to show the keyboard keys and their functions on screen
   COnscreenText escapeEventText("escapeEvent");
   escapeEventText.set_text("ESC: Quit");
   escapeEventText.set_fg(Colorf(1, 1, 1, 1));
   escapeEventText.set_pos(-1.3, 0.95);
   escapeEventText.set_align(TextNode::A_left);
   escapeEventText.set_scale(0.05);
   escapeEventText.reparent_to(aspect2d);
   m_escapeEventText = escapeEventText.generate();

   COnscreenText onekeyEventText("onekeyEvent");
   onekeyEventText.set_text("[1]: Freeview camera");
   onekeyEventText.set_fg(Colorf(1, 1, 1, 1));
   onekeyEventText.set_pos(-1.3, 0.90);
   onekeyEventText.set_align(TextNode::A_left);
   onekeyEventText.set_scale(0.05);
   onekeyEventText.reparent_to(aspect2d);
   m_onekeyEventText = onekeyEventText.generate();

   COnscreenText twokeyEventText("twokeyEvent");
   twokeyEventText.set_text(
      "[2]: Preset Camera Angle 2 (Verify billboard effect)");
   twokeyEventText.set_fg(Colorf(1, 1, 1, 1));
   twokeyEventText.set_pos(-1.3, 0.85);
   twokeyEventText.set_align(TextNode::A_left);
   twokeyEventText.set_scale(0.05);
   twokeyEventText.reparent_to(aspect2d);
   m_twokeyEventText = twokeyEventText.generate();

   // Set the background color
   m_windowFramework->set_background_type(WindowFramework::BT_black);

   // Set up the key input
   // Escape quits
   WORLD_DEFINE_KEY("escape", "exit", quit);
   //Free view
   WORLD_DEFINE_KEY("1", "setViewMain", set_view_main);
   // Billboard effect view
   WORLD_DEFINE_KEY("2", "setViewBillboard", set_view_billboard);

   // Initialization specific to this world
   // Load a polygon plane (4 sided square) to put an animated duck sprite on
   const NodePath& models =
      m_windowFramework->get_panda_framework()->get_models();
   m_duckPlane = m_windowFramework->load_model(models, "../models/plane");
   m_duckPlane.set_pos(-2, 8, 0);         // set its position
   const NodePath& render = m_windowFramework->get_render();
   m_duckPlane.reparent_to(render);       // reparent to render

   // Enable transparency: this attribute needs to be set for Panda to render the
   // transparency in the duck's texture as transparent rather than opaque
   m_duckPlane.set_transparency(TransparencyAttrib::M_alpha);

   // Now we call our special 'loadTextureMovie' function that returns a list
   // containing all of the textures for the duck sprite.
   // Check the function definition later in this file for its parameters
   load_texture_movie(24, "../duck/duck_fly_left", "png", 2, &m_duckTexs);

   // Next we add a task to our task list that will animate the texture on the
   // duck plane according to the time elapsed.
   m_duckTask = WORLD_ADD_TASK("duckTask",
      TEXTURE_MOVIE(36, m_duckPlane, m_duckTexs));
   // The function texture_movie is set to run any texture movie that
   // animates and loops based on time (rather than some other value like
   // position). To do that, it is set up to expect a number of parameters set
   // in the task object. The following lines set those parameters

   /* Note: passed to the task as template parameters
   #Framerate: The texture will be changed 36 times per second
   self.duckTask.fps = 36
   #self.duckPlane is the object whose texture should be changed
   self.duckTask.obj = self.duckPlane
   #self.duckTexs (which we created earlier with self.loadTextureMovie)
   #contains the list of textures to animate from
   self.duckTask.textures = self.duckTexs
   */

   // Now, instead of a duck, we will put an animated explosion onto a polygon
   // This is the same as loading the duck animation, with the exception that
   // we will "billboard" the explosion so that it always faces the camera
   // load the object
   m_expPlane = m_windowFramework->load_model(models, "../models/plane");
   m_expPlane.set_pos(2, 8, 0);          // set its position
   m_expPlane.reparent_to(render);       // reparent to render
   // enable transparency
   m_expPlane.set_transparency(TransparencyAttrib::M_alpha);
   // load the texture movie
   load_texture_movie(51, "../explosion/explosion", "png", 4, &m_expTexs);

   // create the animation task
   m_expTask = WORLD_ADD_TASK("explosionTask",
      TEXTURE_MOVIE(30, m_expPlane, m_expTexs));
   /* Note: passed to the task as template parameters
   m_expTask.fps = 30                                 #set framerate
   m_expTask.obj = self.expPlane                      #set object
   m_expTask.textures = self.expTexs                  #set texture list
   */

   // This creates the "billboard" effect that will rotate the object so that it
   // is always rendered as facing the eye (camera)
   m_expPlane.node()->set_effect(BillboardEffect::make_point_eye());

   // The code below generates the plane you see with the numbers and arrows.
   // This is just to give a sense of orientation as the camera is moved around.
   // Load the object
   m_orientPlane = m_windowFramework->load_model(models, "../models/plane");
   // load the texture
   m_orientTex = TexturePool::load_texture(
      "../models/textures/orientation.png");
   m_orientPlane.set_texture(m_orientTex, 1);        // Set the texture
   m_orientPlane.reparent_to(render);                  // Parent to render
   // Set the position, orientation, and scale
   m_orientPlane.set_pos_hpr_scale(0, 8, -1, 0, -90, 0, 10, 10, 10);

   // Note: mouse support must be activated. The basic method is to call
   //       WindowFramework::setup_trackball. In order to get the same viewpoint
   //       as in the original python tutorial, we get the trackball node and
   //       cancel the previous call to TrackBall::set_pos. Finally, we keep the
   //       trackball's NodePath; we'll use it to enable/disable the mouse.
   NodePath camera = m_windowFramework->get_camera_group();
   m_windowFramework->setup_trackball();
   m_trackball = m_windowFramework->get_mouse().find("**/trackball");
   DCAST(Trackball, m_trackball.node())->set_pos(0, 0, 0);
   }
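
The TEXTURE_MOVIE task wired up above boils down to picking a texture frame from the elapsed time. Below is a rough, non-template sketch of that idea; the TexMovieParams struct and this texture_movie function are purely illustrative and not the project's actual macro, which the note says passes the parameters as template arguments.

#include <vector>

struct TexMovieParams                   // hypothetical bundle of the task parameters
   {
   double fps;                          // frames per second
   NodePath obj;                        // object whose texture is swapped
   std::vector<PT(Texture)> textures;   // frames returned by load_texture_movie
   };

AsyncTask::DoneStatus texture_movie(GenericAsyncTask* taskPtr, void* dataPtr)
   {
   TexMovieParams* p = static_cast<TexMovieParams*>(dataPtr);
   // Convert the elapsed time into a frame index, wrapping around the list
   int frame = static_cast<int>(taskPtr->get_elapsed_time() * p->fps)
               % static_cast<int>(p->textures.size());
   p->obj.set_texture(p->textures[frame], 1);
   return AsyncTask::DS_cont;
   }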
Code example #13
void QueryWindow::onDoubleClickLink(Event *pEvent)
{
    DoubleClickLinkEvent *pDblClickLinkEvt = DCAST(DoubleClickLinkEvent, pEvent);
    m_pSession->sendData(QString().arg(pDblClickLinkEvt->getText()));
}
Code example #14
#include "glgsg.h"


#include "common.h"

TypeHandle SGRenderNode::_type_handle;

SGRenderNode::SGRenderNode(StaticGeometryHandler *handler, PT(Shader) collector_shader) : PandaNode("SGRender") {
    _handler = handler;
    set_internal_bounds(new OmniBoundingVolume);
    set_final(true);
    create_default_geom();


    CPT(RenderAttrib) sattrib = ShaderAttrib::make_off();
    sattrib = DCAST(ShaderAttrib, sattrib)->set_shader_input("DatasetTex", handler->get_dataset_tex());
    sattrib = DCAST(ShaderAttrib, sattrib)->set_shader_input("MappingTex", handler->get_mapping_tex());
    sattrib = DCAST(ShaderAttrib, sattrib)->set_shader_input("DrawnObjectsTex", handler->get_drawn_objects_tex());
    sattrib = DCAST(ShaderAttrib, sattrib)->set_shader_input("DynamicStripsTex", handler->get_dynamic_strips_tex());

    _base_render_state = RenderState::make(sattrib);

    CPT(RenderAttrib) collect_attrib = sattrib;
    collect_attrib = DCAST(ShaderAttrib, collect_attrib)->set_shader(collector_shader, 100000);
    collect_attrib = DCAST(ShaderAttrib, collect_attrib)->set_shader_input("IndirectTex", handler->get_indirect_tex());
  
    _collect_render_state = RenderState::make(collect_attrib);

}

SGRenderNode::~SGRenderNode() {
Code example #15
File: world.cpp  Project: memberii/drunken-octo-robot
World::World(WindowFramework* windowFramework)
    : m_windowFramework(windowFramework),
      m_title(),
      m_inst1(),
      m_inst2(),
      m_inst3(),
      m_inst4(),
      m_altCam(),
      m_teapot(),
      m_teapotInterval(),
      m_bufferViewer(NULL)
      // m_tvMen
{
    // Note: set background color here
    m_windowFramework->get_graphics_output()->get_active_display_region(0)->
    set_clear_color(Colorf(0, 0, 0, 1));

    // Post the instructions.
    m_title = add_title("Panda3D: Tutorial - Using Render-to-Texture");
    m_inst1 = add_instructions(0.95,"ESC: Quit");
    m_inst2 = add_instructions(0.90,"Up/Down: Zoom in/out on the Teapot");
    m_inst3 = add_instructions(0.85,"Left/Right: Move teapot left/right");
    m_inst4 = add_instructions(0.80,"V: View the render-to-texture results");

    // We get a handle to the default window
    PT(GraphicsOutput) mainWindow = m_windowFramework->get_graphics_output();

    // We now get the buffer that's going to hold the texture of our new scene
    PT(GraphicsOutput) altBuffer = mainWindow->make_texture_buffer(
                                       "hello", 256, 256);

    // Now we have to set up a new scene graph to make this scene
    NodePath altRender("new render");

    // this takes care of setting up the camera properly
    m_altCam = m_windowFramework->make_camera();
    // Note: set the size and shape of the "film" within the lens equal to the
    //       buffer of our new scene
    DCAST(Camera, m_altCam.node())->get_lens()->set_film_size(
        altBuffer->get_x_size(), altBuffer->get_y_size());
    // Note: make a DisplayRegion for the camera
    PT(DisplayRegion) dr = altBuffer->make_display_region(0, 1, 0, 1);
    dr->set_sort(0);
    dr->set_camera(m_altCam);
    m_altCam.reparent_to(altRender);
    m_altCam.set_pos(0, -10, 0);

    // Get the teapot and rotate it for a simple animation
    const NodePath& models =
        m_windowFramework->get_panda_framework()->get_models();
    m_teapot = m_windowFramework->load_model(models, "../models/teapot");
    m_teapot.reparent_to(altRender);
    m_teapot.set_pos(0, 0, -1);

    const bool bakeInStart = true;
    const bool fluid = false;
    m_teapotInterval = new CLerpNodePathInterval("teapotInterval", 1.5,
            CLerpInterval::BT_no_blend, bakeInStart, fluid, m_teapot, NodePath());
    m_teapotInterval->set_start_hpr(m_teapot.get_hpr());
    m_teapotInterval->set_end_hpr(LVecBase3f(m_teapot.get_h()+360,
                                  m_teapot.get_p()+360,
                                  m_teapot.get_r()+360));
    m_teapotInterval->loop();

    // put some lighting on the teapot
    PT(DirectionalLight) dlight = new DirectionalLight("dlight");
    PT(AmbientLight) alight = new AmbientLight("alight");
    NodePath dlnp = altRender.attach_new_node(dlight);
    NodePath alnp = altRender.attach_new_node(alight);
    dlight->set_color(Colorf(0.8, 0.8, 0.5, 1));
    alight->set_color(Colorf(0.2, 0.2, 0.2, 1));
    dlnp.set_hpr(0, -60, 0);
    altRender.set_light(dlnp);
    altRender.set_light(alnp);

    // Panda contains a built-in viewer that lets you view the results of
    // your render-to-texture operations.  This code configures the viewer.

    WORLD_DEFINE_KEY("v", "toggleBufferViewer", toggle_buffer_viewer);
    m_bufferViewer = new CBufferViewer(m_windowFramework);
    m_bufferViewer->set_position(CBufferViewer::CP_llcorner);
    m_bufferViewer->set_card_size(1.0, 0.0);

    // Create the tv-men. Each TV-man will display the
    // offscreen-texture on his TV screen.
    make_tv_man(-5, 30,  1, altBuffer->get_texture(), 0.9);
    make_tv_man( 5, 30,  1, altBuffer->get_texture(), 1.4);
    make_tv_man( 0, 23, -3, altBuffer->get_texture(), 2.0);
    make_tv_man(-5, 20, -6, altBuffer->get_texture(), 1.1);
    make_tv_man( 5, 18, -5, altBuffer->get_texture(), 1.7);

    WORLD_DEFINE_KEY("escape", "exit", quit);
    WORLD_DEFINE_KEY("arrow_up", "zoomIn", zoom_in);
    WORLD_DEFINE_KEY("arrow_down", "zoomOut", zoom_out);
    WORLD_DEFINE_KEY("arrow_left", "moveLeft", move_left);
    WORLD_DEFINE_KEY("arrow_right", "moveRight", move_right);

    WORLD_ADD_TASK("worldAsyncTask", async_task);
}
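
The zoom and move handlers bound above are not shown in this listing. Mirroring the original render-to-texture tutorial, they presumably just move the offscreen camera along Y and slide the teapot along X; the step sizes below are guesses, and whether these are plain members or static trampolines depends on the WORLD_DEFINE_KEY macro, which is not shown.

// Hedged sketch of the key handlers; the real implementations may differ.
void World::zoom_in()    { m_altCam.set_y(m_altCam.get_y() * 0.9); }
void World::zoom_out()   { m_altCam.set_y(m_altCam.get_y() * 1.2); }
void World::move_left()  { m_teapot.set_x(m_teapot.get_x() + 1);   }
void World::move_right() { m_teapot.set_x(m_teapot.get_x() - 1);   }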
Code example #16
File: world.cpp  Project: drivird/drunken-octo-robot
World::World(WindowFramework* windowFrameworkPtr)
   : UP(0,0,1),
     m_windowFrameworkPtr(windowFrameworkPtr)
   {
   // preconditions
   if(m_windowFrameworkPtr == NULL)
      {
      nout << "ERROR: parameter windowFrameworkPtr cannot be NULL." << endl;
      return;
      }

   // This code puts the standard title and instruction text on screen
   COnscreenText title("title", COnscreenText::TS_plain);
   title.set_text("Panda3D: Tutorial - Collision Detection");
   title.set_fg(Colorf(1,1,1,1));
   title.set_pos(LVecBase2f(0.7,-0.95));
   title.set_scale(0.07);
   title.reparent_to(m_windowFrameworkPtr->get_aspect_2d());
   m_titleNp = title.generate();


   COnscreenText instructions("instructions");
   instructions.set_text("Mouse pointer tilts the board");
   instructions.set_pos(LVecBase2f(-1.3, 0.95));
   instructions.set_fg(Colorf(1,1,1,1));
   instructions.set_align(TextNode::A_left);
   instructions.set_scale(0.05);
   instructions.reparent_to(m_windowFrameworkPtr->get_aspect_2d());
   m_instructionsNp = instructions.generate();

   // Escape quits
   m_windowFrameworkPtr->enable_keyboard();
   m_windowFrameworkPtr->get_panda_framework()->define_key("escape", "sysExit", sys_exit, NULL);

   // Disable mouse-based camera control
   // Note: irrelevant in C++

   // Place the camera
   NodePath cameraNp = m_windowFrameworkPtr->get_camera_group();
   cameraNp.set_pos_hpr(0, 0, 25, 0, -90, 0);

   // Load the maze and place it in the scene
   NodePath modelsNp = m_windowFrameworkPtr->get_panda_framework()->get_models();
   m_mazeNp = m_windowFrameworkPtr->load_model(modelsNp, "../models/maze");
   NodePath renderNp = m_windowFrameworkPtr->get_render();
   m_mazeNp.reparent_to(renderNp);

   // Most times, you want collisions to be tested against invisible geometry
   // rather than every polygon. This is because testing against every polygon
   // in the scene is usually too slow. You can have simplified or approximate
   // geometry for the solids and still get good results.
   //
   // Sometimes you'll want to create and position your own collision solids in
   // code, but it's often easier to have them built automatically. This can be
   // done by adding special tags into an egg file. Check maze.egg and ball.egg
   // and look for lines starting with <Collide>. The part in brackets tells
   // Panda exactly what to do. Polyset means to use the polygons in that group
   // as solids, while Sphere tells Panda to make a collision sphere around them.
   // Keep means to keep the polygons in the group as visible geometry (good
   // for the ball, not for the triggers), and descend means to make sure that
   // the settings are applied to any subgroups.
   //
   // Once we have the collision tags in the models, we can get to them using
   // NodePath's find command

   // Find the collision node named wall_collide
   m_wallsNp = m_mazeNp.find("**/wall_collide");

   // Collision objects are sorted using BitMasks. BitMasks are ordinary numbers
   // with extra methods for working with them as binary bits. Every collision
   // solid has both a from mask and an into mask. Before Panda tests two
   // objects, it checks to make sure that the from and into collision masks
   // have at least one bit in common. That way things that shouldn't interact
   // won't. Normal model nodes have collision masks as well. By default they
   // are set to bit 20. If you want to collide against actual visible polygons,
   // set a from collide mask to include bit 20
   //
   // For this example, we will make everything we want the ball to collide with
   // include bit 0
   m_wallsNp.node()->set_into_collide_mask(BitMask32::bit(0));
   // CollisionNodes are usually invisible but can be shown. Uncomment the next
   // line to see the collision walls
   // m_wallsNp.show();

   // We will now find the triggers for the holes and set their masks to 0 as
   // well. We also set their names to make them easier to identify during
   // collisions
   m_loseTriggers.reserve(NB_HOLES);
   for(int i = 0; i < NB_HOLES; ++i)
      {
      ostringstream filename;
      filename << "**/hole_collide" << i;
      NodePath triggerNp = m_mazeNp.find(filename.str());
      triggerNp.node()->set_into_collide_mask(BitMask32::bit(0));
      triggerNp.node()->set_name("loseTrigger");
      m_loseTriggers.push_back(triggerNp);
      // Uncomment this line to see the triggers
      // triggerNp.show();
      }

   // Ground_collide is a single polygon on the same plane as the ground in the
   // maze. We will use a ray to collide with it so that we will know exactly
   // what height to put the ball at every frame. Since this is not something
   // that we want the ball itself to collide with, it has a different
   // bitmask.
   m_mazeGroundNp = m_mazeNp.find("**/ground_collide");
   m_mazeGroundNp.node()->set_into_collide_mask(BitMask32::bit(1));

   // Load the ball and attach it to the scene
   // It is on a root dummy node so that we can rotate the ball itself without
   // rotating the ray that will be attached to it
   m_ballRootNp = renderNp.attach_new_node("ballRoot");
   m_ballNp = m_windowFrameworkPtr->load_model(modelsNp, "../models/ball");
   m_ballNp.reparent_to(m_ballRootNp);

   // Find the collision sphere for the ball which was created in the egg file
   // Notice that it has a from collision mask of bit 0, and an into collision
   // mask of no bits. This means that the ball can only cause collisions, not
   // be collided into
   m_ballSphereNp = m_ballNp.find("**/ball");
   DCAST(CollisionNode, m_ballSphereNp.node())->set_from_collide_mask(BitMask32::bit(0));
   m_ballSphereNp.node()->set_into_collide_mask(BitMask32::all_off());

   // Now we create a ray to start above the ball and cast down. This is to
   // determine the height the ball should be at and the angle the floor is
   // tilting. We could have used the sphere around the ball itself, but it
   // would not be as reliable

   // Create the ray
   m_ballGroundRayPtr = new CollisionRay();
   if(m_ballGroundRayPtr != NULL)
      {
      // Set its origin
      m_ballGroundRayPtr->set_origin(0,0,10);
      // And its direction
      m_ballGroundRayPtr->set_direction(0,0,-1);
      // Collision solids go in CollisionNode
      // Create and name the node
      m_ballGroundColPtr = new CollisionNode("groundRay");
      if(m_ballGroundColPtr != NULL)
         {
         // Add the ray
         m_ballGroundColPtr->add_solid(m_ballGroundRayPtr);
         // Set its bitmasks
         m_ballGroundColPtr->set_from_collide_mask(BitMask32::bit(1));
         m_ballGroundColPtr->set_into_collide_mask(BitMask32::all_off());
         // Attach the node to the ballRoot so that the ray is relative to the ball
         // (it will always be 10 feet over the ball and point down)
         m_ballGroundColNp = m_ballRootNp.attach_new_node(m_ballGroundColPtr);
         // Uncomment this line to see the ray
         // m_ballGroundColNp.show();
         }
      }

   // Finally, we create a CollisionTraverser. CollisionTraversers are what
   // do the job of calculating collisions
   // Note: no need to in this implementation

   // Collision traversers tell collision handlers about collisions, and then
   // the handler decides what to do with the information. We are using a
   // CollisionHandlerQueue, which simply creates a list of all of the
   // collisions in a given pass. There are more sophisticated handlers like
   // one that sends events and another that tries to keep collided objects
   // apart, but the results are often better with a simple queue
   m_cHandlerPtr = new CollisionHandlerQueue();
   if(m_cHandlerPtr != NULL)
      {
      // Now we add the collision nodes that can create a collision to the
      // traverser. The traverser will compare these to all others nodes in the
      // scene. There is a limit of 32 CollisionNodes per traverser
      // We add the collider, and the handler to use as a pair
      m_cTrav.add_collider(m_ballSphereNp, m_cHandlerPtr);
      m_cTrav.add_collider(m_ballGroundColNp, m_cHandlerPtr);
      }

   // Collision traversers have a built in tool to help visualize collisions.
   // Uncomment the next line to see it.
   // m_cTrav.show_collisions(renderNp);

   // This section deals with lighting for the ball. Only the ball was lit
   // because the maze has static lighting pregenerated by the modeler
   PT(AmbientLight) ambientLightPtr = new AmbientLight("ambientLight");
   if(ambientLightPtr != NULL)
      {
      ambientLightPtr->set_color(Colorf(0.55, 0.55, 0.55, 1));
      m_ballRootNp.set_light(renderNp.attach_new_node(ambientLightPtr));
      }
   PT(DirectionalLight) directionalLightPtr = new DirectionalLight("directionalLight");
   if(directionalLightPtr != NULL)
      {
      directionalLightPtr->set_direction(LVecBase3f(0, 0, -1));
      directionalLightPtr->set_color(Colorf(0.375, 0.375, 0.375, 1));
      directionalLightPtr->set_specular_color(Colorf(1, 1, 1, 1));
      m_ballRootNp.set_light(renderNp.attach_new_node(directionalLightPtr));
      }

   // This section deals with adding a specular highlight to the ball to make
   // it look shiny
   PT(Material) materialPtr = new Material();
   if(materialPtr != NULL)
      {
      materialPtr->set_specular(Colorf(1,1,1,1));
      materialPtr->set_shininess(96);
      m_ballNp.set_material(materialPtr, 1);
      }

   // Finally, we call start for more initialization
   start();
   }
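
The "escape"/"sysExit" binding above points at a static event callback; Panda3D event callbacks take the Event plus the user-data pointer that was passed to define_key. A minimal sketch (the project's real handler may do cleanup first):

#include <cstdlib>

// Quit the tutorial when ESC is pressed.
void World::sys_exit(const Event* eventPtr, void* dataPtr)
   {
   exit(0);
   }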
Code example #17
File: world.cpp  Project: drivird/drunken-octo-robot
// If the ball hits a hole trigger, then it should fall in the hole.
// This is faked rather than dealing with the actual physics of it.
void World::lose_game(const CollisionEntry& entry)
   {
   // The triggers are set up so that the center of the ball should move to the
   // collision point to be in the hole
   NodePath renderNp = m_windowFrameworkPtr->get_render();
   LPoint3f toPos = entry.get_interior_point(renderNp);

   // Stop the maze task
   PT(GenericAsyncTask) rollTaskPtr = DCAST(GenericAsyncTask, AsyncTaskManager::get_global_ptr()->find_task("rollTask"));
   if(rollTaskPtr != NULL)
      {
      AsyncTaskManager::get_global_ptr()->remove(rollTaskPtr);
      }

   // Move the ball into the hole over a short sequence of time. Then wait a
   // second and call start to reset the game
   // Note: Sequence is a Python-only class. We have to manage the animation using CMetaInterval,
   //       with a done event that calls back into World::start() to restart the game when the animation finishes.
   PT(CLerpNodePathInterval) lerp1Ptr = new CLerpNodePathInterval("lerp1",
                                                                   0.1,
                                                                   CLerpInterval::BT_no_blend,
                                                                   true,
                                                                   false,
                                                                   m_ballRootNp,
                                                                   NodePath());

   PT(CLerpNodePathInterval) lerp2Ptr = new CLerpNodePathInterval("lerp2",
                                                                  0.1,
                                                                  CLerpInterval::BT_no_blend,
                                                                  true,
                                                                  false,
                                                                  m_ballRootNp,
                                                                  NodePath());

   PT(WaitInterval) waitPtr = new WaitInterval(1);

   PT(CMetaInterval) cMetaIntervalPtr = new CMetaInterval("sequence");

   if(lerp1Ptr         == NULL ||
      lerp2Ptr         == NULL ||
      waitPtr          == NULL ||
      cMetaIntervalPtr == NULL)
      {
      nout << "ERROR: out of memory" << endl;
      return;
      }

   float endPosZ = m_ballRootNp.get_pos().get_z() - 0.9;
   LVecBase3f midEndPos(toPos.get_x(),
                       toPos.get_y(),
                       0.5*(m_ballRootNp.get_pos().get_z()+endPosZ));
   lerp1Ptr->set_end_pos(midEndPos);
   LVecBase3f endPos(toPos.get_x(),
                     toPos.get_y(),
                     endPosZ);
   lerp2Ptr->set_end_pos(endPos);

   cMetaIntervalPtr->add_c_interval(lerp1Ptr, 0, CMetaInterval::RS_previous_end);
   cMetaIntervalPtr->add_c_interval(lerp2Ptr, 0, CMetaInterval::RS_previous_end);
   cMetaIntervalPtr->add_c_interval(waitPtr , 0, CMetaInterval::RS_previous_end);
   cMetaIntervalPtr->set_done_event("restartGame");
   cMetaIntervalPtr->start();

   EventHandler::get_global_event_handler()->add_hook("restartGame", call_start, this);

   PT(GenericAsyncTask) intervalManagerTaskPtr = DCAST(GenericAsyncTask, AsyncTaskManager::get_global_ptr()->find_task("intervalManagerTask"));
   if(intervalManagerTaskPtr == NULL)
      {
      intervalManagerTaskPtr = new GenericAsyncTask("intervalManagerTask", step_interval_manager, NULL);
      if(intervalManagerTaskPtr != NULL)
         {
         AsyncTaskManager::get_global_ptr()->add(intervalManagerTaskPtr);
         }
      }
   }
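
lose_game() relies on two more static helpers that are not shown here: step_interval_manager, which has to pump the global CIntervalManager every frame so the lerp sequence actually plays, and call_start, the hook for the "restartGame" done event. Rough sketches, assuming the usual Panda3D APIs:

// Advance all C++ intervals (CLerpNodePathInterval, CMetaInterval, ...) once per frame.
AsyncTask::DoneStatus World::step_interval_manager(GenericAsyncTask* taskPtr, void* dataPtr)
   {
   CIntervalManager::get_global_ptr()->step();
   return AsyncTask::DS_cont;
   }

// Restart the game when the "restartGame" done event fires.
void World::call_start(const Event* eventPtr, void* dataPtr)
   {
   static_cast<World*>(dataPtr)->start();
   }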