
Implement rendering pipeline and post-processing (#12465)

Co-authored-by: Lars Mueller <appgurulars@gmx.de>
Co-authored-by: sfan5 <sfan5@live.de>
Co-authored-by: lhofhansl <lhofhansl@yahoo.com>
x2048 2022-09-06 08:25:18 +02:00 committed by GitHub
parent 464043b8ab
commit ff6dcfea82
GPG key ID: 4AEE18F83AFDEB23 (no known key found for this signature in database)
32 changed files with 1476 additions and 565 deletions


@@ -19,56 +19,73 @@ with this program; if not, write to the Free Software Foundation, Inc.,
 */
 #include "sidebyside.h"
 #include <ICameraSceneNode.h>
 #include "client/hud.h"
 #include "client/camera.h"
-RenderingCoreSideBySide::RenderingCoreSideBySide(
-	IrrlichtDevice *_device, Client *_client, Hud *_hud, bool _horizontal, bool _flipped)
-	: RenderingCoreStereo(_device, _client, _hud), horizontal(_horizontal), flipped(_flipped)
+DrawImageStep::DrawImageStep(u8 texture_index, v2f _offset) :
+	texture_index(texture_index), offset(_offset)
 {}
+void DrawImageStep::setRenderSource(RenderSource *_source)
+{
+	source = _source;
+}
+void DrawImageStep::setRenderTarget(RenderTarget *_target)
+{
+	target = _target;
+}
-void RenderingCoreSideBySide::initTextures()
+void DrawImageStep::run(PipelineContext &context)
 {
+	if (target)
+		target->activate(context);
+	auto texture = source->getTexture(texture_index);
+	core::dimension2du output_size = context.device->getVideoDriver()->getScreenSize();
+	v2s32 pos(offset.X * output_size.Width, offset.Y * output_size.Height);
+	context.device->getVideoDriver()->draw2DImage(texture, pos);
+}
+void populateSideBySidePipeline(RenderPipeline *pipeline, Client *client, bool horizontal, bool flipped, v2f &virtual_size_scale)
+{
+	static const u8 TEXTURE_LEFT = 0;
+	static const u8 TEXTURE_RIGHT = 1;
+	v2f offset;
 	if (horizontal) {
-		image_size = {screensize.X, screensize.Y / 2};
-		rpos = v2s32(0, screensize.Y / 2);
-	} else {
-		image_size = {screensize.X / 2, screensize.Y};
-		rpos = v2s32(screensize.X / 2, 0);
+		virtual_size_scale = v2f(1.0f, 0.5f);
+		offset = v2f(0.0f, 0.5f);
 	}
+	else {
+		virtual_size_scale = v2f(0.5f, 1.0f);
+		offset = v2f(0.5f, 0.0f);
+	}
-	virtual_size = image_size;
-	left = driver->addRenderTargetTexture(
-			image_size, "3d_render_left", video::ECF_A8R8G8B8);
-	right = driver->addRenderTargetTexture(
-			image_size, "3d_render_right", video::ECF_A8R8G8B8);
-}
-void RenderingCoreSideBySide::clearTextures()
-{
-	driver->removeTexture(left);
-	driver->removeTexture(right);
-}
+	TextureBuffer *buffer = pipeline->createOwned<TextureBuffer>();
+	buffer->setTexture(TEXTURE_LEFT, virtual_size_scale, "3d_render_left", video::ECF_A8R8G8B8);
+	buffer->setTexture(TEXTURE_RIGHT, virtual_size_scale, "3d_render_right", video::ECF_A8R8G8B8);
-void RenderingCoreSideBySide::drawAll()
-{
-	driver->OnResize(image_size); // HACK to make GUI smaller
-	renderBothImages();
-	driver->OnResize(screensize);
-	driver->draw2DImage(left, {});
-	driver->draw2DImage(right, rpos);
-}
+	auto step3D = pipeline->own(create3DStage(client, virtual_size_scale));
-void RenderingCoreSideBySide::useEye(bool _right)
-{
-	driver->setRenderTarget(_right ? right : left, true, true, skycolor);
-	RenderingCoreStereo::useEye(_right ^ flipped);
-}
+	// eyes
+	for (bool right : { false, true }) {
+		pipeline->addStep<OffsetCameraStep>(flipped ? !right : right);
+		auto output = pipeline->createOwned<TextureBufferOutput>(buffer, right ? TEXTURE_RIGHT : TEXTURE_LEFT);
+		pipeline->addStep<SetRenderTargetStep>(step3D, output);
+		pipeline->addStep(step3D);
+		pipeline->addStep<MapPostFxStep>();
+		pipeline->addStep<DrawHUD>();
+	}
-void RenderingCoreSideBySide::resetEye()
-{
-	hud->resizeHotbar();
-	drawHUD();
-	driver->setRenderTarget(nullptr, false, false, skycolor);
-	RenderingCoreStereo::resetEye();
-}
+	pipeline->addStep<OffsetCameraStep>(0.0f);
+	auto screen = pipeline->createOwned<ScreenTarget>();
+	for (bool right : { false, true }) {
+		auto step = pipeline->addStep<DrawImageStep>(
+				right ? TEXTURE_RIGHT : TEXTURE_LEFT,
+				right ? offset : v2f());
+		step->setRenderSource(buffer);
+		step->setRenderTarget(screen);
+	}
+}
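
For orientation, the sketch below shows the general shape of a pipeline populator built only from calls visible in the hunk above (createOwned, own, addStep, setRenderSource, setRenderTarget). It is a minimal illustration, not part of this commit: the function name populateMirrorPipeline, the single TEXTURE_COLOR slot, and the "3d_render_mirror" texture name are hypothetical.

// Minimal sketch, not from this commit: render the 3D view into one owned
// texture and blit it to the screen with the new DrawImageStep.
void populateMirrorPipeline(RenderPipeline *pipeline, Client *client, v2f &virtual_size_scale)
{
	static const u8 TEXTURE_COLOR = 0; // hypothetical texture slot

	// Full-resolution render texture owned by the pipeline.
	virtual_size_scale = v2f(1.0f, 1.0f);
	TextureBuffer *buffer = pipeline->createOwned<TextureBuffer>();
	buffer->setTexture(TEXTURE_COLOR, virtual_size_scale, "3d_render_mirror", video::ECF_A8R8G8B8);

	// Draw the scene and the HUD into the texture buffer.
	auto step3D = pipeline->own(create3DStage(client, virtual_size_scale));
	auto output = pipeline->createOwned<TextureBufferOutput>(buffer, TEXTURE_COLOR);
	pipeline->addStep<SetRenderTargetStep>(step3D, output);
	pipeline->addStep(step3D);
	pipeline->addStep<DrawHUD>();

	// Finally blit the texture to the top-left corner of the screen.
	auto screen = pipeline->createOwned<ScreenTarget>();
	auto step = pipeline->addStep<DrawImageStep>(TEXTURE_COLOR, v2f());
	step->setRenderSource(buffer);
	step->setRenderTarget(screen);
}

The side-by-side populator in the diff follows the same pattern, except that it allocates two texture slots, scales the virtual size to half the screen, and wires one DrawImageStep per eye with an offset for the second image.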