Blender V4.5
node_composite_map_uv.cc
/* SPDX-FileCopyrightText: 2006 Blender Authors
 *
 * SPDX-License-Identifier: GPL-2.0-or-later */

/** \file
 * \ingroup cmpnodes
 */

#include "BLI_math_base.hh"
#include "BLI_math_vector.hh"
#include "BLI_math_vector_types.hh"

#include "GPU_shader.hh"
#include "GPU_texture.hh"

#include "UI_interface.hh"
#include "UI_resources.hh"

#include "COM_node_operation.hh"
#include "COM_utilities.hh"

#include "node_composite_util.hh"

/* **************** Map UV ******************** */

namespace blender::nodes::node_composite_map_uv_cc {

static void cmp_node_map_uv_declare(NodeDeclarationBuilder &b)
{
  b.add_input<decl::Color>("Image")
      .default_value({1.0f, 1.0f, 1.0f, 1.0f})
      .compositor_realization_mode(CompositorInputRealizationMode::Transforms);
  b.add_input<decl::Vector>("UV")
      .default_value({1.0f, 0.0f, 0.0f})
      .min(0.0f)
      .max(1.0f)
      .compositor_domain_priority(0);
  b.add_output<decl::Color>("Image");
}

static void node_composit_buts_map_uv(uiLayout *layout, bContext * /*C*/, PointerRNA *ptr)
{
  layout->prop(ptr, "filter_type", UI_ITEM_R_SPLIT_EMPTY_NAME, "", ICON_NONE);
}

static void node_composit_init_map_uv(bNodeTree * /*ntree*/, bNode *node)
{
  node->custom2 = CMP_NODE_MAP_UV_FILTERING_ANISOTROPIC;
}

using namespace blender::compositor;

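/* Compositor operation that maps the input image onto the output using the coordinates given by
 * the UV input, sampling with either nearest neighbour or anisotropic filtering. */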
class MapUVOperation : public NodeOperation {
 public:
  using NodeOperation::NodeOperation;

  void execute() override
  {
    const Result &input = this->get_input("Image");
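    /* A single value input is unaffected by UV mapping, so just share its data with the output
     * and skip any further processing. */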
    if (input.is_single_value()) {
      Result &output = this->get_result("Image");
      output.share_data(input);
      return;
    }

    if (this->context().use_gpu()) {
      this->execute_gpu();
    }
    else {
      this->execute_cpu();
    }
  }

  void execute_gpu()
  {
    GPUShader *shader = context().get_shader(get_shader_name());
    GPU_shader_bind(shader);

    const Result &input_image = get_input("Image");
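    /* Nearest neighbour filtering disables mipmapping and anisotropic filtering on the input
     * sampler, while anisotropic filtering enables both. */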
    if (this->get_nearest_neighbour()) {
      GPU_texture_mipmap_mode(input_image, false, false);
      GPU_texture_anisotropic_filter(input_image, false);
    }
    else {
      GPU_texture_mipmap_mode(input_image, true, true);
      GPU_texture_anisotropic_filter(input_image, true);
    }

    GPU_texture_extend_mode(input_image, GPU_SAMPLER_EXTEND_MODE_CLAMP_TO_BORDER);
    input_image.bind_as_texture(shader, "input_tx");

    const Result &input_uv = get_input("UV");
    input_uv.bind_as_texture(shader, "uv_tx");

    const Domain domain = compute_domain();
    Result &output_image = get_result("Image");
    output_image.allocate_texture(domain);
    output_image.bind_as_image(shader, "output_img");

    compute_dispatch_threads_at_least(shader, domain.size);

    input_image.unbind_as_texture();
    input_uv.unbind_as_texture();
    output_image.unbind_as_image();
    GPU_shader_unbind();
  }

  char const *get_shader_name()
  {
    return get_nearest_neighbour() ? "compositor_map_uv_nearest_neighbour" :
                                     "compositor_map_uv_anisotropic";
  }

  void execute_cpu()
  {
    const Result &input_uv = get_input("UV");
    if (input_uv.is_single_value()) {
      this->execute_single_cpu();
      return;
    }

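    /* Otherwise, sample using either nearest neighbour or anisotropic filtering depending on the
     * node's filter type. */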
    if (this->get_nearest_neighbour()) {
      this->execute_cpu_nearest();
    }
    else {
      this->execute_cpu_anisotropic();
    }
  }

  void execute_single_cpu()
  {
    const Result &input_uv = get_input("UV");
    const Result &input_image = get_input("Image");

    float2 uv_coordinates = input_uv.get_single_value<float3>().xy();
    float4 sampled_color = input_image.sample_nearest_zero(uv_coordinates);

    /* The UV input is assumed to contain an alpha channel as its third channel, since the
     * UV coordinates might be defined in only a subset area of the UV texture as mentioned.
     * In that case, the alpha is typically opaque at the subset area and transparent
     * everywhere else, and alpha pre-multiplication is then performed. This format of having
     * an alpha channel in the UV coordinates is the format used by UV passes in render
     * engines, hence the mentioned logic. */
    float alpha = input_uv.get_single_value<float3>().z;

    float4 result = sampled_color * alpha;

    Result &output = get_result("Image");
    output.allocate_single_value();
    output.set_single_value(result);
  }

  void execute_cpu_nearest()
  {
    const Result &input_image = get_input("Image");
    const Result &input_uv = get_input("UV");

    const Domain domain = compute_domain();
    Result &output_image = get_result("Image");
    output_image.allocate_texture(domain);

    parallel_for(domain.size, [&](const int2 texel) {
      float2 uv_coordinates = input_uv.load_pixel<float3>(texel).xy();

      float4 sampled_color = input_image.sample_nearest_zero(uv_coordinates);

      /* The UV input is assumed to contain an alpha channel as its third channel, since the
       * UV coordinates might be defined in only a subset area of the UV texture as mentioned.
       * In that case, the alpha is typically opaque at the subset area and transparent
       * everywhere else, and alpha pre-multiplication is then performed. This format of having
       * an alpha channel in the UV coordinates is the format used by UV passes in render
       * engines, hence the mentioned logic. */
      float alpha = input_uv.load_pixel<float3>(texel).z;

      float4 result = sampled_color * alpha;

      output_image.store_pixel(texel, result);
    });
  }

  void execute_cpu_anisotropic()
  {
    const Result &input_image = get_input("Image");
    const Result &input_uv = get_input("UV");

    const Domain domain = compute_domain();
    Result &output_image = get_result("Image");
    output_image.allocate_texture(domain);

    /* In order to perform EWA sampling, we need to compute the partial derivative of the UV
     * coordinates along the x and y directions using a finite difference approximation. But in
     * order to avoid loading multiple neighboring UV coordinates for each pixel, we operate on
     * the image in 2x2 blocks of pixels, where the derivatives are computed horizontally and
     * vertically across the 2x2 block such that odd texels use a forward finite difference
     * equation while even invocations use a backward finite difference equation. */
    const int2 size = domain.size;
    const int2 uv_size = input_uv.domain().size;
    parallel_for(math::divide_ceil(size, int2(2)), [&](const int2 base_texel) {
      const int x = base_texel.x * 2;
      const int y = base_texel.y * 2;

      const int2 lower_left_texel = int2(x, y);
      const int2 lower_right_texel = int2(x + 1, y);
      const int2 upper_left_texel = int2(x, y + 1);
      const int2 upper_right_texel = int2(x + 1, y + 1);

      const float2 lower_left_uv = input_uv.load_pixel<float3>(lower_left_texel).xy();
      const float2 lower_right_uv = input_uv.load_pixel_extended<float3>(lower_right_texel).xy();
      const float2 upper_left_uv = input_uv.load_pixel_extended<float3>(upper_left_texel).xy();
      const float2 upper_right_uv = input_uv.load_pixel_extended<float3>(upper_right_texel).xy();

      /* Compute the partial derivatives using finite difference. Divide by the input size since
       * sample_ewa_zero assumes derivatives with respect to texel coordinates. */
      const float2 lower_x_gradient = (lower_right_uv - lower_left_uv) / uv_size.x;
      const float2 left_y_gradient = (upper_left_uv - lower_left_uv) / uv_size.y;
      const float2 right_y_gradient = (upper_right_uv - lower_right_uv) / uv_size.y;
      const float2 upper_x_gradient = (upper_right_uv - upper_left_uv) / uv_size.x;

      /* Computes one of the 2x2 pixels given its texel location, coordinates, and gradients. */
      auto compute_pixel = [&](const int2 &texel,
                               const float2 &coordinates,
                               const float2 &x_gradient,
                               const float2 &y_gradient) {
        /* Sample the input using the UV coordinates passing in the computed gradients in order
         * to utilize the anisotropic filtering capabilities of the sampler. */
        float4 sampled_color = input_image.sample_ewa_zero(coordinates, x_gradient, y_gradient);

        /* The UV input is assumed to contain an alpha channel as its third channel, since the
         * UV coordinates might be defined in only a subset area of the UV texture as mentioned.
         * In that case, the alpha is typically opaque at the subset area and transparent
         * everywhere else, and alpha pre-multiplication is then performed. This format of having
         * an alpha channel in the UV coordinates is the format used by UV passes in render
         * engines, hence the mentioned logic. */
        float alpha = input_uv.load_pixel<float3>(texel).z;

        float4 result = sampled_color * alpha;

        output_image.store_pixel(texel, result);
      };

      /* Compute each of the pixels in the 2x2 block, making sure to exempt out of bounds right
       * and upper pixels. */
      compute_pixel(lower_left_texel, lower_left_uv, lower_x_gradient, left_y_gradient);
      if (lower_right_texel.x != size.x) {
        compute_pixel(lower_right_texel, lower_right_uv, lower_x_gradient, right_y_gradient);
      }
      if (upper_left_texel.y != size.y) {
        compute_pixel(upper_left_texel, upper_left_uv, upper_x_gradient, left_y_gradient);
      }
      if (upper_right_texel.x != size.x && upper_right_texel.y != size.y) {
        compute_pixel(upper_right_texel, upper_right_uv, upper_x_gradient, right_y_gradient);
      }
    });
  }

  bool get_nearest_neighbour()
  {
    return bnode().custom2 == CMP_NODE_MAP_UV_FILTERING_NEAREST;
  }
};

static NodeOperation *get_compositor_operation(Context &context, DNode node)
{
  return new MapUVOperation(context, node);
}

}  // namespace blender::nodes::node_composite_map_uv_cc

static void register_node_type_cmp_mapuv()
{
  namespace file_ns = blender::nodes::node_composite_map_uv_cc;

  static blender::bke::bNodeType ntype;

  cmp_node_type_base(&ntype, "CompositorNodeMapUV", CMP_NODE_MAP_UV);
  ntype.ui_name = "Map UV";
  ntype.ui_description =
      "Map a texture using UV coordinates, to apply a texture to objects in compositing";
  ntype.enum_name_legacy = "MAP_UV";
  ntype.nclass = NODE_CLASS_DISTORT;
  ntype.declare = file_ns::cmp_node_map_uv_declare;
  ntype.draw_buttons = file_ns::node_composit_buts_map_uv;
  ntype.get_compositor_operation = file_ns::get_compositor_operation;
  ntype.initfunc = file_ns::node_composit_init_map_uv;

  blender::bke::node_register_type(ntype);
}

NOD_REGISTER_NODE(register_node_type_cmp_mapuv)