Blender V4.3
tracking_util.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2011 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
11
12#include <cstddef>
13
14#include "MEM_guardedalloc.h"
15
16#include "DNA_movieclip_types.h"
17
18#include "BLI_ghash.h"
19#include "BLI_listbase.h"
20#include "BLI_math_color.h"
21#include "BLI_math_vector.h"
22#include "BLI_string.h"
23#include "BLI_string_utils.hh"
24#include "BLI_threads.h"
25#include "BLI_utildefines.h"
26
27#include "BLT_translation.hh"
28
29#include "BKE_movieclip.h"
30#include "BKE_tracking.h"
31
32#include "IMB_imbuf.hh"
33#include "IMB_imbuf_types.hh"
34
35#include "tracking_private.h"
36
37#include "libmv-capi.h"
38
39/* Uncomment this to have caching-specific debug prints. */
40// #define DEBUG_CACHE
41
42#ifdef DEBUG_CACHE
43# define CACHE_PRINTF(...) printf(__VA_ARGS__)
44#else
45# define CACHE_PRINTF(...)
46#endif
47
48/* -------------------------------------------------------------------- */
51
52TracksMap *tracks_map_new(const char *object_name, int num_tracks)
53{
54 TracksMap *map = MEM_cnew<TracksMap>("TrackingsMap");
55
56 STRNCPY(map->object_name, object_name);
57
58 map->num_tracks = num_tracks;
59
60 map->tracks = MEM_cnew_array<MovieTrackingTrack>(num_tracks, "TrackingsMap tracks");
61
62 map->hash = BLI_ghash_ptr_new("TracksMap hash");
63
65
66 return map;
67}
68
70{
71 return map->num_tracks;
72}
73
75{
76 MovieTrackingTrack new_track = *track;
77
78 new_track.markers = static_cast<MovieTrackingMarker *>(MEM_dupallocN(new_track.markers));
79
80 map->tracks[map->ptr] = new_track;
81
82 BLI_ghash_insert(map->hash, &map->tracks[map->ptr], track);
83
84 map->ptr++;
85}
86
88{
89 MovieTrackingTrack *track;
90 ListBase tracks = {nullptr, nullptr}, new_tracks = {nullptr, nullptr};
91 ListBase *old_tracks;
92
93 MovieTrackingObject *tracking_object = BKE_tracking_object_get_named(tracking, map->object_name);
94 if (!tracking_object) {
95 /* object was deleted by user, create new one */
96 tracking_object = BKE_tracking_object_add(tracking, map->object_name);
97 }
98
99 old_tracks = &tracking_object->tracks;
100
101 /* duplicate currently operating tracks to temporary list.
102 * this is needed to keep names in unique state and it's faster to change names
103 * of currently operating tracks (if needed)
104 */
105 for (int a = 0; a < map->num_tracks; a++) {
106 MovieTrackingTrack *old_track;
107 bool mapped_to_old = false;
108
109 track = &map->tracks[a];
110
111 /* find original of operating track in list of previously displayed tracks */
112 old_track = static_cast<MovieTrackingTrack *>(BLI_ghash_lookup(map->hash, track));
113 if (old_track) {
114 if (BLI_findindex(old_tracks, old_track) != -1) {
115 BLI_remlink(old_tracks, old_track);
116
118
119 /* Copy flags like selection back to the track map. */
120 track->flag = old_track->flag;
121 track->pat_flag = old_track->pat_flag;
122 track->search_flag = old_track->search_flag;
123
124 /* Copy all the rest settings back from the map to the actual tracks. */
125 MEM_freeN(old_track->markers);
126 *old_track = *track;
127 old_track->markers = static_cast<MovieTrackingMarker *>(MEM_dupallocN(old_track->markers));
128
130
131 BLI_addtail(&tracks, old_track);
132
133 mapped_to_old = true;
134 }
135 }
136
137 if (mapped_to_old == false) {
139
140 /* Update old-new track mapping */
141 BLI_ghash_reinsert(map->hash, track, new_track, nullptr, nullptr);
142
143 BLI_addtail(&tracks, new_track);
144 }
145 }
146
147 /* move all tracks, which aren't operating */
148 track = static_cast<MovieTrackingTrack *>(old_tracks->first);
149 while (track) {
150 MovieTrackingTrack *next = track->next;
151 BLI_addtail(&new_tracks, track);
152 track = next;
153 }
154
155 /* now move all tracks which are currently operating and keep their names unique */
156 track = static_cast<MovieTrackingTrack *>(tracks.first);
157 while (track) {
158 MovieTrackingTrack *next = track->next;
159
160 BLI_remlink(&tracks, track);
161
162 track->next = track->prev = nullptr;
163 BLI_addtail(&new_tracks, track);
164
165 BLI_uniquename(&new_tracks,
166 track,
168 '.',
170 sizeof(track->name));
171
172 track = next;
173 }
174
175 *old_tracks = new_tracks;
176}
177
179{
180 BLI_ghash_free(map->hash, nullptr, nullptr);
181
182 for (int i = 0; i < map->num_tracks; i++) {
184 }
185
186 MEM_freeN(map->tracks);
187
188 BLI_spin_end(&map->spin_lock);
189
190 MEM_freeN(map);
191}
192
194
195/* -------------------------------------------------------------------- */
198
199/* Three coordinate frames: Frame, Search, and Marker
200 * Two units: Pixels, Unified
201 * Notation: {coordinate frame}_{unit}; for example, "search_pixel" are search
202 * window relative coordinates in pixels, and "frame_unified" are unified 0..1
203 * coordinates relative to the entire frame.
204 */
/**
 * Convert unified 0..1 frame-relative coordinates into pixel coordinates.
 */
static void unified_to_pixel(int frame_width,
                             int frame_height,
                             const float unified_coords[2],
                             float pixel_coords[2])
{
  const float frame_size[2] = {float(frame_width), float(frame_height)};
  for (int axis = 0; axis < 2; axis++) {
    pixel_coords[axis] = unified_coords[axis] * frame_size[axis];
  }
}
213
215 const float marker_unified_coords[2],
216 float frame_unified_coords[2])
217{
218 frame_unified_coords[0] = marker_unified_coords[0] + marker->pos[0];
219 frame_unified_coords[1] = marker_unified_coords[1] + marker->pos[1];
220}
221
223 int frame_height,
224 const MovieTrackingMarker *marker,
225 const float marker_unified_coords[2],
226 float frame_pixel_coords[2])
227{
228 marker_to_frame_unified(marker, marker_unified_coords, frame_pixel_coords);
229 unified_to_pixel(frame_width, frame_height, frame_pixel_coords, frame_pixel_coords);
230}
231
233 int frame_height,
234 const MovieTrackingMarker *marker,
235 float frame_pixel[2])
236{
237 /* Get the lower left coordinate of the search window and snap to pixel coordinates */
239 frame_width, frame_height, marker, marker->search_min, frame_pixel);
240 frame_pixel[0] = int(frame_pixel[0]);
241 frame_pixel[1] = int(frame_pixel[1]);
242}
243
/**
 * Convert frame pixel coordinates into unified 0..1 frame-relative coordinates.
 */
static void pixel_to_unified(int frame_width,
                             int frame_height,
                             const float pixel_coords[2],
                             float unified_coords[2])
{
  const float frame_size[2] = {float(frame_width), float(frame_height)};
  for (int axis = 0; axis < 2; axis++) {
    unified_coords[axis] = pixel_coords[axis] / frame_size[axis];
  }
}
252
253static void marker_unified_to_search_pixel(int frame_width,
254 int frame_height,
255 const MovieTrackingMarker *marker,
256 const float marker_unified[2],
257 float search_pixel[2])
258{
259 float frame_pixel[2];
260 float search_origin_frame_pixel[2];
261
263 frame_width, frame_height, marker, marker_unified, frame_pixel);
265 frame_width, frame_height, marker, search_origin_frame_pixel);
266 sub_v2_v2v2(search_pixel, frame_pixel, search_origin_frame_pixel);
267}
268
269static void search_pixel_to_marker_unified(int frame_width,
270 int frame_height,
271 const MovieTrackingMarker *marker,
272 const float search_pixel[2],
273 float marker_unified[2])
274{
275 float frame_unified[2];
276 float search_origin_frame_pixel[2];
277
279 frame_width, frame_height, marker, search_origin_frame_pixel);
280 add_v2_v2v2(frame_unified, search_pixel, search_origin_frame_pixel);
281 pixel_to_unified(frame_width, frame_height, frame_unified, frame_unified);
282
283 /* marker pos is in frame unified */
284 sub_v2_v2v2(marker_unified, frame_unified, marker->pos);
285}
286
288 int frame_height,
289 const MovieTrackingMarker *marker,
290 double search_pixel_x[5],
291 double search_pixel_y[5])
292{
293 float unified_coords[2];
294 float pixel_coords[2];
295
296 /* Convert the corners into search space coordinates. */
297 for (int i = 0; i < 4; i++) {
299 frame_width, frame_height, marker, marker->pattern_corners[i], pixel_coords);
300 search_pixel_x[i] = pixel_coords[0] - 0.5f;
301 search_pixel_y[i] = pixel_coords[1] - 0.5f;
302 }
303
304 /* Convert the center position (aka "pos"); this is the origin */
305 unified_coords[0] = 0.0f;
306 unified_coords[1] = 0.0f;
307 marker_unified_to_search_pixel(frame_width, frame_height, marker, unified_coords, pixel_coords);
308
309 search_pixel_x[4] = pixel_coords[0] - 0.5f;
310 search_pixel_y[4] = pixel_coords[1] - 0.5f;
311}
312
314 int frame_height,
315 MovieTrackingMarker *marker,
316 const double search_pixel_x[5],
317 const double search_pixel_y[5])
318{
319 float marker_unified[2];
320 float search_pixel[2];
321
322 /* Convert the corners into search space coordinates. */
323 for (int i = 0; i < 4; i++) {
324 search_pixel[0] = search_pixel_x[i] + 0.5;
325 search_pixel[1] = search_pixel_y[i] + 0.5;
327 frame_width, frame_height, marker, search_pixel, marker->pattern_corners[i]);
328 }
329
330 /* Convert the center position (aka "pos"); this is the origin */
331 search_pixel[0] = search_pixel_x[4] + 0.5;
332 search_pixel[1] = search_pixel_y[4] + 0.5;
333 search_pixel_to_marker_unified(frame_width, frame_height, marker, search_pixel, marker_unified);
334
335 /* If the tracker tracked nothing, then "marker_unified" would be zero.
336 * Otherwise, the entire patch shifted, and that delta should be applied to
337 * all the coordinates.
338 */
339 for (int i = 0; i < 4; i++) {
340 marker->pattern_corners[i][0] -= marker_unified[0];
341 marker->pattern_corners[i][1] -= marker_unified[1];
342 }
343
344 marker->pos[0] += marker_unified[0];
345 marker->pos[1] += marker_unified[1];
346}
347
/**
 * Convert a normalized principal point (offset from the frame center measured
 * in units of half the frame size, so (0, 0) is the center) to pixels.
 */
void tracking_principal_point_normalized_to_pixel(const float principal_point_normalized[2],
                                                  const int frame_width,
                                                  const int frame_height,
                                                  float r_principal_point_pixel[2])
{
  const float half_width = float(frame_width) / 2;
  const float half_height = float(frame_height) / 2;

  r_principal_point_pixel[0] = half_width + principal_point_normalized[0] * half_width;
  r_principal_point_pixel[1] = half_height + principal_point_normalized[1] * half_height;
}
359
/**
 * Convert a principal point in pixels to normalized form: offset from the
 * frame center in units of half the frame size (inverse of the
 * normalized-to-pixel conversion).
 */
void tracking_principal_point_pixel_to_normalized(const float principal_point_pixel[2],
                                                  const int frame_width,
                                                  const int frame_height,
                                                  float r_principal_point_normalized[2])
{
  const float half_width = float(frame_width) / 2;
  const float half_height = float(frame_height) / 2;

  r_principal_point_normalized[0] = (principal_point_pixel[0] - half_width) / half_width;
  r_principal_point_normalized[1] = (principal_point_pixel[1] - half_height) / half_height;
}
371
373
374/* -------------------------------------------------------------------- */
377
379 const MovieTrackingMarker *ref_marker,
380 bool before,
381 bool overwrite)
382{
383 MovieTrackingMarker marker_new;
384
385 marker_new = *ref_marker;
386 marker_new.flag &= ~MARKER_TRACKED;
387 marker_new.flag |= MARKER_DISABLED;
388
389 if (before) {
390 marker_new.framenr--;
391 }
392 else {
393 marker_new.framenr++;
394 }
395
396 if (overwrite || !BKE_tracking_track_has_marker_at_frame(track, marker_new.framenr)) {
397 BKE_tracking_marker_insert(track, &marker_new);
398 }
399}
400
402 const MovieTrackingCamera *camera, libmv_CameraIntrinsicsOptions *camera_intrinsics_options)
403{
404 switch (camera->distortion_model) {
406 camera_intrinsics_options->distortion_model = LIBMV_DISTORTION_MODEL_POLYNOMIAL;
407 camera_intrinsics_options->polynomial_k1 = camera->k1;
408 camera_intrinsics_options->polynomial_k2 = camera->k2;
409 camera_intrinsics_options->polynomial_k3 = camera->k3;
410 camera_intrinsics_options->polynomial_p1 = 0.0;
411 camera_intrinsics_options->polynomial_p2 = 0.0;
412 return;
413
415 camera_intrinsics_options->distortion_model = LIBMV_DISTORTION_MODEL_DIVISION;
416 camera_intrinsics_options->division_k1 = camera->division_k1;
417 camera_intrinsics_options->division_k2 = camera->division_k2;
418 return;
419
421 camera_intrinsics_options->distortion_model = LIBMV_DISTORTION_MODEL_NUKE;
422 camera_intrinsics_options->nuke_k1 = camera->nuke_k1;
423 camera_intrinsics_options->nuke_k2 = camera->nuke_k2;
424 return;
426 camera_intrinsics_options->distortion_model = LIBMV_DISTORTION_MODEL_BROWN;
427 camera_intrinsics_options->brown_k1 = camera->brown_k1;
428 camera_intrinsics_options->brown_k2 = camera->brown_k2;
429 camera_intrinsics_options->brown_k3 = camera->brown_k3;
430 camera_intrinsics_options->brown_k4 = camera->brown_k4;
431 camera_intrinsics_options->brown_p1 = camera->brown_p1;
432 camera_intrinsics_options->brown_p2 = camera->brown_p2;
433 return;
434 }
435
436 /* Unknown distortion model, which might be due to opening newer file in older Blender.
437 * Fallback to a known and supported model with 0 distortion. */
438 camera_intrinsics_options->distortion_model = LIBMV_DISTORTION_MODEL_POLYNOMIAL;
439 camera_intrinsics_options->polynomial_k1 = 0.0;
440 camera_intrinsics_options->polynomial_k2 = 0.0;
441 camera_intrinsics_options->polynomial_k3 = 0.0;
442 camera_intrinsics_options->polynomial_p1 = 0.0;
443 camera_intrinsics_options->polynomial_p2 = 0.0;
444}
445
447 const libmv_CameraIntrinsicsOptions *camera_intrinsics_options, MovieTrackingCamera *camera)
448{
449 switch (camera_intrinsics_options->distortion_model) {
452 camera->k1 = camera_intrinsics_options->polynomial_k1;
453 camera->k2 = camera_intrinsics_options->polynomial_k2;
454 camera->k3 = camera_intrinsics_options->polynomial_k3;
455 return;
456
458 camera->distortion_model = TRACKING_DISTORTION_MODEL_DIVISION;
459 camera->division_k1 = camera_intrinsics_options->division_k1;
460 camera->division_k2 = camera_intrinsics_options->division_k2;
461 return;
462
464 camera->distortion_model = TRACKING_DISTORTION_MODEL_NUKE;
465 camera->nuke_k1 = camera_intrinsics_options->nuke_k1;
466 camera->nuke_k2 = camera_intrinsics_options->nuke_k2;
467 return;
469 camera->distortion_model = TRACKING_DISTORTION_MODEL_BROWN;
470 camera->brown_k1 = camera_intrinsics_options->brown_k1;
471 camera->brown_k2 = camera_intrinsics_options->brown_k2;
472 camera->brown_k3 = camera_intrinsics_options->brown_k3;
473 camera->brown_k4 = camera_intrinsics_options->brown_k4;
474 camera->brown_p1 = camera_intrinsics_options->brown_p1;
475 camera->brown_p2 = camera_intrinsics_options->brown_p2;
476 return;
477 }
478
479 /* Libmv returned distortion model which is not known to Blender. This is a logical error in code
480 * and Blender side is to be updated to match Libmv. */
481 BLI_assert_msg(0, "Unknown distortion model");
482}
483
485 MovieTracking *tracking,
486 const int calibration_width,
487 const int calibration_height,
488 libmv_CameraIntrinsicsOptions *camera_intrinsics_options)
489{
490 MovieTrackingCamera *camera = &tracking->camera;
491 const float aspy = 1.0f / tracking->camera.pixel_aspect;
492
493 float principal_px[2];
495 camera->principal_point, calibration_width, calibration_height, principal_px);
496
497 camera_intrinsics_options->num_threads = BLI_system_thread_count();
498
499 camera_intrinsics_options->focal_length = camera->focal;
500
501 camera_intrinsics_options->principal_point_x = principal_px[0];
502 camera_intrinsics_options->principal_point_y = principal_px[1] * aspy;
503
504 distortion_model_parameters_from_tracking(camera, camera_intrinsics_options);
505
506 camera_intrinsics_options->image_width = calibration_width;
507 camera_intrinsics_options->image_height = int(calibration_height * aspy);
508}
509
511 MovieTracking *tracking, const libmv_CameraIntrinsicsOptions *camera_intrinsics_options)
512{
513 MovieTrackingCamera *camera = &tracking->camera;
514
515 camera->focal = camera_intrinsics_options->focal_length;
516
517 /* NOTE: The image size stored in the `camera_intrinsics_options` is aspect-ratio corrected,
518 * so there is no need to "un-apply" it from the principal point. */
519 const float principal_px[2] = {float(camera_intrinsics_options->principal_point_x),
520 float(camera_intrinsics_options->principal_point_y)};
521
523 camera_intrinsics_options->image_width,
524 camera_intrinsics_options->image_height,
525 camera->principal_point);
526
527 distortion_model_parameters_from_options(camera_intrinsics_options, camera);
528}
529
531 int current_frame,
532 bool backwards)
533{
534 MovieTrackingMarker *marker_keyed = nullptr;
535 MovieTrackingMarker *marker_keyed_fallback = nullptr;
536 int a = BKE_tracking_marker_get(track, current_frame) - track->markers;
537
538 while (a >= 0 && a < track->markersnr) {
539 int next = backwards ? a + 1 : a - 1;
540 bool is_keyframed = false;
541 MovieTrackingMarker *cur_marker = &track->markers[a];
542 MovieTrackingMarker *next_marker = nullptr;
543
544 if (next >= 0 && next < track->markersnr) {
545 next_marker = &track->markers[next];
546 }
547
548 if ((cur_marker->flag & MARKER_DISABLED) == 0) {
549 /* If it'll happen so we didn't find a real keyframe marker,
550 * fallback to the first marker in current tracked segment
551 * as a keyframe.
552 */
553 if (next_marker == nullptr) {
554 /* Could happen when trying to get reference marker for the fist
555 * one on the segment which isn't surrounded by disabled markers.
556 *
557 * There's no really good choice here, just use the reference
558 * marker which looks correct..
559 */
560 if (marker_keyed_fallback == nullptr) {
561 marker_keyed_fallback = cur_marker;
562 }
563 }
564 else if (next_marker->flag & MARKER_DISABLED) {
565 if (marker_keyed_fallback == nullptr) {
566 marker_keyed_fallback = cur_marker;
567 }
568 }
569
570 is_keyframed |= (cur_marker->flag & MARKER_TRACKED) == 0;
571 }
572
573 if (is_keyframed) {
574 marker_keyed = cur_marker;
575
576 break;
577 }
578
579 a = next;
580 }
581
582 if (marker_keyed == nullptr) {
583 marker_keyed = marker_keyed_fallback;
584 }
585
586 return marker_keyed;
587}
588
590
591/* -------------------------------------------------------------------- */
594
596 int clip_index,
597 int frame)
598{
599 MovieClip *clip;
600 MovieClipUser user;
601 ImBuf *ibuf;
602 int scene_frame;
603
604 BLI_assert(clip_index < accessor->num_clips);
605
606 clip = accessor->clips[clip_index];
607 scene_frame = BKE_movieclip_remap_clip_to_scene_frame(clip, frame);
608 BKE_movieclip_user_set_frame(&user, scene_frame);
610 user.render_flag = 0;
611 ibuf = BKE_movieclip_get_ibuf(clip, &user);
612
613 return ibuf;
614}
615
617{
618 ImBuf *grayscale = IMB_allocImBuf(ibuf->x, ibuf->y, 32, 0);
619
620 BLI_assert(ELEM(ibuf->channels, 3, 4));
621
622 /* TODO(sergey): Bummer, currently IMB API only allows to create 4 channels
623 * float buffer, so we do it manually here.
624 *
625 * Will generalize it later.
626 */
627 const size_t num_pixels = size_t(grayscale->x) * size_t(grayscale->y);
628 grayscale->channels = 1;
629 float *rect_float = MEM_cnew_array<float>(num_pixels, "tracking grayscale image");
630 if (rect_float != nullptr) {
631 IMB_assign_float_buffer(grayscale, rect_float, IB_TAKE_OWNERSHIP);
632
633 for (int i = 0; i < grayscale->x * grayscale->y; i++) {
634 const float *pixel = ibuf->float_buffer.data + ibuf->channels * i;
635
636 rect_float[i] = 0.2126f * pixel[0] + 0.7152f * pixel[1] + 0.0722f * pixel[2];
637 }
638 }
639
640 return grayscale;
641}
642
643static void ibuf_to_float_image(const ImBuf *ibuf, libmv_FloatImage *float_image)
644{
645 BLI_assert(ibuf->float_buffer.data != nullptr);
646 float_image->buffer = ibuf->float_buffer.data;
647 float_image->width = ibuf->x;
648 float_image->height = ibuf->y;
649 float_image->channels = ibuf->channels;
650}
651
653{
654 ImBuf *ibuf = IMB_allocImBuf(float_image->width, float_image->height, 32, 0);
655 size_t num_total_channels = size_t(ibuf->x) * size_t(ibuf->y) * float_image->channels;
656 ibuf->channels = float_image->channels;
657 float *rect_float = MEM_cnew_array<float>(num_total_channels, "tracking grayscale image");
658 if (rect_float != nullptr) {
660
661 memcpy(rect_float, float_image->buffer, num_total_channels * sizeof(float));
662 }
663 return ibuf;
664}
665
667 int clip_index,
668 int frame,
669 libmv_InputMode input_mode,
670 int downscale,
671 const libmv_Region *region,
673{
674 /* First try to get fully processed image from the cache. */
675 CACHE_PRINTF("Calculate new buffer for frame %d\n", frame);
676 /* And now we do post-processing of the original frame. */
677 ImBuf *orig_ibuf = accessor_get_preprocessed_ibuf(accessor, clip_index, frame);
678 if (orig_ibuf == nullptr) {
679 return nullptr;
680 }
681 ImBuf *final_ibuf;
682 /* Cut a region if requested. */
683 if (region != nullptr) {
684 int width = region->max[0] - region->min[0], height = region->max[1] - region->min[1];
685
686 /* If the requested region goes outside of the actual frame we still
687 * return the requested region size, but only fill it's partially with
688 * the data we can.
689 */
690 int clamped_origin_x = max_ii(int(region->min[0]), 0),
691 clamped_origin_y = max_ii(int(region->min[1]), 0);
692 int dst_offset_x = clamped_origin_x - int(region->min[0]),
693 dst_offset_y = clamped_origin_y - int(region->min[1]);
694 int clamped_width = width - dst_offset_x, clamped_height = height - dst_offset_y;
695 clamped_width = min_ii(clamped_width, orig_ibuf->x - clamped_origin_x);
696 clamped_height = min_ii(clamped_height, orig_ibuf->y - clamped_origin_y);
697
698 final_ibuf = IMB_allocImBuf(width, height, 32, IB_rectfloat);
699
700 if (orig_ibuf->float_buffer.data != nullptr) {
701 IMB_rectcpy(final_ibuf,
702 orig_ibuf,
703 dst_offset_x,
704 dst_offset_y,
705 clamped_origin_x,
706 clamped_origin_y,
707 clamped_width,
708 clamped_height);
709 }
710 else {
711 /* TODO(sergey): We don't do any color space or alpha conversion
712 * here. Probably Libmv is better to work in the linear space,
713 * but keep sRGB space here for compatibility for now.
714 */
715 for (int y = 0; y < clamped_height; y++) {
716 for (int x = 0; x < clamped_width; x++) {
717 int src_x = x + clamped_origin_x, src_y = y + clamped_origin_y;
718 int dst_x = x + dst_offset_x, dst_y = y + dst_offset_y;
719 int dst_index = (dst_y * width + dst_x) * 4,
720 src_index = (src_y * orig_ibuf->x + src_x) * 4;
721 rgba_uchar_to_float(final_ibuf->float_buffer.data + dst_index,
722 orig_ibuf->byte_buffer.data + src_index);
723 }
724 }
725 }
726 }
727 else {
728 /* Libmv only works with float images,
729 *
730 * This would likely make it so loads of float buffers are being stored
731 * in the cache which is nice on the one hand (faster re-use of the
732 * frames) but on the other hand it bumps the memory usage up.
733 */
735 IMB_float_from_rect(orig_ibuf);
737 final_ibuf = orig_ibuf;
738 }
739 /* Downscale if needed. */
740 if (downscale > 0) {
741 if (final_ibuf == orig_ibuf) {
742 final_ibuf = IMB_dupImBuf(orig_ibuf);
743 }
744 IMB_scale(final_ibuf,
745 orig_ibuf->x / (1 << downscale),
746 orig_ibuf->y / (1 << downscale),
748 false);
749 }
750 /* Apply possible transformation. */
751 if (transform != nullptr) {
752 libmv_FloatImage input_image, output_image;
753 ibuf_to_float_image(final_ibuf, &input_image);
754 libmv_frameAccessorgetTransformRun(transform, &input_image, &output_image);
755 if (final_ibuf != orig_ibuf) {
756 IMB_freeImBuf(final_ibuf);
757 }
758 final_ibuf = float_image_to_ibuf(&output_image);
759 libmv_floatImageDestroy(&output_image);
760 }
761 /* Transform number of channels. */
762 if (input_mode == LIBMV_IMAGE_MODE_RGBA) {
763 BLI_assert(ELEM(orig_ibuf->channels, 3, 4));
764 /* pass */
765 }
766 else /* if (input_mode == LIBMV_IMAGE_MODE_MONO) */ {
767 BLI_assert(input_mode == LIBMV_IMAGE_MODE_MONO);
768 if (final_ibuf->channels != 1) {
769 ImBuf *grayscale_ibuf = make_grayscale_ibuf_copy(final_ibuf);
770 if (final_ibuf != orig_ibuf) {
771 /* We dereference original frame later. */
772 IMB_freeImBuf(final_ibuf);
773 }
774 final_ibuf = grayscale_ibuf;
775 }
776 }
777 /* It's possible processing still didn't happen at this point,
778 * but we really need a copy of the buffer to be transformed
779 * and to be put to the cache.
780 */
781 if (final_ibuf == orig_ibuf) {
782 final_ibuf = IMB_dupImBuf(orig_ibuf);
783 }
784 IMB_freeImBuf(orig_ibuf);
785 return final_ibuf;
786}
787
789 int clip_index,
790 int frame,
791 libmv_InputMode input_mode,
792 int downscale,
793 const libmv_Region *region,
795 float **destination,
796 int *width,
797 int *height,
798 int *channels)
799{
800 TrackingImageAccessor *accessor = (TrackingImageAccessor *)user_data;
801 ImBuf *ibuf;
802
803 BLI_assert(clip_index >= 0 && clip_index < accessor->num_clips);
804
805 ibuf = accessor_get_ibuf(accessor, clip_index, frame, input_mode, downscale, region, transform);
806
807 if (ibuf) {
808 *destination = ibuf->float_buffer.data;
809 *width = ibuf->x;
810 *height = ibuf->y;
811 *channels = ibuf->channels;
812 }
813 else {
814 *destination = nullptr;
815 *width = 0;
816 *height = 0;
817 *channels = 0;
818 }
819
820 return ibuf;
821}
822
824{
825 ImBuf *ibuf = (ImBuf *)cache_key;
826 IMB_freeImBuf(ibuf);
827}
828
830 int clip_index,
831 int frame,
832 int track_index,
833 const libmv_Region *region,
834 float **r_destination,
835 int *r_width,
836 int *r_height)
837{
838 /* Perform sanity checks first. */
839 TrackingImageAccessor *accessor = (TrackingImageAccessor *)user_data;
840 BLI_assert(clip_index < accessor->num_clips);
841 BLI_assert(track_index < accessor->num_tracks);
842 MovieTrackingTrack *track = accessor->tracks[track_index];
843 /* Early output, track does not use mask. */
844 if ((track->algorithm_flag & TRACK_ALGORITHM_FLAG_USE_MASK) == 0) {
845 return nullptr;
846 }
847 MovieClip *clip = accessor->clips[clip_index];
848 /* Construct fake user so we can access movie clip. */
849 MovieClipUser user;
850 int scene_frame = BKE_movieclip_remap_clip_to_scene_frame(clip, frame);
851 BKE_movieclip_user_set_frame(&user, scene_frame);
853 user.render_flag = 0;
854 /* Get frame width and height so we can convert stroke coordinates
855 * and other things from normalized to pixel space.
856 */
857 int frame_width, frame_height;
858 BKE_movieclip_get_size(clip, &user, &frame_width, &frame_height);
859 /* Actual mask sampling. */
861 const float region_min[2] = {
862 region->min[0] - marker->pos[0] * frame_width,
863 region->min[1] - marker->pos[1] * frame_height,
864 };
865 const float region_max[2] = {
866 region->max[0] - marker->pos[0] * frame_width,
867 region->max[1] - marker->pos[1] * frame_height,
868 };
869 *r_destination = tracking_track_get_mask_for_region(
870 frame_width, frame_height, region_min, region_max, track);
871 *r_width = region->max[0] - region->min[0];
872 *r_height = region->max[1] - region->min[1];
873 return *r_destination;
874}
875
877{
878 if (cache_key != nullptr) {
879 float *mask = (float *)cache_key;
881 }
882}
883
885 int num_clips,
887 int num_tracks)
888{
889 TrackingImageAccessor *accessor = MEM_cnew<TrackingImageAccessor>("tracking image accessor");
890
891 BLI_assert(num_clips <= MAX_ACCESSOR_CLIP);
892
893 memcpy(accessor->clips, clips, num_clips * sizeof(MovieClip *));
894 accessor->num_clips = num_clips;
895
896 accessor->tracks = MEM_cnew_array<MovieTrackingTrack *>(num_tracks, "image accessor tracks");
897 memcpy(accessor->tracks, tracks, num_tracks * sizeof(MovieTrackingTrack *));
898 accessor->num_tracks = num_tracks;
899
905
906 BLI_spin_init(&accessor->cache_lock);
907
908 return accessor;
909}
910
912{
914 BLI_spin_end(&accessor->cache_lock);
915 MEM_freeN(accessor->tracks);
916 MEM_freeN(accessor);
917}
918
void BKE_movieclip_user_set_frame(struct MovieClipUser *user, int framenr)
void BKE_movieclip_get_size(struct MovieClip *clip, const struct MovieClipUser *user, int *r_width, int *r_height)
float BKE_movieclip_remap_clip_to_scene_frame(const struct MovieClip *clip, float framenr)
struct ImBuf * BKE_movieclip_get_ibuf(struct MovieClip *clip, const struct MovieClipUser *user)
struct MovieTrackingTrack * BKE_tracking_track_duplicate(struct MovieTrackingTrack *track)
Definition tracking.cc:566
struct MovieTrackingObject * BKE_tracking_object_get_named(struct MovieTracking *tracking, const char *name)
Definition tracking.cc:1966
struct MovieTrackingMarker * BKE_tracking_marker_get(struct MovieTrackingTrack *track, int framenr)
Definition tracking.cc:1358
struct MovieTrackingMarker * BKE_tracking_marker_get_exact(struct MovieTrackingTrack *track, int framenr)
Definition tracking.cc:1391
void BKE_tracking_track_free(struct MovieTrackingTrack *track)
Definition tracking.cc:596
bool BKE_tracking_track_has_marker_at_frame(struct MovieTrackingTrack *track, int framenr)
Definition tracking.cc:709
struct MovieTrackingObject * BKE_tracking_object_add(struct MovieTracking *tracking, const char *name)
Definition tracking.cc:1897
struct MovieTrackingMarker * BKE_tracking_marker_insert(struct MovieTrackingTrack *track, struct MovieTrackingMarker *marker)
Definition tracking.cc:1235
#define BLI_assert(a)
Definition BLI_assert.h:50
#define BLI_assert_msg(a, msg)
Definition BLI_assert.h:57
bool BLI_ghash_reinsert(GHash *gh, void *key, void *val, GHashKeyFreeFP keyfreefp, GHashValFreeFP valfreefp)
Definition BLI_ghash.c:712
GHash * BLI_ghash_ptr_new(const char *info) ATTR_MALLOC ATTR_WARN_UNUSED_RESULT
void * BLI_ghash_lookup(const GHash *gh, const void *key) ATTR_WARN_UNUSED_RESULT
Definition BLI_ghash.c:731
void BLI_ghash_insert(GHash *gh, void *key, void *val)
Definition BLI_ghash.c:707
void BLI_ghash_free(GHash *gh, GHashKeyFreeFP keyfreefp, GHashValFreeFP valfreefp)
Definition BLI_ghash.c:860
void BLI_addtail(struct ListBase *listbase, void *vlink) ATTR_NONNULL(1)
Definition listbase.cc:110
void BLI_remlink(struct ListBase *listbase, void *vlink) ATTR_NONNULL(1)
Definition listbase.cc:130
int BLI_findindex(const struct ListBase *listbase, const void *vlink) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL(1)
MINLINE int min_ii(int a, int b)
MINLINE int max_ii(int a, int b)
void rgba_uchar_to_float(float r_col[4], const unsigned char col_ub[4])
MINLINE void add_v2_v2v2(float r[2], const float a[2], const float b[2])
MINLINE void sub_v2_v2v2(float r[2], const float a[2], const float b[2])
#define STRNCPY(dst, src)
Definition BLI_string.h:593
void BLI_uniquename(const struct ListBase *list, void *vlink, const char *defname, char delim, int name_offset, size_t name_maxncpy) ATTR_NONNULL(1
@ LOCK_MOVIECLIP
Definition BLI_threads.h:72
void BLI_thread_unlock(int type)
Definition threads.cc:333
void BLI_thread_lock(int type)
Definition threads.cc:328
int BLI_system_thread_count(void)
Definition threads.cc:253
void BLI_spin_init(SpinLock *spin)
Definition threads.cc:391
void BLI_spin_unlock(SpinLock *spin)
Definition threads.cc:430
void BLI_spin_lock(SpinLock *spin)
Definition threads.cc:405
void BLI_spin_end(SpinLock *spin)
Definition threads.cc:445
#define ELEM(...)
#define CTX_DATA_(context, msgid)
#define BLT_I18NCONTEXT_ID_MOVIECLIP
@ MCLIP_PROXY_RENDER_SIZE_FULL
@ MARKER_TRACKED
@ MARKER_DISABLED
@ TRACK_ALGORITHM_FLAG_USE_MASK
@ TRACKING_DISTORTION_MODEL_DIVISION
@ TRACKING_DISTORTION_MODEL_POLYNOMIAL
@ TRACKING_DISTORTION_MODEL_NUKE
@ TRACKING_DISTORTION_MODEL_BROWN
ImBuf * IMB_dupImBuf(const ImBuf *ibuf1)
void IMB_assign_float_buffer(ImBuf *ibuf, float *buffer_data, ImBufOwnership ownership)
void IMB_rectcpy(ImBuf *dbuf, const ImBuf *sbuf, int destx, int desty, int srcx, int srcy, int width, int height)
Definition rectop.cc:463
void IMB_float_from_rect(ImBuf *ibuf)
Definition divers.cc:802
bool IMB_scale(ImBuf *ibuf, unsigned int newx, unsigned int newy, IMBScaleFilter filter, bool threaded=true)
Definition scaling.cc:779
Contains defines and structs used throughout the imbuf module.
@ IB_TAKE_OWNERSHIP
@ IB_rectfloat
Read Guarded memory(de)allocation.
in reality light always falls off quadratically Particle Retrieve the data of the particle that spawned the object for example to give variation to multiple instances of an object Point Retrieve information about points in a point cloud Retrieve the edges of an object as it appears to Cycles topology will always appear triangulated Convert a blackbody temperature to an RGB value Normal Generate a perturbed normal from an RGB normal map image Typically used for faking highly detailed surfaces Generate an OSL shader from a file or text data block Image Sample an image file as a texture Gabor Generate Gabor noise Gradient Generate interpolated color and intensity values based on the input vector Magic Generate a psychedelic color texture Voronoi Generate Worley noise based on the distance to random points Typically used to generate textures such as or biological cells Brick Generate a procedural texture producing bricks Texture Retrieve multiple types of texture coordinates nTypically used as inputs for texture nodes Vector Convert a or normal between camera
Group Output data from inside of a node group A color picker Mix two input colors RGB to Convert a color s luminance to a grayscale value Generate a normal vector and a dot product Brightness Control the brightness and contrast of the input color Vector Map input vector components with curves Camera Retrieve information about the camera and how it relates to the current shading point s position Clamp a value between a minimum and a maximum Vector Perform vector math operation Invert Invert a producing a negative Combine Generate a color from its and blue channels(Deprecated)") DefNode(ShaderNode
SIMD_FORCE_INLINE btVector3 transform(const btVector3 &point) const
#define offsetof(t, d)
draw_view in_light_buf[] float
draw_view push_constant(Type::INT, "radiance_src") .push_constant(Type capture_info_buf storage_buf(1, Qualifier::READ, "ObjectBounds", "bounds_buf[]") .push_constant(Type draw_view int
void libmv_FrameAccessorDestroy(libmv_FrameAccessor *frame_accessor)
void libmv_frameAccessorgetTransformRun(const libmv_FrameTransform *transform, const libmv_FloatImage *input_image, libmv_FloatImage *output_image)
libmv_FrameAccessor * libmv_FrameAccessorNew(libmv_FrameAccessorUserData *user_data, libmv_GetImageCallback get_image_callback, libmv_ReleaseImageCallback release_image_callback, libmv_GetMaskForTrackCallback get_mask_for_track_callback, libmv_ReleaseMaskCallback release_mask_callback)
struct ImBuf * IMB_allocImBuf(unsigned int, unsigned int, unsigned char, unsigned int)
void IMB_freeImBuf(ImBuf *)
@ LIBMV_DISTORTION_MODEL_POLYNOMIAL
@ LIBMV_DISTORTION_MODEL_NUKE
@ LIBMV_DISTORTION_MODEL_BROWN
@ LIBMV_DISTORTION_MODEL_DIVISION
struct libmv_FrameTransform libmv_FrameTransform
void * libmv_CacheKey
struct libmv_FrameAccessorUserData libmv_FrameAccessorUserData
@ LIBMV_IMAGE_MODE_RGBA
@ LIBMV_IMAGE_MODE_MONO
void libmv_floatImageDestroy(libmv_FloatImage *image)
void MEM_freeN(void *vmemh)
Definition mallocn.cc:105
void *(* MEM_dupallocN)(const void *vmemh)
Definition mallocn.cc:39
ccl_device_inline float4 mask(const int4 mask, const float4 a)
static ulong * next
ImBufFloatBuffer float_buffer
ImBufByteBuffer byte_buffer
void * first
MovieTrackingMarker * markers
struct MovieTrackingTrack * next
struct MovieTrackingTrack * prev
MovieTrackingCamera camera
struct MovieTrackingTrack ** tracks
struct MovieClip * clips[MAX_ACCESSOR_CLIP]
struct libmv_FrameAccessor * libmv_accessor
char object_name[MAX_NAME]
struct GHash * hash
MovieTrackingTrack * tracks
SpinLock spin_lock
float * tracking_track_get_mask_for_region(const int frame_width, const int frame_height, const float region_min[2], const float region_max[2], const MovieTrackingTrack *track)
Definition tracking.cc:1136
ListBase tracks
Definition tracking.cc:70
#define MAX_ACCESSOR_CLIP
static void accessor_release_image_callback(libmv_CacheKey cache_key)
static libmv_CacheKey accessor_get_image_callback(libmv_FrameAccessorUserData *user_data, int clip_index, int frame, libmv_InputMode input_mode, int downscale, const libmv_Region *region, const libmv_FrameTransform *transform, float **destination, int *width, int *height, int *channels)
static void pixel_to_unified(int frame_width, int frame_height, const float pixel_coords[2], float unified_coords[2])
static void unified_to_pixel(int frame_width, int frame_height, const float unified_coords[2], float pixel_coords[2])
static libmv_CacheKey accessor_get_mask_for_track_callback(libmv_FrameAccessorUserData *user_data, int clip_index, int frame, int track_index, const libmv_Region *region, float **r_destination, int *r_width, int *r_height)
void tracking_marker_insert_disabled(MovieTrackingTrack *track, const MovieTrackingMarker *ref_marker, bool before, bool overwrite)
static void search_pixel_to_marker_unified(int frame_width, int frame_height, const MovieTrackingMarker *marker, const float search_pixel[2], float marker_unified[2])
int tracks_map_get_size(TracksMap *map)
static ImBuf * accessor_get_preprocessed_ibuf(TrackingImageAccessor *accessor, int clip_index, int frame)
void tracking_principal_point_pixel_to_normalized(const float principal_point_pixel[2], const int frame_width, const int frame_height, float r_principal_point_normalized[2])
static void ibuf_to_float_image(const ImBuf *ibuf, libmv_FloatImage *float_image)
static void marker_unified_to_frame_pixel_coordinates(int frame_width, int frame_height, const MovieTrackingMarker *marker, const float marker_unified_coords[2], float frame_pixel_coords[2])
void tracks_map_free(TracksMap *map)
static void marker_unified_to_search_pixel(int frame_width, int frame_height, const MovieTrackingMarker *marker, const float marker_unified[2], float search_pixel[2])
void tracking_cameraIntrinscisOptionsFromTracking(MovieTracking *tracking, const int calibration_width, const int calibration_height, libmv_CameraIntrinsicsOptions *camera_intrinsics_options)
static ImBuf * make_grayscale_ibuf_copy(ImBuf *ibuf)
void tracking_get_marker_coords_for_tracking(int frame_width, int frame_height, const MovieTrackingMarker *marker, double search_pixel_x[5], double search_pixel_y[5])
TrackingImageAccessor * tracking_image_accessor_new(MovieClip *clips[MAX_ACCESSOR_CLIP], int num_clips, MovieTrackingTrack **tracks, int num_tracks)
static void distortion_model_parameters_from_options(const libmv_CameraIntrinsicsOptions *camera_intrinsics_options, MovieTrackingCamera *camera)
static void marker_to_frame_unified(const MovieTrackingMarker *marker, const float marker_unified_coords[2], float frame_unified_coords[2])
void tracks_map_insert(TracksMap *map, MovieTrackingTrack *track)
#define CACHE_PRINTF(...)
static ImBuf * float_image_to_ibuf(libmv_FloatImage *float_image)
void tracking_trackingCameraFromIntrinscisOptions(MovieTracking *tracking, const libmv_CameraIntrinsicsOptions *camera_intrinsics_options)
static void distortion_model_parameters_from_tracking(const MovieTrackingCamera *camera, libmv_CameraIntrinsicsOptions *camera_intrinsics_options)
void tracking_image_accessor_destroy(TrackingImageAccessor *accessor)
TracksMap * tracks_map_new(const char *object_name, int num_tracks)
static ImBuf * accessor_get_ibuf(TrackingImageAccessor *accessor, int clip_index, int frame, libmv_InputMode input_mode, int downscale, const libmv_Region *region, const libmv_FrameTransform *transform)
MovieTrackingMarker * tracking_get_keyframed_marker(MovieTrackingTrack *track, int current_frame, bool backwards)
static void accessor_release_mask_callback(libmv_CacheKey cache_key)
void tracking_set_marker_coords_from_tracking(int frame_width, int frame_height, MovieTrackingMarker *marker, const double search_pixel_x[5], const double search_pixel_y[5])
void tracking_get_search_origin_frame_pixel(int frame_width, int frame_height, const MovieTrackingMarker *marker, float frame_pixel[2])
void tracks_map_merge(TracksMap *map, MovieTracking *tracking)
void tracking_principal_point_normalized_to_pixel(const float principal_point_normalized[2], const int frame_width, const int frame_height, float r_principal_point_pixel[2])