// bvh_public.inc

public:
BVHHandle item_add(T *p_userdata, bool p_active, const BOUNDS &p_aabb, int32_t p_subindex, uint32_t p_tree_id, uint32_t p_tree_collision_mask, bool p_invisible = false) {
#ifdef BVH_VERBOSE_TREE
	VERBOSE_PRINT("\nitem_add BEFORE");
	_debug_recursive_print_tree(p_tree_id);
	VERBOSE_PRINT("\n");
#endif

	BVHABB_CLASS abb;
	abb.from(p_aabb);

	// NOTE that we do not expand the AABB for the first create, even if
	// leaf expansion is switched on. This is for two reasons:
	// (1) We don't know whether this object will move in the future, in which case
	// a non-expanded bound would be better.
	// (2) We don't yet know how many objects will be paired, which is used to modify
	// the expansion margin.

	// handle to be filled with the new item ref
	BVHHandle handle;

	// the ref id is easier to pass around than the handle
	uint32_t ref_id;

	// this should never fail
	ItemRef *ref = _refs.request(ref_id);

	// the extra data should be a list parallel to the references
	uint32_t extra_id;
	ItemExtra *extra = _extra.request(extra_id);
	BVH_ASSERT(extra_id == ref_id);

	// pairs info
	if (USE_PAIRS) {
		uint32_t pairs_id;
		ItemPairs *pairs = _pairs.request(pairs_id);
		pairs->clear();
		BVH_ASSERT(pairs_id == ref_id);
	}

	extra->subindex = p_subindex;
	extra->userdata = p_userdata;
	extra->last_updated_tick = 0;

	// add an active reference to the list for slow incremental optimize.
	// this list must be kept in sync with the references as they are added or removed.
	extra->active_ref_id = _active_refs.size();
	_active_refs.push_back(ref_id);

	extra->tree_id = p_tree_id;
	extra->tree_collision_mask = p_tree_collision_mask;

	// assign to the handle to return
	handle.set_id(ref_id);

	create_root_node(p_tree_id);

	// we must choose where to add to the tree
	if (p_active) {
		ref->tnode_id = _logic_choose_item_add_node(_root_node_id[p_tree_id], abb);

		bool refit = _node_add_item(ref->tnode_id, ref_id, abb);

		if (refit) {
			// only need to refit from the parent
			const TNode &add_node = _nodes[ref->tnode_id];
			if (add_node.parent_id != BVHCommon::INVALID) {
				refit_upward_and_balance(add_node.parent_id, p_tree_id);
			}
		}
	} else {
		ref->set_inactive();
	}

#ifdef BVH_VERBOSE
	// memory use
	int mem = _refs.estimate_memory_use();
	mem += _nodes.estimate_memory_use();

	String sz = _debug_aabb_to_string(abb);
	VERBOSE_PRINT("\titem_add [" + itos(ref_id) + "] " + itos(_refs.used_size()) + " refs,\t" + itos(_nodes.used_size()) + " nodes " + sz);
	VERBOSE_PRINT("mem use : " + itos(mem) + ", num nodes reserved : " + itos(_nodes.reserved_size()));
#endif

	return handle;
}
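
// Illustrative usage sketch (not part of this file; `bvh`, `my_object` and
// `my_aabb` are hypothetical names). Adding an active object to tree 0 that
// collides against all trees, keeping the handle for later moves and removal:
//
//	BVHHandle handle = bvh.item_add(&my_object, true, my_aabb, 0, 0, 0xFFFFFFFF);
//	// ... whenever the object's bound changes:
//	bvh.item_move(handle, my_aabb);
//	// ... on destruction:
//	bvh.item_remove(handle);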
void _debug_print_refs() {
#ifdef BVH_VERBOSE_TREE
	print_line("refs.....");
	for (int n = 0; n < _refs.size(); n++) {
		const ItemRef &ref = _refs[n];
		print_line("tnode_id " + itos(ref.tnode_id) + ", item_id " + itos(ref.item_id));
	}
#endif
}
// returns false if a noop
bool item_move(BVHHandle p_handle, const BOUNDS &p_aabb) {
	uint32_t ref_id = p_handle.id();

	// get the reference
	ItemRef &ref = _refs[ref_id];
	if (!ref.is_active()) {
		return false;
	}

	BVHABB_CLASS abb;
	abb.from(p_aabb);

#ifdef BVH_EXPAND_LEAF_AABBS
	if (USE_PAIRS) {
		// scale the pairing expansion by the number of pairs.
		abb.expand(_pairs[ref_id].scale_expansion_margin(_pairing_expansion));
	} else {
		abb.expand(_pairing_expansion);
	}
#endif

	BVH_ASSERT(ref.tnode_id != BVHCommon::INVALID);
	TNode &tnode = _nodes[ref.tnode_id];

	// does it fit within the current leaf aabb?
	if (tnode.aabb.is_other_within(abb)) {
		// fast path .. not moved enough to need a refit.
		// however we WILL update the exact aabb in the leaf, as this will be needed
		// for accurate collision detection
		TLeaf &leaf = _node_get_leaf(tnode);

		BVHABB_CLASS &leaf_abb = leaf.get_aabb(ref.item_id);

		// no change?
#ifdef BVH_EXPAND_LEAF_AABBS
		BOUNDS leaf_aabb;
		leaf_abb.to(leaf_aabb);

		// This test should pass in a lot of cases, and by returning false we can avoid
		// collision pairing checks later, which greatly reduces processing.
		if (expanded_aabb_encloses_not_shrink(leaf_aabb, p_aabb)) {
			return false;
		}
#else
		if (leaf_abb == abb) {
			return false;
		}
#endif

#ifdef BVH_VERBOSE_MOVES
		print_line("item_move " + itos(p_handle.id()) + "(within tnode aabb) : " + _debug_aabb_to_string(abb));
#endif

		leaf_abb = abb;
		_integrity_check_all();

		return true;
	}

#ifdef BVH_VERBOSE_MOVES
	print_line("item_move " + itos(p_handle.id()) + "(outside tnode aabb) : " + _debug_aabb_to_string(abb));
#endif

	uint32_t tree_id = _handle_get_tree_id(p_handle);

	// remove and reinsert
	node_remove_item(ref_id, tree_id);

	// we must choose where to add to the tree
	ref.tnode_id = _logic_choose_item_add_node(_root_node_id[tree_id], abb);

	// add to the tree
	bool needs_refit = _node_add_item(ref.tnode_id, ref_id, abb);

	// only need to refit from the PARENT
	if (needs_refit) {
		const TNode &add_node = _nodes[ref.tnode_id];
		if (add_node.parent_id != BVHCommon::INVALID) {
			// not sure we need to rebalance all the time, this can be done less often
			refit_upward(add_node.parent_id);
		}
		//refit_upward_and_balance(add_node.parent_id);
	}

	return true;
}
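
// A rough sketch of the two paths item_move() takes (hypothetical caller code):
// when the new bound stays inside the expanded leaf AABB only the stored exact
// AABB is updated; otherwise the item is removed and reinserted.
//
//	if (bvh.item_move(handle, new_aabb)) {
//		// the stored bound changed; collision pairs may need re-evaluating
//	} else {
//		// noop fast path; the expanded leaf bound still encloses the item
//	}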
void item_remove(BVHHandle p_handle) {
	uint32_t ref_id = p_handle.id();
	uint32_t tree_id = _handle_get_tree_id(p_handle);

	VERBOSE_PRINT("item_remove [" + itos(ref_id) + "] ");

	////////////////////////////////////////
	// remove the active reference from the list for slow incremental optimize.
	// this list must be kept in sync with the references as they are added or removed.
	uint32_t active_ref_id = _extra[ref_id].active_ref_id;
	uint32_t ref_id_moved_back = _active_refs[_active_refs.size() - 1];

	// swap back and decrement for fast unordered remove
	_active_refs[active_ref_id] = ref_id_moved_back;
	_active_refs.resize(_active_refs.size() - 1);

	// keep the moved active reference up to date
	_extra[ref_id_moved_back].active_ref_id = active_ref_id;
	////////////////////////////////////////

	// remove the item from the node (only if active)
	if (_refs[ref_id].is_active()) {
		node_remove_item(ref_id, tree_id);
	}

	// remove the item reference
	_refs.free(ref_id);
	_extra.free(ref_id);
	if (USE_PAIRS) {
		_pairs.free(ref_id);
	}

	// don't think refit_all is necessary?
	//refit_all(_tree_id);

#ifdef BVH_VERBOSE_TREE
	_debug_recursive_print_tree(tree_id);
#endif
}
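
// The active reference removal above is the standard unordered "swap and pop"
// idiom. A minimal standalone sketch of the same pattern (illustrative only,
// with hypothetical names):
//
//	uint32_t moved = list[list.size() - 1]; // element that will fill the hole
//	list[hole] = moved;                     // overwrite the removed slot
//	list.resize(list.size() - 1);           // drop the now-duplicated tail
//	index_of[moved] = hole;                 // fix up the moved element's index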
// returns success
bool item_activate(BVHHandle p_handle, const BOUNDS &p_aabb) {
	uint32_t ref_id = p_handle.id();
	ItemRef &ref = _refs[ref_id];
	if (ref.is_active()) {
		// noop
		return false;
	}

	// add to tree
	BVHABB_CLASS abb;
	abb.from(p_aabb);

	uint32_t tree_id = _handle_get_tree_id(p_handle);

	// we must choose where to add to the tree
	ref.tnode_id = _logic_choose_item_add_node(_root_node_id[tree_id], abb);
	_node_add_item(ref.tnode_id, ref_id, abb);

	refit_upward_and_balance(ref.tnode_id, tree_id);

	return true;
}
// returns success
bool item_deactivate(BVHHandle p_handle) {
	uint32_t ref_id = p_handle.id();
	ItemRef &ref = _refs[ref_id];
	if (!ref.is_active()) {
		// noop
		return false;
	}

	uint32_t tree_id = _handle_get_tree_id(p_handle);

	// remove from tree
	BVHABB_CLASS abb;
	node_remove_item(ref_id, tree_id, &abb);

	// mark as inactive
	ref.set_inactive();
	return true;
}
bool item_get_active(BVHHandle p_handle) const {
	uint32_t ref_id = p_handle.id();
	const ItemRef &ref = _refs[ref_id];
	return ref.is_active();
}
// During collision testing, we need the collision mask and the tester object
// for the item we are testing from.
void item_fill_cullparams(BVHHandle p_handle, CullParams &r_params) const {
	uint32_t ref_id = p_handle.id();
	const ItemExtra &extra = _extra[ref_id];

	// which trees does this item want to collision detect against?
	r_params.tree_collision_mask = extra.tree_collision_mask;

	// The testing user defined object is passed to the user defined cull check function
	// for masks etc. This is usually a dummy object of type T with masks set.
	// However, if not using the cull_check callback (i.e. returning true), you can pass
	// a nullptr instead of a dummy object, as it will not be used.
	r_params.tester = extra.userdata;
}
bool item_is_pairable(const BVHHandle &p_handle) {
	uint32_t ref_id = p_handle.id();
	const ItemExtra &extra = _extra[ref_id];
	return extra.pairable != 0;
}
void item_get_ABB(const BVHHandle &p_handle, BVHABB_CLASS &r_abb) {
	uint32_t ref_id = p_handle.id();
	const ItemRef &ref = _refs[ref_id];

	TNode &tnode = _nodes[ref.tnode_id];
	TLeaf &leaf = _node_get_leaf(tnode);

	r_abb = leaf.get_aabb(ref.item_id);
}
bool item_set_tree(const BVHHandle &p_handle, uint32_t p_tree_id, uint32_t p_tree_collision_mask) {
	// change tree?
	uint32_t ref_id = p_handle.id();

	ItemExtra &ex = _extra[ref_id];
	ItemRef &ref = _refs[ref_id];

	bool active = ref.is_active();
	bool tree_changed = ex.tree_id != p_tree_id;
	bool mask_changed = ex.tree_collision_mask != p_tree_collision_mask;
	bool state_changed = tree_changed | mask_changed;

	// Keep an eye on this for bugs of not noticing changes to objects,
	// especially when changing client user masks, which will not be detected as a change
	// in the BVH. You may need to force a collision check in this case with recheck_pairs().

	if (active && (tree_changed | mask_changed)) {
		// record the abb
		TNode &tnode = _nodes[ref.tnode_id];
		TLeaf &leaf = _node_get_leaf(tnode);
		BVHABB_CLASS abb = leaf.get_aabb(ref.item_id);

		// make sure the current tree is correct prior to changing
		uint32_t tree_id = _handle_get_tree_id(p_handle);

		// remove from the old tree
		node_remove_item(ref_id, tree_id);

		// we must set the pairable status AFTER getting the current tree,
		// because the pairable status determines which tree the item is in
		ex.tree_id = p_tree_id;
		ex.tree_collision_mask = p_tree_collision_mask;

		// add to the new tree
		tree_id = _handle_get_tree_id(p_handle);
		create_root_node(tree_id);

		// we must choose where to add to the tree
		ref.tnode_id = _logic_choose_item_add_node(_root_node_id[tree_id], abb);
		bool needs_refit = _node_add_item(ref.tnode_id, ref_id, abb);

		// only need to refit from the PARENT
		if (needs_refit) {
			const TNode &add_node = _nodes[ref.tnode_id];
			if (add_node.parent_id != BVHCommon::INVALID) {
				refit_upward_and_balance(add_node.parent_id, tree_id);
			}
		}
	} else {
		// always keep this up to date
		ex.tree_id = p_tree_id;
		ex.tree_collision_mask = p_tree_collision_mask;
	}

	return state_changed;
}
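
// Illustrative usage (hypothetical names, including the `TREE_DYNAMIC` constant):
// moving an item between trees, e.g. switching an object from a "static" to a
// "dynamic" tree. As the comment above notes, changes to client-side masks inside
// the userdata are invisible to the BVH, so recheck_pairs() may be needed then.
//
//	if (bvh.item_set_tree(handle, TREE_DYNAMIC, dynamic_collision_mask)) {
//		// the item's tree or mask changed inside the BVH
//	}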
void incremental_optimize() {
	// first update all aabbs as a one off step..
	// this is cheaper than doing it on each move, as each leaf may get touched
	// multiple times in a frame.
	for (int n = 0; n < NUM_TREES; n++) {
		if (_root_node_id[n] != BVHCommon::INVALID) {
			refit_branch(_root_node_id[n]);
		}
	}

	// now do a small amount of reinserting to get things moving
	// gradually, and keep items in the right leaf
	if (_current_active_ref >= _active_refs.size()) {
		_current_active_ref = 0;
	}

	// special case
	if (!_active_refs.size()) {
		return;
	}

	uint32_t ref_id = _active_refs[_current_active_ref++];

	_logic_item_remove_and_reinsert(ref_id);

#ifdef BVH_VERBOSE
	/*
	// memory use
	int mem_refs = _refs.estimate_memory_use();
	int mem_nodes = _nodes.estimate_memory_use();
	int mem_leaves = _leaves.estimate_memory_use();

	String sz;
	sz += "mem_refs : " + itos(mem_refs) + " ";
	sz += "mem_nodes : " + itos(mem_nodes) + " ";
	sz += "mem_leaves : " + itos(mem_leaves) + " ";
	sz += ", num nodes : " + itos(_nodes.size());
	print_line(sz);
	*/
#endif
}
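
// Note on cost: each call reinserts exactly one active reference (the cursor
// wraps around), so with N active items a full optimization pass is amortized
// over N calls - typically N frames when driven from update().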
void update() {
	incremental_optimize();

	// keep the expansion values up to date with the world bound
//#define BVH_ALLOW_AUTO_EXPANSION
#ifdef BVH_ALLOW_AUTO_EXPANSION
	if (_auto_node_expansion || _auto_pairing_expansion) {
		BVHABB_CLASS world_bound;
		world_bound.set_to_max_opposite_extents();

		bool bound_valid = false;

		for (int n = 0; n < NUM_TREES; n++) {
			uint32_t node_id = _root_node_id[n];
			if (node_id != BVHCommon::INVALID) {
				world_bound.merge(_nodes[node_id].aabb);
				bound_valid = true;
			}
		}

		// if there are no nodes, do nothing, but if there are...
		if (bound_valid) {
			BOUNDS bb;
			world_bound.to(bb);
			real_t size = bb.get_longest_axis_size();

			// automatic heuristic decision for the best parameters.
			// These can be overridden in project settings.

			// these magic numbers are determined by experiment
			if (_auto_node_expansion) {
				_node_expansion = size * 0.025;
			}

			if (_auto_pairing_expansion) {
				_pairing_expansion = size * 0.009;
			}
		}
	}
#endif
}
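
// Illustrative usage (hypothetical names): update() is intended to be called
// once per frame / physics tick, driving the incremental optimization above.
//
//	void MyWorld::step() {
//		bvh.update();
//	}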
void params_set_pairing_expansion(real_t p_value) {
	if (p_value < 0.0) {
#ifdef BVH_ALLOW_AUTO_EXPANSION
		_auto_pairing_expansion = true;
#endif
		return;
	}
#ifdef BVH_ALLOW_AUTO_EXPANSION
	_auto_pairing_expansion = false;
#endif

	_pairing_expansion = p_value;

	// calculate the shrinkage threshold
	const real_t fudge_factor = 1.1;
	_aabb_shrinkage_threshold = _pairing_expansion * POINT::AXIS_COUNT * 2.0 * fudge_factor;
}
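
// Worked example of the threshold (assuming 3D, so POINT::AXIS_COUNT == 3):
// with _pairing_expansion = 0.1, the threshold is
// 0.1 * 3 * 2.0 * 1.1 = 0.66, i.e. the sum of the AABB's axis sizes must
// shrink by at least 0.66 units before a new expanded bound is created
// (see expanded_aabb_encloses_not_shrink() below).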
// This routine is not just an enclose check, it also checks for the special case of shrinkage
bool expanded_aabb_encloses_not_shrink(const BOUNDS &p_expanded_aabb, const BOUNDS &p_aabb) const {
	if (!p_expanded_aabb.encloses(p_aabb)) {
		return false;
	}

	// Check for the special case of shrinkage. If the aabb has shrunk
	// significantly we want to create a new expanded bound, because
	// the previous expanded bound will have diverged significantly.
	const POINT &exp_size = p_expanded_aabb.size;
	const POINT &new_size = p_aabb.size;

	real_t exp_l = 0.0;
	real_t new_l = 0.0;

	for (int i = 0; i < POINT::AXIS_COUNT; ++i) {
		exp_l += exp_size[i];
		new_l += new_size[i];
	}

	// is the difference above the shrinkage threshold?
	real_t diff = exp_l - new_l;
	if (diff < _aabb_shrinkage_threshold) {
		return true;
	}

	return false;
}
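
// Worked example (3D, illustrative numbers): with an expanded bound of size
// (2, 2, 2) and a new bound of size (1, 1, 1), exp_l = 6 and new_l = 3, so
// diff = 3. Against the threshold of 0.66 from the example above, 3 >= 0.66,
// so the function returns false and the caller creates a fresh expanded bound.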