bvh_structs.inc

public:
    struct ItemRef {
        uint32_t tnode_id; // -1 is invalid
        uint32_t item_id; // in the leaf

        bool is_active() const { return tnode_id != BVHCommon::INACTIVE; }

        void set_inactive() {
            tnode_id = BVHCommon::INACTIVE;
            item_id = BVHCommon::INACTIVE;
        }
    };
    // Extra info is kept in a separate list parallel to the references,
    // as it is accessed less often and this keeps the reference list cache-friendly.
    struct ItemExtra {
        // Before doing user defined pairing checks (especially in the find_leavers function),
        // we may want to check that two items have compatible tree ids and tree masks,
        // as if they are incompatible they should not pair / collide.
        bool are_item_trees_compatible(const ItemExtra &p_other) const {
            uint32_t other_type = 1 << p_other.tree_id;
            if (tree_collision_mask & other_type) {
                return true;
            }
            uint32_t our_type = 1 << tree_id;
            if (p_other.tree_collision_mask & our_type) {
                return true;
            }
            return false;
        }
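        // Worked example (illustrative values, not from the engine): an item in tree 0
        // with tree_collision_mask 0b10 pairs with an item in tree 1 even if that item's
        // own mask is 0, because the check above passes whenever either side's mask
        // contains the other side's tree bit.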
        // There can be multiple user defined trees.
        uint32_t tree_id;

        // Defines which trees this item should collision check against.
        // Normally 1 << tree_id, so items collide against their own
        // tree (but not always).
        uint32_t tree_collision_mask;

        uint32_t last_updated_tick;
        int32_t subindex;

        T *userdata;

        // The active references are kept in a separate list so that we can
        // slowly iterate through them over many frames for the incremental optimize.
        uint32_t active_ref_id;
    };
    // tree leaf
    struct TLeaf {
        uint16_t num_items;

    private:
        uint16_t dirty;
        // separate data-oriented lists for faster SIMD traversal
        uint32_t item_ref_ids[MAX_ITEMS];
        BVHABB_CLASS aabbs[MAX_ITEMS];

    public:
        // accessors
        BVHABB_CLASS &get_aabb(uint32_t p_id) {
            BVH_ASSERT(p_id < MAX_ITEMS);
            return aabbs[p_id];
        }
        const BVHABB_CLASS &get_aabb(uint32_t p_id) const {
            BVH_ASSERT(p_id < MAX_ITEMS);
            return aabbs[p_id];
        }
        uint32_t &get_item_ref_id(uint32_t p_id) {
            BVH_ASSERT(p_id < MAX_ITEMS);
            return item_ref_ids[p_id];
        }
        const uint32_t &get_item_ref_id(uint32_t p_id) const {
            BVH_ASSERT(p_id < MAX_ITEMS);
            return item_ref_ids[p_id];
        }

        bool is_dirty() const { return dirty; }
        void set_dirty(bool p) { dirty = p; }

        void clear() {
            num_items = 0;
            set_dirty(true);
        }
        bool is_full() const { return num_items >= MAX_ITEMS; }
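        // Swap-remove: the last item is moved into the vacated slot, so removal is
        // O(1) but the order of items within the leaf is not preserved.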
        void remove_item_unordered(uint32_t p_id) {
            BVH_ASSERT(p_id < num_items);
            num_items--;
            aabbs[p_id] = aabbs[num_items];
            item_ref_ids[p_id] = item_ref_ids[num_items];
        }
        uint32_t request_item() {
            if (num_items < MAX_ITEMS) {
                uint32_t id = num_items;
                num_items++;
                return id;
            }
#ifdef DEV_ENABLED
            return -1;
#else
            ERR_FAIL_V_MSG(0, "BVH request_item error.");
#endif
        }
    };
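    // Illustrative use of TLeaf by the owning tree (the calling code is not part of
    // this file): request a slot, then fill the parallel arrays for that slot.
    //   uint32_t slot = leaf.request_item();
    //   leaf.get_aabb(slot) = item_aabb;
    //   leaf.get_item_ref_id(slot) = ref_id;
    //   leaf.set_dirty(true);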
    // tree node
    struct TNode {
        BVHABB_CLASS aabb;
        // either number of children if positive
        // or leaf id if negative (leaf id 0 is disallowed)
        union {
            int32_t num_children;
            int32_t neg_leaf_id;
        };
        uint32_t parent_id; // or -1
        uint16_t children[MAX_CHILDREN];

        // height in the tree, where leaves are 0 and all nodes above are 1+
        // (a node is one level above its tallest child)
        int32_t height;

        bool is_leaf() const { return num_children < 0; }
        void set_leaf_id(int id) { neg_leaf_id = -id; }
        int get_leaf_id() const { return -neg_leaf_id; }
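        // For example, a node referencing leaf 3 stores neg_leaf_id = -3; since an
        // internal node's num_children is never negative, the sign alone distinguishes
        // the two union members (which is why leaf id 0 is disallowed).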
        void clear() {
            num_children = 0;
            parent_id = BVHCommon::INVALID;
            height = 0; // or -1 for testing

            // for safety set to improbable value
            aabb.set_to_max_opposite_extents();

            // other members are not blanked for speed .. they may be uninitialized
        }

        bool is_full_of_children() const { return num_children >= MAX_CHILDREN; }

        void remove_child_internal(uint32_t child_num) {
            children[child_num] = children[num_children - 1];
            num_children--;
        }

        int find_child(uint32_t p_child_node_id) {
            BVH_ASSERT(!is_leaf());

            for (int n = 0; n < num_children; n++) {
                if (children[n] == p_child_node_id) {
                    return n;
                }
            }

            // not found
            return -1;
        }
    };
    // Instead of using a linked list we maintain
    // item references (for quick lookup).
    PooledList<ItemRef, uint32_t, true> _refs;
    PooledList<ItemExtra, uint32_t, true> _extra;
    PooledList<ItemPairs> _pairs;

    // These two are not in sync .. nodes != leaves!
    PooledList<TNode, uint32_t, true> _nodes;
    PooledList<TLeaf, uint32_t, true> _leaves;

    // We maintain an unordered list of which references are active,
    // in order to do a slow incremental optimize of the tree over each frame.
    // This works best if dynamic objects and static objects are in different trees.
    LocalVector<uint32_t, uint32_t, true> _active_refs;
    uint32_t _current_active_ref = 0;

    // Instead of translating directly to the userdata output,
    // we keep an intermediate list of hits as reference IDs, which can be used
    // for pairing collision detection.
    LocalVector<uint32_t, uint32_t, true> _cull_hits;
    // We can now have a user definable number of trees.
    // This allows using e.g. a non-pairable and a pairable tree,
    // which can be more efficient if, for example, we only need to check the non-pairable tree against the pairable tree.
    // It may also be more efficient in terms of separating static from dynamic objects, by reducing housekeeping.
    // However this is a trade-off, as there is a cost to traversing two trees.
    uint32_t _root_node_id[NUM_TREES];
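    // Illustrative setup (example values, not fixed by this file): with NUM_TREES == 2,
    // tree 0 could hold static (non-pairable) items and tree 1 dynamic (pairable) items;
    // a dynamic item's tree_collision_mask of (1 << 0) | (1 << 1) pairs it against both
    // trees, while static items need no mask bits of their own thanks to the symmetric
    // check in are_item_trees_compatible().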
    // These values may need tweaking according to the project:
    // the bound of the world, and the average velocities of the objects.
    // Node expansion is important in the rendering tree;
    // larger values give less re-insertion as items move...
    // but on the other hand overestimate the bounding boxes of nodes.
    // We can either use auto mode, where the expansion is based on the root node size, or specify manually.
    real_t _node_expansion = 0.5;
    bool _auto_node_expansion = true;
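    // Illustrative effect (not a literal formula from this file): with a _node_expansion
    // of 0.5, an item's stored bound is grown by roughly that margin, so it can move
    // a little within the expanded bound before the tree must re-insert it.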
    // Pairing expansion is important for physics pairing;
    // larger values give more 'sticky' pairing, and are less likely to exhibit tunneling.
    // We can either use auto mode, where the expansion is based on the root node size, or specify manually.
    real_t _pairing_expansion = 0.1;

#ifdef BVH_ALLOW_AUTO_EXPANSION
    bool _auto_pairing_expansion = true;
#endif
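    // Illustrative effect (mechanism described here, not a formula from this file):
    // pairs are formed against bounds expanded by _pairing_expansion, so two items
    // that drift slightly apart stay paired until they separate by more than the
    // expansion margin, which is the 'sticky' behaviour mentioned above.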
    // When using an expanded bound, we must detect the condition where a new AABB
    // is significantly smaller than the expanded bound, as this is a special case where we
    // should override the optimization and create a new expanded bound.
    // This threshold is derived from _pairing_expansion, and should be recalculated
    // if _pairing_expansion is changed.
    real_t _aabb_shrinkage_threshold = 0.0;