// -*- mode: c++; coding: utf-8 -*-
/// @file big.hh
/// @brief Arrays with dynamic size.

// (c) Daniel Llorens - 2013-2014, 2017-2020
// This library is free software; you can redistribute it and/or modify it under
// the terms of the GNU Lesser General Public License as published by the Free
// Software Foundation; either version 3 of the License, or (at your option) any
// later version.

#pragma once
#include "ra/small.hh"
#include <memory>
#include <iostream>

namespace ra {

// Dope vector element
struct Dim { dim_t size, stride; };

// For debugging
inline std::ostream & operator<<(std::ostream & o, Dim const & dim)
{ o << "[Dim " << dim.size << " " << dim.stride << "]"; return o; }

// --------------------
// nested braces for Container initializers
// --------------------

// Avoid clash of T with scalar constructors (for rank 0).
template <class T, rank_t rank>
struct nested_braces { using list = no_arg; };

template <class T, rank_t rank>
requires (rank==1)
struct nested_braces<T, rank>
{
    using list = std::initializer_list<T>;
    template <size_t N> constexpr static
    void shape(list l, std::array<dim_t, N> & s)
    {
        static_assert(N>0);
        s[N-1] = l.size();
    }
};

template <class T, rank_t rank>
requires (rank>1)
struct nested_braces<T, rank>
{
    using sub = nested_braces<T, rank-1>;
    using list = std::initializer_list<typename sub::list>;
    template <size_t N> constexpr static
    void shape(list l, std::array<dim_t, N> & s)
    {
        s[N-rank] = l.size();
        if (l.size()>0) {
            sub::template shape(*(l.begin()), s);
        } else {
            std::fill(s.begin()+N-rank+1, s.end(), 0);
        }
    }
};
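// Example (sketch, not part of the library): for T=int and rank=2,
// nested_braces<int, 2>::list is std::initializer_list<std::initializer_list<int>>,
// so a braced argument such as {{1, 2, 3}, {4, 5, 6}} can be passed to shape(),
// which would fill a std::array<dim_t, 2> with {2, 3} (row count, then the size of
// the first row).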
// --------------------
// Develop indices for Big
// --------------------

struct Indexer1
{
    template <class Dim, class P>
    constexpr static dim_t index_p(Dim const & dim, P && p)
    {
        RA_CHECK(dim_t(dim.size())>=start(p).size(0), "too many indices");
        // use dim.data() to skip the size check.
        dim_t c = 0;
        for_each([&c](auto && d, auto && p) { RA_CHECK(inside(p, d.size)); c += d.stride*p; },
                 ptr(dim.data()), p);
        return c;
    }
    // used by cell_iterator::at() for rank matching on rank<driving rank, no slicing. TODO Static check.
    template <class Dim, class P>
    constexpr static dim_t index_short(rank_t framer, Dim const & dim, P const & p)
    {
        dim_t c = 0;
        for (rank_t k=0; k<framer; ++k) {
            RA_CHECK(inside(p[k], dim[k].size) || (dim[k].size==DIM_BAD && dim[k].stride==0));
            c += dim[k].stride * p[k];
        }
        return c;
    }
};
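// Example (sketch): index_p() develops a flat offset by summing stride*index over
// the axes. For dims {Dim{2, 3}, Dim{3, 1}} (a 2x3 C-order view) and indices {1, 2},
// it would return 3*1 + 1*2 = 5.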
// --------------------
// Big iterator
// --------------------
// TODO Refactor with cell_iterator_small for SmallView?

// V is View. FIXME Parameterize? apparently only for order-of-decl.
template <class V, rank_t cellr_=0>
struct cell_iterator
{
    constexpr static rank_t cellr_spec = cellr_;
    static_assert(cellr_spec!=RANK_ANY && cellr_spec!=RANK_BAD, "bad cell rank");
    constexpr static rank_t fullr = ra::rank_s<V>();
    constexpr static rank_t cellr = dependent_cell_rank(fullr, cellr_spec);
    constexpr static rank_t framer = dependent_frame_rank(fullr, cellr_spec);
    static_assert(cellr>=0 || cellr==RANK_ANY, "bad cell rank");
    static_assert(framer>=0 || framer==RANK_ANY, "bad frame rank");
    static_assert(fullr==cellr || gt_rank(fullr, cellr), "bad cell rank");

    using Dimv_ = typename std::decay_t<V>::Dimv;
    using P = typename std::decay_t<V>::P;
    using Dimv = std::conditional_t<std::is_lvalue_reference_v<V>, Dimv_ const &, Dimv_>;
    Dimv dim;

    constexpr static rank_t rank_s() { return framer; }
    constexpr rank_t rank() const { return dependent_frame_rank(rank_t(dim.size()), cellr_spec); }

    using shape_type = std::conditional_t<rank_s()==DIM_ANY, std::vector<dim_t>,
                                          Small<dim_t, rank_s()==DIM_ANY ? 0 : rank_s()>>; // still needs protection :-/
    using atom_type = std::remove_reference_t<decltype(*(std::declval<V>().data()))>;
    using cell_type = View<P, cellr>;
    using value_type = std::conditional_t<0==cellr, atom_type, cell_type>;

    cell_type c;

    constexpr cell_iterator(cell_iterator const & ci): dim(ci.dim), c { ci.c.dim, ci.c.p } {}
    // s_ is array's full shape; split it into dim/i (frame) and c (cell).
    constexpr cell_iterator(Dimv const & dim_, atom_type * p_): dim(dim_)
    {
        rank_t rank = this->rank();
        // see stl_iterator for the case of dim_[0]=0, etc. [ra12].
        c.p = p_;
        rank_t cellr = dependent_cell_rank(rank_t(dim.size()), cellr_spec);
        resize(c.dim, cellr);
        for (int k=0; k<cellr; ++k) {
            c.dim[k] = dim_[k+rank];
        }
    }

    constexpr static dim_t size_s(int i) { /* RA_CHECK(inside(k, rank())); */ return DIM_ANY; }
    constexpr dim_t size(int k) const { RA_CHECK(inside(k, rank())); return dim[k].size; }
    constexpr dim_t stride(int k) const { return k<rank() ? dim[k].stride : 0; }
    // FIXME handle z or j over rank()? check cell_iterator_small versions.
    constexpr bool keep_stride(dim_t st, int z, int j) const { return st*stride(z)==stride(j); }
    constexpr void adv(rank_t k, dim_t d) { c.p += stride(k)*d; }

    constexpr
    auto flat() const
    {
        if constexpr (0==cellr) {
            return c.p;
        } else {
            return CellFlat<cell_type> { c };
        }
    }
    // Return type to allow either View & or View const & verb. Can't set self b/c original p isn't kept. TODO Think this over.
    template <class I> constexpr
    decltype(auto) at(I const & i_)
    {
        RA_CHECK(rank()<=dim_t(i_.size()), "too few indices");
        if constexpr (0==cellr) {
            return c.p[Indexer1::index_short(rank(), dim, i_)];
        } else {
            return cell_type { c.dim, c.p+Indexer1::index_short(rank(), dim, i_) };
        }
    }

    FOR_EACH(RA_DEF_ASSIGNOPS, =, *=, +=, -=, /=)
};
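// Example (sketch): View::iter<c>() below returns a cell_iterator that traverses the
// first rank()-c axes (the frame) and yields rank-c cells. On a rank-2 view, iter<1>()
// would visit each row as a rank-1 View, while iter<0>() (the default) visits scalars.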
// --------------------
// Indexing views
// --------------------

// Always C order. If you need another, transpose this.
// Works on dim vector with sizes already assigned, so that I can work from an expr. Not pretty though.
template <class D>
dim_t filldim(int n, D dend)
{
    dim_t next = 1;
    for (; n>0; --n) {
        --dend;
        RA_CHECK((*dend).size>=0, "bad dim", (*dend).size);
        (*dend).stride = next;
        next *= (*dend).size;
    }
    return next;
}

template <class D>
dim_t proddim(D d, D dend)
{
    dim_t t = 1;
    for (; d!=dend; ++d) {
        t *= (*d).size;
    }
    return t;
}
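// Example (sketch): filldim() assigns C-order (row-major) strides from the last axis
// backwards. For sizes {2, 3, 4} it would set strides {12, 4, 1} and return the total
// size 24; proddim() over the same dims also gives 24.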
// allow conversion only from T * to T const *.
// FIXME constrain the members instead of using no_arg.
template <class P> struct sub_const { using type = no_arg; };
template <class P> struct add_const { using type = P; };
template <class P> using nop_t = P;
template <class P> using sub_const_t = sub_const<P>::type;
template <class P> using add_const_t = add_const<P>::type;
template <class T> struct sub_const<T const *> { using type = T *; };
template <class T> requires (!std::is_const_v<T>) struct add_const<T *> { using type = T const *; };

// raw <- shared; raw <- unique; shared <-- unique.
// layout is
// [data] (fixed shape)
// [size] p -> [data...] (fixed rank)
// [rank] [size...] p -> [data...] (var rank)
// TODO size is immutable so that it can be kept together with rank.

#define DEF_ITERATORS(RANK) \
    template <rank_t c=0> constexpr auto iter() && { return ra::cell_iterator<View<P, RANK>, c>(std::move(dim), p); } \
    template <rank_t c=0> constexpr auto iter() & { return ra::cell_iterator<View<P, RANK> &, c>(dim, p); } \
    template <rank_t c=0> constexpr auto iter() const & { return ra::cell_iterator<View<add_const_t<P>, RANK> &, c>(dim, p); } \
    constexpr auto begin() const { return stl_iterator(iter()); } \
    constexpr auto begin() { return stl_iterator(iter()); } \
    /* here dim doesn't matter, but we have to give it if it's a ref. */ \
    constexpr auto end() const { return stl_iterator(decltype(iter())(dim, nullptr)); } \
    constexpr auto end() { return stl_iterator(decltype(iter())(dim, nullptr)); }

// See same thing for SmallBase.
#define DEF_ASSIGNOPS(OP) \
    template <class X> View & operator OP (X && x) { ra::start(*this) OP x; return *this; }

#define DEF_VIEW_COMMON(RANK) \
    FOR_EACH(DEF_ASSIGNOPS, =, *=, +=, -=, /=) \
    /* Constructors using pointers need extra care */ \
    constexpr View(): p(nullptr) {} \
    constexpr View(Dimv const & dim_, P p_): dim(dim_), p(p_) {} /* [ra36] */ \
    template <class SS> \
    View(SS && s, P p_): p(p_) \
    { \
        if constexpr (std::is_convertible_v<value_t<SS>, Dim>) { \
            ra::resize(View::dim, start(s).size(0)); /* [ra37] */ \
            start(View::dim) = s; \
        } else { \
            ra::resize(View::dim, start(s).size(0)); /* [ra37] */ \
            for_each([](Dim & dim, auto && s) { dim.size = s; }, View::dim, s); \
            filldim(View::dim.size(), View::dim.end()); \
        } \
    } \
    View(std::initializer_list<dim_t> s, P p_): View(start(s), p_) {} \
    /* lack of these causes runtime bug [ra38] FIXME why? */ \
    View(View && x) = default; \
    View(View const & x) = default; \
    /* declaring View(View &&) deletes this, so we need to repeat it [ra34] */ \
    View & operator=(View const & x) \
    { \
        ra::start(*this) = x; \
        return *this; \
    } \
    /* array type is not deduced by (X &&) */ \
    View & operator=(typename nested_braces<T, RANK>::list x) \
    { \
        ra::iter<-1>(*this) = x; \
        return *this; \
    } \
    /* braces row-major ravel for rank!=1 */ \
    using ravel_arg = std::conditional_t<RANK==1, no_arg, std::initializer_list<T>>; \
    View & operator=(ravel_arg const x) \
    { \
        RA_CHECK(p && this->size()==ra::dim_t(x.size()), "bad assignment"); \
        std::copy(x.begin(), x.end(), this->begin()); \
        return *this; \
    } \
    bool const empty() const { return 0==size(); } /* TODO Optimize */

inline dim_t select(Dim * dim, Dim const * dim_src, dim_t i)
{
    RA_CHECK(inside(i, dim_src->size), " i ", i, " size ", dim_src->size);
    return dim_src->stride*i;
}

template <class II>
inline dim_t select(Dim * dim, Dim const * dim_src, ra::Iota<II> i)
{
    RA_CHECK((inside(i.i_, dim_src->size) && inside(i.i_+(i.size_-1)*i.stride_, dim_src->size))
             || (i.size_==0 && i.i_<=dim_src->size));
    dim->size = i.size_;
    dim->stride = dim_src->stride * i.stride_;
    return dim_src->stride*i.i_;
}

template <class I0, class ... I>
inline dim_t select_loop(Dim * dim, Dim const * dim_src, I0 && i0, I && ... i)
{
    return select(dim, dim_src, std::forward<I0>(i0))
        + select_loop(dim+is_beatable<I0>::skip, dim_src+is_beatable<I0>::skip_src, std::forward<I>(i) ...);
}

template <int n, class ... I>
inline dim_t select_loop(Dim * dim, Dim const * dim_src, dots_t<n> dots, I && ... i)
{
    for (Dim * end = dim+n; dim!=end; ++dim, ++dim_src) {
        *dim = *dim_src;
    }
    return select_loop(dim, dim_src, std::forward<I>(i) ...);
}

template <int n, class ... I>
inline dim_t select_loop(Dim * dim, Dim const * dim_src, insert_t<n> insert, I && ... i)
{
    for (Dim * end = dim+n; dim!=end; ++dim) {
        dim->size = DIM_BAD;
        dim->stride = 0;
    }
    return select_loop(dim, dim_src, std::forward<I>(i) ...);
}

inline dim_t select_loop(Dim * dim, Dim const * dim_src)
{
    return 0;
}
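// Example (sketch): select_loop() consumes "beatable" subscripts one at a time. An
// integer subscript contributes stride*i to the data offset and drops an axis; an Iota
// subscript keeps an axis, writing {i.size_, stride*i.stride_} into the destination Dim;
// dots_t<n> copies n axes unchanged; insert_t<n> creates n broadcast axes
// (size DIM_BAD, stride 0).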
// --------------------
// View for fixed rank
// --------------------
// TODO Parameterize on Child having .data() so that there's only one pointer.
// TODO A constructor, if only for RA_CHECK (positive sizes, strides inside, etc.)

template <std::random_access_iterator P_, rank_t RANK> // FIXME [ra27]
struct View
{
    using Dimv = Small<Dim, RANK>;
    using P = P_;
    Dimv dim;
    P p;
    using T = std::remove_reference_t<decltype(*p)>;

    constexpr static rank_t rank_s() { return RANK; };
    constexpr static rank_t rank() { return RANK; }
    constexpr static dim_t size_s(int j) { return DIM_ANY; }
    constexpr dim_t size() const { return proddim(dim.begin(), dim.end()); }
    constexpr dim_t size(int j) const { return dim[j].size; }
    constexpr dim_t stride(int j) const { return dim[j].stride; }
    constexpr auto data() const { return p; }

// Specialize for rank() integer-args -> scalar, same in ra::SmallBase in small.hh.
#define SUBSCRIPTS(CONST, ADD_CONST) \
    template <class ... I> \
    requires ((0 + ... + std::is_integral_v<std::decay_t<I>>)<RANK \
              && (0 + ... + is_beatable<I>::value)==sizeof...(I)) \
    auto operator()(I && ... i) CONST \
    { \
        constexpr rank_t extended = (0 + ... + (is_beatable<I>::skip-is_beatable<I>::skip_src)); \
        constexpr rank_t subrank = rank_sum(RANK, extended); \
        static_assert(subrank>=0, "bad subrank"); \
        View<ADD_CONST<P>, subrank> sub; \
        sub.p = data() + select_loop(sub.dim.data(), this->dim.data(), i ...); \
        /* fill the rest of dim, skipping over beatable subscripts */ \
        for (int i=(0 + ... + is_beatable<I>::skip); i<subrank; ++i) { \
            sub.dim[i] = this->dim[i-extended]; \
        } \
        return sub; \
    } \
    /* BUG doesn't handle generic zero-rank indices */ \
    template <class ... I> \
    requires ((0 + ... + std::is_integral_v<I>)==RANK) \
    decltype(auto) operator()(I const & ... i) CONST \
    { \
        return data()[select_loop(nullptr, this->dim.data(), i ...)]; \
    } \
    /* TODO > 1 selector... This still covers (unbeatable, integer) for example, which could be reduced. */ \
    template <class ... I> \
    requires (!(is_beatable<I>::value && ...)) \
    decltype(auto) operator()(I && ... i) CONST \
    { \
        return from(*this, std::forward<I>(i) ...); \
    } \
    template <class I> \
    decltype(auto) at(I && i) CONST \
    { \
        constexpr rank_t subrank = rank_diff(RANK, ra::size_s<I>()); /* gcc accepts i.size() */ \
        using Sub = View<ADD_CONST<P>, subrank>; \
        if constexpr (subrank==RANK_ANY) { \
            return Sub { typename Sub::Dimv(dim.begin()+ra::size(i), dim.end()), /* Dimv is std::vector */ \
                         data() + Indexer1::index_p(dim, i) }; \
        } else { \
            return Sub { typename Sub::Dimv(ptr(dim.begin()+ra::size(i))), /* Dimv is ra::Small */ \
                         data() + Indexer1::index_p(dim, i) }; \
        } \
    } \
    constexpr decltype(auto) operator[](dim_t const i) CONST \
    { \
        return (*this)(i); \
    }

    SUBSCRIPTS(/* const */, nop_t)
    SUBSCRIPTS(const, add_const_t)
#undef SUBSCRIPTS

    DEF_ITERATORS(RANK)
    DEF_VIEW_COMMON(RANK)

    // implicit conversion from var rank [ra18]
    template <rank_t R>
    requires (R==RANK_ANY && R!=RANK)
    View(View<P, R> const & x): dim(x.dim), p(x.p) {}
    // implicit conversion from var rank non const [ra29]
    template <rank_t R>
    requires (R==RANK_ANY && R!=RANK)
    View(View<sub_const_t<P>, R> const & x): dim(x.dim), p(x.p) {}
    // conversion from non const [ra29]
    operator View<add_const_t<P>, RANK> const & () const
    {
        return *reinterpret_cast<View<add_const_t<P>, RANK> const *>(this);
    }
    // conversions to scalar.
    operator T & () { static_assert(RANK==0, "bad rank"); return data()[0]; }
    operator T const & () const { static_assert(RANK==0, "bad rank"); return data()[0]; }
};

template <class P, rank_t RANK>
struct ra_traits_def<View<P, RANK>>
{
    using V = View<P, RANK>;
    static decltype(auto) shape(V const & v) { return ra::shape(v.iter()); }
    static dim_t size(V const & v) { return v.size(); }
    constexpr static rank_t rank(V const & v) { return v.rank(); }
    constexpr static rank_t rank_s() { return RANK; };
    constexpr static dim_t size_s() { return RANK==0 ? 1 : DIM_ANY; }
};
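// Example (sketch): a fixed-rank View is a non-owning dope vector plus a pointer.
// Given double buf[6], View<double *, 2> v({2, 3}, buf) describes a 2x3 row-major
// view over buf (strides {3, 1} set by filldim); v(1, 2) is buf[5], v(1) is a rank-1
// view of row 1, and v(ra::all, 0) would select the first column.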
// --------------------
// View for var rank
// --------------------

template <std::random_access_iterator P_> // FIXME [ra27]
struct View<P_, RANK_ANY>
{
    using Dimv = std::vector<Dim>; // hmm
    using P = P_;
    Dimv dim;
    P p;
    using T = std::remove_reference_t<decltype(*p)>;

    constexpr static rank_t rank_s() { return RANK_ANY; }
    constexpr rank_t rank() const { return rank_t(dim.size()); }
    constexpr static dim_t size_s() { return DIM_ANY; }
    constexpr dim_t size() const { return proddim(dim.begin(), dim.end()); }
    // checking because std::vector doesn't.
    constexpr dim_t size(int const j) const { RA_CHECK(j<rank(), " j : ", j, " rank ", rank()); return dim[j].size; }
    constexpr dim_t stride(int const j) const { RA_CHECK(j<rank(), " j : ", j, " rank ", rank()); return dim[j].stride; }
    constexpr auto data() const { return p; }

// Contrary to RANK!=RANK_ANY, the scalar case cannot be separated at compile time. So operator() will return a rank 0 view in that case (and rely on conversion if, say, this ends up assigned to a scalar).
#define SUBSCRIPTS(CONST, ADD_CONST) \
    template <class ... I> \
    requires (is_beatable<I>::value && ...) \
    decltype(auto) operator()(I && ... i) CONST \
    { \
        constexpr rank_t extended = (0 + ... + (is_beatable<I>::skip-is_beatable<I>::skip_src)); \
        assert(this->rank()+extended>=0); \
        View<ADD_CONST<P>, RANK_ANY> sub; \
        sub.dim.resize(this->rank()+extended); \
        sub.p = data() + select_loop(sub.dim.data(), this->dim.data(), i ...); \
        for (int i=(0 + ... + is_beatable<I>::skip); i<sub.rank(); ++i) { \
            sub.dim[i] = this->dim[i-extended]; \
        } \
        return sub; \
    } \
    /* TODO More than one selector... */ \
    template <class ... I> \
    requires (!(is_beatable<I>::value && ...)) \
    decltype(auto) operator()(I && ... i) CONST \
    { \
        return from(*this, std::forward<I>(i) ...); \
    } \
    template <class I> \
    decltype(auto) at(I && i) CONST \
    { \
        return View<ADD_CONST<P>, RANK_ANY> { Dimv(dim.begin()+i.size(), dim.end()), /* Dimv is std::vector */ \
                                              data() + Indexer1::index_p(dim, i) }; \
    } \
    constexpr decltype(auto) operator[](dim_t const i) CONST \
    { \
        return (*this)(i); \
    }

    SUBSCRIPTS(/* const */, nop_t)
    SUBSCRIPTS(const, add_const_t)

    DEF_ITERATORS(RANK_ANY)
    DEF_VIEW_COMMON(RANK_ANY)

    // implicit conversion from fixed rank [ra18]
    template <rank_t R>
    requires (R!=RANK_ANY)
    View(View<P, R> const & x): dim(x.dim.begin(), x.dim.end()), p(x.p) {}
    // implicit conversion from fixed rank non const [ra29]
    template <rank_t R>
    requires (R!=RANK_ANY)
    View(View<sub_const_t<P>, R> const & x): dim(x.dim.begin(), x.dim.end()), p(x.p) {}
    // conversion from non const [ra29]
    operator View<add_const_t<P>> const & ()
    {
        return *reinterpret_cast<View<add_const_t<P>> const *>(this);
    }
    // conversions to scalar
    operator T & () { RA_CHECK(rank()==0, "converting rank ", rank(), " to scalar"); return data()[0]; };
    operator T const & () const { RA_CHECK(rank()==0, "converting rank ", rank(), " to scalar"); return data()[0]; };
};
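// Example (sketch): in View<T *, RANK_ANY> the dims live in a std::vector, so rank is a
// run-time property. A fixed-rank view converts to it implicitly (and back), e.g.
//   ra::View<double *, RANK_ANY> w = v;   // v: the fixed-rank View<double *, 2> above
//   assert(w.rank()==2 && w.size()==6);
// which is what lets fixed-rank and var-rank containers share the code below.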
#undef DEF_ITERATORS
#undef DEF_VIEW_COMMON
#undef DEF_ASSIGNOPS

// --------------------
// Container types
// --------------------

template <class V> struct storage_traits
{
    using T = std::decay_t<decltype(*std::declval<V>().get())>;
    static V create(dim_t n) { RA_CHECK(n>=0); return V(new T[n]); }
    static T const * data(V const & v) { return v.get(); }
    static T * data(V & v) { return v.get(); }
};

template <class T_, class A> struct storage_traits<std::vector<T_, A>>
{
    using T = T_;
    static_assert(!std::is_same_v<std::remove_const_t<T>, bool>, "No pointers to bool in std::vector<bool>");
    static std::vector<T, A> create(dim_t n) { return std::vector<T, A>(n); }
    static T const * data(std::vector<T, A> const & v) { return v.data(); }
    static T * data(std::vector<T, A> & v) { return v.data(); }
};

template <class P, rank_t RANK> inline
bool is_c_order(View<P, RANK> const & a)
{
    dim_t s = 1;
    for (int i=a.rank()-1; i>=0; --i) {
        if (s!=a.stride(i)) {
            return false;
        }
        s *= a.size(i);
        if (s==0) {
            return true;
        }
    }
    return true;
}
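// Example (sketch): storage_traits gives the owned buffer types (std::vector,
// std::unique_ptr<T[]>, std::shared_ptr<T>) a uniform create()/data() interface.
// is_c_order() checks that the strides are exactly the packed row-major ones: a freshly
// built Container (e.g. strides {12, 4, 1} for sizes {2, 3, 4}) passes, while a
// transposed view of it generally would not.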
// TODO be convertible to View only, so that View::p is not duplicated
template <class Store, rank_t RANK>
struct Container: public View<typename storage_traits<Store>::T *, RANK>
{
    Store store;
    using T = typename storage_traits<Store>::T;
    using View = ra::View<T *, RANK>;
    using View::rank_s;
    using shape_arg = typename decltype(std::declval<View>().iter())::shape_type;

    View & view() { return *this; }
    View const & view() const { return *this; }

    template <class ... A> decltype(auto) operator()(A && ... a) { return View::operator()(std::forward<A>(a) ...); }
    template <class ... A> decltype(auto) operator()(A && ... a) const { return View::operator()(std::forward<A>(a) ...); }

    // TODO Explicit definitions are needed only to have View::p set. Remove the duplication as in SmallBase/SmallArray, then remove these, both the constructors and the operator=s.
    Container(Container && w): store(std::move(w.store))
    {
        View::dim = std::move(w.dim);
        View::p = storage_traits<Store>::data(store);
    }
    Container(Container const & w): store(w.store)
    {
        View::dim = w.dim;
        View::p = storage_traits<Store>::data(store);
    }
    Container(Container & w): store(w.store)
    {
        View::dim = w.dim;
        View::p = storage_traits<Store>::data(store);
    }

    // Override View::operator= to allow initialization-of-reference. Unfortunately operator>>(std::istream &, Container &) requires it. The presence of these operator= means that A(shape 2 3) = type-of-A [1 2 3] initializes, so it doesn't behave as A(shape 2 3) = not-type-of-A [1 2 3], which will use View::operator= and frame match. See test/ownership.cc [ra20].
    // TODO do this through .set() op.
    // TODO don't require copiable T from constructors, see fill1 below. That requires initialization and not update semantics for operator=.
    Container & operator=(Container && w)
    {
        store = std::move(w.store);
        View::dim = std::move(w.dim);
        View::p = storage_traits<Store>::data(store);
        return *this;
    }
    Container & operator=(Container const & w)
    {
        store = w.store;
        View::dim = w.dim;
        View::p = storage_traits<Store>::data(store);
        return *this;
    }
    Container & operator=(Container & w)
    {
        store = w.store;
        View::dim = w.dim;
        View::p = storage_traits<Store>::data(store);
        return *this;
    }

    // Provided so that {} calls shape_arg constructor below.
    Container()
    {
        if constexpr (RANK==RANK_ANY) {
            View::dim = { Dim {0, 1} }; // rank 1 to avoid store init
        } else {
            for (Dim & dimi: View::dim) { dimi = {0, 1}; } // 1 so we can push_back()
        }
    }

    template <class S> void init(S && s)
    {
        static_assert(!std::is_convertible_v<value_t<S>, Dim>);
        // no rank extension here, because it's error prone and not very useful.
        static_assert(1==ra::rank_s<S>(), "rank mismatch for init shape");
        // [ra37] Need two parts because Dimv might be STL type. Otherwise I'd just View::dim.set(map(...)).
        if constexpr (RANK==RANK_ANY) {
            ra::resize(View::dim, ra::size(s));
        }
        for_each([](Dim & dim, auto const & s) { dim.size = s; }, View::dim, s);
        dim_t t = filldim(View::dim.size(), View::dim.end());
        store = storage_traits<Store>::create(t);
        View::p = storage_traits<Store>::data(store);
    }

    // FIXME use of fill1 requires T to be copiable, which is unfortunate as it conflicts with the semantics of view_.operator=.
    // store(x) avoids it for Big, but doesn't work for Unique. Should construct in place as std::vector does.
    template <class Pbegin> void fill1(dim_t xsize, Pbegin xbegin)
    {
        RA_CHECK(this->size()==xsize, "mismatched sizes");
        std::copy_n(xbegin, xsize, this->begin()); // TODO Use xpr traversal.
    }

    // shape_arg overloads handle {...} arguments. Size check is courtesy of conversion (if shape_arg is Small) or init().
    // explicit shape.
    Container(shape_arg const & s, none_t) { init(s); }
    template <class XX> Container(shape_arg const & s, XX && x): Container(s, none) { view() = x; }
    // shape from data.
    template <class XX> Container(XX && x): Container(ra::shape(x), none) { view() = x; }
    Container(typename nested_braces<T, RANK>::list x)
    {
        static_assert(RANK!=RANK_ANY);
        std::array<dim_t, RANK> s;
        nested_braces<T, RANK>::template shape(x, s);
        init(s);
        view() = x;
    }
    // braces row-major ravel for rank!=1
    Container(typename View::ravel_arg x)
        : Container({dim_t(x.size())}, none) { fill1(x.size(), x.begin()); }
    // shape + row-major ravel. // TODO Maybe remove these? See also small.hh.
    Container(shape_arg const & s, std::initializer_list<T> x)
        : Container(s, none) { fill1(x.size(), x.begin()); }
    template <class TT>
    Container(shape_arg const & s, TT * p)
        : Container(s, none) { fill1(this->size(), p); }
    template <class P>
    Container(shape_arg const & s, P pbegin, P pend)
        : Container(s, none) { fill1(this->size(), pbegin); }
    // these are needed when shape_arg is std::vector, since that doesn't handle conversions like Small does.
    template <class SS> Container(SS && s, none_t) { init(s); }
    template <class SS, class XX> Container(SS && s, XX && x): Container(s, none) { view() = x; }
    template <class SS> Container(SS const & s, std::initializer_list<T> x)
        : Container(s, none) { fill1(x.size(), x.begin()); }

    using View::operator=;

    // only for some kinds of store.
    void resize(dim_t const s)
    {
        static_assert(RANK==RANK_ANY || RANK>0); RA_CHECK(this->rank()>0);
        store.resize(proddim(View::dim.begin()+1, View::dim.end())*s);
        View::dim[0].size = s;
        View::p = store.data();
    }
    void resize(dim_t const s, T const & t)
    {
        static_assert(RANK==RANK_ANY || RANK>0); RA_CHECK(this->rank()>0);
        store.resize(proddim(View::dim.begin()+1, View::dim.end())*s, t);
        View::dim[0].size = s;
        View::p = store.data();
    }
    // lets us move. A template + std::forward wouldn't work for push_back(brace-enclosed-list).
    void push_back(T && t)
    {
        static_assert(RANK==1 || RANK==RANK_ANY); RA_CHECK(this->rank()==1);
        store.push_back(std::move(t));
        ++View::dim[0].size;
        View::p = store.data();
    }
    void push_back(T const & t)
    {
        static_assert(RANK==1 || RANK==RANK_ANY); RA_CHECK(this->rank()==1);
        store.push_back(t);
        ++View::dim[0].size;
        View::p = store.data();
    }
    template <class ... A>
    void emplace_back(A && ... a)
    {
        static_assert(RANK==1 || RANK==RANK_ANY); RA_CHECK(this->rank()==1);
        store.emplace_back(std::forward<A>(a) ...);
        ++View::dim[0].size;
        View::p = store.data();
    }
    void pop_back()
    {
        static_assert(RANK==1 || RANK==RANK_ANY); RA_CHECK(this->rank()==1);
        RA_CHECK(View::dim[0].size>0);
        store.pop_back();
        --View::dim[0].size;
    }
    bool empty() const
    {
        return this->size()==0;
    }
    T const & back() const { RA_CHECK(this->rank()==1 && this->size()>0); return store[this->size()-1]; }
    T & back() { RA_CHECK(this->rank()==1 && this->size()>0); return store[this->size()-1]; }

    // Container is always compact/row-major, so the rank-0 STL-like iterators can be raw pointers. TODO But .iter() should also be able to benefit from this constraint, and the check should be faster for some cases (like RANK==1) or elidable.
    auto begin() { assert(is_c_order(*this)); return this->data(); }
    auto begin() const { assert(is_c_order(*this)); return this->data(); }
    auto end() { return this->data()+this->size(); }
    auto end() const { return this->data()+this->size(); }
};
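// Example (sketch): the constructors above support several forms, e.g.
//   ra::Big<double, 2> a({2, 3}, 0.);               // explicit shape + contents (here a scalar fill)
//   ra::Big<double, 2> b = {{1, 2, 3}, {4, 5, 6}};  // nested braces, shape deduced
//   ra::Big<int, 1> c = {1, 2, 3};                  // rank-1 braces
//   c.push_back(4);                                 // rank-1 containers can grow
// a, b, c own their storage; views taken from them do not.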
template <class Store, rank_t RANK>
struct ra_traits_def<Container<Store, RANK>>
    : public ra_traits_def<View<typename Container<Store, RANK>::T *, RANK>> {};

template <class Store, rank_t RANK>
void swap(Container<Store, RANK> & a, Container<Store, RANK> & b)
{
    std::swap(a.dim, b.dim);
    std::swap(a.store, b.store);
    std::swap(a.p, b.p);
}
// Default storage for Big - see https://stackoverflow.com/a/21028912
// Allocator adaptor that interposes construct() calls to convert value initialization into default initialization.
template <typename T, typename A=std::allocator<T>>
struct default_init_allocator: public A
{
    using a_t = std::allocator_traits<A>;
    using A::A;

    template <typename U>
    struct rebind
    {
        using other = default_init_allocator<U, typename a_t::template rebind_alloc<U>>;
    };

    template <typename U>
    void construct(U * ptr) noexcept(std::is_nothrow_default_constructible<U>::value)
    {
        ::new(static_cast<void *>(ptr)) U;
    }
    template <typename U, typename... Args>
    void construct(U * ptr, Args &&... args)
    {
        a_t::construct(static_cast<A &>(*this), ptr, std::forward<Args>(args)...);
    }
};
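// Example (sketch): with this allocator, a vector created as
//   std::vector<double, default_init_allocator<double>> v(n);
// default-initializes its elements instead of zeroing them, so building a Big (below)
// from a shape alone doesn't pay for a fill that would be overwritten anyway. Types
// with non-trivial default constructors are still constructed normally.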
// Beyond this, we probably should have fixed-size (~std::dynarray), resizeable (~std::vector).
template <class T, rank_t RANK=RANK_ANY> using Big = Container<std::vector<T, default_init_allocator<T>>, RANK>;
template <class T, rank_t RANK=RANK_ANY> using Unique = Container<std::unique_ptr<T []>, RANK>;
template <class T, rank_t RANK=RANK_ANY> using Shared = Container<std::shared_ptr<T>, RANK>;
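// Example (sketch): the three aliases differ only in the owning store. Big (std::vector
// store) supports resize()/push_back(); Unique (std::unique_ptr<T[]>) is move-only;
// copying a Shared (std::shared_ptr<T>) shares the underlying buffer rather than
// duplicating it, since the Container copy constructor copies the store.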
// -------------
// Used in the Guile wrappers to allow an array parameter to either borrow from Guile
// storage or convert into a new array (e.g. passing 'f32 into 'f64).
// TODO Can use unique_ptr's deleter for this?
// TODO Shared/Unique should maybe have constructors with unique_ptr/shared_ptr args
// -------------

struct NullDeleter { template <class T> void operator()(T * p) {} };
struct Deleter { template <class T> void operator()(T * p) { delete[] p; } };

template <rank_t RANK, class T>
Shared<T, RANK> shared_borrowing(View<T *, RANK> & raw)
{
    Shared<T, RANK> a;
    a.dim = raw.dim;
    a.p = raw.p;
    a.store = std::shared_ptr<T>(raw.data(), NullDeleter());
    return a;
}

} // namespace ra
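// Example (sketch): shared_borrowing() wraps an existing View in a Shared whose store
// points at the borrowed memory with a NullDeleter, so destroying the Shared (or any
// copy of it) never frees that memory. The caller must keep the original storage alive
// for as long as the borrowed Shared is in use.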