- /*
- * These are copied from glibc/stdlib/longlong.h
- */
- #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
- do { \
- UWtype __x; \
- __x = (al) + (bl); \
- (sh) = (ah) + (bh) + (__x < (al)); \
- (sl) = __x; \
- } while (0)
- #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
- do { \
- UWtype __x; \
- __x = (al) - (bl); \
- (sh) = (ah) - (bh) - (__x > (al)); \
- (sl) = __x; \
- } while (0)
- #define umul_ppmm(w1, w0, u, v) \
- __asm__ ("dmulu.l %2,%3\n\tsts macl,%1\n\tsts mach,%0" \
- : "=r" ((u32)(w1)), "=r" ((u32)(w0)) \
- : "r" ((u32)(u)), "r" ((u32)(v)) \
- : "macl", "mach")
- #define __ll_B ((UWtype) 1 << (W_TYPE_SIZE / 2))
- #define __ll_lowpart(t) ((UWtype) (t) & (__ll_B - 1))
- #define __ll_highpart(t) ((UWtype) (t) >> (W_TYPE_SIZE / 2))
- #define udiv_qrnnd(q, r, n1, n0, d) \
- do { \
- UWtype __d1, __d0, __q1, __q0; \
- UWtype __r1, __r0, __m; \
- __d1 = __ll_highpart (d); \
- __d0 = __ll_lowpart (d); \
- \
- __r1 = (n1) % __d1; \
- __q1 = (n1) / __d1; \
- __m = (UWtype) __q1 * __d0; \
- __r1 = __r1 * __ll_B | __ll_highpart (n0); \
- if (__r1 < __m) \
- { \
- __q1--, __r1 += (d); \
- if (__r1 >= (d)) /* i.e. we didn't get carry when adding to __r1 */\
- if (__r1 < __m) \
- __q1--, __r1 += (d); \
- } \
- __r1 -= __m; \
- \
- __r0 = __r1 % __d1; \
- __q0 = __r1 / __d1; \
- __m = (UWtype) __q0 * __d0; \
- __r0 = __r0 * __ll_B | __ll_lowpart (n0); \
- if (__r0 < __m) \
- { \
- __q0--, __r0 += (d); \
- if (__r0 >= (d)) \
- if (__r0 < __m) \
- __q0--, __r0 += (d); \
- } \
- __r0 -= __m; \
- \
- (q) = (UWtype) __q1 * __ll_B | __q0; \
- (r) = __r0; \
- } while (0)
- #define abort() return 0
- #define __BYTE_ORDER __LITTLE_ENDIAN
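
For reference, add_ssaaaa and sub_ddmmss perform two-word addition and subtraction with carry/borrow propagation, umul_ppmm uses the SuperH dmulu.l instruction to form the full 32x32->64 product in MACH:MACL, and udiv_qrnnd is the generic two-word-by-one-word division scheme from longlong.h (which, like the generic fallback there, expects n1 < d and a pre-normalized divisor). The following is a minimal host-side sketch, not part of the patch, that checks the add_ssaaaa semantics against plain 64-bit arithmetic; UWtype is assumed to be a 32-bit unsigned type, and the names simply mirror the macros removed above.

/*
 * Standalone sketch: verify the two-word add against a direct
 * 64-bit computation.  Assumes a 32-bit UWtype.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t UWtype;

#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
	do { \
		UWtype __x = (al) + (bl); \
		/* (__x < (al)) is the carry out of the low-word add */ \
		(sh) = (ah) + (bh) + (__x < (al)); \
		(sl) = __x; \
	} while (0)

int main(void)
{
	UWtype ah = 0x00000001, al = 0xffffffff;
	UWtype bh = 0x00000002, bl = 0x00000001;
	UWtype sh, sl;

	add_ssaaaa(sh, sl, ah, al, bh, bl);

	/* Reference result computed directly on 64-bit values. */
	uint64_t sum = (((uint64_t)ah << 32) | al) +
		       (((uint64_t)bh << 32) | bl);

	printf("add_ssaaaa: %08x:%08x, expected %016llx\n",
	       (unsigned)sh, (unsigned)sl, (unsigned long long)sum);
	return 0;
}

On a 64-bit host this prints 00000004:00000000 against an expected 0000000400000000, i.e. the low-word carry is folded into the high word exactly as the macro intends.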