/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MATH64_H
#define _LINUX_MATH64_H

#include <linux/types.h>
#include <vdso/math64.h>
#include <asm/div64.h>

#if BITS_PER_LONG == 64

#define div64_long(x, y) div64_s64((x), (y))
#define div64_ul(x, y)   div64_u64((x), (y))

/**
 * div_u64_rem - unsigned 64bit divide with 32bit divisor and remainder
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 32bit divisor
 * @remainder: pointer to unsigned 32bit remainder
 *
 * Return: sets ``*remainder``, then returns dividend / divisor
 *
 * On 32bit archs this is commonly overridden with an arch-optimized
 * 64bit divide.
 */
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
        *remainder = dividend % divisor;
        return dividend / divisor;
}
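/*
 * Illustrative example (not part of the original header): one call yields
 * both quotient and remainder, e.g. when splitting a nanosecond count into
 * seconds and leftover nanoseconds:
 *
 *      u32 rem;
 *      u64 secs = div_u64_rem(3000000007ULL, 1000000000U, &rem);
 *
 * Here secs is 3 and rem is 7.
 */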

/**
 * div_s64_rem - signed 64bit divide with 32bit divisor and remainder
 * @dividend: signed 64bit dividend
 * @divisor: signed 32bit divisor
 * @remainder: pointer to signed 32bit remainder
 *
 * Return: sets ``*remainder``, then returns dividend / divisor
 */
static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
        *remainder = dividend % divisor;
        return dividend / divisor;
}

/**
 * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 64bit divisor
 * @remainder: pointer to unsigned 64bit remainder
 *
 * Return: sets ``*remainder``, then returns dividend / divisor
 */
static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
{
        *remainder = dividend % divisor;
        return dividend / divisor;
}

/**
 * div64_u64 - unsigned 64bit divide with 64bit divisor
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 64bit divisor
 *
 * Return: dividend / divisor
 */
static inline u64 div64_u64(u64 dividend, u64 divisor)
{
        return dividend / divisor;
}

/**
 * div64_s64 - signed 64bit divide with 64bit divisor
 * @dividend: signed 64bit dividend
 * @divisor: signed 64bit divisor
 *
 * Return: dividend / divisor
 */
static inline s64 div64_s64(s64 dividend, s64 divisor)
{
        return dividend / divisor;
}

#elif BITS_PER_LONG == 32

#define div64_long(x, y) div_s64((x), (y))
#define div64_ul(x, y)   div_u64((x), (y))

#ifndef div_u64_rem
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
        *remainder = do_div(dividend, divisor);
        return dividend;
}
#endif
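/*
 * do_div(n, base) divides n in place and evaluates to the remainder,
 * which is why the fallback above returns the modified dividend.
 */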

#ifndef div_s64_rem
extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
#endif

#ifndef div64_u64_rem
extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
#endif

#ifndef div64_u64
extern u64 div64_u64(u64 dividend, u64 divisor);
#endif

#ifndef div64_s64
extern s64 div64_s64(s64 dividend, s64 divisor);
#endif

#endif /* BITS_PER_LONG */

/**
 * div_u64 - unsigned 64bit divide with 32bit divisor
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 32bit divisor
 *
 * Return: dividend / divisor
 *
 * This is the most common 64bit divide and should be used if possible,
 * as many 32bit archs can optimize this variant better than a full
 * 64bit divide.
 */
#ifndef div_u64
static inline u64 div_u64(u64 dividend, u32 divisor)
{
        u32 remainder;
        return div_u64_rem(dividend, divisor, &remainder);
}
#endif
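/*
 * Illustrative usage (hypothetical variable names): dividing a 64bit
 * count by a small 32bit constant, e.g. microseconds to milliseconds:
 *
 *      u64 msecs = div_u64(usecs, 1000);
 */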

/**
 * div_s64 - signed 64bit divide with 32bit divisor
 * @dividend: signed 64bit dividend
 * @divisor: signed 32bit divisor
 *
 * Return: dividend / divisor
 */
#ifndef div_s64
static inline s64 div_s64(s64 dividend, s32 divisor)
{
        s32 remainder;
        return div_s64_rem(dividend, divisor, &remainder);
}
#endif

u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);
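/*
 * iter_div_u64_rem() is defined out of line; as the name suggests it
 * divides by iterative subtraction, which is only worthwhile when the
 * quotient is expected to be small.
 */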

#ifndef mul_u32_u32
/*
 * Many a GCC version messes this up and generates a 64x64 mult :-(
 */
static inline u64 mul_u32_u32(u32 a, u32 b)
{
        return (u64)a * b;
}
#endif
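/*
 * Illustrative example: the (u64) cast must happen before the multiply.
 * With plain 32bit arithmetic 0x80000000U * 2 wraps to 0, whereas
 * mul_u32_u32(0x80000000U, 2) yields 0x100000000ULL.
 */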

#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)

#ifndef mul_u64_u32_shr
static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
        return (u64)(((unsigned __int128)a * mul) >> shift);
}
#endif /* mul_u64_u32_shr */
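/*
 * Illustrative use of the (a * mul) >> shift pattern (hypothetical
 * cycles/mul/shift values): fixed-point scaling, e.g. converting timer
 * cycles to nanoseconds with a precomputed mul/shift pair:
 *
 *      u64 ns = mul_u64_u32_shr(cycles, mul, shift);
 */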

#ifndef mul_u64_u64_shr
static inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
{
        return (u64)(((unsigned __int128)a * mul) >> shift);
}
#endif /* mul_u64_u64_shr */

#else

#ifndef mul_u64_u32_shr
static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
        u32 ah, al;
        u64 ret;

        al = a;
        ah = a >> 32;

        ret = mul_u32_u32(al, mul) >> shift;
        if (ah)
                ret += mul_u32_u32(ah, mul) << (32 - shift);

        return ret;
}
#endif /* mul_u64_u32_shr */
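/*
 * Worked example of the fallback's high/low split (illustrative): with
 * a = 0x100000001 (ah = 1, al = 1), mul = 6 and shift = 1,
 * (al * mul) >> 1 is 3 and (ah * mul) << 31 is 0x300000000, summing to
 * 0x300000003 == (0x600000006 >> 1). The identity only holds for
 * shift <= 32, since "<< (32 - shift)" is undefined otherwise.
 */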

#ifndef mul_u64_u64_shr
static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift)
{
        union {
                u64 ll;
                struct {
#ifdef __BIG_ENDIAN
                        u32 high, low;
#else
                        u32 low, high;
#endif
                } l;
        } rl, rm, rn, rh, a0, b0;
        u64 c;

        a0.ll = a;
        b0.ll = b;

        rl.ll = mul_u32_u32(a0.l.low, b0.l.low);
        rm.ll = mul_u32_u32(a0.l.low, b0.l.high);
        rn.ll = mul_u32_u32(a0.l.high, b0.l.low);
        rh.ll = mul_u32_u32(a0.l.high, b0.l.high);

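        /*
         * Schoolbook long multiplication in base 2^32:
         * a * b == (rh.ll << 64) + ((rm.ll + rn.ll) << 32) + rl.ll
         */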
        /*
         * Each of these lines computes a 64-bit intermediate result into "c",
         * starting at bits 32-95.  The low 32-bits go into the result of the
         * multiplication, the high 32-bits are carried into the next step.
         */
        rl.l.high = c = (u64)rl.l.high + rm.l.low + rn.l.low;
        rh.l.low = c = (c >> 32) + rm.l.high + rn.l.high + rh.l.low;
        rh.l.high = (c >> 32) + rh.l.high;

        /*
         * The 128-bit result of the multiplication is in rl.ll and rh.ll,
         * shift it right and throw away the high part of the result.
         */
        if (shift == 0)
                return rl.ll;
        if (shift < 64)
                return (rl.ll >> shift) | (rh.ll << (64 - shift));
        return rh.ll >> (shift & 63);
}
#endif /* mul_u64_u64_shr */

#endif

#ifndef mul_u64_u32_div
static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
{
        union {
                u64 ll;
                struct {
#ifdef __BIG_ENDIAN
                        u32 high, low;
#else
                        u32 low, high;
#endif
                } l;
        } u, rl, rh;

        u.ll = a;
        rl.ll = mul_u32_u32(u.l.low, mul);
        rh.ll = mul_u32_u32(u.l.high, mul) + rl.l.high;

        /* Bits 32-63 of the result will be in rh.l.low. */
        rl.l.high = do_div(rh.ll, divisor);

        /* Bits 0-31 of the result will be in rl.l.low. */
        do_div(rl.ll, divisor);

        rl.l.high = rh.l.low;
        return rl.ll;
}
#endif /* mul_u64_u32_div */
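/*
 * Illustrative usage (hypothetical names): scale a 64bit value by a
 * 32bit numerator/denominator pair while keeping the full 96bit
 * intermediate product:
 *
 *      u64 scaled = mul_u64_u32_div(value, num, den);
 */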

u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div);

#define DIV64_U64_ROUND_UP(ll, d)       \
        ({ u64 _tmp = (d); div64_u64((ll) + _tmp - 1, _tmp); })
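/*
 * Example: DIV64_U64_ROUND_UP(10, 4) evaluates to 3, i.e.
 * (10 + 4 - 1) / 4 == 13 / 4 == 3 == ceil(10 / 4).
 */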

/**
 * DIV64_U64_ROUND_CLOSEST - unsigned 64bit divide with 64bit divisor rounded to nearest integer
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 64bit divisor
 *
 * Divide unsigned 64bit dividend by unsigned 64bit divisor
 * and round to closest integer.
 *
 * Return: dividend / divisor rounded to nearest integer
 */
#define DIV64_U64_ROUND_CLOSEST(dividend, divisor)      \
        ({ u64 _tmp = (divisor); div64_u64((dividend) + _tmp / 2, _tmp); })
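/*
 * Example: DIV64_U64_ROUND_CLOSEST(10, 4) evaluates to 3, i.e.
 * (10 + 4 / 2) / 4 == 12 / 4 == 3, rounding 2.5 up to the nearest
 * integer.
 */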

/**
 * DIV_S64_ROUND_CLOSEST - signed 64bit divide with 32bit divisor rounded to nearest integer
 * @dividend: signed 64bit dividend
 * @divisor: signed 32bit divisor
 *
 * Divide signed 64bit dividend by signed 32bit divisor
 * and round to closest integer.
 *
 * Return: dividend / divisor rounded to nearest integer
 */
#define DIV_S64_ROUND_CLOSEST(dividend, divisor)(       \
{                                                       \
        s64 __x = (dividend);                           \
        s32 __d = (divisor);                            \
        ((__x > 0) == (__d > 0)) ?                      \
                div_s64((__x + (__d / 2)), __d) :       \
                div_s64((__x - (__d / 2)), __d);        \
}                                                       \
)
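/*
 * Example: DIV_S64_ROUND_CLOSEST(-7, 2) evaluates to -4: the operands
 * differ in sign, so the macro computes div_s64(-7 - 1, 2) == -4,
 * rounding halves away from zero.
 */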
#endif /* _LINUX_MATH64_H */
