NANO
return (u32)div_u64((u64)val * NANO, pclk);
u64 m = (u64)val * NANO;
tmp0 = div_s64_rem(tmp2, NANO, &tmp1);
*val2 = mul_u64_u32_div(gain % AD4030_GAIN_MIDLE_POINT, NANO,
nv = (u64)ret * NANO;
((u64)val * NANO + val2);
divisor = (u64)AD4130_MAX_ODR * NANO;
temp = div_u64((u64)dividend * NANO, divisor);
*val = div_u64_rem(temp, NANO, val2);
nv = (u64)chan_info->input_range_uv * NANO;
scale_uv = ((u64)st->int_vref_mv * NANO) >>
scale_buffer[i][1] = do_div(scale_uv, NANO);
*val2 = NANO / AD7191_TEMP_CODES_PER_DEGREE;
*val += DIV_ROUND_CLOSEST_ULL((u64)st->aincom_mv * NANO,
/*
 * Maximum programmable gain, scaled to nano units (gain of 128 expressed
 * as 128 * 10^9). NOTE(review): assumes NANO is the kernel's 10^9 scale
 * constant from <linux/units.h> — confirm against this file's includes.
 */
#define ADAQ776X_GAIN_MAX_NANO (128 * NANO)
tmp0 = div_u64_rem(tmp2, NANO, &tmp1);
gain_nano = gain_int * NANO + gain_fract;
tmp = DIV_ROUND_CLOSEST_ULL(gain_nano << precision, NANO);
tmp1 = ((u64)ref * NANO) >> pow;
div_u64_rem(tmp1, NANO, &tmp0);
tmp = div_s64(dividend * (s64)NANO, divisor);
*val = div_s64_rem(tmp, NANO, val2);
s64 tmp = curr_scale[0] * (s64)NANO + curr_scale[1];
*val = div_s64_rem(tmp, NANO, val2);
if (scaler > NANO || !scaler)
(u64)(scale_nano / (NANO / scaler));
return iio_gts_delinearize(lin_scale, NANO, scale_int, scale_nano);
return iio_gts_delinearize(tmp, NANO, scale_int, scale_nano);
ret = iio_gts_linearize(max_scale_int, max_scale_nano, NANO,
if (scaler > NANO)
*scale_nano = frac * (NANO / scaler);
ret = iio_gts_linearize(scale_int, scale_nano, NANO, &scale_linear);
denominator = NANO;
data->p_scale = div_s64_rem(div_s64(pdelta * NANO, odelta), NANO, &tmp);
hsc->p_scale = div_s64_rem(tmp, NANO, &hsc->p_scale_dec);
data->scale = div_s64_rem(div_s64(pdelta * NANO, odelta), NANO, &tmp);
period_ms = div_u64(NANO, (val * MEGA + val2));
/*
 * Parts-per-billion multiplier. NOTE(review): presumably aliases NANO
 * (10^9) so ppb values can be scaled with the same constant — verify
 * NANO's definition in the including file's headers.
 */
#define PPB_MULT NANO
self->utimer_info->resolution = (NANO / FRAME_RATE * PERIOD_SIZE);