FPCore:
(FPCore (b) :precision binary64 (/ (sqrt (- 1.0 (pow b 2.0))) (+ 1.0 b)))

C:
double code(double b) { return sqrt((1.0 - pow(b, 2.0))) / (1.0 + b); }

Fortran:
real(8) function code(b)
    real(8), intent (in) :: b
    code = sqrt((1.0d0 - (b ** 2.0d0))) / (1.0d0 + b)
end function

Java:
public static double code(double b) { return Math.sqrt((1.0 - Math.pow(b, 2.0))) / (1.0 + b); }

Python:
def code(b): return math.sqrt((1.0 - math.pow(b, 2.0))) / (1.0 + b)

Julia:
function code(b) return Float64(sqrt(Float64(1.0 - (b ^ 2.0))) / Float64(1.0 + b)) end

MATLAB:
function tmp = code(b)
    tmp = sqrt((1.0 - (b ^ 2.0))) / (1.0 + b);
end

Mathematica:
code[b_] := N[(N[Sqrt[N[(1.0 - N[Power[b, 2.0], $MachinePrecision]), $MachinePrecision]], $MachinePrecision] / N[(1.0 + b), $MachinePrecision]), $MachinePrecision]

LaTeX:
\frac{\sqrt{1 - {b}^{2}}}{1 + b}
Sampling outcomes in binary64 precision:
Herbie found 6 alternatives:
| Alternative | Accuracy | Speedup |
| --- | --- | --- |
Alternative 1

FPCore:
(FPCore (b) :precision binary64 (/ (sqrt (- 1.0 (pow (sqrt b) 4.0))) (+ 1.0 b)))

C:
double code(double b) { return sqrt((1.0 - pow(sqrt(b), 4.0))) / (1.0 + b); }

Fortran:
real(8) function code(b)
    real(8), intent (in) :: b
    code = sqrt((1.0d0 - (sqrt(b) ** 4.0d0))) / (1.0d0 + b)
end function

Java:
public static double code(double b) { return Math.sqrt((1.0 - Math.pow(Math.sqrt(b), 4.0))) / (1.0 + b); }

Python:
def code(b): return math.sqrt((1.0 - math.pow(math.sqrt(b), 4.0))) / (1.0 + b)

Julia:
function code(b) return Float64(sqrt(Float64(1.0 - (sqrt(b) ^ 4.0))) / Float64(1.0 + b)) end

MATLAB:
function tmp = code(b)
    tmp = sqrt((1.0 - (sqrt(b) ^ 4.0))) / (1.0 + b);
end

Mathematica:
code[b_] := N[(N[Sqrt[N[(1.0 - N[Power[N[Sqrt[b], $MachinePrecision], 4.0], $MachinePrecision]), $MachinePrecision]], $MachinePrecision] / N[(1.0 + b), $MachinePrecision]), $MachinePrecision]

LaTeX:
\frac{\sqrt{1 - {\left(\sqrt{b}\right)}^{4}}}{1 + b}
Derivation

| Step | Accuracy |
| --- | --- |
| Initial program | 100.0% |
| lift-pow.f64 | N/A |
| metadata-eval | N/A |
| metadata-eval | N/A |
| pow-pow | N/A |
| metadata-eval | N/A |
| lower-pow.f64 | N/A |
| metadata-eval | N/A |
| unpow1/2 | N/A |
| lower-sqrt.f64 | N/A |
| metadata-eval | 100.0% |
| Applied rewrites | 100.0% |
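The rule names above can be read as standard power identities: pow-pow as (x^m)^n = x^(mn) and unpow1/2 as x^(1/2) = sqrt(x) (this reading is inferred from the rule names, not spelled out in the report). Under that reading the alternative rests on the identity

\[ b^{2} = \left(b^{1/2}\right)^{4} = \left(\sqrt{b}\right)^{4}, \qquad b \ge 0, \]

which holds on the whole sampled domain 0 <= b <= 1, so the rewritten program is algebraically identical to the input and the accuracy stays at 100.0%.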
Alternative 2

FPCore:
(FPCore (b) :precision binary64 (/ (sqrt (fma (- b) b 1.0)) (+ 1.0 b)))

C:
double code(double b) { return sqrt(fma(-b, b, 1.0)) / (1.0 + b); }

Julia:
function code(b) return Float64(sqrt(fma(Float64(-b), b, 1.0)) / Float64(1.0 + b)) end

Mathematica:
code[b_] := N[(N[Sqrt[N[((-b) * b + 1.0), $MachinePrecision]], $MachinePrecision] / N[(1.0 + b), $MachinePrecision]), $MachinePrecision]

LaTeX:
\frac{\sqrt{\mathsf{fma}\left(-b, b, 1\right)}}{1 + b}
Derivation

| Step | Accuracy |
| --- | --- |
| Initial program | 100.0% |
| lift--.f64 | N/A |
| sub-neg | N/A |
| +-commutative | N/A |
| lift-pow.f64 | N/A |
| unpow2 | N/A |
| distribute-lft-neg-in | N/A |
| lower-fma.f64 | N/A |
| lower-neg.f64 | 100.0% |
| Applied rewrites | 100.0% |
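This alternative replaces 1.0 - pow(b, 2.0) with fma(-b, b, 1.0), which evaluates -b*b + 1 in a single rounding instead of rounding b*b first and then subtracting. A minimal C sketch (not part of the report; function names are illustrative) showing the two forms side by side:

```c
#include <math.h>
#include <stdio.h>

/* Original form: pow(b, 2.0) is rounded before the subtraction. */
static double original(double b) {
    return sqrt(1.0 - pow(b, 2.0)) / (1.0 + b);
}

/* The report's rewrite: fma(-b, b, 1.0) computes -b*b + 1.0 with one rounding. */
static double rewritten(double b) {
    return sqrt(fma(-b, b, 1.0)) / (1.0 + b);
}

int main(void) {
    /* Spot-check a few points from the sampled domain 0 <= b <= 1. */
    for (double b = 0.0; b <= 1.0; b += 0.25)
        printf("b = %.2f   original = %.17g   rewritten = %.17g\n",
               b, original(b), rewritten(b));
    return 0;
}
```

Compile with something like `cc -O2 example.c -lm`; fma is part of the C99 math library.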
Alternative 3

FPCore:
(FPCore (b) :precision binary64 (fma (fma (fma -0.5 b 0.5) b -1.0) b 1.0))

C:
double code(double b) { return fma(fma(fma(-0.5, b, 0.5), b, -1.0), b, 1.0); }

Julia:
function code(b) return fma(fma(fma(-0.5, b, 0.5), b, -1.0), b, 1.0) end

Mathematica:
code[b_] := N[(N[(N[(-0.5 * b + 0.5), $MachinePrecision] * b + -1.0), $MachinePrecision] * b + 1.0), $MachinePrecision]

LaTeX:
\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(-0.5, b, 0.5\right), b, -1\right), b, 1\right)
Derivation

| Step | Accuracy |
| --- | --- |
| Initial program | 100.0% |
| Taylor expanded in b around 0 | |
| +-commutative | N/A |
| *-commutative | N/A |
| lower-fma.f64 | N/A |
| sub-neg | N/A |
| *-commutative | N/A |
| metadata-eval | N/A |
| lower-fma.f64 | N/A |
| +-commutative | N/A |
| lower-fma.f64 | 99.4% |
| Applied rewrites | 99.4% |
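This alternative and the remaining ones all start from the "Taylor expanded in b around 0" step. For 0 <= b <= 1 the input can be simplified and expanded as a Maclaurin series (standard algebra, worked out here rather than taken from the report):

\[ \frac{\sqrt{1 - b^{2}}}{1 + b} = \sqrt{\frac{1 - b}{1 + b}} = 1 - b + \frac{b^{2}}{2} - \frac{b^{3}}{2} + O\!\left(b^{4}\right). \]

The triple-fma candidate above is the degree-3 truncation of this series evaluated in Horner form:

\[ \mathsf{fma}\!\left(\mathsf{fma}\!\left(\mathsf{fma}\!\left(-0.5, b, 0.5\right), b, -1\right), b, 1\right) = \left(\left(-\tfrac{1}{2}b + \tfrac{1}{2}\right)b - 1\right)b + 1 = 1 - b + \frac{b^{2}}{2} - \frac{b^{3}}{2}. \]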
Alternative 4

FPCore:
(FPCore (b) :precision binary64 (fma (fma 0.5 b -1.0) b 1.0))

C:
double code(double b) { return fma(fma(0.5, b, -1.0), b, 1.0); }

Julia:
function code(b) return fma(fma(0.5, b, -1.0), b, 1.0) end

Mathematica:
code[b_] := N[(N[(0.5 * b + -1.0), $MachinePrecision] * b + 1.0), $MachinePrecision]

LaTeX:
\mathsf{fma}\left(\mathsf{fma}\left(0.5, b, -1\right), b, 1\right)
Derivation

| Step | Accuracy |
| --- | --- |
| Initial program | 100.0% |
| Taylor expanded in b around 0 | |
| +-commutative | N/A |
| *-commutative | N/A |
| lower-fma.f64 | N/A |
| sub-neg | N/A |
| metadata-eval | N/A |
| lower-fma.f64 | 99.3% |
| Applied rewrites | 99.3% |
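Expanding the fma nest shows this is the same Maclaurin series truncated after the quadratic term:

\[ \mathsf{fma}\!\left(\mathsf{fma}\!\left(0.5, b, -1\right), b, 1\right) = \left(\tfrac{1}{2}b - 1\right)b + 1 = 1 - b + \frac{b^{2}}{2}. \]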
Alternative 5

FPCore:
(FPCore (b) :precision binary64 (- 1.0 b))

C:
double code(double b) { return 1.0 - b; }

Fortran:
real(8) function code(b)
    real(8), intent (in) :: b
    code = 1.0d0 - b
end function

Java:
public static double code(double b) { return 1.0 - b; }

Python:
def code(b): return 1.0 - b

Julia:
function code(b) return Float64(1.0 - b) end

MATLAB:
function tmp = code(b)
    tmp = 1.0 - b;
end

Mathematica:
code[b_] := N[(1.0 - b), $MachinePrecision]

LaTeX:
1 - b
Derivation

| Step | Accuracy |
| --- | --- |
| Initial program | 100.0% |
| Taylor expanded in b around 0 | |
| mul-1-neg | N/A |
| unsub-neg | N/A |
| lower--.f64 | 98.9% |
| Applied rewrites | 98.9% |
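Reading mul-1-neg as (-1) * x = -x and unsub-neg as a + (-x) = a - x (again inferred from the rule names), the first-order truncation of the series is tidied into a plain subtraction:

\[ 1 + (-1) \cdot b = 1 + (-b) = 1 - b. \]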
Alternative 6

FPCore:
(FPCore (b) :precision binary64 1.0)

C:
double code(double b) { return 1.0; }

Fortran:
real(8) function code(b)
    real(8), intent (in) :: b
    code = 1.0d0
end function

Java:
public static double code(double b) { return 1.0; }

Python:
def code(b): return 1.0

Julia:
function code(b) return 1.0 end

MATLAB:
function tmp = code(b)
    tmp = 1.0;
end

Mathematica:
code[b_] := 1.0

LaTeX:
1
Derivation

| Step | Accuracy |
| --- | --- |
| Initial program | 100.0% |
| Taylor expanded in b around 0 | |
| Applied rewrites | 97.6% |
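The accuracy figures in these derivations are Herbie's own sampled-error measure. As a rough, self-contained cross-check (not part of the report), the candidates can be compared against an extended-precision reference on the sampled interval; a minimal C sketch, using long double as a crude stand-in for a true high-precision reference, with illustrative function names:

```c
#include <math.h>
#include <stdio.h>

/* The input program and three of the report's alternatives. */
static double input(double b)      { return sqrt(1.0 - pow(b, 2.0)) / (1.0 + b); }
static double alt_fma(double b)    { return sqrt(fma(-b, b, 1.0)) / (1.0 + b); }
static double alt_cubic(double b)  { return fma(fma(fma(-0.5, b, 0.5), b, -1.0), b, 1.0); }
static double alt_linear(double b) { return 1.0 - b; }

/* Extended-precision reference; a crude stand-in for the arbitrary-precision
   evaluation Herbie itself uses when scoring accuracy. */
static long double reference(long double b) {
    return sqrtl(1.0L - b * b) / (1.0L + b);
}

int main(void) {
    double worst[4] = {0.0, 0.0, 0.0, 0.0};
    for (int i = 0; i <= 1000; i++) {
        double b = i / 1000.0;   /* grid over the sampled domain [0, 1] */
        long double ref = reference((long double)b);
        double vals[4] = { input(b), alt_fma(b), alt_cubic(b), alt_linear(b) };
        for (int k = 0; k < 4; k++)
            worst[k] = fmax(worst[k], (double)fabsl((long double)vals[k] - ref));
    }
    printf("max abs error on [0,1]: input %.3g, fma %.3g, cubic %.3g, linear %.3g\n",
           worst[0], worst[1], worst[2], worst[3]);
    return 0;
}
```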
Reproduce

herbie shell --seed 1
(FPCore (b)
:name "sqrt(1-pow(b,2))/(1+b)"
:precision binary64
:pre (and (<= 0.0 b) (<= b 1.0))
(/ (sqrt (- 1.0 (pow b 2.0))) (+ 1.0 b)))
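To replay this result, the FPCore specification above can be fed on standard input to the shell command from the Reproduce section, for example (spec.fpcore is an illustrative file name holding the FPCore text, and this assumes Herbie is installed and on the PATH):

herbie shell --seed 1 < spec.fpcore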