Scientific Computing, TU Berlin, WS 2019/2020, Lecture 04
Jürgen Fuhrmann, WIAS Berlin
typeof(): determine the type of a value.
Macros for performance testing: @time and @allocated from Base measure run time and memory allocation; the BenchmarkTools.jl package provides @benchmark for more reliable measurements.
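A minimal sketch of their use; the vector and its size are chosen arbitrarily for this example:

v = rand(10^6)
@time sum(v)   # first call: includes compilation time
@time sum(v)   # second call: measures the actual run time and allocations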
LOAD_PATH: Julia searches for packages in the directories listed in the global variable LOAD_PATH. Further locations can be made available by pushing them onto LOAD_PATH, e.g. the actual directory. A package named Package then consists of a directory Package with a subdirectory src, where the file Package/src/Package.jl defines a module named Package.
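A minimal sketch, assuming such a package skeleton exists under the current directory:

push!(LOAD_PATH, pwd())  # make packages in the current directory findable
using Package            # loads Package/src/Package.jl and brings the module into scope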
The Revise.jl package provides the function includet ("include and track"), which triggers automatic recompilation of the included file, and of the files used therein, upon change on the disk. To load Revise in every interactive session, put the following into ~/.julia/config/startup.jl:

if isinteractive()
    try
        @eval using Revise
        Revise.async_steal_repl_backend()
    catch err
        @warn "Could not load Revise."
    end
end
When running a script from the command prompt, start Julia with julia -i: the session stays interactive afterwards, isinteractive() returns true, and Revise is loaded.
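With Revise loaded, a typical workflow is to track a source file; the file name here is hypothetical:

using Revise
includet("mycode.jl")  # definitions in mycode.jl are updated automatically when the file changes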
C and Julia number types correspond as follows:

C | Julia |
---|---|
int | Int32 |
float | Float32 |
double | Float64 |
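Julia also ships portable aliases for the C types, which can be checked directly:

@show Cint === Int32
@show Cfloat === Float32
@show Cdouble === Float64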
open("cadd.c", "w") do io
write(io, """double cadd(double x, double y) { return x+y; }""")
end
Compile it into a shared object libcadd.so using gcc:

run(`gcc --shared cadd.c -o libcadd.so`)
Define a Julia function cadd which calls the C function cadd using the Julia ccall method:
- (:cadd, "libcadd"): call cadd from libcadd.so
- Float64: return type
- (Float64,Float64,): tuple of parameter types
- x,y: actual data passed

At its first call, libcadd.so is loaded into Julia. The compiled C function cadd() is called directly, with no intermediate wrapper code.
cadd(x,y)=ccall((:cadd, "libcadd"), Float64, (Float64,Float64,),x,y)
Call the wrapper:
@show cadd(1.5,2.5);
open("pyadd.py", "w") do io
write(io, """
def pyadd(x,y):
return x+y
""")
end
using Pkg
Pkg.add("PyCall")
using PyCall
pyadd=pyimport("pyadd")
Call pyadd from the imported module:
@show pyadd.pyadd(3.5,6.5)
Numbers are, of course, represented by bits:
@show bitstring(Int16(1))
@show bitstring(Float16(1))
@show bitstring(Int64(1))
@show bitstring(Float64(1))
Real numbers can be represented in base $\beta$ as

\begin{align*}
x=\pm \sum_{i=0}^{\infty} d_i\beta^{-i} \beta^e
\end{align*}

with digits $d_i\in\{0,\dots,\beta-1\}$ and exponent $e$. Floating point numbers ($\beta=2$ in IEEE 754) keep only a mantissa of $t$ digits $d_0\dots d_{t-1}$ and store the exponent in $k$ bits. For normalized numbers, $d_0=1$ is implicit and not stored, so the total storage is $1+k+(t-1)$ bits:

precision | Julia | C/C++ | k (exponent bits) | t (mantissa length) | bits |
---|---|---|---|---|---|
quadruple | n/a | long double | 15 | 113 | 128 |
double | Float64 | double | 11 | 53 | 64 |
single | Float32 | float | 8 | 24 | 32 |
half | Float16 | n/a | 5 | 11 | 16 |
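The mantissa length $t$ can be queried in Julia via precision:

@show precision(Float16)  # 11
@show precision(Float32)  # 24
@show precision(Float64)  # 53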
The helper floatbits makes the bit layout visible by inserting blanks between sign bit, exponent and mantissa:

function floatbits(x::Float32)
    s=bitstring(x)
    return s[1]*" "*s[2:9]*" "*s[10:end]   # sign | 8 exponent bits | 23 mantissa bits
end

function floatbits(x::Float64)
    s=bitstring(x)
    return s[1]*" "*s[2:12]*" "*s[13:end]  # sign | 11 exponent bits | 52 mantissa bits
end
floatbits(Float32(1))
floatbits(Float32(2))
floatbits(Float32(1/2))
floatbits(Float32(0.1))
floatbits(zero(Float32))
floatbits(-zero(Float32))
The representation is symmetric with respect to 0 because the sign is stored in a separate bit.
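This can be seen directly: negating a number flips only the leading bit.

@show floatbits(Float32(0.1))
@show floatbits(Float32(-0.1))  # identical except for the sign bit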
Smallest positive denormalized number: $d_i=0,\; i=0\dots t-2$, $d_{t-1}=1$ $\Rightarrow$ $x_{min} = \beta^{1-t}\beta^L$, where $L$ is the smallest possible exponent.
@show nextfloat(zero(Float32));
@show floatbits(nextfloat(zero(Float32)));
@show nextfloat(zero(Float64));
@show floatbits(nextfloat(zero(Float64)));
@show floatmin(Float32);
@show floatbits(floatmin(Float32));
@show floatmin(Float64);
@show floatbits(floatmin(Float64));
@show floatmax(Float32)
@show floatbits(floatmax(Float32));
@show floatmax(Float64)
@show floatbits(floatmax(Float64));
@show typemax(Float32)
@show floatbits(typemax(Float32))
@show prevfloat(typemax(Float32));
@show typemax(Float64)
@show floatbits(typemax(Float64))
@show prevfloat(typemax(Float64));
Machine epsilon: consider e.g. addition. Adding a small number to 1 shifts its mantissa to the right to align the exponents; since the mantissa holds $t$ digits, after $t-1$ shifts all of its bits are lost. The smallest floating point number $\epsilon$ whose addition to 1 yields a result different from 1 is therefore $\epsilon=\beta^{1-t}$, which is also the distance between 1 and the next larger floating point number.
ϵ=eps(Float32)
@show ϵ, floatbits(ϵ)
@show one(Float32)+ϵ/2
@show one(Float32)+ϵ,floatbits(one(Float32)+ϵ)
@show nextfloat(one(Float32))-one(Float32);
ϵ=eps(Float64)
@show ϵ, floatbits(ϵ)
@show one(Float64)+ϵ/2
@show one(Float64)+ϵ,floatbits(one(Float64)+ϵ)
@show nextfloat(one(Float64))-one(Float64);
Floating point addition is not associative; regrouping the same sum changes the result:

ϵ=eps(Float64)
@show (1.0 + ϵ/2) - 1.0
@show 1.0 + (ϵ/2 - 1.0);
How dense are the floating point numbers on the real axis? The function fpdens estimates the number of floating point numbers per unit interval around x:

function fpdens(x::AbstractFloat; sample_size=1000)
    xleft=x
    xright=x
    # walk sample_size floats to the left and to the right of x
    for i=1:sample_size
        xleft=prevfloat(xleft)
        xright=nextfloat(xright)
    end
    # number of sampled floats divided by the length of the sampled interval
    return prevfloat(2.0*sample_size/(xright-xleft))
end
x=10.0 .^collect(-10.0:0.1:10.0)
using Plots
plot(x,fpdens.(x), xaxis=:log, yaxis=:log, label="",xlabel="x", ylabel="floating point numbers per unit interval")
Norms of vectors are computed by the norm function from the LinearAlgebra package:

using LinearAlgebra
x=[3.0,2.0,5.0]
@show norm(x,1);
@show norm(x,2);
@show norm(x);
@show norm(x, Inf);
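These correspond to the usual definitions, which can be checked by hand:

@show norm(x,1)   ≈ sum(abs.(x))           # sum of absolute values
@show norm(x,2)   ≈ sqrt(sum(abs.(x).^2))  # Euclidean norm, the default
@show norm(x,Inf) ≈ maximum(abs.(x))       # maximum absolute value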
Matrix $A=(a_{ij})\in \mathbb R^{n\times n}$, regarded as a linear operator mapping $x$ to $y=Ax$ with

\begin{align*}
y_i= \sum_{j=1}^n a_{ij} x_j
\end{align*}

The induced matrix norm is

\begin{align*}
||A||_p&=\max_{x\in \mathbb R^n,\,x\neq 0} \frac{||Ax||_p}{||x||_p}\\
&=\max_{x\in \mathbb R^n,\,||x||_p=1} ||Ax||_p
\end{align*}
A=[3.0 2.0 5.0; 0.1 0.3 0.5 ; 0.6 2 3]
@show opnorm(A,1);
@show opnorm(A,Inf);
@show opnorm(A,2);
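For $p=1$ and $p=\infty$ there are simple explicit expressions: the maximum absolute column sum and the maximum absolute row sum, respectively:

@show opnorm(A,1)   ≈ maximum(sum(abs.(A), dims=1))  # max. column sum
@show opnorm(A,Inf) ≈ maximum(sum(abs.(A), dims=2))  # max. row sum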
Let $x$ solve $Ax=b$ and let $x+\Delta x$ solve the perturbed system $A(x+\Delta x)=b+\Delta b$. Subtracting the two equations gives $A\Delta x=\Delta b$. Therefore
\begin{equation*}
\left.
\begin{array}{rl}
\Delta x&=A^{-1} \Delta b\\
Ax&=b
\end{array}
\right\}
\Rightarrow
\left\{
\begin{array}{rl}
||A||\cdot ||x||&\geq||b|| \\
||\Delta x||&\leq ||A^{-1}||\cdot ||\Delta b||
\end{array}
\right.
\end{equation*}
\begin{equation*} \Rightarrow \frac{||\Delta x||}{||x||} \leq \kappa(A) \frac{||\Delta b||}{||b||} \end{equation*}
where $\kappa(A)= ||A||\cdot ||A^{-1}||$ is the condition number of $A$.
A=[ 1.0 -1.0 ; 1.0e5 1.0e5];
Ainv=inv(A)
κ=opnorm(A)*opnorm(Ainv)
@show Ainv
@show κ;
x=[ 1.0, 1.0]
b=A*x
@show b
Δb=1*[eps(1.0), eps(1.0)]
Δx=Ainv*(b+Δb)-x
@show norm(Δx)/norm(x)
@show norm(Δb)/norm(b)
@show κ*norm(Δb)/norm(b)
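LinearAlgebra also provides the condition number directly; cond uses the 2-norm by default, so it agrees with κ computed above:

@show cond(A)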
This notebook was generated using Literate.jl.