content stringlengths 6 1.03M | input_ids listlengths 4 535k | ratio_char_token float64 0.68 8.61 | token_count int64 4 535k |
|---|---|---|---|
<reponame>q2kuhn/QuantumLab.jl<gh_stars>10-100
using QuantumLab
using Test
using Dates
using LinearAlgebra
INFO(str) = @info "$(Dates.format(now(),dateformat"Y/m/d HH:MM:SS,sss")) $str"
if (!@isdefined(indent))
indent = ""
end
INFO("$(indent)READING: h2o.xyz -> h2o::Geometry ")
h2o = readGeometryXYZ("h2o.xyz")
INFO("$(indent)OBTAINING: BasisSetExchange -> sto3g::BasisSet")
sto3g = BasisSet("sto-3g")
INFO("$(indent)COMPUTING: sto3g,h2o -> bas")
bas = computeBasis(sto3g,h2o)
INFO("$(indent)COMPUTING: bas,h2o -> matrixOverlap, matrixKinetic, matrixNuclearAttraction, ERIs")
matrixOverlap = computeMatrixOverlap(bas)
matrixKinetic = computeMatrixKinetic(bas)
matrixNuclearAttraction = computeMatrixNuclearAttraction(bas,h2o)
ERIs = computeTensorElectronRepulsionIntegrals(bas)
INFO("$(indent)COMPUTING: h2o -> matrixSADguess")
matrixSADguess = computeDensityGuessSAD("HF","STO-3G",h2o)
INFO("$(indent)COMPUTING: sto3g, h2o -> shells, shells_native")
shells = computeBasisShellsLibInt2(sto3g,h2o)
shells_native = computeBasisShells(sto3g,h2o)
INFO("$(indent)COMPUTING: (HartreeFock) shells, h2o, matrixSADguess -> density")
density = evaluateSCF(shells,h2o,sum(matrixSADguess)/2,info=false,detailedinfo=false)[3]
INFO("$(indent)COMPUTING: density, matrixKinetic, matrixNuclearAttraction, ERIs -> matrixFock")
matrixFock = computeMatrixFock(density,matrixKinetic,matrixNuclearAttraction,ERIs)
densityvirt = inv(matrixOverlap) - density
INFO("$(indent)COMPUTING: shells, density -> S, T, J, K, Fock")
S = computeMatrixOverlap(shells)
T = computeMatrixKinetic(shells)
J = computeMatrixCoulomb(shells,density)
K = computeMatrixExchange(shells,density)
V = computeMatrixNuclearAttraction(shells,h2o)
fock = computeMatrixFock(T,V,J,K)
moenergies = eigvals(Symmetric(fock),Symmetric(S))
| [
27,
7856,
261,
480,
29,
80,
17,
74,
7456,
77,
14,
24915,
388,
17822,
13,
20362,
27,
456,
62,
30783,
29,
940,
12,
3064,
198,
3500,
29082,
17822,
198,
3500,
6208,
198,
3500,
44712,
198,
3500,
44800,
2348,
29230,
198,
198,
10778,
7,
... | 2.449393 | 741 |
using DiscgolfRecord
using Test
@testset "DiscgolfRecord.jl" begin
# Write your tests here.
# Make sure we can preview courses
@test_nowarn preview_course(COURSES["kit_carson"])
@test_nowarn preview_course(COURSES["mast_park"])
# Test the round reader
sample_round_file = "20210719_mastpark.csv"
@test_nowarn rnd = DiscgolfRecord.read_round(sample_round_file)
end
| [
3500,
8444,
70,
4024,
23739,
198,
3500,
6208,
198,
198,
31,
9288,
2617,
366,
15642,
70,
4024,
23739,
13,
20362,
1,
2221,
198,
220,
220,
220,
1303,
19430,
534,
5254,
994,
13,
628,
220,
220,
220,
1303,
6889,
1654,
356,
460,
12714,
109... | 2.635762 | 151 |
@doc doc"""
latexraw(arg)
Generate LaTeX equations from `arg`.
Parses expressions, ParameterizedFunctions, SymEngine.Base and arrays thereof.
Returns a string formatted for LaTeX.
# Examples
## using expressions
```jldoctest
expr = :(x/(y+x))
latexraw(expr)
# output
"\\frac{x}{y + x}"
```
```jldoctest
expr = Meta.parse("x/(y+x)")
latexraw(expr)
# output
"\\frac{x}{y + x}"
```
## using ParameterizedFunctions
```julia
using DifferentialEquations;
f = @ode_def feedback begin
dx = y/c_1 - x
dy = x^c_2 - y
end c_1=>1.0 c_2=>1.0
latexraw(f)
# output
2-element Array{String,1}:
"dx/dt = \\frac{y}{c_{1}} - x"
"dy/dt = x^{c_{2}} - y"
```
## using SymEngine
```jldoctest
using SymEngine
@vars x y
symExpr = x + x + x*y*y
latexraw(symExpr)
# output
"2 \\cdot x + x \\cdot y^{2}"
```
"""
latexraw(args...; kwargs...) = process_latexify(args...; kwargs..., env=:raw)
function _latexraw(inputex::Expr; convert_unicode=true, kwargs...)
## Pass all arrays or matrices in the expr to latexarray
inputex = postwalk(x -> x isa Expr && x.head in [:hcat, :vcat, :vect, :typed_vcat, :typed_hcat] ?
latexarray(expr_to_array(x); kwargs...)
: x,
inputex)
recurseexp!(lstr::LaTeXString) = lstr.s
function recurseexp!(ex)
prevOp = Vector{Symbol}(undef, length(ex.args))
fill!(prevOp, :none)
for i in 1:length(ex.args)
if isa(ex.args[i], Expr)
length(ex.args[i].args) > 1 && ex.args[i].args[1] isa Symbol && (prevOp[i] = ex.args[i].args[1])
ex.args[i] = recurseexp!(ex.args[i])
elseif ex.args[i] isa AbstractArray
ex.args[i] = latexraw(ex.args[i]; kwargs...)
end
end
return latexoperation(ex, prevOp; convert_unicode=convert_unicode, kwargs...)
end
ex = deepcopy(inputex)
str = recurseexp!(ex)
convert_unicode && (str = unicode2latex(str))
return LaTeXString(str)
end
function _latexraw(args...; kwargs...)
@assert length(args) > 1 "latexify does not support objects of type $(typeof(args[1]))."
_latexraw(args; kwargs...)
end
_latexraw(arr::Union{AbstractArray, Tuple}; kwargs...) = _latexarray(arr; kwargs...)
_latexraw(i::Nothing; kwargs...) = ""
_latexraw(i::SubString; kwargs...) = latexraw(Meta.parse(i); kwargs...)
_latexraw(i::SubString{LaTeXStrings.LaTeXString}; kwargs...) = i
_latexraw(i::Rational; kwargs...) = i.den == 1 ? latexraw(i.num; kwargs...) : latexraw(:($(i.num)/$(i.den)); kwargs...)
_latexraw(z::Complex; kwargs...) = LaTeXString("$(latexraw(z.re;kwargs...))$(z.im < 0 ? "-" : "+" )$(latexraw(abs(z.im);kwargs...))\\textit{i}")
#latexraw(i::DataFrames.DataArrays.NAtype) = "\\textrm{NA}"
_latexraw(str::LaTeXStrings.LaTeXString; kwargs...) = str
function _latexraw(i::Number; fmt=PlainNumberFormatter(), kwargs...)
try i == Inf && return LaTeXString("\\infty") catch; end
try i == -Inf && return LaTeXString("-\\infty") catch; end
fmt isa String && (fmt = PrintfNumberFormatter(fmt))
return fmt(i)
end
function _latexraw(i::Char; convert_unicode=true, kwargs...)
LaTeXString(convert_unicode ? unicode2latex(string(i)) : string(i))
end
function _latexraw(i::Symbol; convert_unicode=true, kwargs...)
str = string(i == :Inf ? :∞ : i)
str = convertSubscript(str)
convert_unicode && (str = unicode2latex(str))
return LaTeXString(str)
end
function _latexraw(i::String; kwargs...)
try
ex = Meta.parse(i)
return latexraw(ex; kwargs...)
catch ParseError
error("""
in Latexify.jl:
You are trying to create latex-maths from a `String` that cannot be parsed as
an expression.
`latexify` will, by default, try to parse any string inputs into expressions
and this parsing has just failed.
If you are passing strings that you want returned verbatim as part of your input,
try making them `LaTeXString`s first.
If you are trying to make a table with plain text, try passing the keyword
argument `latex=false`. You should also ensure that you have chosen an output
environment that is capable of displaying not-maths objects. Try for example
`env=:table` for a latex table or `env=:mdtable` for a markdown table.
""")
end
end
_latexraw(i::Missing; kwargs...) = "\\textrm{NA}"
| [
31,
15390,
2205,
37811,
198,
220,
220,
220,
47038,
1831,
7,
853,
8,
198,
198,
8645,
378,
4689,
49568,
27490,
422,
4600,
853,
44646,
198,
198,
47,
945,
274,
14700,
11,
25139,
2357,
1143,
24629,
2733,
11,
15845,
13798,
13,
14881,
290,
... | 2.355277 | 1,838 |
push!(LOAD_PATH,"../source/0.6/src/")
# include package
info("Include all...")
try
include("../source/0.6/src/App.jl")
include("../source/0.6/src/ThreadManager.jl")
catch e # do not exit this run!
warn(e)
end
info("Include done.")
info("Create Docs...")
using Documenter, App
makedocs(
build = joinpath(@__DIR__, "../build/docs"),
modules = [
CoreExtended,
TimeManager,
LoggerManager,
RessourceManager,
FileManager,
Environment,
JLScriptManager,
WindowManager,
MatrixMath,
JLGEngine,
App,
ThreadManager
],
clean = true,
doctest = true, # :fix
#linkcheck = true,
strict = false,
checkdocs = :none,
format = :html, #:latex
sitename = "JGE",
authors = "Gilga",
#analytics = "UA-89508993-1",
#html_prettyurls = true,
#html_canonical = "https://gilga.github.io/JGE/",
pages = Any[ # Compat: `Any` for 0.4 compat
"Home" => "index.md",
"Manual" => Any[
"manual/start.md",
],
"Source Files" => Any[
"files/App.md",
"files/CoreExtended.md",
"files/Environment.md",
"files/FileManager.md",
"files/JLScriptManager.md",
"files/LoggerManager.md",
"files/MatrixMath.md",
"files/RessourceManager.md",
"files/ThreadFunctions.md",
"files/ThreadManager.md",
"files/TimeManager.md",
"files/WindowManager.md",
"files/JLGEngine.md",
"files/JLGEngine/CameraManager.md",
"files/JLGEngine/ChunkManager.md",
"files/JLGEngine/EntityManager.md",
"files/JLGEngine/GameObjectManager.md",
"files/JLGEngine/GraphicsManager.md",
"files/JLGEngine/Management.md",
"files/JLGEngine/MeshManager.md",
"files/JLGEngine/ModelManager.md",
"files/JLGEngine/RenderManager.md",
"files/JLGEngine/ShaderManager.md",
"files/JLGEngine/StorageManager.md",
"files/JLGEngine/SzeneManager.md",
"files/JLGEngine/TextureManager.md",
"files/JLGEngine/TransformManager.md",
"files/JLGEngine/ModelManager/MeshData.md",
"files/JLGEngine/ModelManager/MeshFabric.md",
"files/JLGEngine/ModelManager/MeshLoader_OBJ.md",
"files/JLGEngine/LibGL/LibGL.md",
"files/JLGEngine/LibGL/GLDebugControl.md",
"files/JLGEngine/LibGL/GLExtendedFunctions.md",
"files/JLGEngine/LibGL/GLLists.md",
"files/JLGEngine/LibGL/GLSLParser.md",
"files/JLGEngine/LibGL/GLSLRessources.md",
"files/JLGEngine/LibGL/ShaderManager.md",
"files/JLGEngine/LibGL/StorageManager.md",
"files/JLGEngine/LibGL/TextureManager.md",
],
],
)
deploydocs(
deps = Deps.pip("mkdocs", "python-markdown-math"), #, "curl"
repo = "https://github.com/Gilga/JGE.git",
branch = "gh-pages",
julia = "0.6.2",
)
info("Docs done.") | [
14689,
0,
7,
35613,
62,
34219,
553,
40720,
10459,
14,
15,
13,
21,
14,
10677,
14,
4943,
198,
198,
2,
2291,
5301,
198,
10951,
7203,
818,
9152,
477,
9313,
8,
198,
28311,
198,
220,
2291,
7203,
40720,
10459,
14,
15,
13,
21,
14,
10677,
... | 2.09682 | 1,415 |
<gh_stars>0
int_rules_6_1_10 = @theory begin
#= ::Subsection::Closed:: =#
#= 6.1.10*(c+d*x)^m*(a+b*sinh)^n =#
@apply_utils Antiderivative((~u) ^ ~(m') * (~(a') + ~(b') * sinh(~v)) ^ ~(n'), ~x) => Antiderivative(ExpandToSum(~u, ~x) ^ ~m * (~a + ~b * sinh(ExpandToSum(~v, ~x))) ^ ~n, ~x) <-- FreeQ([~a, ~b, ~m, ~n], ~x) && (LinearQ([~u, ~v], ~x) && Not(LinearMatchQ([~u, ~v], ~x)))
@apply_utils Antiderivative((~u) ^ ~(m') * (~(a') + ~(b') * cosh(~v)) ^ ~(n'), ~x) => Antiderivative(ExpandToSum(~u, ~x) ^ ~m * (~a + ~b * cosh(ExpandToSum(~v, ~x))) ^ ~n, ~x) <-- FreeQ([~a, ~b, ~m, ~n], ~x) && (LinearQ([~u, ~v], ~x) && Not(LinearMatchQ([~u, ~v], ~x)))
end
| [
27,
456,
62,
30783,
29,
15,
198,
600,
62,
38785,
62,
21,
62,
16,
62,
940,
796,
2488,
1169,
652,
2221,
628,
220,
220,
220,
1303,
28,
7904,
7004,
5458,
3712,
2601,
1335,
3712,
796,
2,
198,
220,
220,
220,
1303,
28,
718,
13,
16,
1... | 1.838356 | 365 |
<reponame>JuliaPackageMirrors/DataArrays.jl
## mapreduce implementation that skips NA
function skipna_init(f, op, na::BitArray, data::Array, ifirst::Int, ilast::Int)
# Get first non-NA element
ifirst = Base.findnextnot(na, ifirst)
@inbounds d1 = data[ifirst]
# Get next non-NA element
ifirst = Base.findnextnot(na, ifirst+1)
@inbounds d2 = data[ifirst]
# Reduce first two elements
(op(f(d1), f(d2)), ifirst)
end
function mapreduce_seq_impl_skipna(f, op, T, A::DataArray, ifirst::Int, ilast::Int)
data = A.data
na = A.na
chunks = na.chunks
v, i = skipna_init(f, op, na, data, ifirst, ilast)
while i < ilast
i += 1
@inbounds na = Base.unsafe_bitgetindex(chunks, i)
na && continue
@inbounds d = data[i]
v = op(v, f(d))
end
v
end
# Pairwise map-reduce
function mapreduce_pairwise_impl_skipna{T}(f, op, A::DataArray{T}, bytefirst::Int, bytelast::Int, n_notna::Int, blksize::Int)
if n_notna <= blksize
ifirst = 64*(bytefirst-1)+1
ilast = min(64*bytelast, length(A))
# Fall back to Base implementation if no NAs in block
return ilast - ifirst + 1 == n_notna ? Base.mapreduce_seq_impl(f, op, A.data, ifirst, ilast) :
mapreduce_seq_impl_skipna(f, op, T, A, ifirst, ilast)
end
# Find byte in the middle of range
# The block size is restricted so that there will always be at
# least two non-NA elements in the returned range
chunks = A.na.chunks
nmid = 0
imid = bytefirst-1
while nmid < (n_notna >> 1)
imid += 1
@inbounds nmid += count_zeros(chunks[imid])
end
v1 = mapreduce_pairwise_impl_skipna(f, op, A, bytefirst, imid, nmid, blksize)
v2 = mapreduce_pairwise_impl_skipna(f, op, A, imid+1, bytelast, n_notna-nmid, blksize)
op(v1, v2)
end
if isdefined(Base, :pairwise_blocksize)
sum_pairwise_blocksize(f) = Base.pairwise_blocksize(f, +)
else
const sum_pairwise_blocksize = Base.sum_pairwise_blocksize
end
mapreduce_impl_skipna{T}(f, op, A::DataArray{T}) =
mapreduce_seq_impl_skipna(f, op, T, A, 1, length(A.data))
mapreduce_impl_skipna(f, op::typeof(@functorize(+)), A::DataArray) =
mapreduce_pairwise_impl_skipna(f, op, A, 1, length(A.na.chunks),
length(A.na)-countnz(A.na),
max(128, sum_pairwise_blocksize(f)))
## general mapreduce interface
function _mapreduce_skipna{T}(f, op, A::DataArray{T})
n = length(A)
na = A.na
nna = countnz(na)
nna == n && return Base.mr_empty(f, op, T)
nna == n-1 && return Base.r_promote(op, f(A.data[Base.findnextnot(na, 1)]))
nna == 0 && return Base.mapreduce_impl(f, op, A.data, 1, n)
mapreduce_impl_skipna(f, op, A)
end
# This is only safe when we can guarantee that if a function is passed
# NA, it returns NA. Otherwise we will fall back to the implementation
# in Base, which is slow because it's type-unstable, but guarantees the
# correct semantics
typealias SafeMapFuns @compat Union{typeof(@functorize(identity)), typeof(@functorize(abs)), typeof(@functorize(abs2)),
typeof(@functorize(exp)), typeof(@functorize(log)), typeof(@functorize(centralizedabs2fun))}
typealias SafeReduceFuns @compat Union{typeof(@functorize(+)), typeof(@functorize(*)), typeof(@functorize(max)), typeof(@functorize(min))}
function Base._mapreduce(f::SafeMapFuns, op::SafeReduceFuns, A::DataArray)
any(A.na) && return NA
Base._mapreduce(f, op, A.data)
end
function Base.mapreduce(f, op::Function, A::DataArray; skipna::Bool=false)
is(op, +) ? (skipna ? _mapreduce_skipna(f, @functorize(+), A) : Base._mapreduce(f, @functorize(+), A)) :
is(op, *) ? (skipna ? _mapreduce_skipna(f, @functorize(*), A) : Base._mapreduce(f, @functorize(*), A)) :
is(op, &) ? (skipna ? _mapreduce_skipna(f, @functorize(&), A) : Base._mapreduce(f, @functorize(&), A)) :
is(op, |) ? (skipna ? _mapreduce_skipna(f, @functorize(|), A) : Base._mapreduce(f, @functorize(|), A)) :
skipna ? _mapreduce_skipna(f, op, A) : Base._mapreduce(f, op, A)
end
# To silence deprecations, but could be more efficient
Base.mapreduce(f, op::(@compat Union{typeof(@functorize(|)), typeof(@functorize(&))}), A::DataArray; skipna::Bool=false) =
skipna ? _mapreduce_skipna(f, op, A) : Base._mapreduce(f, op, A)
Base.mapreduce(f, op, A::DataArray; skipna::Bool=false) =
skipna ? _mapreduce_skipna(f, op, A) : Base._mapreduce(f, op, A)
Base.reduce(op, A::DataArray; skipna::Bool=false) =
mapreduce(@functorize(identity), op, A; skipna=skipna)
## usual reductions
for (fn, op) in ((:(Base.sum), @functorize(+)),
(:(Base.prod), @functorize(*)),
(:(Base.minimum), @functorize(min)),
(:(Base.maximum), @functorize(max)))
@eval begin
$fn(f::(@compat Union{Function,$(supertype(typeof(@functorize(abs))))}), a::DataArray; skipna::Bool=false) =
mapreduce(f, $op, a; skipna=skipna)
$fn(a::DataArray; skipna::Bool=false) =
mapreduce(@functorize(identity), $op, a; skipna=skipna)
end
end
for (fn, f, op) in ((:(Base.sumabs), @functorize(abs), @functorize(+)),
(:(Base.sumabs2), @functorize(abs2), @functorize(+)))
@eval $fn(a::DataArray; skipna::Bool=false) = mapreduce($f, $op, a; skipna=skipna)
end
## mean
Base.mean(a::DataArray; skipna::Bool=false) =
sum(a; skipna=skipna) / (length(a.na)-(skipna ? countnz(a.na) : 0))
## variance
function Base.varm{T}(A::DataArray{T}, m::Number; corrected::Bool=true, skipna::Bool=false)
if skipna
n = length(A)
na = A.na
nna = countnz(na)
nna == n && return convert(Base.momenttype(T), NaN)
nna == n-1 && return convert(Base.momenttype(T),
abs2(A.data[Base.findnextnot(na, 1)] - m)/(1 - @compat(Int(corrected))))
/(nna == 0 ? Base.centralize_sumabs2(A.data, m, 1, n) :
mapreduce_impl_skipna(@functorize(centralizedabs2fun)(m), @functorize(+), A),
n - nna - @compat(Int(corrected)))
else
any(A.na) && return NA
Base.varm(A.data, m; corrected=corrected)
end
end
Base.varm{T}(A::DataArray{T}, m::NAtype; corrected::Bool=true, skipna::Bool=false) = NA
function Base.var(A::DataArray; corrected::Bool=true, mean=nothing, skipna::Bool=false)
mean == 0 ? Base.varm(A, 0; corrected=corrected, skipna=skipna) :
mean == nothing ? varm(A, Base.mean(A; skipna=skipna); corrected=corrected, skipna=skipna) :
isa(mean, (@compat Union{Number, NAtype})) ?
varm(A, mean; corrected=corrected, skipna=skipna) :
throw(ErrorException("Invalid value of mean."))
end
Base.stdm(A::DataArray, m::Number; corrected::Bool=true, skipna::Bool=false) =
sqrt(varm(A, m; corrected=corrected, skipna=skipna))
Base.std(A::DataArray; corrected::Bool=true, mean=nothing, skipna::Bool=false) =
sqrt(var(A; corrected=corrected, mean=mean, skipna=skipna))
## weighted mean
function Base.mean(a::DataArray, w::WeightVec; skipna::Bool=false)
if skipna
v = a .* w.values
sum(v; skipna=true) / sum(DataArray(w.values, v.na); skipna=true)
else
anyna(a) ? NA : mean(a.data, w)
end
end
function Base.mean{W,V<:DataArray}(a::DataArray, w::WeightVec{W,V}; skipna::Bool=false)
if skipna
v = a .* w.values
sum(v; skipna=true) / sum(DataArray(w.values.data, v.na); skipna=true)
else
anyna(a) || anyna(w.values) ? NA : wsum(a.data, w.values.data) / w.sum
end
end
| [
27,
7856,
261,
480,
29,
16980,
544,
27813,
27453,
5965,
14,
6601,
3163,
20477,
13,
20362,
198,
2235,
3975,
445,
7234,
7822,
326,
1341,
2419,
11746,
198,
198,
8818,
14267,
2616,
62,
15003,
7,
69,
11,
1034,
11,
12385,
3712,
13128,
19182... | 2.175633 | 3,513 |
using ModelingToolkit, OrdinaryDiffEq, Test
@parameters t α β γ δ
@variables x(t) y(t)
D = Differential(t)
eqs = [D(x) ~ α*x - β*x*y,
D(y) ~ -δ*y + γ*x*y]
sys = ODESystem(eqs)
u0 = [x => 1.0,
y => 1.0]
p = [α => 1.5,
β => 1.0,
δ => 3.0,
γ => 1.0]
tspan = (0.0,10.0)
prob = ODEProblem(sys,u0,tspan,p)
sol = solve(prob,Tsit5())
sys2 = liouville_transform(sys)
@variables trJ
u0 = [x => 1.0,
y => 1.0,
trJ => 1.0]
prob = ODEProblem(sys2,u0,tspan,p,jac=true)
sol = solve(prob,Tsit5())
@test sol[end,end] ≈ 1.0742818931017244
| [
3500,
9104,
278,
25391,
15813,
11,
14230,
3219,
28813,
36,
80,
11,
6208,
198,
198,
31,
17143,
7307,
256,
26367,
27169,
7377,
111,
7377,
112,
198,
31,
25641,
2977,
2124,
7,
83,
8,
331,
7,
83,
8,
198,
35,
796,
20615,
498,
7,
83,
8... | 1.728916 | 332 |
using Base.Test
using QuantumOptics
@testset "spin" begin
D(op1::Operator, op2::Operator) = abs(tracedistance_nh(full(op1), full(op2)))
# Test creation
@test_throws AssertionError SpinBasis(1//3)
@test_throws AssertionError SpinBasis(-1//2)
@test_throws AssertionError SpinBasis(0)
for spinnumber=[1//2, 1, 3//2, 4//2]
spinbasis = SpinBasis(spinnumber)
I = operators.identityoperator(spinbasis)
Zero = SparseOperator(spinbasis)
sx = sigmax(spinbasis)
sy = sigmay(spinbasis)
sz = sigmaz(spinbasis)
sp = sigmap(spinbasis)
sm = sigmam(spinbasis)
# Test traces
@test 0 == trace(sx)
@test 0 == trace(sy)
@test 0 == trace(sz)
# Test kommutation relations
kommutator(x, y) = x*y - y*x
@test 1e-12 > D(kommutator(sx, sx), Zero)
@test 1e-12 > D(kommutator(sx, sy), 2im*sz)
@test 1e-12 > D(kommutator(sx, sz), -2im*sy)
@test 1e-12 > D(kommutator(sy, sx), -2im*sz)
@test 1e-12 > D(kommutator(sy, sy), Zero)
@test 1e-12 > D(kommutator(sy, sz), 2im*sx)
@test 1e-12 > D(kommutator(sz, sx), 2im*sy)
@test 1e-12 > D(kommutator(sz, sy), -2im*sx)
@test 1e-12 > D(kommutator(sz, sz), Zero)
# Test creation and anihilation operators
@test 0 == D(sp, 0.5*(sx + 1im*sy))
@test 0 == D(sm, 0.5*(sx - 1im*sy))
@test 0 == D(sx, (sp + sm))
@test 0 == D(sy, -1im*(sp - sm))
# Test commutation relations with creation and anihilation operators
@test 1e-12 > D(kommutator(sp, sm), sz)
@test 1e-12 > D(kommutator(sz, sp), 2*sp)
@test 1e-12 > D(kommutator(sz, sm), -2*sm)
# Test v x (v x u) relation: [sa, [sa, sb]] = 4*(1-delta_{ab})*sb
@test 1e-12 > D(kommutator(sx, kommutator(sx, sx)), Zero)
@test 1e-12 > D(kommutator(sx, kommutator(sx, sy)), 4*sy)
@test 1e-12 > D(kommutator(sx, kommutator(sx, sz)), 4*sz)
@test 1e-12 > D(kommutator(sy, kommutator(sy, sx)), 4*sx)
@test 1e-12 > D(kommutator(sy, kommutator(sy, sy)), Zero)
@test 1e-12 > D(kommutator(sy, kommutator(sy, sz)), 4*sz)
@test 1e-12 > D(kommutator(sz, kommutator(sz, sx)), 4*sx)
@test 1e-12 > D(kommutator(sz, kommutator(sz, sy)), 4*sy)
@test 1e-12 > D(kommutator(sz, kommutator(sz, sz)), Zero)
# Test spinup and spindown states
@test 1 ≈ norm(spinup(spinbasis))
@test 1 ≈ norm(spindown(spinbasis))
@test 0 ≈ norm(sp*spinup(spinbasis))
@test 0 ≈ norm(sm*spindown(spinbasis))
end
# Test special relations for spin 1/2
spinbasis = SpinBasis(1//2)
I = identityoperator(spinbasis)
Zero = SparseOperator(spinbasis)
sx = sigmax(spinbasis)
sy = sigmay(spinbasis)
sz = sigmaz(spinbasis)
sp = sigmap(spinbasis)
sm = sigmam(spinbasis)
# Test antikommutator
antikommutator(x, y) = x*y + y*x
@test 0 ≈ D(antikommutator(sx, sx), 2*I)
@test 0 ≈ D(antikommutator(sx, sy), Zero)
@test 0 ≈ D(antikommutator(sx, sz), Zero)
@test 0 ≈ D(antikommutator(sy, sx), Zero)
@test 0 ≈ D(antikommutator(sy, sy), 2*I)
@test 0 ≈ D(antikommutator(sy, sz), Zero)
@test 0 ≈ D(antikommutator(sz, sx), Zero)
@test 0 ≈ D(antikommutator(sz, sy), Zero)
@test 0 ≈ D(antikommutator(sz, sz), 2*I)
# Test if involutory for spin 1/2
@test 0 ≈ D(sx*sx, I)
@test 0 ≈ D(sy*sy, I)
@test 0 ≈ D(sz*sz, I)
@test 0 ≈ D(-1im*sx*sy*sz, I)
# Test consistency of spin up and down with sigmap and sigmam
@test 1e-11 > norm(sm*spinup(spinbasis) - spindown(spinbasis))
@test 1e-11 > norm(sp*spindown(spinbasis) - spinup(spinbasis))
end # testset
| [
3500,
7308,
13,
14402,
198,
3500,
29082,
27871,
873,
198,
198,
31,
9288,
2617,
366,
39706,
1,
2221,
198,
198,
35,
7,
404,
16,
3712,
18843,
1352,
11,
1034,
17,
3712,
18843,
1352,
8,
796,
2352,
7,
2213,
2286,
9311,
62,
77,
71,
7,
... | 2.012295 | 1,708 |
# Use baremodule to shave off a few KB from the serialized `.ji` file
baremodule Xorg_xcb_util_wm_jll
using Base
using Base: UUID
import JLLWrappers
JLLWrappers.@generate_main_file_header("Xorg_xcb_util_wm")
JLLWrappers.@generate_main_file("Xorg_xcb_util_wm", UUID("c22f9ab0-d5fe-5066-847c-f4bb1cd4e361"))
end # module Xorg_xcb_util_wm_jll
| [
2,
5765,
6247,
21412,
284,
34494,
572,
257,
1178,
14204,
422,
262,
11389,
1143,
4600,
13,
7285,
63,
2393,
198,
49382,
21412,
1395,
2398,
62,
87,
21101,
62,
22602,
62,
26377,
62,
73,
297,
198,
3500,
7308,
198,
3500,
7308,
25,
471,
27... | 2.310811 | 148 |
<gh_stars>1-10
##### Beginning of file
@info("Importing the RemoveLFS module...")
import RemoveLFS
import TimeZones
@info("Reading config files...")
include(joinpath("config","preferences","branches.jl",))
include(joinpath("config","preferences","git-hosts.jl",))
include(joinpath("config","preferences","git-user.jl",))
include(joinpath("config","preferences","time-zone.jl",))
include(
joinpath(
"config", "repositories",
"do-not-push-to-these-destinations.jl",
)
)
include(
joinpath(
"config", "repositories",
"do-not-try-url-list.jl",
)
)
include(
joinpath(
"config", "repositories",
"download-git-lfs-repos-list.jl",
)
)
include(
joinpath(
"config", "repositories",
"try-but-allow-failures-url-list.jl",
)
)
RemoveLFS.CommandLine.run_removelfs_snapshots_command_line!!(
;
arguments = ARGS,
git_user_name = GIT_USER_NAME,
git_user_email = GIT_USER_EMAIL,
git_lfs_repos = GIT_LFS_REPOS,
dst_provider = dst_provider,
include_branches = INCLUDE_BRANCHES,
exclude_branches = EXCLUDE_BRANCHES,
do_not_try_url_list = DO_NOT_TRY_URL_LIST,
do_not_push_to_these_destinations = DO_NOT_PUSH_TO_THESE_DESTINATIONS,
try_but_allow_failures_url_list = TRY_BUT_ALLOW_FAILURES_URL_LIST,
time_zone = TIME_ZONE,
)
##### End of file
| [
27,
456,
62,
30783,
29,
16,
12,
940,
198,
4242,
2,
25976,
286,
2393,
198,
198,
31,
10951,
7203,
20939,
278,
262,
17220,
43,
10652,
8265,
9313,
8,
198,
198,
11748,
17220,
43,
10652,
198,
198,
11748,
3862,
57,
1952,
198,
198,
31,
10... | 2.171561 | 647 |
<reponame>BradLyman/AWS.jl<filename>src/services/comprehendmedical.jl
# This file is auto-generated by AWSMetadata.jl
using AWS
using AWS.AWSServices: comprehendmedical
using AWS.Compat
using AWS.UUIDs
"""
DescribeEntitiesDetectionV2Job()
Gets the properties associated with a medical entities detection job. Use this operation to get the status of a detection job.
# Required Parameters
- `JobId`: The identifier that Amazon Comprehend Medical generated for the job. The StartEntitiesDetectionV2Job operation returns this identifier in its response.
"""
describe_entities_detection_v2_job(JobId; aws_config::AWSConfig=global_aws_config()) = comprehendmedical("DescribeEntitiesDetectionV2Job", Dict{String, Any}("JobId"=>JobId); aws_config=aws_config)
describe_entities_detection_v2_job(JobId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = comprehendmedical("DescribeEntitiesDetectionV2Job", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("JobId"=>JobId), args)); aws_config=aws_config)
"""
DescribeICD10CMInferenceJob()
Gets the properties associated with an InferICD10CM job. Use this operation to get the status of an inference job.
# Required Parameters
- `JobId`: The identifier that Amazon Comprehend Medical generated for the job. The StartICD10CMInferenceJob operation returns this identifier in its response.
"""
describe_icd10_cminference_job(JobId; aws_config::AWSConfig=global_aws_config()) = comprehendmedical("DescribeICD10CMInferenceJob", Dict{String, Any}("JobId"=>JobId); aws_config=aws_config)
describe_icd10_cminference_job(JobId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = comprehendmedical("DescribeICD10CMInferenceJob", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("JobId"=>JobId), args)); aws_config=aws_config)
"""
DescribePHIDetectionJob()
Gets the properties associated with a protected health information (PHI) detection job. Use this operation to get the status of a detection job.
# Required Parameters
- `JobId`: The identifier that Amazon Comprehend Medical generated for the job. The StartPHIDetectionJob operation returns this identifier in its response.
"""
describe_phidetection_job(JobId; aws_config::AWSConfig=global_aws_config()) = comprehendmedical("DescribePHIDetectionJob", Dict{String, Any}("JobId"=>JobId); aws_config=aws_config)
describe_phidetection_job(JobId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = comprehendmedical("DescribePHIDetectionJob", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("JobId"=>JobId), args)); aws_config=aws_config)
"""
DescribeRxNormInferenceJob()
Gets the properties associated with an InferRxNorm job. Use this operation to get the status of an inference job.
# Required Parameters
- `JobId`: The identifier that Amazon Comprehend Medical generated for the job. The StartRxNormInferenceJob operation returns this identifier in its response.
"""
describe_rx_norm_inference_job(JobId; aws_config::AWSConfig=global_aws_config()) = comprehendmedical("DescribeRxNormInferenceJob", Dict{String, Any}("JobId"=>JobId); aws_config=aws_config)
describe_rx_norm_inference_job(JobId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = comprehendmedical("DescribeRxNormInferenceJob", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("JobId"=>JobId), args)); aws_config=aws_config)
"""
DetectEntities()
The DetectEntities operation is deprecated. You should use the DetectEntitiesV2 operation instead. Inspects the clinical text for a variety of medical entities and returns specific information about them such as entity category, location, and confidence score on that information .
# Required Parameters
- `Text`: A UTF-8 text string containing the clinical content being examined for entities. Each string must contain fewer than 20,000 bytes of characters.
"""
detect_entities(Text; aws_config::AWSConfig=global_aws_config()) = comprehendmedical("DetectEntities", Dict{String, Any}("Text"=>Text); aws_config=aws_config)
detect_entities(Text, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = comprehendmedical("DetectEntities", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("Text"=>Text), args)); aws_config=aws_config)
"""
DetectEntitiesV2()
Inspects the clinical text for a variety of medical entities and returns specific information about them such as entity category, location, and confidence score on that information. Amazon Comprehend Medical only detects medical entities in English language texts. The DetectEntitiesV2 operation replaces the DetectEntities operation. This new action uses a different model for determining the entities in your medical text and changes the way that some entities are returned in the output. You should use the DetectEntitiesV2 operation in all new applications. The DetectEntitiesV2 operation returns the Acuity and Direction entities as attributes instead of types.
# Required Parameters
- `Text`: A UTF-8 string containing the clinical content being examined for entities. Each string must contain fewer than 20,000 bytes of characters.
"""
detect_entities_v2(Text; aws_config::AWSConfig=global_aws_config()) = comprehendmedical("DetectEntitiesV2", Dict{String, Any}("Text"=>Text); aws_config=aws_config)
detect_entities_v2(Text, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = comprehendmedical("DetectEntitiesV2", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("Text"=>Text), args)); aws_config=aws_config)
"""
DetectPHI()
Inspects the clinical text for protected health information (PHI) entities and returns the entity category, location, and confidence score for each entity. Amazon Comprehend Medical only detects entities in English language texts.
# Required Parameters
- `Text`: A UTF-8 text string containing the clinical content being examined for PHI entities. Each string must contain fewer than 20,000 bytes of characters.
"""
detect_phi(Text; aws_config::AWSConfig=global_aws_config()) = comprehendmedical("DetectPHI", Dict{String, Any}("Text"=>Text); aws_config=aws_config)
detect_phi(Text, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = comprehendmedical("DetectPHI", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("Text"=>Text), args)); aws_config=aws_config)
"""
InferICD10CM()
InferICD10CM detects medical conditions as entities listed in a patient record and links those entities to normalized concept identifiers in the ICD-10-CM knowledge base from the Centers for Disease Control. Amazon Comprehend Medical only detects medical entities in English language texts.
# Required Parameters
- `Text`: The input text used for analysis. The input for InferICD10CM is a string from 1 to 10000 characters.
"""
infer_icd10_cm(Text; aws_config::AWSConfig=global_aws_config()) = comprehendmedical("InferICD10CM", Dict{String, Any}("Text"=>Text); aws_config=aws_config)
infer_icd10_cm(Text, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = comprehendmedical("InferICD10CM", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("Text"=>Text), args)); aws_config=aws_config)
"""
InferRxNorm()
InferRxNorm detects medications as entities listed in a patient record and links to the normalized concept identifiers in the RxNorm database from the National Library of Medicine. Amazon Comprehend Medical only detects medical entities in English language texts.
# Required Parameters
- `Text`: The input text used for analysis. The input for InferRxNorm is a string from 1 to 10000 characters.
"""
infer_rx_norm(Text; aws_config::AWSConfig=global_aws_config()) = comprehendmedical("InferRxNorm", Dict{String, Any}("Text"=>Text); aws_config=aws_config)
infer_rx_norm(Text, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = comprehendmedical("InferRxNorm", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("Text"=>Text), args)); aws_config=aws_config)
"""
ListEntitiesDetectionV2Jobs()
Gets a list of medical entity detection jobs that you have submitted.
# Optional Parameters
- `Filter`: Filters the jobs that are returned. You can filter jobs based on their names, status, or the date and time that they were submitted. You can only set one filter at a time.
- `MaxResults`: The maximum number of results to return in each page. The default is 100.
- `NextToken`: Identifies the next page of results to return.
"""
list_entities_detection_v2_jobs(; aws_config::AWSConfig=global_aws_config()) = comprehendmedical("ListEntitiesDetectionV2Jobs"; aws_config=aws_config)
list_entities_detection_v2_jobs(args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = comprehendmedical("ListEntitiesDetectionV2Jobs", args; aws_config=aws_config)
"""
ListICD10CMInferenceJobs()
Gets a list of InferICD10CM jobs that you have submitted.
# Optional Parameters
- `Filter`: Filters the jobs that are returned. You can filter jobs based on their names, status, or the date and time that they were submitted. You can only set one filter at a time.
- `MaxResults`: The maximum number of results to return in each page. The default is 100.
- `NextToken`: Identifies the next page of results to return.
"""
list_icd10_cminference_jobs(; aws_config::AWSConfig=global_aws_config()) = comprehendmedical("ListICD10CMInferenceJobs"; aws_config=aws_config)
list_icd10_cminference_jobs(args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = comprehendmedical("ListICD10CMInferenceJobs", args; aws_config=aws_config)
"""
ListPHIDetectionJobs()
Gets a list of protected health information (PHI) detection jobs that you have submitted.
# Optional Parameters
- `Filter`: Filters the jobs that are returned. You can filter jobs based on their names, status, or the date and time that they were submitted. You can only set one filter at a time.
- `MaxResults`: The maximum number of results to return in each page. The default is 100.
- `NextToken`: Identifies the next page of results to return.
"""
list_phidetection_jobs(; aws_config::AWSConfig=global_aws_config()) = comprehendmedical("ListPHIDetectionJobs"; aws_config=aws_config)
list_phidetection_jobs(args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = comprehendmedical("ListPHIDetectionJobs", args; aws_config=aws_config)
"""
ListRxNormInferenceJobs()
Gets a list of InferRxNorm jobs that you have submitted.
# Optional Parameters
- `Filter`: Filters the jobs that are returned. You can filter jobs based on their names, status, or the date and time that they were submitted. You can only set one filter at a time.
- `MaxResults`: Identifies the next page of results to return.
- `NextToken`: Identifies the next page of results to return.
"""
list_rx_norm_inference_jobs(; aws_config::AWSConfig=global_aws_config()) = comprehendmedical("ListRxNormInferenceJobs"; aws_config=aws_config)
list_rx_norm_inference_jobs(args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = comprehendmedical("ListRxNormInferenceJobs", args; aws_config=aws_config)
"""
StartEntitiesDetectionV2Job()
Starts an asynchronous medical entity detection job for a collection of documents. Use the DescribeEntitiesDetectionV2Job operation to track the status of a job.
# Required Parameters
- `DataAccessRoleArn`: The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend Medical read access to your input data. For more information, see Role-Based Permissions Required for Asynchronous Operations.
- `InputDataConfig`: Specifies the format and location of the input data for the job.
- `LanguageCode`: The language of the input documents. All documents must be in the same language.
- `OutputDataConfig`: Specifies where to send the output files.
# Optional Parameters
- `ClientRequestToken`: A unique identifier for the request. If you don't set the client request token, Amazon Comprehend Medical generates one.
- `JobName`: The identifier of the job.
- `KMSKey`: An AWS Key Management Service key to encrypt your output files. If you do not specify a key, the files are written in plain text.
"""
start_entities_detection_v2_job(DataAccessRoleArn, InputDataConfig, LanguageCode, OutputDataConfig; aws_config::AWSConfig=global_aws_config()) = comprehendmedical("StartEntitiesDetectionV2Job", Dict{String, Any}("DataAccessRoleArn"=>DataAccessRoleArn, "InputDataConfig"=>InputDataConfig, "LanguageCode"=>LanguageCode, "OutputDataConfig"=>OutputDataConfig, "ClientRequestToken"=>string(uuid4())); aws_config=aws_config)
start_entities_detection_v2_job(DataAccessRoleArn, InputDataConfig, LanguageCode, OutputDataConfig, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = comprehendmedical("StartEntitiesDetectionV2Job", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("DataAccessRoleArn"=>DataAccessRoleArn, "InputDataConfig"=>InputDataConfig, "LanguageCode"=>LanguageCode, "OutputDataConfig"=>OutputDataConfig, "ClientRequestToken"=>string(uuid4())), args)); aws_config=aws_config)
"""
StartICD10CMInferenceJob()
Starts an asynchronous job to detect medical conditions and link them to the ICD-10-CM ontology. Use the DescribeICD10CMInferenceJob operation to track the status of a job.
# Required Parameters
- `DataAccessRoleArn`: The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend Medical read access to your input data. For more information, see Role-Based Permissions Required for Asynchronous Operations.
- `InputDataConfig`: Specifies the format and location of the input data for the job.
- `LanguageCode`: The language of the input documents. All documents must be in the same language.
- `OutputDataConfig`: Specifies where to send the output files.
# Optional Parameters
- `ClientRequestToken`: A unique identifier for the request. If you don't set the client request token, Amazon Comprehend Medical generates one.
- `JobName`: The identifier of the job.
- `KMSKey`: An AWS Key Management Service key to encrypt your output files. If you do not specify a key, the files are written in plain text.
"""
start_icd10_cminference_job(DataAccessRoleArn, InputDataConfig, LanguageCode, OutputDataConfig; aws_config::AWSConfig=global_aws_config()) = comprehendmedical("StartICD10CMInferenceJob", Dict{String, Any}("DataAccessRoleArn"=>DataAccessRoleArn, "InputDataConfig"=>InputDataConfig, "LanguageCode"=>LanguageCode, "OutputDataConfig"=>OutputDataConfig, "ClientRequestToken"=>string(uuid4())); aws_config=aws_config)
start_icd10_cminference_job(DataAccessRoleArn, InputDataConfig, LanguageCode, OutputDataConfig, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = comprehendmedical("StartICD10CMInferenceJob", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("DataAccessRoleArn"=>DataAccessRoleArn, "InputDataConfig"=>InputDataConfig, "LanguageCode"=>LanguageCode, "OutputDataConfig"=>OutputDataConfig, "ClientRequestToken"=>string(uuid4())), args)); aws_config=aws_config)
"""
StartPHIDetectionJob()
Starts an asynchronous job to detect protected health information (PHI). Use the DescribePHIDetectionJob operation to track the status of a job.
# Required Parameters
- `DataAccessRoleArn`: The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend Medical read access to your input data. For more information, see Role-Based Permissions Required for Asynchronous Operations.
- `InputDataConfig`: Specifies the format and location of the input data for the job.
- `LanguageCode`: The language of the input documents. All documents must be in the same language.
- `OutputDataConfig`: Specifies where to send the output files.
# Optional Parameters
- `ClientRequestToken`: A unique identifier for the request. If you don't set the client request token, Amazon Comprehend Medical generates one.
- `JobName`: The identifier of the job.
- `KMSKey`: An AWS Key Management Service key to encrypt your output files. If you do not specify a key, the files are written in plain text.
"""
start_phidetection_job(DataAccessRoleArn, InputDataConfig, LanguageCode, OutputDataConfig; aws_config::AWSConfig=global_aws_config()) = comprehendmedical("StartPHIDetectionJob", Dict{String, Any}("DataAccessRoleArn"=>DataAccessRoleArn, "InputDataConfig"=>InputDataConfig, "LanguageCode"=>LanguageCode, "OutputDataConfig"=>OutputDataConfig, "ClientRequestToken"=>string(uuid4())); aws_config=aws_config)
start_phidetection_job(DataAccessRoleArn, InputDataConfig, LanguageCode, OutputDataConfig, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = comprehendmedical("StartPHIDetectionJob", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("DataAccessRoleArn"=>DataAccessRoleArn, "InputDataConfig"=>InputDataConfig, "LanguageCode"=>LanguageCode, "OutputDataConfig"=>OutputDataConfig, "ClientRequestToken"=>string(uuid4())), args)); aws_config=aws_config)
"""
StartRxNormInferenceJob()
Starts an asynchronous job to detect medication entities and link them to the RxNorm ontology. Use the DescribeRxNormInferenceJob operation to track the status of a job.
# Required Parameters
- `DataAccessRoleArn`: The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend Medical read access to your input data. For more information, see Role-Based Permissions Required for Asynchronous Operations.
- `InputDataConfig`: Specifies the format and location of the input data for the job.
- `LanguageCode`: The language of the input documents. All documents must be in the same language.
- `OutputDataConfig`: Specifies where to send the output files.
# Optional Parameters
- `ClientRequestToken`: A unique identifier for the request. If you don't set the client request token, Amazon Comprehend Medical generates one.
- `JobName`: The identifier of the job.
- `KMSKey`: An AWS Key Management Service key to encrypt your output files. If you do not specify a key, the files are written in plain text.
"""
start_rx_norm_inference_job(DataAccessRoleArn, InputDataConfig, LanguageCode, OutputDataConfig; aws_config::AWSConfig=global_aws_config()) = comprehendmedical("StartRxNormInferenceJob", Dict{String, Any}("DataAccessRoleArn"=>DataAccessRoleArn, "InputDataConfig"=>InputDataConfig, "LanguageCode"=>LanguageCode, "OutputDataConfig"=>OutputDataConfig, "ClientRequestToken"=>string(uuid4())); aws_config=aws_config)
start_rx_norm_inference_job(DataAccessRoleArn, InputDataConfig, LanguageCode, OutputDataConfig, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = comprehendmedical("StartRxNormInferenceJob", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("DataAccessRoleArn"=>DataAccessRoleArn, "InputDataConfig"=>InputDataConfig, "LanguageCode"=>LanguageCode, "OutputDataConfig"=>OutputDataConfig, "ClientRequestToken"=>string(uuid4())), args)); aws_config=aws_config)
"""
StopEntitiesDetectionV2Job()
Stops a medical entities detection job in progress.
# Required Parameters
- `JobId`: The identifier of the medical entities job to stop.
"""
stop_entities_detection_v2_job(JobId; aws_config::AWSConfig=global_aws_config()) = comprehendmedical("StopEntitiesDetectionV2Job", Dict{String, Any}("JobId"=>JobId); aws_config=aws_config)
stop_entities_detection_v2_job(JobId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = comprehendmedical("StopEntitiesDetectionV2Job", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("JobId"=>JobId), args)); aws_config=aws_config)
"""
StopICD10CMInferenceJob()
Stops an InferICD10CM inference job in progress.
# Required Parameters
- `JobId`: The identifier of the job.
"""
stop_icd10_cminference_job(JobId; aws_config::AWSConfig=global_aws_config()) = comprehendmedical("StopICD10CMInferenceJob", Dict{String, Any}("JobId"=>JobId); aws_config=aws_config)
stop_icd10_cminference_job(JobId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = comprehendmedical("StopICD10CMInferenceJob", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("JobId"=>JobId), args)); aws_config=aws_config)
"""
StopPHIDetectionJob()
Stops a protected health information (PHI) detection job in progress.
# Required Parameters
- `JobId`: The identifier of the PHI detection job to stop.
"""
stop_phidetection_job(JobId; aws_config::AWSConfig=global_aws_config()) = comprehendmedical("StopPHIDetectionJob", Dict{String, Any}("JobId"=>JobId); aws_config=aws_config)
stop_phidetection_job(JobId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = comprehendmedical("StopPHIDetectionJob", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("JobId"=>JobId), args)); aws_config=aws_config)
"""
StopRxNormInferenceJob()
Stops an InferRxNorm inference job in progress.
# Required Parameters
- `JobId`: The identifier of the job.
"""
stop_rx_norm_inference_job(JobId; aws_config::AWSConfig=global_aws_config()) = comprehendmedical("StopRxNormInferenceJob", Dict{String, Any}("JobId"=>JobId); aws_config=aws_config)
stop_rx_norm_inference_job(JobId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = comprehendmedical("StopRxNormInferenceJob", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("JobId"=>JobId), args)); aws_config=aws_config)
| [
27,
7856,
261,
480,
29,
30805,
31633,
805,
14,
12298,
50,
13,
20362,
27,
34345,
29,
10677,
14,
30416,
14,
785,
3866,
15631,
41693,
13,
20362,
198,
2,
770,
2393,
318,
8295,
12,
27568,
416,
30865,
9171,
14706,
13,
20362,
198,
3500,
30... | 3.373441 | 6,416 |
<reponame>EcoJulia/Julia-PhD-course-Copenhagen
### A Pluto.jl notebook ###
# v0.16.1
using Markdown
using InteractiveUtils
# ╔═╡ bc0083ae-2c24-11ec-27d1-d3151362feba
# Plotting (VegaLite), tables (DataFrames), CSV reading, and example datasets (RDatasets).
using VegaLite, DataFrames, CSV, RDatasets
# ╔═╡ af8af33a-f75c-4ac1-9c3e-7afa932fd4c1
md"## Plotting with VegaLite
Vegalite is a modern grammar-of-graphics programming language. See information and gallery here https://vega.github.io/vega-lite/
"
# ╔═╡ 5b856367-3bf0-4f0c-b82f-6ae3de31e3b5
md"Load the relevant libraries, and get the iris dataset"
# ╔═╡ 9742e502-f9df-4576-8783-a3b81c40a685
# ╔═╡ 2c955350-cd80-4429-8215-a13ada4b9efd
# Iris example table; later cells use its SepalLength, SepalWidth and Species columns.
iris = dataset("datasets", "iris")
# ╔═╡ 08a306fe-684b-42ae-8586-a0e61fc9d889
md"The most basic plots involve a DataFrame that you `pipe` (using `|>`) to the `@vlplot` macro. You choose a `mark` (or `geom`) and define `x` and `y`."
# ╔═╡ e563a075-5fd9-4541-99ee-07a84080287b
iris |> @vlplot(:point, x = :SepalLength, y = :SepalWidth)
# ╔═╡ 16b538b2-1d30-4b91-988f-59448783545e
md"By default, scales start at 0. To control the scales for x and y, pass a specification to x and y"
# ╔═╡ 4a59b6e3-1619-485e-a862-a8ab2c863e5a
iris |> @vlplot(:point,
x = {:SepalLength, scale={zero=false}},
y = {:SepalWidth, scale={zero=false}}
)
# ╔═╡ 0a84be01-7907-4482-9f35-f92e81c7ef5d
md"Everything inside the `@vlplot` first bracket counts as a `mapping` or `aesthetic`. Specify `color` to have different colors for different species"
# ╔═╡ 506b7d99-cce0-4f0d-afc4-4e151038e0d8
iris |> @vlplot(:point,
x = {:SepalLength, scale={zero=false}},
y = {:SepalWidth, scale={zero=false}},
color = :Species
)
# ╔═╡ f97a6cdd-a871-42f3-9ba6-ac8be800fbb2
md"You can stack `@vlplot`s on top of each other with`+`. Unspecified arguments will get passed over from the previous. Here to add both a line and points"
# ╔═╡ 6d9354ee-ed0a-49af-9c40-835c1f20d35e
# Shared encodings in the first @vlplot are inherited by the layered point and line marks.
iris |> @vlplot(
x = {:SepalLength, scale={zero=false}},
y = {:SepalWidth, scale={zero=false}},
color = :Species) +
@vlplot(:point) + @vlplot(:line)
# ╔═╡ a5bb11a0-b215-487e-ae9d-b72ef4cd6465
md"In practice what you probably want is a trendline. To add something to the plot that is a derived function of the data, use a `transform`. Note that `groupby` in the regression transform requires a vector"
# ╔═╡ 2a2bed9a-7f97-4975-aea8-62e17bf37923
iris |> @vlplot(
x = {:SepalLength, scale={zero=false}},
y = {:SepalWidth, scale={zero=false}},
color = :Species) +
@vlplot(:point) +
@vlplot(:line,
transform = [{
regression=:SepalWidth,
on = :SepalLength,
groupby = [:Species]
}])
# ╔═╡ 66073b7c-e972-451a-ab77-63dcb3dbe7f1
md"Add a `count()` transformation function to use a bar plot to plot a histogram"
# ╔═╡ c08554d6-c6d9-408e-bc96-1fa9eaedb4d3
iris |> @vlplot(:bar,
x={
:SepalLength,
bin={maxbins=10},
},
y="count()")
# ╔═╡ 983c4ab3-969a-4421-b9ce-6b6cec0e8438
md"You can easily create faceted plots by mapping a variable to the `row` or `col` argument"
# ╔═╡ 7e81c820-c212-49a1-a21f-46161cb4716c
iris |> @vlplot(:bar,
x={
:SepalLength,
bin={maxbins=10},
},
y="count()",
row = :Species
)
# ╔═╡ c06bcfcd-34f9-4254-93f1-206271982843
md"Let's load a different dataset - a version of the sleep dataset"
# ╔═╡ d508a53d-015c-483c-aae6-2e65c858d18e
# "NA" strings in the CSV are parsed as `missing` values.
allison = CSV.read("data/allison.csv", DataFrame, missingstring = "NA")
# ╔═╡ b1fa7bf2-3a96-4f44-8ca1-a07094467fdf
md"A basic plot reveals a very skewed distribution of x values"
# ╔═╡ effc71fb-261a-406f-b305-604bf8a2207a
allison |> @vlplot(:point, x = :BodyWt, y = :Gestation)
# ╔═╡ 5e76f741-56a8-4b0b-8fb8-a7a0aaf6763e
md"We can address that by applying a different `scale` to `x`"
# ╔═╡ 8cd88777-a081-40ec-83cc-63451043c71a
allison |> @vlplot(:point, x = {:BodyWt, scale = {type = :log}},
y = :Gestation, color = :Predation)
# ╔═╡ 9731b8e1-a464-4280-82eb-ff4e3831227b
md"Apply and `:o` to the `Predation` variable to signal that we are dealing with an ordinal variable`"
# ╔═╡ b4e8e702-bd06-4d64-b05f-c2a901c453f6
allison |> @vlplot(:point, x = {:BodyWt, scale = {type = :log}},
y = :Gestation, color = "Predation:o")
# ╔═╡ dc120eee-0308-425b-a22b-0725b0a18f53
md"""
### Exercise 1
Create different histograms for total sleep for different predation categories
"""
# ╔═╡ ad5bcb43-7806-40a4-8210-601ee67f8bb2
# ╔═╡ f73627ad-a8fb-40c2-810c-6052e1a4216a
md"""
### Exercise 2
Create one or more diagrams investigating the relationship between predation risk and gestation time
"""
# ╔═╡ bbadec0d-6242-481a-bc9c-f7ef132dbea5
# ╔═╡ 00000000-0000-0000-0000-000000000001
PLUTO_PROJECT_TOML_CONTENTS = """
[deps]
CSV = "336ed68f-0bac-5ca0-87d4-7b16caf5d00b"
DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
RDatasets = "ce6b1742-4840-55fa-b093-852dadbb1d8b"
VegaLite = "112f6efa-9a02-5b7d-90c0-432ed331239a"
[compat]
CSV = "~0.8.5"
DataFrames = "~1.2.2"
RDatasets = "~0.7.5"
VegaLite = "~2.6.0"
"""
# ╔═╡ 00000000-0000-0000-0000-000000000002
PLUTO_MANIFEST_TOML_CONTENTS = """
# This file is machine-generated - editing it directly is not advised
julia_version = "1.7.0-rc1"
manifest_format = "2.0"
[[deps.ArgTools]]
uuid = "0dad84c5-d112-42e6-8d28-ef12dabb789f"
[[deps.Artifacts]]
uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33"
[[deps.Base64]]
uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f"
[[deps.CSV]]
deps = ["Dates", "Mmap", "Parsers", "PooledArrays", "SentinelArrays", "Tables", "Unicode"]
git-tree-sha1 = "b83aa3f513be680454437a0eee21001607e5d983"
uuid = "336ed68f-0bac-5ca0-87d4-7b16caf5d00b"
version = "0.8.5"
[[deps.CategoricalArrays]]
deps = ["DataAPI", "Future", "Missings", "Printf", "Requires", "Statistics", "Unicode"]
git-tree-sha1 = "fbc5c413a005abdeeb50ad0e54d85d000a1ca667"
uuid = "324d7699-5711-5eae-9e2f-1d82baa6b597"
version = "0.10.1"
[[deps.CodecZlib]]
deps = ["TranscodingStreams", "Zlib_jll"]
git-tree-sha1 = "ded953804d019afa9a3f98981d99b33e3db7b6da"
uuid = "944b1d66-785c-5afd-91f1-9de20f533193"
version = "0.7.0"
[[deps.Compat]]
deps = ["Base64", "Dates", "DelimitedFiles", "Distributed", "InteractiveUtils", "LibGit2", "Libdl", "LinearAlgebra", "Markdown", "Mmap", "Pkg", "Printf", "REPL", "Random", "SHA", "Serialization", "SharedArrays", "Sockets", "SparseArrays", "Statistics", "Test", "UUIDs", "Unicode"]
git-tree-sha1 = "31d0151f5716b655421d9d75b7fa74cc4e744df2"
uuid = "34da2185-b29b-5c13-b0c7-acf172513d20"
version = "3.39.0"
[[deps.CompilerSupportLibraries_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae"
[[deps.ConstructionBase]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "f74e9d5388b8620b4cee35d4c5a618dd4dc547f4"
uuid = "187b0558-2788-49d3-abe0-74a17ed4e7c9"
version = "1.3.0"
[[deps.Crayons]]
git-tree-sha1 = "3f71217b538d7aaee0b69ab47d9b7724ca8afa0d"
uuid = "a8cc5b0e-0ffa-5ad4-8c14-923d3ee1735f"
version = "4.0.4"
[[deps.DataAPI]]
git-tree-sha1 = "cc70b17275652eb47bc9e5f81635981f13cea5c8"
uuid = "9a962f9c-6df0-11e9-0e5d-c546b8b5ee8a"
version = "1.9.0"
[[deps.DataFrames]]
deps = ["Compat", "DataAPI", "Future", "InvertedIndices", "IteratorInterfaceExtensions", "LinearAlgebra", "Markdown", "Missings", "PooledArrays", "PrettyTables", "Printf", "REPL", "Reexport", "SortingAlgorithms", "Statistics", "TableTraits", "Tables", "Unicode"]
git-tree-sha1 = "d785f42445b63fc86caa08bb9a9351008be9b765"
uuid = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
version = "1.2.2"
[[deps.DataStructures]]
deps = ["Compat", "InteractiveUtils", "OrderedCollections"]
git-tree-sha1 = "7d9d316f04214f7efdbb6398d545446e246eff02"
uuid = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8"
version = "0.18.10"
[[deps.DataValueInterfaces]]
git-tree-sha1 = "bfc1187b79289637fa0ef6d4436ebdfe6905cbd6"
uuid = "e2d170a0-9d28-54be-80f0-106bbe20a464"
version = "1.0.0"
[[deps.DataValues]]
deps = ["DataValueInterfaces", "Dates"]
git-tree-sha1 = "d88a19299eba280a6d062e135a43f00323ae70bf"
uuid = "e7dc6d0d-1eca-5fa6-8ad6-5aecde8b7ea5"
version = "0.4.13"
[[deps.Dates]]
deps = ["Printf"]
uuid = "ade2ca70-3891-5945-98fb-dc099432e06a"
[[deps.DelimitedFiles]]
deps = ["Mmap"]
uuid = "8bb1440f-4735-579b-a4ab-409b98df4dab"
[[deps.Distributed]]
deps = ["Random", "Serialization", "Sockets"]
uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b"
[[deps.Downloads]]
deps = ["ArgTools", "LibCURL", "NetworkOptions"]
uuid = "f43a241f-c20a-4ad4-852c-f6b1247861c6"
[[deps.ExprTools]]
git-tree-sha1 = "b7e3d17636b348f005f11040025ae8c6f645fe92"
uuid = "e2ba6199-217a-4e67-a87a-7c52f15ade04"
version = "0.1.6"
[[deps.FileIO]]
deps = ["Pkg", "Requires", "UUIDs"]
git-tree-sha1 = "3c041d2ac0a52a12a27af2782b34900d9c3ee68c"
uuid = "5789e2e9-d7fb-5bc7-8068-2c6fae9b9549"
version = "1.11.1"
[[deps.FilePaths]]
deps = ["FilePathsBase", "MacroTools", "Reexport", "Requires"]
git-tree-sha1 = "919d9412dbf53a2e6fe74af62a73ceed0bce0629"
uuid = "8fc22ac5-c921-52a6-82fd-178b2807b824"
version = "0.8.3"
[[deps.FilePathsBase]]
deps = ["Dates", "Mmap", "Printf", "Test", "UUIDs"]
git-tree-sha1 = "7fb0eaac190a7a68a56d2407a6beff1142daf844"
uuid = "48062228-2e41-5def-b9a4-89aafe57970f"
version = "0.9.12"
[[deps.Formatting]]
deps = ["Printf"]
git-tree-sha1 = "8339d61043228fdd3eb658d86c926cb282ae72a8"
uuid = "59287772-0a20-5a39-b81b-1366585eb4c0"
version = "0.4.2"
[[deps.Future]]
deps = ["Random"]
uuid = "9fa8497b-333b-5362-9e8d-4d0656e87820"
[[deps.HTTP]]
deps = ["Base64", "Dates", "IniFile", "Logging", "MbedTLS", "NetworkOptions", "Sockets", "URIs"]
git-tree-sha1 = "14eece7a3308b4d8be910e265c724a6ba51a9798"
uuid = "cd3eb016-35fb-5094-929b-558a96fad6f3"
version = "0.9.16"
[[deps.IniFile]]
deps = ["Test"]
git-tree-sha1 = "098e4d2c533924c921f9f9847274f2ad89e018b8"
uuid = "83e8ac13-25f8-5344-8a64-a9f2b223428f"
version = "0.5.0"
[[deps.InteractiveUtils]]
deps = ["Markdown"]
uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240"
[[deps.InvertedIndices]]
git-tree-sha1 = "bee5f1ef5bf65df56bdd2e40447590b272a5471f"
uuid = "41ab1584-1d38-5bbf-9106-f11c6c58b48f"
version = "1.1.0"
[[deps.IteratorInterfaceExtensions]]
git-tree-sha1 = "a3f24677c21f5bbe9d2a714f95dcd58337fb2856"
uuid = "82899510-4779-5014-852e-03e436cf321d"
version = "1.0.0"
[[deps.JSON]]
deps = ["Dates", "Mmap", "Parsers", "Unicode"]
git-tree-sha1 = "8076680b162ada2a031f707ac7b4953e30667a37"
uuid = "682c06a0-de6a-54ab-a142-c8b1cf79cde6"
version = "0.21.2"
[[deps.JSONSchema]]
deps = ["HTTP", "JSON", "URIs"]
git-tree-sha1 = "2f49f7f86762a0fbbeef84912265a1ae61c4ef80"
uuid = "7d188eb4-7ad8-530c-ae41-71a32a6d4692"
version = "0.3.4"
[[deps.LazyArtifacts]]
deps = ["Artifacts", "Pkg"]
uuid = "4af54fe1-eca0-43a8-85a7-787d91b784e3"
[[deps.LibCURL]]
deps = ["LibCURL_jll", "MozillaCACerts_jll"]
uuid = "b27032c2-a3e7-50c8-80cd-2d36dbcbfd21"
[[deps.LibCURL_jll]]
deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll", "Zlib_jll", "nghttp2_jll"]
uuid = "deac9b47-8bc7-5906-a0fe-35ac56dc84c0"
[[deps.LibGit2]]
deps = ["Base64", "NetworkOptions", "Printf", "SHA"]
uuid = "76f85450-5226-5b5a-8eaa-529ad045b433"
[[deps.LibSSH2_jll]]
deps = ["Artifacts", "Libdl", "MbedTLS_jll"]
uuid = "29816b5a-b9ab-546f-933c-edad1886dfa8"
[[deps.Libdl]]
uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb"
[[deps.LinearAlgebra]]
deps = ["Libdl", "libblastrampoline_jll"]
uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
[[deps.Logging]]
uuid = "56ddb016-857b-54e1-b83d-db4d58db5568"
[[deps.MacroTools]]
deps = ["Markdown", "Random"]
git-tree-sha1 = "5a5bc6bf062f0f95e62d0fe0a2d99699fed82dd9"
uuid = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09"
version = "0.5.8"
[[deps.Markdown]]
deps = ["Base64"]
uuid = "d6f4376e-aef5-505a-96c1-9c027394607a"
[[deps.MbedTLS]]
deps = ["Dates", "MbedTLS_jll", "Random", "Sockets"]
git-tree-sha1 = "1c38e51c3d08ef2278062ebceade0e46cefc96fe"
uuid = "739be429-bea8-5141-9913-cc70e7f3736d"
version = "1.0.3"
[[deps.MbedTLS_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1"
[[deps.Missings]]
deps = ["DataAPI"]
git-tree-sha1 = "bf210ce90b6c9eed32d25dbcae1ebc565df2687f"
uuid = "e1d29d7a-bbdc-5cf2-9ac0-f12de2c33e28"
version = "1.0.2"
[[deps.Mmap]]
uuid = "a63ad114-7e13-5084-954f-fe012c677804"
[[deps.Mocking]]
deps = ["Compat", "ExprTools"]
git-tree-sha1 = "29714d0a7a8083bba8427a4fbfb00a540c681ce7"
uuid = "78c3b35d-d492-501b-9361-3d52fe80e533"
version = "0.7.3"
[[deps.MozillaCACerts_jll]]
uuid = "14a3606d-f60d-562e-9121-12d972cd8159"
[[deps.NetworkOptions]]
uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908"
[[deps.NodeJS]]
deps = ["Pkg"]
git-tree-sha1 = "905224bbdd4b555c69bb964514cfa387616f0d3a"
uuid = "2bd173c7-0d6d-553b-b6af-13a54713934c"
version = "1.3.0"
[[deps.OpenBLAS_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "Libdl"]
uuid = "4536629a-c528-5b80-bd46-f80d51c5b363"
[[deps.OrderedCollections]]
git-tree-sha1 = "85f8e6578bf1f9ee0d11e7bb1b1456435479d47c"
uuid = "bac558e1-5e72-5ebc-8fee-abe8a469f55d"
version = "1.4.1"
[[deps.Parsers]]
deps = ["Dates"]
git-tree-sha1 = "bfd7d8c7fd87f04543810d9cbd3995972236ba1b"
uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0"
version = "1.1.2"
[[deps.Pkg]]
deps = ["Artifacts", "Dates", "Downloads", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "Serialization", "TOML", "Tar", "UUIDs", "p7zip_jll"]
uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
[[deps.PooledArrays]]
deps = ["DataAPI", "Future"]
git-tree-sha1 = "a193d6ad9c45ada72c14b731a318bedd3c2f00cf"
uuid = "2dfb63ee-cc39-5dd5-95bd-886bf059d720"
version = "1.3.0"
[[deps.PrettyTables]]
deps = ["Crayons", "Formatting", "Markdown", "Reexport", "Tables"]
git-tree-sha1 = "69fd065725ee69950f3f58eceb6d144ce32d627d"
uuid = "08abe8d2-0d0c-5749-adfa-8a2ac140af0d"
version = "1.2.2"
[[deps.Printf]]
deps = ["Unicode"]
uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7"
[[deps.RData]]
deps = ["CategoricalArrays", "CodecZlib", "DataFrames", "Dates", "FileIO", "Requires", "TimeZones", "Unicode"]
git-tree-sha1 = "19e47a495dfb7240eb44dc6971d660f7e4244a72"
uuid = "df47a6cb-8c03-5eed-afd8-b6050d6c41da"
version = "0.8.3"
[[deps.RDatasets]]
deps = ["CSV", "CodecZlib", "DataFrames", "FileIO", "Printf", "RData", "Reexport"]
git-tree-sha1 = "06d4da8e540edb0314e88235b2e8f0429404fdb7"
uuid = "ce6b1742-4840-55fa-b093-852dadbb1d8b"
version = "0.7.5"
[[deps.REPL]]
deps = ["InteractiveUtils", "Markdown", "Sockets", "Unicode"]
uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb"
[[deps.Random]]
deps = ["Serialization"]
uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
[[deps.RecipesBase]]
git-tree-sha1 = "44a75aa7a527910ee3d1751d1f0e4148698add9e"
uuid = "3cdcf5f2-1ef4-517c-9805-6587b60abb01"
version = "1.1.2"
[[deps.Reexport]]
git-tree-sha1 = "45e428421666073eab6f2da5c9d310d99bb12f9b"
uuid = "189a3867-3050-52da-a836-e630ba90ab69"
version = "1.2.2"
[[deps.Requires]]
deps = ["UUIDs"]
git-tree-sha1 = "4036a3bd08ac7e968e27c203d45f5fff15020621"
uuid = "ae029012-a4dd-5104-9daa-d747884805df"
version = "1.1.3"
[[deps.SHA]]
uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce"
[[deps.SentinelArrays]]
deps = ["Dates", "Random"]
git-tree-sha1 = "54f37736d8934a12a200edea2f9206b03bdf3159"
uuid = "91c51154-3ec4-41a3-a24f-3f23e20d615c"
version = "1.3.7"
[[deps.Serialization]]
uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b"
[[deps.Setfield]]
deps = ["ConstructionBase", "Future", "MacroTools", "Requires"]
git-tree-sha1 = "fca29e68c5062722b5b4435594c3d1ba557072a3"
uuid = "efcf1570-3423-57d1-acb7-fd33fddbac46"
version = "0.7.1"
[[deps.SharedArrays]]
deps = ["Distributed", "Mmap", "Random", "Serialization"]
uuid = "1a1011a3-84de-559e-8e89-a11a2f7dc383"
[[deps.Sockets]]
uuid = "6462fe0b-24de-5631-8697-dd941f90decc"
[[deps.SortingAlgorithms]]
deps = ["DataStructures"]
git-tree-sha1 = "b3363d7460f7d098ca0912c69b082f75625d7508"
uuid = "a2af1166-a08f-5f64-846c-94a0d3cef48c"
version = "1.0.1"
[[deps.SparseArrays]]
deps = ["LinearAlgebra", "Random"]
uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
[[deps.Statistics]]
deps = ["LinearAlgebra", "SparseArrays"]
uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
[[deps.TOML]]
deps = ["Dates"]
uuid = "fa267f1f-6049-4f14-aa54-33bafae1ed76"
[[deps.TableTraits]]
deps = ["IteratorInterfaceExtensions"]
git-tree-sha1 = "c06b2f539df1c6efa794486abfb6ed2022561a39"
uuid = "3783bdb8-4a98-5b6b-af9a-565f29a5fe9c"
version = "1.0.1"
[[deps.TableTraitsUtils]]
deps = ["DataValues", "IteratorInterfaceExtensions", "Missings", "TableTraits"]
git-tree-sha1 = "78fecfe140d7abb480b53a44f3f85b6aa373c293"
uuid = "382cd787-c1b6-5bf2-a167-d5b971a19bda"
version = "1.0.2"
[[deps.Tables]]
deps = ["DataAPI", "DataValueInterfaces", "IteratorInterfaceExtensions", "LinearAlgebra", "TableTraits", "Test"]
git-tree-sha1 = "fed34d0e71b91734bf0a7e10eb1bb05296ddbcd0"
uuid = "bd369af6-aec1-5ad0-b16a-f7cc5008161c"
version = "1.6.0"
[[deps.Tar]]
deps = ["ArgTools", "SHA"]
uuid = "a4e569a6-e804-4fa4-b0f3-eef7a1d5b13e"
[[deps.Test]]
deps = ["InteractiveUtils", "Logging", "Random", "Serialization"]
uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
[[deps.TimeZones]]
deps = ["Dates", "Future", "LazyArtifacts", "Mocking", "Pkg", "Printf", "RecipesBase", "Serialization", "Unicode"]
git-tree-sha1 = "a5688ffdbd849a98503c6650effe79fe89a41252"
uuid = "f269a46b-ccf7-5d73-abea-4c690281aa53"
version = "1.5.9"
[[deps.TranscodingStreams]]
deps = ["Random", "Test"]
git-tree-sha1 = "216b95ea110b5972db65aa90f88d8d89dcb8851c"
uuid = "3bb67fe8-82b1-5028-8e26-92a6c54297fa"
version = "0.9.6"
[[deps.URIParser]]
deps = ["Unicode"]
git-tree-sha1 = "53a9f49546b8d2dd2e688d216421d050c9a31d0d"
uuid = "30578b45-9adc-5946-b283-645ec420af67"
version = "0.4.1"
[[deps.URIs]]
git-tree-sha1 = "97bbe755a53fe859669cd907f2d96aee8d2c1355"
uuid = "5c2747f8-b7ea-4ff2-ba2e-563bfd36b1d4"
version = "1.3.0"
[[deps.UUIDs]]
deps = ["Random", "SHA"]
uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4"
[[deps.Unicode]]
uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5"
[[deps.Vega]]
deps = ["DataStructures", "DataValues", "Dates", "FileIO", "FilePaths", "IteratorInterfaceExtensions", "JSON", "JSONSchema", "MacroTools", "NodeJS", "Pkg", "REPL", "Random", "Setfield", "TableTraits", "TableTraitsUtils", "URIParser"]
git-tree-sha1 = "43f83d3119a868874d18da6bca0f4b5b6aae53f7"
uuid = "239c3e63-733f-47ad-beb7-a12fde22c578"
version = "2.3.0"
[[deps.VegaLite]]
deps = ["Base64", "DataStructures", "DataValues", "Dates", "FileIO", "FilePaths", "IteratorInterfaceExtensions", "JSON", "MacroTools", "NodeJS", "Pkg", "REPL", "Random", "TableTraits", "TableTraitsUtils", "URIParser", "Vega"]
git-tree-sha1 = "3e23f28af36da21bfb4acef08b144f92ad205660"
uuid = "112f6efa-9a02-5b7d-90c0-432ed331239a"
version = "2.6.0"
[[deps.Zlib_jll]]
deps = ["Libdl"]
uuid = "83775a58-1f1d-513f-b197-d71354ab007a"
[[deps.libblastrampoline_jll]]
deps = ["Artifacts", "Libdl", "OpenBLAS_jll"]
uuid = "8e850b90-86db-534c-a0d3-1478176c7d93"
[[deps.nghttp2_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "8e850ede-7688-5339-a07c-302acd2aaf8d"
[[deps.p7zip_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "3f19e933-33d8-53b3-aaab-bd5110c3b7a0"
"""
# ╔═╡ Cell order:
# ╟─af8af33a-f75c-4ac1-9c3e-7afa932fd4c1
# ╟─5b856367-3bf0-4f0c-b82f-6ae3de31e3b5
# ╠═9742e502-f9df-4576-8783-a3b81c40a685
# ╠═bc0083ae-2c24-11ec-27d1-d3151362feba
# ╠═2c955350-cd80-4429-8215-a13ada4b9efd
# ╟─08a306fe-684b-42ae-8586-a0e61fc9d889
# ╠═e563a075-5fd9-4541-99ee-07a84080287b
# ╟─16b538b2-1d30-4b91-988f-59448783545e
# ╠═4a59b6e3-1619-485e-a862-a8ab2c863e5a
# ╟─0a84be01-7907-4482-9f35-f92e81c7ef5d
# ╠═506b7d99-cce0-4f0d-afc4-4e151038e0d8
# ╟─f97a6cdd-a871-42f3-9ba6-ac8be800fbb2
# ╠═6d9354ee-ed0a-49af-9c40-835c1f20d35e
# ╟─a5bb11a0-b215-487e-ae9d-b72ef4cd6465
# ╠═2a2bed9a-7f97-4975-aea8-62e17bf37923
# ╟─66073b7c-e972-451a-ab77-63dcb3dbe7f1
# ╠═c08554d6-c6d9-408e-bc96-1fa9eaedb4d3
# ╟─983c4ab3-969a-4421-b9ce-6b6cec0e8438
# ╠═7e81c820-c212-49a1-a21f-46161cb4716c
# ╟─c06bcfcd-34f9-4254-93f1-206271982843
# ╠═d508a53d-015c-483c-aae6-2e65c858d18e
# ╟─b1fa7bf2-3a96-4f44-8ca1-a07094467fdf
# ╠═effc71fb-261a-406f-b305-604bf8a2207a
# ╟─5e76f741-56a8-4b0b-8fb8-a7a0aaf6763e
# ╠═8cd88777-a081-40ec-83cc-63451043c71a
# ╟─9731b8e1-a464-4280-82eb-ff4e3831227b
# ╠═b4e8e702-bd06-4d64-b05f-c2a901c453f6
# ╟─dc120eee-0308-425b-a22b-0725b0a18f53
# ╠═ad5bcb43-7806-40a4-8210-601ee67f8bb2
# ╟─f73627ad-a8fb-40c2-810c-6052e1a4216a
# ╠═bbadec0d-6242-481a-bc9c-f7ef132dbea5
# ╟─00000000-0000-0000-0000-000000000001
# ╟─00000000-0000-0000-0000-000000000002
| [
27,
7856,
261,
480,
29,
36,
1073,
16980,
544,
14,
16980,
544,
12,
2725,
35,
12,
17319,
12,
34,
9654,
71,
11286,
198,
21017,
317,
32217,
13,
20362,
20922,
44386,
198,
2,
410,
15,
13,
1433,
13,
16,
198,
198,
3500,
2940,
2902,
198,
... | 1.873372 | 10,827 |
"""
unitRandom3Cartesian(rng = MersenneTwister())
Returns a unit vector pointing in a random direction in Cartesian coordinates.
"""
function unitRandom3Cartesian(rng = MersenneTwister())
θ = π*rand(rng)
ϕ = 2.0*π*rand(rng)
return Vector([
cos(ϕ)*sin(θ),
sin(ϕ)*sin(θ),
cos(θ)
])
end
export unitRandom3Cartesian
"""
randbtw(a::T,b::T,rng=MersenneTwister()) where {T<:Real}
Generates a random number between a and b.
"""
function randbtw(a::T,b::T,rng=MersenneTwister()) where {T<:Real}
if a > b
return randbtw(b,a,rng)
end
return a+rand(rng)*(b-a)
end
export randbtw
"""
dist(n,func,domain,range,rng=MersenneTwister())
Generates n values following the distribution func in a given domain and range
- SLOW
- Can be used on any distribution function
"""
function dist(n,func,domain,range,rng=MersenneTwister())
out = zeros(n)
for i=1:n
val = 0.0
tryagain = true
while tryagain
xtest = randbtw(domain...,rng)
fxtest = abs(func(xtest))
if abs(randbtw(range...,rng)) < fxtest
val = xtest
tryagain = false
end
end
out[i] = val
end
return out
end
export dist
"""
PowerLaw
Sampler for a power law distribution
"""
struct PowerLaw <: Sampleable{Univariate, Continuous}
α::Real
domain::Tuple{T,T} where {T<:Real}
end
export PowerLaw
"""
rand(rng::AbstractRNG,s::PowerLaw)
Generate one sample from a power law distribution
"""
function Base.rand(rng::AbstractRNG,s::PowerLaw)
ξ = rand(rng)
γ = 1.0-s.α
e0γ= s.domain[1]^(γ)
efγ = s.domain[2]^(γ)
out = (efγ-e0γ)*ξ+e0γ
return out^(1/γ)
end
"""
BrokenPowerLaw
Sampler for a broken power law distribution
"""
struct BrokenPowerLaw <: Sampleable{Univariate, Continuous}
α::Array{T} where {T<:Real}
domain::Array{T} where {T<:Real}
function BrokenPowerLaw(α::Array{T}, domain::Array{T}) where {T<:Real}
@assert length(α)+1 == length(domain) "Requires length(α) == length(domain)-1"
bOrder = true
for i=1:(length(domain)-1)
bOrder *= domain[i] < domain[i+1]
end
@assert bOrder "domain breaks must be in order"
new(α, domain)
end
end
export BrokenPowerLaw
# Return the index i such that vec[i] < val < vec[i+1] (strict inequalities),
# or 0 when val lies outside every open interval — including when it hits a
# break point exactly.
function afterIndex(val, vec)
    for i in 1:(length(vec) - 1)
        if vec[i] < val < vec[i+1]   # chained comparison replaces the Bool product
            return i
        end
    end
    return 0
end
"""
rand(rng::AbstractRNG,s::BrokenPowerLaw)
Generate one sample from a broken power law distribution.
"""
function Base.rand(rng::AbstractRNG,s::BrokenPowerLaw)
ξ = rand(rng)
γ = 1 .- s.α
cx = ones(length(s.α))
for i = 1:(length(cx)-1)
β = s.α[i]/s.α[i+1]
cx[i+1] = cx[i]^β*s.domain[i+1]^(β-1)
end
cx = map(/,cx,γ)
arts = [
begin
(s.domain[i+1]^γ[i] - s.domain[i]^γ[i])*cx[i]
end for i=1:length(γ)
]
parts = vcat([0],arts)
sumParts = cumsum(parts)
norm = sumParts[end]
cnorm = ξ*norm
supIndex = afterIndex(cnorm,sumParts)
out = cnorm - sumParts[supIndex]
out /= cx[supIndex]
out += s.domain[supIndex]^γ[supIndex]
out = out^(1/γ[supIndex])
return out
end
| [
37811,
198,
220,
220,
220,
4326,
29531,
18,
43476,
35610,
7,
81,
782,
796,
337,
364,
29727,
5080,
1694,
28955,
198,
198,
35561,
257,
4326,
15879,
10609,
287,
257,
4738,
4571,
287,
13690,
35610,
22715,
13,
198,
37811,
198,
8818,
4326,
... | 2.134448 | 1,495 |
<filename>sims/sim_study/sim.jl
println("Loading packages...")
@time begin
using Cytof5
using Random, Distributions
# TODO: Get rid of this dep
using RCall
using BSON
using ArgParse
import Cytof5.Model.logger
include("util.jl")
end
println("Done loading packages.")
# TODO: review
# ARG PARSING
# Define behavior for vector numerical args (input as space delimited string).
# This is ArgParse.jl's documented extension point for custom arg types.
ArgParse.parse_item(::Type{Vector{T}}, x::AbstractString) where {T <: Number} = parse.(T, split(x))
"""
    parse_cmd()

Parse the simulation's command-line arguments with ArgParse and return the
parsed dictionary. When `--EXP_NAME` is not supplied, a default experiment
name is synthesized by joining all non-path arguments as `key``value` pairs.
"""
function parse_cmd()
    s = ArgParseSettings()

    @add_arg_table s begin
        "--simdat_path"
            arg_type = String
            required = true
        "--MCMC_ITER"
            arg_type = Int
            default = 1000
        "--BURN"
            arg_type = Int
            default = 10000
        "--K_MCMC"
            arg_type = Int
            required = true
        "--L0_MCMC"
            arg_type = Int
            required = true
        "--L1_MCMC"
            arg_type = Int
            required = true
        # Missing-data mechanism parameters
        "--pBounds"
            arg_type = Vector{Float64}
            default = [.05, .8, .05] # paper
        "--yQuantiles"
            arg_type = Vector{Float64}
            default = [0.0, .25, .5] # paper
        "--RESULTS_DIR"
            arg_type = String
            required = true
        "--SEED"
            arg_type = Int
            default = 0
        "--EXP_NAME"
            arg_type = String
        "--printFreq"
            arg_type = Int
            default = 50
    end

    PARSED_ARGS = parse_args(s)

    # Was `== nothing`; use the idiomatic identity test instead.
    if isnothing(PARSED_ARGS["EXP_NAME"])
        logger("EXP_NAME not defined. Making default experiment name.")
        MAIN_ARGS = filter(d -> !(d.first in ("RESULTS_DIR", "EXP_NAME")), PARSED_ARGS)
        PARSED_ARGS["EXP_NAME"] = join(["$k$v" for (k, v) in MAIN_ARGS], '_')
    end

    return PARSED_ARGS
end
PARSED_ARGS = parse_cmd()
# Echo every parsed argument for reproducibility of the run log.
for (k,v) in PARSED_ARGS
    logger("$k => $v")
end
# Unpack parsed arguments into script-level constants.
SIMDAT_PATH = PARSED_ARGS["simdat_path"]
MCMC_ITER = PARSED_ARGS["MCMC_ITER"]
BURN = PARSED_ARGS["BURN"]
K_MCMC = PARSED_ARGS["K_MCMC"]
L0_MCMC = PARSED_ARGS["L0_MCMC"]
L1_MCMC = PARSED_ARGS["L1_MCMC"]
L_MCMC = Dict(0 => L0_MCMC, 1 => L1_MCMC)  # L truncation per marker state (0/1)
# missing mechanism
yQuantiles = PARSED_ARGS["yQuantiles"]
pBounds = PARSED_ARGS["pBounds"]
EXP_NAME = PARSED_ARGS["EXP_NAME"]
SEED = PARSED_ARGS["SEED"]
RESULTS_DIR = PARSED_ARGS["RESULTS_DIR"]
printFreq = PARSED_ARGS["printFreq"]
# END OF ARG PARSING
# CREATE RESULTS DIR
OUTDIR = "$(RESULTS_DIR)/$(EXP_NAME)/"
mkpath(OUTDIR)
# Set random seed
Random.seed!(SEED);
logger("Load simulated data ...");
BSON.@load SIMDAT_PATH simdat
dat = Cytof5.Model.Data(simdat[:y])
logger("Generating priors ...");
# Build model constants/priors; values mirror the paper's settings.
@time c = Cytof5.Model.defaultConstants(dat, K_MCMC, L_MCMC,
                                        tau0=10.0, tau1=10.0,
                                        sig2_prior=InverseGamma(3.0, 2.0),
                                        alpha_prior=Gamma(0.1, 10.0),
                                        yQuantiles=yQuantiles, pBounds=pBounds,
                                        similarity_Z=Cytof5.Model.sim_fn_abs(10000),
                                        probFlip_Z=2.0 / (dat.J * K_MCMC),
                                        noisyDist=Normal(0.0, 3.16))
Cytof5.Model.printConstants(c)
println("dat.I: $(dat.I)")
println("dat.J: $(dat.J)")
println("dat.N: $(dat.N)")
# Plot missing mechanism
logger("Plot missing mechanism")
util.plotPdf("$(OUTDIR)/prob_miss.pdf")
R"par(mfrow=c($(dat.I), 1))"
for i in 1:dat.I
    util.plotProbMiss(c.beta, i)
end
R"par(mfrow=c(1,1))"
util.devOff()
logger("Generating initial state ...");
# @time init = Cytof5.Model.genInitialState(c, dat)
logger("use smart init ...")
@time init = Cytof5.Model.smartInit(c, dat)
# Plot initial Z
util.plotPdf("$(OUTDIR)/Z_init.pdf")
addGridLines(J::Int, K::Int, col="grey") = util.abline(v=(1:K) .+ .5, h=(1:J) .+ .5, col=col)
util.myImage(init.Z, xlab="Features", ylab="Markers", addL=false, f=Z->addGridLines(dat.J, c.K))
util.devOff()
# Fit Model
nsamps_to_thin(nsamps::Int, nmcmc::Int) = max(1, div(nmcmc, nsamps))
@time out, lastState, ll, metrics, dden=
Cytof5.Model.cytof5_fit(init, c, dat,
monitors=[[:Z, :lam, :W,
:sig2, :delta,
:alpha, :v,
:eta, :eps],
[:y_imputed, :gam]],
thins=[2, nsamps_to_thin(10, MCMC_ITER)],
nmcmc=MCMC_ITER, nburn=BURN,
computeLPML=true, computeDIC=true,
computedden=true, thin_dden=nsamps_to_thin(200, MCMC_ITER),
use_repulsive=false,
joint_update_Z=true,
printFreq=10, flushOutput=true)
logger("Saving Data ...");
println("length of dden: $(length(dden))")
# @save "$(OUTDIR)/output.jld2" out dat ll lastState c dat metrics
BSON.@save "$(OUTDIR)/output.bson" out ll lastState c metrics init dden simdat
logger("MCMC Completed.");
#= Test
julia --color=yes sim.jl --I=3 --J=32 --N_factor=100 --K=8 --K_MCMC=10 --L0_MCMC=5 --L1_MCMC=5 \
--MCMC_ITER=10 --BURN=10 --RESULTS_DIR=results --EXP_NAME=bla
=#
| [
27,
34345,
29,
82,
12078,
14,
14323,
62,
44517,
14,
14323,
13,
20362,
198,
35235,
7203,
19031,
10392,
9313,
8,
198,
31,
2435,
2221,
198,
220,
1262,
5934,
1462,
69,
20,
198,
220,
1262,
14534,
11,
46567,
507,
198,
220,
1303,
16926,
46... | 1.942966 | 2,630 |
<filename>src/processes.jl
# ------------------------------------------------------------------
# Licensed under the MIT License. See LICENCE in the project root.
# ------------------------------------------------------------------
"""
Process
A geological process of evolution.
"""
abstract type Process end
"""
evolve!(state, proc, Δt)
Evolve the `state` with process `proc` for a time period `Δt`.
"""
evolve!(state, proc, Δt::Float64) = error("not implemented")
"""
TimelessProcess
A geological process implemented without the notion of time.
"""
abstract type TimelessProcess <: Process end
"""
evolve!(land, proc)
Evolve the `land` with timeless process `proc`.
"""
evolve!(land::Matrix, proc::TimelessProcess) = error("not implemented")
"""
evolve!(land, proc, Δt)
Evolve the `land` with timeless process `proc` for a time period `Δt`.
"""
function evolve!(land::Matrix, proc::TimelessProcess, Δt::Float64)
t = mean(land)
evolve!(land, proc)
@. land = t + Δt + land
nothing
end
"""
evolve!(state, proc, Δt)
Evolve the landscape `state` with timeless process `proc` for a time period `Δt`.
"""
evolve!(state::LandState, proc::TimelessProcess, Δt::Float64) =
evolve!(state.land, proc, Δt)
#------------------
# IMPLEMENTATIONS
#------------------
include("processes/geostats.jl")
include("processes/smoothing.jl")
include("processes/sequential.jl")
include("processes/analytical.jl")
| [
27,
34345,
29,
10677,
14,
14681,
274,
13,
20362,
198,
2,
16529,
438,
198,
2,
49962,
739,
262,
17168,
13789,
13,
4091,
38559,
18310,
287,
262,
1628,
6808,
13,
198,
2,
16529,
438,
198,
198,
37811,
198,
220,
220,
220,
10854,
198,
198,
... | 3.234234 | 444 |
<filename>test/FESpacesTests/ConstrainedFESpacesTests.jl
# Tests for ConstrainedFESpace and the VectorOfTwoParts helper.
module ConstrainedFESpacesTests
using Test
using Gridap
using Gridap.ConstrainedFESpaces: VectorOfTwoParts
# Build a 4x4 Cartesian mesh in 2D with linear Lagrangian elements.
D = 2
dom = fill(4,D)
model = CartesianDiscreteModel(partition=tuple(dom...))
order = 1
diritag = "boundary"
_fespace = CLagrangianFESpace(Float64,model,order,diritag)
# Constrain (remove) two free dofs from the space.
fixeddofs = [2,5]
fespace = ConstrainedFESpace(_fespace,fixeddofs)
ufun(x) = x[1] + x[2] + 2.0
trian = Triangulation(model)
quad = CellQuadrature(trian,degree=2)
bh = FEBasis(fespace)
uh = zero(fespace)
cellmat = integrate(inner(bh,bh),trian,quad)
cellvec = integrate(inner(bh,uh),trian,quad)
# Expected dof counts after constraining: 9 free - 2 fixed = 7.
nfree = 7
ndiri = 16
test_fe_space(fespace, nfree, ndiri, cellmat, cellvec, ufun)
g(x) = 2.0
U = TrialFESpace(fespace,g)
u(x) = x[1] + x[2] + 2.0
uh = interpolate(fespace,u)
#writevtk(trian,"trian",cellfields=["uh"=>uh])
# Unit test of VectorOfTwoParts: map free dofs to positive ids and fixed
# dofs to negative ids past the Dirichlet range.
nfree = 10
ndiri = 4
fixeddofs = [3,1,5,7]
nfixed = length(fixeddofs)
nfree_new = nfree - nfixed
dof_to_new_dof = zeros(Int,nfree)
is_fixed = fill(false,nfree)
is_fixed[fixeddofs] .= true
is_free = is_fixed.==false
dof_to_new_dof[is_free] .= 1:nfree_new
dof_to_new_dof[is_fixed] .= (-(ndiri+1)):-1:(-(ndiri+nfixed))
freevals=[20,40,60,80,90,100]
fixedvals=[30,10,50,70]
v = VectorOfTwoParts(dof_to_new_dof,freevals,fixedvals,ndiri)
# Indexing through the combined vector must recover each part's values.
@test all(v[is_free] .== freevals)
@test all(v[is_fixed] .== fixedvals)
end # module
| [
27,
34345,
29,
9288,
14,
37,
1546,
43076,
51,
3558,
14,
3103,
2536,
1328,
37,
1546,
43076,
51,
3558,
13,
20362,
198,
21412,
1482,
2536,
1328,
37,
1546,
43076,
51,
3558,
198,
198,
3500,
6208,
198,
3500,
24846,
499,
198,
3500,
24846,
... | 2.167712 | 638 |
# Aggregator module: runs the first batch of FESpaces test suites, one
# @testset per included file so failures are reported per suite.
module FESpacesTests1
using Test
@testset "ConformingFESpaces" begin include("ConformingFESpacesTests.jl") end
@testset "FESpacesInterfaces" begin include("FESpacesInterfacesTests.jl") end
@testset "SingleFieldFESpaces" begin include("SingleFieldFESpacesTests.jl") end
@testset "TrialFESpaces" begin include("TrialFESpacesTests.jl") end
@testset "Assemblers" begin include("AssemblersTests.jl") end
@testset "SparseMatrixAssemblers" begin include("SparseMatrixAssemblersTests.jl") end
@testset "FEOperators" begin include("FEOperatorsTests.jl") end
@testset "AffineFEOperators" begin include("AffineFEOperatorsTests.jl") end
@testset "FEOperatorsFromWeakForm" begin include("FEOperatorsFromWeakFormTests.jl") end
@testset "FESolvers" begin include("FESolversTests.jl") end
@testset "DiscontinuousFESpaces" begin include("DiscontinuousFESpacesTests.jl") end
@testset "DivConformingFESpaces" begin include("DivConformingFESpacesTests.jl") end
@testset "CurlConformingFESpaces" begin include("CurlConformingFESpacesTests.jl") end
end # module
| [
21412,
376,
1546,
43076,
51,
3558,
16,
198,
198,
3500,
6208,
198,
198,
31,
9288,
2617,
366,
3103,
15464,
37,
1546,
43076,
1,
2221,
2291,
7203,
3103,
15464,
37,
1546,
43076,
51,
3558,
13,
20362,
4943,
886,
198,
198,
31,
9288,
2617,
3... | 2.890411 | 365 |
@testset "Testing IndexZero" begin
IndexZero = CounterTools.IndexZero
x = CounterTools.IndexZero(0)
@test CounterTools.value(x) == 0
# `Integers` should have 1 subtracted from them.
# `IndexZero`s should just pass through
y = CounterTools.indexzero(1)
z = CounterTools.indexzero(x)
@test CounterTools.value(y) == 0
@test y == z
A = [1,2,3]
B = (1,2,3)
@test A[y] == A[1]
@test B[y] == B[1]
# Iterator interface
@test collect(y) == [y]
end
| [
31,
9288,
2617,
366,
44154,
12901,
28667,
1,
2221,
198,
220,
220,
220,
12901,
28667,
796,
15034,
33637,
13,
15732,
28667,
198,
220,
220,
220,
2124,
796,
15034,
33637,
13,
15732,
28667,
7,
15,
8,
198,
220,
220,
220,
2488,
9288,
15034,
... | 2.345794 | 214 |
<reponame>mattirish/PowerSimulations.jl<filename>src/devices_models/devices/hydro_generation.jl
# Type tree of hydro device formulations used for dispatch when building
# variables and constraints.
abstract type AbstractHydroFormulation <: AbstractDeviceFormulation end
abstract type AbstractHydroDispatchFormulation <: AbstractHydroFormulation end
abstract type AbstractHydroUnitCommitment <: AbstractHydroFormulation end

# Concrete formulations: fixed output, dispatch variants, and unit-commitment
# variants (run-of-river vs. seasonal flow).
struct HydroFixed <: AbstractHydroFormulation end
struct HydroDispatchRunOfRiver <: AbstractHydroDispatchFormulation end
struct HydroDispatchSeasonalFlow <: AbstractHydroDispatchFormulation end
struct HydroCommitmentRunOfRiver <: AbstractHydroUnitCommitment end
struct HydroCommitmentSeasonalFlow <: AbstractHydroUnitCommitment end
########################### Hydro generation variables #################################
# Add the active-power variables `P_<H>` for each hydro device, bounded by
# the device's active-power limits and contributing to the nodal active
# power balance.
function activepower_variables!(canonical::Canonical,
                               devices::IS.FlattenIteratorWrapper{H}) where H<:PSY.HydroGen
    add_variable(canonical,
                 devices,
                 Symbol("P_$(H)"),
                 false,
                 :nodal_balance_active;
                 # NOTE(review): limits use direct field access while the
                 # initial value uses PSY accessors — consider unifying.
                 lb_value = d -> d.tech.activepowerlimits.min,
                 ub_value = d -> d.tech.activepowerlimits.max,
                 init_value = d -> PSY.get_activepower(PSY.get_tech(d)))
    return
end
# Add the reactive-power variables `Q_<H>` for each hydro device, bounded by
# the device's reactive-power limits and contributing to the nodal reactive
# power balance.
function reactivepower_variables!(canonical::Canonical,
                                  devices::IS.FlattenIteratorWrapper{H}) where H<:PSY.HydroGen
    add_variable(canonical,
                 devices,
                 Symbol("Q_$(H)"),
                 false,
                 :nodal_balance_reactive,
                 ub_value = d -> d.tech.reactivepowerlimits.max,
                 lb_value = d -> d.tech.reactivepowerlimits.min,
                 init_value = d -> d.tech.reactivepower)
    return
end
"""
This function add the variables for power generation commitment to the model
"""
function commitment_variables!(canonical::Canonical,
devices::IS.FlattenIteratorWrapper{H}) where {H<:PSY.HydroGen}
time_steps = model_time_steps(canonical)
var_names = [Symbol("ON_$(H)"), Symbol("START_$(H)"), Symbol("STOP_$(H)")]
for v in var_names
add_variable(canonical, devices, v, true)
end
return
end
### Constraints for Thermal Generation without commitment variables ####
"""
This function adds the Commitment Status constraint when there are CommitmentVariables
"""
function commitment_constraints!(canonical::Canonical,
devices::IS.FlattenIteratorWrapper{H},
device_formulation::Type{D},
system_formulation::Type{S}) where {H<:PSY.HydroGen,
D<:AbstractHydroUnitCommitment,
S<:PM.AbstractPowerModel}
key = ICKey(DeviceStatus, H)
if !(key in keys(canonical.initial_conditions))
error("Initial status conditions not provided. This can lead to unwanted results")
end
device_commitment(canonical,
canonical.initial_conditions[key],
Symbol("commitment_$(H)"),
(Symbol("START_$(H)"),
Symbol("STOP_$(H)"),
Symbol("ON_$(H)"))
)
return
end
####################################### Reactive Power Constraints #########################
function reactivepower_constraints!(canonical::Canonical,
devices::IS.FlattenIteratorWrapper{H},
device_formulation::Type{AbstractHydroDispatchFormulation},
system_formulation::Type{<:PM.AbstractPowerModel}) where H<:PSY.HydroGen
range_data = Vector{NamedMinMax}(undef, length(devices))
for (ix, d) in enumerate(devices)
tech = PSY.get_tech(d)
name = PSY.get_name(d)
if isnothing(PSY.get_reactivepowerlimits(tech))
limits = (min = 0.0, max = 0.0)
range_data[ix] = (PSY.get_name(d), limits)
@warn("Reactive Power Limits of $(name) are nothing. Q_$(name) is set to 0.0")
else
range_data[ix] = (name, PSY.get_reactivepowerlimits(tech))
end
end
device_range(canonical,
range_data,
Symbol("reactiverange_$(H)"),
Symbol("Q_$(H)"))
return
end
######################## output constraints without Time Series ############################
function _get_time_series(canonical::Canonical,
devices::IS.FlattenIteratorWrapper{<:PSY.HydroGen})
initial_time = model_initial_time(canonical)
use_forecast_data = model_uses_forecasts(canonical)
parameters = model_has_parameters(canonical)
time_steps = model_time_steps(canonical)
device_total = length(devices)
ts_data_active = Vector{Tuple{String, Int64, Float64, Vector{Float64}}}(undef, device_total)
ts_data_reactive = Vector{Tuple{String, Int64, Float64, Vector{Float64}}}(undef, device_total)
for (ix, device) in enumerate(devices)
bus_number = PSY.get_number(PSY.get_bus(device))
name = PSY.get_name(device)
tech = PSY.get_tech(device)
# pf = sin(acos(PSY.get_powerfactor(PSY.get_tech(device))))
active_power = use_forecast_data ? PSY.get_rating(tech) : PSY.get_activepower(device)
if use_forecast_data
ts_vector = TS.values(PSY.get_data(PSY.get_forecast(PSY.Deterministic,
device,
initial_time,
"rating")))
else
ts_vector = ones(time_steps[end])
end
ts_data_active[ix] = (name, bus_number, active_power, ts_vector)
ts_data_reactive[ix] = ts_data_active[ix] # (name, bus_number, active_power * pf, ts_vector)
end
return ts_data_active, ts_data_reactive
end
function activepower_constraints!(canonical::Canonical,
devices::IS.FlattenIteratorWrapper{H},
device_formulation::Type{<:AbstractHydroDispatchFormulation},
system_formulation::Type{<:PM.AbstractPowerModel}) where H<:PSY.HydroGen
parameters = model_has_parameters(canonical)
use_forecast_data = model_uses_forecasts(canonical)
if !parameters && !use_forecast_data
range_data = [(PSY.get_name(d), (min = 0.0, max = PSY.get_rating(PSY.get_tech(d)))) for d in devices]
device_range(canonical,
range_data,
Symbol("activerange_$(H)"),
Symbol("P_$(H)"))
return
end
ts_data_active, _ = _get_time_series(canonical, devices)
if parameters
device_timeseries_param_ub(canonical,
ts_data_active,
Symbol("activerange_$(H)"),
UpdateRef{H}(:rating),
Symbol("P_$(H)"))
else
device_timeseries_ub(canonical,
ts_data_active,
Symbol("activerange_$(H)"),
Symbol("P_$(H)"))
end
return
end
function activepower_constraints!(canonical::Canonical,
devices::IS.FlattenIteratorWrapper{H},
device_formulation::Type{<:AbstractHydroUnitCommitment},
system_formulation::Type{<:PM.AbstractPowerModel}) where H<:PSY.HydroGen
parameters = model_has_parameters(canonical)
use_forecast_data = model_uses_forecasts(canonical)
if !parameters && !use_forecast_data
range_data = [(PSY.get_name(d), (min = 0.0, max = PSY.get_rating(PSY.get_tech(d)))) for d in devices]
device_semicontinuousrange(canonical,
range_data,
Symbol("activerange_$(H)"),
Symbol("P_$(H)"),
Symbol("ON_$(H)"))
return
end
ts_data_active, _ = _get_time_series(canonical, devices)
if parameters
device_timeseries_ub_bigM(canonical,
ts_data_active,
Symbol("activerange_$(H)"),
Symbol("P_$(H)"),
UpdateRef{H}(:rating),
Symbol("ON_$(H)"))
else
device_timeseries_ub_bin(canonical,
ts_data_active,
Symbol("activerange_$(H)"),
Symbol("P_$(H)"),
Symbol("ON_$(H)"))
end
return
end
########################## Make initial Conditions for a Model #############################
function initial_conditions!(canonical::Canonical,
devices::IS.FlattenIteratorWrapper{H},
device_formulation::Type{<:AbstractHydroUnitCommitment}) where {H<:PSY.HydroGen}
status_init(canonical, devices)
output_init(canonical, devices)
duration_init(canonical, devices)
return
end
function initial_conditions!(canonical::Canonical,
devices::IS.FlattenIteratorWrapper{H},
device_formulation::Type{D}) where {H<:PSY.HydroGen,
D<:AbstractHydroDispatchFormulation}
output_init(canonical, devices)
return
end
########################## Addition of to the nodal balances ###############################
function nodal_expression!(canonical::Canonical,
devices::IS.FlattenIteratorWrapper{H},
system_formulation::Type{<:PM.AbstractPowerModel}) where H<:PSY.HydroGen
parameters = model_has_parameters(canonical)
ts_data_active, ts_data_reactive = _get_time_series(canonical, devices)
if parameters
include_parameters(canonical,
ts_data_active,
UpdateRef{H}(:rating),
:nodal_balance_active)
include_parameters(canonical,
ts_data_reactive,
UpdateRef{H}(:rating),
:nodal_balance_reactive)
return
end
for t in model_time_steps(canonical)
for device_value in ts_data_active
_add_to_expression!(canonical.expressions[:nodal_balance_active],
device_value[2],
t,
device_value[3]*device_value[4][t])
end
for device_value in ts_data_reactive
_add_to_expression!(canonical.expressions[:nodal_balance_reactive],
device_value[2],
t,
device_value[3]*device_value[4][t])
end
end
return
end
function nodal_expression!(canonical::Canonical,
devices::IS.FlattenIteratorWrapper{H},
system_formulation::Type{<:PM.AbstractActivePowerModel}) where H<:PSY.HydroGen
parameters = model_has_parameters(canonical)
ts_data_active, _ = _get_time_series(canonical, devices)
if parameters
include_parameters(canonical,
ts_data_active,
UpdateRef{H}(:rating),
:nodal_balance_active)
return
end
for t in model_time_steps(canonical)
for device_value in ts_data_active
_add_to_expression!(canonical.expressions[:nodal_balance_active],
device_value[2],
t,
device_value[3]*device_value[4][t])
end
end
return
end
##################################### Hydro generation cost ############################
function cost_function(canonical::Canonical,
devices::IS.FlattenIteratorWrapper{PSY.HydroDispatch},
device_formulation::Type{D},
system_formulation::Type{<:PM.AbstractPowerModel}) where D<:AbstractHydroFormulation
add_to_cost(canonical,
devices,
Symbol("P_HydroDispatch"),
:fixed,
-1.0)
return
end
##################################### Water/Energy Budget Constraint ############################
function _get_budget(canonical::Canonical,
devices::IS.FlattenIteratorWrapper{H}) where H<:PSY.HydroGen
initial_time = model_initial_time(canonical)
use_forecast_data = model_uses_forecasts(canonical)
parameters = model_has_parameters(canonical)
time_steps = model_time_steps(canonical)
device_total = length(devices)
budget_data = Vector{Tuple{String, Int64, Float64, Vector{Float64}}}(undef, device_total)
for (ix, device) in enumerate(devices)
bus_number = PSY.get_number(PSY.get_bus(device))
name = PSY.get_name(device)
tech = PSY.get_tech(device)
# This is where you would get the water/energy storage capacity
# which is then multiplied by the forecast value to get you the energy budget
energy_capacity = use_forecast_data ? PSY.get_storagecapacity(device) : PSY.get_activepower(device)
if use_forecast_data
ts_vector = TS.values(PSY.get_data(PSY.get_forecast(PSY.Deterministic,
device,
initial_time,
"storagecapacity")))
else
ts_vector = ones(time_steps[end])
end
budget_data[ix] = (name, bus_number, energy_capacity, ts_vector)
end
return budget_data
end
function budget_constraints!(canonical::Canonical,
devices::IS.FlattenIteratorWrapper{H},
device_formulation::Type{<:AbstractHydroDispatchFormulation},
system_formulation::Type{<:PM.AbstractPowerModel}) where H<:PSY.HydroGen
parameters = model_has_parameters(canonical)
budget_data = _get_budget(canonical, devices)
if parameters
device_budget_param_ub(canonical,
budget_data,
Symbol("budget_$(H)"), # TODO: better name for this constraint
UpdateRef{H}(:storagecapacity),
Symbol("P_$(H)"))
else
device_budget_param_ub(canonical,
budget_data,
Symbol("budget_$(H)"), # TODO: better name for this constraint
Symbol("P_$(H)"))
end
end
function device_budget_param_ub(canonical::Canonical,
budget_data::Vector{Tuple{String, Int64, Float64, Vector{Float64}}},
cons_name::Symbol,
param_reference::UpdateRef,
var_name::Symbol)
time_steps = model_time_steps(canonical)
variable = get_variable(canonical, var_name)
set_name = (r[1] for r in budget_data)
no_of_budgets = length(budget_data[1][4])
time_lengths = time_steps/length(budget_data[1][4])
time_chunks = reshape(time_steps, (time_lengths, no_of_budgets))
constraint = _add_cons_container!(canonical, cons_name, set_name, no_of_budgets)
param = _add_param_container!(canonical, param_reference, names, no_of_budgets)
for data in budget_data, i in 1:no_of_budgets
name = data[1]
forecast = data[4][i]
multiplier = data[3]
param[name] = PJ.add_parameter(canonical.JuMPmodel, forecast)
constraint[name] = JuMP.@constraint(canonical.JuMPmodel,
sum([variable[name, t] for t in time_chunks[:, i]]) <= multiplier*param[name])
end
return
end
function device_budget_ub(canonical::Canonical,
budget_data::Vector{Tuple{String, Int64, Float64, Vector{Float64}}},
cons_name::Symbol,
var_name::Symbol)
time_steps = model_time_steps(canonical)
variable = get_variable(canonical, var_name)
set_name = (r[1] for r in budget_data)
no_of_budgets = length(budget_data[1][4])
time_lengths = time_steps/length(budget_data[1][4])
time_chunks = reshape(time_steps, (time_lengths, no_of_budgets))
constraint = _add_cons_container!(canonical, cons_name, set_name, no_of_budgets)
for data in budget_data, i in 1:no_of_budgets
name = data[1]
forecast = data[4][i]
multiplier = data[3]
constraint[name] = JuMP.@constraint(canonical.JuMPmodel,
sum([variable[name, t] for t in time_chunks[:, i]]) <= multiplier*forecast)
end
return
end
| [
27,
7856,
261,
480,
29,
76,
1078,
343,
680,
14,
13434,
8890,
5768,
13,
20362,
27,
34345,
29,
10677,
14,
42034,
62,
27530,
14,
42034,
14,
15511,
305,
62,
20158,
13,
20362,
198,
397,
8709,
2099,
27741,
40436,
305,
8479,
1741,
1279,
25... | 1.993614 | 8,613 |
<reponame>scottiealexander/Spk.jl<filename>Histogram/src/Histogram.jl
module Histogram
# simple histogram functions copied from Julia v0.4.7 to avoid having to
# pull in all of Statistics.jl
export hist, hist!
function histrange(v::AbstractArray{T}, n::Integer) where {T<:AbstractFloat}
nv = length(v)
if nv == 0 && n < 0
throw(ArgumentError("number of bins must be ≥ 0 for an empty array, got $n"))
elseif nv > 0 && n < 1
throw(ArgumentError("number of bins must be ≥ 1 for a non-empty array, got $n"))
end
if nv == 0
return 0.0:1.0:0.0
end
lo, hi = extrema(v)
if hi == lo
step = 1.0
else
bw = (hi - lo) / n
e = 10.0^floor(log10(bw))
r = bw / e
if r <= 2
step = 2*e
elseif r <= 5
step = 5*e
else
step = 10*e
end
end
start = step*(ceil(lo/step)-1)
nm1 = ceil(Int,(hi - start)/step)
return start:step:(start + nm1*step)
end
function histrange(v::AbstractArray{T}, n::Integer) where {T<:Integer}
nv = length(v)
if nv == 0 && n < 0
throw(ArgumentError("number of bins must be ≥ 0 for an empty array, got $n"))
elseif nv > 0 && n < 1
throw(ArgumentError("number of bins must be ≥ 1 for a non-empty array, got $n"))
end
if nv == 0
return 0:1:0
end
if n <= 0
throw(ArgumentError("number of bins n = $n must be positive"))
end
lo, hi = extrema(v)
if hi == lo
step = 1
else
bw = (hi - lo) / n
e = 10^max(0,floor(Int,log10(bw)))
r = bw / e
if r <= 1
step = e
elseif r <= 2
step = 2*e
elseif r <= 5
step = 5*e
else
step = 10*e
end
end
start = step*(ceil(lo/step)-1)
nm1 = ceil(Int,(hi - start)/step)
return start:step:(start + nm1*step)
end
# Sturges' formula
function sturges(n::Integer)
n==0 && return one(n)
return ceil(Int, log2(n)) + 1
end
function hist!(h::AbstractArray{HT}, v::AbstractVector, edg::AbstractVector) where HT
n = length(edg) - 1
length(h) == n || throw(DimensionMismatch("length(histogram) must equal length(edges) - 1"))
fill!(h, zero(HT))
for x in v
i = searchsortedfirst(edg, x)-1
if 1 <= i <= n
h[i] += 1
end
end
return edg, h
end
hist(v::AbstractVector, edg::AbstractVector) = hist!(Array{Int}(undef, length(edg)-1), v, edg)
hist(v::AbstractVector, n::Integer) = hist(v,histrange(v,n))
hist(v::AbstractVector) = hist(v,sturges(length(v)))
end
| [
27,
7856,
261,
480,
29,
1416,
1252,
494,
1000,
87,
4066,
14,
4561,
74,
13,
20362,
27,
34345,
29,
13749,
21857,
14,
10677,
14,
13749,
21857,
13,
20362,
198,
21412,
5590,
21857,
198,
198,
2,
2829,
1554,
21857,
5499,
18984,
422,
22300,
... | 2.079239 | 1,262 |
using Catlab.CategoricalAlgebra, Catlab.Graphs, Catlab.Present, Catlab.Graphics, Catlab.Theories, Catlab.Present
using Catlab.CategoricalAlgebra.FinCats: FinCatGraphEq
@present ThSIR(FreeSchema) begin
(S,I,R)::Ob
end
"""This code will work for any choice of 'type graph'"""
function update_state(state::StructACSet{S}) where S
rules = []
while !isempty(rules)
state = apply_rule(state, pop!(rules))
end
end
@acset_type SIR(ThSIR)
# infect = Rule("name", L, R)
I = @acset SIR begin I=1 end
I2 = @acset SIR begin I=2 end
SI = @acset SIR begin S=1; I=1 end
# L_infect = homomorphism(I, SI) # equivalent to below b/c only one morphism
L_infect = ACSetTransformation(I, SI; I=[1])
R_infect = ACSetTransformation(I, I2; I=[1])
ex_state = @acset SIR begin S=10; I=3; R=1 end
# rewrite(L_infect, R_infect, ex_state) # execute immediately
match = homomorphism(SI, ex_state)
matches = [match_1, match_2] # pointing to ex_state
"""
L I R
|
G <- K -> H (updated G)
kg kh
"""
_, kg, _, kh = rewrite_match_maps(L_infect, R_infect, match)
# pretend state_2 is a homomorphism from state_1 -> state_2
matches = [match for match in matches]
state_3 = rewrite_match(L_infect, R_infect, match_2)
# # type 2 --------------------------------------------------
# @present ThSIR2(FreeSchema) begin
# State::Ob
# Agents::Ob
# state::Hom(Agents, State)
# end
# @acset_type SIR2(ThSIR2)
# state0 = vcat(fill.(1:3, [5,4,1])...)
# SIR2_state = @acset SIR2 begin State=3; Agents=10; state=state0 end
# I = @acset SIR2 begin Agents=1; State=3; state=2 end
# I2 = @acset SIR2 begin Agents=2; State=3; state=[2,2] end
# SI = @acset SIR2 begin Agents=2; State=3; state=[1,2] end
# # L_infect = homomorphism(I, SI) # equivalent to below b/c only one morphism
# homomorphism(I, SI)
# homomorphism(I, I2)
# L_infect = ACSetTransformation(I, SI; Agents=[1])
# R_infect = ACSetTransformation(I, I2) | [
3500,
5181,
23912,
13,
34,
2397,
12409,
2348,
29230,
11,
5181,
23912,
13,
37065,
82,
11,
5181,
23912,
13,
34695,
11,
5181,
23912,
13,
18172,
11,
5181,
23912,
13,
464,
1749,
11,
5181,
23912,
13,
34695,
198,
3500,
5181,
23912,
13,
34,
... | 2.456494 | 793 |
import Base.Cartesian.@ntuple
nparticles(p) = length(p)
nparticles(p::Type{<:AbstractParticles{T,N}}) where {T,N} = N
nparticles(p::AbstractParticles{T,N}) where {T,N} = N
nparticles(p::ParticleArray) = nparticles(eltype(p))
nparticles(p::Type{<:ParticleArray}) = nparticles(eltype(p))
particletype(p::AbstractParticles) = typeof(p)
particletype(::Type{P}) where P <: AbstractParticles = P
particletype(p::AbstractArray{<:AbstractParticles}) = eltype(p)
particleeltype(::AbstractParticles{T,N}) where {T,N} = T
particleeltype(::AbstractArray{<:AbstractParticles{T,N}}) where {T,N} = T
vecindex(p,i) = getindex(p,i)
vecindex(p::AbstractParticles,i) = getindex(p.particles,i)
vecindex(p::ParticleArray,i) = vecindex.(p,i)
vecindex(p::NamedTuple,i) = (; Pair.(keys(p), ntuple(j->arggetter(i,p[j]), fieldcount(typeof(p))))...)
function indexof_particles(args)
inds = findall(a-> a <: SomeKindOfParticles, args)
inds === nothing && throw(ArgumentError("At least one argument should be <: AbstractParticles. If particles appear nested as fields inside an argument, see `with_workspace` and `Workspace`"))
all(nparticles(a) == nparticles(args[inds[1]]) for a in args[inds]) || throw(ArgumentError("All p::Particles must have the same number of particles."))
(inds...,)
# TODO: test all same number of particles
end
function arggetter(i,a::Union{SomeKindOfParticles, NamedTuple})
vecindex(a,i)
end
arggetter(i,a) = a
"""
@bymap f(p, args...)
Call `f` with particles or vectors of particles by using `map`. This can be utilized if registering `f` using [`register_primitive`](@ref) fails. See also [`Workspace`](@ref) if `bymap` fails.
"""
macro bymap(ex)
@capture(ex, f_(args__)) || error("expected a function call")
quote
bymap($(esc(f)),$(esc.(args)...))
end
end
"""
bymap(f, args...)
Uncertainty propagation using the `map` function.
Call `f` with particles or vectors of particles by using `map`. This can be utilized if registering `f` using [`register_primitive`](@ref) fails. See also [`Workspace`](@ref) if `bymap` fails.
"""
function bymap(f::F, args...) where F
inds = indexof_particles(typeof.(args))
T,N,PT = particletypetuple(args[first(inds)])
individuals = map(1:N) do i
argsi = ntuple(j->arggetter(i,args[j]), length(args))
f(argsi...)
end
PTNT = PT{eltype(eltype(individuals)),N}
if (eltype(individuals) <: AbstractArray{TT,0} where TT) || eltype(individuals) <: Number
PTNT(individuals)
elseif eltype(individuals) <: AbstractArray{TT,1} where TT
PTNT(copy(reduce(hcat,individuals)'))
elseif eltype(individuals) <: AbstractArray{TT,2} where TT
# @show PT{eltype(individuals),N}
reshape(PTNT(copy(reduce(hcat,vec.(individuals))')), size(individuals[1],1),size(individuals[1],2))::Matrix{PTNT}
else
error("Output with dimension >2 is currently not supported by `bymap`. Consider if `ℝⁿ2ℝⁿ_function($(f), $(args...))` works for your use case.")
end
end
"""
Distributed uncertainty propagation using the `pmap` function. See [`bymap`](@ref) for more details.
"""
function bypmap(f::F, args...) where F
inds = indexof_particles(typeof.(args))
T,N,PT = particletypetuple(args[first(inds)])
individuals = map(1:N) do i
argsi = ntuple(j->arggetter(i,args[j]), length(args))
f(argsi...)
end
PTNT = PT{eltype(eltype(individuals)),N}
if (eltype(individuals) <: AbstractArray{TT,0} where TT) || eltype(individuals) <: Number
PTNT(individuals)
elseif eltype(individuals) <: AbstractArray{TT,1} where TT
PTNT(copy(reduce(hcat,individuals)'))
elseif eltype(individuals) <: AbstractArray{TT,2} where TT
# @show PT{eltype(individuals),N}
reshape(PTNT(copy(reduce(hcat,vec.(individuals))')), size(individuals[1],1),size(individuals[1],2))::Matrix{PTNT}
else
error("Output with dimension >2 is currently not supported by `bymap`. Consider if `ℝⁿ2ℝⁿ_function($(f), $(args...))` works for your use case.")
end
end
"""
@bypmap f(p, args...)
Call `f` with particles or vectors of particles by using parallel `pmap`. This can be utilized if registering `f` using [`register_primitive`](@ref) fails. See also [`Workspace`](@ref) if `bymap` fails.
"""
macro bypmap(ex)
@capture(ex, f_(args__)) || error("expected a function call")
quote
bypmap($(esc(f)),$(esc.(args)...))
end
end
"""
@prob a < b
Calculate the probability that an event on any of the forms `a < b, a > b, a <= b, a >= b` occurs, where `a` and/or `b` are of type `AbstractParticles`.
"""
macro prob(ex)
ex.head == :call && ex.args[1] ∈ (:<,:>,:<=,:>=) || error("Expected an expression on any of the forms `a < b, a > b, a <= b, a >= b`")
op = ex.args[1]
a = ex.args[2]
b = ex.args[3]
quote
mean($op.(MonteCarloMeasurements.maybe_particles($(esc(a))), MonteCarloMeasurements.maybe_particles($(esc(b)))))
end
end
| [
11748,
7308,
13,
43476,
35610,
13,
31,
429,
29291,
198,
198,
77,
3911,
2983,
7,
79,
8,
796,
4129,
7,
79,
8,
198,
77,
3911,
2983,
7,
79,
3712,
6030,
90,
27,
25,
23839,
7841,
2983,
90,
51,
11,
45,
11709,
8,
810,
1391,
51,
11,
... | 2.511335 | 1,985 |
<gh_stars>1-10
using FileIO
using Images
field = [ "s3", "s8", "s14", "s15", "s20","s37", "s40"]
data_dir = "/datahub/rawdata/tandeng/mRNA_imaging/mRNA_confocal_hamamatsu-60X-TIRF/20200316";
output_dir = "$(data_dir)_visualization"
try
mkdir("$output_dir")
catch
end
function max_projection(pos)
println("Processing $pos")
raw_img = load(File(format"TIFF", "$data_dir/HE7-11-1-80uw-PWM_1_$pos.ome.tiff"));
img_size = size(raw_img);
img_max = zeros(N0f16, img_size[1], img_size[2], Int(img_size[3]/20));
for i in 1:Int(img_size[3]/20)
img_max[:, :, i] = maximum(raw_img[:, :, (i-1)*20+1:i*20], dims=3);
end
save("$output_dir/$pos.tiff", img_max);
end
for pos in field
max_projection(pos)
end
| [
27,
456,
62,
30783,
29,
16,
12,
940,
198,
198,
3500,
9220,
9399,
198,
3500,
5382,
198,
198,
3245,
796,
685,
366,
82,
18,
1600,
366,
82,
23,
1600,
366,
82,
1415,
1600,
366,
82,
1314,
1600,
366,
82,
1238,
2430,
82,
2718,
1600,
366... | 2.064246 | 358 |
<gh_stars>0
import Flatten: flattenable
@metadata initial_value nothing
import FieldMetadata: @units, units
import FieldMetadata: @limits, limits
import FieldMetadata: @prior, prior
import FieldMetadata: @description, description
@metadata bounds nothing
import FieldMetadata: @logscaled, logscaled
@metadata reference nothing
"""
AbstractParameters{T} <: AbstractVector{T}
An abstract type for AIBECS model parameters.
Parameters in AIBECS use the following convenience packages:
- Parameters
- FieldMetadata
- FieldDefaults
- Flatten
- Unitful
- DataFrames
- Distributions
These aim to allow for some nice features, which include
- nice syntax for unpacking parameters in functions via the `@unpack` macro (fron UnPack.jl)
- additional metadata on parameters
- easy conversion to and from vectors
- use of units and automatic conversions if necessary
- pretty table-format displays
- loading and saving to and from CSV files
- prior estimates for bayesian inference and optimization
See the list of examples to get an idea of how to generate parameters for your model.
# Examples
Generate a simple parameter type via
```jldoctest
julia> struct SimpleParams{T} <: AbstractParameters{T}
α::T
β::T
γ::T
end
SimpleParams
```
To create an instance of the `SimpleParams(Float64)` type, you can do
```jldoctest
julia> p = SimpleParams(1.0, 2.0, 3.0)
SimpleParams{Float64}
│ Row │ Symbol │ Value │
│ │ Symbol │ Float64 │
├─────┼────────┼─────────┤
│ 1 │ α │ 1.0 │
│ 2 │ β │ 2.0 │
│ 3 │ γ │ 3.0 │
```
One of the core features from Parameters is unpacking in functions, e.g.,
```jldoctest
julia> function simplef(p)
@unpack α, γ = p
return α + γ
end
simplef (generic function with 1 method)
julia> simplef(p) # 1.0 + 3.0
4.0
```
More complex examples are permitted by adding metadata (thanks to FieldMetadata.jl).
You can add units
```jldoctest
julia> @units struct UnitParams{T} <: AbstractParameters{T}
α::T | u"km"
β::T | u"hr"
γ::T | u"m/s"
end ;
julia> p = UnitParams(1.0, 2.0, 3.0)
UnitParams{Float64}
│ Row │ Symbol │ Value │ Unit │
│ │ Symbol │ Float64 │ Unitful… │
├─────┼────────┼─────────┼──────────┤
│ 1 │ α │ 1.0 │ km │
│ 2 │ β │ 2.0 │ hr │
│ 3 │ γ │ 3.0 │ m s^-1 │
```
Note that when adding units to your parameters, they will be converted to SI
when unpacked, as in, e.g.,
```jldoctest
julia> function speed(p)
@unpack α, β, γ = p
return α / β + γ
end
speed (generic function with 1 method)
julia> speed(p) # (1.0 km / 2.0 hr + 3 m/s) in m/s
3.138888888888889
```
Another example for optimizable/flattenable parameters
```jldoctest
julia> @initial_value @units @flattenable struct OptParams{T} <: AbstractParameters{T}
α::T | 3.6 | u"km" | true
β::T | 1.0 | u"hr" | false
γ::T | 1.0 | u"m/s" | true
end ;
julia> p = OptParams(initial_value(OptParams)...)
OptParams{Float64}
│ Row │ Symbol │ Value │ Initial value │ Unit │ Optimizable │
│ │ Symbol │ Float64 │ Float64 │ Unitful… │ Bool │
├─────┼────────┼─────────┼───────────────┼──────────┼─────────────┤
│ 1 │ α │ 3.6 │ 3.6 │ km │ 1 │
│ 2 │ β │ 1.0 │ 1.0 │ hr │ 0 │
│ 3 │ γ │ 1.0 │ 1.0 │ m s^-1 │ 1 │
```
Thanks to the FieldMetaData interface, you can chain the following preloaded metadata:
- initial_value
- units (from Unitful.jl)
- prior (from Distributions.jl)
- description (`String`)
- bounds (2-element `Tuple`)
- logscaled (`Bool`)
- flattenable (to convert to vectors of optimizable parameters only)
- reference (`String`)
Here is an example of parameter with all the possible metadata available in AIBECS:
```jldoctest
julia> @initial_value @units @prior @description @bounds @logscaled @flattenable @reference struct FullParams{T} <: AbstractParameters{T}
α::T | 1.0 | u"km" | Normal(0,1) | "The distance" | (-Inf, Inf) | false | false | "Jean et al., 2042"
β::T | 2.0 | u"hr" | LogNormal(0,1) | "The time" | ( 0, Inf) | true | true | "Claude et al. 1983"
γ::T | 3.0 | u"mol" | Normal(1,2) | "The # of moles" | ( -1, 1) | false | true | "Dusse et al. 2000"
end ;
julia> FullParams(4.0, 5.0, 6.0)
FullParams{Float64}
│ Row │ Symbol │ Value │ Initial value │ Unit │ Prior │ Description │ Bounds │ Logscaled │ Optimizable │ Reference │
│ │ Symbol │ Float64 │ Float64 │ Unitful… │ Distribu… │ String │ Tuple… │ Bool │ Bool │ String │
├─────┼────────┼─────────┼───────────────┼──────────┼──────────────────────────────────┼────────────────┼─────────────┼───────────┼─────────────┼────────────────────┤
│ 1 │ α │ 4.0 │ 1.0 │ km │ Normal{Float64}(μ=0.0, σ=1.0) │ The distance │ (-Inf, Inf) │ 0 │ 0 │ Jean et al., 2042 │
│ 2 │ β │ 5.0 │ 2.0 │ hr │ LogNormal{Float64}(μ=0.0, σ=1.0) │ The time │ (0, Inf) │ 1 │ 1 │ Claude et al. 1983 │
│ 3 │ γ │ 6.0 │ 3.0 │ mol │ Normal{Float64}(μ=1.0, σ=2.0) │ The # of moles │ (-1, 1) │ 0 │ 1 │ Dusse et al. 2000 │
```
Note that there is no check that the metadata you give is consistent.
These metadata will hopefully be useful for advanced usage of AIBECS, e.g., using prior information and/or bounds for optimization.
"""
abstract type AbstractParameters{T} <: AbstractVector{T} end
"""
symbols(p)
Returns the symbols in `p`.
Can also be used directly on the type of `p`
(because the symbols of `p::T` are contained in the type `T`).
"""
symbols(::Type{T}) where {T <: AbstractParameters} = fieldnames(T)
symbols(::T) where {T <: AbstractParameters} = symbols(T)
"""
flattenable_symbols(p)
Returns the flattenable symbols in `p`.
The flattenable symbols are those symbols that are kepth when using `p` as a vector,
e.g., when doing `vec(p)`.
(Useful when passing parameters to an optimization routine expeting a vector of optimizable parameters.)
Can also be used directly on the type of `p`
(because the flattenable symbols of `p::T` are contained in the type `T`).
"""
flattenable_symbols(::T) where {T <: AbstractParameters} = flattenable_symbols(T)
flattenable_symbols(::Type{T}) where {T <: AbstractParameters} = fieldnames(T)[collect(flattenable(T))]
"""
values(p::T) where {T <: AbstractParameters}
Returns a vector of **all** the values of `p`.
Note that `values(p)` is different from `vec(p)`.
"""
Base.values(p::T) where {T <: AbstractParameters} = [getfield(p, s) for s in symbols(T)]
"""
flattenable_values(p::T) where {T <: AbstractParameters}
Returns a vector of the **flattenable** values of `p`.
Note that `vec(p)` is different from `values(p)`.
"""
flattenable_values(p::T) where {T <: AbstractParameters} = [getfield(p, s) for s in flattenable_symbols(T)]
"""
vec(p::T) where {T <: AbstractParameters}
Returns a **SI-unit-converted** vector of flattenable values of `p`.
Note that `vec(p) ≠ flattenable_values(p)` if `p` has units.
"""
Base.vec(p::T) where {T <: AbstractParameters} = [UnPack.unpack(p, Val(s)) for s in flattenable_symbols(T)]
"""
length(p::AbstractParameter)
Returns the length of the **flattened/optimzable** vector of `p`.
May be different from the number of parameters.
Can also be used directly on the type of `p`.
"""
Base.length(::T) where {T <: AbstractParameters} = sum(flattenable(T))
Base.length(::Type{T}) where {T <: AbstractParameters} = sum(flattenable(T))
"""
size(p::AbstractParameter)
Returns the size of the **flattened/optimzable** vector of `p`.
May be different from the number of parameters.
Can also be used directly on the type of `p`.
"""
Base.size(::T) where {T <: AbstractParameters} = (length(T),)
Base.size(::Type{T}) where {T <: AbstractParameters} = (length(T),)
function Base.show(io::IO, p::T) where T <: AbstractParameters
print(T)
t = table(p)
show(io, t; summary=false)
end
function Base.show(io::IO, m::MIME"text/plain", p::T) where T <: AbstractParameters
print(T)
t = table(p)
show(io, m, t; summary=false)
end
function table(p::T, nf::NamedTuple) where {T <: AbstractParameters}
t = DataFrame(Symbol = collect(symbols(p)), Value = values(p))
for (n,f) in zip(keys(nf), nf)
setproperty!(t, n, collect(f(p)))
end
return t
end
"""
table(p)
Returns a `DataFrame` (a table) of `p`.
Useful for printing and saving into an actual text/latex table.
"""
function table(p::AbstractParameters)
ks, vs = Symbol[], Function[]
all(isnothing, initial_value(p)) || (push!(ks, Symbol("Initial value")); push!(vs, initial_value))
all(isequal(1), units(p)) || (push!(ks, :Unit); push!(vs, units))
all(isnothing, prior(p)) || (push!(ks, Symbol("Prior")); push!(vs, prior))
all(isempty, description(p)) || (push!(ks, Symbol("Description")); push!(vs, description))
all(isnothing, bounds(p)) || (push!(ks, Symbol("Bounds")); push!(vs, bounds))
any(logscaled(p)) && (push!(ks, Symbol("Logscaled")); push!(vs, logscaled))
all(flattenable(p)) || (push!(ks, Symbol("Optimizable")); push!(vs, flattenable))
all(isnothing, reference(p)) || (push!(ks, Symbol("Reference")); push!(vs, reference))
nf = (; zip(ks, vs)...)
t = table(p, nf)
end
"""
latex(p)
Returns a LaTeX-formatted table of the parameters.
"""
latex(p::AbstractParameters) = show(stdout, MIME("text/latex"), table(p))
"""
unpack(p <: AbstractParameters, s)
Unpacks the parameter `s` from `p`.
Note this is specialized and will convert the parameter value to SI units.
"""
@inline UnPack.unpack(p::T, ::Val{f}) where {T<:AbstractParameters,f} = ustrip(upreferred(getproperty(p, f) * units(T, f)))
"""
+(p::T, v::Vector) where {T <: AbstractParameters}
Adds the flattened vector `v` to `p`.
**Warning:** This method for `+` is implemented only for differentiation
using dual and hyperdual numbers.
If you want to change the values of `p`, you should do so explicitly
rather than use this `+` method.
"""
Base.:+(p::T, v::Vector) where {T <: AbstractParameters} = reconstruct(T, vec(p) + v)
"""
reconstruct(T, v)
Reconstructs the parameter of type `T` from flattenable vector `v`.
"""
function reconstruct(::Type{T}, v::Tv) where {T <: AbstractParameters, Tv <: Vector}
all(isnothing, initial_value(T)) && error("Can't reconstruct without initial values")
vunits = units(T)[[flattenable(T)...]]
v = @. v * upreferred(vunits) |> vunits |> ustrip
reconstructed_v = convert(Tv, collect(initial_value(T)))
reconstructed_v[collect(flattenable(T))] .= v
return T.name.wrapper(reconstructed_v...)
end
"""
getindex(p::T, i) where {T <: AbstractParameters}
Returns the i-th element of vec(p).
This is not efficient and only used for testing the derivatives with ForwardDiff.
"""
Base.getindex(p::T, i) where {T <: AbstractParameters} = getindex(vec(p), i)
function (::Type{T})(args::Quantity...) where {T <: AbstractParameters}
all(isequal(1), units(T)) && error("$T needs `units` for this construction to work")
return T([ustrip(x |> units(T, f)) for (x,f) in zip(args, fieldnames(T))]...)
end
function (::Type{T})(;kwargs...) where {T <: AbstractParameters}
all(isnothing, initial_value(T)) && length(kwargs) ≠ length(symbols(T)) && error("$T needs `initial_value` if one of the parameters is not supplied")
value(f::Symbol, v::Quantity) = ustrip(v |> units(T, f))
value(f::Symbol, v) = v
return T([f ∈ keys(kwargs) ? value(f, kwargs[f]) : initial_value(T, f) for f in fieldnames(T)]...)
end
#===============================
Writing to savable formats
===============================#
function Base.Dict(p::T, s=symbols(p)) where {T<:AbstractParameters}
v = [getfield(p,s) for s in s]
u = [units(p,s) for s in s]
return Dict([(s,v*u) for (s,v,u) in zip(s,v,u)])
end
function Base.NamedTuple(p::T, s=symbols(p)) where {T<:AbstractParameters}
v = [getfield(p,s) for s in s]
u = [units(p,s) for s in s]
return (; zip(s, v .* u)...)
end
#=====================
mismatch of parameters
=====================#
"""
mismatch(p::AbstractParameters)
Returns the sum of the negative log-likelihood of each flattenable parameter.
"""
function mismatch(p::AbstractParameters)
return sum(-logpdf(prior(p,k), UnPack.unpack(p,Val(k))) for k in flattenable_symbols(p))
end
function ∇mismatch(p::AbstractParameters)
return transpose([-gradlogpdf(prior(p,k), UnPack.unpack(p,Val(k))) for k in flattenable_symbols(p)])
end
# The functions below is just for ForwardDiff to work with vectors instead of p
# which requires `mismatch` to know about the priors, which are containted in `T`
function generate_objective(ωs, μx, σ²x, v, ωp, ::Type{T}) where {T<:AbstractParameters}
nt, nb = length(ωs), length(v)
tracers(x) = state_to_tracers(x, nb, nt)
f(x, p) = ωp * mismatch(T, p) +
sum([ωⱼ * mismatch(xⱼ, μⱼ, σⱼ², v) for (ωⱼ, xⱼ, μⱼ, σⱼ²) in zip(ωs, tracers(x), μx, σ²x)])
return f
end
function mismatch(::Type{T}, v) where {T<:AbstractParameters}
return sum(-logpdf(prior(T,k), v[i]) for (i,k) in enumerate(flattenable_symbols(T)))
end
#==================
Change of variables
==================#
"""
subfun
Returns the substitution function for the change of variables of parameters.
If the prior of parameter `pᵢ` is `LogNormal`, then the substitution function is `exp`.
If the prior is `Uniform`, then the change of variables is the logit function.
Otherwise, it's `identity`.
"""
function subfun(::Type{T}) where {T<:AbstractParameters}
return λ -> reconstruct(T, [subfun(T, s)(λᵢ) for (λᵢ,s) in zip(λ, flattenable_symbols(T))])
end
function ∇subfun(::Type{T}) where {T<:AbstractParameters}
return λ -> reconstruct(T, [∇subfun(T, s)(λᵢ) for (λᵢ,s) in zip(λ, flattenable_symbols(T))])
end
function ∇²subfun(::Type{T}) where {T<:AbstractParameters}
return λ -> reconstruct(T, [∇²subfun(T, s)(λᵢ) for (λᵢ,s) in zip(λ, flattenable_symbols(T))])
end
function invsubfun(::Type{T}) where {T<:AbstractParameters}
return p -> [invsubfun(T, s)(pᵢ) for (pᵢ,s) in zip(vec(p), flattenable_symbols(T))]
end
# substitution function (change of variables) is determined from prior distribution
subfun(::Type{T}, s::Symbol) where {T<:AbstractParameters} = subfun(prior(T,s))
∇subfun(::Type{T}, s::Symbol) where {T<:AbstractParameters} = ∇subfun(prior(T,s))
∇²subfun(::Type{T}, s::Symbol) where {T<:AbstractParameters} = ∇²subfun(prior(T,s))
invsubfun(::Type{T}, s::Symbol) where {T<:AbstractParameters} = invsubfun(prior(T,s))
# Fallback rule for change of variables is identity
subfun(::Distribution) = identity
∇subfun(::Distribution) = x -> one(x)
∇²subfun(::Distribution) = x -> zero(x)
invsubfun(::Distribution) = identity
# p = exp(λ) for LogNormal
subfun(::LogNormal) = exp
∇subfun(::LogNormal) = exp
∇²subfun(::LogNormal) = exp
invsubfun(::LogNormal) = log
# p = logistic(λ) for Uniform
subfun(d::Uniform) = λ -> d.a + (d.b - d.a) / (exp(-λ) + 1)
∇subfun(d::Uniform) = λ -> (d.b - d.a) * exp(-λ) / (exp(-λ) + 1)^2
∇²subfun(d::Uniform) = λ -> (d.a - d.b) * exp(-λ) / (exp(-λ) + 1)^2 + 2(d.b - d.a) * exp(-2λ) / (exp(-λ) + 1)^3
invsubfun(d::Uniform) = p -> -log((d.b - d.a) / (p - d.a) - 1)
export subfun, ∇subfun, ∇²subfun, invsubfun
export AbstractParameters, latex
| [
27,
456,
62,
30783,
29,
15,
198,
11748,
1610,
41769,
25,
27172,
21633,
198,
31,
38993,
4238,
62,
8367,
2147,
198,
11748,
7663,
9171,
14706,
25,
2488,
41667,
11,
4991,
198,
11748,
7663,
9171,
14706,
25,
2488,
49196,
11,
7095,
198,
1174... | 2.481211 | 6,307 |
using StatsBase, Plots; pyplot()
names = ["Mary","Mel","David","John","Kayley","Anderson"]
randomName() = rand(names)
X = 3:8
N = 10^6
sampleLengths = [length(randomName()) for _ in 1:N]
bar(X,counts(sampleLengths)/N, ylims=(0,0.35),
xlabel="Name length", ylabel="Estimated p(x)", legend=:none) | [
3500,
20595,
14881,
11,
1345,
1747,
26,
12972,
29487,
3419,
198,
198,
14933,
796,
14631,
24119,
2430,
21102,
2430,
11006,
2430,
7554,
2430,
37247,
1636,
2430,
42991,
8973,
198,
25120,
5376,
3419,
796,
43720,
7,
14933,
8,
198,
55,
796,
5... | 2.586207 | 116 |
<filename>test/testsuite/good/intarith4.jl
int main () {
printInt(fact(7)) ;
printInt(factr(7)) ;
return 0 ;
}
// iterative factorial
int fact (int n) {
int i,r ;
i = 1 ;
r = 1 ;
while (i <= n) {
r = r * i ;
i++ ;
}
return r ;
}
// recursive factorial
int factr (int n) {
if (n < 2)
return 1 ;
else
return n * factr(n-1) ;
}
| [
27,
34345,
29,
9288,
14,
9288,
2385,
578,
14,
11274,
14,
600,
283,
342,
19,
13,
20362,
198,
600,
1388,
7499,
1391,
198,
4798,
5317,
7,
22584,
7,
22,
4008,
2162,
198,
4798,
5317,
7,
22584,
81,
7,
22,
4008,
2162,
198,
7783,
657,
2... | 2.226027 | 146 |
<reponame>dpsanders/IntervalUnionArithmetic.jl<gh_stars>0
"""
Interval unions sets of defined by unions of disjoint intervals.
This file includes constructors, arithmetic (including intervals and scalars)
and complement functions
Empty sets and intersecting intervals are appropriately handled in the constructor:
julia> a = interval(0,2) ∪ interval(3,4)
[0, 2] ∪ [3, 4]
julia> b = interval(1,2) ∪ interval(4,5) ∪ ∅
[1, 2] ∪ [4, 5]
julia> c = a * b
[0, 10] ∪ [12, 20]
julia> complement(c)
[-∞, 0] ∪ [10, 12] ∪ [20, ∞]
"""
###
# IntervalUnion constructor. Consists of a vector of intervals
###
struct IntervalU{T<:Real} <: IntervalUnion{T}
v :: Array{Interval{T}}
end
###
# Outer constructors
###
function intervalU(x)
x = IntervalU(x)
sort!(x.v)
x = remove_empties(x)
x = condense(x)
closeGaps!(x)
return x
end
intervalU(num :: Real) = IntervalU([interval(num)])
intervalU(lo :: Real, hi :: Real) = IntervalU([interval(lo,hi)])
IntervalU(lo :: Real, hi :: Real) = IntervalU([interval(lo,hi)])
intervalU(x :: Interval) = IntervalU([x])
∪(x :: Interval) = intervalU(x)
∪(x :: Interval, y :: Interval) = intervalU([x; y])
∪(x :: Array{Interval{T}}) where T <:Real = intervalU(x)
intervalU(x :: Interval, y :: IntervalU) = intervalU([x; y.v])
∪(x :: Interval, y :: IntervalU) = intervalU(x,y)
intervalU(x :: IntervalU, y :: Interval) = intervalU([x.v; y])
∪(x :: IntervalU, y :: Interval) = intervalU(x,y)
intervalM(x :: IntervalU, y :: IntervalU) = intervalU([x.v; y.v])
∪(x :: IntervalU, y :: IntervalU) = intervalU(x,y)
# MultiInterval can act like a vector
getindex(x :: IntervalU, ind :: Integer) = getindex(x.v,ind)
getindex(x :: IntervalU, ind :: Array{ <: Integer}) = getindex(x.v,ind)
# Remove ∅ from IntervalUnion
function remove_empties(x :: IntervalU)
v = x.v
Vnew = v[v .!= ∅]
return IntervalU(Vnew)
end
function closeGaps!(x :: IntervalU, maxInts = MAXINTS[1])
while length(x.v) > maxInts # Global
# Complement code
v = sort(x.v)
vLo = left.(v)
vHi = right.(v)
vLo[1] == -∞ ? popfirst!(vLo) : pushfirst!(vHi, -∞)
vHi[end] == ∞ ? pop!(vHi) : push!(vLo, ∞)
xc = interval.(vHi,vLo)
# Close smallest width of complement
widths = diam.(xc)
d, i = findmin(widths)
merge = hull(x[i-1], x[i])
popat!(x.v, i)
popat!(x.v, i-1)
push!(x.v, merge)
sort!(x.v)
end
return x
end
# Recursively envolpe intervals which intersect.
function condense(x :: IntervalU)
if iscondensed(x); return x; end
v = sort(x.v)
v = unique(v)
Vnew = Interval{Float64}[]
for i =1:length(v)
intersects = intersect.(v[i],v )
these = findall( intersects .!= ∅)
push!(Vnew, hull(v[these]))
end
return condense( intervalU(Vnew) )
end
function iscondensed(x :: IntervalU)
v = sort(x.v)
for i=1:length(v)
intersects = findall( intersect.(v[i],v[1:end .!= i]) .!= ∅)
if !isempty(intersects); return false; end
end
return true
end
# Recursively envolpe intervals which intersect, except those which touch
function condense_weak(x :: IntervalU)
if iscondensed(x); return x; end
v = sort(x.v)
v = unique(v)
Vnew = Interval{Float64}[]
for i =1:length(v)
intersects = intersect.(v[i],v )
notempty = intersects .!= ∅
isItThin = isthin.(intersects) # Don't hull intervals which touch
notEmptyOrThin = notempty .* (1 .- isItThin)
them = findall(notEmptyOrThin .== 1)
push!(Vnew, hull(v[them]))
end
return condense( intervalU(Vnew) )
end
function iscondensed_weak(x :: IntervalU)
v = sort(x.v)
for i=1:length(v)
intersects = intersect.(v[i],v[1:end .!= i])
notempty = intersects .!= ∅
isItThin = isthin.(intersects)
notEmptyOrThin = notempty .* (1 .- isItThin)
if sum(notEmptyOrThin) != 0; return false; end
end
return true
end
| [
27,
7856,
261,
480,
29,
67,
862,
45070,
14,
9492,
2100,
38176,
3163,
29848,
13,
20362,
27,
456,
62,
30783,
29,
15,
198,
37811,
628,
220,
220,
220,
4225,
2100,
11936,
5621,
286,
5447,
416,
11936,
286,
595,
73,
1563,
20016,
13,
198,
... | 2.19795 | 1,854 |
<gh_stars>0
#=
reverser.jl
based on
https://stackoverflow.com/questions/27411401/julia-reverse-n-dimensional-arrays
=#
export reverser
using Test: @test
"""
y = reverser(x, dims)
reverse array along specified dimensions (or all if unspecified)
"""
function reverser(x::AbstractArray, dims::AbstractVector{<:Int})
y = copy(x)
for d in dims
y = reverse(y, dims=d)
end
return y
end
reverser(x::AbstractArray) = reverser(x, 1:ndims(x)) # all dimensions
reverser(x::AbstractArray, d::Int) = reverser(x, [d])
"""
reverser(:test)
self test
"""
function reverser(test::Symbol)
test != :test && throw(ArgumentError("test $test"))
@test reverser(1:3) == 3:-1:1
@test reverser(1:3, 1) == 3:-1:1
@test reverser((1:3)', 1) == (1:3)'
@test reverser((1:3)', 2) == (3:-1:1)'
true
end
| [
27,
456,
62,
30783,
29,
15,
198,
2,
28,
198,
260,
690,
263,
13,
20362,
198,
3106,
319,
198,
5450,
1378,
25558,
2502,
11125,
13,
785,
14,
6138,
507,
14,
28857,
16562,
486,
14,
73,
43640,
12,
50188,
12,
77,
12,
19577,
12,
3258,
59... | 2.388235 | 340 |
<gh_stars>0
"""
# Module FortranReader
FortranReader provides basic commands to read the 'unformatted' files written
by Fortran programs.
Although not technically portable, most Fortran programs write these files in
a predictable way, in 'records'. Each record contains either one or several
variables, marked before and after with the length of the variable(s) in bytes.
This marker is written as a 4-byte integer.
function `read_record(::IO, ::DataType, rank::Int, dims)` returns the value in
the next record from the stream and data type given.
"""
module FortranReader
export
read_record,
skip_record,
write_record
# Standard Fortran number sizes
const Real4 = Float32
const Real8 = Float64
const Integer4 = Int32
const Integer8 = Int64
const Complex4 = Complex{Float32}
const Complex8 = Complex{Float64}
const Character = String
const len4 = sizeof(Integer4)
const len8 = sizeof(Integer8)
const permissible_types = (Real4, Real8, Integer4, Integer8, Complex4, Complex8, Character)
"""
read_record(f::IO, T::DataType, dims...) -> v::T
Return the next record in the stream `f`, in the format of `T`. This means,
`read_record` assumes that the type of `v` is `T` (e.g., `Float64`).
`dims...` holds the dimensions; the default (no argument) assumes a scalar record.
"""
function read_record(f::IOStream, T::DataType, dims...)
all(Bool[typeof(dim) <: Integer for dim in dims]) || error("`dims` must be integers")
d, len = read_raw(f)
if T <: String
return String(d)
end
Tlen = sizeof(T)
rank = length(dims)
if rank == 0
len == Tlen || error("Record at $(position(f) - len4 - len) is not correct " *
"length for scalar of type $T")
reinterpret(T, d)[1]
elseif rank == 1
n = len ÷ Tlen
n != dims[1] && warn("Length of record does not match dimension supplied " *
"(Requested $(dims[1]); read size $n)")
reinterpret(T, d)
elseif rank >= 2
len ÷ Tlen == prod(dims) || error("Requested dimensions do not match size "*
"of record. (Requested $dims (size $(prod(dims))); record size $(len÷Tlen))")
reshape(reinterpret(T, d), dims)
else
error("Arrays must have positive rank")
end
end
"""
skip_record(f::IOStream) -> n::Int
Skip over the next record, returning the length of the record.
"""
function skip_record(f::IOStream)
nstart = reinterpret(Integer4, read(f, len4))[1]
pos = position(f)
skip(f, nstart)
nend = reinterpret(Integer4, read(f, len4))[1]
nstart == nend || error("Error skipping over record at byte $pos " *
"in stream $(f.name)")
nstart
end
"""
read_raw(f) -> d::Array{UInt8,1}, n::Int
Return the raw data from stream `f`, checked that the start and end records match,
and the number of bytes `n` read.
"""
function read_raw(f::IOStream)
nstart = reinterpret(Integer4, read(f, len4))[1]
pos = position(f)
d = read(f, nstart)
nend = reinterpret(Integer4, read(f, len4))[1]
nstart == nend || error("Error reading raw data at position $pos from stream "
* "$(f.name): record length markers do not match ($nstart != $nend)")
d, nstart
end
"""
write_record(f::IOStream, T::DataType, x...) -> n::Int
Write a record to the stream `f` containing `x`, which will be converted to have
element type `T`. `T` must therefore correspond to one of the Fortran types
`Float{32,64}`, `Int{32,64}`, `Complex{64,128}` or `String`. You can also use the
equivalent Fortran names which are exported by the module: `Integer{4,8}`, `Real{4,8}`,
`Complex{4,8}` and `Character`.
Return the **total** number of bytes written to the stream `f`, including the start-
and end-markers of the record.
"""
function write_record(f::IOStream, T::DataType, xs...)
T in permissible_types || error("Type of record $T is not supported. " *
"Choose one of $permissible_types")
bytes_written = 0
for x in xs
n = T == Character ? Integer4(length(x)) : Integer4(sizeof(eltype(T))*length(x))
write(f, n)
if T == Character
write(f, x)
elseif ndims(x) > 0
write(f, convert(Array{T}, x))
else
write(f, convert(T, x))
end
write(f, n)
bytes_written += 2*len4 + n
end
bytes_written
end
end # module
| [
27,
456,
62,
30783,
29,
15,
198,
37811,
198,
2,
19937,
6401,
2596,
33634,
198,
198,
21926,
2596,
33634,
3769,
4096,
9729,
284,
1100,
262,
705,
403,
687,
16898,
6,
3696,
3194,
198,
1525,
6401,
2596,
4056,
13,
198,
198,
7003,
407,
144... | 2.636199 | 1,652 |
<reponame>ali-ramadhan/Atmosfoolery.jl<gh_stars>1-10
using Test
using Logging
using Statistics
using Printf
using JLD2
using CUDA
using Oceananigans
using Oceananigans.Architectures
using JULES
Logging.global_logger(OceananigansLogger())
Archs = [CPU]
@hascuda Archs = [GPU]
CUDA.allowscalar(true)
@testset "JULES" begin
include("test_models.jl")
include("test_lazy_fields.jl")
include("test_time_stepping.jl")
include("test_regression.jl")
end
| [
27,
7856,
261,
480,
29,
7344,
12,
859,
324,
7637,
14,
2953,
16785,
69,
970,
1924,
13,
20362,
27,
456,
62,
30783,
29,
16,
12,
940,
198,
3500,
6208,
198,
3500,
5972,
2667,
198,
3500,
14370,
198,
3500,
12578,
69,
198,
3500,
449,
1116... | 2.497382 | 191 |
<gh_stars>0
using SearchLight, SearchLight.Migrations, SearchLight.Relationships
cd(@__DIR__)
connection_file = joinpath(@__DIR__,"mysql_connection.yml")
conn_info_postgres = SearchLight.Configuration.load(connection_file)
const conn = SearchLight.connect(conn_info_postgres)
try
SearchLight.Migrations.status()
catch _
SearchLight.Migrations.create_migrations_table()
end
isempty(SearchLight.Migrations.downed_migrations()) || SearchLight.Migrations.all_up!!()
Base.@kwdef mutable struct User <: AbstractModel
id::DbId = DbId()
username::String = ""
password::String = ""
name::String = ""
email::String = ""
end
Base.@kwdef mutable struct Role <: AbstractModel
id::DbId = DbId()
name::String = ""
end
Base.@kwdef mutable struct Ability <: AbstractModel
id::DbId = DbId()
name::String = ""
end
u1 = findone_or_create(User, username = "a") |> save!
r1 = findone_or_create(Role, name = "abcd") |> save!
for x in 'a':'d'
findone_or_create(Ability, name = "$x") |> save!
end
Relationships.Relationship!(u1, r1)
for a in all(Ability)
Relationships.Relationship!(r1, a)
end
Relationships.related(u1, Role)
Relationships.related(findone(Role, id = 1), Ability)
Relationships.related(u1, Ability, through = [Role])
| [
27,
456,
62,
30783,
29,
15,
198,
3500,
11140,
15047,
11,
11140,
15047,
13,
44,
3692,
602,
11,
11140,
15047,
13,
47117,
5748,
198,
198,
10210,
7,
31,
834,
34720,
834,
8,
198,
198,
38659,
62,
7753,
796,
4654,
6978,
7,
31,
834,
34720... | 2.78125 | 448 |
<reponame>bovine3dom/JustJoshing.jl<gh_stars>0
#!/usr/bin/env julia
# Best run in the REPL until I work out how to get unicodeplots to print to stdout when in an `include`
# `env JULIA_NUM_THREADS=(nproc) julia --project`
using JustJoshing
import Plots
Plots.unicodeplots()
# This should look like Figure 5.3, page 121 in Joshi (it does)
# NB: looks like Joshi's graph is mislabelled - should be spot price
p = Plots.plot(); for t in 0:0.2499:1; Plots.plot!(p,x->C(x,t;K=100,σ=0.3,r=0),60:140); end; p
Plots.plot(x->C(1,x),0:0.001:0.9999) # Value of at-the-money approaches zero as time approaches maturity (Fig 2.1, page 35)
Plots.plot(x->C(0.5,0;σ=x),0:0.01:1) # Call options with volatile underlyings are more expensive (Fig 3.8, page 66)
# Simulated stock price
Plots.plot(x->B(x,σ=0.02),0:0.01:1)
# Monte-Carlo validation of contracts from payoffs
# max(S-K,0): call option
let S_t=100, t=0.01, K=100, r=0.02, σ=0.05, T=1, trials=100_000_000
@show bs = C(S_t,t;T=T,K=K,r=r,σ=σ)
@show mc = mc_pricer((S,K,t,T,r)->max(S-K,0),S_t;t=t,T=T,K=S_t,r=r,σ=σ) |> first
@assert isapprox(mc, bs, rtol=1e-3)
end
# S-K: forward contract
let S_t=100, t=0.01, K=100, r=0.02, σ=0.05, T=1, trials=100_000_000
@show exact = S_t - K*exp(-r*(T-t))
@show mc = mc_pricer((S,K,t,T,r)->S-K,S_t;t=t,T=T,K=K,r=r,σ=σ) |> first
@assert isapprox(mc, exact, rtol=1e-3)
end
# Int(S>E): binary option
let S_t=100, t=0.01, E=100, r=0.02, σ=0.05, T=1, trials=100_000_000
@show exact = binary(S_t,t;E=E,r=r,σ=σ,T=T)
@show mc = mc_pricer((S,E,t,T,r)->Int(S>E),S_t;t=t,T=T,K=E,r=r,σ=σ) |> first
@assert isapprox(mc, exact, rtol=1e-3)
end
# Asian option, fixed strike
# TODO: compare prices with European options, look at impact volatility has
s = 80:1:120; t = [0; round.(-1*10 .^(-2:0.2:-1);sigdigits = 2)]
a = (x->mc_pricer_pathdep((S,K,t,T,r)->max(mean(S)-100,0),x[1],trials=100_000,r=0,t=x[2]:0.1:0,T=0,σ=0.5)).(Iterators.product(s,t))
plot(s,first.(a),labels=string.(t'))
| [
27,
7856,
261,
480,
29,
65,
709,
500,
18,
3438,
14,
5703,
41,
418,
722,
13,
20362,
27,
456,
62,
30783,
29,
15,
198,
2,
48443,
14629,
14,
8800,
14,
24330,
474,
43640,
198,
198,
2,
6705,
1057,
287,
262,
45285,
1566,
314,
670,
503,... | 1.993021 | 1,003 |
__precompile__()
module PAPA
export papa_reconstruction, papa_reconstruction_debug
using LinearAlgebra, LeastSquaresOptim
greet() = print("Welcome to PAPA world!")
include("Process_Solver.jl")
include("SupportFunctions.jl")
"""
papa_reconstruction(N, Npairs, pair_order, initial_chilist, sigmaVec[, CP_penalty, f_tol, iter])
Function for PAPA bootstrapping from a list of χ matrices to a larger χ matrix.
# Arguments
- `N::Integer`: the number of qubits
- `Npairs::Integer`: the number of qubit pairs
# Examples
```jldoctest
julia>
```
"""
function papa_reconstruction(N::Int,Npairs::Int,pair_order::Array{Int,2},initial_chilist::Vector{Float64},sigmaVec::Vector{Float64};CP_penalty = 1.0,x_tol=1.0e-7,f_tol=1.0e-7,iter=1000000,alg=LevenbergMarquardt(),flag=true)
setup_tuple = setup_problem(N,Npairs,pair_order)
Nel_tot = convert(Int,2*Npairs*16*17/2 + 16*2*Npairs + Npairs)
function min_fun!(F::Vector{Float64},x::Vector{Float64})
process_solver!(F,x,sigmaVec,setup_tuple,CP_penalty)
end
# function min_fun(x)
# return process_solver(x,sigmaVec,setup_tuple,CP_penalty)
# end
# result = nlsolve(min_fun!,initial_chilist;xtol=x_tol,ftol=f_tol,iterations=iter)
# chi_list_final = result.zero
if flag
print("Optimization Beginning\n")
end
lsa_papa = LeastSquaresProblem(x = initial_chilist,f! = min_fun!,output_length = Nel_tot)
result = optimize!(lsa_papa,alg;x_tol=x_tol,f_tol=f_tol,iterations=iter)
chilist = result.minimizer
# optimize(min_fun,initial_chilist)
dim2q = 16
chi_PAPA = zeros(ComplexF64,Npairs,dim2q,dim2q)
chitemp = zeros(ComplexF64,16,16)
Nel = 136
for nn = 1:1:Npairs
fill!(chitemp,0.)
start = 1 + (nn-1)*Nel
stop = nn*Nel;
chitemp[triu(trues(dim2q,dim2q))] = chilist[start:1:stop] + 1im*chilist[(start+Npairs*Nel):1:(stop+Npairs*Nel)]
chitemp[:,:] = chitemp+chitemp'
chitemp[:,:] = chitemp - diagm(0 => diag(chitemp))/2
chi_PAPA[nn,:,:] = chitemp[:,:]
end
return (chi_PAPA, result.ssr)
end
function papa_reconstruction_debug(N::Int,Npairs::Int,pair_order::Array{Int,2},initial_chilist::Vector{Float64},sigmaVec::Vector{Float64};CP_penalty = 1.0,x_tol=1.0e-7,f_tol=1.0e-7,iter=1000000,alg=LevenbergMarquardt(),flag=true)
setup_tuple = setup_problem(N,Npairs,pair_order)
Nel_tot = convert(Int,2*Npairs*16*17/2 + 16*2*Npairs + Npairs)
function min_fun!(F::Vector{Float64},x::Vector{Float64})
process_solver!(F,x,sigmaVec,setup_tuple,CP_penalty)
end
# function min_fun(x)
# return process_solver(x,sigmaVec,setup_tuple,CP_penalty)
# end
# result = nlsolve(min_fun!,initial_chilist;xtol=x_tol,ftol=f_tol,iterations=iter)
# chi_list_final = result.zero
if flag
print("Optimization Beginning\n")
end
lsa_papa = LeastSquaresProblem(x = initial_chilist,f! = min_fun!,output_length = Nel_tot)
result = optimize!(lsa_papa,alg;x_tol=x_tol,f_tol=f_tol,iterations=iter)
chilist = result.minimizer
# optimize(min_fun,initial_chilist)
dim2q = 16
chi_PAPA = zeros(ComplexF64,Npairs,dim2q,dim2q)
chitemp = zeros(ComplexF64,16,16)
Nel = 136
for nn = 1:1:Npairs
fill!(chitemp,0.)
start = 1 + (nn-1)*Nel
stop = nn*Nel;
chitemp[triu(trues(dim2q,dim2q))] = chilist[start:1:stop] + 1im*chilist[(start+Npairs*Nel):1:(stop+Npairs*Nel)]
chitemp[:,:] = chitemp+chitemp'
chitemp[:,:] = chitemp - diagm(0 => diag(chitemp))/2
chi_PAPA[nn,:,:] = chitemp[:,:]
end
return (chi_PAPA, result)
end
pair_order_1 = [1 2]
# Experimental data
sigma12_I = zeros(16,16)
max_ent_state!(4;mrho=sigma12_I)
sigma12_I = convert(Array{ComplexF64,2},sigma12_I)
# Conver to vector of data
sigmaVec_I = [real(sigma12_I[triu(trues(16,16))]);imag(sigma12_I[triu(trues(16,16))])]
# Initial state
chi12_idI = zeros(ComplexF64,16,16)
for i1 = 1:1:4
for i2 = 1:1:4
for i3 = 1:1:4
for i4 = 1:1:4
chi12_idI[4*(i1-1)+i2,4*(i3-1)+i4] = 4*sigma12_I[4*(i2-1)+i1,4*(i4-1)+i3]
end
end
end
end
initial_chilist_I = [real(chi12_idI[triu(trues(16,16))]);imag(chi12_idI[triu(trues(16,16))])]
function __init__()
print("Precompiling for speed\n")
papa_reconstruction(2,1,pair_order_1,initial_chilist_I,sigmaVec_I,CP_penalty = 1.0,x_tol=1.0e-7,f_tol=1.0e-7,iter=1000000,alg=LevenbergMarquardt(),flag=false);
papa_reconstruction(2,1,pair_order_1,initial_chilist_I,sigmaVec_I,CP_penalty = 1.0,x_tol=1.0e-7,f_tol=1.0e-7,iter=1000000,alg=Dogleg(),flag=false);
nothing
end
end # module
| [
834,
3866,
5589,
576,
834,
3419,
198,
198,
21412,
350,
2969,
32,
198,
198,
39344,
20461,
64,
62,
260,
9979,
2762,
11,
20461,
64,
62,
260,
9979,
2762,
62,
24442,
198,
198,
3500,
44800,
2348,
29230,
11,
1004,
459,
22266,
3565,
27871,
... | 1.991056 | 2,348 |
# Run package tests
println("Testing Silo.jl in Julia version ", VERSION)
using Base.Test
include(joinpath("..", "src", "Silo.jl"))
# using Silo
# run(`cd $(dirname(@__FILE__))/files && make`)
include("test1dwriteInt.jl")
include("test1dreadwrite.jl")
# Silo.DBInqFile(dbfile.file_name)
| [
2,
5660,
5301,
5254,
198,
35235,
7203,
44154,
4243,
78,
13,
20362,
287,
22300,
2196,
33172,
44156,
2849,
8,
198,
198,
3500,
7308,
13,
14402,
198,
17256,
7,
22179,
6978,
7203,
492,
1600,
366,
10677,
1600,
366,
15086,
78,
13,
20362,
487... | 2.636364 | 110 |
# Modelo de turbina sem sangria
type Turbina
Turbina()=begin
PP2=outers.PP2
new(
DanaPlugin(Dict{Symbol,Any}(
:Brief=>"Steam tables"
)),
Entalpia(),
Eficiencia(Dict{Symbol,Any}(
:Brief=>"Eficiencia da turbina"
)),
Corrente(Dict{Symbol,Any}(
:Symbol=>"_{in}",
:PosX=>0,
:PosY=>0.25
)),
Potencia(Dict{Symbol,Any}(
:Brief=>"Potencia da turbina",
:PosX=>1,
:PosY=>0.5
)),
Corrente(Dict{Symbol,Any}(
:Symbol=>"_{out}",
:PosX=>1,
:PosY=>1
)),
[
:(H_IS = PP2.propPS(Fout.P,Fin.S)),
:(Fout.H = (H_IS - Fin.H) * EF_T + Fin.H),
:([Fout.S,Fout.T] = PP2.propPH(Fout.P,Fout.H)),
:(Fin.F * (Fin.H - Fout.H) = POT_TURB),
:(Fout.F = Fin.F),
],
[
"","","","","",
],
[:PP2,],
[:H_IS,:EF_T,:Fin,:POT_TURB,:Fout,]
)
end
PP2::DanaPlugin
H_IS::Entalpia
EF_T::Eficiencia
Fin::Corrente
POT_TURB::Potencia
Fout::Corrente
equations::Array{Expr,1}
equationNames::Array{String,1}
parameters::Array{Symbol,1}
variables::Array{Symbol,1}
attributes::Dict{Symbol,Any}
end
export Turbina
function setEquationFlow(in::Turbina)
addEquation(1)
addEquation(2)
addEquation(3)
addEquation(4)
addEquation(5)
end
function atributes(in::Turbina,_::Dict{Symbol,Any})
fields::Dict{Symbol,Any}=Dict{Symbol,Any}()
fields[:Pallete]=true
fields[:Icon]="icon/turbina"
drive!(fields,_)
return fields
end
Turbina(_::Dict{Symbol,Any})=begin
newModel=Turbina()
newModel.attributes=atributes(newModel,_)
newModel
end
| [
2,
9104,
78,
390,
14830,
1437,
5026,
25889,
7496,
198,
4906,
3831,
65,
1437,
198,
197,
51,
5945,
1437,
3419,
28,
27471,
198,
197,
197,
10246,
17,
28,
280,
1010,
13,
10246,
17,
198,
197,
197,
3605,
7,
198,
197,
197,
197,
35,
2271,
... | 1.850856 | 818 |
getevennumbers(arr) = filter(iseven,arr) | [
1136,
10197,
77,
17024,
7,
3258,
8,
796,
8106,
7,
786,
574,
11,
3258,
8
] | 2.666667 | 15 |
@testset "Bus Constructors" begin
tBus = Bus()
tLoadZones = LoadZones()
end
@testset "Generation Constructors" begin
tEconThermal = EconThermal()
@test tEconThermal isa PowerSystems.Component
tTechThermal = TechThermal()
@test tTechThermal isa PowerSystems.Component
tThermalGen = ThermalDispatch()
@test tThermalGen isa PowerSystems.Component
tThermalGenSeason = ThermalGenSeason()
@test tThermalGenSeason isa PowerSystems.Component
tTechHydro = TechHydro()
@test tTechHydro isa PowerSystems.Component
tEconHydro = EconHydro()
@test tEconHydro isa PowerSystems.Component
tHydroFix = HydroFix()
@test tHydroFix isa PowerSystems.Component
tHydroCurtailment = HydroCurtailment()
@test tHydroCurtailment isa PowerSystems.Component
tHydroStorage = HydroStorage()
@test tHydroStorage isa PowerSystems.Component
tTechRenewable = TechRenewable()
@test tTechRenewable isa PowerSystems.Component
tEconRenewable = EconRenewable()
@test tEconRenewable isa PowerSystems.Component
tRenewableFix = RenewableFix()
@test tRenewableFix isa PowerSystems.Component
tRenewableFullDispatch = RenewableFullDispatch()
@test tRenewableFullDispatch isa PowerSystems.Component
tRenewableCurtailment = RenewableCurtailment()
@test tRenewableCurtailment isa PowerSystems.Component
end
@testset "Storage Constructors" begin
tStorage = GenericBattery()
@test tStorage isa PowerSystems.Component
end
@testset "Load Constructors" begin
tPowerLoad = PowerLoad()
@test tPowerLoad isa PowerSystems.Component
tPowerLoadPF = PowerLoadPF()
@test tPowerLoadPF isa PowerSystems.Component
tPowerLoad = PowerLoad("init", true, Bus(), 0.0, 0.0)
@test tPowerLoad isa PowerSystems.Component
tPowerLoadPF = PowerLoadPF("init", true, Bus(), 0.0, 1.0)
@test tPowerLoadPF isa PowerSystems.Component
tLoad = InterruptibleLoad()
@test tLoad isa PowerSystems.Component
end
@testset "Branch Constructors" begin
tLine = Line()
@test tLine isa PowerSystems.Component
tMonitoredLine = MonitoredLine()
@test tMonitoredLine isa PowerSystems.Component
tHVDCLine = HVDCLine()
@test tHVDCLine isa PowerSystems.Component
tVSCDCLine = VSCDCLine()
@test tVSCDCLine isa PowerSystems.Component
tTransformer2W = Transformer2W()
@test tTransformer2W isa PowerSystems.Component
tTapTransformer = TapTransformer()
@test tTapTransformer isa PowerSystems.Component
tPhaseShiftingTransformer = PhaseShiftingTransformer()
@test tPhaseShiftingTransformer isa PowerSystems.Component
end
@testset "Product Constructors" begin
tProportionalReserve = ProportionalReserve()
@test tProportionalReserve isa PowerSystems.Service
tStaticReserve = StaticReserve()
@test tStaticReserve isa PowerSystems.Service
end
| [
31,
9288,
2617,
366,
16286,
28407,
669,
1,
2221,
198,
220,
220,
220,
256,
16286,
796,
5869,
3419,
198,
220,
220,
220,
256,
8912,
57,
1952,
796,
8778,
57,
1952,
3419,
198,
437,
198,
198,
31,
9288,
2617,
366,
8645,
341,
28407,
669,
... | 2.755513 | 1,043 |
<reponame>ChristianSchuler/GeophysicalModelGenerator.jl
using Base: Int64, Float64, NamedTuple
using Printf
using Glob
# LaMEM I/O
#
# These are routines that help to create a LaMEM marker files from a ParaviewData structure, which can be used to perform geodynamic simulations
# We also include routines with which we can read LaMEM *.pvtr files into julia
export LaMEM_grid, ReadLaMEM_InputFile
export Save_LaMEMMarkersParallel, Save_LaMEMTopography
export GetProcessorPartitioning, ReadData_VTR, ReadData_PVTR, CreatePartitioningFile
"""
Structure that holds information about the LaMEM grid (usually read from an input file).
"""
struct LaMEM_grid
nmark_x :: Int64
nmark_y :: Int64
nmark_z :: Int64
nump_x :: Int64
nump_y :: Int64
nump_z :: Int64
nel_x :: Int64
nel_y :: Int64
nel_z :: Int64
W :: Float64
L :: Float64
H :: Float64
coord_x
coord_y
coord_z
x1D_c
y1D_c
z1D_c
X
Y
Z
end
"""
ParaviewData(Grid::LaMEM_grid, fields::NamedTuple)
Creates a `ParaviewData` struct from a LaMEM grid and from fields stored on that grid. Note that one needs to have a field `Phases` and optionally a field `Temp` to create LaMEM marker files.
"""
ParaviewData(Grid::LaMEM_grid, fields::NamedTuple) = ParaviewData(Grid.X, Grid.Y, Grid.Z, fields)
"""
CartData(Grid::LaMEM_grid, fields::NamedTuple)
Creates a `CartData` struct from a LaMEM grid and from fields stored on that grid. Note that one needs to have a field `Phases` and optionally a field `Temp` to create LaMEM marker files.
"""
CartData(Grid::LaMEM_grid, fields::NamedTuple) = CartData(Grid.X, Grid.Y, Grid.Z, fields)
"""
Below = BelowSurface(Data_LaMEM::LaMEM_grid, DataSurface_Cart::CartData)
Determines if points within the 3D `LaMEM_grid` structure are below the Cartesian surface DataSurface_Cart
"""
function BelowSurface(Grid::LaMEM_grid, DataSurface_Cart::CartData)
return AboveSurface(CartData(Grid,(Z=Grid.Z,)), DataSurface_Cart; above=false)
end
"""
Above = AboveSurface(Data_LaMEM::LaMEM_grid, DataSurface_Cart::CartData)
Determines if points within the 3D `LaMEM_grid` structure are above the Cartesian surface DataSurface_Cart
"""
function AboveSurface(Grid::LaMEM_grid, DataSurface_Cart::CartData)
return AboveSurface(CartData(Grid,(Z=Grid.Z,)), DataSurface_Cart; above=true)
end
"""
value = ParseValue_LaMEM_InputFile(file,keyword,type)
Extracts a certain `keyword` from a LaMEM input `file` and convert it to a certain type
# Example
```julia
julia> nmark_z = ParseValue_LaMEM_InputFile("SaltModels.dat","nmark_z",Int64)
```
"""
function ParseValue_LaMEM_InputFile(file,keyword,type)
value = nothing
for line in eachline(file)
line_strip = lstrip(line) # strip leading tabs/spaces
# Strip comments
ind = findfirst("#", line)
if isnothing(ind)
# no comments
else
line_strip = line_strip[1:ind[1]-2];
end
line_strip = rstrip(line_strip) # strip last tabs/spaces
if startswith(line_strip, keyword)
ind = findfirst("=", line_strip)
if type==String
value = split(line_strip)[3:end]
else
value = parse.(type,split(line_strip)[3:end])
if length(value)==1
value=value[1];
end
end
end
end
return value
end
"""
Grid::LaMEM_grid = ReadLaMEM_InputFile(file)
Parses a LaMEM input file and stores grid information in the `Grid` structure.
# Example
```julia
julia> Grid = ReadLaMEM_InputFile("SaltModels.dat")
LaMEM Grid:
nel : (32, 32, 32)
marker/cell : (3, 3, 3)
markers : (96, 96, 96)
x ϵ [-3.0 : 3.0]
y ϵ [-2.0 : 2.0]
z ϵ [-2.0 : 0.0]
```
"""
function ReadLaMEM_InputFile(file)
    # Markers per element in each direction
    nmark_x = ParseValue_LaMEM_InputFile(file,"nmark_x",Int64)
    nmark_y = ParseValue_LaMEM_InputFile(file,"nmark_y",Int64)
    nmark_z = ParseValue_LaMEM_InputFile(file,"nmark_z",Int64)
    # Number of elements in each direction
    nel_x = ParseValue_LaMEM_InputFile(file,"nel_x",Int64)
    nel_y = ParseValue_LaMEM_InputFile(file,"nel_y",Int64)
    nel_z = ParseValue_LaMEM_InputFile(file,"nel_z",Int64)
    # Domain bounds [begin end] per direction
    coord_x = ParseValue_LaMEM_InputFile(file,"coord_x",Float64)
    coord_y = ParseValue_LaMEM_InputFile(file,"coord_y",Float64)
    coord_z = ParseValue_LaMEM_InputFile(file,"coord_z",Float64)
    # More than 2 coordinate entries implies a multi-segment (variable-spacing)
    # grid, which this routine does not handle
    if (length(coord_x)>2) || (length(coord_y)>2) || (length(coord_z)>2)
        error("Routine currently not working for variable grid spacing")
    end
    # Domain extent per direction
    W = coord_x[end]-coord_x[1];
    L = coord_y[end]-coord_y[1];
    H = coord_z[end]-coord_z[1];
    # Total markers per direction
    nump_x = nel_x*nmark_x;
    nump_y = nel_y*nmark_y;
    nump_z = nel_z*nmark_z;
    # Uniform marker spacing
    dx = W/nump_x;
    dy = L/nump_y;
    dz = H/nump_z;
    # these lines should be replaced with a separate routine for variable spacing
    # Markers are cell-centered: offset half a spacing from the domain bounds
    x = coord_x[1]+dx/2: dx : coord_x[end]-dx/2;
    y = coord_y[1]+dy/2: dy : coord_y[end]-dy/2;
    z = coord_z[1]+dz/2: dz : coord_z[end]-dz/2;
    X,Y,Z = XYZGrid(x,y,z); # create 3D grid using regular spacing
    Grid = LaMEM_grid( nmark_x, nmark_y, nmark_z,
                        nump_x, nump_y, nump_z,
                        nel_x, nel_y, nel_z,
                        W, L, H,
                        coord_x, coord_y, coord_z,
                        x, y, z,
                        X, Y, Z);
    return Grid
end
# Print an overview of the LaMEM Grid struct:
# Pretty-print the LaMEM_grid: element counts, marker resolution and domain bounds.
function Base.show(io::IO, d::LaMEM_grid)
    println(io,"LaMEM Grid: ")
    println(io," nel : ($(d.nel_x), $(d.nel_y), $(d.nel_z))")
    println(io," marker/cell : ($(d.nmark_x), $(d.nmark_y), $(d.nmark_z))")
    # Bug fix: previously printed nump_x for all three directions
    println(io," markers : ($(d.nump_x), $(d.nump_y), $(d.nump_z))")
    println(io," x ϵ [$(d.coord_x[1]) : $(d.coord_x[2])]")
    println(io," y ϵ [$(d.coord_y[1]) : $(d.coord_y[2])]")
    println(io," z ϵ [$(d.coord_z[1]) : $(d.coord_z[2])]")
end
"""
Save_LaMEMMarkersParallel(Grid::CartData; PartitioningFile=empty, directory="./markers", verbose=true)
Saves a LaMEM marker file from the `CartData` structure `Grid`. It must have a field called `Phases`, holding phase information (as integers) and optionally a field `Temp` with temperature info.
It is possible to provide a LaMEM partitioning file `PartitioningFile`. If not, output is assumed to be for one processor.
The size of `Grid` should be consistent with what is provided in the LaMEM input file. In practice, the size of the mesh can be retrieved from a LaMEM input file using `ReadLaMEM_InputFile`.
# Example
```
julia> Grid = ReadLaMEM_InputFile("LaMEM_input_file.dat")
julia> Phases = zeros(Int32,size(Grid.X));
julia> Temp = ones(Float64,size(Grid.X));
julia> Model3D = CartData(Grid, (Phases=Phases,Temp=Temp))
julia> Save_LaMEMMarkersParallel(Model3D)
Writing LaMEM marker file -> ./markers/mdb.00000000.dat
```
If you want to create a LaMEM input file for multiple processors:
```
julia> Save_LaMEMMarkersParallel(Model3D, PartitioningFile="ProcessorPartitioning_4cpu_1.2.2.bin")
Writing LaMEM marker file -> ./markers/mdb.00000000.dat
Writing LaMEM marker file -> ./markers/mdb.00000001.dat
Writing LaMEM marker file -> ./markers/mdb.00000002.dat
Writing LaMEM marker file -> ./markers/mdb.00000003.dat
```
"""
function Save_LaMEMMarkersParallel(Grid::CartData; PartitioningFile=empty, directory="./markers", verbose=true)
    # 1D coordinate vectors (units stripped); assumes a rectilinear grid where
    # Grid.x varies along dim 1, Grid.y along dim 2, Grid.z along dim 3
    x = ustrip.(Grid.x.val[:,1,1]);
    y = ustrip.(Grid.y.val[1,:,1]);
    z = ustrip.(Grid.z.val[1,1,:]);
    # :Phases is mandatory; :Temp defaults to zero if absent
    if haskey(Grid.fields,:Phases)
        Phases = Grid.fields[:Phases];
    else
        error("You must provide the field :Phases in the structure")
    end
    if haskey(Grid.fields,:Temp)
        Temp = Grid.fields[:Temp];
    else
        if verbose
            println("Field :Temp is not provided; setting it to zero")
        end
        Temp = zeros(size(Phases));
    end
    if PartitioningFile==empty
        # in case we run this on 1 processor only
        Nprocx = 1;
        Nprocy = 1;
        Nprocz = 1;
        xc,yc,zc = x,y,z;
    else
        # Parallel run: read processor counts and subdomain boundaries
        Nprocx,Nprocy,Nprocz,
        xc,yc,zc,
        nNodeX,nNodeY,nNodeZ = GetProcessorPartitioning(PartitioningFile)
    end
    Nproc = Nprocx*Nprocy*Nprocz;
    # Map linear processor rank -> (i,j,k) position in the processor grid
    num, num_i, num_j, num_k = get_numscheme(Nprocx, Nprocy, Nprocz);
    # Per-direction marker counts and start/end indices of each subdomain
    xi,ix_start,ix_end = get_ind(x,xc,Nprocx);
    yi,iy_start,iy_end = get_ind(y,yc,Nprocy);
    zi,iz_start,iz_end = get_ind(z,zc,Nprocz);
    # Per-processor index ranges into the global marker arrays
    x_start = ix_start[num_i[:]];
    y_start = iy_start[num_j[:]];
    z_start = iz_start[num_k[:]];
    x_end = ix_end[num_i[:]];
    y_end = iy_end[num_j[:]];
    z_end = iz_end[num_k[:]];
    # Loop over all processors partition
    for n=1:Nproc
        # Extract coordinates for current processor
        part_x = ustrip.(Grid.x.val[x_start[n]:x_end[n],y_start[n]:y_end[n],z_start[n]:z_end[n]]);
        part_y = ustrip.(Grid.y.val[x_start[n]:x_end[n],y_start[n]:y_end[n],z_start[n]:z_end[n]]);
        part_z = ustrip.(Grid.z.val[x_start[n]:x_end[n],y_start[n]:y_end[n],z_start[n]:z_end[n]]);
        part_phs = Phases[x_start[n]:x_end[n],y_start[n]:y_end[n],z_start[n]:z_end[n]];
        part_T = Temp[x_start[n]:x_end[n],y_start[n]:y_end[n],z_start[n]:z_end[n]];
        num_particles = size(part_x,1)* size(part_x,2) * size(part_x,3);
        # Information vector per processor
        num_prop = 5; # number of properties we save [x/y/z/phase/T]
        lvec_info = num_particles;
        # Interleave the 5 properties per marker: [x,y,z,phase,T, x,y,z,phase,T, ...]
        lvec_prtcls = zeros(Float64,num_prop*num_particles);
        lvec_prtcls[1:num_prop:end] = part_x[:];
        lvec_prtcls[2:num_prop:end] = part_y[:];
        lvec_prtcls[3:num_prop:end] = part_z[:];
        lvec_prtcls[4:num_prop:end] = part_phs[:];
        lvec_prtcls[5:num_prop:end] = part_T[:];
        # Write output files
        if ~isdir(directory); mkdir(directory); end # Create dir if not existent
        fname = @sprintf "%s/mdb.%1.8d.dat" directory (n-1); # Name
        if verbose
            println("Writing LaMEM marker file -> $fname") # print info
        end
        lvec_output = [lvec_info; lvec_prtcls]; # one vec with info about length
        PetscBinaryWrite_Vec(fname, lvec_output) # Write PETSc vector as binary file
    end
end
# Internal routine to retrieve indices of local portion of the grid
# Internal: for the 1D coordinates `x` and subdomain boundaries `xc`, return the
# number of points per processor (`xi`), plus the start/end indices of each
# processor's slice. With a single processor, `xi` is a scalar (as before).
function get_ind(x,xc,Nprocx)
    if Nprocx == 1
        return length(x), [1], [length(x)]
    end
    xi = zeros(Int64, Nprocx)
    for k in 1:Nprocx
        # first subdomain includes its lower boundary; later ones exclude it
        lower = k == 1 ? (x .>= xc[k]) : (x .> xc[k])
        xi[k] = count(lower .& (x .<= xc[k+1]))
    end
    ix_end = cumsum(xi)
    ix_start = [1; ix_end[1:end-1] .+ 1]
    return xi, ix_start, ix_end
end
# Internal routine
# Internal: enumerate processor ranks 1..Nprocx*Nprocy*Nprocz together with their
# (i,j,k) position in the processor grid; x varies fastest, then y, then z.
function get_numscheme(Nprocx,Nprocy,Nprocz)
    N   = Nprocx*Nprocy*Nprocz
    n   = collect(1:N)
    nix = [i for k in 1:Nprocz for j in 1:Nprocy for i in 1:Nprocx]
    njy = [j for k in 1:Nprocz for j in 1:Nprocy for i in 1:Nprocx]
    nkz = [k for k in 1:Nprocz for j in 1:Nprocy for i in 1:Nprocx]
    return n,nix,njy,nkz
end
# Internal routine, to write a PETSc vector (as Float64)
"""
PetscBinaryWrite_Vec(filename, A)
Writes a vector `A` to disk, such that it can be read with `PetscBinaryRead` (which assumes a Big Endian type)
"""
function PetscBinaryWrite_Vec(filename, A)
    # PETSc reads big-endian, so every value is converted with hton before writing.
    open(filename,"w+") do f
        write(f, hton(Float64(1211214)))    # PETSc vector header id (not used on read)
        write(f, hton(Float64(A[1])))       # first entry: marker count info
        for v in A[2:end]                   # remaining entries: the data itself
            write(f, hton(Float64(v)))
        end
    end
end
"""
nProcX,nProcY,nProcZ, xc,yc,zc, nNodeX,nNodeY,nNodeZ = GetProcessorPartitioning(filename)
Reads a LaMEM processor partitioning file, used to create marker files, and returns the parallel layout
"""
function GetProcessorPartitioning(filename)
    # The file is big-endian (PETSc); ntoh converts each value to host order.
    readI32(io) = ntoh(read(io, Int32))
    readF64(io) = ntoh(read(io, Float64))
    io = open(filename, "r")
    # processor counts and node counts per direction
    nProcX = readI32(io)
    nProcY = readI32(io)
    nProcZ = readI32(io)
    nNodeX = readI32(io)
    nNodeY = readI32(io)
    nNodeZ = readI32(io)
    # 0-based node indices of the subdomain boundaries per direction
    iX = [readI32(io) for _ in 1:nProcX+1]
    iY = [readI32(io) for _ in 1:nProcY+1]
    iZ = [readI32(io) for _ in 1:nProcZ+1]
    # coordinates are stored non-dimensional; rescale by the characteristic length
    CharLength = readF64(io)
    xcoor = [readF64(io) for _ in 1:nNodeX] .* CharLength
    ycoor = [readF64(io) for _ in 1:nNodeY] .* CharLength
    zcoor = [readF64(io) for _ in 1:nNodeZ] .* CharLength
    close(io)
    # convert 0-based boundary indices to 1-based and pick boundary coordinates
    return nProcX, nProcY, nProcZ,
           xcoor[iX .+ 1], ycoor[iY .+ 1], zcoor[iZ .+ 1],
           nNodeX, nNodeY, nNodeZ
end
"""
coord, Data_3D_Arrays, Name_Vec = ReadData_VTR(fname)
Reads a VTR (structured grid) VTK file `fname` and extracts the coordinates, data arrays and names of the data.
In general, this only contains a piece of the data, and one should open a `*.pvtr` file to retrieve the full data
"""
function ReadData_VTR(fname, FullSize)
    # Parse the XML header of a binary-appended .vtr file, then pull the raw
    # coordinate and data arrays out of the appended binary section.
    # `FullSize` is the global (parallel) grid size; this piece is scattered
    # into full-size zero-initialized arrays at its PieceExtent location.
    file = open(fname, "r")
    header = true
    num = 1;
    CoordOffset = zeros(Int64,3);           # byte offsets of x/y/z coordinate arrays
    Offset_Vec = [];                        # byte offsets of each data array
    Name_Vec = []; Type_Vec = [];
    NumComp_Vec = []; PieceExtent=[]; WholeExtent=[];
    # --- pass 1: read the XML header line-by-line until the appended data starts
    while header==true
        line = readline(file)
        line_strip = lstrip(line)
        if startswith(line_strip, "<RectilinearGrid WholeExtent")
            # global extent of the dataset, as 6 ints between the first/last quotes
            id_start = findfirst("\"", line_strip)[1]+1
            id_end = findlast("\"", line_strip)[1]-1
            WholeExtent = parse.(Int64,split(line_strip[id_start:id_end]))
        end
        if startswith(line_strip, "<Piece Extent=")
            # extent of this piece within the whole grid
            id_start = findfirst("\"", line_strip)[1]+1
            id_end = findlast("\"", line_strip)[1]-1
            PieceExtent = parse.(Int64,split(line_strip[id_start:id_end]))
        end
        if startswith(line_strip, "<Coordinates>")
            # Read info where the coordinates are stored
            Type, Name, NumberOfComponents, CoordOffset[1] = Parse_VTR_Line(readline(file)); num += 1
            Type, Name, NumberOfComponents, CoordOffset[2] = Parse_VTR_Line(readline(file)); num += 1
            Type, Name, NumberOfComponents, CoordOffset[3] = Parse_VTR_Line(readline(file)); num += 1
        end
        if startswith(line_strip, "<PointData>")
            # collect name/type/components/offset of every point-data array
            line_strip = lstrip(readline(file))
            while ~startswith(line_strip, "</PointData>")
                Type, Name, NumberOfComponents, Offset = Parse_VTR_Line(line_strip); num += 1
                Offset_Vec = [Offset_Vec; Offset];
                Name_Vec = [Name_Vec; Name];
                Type_Vec = [Type_Vec; Type];
                NumComp_Vec = [NumComp_Vec; NumberOfComponents];
                line_strip = lstrip(readline(file))
            end
        end
        if startswith(line_strip, "<CellData>")
            # same for cell-data arrays
            line_strip = lstrip(readline(file))
            while ~startswith(line_strip, "</CellData>")
                Type, Name, NumberOfComponents, Offset = Parse_VTR_Line(line_strip); num += 1
                Offset_Vec = [Offset_Vec; Offset];
                Name_Vec = [Name_Vec; Name];
                Type_Vec = [Type_Vec; Type];
                NumComp_Vec = [NumComp_Vec; NumberOfComponents];
                line_strip = lstrip(readline(file))
                # if we have cell Data, for some reason we need to increment this by one.
                # NOTE(review): this runs once per CellData array, not once per
                # file -- with more than one CellData array PieceExtent is bumped
                # repeatedly; verify against a multi-array cell-data file.
                PieceExtent[1:2:end] .+= 1
            end
        end
        if startswith(line_strip, "<AppendedData ")
            header=false
        end
        num += 1
    end
    # Skip to beginning of raw data (linebreak)
    skip(file, 5)
    start_bin = position(file); # start of binary data
    # Determine the end of the raw data
    seekend(file);
    skip(file, -29)
    end_bin = position(file);
    # Start with reading the coordinate arrays:
    coord_x = ReadBinaryData(file, start_bin, CoordOffset[1], (PieceExtent[2]-PieceExtent[1]+1)*sizeof(Float32))
    coord_y = ReadBinaryData(file, start_bin, CoordOffset[2], (PieceExtent[4]-PieceExtent[3]+1)*sizeof(Float32))
    coord_z = ReadBinaryData(file, start_bin, CoordOffset[3], (PieceExtent[6]-PieceExtent[5]+1)*sizeof(Float32))
    # Read data arrays:
    Data_3D_Arrays = [];
    # index ranges of this piece within the full grid
    ix = PieceExtent[1]:PieceExtent[2];
    iy = PieceExtent[3]:PieceExtent[4];
    iz = PieceExtent[5]:PieceExtent[6];
    numPoints = length(ix)*length(iy)*length(iz);
    # scatter the piece coordinates into full-size vectors
    coord_x_full = zeros(Float64, FullSize[1]);
    coord_y_full = zeros(Float64, FullSize[2]);
    coord_z_full = zeros(Float64, FullSize[3]);
    coord_x_full[ix] = coord_x[1:length(ix)];
    coord_y_full[iy] = coord_y[1:length(iy)];
    coord_z_full[iz] = coord_z[1:length(iz)];
    # all arrays except the last one are assumed Float32
    for i=1:length(Name_Vec)-1
        data3D = ReadBinaryData(file, start_bin, Offset_Vec[i], numPoints*NumComp_Vec[i]*sizeof(Float32) )
        data3D = getArray(data3D, PieceExtent, NumComp_Vec[i]);
        data3D_full = zeros(Float64,NumComp_Vec[i],FullSize[1],FullSize[2],FullSize[3]) # Generate full data
        # uggly hack to make it work with parallel files
        # drop the duplicated boundary layer on interior pieces before scattering
        ix_left = ix; ix_right = 1:length(ix_left);
        iy_left = iy; iy_right = 1:length(iy_left);
        iz_left = iz; iz_right = 1:length(iz_left);
        if ix_left[1]>1; ix_left = ix_left[2:end]; ix_right=ix_right[2:end]; end
        if iy_left[1]>1; iy_left = iy_left[2:end]; iy_right=iy_right[2:end]; end
        if iz_left[1]>1; iz_left = iz_left[2:end]; iz_right=iz_right[2:end]; end
        data3D_full[1:NumComp_Vec[i], ix_left, iy_left, iz_left] = data3D[1:NumComp_Vec[i],ix_right, iy_right, iz_right];
        #data3D_full[1:NumComp_Vec[i], ix, iy, iz] = data3D;
        Data_3D_Arrays = [Data_3D_Arrays; data3D_full]
    end
    # the last array may be UInt8 (e.g. phase information)
    i=length(Name_Vec);
    if Type_Vec[i]=="UInt8"
        data3D = ReadBinaryData(file, start_bin, Offset_Vec[i], numPoints*NumComp_Vec[i]*sizeof(UInt8), DataType=UInt8)
    else
        data3D = ReadBinaryData(file, start_bin, Offset_Vec[i], numPoints*NumComp_Vec[i]*sizeof(Float32) )
    end
    data3D = getArray(data3D, PieceExtent, NumComp_Vec[i]);
    data3D_full = zeros(Float64,NumComp_Vec[i],FullSize[1],FullSize[2],FullSize[3]) # Generate full d
    data3D_full[1:NumComp_Vec[i], ix, iy, iz] = data3D[1:NumComp_Vec[i],1:length(ix),1:length(iy),1:length(iz)];
    Data_3D_Arrays = [Data_3D_Arrays; data3D_full]
    return coord_x_full, coord_y_full, coord_z_full, Data_3D_Arrays, Name_Vec, NumComp_Vec, ix, iy, iz
end
# Parses a line of a *.vtr file & retrieve Type/Name/NumberOfComponents/Offset
# Parse a `<DataArray .../>` header line of a *.vtr file and return
# (Type, Name, NumberOfComponents, Offset); any attribute that is absent
# comes back as `nothing`. Attributes are expected in this order.
function Parse_VTR_Line(line)
    rest = lstrip(line)

    # Extract the quoted value following `key="`, advancing `rest` past it
    # so later keys are searched only in the remainder of the line.
    function grab(key)
        pos = findfirst(key, rest)
        pos === nothing && return nothing
        rest = rest[pos[1]+length(key)+2:end]    # skip `key="`
        stop = findfirst("\"", rest)[1] - 1      # up to the closing quote
        val = rest[1:stop]
        rest = rest[stop:end]
        return val
    end

    Type = grab("type")
    Name = grab("Name")
    ncomp = grab("NumberOfComponents")
    NumberOfComponents = ncomp === nothing ? nothing : parse(Int64, ncomp)
    off = grab("offset")
    Offset = off === nothing ? nothing : parse(Int64, off)
    return Type, Name, NumberOfComponents, Offset
end
# Reshape a flat data vector into (components, nx, ny, nz) using the
# piece extent [x0 x1 y0 y1 z0 z1] to derive the grid dimensions.
function getArray(data, PieceExtent, NumComp)
    nx = PieceExtent[2] - PieceExtent[1] + 1
    ny = PieceExtent[4] - PieceExtent[3] + 1
    nz = PieceExtent[6] - PieceExtent[5] + 1
    return reshape(data, (NumComp, nx, ny, nz))
end
# Read `BytesToRead` bytes located `Offset` bytes past `start_bin` in `file`,
# reinterpret them as `DataType` values, and return them widened to Float64.
function ReadBinaryData(file::IOStream, start_bin::Int64, Offset::Int64, BytesToRead; DataType=Float32)
    seek(file, 0)                        # rewind to the start of the stream
    skip(file, start_bin + Offset)       # jump to the requested raw-data section
    raw = read(file, BytesToRead)
    return Float64.(reinterpret(DataType, raw))
end
"""
Data::ParaviewData = ReadData_PVTR(fname, dir)
Reads a parallel, rectilinear, `*.vts` file with the name `fname` and located in `dir` and create a 3D `Data` struct from it.
# Example
```julia
julia> Data = ReadData_PVTR("Haaksbergen.pvtr", "./Timestep_00000005_3.35780500e-01/")
ParaviewData
size : (33, 33, 33)
x ϵ [ -3.0 : 3.0]
y ϵ [ -2.0 : 2.0]
z ϵ [ -2.0 : 0.0]
fields: (:phase, :density, :visc_total, :visc_creep, :velocity, :pressure, :temperature, :dev_stress, :strain_rate, :j2_dev_stress, :j2_strain_rate, :plast_strain, :plast_dissip, :tot_displ, :yield, :moment_res, :cont_res)
```
"""
function ReadData_PVTR(fname, dir)
    # Parse the parallel header (*.pvtr), read every referenced piece (*.vtr)
    # and assemble the pieces into one full-size ParaviewData struct.
    file = open(joinpath(dir,fname), "r")
    header = true
    num = 1;
    FullSize= (1,1,1);
    num_data_sets = 1;
    Data_3D=[]; coord_x=[]; coord_y=[]; coord_z=[]; NumComp=[]; Names=[]
    while header==true
        line = readline(file)
        line_strip = lstrip(line)
        if startswith(line_strip, "<PRectilinearGrid")
            # global grid size from the WholeExtent attribute
            id_start = findfirst("WholeExtent=", line_strip)[1]+13
            line_strip = line_strip[id_start:end]
            id_end = findfirst("\"", line_strip)[1]-1
            line_piece = line_strip[1:id_end]
            WholeExtent = parse.(Int64,split(line_piece))
            FullSize = (WholeExtent[2],WholeExtent[4],WholeExtent[6])
        end
        if startswith(line_strip, "<Piece")
            # each <Piece Source="..."> line names one .vtr file to read
            id_start = findfirst("Source=", line_strip)[1]+8
            line_strip = line_strip[id_start:end]
            id_end = findfirst("\"", line_strip)[1]-1
            fname_piece = line_strip[1:id_end]
            if num_data_sets==1
                coord_x, coord_y, coord_z, Data_3D, Names, NumComp, ix,iy,iz = ReadData_VTR(joinpath(dir,fname_piece), FullSize);
            else
                # later pieces fill in their own index ranges and are summed
                # into the (zero-initialized elsewhere) full arrays
                coord_x1, coord_y1, coord_z1, Data_3D1, Names, NumComp, ix,iy,iz = ReadData_VTR(joinpath(dir,fname_piece), FullSize);
                coord_x[ix] = coord_x1[ix];
                coord_y[iy] = coord_y1[iy];
                coord_z[iz] = coord_z1[iz];
                Data_3D = Data_3D+Data_3D1;
            end
            num_data_sets += 1
        end
        if startswith(line_strip, "</PRectilinearGrid")
            header=false;
        end
    end
    # Create a named-Tuple out of the fields
    # field names may contain a trailing " [unit]" part; keep only the first word
    NamesSymbol = [];
    for i=1:length(Names)
        id = findfirst(" ", Names[i])
        if id == nothing
            Names_Strip = Names[i]
        else
            Names_Strip = Names[i][1:findfirst(" ", Names[i])[1]-1];
        end
        NamesSymbol = [NamesSymbol; Names_Strip]
    end
    # NamesSymbol = [Names[i][1:findfirst(" ", Names[i])[1]-1] for i=1:length(Names)]
    Names1 = Symbol.(NamesSymbol)
    Data_Array = [];
    num = 1;
    # split the stacked component rows back into one entry per field;
    # multi-component fields become a tuple of 3D arrays
    for i=1:length(NumComp)
        data = Data_3D[num:num+NumComp[i]-1,:,:,:];
        data_arrays = [data[i,:,:,:] for i=1:size(data,1)]
        data_tuple = tuple(data_arrays...)
        if size(data,1)>1
            Data_NamedTuple = NamedTuple{(Names1[i],)}((data_tuple,))
        else
            Data_NamedTuple = NamedTuple{(Names1[i],)}((data_tuple[1],))
        end
        Data_Array = [Data_Array; Data_NamedTuple]
        num = num+NumComp[i];
    end
    # Merge vector with tuples into a NamedTuple
    fields = Data_Array[1];
    for i=2:length(Data_Array)
        fields = merge(fields, Data_Array[i])
    end
    # Create a ParaviewData struct from it.
    X,Y,Z = XYZGrid(coord_x, coord_y, coord_z)
    DataC = ParaviewData(X,Y,Z, fields);
    return DataC
end
"""
Save_LaMEMTopography(Topo::CartData, filename::String)
This writes a topography file `Topo` for use in LaMEM, which should have size `(nx,ny,1)` and contain the field `:Topography`
"""
function Save_LaMEMTopography(Topo::CartData, filename::String)
    # Topography must be a single surface: one layer in the 3rd dimension
    if (size(Topo.z.val,3) != 1)
        error("Not a valid `CartData' Topography file (size in 3rd dimension should be 1)")
    end
    if !haskey(Topo.fields,:Topography)
        error("The topography `CartData` structure requires a field :Topography")
    end
    # Code the topograhic data into a vector
    # Header: nx, ny, origin (x0,y0), spacing (dx,dy), followed by elevations
    nx = Float64(size(Topo.fields.Topography,1));
    ny = Float64(size(Topo.fields.Topography,2));
    x0 = ustrip(Topo.x.val[1,1,1])
    y0 = ustrip(Topo.y.val[1,1,1])
    # NOTE(review): spacing taken from the first cell; assumes a uniform grid -- confirm
    dx = ustrip(Topo.x.val[2,2,1]) - x0
    dy = ustrip(Topo.y.val[2,2,1]) - y0
    Topo_vec = [ nx;ny;x0;y0;dx;dy; ustrip.(Topo.fields.Topography[:])]
    # Write as PetscBinary file
    PetscBinaryWrite_Vec(filename, Topo_vec)
    println("Written LaMEM topography file: $(filename)")
    return nothing
end
"""
CreatePartitioningFile(LaMEM_input::String, NumProc::Int64; LaMEM_dir::String=pwd(), LaMEM_options::String="", MPI_dir="")
This executes LaMEM for the input file `LaMEM_input` & creates a parallel partitioning file for `NumProc` processors.
The directory where the LaMEM binary is can be specified; if not it is assumed to be in the current directory.
Likewise for the `mpiexec` directory (if not specified it is assumed to be available on the command line).
"""
function CreatePartitioningFile(LaMEM_input::String,NumProc::Int64; LaMEM_dir::String=pwd(), LaMEM_options="", MPI_dir="")
    # Create string to execute LaMEM
    # NOTE(review): the keyword `LaMEM_options` is accepted but never inserted
    # into the command line -- confirm whether this is intended.
    mpi_str = MPI_dir*"mpiexec -n $(NumProc) "
    LaMEM_str = LaMEM_dir*"/"*"LaMEM -ParamFile "*LaMEM_input*" -mode save_grid "
    str = mpi_str*LaMEM_str
    println("Executing command: $str")
    # Run
    # NOTE(review): `exit` shadows Base.exit inside this function; `success`
    # below blocks until the spawned process finishes.
    exit=run(`sh -c $str`, wait=false);
    # Retrieve newest file
    if success(exit)
        # glob"..." string macro requires Glob.jl to be loaded at file level
        files=readdir(glob"ProcessorPartitioning_*.bin")
        time_modified = zeros(length(files))
        for (i,file) in enumerate(files)
            time_modified[i] = stat(file).mtime
        end
        id = findall(time_modified.==maximum(time_modified)) # last modified
        PartFile = files[id]
        println("Successfuly generated PartitioningFile: $(PartFile[1])")
    else
        error("Something went wrong with executing command ")
    end
    return PartFile[1]
end
27,
7856,
261,
480,
29,
20298,
14874,
18173,
14,
10082,
41789,
17633,
8645,
1352,
13,
20362,
198,
3500,
7308,
25,
2558,
2414,
11,
48436,
2414,
11,
34441,
51,
29291,
198,
3500,
12578,
69,
198,
3500,
40713,
198,
198,
2,
4689,
44,
3620,
... | 2.021001 | 14,142 |
# XML wrapper for a Newick-format tree string.
mutable struct NewickXMLElement <: MyXMLElement
    el::XMLOrNothing            # cached XML element; `nothing` until make_xml is called
    newick::String              # Newick tree string (make_xml appends the trailing ';')
    fix_tree::Bool              # when true, make_xml emits usingHeights/usingDates attributes
    NewickXMLElement(newick::String) = new(nothing, newick, true)
end
# Build (and cache on `nl.el`) the <newick> XML element for the tree.
function make_xml(nl::NewickXMLElement)
    el = new_element(bn.NEWICK)
    set_attribute(el, bn.ID, bn.DEFAULT_TREE_NAME)
    if nl.fix_tree
        # NOTE(review): presumably fixes the tree via node heights rather than
        # tip dates -- confirm against the BEAST XML schema
        set_attribute(el, bn.USING_HEIGHTS, bn.TRUE)
        set_attribute(el, bn.USING_DATES, bn.FALSE)
    end
    set_content(el, "$(nl.newick);")    # Newick strings require a terminating ';'
    nl.el = el
    return el
end
| [
76,
18187,
2878,
968,
624,
37643,
2538,
1732,
1279,
25,
2011,
37643,
2538,
1732,
198,
220,
220,
220,
1288,
3712,
55,
5805,
5574,
18465,
198,
220,
220,
220,
649,
624,
3712,
10100,
198,
220,
220,
220,
4259,
62,
21048,
3712,
33,
970,
6... | 2.139831 | 236 |
<gh_stars>0
global DISABLESTBPRTLINES = false
# Flip the module-level DISABLESTBPRTLINES flag and return its new value.
function togglePrtStbLines()
  global DISABLESTBPRTLINES
  DISABLESTBPRTLINES = !DISABLESTBPRTLINES
  return DISABLESTBPRTLINES
end
# Plot laser-scan features given as (bearing, range) rows of `br`.
# Converts polar (bearing, range) to Cartesian (x, y) and scatters the points.
function plotLsrScanFeats(br::Array{Float64,2})
  Cart = zeros(size(br))
  # Bug fix: trig functions must be broadcast over the column vectors
  # (`cos(::Vector)` is a MethodError on Julia >= 1.0)
  Cart[:,1] = br[:,2] .* cos.(br[:,1])
  Cart[:,2] = br[:,2] .* sin.(br[:,1])
  plot(x=Cart[:,1],y=Cart[:,2],Geom.point,
  Guide.xticks(ticks=collect(-60:10:60)),
  Guide.yticks(ticks=collect(0:10:80)))
end
# Plot feature-tracker beliefs: per-tracker mean points over a 2D histogram of
# all belief samples, plus rays to the (bearing,range) measurements in `bfts`.
function drawFeatTrackers(trkrs::Dict{Int64,Feature}, bfts::Array{Float64,2})
  # per-tracker mean/std of the belief samples
  musX = Float64[]
  varX = Float64[]
  musY = Float64[]
  varY = Float64[]
  allPtsX = Float64[]
  allPtsY = Float64[]
  for ftr in trkrs
    pts = getPoints(ftr[2].bel)
    allPtsX = [allPtsX; vec(pts[1,:])]
    allPtsY = [allPtsY; vec(pts[2,:])]
    push!(musX, Statistics.mean(vec(pts[1,:])))
    push!(varX, Statistics.std(vec(pts[1,:])))
    push!(musY, Statistics.mean(vec(pts[2,:])))
    push!(varY, Statistics.std(vec(pts[2,:])))
  end
  # convert measurement columns to Cartesian; an all-zero first column marks "no data"
  X = Float64[]
  Y = Float64[]
  if size(bfts,2) > 0
    if bfts[1,1] != 0.0 && bfts[2,1] != 0.0 && bfts[3,1] != 0.0
      for i in 1:size(bfts,2)
        u, R = p2c(vec(bfts[:,i]))
        push!(X, u[1])
        push!(Y, u[2])
      end
    end
  end
  # Guide.yticks(ticks=collect(-60:10:60)),
  # Guide.xticks(ticks=collect(0:10:80))
  p = plot(layer(x=musX, y=musY, Geom.point, Theme(default_color=colorant"red")),
  layer(x=allPtsX, y=allPtsY, Geom.histogram2d),
  Guide.yticks(ticks=collect(-70:10:70)),
  Guide.xticks(ticks=collect(-40:10:80)))
  # draw a ray from the origin to each measurement
  for i in 1:length(X)
    push!(p.layers, Gadfly.layer(x=[0.0;X[i]], y=[0.0;Y[i]], Geom.line, Gadfly.Theme(default_color=colorant"magenta"))[1])
  end
  p
end
# Render laser-scan plots for entries `from:step:to` of `d` and save each
# as imgs/img<i>.png (25cm x 25cm).
function saveImgSeq(d::Dict{Int64,Array{Float64,2}}; from::Int=1,to::Int=10,step::Int=1)
  for idx in from:step:to
    plt = plotLsrScanFeats(lsrBR(d[idx]))
    Gadfly.draw(PNG("imgs/img$(idx).png", 25cm, 25cm), plt)
  end
  return nothing
end
# --------------------------------------------------------------
# transfered in from IncrementalInference
## TODO -- you were here with port starboard lines
# Append port (red) / starboard (green) indicator line layers of length `l`
# to plot `pl` for each pose (Xpp[i], Ypp[i], Thpp[i]).
# No-op when the module-level DISABLESTBPRTLINES flag is set.
function stbPrtLineLayers!(pl, Xpp, Ypp, Thpp; l::Float64=5.0)
  if DISABLESTBPRTLINES
    return nothing
  end
  # lateral offsets (+y = port, -y = starboard) in the body frame
  lnstpr = [0.0;l;0.0]
  lnstpg = [0.0;-l;0.0]
  Rd =SE2(lnstpr)
  Gr = SE2(lnstpg)
  for i in 1:length(Xpp)
    # transform the body-frame offsets into the world frame at pose i
    lnstt = [Xpp[i];Ypp[i];Thpp[i]]
    Ps = SE2(lnstt)
    lnr = se2vee(Ps*Rd)
    lng = se2vee(Ps*Gr)
    xsr = [Xpp[i];lnr[1]]
    ysr = [Ypp[i];lnr[2]]
    xsg = [Xpp[i];lng[1]]
    ysg = [Ypp[i];lng[2]]
    push!(pl.layers, layer(x=xsr, y=ysr, Geom.path(), Gadfly.Theme(default_color=colorant"red", line_width=1.5pt))[1] )
    push!(pl.layers, layer(x=xsg, y=ysg, Geom.path(), Gadfly.Theme(default_color=colorant"green", line_width=1.5pt))[1] )
  end
  nothing
end
# draw the reference frame as a red-green dyad
# Append a red-green coordinate dyad (x-axis red, y-axis green) of length `l`
# to plot `pl` at each pose. Near-duplicate of stbPrtLineLayers! with axis-
# aligned offsets and no disable flag.
function addXYLineLayers!(pl, Xpp, Ypp, Thpp; l::Float64=1.0)
  # body-frame axis endpoints: +x (red) and +y (green)
  lnstpr = [l;0.0;0.0]
  lnstpg = [0.0;l;0.0]
  Rd =SE2(lnstpr)
  Gr = SE2(lnstpg)
  for i in 1:length(Xpp)
    # world-frame endpoints of the dyad at pose i
    lnstt = [Xpp[i];Ypp[i];Thpp[i]]
    Ps = SE2(lnstt)
    lnr = se2vee(Ps*Rd)
    lng = se2vee(Ps*Gr)
    xsr = [Xpp[i];lnr[1]]
    ysr = [Ypp[i];lnr[2]]
    xsg = [Xpp[i];lng[1]]
    ysg = [Ypp[i];lng[2]]
    push!(pl.layers, layer(x=xsr, y=ysr, Geom.path(), Gadfly.Theme(default_color=colorant"red", line_width=1.5pt))[1] )
    push!(pl.layers, layer(x=xsg, y=ysg, Geom.path(), Gadfly.Theme(default_color=colorant"green", line_width=1.5pt))[1] )
  end
  nothing
end
# function lblsFromTo(from,to)
# lbls=String[]
# [push!(lbls, "$(i)") for i in from:to]
# return lbls
# end
"""
$(SIGNATURES)
2D plot of all poses, assuming poses are labeled from ``::Symbol` type `:x0, :x1, ..., :xn`. Use `to` and `from` to limit the range of numbers `n` to be drawn. The underlying histogram can be enabled or disabled, and the size of maximum-point belief estimate cursors can be controlled with `spscale`.
Future:
- Relax to user defined pose labeling scheme, for example `:p1, :p2, ...`
"""
function drawPoses(fg::G;
                   from::Int64=0,
                   to::Int64=99999999,
                   meanmax=:max,
                   lbls=true,
                   drawhist=true,
                   spscale::Float64=5.0,
                   contour::Bool=true, levels::Int=1,
                   regexPoses=r"x" ) where G <: AbstractDFG
  #
  @info "drawPoses always sets orientation to max, regardless of meanmax setting. TODO modefit."
  #Gadfly.set_default_plot_size(20cm, 30cm)
  # Xpp = Float64[]; Ypp=Float64[]; Thpp=Float64[]; LBLS=String[];
  # fetch both the mean and max pose estimates, then select per `meanmax`
  uXpp,uYpp, uThpp, uLBLS = get2DPoseMeans(fg, from=from, to=to)
  xXpp,xYpp, xThpp, xLBLS = get2DPoseMax(fg, from=from, to=to)
  # if meanmax == :mean
  # elseif meanmax == :max
  # end
  Xpp = meanmax == :mean ? uXpp : xXpp
  Ypp = meanmax == :mean ? uYpp : xYpp
  Thpp = xThpp # always use max -- should be modefit
  LBLS = meanmax == :mean ? uLBLS : xLBLS
  # lbls = lblsFromTo(1,length(Xpp))
  # base layer: the pose path, optionally with labels
  psplt = Union{}
  if lbls
    psplt = Gadfly.plot(
      Gadfly.layer(x=Xpp,y=Ypp,label=LBLS,Geom.path(), Theme(line_width=1pt), Geom.label),
      Coord.cartesian(fixed=true)
    )
  else
    psplt = Gadfly.plot(
      Gadfly.layer(x=Xpp,y=Ypp,Geom.path(), Theme(line_width=1pt)),Coord.cartesian(fixed=true),
      Coord.cartesian(fixed=true)
    )
  end
  # return psplt
  # orientation dyads at each pose, scaled by spscale
  addXYLineLayers!(psplt, Xpp, Ypp, Thpp, l=spscale)
  if drawhist
    # 2D histogram of the raw pose belief samples underneath
    Xp,Yp = get2DPoseSamples(fg, from=from, to=to)
    push!(psplt.layers, Gadfly.layer(x=Xp, y=Yp, Geom.histogram2d)[1] )#(xbincount=100, ybincount=100))
  end
  # add contours to pose estimates
  if contour
    varsyms = Symbol.(LBLS)
    for vsym in varsyms
      pln = plotKDE(fg, vsym, dims=[1;2], levels=levels, c=["gray90"])
      union!(psplt.layers, pln.layers)
    end
  end
  return psplt
end
"""
$(SIGNATURES)
2D plot of landmarks, assuming `:l1, :l2, ... :ln`. Use `from` and `to` to control the range of landmarks `n` to include.
"""
function drawLandms(fg::AbstractDFG;
                    from::Int64=0, to::Int64=99999999,
                    minnei::Int64=0,
                    meanmax=:max,
                    lbls=true,showmm=false,drawhist=true,
                    contour::Bool=true, levels::Int=1,
                    c="red",
                    MM::Dict{Int,T}=Dict{Int,Int}(),
                    point_size=1pt,
                    regexLandmark::Regex=r"l" ) where T
  #Gadfly.set_default_plot_size(20cm, 30cm)
  Xp,Yp = get2DLandmSamples(fg, from=from, to=to)
  Xpp = Float64[]; Ypp=Float64[]; Thpp=Float64[]; lblstags=String[];
  # NOTE(review): `lblstags` above is never used; `lbltags` below is only
  # assigned for meanmax in (:mean, :max) -- any other value leaves it
  # undefined and errors later. Confirm intended.
  if meanmax==:mean
    Xpp,Ypp, t, lbltags = get2DLandmMeans(fg, from=from, to=to, regexLandmark=regexLandmark)
  elseif meanmax==:max
    Xpp,Ypp, t, lbltags = get2DLandmMax(fg, from=from, to=to,showmm=showmm,MM=MM, regexLandmark=regexLandmark)
  end
  # landmark point layer, optionally labeled
  if lbls
    psplt = Gadfly.plot(
      Gadfly.layer(x=Xpp,y=Ypp, label=lbltags, Geom.point, Theme(line_width=1pt, default_color=parse(Colorant,c), point_size=point_size), Geom.label),
      Coord.cartesian(fixed=true)
      # ,Gadfly.layer(x=Xp, y=Yp, Geom.histogram2d)#(xbincount=100, ybincount=100)
    )
  else
    psplt = Gadfly.plot(
      Gadfly.layer(x=Xpp,y=Ypp, Geom.point, Theme(line_width=1pt, default_color=parse(Colorant,c), point_size=1pt)),
      Coord.cartesian(fixed=true)
    )
  end
  if drawhist
    # histogram of the raw landmark belief samples underneath
    push!(psplt.layers, Gadfly.layer(x=Xp, y=Yp, Geom.histogram2d)[1])#(xbincount=100, ybincount=100)
  end
  if contour
    varsyms = Symbol.(lbltags)
    for vsym in varsyms
      pln = plotKDE(fg, vsym, dims=[1;2], levels=levels, c=["gray90"])
      union!(psplt.layers, pln.layers)
    end
  end
  psplt
end
"""
$(SIGNATURES)
2D plot of both poses and landmarks contained in factor graph. Assuming poses and landmarks are labeled `:x1, :x2, ...` and `:l0, :l1, ...`, respectively. The rnage of numbers to include can be controlled with `from` and `to` along with other keyword functionality for manipulating the plot.
Notes
- assumes `:l1`, `:l2`, ... for landmarks -- not using `tags=[:LANDMARK]` here yet (TODO).
"""
function drawPosesLandms(fgl::AbstractDFG;
                         from::Int64=0, to::Int64=99999999, minnei::Int64=0,
                         meanmax=:max, lbls=true, drawhist=false, MM::Dict{Int,T}=Dict{Int,Int}(),
                         contour::Bool=true, levels::Int=1,
                         showmm=true,
                         spscale::Float64=5.0,window::Union{Nothing, Tuple{Symbol, Real}}=nothing,
                         xmin=nothing, xmax=nothing, ymin=nothing, ymax=nothing,
                         point_size=1pt,
                         regexLandmark=r"l",
                         regexPoses=r"x" ) where {T}
  #
  # guard against degenerate axis limits
  xmin != nothing && xmax != nothing && xmin == xmax ? error("xmin must be less than xmax") : nothing
  ymin != nothing && ymax != nothing && ymin == ymax ? error("ymin must be less than ymax") : nothing
  # draw the pose layer, then overlay landmark layers when any exist
  ll = getVariableIds(fgl, regexLandmark)
  p = drawPoses(fgl, from=from,to=to,meanmax=meanmax,lbls=lbls,drawhist=drawhist, spscale=spscale, contour=contour)
  if length(ll) > 0
    pl = drawLandms(fgl, from=from, to=to, minnei=minnei,lbls=lbls,drawhist=drawhist, MM=MM, showmm=showmm, point_size=point_size, contour=contour)
    for l in pl.layers
      push!(p.layers, l)
    end
  end
  # optionally center the view on a variable with a half-width `window[2]`
  if window != nothing
    focusX = getKDEMax(getKDE(getVariable(fgl,window[1])))
    pwind = window[2]
    p.coord = Coord.cartesian(xmin=focusX[1]-pwind,xmax=focusX[1]+pwind,ymin=focusX[2]-pwind,ymax=focusX[2]+pwind)
  end
  # explicit axis limits override the window setting
  co = Coord.Cartesian(xmin=xmin,xmax=xmax,ymin=ymin,ymax=ymax)
  p.coord = co
  return p
end
# Plot up to three landmark submaps; each row of `fromto` is a [from to]
# landmark-number range. Submaps are colored red, blue, magenta in order.
function drawSubmaps(fgl::G, fromto::Array{Int,2};
                     m1hist=false, m2hist=false, m3hist=false,
                     showmm=false, MM::Dict{Int,T} = Dict{Int,Any}(),
                     xmin=nothing, xmax=nothing, ymin=nothing, ymax=nothing ) where {G <: AbstractDFG, T}
  #
  p = drawLandms(fgl, from=fromto[1,1], to=fromto[1,2], drawhist=m1hist, showmm=showmm, MM=MM)
  if size(fromto,1) >1
    # overlay second submap in blue
    p2 = drawLandms(fgl, from=fromto[2,1], to=fromto[2,2], drawhist=m2hist,c="blue", showmm=showmm, MM=MM)
    for l in p2.layers
      push!(p.layers, l)
    end
  end
  if size(fromto,1) >2
    # overlay third submap in magenta
    p3 = drawLandms(fgl, from=fromto[3,1], to=fromto[3,2], drawhist=m3hist,c="magenta", showmm=showmm, MM=MM)
    for l in p3.layers
      push!(p.layers, l)
    end
  end
  co = Coord.Cartesian(xmin=xmin,xmax=xmax,ymin=ymin,ymax=ymax)
  p.coord = co
  return p
end
# Convenience method: each entry of `fromto` becomes the [center-spread,
# center+spread] range of a submap, then delegates to the matrix method.
function drawSubmaps(fgl::G, fromto::Array{Int,1}; spread::Int=25,
                     m1hist=false, m2hist=false, m3hist=false,
                     showmm=false, MM::Dict{Int,T}=Dict{Int,Any}(),
                     xmin=nothing, xmax=nothing, ymin=nothing, ymax=nothing ) where {G <: AbstractDFG, T}
  #
  ft = hcat(fromto .- spread, fromto .+ spread)
  drawSubmaps(fgl, ft, m1hist=m1hist, m2hist=m2hist, m3hist=m3hist, showmm=showmm, MM=MM, xmin=xmin,xmax=xmax,ymin=ymin,ymax=ymax)
end
# function getKDEMax(p::BallTreeDensity;N=200)
# m = zeros(p.bt.dims)
# for i in 1:p.bt.dims
# mm = marginal(p,[i])
# rangeV = getKDERange(mm)
# X = linspace(rangeV[1],rangeV[2],N)
# yV = evaluateDualTree(mm,X)
# m[i] = X[findfirst(yV,maximum(yV))]
# end
# return m
# end
# function plotPose(::Pose2, bels::Vector{BallTreeDensity}, title; levels::Int=5, c=nothing)
# p1 = plotKDE(bels, dims=[1;2], levels=levels, c=c, title=title)
# p2 = plotKDE(bels, dims=[3], c=c)
#
#
# Gadfly.vstack(p1,p2)
# end
import KernelDensityEstimate: getKDERange
# Combined KDE range over a vector of beliefs, seeded from the first belief.
function getKDERange(bds::Vector{BallTreeDensity}; extend=0.15)
  dims = Ndim(bds[1])
  ran = getKDERange(bds[1],extend=extend)
  for bd in bds
    rr = getKDERange(bd,extend=extend)
    # NOTE(review): starts at dim 2 (dim 1 is never widened) and uses `maximum`
    # for both the lower (j=1) and upper (j=2) bound columns, so the lower bound
    # grows rather than shrinks -- verify this is intentional.
    for i in 2:dims, j in 1:2
      ran[i,j] = maximum([rr[i,j]; ran[i,j]])
    end
  end
  return ran
end
# import RoMEPlotting: plotPose
"""
$SIGNATURES
Plot pose belief as contour information on visually sensible manifolds.
"""
function plotPose(pt::Pose2,
                  pp::Vector{BallTreeDensity},
                  title="plotPose2";
                  levels=3,
                  c=nothing,
                  axis=nothing,
                  scale::Float64=0.2,
                  overlay=nothing,
                  hdl=[] )
  #
  # ops = buildHybridManifoldCallbacks(pt.manifolds)
  # @show ran = getKDERange(p, addop=ops[1], diffop=ops[2])
  # (x,y) translation belief as contours; axis range from the beliefs unless given
  ran = axis == nothing ? getKDERange(pp) : axis
  p1 = plotKDE(pp, dims=[1;2], levels=levels, c=c, title=title, axis=ran )
  # p2 = plotKDE(bels, dims=[3], c=c)
  # one color per belief for the circular (heading) marginals
  cc = c == nothing ? getColorsByLength(length(pp)) : c
  GG = BallTreeDensity[]
  for ppc in pp
    # marginal over dim 3 (heading)
    gg = marginal(ppc,[3])
    # gg = (x)->pc(reshape([x], :,1))[1]
    push!(GG, gg)
  end
  # p2 = AMP.plotCircBeliefs(GG, c=cc)
  p2 = AMP.plotKDECircular(GG, scale=scale, c=cc)
  # deal with overlay
  push!(hdl, p1)
  push!(hdl, p2)
  Gadfly.hstack(p1,p2)
end
function plotPose(pt::Pose2,
pp::BallTreeDensity,
title="plotPose2";
levels=3,
c=nothing,
axis=nothing,
scale::Float64=0.2,
overlay=nothing,
hdl=[] )
#
plotPose(pt, [pp;],title,levels=levels,c=c,axis=axis,scale=scale, overlay=overlay, hdl=hdl )
end
function plotPose(::DynPose2,
bels::Vector{BallTreeDensity},
title;
levels::Int=5,
c=nothing,
axis=nothing,
hdl=[] )
#
p1 = plotKDE(bels, dims=[1;2], levels=levels, c=c, title=title)
p2 = plotKDE(bels, dims=[3], c=c)
p3 = plotKDE(bels, dims=[4;5], levels=levels, c=c)
push!(hdl, p1)
push!(hdl, p2)
push!(hdl, p3)
Gadfly.vstack(p1,p2,p3)
end
# import RoMEPlotting: plotPose
function plotPose(::Pose3,
bels::Vector{BallTreeDensity},
title;
levels::Int=5,
c=nothing,
axis=nothing,
hdl=[] )
#
@show title
p1 = plotKDE(bels, dims=[1;2], levels=levels, c=c, title=title)
p2 = plotKDE(bels, dims=[3], c=c)
p3 = plotKDE(bels, dims=[4;5], levels=levels, c=c)
p4 = plotKDE(bels, dims=[6], c=c)
push!(hdl, p1)
push!(hdl, p2)
push!(hdl, p3)
push!(hdl, p4)
Gadfly.vstack(p1,p2,p3,p4)
end
"""
$(SIGNATURES)
Example: pl = plotPose(fg, [:x1; :x2; :x3])
"""
function plotPose(fgl::G,
syms::Vector{Symbol};
levels::Int=5,
c=nothing,
axis=nothing,
scale::Float64=0.2,
show::Bool=false,
filepath::AS="/tmp/tempposeplot.svg",
app::AS="eog",
hdl=[] ) where {G <: AbstractDFG, AS <: AbstractString}
#
typ = getData(getVariable(fgl, syms[1])).softtype
pt = string(string.(syms)...)
getvertsgg = (sym) -> getKDE(getVariable(fgl, sym))
pl = plotPose(typ, getvertsgg.(syms), pt, levels=levels, c=c, axis=axis, scale=scale, hdl=hdl )
if length(filepath) > 0
ext = split(filepath, '.')[end]
cmd = getfield(Gadfly,Symbol(uppercase(ext)))
h = 1*7Gadfly.cm
if typ == DynPose2
h *= 1.5
end
Gadfly.draw(cmd(filepath,15Gadfly.cm,h),pl)
@async !show ? nothing : run(`$app $filepath`)
end
return pl
end
function plotPose(fgl::G,
sym::Symbol;
levels::Int=5,
c=nothing,
axis=nothing,
scale::Float64=0.2,
show::Bool=false,
filepath::AS="/tmp/tempposeplot.svg",
app::AS="eog",
hdl=[] ) where {G <: AbstractDFG, AS <: AbstractString}
#
plotPose(fgl, [sym;], levels=levels, axis=axis, show=show, filepath=filepath, app=app, hdl=hdl )
end
# deprecated
function investigatePoseKDE(p::BallTreeDensity, p0::BallTreeDensity)
# co = ["black"; "blue"]
# h = Union{}
# x = plotKDE([marginal(p,[1]); marginal(p0,[1])], c=co )
# y = plotKDE([marginal(p,[2]); marginal(p0,[2])], c=co )
# if p.bt.dims >= 3
# th = plotKDE([marginal(p,[3]); marginal(p0,[3])], c=co )
# h = hstack(x,y,th)
# else
# h = hstack(x,y)
# end
#
# return h
return investigateMultidimKDE(p, p0)
end
function investigatePoseKDE(p::Array{BallTreeDensity,1})
# co = ["black"; "blue"; "green"; "red"; "magenta"; "cyan"; "cyan1"; "cyan2";
# "magenta"; "cyan"; "cyan1"; "cyan2"; "magenta"; "cyan"; "cyan1"; "cyan2"; "magenta";
# "cyan"; "cyan1"; "cyan2"; "magenta"; "cyan"; "cyan1"; "cyan2"]
# # compute all the marginals
# Pm = Array{Array{BallTreeDensity,1},1}()
# push!(Pm,stackMarginals(p,1)) #[marginal(p[1],[1]); marginal(p[2],[1])]
# push!(Pm,stackMarginals(p,2)) #[marginal(p[1],[2]); marginal(p[2],[2])]
#
# h = Union{}
# x = plotKDE(Pm[1], c=co )
# y = plotKDE(Pm[2], c=co )
# if p[1].bt.dims >= 3
# #Pm3 = [marginal(p[1],[3]); marginal(p[2],[3])]
# push!(Pm,stackMarginals(p,3)) # [marginal(p[1],[3]); marginal(p[2],[3])]
# th = plotKDE(Pm[3], c=co )
# h = hstack(x,y,th)
# else
# h = hstack(x,y)
# end
# return h
return investigateMultidimKDE(p)
end
function investigatePoseKDE(p::BallTreeDensity)
# x = plotKDE(marginal(p,[1]) )
# y = plotKDE(marginal(p,[2]) )
# if p.bt.dims >= 3
# th = plotKDE(marginal(p,[3]) )
# return hstack(x,y,th)
# end
# return hstack(x,y)
return investigateMultidimKDE(p)
end
# import RoMEPlotting: drawMarginalContour
function drawMarginalContour(fgl::G, lbl::String;
xmin=-150,xmax=150,ymin=-150,ymax=150,n=200 ) where G <: AbstractDFG
#
p = getKDE(getVariable(fgl,Symbol(lbl))) # p = getKDE(getVert(fgl,lbl))
Gadfly.plot(z=(x,y)->evaluateDualTree(p,vectoarr2([x,y]))[1],
x=collect(range(xmin,stop=xmax,length=n)),
y=collect(range(ymin,stop=ymax,length=n)),
Geom.contour,
Coord.Cartesian(xmin=xmin,xmax=xmax,ymin=ymin,ymax=ymax),
Guide.title(lbl)
)
end
function accumulateMarginalContours(fgl, order;
xmin=-150,xmax=150,ymin=-150,ymax=150,n=200 )
#
pl = drawMarginalContour(fgl, order[1],xmin=xmin,xmax=xmax,ymin=ymin,ymax=ymax,n=n)
pl2 = nothing
PL = []
for or in order[1:end]
pl2 = drawMarginalContour(fgl, or, xmin=xmin,xmax=xmax,ymin=ymin,ymax=ymax,n=n)
push!(PL, pl2)
push!(pl.layers, pl2.layers[1])
end
return pl, PL
end
function plotPose3Pairs(fgl::G, sym::Symbol; fill::Bool=true) where {G <: AbstractDFG}
p1= plotKDE(fgl, :x1, dims=[1;2], fill=fill)
p2 = plotKDE(fgl, :x1, dims=[6;3], fill=fill)
p3 = plotKDE(fgl, :x1, dims=[4;5], fill=fill)
Gadfly.draw(PDF("/tmp/RoMEvstackPose3.pdf",15cm, 20cm), vstack(p1,p2,p3) )
@async run(`evince /tmp/RoMEvstackPose3.pdf`)
nothing
end
# Victoria Park Plotting functions
function progressExamplePlot(dOdo, lsrFeats; toT=Inf)
len = length(dOdo)
pose = SE2(zeros(3))
lastpose = zeros(3)
idx = 1
T = dOdo[idx][4]
lstlaseridx = 1
WFTSX = Array{Float64,1}()
WFTSY = Array{Float64,1}()
WLBLS = ASCIIString[]
lastX = Array{Float64,1}()
lastY = Array{Float64,1}()
while T < toT && idx <= len
lastX = Array{Float64,1}()
lastY = Array{Float64,1}()
pose = pose*SE2(dOdo[idx][1:3]) # todo -- replace with inferred latest pose
#@show idx, T, vec(pose[1:2,3])
lastpose = vec(se2vee(pose))
# lstlaseridx, Ta = getFeatsAtT(lsrFeats, T, prev=lstlaseridx)
# bfts = lsrFeats[lstlaseridx].feats
fe = lsrFeats[idx]
if length(lsrFeats[idx]) > 0
bfts = zeros(3,length(fe))
lbls = ASCIIString[]
k = collect(keys(fe))
for i in 1:length(fe)
bfts[1:length(fe[k[i]]),i] = fe[k[i]]
push!(lbls, "l$(k[i])")
end
if bfts[1,1] != 0.0 && bfts[2,1] != 0.0 && bfts[3,1] != 0.0
wfts = rotateFeatsToWorld(bfts, pose)
for i in 1:size(wfts,2)
push!(WFTSX, wfts[1,i])
push!(WFTSY, wfts[2,i])
push!(WLBLS, lbls[i])
push!(lastX, wfts[1,i])
push!(lastY, wfts[2,i])
end
end
end
idx += 1
if idx <= len
T = dOdo[idx][4]
end
end
p = plotPoseDict(dOdo,to=idx-1)
if length(WFTSX) > 0
l = Gadfly.layer(x=WFTSX, y=WFTSY, label=WLBLS, Geom.label, Geom.point, Gadfly.Theme(default_color=colorant"red"))
push!(p.layers, l[1])
l2 = Gadfly.layer(x=WFTSX, y=WFTSY, Geom.point, Gadfly.Theme(default_color=colorant"red"))
push!(p.layers, l2[1])
for i in 1:length(lastX)
push!(p.layers, Gadfly.layer(x=[lastpose[1];lastX[i]], y=[lastpose[2];lastY[i]], Geom.line, Gadfly.Theme(default_color=colorant"magenta"))[1])
end
end
p
end
function plotTrckStep(DBG, i, fid, m)
@show keys(DBG[i])
pf = DBG[i][fid]
arr = Array{BallTreeDensity,1}()
for j in 1:3
push!(arr, marginal(pf[j],[m]))
end
plotKDE(arr, c=["red";"green";"black"])
end
function plotPose3Pairs(fgl::FactorGraph, sym::Symbol; fill::Bool=true)
p1= plotKDE(fgl, sym, dims=[1;2], fill=fill)
p2 = plotKDE(fgl, sym, dims=[6;3], fill=fill)
p3 = plotKDE(fgl, sym, dims=[4;5], fill=fill)
Gadfly.draw(PDF("/tmp/RoMEvstackPose3.pdf",15cm, 20cm), vstack(p1,p2,p3) )
@async run(`evince /tmp/RoMEvstackPose3.pdf`)
nothing
end
function plotKDE(fgl::FactorGraph,
vsym::Vector{Symbol};
axis=nothing,
dims=nothing,
c=getColorsByLength(length(vsym)),
levels::Int=4,
title::Union{Nothing, T}=nothing,
overlay=nothing ) where {T <: AbstractString}
#
verts = map((x)->getKDE(getVariable(fgl, x)), vsym)
plotKDE(verts, dims=dims, c=c, axis=axis, levels=levels, title=title, overlay=overlay )
end
function plotKDE(fgl::FactorGraph, vsym::Symbol; axis=nothing, dims=nothing, c=nothing, levels=4, title::Union{Nothing, T}=nothing) where {T <: AbstractString}
@warn "plotKDE for FactorGraph is deprecated, use DFG objects instead."
plotKDE(fgl, Symbol[vsym;], dims=dims, c=c, axis=axis, levels=levels, title=title)
end
function plotTrailingPoses(pt::Pose2,
pp::Vector{BallTreeDensity},
title="";
levels=2,
c=nothing,
axis=nothing,
scale::Float64=0.2,
circlen::Int=5)
ran = axis == nothing ? getKDERange(pp) : axis
cc=["red"; ["pink" for i in 1:100]]
p1 = plotKDE(pp, dims=[1;2], levels=levels, c=cc, title=title, axis=ran )
GG = BallTreeDensity[]
for ppc in pp
gg = marginal(ppc,[3])
# gg = (x)->pc(reshape([x], :,1))[1]
push!(GG, gg)
end
p2 = AMP.plotKDECircular(GG[(end-circlen):end], scale=scale, c=cc)
p2,p1
end
function plotTrailingPoses(fg::G,
pp::Vector{Symbol},
title="";
levels=2,
c=nothing,
axis=nothing,
scale::Float64=0.2,
circlen::Int=5) where G <: AbstractDFG
#
plotTrailingPoses(Pose2(), map(x->getKDE(fg,x),pp), scale=scale, title=title, circlen=circlen)
end
# gg = (x)->plotTrailingPoses(fg, [Symbol("x$i") for i in (x+60):-5:x],circlen=3)
#
# for i in 5:5:290
# g1,g2 = gg(i)
#
# g1 |> SVG("/tmp/trailingimgs/g1_$(i).svg")
# g1 |> SVG("/tmp/trailingimgs/g1_$(i+1).svg")
# g1 |> SVG("/tmp/trailingimgs/g1_$(i+2).svg")
# g1 |> SVG("/tmp/trailingimgs/g1_$(i+3).svg")
# g1 |> SVG("/tmp/trailingimgs/g1_$(i+4).svg")
#
# g2 |> SVG("/tmp/trailingimgs/g2_$(i).svg")
# g2 |> SVG("/tmp/trailingimgs/g2_$(i+1).svg")
# g2 |> SVG("/tmp/trailingimgs/g2_$(i+2).svg")
# g2 |> SVG("/tmp/trailingimgs/g2_$(i+3).svg")
# g2 |> SVG("/tmp/trailingimgs/g2_$(i+4).svg")
# end
#
| [
27,
456,
62,
30783,
29,
15,
198,
20541,
13954,
6242,
43,
6465,
33,
4805,
14990,
1268,
1546,
796,
3991,
198,
198,
8818,
19846,
6836,
83,
1273,
65,
43,
1127,
3419,
198,
220,
3298,
13954,
6242,
43,
6465,
33,
4805,
14990,
1268,
1546,
19... | 1.915194 | 12,735 |
module QuakePAK
const _fileid = 0x5041434b # The chars "PACK" used as a signature of PAK archives
struct ReadableFile <: IO
_io::IO
filename:: String
offset:: Int
size:: Int
end
Base.position(rf::ReadableFile) = position(rf._io) - rf.offset
Base.eof(rf.ReadableFile) = position(rf._io) < rf.offset + rf.size
Base.show(io::IO, p::PakFileEntry) =
print(io, "$(p.pak_filename)/$(p.filename) [offset: $(p.offset), size: $(p.size) byte]")
function read_file_entry(io::IO)
pos = position(io)
name = readuntil(io, '\0')
seek(io, pos+56)
offset = read(io, Int32)
size = read(io, Int32)
return name, offset, size
end
function open_pak(filename::String)
fileentries = []
open(filename) do f
# Check if the file is a PAK-File
@assert read(f, UInt32) == _fileid
# Read meta data
offset = read(f, Int32) # Where the file entries start
size = read(f, Int32) # Size of file entry table
nbentries = size÷64 # Each file entry is 64 bytes long
# Iterate over all file entries
seek(f, offset)
for _ in 1:nbentries
name, offset, size = read_file_entry(f)
push!(fileentries, PakFileEntry(filename, name, offset, size))
end
end
return fileentries
end
export open_pak
end # module
| [
21412,
42901,
4537,
42,
198,
198,
9979,
4808,
7753,
312,
796,
657,
87,
33580,
1415,
2682,
65,
220,
220,
220,
220,
220,
1303,
383,
34534,
366,
47,
8120,
1,
973,
355,
257,
9877,
286,
8147,
42,
22415,
198,
198,
7249,
4149,
540,
8979,
... | 2.519763 | 506 |
@testset "ladderize" begin
start_tree = ParseNewick("((A,(B,(C,(D,E)))),(F,(G,H)));")
descending_tree = ParseNewick("(((((D,E),C),B),A),((G,H),F));")
ascending_tree = ParseNewick("((F,(G,H)),(A,(B,(C,(D,E)))));")
start_newick = newick(start_tree)
desc_newick = newick(descending_tree)
asc_newick = newick(ascending_tree)
ladderized_asc_tree = ladderize_tree(start_tree)
@test newick(ladderized_asc_tree) == asc_newick
ladderized_desc_tree = ladderize_tree(start_tree,false)
@test newick(ladderized_desc_tree) == desc_newick
ladderize_tree!(start_tree)
@test newick(start_tree) == asc_newick
start_tree = ParseNewick("((A,(B,(C,(D,E)))),(F,(G,H)));")
ladderize_tree!(start_tree, false)
@test newick(start_tree) == desc_newick
end
| [
198,
31,
9288,
2617,
366,
9435,
1082,
1096,
1,
2221,
198,
220,
220,
220,
923,
62,
21048,
796,
2547,
325,
3791,
624,
7203,
19510,
32,
11,
7,
33,
11,
7,
34,
11,
7,
35,
11,
36,
22305,
828,
7,
37,
11,
7,
38,
11,
39,
4008,
1776,
... | 2.196133 | 362 |
<gh_stars>1-10
@everywhere module DatasetPreloader
using HDF5
using Distributed
export preload, load, getPreloadFileName
preloadFileName = ""
preloadFuture = 0
function readFromHDF(filename)
GC.gc()
try
return h5read(filename,"/FullSpectra/TofData")
catch
return 0
end
end
getPreloadFileName() = preloadFileName
function preload(filename)
global preloadFileName = filename
println("preloading file $filename")
global preloadFuture = remotecall(readFromHDF,2,filename)
end
function load(filename)
global preloadFileName
global preloadFuture
if (preloadFileName != "" && preloadFileName == filename && preloadFuture != 0)
println("fetching $filename from future")
ds = fetch(preloadFuture)
preloadFuture = 0
preloadFilename = ""
return ds
else
println("reading $filename directly")
preloadFuture = 0
preloadFilename = ""
return readFromHDF(filename)
end
end
end
| [
27,
456,
62,
30783,
29,
16,
12,
940,
198,
31,
16833,
3003,
8265,
16092,
292,
316,
6719,
29356,
198,
197,
3500,
5572,
37,
20,
198,
197,
3500,
4307,
6169,
198,
197,
39344,
662,
2220,
11,
3440,
11,
651,
6719,
2220,
8979,
5376,
628,
1... | 2.78806 | 335 |
module TracePrecompiles
function run_julia(cmd)
current_proj = unsafe_string(Base.JLOptions().project)
run(`$(Base.julia_cmd()) --project=$(current_proj) --startup-file=no $(cmd)`)
end
function trace_compiles(package, trace_file, outputfile)
tdir = mktempdir(; cleanup=true)
trace_out = joinpath(tdir, "precompiles.jl")
run_julia(`--trace-compile=$(trace_out) $trace_file`)
prs = joinpath(@__DIR__, "process-prs.jl")
run_julia(` $prs $(package) $(trace_out) $(outputfile)`)
end
end
| [
21412,
34912,
6719,
5589,
2915,
198,
198,
8818,
1057,
62,
73,
43640,
7,
28758,
8,
198,
220,
220,
220,
1459,
62,
1676,
73,
796,
21596,
62,
8841,
7,
14881,
13,
41,
21982,
8544,
22446,
16302,
8,
198,
220,
220,
220,
1057,
7,
63,
3,
... | 2.419811 | 212 |
import ..KERNEL.System
"Save information needed to identify a SSBond"
struct SSBond #Tuple of chain_id and residue_number
chain_id::Char
res_num::Int64
end
"Packages the parsed fields from a conect line in a struct to avoid allocations"
mutable struct ConectLineParams
bonding_atoms::Vector{Int}
tokens::Vector{String}
end
"Packages the parsed fields from an atom line in a struct to avoid allocations"
mutable struct AtomLineParams
name::String
x::Float64
y::Float64
z::Float64
elem::String
occupancy::Union{Float64,Nothing}
temp_factor::Union{Float64,Nothing}
charge::Union{Float64,Nothing}
serial::Int64
AtomLineParams() = new()
end
"""
parseConectLine(line::String)
Parses a line starting with "CONNECT" in a PDB file.
"""
parseConectLine(params::ConectLineParams, line::String) = begin
if length(line) == 16
push!(params.tokens, line[12:16])
elseif length(line) == 21
push!(params.tokens, line[12:16], line[17:21])
elseif length(line) == 26
push!(params.tokens, line[12:16], line[17:21],
line[22:26])
else
push!(params.tokens, line[12:16], line[17:21],
line[22:26], line[27:31])
end
for x in params.tokens
if strip(x) != ""
push!(params.bonding_atoms, parse(Int,strip(x)))
end
end
return params
end
"""
parseAtomLine(line::String)
Parses a line starting with "ATOM" or "HET" in a PDB file.
"""
parseAtomLine(params::AtomLineParams, line::String) = begin
params.name = strip(line[13:16])
params.x = parse(Float64,line[31:38])
params.y = parse(Float64,line[39:46])
params.z = parse(Float64,line[47:54])
params.elem = strip(line[77:78])
params.occupancy = strip(line[55:60]) == "" ? nothing : parse(Float64,line[55:60])
params.temp_factor = strip(line[61:66]) == "" ? nothing : parse(Float64,line[61:66])
if length(line) >= 80
params.charge = strip(line[79:80]) == "" ? nothing : parse(Float64,line[79:80])
else
params.charge = nothing
end
result = Atom(params.name,params.x,params.y,params.z,params.elem,params.charge,
params.occupancy,params.serial,params.temp_factor)::Atom
if startswith(line,"HET")
setProperty(result,("hetero",true))
end
return result
end
"""
compare_ssbonds(b1::SSBond, b2::SSBond)
Comparison function for sorting.
"""
compare_ssbonds(b1::SSBond, b2::SSBond) = begin
b1.chain_id < b2.chain_id && return true
b1.chain_id == b2.chain_id && b1.res_num < b2.res_num && return true
return false
end
"""
parseSSBondLine(line::String)
Parses a line starting with "SSOBND" ina PDB file.
"""
parseSSBondLine(line::String) = begin
ssbond1::SSBond = SSBond(line[16], parse(Int,strip(line[18:21])))
ssbond2::SSBond = SSBond(line[30] ,parse(Int,strip(line[32:35])))
return (ssbond1, ssbond2)
end #returns 2 ssbonds
#only use following Function to build a System initially
"""
adjust_ter_indices(indices::Vector{Int64}, ter_pos::Vector{Int64})
Adjusts the indices in a list of stom serials read from a PDB file. The "TER" entries in PDB files
have a serial number like the Atoms, which makes the serial numbers not continuous.
"""
adjust_ter_indices(indices::Vector{Int64}, ter_pos::Vector{Int64}) = begin
for i in 1:length(indices)
c = 0
for ter in ter_pos
if indices[i] > ter
c += 1
end
end
indices[i] -= c
end
return indices
end
adjust_ter_indices(x::Int64, ter_pos::Vector{Int64}) = adjust_ter_indices([x],ter_pos)
"""
parsePDB(path::String)
Parses a PDB file from `path`. Creates a representation using [`KERNEL`](@ref KERNEL_header) types.\n
If the file has multiple models the property \"fromNMR\" is added to `root`.
See [Hint](https://www.wwpdb.org/documentation/file-format-content/format33/sect9.html#MODEL).
"""
parsePDB(path::String) = begin
# `ssbonds` holds all the ssbonds in order of appearance in the file
ssbonds::Vector{SSBond} = Vector{SSBond}()
# When reading the atoms from the file, relate the atom to an SSBond endpoint
ssbonds_to_atoms::Dict{SSBond, Int64} = Dict{SSBond, Int64}() #mapping ssbonds to atom serial number
#List of pairs of atoms bonded by a SSBond
ssbond_pairs::Vector{Tuple{SSBond,SSBond}} = Vector{Tuple{SSBond,SSBond}}()
current_ssbond_index::Int = 1
root::System = System()
target_system::System = System()
atoms::Vector{Atom} = Atom[]
ter_positions::Vector{Int} = Int64[]
latest_chain::Chain = Chain() #assume chains are named in alphabetical order with capitalized letters
latest_residue::Residue = Residue()
latest_residue.res_number_ = -127 #init value, which hopefully is NOT the same as the first residue-number in the file
atom_counter::Int = 0
latest_chain.id_ = '-' #init value, which hopefully is NOT the same as the first chain id in the file
appendChild(target_system, latest_chain)
seen_header::Bool = false::Bool
ready_to_sort_ssbonds::Bool = false
seen_ssbonds::Bool = false
seen_atoms::Bool = false
seen_model::Bool = false
#init fields for parsing a "CONECT" line
conect_line_params::ConectLineParams = ConectLineParams(Int[],String[])
#init fields for parsing a "ATOM" line
atom_line_params::AtomLineParams = AtomLineParams()
record_residue_name::String = ""
open(path) do file
for line in eachline(file)
#header
if !seen_header && startswith(line,"HEADER")
if length(line) >= 66
root.name_ = strip(line[63:66])
end
seen_header = true::Bool
end
if startswith(line,"TER")
push!(ter_positions, parse(Int64,strip(line[7:11])))
end
#save infos about the atoms which participate in ssbonds
if !seen_ssbonds && startswith(line, "SSBOND")
pair = parseSSBondLine(line)
push!(ssbond_pairs, pair)
push!(ssbonds, pair...)
ready_to_sort_ssbonds = true
end
#after having seen all of the ssbonds sort them to later find the correct atoms easier
if ready_to_sort_ssbonds && !startswith(line,"SSBOND")
seen_ssbonds = true
ready_to_sort_ssbonds = false
sort!(ssbonds, alg=InsertionSort, lt = compare_ssbonds)
end
#atoms of name "SG" belong to ssbonds. associate atom to ssbond entry
if (current_ssbond_index <= length(ssbonds)) && startswith(line,"ATOM") &&
(strip(line[13:16]) == "SG")
#find items from SSBOND
current_ssbond = ssbonds[current_ssbond_index]
#replace "SG" with a list of possible names of atoms which engage in ssbonds
if line[22] == current_ssbond.chain_id && current_ssbond.res_num == parse(Int,strip(line[23:26]))
ssbonds_to_atoms[current_ssbond] = adjust_ter_indices(parse(Int64,strip(line[7:11])), ter_positions)[1]
current_ssbond_index += 1
end
end
#model lines - means we need to construct a new model
if startswith(line,"MODEL")
if !seen_model
nothing
else
#finalize target_system and append it to system
appendChild(root, target_system)
target_system = System()
latest_chain = Chain()
latest_chain.id_ = 'A'
prev_residue_name = latest_residue.name_
latest_residue = Residue()
latest_residue.res_number_ = 1
latest_residue.name_ = prev_residue_name
latest_residue.is_hetero_ = false
appendChild(target_system, latest_chain)
appendChild(latest_chain, latest_residue)
atom_counter = 0
end
seen_model = true
end
if (!seen_atoms && startswith(line,"ATOM") ||
!seen_atoms && startswith(line,"HETATM")) &&
line[17] in (' ','A')
atom_counter += 1
record_chain_id = line[22]
latest_chain.id_ == '-' && (latest_chain.id_ = record_chain_id)
record_residue_name = strip(line[18:20])
record_residue_number = parse(Int64,strip(line[23:26]))
#create chain if new chain
if record_chain_id != latest_chain.id_
#is the next residues belong to another chain, create the chain
latest_chain = Chain()
latest_chain.id_ = record_chain_id
appendChild(target_system, latest_chain)
end
if record_residue_number != latest_residue.res_number_
latest_residue = Residue()
latest_residue.res_number_ = record_residue_number
latest_residue.name_ = record_residue_name
latest_residue.is_hetero_ = false
if latest_residue.name_ in Amino_Acids
setProperty(latest_residue, ("amino_acid",true))
end
appendChild(latest_chain, latest_residue)
end
#create atoms
atom_line_params.serial = atom_counter
parsed_atom::Atom = parseAtomLine(atom_line_params, line)
#the only property right now can be the hetero property
if length(parsed_atom.properties_) != 0
latest_residue.is_hetero_ = true
end
appendChild(latest_residue, parsed_atom)
end
if startswith(line, "CONECT")
if seen_atoms == false
seen_atoms = true
atoms = collectAtoms(target_system)
end
if seen_model
appendChild(root, target_system)
end
empty!(conect_line_params.bonding_atoms)
empty!(conect_line_params.tokens)
bonding_atom_serials = parseConectLine(conect_line_params, line).bonding_atoms
bonding_atom_serials = adjust_ter_indices(bonding_atom_serials, ter_positions)
for serial in bonding_atom_serials[2:end]
createBond(atoms[bonding_atom_serials[1]], atoms[serial],
order=ORDER__SINGLE, type=TYPE__COVALENT)
end
end
end
end
#for each ssbond pair, make a bond betwen the two corresponding atoms using the dict
for (ssbond1, ssbond2) in ssbond_pairs
deleteBond(atoms[ssbonds_to_atoms[ssbond1]], atoms[ssbonds_to_atoms[ssbond2]])
a = createBond(atoms[ssbonds_to_atoms[ssbond1]], atoms[ssbonds_to_atoms[ssbond2]],
order=ORDER__SINGLE, type=TYPE__DISULPHIDE_BRIDGE)
end
if root.name_ == ""
temp = findlast("/",path)
if isnothing(temp)
root_name_ = path[1:end-4]
else
root.name_ = path[temp[1]+1:end-4]
end
end
if !seen_model
target_system.name_ = root.name_
i = 1
for chain in collectChains(target_system)
if chain.id_ < 'A'+(i-1)
idx = Int(chain.id_) - Int('A') +1
if idx < 1 #catch chains with empty/too low char
chain.id_ = 'A'+(i-1)
i += 1
continue
end
ch_l = collectChains(target_system)[idx]
appendSibling(ch_l.last_child_, chain.first_child_)
removeChild(chain)
else
chain.id_ = 'A'+(i-1)
i += 1
end
end
return target_system
else
setProperty(root,("fromNMR",true))
for (num,system) in enumerate(collectSystems(root)[2:end])
system.name_ = root.name_*"-"*string(num)
i = 1
for chain in collectChains(target_system)
if chain.id_ < 'A'+(i-1)
idx = Int(chain.id_) - Int('A') +1
ch_l = collectChains(target_system)[idx]
appendSibling(ch_l.last_child_, chain.first_child_)
removeChild(chain)
else
chain.id_ = 'A'+(i-1)
i += 1
end
end
end
#copy bonds over
last_sys = last(getChildren(root))
last_sys_bonds = collectBonds(last_sys)
atom_serials_pairs::Vector{Tuple{Int64, Int64}} = [(bond.source_.serial_, bond.target_.serial_)
for bond in last_sys_bonds]
for sys in getChildren(root)
atoms = collectAtoms(sys)
for bond in last_sys_bonds
createBond(atoms[bond.source_.serial_], atoms[bond.target_.serial_],
name = bond.name_, order = bond.bond_order_, type = bond.bond_type_,
properties = bond.properties_)
end
end
return root
end
return root
end
# BioStructures reads structure and internal parser only the bonds
"""
System(path::String)
Constructs a `System` from a PDB file at `path`.
"""
function System(path::String)
if endswith(path, ".pdb")
root = parsePDB(path)::System
for sys in collectSystems(root)
sys.number_of_children_ = countChildren(sys)
end
return root
end
end
| [
198,
11748,
11485,
42,
28778,
3698,
13,
11964,
628,
198,
198,
1,
16928,
1321,
2622,
284,
5911,
257,
6723,
33,
623,
1,
198,
7249,
6723,
33,
623,
220,
220,
220,
1303,
51,
29291,
286,
6333,
62,
312,
290,
35186,
62,
17618,
198,
220,
2... | 2.013446 | 6,991 |
<reponame>jeremiahpslewis/Term.jl<gh_stars>0
import Term: Panel, TextBox
@testset "\e[34mPanel - no content" begin
for style in ("default", "red", "on_blue")
testpanel(
Panel(;fit=true, style=style), 3, 2
)
testpanel(
Panel(), 88, 2
)
testpanel(
Panel(; width=12, height=4, style=style), 12, 4
)
end
end
@testset "\e[34mPANEL - fit - measure" begin
for style in ("default", "red", "on_blue")
for justify in (:left, :center, :right)
# ----------------------------- text only content ---------------------------- #
testpanel(
Panel("t"; fit=true, style=style), 7, 3
)
testpanel(
Panel("test"; fit=true, style=style), 10, 3
)
testpanel(
Panel("1234\n123456789012"; fit=true, style=style), 18, 4
)
testpanel(
Panel("나랏말싸미 듕귁에 달아"; fit=true, style=style), 28, 3
)
testpanel(
Panel("나랏말싸미 듕귁에 달아\n1234567890123456789012"; fit=true, style=style), 28, 4
)
testpanel(
Panel("."^500; fit=true, style=style), displaysize(stdout)[2]-4, nothing
)
# ------------------------------- nested panels ------------------------------ #
testpanel(
Panel(
Panel("test"; fit=true, style=style);
fit=true, style=style),
16, 5
)
testpanel(
Panel(
Panel(Panel("."; fit=true, style=style); fit=true, style=style);
fit=true, style=style), 19, 7
)
# @test_nothrow Panel(
# Panel("."^250; fit=true, style=style); fit=true, style=style
# )
end
end
end
@testset "\e[34mPANEL - nofit - measure" begin
for style in ("default", "red", "on_blue")
for justify in (:left, :center, :right)
# ----------------------------- text only content ---------------------------- #
testpanel(
Panel("t"; style=style), 88, 3
)
testpanel(
Panel("test"; style=style), 88, 3
)
testpanel(
Panel("1234\n123456789012"; style=style), 88, 4
)
testpanel(
Panel("나랏말싸미 듕귁에 달아"; style=style), 88, 3
)
testpanel(
Panel("나랏말싸미 듕귁에 달아\n1234567890123456789012"; style=style), 88, 4
)
testpanel(
Panel("."^1500; style=style), 88, 21
)
# ------------------------------- nested panels ------------------------------ #
testpanel(
Panel(
Panel("test");
fit=true, style=style), 94, 5
)
testpanel(
Panel(
Panel(Panel("."); fit=true, style=style);
fit=true, style=style), 100, 7
)
testpanel(
Panel(
Panel("."^250); fit=true, style=style
), 94, 8
)
testpanel(
Panel(
Panel("test"; style=style);
), 94, 5
)
testpanel(
Panel(
Panel(Panel("."; style=style); style=style);
), 100, 7
)
testpanel(
Panel(
Panel("."^250; style=style);
), 94, 8
)
testpanel(
Panel(
Panel("t1"; style=style),
Panel("t2"; style=style),
), 94, 8
)
testpanel(
Panel(
Panel("test", width=22); width=30, height=8
), 30, 8
)
testpanel(
Panel(
Panel("test", width=42); width=30, height=8
), 48, 8
)
testpanel(
Panel(
Panel("test", width=42,height=12); width=30, height=8
), 48, 14
)
end
end
end
@testset "\e[34mPanel + renderables" begin
testpanel(
Panel(
RenderableText("x".^5)
), 88, 3
)
testpanel(
Panel(
RenderableText("x".^500)
), 88, nothing
)
testpanel(
Panel(
RenderableText("x".^5); fit=true
), 11, 3
)
testpanel(
Panel(
RenderableText("x".^500); fit=true
), displaysize(stdout)[2]-4, nothing
)
end
@testset "\e[34mPANEL - titles" begin
for fit in (true, false)
for justify in (:left, :center, :right)
for style in ("red", "bold", "default", "on_green")
testpanel(
Panel("."^50, title="test",
title_style=style,
title_justify=justify,
subtitle="subtest",
subtitle_style=style,
subtitle_justify=justify,
fit=fit
),
fit ? nothing : 88,
nothing
)
testpanel(
Panel(
Panel("."^50, title="test",
title_style=style,
title_justify=justify,
subtitle="subtest",
subtitle_style=style,
subtitle_justify=justify,
fit=fit,
)
),
fit ? nothing : 94,
nothing
)
end
end
end
end
@testset "\e[34mTBOX" begin
w = displaysize(stdout)[2]
for justify in (:left, :center, :right)
testpanel(
TextBox(
"nofit"^25;
width=1000,
justify=justify
),w - 4, nothing)
testpanel(
TextBox(
"truncate"^25;
width=100,
fit=:truncate,
justify=justify
), 100, 3)
testpanel(
TextBox(
"truncate"^25;
width=100,
justify=justify
), 100, 7)
testpanel(
TextBox(
"truncate"^8;
fit=:fit,
justify=justify
), 68, 4)
testpanel(
TextBox(
"[red]truncate[/red]"^8;
fit=:fit,
justify=justify
), 68, 4)
testpanel(
TextBox(
"[red]truncate[/red]test"^8;
fit=:fit,
justify=justify
), 100, 4)
testpanel(TextBox(
"[red]tru\nncate[/red]test"^1;
fit=:fit,
justify=justify
), 13, 7)
end
end
@testset "\e[34mPanel - padding" begin
p = Panel("."^24; padding = [4, 4, 2, 2])
testpanel(p, 88, 7)
# @test string(p) == "\e[22m╭──────────────────────────────────────────────────────────────────────────────────────╮\e[22m\n\e[22m│\e[22m \e[22m│\e[22m\n\e[22m│\e[22m \e[22m│\e[22m\n\e[22m│\e[22m ........................ \e[22m│\e[22m\n\e[22m│\e[22m \e[22m│\e[22m\n\e[22m│\e[22m \e[22m│\e[22m\n\e[22m╰──────────────────────────────────────────────────────────────────────────────────────╯\e[22m"
p = Panel("."^24; padding = [4, 4, 2, 2], fit=true)
testpanel(p, 34, 7)
end | [
27,
7856,
261,
480,
29,
73,
567,
35029,
862,
293,
86,
271,
14,
40596,
13,
20362,
27,
456,
62,
30783,
29,
15,
198,
11748,
35118,
25,
18810,
11,
8255,
14253,
628,
198,
198,
31,
9288,
2617,
37082,
68,
58,
2682,
76,
26639,
532,
645,
... | 1.572339 | 5,336 |
<reponame>mschauer/Gadfly.jl<filename>src/statistics.jl
module Stat
import Gadfly
import StatsBase
using DataArrays
using Compose
using Color
using Loess
using Hexagons
import Gadfly: Scale, Coord, element_aesthetics, default_scales, isconcrete,
nonzero_length, setfield!
import StatsBase: bandwidth, kde
import Distributions: Uniform
import Iterators: chain, cycle, product, partition
include("bincount.jl")
# Apply a series of statistics.
#
# Args:
# stats: Statistics to apply in order.
# scales: Scales used by the plot.
# aes: A Aesthetics instance.
#
# Returns:
# Nothing, modifies aes.
#
function apply_statistics(stats::Vector{Gadfly.StatisticElement},
scales::Dict{Symbol, Gadfly.ScaleElement},
coord::Gadfly.CoordinateElement,
aes::Gadfly.Aesthetics)
for stat in stats
apply_statistic(stat, scales, coord, aes)
end
nothing
end
immutable Nil <: Gadfly.StatisticElement
end
const nil = Nil
immutable Identity <: Gadfly.StatisticElement
end
function apply_statistic(stat::Identity,
scales::Dict{Symbol, Gadfly.ScaleElement},
coord::Gadfly.CoordinateElement,
aes::Gadfly.Aesthetics)
nothing
end
const identity = Identity
immutable HistogramStatistic <: Gadfly.StatisticElement
minbincount::Int
maxbincount::Int
orientation::Symbol
function HistogramStatistic(; bincount=nothing,
minbincount=3,
maxbincount=150,
orientation::Symbol=:vertical)
if bincount != nothing
new(bincount, bincount, orientation)
else
new(minbincount, maxbincount, orientation)
end
end
end
element_aesthetics(::HistogramStatistic) = [:x]
const histogram = HistogramStatistic
# Bin one aesthetic (:x or :y, depending on orientation) into histogram bars,
# writing bar extents into (min, max) aesthetics and counts into the other axis.
# With a color aesthetic bound, bars are computed per color group and stacked.
function apply_statistic(stat::HistogramStatistic,
                         scales::Dict{Symbol, Gadfly.ScaleElement},
                         coord::Gadfly.CoordinateElement,
                         aes::Gadfly.Aesthetics)
    # Select which aesthetic fields to read/write based on orientation.
    if stat.orientation == :horizontal
        var = :y
        othervar = :x
        minvar = :ymin
        maxvar = :ymax
        drawmaxvar = :xdrawmax
        labelvar = :x_label
    else
        var = :x
        othervar = :y
        minvar = :xmin
        maxvar = :xmax
        drawmaxvar = :ydrawmax
        labelvar = :y_label
    end

    Gadfly.assert_aesthetics_defined("HistogramStatistic", aes, var)

    values = getfield(aes, var)

    if stat.minbincount > stat.maxbincount
        error("Histogram minbincount > maxbincount")
    end

    # Degenerate case: no data at all; emit a single empty bar.
    if isempty(getfield(aes, var))
        setfield!(aes, minvar, Float64[1.0])
        setfield!(aes, maxvar, Float64[1.0])
        setfield!(aes, othervar, Float64[0.0])
        return
    end

    if haskey(scales, var) && isa(scales[var], Scale.DiscreteScale)
        # Discrete scale: one bin per integer level.
        x_min = minimum(values)
        x_max = maximum(values)
        d = x_max - x_min + 1
        bincounts = zeros(Int, d)
        for x in values
            bincounts[x - x_min + 1] += 1
        end
    else
        # Continuous scale: let the bin-count heuristic choose d.
        d, bincounts = choose_bin_count_1d(values,
                                           stat.minbincount,
                                           stat.maxbincount)
    end

    x_min = Gadfly.concrete_minimum(values)
    x_max = Gadfly.concrete_maximum(values)
    binwidth = (x_max - x_min) / d

    if aes.color === nothing
        # Single series: bar j covers [x_min + (j-1)w, x_min + jw].
        setfield!(aes, minvar, Array(Float64, d))
        setfield!(aes, maxvar, Array(Float64, d))
        setfield!(aes, othervar, Array(Float64, d))
        for j in 1:d
            getfield(aes, minvar)[j] = x_min + (j - 1) * binwidth
            getfield(aes, maxvar)[j] = x_min + j * binwidth
            getfield(aes, othervar)[j] = bincounts[j]
        end
    else
        # Group values by color, then rebin each group over the same grid.
        groups = Dict()
        for (x, c) in zip(values, cycle(aes.color))
            if !Gadfly.isconcrete(x)
                continue
            end

            if !haskey(groups, c)
                groups[c] = Float64[x]
            else
                push!(groups[c], x)
            end
        end
        setfield!(aes, minvar, Array(Float64, d * length(groups)))
        setfield!(aes, maxvar, Array(Float64, d * length(groups)))
        setfield!(aes, othervar, Array(Float64, d * length(groups)))
        colors = Array(ColorValue, d * length(groups))

        x_min = Gadfly.concrete_minimum(values)
        x_max = Gadfly.concrete_maximum(values)

        # Running per-bin totals across groups, used to size the draw area
        # for stacked bars.
        stack_height = zeros(Int, d)

        for (i, (c, xs)) in enumerate(groups)
            fill!(bincounts, 0)
            for x in xs
                if !Gadfly.isconcrete(x)
                    continue
                end
                bin = max(1, min(d, int(ceil((x - x_min) / binwidth))))
                bincounts[bin] += 1
            end
            stack_height += bincounts[1:d]

            for j in 1:d
                idx = (i-1)*d + j
                getfield(aes, minvar)[idx] = x_min + (j - 1) * binwidth
                getfield(aes, maxvar)[idx] = x_min + j * binwidth
                getfield(aes, othervar)[idx] = bincounts[j]
                colors[idx] = c
            end
        end

        # Expand the draw max so the tallest stack fits.
        drawmax = float64(maximum(stack_height))
        aes_drawmax = getfield(aes, drawmaxvar)
        if aes_drawmax === nothing || aes_drawmax < drawmax
            setfield!(aes, drawmaxvar, drawmax)
        end

        aes.color = PooledDataArray(colors)
    end
    setfield!(aes, labelvar, Scale.identity_formatter)
end
# Replaces :x with a kernel density estimate evaluated on a grid of
# `n` points, writing the density values to :y.
immutable DensityStatistic <: Gadfly.StatisticElement
    # Number of points sampled
    n::Int

    function DensityStatistic(n=300)
        new(n)
    end
end

const density = DensityStatistic

element_aesthetics(::DensityStatistic) = [:x, :y]
# Kernel density estimation over :x, either pooled or grouped by color.
# Overwrites aes.x with grid points and aes.y with the estimated density.
function apply_statistic(stat::DensityStatistic,
                         scales::Dict{Symbol, Gadfly.ScaleElement},
                         coord::Gadfly.CoordinateElement,
                         aes::Gadfly.Aesthetics)
    # `kde` comes from StatsBase (see module imports); bail if unavailable.
    if !isdefined(:kde)
        error("KDE is currently not available for your version of Julia.")
    end

    Gadfly.assert_aesthetics_defined("DensityStatistic", aes, :x)

    if aes.color === nothing
        if !isa(aes.x[1], Real)
            error("Kernel density estimation only works on Real types.")
        end

        x_f64 = convert(Vector{Float64}, aes.x)
        # When will stat.n ever be <= 1? Seems pointless
        # certainly its length will always be 1
        window = stat.n > 1 ? bandwidth(x_f64) : 0.1
        f = kde(x_f64, width=window, npoints=stat.n)
        aes.x = f.x
        aes.y = f.density
    else
        # Group x values by color, estimate a density per group, and
        # concatenate the results with matching color entries.
        groups = Dict()
        for (x, c) in zip(aes.x, cycle(aes.color))
            if !haskey(groups, c)
                groups[c] = Float64[x]
            else
                push!(groups[c], x)
            end
        end

        colors = Array(ColorValue, 0)
        aes.x = Array(Float64, 0)
        aes.y = Array(Float64, 0)
        for (c, xs) in groups
            window = stat.n > 1 ? bandwidth(xs) : 0.1
            f = kde(xs, width=window, npoints=stat.n)
            append!(aes.x, f.x)
            append!(aes.y, f.density)
            for _ in 1:length(f.x)
                push!(colors, c)
            end
        end
        aes.color = PooledDataArray(colors)
    end
    aes.y_label = Gadfly.Scale.identity_formatter
end
# Bins (x, y) pairs into a 2D histogram; bin counts are mapped to color.
#
# Explicit xbincount/ybincount force exact counts; otherwise counts are
# chosen automatically within the given min/max bounds.
immutable Histogram2DStatistic <: Gadfly.StatisticElement
    xminbincount::Int
    xmaxbincount::Int
    yminbincount::Int
    ymaxbincount::Int

    function Histogram2DStatistic(; xbincount=nothing,
                                    xminbincount=3,
                                    xmaxbincount=150,
                                    ybincount=nothing,
                                    yminbincount=3,
                                    ymaxbincount=150)
        if xbincount != nothing
            xminbincount = xbincount
            xmaxbincount = xbincount
        end

        if ybincount != nothing
            yminbincount = ybincount
            ymaxbincount = ybincount
        end

        new(xminbincount, xmaxbincount, yminbincount, ymaxbincount)
    end
end

element_aesthetics(::Histogram2DStatistic) = [:x, :y, :color]

# Counts are continuous, so default to a continuous color scale.
default_scales(::Histogram2DStatistic) = [Gadfly.Scale.continuous_color()]

const histogram2d = Histogram2DStatistic
# Bin (x, y) pairs into a 2D histogram. Rectangle extents are written to
# xmin/xmax and ymin/ymax (or the categorical :x/:y values), and each
# nonempty bin's count is pushed through the continuous color scale.
function apply_statistic(stat::Histogram2DStatistic,
                         scales::Dict{Symbol, Gadfly.ScaleElement},
                         coord::Gadfly.CoordinateElement,
                         aes::Gadfly.Aesthetics)
    Gadfly.assert_aesthetics_defined("Histogram2DStatistic", aes, :x, :y)

    x_min, x_max = Gadfly.concrete_minimum(aes.x), Gadfly.concrete_maximum(aes.x)
    y_min, y_max = Gadfly.concrete_minimum(aes.y), Gadfly.concrete_maximum(aes.y)

    # Discrete scales get exactly one bin per level.
    if haskey(scales, :x) && isa(scales[:x], Scale.DiscreteScale)
        x_categorial = true
        xminbincount = x_max - x_min + 1
        xmaxbincount = xminbincount
    else
        x_categorial = false
        xminbincount = stat.xminbincount
        xmaxbincount = stat.xmaxbincount
    end

    if haskey(scales, :y) && isa(scales[:y], Scale.DiscreteScale)
        y_categorial = true
        yminbincount = y_max - y_min + 1
        ymaxbincount = yminbincount
    else
        y_categorial = false
        yminbincount = stat.yminbincount
        ymaxbincount = stat.ymaxbincount
    end

    dy, dx, bincounts = choose_bin_count_2d(aes.x, aes.y,
                                            xminbincount, xmaxbincount,
                                            yminbincount, ymaxbincount)

    # Bin widths in each dimension.
    wx = x_categorial ? 1 : (x_max - x_min) / dx
    # BUG FIX: the y bin width was computed with dx, stretching/squashing
    # rectangles whenever dx != dy; divide the y span by dy.
    wy = y_categorial ? 1 : (y_max - y_min) / dy

    # Number of nonempty bins; only those are emitted.
    n = 0
    for cnt in bincounts
        if cnt > 0
            n += 1
        end
    end

    if x_categorial
        aes.x = Array(Int64, n)
    else
        aes.xmin = Array(Float64, n)
        aes.xmax = Array(Float64, n)
    end

    if y_categorial
        aes.y = Array(Int64, n)
    else
        aes.ymin = Array(Float64, n)
        aes.ymax = Array(Float64, n)
    end

    k = 1
    for i in 1:dy, j in 1:dx
        cnt = bincounts[i, j]
        if cnt > 0
            if x_categorial
                aes.x[k] = x_min + (j - 1)
            else
                aes.xmin[k] = x_min + (j - 1) * wx
                aes.xmax[k] = x_min + j * wx
            end
            if y_categorial
                aes.y[k] = y_min + (i - 1)
            else
                aes.ymin[k] = y_min + (i - 1) * wy
                aes.ymax[k] = y_min + i * wy
            end
            k += 1
        end
    end
    @assert k - 1 == n

    if !haskey(scales, :color)
        error("Histogram2DStatistic requires a color scale.")
    end
    color_scale = scales[:color]
    if !(typeof(color_scale) <: Scale.ContinuousColorScale)
        error("Histogram2DStatistic requires a continuous color scale.")
    end

    aes.color_key_title = "Count"

    # Map bin counts to colors through the scale (same nonempty-bin order
    # as the rectangle loop above).
    data = Gadfly.Data()
    data.color = Array(Int, n)
    k = 1
    for cnt in bincounts
        if cnt > 0
            data.color[k] = cnt
            k += 1
        end
    end

    if x_categorial
        aes.x = PooledDataArray(aes.x)
    end

    if y_categorial
        aes.y = PooledDataArray(aes.y)
    end

    Scale.apply_scale(color_scale, [aes], data)
    nothing
end
# Find reasonable places to put tick marks and grid lines.
immutable TickStatistic <: Gadfly.StatisticElement
    in_vars::Vector{Symbol}  # aesthetics scanned to find the value range
    out_var::String          # "x" or "y": prefix of the output aesthetics

    # fixed ticks, or nothing
    ticks::Union(Nothing, AbstractArray)
end

# Tick statistic for the x axis; `ticks` forces fixed tick positions.
function xticks(ticks::Union(Nothing, AbstractArray)=nothing)
    TickStatistic([:x, :xmin, :xmax, :xdrawmin, :xdrawmax], "x", ticks)
end

# Tick statistic for the y axis; also scans boxplot hinge/fence aesthetics.
function yticks(ticks::Union(Nothing, AbstractArray)=nothing)
    TickStatistic(
        [:y, :ymin, :ymax, :middle, :lower_hinge, :upper_hinge,
         :lower_fence, :upper_fence, :ydrawmin, :ydrawmax], "y", ticks)
end
# Apply a tick statistic.
#
# Args:
# stat: statistic.
# aes: aesthetics.
#
# Returns:
# nothing
#
# Modifies:
# aes
#
# Compute tick marks, grid lines, and view bounds for one axis.
#
# Scans the statistic's input aesthetics (or a pre-grouped aesthetic) for
# the value range, honors forced ticks and coordinate/view constraints,
# then writes the `tick`, `grid`, `tick_label`, `viewmin`, and `viewmax`
# aesthetics for `stat.out_var`.
function apply_statistic(stat::TickStatistic,
                         scales::Dict{Symbol, Gadfly.ScaleElement},
                         coord::Gadfly.CoordinateElement,
                         aes::Gadfly.Aesthetics)
    in_group_var = symbol(string(stat.out_var, "group"))
    minval, maxval = nothing, nothing
    if getfield(aes, in_group_var) === nothing
        in_values = {}
        categorical = true
        for var in stat.in_vars
            vals = getfield(aes, var)
            # Any non-pooled input makes the axis non-categorical.
            if vals != nothing && !isa(vals, PooledDataArray)
                categorical = false
            end
            if vals != nothing
                if minval == nothing
                    minval = first(vals)
                end
                if maxval == nothing
                    maxval = first(vals)
                end
                T = isempty(vals) ? eltype(vals) : typeof(first(vals))

                # Marker sizes widen the effective range so markers fit.
                if stat.out_var == "x"
                    dsize = aes.xsize === nothing ? [nothing] : aes.xsize
                elseif stat.out_var == "y"
                    dsize = aes.ysize === nothing ? [nothing] : aes.ysize
                else
                    dsize = [nothing]
                end

                size = aes.size === nothing ? [nothing] : aes.size

                for (val, s, ds) in zip(vals, cycle(size), cycle(dsize))
                    if !Gadfly.isconcrete(val) || !isfinite(val)
                        continue
                    end
                    if val < minval || !isfinite(minval)
                        minval = val
                    end

                    if val > maxval || !isfinite(maxval)
                        maxval = val
                    end

                    if s != nothing
                        minval = min(minval, val - s)
                        maxval = max(maxval, val + s)
                    end

                    if ds != nothing
                        minval = min(minval, val - ds)
                        maxval = max(maxval, val + ds)
                    end
                end

                push!(in_values, vals)
            end
        end

        if isempty(in_values)
            return
        end

        in_values = chain(in_values...)
    else
        # BUG FIX: this read was `getfield(aes. in_group_var)` — a field
        # access with a stray dot, yielding a one-argument getfield call
        # that errors at runtime. The group aesthetic must be looked up by
        # the symbol computed above.
        vals = getfield(aes, in_group_var)
        in_values = vals
        minval = Gadfly.concrete_minimum(in_values)
        maxval = Gadfly.concrete_maximum(in_values)
        categorical = true
    end

    # consider forced tick marks
    if stat.ticks != nothing
        minval = min(minval, minimum(stat.ticks))
        maxval = max(maxval, maximum(stat.ticks))
    end

    # TODO: handle the outliers aesthetic

    n = Gadfly.concrete_length(in_values)

    # take into account a forced viewport in cartesian coordinates.
    if typeof(coord) == Coord.Cartesian
        if stat.out_var == "x"
            if !is(coord.xmin, nothing)
                minval = min(minval, coord.xmin)
            end
            if !is(coord.xmax, nothing)
                maxval = max(maxval, coord.xmax)
            end
        elseif stat.out_var == "y"
            if !is(coord.ymin, nothing)
                minval = min(minval, coord.ymin)
            end
            if !is(coord.ymax, nothing)
                maxval = max(maxval, coord.ymax)
            end
        end
    end

    # check the x/yviewmin/max pseudo-aesthetics
    if stat.out_var == "x"
        if aes.xviewmin != nothing
            minval = aes.xviewmin
        end
        if aes.xviewmax != nothing
            maxval = aes.xviewmax
        end
    elseif stat.out_var == "y"
        if aes.yviewmin != nothing
            minval = aes.yviewmin
        end
        if aes.yviewmax != nothing
            maxval = aes.yviewmax
        end
    end

    # all the input values in order.
    if stat.ticks != nothing
        # Forced ticks are used verbatim for both ticks and grid lines.
        grids = ticks = stat.ticks
        viewmin = minval
        viewmax = maxval
    elseif categorical
        ticks = Set()
        for val in in_values
            push!(ticks, val)
        end
        ticks = Float64[t for t in ticks]
        sort!(ticks)

        maxgap = 0
        for (i, j) in partition(ticks, 2, 1)
            if j - i > maxgap
                maxgap = j - i
            end
        end

        # Too many levels or large gaps: fall back to optimized ticks.
        if length(ticks) > 20 || maxgap > 1
            ticks, viewmin, viewmax = Gadfly.optimize_ticks(minval, maxval)
            if ticks[1] == 0
                ticks[1] = 1
            end
            grids = ticks
        else
            # Grid lines go between categorical levels, not on them.
            grids = (ticks .- 0.5)[2:end]
        end
        viewmin = minimum(ticks)
        viewmax = maximum(ticks)
    else
        minval, maxval = promote(minval, maxval)
        ticks, viewmin, viewmax =
            Gadfly.optimize_ticks(minval, maxval, extend_ticks=true)
        grids = ticks
    end

    # We use the first label function we find for any of the aesthetics. I'm
    # not positive this is the right thing to do, or what would be.
    labeler = getfield(aes, symbol(string(stat.out_var, "_label")))

    setfield!(aes, symbol(string(stat.out_var, "tick")), ticks)
    setfield!(aes, symbol(string(stat.out_var, "grid")), grids)
    setfield!(aes, symbol(string(stat.out_var, "tick_label")), labeler)

    # Only widen existing view bounds, never shrink them.
    viewmin_var = symbol(string(stat.out_var, "viewmin"))
    if getfield(aes, viewmin_var) === nothing ||
       getfield(aes, viewmin_var) > viewmin
        setfield!(aes, viewmin_var, viewmin)
    end

    viewmax_var = symbol(string(stat.out_var, "viewmax"))
    if getfield(aes, viewmax_var) === nothing ||
       getfield(aes, viewmax_var) < viewmax
        setfield!(aes, viewmax_var, viewmax)
    end

    nothing
end
# Computes boxplot summaries (hinges, fences, outliers) from :y values.
immutable BoxplotStatistic <: Gadfly.StatisticElement
end

element_aesthetics(::BoxplotStatistic) = [:x, :y]

const boxplot = BoxplotStatistic
# Summarize :y values into boxplot aesthetics, one box per (x, color) group:
# quartile hinges, 1.5*IQR fences clamped to data points, and outliers.
function apply_statistic(stat::BoxplotStatistic,
                         scales::Dict{Symbol, Gadfly.ScaleElement},
                         coord::Gadfly.CoordinateElement,
                         aes::Gadfly.Aesthetics)
    Gadfly.assert_aesthetics_defined("BoxplotStatistic", aes, :y)

    groups = Dict()

    # Missing x/color aesthetics collapse to a single group key.
    aes_x = aes.x === nothing ? [nothing] : aes.x
    aes_color = aes.color === nothing ? [nothing] : aes.color

    T = eltype(aes.y)
    for (x, y, c) in zip(cycle(aes_x), aes.y, cycle(aes_color))
        if !haskey(groups, (x, c))
            groups[(x, c)] = Array(T, 0)
        end
        push!(groups[(x, c)], y)
    end

    m = length(groups)
    aes.middle = Array(T, m)
    aes.lower_hinge = Array(T, m)
    aes.upper_hinge = Array(T, m)
    aes.lower_fence = Array(T, m)
    aes.upper_fence = Array(T, m)
    aes.outliers = Vector{T}[]

    for (i, ((x, c), ys)) in enumerate(groups)
        sort!(ys)
        aes.lower_hinge[i], aes.middle[i], aes.upper_hinge[i] =
                quantile!(ys, [0.25, 0.5, 0.75])
        iqr = aes.upper_hinge[i] - aes.lower_hinge[i]

        # Fences snap to the nearest data point inside hinge +/- 1.5*IQR.
        idx = searchsortedfirst(ys, aes.lower_hinge[i] - 1.5iqr)
        aes.lower_fence[i] = ys[idx]

        idx = searchsortedlast(ys, aes.upper_hinge[i] + 1.5iqr)
        aes.upper_fence[i] = ys[idx]

        push!(aes.outliers,
              filter(y -> y < aes.lower_fence[i] || y > aes.upper_fence[i], ys))
    end

    # Rebuild x/color aesthetics in group-key order to match the summaries.
    if !is(aes.x, nothing)
        aes.x = PooledDataArray(Int64[x for (x, c) in keys(groups)])
    end

    if !is(aes.color, nothing)
        aes.color = PooledDataArray(ColorValue[c for (x, c) in keys(groups)],
                                    levels(aes.color))
    end

    nothing
end
# Fits a smooth curve through (x, y) points. Only :loess is implemented;
# `smoothing` is the loess span parameter.
immutable SmoothStatistic <: Gadfly.StatisticElement
    method::Symbol
    smoothing::Float64

    function SmoothStatistic(; method::Symbol=:loess, smoothing::Float64=0.75)
        new(method, smoothing)
    end
end

const smooth = SmoothStatistic

element_aesthetics(::SmoothStatistic) = [:x, :y]
# Replace (x, y) with a loess fit evaluated at num_steps evenly spaced
# points; with a color aesthetic bound, one curve is fit per color group.
function apply_statistic(stat::SmoothStatistic,
                         scales::Dict{Symbol, Gadfly.ScaleElement},
                         coord::Gadfly.CoordinateElement,
                         aes::Gadfly.Aesthetics)
    Gadfly.assert_aesthetics_defined("Stat.smooth", aes, :x, :y)
    Gadfly.assert_aesthetics_equal_length("Stat.smooth", aes, :x, :y)

    if stat.method != :loess
        error("The only Stat.smooth method currently supported is loess.")
    end

    # Number of prediction points per curve.
    num_steps = 750

    if aes.color === nothing
        x_min, x_max = minimum(aes.x), maximum(aes.x)

        if x_min == x_max
            error("Stat.smooth requires more than one distinct x value")
        end

        # loess can't predict points <x_min or >x_max. Make sure that doesn't
        # happen through a floating point fluke
        nudge = 1e-5 * (x_max - x_min)

        local xs, ys
        try
            xs = convert(Vector{Float64}, aes.x)
            ys = convert(Vector{Float64}, aes.y)
        catch
            error("Stat.loess requires that x and y be bound to arrays of plain numbers.")
        end

        aes.x = collect((x_min + nudge):((x_max - x_min) / num_steps):(x_max - nudge))
        aes.y = predict(loess(xs, ys, span=stat.smoothing), aes.x)
    else
        # Collect (xs, ys) per color group.
        groups = Dict()
        aes_color = aes.color === nothing ? [nothing] : aes.color
        for (x, y, c) in zip(aes.x, aes.y, cycle(aes_color))
            if !haskey(groups, c)
                groups[c] = (Float64[], Float64[])
            end

            try
                push!(groups[c][1], x)
                push!(groups[c][2], y)
            catch
                error("Stat.loess requires that x and y be bound to arrays of plain numbers.")
            end
        end

        aes.x = Array(Float64, length(groups) * num_steps)
        aes.y = Array(Float64, length(groups) * num_steps)
        colors = Array(ColorValue, length(groups) * num_steps)

        # Fit and evaluate one loess curve per group, writing each group's
        # num_steps points into its own slice of the output arrays.
        for (i, (c, (xs, ys))) in enumerate(groups)
            x_min, x_max = minimum(xs), maximum(xs)
            if x_min == x_max
                error("Stat.smooth requires more than one distinct x value")
            end
            nudge = 1e-5 * (x_max - x_min)
            steps = collect((x_min + nudge):((x_max - x_min) / num_steps):(x_max - nudge))

            for (j, (x, y)) in enumerate(zip(steps, predict(loess(xs, ys, span=stat.smoothing), steps)))
                aes.x[(i - 1) * num_steps + j] = x
                aes.y[(i - 1) * num_steps + j] = y
                colors[(i - 1) * num_steps + j] = c
            end
        end
        aes.color = PooledDataArray(colors)
    end
end
# Bins (x, y) pairs into hexagonal cells; counts are mapped to color.
immutable HexBinStatistic <: Gadfly.StatisticElement
    xbincount::Int
    ybincount::Int

    function HexBinStatistic(; xbincount=50, ybincount=50)
        new(xbincount, ybincount)
    end
end

const hexbin = HexBinStatistic
# Assign each (x, y) point to a hexagon (offset odd-r coordinates), count
# points per hexagon, and emit hexagon centers with counts as colors.
function apply_statistic(stat::HexBinStatistic,
                         scales::Dict{Symbol, Gadfly.ScaleElement},
                         coord::Gadfly.CoordinateElement,
                         aes::Gadfly.Aesthetics)
    xmin, xmax = minimum(aes.x), maximum(aes.x)
    ymin, ymax = minimum(aes.y), maximum(aes.y)
    xspan, yspan = xmax - xmin, ymax - ymin

    xsize = xspan / stat.xbincount
    ysize = yspan / stat.ybincount

    # Count points per hexagon, keyed by its (q, r) offset coordinates.
    # Inputs are shifted by half a span so coordinates stay positive.
    counts = Dict{(Any, Any), Int}()
    for (x, y) in zip(aes.x, aes.y)
        h = convert(HexagonOffsetOddR, pointhex(x - xmin + xspan/2,
                                                y - ymin + yspan/2,
                                                xsize, ysize))
        idx = (h.q, h.r)
        if !haskey(counts, idx)
            counts[idx] = 1
        else
            counts[idx] += 1
        end
    end

    N = length(counts)
    aes.x = Array(Float64, N)
    aes.y = Array(Float64, N)
    data = Gadfly.Data()
    data.color = Array(Int, N)

    # Emit one center point per occupied hexagon, undoing the shift above.
    k = 1
    for (idx, cnt) in counts
        x, y = center(HexagonOffsetOddR(idx[1], idx[2]), xsize, ysize,
                      xmin - xspan/2, ymin - yspan/2)
        aes.x[k] = x
        aes.y[k] = y
        data.color[k] = cnt
        k += 1
    end
    aes.xsize = [xsize]
    aes.ysize = [ysize]

    color_scale = scales[:color]
    if !(typeof(color_scale) <: Scale.ContinuousColorScale)
        error("HexBinGeometry requires a continuous color scale.")
    end

    Scale.apply_scale(color_scale, [aes], data)
end
# Hex bins are colored by count, so a continuous color scale is the default.
default_scales(::HexBinStatistic) = [Gadfly.Scale.continuous_color()]
# Turns a set of points into a step path made of axis-aligned segments.
# `direction` of :hv steps horizontally then vertically; anything else
# steps vertically first.
immutable StepStatistic <: Gadfly.StatisticElement
    direction::Symbol

    function StepStatistic(; direction::Symbol=:hv)
        return new(direction)
    end
end

const step = StepStatistic

element_aesthetics(::StepStatistic) = [:x, :y]
# Expand n points (sorted by x) into 2n-1 step-path vertices: odd output
# positions are the original points; even positions are the corner between
# consecutive points (:hv takes the next x with the current y; otherwise
# the current x with the next y).
function apply_statistic(stat::StepStatistic,
                         scales::Dict{Symbol, Gadfly.ScaleElement},
                         coord::Gadfly.CoordinateElement,
                         aes::Gadfly.Aesthetics)
    Gadfly.assert_aesthetics_defined("StepStatistic", aes, :x)
    Gadfly.assert_aesthetics_defined("StepStatistic", aes, :y)
    Gadfly.assert_aesthetics_equal_length("StepStatistic", aes, :x, :y)

    points = collect(zip(aes.x, aes.y))
    sort!(points, by=first)

    n = length(points)
    x_step = Array(eltype(aes.x), 2n - 1)
    y_step = Array(eltype(aes.y), 2n - 1)

    for i in 1:(2n-1)
        if isodd(i)
            x_step[i] = points[div(i-1,2)+1][1]
            y_step[i] = points[div(i-1,2)+1][2]
        elseif stat.direction == :hv
            x_step[i] = points[div(i-1,2)+2][1]
            y_step[i] = points[div(i-1,2)+1][2]
        else
            x_step[i] = points[div(i-1,2)+1][1]
            y_step[i] = points[div(i-1,2)+2][2]
        end
    end

    aes.x = x_step
    aes.y = y_step
end
end # module Stat
| [
27,
7856,
261,
480,
29,
907,
354,
16261,
14,
38,
324,
12254,
13,
20362,
27,
34345,
29,
10677,
14,
14269,
3969,
13,
20362,
198,
21412,
5133,
198,
198,
11748,
20925,
12254,
198,
11748,
20595,
14881,
198,
3500,
6060,
3163,
20477,
198,
35... | 1.905493 | 13,544 |
using CSV, DataFrames, Distributed, Dates, LinearAlgebra, Distributions, DelimitedFiles, SharedArrays
# Custom package
using Jevo
# Get date to append to output file
date = Dates.format(Dates.today(), "yyyy_mm_dd")

# Get number of workers as a script argument.
# With no argument, no workers are added and everything runs on the master.
if length(ARGS) == 1
    addprocs(parse(Int64, ARGS[1]))
elseif length(ARGS) > 1
    throw(ArgumentError("Only one command line argument (cores)."))
end
# Import packages needed for all workers
@everywhere begin
    using Jevo
    using Distributions
    using DelimitedFiles
    using SharedArrays
    using LinearAlgebra
end

# Simulation parameters, defined on every worker so `run` (and the
# metadata block below) can read them as globals.
@everywhere begin
    gap = 10          # energy gap parameter
    l_0 = 20          # reference sequence length
    fl = .7l_0        # length-dependent fitness contribution
    f0 = 20l_0        # baseline fitness scale
    κ_arr = 0:2:20    # driver-mutation rates swept over
    n = 4             # alphabet size
    N = 1000          # population size
    steps = 10^8      # substitution steps per repetition
    reps = 200        # repetitions per κ value
    F = Jevo.num_fermi(n, l_0, gap, f0/2N, fl/2N)
    # Mismatch energy matrix: gap/l_0 off-diagonal, zero on the diagonal.
    emat = gap/l_0 * (ones(n, n) - Matrix{Float64}(I, n, n))
end
# Run one rep
# Simulates `steps` substitution steps for a single population and returns
# (scaled final energy, final sequence length).
# NOTE(review): this reads the global `n` from the @everywhere parameter
# block, and the name `run` shadows Base.run within this script.
function run(N, l, emat, F, κ, l_0, gap, steps)
    pop = Jevo.mono_pop(N=N, l=l)
    Jevo.initiate!(pop, emat)
    for i in 1:steps
        Jevo.bp_substitution!(pop, emat, F)
        # Driver mutation with probability κ/N per step.
        if rand() < κ/N
            Jevo.driver_mutation!(pop)
        end
        # Occasionally attempt a length substitution; if the scaled energy
        # exceeds the γ_0 threshold, restart from a fresh population of the
        # current length instead.
        if (rand() < 1/10N) && (Jevo.get_energy(pop, emat)*l_0/length(pop.seqs)/gap < Jevo.γ_0(n))
            Jevo.l_substitution!(pop, emat, F)
        elseif (Jevo.get_energy(pop, emat)*l_0/length(pop.seqs)/gap > Jevo.γ_0(n))
            pop = Jevo.mono_pop(N=N, l=length(pop.seqs))
            Jevo.initiate!(pop, emat)
        end
    end
    return Jevo.get_energy(pop, emat) * l_0/length(pop.seqs)/gap, length(pop.seqs)
end
# Store Metadata
# Append all parameter values to a dated metadata file for reproducibility.
open(date*"_METADATA.txt", "a") do io
    write(io, "gap=$gap\n")
    write(io, "l_0=$l_0\n")
    write(io, "f0=$f0\n")
    write(io, "fl=$fl\n")
    write(io, "kappa=$κ_arr\n")
    write(io, "n=$n\n")
    write(io, "N=$N\n")
    write(io, "steps=$steps\n")
    write(io, "reps=$reps")
end

# Shared result matrices (κ values × repetitions), writable from all workers.
E_results = SharedArray{Float64, 2}(length(κ_arr), reps)
l_list = SharedArray{Float64, 2}(length(κ_arr), reps)
kappa_list = SharedArray{Float64, 2}(length(κ_arr), reps)

# Run simulations and enjoy speed
@sync @distributed for j in 1:reps
    for (i1, κ) in enumerate(κ_arr)
        # Initial sequence length 150 for every repetition.
        E, l= run(N, 150, emat, F, κ, l_0, gap, steps)
        E_results[i1, j] = E
        l_list[i1, j] = l
        kappa_list[i1, j] = κ
    end
    println("Run $j done.")
end

# Flatten the matrices into columns and write the dated results CSV.
df = DataFrame(gamma=[(E_results...)...], l=[(l_list...)...], kappa=[(kappa_list...)...])
CSV.write(date * "_results.csv", df)
| [
3500,
44189,
11,
6060,
35439,
11,
4307,
6169,
11,
44712,
11,
44800,
2348,
29230,
11,
46567,
507,
11,
4216,
320,
863,
25876,
11,
39403,
3163,
20477,
198,
198,
2,
8562,
5301,
198,
3500,
449,
1990,
78,
628,
198,
2,
3497,
3128,
284,
244... | 2.06914 | 1,186 |
<reponame>ranocha/EllipsisNotation.jl
__precompile__()
# Provides the `..` ellipsis for indexing: `A[1, .., 2]` fills the
# unspecified middle dimensions with colons.
module EllipsisNotation

import Base: to_indices, tail

# `..` is a singleton value dispatched on in to_indices below.
const .. = Val{:..}()

# fillcolons(inds, I) builds the tuple of `:`s needed so that the indices
# remaining after `..` align with the trailing dimensions of `inds`.
@inline fillcolons(inds, I) = fillcolons((), inds, I)

# Dimensions and remaining indices exhausted together: done.
@inline fillcolons(colons, ::Tuple{}, ::Tuple{}) = colons
# Indices remain but dimensions are exhausted: the user passed too many.
@noinline fillcolons(colons, ::Tuple{}, ::Tuple) = throw(ArgumentError("too many indices provided"))
# Equal numbers of dimensions and remaining indices: no more colons needed.
@inline fillcolons(colons, t::NTuple{N, <:Any}, ::NTuple{N, <:Any}) where {N} = colons
# More dimensions than remaining indices: add a colon and recurse.
@inline fillcolons(colons, t::Tuple, s::Tuple) = fillcolons((colons..., :), tail(t), s)

# Expand a leading `..` into the right number of colons, then defer to the
# standard to_indices machinery.
@inline function to_indices(A, inds, I::Tuple{Val{:..}, Vararg{Any, N}}) where N
    # Align the remaining indices to the tail of the `inds`
    colons = fillcolons(inds, tail(I))

    to_indices(A, inds, (colons..., tail(I)...))
end

export ..

end # module
| [
27,
7856,
261,
480,
29,
2596,
5374,
64,
14,
30639,
2419,
271,
3673,
341,
13,
20362,
198,
834,
3866,
5589,
576,
834,
3419,
198,
198,
21412,
7122,
2419,
271,
3673,
341,
198,
198,
11748,
7308,
25,
284,
62,
521,
1063,
11,
7894,
198,
9... | 2.473186 | 317 |
<gh_stars>10-100
using ImageSegmentation
using ImageSegmentation.Colors
using ImageSegmentation.FixedPointNumbers
using FileIO
using Statistics
using SparseArrays
using Test
# Tests for flood/flood_fill! across dimensionalities, element types, and
# output targets. NOTE(review): the "Colors" section downloads an image
# over the network at test time.
@testset "flood_fill" begin
    # 0d
    a = reshape([true])
    @test flood(identity, a, CartesianIndex()) == a
    @test_throws ArgumentError flood(!, a, CartesianIndex())
    # 1d
    a = 1:7
    @test flood(==(2), a, CartesianIndex(2)) == (a .== 2)
    @test_throws ArgumentError flood(==(2), a, CartesianIndex(3))
    @test flood(x -> 1 < x < 4, a, CartesianIndex(2)) == [false, true, true, false, false, false, false]
    @test flood(isinteger, a, CartesianIndex(2)) == trues(7)
    # 2d
    ab = [true false false false;
          true true false false;
          true false false true;
          true true true true]
    an0f8 = N0f8.(ab)
    agray = Gray.(an0f8)
    # Every seed inside the region reproduces the full mask; seeds outside throw.
    for (f, a) in ((identity, ab), (==(1), an0f8), (==(1), agray))
        for idx in CartesianIndices(a)
            if f(a[idx])
                @test flood(f, a, idx) == a
            else
                @test_throws ArgumentError flood(f, a, idx)
            end
        end
    end
    @test flood(identity, ab, Int16(1)) == ab
    # 3d
    k = 10
    a = falses(k, k, k)
    idx = CartesianIndex(1,1,1)
    incs = [CartesianIndex(1,0,0), CartesianIndex(0,1,0), CartesianIndex(0,0,1)]
    a[idx] = true
    # Carve a random monotone connected path from one corner to the other.
    while any(<(k), Tuple(idx))
        d = rand(1:3)
        idx += incs[d]
        idx = min(idx, CartesianIndex(k,k,k))
        a[idx] = true
    end
    for idx in eachindex(a)
        if a[idx]
            @test flood(identity, a, idx) == a
        else
            @test_throws ArgumentError flood(identity, a, idx)
        end
    end
    # Colors
    path = download("https://github.com/JuliaImages/juliaimages.github.io/raw/source/docs/src/pkgs/segmentation/assets/flower.jpg")
    img = load(path)
    seg = flood(img, CartesianIndex(87,280); thresh=0.3*sqrt(3)) # TODO: eliminate the sqrt(3) when we transition to `abs2(c) = c ⋅ c`
    @test 0.2*length(seg) <= sum(seg) <= 0.25*length(seg)
    c = mean(img[seg])
    # N0f8 makes for easier approximate testing
    @test N0f8(red(c)) ≈ N0f8(0.855)
    @test N0f8(green(c)) ≈ N0f8(0.161)
    @test N0f8(blue(c)) ≈ N0f8(0.439)
    # flood_fill!
    near3(x) = round(Int, x) == 3
    a0 = [range(2, 4, length=9);]
    a = copy(a0)
    idx = (length(a)+1)÷2
    dest = fill!(similar(a, Bool), false)
    @test flood_fill!(near3, dest, a, idx) == (round.(a) .== 3)
    a = copy(a0)
    flood_fill!(near3, a, idx; fillvalue=-1)
    @test a == [near3(a0[i]) ? -1 : a[i] for i in eachindex(a)]
    a = copy(a0)
    @test_throws ArgumentError flood_fill!(near3, a, idx; fillvalue=-1, isfilled=near3)
    # warning
    a = [1:7;]
    @test_logs (:warn, r"distinct.*incomplete") flood_fill!(<(5), a, 1; fillvalue=3)
    @test a == [3,3,3,4,5,6,7]
    a = [1:7;]
    dest = fill(-1, size(a))
    @test_logs flood_fill!(<(5), dest, a, 1; fillvalue=3) # no warnings
    @test dest == [3,3,3,3,-1,-1,-1]
    a = [1:7;]
    @test_logs flood_fill!(<(5), a, 1; fillvalue=11)
    @test a == [11,11,11,11,5,6,7]
    # This mimics a "big data" application in which we have several structures we want
    # to label with different segment numbers, and the `src` array is too big to fit
    # in memory.
    # It would be better to use a package like SparseArrayKit, which allows efficient
    # insertions and supports arbitrary dimensions.
    a = Bool[0 0 0 0 0 0 1 1;
             1 1 0 0 0 0 0 0]
    dest = spzeros(Int, size(a)...) # stores the nonzero indexes in a Dict
    flood_fill!(identity, dest, a, CartesianIndex(2, 1); fillvalue=1)
    flood_fill!(identity, dest, a, CartesianIndex(1, 7); fillvalue=2)
    @test dest == [0 0 0 0 0 0 2 2;
                   1 1 0 0 0 0 0 0]
end
| [
27,
456,
62,
30783,
29,
940,
12,
3064,
198,
3500,
7412,
41030,
14374,
198,
3500,
7412,
41030,
14374,
13,
5216,
669,
198,
3500,
7412,
41030,
14374,
13,
13715,
12727,
49601,
198,
3500,
9220,
9399,
198,
3500,
14370,
198,
3500,
1338,
17208,... | 2.090308 | 1,816 |
<reponame>mppmu/SIS3316.jl<gh_stars>1-10
# This file is a part of StruckVMEDevices.jl, licensed under the MIT License (MIT).
import Test
# Placeholder test set; no tests for the digitizer interface yet.
Test.@testset "SIS3316Digitizers" begin
end # testset
| [
27,
7856,
261,
480,
29,
76,
381,
30300,
14,
50,
1797,
2091,
1433,
13,
20362,
27,
456,
62,
30783,
29,
16,
12,
940,
198,
2,
770,
2393,
318,
257,
636,
286,
520,
30915,
15996,
1961,
1990,
1063,
13,
20362,
11,
11971,
739,
262,
17168,
... | 2.573333 | 75 |
using STC.SUniward
using Base.Test
using Images
# Check reflectedsetindex! against re-padding from scratch: after each
# single-element write, the symmetric padding of `a` must equal `b`.
function testreflectedsetindex()
    a=zeros(20,30)
    b= padarray(a,Pad(:symmetric,[16,16],[16,16]))
    @testset begin
        for j in 1:size(a,2)
            for i in 1:size(a,1)
                a[i,j]=1
                SUniward.reflectedsetindex!(b,i,j,1)
                c = padarray(a,Pad(:symmetric,[16,16],[16,16]))
                @test sum(abs.(c.parent - b.parent)) == 0
            end
        end
    end
end
# Regression test: suniwardcosts must reproduce the reference costs
# stored alongside a reference cover in suniward.jld.
function testsuniwardcosts()
    cover=load("suniward.jld","cover")
    rho=load("suniward.jld","rho")
    @testset begin
        @test sum(abs.(rho-SUniward.suniwardcosts(cover)))<1e-6
    end
end
# Compare oneitemconv2! (incremental update after changing one pixel)
# against a full sameconv2 recomputation; returns the absolute deviation
# (should be ~0).
function testoneitemconv2_1(flt)
    # a=rand(1:30,30,30)
    a=rand(30,30)
    # i,j,v=3,3,1
    (i,j,v)=rand(1:size(a,1)),rand(1:size(a,2)),rand()
    aa = padarray(SUniward.sameconv2(a,flt),Pad(:symmetric,[size(flt,1),size(flt,2)],[size(flt,1),size(flt,2)]))
    a[i,j]+=v
    sum(abs.(SUniward.sameconv2(a,flt) - SUniward.oneitemconv2!(aa,flt,i,j,v).parent[size(flt,1)+1:end-size(flt,1),size(flt,2)+1:end-size(flt,2)]))
end
# Build a convolution entirely from per-pixel oneitemconv2! updates and
# compare against sameconv2; returns the absolute deviation (should be ~0).
function testoneitemconv2_2(flt)
    a=rand(30,30)
    aa = padarray(zeros(size(a)),Pad(:symmetric,[size(flt,1),size(flt,2)],[size(flt,1),size(flt,2)]))
    for i in 1:size(a,1)
        for j in 1:size(a,2)
            SUniward.oneitemconv2!(aa,flt,i,j,a[i,j])
        end
    end
    sum(abs.(SUniward.sameconv2(a,flt) - aa.parent[size(flt,1)+1:end-size(flt,1),size(flt,2)+1:end-size(flt,2)]))
end
# Verify the weighted incremental convolution: accumulating diff pixels via
# oneitemconv2! with weights 1/(|conv(cover)|+sigma) must match the batch
# computation conv(diff) .* weights. Returns the absolute deviation.
function testweightedconv(flt)
    sigma=1
    cover=rand(30,30)
    stego=rand(30,30)
    diff=cover-stego
    paddedcover= padarray(cover,Pad(:symmetric,[size(flt,1),size(flt,2)],[size(flt,1),size(flt,2)]))
    paddeddiff= padarray(diff,Pad(:symmetric,[size(flt,1),size(flt,2)],[size(flt,1),size(flt,2)]))
    invr = padarray(zeros(size(diff)),Pad(:symmetric,[size(flt,1),size(flt,2)],[size(flt,1),size(flt,2)]))
    rc = SUniward.sameconv2(paddedcover.parent, flt)
    invr.parent.=1./(abs.(rc)+sigma)
    aa = padarray(zeros(size(diff)),Pad(:symmetric,[size(flt,1),size(flt,2)],[size(flt,1),size(flt,2)]))
    for i in 1:size(diff,1)
        for j in 1:size(diff,2)
            SUniward.oneitemconv2!(aa,invr,flt,i,j,diff[i,j])
        end
    end
    costs=SUniward.sameconv2(diff,flt).*invr.parent[size(flt,1)+1:end-size(flt,1),size(flt,2)+1:end-size(flt,2)]
    sum(abs.(costs - aa.parent[size(flt,1)+1:end-size(flt,1),size(flt,2)+1:end-size(flt,2)]))
end
# Verify incremental distortion tracking: update the weighted convolution
# one pixel at a time while accumulating the local change in |cost|, then
# compare the running total against the batch-computed cost sum.
# Returns the absolute deviation (should be ~0).
function testincrementalcosts(flt)
    sigma=1
    cover=rand(30,30)
    stego=rand(30,30)
    diff=cover-stego
    paddedcover= padarray(cover,Pad(:symmetric,[size(flt,1),size(flt,2)],[size(flt,1),size(flt,2)]))
    paddeddiff= padarray(diff,Pad(:symmetric,[size(flt,1),size(flt,2)],[size(flt,1),size(flt,2)]))
    invr = padarray(zeros(size(diff)),Pad(:symmetric,[size(flt,1),size(flt,2)],[size(flt,1),size(flt,2)]))
    rc = SUniward.sameconv2(paddedcover.parent, flt)
    # FIX: use the element-wise broadcast `abs.` — `abs` on an array is
    # deprecated, and the sibling testweightedconv already uses `abs.(rc)`.
    invr.parent.=1./(abs.(rc)+sigma)
    aa = padarray(zeros(size(diff)),Pad(:symmetric,[size(flt,1),size(flt,2)],[size(flt,1),size(flt,2)]))
    distortion=zero(eltype(cover))
    # Filter support offsets around the updated pixel.
    (ul,vl)=(cld(size(flt,1),2),cld(size(flt,2),2))
    (up,vp)=(fld(size(flt,1),2),fld(size(flt,2),2))
    for i in 1:size(diff,1)
        for j in 1:size(diff,2)
            # Window of cost entries affected by updating pixel (i, j).
            idxs=(max(1,i-up):min(size(diff,1),i+ul-1),max(1,j-vp):min(size(diff,2),j+vl-1))
            # println((i,j), " ",idxs)
            distortion-=sumabs(aa[idxs...])
            SUniward.oneitemconv2!(aa,invr,flt,i,j,diff[i,j])
            distortion+=sumabs(aa[idxs...])
        end
    end
    costs=SUniward.sameconv2(diff,flt).*invr.parent[size(flt,1)+1:end-size(flt,1),size(flt,2)+1:end-size(flt,2)]
    sum(abs.(sum(abs.(costs)-distortion)))
end
# Set every pixel of an IncrementalSUniward image to a random stego value
# (2D indexing) and compare the tracked distortion against the batch
# suniwardcosts of cover vs. stego. Returns the absolute deviation.
function testincrementalsuniward()
    cover=UInt8.(rand(0:255,30,30))
    stego=UInt8.(rand(0:255,30,30))
    image=SUniward.IncrementalSUniward(cover)
    for i in 1:size(cover,1)
        for j in 1:size(cover,2)
            image[i,j]=stego[i,j]
        end
    end
    return(abs(SUniward.suniwardcosts(Float64.(cover),Float64.(stego))-image.distortion))
end
# Same as testincrementalsuniward but exercises linear indexing via
# sub2ind. Returns the absolute deviation between tracked and batch cost.
function testincrementalsuniward2()
    cover=UInt8.(rand(0:255,30,30))
    stego=UInt8.(rand(0:255,30,30))
    image=SUniward.IncrementalSUniward(cover)
    for i in 1:size(cover,1)
        for j in 1:size(cover,2)
            image[sub2ind(size(cover),i,j)]=stego[i,j]
        end
    end
    return(abs(SUniward.suniwardcosts(Float64.(cover),Float64.(stego))-image.distortion))
end
# Check LSBcostfun against batch costs for a single LSB flip in both
# directions: 8->9 (cost of setting LSB to 1) and 9->8 (LSB to 0).
# Returns the summed absolute deviation.
function testLSBcostfun()
    cover=UInt8.(rand(0:255,30,30))
    pidx=rand(1:length(cover))
    cover[pidx]=8
    stego=deepcopy(cover)
    stego[pidx]=9
    image=SUniward.IncrementalSUniward(cover);
    w0,w1=SUniward.LSBcostfun(image,pidx)
    r=sum(abs.(w1-SUniward.suniwardcosts(Float64.(cover),Float64.(stego))))
    cover=UInt8.(rand(0:255,30,30))
    pidx=rand(1:length(cover))
    cover[pidx]=9
    stego=deepcopy(cover)
    stego[pidx]=8
    image=SUniward.IncrementalSUniward(cover);
    w0,w1=SUniward.LSBcostfun(image,pidx)
    r+=sum(abs.(w0-SUniward.suniwardcosts(Float64.(cover),Float64.(stego))))
    return(r)
end
# Check that trypmone agrees with two explicit tryvalue calls (+1 and -1)
# at every pixel, while mutating the image along the way. Returns the
# absolute difference between the two accumulated minima.
function testtrypmone()
    cover=UInt8.(rand(0:255,30,30))
    image=SUniward.IncrementalSUniward(cover);
    d1=0.0
    d2=0.0
    for pidx in 1:length(cover)
        d1 += min(SUniward.tryvalue(image,cover[pidx]+1,pidx),SUniward.tryvalue(image,cover[pidx]-1,pidx))
        d2 += min(SUniward.trypmone(image,pidx)...)
        # Random +/-1 change, clamped away from the UInt8 range boundaries.
        s = rand([-1,1]);
        s = (image[pidx] == 0)? 1 : s;
        s = (image[pidx] == 255)? -1 : s;
        image[pidx]=cover[pidx] + s
    end
    return(abs(d1-d2))
end
# Exercise binary STC embedding on an s-by-s center crop of 1.pgm, with
# constraint height h, comparing the STC-reported cost against the true
# S-UNIWARD cost for both the incremental and additive stego backends.
# NOTE(review): reads "1.pgm" from the working directory.
function testbinaryembedding(s=8,h=3)
    println("binary embedding with s = $s h = $h")
    payload = 0.4;
    cover=rawview(channelview(load("1.pgm")));
    l=size(cover);
    s=div(s,2);
    cover = cover[div(l[1],2)-s+1:div(l[1],2)+s,div(l[2],2)-s+1:div(l[2],2)+s];
    numofcols=Int(round(1/payload));
    hhat=rand(1:2^h-1,numofcols);
    numofblocks=Int(floor(length(cover)/numofcols));
    message=rand(0:1,numofblocks);
    embpath = randperm(length(cover));
    stegos = SUniward.IncrementalSUniward(cover,2^h;sigma=1.0,T=Float16);
    @time (stego,distortion)=STC.variablecoding(cover,stegos,message,hhat,h,embpath);
    δ = Int.(cover) - Int.(stego.image)
    println(@sprintf("stc cost = %g true cost = %g pixel increased / decreased %d / %d",distortion,SUniward.suniwardcosts(cover,stego.image),sum(δ .> 0),sum(δ .< 0)));
    # Second run with a fresh incremental backend (timing comparison).
    stegos = SUniward.IncrementalSUniward(cover,2^h;sigma=1.0,T=Float16);
    @time (stego,distortion)=STC.variablecoding(cover,stegos,message,hhat,h,embpath);
    δ = Int.(cover) - Int.(stego.image)
    println(@sprintf("stc cost = %g true cost = %g pixel increased / decreased %d / %d",distortion,SUniward.suniwardcosts(cover,stego.image),sum(δ .> 0),sum(δ .< 0)));
    # Additive-approximation backend for comparison.
    stegos = [SUniward.SUniwardAdd(cover) for i in 1:2^h];
    @time (stego,distortion)=STC.variablecoding(cover,stegos,message,hhat,h,embpath);
    δ = Int.(cover) - Int.(stego.image)
    println(@sprintf("stc cost = %g true cost = %g pixel increased / decreased %d / %d",distortion,SUniward.suniwardcosts(cover,stego.image),sum(δ .> 0),sum(δ .< 0)));
    println()
end
# Ternary (base-3) variant of testbinaryembedding: 3^h trellis states,
# ternary message symbols, incremental vs. additive backends.
# NOTE(review): reads "1.pgm" from the working directory.
function testternaryembedding(s=8,h=3,)
    println("ternary embedding with s = $s h = $h")
    payload = 0.66*0.1;
    cover=rawview(channelview(load("1.pgm")));
    l=size(cover);
    s=div(s,2);
    cover = cover[div(l[1],2)-s+1:div(l[1],2)+s,div(l[2],2)-s+1:div(l[2],2)+s];
    numofcols=Int(round(1/payload));
    hhat=rand(1:3^h-1,numofcols);
    numofblocks=Int(floor(length(cover)/numofcols));
    message=rand(0:2,numofblocks);
    embpath = randperm(length(cover));
    stegos = SUniward.IncrementalSUniward(cover,3^h;sigma=1.0,T=Float16);
    @time (stego,distortion)=STC.variablecoding(cover,stegos,message,hhat,h,embpath,3);
    δ = Int.(cover) - Int.(stego.image)
    println(@sprintf("stc cost = %g true cost = %g pixel increased / decreased %d / %d",distortion,SUniward.suniwardcosts(cover,stego.image),sum(δ .> 0),sum(δ .< 0)));
    stegos = [SUniward.SUniwardAdd(cover) for i in 1:3^h];
    @time (stego,distortion)=STC.variablecoding(cover,stegos,message,hhat,h,embpath,3);
    δ = Int.(cover) - Int.(stego.image);
    println(@sprintf("stc cost = %g true cost = %g pixel increased / decreased %d / %d",distortion,SUniward.suniwardcosts(cover,stego.image),sum(δ .> 0),sum(δ .< 0)));
    println()
end
# Exercise ternary STC embedding on constant covers at the extremes of the
# UInt8 range (1, 2, 254, 255) to check behaviour near pixel saturation,
# where ±1 changes can under/overflow.
function testsaturation()
println("testing saturation")
for k in [1,2,254,255]
h = 3;
# flat 8x8 cover filled with the value k
cover = UInt8.(k*ones(8,8))
payload = 0.66*0.4;
numofcols=Int(round(1/payload));
hhat=rand(1:3^h-1,numofcols);
numofblocks=Int(floor(length(cover)/numofcols));
message=rand(0:2,numofblocks);
embpath = randperm(length(cover));
stegos = [SUniward.SUniwardAdd(cover) for i in 1:3^h];
@time (stego,distortion)=STC.variablecoding(cover,stegos,message,hhat,h,embpath,3);
δ = Int.(cover) - Int.(stego.image)
println(@sprintf("stc cost = %g true cost = %g pixel increased / decreased %d / %d",distortion,SUniward.suniwardcosts(cover,stego.image),sum(δ .> 0),sum(δ .< 0)));
end
println()
end
"""
    testadditiveembedding(h=3, s=8, p=0.3)

Benchmark additive STC embedding with an even/odd LSB cost model on a
central crop of "1.pgm" at relative payload `p`, then run the equivalent
variable-cost embedding for comparison. Prints the STC-estimated cost vs
the true SUniward cost and the number of pixels increased/decreased.
"""
function testadditiveembedding(h=3,s=8,p=0.3)
    payload = p
    cover = rawview(channelview(load("1.pgm")))
    l = size(cover)
    s = div(s,2)
    # take the central (2s)x(2s) crop of the cover image
    cover = cover[div(l[1],2)-s+1:div(l[1],2)+s,div(l[2],2)-s+1:div(l[2],2)+s]
    numofcols = Int(round(1/payload))
    numofblocks = Int(floor(length(cover)/numofcols))
    message = rand(0:1,numofblocks)
    embpath = randperm(length(cover))
    hhat = rand(1:2^h-1,numofcols)
    ρ = SUniward.suniwardcosts(Float64.(cover))
    # (cost of keeping, cost of flipping) the LSB of pixel j.
    # BUGFIX: the original `(cond)? a : b` form is a syntax error on
    # Julia >= 0.7, which requires whitespace around the ternary `?`.
    costfun(img,j) = mod(img[j],2) == 0 ? (0.0,ρ[j]) : (ρ[j],0.0)
    stego,distortion = STC.additivecoding(cover,message,hhat,h,costfun,embpath)
    δ = Int.(cover) - Int.(stego)
    println(@sprintf("stc cost = %g true cost = %g pixel increased / decreased %d / %d",distortion,SUniward.suniwardcosts(cover,stego),sum(δ .> 0),sum(δ .< 0)))
    # compare against the variable-cost coder with additive SUniward models
    stegos = [SUniward.SUniwardAdd(cover) for i in 1:2^h]
    @time (stego,distortion)=STC.variablecoding(cover,stegos,message,hhat,h,embpath,2)
    δ = Int.(cover) - Int.(stego.image)
    println(@sprintf("stc cost = %g true cost = %g pixel increased / decreased %d / %d",distortion,SUniward.suniwardcosts(cover,stego.image),sum(δ .> 0),sum(δ .< 0)))
end
# Smoke tests for the SUniward cost model and STC embedding routines.
testsuniwardcosts()
testreflectedsetindex()
# Named testsets so failures are attributable in the Test summary report.
@testset "testoneitemconv2_1" begin
    @test testoneitemconv2_1([1 2 3; 3 4 5; 6 7 8]) < 1e-6
    @test testoneitemconv2_1([1 2; 3 4; 6 7]) < 1e-6
    @test testoneitemconv2_1([1 2 3; 3 4 5]) < 1e-6
    @test testoneitemconv2_1(randn(16,16)) < 1e-6
end
@testset "testoneitemconv2_2" begin
    @test testoneitemconv2_2([1 2 3; 3 4 5; 6 7 8]) < 1e-6
    @test testoneitemconv2_2([1 2; 3 4; 6 7]) < 1e-6
    @test testoneitemconv2_2([1 2 3; 3 4 5]) < 1e-6
    @test testoneitemconv2_2(randn(16,16)) < 1e-6
end
@testset "testweightedconv" begin
    @test testweightedconv([1 2 3; 3 4 5; 6 7 8]) < 1e-6
    @test testweightedconv([1 2; 3 4; 6 7]) < 1e-6
    @test testweightedconv([1 2 3; 3 4 5]) < 1e-6
    @test testweightedconv(randn(16,16)) < 1e-6
end
@testset "incremental suniward / LSB costfun / trypmone" begin
    @test testincrementalsuniward() < 1e-6
    @test testLSBcostfun() < 1e-6
    @test testtrypmone() < 1e-6
end
# Embedding benchmarks (print timing and cost diagnostics).
testbinaryembedding(32,3)
testternaryembedding(32,3)
testsaturation()
3500,
3563,
34,
13,
50,
3118,
72,
904,
198,
3500,
7308,
13,
14402,
198,
3500,
5382,
198,
198,
8818,
1332,
5420,
12609,
2617,
9630,
3419,
198,
220,
257,
28,
9107,
418,
7,
1238,
11,
1270,
8,
198,
220,
275,
28,
14841,
18747,
7,
64,
... | 1.995199 | 5,624 |
# List of packages to smoke-test by loading each one.
const packages = [
"AverageShiftedHistograms",
"BDF",
"BrainWave",
"Diversity",
"IterativeSolvers",
"RCall",
"SDT",
"Sims",
"TargetedLearning"
]
# Collected (package, exception) pairs for packages that failed to load.
const failures = Set()
# Try to load each package, printing a check mark or a cross.
# NOTE(review): `require` is the pre-1.0 loading API (removed in Julia 1.0);
# this script appears to target Julia <= 0.6 — confirm before modernising.
for package in packages
print(" - ", package)
try
require(package)
println(" ✓")
catch err
push!(failures, (package, err))
println(" ×")
end
end
# Print a report section for every failure.
println("-"^50)
for (package, err) in failures
println("""
# $(package)
$(err)
---
""")
end
# Last expression: return the failure set to the caller/REPL.
failures
| [
9979,
10392,
796,
685,
198,
220,
220,
220,
366,
26287,
2484,
21715,
13749,
26836,
1600,
198,
220,
220,
220,
366,
33,
8068,
1600,
198,
220,
220,
220,
366,
44687,
39709,
1600,
198,
220,
220,
220,
366,
35,
1608,
1600,
198,
220,
220,
22... | 2.19917 | 241 |
# Locate the parameter of `func` whose type is the parent handle and return
# its wrapped identifier, or `nothing` when `func` takes no such parameter.
function retrieve_parent_ex(parent_handle::SpecHandle, func::SpecFunc)
parent_handle_var = findfirst(==(parent_handle.name), func.params.type)
# MLStyle guard-only pattern: the first arm fires only when a matching
# parameter index was found; otherwise fall through to `nothing`.
@match n = func.name begin
if !isnothing(parent_handle_var)
end => wrap_identifier(func.params[parent_handle_var])
_ => nothing
end
end
# Resolve the expression that yields the parent handle for a creation
# function: either directly from the constructor's arguments, or from a
# member of the associated create-info struct. Errors when neither source
# provides the parent.
function retrieve_parent_ex(parent_handle::SpecHandle, create::CreateFunc)
throw_error() = error("Could not retrieve parent ($(parent_handle.name)) variable from the arguments of $create")
@match retrieve_parent_ex(parent_handle, create.func) begin
# the constructor itself takes the parent handle as a parameter
sym::Symbol => sym
# otherwise look for it among the create-info struct members
::Nothing && if !isnothing(create.create_info_param)
end => begin
p = create.create_info_param
s = create.create_info_struct
m_index = findfirst(in([parent_handle.name, :(Ptr{$(parent_handle.name)})]), s.members.type)
if !isnothing(m_index)
m = s.members[m_index]
var_p, var_m = wrap_identifier.((p, m))
# broadcast the property access when the member is an array
broadcast_ex(:(getproperty($var_p, $(QuoteNode(var_m)))), is_arr(m))
else
throw_error()
end
end
_ => throw_error()
end
end
# Map a parent expression to the local variable name it will be bound to:
# a Symbol is used as-is, a broadcasted expression binds `:parents`
# (one parent per element), any other Expr binds `:parent`.
function assigned_parent_symbol(parent_ex)
@match parent_ex begin
::Symbol => parent_ex
::Expr && GuardBy(is_broadcast) => :parents
::Expr => :parent
end
end
"""
    assign_parent(parent_ex)

Return an expression assigning `parent_ex` to its canonical variable name
(see `assigned_parent_symbol`), or `nothing` when `parent_ex` is already a
plain `Symbol` and needs no assignment.
"""
assign_parent(::Symbol) = nothing
function assign_parent(parent_ex::Expr)
    bound_name = assigned_parent_symbol(parent_ex)
    return :($bound_name = $parent_ex)
end
# Return the children of `spec` whose type is one of the known handle types
# (relies on the module-level `spec_handles` table).
function parent_handles(spec::SpecStruct)
filter(x -> x.type ∈ spec_handles.name, children(spec))
end
"""
These handle types are consumed by whatever command uses them. From the specification: "The following object types are consumed when they are passed into a Vulkan command and not further accessed by the objects they are used to create.".
"""
const consumed_handles = handle_by_name.([:VkShaderModule, :VkPipelineCache, :VkValidationCacheEXT])
is_consumed(spec::SpecHandle) = spec ∈ consumed_handles
is_consumed(name::Symbol) = is_consumed(handle_by_name(name))
is_consumed(spec::Union{SpecFuncParam,SpecStructMember}) = is_consumed(spec.type)
# Build the `parent` accessor definition for a wrapped handle type: a short
# one-argument function returning the handle's parent field.
function Parent(def::HandleDefinition)
p = Dict(
:category => :function,
:name => :parent,
:short => true,
# single argument: the wrapped handle, typed with the generated name
:args => [:($(wrap_identifier(def.spec))::$(name(def)))],
# body: access the field holding the parent handle
:body => :($(wrap_identifier(def.spec)).$(wrap_identifier(parent_spec(def.spec)))),
)
Parent(def, p)
end
| [
8818,
19818,
62,
8000,
62,
1069,
7,
8000,
62,
28144,
3712,
22882,
37508,
11,
25439,
3712,
22882,
37,
19524,
8,
198,
220,
220,
220,
2560,
62,
28144,
62,
7785,
796,
1064,
11085,
7,
855,
7,
8000,
62,
28144,
13,
3672,
828,
25439,
13,
... | 2.458045 | 1,013 |
################################################################################
# Common models used in testing
################################################################################
"""
    reldiff(a, b)

Relative difference between `a` and `b`: `sum(|a - b|) / (sum(|a|) + 1e-10)`.
Accepts scalars or arrays of matching shape; the `1e-10` term guards
against division by zero when `a` is all zeros.
"""
function reldiff(a, b)
    # BUGFIX: `abs(a - b)` on arrays errors on Julia >= 0.7; broadcasting
    # works for both scalars and arrays.
    diff = sum(abs.(a .- b))
    norm = sum(abs.(a))
    return diff / (norm + 1e-10)
end
"""
    rand_dims(max_ndim=6)

Return a random shape tuple with 1 to `max_ndim` dimensions, each of
random size in `1:10` (used to fuzz array shapes in tests).
"""
function rand_dims(max_ndim=6)
    ndim = rand(1:max_ndim)
    sizes = rand(1:10, ndim)
    return tuple(sizes...)
end
"""
    mlp2()

Build the symbolic graph of a small two-layer MLP:
data → FullyConnected(1000) → relu → FullyConnected(10).
"""
function mlp2()
    data = mx.Variable(:data)
    fc1 = mx.FullyConnected(data=data, name=:fc1, num_hidden=1000)
    act1 = mx.Activation(data=fc1, act_type=:relu)
    return mx.FullyConnected(data=act1, name=:fc2, num_hidden=10)
end
| [
29113,
29113,
14468,
198,
2,
8070,
4981,
973,
287,
4856,
198,
29113,
29113,
14468,
198,
8818,
302,
335,
733,
7,
64,
11,
275,
8,
198,
220,
814,
796,
2160,
7,
8937,
7,
64,
532,
275,
4008,
198,
220,
2593,
796,
2160,
7,
8937,
7,
64,... | 3.034653 | 202 |
<filename>src/utilities.jl
# Full set of diagnostic plot identifiers supported by the package.
get_all_plots_types() = Set([:fit, :residuals, :normal_checks, :cooksd, :leverage, :homoscedasticity])

# Normalising entry points: every accepted spelling of the plot request is
# funnelled into the `Vector{Symbol}` method below.
get_needed_plots(s::String) = get_needed_plots([s])
get_needed_plots(s::Symbol) = get_needed_plots([s])
get_needed_plots(s::Vector{String}) = get_needed_plots(Symbol.(lowercase.(s)))
get_needed_plots(::Vector{Any}) = get_needed_plots([:none])
get_needed_plots(::Set{Any}) = get_needed_plots([:none])
get_needed_plots(s::Set{Symbol}) = get_needed_plots(collect(s))

"""
    get_needed_plots(p::Vector{Symbol})

Return the `Set` of plots to generate for the request `p`. An empty request
or `:none` yields the empty set, `:all` yields every supported plot, and
unknown symbols are silently ignored.
"""
function get_needed_plots(p::Vector{Symbol})
    (isempty(p) || :none in p) && return Set{Symbol}()
    :all in p && return get_all_plots_types()
    return intersect(get_all_plots_types(), Set(p))
end
"""
function get_robust_cov_stats()
Return all robust covariance estimators.
"""
get_all_robust_cov_stats() = Set([:white, :nw, :hc0, :hc1, :hc2, :hc3])
get_needed_robust_cov_stats(s::String) = return get_needed_robust_cov_stats([s])
get_needed_robust_cov_stats(s::Symbol) = return get_needed_robust_cov_stats([s])
get_needed_robust_cov_stats(s::Vector{String}) = return get_needed_robust_cov_stats(Symbol.(lowercase.(s)))
get_needed_robust_cov_stats(::Vector{Any}) = return get_needed_robust_cov_stats([:none])
get_needed_robust_cov_stats(::Set{Any}) = return get_needed_robust_cov_stats(Set([:none]))
get_needed_robust_cov_stats(s::Set{Symbol}) = return get_needed_robust_cov_stats(collect(s))
function get_needed_robust_cov_stats(s::Vector{Symbol})
needed_white = Vector{Symbol}()
needed_hac = Vector{Symbol}()
length(s) == 0 && return (needed_white, needed_hac)
:none in s && return (needed_white, needed_hac)
if :all in s
s = collect(get_all_robust_cov_stats())
end
:white in s && push!(needed_white, :white)
:hc0 in s && push!(needed_white, :hc0)
:hc1 in s && push!(needed_white, :hc1)
:hc2 in s && push!(needed_white, :hc2)
:hc3 in s && push!(needed_white, :hc3)
:nw in s && push!(needed_hac, :nw)
return (needed_white, needed_hac)
end
"""
function get_all_model_stats()
Returns all statistics availble for the fitted model.
"""
get_all_model_stats() = Set([:coefs, :sse, :mse, :sst, :rmse, :aic, :sigma, :t_statistic, :vif, :r2, :adjr2, :stderror, :t_values, :p_values, :ci,
:diag_normality, :diag_ks, :diag_ad, :diag_jb, :diag_heteroskedasticity, :diag_white, :diag_bp, :press,
:t1ss, :t2ss, :pcorr1, :pcorr2 , :scorr1, :scorr2 ])
get_needed_model_stats(req_stats::String) = return get_needed_model_stats([req_stats])
get_needed_model_stats(req_stats::Symbol) = return get_needed_model_stats(Set([req_stats]))
get_needed_model_stats(req_stats::Vector{String}) = return get_needed_model_stats(Symbol.(lowercase.(req_stats)))
get_needed_model_stats(::Vector{Any}) = return get_needed_model_stats([:none])
get_needed_model_stats(::Set{Any}) = return get_needed_model_stats(Set([:none]))
get_needed_model_stats(req_stats::Set{Symbol}) = get_needed_model_stats(collect(req_stats))
"""
function get_needed_model_stats(req_stats::Vector{Symbol})
return the list of needed statistics given the list of statistics about the model the caller wants.
"""
function get_needed_model_stats(req_stats::Vector{Symbol})
needed = Set([:coefs, :sse, :mse])
default = Set([:coefs, :sse, :mse, :sst, :rmse, :sigma, :t_statistic, :r2, :adjr2, :stderror, :t_values, :p_values, :ci])
full = get_all_model_stats()
unique!(req_stats)
length(req_stats) == 0 && return needed
:none in req_stats && return needed
:all in req_stats && return full
:default in req_stats && union!(needed, default)
:sst in req_stats && push!(needed, :sst)
:t1ss in req_stats && push!(needed, :t1ss)
:t2ss in req_stats && push!(needed, :t2ss)
:press in req_stats && push!(needed, :press)
:rmse in req_stats && push!(needed, :rmse)
:aic in req_stats && push!(needed, :aic)
:sigma in req_stats && push!(needed, :sigma)
:t_statistic in req_stats && push!(needed, :t_statistic)
:vif in req_stats && push!(needed, :vif)
:diag_ks in req_stats && push!(needed, :diag_ks)
:diag_ad in req_stats && push!(needed, :diag_ad)
:diag_jb in req_stats && push!(needed, :diag_jb)
:diag_white in req_stats && push!(needed, :diag_white)
:diag_bp in req_stats && push!(needed, :diag_bp)
if :diag_normality in req_stats
push!(needed, :diag_ks)
push!(needed, :diag_ad)
push!(needed, :diag_jb)
end
if :diag_heteroskedasticity in req_stats
push!(needed, :diag_white)
push!(needed, :diag_bp)
end
if :pcorr1 in req_stats
push!(needed, :t1ss)
push!(needed, :pcorr1)
end
if :pcorr2 in req_stats
push!(needed, :t2ss)
push!(needed, :pcorr2)
end
if :scorr1 in req_stats
push!(needed, :sst)
push!(needed, :t1ss)
push!(needed, :scorr1)
end
if :scorr2 in req_stats
push!(needed, :sst)
push!(needed, :t2ss)
push!(needed, :scorr2)
end
if :r2 in req_stats
push!(needed, :sst)
push!(needed, :r2)
end
if :adjr2 in req_stats
push!(needed, :sst)
push!(needed, :r2)
push!(needed, :adjr2)
end
if :stderror in req_stats
push!(needed, :sigma)
push!(needed, :stderror)
end
if :t_values in req_stats
push!(needed, :sigma)
push!(needed, :stderror)
push!(needed, :t_values)
end
if :p_values in req_stats
push!(needed, :sigma)
push!(needed, :stderror)
push!(needed, :t_values)
push!(needed, :p_values)
end
if :ci in req_stats
push!(needed, :sigma)
push!(needed, :stderror)
push!(needed, :t_statistic)
push!(needed, :ci)
end
return needed
end
"""
function get_all_prediction_stats()
get all the available statistics about the values predicted by a fitted model
"""
get_all_prediction_stats() = Set([:predicted, :residuals, :leverage, :stdp, :stdi, :stdr, :student, :rstudent, :lcli, :ucli, :lclp, :uclp, :press, :cooksd])
get_prediction_stats(req_stats::String) = return get_prediction_stats([req_stats])
get_prediction_stats(req_stats::Vector{String}) = return get_prediction_stats(Symbol.(lowercase.(req_stats)))
get_prediction_stats(::Vector{Any}) = return get_prediction_stats([:none])
get_prediction_stats(::Set{Any}) = return get_prediction_stats(Set([:none]))
get_prediction_stats(req_stats::Set{Symbol}) = return get_prediction_stats(collect(req_stats))
"""
function get_prediction_stats(req_stats::Vector{Symbol})
return the list of needed statistics and the statistics that need to be presentd given the list of statistics about the predictions the caller wants.
"""
function get_prediction_stats(req_stats::Vector{Symbol})
needed = Set([:predicted])
full = get_all_prediction_stats()
present = Set([:predicted])
unique!(req_stats)
length(req_stats) == 0 && return needed, present
:none in req_stats && return needed, present
:all in req_stats && return full, full
:leverage in req_stats && push!(present, :leverage)
:residuals in req_stats && push!(present, :residuals)
if :stdp in req_stats
push!(needed, :leverage)
push!(present, :stdp)
end
if :stdi in req_stats
push!(needed, :leverage)
push!(present, :stdi)
end
if :stdr in req_stats
push!(needed, :leverage)
push!(present, :stdr)
end
if :student in req_stats
push!(needed, :leverage)
push!(needed, :residuals)
push!(needed, :stdr)
push!(present, :student)
end
if :rstudent in req_stats
push!(needed, :leverage)
push!(needed, :residuals)
push!(needed, :stdr)
push!(needed, :student)
push!(present, :rstudent)
end
if :lcli in req_stats
push!(needed, :leverage)
push!(needed, :stdi)
push!(present, :lcli)
end
if :ucli in req_stats
push!(needed, :leverage)
push!(needed, :stdi)
push!(present, :ucli)
end
if :lclp in req_stats
push!(needed, :leverage)
push!(needed, :stdp)
push!(present, :lclp)
end
if :uclp in req_stats
push!(needed, :leverage)
push!(needed, :stdp)
push!(present, :uclp)
end
if :press in req_stats
push!(needed, :residuals)
push!(needed, :leverage)
push!(present, :press)
end
if :cooksd in req_stats
push!(needed, :leverage)
push!(needed, :stdp)
push!(needed, :stdr)
push!(needed, :residuals)
push!(needed, :student)
push!(present, :cooksd)
end
union!(needed, present)
return needed, present
end
"""
function encapsulate_string(s)
(internal) Only used to encapsulate a string into an array.
used exclusively to handle the function ```StatsBase.coefnames``` which sometime return an array or when there is only one element the element alone.
"""
function encapsulate_string(s::String)
return [s]
end
"""
function encapsulate_string(v)
(internal) Only used to encapsulate a string into an array.
used exclusively to handle the function ```StatsBase.coefnames``` which sometime return an array or when there is only one element the element alone.
"""
function encapsulate_string(v::Vector{String})
return v
end
import Printf
"""
macro gprintf(fmt::String)
(internal) used to format with %g
Taken from message published by user o314 at https://discourse.julialang.org/t/printf-with-variable-format-string/3805/6
"""
macro gprintf(fmt::String)
:((io::IO, arg) -> Printf.@printf(io, $fmt, arg))
end
"""
function fmt_pad(s::String, value, pad=0)
(internal) helper to format and pad string for results display
"""
function fmt_pad(s::String, value, pad=0)
fmt = @gprintf("%g")
return rpad(s * sprint(fmt, value), pad)
end
using NamedArrays
"""
function my_namedarray_print([io::IO = stdout], n::NamedArray)
(internal) Print the NamedArray without the type annotation (on the first line).
"""
function my_namedarray_print(io::IO, n)
tmpio = IOBuffer()
show(tmpio, n)
println(io, split(String(take!(tmpio)), "\n", limit=2)[2])
end
my_namedarray_print(n::NamedArray) = my_namedarray_print(stdout::IO, n)
"""
function helper_print_table(io, title, stats::Vector, stats_name::Vector, updformula)
(Internal) Convenience function to display a table of statistics to the user.
"""
function helper_print_table(io::IO, title, stats::Vector, stats_name::Vector, updformula)
println(io, "\n$title")
todelete = [i for (i, v) in enumerate(stats) if isnothing(v)]
deleteat!(stats, todelete)
deleteat!(stats_name, todelete)
m_all_stats = reduce(hcat, stats)
if m_all_stats isa Vector
m_all_stats = reshape(m_all_stats, length(m_all_stats), 1)
end
na = NamedArray(m_all_stats)
setnames!(na, encapsulate_string(string.(StatsBase.coefnames(updformula.rhs))), 1)
setnames!(na, encapsulate_string(string.(stats_name)), 2)
setdimnames!(na, ("Terms", "Stats"))
my_namedarray_print(io, na)
end
# Run the Breusch-Pagan heteroskedasticity test on the residuals and return
# a human-readable summary including the reject / fail-to-reject decision at
# confidence level (1 - α).
function present_breusch_pagan_test(X, residuals, α)
bpt = HypothesisTests.BreuschPaganTest(X, residuals)
pval = pvalue(bpt)
alpha_value= round((1 - α)*100, digits=3)
topresent = string("Breush-Pagan Test (heteroskedasticity of residuals):\n T*R² statistic: $(round(bpt.lm, sigdigits=6)) degrees of freedom: $(round(bpt.dof, digits=6)) p-value: $(round(pval, digits=6))\n")
if pval > α
topresent *= " with $(alpha_value)% confidence: fail to reject null hyposthesis.\n"
else
topresent *= " with $(alpha_value)% confidence: reject null hyposthesis.\n"
end
return topresent
end
# Run White's heteroskedasticity test on the residuals and return a
# human-readable summary including the reject / fail-to-reject decision at
# confidence level (1 - α).
function present_white_test(X, residuals, α)
bpt = HypothesisTests.WhiteTest(X, residuals)
pval = pvalue(bpt)
alpha_value= round((1 - α)*100, digits=3)
topresent = string("White Test (heteroskedasticity of residuals):\n T*R² statistic: $(round(bpt.lm, sigdigits=6)) degrees of freedom: $(round(bpt.dof, digits=6)) p-value: $(round(pval, digits=6))\n")
if pval > α
topresent *= " with $(alpha_value)% confidence: fail to reject null hyposthesis.\n"
else
topresent *= " with $(alpha_value)% confidence: reject null hyposthesis.\n"
end
return topresent
end
"""
    present_kolmogorov_smirnov_test(residuals, α)

Run an approximate one-sample Kolmogorov-Smirnov test of the residuals
against a fitted normal distribution and return a human-readable summary
with the reject / fail-to-reject decision at confidence level (1 - α).
"""
function present_kolmogorov_smirnov_test(residuals, α)
    fitted_residuals = fit(Normal, residuals)
    kst = HypothesisTests.ApproximateOneSampleKSTest(residuals, fitted_residuals)
    pval = pvalue(kst)
    # reported statistic is √n · δ (δ as stored by the test object)
    KS_stat = sqrt(kst.n)*kst.δ
    alpha_value= round((1 - α)*100, digits=3)
    topresent = string("Kolmogorov-Smirnov test (Normality of residuals):\n KS statistic: $(round(KS_stat, sigdigits=6)) observations: $(kst.n) p-value: $(round(pval, digits=6))\n")
    if pval > α
        topresent *= " with $(alpha_value)% confidence: fail to reject null hyposthesis.\n"
    else
        topresent *= " with $(alpha_value)% confidence: reject null hyposthesis.\n"
    end
    # explicit return for consistency with the other present_* helpers
    # (previously relied on the if/else being the last expression)
    return topresent
end
"""
    present_anderson_darling_test(residuals, α)

Run a one-sample Anderson-Darling test of the residuals against a fitted
normal distribution and return a human-readable summary with the
reject / fail-to-reject decision at confidence level (1 - α).
"""
function present_anderson_darling_test(residuals, α)
    fitted_residuals = fit(Normal, residuals)
    adt = HypothesisTests.OneSampleADTest(residuals, fitted_residuals)
    pval = pvalue(adt)
    alpha_value= round((1 - α)*100, digits=3)
    topresent = string("Anderson–Darling test (Normality of residuals):\n A² statistic: $(round(adt.A², digits=6)) observations: $(adt.n) p-value: $(round(pval, digits=6))\n")
    if pval > α
        topresent *= " with $(alpha_value)% confidence: fail to reject null hyposthesis.\n"
    else
        topresent *= " with $(alpha_value)% confidence: reject null hyposthesis.\n"
    end
    # explicit return for consistency with the other present_* helpers
    # (previously relied on the if/else being the last expression)
    return topresent
end
"""
    present_jarque_bera_test(residuals, α)

Run the Jarque-Bera normality test on the residuals and return a
human-readable summary with the reject / fail-to-reject decision at
confidence level (1 - α).
"""
function present_jarque_bera_test(residuals, α)
    jbt = HypothesisTests.JarqueBeraTest(residuals)
    pval = pvalue(jbt)
    alpha_value= round((1 - α)*100, digits=3)
    topresent = string("Jarque-Bera test (Normality of residuals):\n JB statistic: $(round(jbt.JB, digits=6)) observations: $(jbt.n) p-value: $(round(pval, digits=6))\n")
    if pval > α
        topresent *= " with $(alpha_value)% confidence: fail to reject null hyposthesis.\n"
    else
        topresent *= " with $(alpha_value)% confidence: reject null hyposthesis.\n"
    end
    # explicit return for consistency with the other present_* helpers
    # (previously relied on the if/else being the last expression)
    return topresent
end
"""
    warn_sigma(lm, stat)
    warn_sigma(white_needed, hac_needed, stat)

Warn the user when the statistic `stat`, which relies on σ², is requested
together with at least one robust covariance estimator — a signal that the
classical assumptions behind σ² may not hold.
"""
function warn_sigma(lm, stat)
    warn_sigma(lm.white_types, lm.hac_types, stat)
end
function warn_sigma(white_needed, hac_needed, stat)
    if length(white_needed) > 0 || length(hac_needed) > 0
        # BUGFIX: previously `println(io, …)` referenced an undefined `io`,
        # raising UndefVarError whenever the warning triggered; print to
        # stdout instead.
        println("The $(stat) statistic that relies on Sigma^2 has been requested. At least one robust covariance have been requested indicating that the assumptions needed for Sigma^2 may not be present.")
    end
end
"""
    real_sqrt(x)

Elementwise real part of the principal complex square root of `x`:
non-negative inputs give their usual square root, negative inputs give 0
(their principal root is purely imaginary). Broadcasts over arrays.
"""
real_sqrt(x) = @. real(sqrt(complex(x, 0)))
# Predicates classifying formula terms (redundant `? true : false` ternaries
# replaced by the boolean expressions themselves).
isnotintercept(t::AbstractTerm) = !(t isa InterceptTerm)
iscontinuousterm(t::AbstractTerm) = t isa ContinuousTerm
iscategorical(t::AbstractTerm) = t isa CategoricalTerm
"""
    check_cardinality(df::AbstractDataFrame, f, verbose=false)

Check that every combination of the categorical terms of formula `f` has at
least one observation in `df`; when a combination is empty, print a warning
followed by the frequency table. With `verbose=true`, also confirm when no
issue was found.
"""
function check_cardinality(df::AbstractDataFrame, f, verbose=false)
    cate_terms = [a.sym for a in filter(iscategorical, terms(f.rhs))]
    if length(cate_terms) > 0
        freqt = freqtable(df, cate_terms...)
        if count(i -> (i == 0), freqt) > 0
            println("At least one group of categories have no observation. Use frequency tables to identify which one(s).")
            # BUGFIX: my_namedarray_print prints and returns nothing, so the
            # previous `println(my_namedarray_print(freqt))` also printed the
            # literal string "nothing" after the table.
            my_namedarray_print(freqt)
        elseif verbose == true
            println("No issue identified.")
        end
    end
end
27,
34345,
29,
10677,
14,
315,
2410,
13,
20362,
198,
1136,
62,
439,
62,
489,
1747,
62,
19199,
3419,
796,
5345,
26933,
25,
11147,
11,
1058,
411,
312,
723,
82,
11,
1058,
11265,
62,
42116,
11,
1058,
27916,
21282,
11,
1058,
293,
1857,
... | 2.363663 | 6,803 |
<gh_stars>0
# Tag types selecting a colour-matching-function (CMF) table; `colormatch`
# dispatches on these to pick the corresponding standard-observer data.
abstract type CMF end
abstract type CIE1931_CMF <: CMF end
abstract type CIE1964_CMF <: CMF end
abstract type CIE1931J_CMF <: CMF end
abstract type CIE1931JV_CMF <: CMF end
abstract type CIE2006_2_CMF <: CMF end
abstract type CIE2006_10_CMF <: CMF end
"""
colormatch(wavelength)
colormatch(matchingfunction, wavelength)
Evaluate the CIE standard observer color match function.
# Arguments
- matchingfunction (optional): a type used to specify the matching function. Choices include:
- - `CIE1931_CMF` (the default, the CIE 1931 2° matching function)
- - `CIE1964_CMF` (the CIE 1964 10° color matching function)
- - `CIE1931J_CMF` (Judd adjustment to `CIE1931_CMF`)
- - `CIE1931JV_CMF` (Judd-Vos adjustment to `CIE1931_CMF`)
- wavelength: Wavelength of stimulus in nanometers.
Returns the XYZ value of perceived color.
"""
function colormatch(wavelen::Real)
return colormatch(CIE1931_CMF, wavelen)
end
@deprecate cie_color_match colormatch
# Linearly interpolate between rows of the CMF table `tbl`, whose first row
# corresponds to `start` nm with `step` nm between consecutive rows.
# Wavelengths outside the table contribute (0, 0, 0), so the result fades to
# black beyond the table's range. Returns the interpolated XYZ value.
function interpolate_table(tbl, start, step, wavelen)
    n = size(tbl, 1)
    # (the original also computed `stop = start + step * (n - 1)`, which was
    # never used and has been removed)
    i = (wavelen - start) / step
    a = floor(Integer, i) + 1
    ac = 1 <= a <= n ? tbl[a,:] : [0.0, 0.0, 0.0]
    b = ceil(Integer, i) + 1
    bc = 1 <= b <= n ? tbl[b,:] : [0.0, 0.0, 0.0]
    # fractional position between the two bracketing rows
    p = i % 1.0
    ac = p * bc + (1.0 - p) * ac
    return XYZ(ac[1], ac[2], ac[3])
end
# CIE 1931 2° observer: interpolate the 1 nm-resolution table starting at 360 nm.
colormatch(::Type{CIE1931_CMF}, wavelen::Real) =
    interpolate_table(cie1931_cmf_table, 360.0, 1.0, wavelen)
# CIE 1931 2° color matching function, 1nm increments starting at 360nm
const cie1931_cmf_table =
[0.000129900000 0.000003917000 0.000606100000;
0.000145847000 0.000004393581 0.000680879200;
0.000163802100 0.000004929604 0.000765145600;
0.000184003700 0.000005532136 0.000860012400;
0.000206690200 0.000006208245 0.000966592800;
0.000232100000 0.000006965000 0.001086000000;
0.000260728000 0.000007813219 0.001220586000;
0.000293075000 0.000008767336 0.001372729000;
0.000329388000 0.000009839844 0.001543579000;
0.000369914000 0.000011043230 0.001734286000;
0.000414900000 0.000012390000 0.001946000000;
0.000464158700 0.000013886410 0.002177777000;
0.000518986000 0.000015557280 0.002435809000;
0.000581854000 0.000017442960 0.002731953000;
0.000655234700 0.000019583750 0.003078064000;
0.000741600000 0.000022020000 0.003486000000;
0.000845029600 0.000024839650 0.003975227000;
0.000964526800 0.000028041260 0.004540880000;
0.001094949000 0.000031531040 0.005158320000;
0.001231154000 0.000035215210 0.005802907000;
0.001368000000 0.000039000000 0.006450001000;
0.001502050000 0.000042826400 0.007083216000;
0.001642328000 0.000046914600 0.007745488000;
0.001802382000 0.000051589600 0.008501152000;
0.001995757000 0.000057176400 0.009414544000;
0.002236000000 0.000064000000 0.010549990000;
0.002535385000 0.000072344210 0.011965800000;
0.002892603000 0.000082212240 0.013655870000;
0.003300829000 0.000093508160 0.015588050000;
0.003753236000 0.000106136100 0.017730150000;
0.004243000000 0.000120000000 0.020050010000;
0.004762389000 0.000134984000 0.022511360000;
0.005330048000 0.000151492000 0.025202880000;
0.005978712000 0.000170208000 0.028279720000;
0.006741117000 0.000191816000 0.031897040000;
0.007650000000 0.000217000000 0.036210000000;
0.008751373000 0.000246906700 0.041437710000;
0.010028880000 0.000281240000 0.047503720000;
0.011421700000 0.000318520000 0.054119880000;
0.012869010000 0.000357266700 0.060998030000;
0.014310000000 0.000396000000 0.067850010000;
0.015704430000 0.000433714700 0.074486320000;
0.017147440000 0.000473024000 0.081361560000;
0.018781220000 0.000517876000 0.089153640000;
0.020748010000 0.000572218700 0.098540480000;
0.023190000000 0.000640000000 0.110200000000;
0.026207360000 0.000724560000 0.124613300000;
0.029782480000 0.000825500000 0.141701700000;
0.033880920000 0.000941160000 0.161303500000;
0.038468240000 0.001069880000 0.183256800000;
0.043510000000 0.001210000000 0.207400000000;
0.048995600000 0.001362091000 0.233692100000;
0.055022600000 0.001530752000 0.262611400000;
0.061718800000 0.001720368000 0.294774600000;
0.069212000000 0.001935323000 0.330798500000;
0.077630000000 0.002180000000 0.371300000000;
0.086958110000 0.002454800000 0.416209100000;
0.097176720000 0.002764000000 0.465464200000;
0.108406300000 0.003117800000 0.519694800000;
0.120767200000 0.003526400000 0.579530300000;
0.134380000000 0.004000000000 0.645600000000;
0.149358200000 0.004546240000 0.718483800000;
0.165395700000 0.005159320000 0.796713300000;
0.181983100000 0.005829280000 0.877845900000;
0.198611000000 0.006546160000 0.959439000000;
0.214770000000 0.007300000000 1.039050100000;
0.230186800000 0.008086507000 1.115367300000;
0.244879700000 0.008908720000 1.188497100000;
0.258777300000 0.009767680000 1.258123300000;
0.271807900000 0.010664430000 1.323929600000;
0.283900000000 0.011600000000 1.385600000000;
0.294943800000 0.012573170000 1.442635200000;
0.304896500000 0.013582720000 1.494803500000;
0.313787300000 0.014629680000 1.542190300000;
0.321645400000 0.015715090000 1.584880700000;
0.328500000000 0.016840000000 1.622960000000;
0.334351300000 0.018007360000 1.656404800000;
0.339210100000 0.019214480000 1.685295900000;
0.343121300000 0.020453920000 1.709874500000;
0.346129600000 0.021718240000 1.730382100000;
0.348280000000 0.023000000000 1.747060000000;
0.349599900000 0.024294610000 1.760044600000;
0.350147400000 0.025610240000 1.769623300000;
0.350013000000 0.026958570000 1.776263700000;
0.349287000000 0.028351250000 1.780433400000;
0.348060000000 0.029800000000 1.782600000000;
0.346373300000 0.031310830000 1.782968200000;
0.344262400000 0.032883680000 1.781699800000;
0.341808800000 0.034521120000 1.779198200000;
0.339094100000 0.036225710000 1.775867100000;
0.336200000000 0.038000000000 1.772110000000;
0.333197700000 0.039846670000 1.768258900000;
0.330041100000 0.041768000000 1.764039000000;
0.326635700000 0.043766000000 1.758943800000;
0.322886800000 0.045842670000 1.752466300000;
0.318700000000 0.048000000000 1.744100000000;
0.314025100000 0.050243680000 1.733559500000;
0.308884000000 0.052573040000 1.720858100000;
0.303290400000 0.054980560000 1.705936900000;
0.297257900000 0.057458720000 1.688737200000;
0.290800000000 0.060000000000 1.669200000000;
0.283970100000 0.062601970000 1.647528700000;
0.276721400000 0.065277520000 1.623412700000;
0.268917800000 0.068042080000 1.596022300000;
0.260422700000 0.070911090000 1.564528000000;
0.251100000000 0.073900000000 1.528100000000;
0.240847500000 0.077016000000 1.486111400000;
0.229851200000 0.080266400000 1.439521500000;
0.218407200000 0.083666800000 1.389879900000;
0.206811500000 0.087232800000 1.338736200000;
0.195360000000 0.090980000000 1.287640000000;
0.184213600000 0.094917550000 1.237422300000;
0.173327300000 0.099045840000 1.187824300000;
0.162688100000 0.103367400000 1.138761100000;
0.152283300000 0.107884600000 1.090148000000;
0.142100000000 0.112600000000 1.041900000000;
0.132178600000 0.117532000000 0.994197600000;
0.122569600000 0.122674400000 0.947347300000;
0.113275200000 0.127992800000 0.901453100000;
0.104297900000 0.133452800000 0.856619300000;
0.095640000000 0.139020000000 0.812950100000;
0.087299550000 0.144676400000 0.770517300000;
0.079308040000 0.150469300000 0.729444800000;
0.071717760000 0.156461900000 0.689913600000;
0.064580990000 0.162717700000 0.652104900000;
0.057950010000 0.169300000000 0.616200000000;
0.051862110000 0.176243100000 0.582328600000;
0.046281520000 0.183558100000 0.550416200000;
0.041150880000 0.191273500000 0.520337600000;
0.036412830000 0.199418000000 0.491967300000;
0.032010000000 0.208020000000 0.465180000000;
0.027917200000 0.217119900000 0.439924600000;
0.024144400000 0.226734500000 0.416183600000;
0.020687000000 0.236857100000 0.393882200000;
0.017540400000 0.247481200000 0.372945900000;
0.014700000000 0.258600000000 0.353300000000;
0.012161790000 0.270184900000 0.334857800000;
0.009919960000 0.282293900000 0.317552100000;
0.007967240000 0.295050500000 0.301337500000;
0.006296346000 0.308578000000 0.286168600000;
0.004900000000 0.323000000000 0.272000000000;
0.003777173000 0.338402100000 0.258817100000;
0.002945320000 0.354685800000 0.246483800000;
0.002424880000 0.371698600000 0.234771800000;
0.002236293000 0.389287500000 0.223453300000;
0.002400000000 0.407300000000 0.212300000000;
0.002925520000 0.425629900000 0.201169200000;
0.003836560000 0.444309600000 0.190119600000;
0.005174840000 0.463394400000 0.179225400000;
0.006982080000 0.482939500000 0.168560800000;
0.009300000000 0.503000000000 0.158200000000;
0.012149490000 0.523569300000 0.148138300000;
0.015535880000 0.544512000000 0.138375800000;
0.019477520000 0.565690000000 0.128994200000;
0.023992770000 0.586965300000 0.120075100000;
0.029100000000 0.608200000000 0.111700000000;
0.034814850000 0.629345600000 0.103904800000;
0.041120160000 0.650306800000 0.096667480000;
0.047985040000 0.670875200000 0.089982720000;
0.055378610000 0.690842400000 0.083845310000;
0.063270000000 0.710000000000 0.078249990000;
0.071635010000 0.728185200000 0.073208990000;
0.080462240000 0.745463600000 0.068678160000;
0.089739960000 0.761969400000 0.064567840000;
0.099456450000 0.777836800000 0.060788350000;
0.109600000000 0.793200000000 0.057250010000;
0.120167400000 0.808110400000 0.053904350000;
0.131114500000 0.822496200000 0.050746640000;
0.142367900000 0.836306800000 0.047752760000;
0.153854200000 0.849491600000 0.044898590000;
0.165500000000 0.862000000000 0.042160000000;
0.177257100000 0.873810800000 0.039507280000;
0.189140000000 0.884962400000 0.036935640000;
0.201169400000 0.895493600000 0.034458360000;
0.213365800000 0.905443200000 0.032088720000;
0.225749900000 0.914850100000 0.029840000000;
0.238320900000 0.923734800000 0.027711810000;
0.251066800000 0.932092400000 0.025694440000;
0.263992200000 0.939922600000 0.023787160000;
0.277101700000 0.947225200000 0.021989250000;
0.290400000000 0.954000000000 0.020300000000;
0.303891200000 0.960256100000 0.018718050000;
0.317572600000 0.966007400000 0.017240360000;
0.331438400000 0.971260600000 0.015863640000;
0.345482800000 0.976022500000 0.014584610000;
0.359700000000 0.980300000000 0.013400000000;
0.374083900000 0.984092400000 0.012307230000;
0.388639600000 0.987418200000 0.011301880000;
0.403378400000 0.990312800000 0.010377920000;
0.418311500000 0.992811600000 0.009529306000;
0.433449900000 0.994950100000 0.008749999000;
0.448795300000 0.996710800000 0.008035200000;
0.464336000000 0.998098300000 0.007381600000;
0.480064000000 0.999112000000 0.006785400000;
0.495971300000 0.999748200000 0.006242800000;
0.512050100000 1.000000000000 0.005749999000;
0.528295900000 0.999856700000 0.005303600000;
0.544691600000 0.999304600000 0.004899800000;
0.561209400000 0.998325500000 0.004534200000;
0.577821500000 0.996898700000 0.004202400000;
0.594500000000 0.995000000000 0.003900000000;
0.611220900000 0.992600500000 0.003623200000;
0.627975800000 0.989742600000 0.003370600000;
0.644760200000 0.986444400000 0.003141400000;
0.661569700000 0.982724100000 0.002934800000;
0.678400000000 0.978600000000 0.002749999000;
0.695239200000 0.974083700000 0.002585200000;
0.712058600000 0.969171200000 0.002438600000;
0.728828400000 0.963856800000 0.002309400000;
0.745518800000 0.958134900000 0.002196800000;
0.762100000000 0.952000000000 0.002100000000;
0.778543200000 0.945450400000 0.002017733000;
0.794825600000 0.938499200000 0.001948200000;
0.810926400000 0.931162800000 0.001889800000;
0.826824800000 0.923457600000 0.001840933000;
0.842500000000 0.915400000000 0.001800000000;
0.857932500000 0.907006400000 0.001766267000;
0.873081600000 0.898277200000 0.001737800000;
0.887894400000 0.889204800000 0.001711200000;
0.902318100000 0.879781600000 0.001683067000;
0.916300000000 0.870000000000 0.001650001000;
0.929799500000 0.859861300000 0.001610133000;
0.942798400000 0.849392000000 0.001564400000;
0.955277600000 0.838622000000 0.001513600000;
0.967217900000 0.827581300000 0.001458533000;
0.978600000000 0.816300000000 0.001400000000;
0.989385600000 0.804794700000 0.001336667000;
0.999548800000 0.793082000000 0.001270000000;
1.009089200000 0.781192000000 0.001205000000;
1.018006400000 0.769154700000 0.001146667000;
1.026300000000 0.757000000000 0.001100000000;
1.033982700000 0.744754100000 0.001068800000;
1.040986000000 0.732422400000 0.001049400000;
1.047188000000 0.720003600000 0.001035600000;
1.052466700000 0.707496500000 0.001021200000;
1.056700000000 0.694900000000 0.001000000000;
1.059794400000 0.682219200000 0.000968640000;
1.061799200000 0.669471600000 0.000929920000;
1.062806800000 0.656674400000 0.000886880000;
1.062909600000 0.643844800000 0.000842560000;
1.062200000000 0.631000000000 0.000800000000;
1.060735200000 0.618155500000 0.000760960000;
1.058443600000 0.605314400000 0.000723680000;
1.055224400000 0.592475600000 0.000685920000;
1.050976800000 0.579637900000 0.000645440000;
1.045600000000 0.566800000000 0.000600000000;
1.039036900000 0.553961100000 0.000547866700;
1.031360800000 0.541137200000 0.000491600000;
1.022666200000 0.528352800000 0.000435400000;
1.013047700000 0.515632300000 0.000383466700;
1.002600000000 0.503000000000 0.000340000000;
0.991367500000 0.490468800000 0.000307253300;
0.979331400000 0.478030400000 0.000283160000;
0.966491600000 0.465677600000 0.000265440000;
0.952847900000 0.453403200000 0.000251813300;
0.938400000000 0.441200000000 0.000240000000;
0.923194000000 0.429080000000 0.000229546700;
0.907244000000 0.417036000000 0.000220640000;
0.890502000000 0.405032000000 0.000211960000;
0.872920000000 0.393032000000 0.000202186700;
0.854449900000 0.381000000000 0.000190000000;
0.835084000000 0.368918400000 0.000174213300;
0.814946000000 0.356827200000 0.000155640000;
0.794186000000 0.344776800000 0.000135960000;
0.772954000000 0.332817600000 0.000116853300;
0.751400000000 0.321000000000 0.000100000000;
0.729583600000 0.309338100000 0.000086133330;
0.707588800000 0.297850400000 0.000074600000;
0.685602200000 0.286593600000 0.000065000000;
0.663810400000 0.275624500000 0.000056933330;
0.642400000000 0.265000000000 0.000049999990;
0.621514900000 0.254763200000 0.000044160000;
0.601113800000 0.244889600000 0.000039480000;
0.581105200000 0.235334400000 0.000035720000;
0.561397700000 0.226052800000 0.000032640000;
0.541900000000 0.217000000000 0.000030000000;
0.522599500000 0.208161600000 0.000027653330;
0.503546400000 0.199548800000 0.000025560000;
0.484743600000 0.191155200000 0.000023640000;
0.466193900000 0.182974400000 0.000021813330;
0.447900000000 0.175000000000 0.000020000000;
0.429861300000 0.167223500000 0.000018133330;
0.412098000000 0.159646400000 0.000016200000;
0.394644000000 0.152277600000 0.000014200000;
0.377533300000 0.145125900000 0.000012133330;
0.360800000000 0.138200000000 0.000010000000;
0.344456300000 0.131500300000 0.000007733333;
0.328516800000 0.125024800000 0.000005400000;
0.313019200000 0.118779200000 0.000003200000;
0.298001100000 0.112769100000 0.000001333333;
0.283500000000 0.107000000000 0.000000000000;
0.269544800000 0.101476200000 0.000000000000;
0.256118400000 0.096188640000 0.000000000000;
0.243189600000 0.091122960000 0.000000000000;
0.230727200000 0.086264850000 0.000000000000;
0.218700000000 0.081600000000 0.000000000000;
0.207097100000 0.077120640000 0.000000000000;
0.195923200000 0.072825520000 0.000000000000;
0.185170800000 0.068710080000 0.000000000000;
0.174832300000 0.064769760000 0.000000000000;
0.164900000000 0.061000000000 0.000000000000;
0.155366700000 0.057396210000 0.000000000000;
0.146230000000 0.053955040000 0.000000000000;
0.137490000000 0.050673760000 0.000000000000;
0.129146700000 0.047549650000 0.000000000000;
0.121200000000 0.044580000000 0.000000000000;
0.113639700000 0.041758720000 0.000000000000;
0.106465000000 0.039084960000 0.000000000000;
0.099690440000 0.036563840000 0.000000000000;
0.093330610000 0.034200480000 0.000000000000;
0.087400000000 0.032000000000 0.000000000000;
0.081900960000 0.029962610000 0.000000000000;
0.076804280000 0.028076640000 0.000000000000;
0.072077120000 0.026329360000 0.000000000000;
0.067686640000 0.024708050000 0.000000000000;
0.063600000000 0.023200000000 0.000000000000;
0.059806850000 0.021800770000 0.000000000000;
0.056282160000 0.020501120000 0.000000000000;
0.052971040000 0.019281080000 0.000000000000;
0.049818610000 0.018120690000 0.000000000000;
0.046770000000 0.017000000000 0.000000000000;
0.043784050000 0.015903790000 0.000000000000;
0.040875360000 0.014837180000 0.000000000000;
0.038072640000 0.013810680000 0.000000000000;
0.035404610000 0.012834780000 0.000000000000;
0.032900000000 0.011920000000 0.000000000000;
0.030564190000 0.011068310000 0.000000000000;
0.028380560000 0.010273390000 0.000000000000;
0.026344840000 0.009533311000 0.000000000000;
0.024452750000 0.008846157000 0.000000000000;
0.022700000000 0.008210000000 0.000000000000;
0.021084290000 0.007623781000 0.000000000000;
0.019599880000 0.007085424000 0.000000000000;
0.018237320000 0.006591476000 0.000000000000;
0.016987170000 0.006138485000 0.000000000000;
0.015840000000 0.005723000000 0.000000000000;
0.014790640000 0.005343059000 0.000000000000;
0.013831320000 0.004995796000 0.000000000000;
0.012948680000 0.004676404000 0.000000000000;
0.012129200000 0.004380075000 0.000000000000;
0.011359160000 0.004102000000 0.000000000000;
0.010629350000 0.003838453000 0.000000000000;
0.009938846000 0.003589099000 0.000000000000;
0.009288422000 0.003354219000 0.000000000000;
0.008678854000 0.003134093000 0.000000000000;
0.008110916000 0.002929000000 0.000000000000;
0.007582388000 0.002738139000 0.000000000000;
0.007088746000 0.002559876000 0.000000000000;
0.006627313000 0.002393244000 0.000000000000;
0.006195408000 0.002237275000 0.000000000000;
0.005790346000 0.002091000000 0.000000000000;
0.005409826000 0.001953587000 0.000000000000;
0.005052583000 0.001824580000 0.000000000000;
0.004717512000 0.001703580000 0.000000000000;
0.004403507000 0.001590187000 0.000000000000;
0.004109457000 0.001484000000 0.000000000000;
0.003833913000 0.001384496000 0.000000000000;
0.003575748000 0.001291268000 0.000000000000;
0.003334342000 0.001204092000 0.000000000000;
0.003109075000 0.001122744000 0.000000000000;
0.002899327000 0.001047000000 0.000000000000;
0.002704348000 0.000976589600 0.000000000000;
0.002523020000 0.000911108800 0.000000000000;
0.002354168000 0.000850133200 0.000000000000;
0.002196616000 0.000793238400 0.000000000000;
0.002049190000 0.000740000000 0.000000000000;
0.001910960000 0.000690082700 0.000000000000;
0.001781438000 0.000643310000 0.000000000000;
0.001660110000 0.000599496000 0.000000000000;
0.001546459000 0.000558454700 0.000000000000;
0.001439971000 0.000520000000 0.000000000000;
0.001340042000 0.000483913600 0.000000000000;
0.001246275000 0.000450052800 0.000000000000;
0.001158471000 0.000418345200 0.000000000000;
0.001076430000 0.000388718400 0.000000000000;
0.000999949300 0.000361100000 0.000000000000;
0.000928735800 0.000335383500 0.000000000000;
0.000862433200 0.000311440400 0.000000000000;
0.000800750300 0.000289165600 0.000000000000;
0.000743396000 0.000268453900 0.000000000000;
0.000690078600 0.000249200000 0.000000000000;
0.000640515600 0.000231301900 0.000000000000;
0.000594502100 0.000214685600 0.000000000000;
0.000551864600 0.000199288400 0.000000000000;
0.000512429000 0.000185047500 0.000000000000;
0.000476021300 0.000171900000 0.000000000000;
0.000442453600 0.000159778100 0.000000000000;
0.000411511700 0.000148604400 0.000000000000;
0.000382981400 0.000138301600 0.000000000000;
0.000356649100 0.000128792500 0.000000000000;
0.000332301100 0.000120000000 0.000000000000;
0.000309758600 0.000111859500 0.000000000000;
0.000288887100 0.000104322400 0.000000000000;
0.000269539400 0.000097335600 0.000000000000;
0.000251568200 0.000090845870 0.000000000000;
0.000234826100 0.000084800000 0.000000000000;
0.000219171000 0.000079146670 0.000000000000;
0.000204525800 0.000073858000 0.000000000000;
0.000190840500 0.000068916000 0.000000000000;
0.000178065400 0.000064302670 0.000000000000;
0.000166150500 0.000060000000 0.000000000000;
0.000155023600 0.000055981870 0.000000000000;
0.000144621900 0.000052225600 0.000000000000;
0.000134909800 0.000048718400 0.000000000000;
0.000125852000 0.000045447470 0.000000000000;
0.000117413000 0.000042400000 0.000000000000;
0.000109551500 0.000039561040 0.000000000000;
0.000102224500 0.000036915120 0.000000000000;
0.000095394450 0.000034448680 0.000000000000;
0.000089023900 0.000032148160 0.000000000000;
0.000083075270 0.000030000000 0.000000000000;
0.000077512690 0.000027991250 0.000000000000;
0.000072313040 0.000026113560 0.000000000000;
0.000067457780 0.000024360240 0.000000000000;
0.000062928440 0.000022724610 0.000000000000;
0.000058706520 0.000021200000 0.000000000000;
0.000054770280 0.000019778550 0.000000000000;
0.000051099180 0.000018452850 0.000000000000;
0.000047676540 0.000017216870 0.000000000000;
0.000044485670 0.000016064590 0.000000000000;
0.000041509940 0.000014990000 0.000000000000;
0.000038733240 0.000013987280 0.000000000000;
0.000036142030 0.000013051550 0.000000000000;
0.000033723520 0.000012178180 0.000000000000;
0.000031464870 0.000011362540 0.000000000000;
0.000029353260 0.000010600000 0.000000000000;
0.000027375730 0.000009885877 0.000000000000;
0.000025524330 0.000009217304 0.000000000000;
0.000023793760 0.000008592362 0.000000000000;
0.000022178700 0.000008009133 0.000000000000;
0.000020673830 0.000007465700 0.000000000000;
0.000019272260 0.000006959567 0.000000000000;
0.000017966400 0.000006487995 0.000000000000;
0.000016749910 0.000006048699 0.000000000000;
0.000015616480 0.000005639396 0.000000000000;
0.000014559770 0.000005257800 0.000000000000;
0.000013573870 0.000004901771 0.000000000000;
0.000012654360 0.000004569720 0.000000000000;
0.000011797230 0.000004260194 0.000000000000;
0.000010998440 0.000003971739 0.000000000000;
0.000010253980 0.000003702900 0.000000000000;
0.000009559646 0.000003452163 0.000000000000;
0.000008912044 0.000003218302 0.000000000000;
0.000008308358 0.000003000300 0.000000000000;
0.000007745769 0.000002797139 0.000000000000;
0.000007221456 0.000002607800 0.000000000000;
0.000006732475 0.000002431220 0.000000000000;
0.000006276423 0.000002266531 0.000000000000;
0.000005851304 0.000002113013 0.000000000000;
0.000005455118 0.000001969943 0.000000000000;
0.000005085868 0.000001836600 0.000000000000;
0.000004741466 0.000001712230 0.000000000000;
0.000004420236 0.000001596228 0.000000000000;
0.000004120783 0.000001488090 0.000000000000;
0.000003841716 0.000001387314 0.000000000000;
0.000003581652 0.000001293400 0.000000000000;
0.000003339127 0.000001205820 0.000000000000;
0.000003112949 0.000001124143 0.000000000000;
0.000002902121 0.000001048009 0.000000000000;
0.000002705645 0.000000977058 0.000000000000;
0.000002522525 0.000000910930 0.000000000000;
0.000002351726 0.000000849251 0.000000000000;
0.000002192415 0.000000791721 0.000000000000;
0.000002043902 0.000000738090 0.000000000000;
0.000001905497 0.000000688110 0.000000000000;
0.000001776509 0.000000641530 0.000000000000;
0.000001656215 0.000000598090 0.000000000000;
0.000001544022 0.000000557575 0.000000000000;
0.000001439440 0.000000519808 0.000000000000;
0.000001341977 0.000000484612 0.000000000000;
0.000001251141 0.000000451810 0.000000000000];
# CIE 1964 10° observer: table samples run from 360 nm in 1 nm steps.
colormatch(::Type{CIE1964_CMF}, wavelen::Real) =
    interpolate_table(cie1964_cmf_table, 360.0, 1.0, wavelen)
# CIE 1964 10° color matching function, 1nm increments starting at 360nm
const cie1964_cmf_table =
[0.000000122200 0.000000013398 0.000000535027;
0.000000185138 0.000000020294 0.000000810720;
0.000000278830 0.000000030560 0.000001221200;
0.000000417470 0.000000045740 0.000001828700;
0.000000621330 0.000000068050 0.000002722200;
0.000000919270 0.000000100650 0.000004028300;
0.000001351980 0.000000147980 0.000005925700;
0.000001976540 0.000000216270 0.000008665100;
0.000002872500 0.000000314200 0.000012596000;
0.000004149500 0.000000453700 0.000018201000;
0.000005958600 0.000000651100 0.000026143700;
0.000008505600 0.000000928800 0.000037330000;
0.000012068600 0.000001317500 0.000052987000;
0.000017022600 0.000001857200 0.000074764000;
0.000023868000 0.000002602000 0.000104870000;
0.000033266000 0.000003625000 0.000146220000;
0.000046087000 0.000005019000 0.000202660000;
0.000063472000 0.000006907000 0.000279230000;
0.000086892000 0.000009449000 0.000382450000;
0.000118246000 0.000012848000 0.000520720000;
0.000159952000 0.000017364000 0.000704776000;
0.000215080000 0.000023327000 0.000948230000;
0.000287490000 0.000031150000 0.001268200000;
0.000381990000 0.000041350000 0.001686100000;
0.000504550000 0.000054560000 0.002228500000;
0.000662440000 0.000071560000 0.002927800000;
0.000864500000 0.000093300000 0.003823700000;
0.001121500000 0.000120870000 0.004964200000;
0.001446160000 0.000155640000 0.006406700000;
0.001853590000 0.000199200000 0.008219300000;
0.002361600000 0.000253400000 0.010482200000;
0.002990600000 0.000320200000 0.013289000000;
0.003764500000 0.000402400000 0.016747000000;
0.004710200000 0.000502300000 0.020980000000;
0.005858100000 0.000623200000 0.026127000000;
0.007242300000 0.000768500000 0.032344000000;
0.008899600000 0.000941700000 0.039802000000;
0.010870900000 0.001147800000 0.048691000000;
0.013198900000 0.001390300000 0.059210000000;
0.015929200000 0.001674000000 0.071576000000;
0.019109700000 0.002004400000 0.086010900000;
0.022788000000 0.002386000000 0.102740000000;
0.027011000000 0.002822000000 0.122000000000;
0.031829000000 0.003319000000 0.144020000000;
0.037278000000 0.003880000000 0.168990000000;
0.043400000000 0.004509000000 0.197120000000;
0.050223000000 0.005209000000 0.228570000000;
0.057764000000 0.005985000000 0.263470000000;
0.066038000000 0.006833000000 0.301900000000;
0.075033000000 0.007757000000 0.343870000000;
0.084736000000 0.008756000000 0.389366000000;
0.095041000000 0.009816000000 0.437970000000;
0.105836000000 0.010918000000 0.489220000000;
0.117066000000 0.012058000000 0.542900000000;
0.128682000000 0.013237000000 0.598810000000;
0.140638000000 0.014456000000 0.656760000000;
0.152893000000 0.015717000000 0.716580000000;
0.165416000000 0.017025000000 0.778120000000;
0.178191000000 0.018399000000 0.841310000000;
0.191214000000 0.019848000000 0.906110000000;
0.204492000000 0.021391000000 0.972542000000;
0.217650000000 0.022992000000 1.038900000000;
0.230267000000 0.024598000000 1.103100000000;
0.242311000000 0.026213000000 1.165100000000;
0.253793000000 0.027841000000 1.224900000000;
0.264737000000 0.029497000000 1.282500000000;
0.275195000000 0.031195000000 1.338200000000;
0.285301000000 0.032927000000 1.392600000000;
0.295143000000 0.034738000000 1.446100000000;
0.304869000000 0.036654000000 1.499400000000;
0.314679000000 0.038676000000 1.553480000000;
0.324355000000 0.040792000000 1.607200000000;
0.333570000000 0.042946000000 1.658900000000;
0.342243000000 0.045114000000 1.708200000000;
0.350312000000 0.047333000000 1.754800000000;
0.357719000000 0.049602000000 1.798500000000;
0.364482000000 0.051934000000 1.839200000000;
0.370493000000 0.054337000000 1.876600000000;
0.375727000000 0.056822000000 1.910500000000;
0.380158000000 0.059399000000 1.940800000000;
0.383734000000 0.062077000000 1.967280000000;
0.386327000000 0.064737000000 1.989100000000;
0.387858000000 0.067285000000 2.005700000000;
0.388396000000 0.069764000000 2.017400000000;
0.387978000000 0.072218000000 2.024400000000;
0.386726000000 0.074704000000 2.027300000000;
0.384696000000 0.077272000000 2.026400000000;
0.382006000000 0.079979000000 2.022300000000;
0.378709000000 0.082874000000 2.015300000000;
0.374915000000 0.086000000000 2.006000000000;
0.370702000000 0.089456000000 1.994800000000;
0.366089000000 0.092947000000 1.981400000000;
0.361045000000 0.096275000000 1.965300000000;
0.355518000000 0.099535000000 1.946400000000;
0.349486000000 0.102829000000 1.924800000000;
0.342957000000 0.106256000000 1.900700000000;
0.335893000000 0.109901000000 1.874100000000;
0.328284000000 0.113835000000 1.845100000000;
0.320150000000 0.118167000000 1.813900000000;
0.311475000000 0.122932000000 1.780600000000;
0.302273000000 0.128201000000 1.745370000000;
0.292858000000 0.133457000000 1.709100000000;
0.283502000000 0.138323000000 1.672300000000;
0.274044000000 0.143042000000 1.634700000000;
0.264263000000 0.147787000000 1.595600000000;
0.254085000000 0.152761000000 1.554900000000;
0.243392000000 0.158102000000 1.512200000000;
0.232187000000 0.163941000000 1.467300000000;
0.220488000000 0.170362000000 1.419900000000;
0.208198000000 0.177425000000 1.370000000000;
0.195618000000 0.185190000000 1.317560000000;
0.183034000000 0.193025000000 1.262400000000;
0.170222000000 0.200313000000 1.205000000000;
0.157348000000 0.207156000000 1.146600000000;
0.144650000000 0.213644000000 1.088000000000;
0.132349000000 0.219940000000 1.030200000000;
0.120584000000 0.226170000000 0.973830000000;
0.109456000000 0.232467000000 0.919430000000;
0.099042000000 0.239025000000 0.867460000000;
0.089388000000 0.245997000000 0.818280000000;
0.080507000000 0.253589000000 0.772125000000;
0.072034000000 0.261876000000 0.728290000000;
0.063710000000 0.270643000000 0.686040000000;
0.055694000000 0.279645000000 0.645530000000;
0.048117000000 0.288694000000 0.606850000000;
0.041072000000 0.297665000000 0.570060000000;
0.034642000000 0.306469000000 0.535220000000;
0.028896000000 0.315035000000 0.502340000000;
0.023876000000 0.323335000000 0.471400000000;
0.019628000000 0.331366000000 0.442390000000;
0.016172000000 0.339133000000 0.415254000000;
0.013300000000 0.347860000000 0.390024000000;
0.010759000000 0.358326000000 0.366399000000;
0.008542000000 0.370001000000 0.344015000000;
0.006661000000 0.382464000000 0.322689000000;
0.005132000000 0.395379000000 0.302356000000;
0.003982000000 0.408482000000 0.283036000000;
0.003239000000 0.421588000000 0.264816000000;
0.002934000000 0.434619000000 0.247848000000;
0.003114000000 0.447601000000 0.232318000000;
0.003816000000 0.460777000000 0.218502000000;
0.005095000000 0.474340000000 0.205851000000;
0.006936000000 0.488200000000 0.193596000000;
0.009299000000 0.502340000000 0.181736000000;
0.012147000000 0.516740000000 0.170281000000;
0.015444000000 0.531360000000 0.159249000000;
0.019156000000 0.546190000000 0.148673000000;
0.023250000000 0.561180000000 0.138609000000;
0.027690000000 0.576290000000 0.129096000000;
0.032444000000 0.591500000000 0.120215000000;
0.037465000000 0.606741000000 0.112044000000;
0.042956000000 0.622150000000 0.104710000000;
0.049114000000 0.637830000000 0.098196000000;
0.055920000000 0.653710000000 0.092361000000;
0.063349000000 0.669680000000 0.087088000000;
0.071358000000 0.685660000000 0.082248000000;
0.079901000000 0.701550000000 0.077744000000;
0.088909000000 0.717230000000 0.073456000000;
0.098293000000 0.732570000000 0.069268000000;
0.107949000000 0.747460000000 0.065060000000;
0.117749000000 0.761757000000 0.060709000000;
0.127839000000 0.775340000000 0.056457000000;
0.138450000000 0.788220000000 0.052609000000;
0.149516000000 0.800460000000 0.049122000000;
0.161041000000 0.812140000000 0.045954000000;
0.172953000000 0.823330000000 0.043050000000;
0.185209000000 0.834120000000 0.040368000000;
0.197755000000 0.844600000000 0.037839000000;
0.210538000000 0.854870000000 0.035384000000;
0.223460000000 0.865040000000 0.032949000000;
0.236491000000 0.875211000000 0.030451000000;
0.249633000000 0.885370000000 0.028029000000;
0.262972000000 0.895370000000 0.025862000000;
0.276515000000 0.905150000000 0.023920000000;
0.290269000000 0.914650000000 0.022174000000;
0.304213000000 0.923810000000 0.020584000000;
0.318361000000 0.932550000000 0.019127000000;
0.332705000000 0.940810000000 0.017740000000;
0.347232000000 0.948520000000 0.016403000000;
0.361926000000 0.955600000000 0.015064000000;
0.376772000000 0.961988000000 0.013676000000;
0.391683000000 0.967540000000 0.012308000000;
0.406594000000 0.972230000000 0.011056000000;
0.421539000000 0.976170000000 0.009915000000;
0.436517000000 0.979460000000 0.008872000000;
0.451584000000 0.982200000000 0.007918000000;
0.466782000000 0.984520000000 0.007030000000;
0.482147000000 0.986520000000 0.006223000000;
0.497738000000 0.988320000000 0.005453000000;
0.513606000000 0.990020000000 0.004714000000;
0.529826000000 0.991761000000 0.003988000000;
0.546440000000 0.993530000000 0.003289000000;
0.563426000000 0.995230000000 0.002646000000;
0.580726000000 0.996770000000 0.002063000000;
0.598290000000 0.998090000000 0.001533000000;
0.616053000000 0.999110000000 0.001091000000;
0.633948000000 0.999770000000 0.000711000000;
0.651901000000 1.000000000000 0.000407000000;
0.669824000000 0.999710000000 0.000184000000;
0.687632000000 0.998850000000 0.000047000000;
0.705224000000 0.997340000000 0.000000000000;
0.722773000000 0.995260000000 0.000000000000;
0.740483000000 0.992740000000 0.000000000000;
0.758273000000 0.989750000000 0.000000000000;
0.776083000000 0.986300000000 0.000000000000;
0.793832000000 0.982380000000 0.000000000000;
0.811436000000 0.977980000000 0.000000000000;
0.828822000000 0.973110000000 0.000000000000;
0.845879000000 0.967740000000 0.000000000000;
0.862525000000 0.961890000000 0.000000000000;
0.878655000000 0.955552000000 0.000000000000;
0.894208000000 0.948601000000 0.000000000000;
0.909206000000 0.940981000000 0.000000000000;
0.923672000000 0.932798000000 0.000000000000;
0.937638000000 0.924158000000 0.000000000000;
0.951162000000 0.915175000000 0.000000000000;
0.964283000000 0.905954000000 0.000000000000;
0.977068000000 0.896608000000 0.000000000000;
0.989590000000 0.887249000000 0.000000000000;
1.001910000000 0.877986000000 0.000000000000;
1.014160000000 0.868934000000 0.000000000000;
1.026500000000 0.860164000000 0.000000000000;
1.038800000000 0.851519000000 0.000000000000;
1.051000000000 0.842963000000 0.000000000000;
1.062900000000 0.834393000000 0.000000000000;
1.074300000000 0.825623000000 0.000000000000;
1.085200000000 0.816764000000 0.000000000000;
1.095200000000 0.807544000000 0.000000000000;
1.104200000000 0.797947000000 0.000000000000;
1.112000000000 0.787893000000 0.000000000000;
1.118520000000 0.777405000000 0.000000000000;
1.123800000000 0.766490000000 0.000000000000;
1.128000000000 0.755309000000 0.000000000000;
1.131100000000 0.743845000000 0.000000000000;
1.133200000000 0.732190000000 0.000000000000;
1.134300000000 0.720353000000 0.000000000000;
1.134300000000 0.708281000000 0.000000000000;
1.133300000000 0.696055000000 0.000000000000;
1.131200000000 0.683621000000 0.000000000000;
1.128100000000 0.671048000000 0.000000000000;
1.123990000000 0.658341000000 0.000000000000;
1.118900000000 0.645545000000 0.000000000000;
1.112900000000 0.632718000000 0.000000000000;
1.105900000000 0.619815000000 0.000000000000;
1.098000000000 0.606887000000 0.000000000000;
1.089100000000 0.593878000000 0.000000000000;
1.079200000000 0.580781000000 0.000000000000;
1.068400000000 0.567653000000 0.000000000000;
1.056700000000 0.554490000000 0.000000000000;
1.044000000000 0.541228000000 0.000000000000;
1.030480000000 0.527963000000 0.000000000000;
1.016000000000 0.514634000000 0.000000000000;
1.000800000000 0.501363000000 0.000000000000;
0.984790000000 0.488124000000 0.000000000000;
0.968080000000 0.474935000000 0.000000000000;
0.950740000000 0.461834000000 0.000000000000;
0.932800000000 0.448823000000 0.000000000000;
0.914340000000 0.435917000000 0.000000000000;
0.895390000000 0.423153000000 0.000000000000;
0.876030000000 0.410526000000 0.000000000000;
0.856297000000 0.398057000000 0.000000000000;
0.836350000000 0.385835000000 0.000000000000;
0.816290000000 0.373951000000 0.000000000000;
0.796050000000 0.362311000000 0.000000000000;
0.775610000000 0.350863000000 0.000000000000;
0.754930000000 0.339554000000 0.000000000000;
0.733990000000 0.328309000000 0.000000000000;
0.712780000000 0.317118000000 0.000000000000;
0.691290000000 0.305936000000 0.000000000000;
0.669520000000 0.294737000000 0.000000000000;
0.647467000000 0.283493000000 0.000000000000;
0.625110000000 0.272222000000 0.000000000000;
0.602520000000 0.260990000000 0.000000000000;
0.579890000000 0.249877000000 0.000000000000;
0.557370000000 0.238946000000 0.000000000000;
0.535110000000 0.228254000000 0.000000000000;
0.513240000000 0.217853000000 0.000000000000;
0.491860000000 0.207780000000 0.000000000000;
0.471080000000 0.198072000000 0.000000000000;
0.450960000000 0.188748000000 0.000000000000;
0.431567000000 0.179828000000 0.000000000000;
0.412870000000 0.171285000000 0.000000000000;
0.394750000000 0.163059000000 0.000000000000;
0.377210000000 0.155151000000 0.000000000000;
0.360190000000 0.147535000000 0.000000000000;
0.343690000000 0.140211000000 0.000000000000;
0.327690000000 0.133170000000 0.000000000000;
0.312170000000 0.126400000000 0.000000000000;
0.297110000000 0.119892000000 0.000000000000;
0.282500000000 0.113640000000 0.000000000000;
0.268329000000 0.107633000000 0.000000000000;
0.254590000000 0.101870000000 0.000000000000;
0.241300000000 0.096347000000 0.000000000000;
0.228480000000 0.091063000000 0.000000000000;
0.216140000000 0.086010000000 0.000000000000;
0.204300000000 0.081187000000 0.000000000000;
0.192950000000 0.076583000000 0.000000000000;
0.182110000000 0.072198000000 0.000000000000;
0.171770000000 0.068024000000 0.000000000000;
0.161920000000 0.064052000000 0.000000000000;
0.152568000000 0.060281000000 0.000000000000;
0.143670000000 0.056697000000 0.000000000000;
0.135200000000 0.053292000000 0.000000000000;
0.127130000000 0.050059000000 0.000000000000;
0.119480000000 0.046998000000 0.000000000000;
0.112210000000 0.044096000000 0.000000000000;
0.105310000000 0.041345000000 0.000000000000;
0.098786000000 0.038750700000 0.000000000000;
0.092610000000 0.036297800000 0.000000000000;
0.086773000000 0.033983200000 0.000000000000;
0.081260600000 0.031800400000 0.000000000000;
0.076048000000 0.029739500000 0.000000000000;
0.071114000000 0.027791800000 0.000000000000;
0.066454000000 0.025955100000 0.000000000000;
0.062062000000 0.024226300000 0.000000000000;
0.057930000000 0.022601700000 0.000000000000;
0.054050000000 0.021077900000 0.000000000000;
0.050412000000 0.019650500000 0.000000000000;
0.047006000000 0.018315300000 0.000000000000;
0.043823000000 0.017068600000 0.000000000000;
0.040850800000 0.015905100000 0.000000000000;
0.038072000000 0.014818300000 0.000000000000;
0.035468000000 0.013800800000 0.000000000000;
0.033031000000 0.012849500000 0.000000000000;
0.030753000000 0.011960700000 0.000000000000;
0.028623000000 0.011130300000 0.000000000000;
0.026635000000 0.010355500000 0.000000000000;
0.024781000000 0.009633200000 0.000000000000;
0.023052000000 0.008959900000 0.000000000000;
0.021441000000 0.008332400000 0.000000000000;
0.019941300000 0.007748800000 0.000000000000;
0.018544000000 0.007204600000 0.000000000000;
0.017241000000 0.006697500000 0.000000000000;
0.016027000000 0.006225100000 0.000000000000;
0.014896000000 0.005785000000 0.000000000000;
0.013842000000 0.005375100000 0.000000000000;
0.012862000000 0.004994100000 0.000000000000;
0.011949000000 0.004639200000 0.000000000000;
0.011100000000 0.004309300000 0.000000000000;
0.010311000000 0.004002800000 0.000000000000;
0.009576880000 0.003717740000 0.000000000000;
0.008894000000 0.003452620000 0.000000000000;
0.008258100000 0.003205830000 0.000000000000;
0.007666400000 0.002976230000 0.000000000000;
0.007116300000 0.002762810000 0.000000000000;
0.006605200000 0.002564560000 0.000000000000;
0.006130600000 0.002380480000 0.000000000000;
0.005690300000 0.002209710000 0.000000000000;
0.005281900000 0.002051320000 0.000000000000;
0.004903300000 0.001904490000 0.000000000000;
0.004552630000 0.001768470000 0.000000000000;
0.004227500000 0.001642360000 0.000000000000;
0.003925800000 0.001525350000 0.000000000000;
0.003645700000 0.001416720000 0.000000000000;
0.003385900000 0.001315950000 0.000000000000;
0.003144700000 0.001222390000 0.000000000000;
0.002920800000 0.001135550000 0.000000000000;
0.002713000000 0.001054940000 0.000000000000;
0.002520200000 0.000980140000 0.000000000000;
0.002341100000 0.000910660000 0.000000000000;
0.002174960000 0.000846190000 0.000000000000;
0.002020600000 0.000786290000 0.000000000000;
0.001877300000 0.000730680000 0.000000000000;
0.001744100000 0.000678990000 0.000000000000;
0.001620500000 0.000631010000 0.000000000000;
0.001505700000 0.000586440000 0.000000000000;
0.001399200000 0.000545110000 0.000000000000;
0.001300400000 0.000506720000 0.000000000000;
0.001208700000 0.000471110000 0.000000000000;
0.001123600000 0.000438050000 0.000000000000;
0.001044760000 0.000407410000 0.000000000000;
0.000971560000 0.000378962000 0.000000000000;
0.000903600000 0.000352543000 0.000000000000;
0.000840480000 0.000328001000 0.000000000000;
0.000781870000 0.000305208000 0.000000000000;
0.000727450000 0.000284041000 0.000000000000;
0.000676900000 0.000264375000 0.000000000000;
0.000629960000 0.000246109000 0.000000000000;
0.000586370000 0.000229143000 0.000000000000;
0.000545870000 0.000213376000 0.000000000000;
0.000508258000 0.000198730000 0.000000000000;
0.000473300000 0.000185115000 0.000000000000;
0.000440800000 0.000172454000 0.000000000000;
0.000410580000 0.000160678000 0.000000000000;
0.000382490000 0.000149730000 0.000000000000;
0.000356380000 0.000139550000 0.000000000000;
0.000332110000 0.000130086000 0.000000000000;
0.000309550000 0.000121290000 0.000000000000;
0.000288580000 0.000113106000 0.000000000000;
0.000269090000 0.000105501000 0.000000000000;
0.000250969000 0.000098428000 0.000000000000;
0.000234130000 0.000091853000 0.000000000000;
0.000218470000 0.000085738000 0.000000000000;
0.000203910000 0.000080048000 0.000000000000;
0.000190350000 0.000074751000 0.000000000000;
0.000177730000 0.000069819000 0.000000000000;
0.000165970000 0.000065222000 0.000000000000;
0.000155020000 0.000060939000 0.000000000000;
0.000144800000 0.000056942000 0.000000000000;
0.000135280000 0.000053217000 0.000000000000;
0.000126390000 0.000049737000 0.000000000000;
0.000118100000 0.000046491000 0.000000000000;
0.000110370000 0.000043464000 0.000000000000;
0.000103150000 0.000040635000 0.000000000000;
0.000096427000 0.000038000000 0.000000000000;
0.000090151000 0.000035540500 0.000000000000;
0.000084294000 0.000033244800 0.000000000000;
0.000078830000 0.000031100600 0.000000000000;
0.000073729000 0.000029099000 0.000000000000;
0.000068969000 0.000027230700 0.000000000000;
0.000064525800 0.000025486000 0.000000000000;
0.000060376000 0.000023856100 0.000000000000;
0.000056500000 0.000022333200 0.000000000000;
0.000052880000 0.000020910400 0.000000000000;
0.000049498000 0.000019580800 0.000000000000;
0.000046339000 0.000018338400 0.000000000000;
0.000043389000 0.000017177700 0.000000000000;
0.000040634000 0.000016093400 0.000000000000;
0.000038060000 0.000015080000 0.000000000000;
0.000035657000 0.000014133600 0.000000000000;
0.000033411700 0.000013249000 0.000000000000;
0.000031315000 0.000012422600 0.000000000000;
0.000029355000 0.000011649900 0.000000000000;
0.000027524000 0.000010927700 0.000000000000;
0.000025811000 0.000010251900 0.000000000000;
0.000024209000 0.000009619600 0.000000000000;
0.000022711000 0.000009028100 0.000000000000;
0.000021308000 0.000008474000 0.000000000000;
0.000019994000 0.000007954800 0.000000000000;
0.000018764000 0.000007468600 0.000000000000;
0.000017611500 0.000007012800 0.000000000000;
0.000016532000 0.000006585800 0.000000000000;
0.000015521000 0.000006185700 0.000000000000;
0.000014574000 0.000005810700 0.000000000000;
0.000013686000 0.000005459000 0.000000000000;
0.000012855000 0.000005129800 0.000000000000;
0.000012075000 0.000004820600 0.000000000000;
0.000011345000 0.000004531200 0.000000000000;
0.000010659000 0.000004259100 0.000000000000;
0.000010017000 0.000004004200 0.000000000000;
0.000009413630 0.000003764730 0.000000000000;
0.000008847900 0.000003539950 0.000000000000;
0.000008317100 0.000003329140 0.000000000000;
0.000007819000 0.000003131150 0.000000000000;
0.000007351600 0.000002945290 0.000000000000;
0.000006913000 0.000002770810 0.000000000000;
0.000006501500 0.000002607050 0.000000000000;
0.000006115300 0.000002453290 0.000000000000;
0.000005752900 0.000002308940 0.000000000000;
0.000005412700 0.000002173380 0.000000000000;
0.000005093470 0.000002046130 0.000000000000;
0.000004793800 0.000001926620 0.000000000000;
0.000004512500 0.000001814400 0.000000000000;
0.000004248300 0.000001708950 0.000000000000;
0.000004000200 0.000001609880 0.000000000000;
0.000003767100 0.000001516770 0.000000000000;
0.000003548000 0.000001429210 0.000000000000;
0.000003342100 0.000001346860 0.000000000000;
0.000003148500 0.000001269450 0.000000000000;
0.000002966500 0.000001196620 0.000000000000;
0.000002795310 0.000001128090 0.000000000000;
0.000002634500 0.000001063680 0.000000000000;
0.000002483400 0.000001003130 0.000000000000;
0.000002341400 0.000000946220 0.000000000000;
0.000002207800 0.000000892630 0.000000000000;
0.000002082000 0.000000842160 0.000000000000;
0.000001963600 0.000000794640 0.000000000000;
0.000001851900 0.000000749780 0.000000000000;
0.000001746500 0.000000707440 0.000000000000;
0.000001647100 0.000000667480 0.000000000000;
0.000001553140 0.000000629700 0.000000000000]
# Color-matching values for the Judd-corrected CIE 1931 2° standard observer.
# Looks up `wavelen` (nm) in `cie1931j_cmf_table`, whose sampling grid starts
# at 370.0 nm with 10.0 nm spacing; the lookup/interpolation itself is
# delegated to the project helper `interpolate_table`.
colormatch(::Type{CIE1931J_CMF}, wavelen::Real) =
    interpolate_table(cie1931j_cmf_table, 370.0, 10.0, wavelen)
# Judd adjustment to the CIE 1931 2° CMF, 10nm increments starting at 370nm
# 41 rows of (x̄, ȳ, z̄) samples; row i corresponds to 370 + 10*(i-1) nm
# (370–770 nm), matching the 370.0/10.0 grid passed to `interpolate_table`
# by `colormatch(::Type{CIE1931J_CMF}, wavelen)`.
const cie1931j_cmf_table =
[0.0008 0.0001 0.0046;
0.0045 0.0004 0.0224;
0.0201 0.0015 0.0925;
0.0611 0.0045 0.2799;
0.1267 0.0093 0.5835;
0.2285 0.0175 1.0622;
0.3081 0.0273 1.4526;
0.3312 0.0379 1.6064;
0.2888 0.0468 1.4717;
0.2323 0.0600 1.2880;
0.1745 0.0910 1.1133;
0.0920 0.1390 0.7552;
0.0318 0.2080 0.4461;
0.0048 0.3230 0.2644;
0.0093 0.5030 0.1541;
0.0636 0.7100 0.0763;
0.1668 0.8620 0.0412;
0.2926 0.9540 0.0200;
0.4364 0.9950 0.0088;
0.5970 0.9950 0.0039;
0.7642 0.9520 0.0020;
0.9159 0.8700 0.0016;
1.0225 0.7570 0.0011;
1.0544 0.6310 0.0007;
0.9922 0.5030 0.0003;
0.8432 0.3810 0.0002;
0.6327 0.2650 0.0001;
0.4404 0.1750 0.0000;
0.2787 0.1070 0.0000;
0.1619 0.0610 0.0000;
0.0858 0.0320 0.0000;
0.0459 0.0170 0.0000;
0.0222 0.0082 0.0000;
0.0113 0.0041 0.0000;
0.0057 0.0021 0.0000;
0.0028 0.0011 0.0000;
0.0015 0.0005 0.0000;
0.0005 0.0002 0.0000;
0.0003 0.0001 0.0000;
0.0002 0.0001 0.0000;
0.0001 0.0000 0.0000]
# Color-matching values for the Judd–Vos-corrected CIE 1931 2° standard
# observer. Looks up `wavelen` (nm) in `cie1931jv_cmf_table`, whose sampling
# grid starts at 380.0 nm with 5.0 nm spacing; the lookup/interpolation is
# delegated to the project helper `interpolate_table`.
colormatch(::Type{CIE1931JV_CMF}, wavelen::Real) =
    interpolate_table(cie1931jv_cmf_table, 380.0, 5.0, wavelen)
# Judd-Vos adjustment to the CIE 1931 2° CMF, 5nm increments starting at 380nm
# 90 rows of (x̄, ȳ, z̄) samples on the 380.0/5.0 nm grid passed to
# `interpolate_table` by `colormatch(::Type{CIE1931JV_CMF}, wavelen)`;
# values keep the scientific-notation formatting of the original dataset.
const cie1931jv_cmf_table =
[2.689900e-003 2.000000e-004 1.226000e-002;
5.310500e-003 3.955600e-004 2.422200e-002;
1.078100e-002 8.000000e-004 4.925000e-002;
2.079200e-002 1.545700e-003 9.513500e-002;
3.798100e-002 2.800000e-003 1.740900e-001;
6.315700e-002 4.656200e-003 2.901300e-001;
9.994100e-002 7.400000e-003 4.605300e-001;
1.582400e-001 1.177900e-002 7.316600e-001;
2.294800e-001 1.750000e-002 1.065800e+000;
2.810800e-001 2.267800e-002 1.314600e+000;
3.109500e-001 2.730000e-002 1.467200e+000;
3.307200e-001 3.258400e-002 1.579600e+000;
3.333600e-001 3.790000e-002 1.616600e+000;
3.167200e-001 4.239100e-002 1.568200e+000;
2.888200e-001 4.680000e-002 1.471700e+000;
2.596900e-001 5.212200e-002 1.374000e+000;
2.327600e-001 6.000000e-002 1.291700e+000;
2.099900e-001 7.294200e-002 1.235600e+000;
1.747600e-001 9.098000e-002 1.113800e+000;
1.328700e-001 1.128400e-001 9.422000e-001;
9.194400e-002 1.390200e-001 7.559600e-001;
5.698500e-002 1.698700e-001 5.864000e-001;
3.173100e-002 2.080200e-001 4.466900e-001;
1.461300e-002 2.580800e-001 3.411600e-001;
4.849100e-003 3.230000e-001 2.643700e-001;
2.321500e-003 4.054000e-001 2.059400e-001;
9.289900e-003 5.030000e-001 1.544500e-001;
2.927800e-002 6.081100e-001 1.091800e-001;
6.379100e-002 7.100000e-001 7.658500e-002;
1.108100e-001 7.951000e-001 5.622700e-002;
1.669200e-001 8.620000e-001 4.136600e-002;
2.276800e-001 9.150500e-001 2.935300e-002;
2.926900e-001 9.540000e-001 2.004200e-002;
3.622500e-001 9.800400e-001 1.331200e-002;
4.363500e-001 9.949500e-001 8.782300e-003;
5.151300e-001 1.000100e+000 5.857300e-003;
5.974800e-001 9.950000e-001 4.049300e-003;
6.812100e-001 9.787500e-001 2.921700e-003;
7.642500e-001 9.520000e-001 2.277100e-003;
8.439400e-001 9.155800e-001 1.970600e-003;
9.163500e-001 8.700000e-001 1.806600e-003;
9.770300e-001 8.162300e-001 1.544900e-003;
1.023000e+000 7.570000e-001 1.234800e-003;
1.051300e+000 6.948300e-001 1.117700e-003;
1.055000e+000 6.310000e-001 9.056400e-004;
1.036200e+000 5.665400e-001 6.946700e-004;
9.923900e-001 5.030000e-001 4.288500e-004;
9.286100e-001 4.417200e-001 3.181700e-004;
8.434600e-001 3.810000e-001 2.559800e-004;
7.398300e-001 3.205200e-001 1.567900e-004;
6.328900e-001 2.650000e-001 9.769400e-005;
5.335100e-001 2.170200e-001 6.894400e-005;
4.406200e-001 1.750000e-001 5.116500e-005;
3.545300e-001 1.381200e-001 3.601600e-005;
2.786200e-001 1.070000e-001 2.423800e-005;
2.148500e-001 8.165200e-002 1.691500e-005;
1.616100e-001 6.100000e-002 1.190600e-005;
1.182000e-001 4.432700e-002 8.148900e-006;
8.575300e-002 3.200000e-002 5.600600e-006;
6.307700e-002 2.345400e-002 3.954400e-006;
4.583400e-002 1.700000e-002 2.791200e-006;
3.205700e-002 1.187200e-002 1.917600e-006;
2.218700e-002 8.210000e-003 1.313500e-006;
1.561200e-002 5.772300e-003 9.151900e-007;
1.109800e-002 4.102000e-003 6.476700e-007;
7.923300e-003 2.929100e-003 4.635200e-007;
5.653100e-003 2.091000e-003 3.330400e-007;
4.003900e-003 1.482200e-003 2.382300e-007;
2.825300e-003 1.047000e-003 1.702600e-007;
1.994700e-003 7.401500e-004 1.220700e-007;
1.399400e-003 5.200000e-004 8.710700e-008;
9.698000e-004 3.609300e-004 6.145500e-008;
6.684700e-004 2.492000e-004 4.316200e-008;
4.614100e-004 1.723100e-004 3.037900e-008;
3.207300e-004 1.200000e-004 2.155400e-008;
2.257300e-004 8.462000e-005 1.549300e-008;
1.597300e-004 6.000000e-005 1.120400e-008;
1.127500e-004 4.244600e-005 8.087300e-009;
7.951300e-005 3.000000e-005 5.834000e-009;
5.608700e-005 2.121000e-005 4.211000e-009;
3.954100e-005 1.498900e-005 3.038300e-009;
2.785200e-005 1.058400e-005 2.190700e-009;
1.959700e-005 7.465600e-006 1.577800e-009;
1.377000e-005 5.259200e-006 1.134800e-009;
9.670000e-006 3.702800e-006 8.156500e-010;
6.791800e-006 2.607600e-006 5.862600e-010;
4.770600e-006 1.836500e-006 4.213800e-010;
3.355000e-006 1.295000e-006 3.031900e-010;
2.353400e-006 9.109200e-007 2.175300e-010;
1.637700e-006 6.356400e-007 1.547600e-010]
# CIE2006 proposed XYZ CMFs[*]. Yet to be adopted by the CIE.
# The corresponding LMS CMFs have been adopted by the CIE.
# The original CIE datasets range from 390 to 830 nm
# The new CIE CMFs are based on a much larger sample of observers,
# and the main difference to the old data is the stronger weighting
# in the short wavelength part of the visual spectrum.
# One goal was to design a dataset where all three CMFs
# integrate to the exact same value. The shortening to the upper limit
# of 780 nm in the version presented here may lead to minor and insignificant
# differences between the three integrals.
# [*] http://cvrl.ioo.ucl.ac.uk/database/text/cienewxyz/cie2012xyz2.htm
# To be tested: differences between the shortened and original datasets,
# and the effect of extrapolating the data down to 380 nm.
# Color-matching values for the proposed CIE 2006 2° observer at `wavelen`
# (nm), looked up via the project helper `interpolate_table` on a grid
# starting at 380.0 nm with 1.0 nm spacing.
#
# Bug fix: the constant defined below is spelled `cie2006_2deg_xyz_cmftable`
# (no underscore before `table`), so the previous reference to
# `cie2006_2deg_xyz_cmf_table` was an undefined binding and this method
# raised `UndefVarError` when called. We now reference the name actually
# bound.
# NOTE(review): renaming the constant itself to `cie2006_2deg_xyz_cmf_table`,
# matching the 10° variant `cie2006_10deg_xyz_cmf_table`, would be the
# cleaner long-term fix.
function colormatch(::Type{CIE2006_2_CMF}, wavelen::Real)
    return interpolate_table(cie2006_2deg_xyz_cmftable, 380.0, 1.0, wavelen)
end
const cie2006_2deg_xyz_cmftable=
[0.000000e+00 0.000000e+00 0.000000e+00;
0.000000e+00 0.000000e+00 0.000000e+00;
0.000000e+00 0.000000e+00 0.000000e+00;
0.000000e+00 0.000000e+00 0.000000e+00;
0.000000e+00 0.000000e+00 0.000000e+00;
0.000000e+00 0.000000e+00 0.000000e+00;
0.000000e+00 0.000000e+00 0.000000e+00;
0.000000e+00 0.000000e+00 0.000000e+00;
0.000000e+00 0.000000e+00 0.000000e+00;
0.000000e+00 0.000000e+00 0.000000e+00;
3.769647e-03 4.146161e-04 1.847260e-02;
4.532416e-03 5.028333e-04 2.221101e-02;
5.446553e-03 6.084991e-04 2.669819e-02;
6.538868e-03 7.344436e-04 3.206937e-02;
7.839699e-03 8.837389e-04 3.847832e-02;
9.382967e-03 1.059646e-03 4.609784e-02;
1.120608e-02 1.265532e-03 5.511953e-02;
1.334965e-02 1.504753e-03 6.575257e-02;
1.585690e-02 1.780493e-03 7.822113e-02;
1.877286e-02 2.095572e-03 9.276013e-02;
2.214302e-02 2.452194e-03 1.096090e-01;
2.601285e-02 2.852216e-03 1.290077e-01;
3.043036e-02 3.299115e-03 1.512047e-01;
3.544325e-02 3.797466e-03 1.764441e-01;
4.109640e-02 4.352768e-03 2.049517e-01;
4.742986e-02 4.971717e-03 2.369246e-01;
5.447394e-02 5.661014e-03 2.725123e-01;
6.223612e-02 6.421615e-03 3.117820e-01;
7.070048e-02 7.250312e-03 3.547064e-01;
7.982513e-02 8.140173e-03 4.011473e-01;
8.953803e-02 9.079860e-03 4.508369e-01;
9.974848e-02 1.005608e-02 5.034164e-01;
1.104019e-01 1.106456e-02 5.586361e-01;
1.214566e-01 1.210522e-02 6.162734e-01;
1.328741e-01 1.318014e-02 6.760982e-01;
1.446214e-01 1.429377e-02 7.378822e-01;
1.566468e-01 1.545004e-02 8.013019e-01;
1.687901e-01 1.664093e-02 8.655573e-01;
1.808328e-01 1.785302e-02 9.295791e-01;
1.925216e-01 1.907018e-02 9.921293e-01;
2.035729e-01 2.027369e-02 1.051821e+00;
2.137531e-01 2.144805e-02 1.107509e+00;
2.231348e-01 2.260041e-02 1.159527e+00;
2.319245e-01 2.374789e-02 1.208869e+00;
2.403892e-01 2.491247e-02 1.256834e+00;
2.488523e-01 2.612106e-02 1.305008e+00;
2.575896e-01 2.739923e-02 1.354758e+00;
2.664991e-01 2.874993e-02 1.405594e+00;
2.753532e-01 3.016909e-02 1.456414e+00;
2.838921e-01 3.165145e-02 1.505960e+00;
2.918246e-01 3.319038e-02 1.552826e+00;
2.989200e-01 3.477912e-02 1.595902e+00;
3.052993e-01 3.641495e-02 1.635768e+00;
3.112031e-01 3.809569e-02 1.673573e+00;
3.169047e-01 3.981843e-02 1.710604e+00;
3.227087e-01 4.157940e-02 1.748280e+00;
3.288194e-01 4.337098e-02 1.787504e+00;
3.349242e-01 4.517180e-02 1.826609e+00;
3.405452e-01 4.695420e-02 1.863108e+00;
3.451688e-01 4.868718e-02 1.894332e+00;
3.482554e-01 5.033657e-02 1.917479e+00;
3.494153e-01 5.187611e-02 1.930529e+00;
3.489075e-01 5.332218e-02 1.934819e+00;
3.471746e-01 5.470603e-02 1.932650e+00;
3.446705e-01 5.606335e-02 1.926395e+00;
3.418483e-01 5.743393e-02 1.918437e+00;
3.390240e-01 5.885107e-02 1.910430e+00;
3.359926e-01 6.030809e-02 1.901224e+00;
3.324276e-01 6.178644e-02 1.889000e+00;
3.280157e-01 6.326570e-02 1.871996e+00;
3.224637e-01 6.472352e-02 1.848545e+00;
3.156225e-01 6.614749e-02 1.817792e+00;
3.078201e-01 6.757256e-02 1.781627e+00;
2.994771e-01 6.904928e-02 1.742514e+00;
2.909776e-01 7.063280e-02 1.702749e+00;
2.826646e-01 7.238339e-02 1.664439e+00;
2.747962e-01 7.435960e-02 1.629207e+00;
2.674312e-01 7.659383e-02 1.597360e+00;
2.605847e-01 7.911436e-02 1.568896e+00;
2.542749e-01 8.195345e-02 1.543823e+00;
2.485254e-01 8.514816e-02 1.522157e+00;
2.433039e-01 8.872657e-02 1.503611e+00;
2.383414e-01 9.266008e-02 1.486673e+00;
2.333253e-01 9.689723e-02 1.469595e+00;
2.279619e-01 1.013746e-01 1.450709e+00;
2.219781e-01 1.060145e-01 1.428440e+00;
2.151735e-01 1.107377e-01 1.401587e+00;
2.075619e-01 1.155111e-01 1.370094e+00;
1.992183e-01 1.203122e-01 1.334220e+00;
1.902290e-01 1.251161e-01 1.294275e+00;
1.806905e-01 1.298957e-01 1.250610e+00;
1.707154e-01 1.346299e-01 1.203696e+00;
1.604471e-01 1.393309e-01 1.154316e+00;
1.500244e-01 1.440235e-01 1.103284e+00;
1.395705e-01 1.487372e-01 1.051347e+00;
1.291920e-01 1.535066e-01 9.991789e-01;
1.189859e-01 1.583644e-01 9.473958e-01;
1.090615e-01 1.633199e-01 8.966222e-01;
9.951424e-02 1.683761e-01 8.473981e-01;
9.041850e-02 1.735365e-01 8.001576e-01;
8.182895e-02 1.788048e-01 7.552379e-01;
7.376817e-02 1.841819e-01 7.127879e-01;
6.619477e-02 1.896559e-01 6.725198e-01;
5.906380e-02 1.952101e-01 6.340976e-01;
5.234242e-02 2.008259e-01 5.972433e-01;
4.600865e-02 2.064828e-01 5.617313e-01;
4.006154e-02 2.121826e-01 5.274921e-01;
3.454373e-02 2.180279e-01 4.948809e-01;
2.949091e-02 2.241586e-01 4.642586e-01;
2.492140e-02 2.307302e-01 4.358841e-01;
2.083981e-02 2.379160e-01 4.099313e-01;
1.723591e-02 2.458706e-01 3.864261e-01;
1.407924e-02 2.546023e-01 3.650566e-01;
1.134516e-02 2.640760e-01 3.454812e-01;
9.019658e-03 2.742490e-01 3.274095e-01;
7.097731e-03 2.850680e-01 3.105939e-01;
5.571145e-03 2.964837e-01 2.948102e-01;
4.394566e-03 3.085010e-01 2.798194e-01;
3.516303e-03 3.211393e-01 2.654100e-01;
2.887638e-03 3.344175e-01 2.514084e-01;
2.461588e-03 3.483536e-01 2.376753e-01;
2.206348e-03 3.629601e-01 2.241211e-01;
2.149559e-03 3.782275e-01 2.107484e-01;
2.337091e-03 3.941359e-01 1.975839e-01;
2.818931e-03 4.106582e-01 1.846574e-01;
3.649178e-03 4.277595e-01 1.720018e-01;
4.891359e-03 4.453993e-01 1.596918e-01;
6.629364e-03 4.635396e-01 1.479415e-01;
8.942902e-03 4.821376e-01 1.369428e-01;
1.190224e-02 5.011430e-01 1.268279e-01;
1.556989e-02 5.204972e-01 1.176796e-01;
1.997668e-02 5.401387e-01 1.094970e-01;
2.504698e-02 5.600208e-01 1.020943e-01;
3.067530e-02 5.800972e-01 9.527993e-02;
3.674999e-02 6.003172e-01 8.890075e-02;
4.315171e-02 6.206256e-01 8.283548e-02;
4.978584e-02 6.409398e-01 7.700982e-02;
5.668554e-02 6.610772e-01 7.144001e-02;
6.391651e-02 6.808134e-01 6.615436e-02;
7.154352e-02 6.999044e-01 6.117199e-02;
7.962917e-02 7.180890e-01 5.650407e-02;
8.821473e-02 7.351593e-01 5.215121e-02;
9.726978e-02 7.511821e-01 4.809566e-02;
1.067504e-01 7.663143e-01 4.431720e-02;
1.166192e-01 7.807352e-01 4.079734e-02;
1.268468e-01 7.946448e-01 3.751912e-02;
1.374060e-01 8.082074e-01 3.446846e-02;
1.482471e-01 8.213817e-01 3.163764e-02;
1.593076e-01 8.340701e-01 2.901901e-02;
1.705181e-01 8.461711e-01 2.660364e-02;
1.818026e-01 8.575799e-01 2.438164e-02;
1.931090e-01 8.682408e-01 2.234097e-02;
2.045085e-01 8.783061e-01 2.046415e-02;
2.161166e-01 8.879907e-01 1.873456e-02;
2.280650e-01 8.975211e-01 1.713788e-02;
2.405015e-01 9.071347e-01 1.566174e-02;
2.535441e-01 9.169947e-01 1.429644e-02;
2.671300e-01 9.269295e-01 1.303702e-02;
2.811351e-01 9.366731e-01 1.187897e-02;
2.954164e-01 9.459482e-01 1.081725e-02;
3.098117e-01 9.544675e-01 9.846470e-03;
3.241678e-01 9.619834e-01 8.960687e-03;
3.384319e-01 9.684390e-01 8.152811e-03;
3.525786e-01 9.738289e-01 7.416025e-03;
3.665839e-01 9.781519e-01 6.744115e-03;
3.804244e-01 9.814106e-01 6.131421e-03;
3.940988e-01 9.836669e-01 5.572778e-03;
4.076972e-01 9.852081e-01 5.063463e-03;
4.213484e-01 9.863813e-01 4.599169e-03;
4.352003e-01 9.875357e-01 4.175971e-03;
4.494206e-01 9.890228e-01 3.790291e-03;
4.641616e-01 9.910811e-01 3.438952e-03;
4.794395e-01 9.934913e-01 3.119341e-03;
4.952180e-01 9.959172e-01 2.829038e-03;
5.114395e-01 9.980205e-01 2.565722e-03;
5.280233e-01 9.994608e-01 2.327186e-03;
5.448696e-01 9.999930e-01 2.111280e-03;
5.618898e-01 9.997557e-01 1.915766e-03;
5.790137e-01 9.989839e-01 1.738589e-03;
5.961882e-01 9.979123e-01 1.577920e-03;
6.133784e-01 9.967737e-01 1.432128e-03;
6.305897e-01 9.957356e-01 1.299781e-03;
6.479223e-01 9.947115e-01 1.179667e-03;
6.654866e-01 9.935534e-01 1.070694e-03;
6.833782e-01 9.921156e-01 9.718623e-04;
7.016774e-01 9.902549e-01 8.822531e-04;
7.204110e-01 9.878596e-01 8.010231e-04;
7.394495e-01 9.849324e-01 7.273884e-04;
7.586285e-01 9.815036e-01 6.606347e-04;
7.777885e-01 9.776035e-01 6.001146e-04;
7.967750e-01 9.732611e-01 5.452416e-04;
8.154530e-01 9.684764e-01 4.954847e-04;
8.337389e-01 9.631369e-01 4.503642e-04;
8.515493e-01 9.571062e-01 4.094455e-04;
8.687862e-01 9.502540e-01 3.723345e-04;
8.853376e-01 9.424569e-01 3.386739e-04;
9.011588e-01 9.336897e-01 3.081396e-04;
9.165278e-01 9.242893e-01 2.804370e-04;
9.318245e-01 9.146707e-01 2.552996e-04;
9.474524e-01 9.052333e-01 2.324859e-04;
9.638388e-01 8.963613e-01 2.117772e-04;
9.812596e-01 8.883069e-01 1.929758e-04;
9.992953e-01 8.808462e-01 1.759024e-04;
1.017343e+00 8.736445e-01 1.603947e-04;
1.034790e+00 8.663755e-01 1.463059e-04;
1.051011e+00 8.587203e-01 1.335031e-04;
1.065522e+00 8.504295e-01 1.218660e-04;
1.078421e+00 8.415047e-01 1.112857e-04;
1.089944e+00 8.320109e-01 1.016634e-04;
1.100320e+00 8.220154e-01 9.291003e-05;
1.109767e+00 8.115868e-01 8.494468e-05;
1.118438e+00 8.007874e-01 7.769425e-05;
1.126266e+00 7.896515e-01 7.109247e-05;
1.133138e+00 7.782053e-01 6.507936e-05;
1.138952e+00 7.664733e-01 5.960061e-05;
1.143620e+00 7.544785e-01 5.460706e-05;
1.147095e+00 7.422473e-01 5.005417e-05;
1.149464e+00 7.298229e-01 4.590157e-05;
1.150838e+00 7.172525e-01 4.211268e-05;
1.151326e+00 7.045818e-01 3.865437e-05;
1.151033e+00 6.918553e-01 3.549661e-05;
1.150002e+00 6.791009e-01 3.261220e-05;
1.148061e+00 6.662846e-01 2.997643e-05;
1.144998e+00 6.533595e-01 2.756693e-05;
1.140622e+00 6.402807e-01 2.536339e-05;
1.134757e+00 6.270066e-01 2.334738e-05;
1.127298e+00 6.135148e-01 2.150221e-05;
1.118342e+00 5.998494e-01 1.981268e-05;
1.108033e+00 5.860682e-01 1.826500e-05;
1.096515e+00 5.722261e-01 1.684667e-05;
1.083928e+00 5.583746e-01 1.554631e-05;
1.070387e+00 5.445535e-01 1.435360e-05;
1.055934e+00 5.307673e-01 1.325915e-05;
1.040592e+00 5.170130e-01 1.225443e-05;
1.024385e+00 5.032889e-01 1.133169e-05;
1.007344e+00 4.895950e-01 1.048387e-05;
9.895268e-01 4.759442e-01 0.000000e+00;
9.711213e-01 4.623958e-01 0.000000e+00;
9.523257e-01 4.490154e-01 0.000000e+00;
9.333248e-01 4.358622e-01 0.000000e+00;
9.142877e-01 4.229897e-01 0.000000e+00;
8.952798e-01 4.104152e-01 0.000000e+00;
8.760157e-01 3.980356e-01 0.000000e+00;
8.561607e-01 3.857300e-01 0.000000e+00;
8.354235e-01 3.733907e-01 0.000000e+00;
8.135565e-01 3.609245e-01 0.000000e+00;
7.904565e-01 3.482860e-01 0.000000e+00;
7.664364e-01 3.355702e-01 0.000000e+00;
7.418777e-01 3.228963e-01 0.000000e+00;
7.171219e-01 3.103704e-01 0.000000e+00;
6.924717e-01 2.980865e-01 0.000000e+00;
6.681600e-01 2.861160e-01 0.000000e+00;
6.442697e-01 2.744822e-01 0.000000e+00;
6.208450e-01 2.631953e-01 0.000000e+00;
5.979243e-01 2.522628e-01 0.000000e+00;
5.755410e-01 2.416902e-01 0.000000e+00;
5.537296e-01 2.314809e-01 0.000000e+00;
5.325412e-01 2.216378e-01 0.000000e+00;
5.120218e-01 2.121622e-01 0.000000e+00;
4.922070e-01 2.030542e-01 0.000000e+00;
4.731224e-01 1.943124e-01 0.000000e+00;
4.547417e-01 1.859227e-01 0.000000e+00;
4.368719e-01 1.778274e-01 0.000000e+00;
4.193121e-01 1.699654e-01 0.000000e+00;
4.018980e-01 1.622841e-01 0.000000e+00;
3.844986e-01 1.547397e-01 0.000000e+00;
3.670592e-01 1.473081e-01 0.000000e+00;
3.497167e-01 1.400169e-01 0.000000e+00;
3.326305e-01 1.329013e-01 0.000000e+00;
3.159341e-01 1.259913e-01 0.000000e+00;
2.997374e-01 1.193120e-01 0.000000e+00;
2.841189e-01 1.128820e-01 0.000000e+00;
2.691053e-01 1.067113e-01 0.000000e+00;
2.547077e-01 1.008052e-01 0.000000e+00;
2.409319e-01 9.516653e-02 0.000000e+00;
2.277792e-01 8.979594e-02 0.000000e+00;
2.152431e-01 8.469044e-02 0.000000e+00;
2.033010e-01 7.984009e-02 0.000000e+00;
1.919276e-01 7.523372e-02 0.000000e+00;
1.810987e-01 7.086061e-02 0.000000e+00;
1.707914e-01 6.671045e-02 0.000000e+00;
1.609842e-01 6.277360e-02 0.000000e+00;
1.516577e-01 5.904179e-02 0.000000e+00;
1.427936e-01 5.550703e-02 0.000000e+00;
1.343737e-01 5.216139e-02 0.000000e+00;
1.263808e-01 4.899699e-02 0.000000e+00;
1.187979e-01 4.600578e-02 0.000000e+00;
1.116088e-01 4.317885e-02 0.000000e+00;
1.047975e-01 4.050755e-02 0.000000e+00;
9.834835e-02 3.798376e-02 0.000000e+00;
9.224597e-02 3.559982e-02 0.000000e+00;
8.647506e-02 3.334856e-02 0.000000e+00;
8.101986e-02 3.122332e-02 0.000000e+00;
7.586514e-02 2.921780e-02 0.000000e+00;
7.099633e-02 2.732601e-02 0.000000e+00;
6.639960e-02 2.554223e-02 0.000000e+00;
6.206225e-02 2.386121e-02 0.000000e+00;
5.797409e-02 2.227859e-02 0.000000e+00;
5.412533e-02 2.079020e-02 0.000000e+00;
5.050600e-02 1.939185e-02 0.000000e+00;
4.710606e-02 1.807939e-02 0.000000e+00;
4.391411e-02 1.684817e-02 0.000000e+00;
4.091411e-02 1.569188e-02 0.000000e+00;
3.809067e-02 1.460446e-02 0.000000e+00;
3.543034e-02 1.358062e-02 0.000000e+00;
3.292138e-02 1.261573e-02 0.000000e+00;
3.055672e-02 1.170696e-02 0.000000e+00;
2.834146e-02 1.085608e-02 0.000000e+00;
2.628033e-02 1.006476e-02 0.000000e+00;
2.437465e-02 9.333376e-03 0.000000e+00;
2.262306e-02 8.661284e-03 0.000000e+00;
2.101935e-02 8.046048e-03 0.000000e+00;
1.954647e-02 7.481130e-03 0.000000e+00;
1.818727e-02 6.959987e-03 0.000000e+00;
1.692727e-02 6.477070e-03 0.000000e+00;
1.575417e-02 6.027677e-03 0.000000e+00;
1.465854e-02 5.608169e-03 0.000000e+00;
1.363571e-02 5.216691e-03 0.000000e+00;
1.268205e-02 4.851785e-03 0.000000e+00;
1.179394e-02 4.512008e-03 0.000000e+00;
1.096778e-02 4.195941e-03 0.000000e+00;
1.019964e-02 3.902057e-03 0.000000e+00;
9.484317e-03 3.628371e-03 0.000000e+00;
8.816851e-03 3.373005e-03 0.000000e+00;
8.192921e-03 3.134315e-03 0.000000e+00;
7.608750e-03 2.910864e-03 0.000000e+00;
7.061391e-03 2.701528e-03 0.000000e+00;
6.549509e-03 2.505796e-03 0.000000e+00;
6.071970e-03 2.323231e-03 0.000000e+00;
5.627476e-03 2.153333e-03 0.000000e+00;
5.214608e-03 1.995557e-03 0.000000e+00;
4.831848e-03 1.849316e-03 0.000000e+00;
4.477579e-03 1.713976e-03 0.000000e+00;
4.150166e-03 1.588899e-03 0.000000e+00;
3.847988e-03 1.473453e-03 0.000000e+00;
3.569452e-03 1.367022e-03 0.000000e+00;
3.312857e-03 1.268954e-03 0.000000e+00;
3.076022e-03 1.178421e-03 0.000000e+00;
2.856894e-03 1.094644e-03 0.000000e+00;
2.653681e-03 1.016943e-03 0.000000e+00;
2.464821e-03 9.447269e-04 0.000000e+00;
2.289060e-03 8.775171e-04 0.000000e+00;
2.125694e-03 8.150438e-04 0.000000e+00;
1.974121e-03 7.570755e-04 0.000000e+00;
1.833723e-03 7.033755e-04 0.000000e+00;
1.703876e-03 6.537050e-04 0.000000e+00;
1.583904e-03 6.078048e-04 0.000000e+00;
1.472939e-03 5.653435e-04 0.000000e+00;
1.370151e-03 5.260046e-04 0.000000e+00;
1.274803e-03 4.895061e-04 0.000000e+00;
1.186238e-03 4.555970e-04 0.000000e+00;
1.103871e-03 4.240548e-04 0.000000e+00;
1.027194e-03 3.946860e-04 0.000000e+00;
9.557493e-04 3.673178e-04 0.000000e+00;
8.891262e-04 3.417941e-04 0.000000e+00;
8.269535e-04 3.179738e-04 0.000000e+00;
7.689351e-04 2.957441e-04 0.000000e+00;
7.149425e-04 2.750558e-04 0.000000e+00;
6.648590e-04 2.558640e-04 0.000000e+00;
6.185421e-04 2.381142e-04 0.000000e+00;
5.758303e-04 2.217445e-04 0.000000e+00;
5.365046e-04 2.066711e-04 0.000000e+00;
5.001842e-04 1.927474e-04 0.000000e+00;
4.665005e-04 1.798315e-04 0.000000e+00;
4.351386e-04 1.678023e-04 0.000000e+00;
4.058303e-04 1.565566e-04 0.000000e+00;
3.783733e-04 1.460168e-04 0.000000e+00;
3.526892e-04 1.361535e-04 0.000000e+00;
3.287199e-04 1.269451e-04 0.000000e+00;
3.063998e-04 1.183671e-04 0.000000e+00;
2.856577e-04 1.103928e-04 0.000000e+00;
2.664108e-04 1.029908e-04 0.000000e+00;
2.485462e-04 9.611836e-05 0.000000e+00;
2.319529e-04 8.973323e-05 0.000000e+00;
2.165300e-04 8.379694e-05 0.000000e+00;
2.021853e-04 7.827442e-05 0.000000e+00;
1.888338e-04 7.313312e-05 0.000000e+00;
1.763935e-04 6.834142e-05 0.000000e+00;
1.647895e-04 6.387035e-05 0.000000e+00;
1.539542e-04 5.969389e-05 0.000000e+00;
1.438270e-04 5.578862e-05 0.000000e+00;
1.343572e-04 5.213509e-05 0.000000e+00;
1.255141e-04 4.872179e-05 0.000000e+00;
1.172706e-04 4.553845e-05 0.000000e+00;
1.095983e-04 4.257443e-05 0.000000e+00;
1.024685e-04 3.981884e-05 0.000000e+00;
9.584715e-05 3.725877e-05 0.000000e+00;
8.968316e-05 3.487467e-05 0.000000e+00;
8.392734e-05 3.264765e-05 0.000000e+00;
7.853708e-05 3.056140e-05 0.000000e+00;
7.347551e-05 2.860175e-05 0.000000e+00;
6.871576e-05 2.675841e-05 0.000000e+00;
6.425257e-05 2.502943e-05 0.000000e+00;
6.008292e-05 2.341373e-05 0.000000e+00;
5.620098e-05 2.190914e-05 0.000000e+00;
5.259870e-05 2.051259e-05 0.000000e+00;
4.926279e-05 1.921902e-05 0.000000e+00;
4.616623e-05 1.801796e-05 0.000000e+00;
4.328212e-05 1.689899e-05 0.000000e+00;
4.058715e-05 1.585309e-05 0.000000e+00;
3.806114e-05 1.487243e-05 0.000000e+00]
# CIE 2006 10° observer XYZ CMFs. For further information
# see comment section for CIE 2006 2° observer XYZ CMFs
# Transformed from the CIE (2006) 2° LMS cone fundamentals[*]
# [*] http://cvrl.ioo.ucl.ac.uk/database/text/cienewxyz/cie2012xyz10.htm
"""
    colormatch(::Type{CIE2006_10_CMF}, wavelen::Real)

Evaluate the CIE 2006 10° observer XYZ color-matching functions at
wavelength `wavelen` by interpolating `cie2006_10deg_xyz_cmf_table`,
which is tabulated starting at 380.0 in steps of 1.0 (presumably
nanometers — consistent with the other CMF tables in this file).
"""
function colormatch(::Type{CIE2006_10_CMF}, wavelen::Real)
    return interpolate_table(cie2006_10deg_xyz_cmf_table, 380.0, 1.0, wavelen)
end
const cie2006_10deg_xyz_cmf_table=
[0.000000e+00 0.000000e+00 0.000000e+00;
0.000000e+00 0.000000e+00 0.000000e+00;
0.000000e+00 0.000000e+00 0.000000e+00;
0.000000e+00 0.000000e+00 0.000000e+00;
0.000000e+00 0.000000e+00 0.000000e+00;
0.000000e+00 0.000000e+00 0.000000e+00;
0.000000e+00 0.000000e+00 0.000000e+00;
0.000000e+00 0.000000e+00 0.000000e+00;
0.000000e+00 0.000000e+00 0.000000e+00;
0.000000e+00 0.000000e+00 0.000000e+00;
2.952420e-03 4.076779e-04 1.318752e-02;
3.577275e-03 4.977769e-04 1.597879e-02;
4.332146e-03 6.064754e-04 1.935758e-02;
5.241609e-03 7.370040e-04 2.343758e-02;
6.333902e-03 8.929388e-04 2.835021e-02;
7.641137e-03 1.078166e-03 3.424588e-02;
9.199401e-03 1.296816e-03 4.129467e-02;
1.104869e-02 1.553159e-03 4.968641e-02;
1.323262e-02 1.851463e-03 5.962964e-02;
1.579791e-02 2.195795e-03 7.134926e-02;
1.879338e-02 2.589775e-03 8.508254e-02;
2.226949e-02 3.036799e-03 1.010753e-01;
2.627978e-02 3.541926e-03 1.195838e-01;
3.087862e-02 4.111422e-03 1.408647e-01;
3.611890e-02 4.752618e-03 1.651644e-01;
4.204986e-02 5.474207e-03 1.927065e-01;
4.871256e-02 6.285034e-03 2.236782e-01;
5.612868e-02 7.188068e-03 2.582109e-01;
6.429866e-02 8.181786e-03 2.963632e-01;
7.319818e-02 9.260417e-03 3.381018e-01;
8.277331e-02 1.041303e-02 3.832822e-01;
9.295327e-02 1.162642e-02 4.316884e-01;
1.037137e-01 1.289884e-02 4.832440e-01;
1.150520e-01 1.423442e-02 5.379345e-01;
1.269771e-01 1.564080e-02 5.957740e-01;
1.395127e-01 1.712968e-02 6.568187e-01;
1.526661e-01 1.871265e-02 7.210459e-01;
1.663054e-01 2.038394e-02 7.878635e-01;
1.802197e-01 2.212935e-02 8.563391e-01;
1.941448e-01 2.392985e-02 9.253017e-01;
2.077647e-01 2.576133e-02 9.933444e-01;
2.207911e-01 2.760156e-02 1.059178e+00;
2.332355e-01 2.945513e-02 1.122832e+00;
2.452462e-01 3.133884e-02 1.184947e+00;
2.570397e-01 3.327575e-02 1.246476e+00;
2.688989e-01 3.529554e-02 1.308674e+00;
2.810677e-01 3.742705e-02 1.372628e+00;
2.933967e-01 3.967137e-02 1.437661e+00;
3.055933e-01 4.201998e-02 1.502449e+00;
3.173165e-01 4.446166e-02 1.565456e+00;
3.281798e-01 4.698226e-02 1.624940e+00;
3.378678e-01 4.956742e-02 1.679488e+00;
3.465097e-01 5.221219e-02 1.729668e+00;
3.543953e-01 5.491387e-02 1.776755e+00;
3.618655e-01 5.766919e-02 1.822228e+00;
3.693084e-01 6.047429e-02 1.867751e+00;
3.770107e-01 6.332195e-02 1.914504e+00;
3.846850e-01 6.619271e-02 1.961055e+00;
3.918591e-01 6.906185e-02 2.005136e+00;
3.980192e-01 7.190190e-02 2.044296e+00;
4.026189e-01 7.468288e-02 2.075946e+00;
4.052637e-01 7.738452e-02 2.098231e+00;
4.062482e-01 8.003601e-02 2.112591e+00;
4.060660e-01 8.268524e-02 2.121427e+00;
4.052283e-01 8.538745e-02 2.127239e+00;
4.042529e-01 8.820537e-02 2.132574e+00;
4.034808e-01 9.118925e-02 2.139093e+00;
4.025362e-01 9.431041e-02 2.144815e+00;
4.008675e-01 9.751346e-02 2.146832e+00;
3.979327e-01 1.007349e-01 2.142250e+00;
3.932139e-01 1.039030e-01 2.128264e+00;
3.864108e-01 1.069639e-01 2.103205e+00;
3.779513e-01 1.099676e-01 2.069388e+00;
3.684176e-01 1.129992e-01 2.030030e+00;
3.583473e-01 1.161541e-01 1.988178e+00;
3.482214e-01 1.195389e-01 1.946651e+00;
3.383830e-01 1.232503e-01 1.907521e+00;
3.288309e-01 1.273047e-01 1.870689e+00;
3.194977e-01 1.316964e-01 1.835578e+00;
3.103345e-01 1.364178e-01 1.801657e+00;
3.013112e-01 1.414586e-01 1.768440e+00;
2.923754e-01 1.468003e-01 1.735338e+00;
2.833273e-01 1.524002e-01 1.701254e+00;
2.739463e-01 1.582021e-01 1.665053e+00;
2.640352e-01 1.641400e-01 1.625712e+00;
2.534221e-01 1.701373e-01 1.582342e+00;
2.420135e-01 1.761233e-01 1.534439e+00;
2.299346e-01 1.820896e-01 1.482544e+00;
2.173617e-01 1.880463e-01 1.427438e+00;
2.044672e-01 1.940065e-01 1.369876e+00;
1.914176e-01 1.999859e-01 1.310576e+00;
1.783672e-01 2.060054e-01 1.250226e+00;
1.654407e-01 2.120981e-01 1.189511e+00;
1.527391e-01 2.183041e-01 1.129050e+00;
1.403439e-01 2.246686e-01 1.069379e+00;
1.283167e-01 2.312426e-01 1.010952e+00;
1.167124e-01 2.380741e-01 9.541809e-01;
1.056121e-01 2.451798e-01 8.995253e-01;
9.508569e-02 2.525682e-01 8.473720e-01;
8.518206e-02 2.602479e-01 7.980093e-01;
7.593120e-02 2.682271e-01 7.516389e-01;
6.733159e-02 2.765005e-01 7.082645e-01;
5.932018e-02 2.850035e-01 6.673867e-01;
5.184106e-02 2.936475e-01 6.284798e-01;
4.486119e-02 3.023319e-01 5.911174e-01;
3.836770e-02 3.109438e-01 5.549619e-01;
3.237296e-02 3.194105e-01 5.198843e-01;
2.692095e-02 3.278683e-01 4.862772e-01;
2.204070e-02 3.365263e-01 4.545497e-01;
1.773951e-02 3.456176e-01 4.249955e-01;
1.400745e-02 3.554018e-01 3.978114e-01;
1.082291e-02 3.660893e-01 3.730218e-01;
8.168996e-03 3.775857e-01 3.502618e-01;
6.044623e-03 3.896960e-01 3.291407e-01;
4.462638e-03 4.021947e-01 3.093356e-01;
3.446810e-03 4.148227e-01 2.905816e-01;
3.009513e-03 4.273539e-01 2.726773e-01;
3.090744e-03 4.398206e-01 2.555143e-01;
3.611221e-03 4.523360e-01 2.390188e-01;
4.491435e-03 4.650298e-01 2.231335e-01;
5.652072e-03 4.780482e-01 2.078158e-01;
7.035322e-03 4.915173e-01 1.930407e-01;
8.669631e-03 5.054224e-01 1.788089e-01;
1.060755e-02 5.197057e-01 1.651287e-01;
1.290468e-02 5.343012e-01 1.520103e-01;
1.561956e-02 5.491344e-01 1.394643e-01;
1.881640e-02 5.641302e-01 1.275353e-01;
2.256923e-02 5.792416e-01 1.163771e-01;
2.694456e-02 5.944264e-01 1.061161e-01;
3.199910e-02 6.096388e-01 9.682266e-02;
3.778185e-02 6.248296e-01 8.852389e-02;
4.430635e-02 6.399656e-01 8.118263e-02;
5.146516e-02 6.550943e-01 7.463132e-02;
5.912224e-02 6.702903e-01 6.870644e-02;
6.714220e-02 6.856375e-01 6.327834e-02;
7.538941e-02 7.012292e-01 5.824484e-02;
8.376697e-02 7.171103e-01 5.353812e-02;
9.233581e-02 7.330917e-01 4.914863e-02;
1.011940e-01 7.489041e-01 4.507511e-02;
1.104362e-01 7.642530e-01 4.131175e-02;
1.201511e-01 7.788199e-01 3.784916e-02;
1.303960e-01 7.923410e-01 3.467234e-02;
1.411310e-01 8.048510e-01 3.175471e-02;
1.522944e-01 8.164747e-01 2.907029e-02;
1.638288e-01 8.273520e-01 2.659651e-02;
1.756832e-01 8.376358e-01 2.431375e-02;
1.878114e-01 8.474653e-01 2.220677e-02;
2.001621e-01 8.568868e-01 2.026852e-02;
2.126822e-01 8.659242e-01 1.849246e-02;
2.253199e-01 8.746041e-01 1.687084e-02;
2.380254e-01 8.829552e-01 1.539505e-02;
2.507787e-01 8.910274e-01 1.405450e-02;
2.636778e-01 8.989495e-01 1.283354e-02;
2.768607e-01 9.068753e-01 1.171754e-02;
2.904792e-01 9.149652e-01 1.069415e-02;
3.046991e-01 9.233858e-01 9.753000e-03;
3.196485e-01 9.322325e-01 8.886096e-03;
3.352447e-01 9.412862e-01 8.089323e-03;
3.513290e-01 9.502378e-01 7.359131e-03;
3.677148e-01 9.587647e-01 6.691736e-03;
3.841856e-01 9.665325e-01 6.083223e-03;
4.005312e-01 9.732504e-01 5.529423e-03;
4.166669e-01 9.788415e-01 5.025504e-03;
4.325420e-01 9.832867e-01 4.566879e-03;
4.481063e-01 9.865720e-01 4.149405e-03;
4.633109e-01 9.886887e-01 3.769336e-03;
4.781440e-01 9.897056e-01 3.423302e-03;
4.927483e-01 9.899849e-01 3.108313e-03;
5.073315e-01 9.899624e-01 2.821650e-03;
5.221315e-01 9.900731e-01 2.560830e-03;
5.374170e-01 9.907500e-01 2.323578e-03;
5.534217e-01 9.922826e-01 2.107847e-03;
5.701242e-01 9.943837e-01 1.911867e-03;
5.874093e-01 9.966221e-01 1.734006e-03;
6.051269e-01 9.985649e-01 1.572736e-03;
6.230892e-01 9.997775e-01 1.426627e-03;
6.410999e-01 9.999440e-01 1.294325e-03;
6.590659e-01 9.992200e-01 1.174475e-03;
6.769436e-01 9.978793e-01 1.065842e-03;
6.947143e-01 9.961934e-01 9.673215e-04;
7.123849e-01 9.944304e-01 8.779264e-04;
7.299978e-01 9.927831e-01 7.967847e-04;
7.476478e-01 9.911578e-01 7.231502e-04;
7.654250e-01 9.893925e-01 6.563501e-04;
7.834009e-01 9.873288e-01 5.957678e-04;
8.016277e-01 9.848127e-01 5.408385e-04;
8.201041e-01 9.817253e-01 4.910441e-04;
8.386843e-01 9.780714e-01 4.459046e-04;
8.571936e-01 9.738860e-01 4.049826e-04;
8.754652e-01 9.692028e-01 3.678818e-04;
8.933408e-01 9.640545e-01 3.342429e-04;
9.106772e-01 9.584409e-01 3.037407e-04;
9.273554e-01 9.522379e-01 2.760809e-04;
9.432502e-01 9.452968e-01 2.509970e-04;
9.582244e-01 9.374773e-01 2.282474e-04;
9.721304e-01 9.286495e-01 2.076129e-04;
9.849237e-01 9.187953e-01 1.888948e-04;
9.970067e-01 9.083014e-01 1.719127e-04;
1.008907e+00 8.976352e-01 1.565030e-04;
1.021163e+00 8.872401e-01 1.425177e-04;
1.034327e+00 8.775360e-01 1.298230e-04;
1.048753e+00 8.687920e-01 1.182974e-04;
1.063937e+00 8.607474e-01 1.078310e-04;
1.079166e+00 8.530233e-01 9.832455e-05;
1.093723e+00 8.452535e-01 8.968787e-05;
1.106886e+00 8.370838e-01 8.183954e-05;
1.118106e+00 8.282409e-01 7.470582e-05;
1.127493e+00 8.187320e-01 6.821991e-05;
1.135317e+00 8.086352e-01 6.232132e-05;
1.141838e+00 7.980296e-01 5.695534e-05;
1.147304e+00 7.869950e-01 5.207245e-05;
1.151897e+00 7.756040e-01 4.762781e-05;
1.155582e+00 7.638996e-01 4.358082e-05;
1.158284e+00 7.519157e-01 3.989468e-05;
1.159934e+00 7.396832e-01 3.653612e-05;
1.160477e+00 7.272309e-01 3.347499e-05;
1.159890e+00 7.145878e-01 3.068400e-05;
1.158259e+00 7.017926e-01 2.813839e-05;
1.155692e+00 6.888866e-01 2.581574e-05;
1.152293e+00 6.759103e-01 2.369574e-05;
1.148163e+00 6.629035e-01 2.175998e-05;
1.143345e+00 6.498911e-01 1.999179e-05;
1.137685e+00 6.368410e-01 1.837603e-05;
1.130993e+00 6.237092e-01 1.689896e-05;
1.123097e+00 6.104541e-01 1.554815e-05;
1.113846e+00 5.970375e-01 1.431231e-05;
1.103152e+00 5.834395e-01 1.318119e-05;
1.091121e+00 5.697044e-01 1.214548e-05;
1.077902e+00 5.558892e-01 1.119673e-05;
1.063644e+00 5.420475e-01 1.032727e-05;
1.048485e+00 5.282296e-01 9.530130e-06;
1.032546e+00 5.144746e-01 8.798979e-06;
1.015870e+00 5.007881e-01 8.128065e-06;
9.984859e-01 4.871687e-01 7.512160e-06;
9.804227e-01 4.736160e-01 6.946506e-06;
9.617111e-01 4.601308e-01 6.426776e-06;
9.424119e-01 4.467260e-01 0.000000e+00;
9.227049e-01 4.334589e-01 0.000000e+00;
9.027804e-01 4.203919e-01 0.000000e+00;
8.828123e-01 4.075810e-01 0.000000e+00;
8.629581e-01 3.950755e-01 0.000000e+00;
8.432731e-01 3.828894e-01 0.000000e+00;
8.234742e-01 3.709190e-01 0.000000e+00;
8.032342e-01 3.590447e-01 0.000000e+00;
7.822715e-01 3.471615e-01 0.000000e+00;
7.603498e-01 3.351794e-01 0.000000e+00;
7.373739e-01 3.230562e-01 0.000000e+00;
7.136470e-01 3.108859e-01 0.000000e+00;
6.895336e-01 2.987840e-01 0.000000e+00;
6.653567e-01 2.868527e-01 0.000000e+00;
6.413984e-01 2.751807e-01 0.000000e+00;
6.178723e-01 2.638343e-01 0.000000e+00;
5.948484e-01 2.528330e-01 0.000000e+00;
5.723600e-01 2.421835e-01 0.000000e+00;
5.504353e-01 2.318904e-01 0.000000e+00;
5.290979e-01 2.219564e-01 0.000000e+00;
5.083728e-01 2.123826e-01 0.000000e+00;
4.883006e-01 2.031698e-01 0.000000e+00;
4.689171e-01 1.943179e-01 0.000000e+00;
4.502486e-01 1.858250e-01 0.000000e+00;
4.323126e-01 1.776882e-01 0.000000e+00;
4.150790e-01 1.698926e-01 0.000000e+00;
3.983657e-01 1.623822e-01 0.000000e+00;
3.819846e-01 1.550986e-01 0.000000e+00;
3.657821e-01 1.479918e-01 0.000000e+00;
3.496358e-01 1.410203e-01 0.000000e+00;
3.334937e-01 1.341614e-01 0.000000e+00;
3.174776e-01 1.274401e-01 0.000000e+00;
3.017298e-01 1.208887e-01 0.000000e+00;
2.863684e-01 1.145345e-01 0.000000e+00;
2.714900e-01 1.083996e-01 0.000000e+00;
2.571632e-01 1.025007e-01 0.000000e+00;
2.434102e-01 9.684588e-02 0.000000e+00;
2.302389e-01 9.143944e-02 0.000000e+00;
2.176527e-01 8.628318e-02 0.000000e+00;
2.056507e-01 8.137687e-02 0.000000e+00;
1.942251e-01 7.671708e-02 0.000000e+00;
1.833530e-01 7.229404e-02 0.000000e+00;
1.730097e-01 6.809696e-02 0.000000e+00;
1.631716e-01 6.411549e-02 0.000000e+00;
1.538163e-01 6.033976e-02 0.000000e+00;
1.449230e-01 5.676054e-02 0.000000e+00;
1.364729e-01 5.336992e-02 0.000000e+00;
1.284483e-01 5.016027e-02 0.000000e+00;
1.208320e-01 4.712405e-02 0.000000e+00;
1.136072e-01 4.425383e-02 0.000000e+00;
1.067579e-01 4.154205e-02 0.000000e+00;
1.002685e-01 3.898042e-02 0.000000e+00;
9.412394e-02 3.656091e-02 0.000000e+00;
8.830929e-02 3.427597e-02 0.000000e+00;
8.281010e-02 3.211852e-02 0.000000e+00;
7.761208e-02 3.008192e-02 0.000000e+00;
7.270064e-02 2.816001e-02 0.000000e+00;
6.806167e-02 2.634698e-02 0.000000e+00;
6.368176e-02 2.463731e-02 0.000000e+00;
5.954815e-02 2.302574e-02 0.000000e+00;
5.564917e-02 2.150743e-02 0.000000e+00;
5.197543e-02 2.007838e-02 0.000000e+00;
4.851788e-02 1.873474e-02 0.000000e+00;
4.526737e-02 1.747269e-02 0.000000e+00;
4.221473e-02 1.628841e-02 0.000000e+00;
3.934954e-02 1.517767e-02 0.000000e+00;
3.665730e-02 1.413473e-02 0.000000e+00;
3.412407e-02 1.315408e-02 0.000000e+00;
3.173768e-02 1.223092e-02 0.000000e+00;
2.948752e-02 1.136106e-02 0.000000e+00;
2.736717e-02 1.054190e-02 0.000000e+00;
2.538113e-02 9.775050e-03 0.000000e+00;
2.353356e-02 9.061962e-03 0.000000e+00;
2.182558e-02 8.402962e-03 0.000000e+00;
2.025590e-02 7.797457e-03 0.000000e+00;
1.881892e-02 7.243230e-03 0.000000e+00;
1.749930e-02 6.734381e-03 0.000000e+00;
1.628167e-02 6.265001e-03 0.000000e+00;
1.515301e-02 5.830085e-03 0.000000e+00;
1.410230e-02 5.425391e-03 0.000000e+00;
1.312106e-02 5.047634e-03 0.000000e+00;
1.220509e-02 4.695140e-03 0.000000e+00;
1.135114e-02 4.366592e-03 0.000000e+00;
1.055593e-02 4.060685e-03 0.000000e+00;
9.816228e-03 3.776140e-03 0.000000e+00;
9.128517e-03 3.511578e-03 0.000000e+00;
8.488116e-03 3.265211e-03 0.000000e+00;
7.890589e-03 3.035344e-03 0.000000e+00;
7.332061e-03 2.820496e-03 0.000000e+00;
6.809147e-03 2.619372e-03 0.000000e+00;
6.319204e-03 2.430960e-03 0.000000e+00;
5.861036e-03 2.254796e-03 0.000000e+00;
5.433624e-03 2.090489e-03 0.000000e+00;
5.035802e-03 1.937586e-03 0.000000e+00;
4.666298e-03 1.795595e-03 0.000000e+00;
4.323750e-03 1.663989e-03 0.000000e+00;
4.006709e-03 1.542195e-03 0.000000e+00;
3.713708e-03 1.429639e-03 0.000000e+00;
3.443294e-03 1.325752e-03 0.000000e+00;
3.194041e-03 1.229980e-03 0.000000e+00;
2.964424e-03 1.141734e-03 0.000000e+00;
2.752492e-03 1.060269e-03 0.000000e+00;
2.556406e-03 9.848854e-04 0.000000e+00;
2.374564e-03 9.149703e-04 0.000000e+00;
2.205568e-03 8.499903e-04 0.000000e+00;
2.048294e-03 7.895158e-04 0.000000e+00;
1.902113e-03 7.333038e-04 0.000000e+00;
1.766485e-03 6.811458e-04 0.000000e+00;
1.640857e-03 6.328287e-04 0.000000e+00;
1.524672e-03 5.881375e-04 0.000000e+00;
1.417322e-03 5.468389e-04 0.000000e+00;
1.318031e-03 5.086349e-04 0.000000e+00;
1.226059e-03 4.732403e-04 0.000000e+00;
1.140743e-03 4.404016e-04 0.000000e+00;
1.061495e-03 4.098928e-04 0.000000e+00;
9.877949e-04 3.815137e-04 0.000000e+00;
9.191847e-04 3.550902e-04 0.000000e+00;
8.552568e-04 3.304668e-04 0.000000e+00;
7.956433e-04 3.075030e-04 0.000000e+00;
7.400120e-04 2.860718e-04 0.000000e+00;
6.880980e-04 2.660718e-04 0.000000e+00;
6.397864e-04 2.474586e-04 0.000000e+00;
5.949726e-04 2.301919e-04 0.000000e+00;
5.535291e-04 2.142225e-04 0.000000e+00;
5.153113e-04 1.994949e-04 0.000000e+00;
4.801234e-04 1.859336e-04 0.000000e+00;
4.476245e-04 1.734067e-04 0.000000e+00;
4.174846e-04 1.617865e-04 0.000000e+00;
3.894221e-04 1.509641e-04 0.000000e+00;
3.631969e-04 1.408466e-04 0.000000e+00;
3.386279e-04 1.313642e-04 0.000000e+00;
3.156452e-04 1.224905e-04 0.000000e+00;
2.941966e-04 1.142060e-04 0.000000e+00;
2.742235e-04 1.064886e-04 0.000000e+00;
2.556624e-04 9.931439e-05 0.000000e+00;
2.384390e-04 9.265512e-05 0.000000e+00;
2.224525e-04 8.647225e-05 0.000000e+00;
2.076036e-04 8.072780e-05 0.000000e+00;
1.938018e-04 7.538716e-05 0.000000e+00;
1.809649e-04 7.041878e-05 0.000000e+00;
1.690167e-04 6.579338e-05 0.000000e+00;
1.578839e-04 6.148250e-05 0.000000e+00;
1.474993e-04 5.746008e-05 0.000000e+00;
1.378026e-04 5.370272e-05 0.000000e+00;
1.287394e-04 5.018934e-05 0.000000e+00;
1.202644e-04 4.690245e-05 0.000000e+00;
1.123502e-04 4.383167e-05 0.000000e+00;
1.049725e-04 4.096780e-05 0.000000e+00;
9.810596e-05 3.830123e-05 0.000000e+00;
9.172477e-05 3.582218e-05 0.000000e+00;
8.579861e-05 3.351903e-05 0.000000e+00;
8.028174e-05 3.137419e-05 0.000000e+00;
7.513013e-05 2.937068e-05 0.000000e+00;
7.030565e-05 2.749380e-05 0.000000e+00;
6.577532e-05 2.573083e-05 0.000000e+00;
6.151508e-05 2.407249e-05 0.000000e+00;
5.752025e-05 2.251704e-05 0.000000e+00;
5.378813e-05 2.106350e-05 0.000000e+00;
5.031350e-05 1.970991e-05 0.000000e+00;
4.708916e-05 1.845353e-05 0.000000e+00;
4.410322e-05 1.728979e-05 0.000000e+00;
4.133150e-05 1.620928e-05 0.000000e+00;
3.874992e-05 1.520262e-05 0.000000e+00;
3.633762e-05 1.426169e-05 0.000000e+00;
3.407653e-05 1.337946e-05 0.000000e+00]
| [
27,
456,
62,
30783,
29,
15,
198,
198,
397,
8709,
2099,
16477,
37,
886,
198,
397,
8709,
2099,
327,
10008,
1129,
3132,
62,
24187,
37,
1279,
25,
16477,
37,
886,
198,
397,
8709,
2099,
327,
10008,
46477,
62,
24187,
37,
1279,
25,
16477,
... | 1.839334 | 52,824 |
<reponame>giadasp/Psychometrics.jl
include("je_polyagamma_struct.jl")
include("je_polyagamma_optimize.jl")
"""
    joint_estimate_pg!(items, examinees, responses; kwargs...)

Jointly estimate item parameters and examinee latents from `responses`
using the Polya-Gamma joint-estimation model (`JointEstimationPolyaGammaModel`
+ `optimize`). Mutates `items` and `examinees` in place with the optimized
parameters/latents and refreshed estimates; returns `nothing`.

# Keywords
- `max_time::Int64 = 100`: accepted for interface compatibility
  (currently unused by this code path — used by the MCMC loop variant).
- `mcmc_iter::Int64 = 10`: number of MCMC iterations passed to the model.
- `x_tol_rel::Float64 = 0.001`: accepted for interface compatibility
  (currently unused by this code path).
- `item_sampling::Bool = false`, `examinee_sampling::Bool = false`:
  whether to sample item/examinee chains.
- `verbosity::Int64 = 0`: verbosity level forwarded to the model.
  (The original body referenced an undefined `verbosity` variable, which
  raised `UndefVarError`; it is now an explicit keyword.)
"""
function joint_estimate_pg!(
    items::Vector{<:AbstractItem},
    examinees::Vector{<:AbstractExaminee},
    responses::Vector{<:AbstractResponse};
    max_time::Int64 = 100,
    mcmc_iter::Int64 = 10,
    x_tol_rel::Float64 = 0.001,
    item_sampling::Bool = false,
    examinee_sampling::Bool = false,
    verbosity::Int64 = 0,
    kwargs...
)
    I = size(items, 1)
    N = size(examinees, 1)
    # From now on we work only on these copies; the algorithm is dependent
    # on the concrete types of the latents and parameters.
    response_matrix = get_response_matrix(responses, I, N)
    parameters = map(i -> copy(get_parameters(i)), items)
    latents = map(e -> copy(get_latents(e)), examinees)
    # Index the non-missing responses: examinees per item (n_index) and
    # items per examinee (i_index). Two independent loops so the code is
    # correct for any I/N — the previous single loop over 1:N left
    # n_index[i] undefined whenever I > N.
    n_index = Vector{Vector{Int64}}(undef, I)
    i_index = Vector{Vector{Int64}}(undef, N)
    for i = 1:I
        n_index[i] = findall(.!ismissing.(response_matrix[i, :]))
    end
    for n = 1:N
        i_index[n] = findall(.!ismissing.(response_matrix[:, n]))
    end
    responses_per_item = [Vector{Float64}(response_matrix[i, n_index[i]]) for i = 1:I]
    responses_per_examinee = [Vector{Float64}(response_matrix[i_index[n], n]) for n = 1:N]
    je_pg_model = JointEstimationPolyaGammaModel(
        parameters,
        latents,
        responses_per_item,
        responses_per_examinee,
        n_index,
        i_index,
        item_sampling,
        examinee_sampling,
        [Float64(mcmc_iter), Float64(mcmc_iter), Float64(verbosity)]
    )
    parameters, latents = optimize(je_pg_model)
    # Write the optimized latents/parameters back into the caller's vectors.
    # Separate loops (rather than the old `if n <= I` inside the examinee
    # loop) so all I items are updated even when I > N.
    for n = 1:N
        e = examinees[n]
        examinees[n] = Examinee(e.idx, e.id, latents[n])
    end
    for i = 1:I
        it = items[i]
        items[i] = Item(it.idx, it.id, parameters[i])
    end
    map(i -> update_estimate!(i), items)
    map(e -> update_estimate!(e), examinees)
    return nothing
end
# function joint_estimate_pg!(
# items::Vector{<:AbstractItem},
# examinees::Vector{<:AbstractExaminee},
# responses::Vector{<:AbstractResponse};
# max_time::Int64 = 100,
# mcmc_iter::Int64 = 10,
# x_tol_rel::Float64 = 0.001,
# item_sampling::Bool = false,
# examinee_sampling::Bool = false,
# kwargs...
# )
# responses_per_examinee = map(
# e -> sort(get_responses_by_examinee_id(e.id, responses), by = x -> x.item_idx),
# examinees,
# );
# items_idx_per_examinee = map(
# e -> sort(map(r -> items[r.item_idx].idx, responses_per_examinee[e.idx])),
# examinees,
# );
# responses_per_item = map(
# i -> sort(get_responses_by_item_id(i.id, responses), by = x -> x.examinee_idx),
# items,
# );
# examinees_idx_per_item = map(
# i -> sort(map(r -> examinees[r.examinee_idx].idx, responses_per_item[i.idx])),
# items,
# );
# map(
# i -> begin
# i.parameters.chain = [[i.parameters.a, i.parameters.b] for j = 1:1000]
# end,
# items,
# );
# stop = false
# old_pars = get_parameters_vals(items)
# start_time = time()
# iter = 1
# while !stop
# W = generate_w(
# items,
# map(i -> examinees[examinees_idx_per_item[i.idx]], items),
# )
# map(
# i -> mcmc_iter_pg!(
# i,
# examinees[examinees_idx_per_item[i.idx]],
# responses_per_item[i.idx],
# filter(w -> w.i_idx == i.idx, W);
# sampling = item_sampling,
# already_sorted = true,
# ),
# items,
# )
# map(
# e -> mcmc_iter_pg!(
# e,
# items[items_idx_per_examinee[e.idx]],
# responses_per_examinee[e.idx],
# filter(w -> w.e_idx == e.idx, W);
# sampling = examinee_sampling,
# already_sorted = true,
# ),
# examinees,
# )
# if (iter % 200) == 0
# #map(i -> update_estimate!(i), items);
# if any([
# check_iter(iter; max_iter = mcmc_iter),
# check_time(start_time; max_time = max_time)
# #check_x_tol_rel!(
# # items,
# # old_pars;
# # x_tol_rel = x_tol_rel)
# ])
# stop = true
# end
# end
# iter += 1
# end
# map(i -> update_estimate!(i), items);
# map(e -> update_estimate!(e), examinees);
# return nothing
# end | [
27,
7856,
261,
480,
29,
12397,
324,
5126,
14,
31923,
908,
10466,
13,
20362,
198,
17256,
7203,
18015,
62,
35428,
363,
321,
2611,
62,
7249,
13,
20362,
4943,
198,
17256,
7203,
18015,
62,
35428,
363,
321,
2611,
62,
40085,
1096,
13,
20362,... | 1.764164 | 2,930 |
<reponame>sloede/TriplotRecipes.jl
module TriplotRecipes
using PlotUtils,RecipesBase,TriplotBase
export tricontour,tricontour!,tripcolor,tripcolor!,dgtripcolor,dgtripcolor!,trimesh,trimesh!
# Append the elements of `b` to `a`, followed by a single NaN.
# The NaN acts as a break so that disjoint polylines plotted as one
# series are not visually connected.
function append_with_nan!(a,b)
    append!(a,b)
    push!(a,NaN)
end
# Plot recipe for a vector of TriplotBase contours: every polyline of a
# contour level is concatenated (NaN-separated) into one series. When the
# series color is `:auto` or a color gradient, `line_z` is set to the
# contour level so lines are colored by level.
@recipe function f(contours::Vector{TriplotBase.Contour{T}}) where {T}
    color = get(plotattributes, :seriescolor, :auto)
    set_line_z = (color == :auto || plot_color(color) isa ColorGradient)
    for c=contours
        x = T[]
        y = T[]
        z = T[]
        for polyline=c.polylines
            append_with_nan!(x,first.(polyline))
            append_with_nan!(y,last.(polyline))
            append!(z,fill(c.level,length(polyline)))
        end
        @series begin
            label := nothing
            if set_line_z
                line_z := z
            end
            x,y
        end
    end
end
# Contour plots of unstructured triangulated data (points x,y; values z;
# triangle connectivity t) at the given levels.
tricontour(x,y,z,t,levels;kw...) = RecipesBase.plot(TriplotBase.tricontour(x,y,z,t,levels);kw...)
tricontour!(x,y,z,t,levels;kw...) = RecipesBase.plot!(TriplotBase.tricontour(x,y,z,t,levels);kw...)
# Holder for pseudocolor-plot data on a triangulation.
struct TriPseudocolor{X,Y,Z,T} x::X; y::Y; z::Z; t::T; end
# Rasterize the triangulated data onto a px-by-py grid with `ncolors`
# discrete levels and display it as a heatmap; pixels outside the mesh
# are NaN (bg) and therefore left blank.
@recipe function f(p::TriPseudocolor;px=512,py=512,ncolors=256)
    cmap = range(extrema(p.z)...,length=ncolors)
    x = range(extrema(p.x)...,length=px)
    y = range(extrema(p.y)...,length=py)
    z = TriplotBase.tripcolor(p.x,p.y,p.z,p.t,cmap;bg=NaN,px=px,py=py)
    seriestype := :heatmap
    x,y,z'
end
tripcolor(x,y,z,t;kw...) = RecipesBase.plot(TriPseudocolor(x,y,z,t);kw...)
tripcolor!(x,y,z,t;kw...) = RecipesBase.plot!(TriPseudocolor(x,y,z,t);kw...)
# Same as TriPseudocolor but rasterized via TriplotBase.dgtripcolor
# (presumably discontinuous-Galerkin / per-element nodal data — see
# TriplotBase for the exact z layout).
struct DGTriPseudocolor{X,Y,Z,T} x::X; y::Y; z::Z; t::T; end
@recipe function f(p::DGTriPseudocolor;px=512,py=512,ncolors=256)
    cmap = range(extrema(p.z)...,length=ncolors)
    x = range(extrema(p.x)...,length=px)
    y = range(extrema(p.y)...,length=py)
    z = TriplotBase.dgtripcolor(p.x,p.y,p.z,p.t,cmap;bg=NaN,px=px,py=py)
    seriestype := :heatmap
    x,y,z'
end
dgtripcolor(x,y,z,t;kw...) = RecipesBase.plot(DGTriPseudocolor(x,y,z,t);kw...)
dgtripcolor!(x,y,z,t;kw...) = RecipesBase.plot!(DGTriPseudocolor(x,y,z,t);kw...)
# Wireframe of the triangulation: each column of t is a triangle, emitted
# as a closed (first vertex repeated) NaN-separated shape.
struct TriMesh{X,Y,T} x::X; y::Y; t::T; end
@recipe function f(m::TriMesh)
    x = Vector{eltype(m.x)}()
    y = Vector{eltype(m.y)}()
    for t=eachcol(m.t)
        append_with_nan!(x,[m.x[t];m.x[t[1]]])
        append_with_nan!(y,[m.y[t];m.y[t[1]]])
    end
    seriestype := :shape
    seriescolor --> RGB(0.7,1.0,0.8)
    label --> nothing
    x,y
end
trimesh(x,y,t;kw...) = RecipesBase.plot(TriMesh(x,y,t);kw...)
trimesh!(x,y,t;kw...) = RecipesBase.plot!(TriMesh(x,y,t);kw...)
end
| [
27,
7856,
261,
480,
29,
82,
5439,
18654,
14,
14824,
29487,
6690,
18636,
13,
20362,
198,
21412,
7563,
29487,
6690,
18636,
198,
198,
3500,
28114,
18274,
4487,
11,
6690,
18636,
14881,
11,
14824,
29487,
14881,
198,
198,
39344,
491,
291,
756... | 1.919037 | 1,371 |
using LinearAlgebra
using Random
using SparseArrays
using Test
using Jutils.Elements
using Jutils.Functions
using Jutils.Integration
using Jutils.Mesh
using Jutils.Transforms
using Jutils.Topologies
# Reference elements shared by the included test files:
# a 1D simplex (line) element and its 2D tensor-product (square) element.
const lineelt = Element(Simplex{1}(), 1)
const squareelt = Element(Tensor([Simplex{1}(), Simplex{1}()]), 1)
# Evaluate a function object at point `pt` on element `elt` by first
# compiling it with `callable`.
ev(func, pt, elt) = callable(func)(pt, elt)
# Each test file runs inside its own testset so results report independently.
@testset "Transforms" begin include("Transforms.jl") end
@testset "Functions" begin include("Functions.jl") end
@testset "Gradients" begin include("Gradients.jl") end
@testset "Optimization" begin include("Optimization.jl") end
@testset "Integration" begin include("Integration.jl") end
| [
3500,
44800,
2348,
29230,
198,
3500,
14534,
198,
3500,
1338,
17208,
3163,
20477,
198,
3500,
6208,
198,
198,
3500,
449,
26791,
13,
36,
3639,
198,
3500,
449,
26791,
13,
24629,
2733,
198,
3500,
449,
26791,
13,
34500,
1358,
198,
3500,
449,
... | 3.037559 | 213 |
using Test
using Base.BinaryPlatforms
import Libdl
using BinaryBuilderBase
using BinaryBuilderBase: template, dlopen_flags_str
# The platform we're running on; used below as the default platform for
# product-satisfaction checks and to pick a "foreign" platform.
const platform = HostPlatform()
@testset "Products" begin
    # `template` substitutes $libdir/$arch/$nbits/$target placeholders
    # according to the given platform.
    @test template(raw"$libdir/foo-$arch/$nbits/bar-$target", Platform("x86_64", "windows")) ==
        "bin/foo-x86_64/64/bar-x86_64-w64-mingw32"
    @test template(raw"$target/$nbits/$arch/$libdir", Platform("x86_64", "linux"; libc = "musl")) ==
        "x86_64-linux-musl/64/x86_64/lib"
    # Product constructors record the search names and directory hints.
    lp = LibraryProduct("libfakechroot", :libfakechroot, "lib/fakechroot")
    @test lp.libnames == ["libfakechroot"]
    @test lp.dir_paths == ["lib/fakechroot"]
    ep = ExecutableProduct("fooify", :fooify, "bin/foo_inc")
    @test ep.binnames == ["fooify"]
    # These variable names are rejected — presumably because they collide
    # with exported Base names (confirm in BinaryBuilderBase).
    @test_throws ErrorException LibraryProduct("sin", :sin)
    @test_throws ErrorException ExecutableProduct("convert", :convert)
    @test_throws ErrorException FileProduct("open", :open)
    # Test sorting of products....
    @test sort([LibraryProduct("libbar", :libbar), ExecutableProduct("foo", :foo), FrameworkProduct("buzz", :buzz)]) ==
        [FrameworkProduct("buzz", :buzz), ExecutableProduct("foo", :foo), LibraryProduct("libbar", :libbar)]
    # ...and products info
    p1 = LibraryProduct(["libchafa"], :libchafa, ) => Dict("soname" => "libchafa.so.0","path" => "lib/libchafa.so")
    p2 = ExecutableProduct(["chafa"], :chafa, ) => Dict("path" => "bin/chafa")
    products_info = Dict{Product,Any}(p1, p2)
    @test sort(products_info) == [p2, p1]
    temp_prefix() do prefix
        # Test that basic satisfication is not guaranteed
        e_path = joinpath(bindir(prefix), "fooifier")
        l_path = joinpath(last(libdirs(prefix)), "libfoo.$(Libdl.dlext)")
        e = ExecutableProduct("fooifier", :fooifier)
        ef = FileProduct(joinpath("bin", "fooifier"), :fooifier)
        l = LibraryProduct("libfoo", :libfoo)
        lf = FileProduct(l_path, :libfoo)
        # Nothing has been created yet, so no product is satisfied; each
        # check also asserts the expected log message.
        @test @test_logs (:info, r"does not exist") !satisfied(e, prefix; verbose=true)
        @test @test_logs (:info, r"not found") !satisfied(ef, prefix; verbose=true)
        @test @test_logs (:info, r"^Could not locate") !satisfied(l, prefix; verbose=true)
        @test @test_logs (:info, r"^Could not locate") !satisfied(l, prefix; verbose=true, isolate=true)
        @test @test_logs (:info, r"^FileProduct .* not found") !satisfied(lf, prefix; verbose=true)
        # Test that simply creating a file that is not executable doesn't
        # satisfy an Executable Product (and say it's on Linux so it doesn't
        # complain about the lack of an .exe extension)
        mkpath(bindir(prefix))
        touch(e_path)
        @test @test_logs (:info, r"^FileProduct .* found at") satisfied(ef, prefix; verbose=true)
        @static if !Sys.iswindows()
            # Windows doesn't care about executable bit, grumble grumble
            @test_logs (:info, r"is not executable") (:info, r"does not exist") begin
                @test !satisfied(e, prefix; verbose=true, platform=Platform("x86_64", "linux"))
            end
        end
        # Make it executable and ensure this does satisfy the Executable
        chmod(e_path, 0o777)
        @test_logs (:info, r"matches our search criteria") begin
            @test satisfied(e, prefix; verbose=true, platform=Platform("x86_64", "linux"))
        end
        # Remove it and add a `$(path).exe` version to check again, this
        # time saying it's a Windows executable
        Base.rm(e_path; force=true)
        touch("$(e_path).exe")
        chmod("$(e_path).exe", 0o777)
        @test locate(e, prefix; platform=Platform("x86_64", "windows")) == "$(e_path).exe"
        # Test that simply creating a library file doesn't satisfy it if we are
        # testing something that matches the current platform's dynamic library
        # naming scheme, because it must be `dlopen()`able.
        mkpath(last(libdirs(prefix)))
        touch(l_path)
        @test @test_logs (:info, r"^FileProduct .* found at") satisfied(lf, prefix; verbose=true)
        @test @test_logs (:info, r"^Found a valid") (:info, r"matches our search criteria") (:info, r"cannot be dlopen'ed") (:info, r"^Could not locate") !satisfied(l, prefix; verbose=true)
        @test @test_logs (:info, r"^FileProduct .* found at") satisfied(lf, prefix; verbose=true, isolate=true)
        @test @test_logs (:info, r"^Found a valid") (:info, r"matches our search criteria") (:info, r"cannot be dlopen'ed") (:info, r"^Could not locate") !satisfied(l, prefix; verbose=true, isolate=true)
        # But if it is from a different platform, simple existence will be
        # enough to satisfy a LibraryProduct
        @static if Sys.iswindows()
            p = Platform("x86_64", "linux")
            mkpath(last(libdirs(prefix, p)))
            l_path = joinpath(last(libdirs(prefix, p)), "libfoo.so")
            touch(l_path)
            @test @test_logs (:info, r"^Found a valid") (:info, r"matches our search criteria") satisfied(l, prefix; verbose=true, platform=p)
            @test @test_logs (:info, r"^Found a valid") (:info, r"matches our search criteria") satisfied(l, prefix; verbose=true, platform=p, isolate=true)
            # Check LibraryProduct objects with explicit directory paths
            ld = LibraryProduct("libfoo", :libfoo)
            @test @test_logs (:info, r"^Found a valid") (:info, r"matches our search criteria") satisfied(ld, prefix; verbose=true, platform=p)
            @test @test_logs (:info, r"^Found a valid") (:info, r"matches our search criteria") satisfied(ld, prefix; verbose=true, platform=p, isolate=true)
        else
            p = Platform("x86_64", "windows")
            mkpath(last(libdirs(prefix, p)))
            l_path = joinpath(last(libdirs(prefix, p)), "libfoo.dll")
            touch(l_path)
            @test @test_logs (:info, r"^Found a valid") (:info, r"matches our search criteria") satisfied(l, prefix; verbose=true, platform=p)
            @test @test_logs (:info, r"^Found a valid") (:info, r"matches our search criteria") satisfied(l, prefix; verbose=true, platform=p, isolate=true)
            # Check LibraryProduct objects with explicit directory paths
            ld = LibraryProduct("libfoo", :libfoo)
            @test @test_logs (:info, r"^Found a valid") (:info, r"matches our search criteria") satisfied(ld, prefix; verbose=true, platform=p)
            @test @test_logs (:info, r"^Found a valid") (:info, r"matches our search criteria") satisfied(ld, prefix; verbose=true, platform=p, isolate=true)
        end
    end
    # Ensure that the test suite thinks that these libraries are foreign
    # so that it doesn't try to `dlopen()` them:
    foreign_platform = if platform == Platform("aarch64", "linux")
        # Arbitrary architecture that is not dlopen()'able
        Platform("powerpc64le", "linux")
    else
        # If we're not Platform("aarch64", "linux"), then say the libraries are
        Platform("aarch64", "linux")
    end
    # Test for valid library name permutations
    for ext in ["so", "so.1", "so.1.2", "so.1.2.3"]
        temp_prefix() do prefix
            l_path = joinpath(last(libdirs(prefix, foreign_platform)), "libfoo.$ext")
            l = LibraryProduct("libfoo", :libfoo)
            mkdir(dirname(l_path))
            touch(l_path)
            @test @test_logs (:info, r"^Found a valid") (:info, r"matches our search criteria") satisfied(l, prefix; verbose=true, platform=foreign_platform)
        end
    end
    # Test for invalid library name permutations
    for ext in ["1.so", "so.1.2.3a", "so.1.a"]
        temp_prefix() do prefix
            l_path = joinpath(last(libdirs(prefix, foreign_platform)), "libfoo.$ext")
            l = LibraryProduct("libfoo", :libfoo)
            mkdir(dirname(l_path))
            touch(l_path)
            if ext == "1.so"
                # "libfoo.1.so" is still found as a file but rejected as a
                # library name, hence the extra "Found a valid" log line.
                @test_logs (:info, r"^Found a valid") (:info, r"^Could not locate") begin
                    @test !satisfied(l, prefix; verbose=true, platform=foreign_platform)
                end
            else
                @test_logs (:info, r"^Could not locate") begin
                    @test !satisfied(l, prefix; verbose=true, platform=foreign_platform)
                end
            end
        end
    end
    # Test for proper repr behavior
    temp_prefix() do prefix
        l = LibraryProduct("libfoo", :libfoo)
        @test repr(l) == "LibraryProduct($(repr(["libfoo"])), :libfoo)"
        l = LibraryProduct(["libfoo", "libfoo2"], :libfoo)
        @test repr(l) == "LibraryProduct($(repr(["libfoo", "libfoo2"])), :libfoo)"
        e = ExecutableProduct("fooifier", :fooifier)
        @test repr(e) == "ExecutableProduct([\"fooifier\"], :fooifier)"
        e = ExecutableProduct("fooifier", :fooifier, "bin/qux")
        @test repr(e) == "ExecutableProduct([\"fooifier\"], :fooifier, \"bin/qux\")"
        f = FileProduct(joinpath("etc", "fooifier"), :foo_conf)
        @test repr(f) == "FileProduct([$(repr(joinpath("etc", "fooifier")))], :foo_conf)"
        f = FileProduct(joinpath(prefix, "etc", "foo.conf"), :foo_conf)
        @test repr(f) == "FileProduct([$(repr(joinpath(prefix, "etc", "foo.conf")))], :foo_conf)"
    end
    # Test that FileProduct's can have `${target}` within their paths:
    temp_prefix() do prefix
        multilib_dir = joinpath(prefix, "foo", triplet(platform))
        mkpath(multilib_dir)
        touch(joinpath(multilib_dir, "bar"))
        # Both $target and ${target} spellings must expand.
        for path in ("foo/\$target/bar", "foo/\${target}/bar")
            f = FileProduct(path, :bar)
            @test @test_logs (:info, r"^FileProduct .* found at") satisfied(f, prefix; verbose=true, platform=platform)
        end
    end
end
@testset "Dlopen flags" begin
lp = LibraryProduct("libfoo2", :libfoo2; dlopen_flags=[:RTLD_GLOBAL, :RTLD_NOLOAD])
@test lp.dlopen_flags == [:RTLD_GLOBAL, :RTLD_NOLOAD]
fp = FrameworkProduct("libfoo2", :libfoo2; dlopen_flags=[:RTLD_GLOBAL, :RTLD_NOLOAD])
@test fp.libraryproduct.dlopen_flags == [:RTLD_GLOBAL, :RTLD_NOLOAD]
for p in (lp, fp)
flag_str = dlopen_flags_str(p)
@test flag_str == "RTLD_GLOBAL | RTLD_NOLOAD"
@test Libdl.eval(Meta.parse(flag_str)) == (Libdl.RTLD_NOLOAD | Libdl.RTLD_GLOBAL)
end
lp = LibraryProduct("libfoo2", :libfoo2; dont_dlopen=true)
@test dlopen_flags_str(lp) == "nothing"
end
| [
3500,
6208,
198,
3500,
7308,
13,
33,
3219,
37148,
82,
198,
11748,
7980,
25404,
198,
3500,
45755,
32875,
14881,
198,
3500,
45755,
32875,
14881,
25,
11055,
11,
288,
75,
9654,
62,
33152,
62,
2536,
198,
198,
2,
383,
3859,
356,
821,
2491,
... | 2.359073 | 4,403 |
<reponame>mwhatters/SpringCollab2020
module SpringCollab2020TrollStrawberry
using ..Ahorn, Maple
@mapdef Entity "SpringCollab2020/trollStrawberry" TrollStrawberry(x::Integer, y::Integer, winged::Bool=false)
const placements = Ahorn.PlacementDict(
"Troll Strawberry (Spring Collab 2020)" => Ahorn.EntityPlacement(
TrollStrawberry
),
"Troll Strawberry (Winged) (Spring Collab 2020)" => Ahorn.EntityPlacement(
TrollStrawberry,
"point",
Dict{String, Any}(
"winged" => true
)
),
)
# name, winged
sprites = Dict{Tuple{String, Bool}, String}(
("SpringCollab2020/trollStrawberry", false) => "collectables/strawberry/normal00",
("SpringCollab2020/trollStrawberry", true) => "collectables/strawberry/wings01",
)
fallback = "collectables/strawberry/normal00"
Ahorn.nodeLimits(entity::TrollStrawberry) = 0, -1
function Ahorn.selection(entity::TrollStrawberry)
x, y = Ahorn.position(entity)
winged = get(entity.data, "winged", false)
sprite = sprites[(entity.name, winged)]
res = Ahorn.Rectangle[Ahorn.getSpriteRectangle(sprite, x, y)]
return res
end
function Ahorn.renderSelectedAbs(ctx::Ahorn.Cairo.CairoContext, entity::TrollStrawberry)
end
function Ahorn.renderAbs(ctx::Ahorn.Cairo.CairoContext, entity::TrollStrawberry, room::Maple.Room)
x, y = Ahorn.position(entity)
winged = get(entity.data, "winged", false)
sprite = sprites[(entity.name, winged)]
Ahorn.drawSprite(ctx, sprite, x, y)
end
end | [
27,
7856,
261,
480,
29,
76,
10919,
1010,
14,
30387,
22667,
397,
42334,
198,
171,
119,
123,
21412,
8225,
22667,
397,
42334,
51,
2487,
1273,
1831,
8396,
198,
198,
3500,
11485,
10910,
1211,
11,
21249,
198,
198,
31,
8899,
4299,
20885,
366... | 2.542714 | 597 |
<gh_stars>0
module triangleInterpolator
using Images
export rasterizationBBOX
function pointLine(x::Float64,
y::Float64,
line::Array{Float64},
linex::Float64,
liney::Float64
)::Float64
return line[2]*x - line[1]*y - line[2]*linex + line[1]*liney
end
function swap_points(ax::Float64,
ay::Float64,
bx::Float64,
by::Float64,
colorA::RGB{N0f8},
colorB::RGB{N0f8}
)::Tuple{Float64, Float64, Float64, Float64, RGB{N0f8}, RGB{N0f8}}
buf_x = ax
buf_y = ay
ax = bx
ay = by
bx = buf_x
by = buf_y
colorBuf = colorA
colorA = colorB
colorB = colorBuf
return ax, ay, bx, by, colorA, colorB
end
function validate_entry(ax::Float64,
ay::Float64,
bx::Float64,
by::Float64,
cx::Float64,
cy::Float64,
colorA::RGB{N0f8},
colorB::RGB{N0f8},
colorC::RGB{N0f8}
)::Tuple{Float64, Float64, Float64, Float64, Float64, Float64, RGB{N0f8}, RGB{N0f8}, RGB{N0f8}}
ab = [bx-ax, by-ay]
bc = [cx-bx, cy-by]
ca = [ax-cx, ay-cy]
if pointLine(cx, cy, ab, ax, ay) < 0
ax, ay, cx, cy, colorA, colorC = swap_points(ax, ay, cx, cy, colorA, colorC)
elseif pointLine(ax, ay, bc, bx, by) < 0
ax, ay, bx, by, colorA, colorB = swap_points(ax, ay, bx, by, colorA, colorB)
elseif pointLine(bx, by, ca, cx, cy) < 0
bx, by, cx, cy, colorB, colorC = swap_points(bx, by, cx, cy, colorB, colorC)
end
ax, ay, bx, by, cx, cy, colorA, colorB, colorC
end
function interpolateColors(position::Array{Float64},
ax::Float64,
ay::Float64,
bx::Float64,
by::Float64,
cx::Float64,
cy::Float64,
colorA::RGB{N0f8},
colorB::RGB{N0f8},
colorC::RGB{N0f8}
)::RGB{N0f8}
ab = [bx-ax, by-ay]
bc = [cx-bx, cy-by]
ca = [ax-cx, ay-cy]
i, j = position
alpha = pointLine(j, i, bc, bx, by) / pointLine(ax, ay, bc, bx, by)
beta = pointLine(j, i, ca, cx, cy) / pointLine(bx, by, ca, cx, cy)
phi = pointLine(j, i, ab, ax, ay) / pointLine(cx, cy, ab, ax, ay)
return alpha * colorA + beta * colorB + phi * colorC
end
function setupBBOX(A::Array{Float64},
B::Array{Float64},
C::Array{Float64},
maxX::Int64,
maxY::Int64
)::NTuple{4, Float64}
maxHeight = max(A[1], B[1], C[1])
minHeight = min(A[1], B[1], C[1])
if maxHeight > maxY
maxHeight = maxY
end
if minHeight < 1
minHeight = 1
end
maxWidth = max(A[2], B[2], C[2])
minWidth = min(A[2], B[2], C[2])
if maxWidth > maxX
maxWidth = maxX
end
if minWidth < 1
minWidth = 1
end
return maxHeight, minHeight, maxWidth, minWidth
end
function rasterizationBBOX(img::Array{RGB{N0f8}, 2},
A::Array{Float64},
B::Array{Float64},
C::Array{Float64},
colorA::RGB{N0f8},
colorB::RGB{N0f8},
colorC::RGB{N0f8}
)
maxHeight, minHeight, maxWidth, minWidth = setupBBOX(A, B, C, size(img)[2], size(img)[1])
ax, ay, bx, by, cx, cy = A[2], A[1], B[2], B[1], C[2], C[1]
ax, ay, bx, by, cx, cy, colorA, colorB, colorC = validate_entry(ax, ay, bx, by, cx, cy, colorA, colorB, colorC)
ab = [bx-ax, by-ay]
bc = [cx-bx, cy-by]
ca = [ax-cx, ay-cy]
for i=floor(Int, minHeight):ceil(Int, maxHeight)
for j=floor(Int, minWidth):ceil(Int, maxWidth)
floatj = Float64(j)
floati = Float64(i)
alpha = pointLine(floatj, floati, bc, bx, by)
beta = pointLine(floatj, floati, ca, cx, cy)
phi = pointLine(floatj, floati, ab, ax, ay)
if beta >= 0 && alpha >= 0 && phi >= 0
img[i, j] = interpolateColors([floati, floatj], ax, ay, bx, by, cx, cy, colorA, colorB, colorC)
end
end
end
end
end | [
27,
456,
62,
30783,
29,
15,
198,
21412,
22950,
9492,
16104,
1352,
198,
220,
220,
220,
1262,
5382,
198,
220,
220,
220,
10784,
374,
1603,
1634,
33,
39758,
628,
220,
220,
220,
2163,
966,
13949,
7,
87,
3712,
43879,
2414,
11,
198,
220,
... | 1.602572 | 3,110 |
using Lazy
import Lazy: cycle, range, drop, take
using Test
# dummy function to test threading macros on
function add_things(n1, n2, n3)
100n1 + 10n2 + n3
end
# dummy macro to test threading macros on
macro m_add_things(n1, n2, n3)
quote
100 * $(esc(n1)) + 10 * $(esc(n2)) + $(esc(n3))
end
end
# define structs for @forward macro testing below (PR #112)
struct Foo112 end
struct Bar112 f::Foo112 end
@testset "Lazy" begin
if VERSION >= v"1.0.0"
@test isempty(detect_ambiguities(Base, Core, Lazy))
end
@testset "Lists" begin
@test list(1, 2, 3)[2] == 2
@test prepend(1, list(2,3,4)) == 1:list(2, 3, 4)
@test seq([1, 2, 3]) == list(1, 2, 3)
@test seq(1:3) == list(1, 2, 3)
@test constantly(1)[50] == 1
testfn() = 1
@test repeatedly(testfn)[50] == 1
@test cycle([1, 2, 3])[50] == 2
@test iterated(x->x^2, 2)[3] == 16
@test range(1, 5)[3] == 3
@test range(1, 5)[10] == nothing
@test range(1, 5)[-1] == 1
@test list(1, 2, 3) * list(4, 5, 6) == list(1, 2, 3, 4, 5, 6)
@test first(list(1, 2, 3)) == 1
@test tail(list(1, 2, 3)) == list(2, 3)
@test flatten(list(1,2,list(3,4))) == list(1, 2, 3, 4)
@test list(1,2,list(3,4))[3] == list(3, 4)
@test list(list(1), list(2))[1] == list(1)
@test reductions(+, 0, list(1, 2, 3)) == list(1, 3, 6)
@test [i for i in @lazy[1,2,3]] == [1,2,3]
l = list(1, 2, 3)
@test l:7:l == list(list(1, 2, 3), 7, 1, 2, 3) # ambiguity test
end
@testset "Fibs" begin
fibs = @lazy 0:1:(fibs + drop(1, fibs));
@test fibs[20] == 4181
@test take(5, fibs) == list(0, 1, 1, 2, 3)
end
@testset "Primes" begin
isprime(n) =
@>> primes begin
take_while(x -> x<=sqrt(n))
map(x -> n % x == 0)
any; !
end
primes = filter(isprime, range(2));
end
@testset "Even squares" begin
esquares = @>> range() map(x->x^2) filter(iseven);
@test take(5, esquares) == list(4, 16, 36, 64, 100)
end
@testset "Threading macros" begin
temp = @> [2 3] sum
@test temp == 5
# Reverse from after index 2
temp = @>> 2 reverse([1, 2, 3, 4, 5])
@test temp == [1, 5, 4, 3, 2]
temp = @as x 2 begin
x^2
x + 2
end
@test temp == 6
# test that threading macros work with functions
temp = @> 1 add_things(2,3)
@test temp == 123
temp = @>> 3 add_things(1,2)
@test temp == 123
temp = @as x 2 add_things(1,x,3)
@test temp == 123
# test that threading macros work with macros
temp = @> 1 @m_add_things(2,3)
@test temp == 123
temp = @>> 3 @m_add_things(1,2)
@test temp == 123
temp = @as x 2 @m_add_things(1,x,3)
@test temp == 123
end
@testset "Forward macro" begin
play(x::Foo112; y) = y # uses keyword arg
play(x::Foo112, z) = z # uses regular arg
play(x::Foo112, z1, z2; y) = y + z1 + z2 # uses both
@forward Bar112.f play # forward `play` function to field `f`
let f = Foo112(), b = Bar112(f)
@test play(f, y = 1) === play(b, y = 1)
@test play(f, 2) === play(b, 2)
@test play(f, 2, 3, y = 1) === play(b, 2, 3, y = 1)
end
end
@testset "getindex" begin
l = Lazy.range(1,10)
@test l[1] == 1
@test collect(l[1:5]) == collect(1:5)
end
@testset "Listables" begin
@test_throws MethodError sin()
end
@static VERSION ≥ v"1.2" && @testset "avoid stackoverflow" begin
@test (length(takewhile(<(10), Lazy.range(1))); true)
@test (length(takewhile(<(100000), Lazy.range(1))); true)
end
@testset "any/all" begin
let xs = list(true, false, false)
@test any(identity, xs) == true
@test any(xs) == true
@test all(identity, xs) == false
@test all(xs) == false
end
let yy = list(1, 0, 1)
@test any(Bool, yy) == true
@test all(Bool, yy) == false
end
# Base method--ensures no ambiguity with methods here
@test all([true true; true true], dims=1) == [true true]
end
end
| [
3500,
406,
12582,
198,
11748,
406,
12582,
25,
6772,
11,
2837,
11,
4268,
11,
1011,
198,
3500,
6208,
198,
198,
2,
31548,
2163,
284,
1332,
4704,
278,
34749,
319,
198,
8818,
751,
62,
27971,
7,
77,
16,
11,
299,
17,
11,
299,
18,
8,
19... | 2.059898 | 1,970 |
import Base: isempty
export ResetMap,
get_A,
get_b
"""
ResetMap{N<:Real, S<:LazySet{N}} <: LazySet{N}
Type that represents a lazy reset map.
A reset map is a special case of an affine map ``A x + b, x ∈ X`` where the
linear map ``A`` is the identity matrix with zero entries in all reset
dimensions, and the translation vector ``b`` is zero in all other dimensions.
### Fields
- `X` -- convex set
- `resets` -- resets (a mapping from an index to a new value)
### Example
```jldoctest resetmap
julia> X = BallInf([2.0, 2.0, 2.0], 1.0);
julia> r = Dict(1 => 4.0, 3 => 0.0);
julia> rm = ResetMap(X, r);
```
Here `rm` modifies the set `X` such that `x1` is reset to 4 and `x3` is reset to
0, while `x2` is not modified.
Hence `rm` is equivalent to the set
`Hyperrectangle([4.0, 2.0, 0.0], [0.0, 1.0, 0.0])`, i.e., an axis-aligned line
segment embedded in 3D.
The corresponding affine map ``A x + b`` would be:
```math
\begin{pmatrix} 0 & 0 & 0 \\ 0 & 1 & 0 \\ 0 & 0 & 0 \end{pmatrix} x +
\begin{pmatrix} 4 & 0 & 0 \end{pmatrix}
```
Use the function `get_A` (resp. `get_b`) to create the matrix `A` (resp. vector
`b`) corresponding to a given reset map.
The (in this case unique) support vector of `rm` in direction `ones(3)` is:
```jldoctest resetmap
julia> σ(ones(3), rm)
3-element Array{Float64,1}:
4.0
3.0
0.0
```
"""
struct ResetMap{N<:Real, S<:LazySet{N}} <: LazySet{N}
X::S
resets::Dict{Int, N}
end
"""
get_A(rm::ResetMap{N}) where {N<:Real}
Return the ``A`` matrix of the affine map ``A x + b, x ∈ X`` represented by a
reset map.
### Input
- `rm` -- reset map
### Output
The (sparse) square matrix for the affine map ``A x + b, x ∈ X`` represented by
the reset map.
### Algorithm
We construct the identity matrix and set all entries in the reset dimensions to
zero.
"""
function get_A(rm::ResetMap{N}) where {N<:Real}
n = dim(rm)
A = sparse(N(1)*I, n, n)
for i in keys(rm.resets)
A[i, i] = zero(N)
end
return A
end
"""
get_b(rm::ResetMap{N}) where {N<:Real}
Return the ``b`` vector of the affine map ``A x + b, x ∈ X`` represented by a
reset map.
### Input
- `rm` -- reset map
### Output
The (sparse) vector for the affine map ``A x + b, x ∈ X`` represented by the
reset map.
The vector contains the reset value for all reset dimensions, and is zero for
all other dimensions.
"""
function get_b(rm::ResetMap{N}) where {N<:Real}
n = dim(rm)
b = sparsevec(Int[], N[], n)
for (i, val) in rm.resets
b[i] = val
end
return b
end
# --- LazySet interface functions ---
"""
dim(rm::ResetMap)
Return the dimension of a reset map.
### Input
- `rm` -- reset map
### Output
The dimension of a reset map.
"""
function dim(rm::ResetMap)::Int
return dim(rm.X)
end
"""
σ(d::AbstractVector{N}, rm::ResetMap{N}) where {N<:Real}
Return the support vector of a reset map.
### Input
- `d` -- direction
- `rm` -- reset map
### Output
The support vector in the given direction.
If the direction has norm zero, the result depends on the wrapped set.
"""
function σ(d::AbstractVector{N}, rm::ResetMap{N}) where {N<:Real}
d_reset = copy(d)
for var in keys(rm.resets)
d_reset[var] = zero(N)
end
return substitute(rm.resets, σ(d_reset, rm.X))
end
"""
ρ(d::AbstractVector{N}, rm::ResetMap{N}) where {N<:Real}
Return the support function of a reset map.
### Input
- `d` -- direction
- `rm` -- reset map
### Output
The support function in the given direction.
### Notes
We use the usual dot-product definition, but for unbounded sets we redefine the
product between ``0`` and ``±∞`` as ``0``; Julia returns `NaN` here.
```jldoctest
julia> Inf * 0.0
NaN
```
See the discussion
[here](https://math.stackexchange.com/questions/28940/why-is-infty-cdot-0-not-clearly-equal-to-0).
"""
function ρ(d::AbstractVector{N}, rm::ResetMap{N}) where {N<:Real}
return dot_zero(d, σ(d, rm))
end
"""
an_element(rm::ResetMap)
Return some element of a reset map.
### Input
- `rm` -- reset map
### Output
An element in the reset map.
It relies on the `an_element` function of the wrapped set.
"""
function an_element(rm::ResetMap)
return substitute(rm.resets, an_element(rm.X))
end
"""
isempty(rm::ResetMap)::Bool
Return if a reset map is empty or not.
### Input
- `rm` -- reset map
### Output
`true` iff the wrapped set is empty.
"""
function isempty(rm::ResetMap)::Bool
return isempty(rm.X)
end
"""
constraints_list(rm::ResetMap{N}) where {N<:Real}
Return the list of constraints of a polytopic reset map.
### Input
- `rm` -- reset map of a polytope
### Output
The list of constraints of the reset map.
### Notes
We assume that the underlying set `X` is a polytope, i.e., is bounded and offers
a method `constraints_list(X)`.
### Algorithm
We fall back to `constraints_list` of a `LinearMap` of the `A`-matrix in the
affine-map view of a reset map.
Each reset dimension ``i`` is projected to zero, expressed by two constraints
for each reset dimension.
Then it remains to shift these constraints to the new value.
For instance, if the dimension ``5`` was reset to ``4``, then there will be
constraints ``x₅ ≤ 0`` and ``-x₅ ≤ 0``.
We then modify the right-hand side of these constraints to ``x₅ ≤ 4`` and
``-x₅ ≤ -4``, respectively.
"""
function constraints_list(rm::ResetMap{N}) where {N<:Real}
# if `vector` has exactly one non-zero entry, return its index
# otherwise return 0
function find_unique_nonzero_entry(vector::AbstractVector{N})
res = 0
for (i, v) in enumerate(vector)
if v != zero(N)
if res != 0
# at least two non-zero entries
return 0
else
# first non-zero entry so far
res = i
end
end
end
return res
end
constraints = copy(constraints_list(LinearMap(get_A(rm), rm.X)))
for (i, c) in enumerate(constraints)
constrained_dim = find_unique_nonzero_entry(c.a)
if constrained_dim > 0 # constraint in only one dimension
if !haskey(rm.resets, constrained_dim)
continue # not a dimension we are interested in
end
new_value = rm.resets[constrained_dim]
if new_value == zero(N)
@assert c.b == zero(N)
continue # a reset to 0 needs not create a new constraint
end
if c.a[constrained_dim] < zero(N)
# change sign for lower bound
new_value = -new_value
end
constraints[i] = HalfSpace(c.a, new_value)
end
end
return constraints
end
"""
constraints_list(rm::ResetMap{N, S}) where
{N<:Real, S<:AbstractHyperrectangle}
Return the list of constraints of a hyperrectangular reset map.
### Input
- `rm` -- reset map of a hyperrectangular set
### Output
The list of constraints of the reset map.
### Algorithm
We iterate through all dimensions.
If there is a reset, we construct the corresponding (flat) constraints.
Otherwise, we construct the corresponding constraints of the underlying set.
"""
function constraints_list(rm::ResetMap{N, S}
) where {N<:Real, S<:AbstractHyperrectangle}
H = rm.X
n = dim(H)
constraints = Vector{LinearConstraint{N}}(undef, 2*n)
j = 1
for i in 1:n
ei = LazySets.Approximations.UnitVector(i, n, one(N))
if haskey(rm.resets, i)
# reset dimension => add flat constraints
v = rm.resets[i]
constraints[j] = HalfSpace(ei, v)
constraints[j+1] = HalfSpace(-ei, -v)
else
# non-reset dimension => use the hyperrectangle's constraints
constraints[j] = HalfSpace(ei, high(H, i))
constraints[j+1] = HalfSpace(-ei, -low(H, i))
end
j += 2
end
return constraints
end
| [
11748,
7308,
25,
318,
28920,
198,
198,
39344,
30027,
13912,
11,
198,
220,
220,
220,
220,
220,
220,
651,
62,
32,
11,
198,
220,
220,
220,
220,
220,
220,
651,
62,
65,
198,
198,
37811,
198,
220,
220,
220,
30027,
13912,
90,
45,
27,
2... | 2.417473 | 3,308 |
function tuning_display(p)
lines=show(p.output, p.tuner, progress=true)
println();
return lines
end
function convergence_display(p)
try
ciplot=lineplot([p.counter-(length(p.convergence_history)-1):p.counter...], p.convergence_history, title="Convergence Interval Recent History", xlabel="Iterate",ylabel="CI", color=:yellow)
lines=nrows(ciplot.graphics)+5
show(p.output, ciplot); println()
return lines
catch
printstyled(p.output, "CONVERGENCE PLOT UNAVAILABLE. STANDBY\n", bold=true, color=:yellow); println()
return 2
end
end
function evidence_display(p)
try
log_Zis=p.e.log_Zi[2:end]
posidx=findfirst(i->i > -Inf, log_Zis)
log_Zis[1:posidx-1].=log_Zis[posidx]
evplot=lineplot(log_Zis, title="Evidence History", xlabel="Iterate", color=:red, name="Ensemble logZ")
lines=nrows(evplot.graphics)+5
show(p.output, evplot); println()
return lines
catch
printstyled(p.output, "EVIDENCE PLOT UNAVAILABLE. STANDBY\n", bold=true, color=:red); println()
return 2
end
end
function info_display(p)
try
infoplot=lineplot(p.e.Hi[2:end], title="Information History", xlabel="Iterate", color=:green, name="Ensemble H")
lines=nrows(infoplot.graphics)+5
show(p.output, infoplot); println()
return lines
catch
printstyled(p.output, "INFORMATION PLOT UNAVAILABLE. STANDBY\n", bold=true, color=:green); println()
return 2
end
end
function lh_display(p)
try
lhplot=lineplot(p.e.log_Li[2:end], title="Contour History", xlabel="Iterate", color=:magenta, name="Ensemble logLH")
lines=nrows(lhplot.graphics)+5
show(p.output, lhplot); println()
return lines
catch
printstyled(p.output, "CONTOUR HISTORY UNAVAILABLE. STANDBY\n", bold=true, color=:magenta); println()
return 2
end
end
function liwi_display(p)
try
liwiplot=lineplot([max(2,p.counter-(CONVERGENCE_MEMORY-1)):p.counter...],p.e.log_Liwi[max(2,end-(CONVERGENCE_MEMORY-1)):end], title="Recent iterate evidentiary weight", xlabel="Iterate", name="Ensemble log Liwi", color=:cyan)
lines=nrows(liwiplot.graphics)+5
show(p.output, liwiplot); println()
return lines
catch
printstyled(p.output, "EVIDENTIARY HISTORY UNAVAILABLE. STANDBY\n", bold=true, color=:cyan); println()
return 2
end
end
function ensemble_display(p)
return lines=show(p.output, p.e, progress=true)
end
function model_display(p)
try
println("Current MAP model:")
return lines=show(p.output, p.top_m, progress=true)
catch
printstyled(p.output, "MODEL DISPLAY UNAVAILABLE\n", bold=true, color=:blue); println()
return 3
end
end
function model_obs_display(p)
try
println("Current MAP model")
return lines=show(p.output, p.top_m, p.e, progress=true)
catch
printstyled(p.output, "MODEL DISPLAY UNAVAILABLE\n", bold=true, color=:blue); println()
return 3
end
end | [
8818,
24549,
62,
13812,
7,
79,
8,
198,
220,
220,
220,
3951,
28,
12860,
7,
79,
13,
22915,
11,
279,
13,
28286,
263,
11,
4371,
28,
7942,
8,
198,
220,
220,
220,
44872,
9783,
198,
220,
220,
220,
1441,
3951,
198,
437,
198,
198,
8818,
... | 2.252547 | 1,374 |
<gh_stars>10-100
struct ERBFilterbank{C,G,T<:Real,U<:Real,V<:Real} <: Filterbank
filters::Vector{SecondOrderSections{C,G}}
ERB::Vector{T}
center_frequencies::Vector{U}
fs::V
end
function make_erb_filterbank(fs, num_channels, low_freq, EarQ = 9.26449, minBW = 24.7, order = 1)
T = 1/fs
if length(num_channels) == 1
cf = erb_space(low_freq, fs/2, num_channels)
else
cf = num_channels
if size(cf,2) > size(cf,1)
cf = cf'
end
end
ERB = ((cf/EarQ).^order .+ minBW.^order).^(1/order)
B = 1.019*2*pi*ERB
B0 = T
B2 = 0.0
A0 = 1.0
A1 = -2*cos.(2*cf*pi*T)./exp.(B*T)
A2 = exp.(-2*B*T)
B11 = -(2*T*cos.(2*cf*pi*T)./exp.(B*T) .+ 2*sqrt(3+2^1.5)*T*sin.(2*cf*pi*T)./exp.(B*T))/2
B12 = -(2*T*cos.(2*cf*pi*T)./exp.(B*T) .- 2*sqrt(3+2^1.5)*T*sin.(2*cf*pi*T)./exp.(B*T))/2
B13 = -(2*T*cos.(2*cf*pi*T)./exp.(B*T) .+ 2*sqrt(3-2^1.5)*T*sin.(2*cf*pi*T)./exp.(B*T))/2
B14 = -(2*T*cos.(2*cf*pi*T)./exp.(B*T) .- 2*sqrt(3-2^1.5)*T*sin.(2*cf*pi*T)./exp.(B*T))/2
gain = abs.((-2*exp.(4*im*cf*pi*T)*T .+ 2*exp.(-(B*T) .+
2*im*cf*pi*T).*T.*(cos.(2*cf*pi*T) .- sqrt(3 - 2^(3/2))*
sin.(2*cf*pi*T))) .* (-2*exp.(4*im*cf*pi*T)*T .+ 2*exp.(-(B*T) .+
2*im*cf*pi*T).*T.* (cos.(2*cf*pi*T) .+ sqrt(3 - 2^(3/2)) *
sin.(2*cf*pi*T))).* (-2*exp.(4*im*cf*pi*T)*T .+ 2*exp.(-(B*T) .+
2*im*cf*pi*T).*T.* (cos.(2*cf*pi*T) - sqrt(3 +
2^(3/2))*sin.(2*cf*pi*T))) .* (-2*exp.(4*im*cf*pi*T)*T .+
2*exp.(-(B*T) + 2*im*cf*pi*T).*T.* (cos.(2*cf*pi*T) + sqrt(3 +
2^(3/2))*sin.(2*cf*pi*T))) ./ (-2 ./ exp.(2*B*T) -
2*exp.(4*im*cf*pi*T) + 2*(1 .+ exp.(4*im*cf*pi*T))./exp.(B*T)).^4)
C = typeof(B0)
filters = Array{SOSFilter{C,C}}(undef,num_channels)
for ch=1:num_channels
biquads = Array{BiquadFilter{C}}(undef,4)
biquads[1] = BiquadFilter(B0, B11[ch], B2, A0, A1[ch], A2[ch])
biquads[2] = BiquadFilter(B0, B12[ch], B2, A0, A1[ch], A2[ch])
biquads[3] = BiquadFilter(B0, B13[ch], B2, A0, A1[ch], A2[ch])
biquads[4] = BiquadFilter(B0, B14[ch], B2, A0, A1[ch], A2[ch])
filters[ch] = SOSFilter(biquads, 1/gain[ch])
end
ERBFilterbank(filters, ERB, cf, fs)
end
function erb_space(low_freq, high_freq, num_channels, EarQ = 9.26449, minBW = 24.7, order = 1)
# All of the following expressions are derived in Apple TR #35, "An
# Efficient Implementation of the Patterson-Holdsworth Cochlear
# Filter Bank." See pages 33-34.
space = (1:num_channels).*(-log(high_freq + EarQ*minBW) + log(low_freq + EarQ*minBW))/num_channels
cfArray = -(EarQ*minBW) .+ exp.(space) * (high_freq + EarQ*minBW)
end
| [
27,
456,
62,
30783,
29,
940,
12,
3064,
198,
7249,
13793,
33,
22417,
17796,
90,
34,
11,
38,
11,
51,
27,
25,
15633,
11,
52,
27,
25,
15633,
11,
53,
27,
25,
15633,
92,
1279,
25,
25853,
17796,
198,
220,
220,
220,
16628,
3712,
38469,
... | 1.740956 | 1,548 |
<reponame>KlausC/ResourceBundles.jl
using Logging
bundle = ResourceBundle(@__MODULE__, "messages2")
@test bundle.path == abspath("resources")
bundle2 = ResourceBundle(ResourceBundles, "bundle")
@test realpath(bundle2.path) == realpath(normpath(pwd(), "resources"))
bundle3 = ResourceBundle(ResourceBundles, "does1not2exist")
@test bundle3.path == "."
bundle4 = ResourceBundle(Test, "XXX")
@test bundle4.path == "."
@test_throws ArgumentError ResourceBundle(Main, "")
@test_throws ArgumentError ResourceBundle(Main, "d_n_e")
const results = Dict(
(LocaleId("C"), "T0") => "T0",
(LocaleId("C"), "T1") => "T1 - empty",
(LocaleId("C"), "T2") => "T2 - empty",
(LocaleId("C"), "T3") => "T3 - empty",
(LocaleId("C"), "T4") => "T4 - empty",
(LocaleId("C"), "T5") => "T5 - empty",
(LocaleId("C"), "T6") => "T6",
(LocaleId("C"), "T7") => "T7",
(LocaleId("en"), "T0") => "T0",
(LocaleId("en"), "T1") => "T1 - empty",
(LocaleId("en"), "T2") => "T2 - en",
(LocaleId("en"), "T3") => "T3 - en",
(LocaleId("en"), "T4") => "T4 - en",
(LocaleId("en"), "T5") => "T5 - en",
(LocaleId("en"), "T6") => "T6",
(LocaleId("en"), "T7") => "T7",
(LocaleId("en-US"), "T0") => "T0",
(LocaleId("en-US"), "T1") => "T1 - empty",
(LocaleId("en-US"), "T2") => "T2 - en",
(LocaleId("en-US"), "T3") => "T3 - en_US",
(LocaleId("en-US"), "T4") => "T4 - en",
(LocaleId("en-US"), "T5") => "T5 - en_US",
(LocaleId("en-US"), "T6") => "T6 - en_US",
(LocaleId("en-US"), "T7") => "T7 - en_US",
(LocaleId("en-Latn"), "T0") => "T0",
(LocaleId("en-Latn"), "T1") => "T1 - empty",
(LocaleId("en-Latn"), "T2") => "T2 - en",
(LocaleId("en-Latn"), "T3") => "T3 - en",
(LocaleId("en-Latn"), "T4") => "T4 - en_Latn",
(LocaleId("en-Latn"), "T5") => "T5 - en_Latn",
(LocaleId("en-Latn"), "T6") => "T6 - en_Latn",
(LocaleId("en-Latn"), "T7") => "T7",
(LocaleId("en-Latn-US"), "T0") => "T0",
(LocaleId("en-Latn-US"), "T1") => "T1 - empty",
(LocaleId("en-Latn-US"), "T2") => "T2 - en",
(LocaleId("en-Latn-US"), "T3") => "T3 - en_US",
(LocaleId("en-Latn-US"), "T4") => "T4 - en_Latn",
(LocaleId("en-Latn-US"), "T5") => "T5 - en_Latn_US",
(LocaleId("en-Latn-US"), "T6") => ("T6 - en_Latn", "Ambiguous"),
(LocaleId("en-Latn-US"), "T7") => "T7 - en_US",
(LocaleId("en-x-1"), "T0") => "T0",
(LocaleId("en-x-1"), "T1") => "T1 - empty",
(LocaleId("en-x-1"), "T2") => "T2 - en",
(LocaleId("en-x-1"), "T3") => "T3 - en",
(LocaleId("en-x-1"), "T4") => "T4 - en",
(LocaleId("en-x-1"), "T5") => "T5 - en",
(LocaleId("en-x-1"), "T6") => "T6",
(LocaleId("en-x-1"), "T7") => "T7",
)
locs = LocaleId.(("C", "en", "en-US", "en-Latn", "en-Latn-US", "en-x-1"))
keya = ((x->"T" * string(x)).(0:7))
log = Test.TestLogger(min_level=Logging.Warn)
locm = locale_id(LC.MESSAGES)
@testset "message lookup for locale $loc and key $key" for loc in locs, key in keya
res = results[loc, key]
if isa(res, Tuple)
r, w = res
else
r, w = res, ""
end
with_logger(log) do
@test get(bundle, loc, key, key) == r
@test test_log(log, w)
set_locale!(loc, LC.MESSAGES)
@test get(bundle, key, key) == r
test_log(log, w)
end
end
set_locale!(locm, LC.MESSAGES)
with_logger(log) do
@test keys(bundle, LocaleId("")) == ["T1", "T2", "T3", "T4", "T5"]
@test keys(bundle, LocaleId("de")) == ["T1", "T2", "T3", "T4", "T5"]
@test keys(bundle2) == []
@test test_log(log)
@test keys(bundle, LocaleId("de-us")) == ["T1", "T2", "T3", "T4", "T5"]
@test test_log(log, "Wrong type 'Dict{Int64,Int64}'")
@test keys(bundle, LocaleId("de-us-america")) == ["T1", "T2", "T3", "T4", "T5"]
@test test_log(log, "Wrong type 'String'")
@test keys(bundle, LocaleId("de-us-america-x-1")) == ["T1", "T2", "T3", "T4", "T5"]
@test test_log(log, "Wrong type 'Nothing'")
@test keys(bundle3, LocaleId("")) == String[]
@test keys(bundle) == ["T1", "T2", "T3", "T4", "T5", "T6", "T7", "hello"]
end
@test resource_bundle(@__MODULE__, "messages2") === RB_messages2
@test resource_bundle(@__MODULE__, "bundle") === RB_bundle
@test @resource_bundle("d1n2e").path == ""
bundlea = @resource_bundle("bundle")
@test bundlea === RB_bundle
# Test in submodule Main.XXX
module XXX
using ResourceBundles, Test
@test @resource_bundle("messages2") === RB_messages2
@test Main.XXX.RB_messages2 === Main.RB_messages2
@test @resource_bundle("bundle") === RB_bundle
@test Main.XXX.RB_bundle !== Main.RB_bundle
# Test in submodule Main.XXX.YYY
module YYY
using ResourceBundles, Test
@test @resource_bundle("bundle") === RB_bundle
@test Main.XXX.YYY.RB_bundle === Main.XXX.RB_bundle
end
end
@test resource_bundle(ResourceBundles, "messages2") === ResourceBundles.RB_messages2
@test resource_bundle(ResourceBundles, "bundle") === ResourceBundles.RB_bundle
bundlea = ResourceBundles.eval(:(@resource_bundle("bundle")))
@test bundlea === ResourceBundles.RB_bundle
# test in submodule ResourceBundles.XXX
ResourceBundles.eval(:(module XXX
using ResourceBundles, Test
@test string(@__MODULE__) == "ResourceBundles.XXX"
@test @resource_bundle("messages2") === RB_messages2
@test ResourceBundles.XXX.RB_messages2 === ResourceBundles.RB_messages2
@test @resource_bundle("bundle") === RB_bundle
@test ResourceBundles.XXX.RB_bundle !== ResourceBundles.RB_bundle
end )
)
lpa = ResourceBundles.locale_pattern
sep = Base.Filesystem.path_separator
@test lpa(".jl") == LocaleId("C")
@test lpa("-en.jl") == LocaleId("en")
@test lpa("_en.jl") == LocaleId("en")
@test lpa(sep*"en.jl") == LocaleId("en")
@test lpa("-en"*sep*".jl") == LocaleId("en")
@test lpa(sep*"en"*sep*".jl") == LocaleId("en")
@test lpa(sep*"en."*sep*"jl") == nothing
| [
27,
7856,
261,
480,
29,
42,
38024,
34,
14,
26198,
33,
917,
829,
13,
20362,
198,
3500,
5972,
2667,
198,
198,
65,
31249,
796,
20857,
33,
31249,
7,
31,
834,
33365,
24212,
834,
11,
366,
37348,
1095,
17,
4943,
198,
31,
9288,
18537,
13,... | 2.083304 | 2,845 |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.3'
# jupytext_version: 0.8.6
# kernelspec:
# display_name: Julia 1.0.3
# language: julia
# name: julia-1.0
# ---
module SEIRmodel
using OrdinaryDiffEq
#Susceptible-exposed-infected-recovered model function
function seir_ode(dY,Y,p,t)
#Infected per-Capita Rate
β = p[1]
#Incubation Rate
σ = p[2]
#Recover per-capita rate
γ = p[3]
#Death Rate
μ = p[4]
#Susceptible Individual
S = Y[1]
#Exposed Individual
E = Y[2]
#Infected Individual
I = Y[3]
#Recovered Individual
#R = Y[4]
dY[1] = μ-β*S*I-μ*S
dY[2] = β*S*I-(σ+μ)*E
dY[3] = σ*E - (γ+μ)*I
end
function main()
#Pram (Infected Rate, Incubation Rate, Recover Rate, Death Rate)
pram=[520/365,1/60,1/30,774835/(65640000*365)]
#Initialize Param(Susceptible Individuals, Exposed Individuals, Infected Individuals)
init=[0.8,0.1,0.1]
tspan=(0.0,365.0)
seir_prob = ODEProblem(seir_ode,init,tspan,pram)
sol=solve(seir_prob, alg=Tsit5());
return sol
end
# using Plots
# va = VectorOfArray(sol.u)
# y = convert(Array,va)
# R = ones(size(sol.t))' - sum(y,dims=1);
# plot(sol.t,[y',R'],xlabel="Time",ylabel="Proportion")
end
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
11420,
198,
2,
474,
929,
88,
353,
25,
198,
2,
220,
220,
474,
929,
88,
5239,
25,
198,
2,
220,
220,
220,
220,
2420,
62,
15603,
341,
25,
198,
2,
220,
220,
220,
... | 2.011834 | 676 |
<reponame>DarioSarra/FLPDevelopment.jl
# function NormalDifference(df, group, var; ind_summary = mean, ylabel = "Median ...", xyfont = font(18, "Bookman Light"))
# res_plt, res_group, res_individual = mean_sem_scatter(df, group, var; ind_summary = ind_summary)
# res_test = test_difference(res_individual, group, var; normality = true)
# pooledSD = sqrt(((res_group.Central[2])^2 + (res_group.Central[2])^2)/2)
# res_effect = (res_group.Central[2] - res_group.Central[2]) / pooledSD
# # if res_effect < 0.5
# # res_effect = ((res_test.nx * res_test.ny) - res_test.U) / (res_test.nx * res_test.ny)
# # end
# e = round(res_effect; digits =2)
# add_info!(res_plt, res_group, pvalue(res_test), e; normality = true)
# xprop = ("Group", xyfont)
# yprop = (ylabel, xyfont)
# plot!(xaxis = xprop, yaxis = yprop)
# return (plt = res_plt,
# test = res_test,
# effect = res_effect,
# groupdf = res_group,
# individual_df = res_individual)
# end
"""
`Difference(df, group, var; ind_summary = mean, ylabel = "Median ...", xyfont = font(18, "Bookman Light"), ylims = nothing)`
A - plot a scatter with median ± CI of the variable var per each value of group
B - perform MannWhitneyUTest and annotate the results
C - Calculate the effect size if p < 0.05
C - return a named tuple with the plot, MannWhitneyUTest, effect size
a Dataframe with the group median and CI of var,
and a Dataframe with the individual mouse mean value of var
"""
function Difference(df, group, var; ind_summary = mean, ylabel = "Median ...", xyfont = font(18, "Bookman Light"), ylims = nothing)
res_plt, res_group, res_individual = median_ci_scatter(df, group, var; ind_summary = ind_summary)
res_test = MWU_test(res_individual, group, var)
res_effect = res_test.U/(res_test.nx * res_test.ny)
if res_effect < 0.5
res_effect = ((res_test.nx * res_test.ny) - res_test.U) / (res_test.nx * res_test.ny)
end
e = round(res_effect; digits =2)
add_info!(res_plt, res_group, pvalue(res_test), e; ylims = ylims)
xprop = ("Group", xyfont)
yprop = (ylabel, xyfont)
plot!(xaxis = xprop, yaxis = yprop)
return (plt = res_plt,
test = res_test,
effect = res_effect,
groupdf = res_group,
individual_df = res_individual)
end
"""
`median_ci_scatter(df, grouping, var)`
A - calculate the mean of the variable var for each animal using individual summary
B - calculate the median and ci for each group using group_summary
C - Plots the value as a scatter plot ± CI
"""
function median_ci_scatter(df, grouping, var; ind_summary = mean)
df1 = individual_summary(df, grouping, var; summary = ind_summary)
df2 = group_summary(df1, grouping, var; normality = false)
if typeof(grouping) <: AbstractVector && sizeof(grouping) > 1
df2[!,:xaxis] = [join(x,"_") for x in eachrow(df2[:, grouping])]
xaxis = :xaxis
else
xaxis = grouping
end
try
cases = levels(df[:,grouping])
df2[!,:color] = [val == cases[1] ? OGCol1 : OGCol2 for val in df2[:,xaxis]]
catch
df2[!,:color] .= :gray75
end
plt = @df df2 scatter(1:nrow(df2),:Central,
yerror = :ERR,
xlims = (0.5, nrow(df2) + 0.5),
xticks = (1:nrow(df2),string.(cols(xaxis))),
color = :color,
legend = false)
return plt, df2, df1
end
"""
    add_info!(plt, df, p, e; normality = false, ylims = nothing)

Annotate `plt` with a significance bar above the data, the p-value `p`
(labelled as a T-test when `normality` is true, Mann-Whitney U otherwise)
and, when p < 0.05, the effect size `e`. Adjusts the y-axis limits so the
annotations fit (unless explicit `ylims` are given). Returns `plt`.
"""
function add_info!(plt, df, p, e; normality = false, ylims = nothing)
    # Highest point drawn (central value plus upper error bar).
    plt_lim = maximum(df.Central .+ last.(df.ERR))
    plt_span = plt_lim/10
    ref, span = plt_lim + plt_span, plt_span/2
    add_bar!(plt, ref, span)
    if p >= 0.05
        pval = "N.S."
    else
        # Use 3 digits only when 2 digits would round the p-value to zero.
        round(p, digits = 2) == 0 ? pval = round(p, digits = 3) : pval = round(p, digits = 2)
        add_effect!(plt, ref+span/2, span/2, e)
    end
    if normality
        pmessage = "T-test, p = $(pval)"
    else
        pmessage = "Mann-Whitney U test, p = $(pval)"
    end
    add_pvalue!(plt, ref+span/2, span/2, pmessage)
    isnothing(ylims) && (ylims = (0,plt_lim + 2plt_span))
    yaxis!(ylims = ylims)
    return plt
end
"""
    add_bar!(plt, ref, span)

Draw a bracket-style significance bar on `plt`: two vertical ticks at x = 1
and x = 2 rising from `ref` to `ref + span`, joined by a horizontal line at
their midpoint height.
"""
function add_bar!(plt, ref, span)
    top = ref + span
    mid = ref + span/2
    for x in (1, 2)
        plot!(plt, [x, x], [ref, top], linecolor = :black)
    end
    plot!(plt, [1, 2], [mid, mid], linecolor = :black)
end
"""
    add_pvalue!(plt, ref, span, p)

Annotate `plt` at x = 1.5, y = `ref + span` with the p-value: `p` itself when
it is already a `String`, otherwise the threshold label "p < 0.01" or
"p < 0.05". Returns `plt`.
"""
function add_pvalue!(plt, ref, span, p)
    label = isa(p, String) ? p : (p < 0.01 ? "p < 0.01" : "p < 0.05")
    annotate!(plt, [(1.5, ref + span, Plots.text(label, 8, :center))])
    return plt
end
"""
    add_effect!(plt, ref, span, e)

Annotate `plt` at x = 1.5, y = `ref - span` with the effect size `e`.
Returns `plt`.
"""
function add_effect!(plt, ref, span, e)
    annotate!(plt, [(1.5, ref - span, Plots.text("Effect size = $e", 8, :center))])
    return plt
end
"""
`function_analysis(df,variable, f; grouping = nothing, step =0.05, calc = :basic,
color = [:auto], linestyle = [:auto])`
Apply the function f over the vaariable var per each value of grouping and
plots the result over the variable var
"""
function function_analysis(df,var, f; grouping = nothing, step =0.05, calc = :basic,
color = [:auto], linestyle = [:auto])
subgroups = isnothing(grouping) ? [:MouseID] : vcat(:MouseID,grouping)
xaxis = range(extrema(df[:, var])..., step = step)
dd1 = combine(groupby(df,subgroups), var => (t-> f(t,xaxis = xaxis)) => AsTable)
rename!(dd1, Dict(:Xaxis => var))
sort!(dd1,[:MouseID,var])
if calc == :bootstrapping
dd2 = combine(groupby(dd1,grouping)) do dd3
group_summary(dd3,var,:fy; normality = false)
end
dd2[!,:low] = [x[1] for x in dd2.ERR]
dd2[!,:up] = [x[2] for x in dd2.ERR]
elseif calc == :quantiles
dd2 = combine(groupby(dd1,[grouping,var]), :fy =>(t-> (Central = mean(t),
low= abs(mean(t) - quantile(t,0.25),
up = abs(quantile(t,0.975)-mean(t))),
# ERR = (abs(mean(t) - quantile(t,0.25)) + abs(quantile(t,0.975)-mean(t)))/2,
SEM = sem(t))) => AsTable)
elseif calc == :basic
dd2 = combine(groupby(dd1,[grouping,var]), :fy =>(t-> (Central = mean(t),up = sem(t), low = sem(t))) => AsTable)
end
sort!(dd2,var)
plt = @df dd2 plot(cols(var),:Central, ribbon = (:low, :up), group = cols(grouping), linecolor = :auto, color = color, linestyle = linestyle)
return plt, dd2
end
"""
    mediansurvival_analysis(streakdf, variable, grouping; plt = plot())

Compute the per-mouse median of `variable` within each level of `grouping`,
summarise each group with `group_summary` (central value ± CI), and draw the
result as a scatter with error bars on `plt` (created if not supplied).
Returns `plt`.
"""
function mediansurvival_analysis(streakdf,variable, grouping; plt = plot())
    # Median of `variable` for each mouse within each group level.
    dd1 = combine(groupby(streakdf,[:MouseID,grouping]), variable => median => variable)
    dd2 = group_summary(dd1,grouping,variable; normality = false)
    # group_summary may hand back a CategoricalArray; Plots needs plain strings.
    # (Removed a leftover debug println of this check.)
    isa(dd2[:, grouping], CategoricalArray) && (dd2[!,grouping] = string.(dd2[!,grouping]))
    @df dd2 scatter!(plt,cols(grouping), :Central, yerror = :ERR,
        ylabel = "Median survival time", label = "")
    return plt
end
| [
27,
7856,
261,
480,
29,
35,
4982,
50,
283,
430,
14,
3697,
47,
41206,
13,
20362,
198,
2,
2163,
14435,
28813,
1945,
7,
7568,
11,
1448,
11,
1401,
26,
773,
62,
49736,
796,
1612,
11,
331,
18242,
796,
366,
9921,
666,
35713,
11,
2124,
... | 2.246867 | 3,192 |
# ---
# title: 20. Valid Parentheses
# id: problem20
# author: <NAME>
# date: 2020-10-31
# difficulty: Easy
# categories: String, Stack
# link: <https://leetcode.com/problems/valid-parentheses/description/>
# hidden: true
# ---
#
# Given a string `s` containing just the characters `'('`, `')'`, `'{'`, `'}'`,
# `'['` and `']'`, determine if the input string is valid.
#
# An input string is valid if:
#
# 1. Open brackets must be closed by the same type of brackets.
# 2. Open brackets must be closed in the correct order.
#
#
#
# **Example 1:**
#
#
#
# Input: s = "()"
# Output: true
#
#
# **Example 2:**
#
#
#
# Input: s = "()[]{}"
# Output: true
#
#
# **Example 3:**
#
#
#
# Input: s = "(]"
# Output: false
#
#
# **Example 4:**
#
#
#
# Input: s = "([)]"
# Output: false
#
#
# **Example 5:**
#
#
#
# Input: s = "{[]}"
# Output: true
#
#
#
#
# **Constraints:**
#
# * `1 <= s.length <= 104`
# * `s` consists of parentheses only `'()[]{}'`.
#
#
## @lc code=start
using LeetCode
## add your code here:
## @lc code=end
| [
2,
11420,
198,
2,
3670,
25,
1160,
13,
48951,
16774,
39815,
198,
2,
4686,
25,
1917,
1238,
198,
2,
1772,
25,
1279,
20608,
29,
198,
2,
3128,
25,
12131,
12,
940,
12,
3132,
198,
2,
8722,
25,
16789,
198,
2,
9376,
25,
10903,
11,
23881,... | 2.101266 | 553 |
"""
    assemble_structures(layup, n_pt, twist, sloc, xaf, yaf, nchord, lam_t,
                        designparams, x_offset, orientation, xloc, zloc, geom, webloc)

Build the per-section composite definitions via the supplied `layup` function,
evaluate PreComp section properties, assemble the laminate ABD matrices,
estimate local buckling strains on either side of the web, and return all of
it packed into a `compstructure`.
"""
function assemble_structures(layup,n_pt,twist,sloc,xaf,yaf,nchord,lam_t,designparams,x_offset,orientation,xloc,zloc,geom,webloc)
    # assemble structural properties
    mat = Array{Array{Composites.material,1}}(n_pt)
    lam = Array{Composites.laminate,1}(n_pt)
    precompinput = Array{PreComp.input,1}(n_pt)
    precompoutput = Array{PreComp.output,1}(n_pt)
    # PreComp works in degrees.
    twist_d = twist*180.0/pi
    twistrate_d = PreComp.tw_rate(n_pt,sloc[1:n_pt],twist_d)
    # strainx = zeros(xafstrain)
    # strainy = zeros(yafstrain)
    # rot = zeros(2,2)
    for i = 1:n_pt
        # find leading edge
        lei = indmin(abs.(xaf[i,:]))
        # shift so leading edge is first
        xpc = circshift(xaf[i,:],-(lei-1))
        ypc = circshift(yaf[i,:],-(lei-1))
        # assemble input
        precompinput[i],mat[i],lam[i] = layup(nchord[i],twist_d[i],
        twistrate_d[i],xpc,ypc,lam_t[i,:],designparams,x_offset[i],orientation)
        # calculate composite properties: stiffness, mass, etc
        precompoutput[i] = PreComp.properties(precompinput[i])
        # rot[1,:] = [cos(-twist[i]),-sin(-twist[i])]
        # rot[2,:] = [sin(-twist[i]),cos(-twist[i])]
        #
        # xy = [xafstrain[i,:]*nchord[i],yafstrain[i,:]*nchord[i]]
        # xyrot = rot*xy
        # # determine locations at which to calculate strain
        # strainx[i,:] = xyrot[1] +xloc[i]
        # strainy[i,:] = xyrot[2] +zloc[i]
    end
    # Get ABD Matrices
    A = Array{Array{Float64,2},1}(n_pt)
    B = Array{Array{Float64,2},1}(n_pt)
    D = Array{Array{Float64,2},1}(n_pt)
    for i = 1:n_pt
        Q = Composites.getQ(mat[i])
        A[i],B[i],D[i] = Composites.getABD(lam[i],Q)
    end
    # Get buckling strains
    # Panel widths are taken fore and aft of the (single) web location.
    bucklingstrain = zeros(Float64,n_pt,2) #currently hardcoded for a single web
    _,bucklingstrain[:,1] = Composites.localbuckling(A,D,webloc*geom.normalchord)
    _,bucklingstrain[:,2] = Composites.localbuckling(A,D,(1-webloc)*geom.normalchord)
    # Assemble structural properties
    return compstructure(mat,lam,A,B,D,bucklingstrain,precompinput,precompoutput)
end
"""
    p_layup(normalchord, twist_d, twistrate_d, xaf, yaf,
            p_lam_t, designparams, p_x_offset, p_orientation)

Build the PreComp input, material list and laminate definition for a section
with a two-ply (uni + weave) layup on upper and lower surfaces and a
zero-thickness web. Returns `(precompinput, mat, lam)`.
"""
function p_layup(normalchord,twist_d,twistrate_d,xaf,yaf,
    p_lam_t,designparams,p_x_offset,p_orientation)
    t1 = p_lam_t[1]#uni
    t2 = p_lam_t[2]#weave
    mat = []
    # Materials Input
    e1 = zeros(length(designparams.p_usedmaterials))
    e2 = zeros(length(designparams.p_usedmaterials))
    g12 = zeros(length(designparams.p_usedmaterials))
    anu12 = zeros(length(designparams.p_usedmaterials))
    density = zeros(length(designparams.p_usedmaterials))
    for i = 1:length(designparams.p_usedmaterials)
        matnames = designparams.plyprops.names
        # NOTE(review): the lambda parameter shadows the outer `matnames`;
        # it matches each name against the i-th used material. Confirm intent.
        idx = find(matnames -> matnames == designparams.p_usedmaterials[i],matnames)
        material = designparams.plyprops.plies[idx[1]]
        push!(mat,material)
        e1[i] = material.e1
        e2[i] = material.e2
        g12[i] = material.g12
        anu12[i] = material.nu12
        density[i] = material.rho
    end
    web1 = designparams.p_webloc[1]
    # Upper surface: two chordwise segments split at the web, each with a
    # uni + weave lamina stack.
    xsec_nodeU=[0.0;web1;1.0]
    n_laminaU=[2,2]
    n_pliesU=[1,1,
    1,1]
    t_lamU=[t1,t2,
    t1,t2]
    tht_lamU=[p_orientation[1],p_orientation[2],
    p_orientation[1],p_orientation[2]]
    mat_lamU=[1,2,
    1,2]
    # Lower surface
    xsec_nodeL = xsec_nodeU
    n_laminaL = n_laminaU
    n_pliesL = n_pliesU
    t_lamL = t_lamU
    tht_lamL = tht_lamU
    mat_lamL = mat_lamU
    # Web
    loc_web = [web1]
    n_laminaW = [2]
    n_pliesW = [1,1]
    t_lamW = [0.0,0.0]
    tht_lamW = [p_orientation[1],p_orientation[2]]
    mat_lamW = [1,2]
    leloc = p_x_offset
    # assemble output
    lam = Composites.laminate{Int64,Float64}(mat_lamL[1:div(end,2)],n_pliesL[1:div(end,2)],t_lamL[1:div(end,2)],tht_lamL[1:div(end,2)])
    precompinput = PreComp.input(
    normalchord,
    -twist_d,-twistrate_d,
    leloc,xaf,yaf,
    e1,e2,g12,anu12,density,
    xsec_nodeU,n_laminaU,n_pliesU,t_lamU,tht_lamU,mat_lamU,
    xsec_nodeL,n_laminaL,n_pliesL,t_lamL,tht_lamL,mat_lamL,
    loc_web,n_laminaW,n_pliesW,t_lamW,tht_lamW,mat_lamW)
    return precompinput,mat,lam
end
"""
    w_layup(normalchord, twist_d, twistrate_d, xaf, yaf,
            w_lam_t, designparams, w_x_offset, w_orientation)

Build the PreComp input, material list and laminate definition for a wing
section: a five-lamina sandwich (uni/weave/foam/weave/uni) on upper and lower
surfaces and a fabric/foam/fabric web. Returns `(precompinput, mat, lam)`.
"""
function w_layup(normalchord,twist_d,twistrate_d,xaf,yaf,
    w_lam_t,designparams,w_x_offset,w_orientation)
    t1 = w_lam_t[1]#uni
    t2 = w_lam_t[2]#weave
    t3 = w_lam_t[3]#wing foam
    t4 = w_lam_t[4]#web fabric
    t5 = w_lam_t[5]#web foam
    mat = []
    # Materials Input
    e1 = zeros(length(designparams.w_usedmaterials))
    e2 = zeros(length(designparams.w_usedmaterials))
    g12 = zeros(length(designparams.w_usedmaterials))
    anu12 = zeros(length(designparams.w_usedmaterials))
    density = zeros(length(designparams.w_usedmaterials))
    for i = 1:length(designparams.w_usedmaterials)
        matnames = designparams.plyprops.names
        # NOTE(review): the lambda parameter shadows the outer `matnames`;
        # it matches each name against the i-th used material. Confirm intent.
        idx = find(matnames -> matnames == designparams.w_usedmaterials[i],matnames)
        material = designparams.plyprops.plies[idx[1]]
        push!(mat,material)
        e1[i] = material.e1
        e2[i] = material.e2
        g12[i] = material.g12
        anu12[i] = material.nu12
        density[i] = material.rho
    end
    web1 = designparams.w_webloc[1]
    # Upper surface: two chordwise segments split at the web, each a symmetric
    # uni/weave/foam/weave/uni sandwich.
    xsec_nodeU=[0.0;web1;1.0]
    n_laminaU=[5,5]
    n_pliesU=[1,1,1,1,1,
    1,1,1,1,1]
    t_lamU=[t1,t2,t3,t2,t1,
    t1,t2,t3,t2,t1]
    tht_lamU=[w_orientation[1],w_orientation[2],0.0,w_orientation[2],w_orientation[1],
    w_orientation[1],w_orientation[2],0.0,w_orientation[2],w_orientation[1]]
    mat_lamU=[1,2,3,2,1,
    1,2,3,2,1]
    # Lower surface
    xsec_nodeL = xsec_nodeU
    n_laminaL = n_laminaU
    n_pliesL = n_pliesU
    t_lamL = t_lamU
    tht_lamL = tht_lamU
    mat_lamL = mat_lamU
    # Web
    loc_web = [web1]
    n_laminaW = [3]
    n_pliesW = [1,1,1]
    t_lamW = [t4,t5,t4]
    tht_lamW = [w_orientation[3],0.0,w_orientation[3]]
    mat_lamW = [2,3,2]
    leloc = w_x_offset
    # assemble output
    lam = Composites.laminate{Int64,Float64}(mat_lamL[1:div(end,2)],n_pliesL[1:div(end,2)],t_lamL[1:div(end,2)],tht_lamL[1:div(end,2)])
    precompinput = PreComp.input(
    normalchord,
    -twist_d,-twistrate_d,
    leloc,xaf,yaf,
    e1,e2,g12,anu12,density,
    xsec_nodeU,n_laminaU,n_pliesU,t_lamU,tht_lamU,mat_lamU,
    xsec_nodeL,n_laminaL,n_pliesL,t_lamL,tht_lamL,mat_lamL,
    loc_web,n_laminaW,n_pliesW,t_lamW,tht_lamW,mat_lamW)
    return precompinput,mat,lam
end
"""
    beam_strain_wrapper(Fp, n_sec, ctlparams, printiter, plots, xaf, yaf, xafstrain,
                        yafstrain, twist, x_offset, aerocenter, nchord, structure,
                        spanyloc, xloc, yloc, zloc)

Translate CCBlade-style distributed aero loads `Fp` into beam loads, apply the
shear-center moment arms, run the BeamFEA analysis and strain recovery, and
return `(strains, delta, freq, mass_structure, strainx_vis, strainy_vis)`.
Optionally plots cross-section reference points (every 5th section and the
tip) when `plots` is true and `printiter` is a multiple of
`ctlparams.printfreq`.
"""
function beam_strain_wrapper(Fp,n_sec,ctlparams,printiter,plots,xaf,yaf,xafstrain,yafstrain,twist,x_offset,aerocenter,nchord,structure,spanyloc,xloc,yloc,zloc)
    momarmx_scac = zeros(n_sec) #shear center (sc) to aero center (ac) moment arm
    momarmy_scac = zeros(n_sec)
    momarmx_cmtc = zeros(n_sec)
    momarmy_cmtc = zeros(n_sec)
    momarmx_sccm = zeros(n_sec)
    momarmy_sccm = zeros(n_sec)
    if (printiter%(ctlparams.printfreq)==0.0 && plots)
        figure("af")
        clf()
    end
    strainx = zeros(xafstrain) # for strain locations about the shear center
    strainy = zeros(yafstrain)
    strainx_vis = zeros(xafstrain)
    strainy_vis = zeros(yafstrain)
    rot = zeros(2,2)
    for i = 1:n_sec
        # Calculate aerodynamic center from precalculated x-aero center and chord line
        idxle = indmin(abs.(xaf[i,:]))
        idxte = indmax(abs.(xaf[i,:]))
        yle = yaf[i,idxle]
        yte = yaf[i,idxte]
        acx = aerocenter[i]
        acy = (yte-yle)/(1)*acx + yle #y/mx+b
        y_ac = (acx+x_offset[i])*nchord[i] #x and y are flipped in precomp
        x_ac = acy*nchord[i]
        # Offset the PreComp section reference points to the aero center.
        x_sc = x_ac + structure.precompoutput[i].x_sc
        y_sc = y_ac + structure.precompoutput[i].y_sc
        x_tc = x_ac + structure.precompoutput[i].x_tc
        y_tc = y_ac + structure.precompoutput[i].y_tc
        x_cm = x_ac + structure.precompoutput[i].x_cm
        y_cm = y_ac + structure.precompoutput[i].y_cm
        #moment arms defined positive as in Fig. 13 in precomp manual with tc above cm, x and y are flipped as well in precomp
        momarmx_scac[i] = x_ac-y_sc
        momarmy_scac[i] = y_ac-x_sc
        momarmx_cmtc[i] = y_tc-y_cm
        momarmy_cmtc[i] = x_tc-x_cm
        momarmx_sccm[i] = y_cm-y_sc
        momarmy_sccm[i] = x_cm-x_sc
        #-------- Calculate Rotated Strain Locations about the Shear Center --------#
        rot[1,:] = [cos(-twist[i]),-sin(-twist[i])]
        rot[2,:] = [sin(-twist[i]),cos(-twist[i])]
        xy = [xafstrain[i,:]*nchord[i]+x_offset[i]*nchord[i]-y_sc,yafstrain[i,:]*nchord[i]-x_sc] #Offest by the shear center, precomp FOR is swapped
        xyrot = rot*xy
        # determine locations at which to calculate strain
        strainx[i,:] = xyrot[1]
        strainy[i,:] = xyrot[2]
        # add offset for visualization
        xy = [xafstrain[i,:]*nchord[i]-0.125*nchord[i],yafstrain[i,:]*nchord[i]] #Offest by the shear center, precomp FOR is swapped
        xyrot = rot*xy
        strainx_vis[i,:] = xyrot[1] + xloc[i]
        strainy_vis[i,:] = xyrot[2] + zloc[i]
        # Plots
        if (i%5==0 || i==n_sec) && (printiter%(ctlparams.printfreq)==0.0 && plots)
            figure("af$i")
            # clf()
            plot((xaf[i,:]+x_offset[i])*nchord[i],yaf[i,:]*nchord[i])
            if i==n_sec
                plot(y_ac,x_ac,"x",label = "Aero Center")
                plot(y_sc,x_sc,"D",label = "Shear Center")
                plot(y_tc,x_tc,".",label = "Mass Center")
                plot(y_cm,x_cm,"+",label = "Tension Center")
            else
                plot(y_ac,x_ac,"x")
                plot(y_sc,x_sc,"D")
                plot(y_tc,x_tc,".")
                plot(y_cm,x_cm,"+")
            end
            axis("equal")
            legend(loc="center left", bbox_to_anchor=(1, 0.5))
            pause(0.001)
            figure("strain_af$i")
            # clf()
            plot(strainx[i,:],strainy[i,:])
            plot(xafstrain[i,:]*nchord[i]+x_offset[i]*nchord[i],yafstrain[i,:]*nchord[i])
            if i==n_sec
                plot(y_ac-y_sc,x_ac-x_sc,"x",label = "Aero Center")
                plot(y_sc-y_sc,x_sc-x_sc,"D",label = "Shear Center")
                plot(y_tc-y_sc,x_tc-x_sc,".",label = "Mass Center")
                plot(y_cm-y_sc,x_cm-x_sc,"+",label = "Tension Center")
                plot(y_ac,x_ac,"x",label = "Aero Center")
                plot(y_sc,x_sc,"D",label = "Shear Center")
                plot(y_tc,x_tc,".",label = "Mass Center")
                plot(y_cm,x_cm,"+",label = "Tension Center")
            else
                plot(y_ac-y_sc,x_ac-x_sc,"x")
                plot(y_sc-y_sc,x_sc-x_sc,"D")
                plot(y_tc-y_sc,x_tc-x_sc,".")
                plot(y_cm-y_sc,x_cm-x_sc,"+")
                plot(y_ac,x_ac,"x")
                plot(y_sc,x_sc,"D")
                plot(y_tc,x_tc,".")
                plot(y_cm,x_cm,"+")
            end
            axis("equal")
            legend(loc="center left", bbox_to_anchor=(1, 0.5))
            pause(0.001)
        end
    end
    #-------- TRANSLATE CCBLADE LOADS TO PRECOMP/BEAM LOADS -------#
    nodes = length(spanyloc)
    elements = nodes - 1
    #Beam goes from root to tip
    Py = Fp[3,:] #Lift is z in aero, y in beam
    Pz = Fp[1,:] #Drag is x in aero, z in beam
    Px = Fp[2,:] #compression is y in aero, x in beam
    # --- extract the point forces from distributed in order to apply moments from ac to sc etc -----
    DOF = 6
    F = zeros(DOF*nodes)
    for i = 1:elements
        start = (i-1)*DOF # (0, 0) start of matrix
        _, _, Fsub = BeamFEA.beam_matrix(spanyloc[i+1] - spanyloc[i], [0.0,0.0], [0.0,0.0], [0.0,0.0],
        [0.0,0.0], [0.0,0.0], [0.0,0.0], Px[i:i+1], Py[i:i+1], Pz[i:i+1])
        idx = start+1:start+2*DOF
        F[idx] += Fsub
    end
    # These are in the beam frame of reference
    EIy = zeros(nodes)
    EIz = zeros(nodes)
    EA = zeros(nodes)
    GJ = zeros(nodes)
    rhoA = zeros(nodes)
    rhoJ = zeros(nodes)
    # Px = zeros(nodes) # Already Calculated
    # Py = zeros(nodes)
    # Pz = zeros(nodes)
    Fx = zeros(nodes)
    Fy = zeros(nodes)
    Fz = zeros(nodes)
    Mx = zeros(nodes)
    My = zeros(nodes)
    Mz = zeros(nodes)
    kx = zeros(nodes)
    ky = zeros(nodes)
    kz = zeros(nodes)
    kthetax = zeros(nodes)
    kthetay = zeros(nodes)
    kthetaz = zeros(nodes)
    idxf = 1
    for i = 1:nodes
        # these are all distributed properties
        mass = structure.precompoutput[i].mass
        iyy = structure.precompoutput[i].flap_iner
        ixx = structure.precompoutput[i].lag_iner
        d_sccm2 =momarmx_sccm[i]^2 + momarmy_sccm[i]^2
        izz = ixx+iyy + mass*d_sccm2 #perpendicular axis theorem and parallel axis theorm
        EIy[i] = structure.precompoutput[i].ei_flap
        EIz[i] = structure.precompoutput[i].ei_lag
        EA[i] = structure.precompoutput[i].ea
        GJ[i] = structure.precompoutput[i].gj
        rhoA[i] = mass
        rhoJ[i] = izz
        # Px[i] = 0.0 # Already Calculated
        # Py[i] = 0.0
        # Pz[i] = 0.0
        Fx[i] = 0.0 #TODO:Extra Point Loads centripetal force here
        Fy[i] = 0.0
        Fz[i] = 0.0
        # NOTE(review): F[idxf+3] / F[idxf+5] are presumably the extracted
        # nodal force components paired with the sc-ac moment arms — confirm
        # against the BeamFEA DOF ordering.
        Mx[i] = F[idxf+3]*momarmy_scac[i] + F[idxf+5]*momarmx_scac[i]
        My[i] = 0.0 #TODO:centripetal here, but tension center not aligned with shear center so...?
        Mz[i] = 0.0 #TODO:centripetal here, but tension center not aligned with shear center so...?
        idxf += DOF
    end
    #fixed at hub (future work may allow axial twisting via a passive pitching device)
    kx[1] = Inf
    ky[1] = Inf
    kz[1] = Inf
    kthetax[1] = Inf
    kthetay[1] = Inf
    kthetaz[1] = Inf
    #-------- FEA ANALYSIS -------#
    delta, freq, V, K, M, F = BeamFEA.fea_analysis(spanyloc, EIy, EIz, EA, GJ, rhoA, rhoJ, Px, Py, Pz,
    Fx, Fy, Fz, Mx, My, Mz, kx, ky, kz, kthetax, kthetay, kthetaz)
    # println(minimum(delta))
    #TODO: use non-rigid strain calculation, i.e. composite curvature
    #TODO: include shear strain
    # get strain for each x strip along the blade
    strains = zeros(length(spanyloc),length(strainy[1,:]))
    for i = 1:length(strainy[1,:])
        strains[:,i], Nx, Vy, Vz, Tx, Myout, Mzout = BeamFEA.strain(spanyloc,
        strainy[:,i], strainx[:,i], EIy, EIz, EA,
        Px, Py, Pz, Fx, Fy, Fz, Mx, My, Mz, false)
    end
    # Total structural mass from the distributed mass per unit length.
    mass_structure = trapz(spanyloc,rhoA)
    return strains, delta, freq, mass_structure,strainx_vis,strainy_vis
end
"""
    bucklingcon(structure, strain, webloc, normalchord)

Buckling constraint values, flattened column-major and scaled by 1e2.
Buckling strain is negative while the applied strain is positive, so a
feasible point satisfies `bucklingstrain + sf*strain <= 0` everywhere.
NOT SMOOTH - will cause problems if `webloc` becomes a design variable
(currently only the forward-panel buckling strain, column 1, is used).
"""
function bucklingcon(structure,strain,webloc,normalchord)
    sf = 1.5 # safety factor
    nrows = size(strain,1)
    ncols = size(strain,2)
    c_buckling = zeros(nrows,ncols)
    for col = 1:ncols, row = 1:nrows
        c_buckling[row,col] = structure.bucklingstrain[row,1] + sf*strain[row,col]
    end
    # Flatten (column-major) and scale the constraint vector.
    return c_buckling[:]/1E2
end
"""
    stresscalc(strain::Array{Float64,2}, shear::Array{Float64,1},
               mat::Composites.material, theta_d::Float64)

Compute ply-axis stresses from axial strains and shear stress resultants for
a ply of material `mat` at orientation `theta_d` (degrees). Returns an
`nnodes × nloc × 3` array of ply stresses.
"""
function stresscalc(strain::Array{Float64,2},shear::Array{Float64,1},
    mat::Composites.material,theta_d::Float64)
    # Rotated (laminate-frame) stiffness matrix for this orientation.
    qbar = Composites.getQ(mat,theta_d)
    nnodes, nloc = size(strain)
    plystress = zeros(nnodes,nloc,3)
    q33 = qbar[3,3]
    couple = qbar[1,3]/q33 # axial-to-shear strain coupling term
    for node = 1:nnodes, loc = 1:nloc
        # Recover shear strain from the shear stress resultant.
        gam12 = shear[node]/q33 - couple*strain[node,loc]
        # Laminate-frame stresses, then rotate into the ply axes.
        lamstress = qbar*[strain[node,loc],0,gam12]
        plystress[node,loc,:] = Composites.rotstress(lamstress,theta_d)
    end
    return plystress
end #stress_calc
"""
    stresscon(stress, mat::Composites.material, criteria = "tsaiwu"; BVID = 0.65, sf = 1.5)

Evaluate a composite failure criterion ("maxstress", "tsaiwu" or
"hashinrotem") on the ply stresses `stress` (nnodes × nloc × 3), with
allowables knocked down by the BVID factor and stresses scaled by the safety
factor `sf`. Returns the flattened criterion values minus 1 (feasible <= 0).
Throws an error for an unknown criterion name.
"""
function stresscon(stress,mat::Composites.material,criteria = "tsaiwu";BVID=0.65,sf=1.5)
    # Used to constrain stress
    sigma1 = sf*stress[:,:,1]
    sigma2 = sf*stress[:,:,2]
    tau12 = sf*stress[:,:,3]
    # Knocked-down strength allowables (tension/compression/shear).
    sigma1tu = BVID*mat.xt
    sigma2tu = BVID*mat.yt
    sigma1cu = BVID*mat.xc
    sigma2cu = BVID*mat.yc
    tau12u = BVID*mat.s
    if criteria=="maxstress"
        # max-stress returns 6 criterion values per point
        c_stress = zeros(Float64,6,size(sigma1)...)
        for i = 1:size(sigma1,1)
            for j = 1:size(sigma1,2)
                c_stress[:,i,j],_ = Composites.maxstress(sigma1[i,j],sigma2[i,j],tau12[i,j],
                sigma1tu,sigma1cu,sigma2tu,sigma2cu,tau12u)
            end
        end
    elseif criteria=="tsaiwu"
        # Tsai-Wu returns a single criterion value per point
        c_stress = zeros(Float64,size(sigma1)...)
        for i = 1:size(sigma1,1)
            for j = 1:size(sigma1,2)
                c_stress[i,j],_ = Composites.tsaiwu(sigma1[i,j],sigma2[i,j],tau12[i,j],
                sigma1tu,sigma1cu,sigma2tu,sigma2cu,tau12u)
                # maximum(c_stress)
            end
        end
    elseif criteria=="hashinrotem"
        # Hashin-Rotem returns 4 criterion values per point
        c_stress = zeros(Float64,4,size(sigma1)...)
        for i = 1:size(sigma1,1)
            for j = 1:size(sigma1,2)
                c_stress[:,i,j],_ = Composites.hashinrotem(sigma1[i,j],sigma2[i,j],tau12[i,j],
                sigma1tu,sigma1cu,sigma2tu,sigma2cu,tau12u)
            end
        end
    else
        error("Specified failure criteria has no implementation")
    end
    return c_stress[:]-1.0
end #stresscon
"""
    stress_wrapper(usedmaterials, plyprops, spanlocy, orientation, strain, shear)

Evaluate ply stresses and failure-criterion constraints for every non-foam
material in `usedmaterials`. Returns `(c_stress, stress, stresslayer)` where
`c_stress` stacks all constraint values, `stress` is a per-point stress
magnitude (from the first material), and `stresslayer` holds constraints per
material layer.
"""
function stress_wrapper(usedmaterials,plyprops,spanlocy,orientation,strain,shear)
    c_stress = []
    stress = [] #used in vtk output
    stresslayer = zeros(length(usedmaterials),length(spanlocy),length(strain[1,:]))
    j=1
    for i = 1:length(usedmaterials)
        if !contains(usedmaterials[i],"foam") #don't check stress on foam
            matnames = plyprops.names
            idx = find(matnames -> matnames == usedmaterials[i],matnames)
            material = plyprops.plies[idx[1]]
            orien = orientation[j]
            j+=1 #so we don't have to have extra design variables in orientation
            stressi = stresscalc(strain,shear,material,orien)
            # NOTE(review): if the first used material is foam, `stress` stays
            # an empty vector — assumes material 1 is never foam. Confirm.
            if i==1 #only calc once since it's the same for the whole structure (assuming thin layups)
                stress = sqrt.(stressi[:,:,1].^2+stressi[:,:,2].^2+stressi[:,:,3].^2) #TODO break up or calculate von-mises etc
            end
            # determine contraint values
            c_stressi = stresscon(stressi,material)
            # NOTE(review): `c_stressi` is a flat vector assigned into a 2-D
            # slice; relies on matching linear lengths. Confirm shapes.
            stresslayer[i,:,:] = c_stressi
            c_stress = [c_stress;c_stressi]
        end
    end
    return c_stress, stress, stresslayer
end #stress_wrapper
"""
    spancoord(spanyloc, Rtip, dihedral, sweep, twist, x_offset, chord)

Convert the spanwise station parameter `spanyloc` plus per-panel dihedral and
sweep angles into 3-D station coordinates, scaled so the spanwise (y) extent
equals `Rtip`, and offset chordwise by `x_offset` rotated through the local
twist. Returns `(sloc, xloc, yloc, zloc)` where `sloc` is the arc length
along the structural axis.
"""
function spancoord(spanyloc,Rtip,dihedral,sweep,twist,x_offset,chord)
    nsections = length(spanyloc)
    # Per-panel lengths along the (unscaled) span parameter.
    dy = spanyloc[2:end] - spanyloc[1:(end-1)]
    cdih = cos.(dihedral[1:nsections-1])
    sdih = sin.(dihedral[1:nsections-1])
    cswp = cos.(sweep[1:nsections-1])
    sswp = sin.(sweep[1:nsections-1])
    # Accumulate panel displacements into station coordinates.
    xloc = zeros(nsections)
    yloc = zeros(nsections)
    zloc = zeros(nsections)
    xloc[2:end] = cumsum(dy.*cdih.*sswp)
    yloc[2:end] = cumsum(dy.*cdih.*cswp)
    zloc[2:end] = cumsum(dy.*sdih.*cswp)
    # Scale so the spanwise extent matches the tip radius; shift by the
    # twist-rotated chordwise offset.
    scaling = Rtip/yloc[nsections]
    xloc = xloc*scaling - cos.(twist).*x_offset.*chord
    yloc = yloc*scaling
    zloc = zloc*scaling + sin.(twist).*x_offset.*chord
    # Arc-length parameter along the structural axis.
    sloc = zeros(nsections)
    for k = 2:nsections
        sloc[k] = sloc[k-1]+sqrt((xloc[k]-xloc[k-1])^2.0+
        (yloc[k]-yloc[k-1])^2.0+(zloc[k]-zloc[k-1])^2.0)
    end
    return sloc,xloc,yloc,zloc
end
"""
    chordlengths(chord, sweep)

Project each section chord onto the plane normal to the local sweep:
end sections use their adjacent panel sweep, interior sections use the
average of the two neighboring panel sweeps. Returns the vector of
normal chord lengths (length(sweep) + 1 sections).
"""
function chordlengths(chord,sweep)
    nsections = length(sweep) + 1
    normalchord = zeros(nsections)
    for k = 1:nsections
        if k == 1
            ang = sweep[1]
        elseif k == nsections
            ang = sweep[end]
        else
            ang = (sweep[k-1]+sweep[k])/2.0
        end
        normalchord[k] = chord[k]*cos(ang)
    end
    return normalchord
end
"""
    printmaxviol(c, name)

Print `name` and the maximum entry of `c` when any constraint value is within
1e-4 of (or above) zero, i.e. when a constraint is violated or nearly active.
"""
function printmaxviol(c,name)
    # `any` avoids allocating an index vector and, unlike Base.find
    # (removed in Julia 1.0), works on all Julia versions.
    if any(c .> -1e-4)
        println(string(name),": ",maximum(c))
    end
end
#
# (private)
# trapezoidal integration
#
"""
    trapz(x, y)

Trapezoidal integration of `y` with respect to `x` (both `Vector{Float64}`,
same length). Returns the scalar integral.
"""
function trapz(x::Array{Float64,1}, y::Array{Float64,1}) # integrate y w.r.t. x
    total = 0.0
    for k = 2:length(x)
        # Area of one trapezoid panel.
        total += (x[k]-x[k-1])*0.5*(y[k-1] + y[k])
    end
    return total
end
"""
    linear_interp(x_array, y_array, xnew::Float64)

Piecewise-linear interpolation of `(x_array, y_array)` at the scalar `xnew`.
Values outside the sampled range are clamped to the endpoint values.
Assumes `x_array` is sorted ascending — TODO confirm at call sites.
"""
function linear_interp(x_array::Array{Float64,1},y_array::Array{Float64,1},xnew::Float64)
    if xnew<minimum(x_array) # cap at max and min
        return y_array[indmin(x_array)]
    elseif xnew>maximum(x_array)
        return y_array[indmax(x_array)]
    else
        # Locate the bracketing segment with an explicit index variable.
        # The original relied on the for-loop variable leaking after `break`,
        # which Julia >= 1.0 no longer does (the loop index is loop-local),
        # so interpolation beyond the first segment used the wrong segment.
        idx = 1
        for k = 1:length(x_array)-1
            if x_array[k]<=xnew && x_array[k+1]>=xnew
                idx = k
                break
            end
        end
        fraction = (xnew - x_array[idx])/(x_array[idx+1] - x_array[idx])
        return y_array[idx] + fraction * (y_array[idx+1] - y_array[idx])
    end
end
"""
    linear_interp(x_array, y_array, xnew::Array{Float64,1})

Vectorized form: interpolate at every entry of `xnew` and return the vector
of results.
"""
function linear_interp(x_array::Array{Float64,1},y_array::Array{Float64,1},xnew::Array{Float64,1})
    # zeros(::Array) was removed after Julia 0.6; size by length instead
    # (identical result here since the signature fixes Float64 vectors).
    ynew = zeros(length(xnew))
    for i = 1:length(xnew)
        ynew[i] = linear_interp(x_array,y_array,xnew[i])
    end
    return ynew
end
| [
8818,
25432,
62,
7249,
942,
7,
10724,
929,
11,
77,
62,
457,
11,
4246,
396,
11,
6649,
420,
11,
87,
1878,
11,
88,
1878,
11,
77,
354,
585,
11,
2543,
62,
83,
11,
26124,
37266,
11,
87,
62,
28968,
11,
13989,
341,
11,
87,
17946,
11,
... | 1.93438 | 11,338 |
using Makie
# Entry point compiled to a C-callable symbol: draws a random scatter with a
# custom axis, shrinks the tick font, and blocks until the window is closed.
Base.@ccallable function julia_main(ARGS::Vector{String})::Cint
    scn = Scene()
    scatter(scn, rand(50), rand(50), markersize = 0.01)
    ticks = range(0, stop = 1, length = 4)
    ax = axis(scn, ticks, ticks, textsize = 0.1, axisnames = ("", "", ""))
    # Replace only the tick font size, keeping the remaining font settings.
    fnt = to_value(ax, :tickfont2d)
    ax[:tickfont2d] = map(t -> (0.07, t[2:end]...), fnt)
    center!(scn)
    Makie.block(scn)
    return 0
end
| [
3500,
15841,
494,
198,
198,
14881,
13,
31,
535,
439,
540,
2163,
474,
43640,
62,
12417,
7,
1503,
14313,
3712,
38469,
90,
10100,
92,
2599,
25,
34,
600,
198,
220,
220,
220,
3715,
796,
28315,
3419,
198,
220,
220,
220,
41058,
7,
29734,
... | 2.276596 | 188 |
# for GMM
"""
    SVmoments(m, n, θ, η, ϵ)

Simulated moment contributions for GMM: for each of the S shock columns in
`η`/`ϵ`, simulate `SVmodel` at parameters `θ`, compute √n times the auxiliary
statistic, and return the S×k matrix of simulated statistics minus the data
statistic `m` (subtracted row-wise).
"""
function SVmoments(m, n, θ, η, ϵ)
    S = size(ϵ, 2)
    ms = zeros(S,size(m,1))
    # Each draw is independent and writes a disjoint row of `ms`, so the
    # threaded loop has no write conflicts.
    Threads.@threads for s=1:S
        ms[s,:] = sqrt(n)*aux_stat(SVmodel(θ, n, η[:,s], ϵ[:,s])[1])
    end
    ms .- m'
end
| [
2,
329,
6951,
44,
220,
198,
8818,
20546,
32542,
658,
7,
76,
11,
299,
11,
7377,
116,
11,
7377,
115,
11,
18074,
113,
8,
198,
220,
220,
220,
311,
796,
2546,
7,
139,
113,
11,
362,
8,
198,
220,
220,
220,
13845,
796,
1976,
27498,
7,... | 1.619403 | 134 |
# Init to get started
| [
2,
44707,
284,
651,
2067,
198
] | 3.666667 | 6 |
<filename>dirichlet_process_mixture_model/crp.jl<gh_stars>1-10
import DataStructures.Stack
"""
Mutable state of a Chinese Restaurant Process partition: per-datum cluster
assignments, per-cluster sizes, and a pool of recycled cluster ids.
"""
type CRPState
    # map from data index to cluster index
    assignments::Dict{Int, Int}
    # map from cluster index to size of cluster
    counts::Dict{Int, Int}
    # reuse ids for the new clusters by pushing them onto a stack
    # this is necessary because we may do billions of Gibbs sweeps
    free::Stack{Int}
    # the next cluster id to allocate, after free stack is empty
    next_cluster::Int
    # create an empty CRP state
    function CRPState()
        free = Stack(Int)
        push!(free, 1)
        new(Dict{Int, Int}(), Dict{Int, Int}(), free, 2)
    end
end
# true when data index `i` already has a cluster assignment
has_assignment_for(crp::CRPState, i::Int) = haskey(crp.assignments, i)
# cluster id that would be used for the next newly created cluster
next_new_cluster(crp::CRPState) = DataStructures.top(crp.free)
# cluster assigned to data index `i` (throws KeyError if unassigned)
assignment(crp::CRPState, i::Int) = crp.assignments[i]
# number of data points currently assigned
num_assignments(crp::CRPState) = length(crp.assignments)
# true when `cluster` currently exists (has at least one member)
has_cluster(crp::CRPState, cluster::Int) = haskey(crp.counts, cluster)
# number of members of `cluster`
counts(crp::CRPState, cluster::Int) = crp.counts[cluster]
# iterator over existing cluster ids
clusters(crp::CRPState) = keys(crp.counts)
"""
    log_probability(crp::CRPState, alpha::Float64)

Log-probability of the current partition under a CRP with concentration
`alpha`: K*log(alpha) + sum_k lgamma(n_k) + lgamma(alpha) - lgamma(N + alpha).
"""
function log_probability(crp::CRPState, alpha::Float64)
    num_data = length(crp.assignments)
    num_clusters = length(crp.counts)
    logp = num_clusters * log(alpha)
    logp += sum(lgamma.(collect(values(crp.counts))))
    logp += lgamma(alpha) - lgamma(num_data + alpha)
    logp
end
"""
    incorporate!(crp::CRPState, i::Int, cluster::Int)

Assign data index `i` to `cluster`, which must be either an existing cluster
or the id returned by `next_new_cluster`. Creating a new cluster pops its id
from the recycle stack and, when the stack empties, pushes the next fresh id.
"""
function incorporate!(crp::CRPState, i::Int, cluster::Int)
    # invariants: the pending new-cluster id is unused and `i` is unassigned
    @assert !haskey(crp.counts, next_new_cluster(crp))
    @assert !haskey(crp.assignments, i)
    @assert (cluster == next_new_cluster(crp)) || haskey(crp.counts, cluster)
    crp.assignments[i] = cluster
    if cluster == next_new_cluster(crp)
        # allocate a new cluster
        crp.counts[cluster] = 0
        pop!(crp.free)
        if isempty(crp.free)
            # recycle stack exhausted: mint a brand-new cluster id
            @assert !haskey(crp.counts, crp.next_cluster)
            push!(crp.free, crp.next_cluster)
            crp.next_cluster += 1
        end
    else
        @assert crp.counts[cluster] > 0
    end
    crp.counts[cluster] += 1
end
"""
    unincorporate!(crp::CRPState, i::Int)

Remove the assignment of data index `i`; when its cluster becomes empty, the
cluster is deleted and its id is recycled onto the free stack.
"""
function unincorporate!(crp::CRPState, i::Int)
    @assert !haskey(crp.counts, next_new_cluster(crp))
    @assert haskey(crp.assignments, i)
    cluster = crp.assignments[i]
    delete!(crp.assignments, i)
    crp.counts[cluster] -= 1
    if crp.counts[cluster] == 0
        # free the empty cluster
        delete!(crp.counts, cluster)
        push!(crp.free, cluster)
    end
end
"""
    draw!(crp::CRPState, alpha::Float64, data_index::Int)

Sample a cluster for `data_index` from the CRP predictive distribution
(existing clusters proportional to their size, a new cluster proportional to
`alpha`), incorporate the assignment, and return the chosen cluster id.
"""
function draw!(crp::CRPState, alpha::Float64, data_index::Int)
    @assert !haskey(crp.assignments, data_index)
    clusters = collect(keys(crp.counts))
    # unnormalized probabilities: counts for existing clusters, alpha for new
    probs = Array{Float64,1}(length(clusters) + 1)
    for (j, cluster) in enumerate(clusters)
        probs[j] = crp.counts[cluster]
    end
    probs[end] = alpha
    probs = probs / sum(probs)
    j = rand(Categorical(probs))
    if (j == length(clusters) + 1)
        # new cluster
        cluster = next_new_cluster(crp)
    else
        cluster = clusters[j]
    end
    incorporate!(crp, data_index, cluster)
    cluster
end
| [
27,
34345,
29,
15908,
488,
1616,
62,
14681,
62,
76,
9602,
62,
19849,
14,
6098,
79,
13,
20362,
27,
456,
62,
30783,
29,
16,
12,
940,
198,
11748,
6060,
44909,
942,
13,
25896,
198,
198,
4906,
8740,
47,
9012,
198,
220,
220,
220,
1303,
... | 2.245068 | 1,318 |
"""
Singleton strategy type tagging hill-climb structure learning
(no fields; presumably used for dispatch — confirm at call sites).
"""
struct HillClimb <: BaseStructureLearning end
| [
7249,
3327,
34,
2475,
65,
1279,
25,
7308,
1273,
5620,
41730,
886,
198
] | 3.538462 | 13 |
"""In-place version of `signed_exponent(::Array)`."""
function signed_exponent!(A::Array{T}) where {T<:Union{Float16,Float32,Float64}}
# sign&fraction mask
sfmask = Base.sign_mask(T) | Base.significand_mask(T)
emask = Base.exponent_mask(T)
sbits = Base.significand_bits(T)
bias = Base.exponent_bias(T)
ebits = Base.exponent_bits(T)-1
for i in eachindex(A)
ui = reinterpret(Unsigned,A[i])
sf = ui & sfmask # sign & fraction bits
e = ((ui & emask) >> sbits) - bias # de-biased exponent
eabs = e == -bias ? 0 : abs(e) # for iszero(A[i]) e == -bias, set to 0
esign = (e < 0 ? 1 : 0) << ebits # determine sign of exponent
esigned = ((esign | eabs) % typeof(ui)) << sbits # concatentate exponent
A[i] = reinterpret(T,sf | esigned) # concatenate everything back together
end
end
"""
```julia
B = signed_exponent(A::Array{T}) where {T<:Union{Float16,Float32,Float64}}
```
Converts the exponent bits of Float16,Float32 or Float64-arrays from its
conventional biased-form into a sign&magnitude representation.
# Example
```julia
julia> bitstring(10f0,:split)
"0 10000010 01000000000000000000000"
julia> bitstring.(signed_exponent([10f0]),:split)[1]
"0 00000011 01000000000000000000000"
```
In the former the exponent 3 is interpret from 0b10000010=130 via subtraction of
the exponent bias of Float32 = 127. In the latter the exponent is inferred from
sign bit (0) and a magnitude represetation 2^1 + 2^1 = 3."""
function signed_exponent(A::Array{T}) where {T<:Union{Float16,Float32,Float64}}
B = copy(A)
signed_exponent!(B)
return B
end | [
37811,
818,
12,
5372,
2196,
286,
4600,
32696,
62,
11201,
3471,
7,
3712,
19182,
8,
63,
526,
15931,
198,
8818,
4488,
62,
11201,
3471,
0,
7,
32,
3712,
19182,
90,
51,
30072,
810,
1391,
51,
27,
25,
38176,
90,
43879,
1433,
11,
43879,
26... | 2.489552 | 670 |
#===================================================================================================
Kernel Kernels Module
===================================================================================================#
module MLKernels
import Base: convert, eltype, print, show, string, ==, *, /, +, -, ^, exp, tanh
export
# Memory
Orientation,
# Kernel Functions
Kernel,
MercerKernel,
AbstractExponentialKernel,
ExponentialKernel,
LaplacianKernel,
SquaredExponentialKernel,
GaussianKernel,
RadialBasisKernel,
GammaExponentialKernel,
AbstractRationalQuadraticKernel,
RationalQuadraticKernel,
GammaRationalQuadraticKernel,
MaternKernel,
LinearKernel,
PolynomialKernel,
ExponentiatedKernel,
PeriodicKernel,
NegativeDefiniteKernel,
PowerKernel,
LogKernel,
SigmoidKernel,
# Kernel Function Properties
ismercer,
isnegdef,
isstationary,
isisotropic,
# Kernel Matrix
kernel,
kernelmatrix,
kernelmatrix!,
centerkernelmatrix!,
centerkernelmatrix,
# Kernel Approximation
NystromFact,
nystrom
using SpecialFunctions: besselk, gamma
import LinearAlgebra
import Statistics
@doc raw"""
Orientation
Union of the two `Val` types representing the data matrix orientations:
1. `Val{:row}` identifies when observation vector corresponds to a row of the data matrix
2. `Val{:col}` identifies when each observation vector corresponds to a column of the data
matrix
"""
const Orientation = Union{Val{:row}, Val{:col}}
include("utils.jl")
include("basefunctions.jl")
include("basematrix.jl")
include("kernelfunctions.jl")
include("kernelmatrix.jl")
include("nystrom.jl")
include("deprecated.jl")
end # MLKernels | [
2,
23926,
10052,
18604,
198,
220,
32169,
509,
44930,
19937,
198,
23926,
10052,
18604,
2,
198,
198,
21412,
10373,
42,
44930,
198,
198,
11748,
7308,
25,
10385,
11,
1288,
4906,
11,
3601,
11,
905,
11,
4731,
11,
6624,
11,
1635,
11,
1220,
... | 2.566186 | 763 |
module IntegralsModule
export computeIntegralOverlap, computeElectronRepulsionIntegral, computeTensorBlockElectronRepulsionIntegrals, computeIntegralKinetic, computeIntegralNuclearAttraction, computeIntegralThreeCenterOverlap, computeMatrixBlockOverlap, computeMatrixBlockKinetic, computeMatrixBlockNuclearAttraction
using ..BaseModule
using ..BasisFunctionsModule
using ..AtomModule
using ..GeometryModule
using ..DocumentationModule
using ..ShellModule
using ProgressMeter
import Base.normalize!
ProgressMeter.printover(STDOUT," + (GSL........................")
using GSL
ProgressMeter.printover(STDOUT," + IntegralsModule.............")
function GaussianIntegral1D_Valeev(mqn::Int,exponent::Float64)
m = mqn
ζ = exponent
if (mqn%2!=0) # integral over uneven function
return 0
else
return (doublefactorial(m-1)*sqrt(π)) / ((2ζ)^(m/2)*sqrt(ζ))
end
end
@doc """
I_x = Integrate[x^m Exp[-ζ x^2], {x,-∞,∞}] (acc. to Fundament. of Mol. Integr. Eval. by Fermann, Valeev)
""" GaussianIntegral1D_Valeev
@add_doc GenericCitation("Fundament. of Mol. Integr. Eval. by Fermann, Valeev") GaussianIntegral1D_Valeev
"""
I_x = Integrate[x^m Exp[-ζ x^2], {x,-∞,∞}] (acc. to Mathematica 9)
"""
function GaussianIntegral1D_Mathematica(mqn::Int,exponent::Float64)
m = mqn
ζ = exponent
if (mqn%2!=0) # integral over uneven function
return 0
else
t=(m+1)/2
return ζ^(-t) * gamma(t)
end
end
GaussianIntegral1D = GaussianIntegral1D_Mathematica
#function FundamentalIntegral(
# pgb1::PrimitiveGaussianBasisFunction,
# pgb2::PrimitiveGaussianBasisFunction,
# func::Function,
# pgb3::PrimitiveGaussianBasisFunction,
# pgb4::PrimitiveGaussianBasisFunction)
# # (pgb1(x1) pgb2(x1) | func(x1,x2) | pgb3(x2) pgb4(x2) )
# # pgb1..4 are considered s-Functions
# # acc. to Molecular Integrals of Gaussian Basis Functions by Gill
#
# # I = (π G_AB G_CD) / ( 2(ζ η)^(3/2) R^3 ) Integrate[u Sin[u] Exp[-u^2/(4T)] FT[f][u/R],{u,0,∞}]
#end
function GaussProductFundamental(
pgb1::PrimitiveGaussianBasisFunction,
pgb2::PrimitiveGaussianBasisFunction)
# Fundamental means primitives are taken as s-Functions
# K Exp[-γ r^2_P] = pgb1 * pgb2 (acc. to Fundament. of Mol. Integr. Eval. by Fermann, Valeev)
α1 = pgb1.exponent
α2 = pgb2.exponent
A = pgb1.center
B = pgb2.center
γ = α1 + α2
P = (α1*A + α2*B) / γ
AB = distance(A,B)
K = exp(-α1*α2*AB^2/γ)
return (K,P,γ)
end
type PolynomialFactors
x::Array{Tuple{Float64,Int},1} # Float64*x^Int
y::Array{Tuple{Float64,Int},1} # Float64*y^Int
z::Array{Tuple{Float64,Int},1} # Float64*z^Int
end
function GaussProductPolynomialFactor(
pgb1::PrimitiveGaussianBasisFunction,
pgb2::PrimitiveGaussianBasisFunction)
# _Sum_K[f_k r^k]_ K Exp[-γ r^2_P] = pgb1 * pgb2 (acc. to Fundament. of Mol. Integr. Eval. by Fermann, Valeev)
A = pgb1.center
B = pgb2.center
(K,P,γ) = GaussProductFundamental(pgb1,pgb2)
#factors = PolynomialFactors(Array(Tuple{Float64,Int},pgb1.mqn.x+pgb2.mqn.x),Array(Tuple{Float64,Int},pgb1.mqn.y+pgb2.mqn.y),Array(Tuple{Float64,Int},pgb1.mqn.z+pgb2.mqn.z))
factors = PolynomialFactors(Tuple{Float64,Int}[],Tuple{Float64,Int}[],Tuple{Float64,Int}[])
for xyz in 1:3
l1 = getfield(pgb1.mqn,xyz)
l2 = getfield(pgb2.mqn,xyz)
sizehint!(getfield(factors,xyz),1+l1+l2)
for k in 0:l1+l2
f = 0.
for q in max(-k,k-2l2):2:min(k,2l1-k)
i = (k+q) ÷ 2
j = (k-q) ÷ 2
f += binomial(l1,i) * binomial(l2,j) * getfield((P-A),xyz)^(l1-i) * getfield((P-B),xyz)^(l2-j)
end
#ij = [((k+q) ÷ 2,(k-q) ÷ 2) for q in max(-k,k-2l2):2:min(k,2l1-k)]
#f = sum([binomial(l1,i) * binomial(l2,j) * (P-A).(xyz)^(l1-i) * (P-B).(xyz)^(l2-j) for(i,j) in ij])
push!(getfield(factors,xyz),(f,k))
end
end
return factors
end
function computeMatrixBlockOverlap(sh1::Shell,sh2::Shell)
return [computeIntegralOverlap(cgb1,cgb2) for cgb1 in expandShell(sh1), cgb2 in expandShell(sh2)]
end
function computeMatrixBlockKinetic(sh1::Shell,sh2::Shell)
return [computeIntegralKinetic(cgb1,cgb2) for cgb1 in expandShell(sh1), cgb2 in expandShell(sh2)]
end
function computeMatrixBlockNuclearAttraction(sh1::Shell,sh2::Shell,atom::Atom)
return [computeIntegralNuclearAttraction(cgb1,cgb2,atom) for cgb1 in expandShell(sh1), cgb2 in expandShell(sh2)]
end
function computeMatrixBlockNuclearAttraction(sh1::Shell,sh2::Shell,geo::Geometry)
return mapreduce(atom->computeMatrixBlockNuclearAttraction(sh1,sh2,atom),+,0,geo.atoms)
end
function computeIntegralOverlap(
pgb1::PrimitiveGaussianBasisFunction,
pgb2::PrimitiveGaussianBasisFunction)
# S_ij = Integrate[phi_i(r) phi_j(r),{r el R}] (acc. to Fundament. of Mol. Integr. Eval. by <NAME>)
(K,P,γ) = GaussProductFundamental(pgb1,pgb2)
factors = GaussProductPolynomialFactor(pgb1,pgb2)
Ix = sum([f*GaussianIntegral1D(i,γ) for (f,i) in factors.x])
Iy = sum([f*GaussianIntegral1D(i,γ) for (f,i) in factors.y])
Iz = sum([f*GaussianIntegral1D(i,γ) for (f,i) in factors.z])
return K*Ix*Iy*Iz::Float64
end
function computeIntegralOverlap(
cgb1::ContractedGaussianBasisFunction,
cgb2::ContractedGaussianBasisFunction)
integral = 0.
for (coeff1,pgb1) in zip(cgb1.coefficients,cgb1.primitiveBFs)
for (coeff2,pgb2) in zip(cgb2.coefficients,cgb2.primitiveBFs)
integral += coeff1*coeff2*computeIntegralOverlap(pgb1,pgb2)
end
end
return integral::Float64
end
function computeIntegralThreeCenterOverlap(
pgb1::PrimitiveGaussianBasisFunction,
pgb2::PrimitiveGaussianBasisFunction,
pgb3::PrimitiveGaussianBasisFunction)
#
Ix = 0.
Iy = 0.
Iz = 0.
(K12,center,exponent) = IntegralsModule.GaussProductFundamental(pgb1,pgb2)
factors12 = IntegralsModule.GaussProductPolynomialFactor(pgb1,pgb2)
#
mqn = MQuantumNumber(0,0,0)
pgb12 = PrimitiveGaussianBasisFunction(center,exponent,mqn)
(K,P,γ) = IntegralsModule.GaussProductFundamental(pgb12,pgb3)
#
for xyz in [:x,:y,:z]
for (f,i) in getfield(factors12, xyz)
if (xyz == :x)
mqn = MQuantumNumber(i,0,0)
pgb12 = PrimitiveGaussianBasisFunction(center,exponent,mqn)
elseif (xyz == :y)
mqn = MQuantumNumber(0,i,0)
pgb12 = PrimitiveGaussianBasisFunction(center,exponent,mqn)
elseif (xyz == :z)
mqn = MQuantumNumber(0,0,i)
pgb12 = PrimitiveGaussianBasisFunction(center,exponent,mqn)
end
factors = IntegralsModule.GaussProductPolynomialFactor(pgb12,pgb3)
if (xyz == :x)
Ix += sum([f*g*IntegralsModule.GaussianIntegral1D(j,γ) for (g,j) in factors.x])
elseif (xyz == :y)
Iy += sum([f*g*IntegralsModule.GaussianIntegral1D(j,γ) for (g,j) in factors.y])
elseif (xyz == :z)
Iz += sum([f*g*IntegralsModule.GaussianIntegral1D(j,γ) for (g,j) in factors.z])
end
end
end
#
return K12*K*Ix*Iy*Iz::Float64
end
function computeIntegralThreeCenterOverlap(
cgb1::ContractedGaussianBasisFunction,
cgb2::ContractedGaussianBasisFunction,
cgb3::ContractedGaussianBasisFunction)
#
integral = 0.
for (coeff1,pgb1) in zip(cgb1.coefficients,cgb1.primitiveBFs),
(coeff2,pgb2) in zip(cgb2.coefficients,cgb2.primitiveBFs),
(coeff3,pgb3) in zip(cgb3.coefficients,cgb3.primitiveBFs)
integral += coeff1*coeff2*coeff3*computeIntegralThreeCenterOverlap(pgb1,pgb2,pgb3)
end
return integral
end
function incrmqn(mqn::MQuantumNumber,xyz::Symbol,inc::Int)
mqn_t = [mqn.x,mqn.y,mqn.z]
xyznum = Dict(:x => 1, :y => 2, :z => 3)
num = xyznum[xyz]
mqn_t[num] += inc
return MQuantumNumber(mqn_t[1],mqn_t[2],mqn_t[3])
end
function computeIntegralKinetic(
pgb1::PrimitiveGaussianBasisFunction,
pgb2::PrimitiveGaussianBasisFunction)
#Ix + Iy + Iz = (pgb1 | 1/2 ∇^2 | pgb2)
#Ix = 1/2 l1 l2 <-1|-1> + 2 α1 α2 <+1|+1> - α1 l2 <+1|-1> - α2 l1 <-1|+1>
#(acc. to Fundamentals of Mol. Integr. Eval. by <NAME> (eq. 4.1 + 4.13))
integral = 0.
for xyz in (:x,:y,:z)
pgb1decr = deepcopy(pgb1)
pgb1decr.mqn = incrmqn(pgb1.mqn,xyz,-1)
pgb1incr = deepcopy(pgb1)
pgb1incr.mqn = incrmqn(pgb1.mqn,xyz,1)
pgb2decr = deepcopy(pgb2)
pgb2decr.mqn = incrmqn(pgb2.mqn,xyz,-1)
pgb2incr = deepcopy(pgb2)
pgb2incr.mqn = incrmqn(pgb2.mqn,xyz,1)
l1 = getfield(pgb1.mqn, xyz)
l2 = getfield(pgb2.mqn, xyz)
α1 = pgb1.exponent
α2 = pgb2.exponent
if(l1*l2!=0) integral += (1/2*l1*l2) * computeIntegralOverlap(pgb1decr,pgb2decr) end
integral += (2*α1*α2) * computeIntegralOverlap(pgb1incr,pgb2incr)
if(l2 !=0) integral += (-α1*l2) * computeIntegralOverlap(pgb1incr,pgb2decr) end
if(l1 !=0) integral += (-l1*α2) * computeIntegralOverlap(pgb1decr,pgb2incr) end
end
return integral::Float64
end
function computeIntegralKinetic(
cgb1::ContractedGaussianBasisFunction,
cgb2::ContractedGaussianBasisFunction)
integral = 0.
for (coeff1,pgb1) in zip(cgb1.coefficients,cgb1.primitiveBFs),
(coeff2,pgb2) in zip(cgb2.coefficients,cgb2.primitiveBFs)
integral += coeff1*coeff2*computeIntegralKinetic(pgb1,pgb2)
end
return integral
end
function OverlapFundamental(
pgb1::PrimitiveGaussianBasisFunction,
pgb2::PrimitiveGaussianBasisFunction)
(K,P,γ) = GaussProductFundamental(pgb1,pgb2)
# Overlap(s-type pgb1, s-type pgb2) (acc. to. Mathematica)
return K*(π/γ)^(3/2)::Float64
end
function FIntegral(m::Integer,x::Real,APPROX_ZERO::Float64=1e-5)
# F[m,x] = Integrate[u^2m Exp[-x u^2],{u,0,1}
# = 1/2 x^(-0.5-m) ( Gamma[1/2 + m] - Gamma[1/2 + m, x] ) # acc. to Mathematica
x = max(x,APPROX_ZERO) # for x -> 0
if (x<=APPROX_ZERO) return 1/(1+2*m) end
return 1/2 * x^(-1/2-m) * ( GSL.sf_gamma(1/2+m) - GSL.sf_gamma_inc(1/2+m,x) )
end
function ASummand(
f::Real,
l1::Integer,
l2::Integer,
Ax::Real,
Bx::Real,
PCx::Real,
γ::Real,
l::Integer,
r::Integer,
i::Integer)
# A[l1,l2,Ax,Bx,Cx,γ] = (-1)^l f_l(l1,l2,PAx,PBx) ((-1)^i l! PCx^(l-2r-2i) (1/(4γ))^(r+i)) / (r! i! (l-2r-2i)!)
return (-1)^l * f * (-1)^i * factorial(l) * PCx^(l-2r-2i) * (1/(4γ))^(r+i) / (factorial(r) * factorial(i) * factorial(l - 2r - 2i))
end
function computeIntegralNuclearAttraction(
pgb1::PrimitiveGaussianBasisFunction,
pgb2::PrimitiveGaussianBasisFunction,
atom::Atom)
# V = K * A(l1,l2,Ax,Bx,Cx,γ) * A(m1,m2,Ay,By,Cy,γ) * A(n1,n2,Az,Bz,Cz,γ) * F[l+m+n-2(r+s+t) - (i+j+k),γ PC^2]
#acc. to. Handbook of Comput. Quant. Chem. by Cook, chap. 7.7.3 final formula
K,P,γ = GaussProductFundamental(pgb1,pgb2)
C = atom.position
result = 0
for (fx,l) in GaussProductPolynomialFactor(pgb1,pgb2).x,
r in 0:floor(Int,(l/2)),
i in 0:floor(Int,((l-2r)/2))
Ax = ASummand(fx,pgb1.mqn.x,pgb2.mqn.x,pgb1.center.x,pgb2.center.x,(P-C).x,γ,l,r,i)
for (fy,m) in GaussProductPolynomialFactor(pgb1,pgb2).y,
s in 0:floor(Int,(m/2))m,
j in 0:floor(Int,((m-2s)/2))
Ay = ASummand(fy,pgb1.mqn.y,pgb2.mqn.y,pgb1.center.y,pgb2.center.y,(P-C).y,γ,m,s,j)
for (fz,n) in GaussProductPolynomialFactor(pgb1,pgb2).z,
t in 0:floor(Int,(n/2)),
k in 0:floor(Int,((n-2t)/2))
Az = ASummand(fz,pgb1.mqn.z,pgb2.mqn.z,pgb1.center.z,pgb2.center.z,(P-C).z,γ,n,t,k)
result += 2π/γ * K * Ax * Ay * Az * FIntegral(l+m+n-2*(r+s+t)-(i+j+k),γ*distance(P,C)^2)
end
end
end
return -atom.element.atomicNumber * result
end
function computeIntegralNuclearAttraction(
cgb1::ContractedGaussianBasisFunction,
cgb2::ContractedGaussianBasisFunction,
atom::Atom)
integral = 0.
for (coeff1,pgb1) in zip(cgb1.coefficients,cgb1.primitiveBFs),
(coeff2,pgb2) in zip(cgb2.coefficients,cgb2.primitiveBFs)
integral += coeff1*coeff2*computeIntegralNuclearAttraction(pgb1,pgb2,atom)
end
return integral
end
type θfactors
x::Array{Tuple{Float64,Int,Int},1} # θ,l,r
y::Array{Tuple{Float64,Int,Int},1} # θ,l,r
z::Array{Tuple{Float64,Int,Int},1} # θ,l,r
end
function θFactors(
μ::PrimitiveGaussianBasisFunction,
ν::PrimitiveGaussianBasisFunction)
K,P,γ = IntegralsModule.GaussProductFundamental(μ,ν)
factors = θfactors(Tuple{Float64,Int,Int}[],Tuple{Float64,Int,Int}[],Tuple{Float64,Int,Int}[])
for xyz in 1:3
sizehint!(getfield(factors,xyz),floor(Int,(length(getfield(GaussProductPolynomialFactor(μ,ν),xyz))+1)*5/8))
for (f,l) in getfield(IntegralsModule.GaussProductPolynomialFactor(μ,ν),xyz),
r in 0:floor(Int,(l/2))
θ = f * (factorial(l)*γ^(r-l))/(factorial(r)*factorial(l-2r))
push!(getfield(factors,xyz),(θ,l,r))
end
end
return factors
end
type Bfactors
x::Array{Tuple{Float64,Int,Int,Int,Int,Int},1} # B,l12,r12,i,l34,r34
y::Array{Tuple{Float64,Int,Int,Int,Int,Int},1} # B,l12,r12,i,l34,r34
z::Array{Tuple{Float64,Int,Int,Int,Int,Int},1} # B,l12,r12,i,l34,r34
end
function BFactors(
μ::PrimitiveGaussianBasisFunction,
ν::PrimitiveGaussianBasisFunction,
λ::PrimitiveGaussianBasisFunction,
σ::PrimitiveGaussianBasisFunction)
K1,P,γ1 = IntegralsModule.GaussProductFundamental(μ,ν)
K2,Q,γ2 = IntegralsModule.GaussProductFundamental(λ,σ)
δ = 1/(4γ1) + 1/(4γ2)
#p = (P-Q) # this might be a typo in the book
p = (Q-P)
factors = Bfactors([],[],[])
for xyz in 1:3,
(θ12,l12,r12) in getfield(θFactors(μ,ν),xyz),
(θ34,l34,r34) in getfield(θFactors(λ,σ),xyz)
#for (i in 0:floor(Integer,((l12-2r12)/2))) # this might be a typo in the book (otherwise e.g. ERI(s,s,px,px2) != ERI(px,px2,s,s) (where the 2 denotes a second center moved by 0.5 in x direction)
for i in 0:floor(Int,((l12+l34-2r12-2r34)/2))
B = (-1)^l34 * θ12 * θ34 * ((-1)^i*(2δ)^(2(r12+r34))*factorial(l12+l34-2r12-2r34)*δ^i*getfield(p,xyz)^(l12+l34-2*(r12+r34+i)))/((4δ)^(l12+l34)*factorial(i)*factorial(l12+l34-2*(r12+r34+i)))
push!(getfield(factors,xyz),(B,l12,r12,i,l34,r34))
end
end
return factors
end
function computeElectronRepulsionIntegral(
μ::PrimitiveGaussianBasisFunction,
ν::PrimitiveGaussianBasisFunction,
λ::PrimitiveGaussianBasisFunction,
σ::PrimitiveGaussianBasisFunction)
# compute (μν|λσ) (Mulliken notation) = Integrate[μ(1) ν(1) 1/Abs(1-2) λ(2) σ(2), d1 d2]
# in the most straightforward but primitive way (acc. to. Handbook of Comp. Quant. Chem. by Cook eq.7.1)
A = μ.center
B = ν.center
C = λ.center
D = σ.center
α1 = μ.exponent
α2 = ν.exponent
α3 = λ.exponent
α4 = σ.exponent
K1,P,γ1 = IntegralsModule.GaussProductFundamental(μ,ν)
K2,Q,γ2 = IntegralsModule.GaussProductFundamental(λ,σ)
δ = 1/(4γ1) + 1/(4γ2)
p = (P-Q)
Bfactors = BFactors(μ,ν,λ,σ)
Ω = 2π^2/(γ1 * γ2) * sqrt(π/(γ1 + γ2)) * exp(-α1*α2*distance(A,B)^2/γ1 - α3*α4*distance(C,D)^2/γ2)
result = 0.
for (Bx,l12,r12,i,l34,r34) in Bfactors.x,
(By,m12,s12,j,m34,s34) in Bfactors.y,
(Bz,n12,t12,k,n34,t34) in Bfactors.z
V = (l12+l34+m12+m34+n12+n34) - 2*(r12+r34+s12+s34+t12+t34) - (i+j+k)
result += Ω * Bx * By * Bz * IntegralsModule.FIntegral(V,distance(P,Q)^2/(4δ))
#println("Bx*By*Bz*FIntegral[$V] = $Bx * $By * $Bz * $(IntegralsModule.FIntegral(V,distance(P,Q)^2/(4δ)))")
end
return result
end
function computeElectronRepulsionIntegral(
μ::ContractedGaussianBasisFunction,
ν::ContractedGaussianBasisFunction,
λ::ContractedGaussianBasisFunction,
σ::ContractedGaussianBasisFunction)
integral = 0.
for (coeff1,pgb1) in zip(μ.coefficients,μ.primitiveBFs),
(coeff2,pgb2) in zip(ν.coefficients,ν.primitiveBFs),
(coeff3,pgb3) in zip(λ.coefficients,λ.primitiveBFs),
(coeff4,pgb4) in zip(σ.coefficients,σ.primitiveBFs)
integral += coeff1*coeff2*coeff3*coeff4*computeElectronRepulsionIntegral(pgb1,pgb2,pgb3,pgb4)
end
return integral
end
function computeTensorBlockElectronRepulsionIntegrals(
μs::Vector{ContractedGaussianBasisFunction},
νs::Vector{ContractedGaussianBasisFunction},
λs::Vector{ContractedGaussianBasisFunction},
σs::Vector{ContractedGaussianBasisFunction})
[computeElectronRepulsionIntegral(μ,ν,λ,σ) for μ in μs, ν in νs, λ in λs, σ in σs]
end
function normalize!(cgb::ContractedGaussianBasisFunction)
N = computeIntegralOverlap(cgb,cgb)
scale!(cgb.coefficients,1/sqrt(N))
end
end # module
| [
21412,
15995,
30691,
26796,
198,
39344,
24061,
34500,
1373,
5886,
37796,
11,
24061,
19453,
1313,
6207,
15204,
34500,
1373,
11,
24061,
51,
22854,
12235,
19453,
1313,
6207,
15204,
34500,
30691,
11,
24061,
34500,
1373,
49681,
5139,
11,
24061,
... | 2.089992 | 7,734 |
using Test
using InteractiveUtils
using MagneticReadHead: moduleof, functiontypeof
# Define an extra method of eps in this module, so we can test methods of
Base.eps(::typeof(moduleof)) = "dummy"
@testset "moduleof" begin
for meth in methods(detect_ambiguities)
@test moduleof(meth) == Test
end
# We define a verion of eps in this module
# but we expect that it is still counted as being in `Base`
for meth in methods(eps)
@test moduleof(meth) == Base
end
for meth in methods(Vector) # this is a UnionAll
@test moduleof(meth) == Core
end
end
@testset "functiontypeof" begin
for meth in methods(sum)
@test functiontypeof(meth) <: typeof(sum)
end
for meth in methods(+) # This includes some parametric types
@test functiontypeof(meth) <: typeof(+)
end
for meth in methods(detect_ambiguities)
@test functiontypeof(meth) <: typeof(detect_ambiguities)
end
end
| [
3500,
6208,
198,
3500,
21365,
18274,
4487,
198,
3500,
44629,
5569,
13847,
25,
8265,
1659,
11,
2163,
4906,
1659,
628,
198,
2,
2896,
500,
281,
3131,
2446,
286,
304,
862,
287,
428,
8265,
11,
523,
356,
460,
1332,
5050,
286,
198,
14881,
... | 2.604839 | 372 |
#utilities
import Base.LinAlg: HermOrSym, AbstractTriangular, *, +, -, \, A_mul_Bt, At_mul_B, At_mul_Bt, Ac_mul_B, At_ldiv_B, Ac_ldiv_B
# convert SparseChar {N,T,C} to cusparseOperation_t
function cusparseop(trans::SparseChar)
if trans == 'N'
return CUSPARSE_OPERATION_NON_TRANSPOSE
end
if trans == 'T'
return CUSPARSE_OPERATION_TRANSPOSE
end
if trans == 'C'
return CUSPARSE_OPERATION_CONJUGATE_TRANSPOSE
end
throw(ArgumentError("unknown cusparse operation."))
end
# convert SparseChar {G,S,H,T} to cusparseMatrixType_t
function cusparsetype(mattype::SparseChar)
if mattype == 'G'
return CUSPARSE_MATRIX_TYPE_GENERAL
end
if mattype == 'T'
return CUSPARSE_MATRIX_TYPE_TRIANGULAR
end
if mattype == 'S'
return CUSPARSE_MATRIX_TYPE_SYMMETRIC
end
if mattype == 'H'
return CUSPARSE_MATRIX_TYPE_HERMITIAN
end
throw(ArgumentError("unknown cusparse matrix type."))
end
# convert SparseChar {U,L} to cusparseFillMode_t
function cusparsefill(uplo::SparseChar)
if uplo == 'U'
return CUSPARSE_FILL_MODE_UPPER
end
if uplo == 'L'
return CUSPARSE_FILL_MODE_LOWER
end
throw(ArgumentError("unknown cusparse fill mode"))
end
# convert SparseChar {U,N} to cusparseDiagType_t
function cusparsediag(diag::SparseChar)
if diag == 'U'
return CUSPARSE_DIAG_TYPE_UNIT
end
if diag == 'N'
return CUSPARSE_DIAG_TYPE_NON_UNIT
end
throw(ArgumentError("unknown cusparse diag mode"))
end
# convert SparseChar {Z,O} to cusparseIndexBase_t
function cusparseindex(index::SparseChar)
if index == 'Z'
return CUSPARSE_INDEX_BASE_ZERO
end
if index == 'O'
return CUSPARSE_INDEX_BASE_ONE
end
throw(ArgumentError("unknown cusparse index base"))
end
# convert SparseChar {R,C} to cusparseDirection_t
function cusparsedir(dir::SparseChar)
if dir == 'R'
return CUSPARSE_DIRECTION_ROW
end
if dir == 'C'
return CUSPARSE_DIRECTION_COL
end
throw(ArgumentError("unknown cusparse direction"))
end
function chkmvdims( X, n, Y, m)
if length(X) != n
throw(DimensionMismatch("X must have length $n, but has length $(length(X))"))
elseif length(Y) != m
throw(DimensionMismatch("Y must have length $m, but has length $(length(Y))"))
end
end
function chkmmdims( B, C, k, l, m, n )
if size(B) != (k,l)
throw(DimensionMismatch("B has dimensions $(size(B)) but needs ($k,$l)"))
elseif size(C) != (m,n)
throw(DimensionMismatch("C has dimensions $(size(C)) but needs ($m,$n)"))
end
end
function getDescr( A::CudaSparseMatrix, index::SparseChar )
cuind = cusparseindex(index)
typ = CUSPARSE_MATRIX_TYPE_GENERAL
fill = CUSPARSE_FILL_MODE_LOWER
if ishermitian(A)
typ = CUSPARSE_MATRIX_TYPE_HERMITIAN
fill = cusparsefill(A.uplo)
elseif issym(A)
typ = CUSPARSE_MATRIX_TYPE_SYMMETRIC
fill = cusparsefill(A.uplo)
end
cudesc = cusparseMatDescr_t(typ, fill,CUSPARSE_DIAG_TYPE_NON_UNIT, cuind)
end
function getDescr( A::Symmetric, index::SparseChar )
cuind = cusparseindex(index)
typ = CUSPARSE_MATRIX_TYPE_SYMMETRIC
fill = cusparsefill(A.uplo)
cudesc = cusparseMatDescr_t(typ, fill,CUSPARSE_DIAG_TYPE_NON_UNIT, cuind)
end
function getDescr( A::Hermitian, index::SparseChar )
cuind = cusparseindex(index)
typ = CUSPARSE_MATRIX_TYPE_HERMITIAN
fill = cusparsefill(A.uplo)
cudesc = cusparseMatDescr_t(typ, fill,CUSPARSE_DIAG_TYPE_NON_UNIT, cuind)
end
# type conversion
for (fname,elty) in ((:cusparseScsr2csc, :Float32),
(:cusparseDcsr2csc, :Float64),
(:cusparseCcsr2csc, :Complex64),
(:cusparseZcsr2csc, :Complex128))
@eval begin
function switch2csc(csr::CudaSparseMatrixCSR{$elty},inda::SparseChar='O')
cuind = cusparseindex(inda)
m,n = csr.dims
colPtr = CudaArray(zeros(Cint,n+1))
rowVal = CudaArray(zeros(Cint,csr.nnz))
nzVal = CudaArray(zeros($elty,csr.nnz))
statuscheck(ccall(($(string(fname)),libcusparse), cusparseStatus_t,
(cusparseHandle_t, Cint, Cint, Cint, Ptr{$elty},
Ptr{Cint}, Ptr{Cint}, Ptr{$elty}, Ptr{Cint},
Ptr{Cint}, cusparseAction_t, cusparseIndexBase_t),
cusparsehandle[1], m, n, csr.nnz, csr.nzVal,
csr.rowPtr, csr.colVal, nzVal, rowVal,
colPtr, CUSPARSE_ACTION_NUMERIC, cuind))
csc = CudaSparseMatrixCSC(colPtr,rowVal,nzVal,csr.nnz,csr.dims)
csc
end
function switch2csr(csc::CudaSparseMatrixCSC{$elty},inda::SparseChar='O')
cuind = cusparseindex(inda)
m,n = csc.dims
rowPtr = CudaArray(zeros(Cint,m+1))
colVal = CudaArray(zeros(Cint,csc.nnz))
nzVal = CudaArray(zeros($elty,csc.nnz))
statuscheck(ccall(($(string(fname)),libcusparse), cusparseStatus_t,
(cusparseHandle_t, Cint, Cint, Cint, Ptr{$elty},
Ptr{Cint}, Ptr{Cint}, Ptr{$elty}, Ptr{Cint},
Ptr{Cint}, cusparseAction_t, cusparseIndexBase_t),
cusparsehandle[1], n, m, csc.nnz, csc.nzVal,
csc.colPtr, csc.rowVal, nzVal, colVal,
rowPtr, CUSPARSE_ACTION_NUMERIC, cuind))
csr = CudaSparseMatrixCSR(rowPtr,colVal,nzVal,csc.nnz,csc.dims)
csr
end
end
end
for (fname,elty) in ((:cusparseScsr2bsr, :Float32),
(:cusparseDcsr2bsr, :Float64),
(:cusparseCcsr2bsr, :Complex64),
(:cusparseZcsr2bsr, :Complex128))
@eval begin
function switch2bsr(csr::CudaSparseMatrixCSR{$elty},
blockDim::Cint,
dir::SparseChar='R',
inda::SparseChar='O',
indc::SparseChar='O')
cudir = cusparsedir(dir)
cuinda = cusparseindex(inda)
cuindc = cusparseindex(indc)
m,n = csr.dims
nnz = Array(Cint,1)
mb = div((m + blockDim - 1),blockDim)
nb = div((n + blockDim - 1),blockDim)
bsrRowPtr = CudaArray(zeros(Cint,mb + 1))
cudesca = cusparseMatDescr_t(CUSPARSE_MATRIX_TYPE_GENERAL, CUSPARSE_FILL_MODE_LOWER, CUSPARSE_DIAG_TYPE_NON_UNIT, cuinda)
cudescc = cusparseMatDescr_t(CUSPARSE_MATRIX_TYPE_GENERAL, CUSPARSE_FILL_MODE_LOWER, CUSPARSE_DIAG_TYPE_NON_UNIT, cuindc)
statuscheck(ccall((:cusparseXcsr2bsrNnz,libcusparse), cusparseStatus_t,
(cusparseHandle_t, cusparseDirection_t, Cint, Cint,
Ptr{cusparseMatDescr_t}, Ptr{Cint},
Ptr{Cint}, Cint, Ptr{cusparseMatDescr_t},
Ptr{Cint}, Ptr{Cint}),
cusparsehandle[1], cudir, m, n, &cudesca, csr.rowPtr,
csr.colVal, blockDim, &cudescc, bsrRowPtr, nnz))
bsrNzVal = CudaArray(zeros($elty, nnz[1] * blockDim * blockDim ))
bsrColInd = CudaArray(zeros(Cint, nnz[1]))
statuscheck(ccall(($(string(fname)),libcusparse), cusparseStatus_t,
(cusparseHandle_t, cusparseDirection_t, Cint,
Cint, Ptr{cusparseMatDescr_t}, Ptr{$elty},
Ptr{Cint}, Ptr{Cint}, Cint,
Ptr{cusparseMatDescr_t}, Ptr{$elty}, Ptr{Cint},
Ptr{Cint}), cusparsehandle[1], cudir, m, n,
&cudesca, csr.nzVal, csr.rowPtr, csr.colVal,
blockDim, &cudescc, bsrNzVal, bsrRowPtr,
bsrColInd))
CudaSparseMatrixBSR{$elty}(bsrRowPtr, bsrColInd, bsrNzVal, csr.dims, blockDim, dir, nnz[1], csr.dev)
end
function switch2bsr(csc::CudaSparseMatrixCSC{$elty},
blockDim::Cint,
dir::SparseChar='R',
inda::SparseChar='O',
indc::SparseChar='O')
switch2bsr(switch2csr(csc),blockDim,dir,inda,indc)
end
end
end
for (fname,elty) in ((:cusparseSbsr2csr, :Float32),
(:cusparseDbsr2csr, :Float64),
(:cusparseCbsr2csr, :Complex64),
(:cusparseZbsr2csr, :Complex128))
@eval begin
function switch2csr(bsr::CudaSparseMatrixBSR{$elty},
inda::SparseChar='O',
indc::SparseChar='O')
cudir = cusparsedir(bsr.dir)
cuinda = cusparseindex(inda)
cuindc = cusparseindex(indc)
m,n = bsr.dims
mb = div(m,bsr.blockDim)
nb = div(n,bsr.blockDim)
nnz = bsr.nnz * bsr.blockDim * bsr.blockDim
cudesca = cusparseMatDescr_t(CUSPARSE_MATRIX_TYPE_GENERAL, CUSPARSE_FILL_MODE_LOWER, CUSPARSE_DIAG_TYPE_NON_UNIT, cuinda)
cudescc = cusparseMatDescr_t(CUSPARSE_MATRIX_TYPE_GENERAL, CUSPARSE_FILL_MODE_LOWER, CUSPARSE_DIAG_TYPE_NON_UNIT, cuindc)
csrRowPtr = CudaArray(zeros(Cint, m + 1))
csrColInd = CudaArray(zeros(Cint, nnz))
csrNzVal = CudaArray(zeros($elty, nnz))
statuscheck(ccall(($(string(fname)),libcusparse), cusparseStatus_t,
(cusparseHandle_t, cusparseDirection_t, Cint,
Cint, Ptr{cusparseMatDescr_t}, Ptr{$elty},
Ptr{Cint}, Ptr{Cint}, Cint,
Ptr{cusparseMatDescr_t}, Ptr{$elty}, Ptr{Cint},
Ptr{Cint}), cusparsehandle[1], cudir, mb, nb,
&cudesca, bsr.nzVal, bsr.rowPtr, bsr.colVal,
bsr.blockDim, &cudescc, csrNzVal, csrRowPtr,
csrColInd))
CudaSparseMatrixCSR(csrRowPtr, csrColInd, csrNzVal, convert(Cint,nnz), bsr.dims)
end
function switch2csc(bsr::CudaSparseMatrixBSR{$elty},
inda::SparseChar='O',
indc::SparseChar='O')
switch2csc(switch2csr(bsr,inda,indc))
end
end
end
for (cname,rname,elty) in ((:cusparseScsc2dense, :cusparseScsr2dense, :Float32),
(:cusparseDcsc2dense, :cusparseDcsr2dense, :Float64),
(:cusparseCcsc2dense, :cusparseCcsr2dense, :Complex64),
(:cusparseZcsc2dense, :cusparseZcsr2dense, :Complex128))
@eval begin
function full(csr::CudaSparseMatrixCSR{$elty},ind::SparseChar='O')
cuind = cusparseindex(ind)
m,n = csr.dims
denseA = CudaArray(zeros($elty,m,n))
lda = max(1,stride(denseA,2))
cudesc = cusparseMatDescr_t(CUSPARSE_MATRIX_TYPE_GENERAL, CUSPARSE_FILL_MODE_LOWER, CUSPARSE_DIAG_TYPE_NON_UNIT, cuind)
statuscheck(ccall(($(string(rname)),libcusparse), cusparseStatus_t,
(cusparseHandle_t, Cint, Cint,
Ptr{cusparseMatDescr_t}, Ptr{$elty},
Ptr{Cint}, Ptr{Cint}, Ptr{$elty}, Cint),
cusparsehandle[1], m, n, &cudesc, csr.nzVal,
csr.rowPtr, csr.colVal, denseA, lda))
denseA
end
function full(csc::CudaSparseMatrixCSC{$elty},ind::SparseChar='O')
cuind = cusparseindex(ind)
m,n = csc.dims
denseA = CudaArray(zeros($elty,m,n))
lda = max(1,stride(denseA,2))
cudesc = cusparseMatDescr_t(CUSPARSE_MATRIX_TYPE_GENERAL, CUSPARSE_FILL_MODE_LOWER, CUSPARSE_DIAG_TYPE_NON_UNIT, cuind)
statuscheck(ccall(($(string(cname)),libcusparse), cusparseStatus_t,
(cusparseHandle_t, Cint, Cint,
Ptr{cusparseMatDescr_t}, Ptr{$elty},
Ptr{Cint}, Ptr{Cint}, Ptr{$elty}, Cint),
cusparsehandle[1], m, n, &cudesc, csc.nzVal,
csc.rowVal, csc.colPtr, denseA, lda))
denseA
end
function full(hyb::CudaSparseMatrixHYB{$elty},ind::SparseChar='O')
full(switch2csr(hyb,ind))
end
function full(bsr::CudaSparseMatrixBSR{$elty},ind::SparseChar='O')
full(switch2csr(bsr,ind))
end
end
end
for (nname,cname,rname,hname,elty) in ((:cusparseSnnz, :cusparseSdense2csc, :cusparseSdense2csr, :cusparseSdense2hyb, :Float32),
(:cusparseDnnz, :cusparseDdense2csc, :cusparseDdense2csr, :cusparseDdense2hyb, :Float64),
(:cusparseCnnz, :cusparseCdense2csc, :cusparseCdense2csr, :cusparseCdense2hyb, :Complex64),
(:cusparseZnnz, :cusparseZdense2csc, :cusparseZdense2csr, :cusparseZdense2hyb, :Complex128))
@eval begin
function sparse(A::CudaMatrix{$elty},fmt::SparseChar='R',ind::SparseChar='O')
cuind = cusparseindex(ind)
cudir = cusparsedir('R')
if( fmt == 'C' )
cudir = cusparsedir(fmt)
end
m,n = size(A)
lda = max(1,stride(A,2))
cudesc = cusparseMatDescr_t(CUSPARSE_MATRIX_TYPE_GENERAL, CUSPARSE_FILL_MODE_LOWER, CUSPARSE_DIAG_TYPE_NON_UNIT, cuind)
nnzRowCol = CudaArray(zeros(Cint, fmt == 'R' ? m : n))
nnzTotal = Array(Cint,1)
statuscheck(ccall(($(string(nname)),libcusparse), cusparseStatus_t,
(cusparseHandle_t, cusparseDirection_t,
Cint, Cint, Ptr{cusparseMatDescr_t}, Ptr{$elty},
Cint, Ptr{Cint}, Ptr{Cint}), cusparsehandle[1],
cudir, m, n, &cudesc, A, lda, nnzRowCol,
nnzTotal))
nzVal = CudaArray(zeros($elty,nnzTotal[1]))
if(fmt == 'R')
rowPtr = CudaArray(zeros(Cint,m+1))
colInd = CudaArray(zeros(Cint,nnzTotal[1]))
statuscheck(ccall(($(string(rname)),libcusparse), cusparseStatus_t,
(cusparseHandle_t, Cint, Cint,
Ptr{cusparseMatDescr_t}, Ptr{$elty},
Cint, Ptr{Cint}, Ptr{$elty}, Ptr{Cint},
Ptr{Cint}), cusparsehandle[1], m, n, &cudesc, A,
lda, nnzRowCol, nzVal, rowPtr, colInd))
return CudaSparseMatrixCSR(rowPtr,colInd,nzVal,nnzTotal[1],size(A))
end
if(fmt == 'C')
colPtr = CudaArray(zeros(Cint,n+1))
rowInd = CudaArray(zeros(Cint,nnzTotal[1]))
statuscheck(ccall(($(string(cname)),libcusparse), cusparseStatus_t,
(cusparseHandle_t, Cint, Cint,
Ptr{cusparseMatDescr_t}, Ptr{$elty},
Cint, Ptr{Cint}, Ptr{$elty}, Ptr{Cint},
Ptr{Cint}), cusparsehandle[1], m, n, &cudesc, A,
lda, nnzRowCol, nzVal, rowInd, colPtr))
return CudaSparseMatrixCSC(colPtr,rowInd,nzVal,nnzTotal[1],size(A))
end
if(fmt == 'B')
return switch2bsr(sparse(A,'R',ind),convert(Cint,gcd(m,n)))
end
if(fmt == 'H')
hyb = cusparseHybMat_t[0]
statuscheck(ccall((:cusparseCreateHybMat,libcusparse), cusparseStatus_t,
(Ptr{cusparseHybMat_t},), hyb))
statuscheck(ccall(($(string(hname)),libcusparse), cusparseStatus_t,
(cusparseHandle_t, Cint, Cint,
Ptr{cusparseMatDescr_t}, Ptr{$elty},
Cint, Ptr{Cint}, cusparseHybMat_t,
Cint, cusparseHybPartition_t),
cusparsehandle[1], m, n, &cudesc, A, lda, nnzRowCol,
hyb[1], 0, CUSPARSE_HYB_PARTITION_AUTO))
return CudaSparseMatrixHYB{$elty}(hyb[1],size(A),nnzTotal[1],device())
end
end
end
end
for (rname,cname,elty) in ((:cusparseScsr2hyb, :cusparseScsc2hyb, :Float32),
(:cusparseDcsr2hyb, :cusparseDcsc2hyb, :Float64),
(:cusparseCcsr2hyb, :cusparseCcsc2hyb, :Complex64),
(:cusparseZcsr2hyb, :cusparseZcsc2hyb, :Complex128))
@eval begin
function switch2hyb(csr::CudaSparseMatrixCSR{$elty},
inda::SparseChar='O')
cuinda = cusparseindex(inda)
m,n = csr.dims
cudesca = cusparseMatDescr_t(CUSPARSE_MATRIX_TYPE_GENERAL, CUSPARSE_FILL_MODE_LOWER, CUSPARSE_DIAG_TYPE_NON_UNIT, cuinda)
hyb = cusparseHybMat_t[0]
statuscheck(ccall((:cusparseCreateHybMat,libcusparse), cusparseStatus_t,
(Ptr{cusparseHybMat_t},), hyb))
statuscheck(ccall(($(string(rname)),libcusparse), cusparseStatus_t,
(cusparseHandle_t, Cint, Cint,
Ptr{cusparseMatDescr_t}, Ptr{$elty},
Ptr{Cint}, Ptr{Cint}, cusparseHybMat_t,
Cint, cusparseHybPartition_t), cusparsehandle[1],
m, n, &cudesca, csr.nzVal, csr.rowPtr, csr.colVal,
hyb[1], 0, CUSPARSE_HYB_PARTITION_AUTO))
CudaSparseMatrixHYB{$elty}(hyb[1], csr.dims, csr.nnz, csr.dev)
end
function switch2hyb(csc::CudaSparseMatrixCSC{$elty},
inda::SparseChar='O')
switch2hyb(switch2csr(csc,inda),inda)
end
end
end
for (rname,cname,elty) in ((:cusparseShyb2csr, :cusparseShyb2csc, :Float32),
(:cusparseDhyb2csr, :cusparseDhyb2csc, :Float64),
(:cusparseChyb2csr, :cusparseChyb2csc, :Complex64),
(:cusparseZhyb2csr, :cusparseZhyb2csc, :Complex128))
@eval begin
function switch2csr(hyb::CudaSparseMatrixHYB{$elty},
inda::SparseChar='O')
cuinda = cusparseindex(inda)
m,n = hyb.dims
cudesca = cusparseMatDescr_t(CUSPARSE_MATRIX_TYPE_GENERAL, CUSPARSE_FILL_MODE_LOWER, CUSPARSE_DIAG_TYPE_NON_UNIT, cuinda)
csrRowPtr = CudaArray(zeros(Cint, m + 1))
csrColInd = CudaArray(zeros(Cint, hyb.nnz))
csrNzVal = CudaArray(zeros($elty, hyb.nnz))
statuscheck(ccall(($(string(rname)),libcusparse), cusparseStatus_t,
(cusparseHandle_t, Ptr{cusparseMatDescr_t},
cusparseHybMat_t, Ptr{$elty}, Ptr{Cint},
Ptr{Cint}), cusparsehandle[1], &cudesca,
hyb.Mat, csrNzVal, csrRowPtr, csrColInd))
CudaSparseMatrixCSR(csrRowPtr, csrColInd, csrNzVal, hyb.nnz, hyb.dims)
end
function switch2csc(hyb::CudaSparseMatrixHYB{$elty},
inda::SparseChar='O')
switch2csc(switch2csr(hyb,inda),inda)
end
end
end
# Level 1 CUSPARSE functions
# Level 1: axpyi — accumulate a scaled sparse vector into a dense vector
# (cuSPARSE axpyi semantics: Y is updated at the indices stored in X.iPtr).
for (fname,elty) in ((:cusparseSaxpyi, :Float32),
                     (:cusparseDaxpyi, :Float64),
                     (:cusparseCaxpyi, :Complex64),
                     (:cusparseZaxpyi, :Complex128))
    @eval begin
        # In-place: overwrite entries of dense Y with alpha*X + Y and
        # return Y. `index` selects the index base of X.iPtr.
        function axpyi!(alpha::$elty,
                        X::CudaSparseVector{$elty},
                        Y::CudaVector{$elty},
                        index::SparseChar)
            cuind = cusparseindex(index)
            # alpha is passed by reference, hence the one-element array.
            statuscheck(ccall(($(string(fname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, Cint, Ptr{$elty}, Ptr{$elty},
                               Ptr{Cint}, Ptr{$elty}, cusparseIndexBase_t),
                              cusparsehandle[1], X.nnz, [alpha], X.nzVal, X.iPtr,
                              Y, cuind))
            Y
        end
        # Out-of-place variants: operate on a copy so Y is left untouched.
        function axpyi(alpha::$elty,
                       X::CudaSparseVector{$elty},
                       Y::CudaVector{$elty},
                       index::SparseChar)
            axpyi!(alpha,X,copy(Y),index)
        end
        function axpyi(X::CudaSparseVector{$elty},
                       Y::CudaVector{$elty},
                       index::SparseChar)
            axpyi!(one($elty),X,copy(Y),index)
        end
    end
end
# Level 1 dot products between a sparse X and a dense Y. `doti` is the
# plain product; `dotci` (complex types only) conjugates X per cuSPARSE.
for (jname,fname,elty) in ((:doti, :cusparseSdoti, :Float32),
                           (:doti, :cusparseDdoti, :Float64),
                           (:doti, :cusparseCdoti, :Complex64),
                           (:doti, :cusparseZdoti, :Complex128),
                           (:dotci, :cusparseCdotci, :Complex64),
                           (:dotci, :cusparseZdotci, :Complex128))
    @eval begin
        function $jname(X::CudaSparseVector{$elty},
                        Y::CudaVector{$elty},
                        index::SparseChar)
            # One-element host buffer to receive the scalar result.
            dot = Array($elty,1)
            cuind = cusparseindex(index)
            statuscheck(ccall(($(string(fname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, Cint, Ptr{$elty}, Ptr{Cint},
                               Ptr{$elty}, Ptr{$elty}, cusparseIndexBase_t),
                              cusparsehandle[1], X.nnz, X.nzVal, X.iPtr,
                              Y, dot, cuind))
            return dot[1]
        end
    end
end
# Level 1: gthr — gather entries of dense Y at X's indices into X.nzVal.
for (fname,elty) in ((:cusparseSgthr, :Float32),
                     (:cusparseDgthr, :Float64),
                     (:cusparseCgthr, :Complex64),
                     (:cusparseZgthr, :Complex128))
    @eval begin
        # In-place: overwrite X.nzVal with Y[X.iPtr]; Y is read-only.
        function gthr!(X::CudaSparseVector{$elty},
                       Y::CudaVector{$elty},
                       index::SparseChar)
            cuind = cusparseindex(index)
            statuscheck(ccall(($(string(fname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, Cint, Ptr{$elty}, Ptr{$elty},
                               Ptr{Cint}, cusparseIndexBase_t), cusparsehandle[1],
                              X.nnz, Y, X.nzVal, X.iPtr, cuind))
            X
        end
        # Out-of-place: gather into a copy of X.
        function gthr(X::CudaSparseVector{$elty},
                      Y::CudaVector{$elty},
                      index::SparseChar)
            gthr!(copy(X),Y,index)
        end
    end
end
# Level 1: gthrz — like gthr, but additionally zeroes the gathered entries
# of Y (cuSPARSE gthrz). Both X and Y are modified, so both are returned.
for (fname,elty) in ((:cusparseSgthrz, :Float32),
                     (:cusparseDgthrz, :Float64),
                     (:cusparseCgthrz, :Complex64),
                     (:cusparseZgthrz, :Complex128))
    @eval begin
        function gthrz!(X::CudaSparseVector{$elty},
                        Y::CudaVector{$elty},
                        index::SparseChar)
            cuind = cusparseindex(index)
            statuscheck(ccall(($(string(fname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, Cint, Ptr{$elty}, Ptr{$elty},
                               Ptr{Cint}, cusparseIndexBase_t), cusparsehandle[1],
                              X.nnz, Y, X.nzVal, X.iPtr, cuind))
            X,Y
        end
        # Out-of-place: operate on copies of both X and Y.
        function gthrz(X::CudaSparseVector{$elty},
                       Y::CudaVector{$elty},
                       index::SparseChar)
            gthrz!(copy(X),copy(Y),index)
        end
    end
end
# Level 1: roti — apply a Givens rotation (c, s) to the sparse/dense vector
# pair (X, Y). Real element types only, matching cuSPARSE's Sroti/Droti
# coverage.
for (fname,elty) in ((:cusparseSroti, :Float32),
                     (:cusparseDroti, :Float64))
    @eval begin
        # In-place rotation: X.nzVal and the entries of Y addressed by
        # X.iPtr are overwritten; returns the pair (X, Y).
        function roti!(X::CudaSparseVector{$elty},
                       Y::CudaVector{$elty},
                       c::$elty,
                       s::$elty,
                       index::SparseChar)
            cuind = cusparseindex(index)
            # Fix: the signature previously spelled the index-pointer slot
            # `Ptr{$Cint}`. `Cint` is a plain global type, not a loop
            # binding, so it must appear literally (as in every sibling
            # wrapper), not be interpolated into the quoted expression.
            statuscheck(ccall(($(string(fname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, Cint, Ptr{$elty}, Ptr{Cint},
                               Ptr{$elty}, Ptr{$elty}, Ptr{$elty}, cusparseIndexBase_t),
                              cusparsehandle[1], X.nnz, X.nzVal, X.iPtr, Y, [c], [s], cuind))
            X,Y
        end
        # Out-of-place variant: rotate copies of X and Y.
        function roti(X::CudaSparseVector{$elty},
                      Y::CudaVector{$elty},
                      c::$elty,
                      s::$elty,
                      index::SparseChar)
            roti!(copy(X),copy(Y),c,s,index)
        end
    end
end
# Level 1: sctr — scatter the values of sparse X into dense Y at X's
# indices (cuSPARSE sctr).
for (fname,elty) in ((:cusparseSsctr, :Float32),
                     (:cusparseDsctr, :Float64),
                     (:cusparseCsctr, :Complex64),
                     (:cusparseZsctr, :Complex128))
    @eval begin
        # In-place: write X.nzVal into Y[X.iPtr] and return Y.
        function sctr!(X::CudaSparseVector{$elty},
                       Y::CudaVector{$elty},
                       index::SparseChar)
            cuind = cusparseindex(index)
            statuscheck(ccall(($(string(fname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, Cint, Ptr{$elty}, Ptr{Cint},
                               Ptr{$elty}, cusparseIndexBase_t),
                              cusparsehandle[1], X.nnz, X.nzVal, X.iPtr,
                              Y, cuind))
            Y
        end
        # Convenience: scatter into a freshly zeroed dense vector sized to
        # X's logical length.
        function sctr(X::CudaSparseVector{$elty},
                      index::SparseChar)
            sctr!(X,CudaArray(zeros($elty,X.dims[1])),index)
        end
    end
end
## level 2 functions
# Level 2: bsrmv — sparse matrix-vector product for BSR storage.
# Computes Y = alpha * op(A) * X + beta * Y in place in Y.
for (fname,elty) in ((:cusparseSbsrmv, :Float32),
                     (:cusparseDbsrmv, :Float64),
                     (:cusparseCbsrmv, :Complex64),
                     (:cusparseZbsrmv, :Complex128))
    @eval begin
        function mv!(transa::SparseChar,
                     alpha::$elty,
                     A::CudaSparseMatrixBSR{$elty},
                     X::CudaVector{$elty},
                     beta::$elty,
                     Y::CudaVector{$elty},
                     index::SparseChar)
            cudir = cusparsedir(A.dir)
            cutransa = cusparseop(transa)
            cuind = cusparseindex(index)
            cudesc = cusparseMatDescr_t(CUSPARSE_MATRIX_TYPE_GENERAL, CUSPARSE_FILL_MODE_LOWER, CUSPARSE_DIAG_TYPE_NON_UNIT, cuind)
            m,n = A.dims
            # Dimensions in units of blocks; assumes dims divide evenly by
            # blockDim (no remainder check here).
            mb = div(m,A.blockDim)
            nb = div(n,A.blockDim)
            # Validate vector lengths against the chosen operation.
            if transa == 'N'
                chkmvdims(X,n,Y,m)
            end
            if transa == 'T' || transa == 'C'
                chkmvdims(X,m,Y,n)
            end
            statuscheck(ccall(($(string(fname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, cusparseDirection_t,
                               cusparseOperation_t, Cint, Cint, Cint,
                               Ptr{$elty}, Ptr{cusparseMatDescr_t},
                               Ptr{$elty}, Ptr{Cint}, Ptr{Cint}, Cint,
                               Ptr{$elty}, Ptr{$elty}, Ptr{$elty}),
                              cusparsehandle[1], cudir, cutransa, mb, nb,
                              A.nnz, [alpha], &cudesc, A.nzVal, A.rowPtr,
                              A.colVal, A.blockDim, X, [beta], Y))
            Y
        end
    end
end
# Level 2: csrmv — sparse matrix-vector product for CSR (and, via the
# transpose trick, CSC) storage. Computes Y = alpha * op(A) * X + beta * Y
# in place in Y. Symmetric/Hermitian wrappers are unwrapped to their
# underlying storage; the descriptor reflecting the wrapper comes from
# getDescr.
for (fname,elty) in ((:cusparseScsrmv, :Float32),
                     (:cusparseDcsrmv, :Float64),
                     (:cusparseCcsrmv, :Complex64),
                     (:cusparseZcsrmv, :Complex128))
    @eval begin
        function mv!(transa::SparseChar,
                     alpha::$elty,
                     A::Union{CudaSparseMatrixCSR{$elty},HermOrSym{$elty,CudaSparseMatrixCSR{$elty}}},
                     X::CudaVector{$elty},
                     beta::$elty,
                     Y::CudaVector{$elty},
                     index::SparseChar)
            Mat = A
            if typeof(A) <: Base.LinAlg.HermOrSym
                Mat = A.data
            end
            cutransa = cusparseop(transa)
            m,n = Mat.dims
            if transa == 'N'
                chkmvdims(X, n, Y, m)
            end
            if transa == 'T' || transa == 'C'
                chkmvdims(X, m, Y, n)
            end
            cudesc = getDescr(A,index)
            statuscheck(ccall(($(string(fname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, cusparseOperation_t, Cint,
                               Cint, Cint, Ptr{$elty}, Ptr{cusparseMatDescr_t},
                               Ptr{$elty}, Ptr{Cint}, Ptr{Cint}, Ptr{$elty},
                               Ptr{$elty}, Ptr{$elty}), cusparsehandle[1],
                              cutransa, m, n, Mat.nnz, [alpha], &cudesc, Mat.nzVal,
                              Mat.rowPtr, Mat.colVal, X, [beta], Y))
            Y
        end
        # CSC variant: a CSC matrix is the CSR form of its transpose, so
        # flip the requested operation and swap the dimension roles, then
        # call the same csrmv kernel with colPtr/rowVal as the CSR arrays.
        function mv!(transa::SparseChar,
                     alpha::$elty,
                     A::Union{CudaSparseMatrixCSC{$elty},HermOrSym{$elty,CudaSparseMatrixCSC{$elty}}},
                     X::CudaVector{$elty},
                     beta::$elty,
                     Y::CudaVector{$elty},
                     index::SparseChar)
            Mat = A
            if typeof(A) <: Base.LinAlg.HermOrSym
                Mat = A.data
            end
            ctransa = 'N'
            if transa == 'N'
                ctransa = 'T'
            end
            cutransa = cusparseop(ctransa)
            # (dead local `cuind = cusparseindex(index)` removed: the
            # descriptor is produced by getDescr below.)
            cudesc = getDescr(A,index)
            n,m = Mat.dims
            if ctransa == 'N'
                chkmvdims(X,n,Y,m)
            end
            if ctransa == 'T' || ctransa == 'C'
                chkmvdims(X,m,Y,n)
            end
            statuscheck(ccall(($(string(fname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, cusparseOperation_t, Cint,
                               Cint, Cint, Ptr{$elty}, Ptr{cusparseMatDescr_t},
                               Ptr{$elty}, Ptr{Cint}, Ptr{Cint}, Ptr{$elty},
                               Ptr{$elty}, Ptr{$elty}), cusparsehandle[1],
                              cutransa, m, n, Mat.nnz, [alpha], &cudesc,
                              Mat.nzVal, Mat.colPtr, Mat.rowVal, X, [beta], Y))
            Y
        end
    end
end
# bsrsv2
# bsrsv2: in-place triangular solve op(A) * X = alpha * X for BSR storage.
# The cuSPARSE v2 solver runs in three phases per call: query the scratch
# buffer size (`bname`), analyse the sparsity pattern (`aname`), then
# solve (`sname`).
for (bname,aname,sname,elty) in ((:cusparseSbsrsv2_bufferSize, :cusparseSbsrsv2_analysis, :cusparseSbsrsv2_solve, :Float32),
                                 (:cusparseDbsrsv2_bufferSize, :cusparseDbsrsv2_analysis, :cusparseDbsrsv2_solve, :Float64),
                                 (:cusparseCbsrsv2_bufferSize, :cusparseCbsrsv2_analysis, :cusparseCbsrsv2_solve, :Complex64),
                                 (:cusparseZbsrsv2_bufferSize, :cusparseZbsrsv2_analysis, :cusparseZbsrsv2_solve, :Complex128))
    @eval begin
        # Solve in place: X is used as both right-hand side and solution.
        # `uplo` selects the triangle of A that is referenced.
        function sv2!(transa::SparseChar,
                      uplo::SparseChar,
                      alpha::$elty,
                      A::CudaSparseMatrixBSR{$elty},
                      X::CudaVector{$elty},
                      index::SparseChar)
            cutransa = cusparseop(transa)
            cudir = cusparsedir(A.dir)
            cuind = cusparseindex(index)
            cuplo = cusparsefill(uplo)
            cudesc = cusparseMatDescr_t(CUSPARSE_MATRIX_TYPE_GENERAL, cuplo, CUSPARSE_DIAG_TYPE_NON_UNIT, cuind)
            m,n = A.dims
            if( m != n )
                throw(DimensionMismatch("A must be square, but has dimensions ($m,$n)!"))
            end
            mb = div(m,A.blockDim)
            mX = length(X)
            if( mX != m )
                throw(DimensionMismatch("X must have length $m, but has length $mX"))
            end
            # Opaque solver state; created here and destroyed at the end.
            info = bsrsv2Info_t[0]
            cusparseCreateBsrsv2Info(info)
            # Phase 1: how much device scratch space does the solve need?
            bufSize = Array(Cint,1)
            statuscheck(ccall(($(string(bname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, cusparseDirection_t,
                               cusparseOperation_t, Cint, Cint,
                               Ptr{cusparseMatDescr_t}, Ptr{$elty}, Ptr{Cint},
                               Ptr{Cint}, Cint, bsrsv2Info_t, Ptr{Cint}),
                              cusparsehandle[1], cudir, cutransa, mb, A.nnz,
                              &cudesc, A.nzVal, A.rowPtr, A.colVal,
                              A.blockDim, info[1], bufSize))
            buffer = CudaArray(zeros(UInt8, bufSize[1]))
            # Phase 2: analyse the sparsity pattern into `info`.
            statuscheck(ccall(($(string(aname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, cusparseDirection_t,
                               cusparseOperation_t, Cint, Cint,
                               Ptr{cusparseMatDescr_t}, Ptr{$elty}, Ptr{Cint},
                               Ptr{Cint}, Cint, bsrsv2Info_t,
                               cusparseSolvePolicy_t, Ptr{Void}),
                              cusparsehandle[1], cudir, cutransa, mb, A.nnz,
                              &cudesc, A.nzVal, A.rowPtr, A.colVal, A.blockDim,
                              info[1], CUSPARSE_SOLVE_POLICY_USE_LEVEL, buffer))
            # A reported position >= 0 signals a structural/numerical zero
            # pivot at that (0-based) position, making the solve singular.
            posit = Array(Cint,1)
            statuscheck(ccall((:cusparseXbsrsv2_zeroPivot, libcusparse),
                              cusparseStatus_t, (cusparseHandle_t, bsrsv2Info_t,
                              Ptr{Cint}), cusparsehandle[1], info[1], posit))
            if( posit[1] >= 0 )
                # NOTE(review): throws a String rather than an Exception
                # subtype — callers cannot dispatch on the error type.
                throw(string("Structural/numerical zero in A at (",posit[1],posit[1],")"))
            end
            # Phase 3: the actual solve, with X as both input and output.
            statuscheck(ccall(($(string(sname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, cusparseDirection_t,
                               cusparseOperation_t, Cint, Cint, Ptr{$elty},
                               Ptr{cusparseMatDescr_t}, Ptr{$elty}, Ptr{Cint},
                               Ptr{Cint}, Cint, bsrsv2Info_t, Ptr{$elty},
                               Ptr{$elty}, cusparseSolvePolicy_t, Ptr{Void}),
                              cusparsehandle[1], cudir, cutransa, mb, A.nnz,
                              [alpha], &cudesc, A.nzVal, A.rowPtr, A.colVal,
                              A.blockDim, info[1], X, X,
                              CUSPARSE_SOLVE_POLICY_USE_LEVEL, buffer))
            cusparseDestroyBsrsv2Info(info[1])
            X
        end
    end
end
# Out-of-place convenience wrappers around the in-place sv2! triangular
# solver, generated per element type.
for elty in (:Float32, :Float64, :Complex64, :Complex128)
    @eval begin
        # Solve on a copy of X so the caller's vector is preserved.
        function sv2(transa::SparseChar,
                     uplo::SparseChar,
                     alpha::$elty,
                     A::CudaSparseMatrix{$elty},
                     X::CudaVector{$elty},
                     index::SparseChar)
            return sv2!(transa,uplo,alpha,A,copy(X),index)
        end
        # Default scaling of one.
        function sv2(transa::SparseChar,
                     uplo::SparseChar,
                     A::CudaSparseMatrix{$elty},
                     X::CudaVector{$elty},
                     index::SparseChar)
            return sv2(transa,uplo,one($elty),A,X,index)
        end
        # AbstractTriangular wrappers: the fill mode is implied by the
        # wrapper type, and the solve runs on the underlying storage.
        function sv2(transa::SparseChar,
                     alpha::$elty,
                     A::AbstractTriangular,
                     X::CudaVector{$elty},
                     index::SparseChar)
            fillmode = islower(A) ? 'L' : 'U'
            return sv2!(transa,fillmode,alpha,A.data,copy(X),index)
        end
        function sv2(transa::SparseChar,
                     A::AbstractTriangular,
                     X::CudaVector{$elty},
                     index::SparseChar)
            return sv2(transa,one($elty),A,X,index)
        end
    end
end
# Analysis phase of the (legacy) csrsv triangular solver for CSR storage.
# Returns the opaque analysis handle consumed by sv_solve!. The caller is
# responsible for eventually releasing the handle.
for (fname,elty) in ((:cusparseScsrsv_analysis, :Float32),
                     (:cusparseDcsrsv_analysis, :Float64),
                     (:cusparseCcsrsv_analysis, :Complex64),
                     (:cusparseZcsrsv_analysis, :Complex128))
    @eval begin
        # `typea` selects the matrix type, `uplo` the referenced triangle.
        function sv_analysis(transa::SparseChar,
                             typea::SparseChar,
                             uplo::SparseChar,
                             A::CudaSparseMatrixCSR{$elty},
                             index::SparseChar)
            cutransa = cusparseop(transa)
            cuind = cusparseindex(index)
            cutype = cusparsetype(typea)
            cuuplo = cusparsefill(uplo)
            cudesc = cusparseMatDescr_t(cutype, cuuplo, CUSPARSE_DIAG_TYPE_NON_UNIT, cuind)
            m,n = A.dims
            # Triangular solves require a square operator.
            if( m != n )
                throw(DimensionMismatch("A must be square, but has dimensions ($m,$n)!"))
            end
            info = cusparseSolveAnalysisInfo_t[0]
            cusparseCreateSolveAnalysisInfo(info)
            statuscheck(ccall(($(string(fname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, cusparseOperation_t, Cint,
                               Cint, Ptr{cusparseMatDescr_t},
                               Ptr{$elty}, Ptr{Cint}, Ptr{Cint},
                               cusparseSolveAnalysisInfo_t), cusparsehandle[1],
                              cutransa, m, A.nnz, &cudesc, A.nzVal,
                              A.rowPtr, A.colVal, info[1]))
            info[1]
        end
    end
end
#cscsv_analysis
# Analysis phase for CSC storage: a CSC matrix is the CSR form of its
# transpose, so both the operation and the referenced triangle are flipped
# before calling the same csrsv analysis entry point with colPtr/rowVal.
for (fname,elty) in ((:cusparseScsrsv_analysis, :Float32),
                     (:cusparseDcsrsv_analysis, :Float64),
                     (:cusparseCcsrsv_analysis, :Complex64),
                     (:cusparseZcsrsv_analysis, :Complex128))
    @eval begin
        function sv_analysis(transa::SparseChar,
                             typea::SparseChar,
                             uplo::SparseChar,
                             A::CudaSparseMatrixCSC{$elty},
                             index::SparseChar)
            # Flip the operation: CSC 'N' is CSR 'T' of the transpose.
            ctransa = 'N'
            if transa == 'N'
                ctransa = 'T'
            end
            # Flip the triangle for the same reason.
            cuplo = 'U'
            if uplo == 'U'
                cuplo = 'L'
            end
            cutransa = cusparseop(ctransa)
            cuind = cusparseindex(index)
            cutype = cusparsetype(typea)
            cuuplo = cusparsefill(cuplo)
            cudesc = cusparseMatDescr_t(cutype, cuuplo, CUSPARSE_DIAG_TYPE_NON_UNIT, cuind)
            n,m = A.dims
            if( m != n )
                throw(DimensionMismatch("A must be square, but has dimensions ($m,$n)!"))
            end
            info = cusparseSolveAnalysisInfo_t[0]
            cusparseCreateSolveAnalysisInfo(info)
            statuscheck(ccall(($(string(fname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, cusparseOperation_t, Cint,
                               Cint, Ptr{cusparseMatDescr_t},
                               Ptr{$elty}, Ptr{Cint}, Ptr{Cint},
                               cusparseSolveAnalysisInfo_t), cusparsehandle[1],
                              cutransa, m, A.nnz, &cudesc, A.nzVal,
                              A.colPtr, A.rowVal, info[1]))
            info[1]
        end
    end
end
# csr solve
# Solve phase of the (legacy) csrsv triangular solver for CSR storage:
# Y = op(A) \ (alpha * X), using a previously computed analysis handle.
for (fname,elty) in ((:cusparseScsrsv_solve, :Float32),
                     (:cusparseDcsrsv_solve, :Float64),
                     (:cusparseCcsrsv_solve, :Complex64),
                     (:cusparseZcsrsv_solve, :Complex128))
    @eval begin
        # Writes the solution into Y; `info` must come from sv_analysis on
        # the same matrix/operation.
        function sv_solve!(transa::SparseChar,
                           uplo::SparseChar,
                           alpha::$elty,
                           A::CudaSparseMatrixCSR{$elty},
                           X::CudaVector{$elty},
                           Y::CudaVector{$elty},
                           info::cusparseSolveAnalysisInfo_t,
                           index::SparseChar)
            cutransa = cusparseop(transa)
            cuind = cusparseindex(index)
            cuuplo = cusparsefill(uplo)
            cudesc = cusparseMatDescr_t(CUSPARSE_MATRIX_TYPE_TRIANGULAR, cuuplo, CUSPARSE_DIAG_TYPE_NON_UNIT, cuind)
            m,n = A.dims
            if( size(X)[1] != m )
                throw(DimensionMismatch("First dimension of A, $m, and of X, $(size(X)[1]) must match"))
            end
            statuscheck(ccall(($(string(fname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, cusparseOperation_t, Cint,
                               Ptr{$elty}, Ptr{cusparseMatDescr_t}, Ptr{$elty},
                               Ptr{Cint}, Ptr{Cint}, cusparseSolveAnalysisInfo_t,
                               Ptr{$elty}, Ptr{$elty}), cusparsehandle[1],
                              cutransa, m, [alpha], &cudesc, A.nzVal,
                              A.rowPtr, A.colVal, info, X, Y))
            Y
        end
    end
end
# csc solve
# Solve phase for CSC storage: as in sv_analysis, the operation and the
# triangle are flipped and the csrsv solver is called with colPtr/rowVal.
for (fname,elty) in ((:cusparseScsrsv_solve, :Float32),
                     (:cusparseDcsrsv_solve, :Float64),
                     (:cusparseCcsrsv_solve, :Complex64),
                     (:cusparseZcsrsv_solve, :Complex128))
    @eval begin
        function sv_solve!(transa::SparseChar,
                           uplo::SparseChar,
                           alpha::$elty,
                           A::CudaSparseMatrixCSC{$elty},
                           X::CudaVector{$elty},
                           Y::CudaVector{$elty},
                           info::cusparseSolveAnalysisInfo_t,
                           index::SparseChar)
            # Flip operation and triangle (CSC == CSR of the transpose).
            ctransa = 'N'
            if transa == 'N'
                ctransa = 'T'
            end
            cuplo = 'U'
            if uplo == 'U'
                cuplo = 'L'
            end
            cutransa = cusparseop(ctransa)
            cuind = cusparseindex(index)
            cuuplo = cusparsefill(cuplo)
            cudesc = cusparseMatDescr_t(CUSPARSE_MATRIX_TYPE_TRIANGULAR, cuuplo, CUSPARSE_DIAG_TYPE_NON_UNIT, cuind)
            n,m = A.dims
            if( size(X)[1] != m )
                throw(DimensionMismatch("First dimension of A, $m, and of X, $(size(X)[1]) must match"))
            end
            statuscheck(ccall(($(string(fname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, cusparseOperation_t, Cint,
                               Ptr{$elty}, Ptr{cusparseMatDescr_t}, Ptr{$elty},
                               Ptr{Cint}, Ptr{Cint}, cusparseSolveAnalysisInfo_t,
                               Ptr{$elty}, Ptr{$elty}), cusparsehandle[1],
                              cutransa, m, [alpha], &cudesc, A.nzVal,
                              A.colPtr, A.rowVal, info, X, Y))
            Y
        end
    end
end
# csrsv2
# csrsv2: in-place triangular solve for CSR storage. Same three-phase
# structure as bsrsv2 above: buffer-size query, analysis, then solve.
for (bname,aname,sname,elty) in ((:cusparseScsrsv2_bufferSize, :cusparseScsrsv2_analysis, :cusparseScsrsv2_solve, :Float32),
                                 (:cusparseDcsrsv2_bufferSize, :cusparseDcsrsv2_analysis, :cusparseDcsrsv2_solve, :Float64),
                                 (:cusparseCcsrsv2_bufferSize, :cusparseCcsrsv2_analysis, :cusparseCcsrsv2_solve, :Complex64),
                                 (:cusparseZcsrsv2_bufferSize, :cusparseZcsrsv2_analysis, :cusparseZcsrsv2_solve, :Complex128))
    @eval begin
        # Solve in place: X serves as both right-hand side and solution.
        function sv2!(transa::SparseChar,
                      uplo::SparseChar,
                      alpha::$elty,
                      A::CudaSparseMatrixCSR{$elty},
                      X::CudaVector{$elty},
                      index::SparseChar)
            cutransa = cusparseop(transa)
            cuind = cusparseindex(index)
            cuuplo = cusparsefill(uplo)
            cudesc = cusparseMatDescr_t(CUSPARSE_MATRIX_TYPE_GENERAL, cuuplo, CUSPARSE_DIAG_TYPE_NON_UNIT, cuind)
            m,n = A.dims
            if( m != n )
                throw(DimensionMismatch("A must be square, but has dimensions ($m,$n)!"))
            end
            mX = length(X)
            if( mX != m )
                throw(DimensionMismatch("First dimension of A, $m, and of X, $(size(X)[1]) must match"))
            end
            # Opaque solver state; destroyed after the solve.
            info = csrsv2Info_t[0]
            cusparseCreateCsrsv2Info(info)
            # Phase 1: query the required device scratch size.
            bufSize = Array(Cint,1)
            statuscheck(ccall(($(string(bname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, cusparseOperation_t, Cint, Cint,
                               Ptr{cusparseMatDescr_t}, Ptr{$elty}, Ptr{Cint},
                               Ptr{Cint}, csrsv2Info_t, Ptr{Cint}),
                              cusparsehandle[1], cutransa, m, A.nnz,
                              &cudesc, A.nzVal, A.rowPtr, A.colVal,
                              info[1], bufSize))
            buffer = CudaArray(zeros(UInt8, bufSize[1]))
            # Phase 2: analyse the sparsity pattern.
            statuscheck(ccall(($(string(aname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, cusparseOperation_t, Cint, Cint,
                               Ptr{cusparseMatDescr_t}, Ptr{$elty}, Ptr{Cint},
                               Ptr{Cint}, csrsv2Info_t, cusparseSolvePolicy_t,
                               Ptr{Void}), cusparsehandle[1], cutransa, m, A.nnz,
                              &cudesc, A.nzVal, A.rowPtr, A.colVal, info[1],
                              CUSPARSE_SOLVE_POLICY_USE_LEVEL, buffer))
            # A reported position >= 0 signals a zero pivot there.
            posit = Array(Cint,1)
            statuscheck(ccall((:cusparseXcsrsv2_zeroPivot, libcusparse),
                              cusparseStatus_t, (cusparseHandle_t, csrsv2Info_t,
                              Ptr{Cint}), cusparsehandle[1], info[1], posit))
            if( posit[1] >= 0 )
                # NOTE(review): throws a String, not an Exception subtype.
                throw(string("Structural/numerical zero in A at (",posit[1],posit[1],")"))
            end
            # Phase 3: the solve itself, X in / X out.
            statuscheck(ccall(($(string(sname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, cusparseOperation_t, Cint,
                               Cint, Ptr{$elty}, Ptr{cusparseMatDescr_t},
                               Ptr{$elty}, Ptr{Cint}, Ptr{Cint}, csrsv2Info_t,
                               Ptr{$elty}, Ptr{$elty}, cusparseSolvePolicy_t,
                               Ptr{Void}), cusparsehandle[1], cutransa, m,
                              A.nnz, [alpha], &cudesc, A.nzVal, A.rowPtr,
                              A.colVal, info[1], X, X,
                              CUSPARSE_SOLVE_POLICY_USE_LEVEL, buffer))
            cusparseDestroyCsrsv2Info(info[1])
            X
        end
    end
end
# cscsv2
# csrsv2 applied to CSC storage: the operation and triangle are flipped
# (CSC == CSR of the transpose) and colPtr/rowVal are fed to the same
# csrsv2 entry points. Same three-phase structure as the CSR version.
for (bname,aname,sname,elty) in ((:cusparseScsrsv2_bufferSize, :cusparseScsrsv2_analysis, :cusparseScsrsv2_solve, :Float32),
                                 (:cusparseDcsrsv2_bufferSize, :cusparseDcsrsv2_analysis, :cusparseDcsrsv2_solve, :Float64),
                                 (:cusparseCcsrsv2_bufferSize, :cusparseCcsrsv2_analysis, :cusparseCcsrsv2_solve, :Complex64),
                                 (:cusparseZcsrsv2_bufferSize, :cusparseZcsrsv2_analysis, :cusparseZcsrsv2_solve, :Complex128))
    @eval begin
        function sv2!(transa::SparseChar,
                      uplo::SparseChar,
                      alpha::$elty,
                      A::CudaSparseMatrixCSC{$elty},
                      X::CudaVector{$elty},
                      index::SparseChar)
            # Flip both the operation and the referenced triangle.
            ctransa = 'N'
            cuplo = 'U'
            if transa == 'N'
                ctransa = 'T'
            end
            if uplo == 'U'
                cuplo = 'L'
            end
            cutransa = cusparseop(ctransa)
            cuind = cusparseindex(index)
            cuuplo = cusparsefill(cuplo)
            cudesc = cusparseMatDescr_t(CUSPARSE_MATRIX_TYPE_GENERAL, cuuplo, CUSPARSE_DIAG_TYPE_NON_UNIT, cuind)
            n,m = A.dims
            if( m != n )
                throw(DimensionMismatch("A must be square, but has dimensions ($m,$n)!"))
            end
            mX = length(X)
            if( mX != m )
                throw(DimensionMismatch("First dimension of A, $m, and of X, $(size(X)[1]) must match"))
            end
            info = csrsv2Info_t[0]
            cusparseCreateCsrsv2Info(info)
            # Phase 1: scratch-buffer size query.
            bufSize = Array(Cint,1)
            statuscheck(ccall(($(string(bname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, cusparseOperation_t, Cint, Cint,
                               Ptr{cusparseMatDescr_t}, Ptr{$elty}, Ptr{Cint},
                               Ptr{Cint}, csrsv2Info_t, Ptr{Cint}),
                              cusparsehandle[1], cutransa, m, A.nnz,
                              &cudesc, A.nzVal, A.colPtr, A.rowVal,
                              info[1], bufSize))
            buffer = CudaArray(zeros(UInt8, bufSize[1]))
            # Phase 2: pattern analysis.
            statuscheck(ccall(($(string(aname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, cusparseOperation_t, Cint, Cint,
                               Ptr{cusparseMatDescr_t}, Ptr{$elty}, Ptr{Cint},
                               Ptr{Cint}, csrsv2Info_t, cusparseSolvePolicy_t,
                               Ptr{Void}), cusparsehandle[1], cutransa, m, A.nnz,
                              &cudesc, A.nzVal, A.colPtr, A.rowVal, info[1],
                              CUSPARSE_SOLVE_POLICY_USE_LEVEL, buffer))
            # Position >= 0 means a zero pivot was detected there.
            posit = Array(Cint,1)
            statuscheck(ccall((:cusparseXcsrsv2_zeroPivot, libcusparse),
                              cusparseStatus_t, (cusparseHandle_t, csrsv2Info_t,
                              Ptr{Cint}), cusparsehandle[1], info[1], posit))
            if( posit[1] >= 0 )
                # NOTE(review): throws a String, not an Exception subtype.
                throw(string("Structural/numerical zero in A at (",posit[1],posit[1],")"))
            end
            # Phase 3: solve in place (X is RHS and solution).
            statuscheck(ccall(($(string(sname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, cusparseOperation_t, Cint,
                               Cint, Ptr{$elty}, Ptr{cusparseMatDescr_t},
                               Ptr{$elty}, Ptr{Cint}, Ptr{Cint}, csrsv2Info_t,
                               Ptr{$elty}, Ptr{$elty}, cusparseSolvePolicy_t,
                               Ptr{Void}), cusparsehandle[1], cutransa, m,
                              A.nnz, [alpha], &cudesc, A.nzVal, A.colPtr,
                              A.rowVal, info[1], X, X,
                              CUSPARSE_SOLVE_POLICY_USE_LEVEL, buffer))
            cusparseDestroyCsrsv2Info(info[1])
            X
        end
    end
end
# Hook the Base left-division operators up to the sparse triangular
# solvers, always with one-based ('O') indexing. Triangular wrappers use
# the csrsv2/bsrsv2 path (sv2); HYB-backed wrappers must use the legacy
# analysis/solve path (sv), since the v2 solver has no HYB kernels here.
(\)(A::AbstractTriangular,B::CudaVector) = sv2('N',A,B,'O')
At_ldiv_B(A::AbstractTriangular,B::CudaVector) = sv2('T',A,B,'O')
Ac_ldiv_B(A::AbstractTriangular,B::CudaVector) = sv2('C',A,B,'O')
(\){T}(A::AbstractTriangular{T,CudaSparseMatrixHYB{T}},B::CudaVector{T}) = sv('N',A,B,'O')
At_ldiv_B{T}(A::AbstractTriangular{T,CudaSparseMatrixHYB{T}},B::CudaVector{T}) = sv('T',A,B,'O')
Ac_ldiv_B{T}(A::AbstractTriangular{T,CudaSparseMatrixHYB{T}},B::CudaVector{T}) = sv('C',A,B,'O')
# Level 2: hybmv — sparse matrix-vector product for HYB storage.
# Computes Y = alpha * op(A) * X + beta * Y in place in Y.
for (fname,elty) in ((:cusparseShybmv, :Float32),
                     (:cusparseDhybmv, :Float64),
                     (:cusparseChybmv, :Complex64),
                     (:cusparseZhybmv, :Complex128))
    @eval begin
        function mv!(transa::SparseChar,
                     alpha::$elty,
                     A::CudaSparseMatrixHYB{$elty},
                     X::CudaVector{$elty},
                     beta::$elty,
                     Y::CudaVector{$elty},
                     index::SparseChar)
            cutransa = cusparseop(transa)
            cuind = cusparseindex(index)
            cudesc = cusparseMatDescr_t(CUSPARSE_MATRIX_TYPE_GENERAL, CUSPARSE_FILL_MODE_LOWER, CUSPARSE_DIAG_TYPE_NON_UNIT, cuind)
            m,n = A.dims
            # Validate vector lengths against the chosen operation.
            if transa == 'N'
                chkmvdims(X,n,Y,m)
            end
            if transa == 'T' || transa == 'C'
                chkmvdims(X,m,Y,n)
            end
            statuscheck(ccall(($(string(fname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, cusparseOperation_t,
                               Ptr{$elty}, Ptr{cusparseMatDescr_t},
                               cusparseHybMat_t, Ptr{$elty},
                               Ptr{$elty}, Ptr{$elty}), cusparsehandle[1],
                              cutransa, [alpha], &cudesc, A.Mat, X, [beta], Y))
            Y
        end
    end
end
# Out-of-place convenience wrappers around mv!, generated per element type.
# Variants fill in alpha = 1, beta = 1/0, and/or allocate the output.
for elty in (:Float32, :Float64, :Complex64, :Complex128)
    @eval begin
        # Full form: compute into a copy of Y.
        function mv(transa::SparseChar,
                    alpha::$elty,
                    A::Union{CudaSparseMatrix{$elty},CompressedSparse{$elty}},
                    X::CudaVector{$elty},
                    beta::$elty,
                    Y::CudaVector{$elty},
                    index::SparseChar)
            mv!(transa,alpha,A,X,beta,copy(Y),index)
        end
        function mv(transa::SparseChar,
                    alpha::$elty,
                    A::Union{CudaSparseMatrix{$elty},CompressedSparse{$elty}},
                    X::CudaVector{$elty},
                    Y::CudaVector{$elty},
                    index::SparseChar)
            mv(transa,alpha,A,X,one($elty),Y,index)
        end
        function mv(transa::SparseChar,
                    A::Union{CudaSparseMatrix{$elty},CompressedSparse{$elty}},
                    X::CudaVector{$elty},
                    beta::$elty,
                    Y::CudaVector{$elty},
                    index::SparseChar)
            mv(transa,one($elty),A,X,beta,Y,index)
        end
        function mv(transa::SparseChar,
                    A::Union{CudaSparseMatrix{$elty},CompressedSparse{$elty}},
                    X::CudaVector{$elty},
                    Y::CudaVector{$elty},
                    index::SparseChar)
            mv(transa,one($elty),A,X,one($elty),Y,index)
        end
        # Allocating variants. Fix: the output length depends on the
        # operation — op(A)*X has length size(A)[1] for 'N' but
        # size(A)[2] for 'T'/'C' (previously always size(A)[1], which
        # broke transposed products with non-square A). This mirrors the
        # mm allocating wrappers below.
        function mv(transa::SparseChar,
                    alpha::$elty,
                    A::Union{CudaSparseMatrix{$elty},CompressedSparse{$elty}},
                    X::CudaVector{$elty},
                    index::SparseChar)
            m = transa == 'N' ? size(A)[1] : size(A)[2]
            mv(transa,alpha,A,X,zero($elty),CudaArray(zeros($elty,m)),index)
        end
        function mv(transa::SparseChar,
                    A::Union{CudaSparseMatrix{$elty},CompressedSparse{$elty}},
                    X::CudaVector{$elty},
                    index::SparseChar)
            m = transa == 'N' ? size(A)[1] : size(A)[2]
            mv(transa,one($elty),A,X,zero($elty),CudaArray(zeros($elty,m)),index)
        end
    end
end
# Hook the Base multiplication helpers up to the sparse mat-mat products,
# always with one-based ('O') indexing. Plain sparse matrices use mm2;
# Symmetric/Hermitian wrappers use mm (which unwraps A.data itself).
(*){T}(A::CudaSparseMatrix{T},B::CudaMatrix{T}) = mm2('N','N',A,B,'O')
A_mul_Bt{T}(A::CudaSparseMatrix{T},B::CudaMatrix{T}) = mm2('N','T',A,B,'O')
At_mul_B{T}(A::CudaSparseMatrix{T},B::CudaMatrix{T}) = mm2('T','N',A,B,'O')
At_mul_Bt{T}(A::CudaSparseMatrix{T},B::CudaMatrix{T}) = mm2('T','T',A,B,'O')
Ac_mul_B{T}(A::CudaSparseMatrix{T},B::CudaMatrix{T}) = mm2('C','N',A,B,'O')
(*)(A::HermOrSym,B::CudaMatrix) = mm('N',A,B,'O')
At_mul_B(A::HermOrSym,B::CudaMatrix) = mm('T',A,B,'O')
Ac_mul_B(A::HermOrSym,B::CudaMatrix) = mm('C',A,B,'O')
# Analysis phase of the triangular solver for HYB storage. Returns the
# opaque analysis handle consumed by sv_solve!.
for (fname,elty) in ((:cusparseShybsv_analysis, :Float32),
                     (:cusparseDhybsv_analysis, :Float64),
                     (:cusparseChybsv_analysis, :Complex64),
                     (:cusparseZhybsv_analysis, :Complex128))
    @eval begin
        # NOTE: `typea` is accepted for signature parity with the CSR/CSC
        # methods but the descriptor is always built as TRIANGULAR here.
        function sv_analysis(transa::SparseChar,
                             typea::SparseChar,
                             uplo::SparseChar,
                             A::CudaSparseMatrixHYB{$elty},
                             index::SparseChar)
            cutransa = cusparseop(transa)
            cuind = cusparseindex(index)
            cuuplo = cusparsefill(uplo)
            cudesc = cusparseMatDescr_t(CUSPARSE_MATRIX_TYPE_TRIANGULAR, cuuplo, CUSPARSE_DIAG_TYPE_NON_UNIT, cuind)
            m,n = A.dims
            if( m != n )
                throw(DimensionMismatch("A must be square, but has dimensions ($m,$n)!"))
            end
            info = cusparseSolveAnalysisInfo_t[0]
            cusparseCreateSolveAnalysisInfo(info)
            statuscheck(ccall(($(string(fname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, cusparseOperation_t,
                               Ptr{cusparseMatDescr_t}, cusparseHybMat_t,
                               cusparseSolveAnalysisInfo_t),
                              cusparsehandle[1], cutransa, &cudesc, A.Mat,
                              info[1]))
            info[1]
        end
    end
end
# Solve phase of the triangular solver for HYB storage:
# Y = op(A) \ (alpha * X), given an analysis handle from sv_analysis.
for (fname,elty) in ((:cusparseShybsv_solve, :Float32),
                     (:cusparseDhybsv_solve, :Float64),
                     (:cusparseChybsv_solve, :Complex64),
                     (:cusparseZhybsv_solve, :Complex128))
    @eval begin
        function sv_solve!(transa::SparseChar,
                           uplo::SparseChar,
                           alpha::$elty,
                           A::CudaSparseMatrixHYB{$elty},
                           X::CudaVector{$elty},
                           Y::CudaVector{$elty},
                           info::cusparseSolveAnalysisInfo_t,
                           index::SparseChar)
            cutransa = cusparseop(transa)
            cuind = cusparseindex(index)
            cuuplo = cusparsefill(uplo)
            cudesc = cusparseMatDescr_t(CUSPARSE_MATRIX_TYPE_TRIANGULAR, cuuplo, CUSPARSE_DIAG_TYPE_NON_UNIT, cuind)
            m,n = A.dims
            if( size(X)[1] != m )
                throw(DimensionMismatch("First dimension of A, $m, and of X, $(size(X)[1]) must match"))
            end
            statuscheck(ccall(($(string(fname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, cusparseOperation_t,
                               Ptr{$elty}, Ptr{cusparseMatDescr_t},
                               cusparseHybMat_t, cusparseSolveAnalysisInfo_t,
                               Ptr{$elty}, Ptr{$elty}), cusparsehandle[1],
                              cutransa, [alpha], &cudesc, A.Mat, info, X, Y))
            Y
        end
    end
end
# Convenience wrappers around the legacy analysis/solve triangular-solver
# pair, generated per element type.
for elty in (:Float32, :Float64, :Complex64, :Complex128)
    @eval begin
        # Out-of-place solve: allocate the result, then run the in-place
        # kernel with a caller-supplied analysis handle.
        function sv_solve(transa::SparseChar,
                          uplo::SparseChar,
                          alpha::$elty,
                          A::CudaSparseMatrix{$elty},
                          X::CudaVector{$elty},
                          info::cusparseSolveAnalysisInfo_t,
                          index::SparseChar)
            return sv_solve!(transa, uplo, alpha, A, X, similar(X), info, index)
        end
        # One-shot solve: run analysis, then the solve phase.
        function sv(transa::SparseChar,
                    typea::SparseChar,
                    uplo::SparseChar,
                    alpha::$elty,
                    A::CudaSparseMatrix{$elty},
                    X::CudaVector{$elty},
                    index::SparseChar)
            return sv_solve(transa, uplo, alpha, A, X,
                            sv_analysis(transa, typea, uplo, A, index), index)
        end
        # Default scaling of one.
        function sv(transa::SparseChar,
                    typea::SparseChar,
                    uplo::SparseChar,
                    A::CudaSparseMatrix{$elty},
                    X::CudaVector{$elty},
                    index::SparseChar)
            return sv(transa, typea, uplo, one($elty), A, X, index)
        end
        # Triangular wrappers: fill mode follows the wrapper type, and the
        # analysis is run with the triangular matrix type ('T').
        function sv(transa::SparseChar,
                    A::AbstractTriangular,
                    X::CudaVector{$elty},
                    index::SparseChar)
            fillmode = islower(A) ? 'L' : 'U'
            return sv(transa, 'T', fillmode, one($elty), A.data, X, index)
        end
        # Symmetric/Hermitian wrappers analyze their underlying storage.
        function sv_analysis(transa::SparseChar,
                             typea::SparseChar,
                             uplo::SparseChar,
                             A::HermOrSym{$elty},
                             index::SparseChar)
            return sv_analysis(transa, typea, uplo, A.data, index)
        end
    end
end
## level 3 functions
# bsrmm
# Level 3: bsrmm — sparse-dense matrix-matrix product for BSR storage.
# Computes C = alpha * op(A) * op(B) + beta * C in place in C.
for (fname,elty) in ((:cusparseSbsrmm, :Float32),
                     (:cusparseDbsrmm, :Float64),
                     (:cusparseCbsrmm, :Complex64),
                     (:cusparseZbsrmm, :Complex128))
    @eval begin
        function mm2!(transa::SparseChar,
                      transb::SparseChar,
                      alpha::$elty,
                      A::CudaSparseMatrixBSR{$elty},
                      B::CudaMatrix{$elty},
                      beta::$elty,
                      C::CudaMatrix{$elty},
                      index::SparseChar)
            cutransa = cusparseop(transa)
            cutransb = cusparseop(transb)
            cudir = cusparsedir(A.dir)
            cuind = cusparseindex(index)
            cudesc = cusparseMatDescr_t(CUSPARSE_MATRIX_TYPE_GENERAL, CUSPARSE_FILL_MODE_LOWER, CUSPARSE_DIAG_TYPE_NON_UNIT, cuind)
            m,k = A.dims
            # Block counts; dims are assumed multiples of blockDim.
            mb = div(m,A.blockDim)
            kb = div(k,A.blockDim)
            n = size(C)[2]
            # Check B and C against the shapes implied by the op flags.
            if transa == 'N' && transb == 'N'
                chkmmdims(B,C,k,n,m,n)
            elseif transa == 'N' && transb != 'N'
                chkmmdims(B,C,n,k,m,n)
            elseif transa != 'N' && transb == 'N'
                chkmmdims(B,C,m,n,k,n)
            elseif transa != 'N' && transb != 'N'
                chkmmdims(B,C,n,m,k,n)
            end
            # Leading dimensions of the dense operands (column stride).
            ldb = max(1,stride(B,2))
            ldc = max(1,stride(C,2))
            statuscheck(ccall(($(string(fname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, cusparseDirection_t,
                               cusparseOperation_t, cusparseOperation_t, Cint,
                               Cint, Cint, Cint, Ptr{$elty},
                               Ptr{cusparseMatDescr_t}, Ptr{$elty}, Ptr{Cint},
                               Ptr{Cint}, Cint, Ptr{$elty}, Cint, Ptr{$elty},
                               Ptr{$elty}, Cint), cusparsehandle[1], cudir,
                              cutransa, cutransb, mb, n, kb, A.nnz,
                              [alpha], &cudesc, A.nzVal,A.rowPtr, A.colVal,
                              A.blockDim, B, ldb, [beta], C, ldc))
            C
        end
    end
end
# csrmm
# Level 3: csrmm — sparse-dense matrix-matrix product for CSR (and, via
# the transpose trick, CSC) storage. Computes C = alpha * op(A) * B +
# beta * C in place in C. Symmetric/Hermitian wrappers are unwrapped; the
# descriptor reflecting the wrapper comes from getDescr.
for (fname,elty) in ((:cusparseScsrmm, :Float32),
                     (:cusparseDcsrmm, :Float64),
                     (:cusparseCcsrmm, :Complex64),
                     (:cusparseZcsrmm, :Complex128))
    @eval begin
        function mm!(transa::SparseChar,
                     alpha::$elty,
                     A::Union{HermOrSym{$elty,CudaSparseMatrixCSR{$elty}},CudaSparseMatrixCSR{$elty}},
                     B::CudaMatrix{$elty},
                     beta::$elty,
                     C::CudaMatrix{$elty},
                     index::SparseChar)
            Mat = A
            if typeof(A) <: Base.LinAlg.HermOrSym
                Mat = A.data
            end
            cutransa = cusparseop(transa)
            # (dead local `cuind = cusparseindex(index)` removed: the
            # descriptor is produced by getDescr below.)
            cudesc = getDescr(A,index)
            m,k = Mat.dims
            n = size(C)[2]
            if transa == 'N'
                chkmmdims(B,C,k,n,m,n)
            else
                chkmmdims(B,C,m,n,k,n)
            end
            # Leading dimensions of the dense operands (column stride).
            ldb = max(1,stride(B,2))
            ldc = max(1,stride(C,2))
            statuscheck(ccall(($(string(fname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, cusparseOperation_t, Cint, Cint,
                               Cint, Cint, Ptr{$elty}, Ptr{cusparseMatDescr_t},
                               Ptr{$elty}, Ptr{Cint}, Ptr{Cint}, Ptr{$elty},
                               Cint, Ptr{$elty}, Ptr{$elty}, Cint),
                              cusparsehandle[1], cutransa, m, n, k, Mat.nnz,
                              [alpha], &cudesc, Mat.nzVal, Mat.rowPtr,
                              Mat.colVal, B, ldb, [beta], C, ldc))
            C
        end
        # CSC variant: flip the operation (CSC == CSR of the transpose) and
        # feed colPtr/rowVal to the same csrmm kernel.
        function mm!(transa::SparseChar,
                     alpha::$elty,
                     A::Union{HermOrSym{$elty,CudaSparseMatrixCSC{$elty}},CudaSparseMatrixCSC{$elty}},
                     B::CudaMatrix{$elty},
                     beta::$elty,
                     C::CudaMatrix{$elty},
                     index::SparseChar)
            Mat = A
            if typeof(A) <: Base.LinAlg.HermOrSym
                Mat = A.data
            end
            ctransa = 'N'
            if transa == 'N'
                ctransa = 'T'
            end
            cutransa = cusparseop(ctransa)
            # (dead local `cuind = cusparseindex(index)` removed here too.)
            cudesc = getDescr(A,index)
            k,m = Mat.dims
            n = size(C)[2]
            if ctransa == 'N'
                chkmmdims(B,C,k,n,m,n)
            else
                chkmmdims(B,C,m,n,k,n)
            end
            ldb = max(1,stride(B,2))
            ldc = max(1,stride(C,2))
            statuscheck(ccall(($(string(fname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, cusparseOperation_t, Cint, Cint,
                               Cint, Cint, Ptr{$elty}, Ptr{cusparseMatDescr_t},
                               Ptr{$elty}, Ptr{Cint}, Ptr{Cint}, Ptr{$elty},
                               Cint, Ptr{$elty}, Ptr{$elty}, Cint),
                              cusparsehandle[1], cutransa, m, n, k, Mat.nnz,
                              [alpha], &cudesc, Mat.nzVal, Mat.colPtr,
                              Mat.rowVal, B, ldb, [beta], C, ldc))
            C
        end
    end
end
# Out-of-place convenience wrappers around mm!, generated per element
# type. Variants fill in alpha = 1, beta = 1/0, and/or allocate C.
for elty in (:Float32, :Float64, :Complex64, :Complex128)
    @eval begin
        # Full form: compute into a copy of C.
        function mm(transa::SparseChar,
                    alpha::$elty,
                    A::CudaSparseMatrix{$elty},
                    B::CudaMatrix{$elty},
                    beta::$elty,
                    C::CudaMatrix{$elty},
                    index::SparseChar)
            return mm!(transa,alpha,A,B,beta,copy(C),index)
        end
        function mm(transa::SparseChar,
                    A::CudaSparseMatrix{$elty},
                    B::CudaMatrix{$elty},
                    beta::$elty,
                    C::CudaMatrix{$elty},
                    index::SparseChar)
            return mm(transa,one($elty),A,B,beta,C,index)
        end
        function mm(transa::SparseChar,
                    A::CudaSparseMatrix{$elty},
                    B::CudaMatrix{$elty},
                    C::CudaMatrix{$elty},
                    index::SparseChar)
            return mm(transa,one($elty),A,B,one($elty),C,index)
        end
        # Allocating variants: the row count of the result follows the
        # requested operation on A.
        function mm(transa::SparseChar,
                    alpha::$elty,
                    A::CudaSparseMatrix{$elty},
                    B::CudaMatrix{$elty},
                    index::SparseChar)
            nrows = (transa == 'N') ? size(A)[1] : size(A)[2]
            out = CudaArray(zeros($elty,(nrows,size(B)[2])))
            return mm!(transa,alpha,A,B,zero($elty),out,index)
        end
        function mm(transa::SparseChar,
                    A::CudaSparseMatrix{$elty},
                    B::CudaMatrix{$elty},
                    index::SparseChar)
            return mm(transa,one($elty),A,B,index)
        end
        # Symmetric/Hermitian wrappers multiply via the underlying storage
        # (the descriptor handling happens inside mm!).
        function mm(transa::SparseChar,
                    A::HermOrSym,
                    B::CudaMatrix{$elty},
                    index::SparseChar)
            nrows = (transa == 'N') ? size(A)[1] : size(A)[2]
            out = CudaArray(zeros($elty,(nrows,size(B)[2])))
            return mm!(transa,one($elty),A.data,B,zero($elty),out,index)
        end
    end
end
# Level 3: csrmm2 — sparse-dense matrix-matrix product supporting an op on
# B as well. Computes C = alpha * op(A) * op(B) + beta * C in place in C.
for (fname,elty) in ((:cusparseScsrmm2, :Float32),
                     (:cusparseDcsrmm2, :Float64),
                     (:cusparseCcsrmm2, :Complex64),
                     (:cusparseZcsrmm2, :Complex128))
    @eval begin
        function mm2!(transa::SparseChar,
                      transb::SparseChar,
                      alpha::$elty,
                      A::CudaSparseMatrixCSR{$elty},
                      B::CudaMatrix{$elty},
                      beta::$elty,
                      C::CudaMatrix{$elty},
                      index::SparseChar)
            cutransa = cusparseop(transa)
            cutransb = cusparseop(transb)
            cuind = cusparseindex(index)
            cudesc = cusparseMatDescr_t(CUSPARSE_MATRIX_TYPE_GENERAL, CUSPARSE_FILL_MODE_LOWER, CUSPARSE_DIAG_TYPE_NON_UNIT, cuind)
            m,k = A.dims
            n = size(C)[2]
            # Check B and C against the shapes implied by both op flags.
            if transa == 'N' && transb == 'N'
                chkmmdims(B,C,k,n,m,n)
            elseif transa == 'N' && transb != 'N'
                chkmmdims(B,C,n,k,m,n)
            elseif transa != 'N' && transb == 'N'
                chkmmdims(B,C,m,n,k,n)
            elseif transa != 'N' && transb != 'N'
                chkmmdims(B,C,n,m,k,n)
            end
            # Leading dimensions of the dense operands (column stride).
            ldb = max(1,stride(B,2))
            ldc = max(1,stride(C,2))
            statuscheck(ccall(($(string(fname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, cusparseOperation_t,
                               cusparseOperation_t, Cint, Cint, Cint, Cint,
                               Ptr{$elty}, Ptr{cusparseMatDescr_t}, Ptr{$elty},
                               Ptr{Cint}, Ptr{Cint}, Ptr{$elty}, Cint,
                               Ptr{$elty}, Ptr{$elty}, Cint), cusparsehandle[1],
                              cutransa, cutransb, m, n, k, A.nnz, [alpha], &cudesc,
                              A.nzVal, A.rowPtr, A.colVal, B, ldb, [beta], C, ldc))
            C
        end
        # CSC variant: flip op(A) (CSC == CSR of the transpose) and feed
        # colPtr/rowVal to the same csrmm2 kernel.
        function mm2!(transa::SparseChar,
                      transb::SparseChar,
                      alpha::$elty,
                      A::CudaSparseMatrixCSC{$elty},
                      B::CudaMatrix{$elty},
                      beta::$elty,
                      C::CudaMatrix{$elty},
                      index::SparseChar)
            ctransa = 'N'
            if transa == 'N'
                ctransa = 'T'
            end
            cutransa = cusparseop(ctransa)
            cutransb = cusparseop(transb)
            cuind = cusparseindex(index)
            cudesc = cusparseMatDescr_t(CUSPARSE_MATRIX_TYPE_GENERAL, CUSPARSE_FILL_MODE_LOWER, CUSPARSE_DIAG_TYPE_NON_UNIT, cuind)
            k,m = A.dims
            n = size(C)[2]
            if ctransa == 'N' && transb == 'N'
                chkmmdims(B,C,k,n,m,n)
            elseif ctransa == 'N' && transb != 'N'
                chkmmdims(B,C,n,k,m,n)
            elseif ctransa != 'N' && transb == 'N'
                chkmmdims(B,C,m,n,k,n)
            elseif ctransa != 'N' && transb != 'N'
                chkmmdims(B,C,n,m,k,n)
            end
            ldb = max(1,stride(B,2))
            ldc = max(1,stride(C,2))
            statuscheck(ccall(($(string(fname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, cusparseOperation_t,
                               cusparseOperation_t, Cint, Cint, Cint, Cint,
                               Ptr{$elty}, Ptr{cusparseMatDescr_t}, Ptr{$elty},
                               Ptr{Cint}, Ptr{Cint}, Ptr{$elty}, Cint,
                               Ptr{$elty}, Ptr{$elty}, Cint), cusparsehandle[1],
                              cutransa, cutransb, m, n, k, A.nnz, [alpha], &cudesc,
                              A.nzVal, A.colPtr, A.rowVal, B, ldb, [beta], C, ldc))
            C
        end
    end
end
# Out-of-place convenience front-ends for mm2!. They fill in default scalars
# (alpha = one, beta = zero) and, where no output matrix is supplied, allocate
# a zeroed device array of the correct shape before delegating.
for elty in (:Float32,:Float64,:Complex64,:Complex128)
    @eval begin
        # full signature: work on a copy of C so the caller's data is untouched
        function mm2(transa::SparseChar,
                     transb::SparseChar,
                     alpha::$elty,
                     A::Union{CudaSparseMatrixCSR{$elty},CudaSparseMatrixCSC{$elty},CudaSparseMatrixBSR{$elty}},
                     B::CudaMatrix{$elty},
                     beta::$elty,
                     C::CudaMatrix{$elty},
                     index::SparseChar)
            return mm2!(transa,transb,alpha,A,B,beta,copy(C),index)
        end
        # default alpha = one
        function mm2(transa::SparseChar,
                     transb::SparseChar,
                     A::Union{CudaSparseMatrixCSR{$elty},CudaSparseMatrixCSC{$elty},CudaSparseMatrixBSR{$elty}},
                     B::CudaMatrix{$elty},
                     beta::$elty,
                     C::CudaMatrix{$elty},
                     index::SparseChar)
            return mm2(transa,transb,one($elty),A,B,beta,C,index)
        end
        # default alpha = one, beta = one
        function mm2(transa::SparseChar,
                     transb::SparseChar,
                     A::Union{CudaSparseMatrixCSR{$elty},CudaSparseMatrixCSC{$elty},CudaSparseMatrixBSR{$elty}},
                     B::CudaMatrix{$elty},
                     C::CudaMatrix{$elty},
                     index::SparseChar)
            return mm2(transa,transb,one($elty),A,B,one($elty),C,index)
        end
        # no output supplied: allocate op(A)*op(B)-shaped zeros, beta = zero
        function mm2(transa::SparseChar,
                     transb::SparseChar,
                     alpha::$elty,
                     A::Union{CudaSparseMatrixCSR{$elty},CudaSparseMatrixCSC{$elty},CudaSparseMatrixBSR{$elty}},
                     B::CudaMatrix{$elty},
                     index::SparseChar)
            nrows = (transa == 'N') ? size(A)[1] : size(A)[2]
            ncols = (transb == 'N') ? size(B)[2] : size(B)[1]
            out   = CudaArray(zeros($elty,(nrows,ncols)))
            return mm2(transa,transb,alpha,A,B,zero($elty),out,index)
        end
        # no output and default alpha
        function mm2(transa::SparseChar,
                     transb::SparseChar,
                     A::Union{CudaSparseMatrixCSR{$elty},CudaSparseMatrixCSC{$elty},CudaSparseMatrixBSR{$elty}},
                     B::CudaMatrix{$elty},
                     index::SparseChar)
            nrows = (transa == 'N') ? size(A)[1] : size(A)[2]
            ncols = (transb == 'N') ? size(B)[2] : size(B)[1]
            out   = CudaArray(zeros($elty,(nrows,ncols)))
            return mm2(transa,transb,one($elty),A,B,zero($elty),out,index)
        end
    end
end
# Operator front-ends for sparse matrix-vector products: dispatch to mv with
# the transpose flag ('N' plain, 'T' transpose, 'C' conjugate-transpose) and
# one-based ('O') indexing. The HermOrSym variants forward the wrapper itself;
# mv unwraps/uses the symmetry information downstream.
(*)(A::CudaSparseMatrix,B::CudaVector) = mv('N',A,B,'O')
At_mul_B(A::CudaSparseMatrix,B::CudaVector) = mv('T',A,B,'O')
Ac_mul_B(A::CudaSparseMatrix,B::CudaVector) = mv('C',A,B,'O')
(*){T}(A::HermOrSym{T,CudaSparseMatrix{T}},B::CudaVector{T}) = mv('N',A,B,'O')
At_mul_B{T}(A::HermOrSym{T,CudaSparseMatrix{T}},B::CudaVector{T}) = mv('T',A,B,'O')
Ac_mul_B{T}(A::HermOrSym{T,CudaSparseMatrix{T}},B::CudaVector{T}) = mv('C',A,B,'O')
# sm_analysis: run the cusparse csrsm analysis phase for a triangular sparse
# matrix, returning an opaque cusparseSolveAnalysisInfo_t that sm_solve
# consumes. One method pair per element type is generated via @eval.
for (fname,elty) in ((:cusparseScsrsm_analysis, :Float32),
                     (:cusparseDcsrsm_analysis, :Float64),
                     (:cusparseCcsrsm_analysis, :Complex64),
                     (:cusparseZcsrsm_analysis, :Complex128))
    @eval begin
        # CSR version.
        function sm_analysis(transa::SparseChar,
                             uplo::SparseChar,
                             A::CudaSparseMatrixCSR{$elty},
                             index::SparseChar)
            cutransa = cusparseop(transa)
            cuind = cusparseindex(index)
            cuuplo = cusparsefill(uplo)
            # declared TRIANGULAR so cusparse reads only the uplo half
            cudesc = cusparseMatDescr_t(CUSPARSE_MATRIX_TYPE_TRIANGULAR, cuuplo, CUSPARSE_DIAG_TYPE_NON_UNIT, cuind)
            m,n = A.dims
            if( n != m )
                throw(DimensionMismatch("A must be square, but has dimensions ($m,$n)!"))
            end
            # allocate and register the analysis-info handle
            info = cusparseSolveAnalysisInfo_t[0]
            cusparseCreateSolveAnalysisInfo(info)
            statuscheck(ccall(($(string(fname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, cusparseOperation_t, Cint,
                               Cint, Ptr{cusparseMatDescr_t}, Ptr{$elty},
                               Ptr{Cint}, Ptr{Cint}, cusparseSolveAnalysisInfo_t),
                              cusparsehandle[1], cutransa, m, A.nnz, &cudesc,
                              A.nzVal, A.rowPtr, A.colVal, info[1]))
            info[1]
        end
        # CSC version: the stored data is the CSR of the transpose, so both
        # the operation ('N'<->'T') and the fill mode ('U'<->'L') are flipped.
        function sm_analysis(transa::SparseChar,
                             uplo::SparseChar,
                             A::CudaSparseMatrixCSC{$elty},
                             index::SparseChar)
            ctransa = 'N'
            if transa == 'N'
                ctransa = 'T'
            end
            cuplo = 'U'
            if uplo == 'U'
                cuplo = 'L'
            end
            cutransa = cusparseop(ctransa)
            cuind = cusparseindex(index)
            cuuplo = cusparsefill(cuplo)
            cudesc = cusparseMatDescr_t(CUSPARSE_MATRIX_TYPE_TRIANGULAR, cuuplo, CUSPARSE_DIAG_TYPE_NON_UNIT, cuind)
            n,m = A.dims
            if( n != m )
                throw(DimensionMismatch("A must be square, but has dimensions ($m,$n)!"))
            end
            info = cusparseSolveAnalysisInfo_t[0]
            cusparseCreateSolveAnalysisInfo(info)
            statuscheck(ccall(($(string(fname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, cusparseOperation_t, Cint,
                               Cint, Ptr{cusparseMatDescr_t}, Ptr{$elty},
                               Ptr{Cint}, Ptr{Cint}, cusparseSolveAnalysisInfo_t),
                              cusparsehandle[1], cutransa, m, A.nnz, &cudesc,
                              A.nzVal, A.colPtr, A.rowVal, info[1]))
            info[1]
        end
    end
end
# sm_solve: the solve phase of the triangular solve, consuming the info
# object produced by sm_analysis. Solves op(A) * Y = alpha * X and returns
# a freshly-allocated Y; X is not modified.
for (fname,elty) in ((:cusparseScsrsm_solve, :Float32),
                     (:cusparseDcsrsm_solve, :Float64),
                     (:cusparseCcsrsm_solve, :Complex64),
                     (:cusparseZcsrsm_solve, :Complex128))
    @eval begin
        # CSR version.
        function sm_solve(transa::SparseChar,
                          uplo::SparseChar,
                          alpha::$elty,
                          A::CudaSparseMatrixCSR{$elty},
                          X::CudaMatrix{$elty},
                          info::cusparseSolveAnalysisInfo_t,
                          index::SparseChar)
            cutransa = cusparseop(transa)
            cuind = cusparseindex(index)
            cuuplo = cusparsefill(uplo)
            cudesc = cusparseMatDescr_t(CUSPARSE_MATRIX_TYPE_TRIANGULAR, cuuplo, CUSPARSE_DIAG_TYPE_NON_UNIT, cuind)
            m,nA = A.dims
            mX,n = X.dims
            if( mX != m )
                throw(DimensionMismatch("First dimension of A, $m, and X, $mX must match"))
            end
            # result is written into a new matrix of the same shape as X
            Y = similar(X)
            ldx = max(1,stride(X,2))
            ldy = max(1,stride(Y,2))
            statuscheck(ccall(($(string(fname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, cusparseOperation_t, Cint, Cint,
                               Ptr{$elty}, Ptr{cusparseMatDescr_t}, Ptr{$elty},
                               Ptr{Cint}, Ptr{Cint}, cusparseSolveAnalysisInfo_t,
                               Ptr{$elty}, Cint, Ptr{$elty}, Cint),
                              cusparsehandle[1], cutransa, m, n, [alpha],
                              &cudesc, A.nzVal, A.rowPtr, A.colVal, info, X, ldx,
                              Y, ldy))
            Y
        end
        # CSC version: operation and fill mode are flipped because the stored
        # data is the CSR layout of the transpose (see sm_analysis).
        function sm_solve(transa::SparseChar,
                          uplo::SparseChar,
                          alpha::$elty,
                          A::CudaSparseMatrixCSC{$elty},
                          X::CudaMatrix{$elty},
                          info::cusparseSolveAnalysisInfo_t,
                          index::SparseChar)
            ctransa = 'N'
            if transa == 'N'
                ctransa = 'T'
            end
            cuplo = 'U'
            if uplo == 'U'
                cuplo = 'L'
            end
            cutransa = cusparseop(ctransa)
            cuind = cusparseindex(index)
            cuuplo = cusparsefill(cuplo)
            cudesc = cusparseMatDescr_t(CUSPARSE_MATRIX_TYPE_TRIANGULAR, cuuplo, CUSPARSE_DIAG_TYPE_NON_UNIT, cuind)
            m,nA = A.dims
            mX,n = X.dims
            if( mX != m )
                throw(DimensionMismatch("First dimension of A, $m, and X, $mX must match"))
            end
            Y = similar(X)
            ldx = max(1,stride(X,2))
            ldy = max(1,stride(Y,2))
            statuscheck(ccall(($(string(fname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, cusparseOperation_t, Cint, Cint,
                               Ptr{$elty}, Ptr{cusparseMatDescr_t}, Ptr{$elty},
                               Ptr{Cint}, Ptr{Cint}, cusparseSolveAnalysisInfo_t,
                               Ptr{$elty}, Cint, Ptr{$elty}, Cint),
                              cusparsehandle[1], cutransa, m, n, [alpha],
                              &cudesc, A.nzVal, A.colPtr, A.rowVal, info, X, ldx,
                              Y, ldy))
            Y
        end
    end
end
# High-level triangular solve: run the analysis phase, then the solve phase.
# Variants default alpha to one and, for AbstractTriangular wrappers, derive
# the fill mode from the wrapper itself.
for elty in (:Float32, :Float64, :Complex64, :Complex128)
    @eval begin
        function sm(transa::SparseChar,
                    uplo::SparseChar,
                    alpha::$elty,
                    A::CudaSparseMatrix{$elty},
                    B::CudaMatrix{$elty},
                    index::SparseChar)
            ainfo = sm_analysis(transa,uplo,A,index)
            return sm_solve(transa,uplo,alpha,A,B,ainfo,index)
        end
        # default alpha = one
        function sm(transa::SparseChar,
                    uplo::SparseChar,
                    A::CudaSparseMatrix{$elty},
                    B::CudaMatrix{$elty},
                    index::SparseChar)
            return sm(transa,uplo,one($elty),A,B,index)
        end
        # triangular wrapper: fill mode comes from the wrapper type,
        # the raw sparse data is in A.data
        function sm(transa::SparseChar,
                    alpha::$elty,
                    A::AbstractTriangular,
                    B::CudaMatrix{$elty},
                    index::SparseChar)
            fillmode = islower(A) ? 'L' : 'U'
            ainfo = sm_analysis(transa,fillmode,A.data,index)
            return sm_solve(transa,fillmode,alpha,A.data,B,ainfo,index)
        end
        # triangular wrapper with default alpha = one
        function sm(transa::SparseChar,
                    A::AbstractTriangular,
                    B::CudaMatrix{$elty},
                    index::SparseChar)
            return sm(transa,one($elty),A,B,index)
        end
    end
end
# Left-division front-ends: solve op(A) \ B for a triangular sparse A with
# one-based ('O') indexing; the flag selects plain / transposed / adjoint A.
function (\)(A::AbstractTriangular, B::CudaMatrix)
    sm('N',A,B,'O')
end
function At_ldiv_B(A::AbstractTriangular, B::CudaMatrix)
    sm('T',A,B,'O')
end
function Ac_ldiv_B(A::AbstractTriangular, B::CudaMatrix)
    sm('C',A,B,'O')
end
# bsrsm2
# In-place triangular solve with a BSR (block-sparse row) matrix:
#   op(A) * X = alpha * op(X)  (X is overwritten with the solution).
# Three-phase cusparse API: query buffer size, run analysis, then solve.
for (bname,aname,sname,elty) in ((:cusparseSbsrsm2_bufferSize, :cusparseSbsrsm2_analysis, :cusparseSbsrsm2_solve, :Float32),
                                 (:cusparseDbsrsm2_bufferSize, :cusparseDbsrsm2_analysis, :cusparseDbsrsm2_solve, :Float64),
                                 (:cusparseCbsrsm2_bufferSize, :cusparseCbsrsm2_analysis, :cusparseCbsrsm2_solve, :Complex64),
                                 (:cusparseZbsrsm2_bufferSize, :cusparseZbsrsm2_analysis, :cusparseZbsrsm2_solve, :Complex128))
    @eval begin
        function bsrsm2!(transa::SparseChar,
                         transxy::SparseChar,
                         alpha::$elty,
                         A::CudaSparseMatrixBSR{$elty},
                         X::CudaMatrix{$elty},
                         index::SparseChar)
            cutransa = cusparseop(transa)
            cutransxy = cusparseop(transxy)
            cudir = cusparsedir(A.dir)
            cuind = cusparseindex(index)
            cudesc = cusparseMatDescr_t(CUSPARSE_MATRIX_TYPE_GENERAL, CUSPARSE_FILL_MODE_UPPER, CUSPARSE_DIAG_TYPE_NON_UNIT, cuind)
            m,n = A.dims
            if( m != n )
                throw(DimensionMismatch("A must be square!"))
            end
            # number of block rows
            mb = div(m,A.blockDim)
            mX,nX = size(X)
            # X must conform with A along the dimension selected by transxy
            if( transxy == 'N' && (mX != m) )
                throw(DimensionMismatch(""))
            end
            if( transxy != 'N' && (nX != m) )
                throw(DimensionMismatch(""))
            end
            ldx = max(1,stride(X,2))
            info = bsrsm2Info_t[0]
            cusparseCreateBsrsm2Info(info)
            # phase 1: query the workspace size
            bufSize = Array(Cint,1)
            statuscheck(ccall(($(string(bname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, cusparseDirection_t,
                               cusparseOperation_t, cusparseOperation_t, Cint,
                               Cint, Cint, Ptr{cusparseMatDescr_t},
                               Ptr{$elty}, Ptr{Cint}, Ptr{Cint}, Cint,
                               bsrsm2Info_t, Ptr{Cint}), cusparsehandle[1],
                              cudir, cutransa, cutransxy, mb, nX, A.nnz,
                              &cudesc, A.nzVal, A.rowPtr, A.colVal,
                              A.blockDim, info[1], bufSize))
            buffer = CudaArray(zeros(UInt8, bufSize[1]))
            # phase 2: analysis
            statuscheck(ccall(($(string(aname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, cusparseDirection_t,
                               cusparseOperation_t, cusparseOperation_t, Cint,
                               Cint, Cint, Ptr{cusparseMatDescr_t},
                               Ptr{$elty}, Ptr{Cint}, Ptr{Cint}, Cint,
                               bsrsm2Info_t, cusparseSolvePolicy_t, Ptr{Void}),
                              cusparsehandle[1], cudir, cutransa, cutransxy,
                              mb, nX, A.nnz, &cudesc, A.nzVal, A.rowPtr,
                              A.colVal, A.blockDim, info[1],
                              CUSPARSE_SOLVE_POLICY_USE_LEVEL, buffer))
            # abort on a structural/numerical zero pivot (position >= 0)
            posit = Array(Cint,1)
            statuscheck(ccall((:cusparseXbsrsm2_zeroPivot, libcusparse),
                        cusparseStatus_t, (cusparseHandle_t, bsrsm2Info_t,
                        Ptr{Cint}), cusparsehandle[1], info[1], posit))
            if( posit[1] >= 0 )
                throw(string("Structural/numerical zero in A at (",posit[1],posit[1],")"))
            end
            # phase 3: solve, writing the result back into X (same pointer
            # passed for input and output)
            statuscheck(ccall(($(string(sname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, cusparseDirection_t,
                               cusparseOperation_t, cusparseOperation_t, Cint,
                               Cint, Cint, Ptr{$elty}, Ptr{cusparseMatDescr_t},
                               Ptr{$elty}, Ptr{Cint}, Ptr{Cint}, Cint,
                               bsrsm2Info_t, Ptr{$elty}, Cint, Ptr{$elty}, Cint,
                               cusparseSolvePolicy_t, Ptr{Void}),
                              cusparsehandle[1], cudir, cutransa, cutransxy, mb,
                              nX, A.nnz, [alpha], &cudesc, A.nzVal, A.rowPtr,
                              A.colVal, A.blockDim, info[1], X, ldx, X, ldx,
                              CUSPARSE_SOLVE_POLICY_USE_LEVEL, buffer))
            cusparseDestroyBsrsm2Info(info[1])
            X
        end
        # out-of-place variant: operates on a copy of X
        function bsrsm2(transa::SparseChar,
                        transxy::SparseChar,
                        alpha::$elty,
                        A::CudaSparseMatrixBSR{$elty},
                        X::CudaMatrix{$elty},
                        index::SparseChar)
            bsrsm2!(transa,transxy,alpha,A,copy(X),index)
        end
    end
end
# extensions
# CSR GEAM
# geam: sparse matrix-matrix addition, C = alpha*A + beta*B.  The sparsity
# pattern / nnz of C is computed first (csrgeamNnz), then the values are
# filled in by the typed routine.
for (fname,elty) in ((:cusparseScsrgeam, :Float32),
                     (:cusparseDcsrgeam, :Float64),
                     (:cusparseCcsrgeam, :Complex64),
                     (:cusparseZcsrgeam, :Complex128))
    @eval begin
        # CSR + CSR -> CSR
        function geam(alpha::$elty,
                      A::CudaSparseMatrixCSR{$elty},
                      beta::$elty,
                      B::CudaSparseMatrixCSR{$elty},
                      indexA::SparseChar,
                      indexB::SparseChar,
                      indexC::SparseChar)
            cuinda = cusparseindex(indexA)
            cuindb = cusparseindex(indexB)
            # fixed: C's index base must come from indexC (was indexB)
            cuindc = cusparseindex(indexC)
            cudesca = cusparseMatDescr_t(CUSPARSE_MATRIX_TYPE_GENERAL, CUSPARSE_FILL_MODE_LOWER, CUSPARSE_DIAG_TYPE_NON_UNIT, cuinda)
            cudescb = cusparseMatDescr_t(CUSPARSE_MATRIX_TYPE_GENERAL, CUSPARSE_FILL_MODE_LOWER, CUSPARSE_DIAG_TYPE_NON_UNIT, cuindb)
            cudescc = cusparseMatDescr_t(CUSPARSE_MATRIX_TYPE_GENERAL, CUSPARSE_FILL_MODE_LOWER, CUSPARSE_DIAG_TYPE_NON_UNIT, cuindc)
            mA,nA = A.dims
            mB,nB = B.dims
            if( (mA != mB) || (nA != nB) )
                # fixed: give the same informative message as the CSC method
                throw(DimensionMismatch("A and B must have same dimensions!"))
            end
            # pattern phase: count nonzeros of C and fill its row pointers
            nnzC = Array(Cint,1)
            rowPtrC = CudaArray(zeros(Cint,mA+1))
            statuscheck(ccall((:cusparseXcsrgeamNnz,libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, Cint, Cint,
                               Ptr{cusparseMatDescr_t}, Cint, Ptr{Cint},
                               Ptr{Cint}, Ptr{cusparseMatDescr_t}, Cint, Ptr{Cint},
                               Ptr{Cint}, Ptr{cusparseMatDescr_t}, Ptr{Cint},
                               Ptr{Cint}), cusparsehandle[1], mA, nA, &cudesca,
                              A.nnz, A.rowPtr, A.colVal, &cudescb, B.nnz,
                              B.rowPtr, B.colVal, &cudescc, rowPtrC, nnzC))
            nnz = nnzC[1]
            C = CudaSparseMatrixCSR(rowPtrC, CudaArray(zeros(Cint,nnz)), CudaArray(zeros($elty,nnz)), nnz, A.dims)
            # value phase
            statuscheck(ccall(($(string(fname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, Cint, Cint, Ptr{$elty},
                               Ptr{cusparseMatDescr_t}, Cint, Ptr{$elty},
                               Ptr{Cint}, Ptr{Cint}, Ptr{$elty},
                               Ptr{cusparseMatDescr_t}, Cint, Ptr{$elty},
                               Ptr{Cint}, Ptr{Cint}, Ptr{cusparseMatDescr_t},
                               Ptr{$elty}, Ptr{Cint}, Ptr{Cint}),
                              cusparsehandle[1], mA, nA, [alpha], &cudesca,
                              A.nnz, A.nzVal, A.rowPtr, A.colVal, [beta],
                              &cudescb, B.nnz, B.nzVal, B.rowPtr, B.colVal,
                              &cudescc, C.nzVal, C.rowPtr, C.colVal))
            C
        end
        # CSC + CSC -> CSC: the CSC buffers are fed to the csr routine as the
        # CSR layout of the transposes (addition commutes with transposition).
        function geam(alpha::$elty,
                      A::CudaSparseMatrixCSC{$elty},
                      beta::$elty,
                      B::CudaSparseMatrixCSC{$elty},
                      indexA::SparseChar,
                      indexB::SparseChar,
                      indexC::SparseChar)
            cuinda = cusparseindex(indexA)
            cuindb = cusparseindex(indexB)
            # fixed: C's index base must come from indexC (was indexB)
            cuindc = cusparseindex(indexC)
            cudesca = cusparseMatDescr_t(CUSPARSE_MATRIX_TYPE_GENERAL, CUSPARSE_FILL_MODE_LOWER, CUSPARSE_DIAG_TYPE_NON_UNIT, cuinda)
            cudescb = cusparseMatDescr_t(CUSPARSE_MATRIX_TYPE_GENERAL, CUSPARSE_FILL_MODE_LOWER, CUSPARSE_DIAG_TYPE_NON_UNIT, cuindb)
            cudescc = cusparseMatDescr_t(CUSPARSE_MATRIX_TYPE_GENERAL, CUSPARSE_FILL_MODE_LOWER, CUSPARSE_DIAG_TYPE_NON_UNIT, cuindc)
            mA,nA = A.dims
            mB,nB = B.dims
            if( (mA != mB) || (nA != nB) )
                throw(DimensionMismatch("A and B must have same dimensions!"))
            end
            nnzC = Array(Cint,1)
            # NOTE(review): this buffer acts as C's colPtr; it is sized mA+1
            # and the ccall receives (mA,nA), but the transposed data has nA
            # rows — confirm correctness for non-square inputs.
            colPtrC = CudaArray(zeros(Cint,mA+1))
            statuscheck(ccall((:cusparseXcsrgeamNnz,libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, Cint, Cint,
                               Ptr{cusparseMatDescr_t}, Cint, Ptr{Cint},
                               Ptr{Cint}, Ptr{cusparseMatDescr_t}, Cint, Ptr{Cint},
                               Ptr{Cint}, Ptr{cusparseMatDescr_t}, Ptr{Cint},
                               Ptr{Cint}), cusparsehandle[1], mA, nA, &cudesca,
                              A.nnz, A.colPtr, A.rowVal, &cudescb, B.nnz,
                              B.colPtr, B.rowVal, &cudescc, colPtrC, nnzC))
            nnz = nnzC[1]
            C = CudaSparseMatrixCSC(colPtrC, CudaArray(zeros(Cint,nnz)), CudaArray(zeros($elty,nnz)), nnz, A.dims)
            statuscheck(ccall(($(string(fname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, Cint, Cint, Ptr{$elty},
                               Ptr{cusparseMatDescr_t}, Cint, Ptr{$elty},
                               Ptr{Cint}, Ptr{Cint}, Ptr{$elty},
                               Ptr{cusparseMatDescr_t}, Cint, Ptr{$elty},
                               Ptr{Cint}, Ptr{Cint}, Ptr{cusparseMatDescr_t},
                               Ptr{$elty}, Ptr{Cint}, Ptr{Cint}),
                              cusparsehandle[1], mA, nA, [alpha], &cudesca,
                              A.nnz, A.nzVal, A.colPtr, A.rowVal, [beta],
                              &cudescb, B.nnz, B.nzVal, B.colPtr, B.rowVal,
                              &cudescc, C.nzVal, C.colPtr, C.rowVal))
            C
        end
        # mixed-format variants: convert the CSC operand to CSR first
        function geam(alpha::$elty,
                      A::CudaSparseMatrixCSR{$elty},
                      beta::$elty,
                      B::CudaSparseMatrixCSC{$elty},
                      indexA::SparseChar,
                      indexB::SparseChar,
                      indexC::SparseChar)
            geam(alpha,A,beta,switch2csr(B),indexA,indexB,indexC)
        end
        function geam(alpha::$elty,
                      A::CudaSparseMatrixCSC{$elty},
                      beta::$elty,
                      B::CudaSparseMatrixCSR{$elty},
                      indexA::SparseChar,
                      indexB::SparseChar,
                      indexC::SparseChar)
            geam(alpha,switch2csr(A),beta,B,indexA,indexB,indexC)
        end
        # default-scalar variants (missing alpha and/or beta default to one)
        function geam(alpha::$elty,
                      A::Union{CudaSparseMatrixCSR{$elty},CudaSparseMatrixCSC{$elty}},
                      B::Union{CudaSparseMatrixCSR{$elty},CudaSparseMatrixCSC{$elty}},
                      indexA::SparseChar,
                      indexB::SparseChar,
                      indexC::SparseChar)
            geam(alpha,A,one($elty),B,indexA,indexB,indexC)
        end
        function geam(A::Union{CudaSparseMatrixCSR{$elty},CudaSparseMatrixCSC{$elty}},
                      beta::$elty,
                      B::Union{CudaSparseMatrixCSR{$elty},CudaSparseMatrixCSC{$elty}},
                      indexA::SparseChar,
                      indexB::SparseChar,
                      indexC::SparseChar)
            geam(one($elty),A,beta,B,indexA,indexB,indexC)
        end
        function geam(A::Union{CudaSparseMatrixCSR{$elty},CudaSparseMatrixCSC{$elty}},
                      B::Union{CudaSparseMatrixCSR{$elty},CudaSparseMatrixCSC{$elty}},
                      indexA::SparseChar,
                      indexB::SparseChar,
                      indexC::SparseChar)
            geam(one($elty),A,one($elty),B,indexA,indexB,indexC)
        end
    end
end
# Operator sugar over geam with one-based indexing: A + B, and A - B as
# A + (-1)*B (the scalar is negated in the matching element type).
(+)(A::Union{CudaSparseMatrixCSR,CudaSparseMatrixCSC},B::Union{CudaSparseMatrixCSR,CudaSparseMatrixCSC}) = geam(A,B,'O','O','O')
(-)(A::Union{CudaSparseMatrixCSR,CudaSparseMatrixCSC},B::Union{CudaSparseMatrixCSR,CudaSparseMatrixCSC}) = geam(A,-one(eltype(A)),B,'O','O','O')
#CSR GEMM
# gemm (CSR): sparse*sparse product C = op(A) * op(B).  Like geam, the nnz
# pattern of C is computed first, then the values.
for (fname,elty) in ((:cusparseScsrgemm, :Float32),
                     (:cusparseDcsrgemm, :Float64),
                     (:cusparseCcsrgemm, :Complex64),
                     (:cusparseZcsrgemm, :Complex128))
    @eval begin
        function gemm(transa::SparseChar,
                      transb::SparseChar,
                      A::CudaSparseMatrixCSR{$elty},
                      B::CudaSparseMatrixCSR{$elty},
                      indexA::SparseChar,
                      indexB::SparseChar,
                      indexC::SparseChar)
            cutransa = cusparseop(transa)
            cutransb = cusparseop(transb)
            cuinda = cusparseindex(indexA)
            cuindb = cusparseindex(indexB)
            # fixed: C's index base must come from indexC (was indexB)
            cuindc = cusparseindex(indexC)
            cudesca = cusparseMatDescr_t(CUSPARSE_MATRIX_TYPE_GENERAL, CUSPARSE_FILL_MODE_LOWER, CUSPARSE_DIAG_TYPE_NON_UNIT, cuinda)
            cudescb = cusparseMatDescr_t(CUSPARSE_MATRIX_TYPE_GENERAL, CUSPARSE_FILL_MODE_LOWER, CUSPARSE_DIAG_TYPE_NON_UNIT, cuindb)
            cudescc = cusparseMatDescr_t(CUSPARSE_MATRIX_TYPE_GENERAL, CUSPARSE_FILL_MODE_LOWER, CUSPARSE_DIAG_TYPE_NON_UNIT, cuindc)
            # effective shapes of op(A) (m x k) and op(B) (k x n)
            m,k = transa == 'N' ? A.dims : (A.dims[2],A.dims[1])
            kB,n = transb == 'N' ? B.dims : (B.dims[2],B.dims[1])
            if k != kB
                throw(DimensionMismatch("Interior dimension of A, $k, and B, $kB, must match"))
            end
            # pattern phase
            nnzC = Array(Cint,1)
            rowPtrC = CudaArray(zeros(Cint,m + 1))
            statuscheck(ccall((:cusparseXcsrgemmNnz,libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, cusparseOperation_t,
                               cusparseOperation_t, Cint, Cint, Cint,
                               Ptr{cusparseMatDescr_t}, Cint, Ptr{Cint},
                               Ptr{Cint}, Ptr{cusparseMatDescr_t}, Cint, Ptr{Cint},
                               Ptr{Cint}, Ptr{cusparseMatDescr_t}, Ptr{Cint},
                               Ptr{Cint}), cusparsehandle[1], cutransa, cutransb,
                              m, n, k, &cudesca, A.nnz, A.rowPtr, A.colVal,
                              &cudescb, B.nnz, B.rowPtr, B.colVal, &cudescc,
                              rowPtrC, nnzC))
            nnz = nnzC[1]
            C = CudaSparseMatrixCSR(rowPtrC, CudaArray(zeros(Cint,nnz)), CudaArray(zeros($elty,nnz)), nnz, (m,n))
            # value phase
            statuscheck(ccall(($(string(fname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, cusparseOperation_t,
                               cusparseOperation_t, Cint, Cint, Cint,
                               Ptr{cusparseMatDescr_t}, Cint, Ptr{$elty},
                               Ptr{Cint}, Ptr{Cint}, Ptr{cusparseMatDescr_t},
                               Cint, Ptr{$elty}, Ptr{Cint}, Ptr{Cint},
                               Ptr{cusparseMatDescr_t}, Ptr{$elty}, Ptr{Cint},
                               Ptr{Cint}), cusparsehandle[1], cutransa,
                              cutransb, m, n, k, &cudesca, A.nnz, A.nzVal,
                              A.rowPtr, A.colVal, &cudescb, B.nnz, B.nzVal,
                              B.rowPtr, B.colVal, &cudescc, C.nzVal,
                              C.rowPtr, C.colVal))
            C
        end
    end
end
#CSC GEMM
# gemm (CSC): sparse*sparse product for CSC operands, implemented by handing
# the CSC buffers to the csr routines with flipped transpose flags (the CSC
# data is the CSR layout of the transpose).
for (fname,elty) in ((:cusparseScsrgemm, :Float32),
                     (:cusparseDcsrgemm, :Float64),
                     (:cusparseCcsrgemm, :Complex64),
                     (:cusparseZcsrgemm, :Complex128))
    @eval begin
        function gemm(transa::SparseChar,
                      transb::SparseChar,
                      A::CudaSparseMatrixCSC{$elty},
                      B::CudaSparseMatrixCSC{$elty},
                      indexA::SparseChar,
                      indexB::SparseChar,
                      indexC::SparseChar)
            # flip both ops: op'(A') == op(A) in storage terms
            ctransa = 'N'
            if transa == 'N'
                ctransa = 'T'
            end
            cutransa = cusparseop(ctransa)
            ctransb = 'N'
            if transb == 'N'
                ctransb = 'T'
            end
            cutransb = cusparseop(ctransb)
            cuinda = cusparseindex(indexA)
            cuindb = cusparseindex(indexB)
            # fixed: C's index base must come from indexC (was indexB)
            cuindc = cusparseindex(indexC)
            cudesca = cusparseMatDescr_t(CUSPARSE_MATRIX_TYPE_GENERAL, CUSPARSE_FILL_MODE_LOWER, CUSPARSE_DIAG_TYPE_NON_UNIT, cuinda)
            cudescb = cusparseMatDescr_t(CUSPARSE_MATRIX_TYPE_GENERAL, CUSPARSE_FILL_MODE_LOWER, CUSPARSE_DIAG_TYPE_NON_UNIT, cuindb)
            cudescc = cusparseMatDescr_t(CUSPARSE_MATRIX_TYPE_GENERAL, CUSPARSE_FILL_MODE_LOWER, CUSPARSE_DIAG_TYPE_NON_UNIT, cuindc)
            # NOTE(review): the operand order is not swapped even though
            # (op(A)*op(B))' == op(B)'*op(A)'; verify this method against
            # non-square / non-commuting inputs.
            m,k = ctransa != 'N' ? A.dims : (A.dims[2],A.dims[1])
            kB,n = ctransb != 'N' ? B.dims : (B.dims[2],B.dims[1])
            if k != kB
                throw(DimensionMismatch("Interior dimension of A, $k, and B, $kB, must match"))
            end
            nnzC = Array(Cint,1)
            # NOTE(review): sized n+1 while the ccall's first dimension is m —
            # confirm sizing for non-square results.
            colPtrC = CudaArray(zeros(Cint,n + 1))
            statuscheck(ccall((:cusparseXcsrgemmNnz,libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, cusparseOperation_t,
                               cusparseOperation_t, Cint, Cint, Cint,
                               Ptr{cusparseMatDescr_t}, Cint, Ptr{Cint},
                               Ptr{Cint}, Ptr{cusparseMatDescr_t}, Cint, Ptr{Cint},
                               Ptr{Cint}, Ptr{cusparseMatDescr_t}, Ptr{Cint},
                               Ptr{Cint}), cusparsehandle[1], cutransa, cutransb,
                              m, n, k, &cudesca, A.nnz, A.colPtr, A.rowVal,
                              &cudescb, B.nnz, B.colPtr, B.rowVal, &cudescc,
                              colPtrC, nnzC))
            nnz = nnzC[1]
            C = CudaSparseMatrixCSC(colPtrC, CudaArray(zeros(Cint,nnz)), CudaArray(zeros($elty,nnz)), nnz, (m,n))
            statuscheck(ccall(($(string(fname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, cusparseOperation_t,
                               cusparseOperation_t, Cint, Cint, Cint,
                               Ptr{cusparseMatDescr_t}, Cint, Ptr{$elty},
                               Ptr{Cint}, Ptr{Cint}, Ptr{cusparseMatDescr_t},
                               Cint, Ptr{$elty}, Ptr{Cint}, Ptr{Cint},
                               Ptr{cusparseMatDescr_t}, Ptr{$elty}, Ptr{Cint},
                               Ptr{Cint}), cusparsehandle[1], cutransa,
                              cutransb, m, n, k, &cudesca, A.nnz, A.nzVal,
                              A.colPtr, A.rowVal, &cudescb, B.nnz, B.nzVal,
                              B.colPtr, B.rowVal, &cudescc, C.nzVal,
                              C.colPtr, C.rowVal))
            C
        end
    end
end
## preconditioners
# ic0 - incomplete Cholesky factorization with no pivoting
# Uses the legacy cusparse?csric0 API; requires an analysis info produced by
# the corresponding analysis phase.  The in-place variant overwrites the
# matrix values with the factor.
for (fname,elty) in ((:cusparseScsric0, :Float32),
                     (:cusparseDcsric0, :Float64),
                     (:cusparseCcsric0, :Complex64),
                     (:cusparseZcsric0, :Complex128))
    @eval begin
        function ic0!(transa::SparseChar,
                      typea::SparseChar,
                      A::CompressedSparse{$elty},
                      info::cusparseSolveAnalysisInfo_t,
                      index::SparseChar)
            # unwrap Symmetric/Hermitian wrappers to the raw sparse storage
            Mat = A
            if typeof(A) <: Base.LinAlg.HermOrSym
                Mat = A.data
            end
            cutransa = cusparseop(transa)
            # NOTE(review): cutype is computed (and overridden for wrapper
            # types) but never passed to the ccall below — verify intent.
            cutype = cusparsetype(typea)
            if typeof(A) <: Symmetric
                cutype = cusparsetype('S')
            elseif typeof(A) <: Hermitian
                cutype = cusparsetype('H')
            end
            # CSC storage is the CSR of the transpose, so flip the operation
            if transa == 'N' && typeof(Mat) == CudaSparseMatrixCSC{$elty}
                cutransa = cusparseop('T')
            elseif transa == 'T' && typeof(Mat) == CudaSparseMatrixCSC{$elty}
                cutransa = cusparseop('N')
            end
            cuind = cusparseindex(index)
            cudesc = getDescr(A, index)
            m,n = Mat.dims
            # select the pointer/value arrays matching the storage format
            indPtr = typeof(Mat) == CudaSparseMatrixCSC{$elty} ? Mat.colPtr : Mat.rowPtr
            valPtr = typeof(Mat) == CudaSparseMatrixCSC{$elty} ? Mat.rowVal : Mat.colVal
            statuscheck(ccall(($(string(fname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, cusparseOperation_t, Cint,
                               Ptr{cusparseMatDescr_t}, Ptr{$elty}, Ptr{Cint},
                               Ptr{Cint}, cusparseSolveAnalysisInfo_t),
                              cusparsehandle[1], cutransa, m, &cudesc, Mat.nzVal,
                              indPtr, valPtr, info))
            Mat
        end
        # out-of-place variant: factorizes a copy of A
        function ic0(transa::SparseChar,
                     typea::SparseChar,
                     A::CompressedSparse{$elty},
                     info::cusparseSolveAnalysisInfo_t,
                     index::SparseChar)
            ic0!(transa,typea,copy(A),info,index)
        end
    end
end
# csric02
# In-place incomplete Cholesky (zero fill-in) for CSR matrices using the
# csric02 three-phase API: buffer-size query, analysis, factorization.
# Overwrites A's values with the factor and returns A.
for (bname,aname,sname,elty) in ((:cusparseScsric02_bufferSize, :cusparseScsric02_analysis, :cusparseScsric02, :Float32),
                                 (:cusparseDcsric02_bufferSize, :cusparseDcsric02_analysis, :cusparseDcsric02, :Float64),
                                 (:cusparseCcsric02_bufferSize, :cusparseCcsric02_analysis, :cusparseCcsric02, :Complex64),
                                 (:cusparseZcsric02_bufferSize, :cusparseZcsric02_analysis, :cusparseZcsric02, :Complex128))
    @eval begin
        function ic02!(A::CudaSparseMatrixCSR{$elty},
                       index::SparseChar)
            cuind = cusparseindex(index)
            cudesc = cusparseMatDescr_t(CUSPARSE_MATRIX_TYPE_GENERAL, CUSPARSE_FILL_MODE_LOWER, CUSPARSE_DIAG_TYPE_NON_UNIT, cuind)
            m,n = A.dims
            if( m != n )
                throw(DimensionMismatch("A must be square, but has dimensions ($m,$n)!"))
            end
            info = csric02Info_t[0]
            cusparseCreateCsric02Info(info)
            # phase 1: workspace size
            bufSize = Array(Cint,1)
            statuscheck(ccall(($(string(bname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, Cint, Cint,
                               Ptr{cusparseMatDescr_t}, Ptr{$elty}, Ptr{Cint},
                               Ptr{Cint}, csric02Info_t, Ptr{Cint}),
                              cusparsehandle[1], m, A.nnz, &cudesc, A.nzVal,
                              A.rowPtr, A.colVal, info[1], bufSize))
            buffer = CudaArray(zeros(UInt8, bufSize[1]))
            # phase 2: analysis
            statuscheck(ccall(($(string(aname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, Cint, Cint,
                               Ptr{cusparseMatDescr_t}, Ptr{$elty}, Ptr{Cint},
                               Ptr{Cint}, csric02Info_t, cusparseSolvePolicy_t,
                               Ptr{Void}), cusparsehandle[1], m, A.nnz, &cudesc,
                              A.nzVal, A.rowPtr, A.colVal, info[1],
                              CUSPARSE_SOLVE_POLICY_USE_LEVEL, buffer))
            # zero-pivot check: position >= 0 reports a bad diagonal entry
            posit = Array(Cint,1)
            statuscheck(ccall((:cusparseXcsric02_zeroPivot, libcusparse),
                        cusparseStatus_t, (cusparseHandle_t, csric02Info_t,
                        Ptr{Cint}), cusparsehandle[1], info[1], posit))
            if( posit[1] >= 0 )
                throw(string("Structural/numerical zero in A at (",posit[1],posit[1],")"))
            end
            # phase 3: numerical factorization, in place in A.nzVal
            statuscheck(ccall(($(string(sname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, Cint, Cint,
                               Ptr{cusparseMatDescr_t}, Ptr{$elty}, Ptr{Cint},
                               Ptr{Cint}, csric02Info_t, cusparseSolvePolicy_t,
                               Ptr{Void}), cusparsehandle[1], m, A.nnz,
                              &cudesc, A.nzVal, A.rowPtr, A.colVal, info[1],
                              CUSPARSE_SOLVE_POLICY_USE_LEVEL, buffer))
            cusparseDestroyCsric02Info(info[1])
            A
        end
    end
end
# cscic02
# CSC counterpart of ic02!: identical three-phase csric02 flow, but the CSC
# colPtr/rowVal arrays are handed to cusparse in the csr argument slots
# (the CSC data is the CSR layout of the transpose; A is square here).
for (bname,aname,sname,elty) in ((:cusparseScsric02_bufferSize, :cusparseScsric02_analysis, :cusparseScsric02, :Float32),
                                 (:cusparseDcsric02_bufferSize, :cusparseDcsric02_analysis, :cusparseDcsric02, :Float64),
                                 (:cusparseCcsric02_bufferSize, :cusparseCcsric02_analysis, :cusparseCcsric02, :Complex64),
                                 (:cusparseZcsric02_bufferSize, :cusparseZcsric02_analysis, :cusparseZcsric02, :Complex128))
    @eval begin
        function ic02!(A::CudaSparseMatrixCSC{$elty},
                       index::SparseChar)
            cuind = cusparseindex(index)
            cudesc = cusparseMatDescr_t(CUSPARSE_MATRIX_TYPE_GENERAL, CUSPARSE_FILL_MODE_LOWER, CUSPARSE_DIAG_TYPE_NON_UNIT, cuind)
            m,n = A.dims
            if( m != n )
                throw(DimensionMismatch("A must be square, but has dimensions ($m,$n)!"))
            end
            info = csric02Info_t[0]
            cusparseCreateCsric02Info(info)
            # phase 1: workspace size
            bufSize = Array(Cint,1)
            statuscheck(ccall(($(string(bname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, Cint, Cint,
                               Ptr{cusparseMatDescr_t}, Ptr{$elty}, Ptr{Cint},
                               Ptr{Cint}, csric02Info_t, Ptr{Cint}),
                              cusparsehandle[1], m, A.nnz, &cudesc, A.nzVal,
                              A.colPtr, A.rowVal, info[1], bufSize))
            buffer = CudaArray(zeros(UInt8, bufSize[1]))
            # phase 2: analysis
            statuscheck(ccall(($(string(aname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, Cint, Cint,
                               Ptr{cusparseMatDescr_t}, Ptr{$elty}, Ptr{Cint},
                               Ptr{Cint}, csric02Info_t, cusparseSolvePolicy_t,
                               Ptr{Void}), cusparsehandle[1], m, A.nnz, &cudesc,
                              A.nzVal, A.colPtr, A.rowVal, info[1],
                              CUSPARSE_SOLVE_POLICY_USE_LEVEL, buffer))
            # zero-pivot check: position >= 0 reports a bad diagonal entry
            posit = Array(Cint,1)
            statuscheck(ccall((:cusparseXcsric02_zeroPivot, libcusparse),
                        cusparseStatus_t, (cusparseHandle_t, csric02Info_t,
                        Ptr{Cint}), cusparsehandle[1], info[1], posit))
            if( posit[1] >= 0 )
                throw(string("Structural/numerical zero in A at (",posit[1],posit[1],")"))
            end
            # phase 3: numerical factorization, in place in A.nzVal
            statuscheck(ccall(($(string(sname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, Cint, Cint,
                               Ptr{cusparseMatDescr_t}, Ptr{$elty}, Ptr{Cint},
                               Ptr{Cint}, csric02Info_t, cusparseSolvePolicy_t,
                               Ptr{Void}), cusparsehandle[1], m, A.nnz,
                              &cudesc, A.nzVal, A.colPtr, A.rowVal, info[1],
                              CUSPARSE_SOLVE_POLICY_USE_LEVEL, buffer))
            cusparseDestroyCsric02Info(info[1])
            A
        end
    end
end
# ilu0 - incomplete LU factorization with no pivoting
# Legacy cusparse?csrilu0 API, analogous to ic0 above: requires an analysis
# info object; the in-place variant overwrites the matrix values.
for (fname,elty) in ((:cusparseScsrilu0, :Float32),
                     (:cusparseDcsrilu0, :Float64),
                     (:cusparseCcsrilu0, :Complex64),
                     (:cusparseZcsrilu0, :Complex128))
    @eval begin
        function ilu0!(transa::SparseChar,
                       A::CompressedSparse{$elty},
                       info::cusparseSolveAnalysisInfo_t,
                       index::SparseChar)
            # unwrap Symmetric/Hermitian wrappers to the raw sparse storage
            Mat = A
            if typeof(A) <: Base.LinAlg.HermOrSym
                Mat = A.data
            end
            cutransa = cusparseop(transa)
            # CSC storage is the CSR of the transpose, so flip the operation
            if transa == 'N' && typeof(Mat) == CudaSparseMatrixCSC{$elty}
                cutransa = cusparseop('T')
            elseif transa == 'T' && typeof(Mat) == CudaSparseMatrixCSC{$elty}
                cutransa = cusparseop('N')
            end
            cuind = cusparseindex(index)
            cudesc = getDescr(A, index)
            m,n = Mat.dims
            # select the pointer/value arrays matching the storage format
            indPtr = typeof(Mat) == CudaSparseMatrixCSC{$elty} ? Mat.colPtr : Mat.rowPtr
            valPtr = typeof(Mat) == CudaSparseMatrixCSC{$elty} ? Mat.rowVal : Mat.colVal
            statuscheck(ccall(($(string(fname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, cusparseOperation_t, Cint,
                               Ptr{cusparseMatDescr_t}, Ptr{$elty}, Ptr{Cint},
                               Ptr{Cint}, cusparseSolveAnalysisInfo_t),
                              cusparsehandle[1], cutransa, m, &cudesc, Mat.nzVal,
                              indPtr, valPtr, info))
            Mat
        end
        # out-of-place variant: factorizes a copy of A
        function ilu0(transa::SparseChar,
                      A::CompressedSparse{$elty},
                      info::cusparseSolveAnalysisInfo_t,
                      index::SparseChar)
            ilu0!(transa,copy(A),info,index)
        end
    end
end
# csrilu02
# In-place incomplete LU (zero fill-in) for CSR matrices via the csrilu02
# three-phase API: buffer-size query, analysis, factorization.  Overwrites
# A's values with the combined L/U factors and returns A.
for (bname,aname,sname,elty) in ((:cusparseScsrilu02_bufferSize, :cusparseScsrilu02_analysis, :cusparseScsrilu02, :Float32),
                                 (:cusparseDcsrilu02_bufferSize, :cusparseDcsrilu02_analysis, :cusparseDcsrilu02, :Float64),
                                 (:cusparseCcsrilu02_bufferSize, :cusparseCcsrilu02_analysis, :cusparseCcsrilu02, :Complex64),
                                 (:cusparseZcsrilu02_bufferSize, :cusparseZcsrilu02_analysis, :cusparseZcsrilu02, :Complex128))
    @eval begin
        function ilu02!(A::CudaSparseMatrixCSR{$elty},
                        index::SparseChar)
            cuind = cusparseindex(index)
            cudesc = cusparseMatDescr_t(CUSPARSE_MATRIX_TYPE_GENERAL, CUSPARSE_FILL_MODE_LOWER, CUSPARSE_DIAG_TYPE_NON_UNIT, cuind)
            m,n = A.dims
            if( m != n )
                throw(DimensionMismatch("A must be square, but has dimensions ($m,$n)!"))
            end
            info = csrilu02Info_t[0]
            cusparseCreateCsrilu02Info(info)
            # phase 1: workspace size
            bufSize = Array(Cint,1)
            statuscheck(ccall(($(string(bname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, Cint, Cint,
                               Ptr{cusparseMatDescr_t}, Ptr{$elty}, Ptr{Cint},
                               Ptr{Cint}, csrilu02Info_t, Ptr{Cint}),
                              cusparsehandle[1], m, A.nnz, &cudesc, A.nzVal,
                              A.rowPtr, A.colVal, info[1], bufSize))
            buffer = CudaArray(zeros(UInt8, bufSize[1]))
            # phase 2: analysis
            statuscheck(ccall(($(string(aname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, Cint, Cint,
                               Ptr{cusparseMatDescr_t}, Ptr{$elty}, Ptr{Cint},
                               Ptr{Cint}, csrilu02Info_t, cusparseSolvePolicy_t,
                               Ptr{Void}), cusparsehandle[1], m, A.nnz, &cudesc,
                              A.nzVal, A.rowPtr, A.colVal, info[1],
                              CUSPARSE_SOLVE_POLICY_USE_LEVEL, buffer))
            # zero-pivot check: position >= 0 reports a bad diagonal entry
            posit = Array(Cint,1)
            statuscheck(ccall((:cusparseXcsrilu02_zeroPivot, libcusparse),
                        cusparseStatus_t, (cusparseHandle_t, csrilu02Info_t,
                        Ptr{Cint}), cusparsehandle[1], info[1], posit))
            if( posit[1] >= 0 )
                throw(string("Structural zero in A at (",posit[1],posit[1],")"))
            end
            # phase 3: numerical factorization, in place in A.nzVal
            statuscheck(ccall(($(string(sname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, Cint, Cint,
                               Ptr{cusparseMatDescr_t}, Ptr{$elty}, Ptr{Cint},
                               Ptr{Cint}, csrilu02Info_t, cusparseSolvePolicy_t,
                               Ptr{Void}), cusparsehandle[1], m, A.nnz,
                              &cudesc, A.nzVal, A.rowPtr, A.colVal, info[1],
                              CUSPARSE_SOLVE_POLICY_USE_LEVEL, buffer))
            cusparseDestroyCsrilu02Info(info[1])
            A
        end
    end
end
# cscilu02
# CSC counterpart of ilu02!: identical three-phase csrilu02 flow with the
# CSC colPtr/rowVal arrays in the csr argument slots (the CSC data is the
# CSR layout of the transpose; A is square here).
for (bname,aname,sname,elty) in ((:cusparseScsrilu02_bufferSize, :cusparseScsrilu02_analysis, :cusparseScsrilu02, :Float32),
                                 (:cusparseDcsrilu02_bufferSize, :cusparseDcsrilu02_analysis, :cusparseDcsrilu02, :Float64),
                                 (:cusparseCcsrilu02_bufferSize, :cusparseCcsrilu02_analysis, :cusparseCcsrilu02, :Complex64),
                                 (:cusparseZcsrilu02_bufferSize, :cusparseZcsrilu02_analysis, :cusparseZcsrilu02, :Complex128))
    @eval begin
        function ilu02!(A::CudaSparseMatrixCSC{$elty},
                        index::SparseChar)
            cuind = cusparseindex(index)
            cudesc = cusparseMatDescr_t(CUSPARSE_MATRIX_TYPE_GENERAL, CUSPARSE_FILL_MODE_LOWER, CUSPARSE_DIAG_TYPE_NON_UNIT, cuind)
            m,n = A.dims
            if( m != n )
                throw(DimensionMismatch("A must be square, but has dimensions ($m,$n)!"))
            end
            info = csrilu02Info_t[0]
            cusparseCreateCsrilu02Info(info)
            # phase 1: workspace size
            bufSize = Array(Cint,1)
            statuscheck(ccall(($(string(bname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, Cint, Cint,
                               Ptr{cusparseMatDescr_t}, Ptr{$elty}, Ptr{Cint},
                               Ptr{Cint}, csrilu02Info_t, Ptr{Cint}),
                              cusparsehandle[1], m, A.nnz, &cudesc, A.nzVal,
                              A.colPtr, A.rowVal, info[1], bufSize))
            buffer = CudaArray(zeros(UInt8, bufSize[1]))
            # phase 2: analysis
            statuscheck(ccall(($(string(aname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, Cint, Cint,
                               Ptr{cusparseMatDescr_t}, Ptr{$elty}, Ptr{Cint},
                               Ptr{Cint}, csrilu02Info_t, cusparseSolvePolicy_t,
                               Ptr{Void}), cusparsehandle[1], m, A.nnz, &cudesc,
                              A.nzVal, A.colPtr, A.rowVal, info[1],
                              CUSPARSE_SOLVE_POLICY_USE_LEVEL, buffer))
            # zero-pivot check: position >= 0 reports a bad diagonal entry
            posit = Array(Cint,1)
            statuscheck(ccall((:cusparseXcsrilu02_zeroPivot, libcusparse),
                        cusparseStatus_t, (cusparseHandle_t, csrilu02Info_t,
                        Ptr{Cint}), cusparsehandle[1], info[1], posit))
            if( posit[1] >= 0 )
                throw(string("Structural zero in A at (",posit[1],posit[1],")"))
            end
            # phase 3: numerical factorization, in place in A.nzVal
            statuscheck(ccall(($(string(sname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, Cint, Cint,
                               Ptr{cusparseMatDescr_t}, Ptr{$elty}, Ptr{Cint},
                               Ptr{Cint}, csrilu02Info_t, cusparseSolvePolicy_t,
                               Ptr{Void}), cusparsehandle[1], m, A.nnz,
                              &cudesc, A.nzVal, A.colPtr, A.rowVal, info[1],
                              CUSPARSE_SOLVE_POLICY_USE_LEVEL, buffer))
            cusparseDestroyCsrilu02Info(info[1])
            A
        end
    end
end
# bsric02
# In-place incomplete Cholesky (zero fill-in) for BSR matrices via the
# bsric02 three-phase API (buffer-size query, analysis, factorization).
# Works on block rows (mb = m / blockDim); overwrites A's values.
for (bname,aname,sname,elty) in ((:cusparseSbsric02_bufferSize, :cusparseSbsric02_analysis, :cusparseSbsric02, :Float32),
                                 (:cusparseDbsric02_bufferSize, :cusparseDbsric02_analysis, :cusparseDbsric02, :Float64),
                                 (:cusparseCbsric02_bufferSize, :cusparseCbsric02_analysis, :cusparseCbsric02, :Complex64),
                                 (:cusparseZbsric02_bufferSize, :cusparseZbsric02_analysis, :cusparseZbsric02, :Complex128))
    @eval begin
        function ic02!(A::CudaSparseMatrixBSR{$elty},
                       index::SparseChar)
            cudir = cusparsedir(A.dir)
            cuind = cusparseindex(index)
            cudesc = cusparseMatDescr_t(CUSPARSE_MATRIX_TYPE_GENERAL, CUSPARSE_FILL_MODE_UPPER, CUSPARSE_DIAG_TYPE_NON_UNIT, cuind)
            m,n = A.dims
            if( m != n )
                throw(DimensionMismatch("A must be square, but has dimensions ($m,$n)!"))
            end
            # number of block rows
            mb = div(m,A.blockDim)
            info = bsric02Info_t[0]
            cusparseCreateBsric02Info(info)
            # phase 1: workspace size
            bufSize = Array(Cint,1)
            statuscheck(ccall(($(string(bname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, cusparseDirection_t, Cint,
                               Cint, Ptr{cusparseMatDescr_t}, Ptr{$elty},
                               Ptr{Cint}, Ptr{Cint}, Cint, bsric02Info_t,
                               Ptr{Cint}), cusparsehandle[1], cudir, mb, A.nnz,
                              &cudesc, A.nzVal, A.rowPtr, A.colVal,
                              A.blockDim, info[1], bufSize))
            buffer = CudaArray(zeros(UInt8, bufSize[1]))
            # phase 2: analysis
            statuscheck(ccall(($(string(aname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, cusparseDirection_t, Cint,
                               Cint, Ptr{cusparseMatDescr_t}, Ptr{$elty},
                               Ptr{Cint}, Ptr{Cint}, Cint, bsric02Info_t,
                               cusparseSolvePolicy_t, Ptr{Void}),
                              cusparsehandle[1], cudir, mb, A.nnz, &cudesc,
                              A.nzVal, A.rowPtr, A.colVal, A.blockDim, info[1],
                              CUSPARSE_SOLVE_POLICY_USE_LEVEL, buffer))
            # zero-pivot check: position >= 0 reports a bad diagonal entry
            posit = Array(Cint,1)
            statuscheck(ccall((:cusparseXbsric02_zeroPivot, libcusparse),
                        cusparseStatus_t, (cusparseHandle_t, bsric02Info_t,
                        Ptr{Cint}), cusparsehandle[1], info[1], posit))
            if( posit[1] >= 0 )
                throw(string("Structural/numerical zero in A at (",posit[1],posit[1],")"))
            end
            # phase 3: numerical factorization, in place in A.nzVal
            statuscheck(ccall(($(string(sname)),libcusparse), cusparseStatus_t,
                              (cusparseHandle_t, cusparseDirection_t, Cint,
                               Cint, Ptr{cusparseMatDescr_t}, Ptr{$elty},
                               Ptr{Cint}, Ptr{Cint}, Cint,bsric02Info_t,
                               cusparseSolvePolicy_t, Ptr{Void}),
                              cusparsehandle[1], cudir, mb, A.nnz, &cudesc,
                              A.nzVal, A.rowPtr, A.colVal, A.blockDim, info[1],
                              CUSPARSE_SOLVE_POLICY_USE_LEVEL, buffer))
            cusparseDestroyBsric02Info(info[1])
            A
        end
    end
end
# bsrilu02
for (bname,aname,sname,elty) in ((:cusparseSbsrilu02_bufferSize, :cusparseSbsrilu02_analysis, :cusparseSbsrilu02, :Float32),
(:cusparseDbsrilu02_bufferSize, :cusparseDbsrilu02_analysis, :cusparseDbsrilu02, :Float64),
(:cusparseCbsrilu02_bufferSize, :cusparseCbsrilu02_analysis, :cusparseCbsrilu02, :Complex64),
(:cusparseZbsrilu02_bufferSize, :cusparseZbsrilu02_analysis, :cusparseZbsrilu02, :Complex128))
@eval begin
function ilu02!(A::CudaSparseMatrixBSR{$elty},
index::SparseChar)
cudir = cusparsedir(A.dir)
cuind = cusparseindex(index)
cudesc = cusparseMatDescr_t(CUSPARSE_MATRIX_TYPE_GENERAL, CUSPARSE_FILL_MODE_UPPER, CUSPARSE_DIAG_TYPE_NON_UNIT, cuind)
m,n = A.dims
if( m != n )
throw(DimensionMismatch("A must be square, but has dimensions ($m,$n)!"))
end
mb = div(m,A.blockDim)
info = bsrilu02Info_t[0]
cusparseCreateBsrilu02Info(info)
bufSize = Array(Cint,1)
statuscheck(ccall(($(string(bname)),libcusparse), cusparseStatus_t,
(cusparseHandle_t, cusparseDirection_t, Cint,
Cint, Ptr{cusparseMatDescr_t}, Ptr{$elty},
Ptr{Cint}, Ptr{Cint}, Cint, bsrilu02Info_t,
Ptr{Cint}), cusparsehandle[1], cudir, mb, A.nnz,
&cudesc, A.nzVal, A.rowPtr, A.colVal,
A.blockDim, info[1], bufSize))
buffer = CudaArray(zeros(UInt8, bufSize[1]))
statuscheck(ccall(($(string(aname)),libcusparse), cusparseStatus_t,
(cusparseHandle_t, cusparseDirection_t, Cint,
Cint, Ptr{cusparseMatDescr_t}, Ptr{$elty},
Ptr{Cint}, Ptr{Cint}, Cint, bsrilu02Info_t,
cusparseSolvePolicy_t, Ptr{Void}),
cusparsehandle[1], cudir, mb, A.nnz, &cudesc,
A.nzVal, A.rowPtr, A.colVal, A.blockDim, info[1],
CUSPARSE_SOLVE_POLICY_USE_LEVEL, buffer))
posit = Array(Cint,1)
statuscheck(ccall((:cusparseXbsrilu02_zeroPivot, libcusparse),
cusparseStatus_t, (cusparseHandle_t, bsrilu02Info_t,
Ptr{Cint}), cusparsehandle[1], info[1], posit))
if( posit[1] >= 0 )
throw(string("Structural/numerical zero in A at (",posit[1],posit[1],")"))
end
statuscheck(ccall(($(string(sname)),libcusparse), cusparseStatus_t,
(cusparseHandle_t, cusparseDirection_t, Cint,
Cint, Ptr{cusparseMatDescr_t}, Ptr{$elty},
Ptr{Cint}, Ptr{Cint}, Cint,bsrilu02Info_t,
cusparseSolvePolicy_t, Ptr{Void}),
cusparsehandle[1], cudir, mb, A.nnz, &cudesc,
A.nzVal, A.rowPtr, A.colVal, A.blockDim, info[1],
CUSPARSE_SOLVE_POLICY_USE_LEVEL, buffer))
cusparseDestroyBsrilu02Info(info[1])
A
end
end
end
for elty in (:Float32, :Float64, :Complex64, :Complex128)
@eval begin
function ilu02(A::CudaSparseMatrix{$elty},
index::SparseChar)
ilu02!(copy(A),index)
end
function ic02(A::CudaSparseMatrix{$elty},
index::SparseChar)
ic02!(copy(A),index)
end
function ilu02(A::HermOrSym{$elty,CudaSparseMatrix{$elty}},
index::SparseChar)
ilu02!(copy(A.data),index)
end
function ic02(A::HermOrSym{$elty,CudaSparseMatrix{$elty}},
index::SparseChar)
ic02!(copy(A.data),index)
end
end
end
# gtsv - general tridiagonal solver
for (fname,elty) in ((:cusparseSgtsv, :Float32),
(:cusparseDgtsv, :Float64),
(:cusparseCgtsv, :Complex64),
(:cusparseZgtsv, :Complex128))
@eval begin
function gtsv!(dl::CudaVector{$elty},
d::CudaVector{$elty},
du::CudaVector{$elty},
B::CudaMatrix{$elty})
m,n = B.dims
ldb = max(1,stride(B,2))
statuscheck(ccall(($(string(fname)),libcusparse), cusparseStatus_t,
(cusparseHandle_t, Cint, Cint, Ptr{$elty},
Ptr{$elty}, Ptr{$elty}, Ptr{$elty}, Cint),
cusparsehandle[1], m, n, dl, d, du, B, ldb))
B
end
function gtsv(dl::CudaVector{$elty},
d::CudaVector{$elty},
du::CudaVector{$elty},
B::CudaMatrix{$elty})
gtsv!(dl,d,du,copy(B))
end
end
end
# gtsv_nopivot - general tridiagonal solver without pivoting
for (fname,elty) in ((:cusparseSgtsv_nopivot, :Float32),
(:cusparseDgtsv_nopivot, :Float64),
(:cusparseCgtsv_nopivot, :Complex64),
(:cusparseZgtsv_nopivot, :Complex128))
@eval begin
function gtsv_nopivot!(dl::CudaVector{$elty},
d::CudaVector{$elty},
du::CudaVector{$elty},
B::CudaMatrix{$elty})
m,n = B.dims
ldb = max(1,stride(B,2))
statuscheck(ccall(($(string(fname)),libcusparse), cusparseStatus_t,
(cusparseHandle_t, Cint, Cint, Ptr{$elty},
Ptr{$elty}, Ptr{$elty}, Ptr{$elty}, Cint),
cusparsehandle[1], m, n, dl, d, du, B, ldb))
B
end
function gtsv_nopivot(dl::CudaVector{$elty},
d::CudaVector{$elty},
du::CudaVector{$elty},
B::CudaMatrix{$elty})
gtsv_nopivot!(dl,d,du,copy(B))
end
end
end
# gtsvStridedBatch - batched general tridiagonal solver
for (fname,elty) in ((:cusparseSgtsvStridedBatch, :Float32),
(:cusparseDgtsvStridedBatch, :Float64),
(:cusparseCgtsvStridedBatch, :Complex64),
(:cusparseZgtsvStridedBatch, :Complex128))
@eval begin
function gtsvStridedBatch!(dl::CudaVector{$elty},
d::CudaVector{$elty},
du::CudaVector{$elty},
X::CudaVector{$elty},
batchCount::Integer,
batchStride::Integer)
m = div(length(X),batchCount)
statuscheck(ccall(($(string(fname)),libcusparse), cusparseStatus_t,
(cusparseHandle_t, Cint, Ptr{$elty}, Ptr{$elty},
Ptr{$elty}, Ptr{$elty}, Cint, Cint),
cusparsehandle[1], m, dl, d, du, X,
batchCount, batchStride))
X
end
function gtsvStridedBatch(dl::CudaVector{$elty},
d::CudaVector{$elty},
du::CudaVector{$elty},
X::CudaVector{$elty},
batchCount::Integer,
batchStride::Integer)
gtsvStridedBatch!(dl,d,du,copy(X),batchCount,batchStride)
end
end
end
| [
2,
315,
2410,
198,
198,
11748,
7308,
13,
14993,
2348,
70,
25,
18113,
5574,
43094,
11,
27741,
14824,
21413,
11,
1635,
11,
1343,
11,
532,
11,
3467,
11,
317,
62,
76,
377,
62,
33,
83,
11,
1629,
62,
76,
377,
62,
33,
11,
1629,
62,
7... | 1.601063 | 81,073 |
<gh_stars>1-10
export absolute_error
function Base.:(|>)(t::Transform{T}, p::Point{3,U}) where {T,U}
(xₚ, yₚ, zₚ, wₚ) = sum(transpose(t.m[:,StaticArrays.SUnitRange(1,3)]) .* p,), dims=1) .+ transpose(t.m[:,StaticArrays.SUnitRange(4,4))]
return wₚ ≈ 1 ? promote_type(T, typeof(p))(xₚ, yₚ, zₚ) / wₚ : promote_type(T, typeof(p))(xₚ, yₚ, zₚ)
end
absolute_error(t::Transform{T}, p::Point3{U}) where {T,U} =
Γ(3.0) * Vector3{promote_type(T,U)}(
sum(abs.(transpose(t.m[StaticArrays.SUnitRange(1,3), StaticArrays.SUnitRange(1,3)]) .* p), dims=1))
Base.:(|>)(t::Transform{T}, v::Vector3{U}) where {T,U} =
Vector3{promote_type(T,U)}(
sum(transpose(t.m[StaticArrays.SUnitRange(1,3), StaticArrays.SUnitRange(1,3)]) .* v, dims=1))
Base.:(|>)(t::Transform{T}, n::Normal3{U}}) where {T<:AbstractFloat, U<:Number} =
Normal3{promote_type(T,U)}(
sum(transpose(t.m_inv[StaticArrays.SUnitRange(1,3), StaticArrays.SUnitRange(1,3)]) .* n, dims=1))
function Base.:(|>)(t::Transform{T}, r::Ray{U}) where {T<:AbstractFloat, U<:AbstractFloat}
m² = magnitude²(r.d)
if m² > 0.0
δₜ = (abs(r.d) ⋅ absolute_error(t, r.o)) / m²
o = r.o + r.d * δₜ
return Ray{promote_type(T,U)}(t |> o, t |> r.d, r.time)
end
Ray{promote_type(T,U)}(t |> r.o, t |> r.d, r.time)
end
function Base.:(|>)(t::Transform{T}, b::Bounds{N,U}) where {N,T,U}
Bounds{N, promote_type(T,U)}(
sum(min.(
t.m[StaticArrays.SUnitRange(1,3), StaticArrays.SUnitRange(1,4)] .* b.p_min,
t.m[StaticArrays.SUnitRange(1,3), StaticArrays.SUnitRange(1,4)] .* b.p_max)),
sum(max.(
t.m[StaticArrays.SUnitRange(1,3), StaticArrays.SUnitRange(1,4)] .* b.p_min,
t.m[StaticArrays.SUnitRange(1,3), StaticArrays.SUnitRange(1,4)] .* b.p_max)))
end
| [
27,
456,
62,
30783,
29,
16,
12,
940,
198,
39344,
4112,
62,
18224,
198,
198,
8818,
7308,
11207,
7,
91,
29,
5769,
83,
3712,
41762,
90,
51,
5512,
279,
3712,
12727,
90,
18,
11,
52,
30072,
810,
1391,
51,
11,
52,
92,
198,
220,
220,
... | 1.869342 | 972 |
<gh_stars>0
using GenomicAnnotations
using GenomicMaps
using ColorTypes
# You can add any kind of annotation that you want to display.
# Here, I add COG annotation:
function addcogs!(chr, filename)
cogs = split.(readlines(filename), Ref('\t'))
i = 1
for gene in @genes(chr, :feature == "CDS")
if gene.locus_tag == cogs[i][1]
if occursin(r"\w", cogs[i][2])
gene.cog = cogs[i][2]
end
i += 1
end
end
end
# Colour scheme for COG categories:
cogcolours = Dict("B"=>RGB{Float64}(1.0,0.630714,0.576563),
"M"=>RGB{Float64}(0.756869,0.916499,0.965176),
"I"=>RGB{Float64}(0.187839,0.54561,0.252343),
"X"=>RGB{Float64}(0.540006,0.493982,0.813567),
"Y"=>RGB{Float64}(0.0973617,0.285282,0.5329),
"Z"=>RGB{Float64}(0.0418427,0.156645,0.341597),
"L"=>RGB{Float64}(0.426131,0.0441442,0.0465628),
"O"=>RGB{Float64}(0.518954,0.802339,0.930272),
"F"=>RGB{Float64}(0.587882,0.865532,0.51112),
"Q"=>RGB{Float64}(0.0,0.225356,0.101282),
"D"=>RGB{Float64}(0.862653,0.958477,0.981395),
"V"=>RGB{Float64}(0.188382,0.529206,0.795898),
"U"=>RGB{Float64}(0.277786,0.635283,0.863472),
"E"=>RGB{Float64}(0.711814,0.932724,0.629136),
"T"=>RGB{Float64}(0.394211,0.72627,0.90426),
"H"=>RGB{Float64}(0.32729,0.673206,0.326717),
"P"=>RGB{Float64}(0.0232916,0.395886,0.180144),
"G"=>RGB{Float64}(0.459895,0.779462,0.41097),
"N"=>RGB{Float64}(0.641543,0.865092,0.94902),
"K"=>RGB{Float64}(0.825431,0.118066,0.106858),
"C"=>RGB{Float64}(0.835916,0.980813,0.770886),
"R"=>RGB{Float64}(0.807625,0.787968,0.949453),
"W"=>RGB{Float64}(0.137797,0.411028,0.686187),
"A"=>RGB{Float64}(1.0,0.808314,0.771835),
"S"=>RGB{Float64}(0.236943,0.0166779,0.407047),
"J"=>RGB{Float64}(1.0,0.389569,0.336934))
# First download annotations for E. coli:
download("ftp://ftp.ncbi.nlm.nih.gov/genomes/all/GCA/000/005/845/GCA_000005845.2_ASM584v2/GCA_000005845.2_ASM584v2_genomic.gbff.gz", "ecoli.gbk.gz")
# Then read the annotations and add COGs:
chr = readgbk("ecoli.gbk.gz")[1]
addcogs!(chr, "ecoli_cogs.tsv")
# The output can be customised, see src/initialise.jl for all options. Here I
# provide a function that will be run on each gene to determine its colour:
colourby_cog = g -> unique(String.(split(get(g, :cog, ""), "")))
drawgenome(chr;
outfile = "ecoli.svg",
colourmap = cogcolours,
colourfunction = colourby_cog,
annotate = true,
nbreaks = 40)
| [
27,
456,
62,
30783,
29,
15,
198,
3500,
5215,
10179,
2025,
30078,
198,
3500,
5215,
10179,
47010,
198,
3500,
5315,
31431,
628,
198,
2,
921,
460,
751,
597,
1611,
286,
23025,
326,
345,
765,
284,
3359,
13,
198,
2,
3423,
11,
314,
751,
7... | 1.881872 | 1,346 |
export MCVanilla, Vegas, Domain
export integral, ∫
using QuickTypes: @qstruct
using ArgCheck
using LinearAlgebra, Statistics
using StaticArrays
using Base.Threads: @threads
using Setfield: @settable
import Random
abstract type MCAlgorithm end
@settable @qstruct MCVanilla{R}(
neval::Int64=10^6,
rng::R=GLOBAL_PAR_RNG,
) do
@argcheck neval > 2
end <: MCAlgorithm
"""
Domain
Encodes a domain of integration, i.e. a product of finte intervals.
"""
struct Domain{N,T}
lower::SVector{N,T}
upper::SVector{N,T}
end
# function cartesian_product(d1::Domain, d2::Domain)
# Domain(vcat(d1.lower, d2.lower), vcat(d1.upper, d2.upper))
# end
function pointtype(::Type{Domain{N,T}}) where {N,T}
SVector{N,T}
end
function volume(dom::Domain)
prod(dom.upper - dom.lower)
end
function Base.ndims(dom::Domain{N,T}) where {N,T}
N
end
function Domain(interval::NTuple{2,Number})
(a,b) = float.(promote(interval...))
Domain(@SVector[a], @SVector[b])
end
function Domain(dom::Domain)
dom
end
function Domain(lims)
lower = SVector(map(float ∘ first, lims))
upper = SVector(map(float ∘ last, lims))
Domain(lower, upper)
end
function uniform(rng, dom::Domain)
V = pointtype(typeof(dom))
Δ = (dom.upper - dom.lower)
dom.lower .+ rand(rng, V) .* Δ
end
"""
integral(f, dom [,alg])
Compute the integral of the function `f` over a domain `dom`, via the algorithm `alg`.
"""
function integral(f, dom, alg=Vegas())
f2, dom2, alg2 = canonicalize(f, dom, alg)
integral_kernel(f2, dom2, alg2)
end
function canonicalize(f, dom, alg)
f, Domain(dom), alg
end
function canonicalize(f, I::NTuple{2,Number}, alg)
f_v = f ∘ first
dom = Domain(I)
f_v, dom, alg
end
const ∫ = integral
function integral_kernel(f, dom::Domain, alg::MCVanilla)
mc_kernel(f, alg.rng, dom, neval=alg.neval)
end
function draw(rng, dom::Domain)
vol = volume(dom)
x = uniform(rng, dom)
(position=x, weight = volume(dom))
end
"""
mc_kernel(f, rng::AbstractRNG, dom; neval)
Monte Carlo integration of function `f` over `dom`.
`dom` must support the following methods:
* volume(dom): Return the volume of dom
* draw(rng, dom): Return an object with properties `position::SVector`, `weight::Real`.
"""
function mc_kernel(f, rng::AbstractRNG, dom; neval)
N = neval
x = uniform(rng, dom)
y = float(f(x)) * volume(dom)
sum = y
sum2 = y.^2
for _ in 1:(N-1)
s = draw(rng, dom)
y = s.weight * f(s.position)
sum += y
sum2 += y.^2
end
mean = sum/N
var_f = (sum2/N) - mean .^2
var_f = max(zero(var_f), var_f)
var_fmean = var_f / N
std = sqrt.(var_fmean)
(value = mean, std=std, neval=N)
end
function mc_kernel(f, p::ParallelRNG, dom; neval)
rngs = p.rngs
res1 = mc_kernel(f, rngs[1], dom, neval=2)
T = typeof(res1)
nthreads = length(rngs)
results = Vector{T}(undef, nthreads)
neval_i = ceil(Int, neval / nthreads)
@threads for i in 1:nthreads
res = mc_kernel(f, rngs[i], dom, neval=neval_i)
results[i] = res
end
fuseall(results)
end
function fuseall(results)
N = sum(res->res.neval, results)
value = sum(res->res.value * res.neval/N , results)
var = sum(res->(res.std * res.neval/N).^2, results)
(value=value, std=sqrt.(var), neval=N)
end
# function fuse(res1, res2)
# var1 = res1.std .^ 2
# var2 = res2.std .^ 2
# w1, w2 = fusion_weights(var1, var2)
# (
# value = @.(w1 * res1.value + w2*res2.value),
# std = @.(sqrt(w1^2*var1 + w2^2*var2))
# )
# end
#
# function fusion_weights(var1::AbstractVector, var2::AbstractVector)
# pairs = fusion_weights_scalar.(var1, var2)
# first.(pairs), last.(pairs)
# end
#
# function fusion_weights(var1, var2)
# fusion_weights_scalar(var1, var2)
# end
#
# function fusion_weights_scalar(var1, var2)
# z = zero(var1)
# o = one(var1)
# if iszero(var1)
# (o, z)
# elseif iszero(var2)
# (z,o)
# else
# p1 = o/var1
# p2 = o/var2
# p = p1 + p2
# (p1/p, p2/p)
# end
# end
| [
198,
39344,
13122,
25298,
5049,
11,
9621,
11,
20021,
198,
39344,
19287,
11,
18872,
104,
198,
198,
3500,
12029,
31431,
25,
2488,
80,
7249,
198,
3500,
20559,
9787,
198,
3500,
44800,
2348,
29230,
11,
14370,
198,
3500,
36125,
3163,
20477,
1... | 2.152219 | 1,938 |
<gh_stars>1-10
#= MIT License
Copyright (c) 2020, 2021 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE. =#
# data structures for the flight state and the flight log
# functions for creating a demo flight state, demo flight log, loading and saving flight logs
# function se() for reading the settings
# the parameter P is the number of points of the tether, equal to segments+1
# in addition helper functions for working with rotations
"""
Settings
Flat struct, defining the settings of the Simulator and the Viewer.
$(TYPEDFIELDS)
"""
@with_kw mutable struct Settings @deftype Float64
project::String = ""
log_file::String = ""
"file name of the 3D model of the kite for the viewer"
model::String = ""
"name of the kite model to use (KPS3 or KPS4)"
physical_model::String = ""
version::Int64 = 1
"number of tether segments"
segments::Int64 = 0
sample_freq::Int64 = 0
time_lapse = 0
zoom = 0
kite_scale = 1.0
fixed_font::String = ""
abs_tol = 0.0
rel_tol = 0.0
max_iter::Int64 = 1
v_reel_out = 0
c0 = 0
c_s = 0
c2_cor = 0
k_ds = 0
"projected kite area [m^2]"
area = 0
"kite mass incl. sensor unit [kg]"
mass = 0
"height of the kite [m]"
height_k = 0
alpha_cl::Vector{Float64} = []
cl_list::Vector{Float64} = []
alpha_cd::Vector{Float64} = []
cd_list::Vector{Float64} = []
"width of the kite [m]"
width = 0
alpha_zero = 0
alpha_ztip = 0
"relative nose distance; increasing m_k increases C2 of the turn-rate law"
m_k = 0
rel_nose_mass = 0
"mass of the top particle relative to the sum of top and side particles"
rel_top_mass = 0
"relative side area [%]"
rel_side_area = 0
"max depower angle [deg]"
alpha_d_max = 0
"mass of the kite control unit [kg]"
kcu_mass = 0
"power to steering line distance [m]"
power2steer_dist = 0
depower_drum_diameter = 0
depower_offset = 0
tape_thickness = 0
v_depower = 0
v_steering = 0
depower_gain = 0
steering_gain = 0
v_wind = 0
v_wind_ref::Vector{Float64} = [] # wind speed vector at reference height
h_ref = 0
rho_0 = 0
z0 = 0
profile_law::Int64 = 0
alpha = 0
cd_tether = 0
d_tether = 0
d_line = 0
"height of the bridle [m]"
h_bridle = 0
l_bridle = 0
l_tether = 0
damping = 0
c_spring = 0
"density of Dyneema [kg/m³]"
rho_tether = 0
"axial tensile modulus of the tether [Pa]"
e_tether = 0
"initial elevation angle [deg]"
elevation = 0
"simulation time [sim only]"
depower = 0
sim_time = 0
"temperature at reference height [°C]"
temp_ref = 0
"height of groundstation above see level [m]"
height_gnd = 0
"turbulence intensity relative to Cabau, NL"
use_turbulence = 0
"wind speeds at ref height for calculating the turbulent wind field [m/s]"
v_wind_gnds::Vector{Float64} = []
"average height during reel out [m]"
avg_height = 0
"relative turbulence at the v_wind_gnds"
rel_turbs::Vector{Float64} = []
"the expected value of the turbulence intensity at 15 m/s"
i_ref = 0
"five times the average wind speed in m/s at hub height over the full year [m/s]"
v_ref = 0
"grid resolution in z direction [m]"
height_step = 0
"grid resolution in x and y direction [m]"
grid_step = 0
end
const SETTINGS = Settings()
"""
set_data_path(data_path="")
Set the directory for log and config files.
If called without argument, use the data path of the package to obtain the default settings
when calling se().
"""
function set_data_path(data_path="")
if data_path==""
data_path = joinpath(dirname(dirname(pathof(KiteUtils))), "data")
end
if data_path != DATA_PATH[1]
DATA_PATH[1] = data_path
SETTINGS.segments == 0 # enforce reloading of settings.yaml
end
end
"""
load_settings(project="")
Load the project with the given file name. The default project is determined by the content of the file system.yaml .
The project must include the path and the suffix .yaml .
"""
function load_settings(project="")
SETTINGS.segments=0
se(project)
end
"""
copy_settings()
Copy the default settings.yaml and system.yaml files to the folder DATAPATH
(it will be created if it doesn't exist).
"""
function copy_settings()
if ! isdir(DATA_PATH[1])
mkdir(DATA_PATH[1])
end
src_path = joinpath(dirname(pathof(@__MODULE__)), "..", DATA_PATH[1])
cp(joinpath(src_path, "settings.yaml"), joinpath(DATA_PATH[1], "settings.yaml"), force=true)
cp(joinpath(src_path, "system.yaml"), joinpath(DATA_PATH[1], "system.yaml"), force=true)
chmod(joinpath(DATA_PATH[1], "settings.yaml"), 0o664)
chmod(joinpath(DATA_PATH[1], "system.yaml"), 0o664)
end
"""
se()
Getter function for the [`Settings`](@ref) struct.
The default project is determined by the content of the file system.yaml .
"""
function se(project="")
if SETTINGS.segments == 0
if project == ""
# determine which project to load
dict = YAML.load_file(joinpath(DATA_PATH[1], "system.yaml"))
SETTINGS.project = dict["system"]["project"]
end
# load project from YAML
dict = YAML.load_file(joinpath(DATA_PATH[1], SETTINGS.project))
tmp = split(dict["system"]["log_file"], "/")
SETTINGS.log_file = joinpath(tmp[1], tmp[2])
SETTINGS.segments = dict["system"]["segments"]
SETTINGS.sample_freq = dict["system"]["sample_freq"]
SETTINGS.time_lapse = dict["system"]["time_lapse"]
SETTINGS.sim_time = dict["system"]["sim_time"]
SETTINGS.zoom = dict["system"]["zoom"]
SETTINGS.kite_scale = dict["system"]["kite_scale"]
SETTINGS.fixed_font = dict["system"]["fixed_font"]
SETTINGS.l_tether = dict["initial"]["l_tether"]
SETTINGS.v_reel_out = dict["initial"]["v_reel_out"]
SETTINGS.elevation = dict["initial"]["elevation"]
SETTINGS.depower = dict["initial"]["depower"]
SETTINGS.abs_tol = dict["solver"]["abs_tol"]
SETTINGS.rel_tol = dict["solver"]["rel_tol"]
SETTINGS.max_iter = dict["solver"]["max_iter"]
SETTINGS.c0 = dict["steering"]["c0"]
SETTINGS.c_s = dict["steering"]["c_s"]
SETTINGS.c2_cor = dict["steering"]["c2_cor"]
SETTINGS.k_ds = dict["steering"]["k_ds"]
SETTINGS.alpha_d_max = dict["depower"]["alpha_d_max"]
SETTINGS.depower_offset = dict["depower"]["depower_offset"]
SETTINGS.model = dict["kite"]["model"]
SETTINGS.physical_model= dict["kite"]["physical_model"]
SETTINGS.version = dict["kite"]["version"]
SETTINGS.area = dict["kite"]["area"]
SETTINGS.rel_side_area = dict["kite"]["rel_side_area"]
SETTINGS.mass = dict["kite"]["mass"]
SETTINGS.height_k = dict["kite"]["height"]
SETTINGS.alpha_cl = dict["kite"]["alpha_cl"]
SETTINGS.cl_list = dict["kite"]["cl_list"]
SETTINGS.alpha_cd = dict["kite"]["alpha_cd"]
SETTINGS.cd_list = dict["kite"]["cd_list"]
SETTINGS.width = dict["kps4"]["width"]
SETTINGS.alpha_zero = dict["kps4"]["alpha_zero"]
SETTINGS.alpha_ztip = dict["kps4"]["alpha_ztip"]
SETTINGS.m_k = dict["kps4"]["m_k"]
SETTINGS.rel_nose_mass = dict["kps4"]["rel_nose_mass"]
SETTINGS.rel_top_mass = dict["kps4"]["rel_top_mass"]
SETTINGS.l_bridle = dict["bridle"]["l_bridle"]
SETTINGS.h_bridle = dict["bridle"]["h_bridle"]
SETTINGS.d_line = dict["bridle"]["d_line"]
SETTINGS.kcu_mass = dict["kcu"]["kcu_mass"]
SETTINGS.power2steer_dist = dict["kcu"]["power2steer_dist"]
SETTINGS.depower_drum_diameter = dict["kcu"]["depower_drum_diameter"]
SETTINGS.tape_thickness = dict["kcu"]["tape_thickness"]
SETTINGS.v_depower = dict["kcu"]["v_depower"]
SETTINGS.v_steering = dict["kcu"]["v_steering"]
SETTINGS.depower_gain = dict["kcu"]["depower_gain"]
SETTINGS.steering_gain = dict["kcu"]["steering_gain"]
SETTINGS.cd_tether = dict["tether"]["cd_tether"]
SETTINGS.d_tether = dict["tether"]["d_tether"]
SETTINGS.damping = dict["tether"]["damping"]
SETTINGS.c_spring = dict["tether"]["c_spring"]
SETTINGS.rho_tether = dict["tether"]["rho_tether"]
SETTINGS.e_tether = dict["tether"]["e_tether"]
SETTINGS.v_wind = dict["environment"]["v_wind"]
SETTINGS.v_wind_ref = dict["environment"]["v_wind_ref"]
SETTINGS.h_ref = dict["environment"]["h_ref"]
SETTINGS.rho_0 = dict["environment"]["rho_0"]
SETTINGS.z0 = dict["environment"]["z0"]
SETTINGS.alpha = dict["environment"]["alpha"]
SETTINGS.profile_law = dict["environment"]["profile_law"]
SETTINGS.temp_ref = dict["environment"]["temp_ref"] # temperature at reference height [°C]
SETTINGS.height_gnd = dict["environment"]["height_gnd"] # height of groundstation above see level [m]
SETTINGS.use_turbulence = dict["environment"]["use_turbulence"] # turbulence intensity relative to Cabau, NL
SETTINGS.v_wind_gnds = dict["environment"]["v_wind_gnds"] # wind speeds at ref height for calculating the turbulent wind field [m/s]
SETTINGS.avg_height = dict["environment"]["avg_height"] # average height during reel out [m]
SETTINGS.rel_turbs = dict["environment"]["rel_turbs"] # relative turbulence at the v_wind_gnds
SETTINGS.i_ref = dict["environment"]["i_ref"] # is the expected value of the turbulence intensity at 15 m/s.
SETTINGS.v_ref = dict["environment"]["v_ref"] # five times the average wind speed in m/s at hub height over the full year [m/s]
# Cabau: 8.5863 m/s * 5.0 = 42.9 m/s
SETTINGS.height_step = dict["environment"]["height_step"] # use a grid with 2m resolution in z direction [m]
SETTINGS.grid_step = dict["environment"]["grid_step"] # grid resolution in x and y direction [m]
end
return SETTINGS
end
| [
27,
456,
62,
30783,
29,
16,
12,
940,
198,
2,
28,
17168,
13789,
198,
198,
15269,
357,
66,
8,
12131,
11,
33448,
1279,
20608,
29,
198,
198,
5990,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
198,... | 2.06801 | 6,102 |
<filename>src/Gumbo.jl
module Gumbo
using Compat
if isfile(joinpath(dirname(dirname(@__FILE__)),"deps","deps.jl"))
include("../deps/deps.jl")
else
error("Gumbo not properly installed. Please run Pkg.build(\"Gumbo\")")
end
include("CGumbo.jl")
export HTMLElement,
HTMLDocument,
HTMLText,
NullNode,
HTMLNode,
attrs,
text,
tag,
children,
getattr,
setattr!,
parsehtml,
postorder,
preorder,
breadthfirst,
prettyprint
include("htmltypes.jl")
include("manipulation.jl")
include("comparison.jl")
include("io.jl")
include("conversion.jl")
end
| [
27,
34345,
29,
10677,
14,
38,
29309,
13,
20362,
198,
21412,
402,
29309,
198,
198,
3500,
3082,
265,
198,
198,
361,
318,
7753,
7,
22179,
6978,
7,
15908,
3672,
7,
15908,
3672,
7,
31,
834,
25664,
834,
4008,
553,
10378,
82,
2430,
10378,
... | 2.176667 | 300 |
<gh_stars>0
@testset "distributions" begin
Random.seed!(1234)
# Create random vectors and matrices
dim = 3
a = rand(dim)
b = rand(dim)
c = rand(dim)
A = rand(dim, dim)
B = rand(dim, dim)
C = rand(dim, dim)
# Create random numbers
alpha = rand()
beta = rand()
gamma = rand()
# Create matrix `X` such that `X` and `I - X` are positive definite if `A ≠ 0`.
function to_beta_mat(A)
S = A * A' + I
invL = inv(cholesky(S).L)
return invL * invL'
end
# Create positive values.
to_positive(x) = exp.(x)
to_positive(x::AbstractArray{<:AbstractArray}) = to_positive.(x)
# The following definition should not be needed
# It seems there is a bug in the default `rand_tangent` that causes a
# StackOverflowError though
function ChainRulesTestUtils.rand_tangent(::Random.AbstractRNG, ::typeof(to_positive))
return NoTangent()
end
# Tests that have a `broken` field can be executed but, according to FiniteDifferences,
# fail to produce the correct result. These tests can be checked with `@test_broken`.
univariate_distributions = DistSpec[
## Univariate discrete distributions
DistSpec(Bernoulli, (0.45,), 1),
DistSpec(Bernoulli, (0.45,), [1, 1]),
DistSpec(Bernoulli, (0.45,), 0),
DistSpec(Bernoulli, (0.45,), [0, 0]),
DistSpec((a, b) -> BetaBinomial(10, a, b), (2.0, 1.0), 5),
DistSpec((a, b) -> BetaBinomial(10, a, b), (2.0, 1.0), [5, 5]),
DistSpec(p -> Binomial(10, p), (0.5,), 5),
DistSpec(p -> Binomial(10, p), (0.5,), [5, 5]),
DistSpec(p -> Categorical(p / sum(p)), ([0.45, 0.55],), 1),
DistSpec(p -> Categorical(p / sum(p)), ([0.45, 0.55],), [1, 1]),
DistSpec(Geometric, (0.45,), 3),
DistSpec(Geometric, (0.45,), [3, 3]),
DistSpec(NegativeBinomial, (3.5, 0.5), 1),
DistSpec(NegativeBinomial, (3.5, 0.5), [1, 1]),
DistSpec(Poisson, (0.5,), 1),
DistSpec(Poisson, (0.5,), [1, 1]),
DistSpec(Skellam, (1.0, 2.0), -2),
DistSpec(Skellam, (1.0, 2.0), [-2, -2]),
DistSpec(PoissonBinomial, ([0.5, 0.5],), 0),
DistSpec(TuringPoissonBinomial, ([0.5, 0.5],), 0),
DistSpec(TuringPoissonBinomial, ([0.5, 0.5],), [0, 0]),
## Univariate continuous distributions
DistSpec(Arcsine, (), 0.5),
DistSpec(Arcsine, (1.0,), 0.5),
DistSpec(Arcsine, (0.0, 2.0), 0.5),
DistSpec(Beta, (), 0.4),
DistSpec(Beta, (1.5,), 0.4),
DistSpec(Beta, (1.5, 2.0), 0.4),
DistSpec(BetaPrime, (), 0.4),
DistSpec(BetaPrime, (1.5,), 0.4),
DistSpec(BetaPrime, (1.5, 2.0), 0.4),
DistSpec(Biweight, (), 0.5),
DistSpec(Biweight, (1.0,), 0.5),
DistSpec(Biweight, (1.0, 2.0), 0.5),
DistSpec(Cauchy, (), 0.5),
DistSpec(Cauchy, (1.0,), 0.5),
DistSpec(Cauchy, (1.0, 2.0), 0.5),
DistSpec(Chernoff, (), 0.5, broken=(:Zygote,)),
DistSpec(Chi, (1.0,), 0.5),
DistSpec(Chisq, (1.0,), 0.5),
DistSpec(Cosine, (1.0, 1.0), 0.5),
DistSpec(Epanechnikov, (1.0, 1.0), 0.5),
DistSpec(s -> Erlang(1, s), (1.0,), 0.5), # First arg is integer
DistSpec(Exponential, (1.0,), 0.5),
DistSpec(FDist, (1.0, 1.0), 0.5),
DistSpec(Frechet, (), 0.5),
DistSpec(Frechet, (1.0,), 0.5),
DistSpec(Frechet, (1.0, 2.0), 0.5),
DistSpec(Gamma, (), 0.4),
DistSpec(Gamma, (1.5,), 0.4),
DistSpec(Gamma, (1.5, 2.0), 0.4),
DistSpec(GeneralizedExtremeValue, (1.0, 1.0, 1.0), 0.5),
DistSpec(GeneralizedPareto, (), 0.5),
DistSpec(GeneralizedPareto, (1.0, 2.0), 0.5),
DistSpec(GeneralizedPareto, (0.0, 2.0, 3.0), 0.5),
DistSpec(Gumbel, (), 0.5),
DistSpec(Gumbel, (1.0,), 0.5),
DistSpec(Gumbel, (1.0, 2.0), 0.5),
DistSpec(InverseGamma, (), 0.5),
DistSpec(InverseGamma, (1.0,), 0.5),
DistSpec(InverseGamma, (1.0, 2.0), 0.5),
DistSpec(InverseGaussian, (), 0.5),
DistSpec(InverseGaussian, (1.0,), 0.5),
DistSpec(InverseGaussian, (1.0, 2.0), 0.5),
DistSpec(Kolmogorov, (), 0.5),
DistSpec(Laplace, (), 0.5),
DistSpec(Laplace, (1.0,), 0.5),
DistSpec(Laplace, (1.0, 2.0), 0.5),
DistSpec(Levy, (), 0.5),
DistSpec(Levy, (0.0,), 0.5),
DistSpec(Levy, (0.0, 2.0), 0.5),
DistSpec((a, b) -> LocationScale(a, b, Normal()), (1.0, 2.0), 0.5),
DistSpec(Logistic, (), 0.5),
DistSpec(Logistic, (1.0,), 0.5),
DistSpec(Logistic, (1.0, 2.0), 0.5),
DistSpec(LogitNormal, (), 0.5),
DistSpec(LogitNormal, (1.0,), 0.5),
DistSpec(LogitNormal, (1.0, 2.0), 0.5),
DistSpec(LogNormal, (), 0.5),
DistSpec(LogNormal, (1.0,), 0.5),
DistSpec(LogNormal, (1.0, 2.0), 0.5),
# Dispatch error caused by ccall
DistSpec(NoncentralBeta, (1.0, 2.0, 1.0), 0.5, broken=(:Tracker, :ForwardDiff, :Zygote, :ReverseDiff)),
DistSpec(NoncentralChisq, (1.0, 2.0), 0.5, broken=(:Tracker, :ForwardDiff, :Zygote, :ReverseDiff)),
DistSpec(NoncentralF, (1.0, 2.0, 1.0), 0.5, broken=(:Tracker, :ForwardDiff, :Zygote, :ReverseDiff)),
DistSpec(NoncentralT, (1.0, 2.0), 0.5, broken=(:Tracker, :ForwardDiff, :Zygote, :ReverseDiff)),
DistSpec(Normal, (), 0.5),
DistSpec(Normal, (1.0,), 0.5),
DistSpec(Normal, (1.0, 2.0), 0.5),
DistSpec(NormalCanon, (1.0, 2.0), 0.5),
DistSpec(NormalInverseGaussian, (1.0, 2.0, 1.0, 1.0), 0.5),
DistSpec(Pareto, (), 1.5),
DistSpec(Pareto, (1.0,), 1.5),
DistSpec(Pareto, (1.0, 1.0), 1.5),
DistSpec(PGeneralizedGaussian, (), 0.5),
DistSpec(PGeneralizedGaussian, (1.0, 1.0, 1.0), 0.5),
DistSpec(Rayleigh, (), 0.5),
DistSpec(Rayleigh, (1.0,), 0.5),
DistSpec(Semicircle, (1.0,), 0.5),
DistSpec(SymTriangularDist, (), 0.5),
DistSpec(SymTriangularDist, (1.0,), 0.5),
DistSpec(SymTriangularDist, (1.0, 2.0), 0.5),
DistSpec(TDist, (1.0,), 0.5),
DistSpec(TriangularDist, (1.0, 3.0), 1.5),
DistSpec(TriangularDist, (1.0, 3.0, 2.0), 1.5),
DistSpec(Triweight, (1.0, 1.0), 1.0),
DistSpec(
(mu, sigma, l, u) -> truncated(Normal(mu, sigma), l, u), (0.0, 1.0, 1.0, 2.0), 1.5
),
DistSpec(Uniform, (), 0.5),
DistSpec(Uniform, (alpha, alpha + beta), alpha + beta * gamma),
DistSpec(TuringUniform, (), 0.5),
DistSpec(TuringUniform, (alpha, alpha + beta), alpha + beta * gamma),
DistSpec(VonMises, (), 1.0),
DistSpec(Weibull, (), 1.0),
DistSpec(Weibull, (1.0,), 1.0),
DistSpec(Weibull, (1.0, 1.0), 1.0),
]
# Tests cannot be executed, so cannot be checked with `@test_broken`.
broken_univariate_distributions = DistSpec[
# Broken in Distributions even without autodiff
DistSpec(() -> KSDist(1), (), 0.5), # `pdf` method not defined
DistSpec(() -> KSOneSided(1), (), 0.5), # `pdf` method not defined
DistSpec(StudentizedRange, (1.0, 2.0), 0.5), # `srdistlogpdf` method not defined
# Stackoverflow caused by SpecialFunctions.besselix
DistSpec(VonMises, (1.0,), 1.0),
DistSpec(VonMises, (1, 1), 1),
# Some tests are broken on some Julia versions, therefore it can't be checked reliably
DistSpec(PoissonBinomial, ([0.5, 0.5],), [0, 0]; broken=(:Zygote,)),
]
# Tests that have a `broken` field can be executed but, according to FiniteDifferences,
# fail to produce the correct result. These tests can be checked with `@test_broken`.
multivariate_distributions = DistSpec[
## Multivariate discrete distributions
# Vector x
DistSpec(p -> Multinomial(2, p ./ sum(p)), (fill(0.5, 2),), [2, 0]),
DistSpec(p -> Multinomial(2, p ./ sum(p)), (fill(0.5, 2),), [2 1; 0 1]),
# Vector x
DistSpec((m, A) -> MvNormal(m, to_posdef(A)), (a, A), b),
DistSpec((m, s) -> MvNormal(m, to_posdef_diagonal(s)), (a, b), c),
DistSpec((m, s) -> MvNormal(m, s^2 * I), (a, alpha), b),
DistSpec(A -> MvNormal(to_posdef(A)), (A,), a),
DistSpec(s -> MvNormal(to_posdef_diagonal(s)), (a,), b),
DistSpec(s -> MvNormal(zeros(dim), s^2 * I), (alpha,), a),
DistSpec((m, A) -> TuringMvNormal(m, to_posdef(A)), (a, A), b),
DistSpec((m, s) -> TuringMvNormal(m, to_posdef_diagonal(s)), (a, b), c),
DistSpec((m, s) -> TuringMvNormal(m, s^2 * I), (a, alpha), b),
DistSpec(A -> TuringMvNormal(to_posdef(A)), (A,), a),
DistSpec(s -> TuringMvNormal(to_posdef_diagonal(s)), (a,), b),
DistSpec(s -> TuringMvNormal(zeros(dim), s^2 * I), (alpha,), a),
DistSpec((m, A) -> MvLogNormal(m, to_posdef(A)), (a, A), b, to_positive),
DistSpec((m, s) -> MvLogNormal(m, to_posdef_diagonal(s)), (a, b), c, to_positive),
DistSpec((m, s) -> MvLogNormal(m, s^2 * I), (a, alpha), b, to_positive),
DistSpec(A -> MvLogNormal(to_posdef(A)), (A,), a, to_positive),
DistSpec(s -> MvLogNormal(to_posdef_diagonal(s)), (a,), b, to_positive),
DistSpec(s -> MvLogNormal(zeros(dim), s^2 * I), (alpha,), a, to_positive),
DistSpec(alpha -> Dirichlet(to_positive(alpha)), (a,), b, to_simplex),
# Matrix case
DistSpec((m, A) -> MvNormal(m, to_posdef(A)), (a, A), B),
DistSpec((m, s) -> MvNormal(m, to_posdef_diagonal(s)), (a, b), A),
DistSpec((m, s) -> MvNormal(m, s^2 * I), (a, alpha), A),
DistSpec(A -> MvNormal(to_posdef(A)), (A,), B),
DistSpec(s -> MvNormal(to_posdef_diagonal(s)), (a,), A),
DistSpec(s -> MvNormal(zeros(dim), s^2 * I), (alpha,), A),
DistSpec((m, A) -> TuringMvNormal(m, to_posdef(A)), (a, A), B),
DistSpec((m, s) -> TuringMvNormal(m, to_posdef_diagonal(s)), (a, b), A),
DistSpec((m, s) -> TuringMvNormal(m, s^2 * I), (a, alpha), A),
DistSpec(A -> TuringMvNormal(to_posdef(A)), (A,), B),
DistSpec(s -> TuringMvNormal(to_posdef_diagonal(s)), (a,), A),
DistSpec(s -> TuringMvNormal(zeros(dim), s^2 * I), (alpha,), A),
DistSpec((m, A) -> MvLogNormal(m, to_posdef(A)), (a, A), B, to_positive),
DistSpec((m, s) -> MvLogNormal(m, to_posdef_diagonal(s)), (a, b), A, to_positive),
DistSpec((m, s) -> MvLogNormal(m, s^2 * I), (a, alpha), A, to_positive),
DistSpec(A -> MvLogNormal(to_posdef(A)), (A,), B, to_positive),
DistSpec(s -> MvLogNormal(to_posdef_diagonal(s)), (a,), A, to_positive),
DistSpec(s -> MvLogNormal(zeros(dim), s^2 * I), (alpha,), A, to_positive),
DistSpec(alpha -> Dirichlet(to_positive(alpha)), (a,), A, to_simplex),
]
# Tests cannot be executed, so cannot be checked with `@test_broken`.
broken_multivariate_distributions = DistSpec[
# Dispatch error
DistSpec((m, A) -> MvNormalCanon(m, to_posdef(A)), (a, A), b),
DistSpec((m, p) -> MvNormalCanon(m, to_posdef_diagonal(p)), (a, b), c),
DistSpec((m, p) -> MvNormalCanon(m, p^2 * I), (a, alpha), b),
DistSpec(A -> MvNormalCanon(to_posdef(A)), (A,), a),
DistSpec(p -> MvNormalCanon(to_posdef_diagonal(p)), (a,), b),
DistSpec(p -> MvNormalCanon(zeros(dim), p^2 * I), (alpha,), a),
DistSpec((m, A) -> MvNormalCanon(m, to_posdef(A)), (a, A), B),
DistSpec((m, p) -> MvNormalCanon(m, to_posdef_diagonal(p)), (a, b), A),
DistSpec((m, p) -> MvNormalCanon(m, p^2 * I), (a, alpha), A),
DistSpec(A -> MvNormalCanon(to_posdef(A)), (A,), B),
DistSpec(p -> MvNormalCanon(to_posdef_diagonal(p)), (a,), A),
DistSpec(p -> MvNormalCanon(zeros(dim), p^2 * I), (alpha,), A),
]
# Tests that have a `broken` field can be executed but, according to FiniteDifferences,
# fail to produce the correct result. These tests can be checked with `@test_broken`.
matrixvariate_distributions = DistSpec[
# Matrix x
# We should use
# DistSpec((n1, n2) -> MatrixBeta(dim, n1, n2), (3.0, 3.0), A, to_beta_mat),
# but the default implementation of `rand_tangent` causes a StackOverflowError
# Thus we use the following workaround
DistSpec((n1, n2) -> MatrixBeta(3, n1, n2), (3.0, 3.0), A, to_beta_mat),
DistSpec(() -> MatrixNormal(dim, dim), (), A, to_posdef, broken=(:Zygote,)),
DistSpec((df, A) -> Wishart(df, to_posdef(A)), (3.0, A), B, to_posdef),
DistSpec((df, A) -> InverseWishart(df, to_posdef(A)), (3.0, A), B, to_posdef),
DistSpec((df, A) -> TuringWishart(df, to_posdef(A)), (3.0, A), B, to_posdef),
DistSpec((df, A) -> TuringInverseWishart(df, to_posdef(A)), (3.0, A), B, to_posdef),
# Vector of matrices x
# Also here we should use
# DistSpec(
# (n1, n2) -> MatrixBeta(dim, n1, n2),
# (3.0, 3.0),
# [A, B],
# x -> map(to_beta_mat, x),
#),
# but the default implementation of `rand_tangent` causes a StackOverflowError
# Thus we use the following workaround
DistSpec(
(n1, n2) -> MatrixBeta(3, n1, n2),
(3.0, 3.0),
[A, B],
x -> map(to_beta_mat, x),
),
DistSpec(
(df, A) -> Wishart(df, to_posdef(A)),
(3.0, A),
[B, C],
x -> map(to_posdef, x),
),
DistSpec(
(df, A) -> InverseWishart(df, to_posdef(A)),
(3.0, A),
[B, C],
x -> map(to_posdef, x),
),
DistSpec(
(df, A) -> TuringWishart(df, to_posdef(A)),
(3.0, A),
[B, C],
x -> map(to_posdef, x),
),
DistSpec(
(df, A) -> TuringInverseWishart(df, to_posdef(A)),
(3.0, A),
[B, C],
x -> map(to_posdef, x),
),
]
# Tests cannot be executed, so cannot be checked with `@test_broken`.
broken_matrixvariate_distributions = DistSpec[
# Other
# TODO different tests are broken on different combinations of backends
DistSpec(
(A, B, C) -> MatrixNormal(A, to_posdef(B), to_posdef(C)),
(A, B, B),
C,
to_posdef,
),
# TODO different tests are broken on different combinations of backends
DistSpec(
(df, A, B, C) -> MatrixTDist(df, A, to_posdef(B), to_posdef(C)),
(1.0, A, B, B),
C,
to_posdef,
),
# TODO different tests are broken on different combinations of backends
DistSpec(
(n1, n2, A) -> MatrixFDist(n1, n2, to_posdef(A)),
(3.0, 3.0, A),
B,
to_posdef,
),
]
@testset "Univariate distributions" begin
println("\nTesting: Univariate distributions\n")
for d in univariate_distributions
@info "Testing: $(nameof(dist_type(d)))"
test_ad(d)
end
end
@testset "Multivariate distributions" begin
println("\nTesting: Multivariate distributions\n")
for d in multivariate_distributions
@info "Testing: $(nameof(dist_type(d)))"
test_ad(d)
end
# Test `filldist` and `arraydist` distributions of univariate distributions
n = 2 # always use two distributions
for d in univariate_distributions
d.x isa Number || continue
# Broken distributions
D = dist_type(d)
D <: Union{VonMises,TriangularDist} && continue
# Skellam only fails in these tests with ReverseDiff
# Ref: https://github.com/TuringLang/DistributionsAD.jl/issues/126
# PoissonBinomial fails with Zygote
# Matrix case does not work with Skellam:
# https://github.com/TuringLang/DistributionsAD.jl/pull/172#issuecomment-853721493
filldist_broken = if D <: Skellam
((d.broken..., :Zygote, :ReverseDiff), (d.broken..., :Zygote, :ReverseDiff))
elseif D <: PoissonBinomial
((d.broken..., :Zygote), (d.broken..., :Zygote))
elseif D <: Chernoff
# Zygote is not broken with `filldist`
((), ())
else
(d.broken, d.broken)
end
arraydist_broken = if D <: PoissonBinomial
((d.broken..., :Zygote), (d.broken..., :Zygote))
else
(d.broken, d.broken)
end
# Create `filldist` distribution
f = d.f
f_filldist = (θ...,) -> filldist(f(θ...), n)
d_filldist = f_filldist(d.θ...)
# Create `arraydist` distribution
f_arraydist = (θ...,) -> arraydist([f(θ...) for _ in 1:n])
d_arraydist = f_arraydist(d.θ...)
for (i, sz) in enumerate(((n,), (n, 2)))
# Matrix case doesn't work for continuous distributions for some reason
# now but not too important (?!)
if length(sz) == 2 && D <: ContinuousDistribution
continue
end
# Compute compatible sample
x = fill(d.x, sz)
# Test AD
@info "Testing: filldist($(nameof(D)), $sz)"
test_ad(
DistSpec(
f_filldist,
d.θ,
x,
d.xtrans;
broken=filldist_broken[i],
)
)
@info "Testing: arraydist($(nameof(D)), $sz)"
test_ad(
DistSpec(
f_arraydist,
d.θ,
x,
d.xtrans;
broken=arraydist_broken[i],
)
)
end
end
end
@testset "Matrixvariate distributions" begin
println("\nTesting: Matrixvariate distributions\n")
for d in matrixvariate_distributions
@info "Testing: $(nameof(dist_type(d)))"
test_ad(d)
end
# Test `filldist` and `arraydist` distributions of univariate distributions
n = (2, 2) # always use 2 x 2 distributions
for d in univariate_distributions
d.x isa Number || continue
D = dist_type(d)
D <: DiscreteDistribution && continue
# Broken distributions
D <: Union{VonMises,TriangularDist} && continue
# Create `filldist` distribution
f = d.f
f_filldist = (θ...,) -> filldist(f(θ...), n...)
# Create `arraydist` distribution
# Zygote's fill definition does not like non-numbers, so we use a workaround
f_arraydist = (θ...,) -> arraydist(reshape([f(θ...) for _ in 1:prod(n)], n))
# Matrix `x`
x_mat = fill(d.x, n)
# Zygote is not broken with `filldist` + Chernoff
filldist_broken = D <: Chernoff ? () : d.broken
# Test AD
@info "Testing: filldist($(nameof(D)), $n)"
test_ad(
DistSpec(
f_filldist,
d.θ,
x_mat,
d.xtrans;
broken=filldist_broken,
)
)
@info "Testing: arraydist($(nameof(D)), $n)"
test_ad(
DistSpec(
f_arraydist,
d.θ,
x_mat,
d.xtrans;
broken=d.broken,
)
)
# Vector of matrices `x`
x_vec_of_mat = [fill(d.x, n) for _ in 1:2]
# Test AD
@info "Testing: filldist($(nameof(D)), $n, 2)"
test_ad(
DistSpec(
f_filldist,
d.θ,
x_vec_of_mat,
d.xtrans;
broken=filldist_broken,
)
)
@info "Testing: arraydist($(nameof(D)), $n, 2)"
test_ad(
DistSpec(
f_arraydist,
d.θ,
x_vec_of_mat,
d.xtrans;
broken=d.broken,
)
)
end
# test `filldist` and `arraydist` distributions of multivariate distributions
n = 2 # always use two distributions
for d in multivariate_distributions
d.x isa AbstractVector || continue
D = dist_type(d)
D <: DiscreteDistribution && continue
# Tests are failing for matrix covariance vectorized MvNormal
if D <: Union{
MvNormal,MvLogNormal,
DistributionsAD.TuringDenseMvNormal,
DistributionsAD.TuringDiagMvNormal,
DistributionsAD.TuringScalMvNormal,
TuringMvLogNormal
}
any(x isa Matrix for x in d.θ) && continue
end
# Create `filldist` distribution
f = d.f
f_filldist = (θ...,) -> filldist(f(θ...), n)
# Create `arraydist` distribution
f_arraydist = (θ...,) -> arraydist([f(θ...) for _ in 1:n])
# Matrix `x`
x_mat = repeat(d.x, 1, n)
# Test AD
@info "Testing: filldist($(nameof(D)), $n)"
test_ad(
DistSpec(
f_filldist,
d.θ,
x_mat,
d.xtrans;
broken=d.broken,
)
)
@info "Testing: arraydist($(nameof(D)), $n)"
test_ad(
DistSpec(
f_arraydist,
d.θ,
x_mat,
d.xtrans;
broken=d.broken,
)
)
# Vector of matrices `x`
x_vec_of_mat = [repeat(d.x, 1, n) for _ in 1:2]
# Test AD
@info "Testing: filldist($(nameof(D)), $n, 2)"
test_ad(
DistSpec(
f_filldist,
d.θ,
x_vec_of_mat,
d.xtrans;
broken=d.broken,
)
)
@info "Testing: arraydist($(nameof(D)), $n, 2)"
test_ad(
DistSpec(
f_arraydist,
d.θ,
x_vec_of_mat,
d.xtrans;
broken=d.broken,
)
)
end
end
end
| [
27,
456,
62,
30783,
29,
15,
198,
31,
9288,
2617,
366,
17080,
2455,
507,
1,
2221,
198,
220,
220,
220,
14534,
13,
28826,
0,
7,
1065,
2682,
8,
628,
220,
220,
220,
1303,
13610,
4738,
30104,
290,
2603,
45977,
198,
220,
220,
220,
5391,
... | 1.855581 | 12,436 |
<reponame>UnofficialJuliaMirror/SecureSessions.jl-fd66a1d0-90d3-555d-a35d-c122df06773c
# Contents: Functions for creating/handling secure cookies.
################################################################################
# Create session cookie
#
# The scheme:
# const_key, const_iv = global constants, output from a cryptographically secure random number generator
# (used to encrypt session-specific secret keys)
# timestamp = milliseconds since epoch, represented as a string
# session_key, session_iv = output from a cryptographic random number generator, unique for each session
# encrypted_session_key = AES CBC encrypt(const_key, const_iv, session_key)
# data blob = AES CBC encrypt(session_key, session_iv, arbitrary data)
# hmac signature = HMAC(session_key, timestamp * data_blob)
# unencoded cookie_value = session_iv * encrypted_secret_key * hmac signature * timestamp * data blob
# cookie_value = base64encode(unencoded cookie value)
# (the encoding is for transport in an http header)
#
################################################################################
"""
Create a secure session cookie for the response.
The cookie value includes the encryption of the supplied data.
The Secure and HttpOnly attributes are set according to global variables.
"""
function create_secure_session_cookie(data, res, cookie_name = "sessionid")
cookie_value = create_secure_session_cookievalue(data)
attr = Dict("Max-Age" => timeout_str)
if encrypted_sessions_only
attr["Secure"] = ""
end
if http_only
attr["HttpOnly"] = ""
end
setcookie!(res, cookie_name, string(cookie_value), attr)
end
"""
Create the value of the secure session cookie.
Input: Data (ASCIIString) to be embedded in the encrypted cookie value.
Output: Cookie value (ASCIIString)
Note: Binary data is base64 encoded for transport in http headers (base64 is more space efficient than hex encoding).
"""
function create_secure_session_cookievalue(plaintext)
# Encrypt data
session_key = csrng(key_length)
session_iv = csrng(block_size)
data_blob = encrypt(CIPHER_AES, session_key, plaintext, session_iv) # Encryption is done in CBC mode
# Compute HMAC signature
timestamp = string(get_timestamp()) # Millieconds since epoch
ts_uint8 = convert(Array{UInt8, 1}, timestamp)
hmac_signature = digest(MD_SHA256, vcat(ts_uint8, data_blob), session_key)
# Compute cookie value
encrypted_session_key = encrypt(CIPHER_AES, const_key, session_key, const_iv)
cookie_value = base64encode(vcat(session_iv, encrypted_session_key, hmac_signature, ts_uint8, data_blob))
cookie_value
end
################################################################################
# Validate session cookie
################################################################################
# The application validates a session cookie as follows:
# 1. Decode hmac signature.
# 2. Compute HMAC(secret key, timestamp * data blob) and compare it to hmac signature. Fail if they differ.
# 3. Decode timestamp.
# 4. Verify that the current time in milliseconds since the epoch is not greater than timestamp + session timeout.
# TODO: If the cookie is not valid, the application must refuse the requested action and redirect the user to the login page.
#
# If the cookie is valid, the application can
# 1. Decrypt data blob
# 2. Parse or deserialize data blob as appropriate.
# At this point, the application has a valid state object for the user's session and can proceed with processing the requested action.
"""
Returns the decrypted cookie data.
Returns "" if the cookie doesn't exist.
"""
function get_session_cookie_data(req, cookie_name)
result = ""
if haskey(req.headers, "Cookie")
cookie_value = get_cookie_value(req, cookie_name)
if cookie_value != ""
cookie_is_valid, data_blob, session_key, session_iv = session_cookie_is_valid(cookie_value)
if cookie_is_valid
result = decrypt(CIPHER_AES, session_key, data_blob, session_iv)
result = String(result)
end
end
end
result
end
"""
Returns the cookie value, which is encrypted.
Returns "" if the cookie doesn't exist.
"""
function get_cookie_value(req, cookie_name)
cookie_value = ""
ckie = req.headers["Cookie"] # ASCIIString: "name1=value1; name2=value2"
names_values = split(ckie, ";") # "name=value"
for nv in names_values
r = search(nv, cookie_name) # first_idx:last_idx
if length(r) > 0 # cookie_name is in nv
r2 = search(nv, "=")
cookie_value = nv[(r2[1] + 1):end]
break
end
end
String(cookie_value) # Convert SubString to string for base64 decoding
end
"""
Returns: cookie_is_valid (Bool) and session data.
cookie_is_valid is true if session cookie:
1) Has not expired, and
2) hmac_signature == HMAC(secret key, timestamp * data_blob)
"""
function session_cookie_is_valid(cookie_value)
# Extract cookie data
cookie_value = base64decode(cookie_value)
session_iv = cookie_value[1:block_size]
offset = block_size
encrypted_session_key = cookie_value[(offset + 1):(offset + key_length + block_size)]
session_key = decrypt(CIPHER_AES, const_key, encrypted_session_key, const_iv)
offset += key_length + block_size
hmac_signature = view(cookie_value, (offset + 1):(offset + key_length))
offset += key_length
ts_uint8 = cookie_value[(offset + 1):(offset + 13)]
timestamp = parse(Int, String(ts_uint8)) # Seconds since epoch
offset += 13
data_blob = cookie_value[(offset + 1):end]
# Determine conditions
current_time = get_timestamp()
expired = current_time > timestamp + session_timeout
hmac_sig2 = digest(MD_SHA256, vcat(ts_uint8, data_blob), session_key)
hmac_ok = hmac_sig2 == hmac_signature
# Prepare results
cookie_is_valid = false
if !expired && hmac_ok
cookie_is_valid = true
end
cookie_is_valid, data_blob, session_key, session_iv
end
"""
Invalidates the cookie with name == cookie_name.
Curently this works by setting the Max-Age to 0.
"""
function invalidate_cookie!(res, cookie_name)
setcookie!(res, cookie_name, "", Dict("Max-Age" => "0"))
end
# EOF
| [
27,
7856,
261,
480,
29,
3118,
16841,
16980,
544,
27453,
1472,
14,
49793,
50,
6202,
13,
20362,
12,
16344,
2791,
64,
16,
67,
15,
12,
3829,
67,
18,
12,
31046,
67,
12,
64,
2327,
67,
12,
66,
18376,
7568,
15,
3134,
4790,
66,
198,
2,
... | 2.878354 | 2,236 |
# packages -
using LinearAlgebra
using GLPK
# my codes -
include("Flux.jl")
include("Expa.jl")
include("Stoichiometric.jl")
include("Utility.jl") | [
2,
10392,
532,
198,
3500,
44800,
2348,
29230,
198,
3500,
10188,
40492,
198,
198,
2,
616,
12416,
532,
198,
17256,
7203,
37,
22564,
13,
20362,
4943,
198,
17256,
7203,
3109,
8957,
13,
20362,
4943,
198,
17256,
7203,
1273,
78,
16590,
16996,
... | 2.807692 | 52 |
using Statistics
using LinearAlgebra
using Printf
#function nnComputeCosts(nn,datax,datay;print=true,dIdx=1:size(datax,2))
# c = [ nnCost(datay[:,d],nnForward(nn,datax[:,d])) for d=dIdx ];
#
# #Print costs
# if print
# #for d=1:length(c)
# # @printf("dataset %3d: cost=%10.8f\n",d,c[d]);
# #end
# @printf(" mean costs: %10.8f\n",mean(c));
# @printf("median costs: %10.8f\n",median(c));
# @printf("2-norm costs: %10.8f\n",norm(c,2)/length(c));
# end
#
# return c;
#end
#In this version we get the neural net and input data
function nnComputeCosts(nn,datax,datay;print=true,dIdx=1:size(datax,2))
outputs = nnComputeOutputs(nn,datax;dIdx=dIdx);
#c = [ nnCost(datay[:,d],outputs[:,d]) for d=dIdx ];
##Print costs
#if print
# #for d=1:length(c)
# # @printf("dataset %3d: cost=%10.8f\n",d,c[d]);
# #end
# @printf(" mean costs: %10.8f\n",mean(c));
# @printf("median costs: %10.8f\n",median(c));
# @printf("2-norm costs: %10.8f\n",norm(c,2)/length(c));
#end
#return c;
return nnComputeCosts(outputs,datay;print=print,dIdx=dIdx);
end
#In this version we just get the outputs
function nnComputeCosts(outputs,datay;print=true,dIdx=1:size(datay,2))
c = [ nnCost(datay[:,d],outputs[:,d]) for d=dIdx ];
#Print costs
if print
#for d=1:length(c)
# @printf("dataset %3d: cost=%10.8f\n",d,c[d]);
#end
@printf(" mean costs: %10.8f\n",mean(c));
@printf("median costs: %10.8f\n",median(c));
@printf("2-norm costs: %10.8f\n",norm(c,2)/length(c));
end
return c;
end
| [
3500,
14370,
198,
3500,
44800,
2348,
29230,
198,
3500,
12578,
69,
198,
198,
2,
8818,
299,
77,
7293,
1133,
13729,
82,
7,
20471,
11,
19608,
897,
11,
19608,
323,
26,
4798,
28,
7942,
11,
67,
7390,
87,
28,
16,
25,
7857,
7,
19608,
897,
... | 2.068 | 750 |
<reponame>UnofficialJuliaMirror/Crispulator.jl-e21f4509-7b4e-5b4b-a375-a5512fd6f24f
function convert_cells_to_pop(cells, cell_phenotypes, guides)
cells_to_phenotypes = [DefaultDict{Float64, Int}(0) for _ in 1:length(guides)]
@inbounds for i in eachindex(cells)
cells_to_phenotypes[cells[i]][cell_phenotypes[i]] += 1
end
cells_to_phenotypes
end
function test_crispri_construction()
# Test the two different CRISPR types
lib = Library(CRISPRi())
setup = FacsScreen()
guides, guide_freqs_dist = construct_library(setup, lib)
cells, cell_phenotypes = build_cells(lib.cas9_behavior, guides, guide_freqs_dist, 10^6)
# In a CRISPRi screen all cells should have the same phenotype
all(map(length, convert_cells_to_pop(cells, cell_phenotypes, guides)) .== 1)
end
@test test_crispri_construction()
function test_crisprko_construction()
lib = Library(CRISPRn())
setup = FacsScreen()
guides, guide_freqs_dist = construct_library(setup, lib)
cells, cell_phenotypes = build_cells(lib.cas9_behavior, guides, guide_freqs_dist, 10^7)
arr = convert_cells_to_pop(cells, cell_phenotypes, guides)
nonzeros = arr[find(x->length(x) != 1, arr)]
results = zeros(CRISPRn().knockout_dist.K, length(nonzeros))
for (idx, guide) in enumerate(nonzeros)
if -0.0 in keys(guide)
@assert guide[0.0] == 0
delete!(guide, 0.0)
ks = sort(collect(keys(guide)))
tot = sum(values(guide))
results[:, idx] = [guide[key]/tot for key in ks]
else
@assert guide[0.0] != 0
ks = sort(collect(keys(guide)), rev=true)
tot = sum(values(guide))
results[:, idx] = [guide[key]/tot for key in ks]
end
end
isapprox(mean(results[1, :] ./ results[2, :]), 1, atol=0.25) &&
isapprox(mean(results[1, :] ./ results[3, :]), 4, atol=0.25)
end
@test test_crisprko_construction()
| [
27,
7856,
261,
480,
29,
3118,
16841,
16980,
544,
27453,
1472,
14,
34,
2442,
79,
8927,
13,
20362,
12,
68,
2481,
69,
17885,
24,
12,
22,
65,
19,
68,
12,
20,
65,
19,
65,
12,
64,
22318,
12,
64,
2816,
1065,
16344,
21,
69,
1731,
69,
... | 2.270396 | 858 |
macro jl15_str(code::AbstractString)
if VERSION >= v"1.5-rc0"
@debug "Parsing code for Julia ≥ 1.5" Text(code)
expr = Meta.parse(string("begin\n", code, "\nend"))
@assert expr.head === :block
if expr.args[1] isa LineNumberNode
expr.args[1] = __source__
end
return esc(expr)
else
@debug "Skipping code in Julia < 1.5" Text(code)
return nothing
end
end
macro callmacro(ex)
@assert Meta.isexpr(ex, :macrocall)
return Expr(
:call,
ex.args[1],
QuoteNode(ex.args[2]),
__module__,
map(QuoteNode, ex.args[3:end])...,
)
end
| [
20285,
305,
474,
75,
1314,
62,
2536,
7,
8189,
3712,
23839,
10100,
8,
198,
220,
220,
220,
611,
44156,
2849,
18189,
410,
1,
16,
13,
20,
12,
6015,
15,
1,
198,
220,
220,
220,
220,
220,
220,
220,
2488,
24442,
366,
47,
945,
278,
2438,... | 1.975904 | 332 |
using ForwardDiff
using NLSolvers
#ScalarLsqObjective
#VectorLsqObjective
@. model(x, p) = p[1] * exp(-x * p[2])
xdata = range(0, stop = 10, length = 20)
ydata = model(xdata, [1.0 2.0]) + 0.01 * randn(length(xdata))
p0 = [0.5, 0.5]
function f(x)
mod = model(xdata, x)
return sum(abs2, mod .- ydata) / 2
end
x0 = copy(p0)
function g!(G, x)
ForwardDiff.gradient!(G, f, x)
return G
end
function h!(H, x)
ForwardDiff.hessian!(H, f, x)
return H
end
function fg(G, x)
fx = f(x)
g!(G, x)
return fx, G
end
function fgh!(G, H, x)
fx = f(x)
g!(G, x)
h!(H, x)
return fx, G, H
end
obj = ScalarObjective(f, g!, fg, fgh!, h!, nothing, nothing, nothing)
prob = OptimizationProblem(obj, ([0.0, 0.0], [3.0, 3.0]))
res = solve(prob, copy(p0), LineSearch(BFGS()), OptimizationOptions())
res = solve(prob, copy(p0), NelderMead(), OptimizationOptions())
res = solve(prob, copy(p0), SimulatedAnnealing(), OptimizationOptions())
res = solve(prob, copy(p0), ParticleSwarm(), OptimizationOptions())
res = solve(prob, copy(p0), ActiveBox(), OptimizationOptions())
res = solve(prob, copy(p0), LineSearch(BFGS()), OptimizationOptions())
res = solve(prob, copy(p0), LineSearch(SR1()), OptimizationOptions())
res = solve(prob, copy(p0), LineSearch(DFP()), OptimizationOptions())
res = solve(prob, copy(p0), LineSearch(DBFGS()), OptimizationOptions())
res = solve(prob, copy(p0), TrustRegion(Newton(), Dogleg()), OptimizationOptions())
res = solve(prob, copy(p0), TrustRegion(Newton(), NTR()), OptimizationOptions())
res = solve(prob, copy(p0), TrustRegion(Newton(), NWI()), OptimizationOptions())
res = solve(prob, copy(p0), TrustRegion(SR1(), Dogleg()), OptimizationOptions())
res = solve(prob, copy(p0), TrustRegion(SR1(), NTR()), OptimizationOptions())
res = solve(prob, copy(p0), TrustRegion(SR1(), NWI()), OptimizationOptions())
res = solve(prob, copy(p0), TrustRegion(DFP(), Dogleg()), OptimizationOptions())
res = solve(prob, copy(p0), TrustRegion(DFP(), NTR()), OptimizationOptions())
res = solve(prob, copy(p0), TrustRegion(DFP(), NWI()), OptimizationOptions())
res = solve(prob, copy(p0), TrustRegion(DBFGS(), Dogleg()), OptimizationOptions())
res = solve(prob, copy(p0), TrustRegion(BFGS(), NTR()), OptimizationOptions())
res = solve(prob, copy(p0), TrustRegion(BFGS(), NWI()), OptimizationOptions())
function F!(Fx, x)
Fx .= model(xdata, x)
return Fx
end
function J!(Jx, x)
# include this in the wrapper...
ForwardDiff.jacobian!(Jx, F!, copy(ydata), x)
return Jx
end
function FJ!(Fx, Jx, x)
F!(Fx, x)
J!(Jx, x)
Fx, Jx
end
x0 = [-1.2, 1.0]
# Use y-data to setup Fx (and Jx) correctly
vectorobj = LeastSquaresObjective(copy(ydata), ydata * x0', F!, FJ!, ydata)
prob = LeastSquaresProblem(vectorobj, ([0.0, 0.0], [1.0, 1.0]))
res = solve(prob, copy(p0), LineSearch(SR1()), LeastSquaresOptions(40))
res = solve(prob, copy(p0), LineSearch(DFP()), LeastSquaresOptions(40))
res = solve(prob, copy(p0), LineSearch(BFGS()), LeastSquaresOptions(40))
res = solve(prob, copy(p0), LineSearch(DBFGS()), LeastSquaresOptions(40))
res = solve(prob, copy(p0), TrustRegion(BFGS()), LeastSquaresOptions(40))
res = solve(prob, copy(p0), TrustRegion(BFGS()), LeastSquaresOptions(40))
res = solve(prob, copy(p0), TrustRegion(BFGS()), LeastSquaresOptions(40))
res = solve(prob, copy(p0), Adam(), LeastSquaresOptions(40))
res = solve(prob, copy(p0), AdaMax(), LeastSquaresOptions(40))
function LeastSquaresModel(model, xdata, ydata)
function f(x)
function squared_error(xy)
abs2(model(xy[1], x) - xy[2])
end
mapreduce(squared_error, +, zip(eachrow(xdata), ydata))
end
function g!(G, x)
ForwardDiff.gradient!(G, f, x)
return G
end
function h!(H, x)
ForwardDiff.hessian!(H, f, x)
return H
end
function fg(G, x)
fx = f(x)
g!(G, x)
return fx, G
end
function fgh!(G, H, x)
fx = f(x)
g!(G, x)
h!(H, x)
return fx, G, H
end
obj = ScalarObjective(f, g!, fg, fgh!, h!, nothing, nothing, nothing)
end
unimodel(x, p) = p[1] * exp(-x[1] * p[2])
xdata = range(0, stop = 10, length = 20)
ydata = unimodel.(xdata, Ref([1.0 2.0])) + 0.01 * randn(length(xdata))
p0 = [0.5, 0.5]
obj = LeastSquaresModel(unimodel, xdata, ydata)
prob = OptimizationProblem(obj, ([0.0, 0.0], [3.0, 3.0]))
res = solve(prob, copy(p0), LineSearch(BFGS()), OptimizationOptions())
res = solve(prob, copy(p0), NelderMead(), OptimizationOptions())
res = solve(prob, copy(p0), SimulatedAnnealing(), OptimizationOptions())
res = solve(prob, copy(p0), ParticleSwarm(), OptimizationOptions())
# can do this based on model or model and derivative of model
# Scalar least-squares objective: ½ Σ |model(xdata, x) .- ydata|².
# NOTE(review): `model` is a free variable here — it is not defined in the
# visible surrounding scope (only `unimodel` is); confirm it is bound earlier.
function f(x)
    mod = model(xdata, x)
    return sum(abs2, mod .- ydata) / 2
end
x0 = copy(p0)
# In-place gradient of `f` via forward-mode AD; mutates and returns G.
function g!(G, x)
    ForwardDiff.gradient!(G, f, x)
    return G
end
# In-place Hessian of `f` via forward-mode AD; mutates and returns H.
function h!(H, x)
    ForwardDiff.hessian!(H, f, x)
    return H
end
# Combined value + gradient kernel. NOTE(review): mutates G but is not named
# `fg!`; it is referenced by name when building `obj` below, so kept as-is.
function fg(G, x)
    fx = f(x)
    g!(G, x)
    return fx, G
end
# Combined value + gradient + Hessian in one call (mutates G and H).
function fgh!(G, H, x)
    fx = f(x)
    g!(G, x)
    h!(H, x)
    return fx, G, H
end
# Bundle the kernels; trailing `nothing`s are unused ScalarObjective slots.
obj = ScalarObjective(f, g!, fg, fgh!, h!, nothing, nothing, nothing)
| [
3500,
19530,
28813,
198,
3500,
399,
6561,
349,
690,
198,
198,
2,
3351,
282,
283,
43,
31166,
10267,
425,
198,
2,
38469,
43,
31166,
10267,
425,
198,
198,
31,
13,
2746,
7,
87,
11,
279,
8,
796,
279,
58,
16,
60,
1635,
1033,
32590,
87... | 2.297872 | 2,256 |
export body!, content!, loadcss!, loadjs!, load!, importhtml!
# Replace the contents of the DOM node matched by CSS selector `sel` in
# window/page `o` with the HTML string `html`. With `fade = true` the swap
# goes through `Blink.fill` (animated); otherwise innerHTML is set directly.
content!(o, sel, html::AbstractString; fade = true) =
  fade ?
    @js_(o, Blink.fill($sel, $html)) :
    @js_ o document.querySelector($sel).innerHTML = $html
# Fallback for arbitrary displayable objects: render to an HTML string first,
# then defer to the AbstractString method for the actual DOM update.
function content!(o, sel, html; fade = true)
  rendered = stringmime(MIME"text/html"(), html)
  return content!(o, sel, rendered; fade = fade)
end
body!(w, html; fade = true) = content!(w, "body", html, fade = fade)
# Inject a stylesheet <link> tag pointing at `url` into the page head of `w`.
# Fire-and-forget (`@js_` does not wait for the JS result).
function loadcss!(w, url)
  @js_ w begin
    @var link = document.createElement("link")
    link.type = "text/css"
    link.rel = "stylesheet"
    link.href = $url
    document.head.appendChild(link)
  end
end
# Inject an HTML Import (<link rel="import">) for `url` into the page head.
# With `async=true` the link is added fire-and-forget; otherwise the call
# blocks (`@js`) on a Promise that resolves on load and rejects on error.
function importhtml!(w, url; async=false)
  if async
    @js_ w begin
      @var link = document.createElement("link")
      link.rel = "import"
      link.href = $url
      document.head.appendChild(link)
    end
  else
    @js w begin
      @new Promise(function (resolve, reject)
        @var link = document.createElement("link")
        link.rel = "import"
        link.href = $url
        link.onload = (e) -> resolve(true)
        link.onerror = (e) -> reject(false)
        document.head.appendChild(link)
      end)
    end
  end
end
# Inject a <script src=`url`> tag into the page head of `w` and block until
# it loads; a load failure rejects the Promise with a JSLoadError-style dict
# naming the script that failed.
function loadjs!(w, url)
  @js w @new Promise(function (resolve, reject)
    @var script = document.createElement("script")
    script.src = $url
    script.onload = resolve
    script.onerror = (e) -> reject(
      Dict("name"=>"JSLoadError",
           "message"=>"failed to load " + this.src)
    )
    document.head.appendChild(script)
  end)
end
isurl(f) = ismatch(r"^https?://", f)
# Load an asset into window `w`, dispatching on its file extension.
# Local (non-URL) files are first registered as served resources and then
# referenced by basename.
function load!(w, file)
  if !isurl(file)
    resource(file)
    file = basename(file)
  end
  ext = Mux.extension(file)
  ext == "js"   && return loadjs!(w, file)
  ext == "css"  && return loadcss!(w, file)
  ext == "html" && return importhtml!(w, file)
  error("Blink: Unsupported file type")
end
| [
39344,
1767,
28265,
2695,
28265,
3440,
25471,
28265,
3440,
8457,
28265,
3440,
28265,
848,
1506,
20369,
0,
198,
198,
11299,
0,
7,
78,
11,
384,
75,
11,
27711,
3712,
23839,
10100,
26,
22100,
796,
2081,
8,
796,
198,
220,
22100,
5633,
198,... | 2.214529 | 881 |
<reponame>SyxP/LightGraphs.jl<filename>test/degeneracy.jl
@testset "Decomposition" begin
    # k-core / k-shell / k-crust / k-corona on a fixture graph with a known
    # degeneracy structure: per the core-number assertion below, vertices 1:8
    # form the 3-core, 9:16 the 2-shell, 17:20 have core number 1, and 21 is 0.
    d = loadgraph(joinpath(testdir, "testdata", "graph-decomposition.jgz"))
    for g in testgraphs(d)
        corenum = @inferred(core_number(g))
        @test corenum == [3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 0]
        # Each decomposition is checked both with a freshly-computed core
        # number and with the precomputed `corenum` keyword.
        @test @inferred(k_core(g)) == k_core(g, corenum=corenum) == [1:8;]
        @test @inferred(k_core(g, 2)) == k_core(g, 2, corenum=corenum) == [1:16;]
        @test length(k_core(g, 4)) == 0
        @test @inferred(k_shell(g)) == k_shell(g, corenum=corenum) == [1:8;]
        @test @inferred(k_shell(g, 2)) == k_shell(g, 2, corenum=corenum) == [9:16;]
        @test length(k_shell(g, 4)) == 0
        @test @inferred(k_crust(g)) == k_crust(g, corenum=corenum) == [9:21;]
        @test @inferred(k_crust(g, 2)) == k_crust(g, 2, corenum=corenum) == [9:21;]
        @test @inferred(k_crust(g, 4, corenum=corenum)) == [1:21;]
        @test @inferred(k_corona(g, 1)) == k_corona(g, 1, corenum=corenum) == [17:20;]
        @test @inferred(k_corona(g, 2)) == [10, 12, 13, 14, 15, 16]
        # After adding a self-loop every entry point must throw.
        add_edge!(g, 1, 1)
        @test_throws ArgumentError k_core(g)
        @test_throws ArgumentError k_shell(g)
        @test_throws ArgumentError k_crust(g)
        @test_throws ArgumentError k_corona(g, 1)
    end
end
| [
27,
7856,
261,
480,
29,
13940,
87,
47,
14,
15047,
37065,
82,
13,
20362,
27,
34345,
29,
9288,
14,
13500,
877,
1590,
13,
20362,
198,
31,
9288,
2617,
366,
10707,
296,
9150,
1,
2221,
198,
220,
220,
220,
288,
796,
3440,
34960,
7,
22179... | 1.923513 | 706 |
# Integration tests for HTTP HEAD handling: each sub-testset boots a real
# Genie server on a random port, issues requests via HTTP.jl, then tears the
# server down. The `try ... catch ex; ex.response; end` pattern keeps the
# response object even when HTTP.jl throws on a non-2xx status.
@safetestset "HEAD requests" begin
  @safetestset "HEAD requests should be by default handled by GET" begin
    using Genie
    using HTTP
    port = nothing
    port = rand(8500:8900)
    route("/") do
      "GET request"
    end
    server = up(port)
    response = try
      HTTP.request("GET", "http://127.0.0.1:$port", ["Content-Type" => "text/html"])
    catch ex
      ex.response
    end
    @test response.status == 200
    @test String(response.body) == "GET request"
    response = try
      HTTP.request("HEAD", "http://127.0.0.1:$port", ["Content-Type" => "text/html"])
    catch ex
      ex.response
    end
    @test response.status == 200
    # HEAD falls back to the GET route but must carry an empty body.
    @test String(response.body) == ""
    down()
    sleep(1)
    server = nothing
    port = nothing
  end;
  @safetestset "HEAD requests have no body" begin
    using Genie
    using HTTP
    port = nothing
    port = rand(8500:8900)
    route("/") do
      "Hello world"
    end
    # Even an explicit HEAD route returning content must have its body stripped.
    route("/", method = HEAD) do
      "Hello world"
    end
    server = up(port; open_browser = false)
    response = try
      HTTP.request("GET", "http://127.0.0.1:$port", ["Content-Type" => "text/html"])
    catch ex
      ex.response
    end
    @test response.status == 200
    @test String(response.body) == "Hello world"
    response = try
      HTTP.request("HEAD", "http://127.0.0.1:$port", ["Content-Type" => "text/html"])
    catch ex
      ex.response
    end
    @test response.status == 200
    @test isempty(String(response.body)) == true
    down()
    sleep(1)
    server = nothing
    port = nothing
  end;
  @safetestset "HEAD requests should overwrite GET" begin
    using Genie
    using HTTP
    port = nothing
    port = rand(8500:8900)
    # Side-channel recording which route actually served the request.
    request_method = ""
    route("/", named = :get_root) do
      request_method = "GET"
      "GET request"
    end
    route("/", method = "HEAD", named = :head_root) do
      request_method = "HEAD"
      "HEAD request"
    end
    server = up(port)
    sleep(1)
    response = try
      HTTP.request("GET", "http://127.0.0.1:$port", ["Content-Type" => "text/html"])
    catch ex
      ex.response
    end
    @test response.status == 200
    @test request_method == "GET"
    response = try
      HTTP.request("HEAD", "http://127.0.0.1:$port", ["Content-Type" => "text/html"])
    catch ex
      ex.response
    end
    @test response.status == 200
    # The dedicated HEAD route (not the GET one) must have served the request.
    @test request_method == "HEAD"
    down()
    sleep(1)
    server = nothing
    port = nothing
  end;
end;
| [
31,
49585,
316,
395,
2617,
366,
37682,
7007,
1,
2221,
198,
220,
2488,
49585,
316,
395,
2617,
366,
37682,
7007,
815,
307,
416,
4277,
12118,
416,
17151,
1,
2221,
198,
220,
220,
220,
1262,
49405,
198,
220,
220,
220,
1262,
14626,
628,
2... | 2.425097 | 1,028 |
<filename>tutorials/limitCycleOscillation/simRun.jl
push!(LOAD_PATH,"../../src/")
import UNSflow

# --- Initial kinematic state of the 2-DOF (pitch/plunge) section ---
alpha_init = 10. *pi/180   # initial pitch angle [rad]
alphadot_init = 0.         # initial pitch rate
h_init = 0.                # initial plunge
hdot_init = 0.             # initial plunge rate
u = 0.467                  # freestream speed parameter
# NOTE(review): `udot` is never used below (KinemPar2DOF takes only alpha, h,
# their rates and u) — kept for reference; confirm whether it can be dropped.
udot = 0
kinem = UNSflow.KinemPar2DOF(alpha_init, h_init, alphadot_init, hdot_init, u)

# --- Structural parameters of the aeroelastic section ---
x_alpha = 0.05
r_alpha = 0.5
kappa = 0.05
w_alpha = 1.
w_h = 1.
w_alphadot = 0.
w_hdot = 0.
# Cubic stiffness coefficients (linear term 1, cubic term 0 => linear springs).
cubic_h_1 = 1.
cubic_h_3 = 0.
cubic_alpha_1 = 1.
cubic_alpha_3 = 0.
strpar = UNSflow.TwoDOFPar(x_alpha, r_alpha, kappa, w_alpha, w_h, w_alphadot, w_hdot, cubic_h_1, cubic_h_3, cubic_alpha_1, cubic_alpha_3)

#Dummy kinematic definitions to initialise surface
alphadef = UNSflow.ConstDef(alpha_init)
hdef = UNSflow.ConstDef(h_init)
udef = UNSflow.ConstDef(1.) #This is relative to uref
startkinem = UNSflow.KinemDef(alphadef, hdef, udef)

lespcrit = [0.11;]   # critical leading-edge suction parameter
pvt = 0.35           # pitch-axis location (fraction of chord)
c = 1.               # chord
surf = UNSflow.TwoDSurf("FlatPlate", pvt, startkinem, lespcrit, c=c, uref=u)
curfield = UNSflow.TwoDFlowField()

# --- Time-marching setup ---
dtstar = 0.015
nsteps = 50000
t_tot = nsteps * dtstar / u
startflag = 0
writeflag = 0
# Fix: the original assigned `writeInterval = dtstar * nsteps/20.` and then
# immediately overwrote it before any use — that dead assignment is removed.
writeInterval = t_tot/20.
delvort = UNSflow.delSpalart(500, 12, 1e-5)

mat, surf, curfield = UNSflow.ldvm2DOF(surf, curfield, strpar, kinem, nsteps, dtstar,startflag, writeflag, writeInterval, delvort)

UNSflow.makeForcePlots2D()

#cleanWrite()
| [
27,
34345,
29,
83,
44917,
82,
14,
32374,
20418,
2375,
46,
22360,
341,
14,
14323,
10987,
13,
20362,
198,
14689,
0,
7,
35613,
62,
34219,
553,
40720,
40720,
10677,
14,
4943,
198,
11748,
4725,
50,
11125,
198,
198,
26591,
62,
15003,
796,
... | 2.144246 | 617 |
<reponame>albert-de-montserrat/Persephone
# Bundles element-to-DoF connectivity with the total DoF count.
# `N` is the number of DoFs per element, `T` the integer index type.
struct DoFHandler{N,T}
    DoF::Vector{NTuple{N,T}}   # per-element tuples of global DoF indices
    nDofs::T                   # total number of global DoFs
end
# Index the handler by element number, returning that element's DoF tuple.
# Generalization: one method on `Integer` replaces the three duplicated
# Int64/Int32/Int8 methods (and also covers the remaining integer types).
Base.getindex(a::DoFHandler, I::Integer) = a.DoF[I]
function DoFs_Thermal(EL2NOD, nnodel)
    # Thermal problem: one DoF per node, `nnodel` nodes per element column.
    connectivity = [ntuple(row -> EL2NOD[row, el], nnodel) for el in axes(EL2NOD, 2)]
    # Highest node id among the rows actually used = number of thermal DoFs.
    ndofs = @views maximum(EL2NOD[1:nnodel, :])
    return DoFHandler(connectivity, ndofs)
end
function DoFs_Pressure(e2nP)
    # Pressure lives on the 3 corner nodes of each element.
    connectivity = [ntuple(row -> e2nP[row, el], 3) for el in axes(e2nP, 2)]
    return DoFHandler(connectivity, maximum(e2nP))
end
function DoFs_Velocity(EL2NOD)
    # Velocity has two DoFs per node: node n -> (2n-1, 2n) for the two
    # components, interleaved over the element's 6 nodes (12 DoFs/element).
    # Improvement: compute the per-element indices directly instead of
    # materialising a temporary (nel × 12) Int64 matrix; this removes an
    # allocation pass and keeps the index eltype consistent with
    # `eltype(EL2NOD)` instead of forcing Int64.
    DoF = [ntuple(j -> isodd(j) ? 2 * EL2NOD[(j + 1) >> 1, el] - 1 :
                                  2 * EL2NOD[j >> 1, el], 12)
           for el in axes(EL2NOD, 2)]
    nDofs = 2 * maximum(EL2NOD)
    return DoFHandler(DoF, nDofs)
end
| [
27,
7856,
261,
480,
29,
282,
4835,
12,
2934,
12,
8691,
2655,
10366,
14,
5990,
325,
4862,
198,
7249,
2141,
37,
25060,
90,
45,
11,
51,
92,
198,
220,
220,
220,
2141,
37,
3712,
38469,
90,
11251,
29291,
90,
45,
11,
51,
11709,
198,
22... | 1.845679 | 486 |
<reponame>Keno/AbstractTrees.jl
using AbstractTrees
using Test
@testset "Array" begin
    # Nested Vector{Any} as a tree: arrays are internal nodes, scalars leaves.
    tree = Any[1,Any[2,3]]
    @test collect(Leaves(tree)) == [1,2,3]
    @test collect(Leaves(tree)) isa Vector{Int}
    @test collect(PostOrderDFS(tree)) == Any[1,2,3,Any[2,3],Any[1,Any[2,3]]]
    @test collect(StatelessBFS(tree)) == Any[Any[1,Any[2,3]],1,Any[2,3],2,3]
    @test treesize(tree) == 5
    @test treebreadth(tree) == 3
    @test treeheight(tree) == 2
    # ischild/isdescendant/intree are identity-based, not equality-based.
    @test ischild(1, tree)
    @test !ischild(2, tree)
    @test ischild(tree[2], tree)
    @test !ischild(copy(tree[2]), tree) # Should work on identity, not equality
    @test isdescendant(1, tree)
    @test isdescendant(2, tree)
    @test !isdescendant(4, tree)
    @test isdescendant(tree[2], tree)
    @test !isdescendant(copy(tree[2]), tree)
    # A node is not its own descendant, but `intree` includes the root itself.
    @test !isdescendant(tree, tree)
    @test intree(1, tree)
    @test intree(2, tree)
    @test !intree(4, tree)
    @test intree(tree[2], tree)
    @test !intree(copy(tree[2]), tree)
    @test intree(tree, tree)
    tree2 = Any[Any[1,2],Any[3,'4']]
    @test collect(PreOrderDFS(tree2)) == Any[tree2,Any[1,2],1,2,Any[3,'4'],3,'4']
    @test treesize(tree2) == 7
    @test treebreadth(tree2) == 4
    @test treeheight(tree2) == 2
    # An empty array is a single-node tree (its own only "leaf").
    tree3 = []
    for itr in [Leaves, PreOrderDFS, PostOrderDFS]
        @test collect(itr(tree3)) == [tree3]
    end
    @test treesize(tree3) == 1
    @test treebreadth(tree3) == 1
    @test treeheight(tree3) == 0
    @test collect(PostOrderDFS([])) == Any[[]]
end
@testset "Pair" begin
    # A Pair is a tree whose only child is `second`; traversal recurses right.
    tree = 1=>(3=>4)
    @test collect(PreOrderDFS(tree)) == Any[tree, tree.second, 4]
end
@testset "Expr" begin
    # Expressions are trees over their `args`; leaves are symbols and literals.
    expr = :(foo(x^2 + 3))
    @test children(expr) == expr.args
    @test collect(Leaves(expr)) == [:foo, :+, :^, :x, 2, 3]
end
@testset "Array-Dict" begin
    # Mixed containers: Dicts contribute their `key=>value` pairs as subtrees.
    t = [1, 2, Dict("a"=>3, "b"=>[4,5])]
    @test Set(Leaves(t)) == Set(1:5) # don't want to guarantee ordering because of dict
    t = [1, Dict("a"=>2)]
    @test collect(Leaves(t)) == [1, 2]
    @test collect(PreOrderDFS(t)) == [t, 1, t[2], "a"=>2, 2]
    @test collect(PostOrderDFS(t)) == [1, 2, "a"=>2, t[2], t]
end
@testset "treemap" begin
    # treemap's function returns (newvalue, children) per node.
    a = [1,[2,[3]]]
    # Internal nodes map to `nothing`; leaf values are incremented.
    f = n -> n isa AbstractArray ? (nothing, children(n)) : (n+1, children(n))
    b = treemap(f, a)
    @test collect(nodevalues(PreOrderDFS(b))) == [nothing, 2, nothing, 3, nothing, 4]
    # Second mapping prepends an extra 0 child to every internal node.
    g = n -> isempty(children(n)) ? (nodevalue(n), ()) : (nothing, [0; children(n)])
    b = treemap(g, a)
    @test nodevalue.(PostOrderDFS(b)) == [0, 1, 0, 2, 0, 3, nothing, nothing, nothing]
end
| [
27,
7856,
261,
480,
29,
42,
23397,
14,
23839,
51,
6037,
13,
20362,
198,
3500,
27741,
51,
6037,
198,
3500,
6208,
198,
198,
31,
9288,
2617,
366,
19182,
1,
2221,
198,
220,
220,
220,
5509,
796,
4377,
58,
16,
11,
7149,
58,
17,
11,
18... | 2.182823 | 1,176 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.