@@ -17,8 +17,15 @@ a range defined by `scaling`.
# Keyword arguments

 - `scaling`: A scaling factor to define the range of the uniform distribution.
-   The matrix elements will be randomly chosen from the
-   range `[-scaling, scaling]`. Defaults to `0.1`.
+   The factor can be passed in three different ways:
+
+   + A single number. In this case, the matrix elements will be randomly
+     chosen from the range `[-scaling, scaling]`. This is the default option,
+     with the scaling value set to `0.1`.
+   + A tuple `(lower, upper)`. The values define the lower and upper bounds
+     of the distribution.
+   + A vector. In this case, the columns will be scaled individually by the
+     entries of the vector. The entries can be numbers or tuples, mirroring
+     the behavior described above.

# Examples

@@ -33,16 +40,68 @@ julia> res_input = scaled_rand(8, 3)
 0.0944272   0.0679244   0.0148647
-0.0799005  -0.0891089  -0.0444782
-0.0970182   0.0934286   0.03553
+
+julia> tt = scaled_rand(5, 3, scaling = (0.1, 0.15))
+5×3 Matrix{Float32}:
+ 0.13631   0.110929  0.116177
+ 0.116299  0.136038  0.119713
+ 0.11535   0.144712  0.110029
+ 0.127453  0.12657   0.147656
+ 0.139446  0.117656  0.104712
+```
+
+Example with vector:
+
+```jldoctest
+julia> tt = scaled_rand(5, 3, scaling = [0.1, 0.2, 0.3])
+5×3 Matrix{Float32}:
+  0.0452399   -0.112565   -0.105874
+ -0.0348047    0.0883044  -0.0634468
+ -0.0386004    0.157698   -0.179648
+  0.00981022   0.012559    0.271875
+  0.0577838   -0.0587553  -0.243451
+
+julia> tt = scaled_rand(5, 3, scaling = [(0.1, 0.2), (-0.2, -0.1), (0.3, 0.5)])
+5×3 Matrix{Float32}:
+ 0.17262   -0.178141  0.364709
+ 0.132598  -0.127924  0.378851
+ 0.1307    -0.110575  0.340117
+ 0.154905  -0.14686   0.490625
+ 0.178892  -0.164689  0.31885
```
"""
function scaled_rand(rng::AbstractRNG, ::Type{T}, dims::Integer...;
-        scaling::Number=T(0.1)) where {T <: Number}
+        scaling::Union{Number, Tuple, Vector}=T(0.1)) where {T <: Number}
    res_size, in_size = dims
-    layer_matrix = (DeviceAgnostic.rand(rng, T, res_size, in_size) .- T(0.5)) .*
-                   (T(2) * T(scaling))
+    layer_matrix = DeviceAgnostic.rand(rng, T, res_size, in_size)
+    apply_scale!(layer_matrix, scaling, T)
    return layer_matrix
end

+function apply_scale!(input_matrix, scaling::Number, ::Type{T}) where {T}
+    @. input_matrix = (input_matrix - T(0.5)) * (T(2) * T(scaling))
+    return input_matrix
+end
+
+function apply_scale!(input_matrix,
+        scaling::Tuple{<:Number, <:Number}, ::Type{T}) where {T}
+    lower, upper = T(scaling[1]), T(scaling[2])
+    @assert lower < upper "lower < upper required"
+    scale = upper - lower
+    @. input_matrix = input_matrix * scale + lower
+    return input_matrix
+end
+
+function apply_scale!(input_matrix,
+        scaling::AbstractVector, ::Type{T}) where {T <: Number}
+    ncols = size(input_matrix, 2)
+    @assert length(scaling) == ncols "need one scaling per column"
+    for (idx, col) in enumerate(eachcol(input_matrix))
+        apply_scale!(col, scaling[idx], T)
+    end
+    return input_matrix
+end
+

"""
    weighted_init([rng], [T], dims...;
        scaling=0.1, return_sparse=false)
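Not part of the diff: a minimal sanity check of the `apply_scale!` helpers added in the hunk above, assuming the three methods are in scope. Plain `rand` stands in for `DeviceAgnostic.rand`, since the helpers only need a matrix; the expected range follows from the affine map in the tuple method.

```julia
# Sketch only: exercises the tuple and vector branches of apply_scale! from this commit.
A = rand(Float32, 4, 2)                      # uniform samples in [0, 1)
apply_scale!(A, (0.1f0, 0.15f0), Float32)    # x -> x * (upper - lower) + lower
@assert all(0.1f0 .<= A .< 0.15f0)           # values now lie in [lower, upper)

# The vector method dispatches per column, so mixed entries (numbers and tuples) work:
B = rand(Float32, 4, 3)
apply_scale!(B, [0.1, (0.2, 0.4), 0.3], Float32)
```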
@@ -146,11 +205,11 @@ warning.
```jldoctest
julia> res_input = weighted_minimal(8, 3)
┌ Warning: Reservoir size has changed!
- │
- │ Computed reservoir size (6) does not equal the provided reservoir size (8).
- │
- │ Using computed value (6). Make sure to modify the reservoir initializer accordingly.
- │
+ │
+ │ Computed reservoir size (6) does not equal the provided reservoir size (8).
+ │
+ │ Using computed value (6). Make sure to modify the reservoir initializer accordingly.
+ │
└ @ ReservoirComputing ~/.julia/dev/ReservoirComputing/src/esn/esn_inits.jl:159
6×3 Matrix{Float32}:
 0.1  0.0  0.0
@@ -370,7 +429,7 @@ using a sine function and subsequent rows are iteratively generated
via the Chebyshev mapping. The first row is defined as:

```math
- W[1, j] = \text{amplitude} \cdot \sin(j \cdot \pi / (\text{sine_divisor}
+ W[1, j] = \text{amplitude} \cdot \sin(j \cdot \pi / (\text{sine_divisor}
    \cdot \text{n_cols}))
```

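Not part of the diff: a small sketch of the construction this docstring describes, under the assumption that later rows apply the Chebyshev map `x -> cos(p * acos(x))` to the row above (the hunk only shows the first-row formula). The parameter names below are illustrative, not the initializer's exact keywords.

```julia
# Sketch only: sine-seeded first row, then an assumed Chebyshev recurrence per column.
amplitude, sine_divisor, p = 1.0, 1.0, 2.0   # illustrative values
n_rows, n_cols = 4, 6
W = zeros(n_rows, n_cols)
W[1, :] .= [amplitude * sin(j * pi / (sine_divisor * n_cols)) for j in 1:n_cols]
for i in 1:(n_rows - 1), j in 1:n_cols
    W[i + 1, j] = cos(p * acos(W[i, j]))     # Chebyshev map; values stay in [-1, 1]
end
```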
@@ -448,7 +507,7 @@ Generate an input weight matrix using a logistic mapping [Wang2022](@cite)
The first row is initialized using a sine function:

```math
- W[1, j] = \text{amplitude} \cdot \sin(j \cdot \pi /
+ W[1, j] = \text{amplitude} \cdot \sin(j \cdot \pi /
    (\text{sine_divisor} \cdot in_size))
```

@@ -527,7 +586,7 @@ as follows:
 - The first element of the chain is initialized using a sine function:

```math
- W[1,j] = \text{amplitude} \cdot \sin( (j \cdot \pi) /
+ W[1,j] = \text{amplitude} \cdot \sin( (j \cdot \pi) /
    (\text{factor} \cdot \text{n} \cdot \text{sine_divisor}) )
```

where `j` is the index corresponding to the input and `n` is the number of inputs.
@@ -540,7 +599,7 @@ as follows:

The resulting matrix has dimensions `(factor * in_size) x in_size`, where
`in_size` corresponds to the number of columns provided in `dims`.
- If the provided number of rows does not match `factor * in_size`
+ If the provided number of rows does not match `factor * in_size`
the number of rows is overridden.

# Arguments
@@ -576,15 +635,15 @@ julia> modified_lm(20, 10; factor=2)

julia> modified_lm(12, 4; factor=3)
12×4 SparseArrays.SparseMatrixCSC{Float32, Int64} with 9 stored entries:
- ⋅   ⋅          ⋅          ⋅
- ⋅   ⋅          ⋅          ⋅
- ⋅   ⋅          ⋅          ⋅
- ⋅  0.0133075   ⋅          ⋅
- ⋅  0.0308564   ⋅          ⋅
- ⋅  0.070275    ⋅          ⋅
- ⋅   ⋅         0.0265887   ⋅
- ⋅   ⋅         0.0608222   ⋅
- ⋅   ⋅         0.134239    ⋅
+ ⋅   ⋅          ⋅          ⋅
+ ⋅   ⋅          ⋅          ⋅
+ ⋅   ⋅          ⋅          ⋅
+ ⋅  0.0133075   ⋅          ⋅
+ ⋅  0.0308564   ⋅          ⋅
+ ⋅  0.070275    ⋅          ⋅
+ ⋅   ⋅         0.0265887   ⋅
+ ⋅   ⋅         0.0608222   ⋅
+ ⋅   ⋅         0.134239    ⋅
 ⋅   ⋅          ⋅          0.0398177
 ⋅   ⋅          ⋅          0.0898457
 ⋅   ⋅          ⋅          0.192168
@@ -671,7 +730,7 @@ function rand_sparse(rng::AbstractRNG, ::Type{T}, dims::Integer...;
end

"""
-    pseudo_svd([rng], [T], dims...;
+    pseudo_svd([rng], [T], dims...;
        max_value=1.0, sparsity=0.1, sorted=true, reverse_sort=false,
        return_sparse=false)

@@ -821,15 +880,15 @@ closest valid order is used.

```jldoctest
julia> res_matrix = chaotic_init(8, 8)
- ┌ Warning:
- │
+ ┌ Warning:
+ │
│ Adjusting reservoir matrix order:
│ from 8 (requested) to 4
- │ based on computed bit precision = 1.
- │
+ │ based on computed bit precision = 1.
+ │
└ @ ReservoirComputing ~/.julia/dev/ReservoirComputing/src/esn/esn_inits.jl:805
4×4 SparseArrays.SparseMatrixCSC{Float32, Int64} with 6 stored entries:
-  ⋅         -0.600945   ⋅          ⋅
+  ⋅         -0.600945   ⋅          ⋅
  ⋅           ⋅         0.132667   2.21354
  ⋅         -2.60383    ⋅         -2.90391
-0.578156     ⋅          ⋅          ⋅
@@ -1148,7 +1207,7 @@ function delay_line_backward(rng::AbstractRNG, ::Type{T}, dims::Integer...;
end

"""
-    cycle_jumps([rng], [T], dims...;
+    cycle_jumps([rng], [T], dims...;
        cycle_weight=0.1, jump_weight=0.1, jump_size=3, return_sparse=false,
        cycle_kwargs=(), jump_kwargs=())

@@ -1234,7 +1293,7 @@ function cycle_jumps(rng::AbstractRNG, ::Type{T}, dims::Integer...;
end

"""
-    simple_cycle([rng], [T], dims...;
+    simple_cycle([rng], [T], dims...;
        weight=0.1, return_sparse=false,
        kwargs...)

@@ -1303,7 +1362,7 @@ function simple_cycle(rng::AbstractRNG, ::Type{T}, dims::Integer...;
end

"""
-    double_cycle([rng], [T], dims...;
+    double_cycle([rng], [T], dims...;
        cycle_weight=0.1, second_cycle_weight=0.1,
        return_sparse=false)

@@ -1358,7 +1417,7 @@ function double_cycle(rng::AbstractRNG, ::Type{T}, dims::Integer...;
end

"""
-    true_double_cycle([rng], [T], dims...;
+    true_double_cycle([rng], [T], dims...;
        cycle_weight=0.1, second_cycle_weight=0.1,
        return_sparse=false)

@@ -1427,7 +1486,7 @@ function true_double_cycle(rng::AbstractRNG, ::Type{T}, dims::Integer...;
end

@doc raw"""
-    selfloop_cycle([rng], [T], dims...;
+    selfloop_cycle([rng], [T], dims...;
        cycle_weight=0.1, selfloop_weight=0.1,
        return_sparse=false, kwargs...)

@@ -1518,7 +1577,7 @@ function selfloop_cycle(rng::AbstractRNG, ::Type{T}, dims::Integer...;
end

@doc raw"""
-    selfloop_feedback_cycle([rng], [T], dims...;
+    selfloop_feedback_cycle([rng], [T], dims...;
        cycle_weight=0.1, selfloop_weight=0.1,
        return_sparse=false)

@@ -1601,7 +1660,7 @@ function selfloop_feedback_cycle(rng::AbstractRNG, ::Type{T}, dims::Integer...;
end

@doc raw"""
-    selfloop_delayline_backward([rng], [T], dims...;
+    selfloop_delayline_backward([rng], [T], dims...;
        weight=0.1, selfloop_weight=0.1, fb_weight=0.1,
        fb_shift=2, return_sparse=false, fb_kwargs=(),
        selfloop_kwargs=(), delay_kwargs=())
@@ -1707,7 +1766,7 @@ function selfloop_delayline_backward(rng::AbstractRNG, ::Type{T}, dims::Integer.
end

@doc raw"""
-    selfloop_forward_connection([rng], [T], dims...;
+    selfloop_forward_connection([rng], [T], dims...;
        weight=0.1, selfloop_weight=0.1,
        return_sparse=false, selfloop_kwargs=(),
        delay_kwargs=())
@@ -1749,7 +1808,7 @@ W_{i,j} =
    Default is 0.1.
  - `return_sparse`: flag for returning a `sparse` matrix.
    Default is `false`.
-  - `delay_kwargs` and `selfloop_kwargs`: named tuples that control the kwargs for the
+  - `delay_kwargs` and `selfloop_kwargs`: named tuples that control the kwargs for the
    delay line weight and self loop weights respectively. The kwargs are as follows:
    + `sampling_type`: Sampling that decides the distribution of `weight` negative numbers.
      If set to `:no_sample` the sign is unchanged. If set to `:bernoulli_sample!` then each
@@ -1801,7 +1860,7 @@ function selfloop_forward_connection(rng::AbstractRNG, ::Type{T}, dims::Integer.
end

@doc raw"""
-    forward_connection([rng], [T], dims...;
+    forward_connection([rng], [T], dims...;
        weight=0.1, selfloop_weight=0.1,
        return_sparse=false)

@@ -1887,8 +1946,8 @@ end
        return_sparse=false)

Creates a block‐diagonal matrix consisting of square blocks of size
- `block_size` along the main diagonal [Ma2023](@cite).
- Each block may be filled with
+ `block_size` along the main diagonal [Ma2023](@cite).
+ Each block may be filled with

  - a single scalar
  - a vector of per‐block weights (length = number of blocks)
@@ -1897,21 +1956,21 @@ Each block may be filled with
```math
W_{i,j} =
\begin{cases}
- w_b, & \text{if }\left\lfloor\frac{i-1}{s}\right\rfloor = \left\lfloor\frac{j-1}{s}\right\rfloor = b,\;
+ w_b, & \text{if }\left\lfloor\frac{i-1}{s}\right\rfloor = \left\lfloor\frac{j-1}{s}\right\rfloor = b,\;
    s = \text{block\_size},\; b=0,\dots,nb-1, \\
0, & \text{otherwise,}
\end{cases}
```

# Arguments

-  - `rng`: Random number generator. Default is `Utils.default_rng()`.
-  - `T`: Element type of the matrix. Default is `Float32`.
+  - `rng`: Random number generator. Default is `Utils.default_rng()`.
+  - `T`: Element type of the matrix. Default is `Float32`.
  - `dims`: Dimensions of the output matrix (must be two-dimensional).

# Keyword arguments

-  - `weight`:
+  - `weight`:
    - scalar: every block is filled with that value
    - vector: length = number of blocks, one constant per block
    Default is `1.0`.
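Not part of the diff: a minimal sketch of the block-diagonal fill described by the formula above, for the scalar-`weight` case only. The function name is made up for illustration, and it assumes a square matrix whose side is a multiple of `block_size`; it is not the library's implementation.

```julia
# Sketch only: scalar-weight case of the block-diagonal layout in the formula above.
function block_diagonal_sketch(n::Int, block_size::Int, weight::Real)
    W = zeros(Float32, n, n)
    nb = div(n, block_size)                       # number of blocks b = 0, ..., nb - 1
    for b in 0:(nb - 1)
        rows = (b * block_size + 1):((b + 1) * block_size)
        W[rows, rows] .= Float32(weight)          # fill the b-th diagonal block
    end
    return W
end

block_diagonal_sketch(6, 2, 1.0)                  # three 2×2 blocks of ones
```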