@@ -5,7 +5,7 @@ import io.github.mandar2812.dynaml.algebra.{PartitionedMatrix, PartitionedVector
 import io.github.mandar2812.dynaml.analysis.{DifferentiableMap, PartitionedVectorField, PushforwardMap}
 import io.github.mandar2812.dynaml.models.{ContinuousProcess, SecondOrderProcess}
 import io.github.mandar2812.dynaml.optimization.GloballyOptWithGrad
-import io.github.mandar2812.dynaml.pipes.DataPipe
+import io.github.mandar2812.dynaml.pipes.{DataPipe, Encoder}
 import io.github.mandar2812.dynaml.probability.{E, MeasurableDistrRV}
 import io.github.mandar2812.dynaml.utils

@@ -15,8 +15,10 @@ import scala.reflect.ClassTag
   * Created by mandar on 02/01/2017.
   */
 abstract class WarpedGP[T, I](p: AbstractGPRegressionModel[T, I])(
-  wFuncT: PushforwardMap[Double, Double, Double])(
-  implicit ev: ClassTag[I], pf: PartitionedVectorField)
+  warpingFunc: PushforwardMap[Double, Double, Double])(
+  implicit ev: ClassTag[I],
+  pf: PartitionedVectorField,
+  transform: Encoder[T, Seq[(I, Double)]])
   extends ContinuousProcess[
     T, I, Double,
     MeasurableDistrRV[PartitionedVector, PartitionedVector, PartitionedMatrix]]
@@ -25,6 +27,41 @@ abstract class WarpedGP[T, I](p: AbstractGPRegressionModel[T, I])(
     MeasurableDistrRV[PartitionedVector, PartitionedVector, PartitionedMatrix]]
   with GloballyOptWithGrad {

+  /**
+    * The training data
+    **/
+  override protected val g: T = p.data
+
+  private val dataProcessPipe = transform >
+    DataPipe((s: Seq[(I, Double)]) => s.map(pattern => (pattern._1, warpingFunc.i(pattern._2)))) >
+    transform.i
+
+  val underlyingProcess =
+    AbstractGPRegressionModel[T, I](
+      p.covariance, p.noiseModel)(
+      dataProcessPipe(p.data), p.npoints)(transform, ev)
+
+
+  /**
+    * Mean Function: Takes a member of the index set (input)
+    * and returns the corresponding mean of the distribution
+    * corresponding to input.
+    **/
+  override val mean = p.mean
+  /**
+    * Underlying covariance function of the
+    * Gaussian Processes.
+    **/
+  override val covariance = p.covariance
+  /**
+    * Stores the names of the hyper-parameters
+    **/
+  override protected var hyper_parameters: List[String] = underlyingProcess._hyper_parameters
+  /**
+    * A Map which stores the current state of
+    * the system.
+    **/
+  override protected var current_state: Map[String, Double] = underlyingProcess._current_state

   // Define the default determinant implementation
   implicit val detImpl = DataPipe(
@@ -33,11 +70,11 @@ abstract class WarpedGP[T, I](p: AbstractGPRegressionModel[T, I])(
   // Define the push forward map for the multivariate case
   val wFuncPredDistr: PushforwardMap[PartitionedVector, PartitionedVector, PartitionedMatrix] =
     PushforwardMap(
-      DataPipe((v: PartitionedVector) => v.map(c => (c._1, c._2.map(wFuncT.run)))),
+      DataPipe((v: PartitionedVector) => v.map(c => (c._1, c._2.map(warpingFunc.run)))),
       DifferentiableMap(
-        (v: PartitionedVector) => v.map(c => (c._1, c._2.map(wFuncT.i.run))),
+        (v: PartitionedVector) => v.map(c => (c._1, c._2.map(warpingFunc.i.run))),
         (v: PartitionedVector) => new PartitionedMatrix(
-          v._data.map(l => ((l._1, l._1), diag(l._2.map(wFuncT.i.J)))) ++
+          v._data.map(l => ((l._1, l._1), diag(l._2.map(warpingFunc.i.J)))) ++
           utils.combine(Seq((0 until v.rowBlocks.toInt).toList, (0 until v.rowBlocks.toInt).toList))
             .map(c =>
               (c.head.toLong, c.last.toLong))
@@ -52,28 +89,10 @@ abstract class WarpedGP[T, I](p: AbstractGPRegressionModel[T, I])(
     * 2) Y- : The lower error bar estimate (mean - sigma*stdDeviation)
     * 3) Y+ : The upper error bar. (mean + sigma*stdDeviation)
     **/
-  override def predictionWithErrorBars[U <: Seq[I]](testData: U, sigma: Int) = ???
-
-  /**
-    * Mean Function: Takes a member of the index set (input)
-    * and returns the corresponding mean of the distribution
-    * corresponding to input.
-    **/
-  override val mean = p.mean
-  /**
-    * Underlying covariance function of the
-    * Gaussian Processes.
-    **/
-  override val covariance = p.covariance
-  /**
-    * Stores the names of the hyper-parameters
-    **/
-  override protected var hyper_parameters: List[String] = p._hyper_parameters
-  /**
-    * A Map which stores the current state of
-    * the system.
-    **/
-  override protected var current_state: Map[String, Double] = p._current_state
+  override def predictionWithErrorBars[U <: Seq[I]](testData: U, sigma: Int) =
+    underlyingProcess
+      .predictionWithErrorBars(testData, sigma)
+      .map(d => (d._1, warpingFunc(d._2), warpingFunc(d._3), warpingFunc(d._4)))

   /**
     * Calculates the energy of the configuration,
@@ -86,33 +105,37 @@ abstract class WarpedGP[T, I](p: AbstractGPRegressionModel[T, I])(
     * @param options Optional parameters about configuration
     * @return Configuration Energy E(h)
     **/
-  override def energy(h: Map[String, Double], options: Map[String, String]) = p.energy(h, options)
+  override def energy(h: Map[String, Double], options: Map[String, String]) = {
+    val trainingLabels = PartitionedVector(
+      dataAsSeq(g).toStream.map(_._2),
+      underlyingProcess.npoints.toLong, underlyingProcess._blockSize
+    )
+
+    detImpl(wFuncPredDistr.i.J(trainingLabels))*underlyingProcess.energy(h, options)
+  }
+

   /** Calculates posterior predictive distribution for
     * a particular set of test data points.
     *
     * @param test A Sequence or Sequence like data structure
     * storing the values of the input patters.
     **/
-  override def predictiveDistribution[U <: Seq[I]](test: U) = wFuncPredDistr -> p.predictiveDistribution(test)
+  override def predictiveDistribution[U <: Seq[I]](test: U) =
+    wFuncPredDistr -> underlyingProcess.predictiveDistribution(test)

   /**
     * Convert from the underlying data structure to
     * Seq[(I, Y)] where I is the index set of the GP
     * and Y is the value/label type.
     **/
-  override def dataAsSeq(data: T) = p.dataAsSeq(data)
-
-  /**
-    * The training data
-    **/
-  override protected val g: T = p.data
+  override def dataAsSeq(data: T) = transform(data)

   /**
     * Predict the value of the
     * target variable given a
     * point.
     *
     **/
-  override def predict(point: I) = wFuncT(p.predictionWithErrorBars(Seq(point), 1).head._2)
+  override def predict(point: I) = warpingFunc(underlyingProcess.predictionWithErrorBars(Seq(point), 1).head._2)
 }
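
For context, the new constructor expects two extra pieces of evidence: a scalar `PushforwardMap[Double, Double, Double]` (the forward warping plus a differentiable inverse whose Jacobian is used in the energy calculation) and an implicit `Encoder[T, Seq[(I, Double)]]` between the training-data container and index/label pairs. The sketch below shows how such arguments might be assembled for a log-warped GP over plain `Seq[(DenseVector[Double], Double)]` data. It is a minimal sketch, not verified against a particular DynaML version: the `Encoder(...)` factory taking a forward and a reverse `DataPipe`, the scalar `PushforwardMap`/`DifferentiableMap` arities (mirrored from the multivariate construction in this diff), and any implicit determinant/field evidence that `PushforwardMap` may require in scope are all assumptions.

```scala
import breeze.linalg.DenseVector
import io.github.mandar2812.dynaml.analysis.{DifferentiableMap, PushforwardMap}
import io.github.mandar2812.dynaml.pipes.{DataPipe, Encoder}

object WarpedGPSketch {

  // Hypothetical training-data container: a plain sequence of (input, label) pairs.
  type Data = Seq[(DenseVector[Double], Double)]

  // Scalar warping for a log-warped GP: observed labels are modelled as
  // exp(latent GP), so the inverse warping applied to labels before fitting
  // is log, and the Jacobian of that inverse is 1/x.
  // Argument shapes mirror the PushforwardMap/DifferentiableMap calls in the diff;
  // implicit evidence (e.g. a determinant pipe for the Jacobian type) may be needed.
  val logWarping = PushforwardMap(
    DataPipe((x: Double) => math.exp(x)),   // forward map: latent value -> observed label
    DifferentiableMap(
      (x: Double) => math.log(x),           // inverse map: observed label -> latent value
      (x: Double) => 1.0 / x)               // Jacobian of the inverse map
  )

  // Encoder[T, Seq[(I, Double)]] for the trivial case where T already is that Seq.
  // The Encoder(forward, reverse) factory is assumed here for illustration.
  val identityEncoder: Encoder[Data, Data] = Encoder(
    DataPipe((d: Data) => d),   // T -> Seq[(I, Double)]
    DataPipe((d: Data) => d))   // Seq[(I, Double)] -> T
}
```

With `logWarping` passed as `warpingFunc` and `identityEncoder` in implicit scope as `transform`, the model warps the training labels through `warpingFunc.i` before fitting the underlying GP and pushes predictions and error bars back through `warpingFunc`, as the overridden methods above show.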