package io.github.mandar2812.dynaml.models.gp

-import io.github.mandar2812.dynaml.kernels.LocalScalarKernel
+import breeze.linalg.{DenseMatrix, det, diag}
+import io.github.mandar2812.dynaml.algebra.{PartitionedMatrix, PartitionedVector}
+import io.github.mandar2812.dynaml.analysis.{DifferentiableMap, PartitionedVectorField, PushforwardMap}
+import io.github.mandar2812.dynaml.models.{ContinuousProcess, SecondOrderProcess}
+import io.github.mandar2812.dynaml.optimization.GloballyOptWithGrad
+import io.github.mandar2812.dynaml.pipes.DataPipe
+import io.github.mandar2812.dynaml.probability.{E, MeasurableDistrRV}
+import io.github.mandar2812.dynaml.utils

import scala.reflect.ClassTag

/**
  * Created by mandar on 02/01/2017.
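  *
  * A warped Gaussian Process: wraps an underlying GP regression model `p`
  * and pushes its Gaussian predictive distribution forward through the
  * element-wise warping function `wFuncT`.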
  */
-abstract class WarpedGP[T, I](
-  cov: LocalScalarKernel[I], n: LocalScalarKernel[I],
-  data: T, num: Int)(implicit ev: ClassTag[I]) extends
-  AbstractGPRegressionModel[T, I](cov, n, data, num) {
+abstract class WarpedGP[T, I](p: AbstractGPRegressionModel[T, I])(
+  wFuncT: PushforwardMap[Double, Double, Double])(
+  implicit ev: ClassTag[I], pf: PartitionedVectorField)
+  extends ContinuousProcess[
+    T, I, Double,
+    MeasurableDistrRV[PartitionedVector, PartitionedVector, PartitionedMatrix]]
+  with SecondOrderProcess[
+    T, I, Double, Double, DenseMatrix[Double],
+    MeasurableDistrRV[PartitionedVector, PartitionedVector, PartitionedMatrix]]
+  with GloballyOptWithGrad {

+
+  // Define the default determinant implementation
+  implicit val detImpl = DataPipe(
+    (m: PartitionedMatrix) => m.filterBlocks(c => c._1 == c._2).map(c => det(c._2)).product)
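+  // (the product of the determinants of the diagonal blocks equals the full
+  // determinant when the matrix is block-diagonal, as the Jacobians built
+  // below are)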
+
+  // Define the push-forward map for the multivariate case
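+  // (the warping acts element-wise, so the Jacobian of the inverse map is
+  // block-diagonal: each diagonal block is diag of the per-element derivatives
+  // wFuncT.i.J, all off-diagonal blocks are zero, and equally sized row
+  // blocks are assumed)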
+  val wFuncPredDistr: PushforwardMap[PartitionedVector, PartitionedVector, PartitionedMatrix] =
+    PushforwardMap(
+      DataPipe((v: PartitionedVector) => v.map(c => (c._1, c._2.map(wFuncT.run)))),
+      DifferentiableMap(
+        (v: PartitionedVector) => v.map(c => (c._1, c._2.map(wFuncT.i.run))),
+        (v: PartitionedVector) => new PartitionedMatrix(
+          v._data.map(l => ((l._1, l._1), diag(l._2.map(wFuncT.i.J)))) ++
+            utils.combine(Seq(
+              (0 until v.rowBlocks.toInt).toList,
+              (0 until v.rowBlocks.toInt).toList))
+              .map(c => (c.head.toLong, c.last.toLong))
+              .filter(c => c._2 != c._1)
+              .map(c => (c, DenseMatrix.zeros[Double](
+                v.rows.toInt/v.rowBlocks.toInt, v.rows.toInt/v.rowBlocks.toInt)))
+              .toStream,
+          num_cols = v.rows, num_rows = v.rows))
+    )
+
+  /**
+    * Draw three predictions from the posterior predictive distribution:
+    * 1) Mean or MAP estimate Y
+    * 2) Y- : The lower error bar estimate (mean - sigma*stdDeviation)
+    * 3) Y+ : The upper error bar estimate (mean + sigma*stdDeviation)
+    **/
+  override def predictionWithErrorBars[U <: Seq[I]](testData: U, sigma: Int) = ???
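+  // A possible implementation (sketch; assumes p.predictionWithErrorBars
+  // returns (point, mean, lower, upper) tuples and that wFuncT is monotone
+  // increasing):
+  //   p.predictionWithErrorBars(testData, sigma)
+  //     .map(c => (c._1, wFuncT(c._2), wFuncT(c._3), wFuncT(c._4)))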
+
+  /**
+    * Mean function: takes a member of the index set (input)
+    * and returns the mean of the process distribution at that input.
+    **/
+  override val mean = p.mean
+
+  /**
+    * Underlying covariance function of the Gaussian Process.
+    **/
+  override val covariance = p.covariance
+
+  /**
+    * Stores the names of the hyper-parameters.
+    **/
+  override protected var hyper_parameters: List[String] = p._hyper_parameters
+
+  /**
+    * A Map which stores the current state of the system.
+    **/
+  override protected var current_state: Map[String, Double] = p._current_state
+
+  /**
+    * Calculates the energy of the configuration; most global
+    * optimization algorithms aim to find an approximate value of
+    * the hyper-parameters for which this function is minimized.
+    *
+    * @param h The value of the hyper-parameters in the configuration space
+    * @param options Optional parameters about configuration
+    * @return Configuration Energy E(h)
+    **/
+  override def energy(h: Map[String, Double], options: Map[String, String]) = p.energy(h, options)
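+  // Note: this delegates to the latent model's marginal likelihood; a full
+  // warped-GP energy would also include the log-Jacobian of wFuncT evaluated
+  // at the training targets (cf. Snelson et al., "Warped Gaussian Processes").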
+
+  /**
+    * Calculates the posterior predictive distribution for
+    * a particular set of test data points.
+    *
+    * @param test A Sequence or Sequence-like data structure
+    *             storing the values of the input patterns.
+    **/
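+  // The latent Gaussian posterior is pushed forward through wFuncPredDistr,
+  // giving the predictive distribution of the warped (observed) outputs.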
+  override def predictiveDistribution[U <: Seq[I]](test: U) = wFuncPredDistr -> p.predictiveDistribution(test)
+
+  /**
+    * Convert from the underlying data structure to
+    * Seq[(I, Y)] where I is the index set of the GP
+    * and Y is the value/label type.
+    **/
+  override def dataAsSeq(data: T) = p.dataAsSeq(data)
+
+  /**
+    * The training data.
+    **/
+  override protected val g: T = p.data
+
+  /**
+    * Predict the value of the target
+    * variable at a given point.
+    **/
+  override def predict(point: I) = wFuncT(p.predictionWithErrorBars(Seq(point), 1).head._2)
}
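+
+// Usage sketch (illustrative, with assumed names): an exponential warping,
+// giving a log-Gaussian predictive distribution. `gpModel` stands for an
+// existing AbstractGPRegressionModel; suitable implicits (ClassTag,
+// PartitionedVectorField, a scalar determinant DataPipe) are assumed in scope.
+//
+//   val expWarp = PushforwardMap(
+//     DataPipe((x: Double) => math.exp(x)),  // forward (warping) map
+//     DifferentiableMap(
+//       (y: Double) => math.log(y),          // inverse map
+//       (y: Double) => 1.0/y))               // derivative of the inverse
+//
+//   val warpedModel = new WarpedGP(gpModel)(expWarp) {
+//     // supply predictionWithErrorBars here
+//   }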