 import random
 from collections import defaultdict
 from functools import reduce
+import numpy as np
 
 
 # ______________________________________________________________________________
@@ -687,28 +688,42 @@ def forward_backward(HMM, ev):
 
 def viterbi(HMM, ev):
     """[Equation 15.11]
-    Viterbi algorithm to find the most likely sequence. Computes the best path,
+    Viterbi algorithm to find the most likely sequence. Computes the best path and the corresponding probabilities,
     given an HMM model and a sequence of observations."""
     t = len(ev)
+    ev = ev.copy()
     ev.insert(0, None)
 
     m = [[0.0, 0.0] for _ in range(len(ev) - 1)]
 
     # the recursion is initialized with m1 = forward(P(X0), e1)
     m[0] = forward(HMM, HMM.prior, ev[1])
+    # keep track of maximizing predecessors
+    backtracking_graph = []
 
     for i in range(1, t):
         m[i] = element_wise_product(HMM.sensor_dist(ev[i + 1]),
                                     [max(element_wise_product(HMM.transition_model[0], m[i - 1])),
                                      max(element_wise_product(HMM.transition_model[1], m[i - 1]))])
+        backtracking_graph.append([np.argmax(element_wise_product(HMM.transition_model[0], m[i - 1])),
+                                   np.argmax(element_wise_product(HMM.transition_model[1], m[i - 1]))])
+
+    # computed probabilities
+    ml_probabilities = [0.0] * (len(ev) - 1)
+    # most likely sequence
+    ml_path = [True] * (len(ev) - 1)
 
-    path = [0.0] * (len(ev) - 1)
     # the construction of the most likely sequence starts in the final state with the largest probability,
-    # and runs backwards; the algorithm needs to store for each xt its best predecessor xt-1
-    for i in range(t, -1, -1):
-        path[i - 1] = max(m[i - 1])
+    # and runs backwards; the algorithm needs to store for each xt its predecessor xt-1 maximizing its probability
+    i_max = np.argmax(m[-1])
+
+    for i in range(t - 1, -1, -1):
+        ml_probabilities[i] = m[i][i_max]
+        ml_path[i] = True if i_max == 0 else False
+        if i > 0:
+            i_max = backtracking_graph[i - 1][i_max]
 
-    return path
+    return ml_path, ml_probabilities
 
 
 # _________________________________________________________________________
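
For context, the docstring's [Equation 15.11] is the Viterbi message update from AIMA: m[1:t+1] = P(e[t+1] | X[t+1]) * max over x[t] of ( P(X[t+1] | x[t]) * m[1:t](x[t]) ). The new backtracking_graph records, per the commit's own comment, the maximizing predecessor at each step, so the backward pass can recover the state sequence rather than just the per-step maxima as the old loop did. The sketch below is a usage example only, not part of the commit: it assumes the HiddenMarkovModel helper defined elsewhere in probability.py (its exact constructor signature is an assumption here) and the standard two-state umbrella-world parameters, with state index 0 standing for True.

# Usage sketch (assumptions: module name "probability", a HiddenMarkovModel(transition_model,
# sensor_model) constructor, and boolean evidence values, as in the repository's umbrella example).
from probability import HiddenMarkovModel, viterbi

# transition_model[i] = P(X_t | X_t-1 = state i); sensor_model[0]/[1] = P(e_t | X_t) for e_t True/False.
umbrella_transition = [[0.7, 0.3], [0.3, 0.7]]
umbrella_sensor = [[0.9, 0.2], [0.1, 0.8]]
umbrella_hmm = HiddenMarkovModel(umbrella_transition, umbrella_sensor)

umbrella_evidence = [True, True, False, True, True]
path, probabilities = viterbi(umbrella_hmm, umbrella_evidence)

print(path)           # most likely sequence: [True, True, False, True, True]
print(probabilities)  # max-message values along that path, approx. [0.8182, 0.5155, 0.1237, 0.0334, 0.0210]

Note also that the added ev = ev.copy() keeps ev.insert(0, None) from mutating the caller's list, so umbrella_evidence is unchanged after the call; previously it gained a leading None entry as a side effect.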