-
Notifications
You must be signed in to change notification settings - Fork 30
/
Copy pathcontrol_node.py
187 lines (155 loc) · 6.93 KB
/
control_node.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
#! /usr/bin/env python
import rospy
from time import time
from time import sleep
from datetime import datetime
import matplotlib.pyplot as plt
import sys
DATA_PATH = '/home/maestro/catkin_ws/src/master_rad/Data'
MODULES_PATH = '/home/maestro/catkin_ws/src/master_rad/scripts'
sys.path.insert(0, MODULES_PATH)
from Qlearning import *
from Lidar import *
from Control import *
# Real robot
# True  -> run against the physical robot (fixed init pose, no Gazebo teleport)
# False -> run in Gazebo simulation using one of the preset start/goal pairs
REAL_ROBOT = False

# Action parameter
# Minimum time [s] that must elapse between two consecutive actions in the
# main loop; 0.0 means act on every received scan/odom pair.
MIN_TIME_BETWEEN_ACTIONS = 0.0

# Initial and goal positions
# Five preset (start, goal) pose pairs for simulation runs; PATH_IND selects
# one column. x/y in meters, theta in degrees.
INIT_POSITIONS_X = [ -0.7, -0.7, -0.5, -1, -2]
INIT_POSITIONS_Y = [ -0.7, 0.7, 1, -2, 1]
INIT_POSITIONS_THETA = [ 45, -45, -120, -90, 150]
GOAL_POSITIONS_X = [ 2.0, 2.0, 0.5, 1, 2]
GOAL_POSITIONS_Y = [ 1.0, -1.0, -1.9, 2, -1,]
GOAL_POSITIONS_THETA = [ 25.0, -40.0, -40, 60, -30,]

# Index (0-4) of the preset start/goal pair used in simulation.
PATH_IND = 4

# Initial & Goal position
if REAL_ROBOT:
    # Physical robot: odometry starts at the origin; goal is hard-coded.
    X_INIT = 0.0
    Y_INIT = 0.0
    THETA_INIT = 0.0
    X_GOAL = 1.7
    Y_GOAL = 1.1
    THETA_GOAL = 90
else:
    # Simulation: optionally randomize the start pose (see robotSetRandomPos),
    # otherwise use the preset pair selected by PATH_IND.
    RANDOM_INIT_POS = False
    X_INIT = INIT_POSITIONS_X[PATH_IND]
    Y_INIT = INIT_POSITIONS_Y[PATH_IND]
    THETA_INIT = INIT_POSITIONS_THETA[PATH_IND]
    X_GOAL = GOAL_POSITIONS_X[PATH_IND]
    Y_GOAL = GOAL_POSITIONS_Y[PATH_IND]
    THETA_GOAL = GOAL_POSITIONS_THETA[PATH_IND]

# Log file directory - Q table source
# Directory holding the learned Q-table (Qtable.csv) loaded at startup.
Q_TABLE_SOURCE = DATA_PATH + '/Log_learning_FINAL'
if __name__ == '__main__':
    # Control node: drives the robot from the selected initial pose to the
    # goal pose using a pre-learned Q-table for obstacle avoidance and a
    # feedback controller for free-space / near-goal navigation.
    try:
        rospy.init_node('control_node', anonymous = False)
        rate = rospy.Rate(10)

        # Publishers: teleport the model in Gazebo, and command velocities.
        setPosPub = rospy.Publisher('/gazebo/set_model_state', ModelState, queue_size = 10)
        velPub = rospy.Publisher('/cmd_vel', Twist, queue_size = 10)

        actions = createActions()
        state_space = createStateSpace()
        Q_table = readQTable(Q_TABLE_SOURCE+'/Qtable.csv')
        print('Initial Q-table:')
        print(Q_table)

        # Init time: spin until rospy's clock actually advances (with
        # use_sim_time, Time.now() can be 0 until the first /clock message).
        t_0 = rospy.Time.now()
        t_start = rospy.Time.now()
        while not (t_start > t_0):
            t_start = rospy.Time.now()

        t_step = t_start
        count = 0

        # robot in initial position
        robot_in_pos = False

        # because of the video recording
        sleep(1)

        # main loop
        while not rospy.is_shutdown():
            msgScan = rospy.wait_for_message('/scan', LaserScan)
            odomMsg = rospy.wait_for_message('/odom', Odometry)

            # Secure the minimum time interval between 2 actions
            step_time = (rospy.Time.now() - t_step).to_sec()
            if step_time > MIN_TIME_BETWEEN_ACTIONS:
                t_step = rospy.Time.now()

                if not robot_in_pos:
                    robotStop(velPub)
                    # init pos
                    if REAL_ROBOT:
                        # Physical robot cannot be teleported: take the
                        # current odometry as the start pose and proceed.
                        ( x_init , y_init , theta_init ) = (0, 0, 0)
                        odomMsg = rospy.wait_for_message('/odom', Odometry)
                        ( x , y ) = getPosition(odomMsg)
                        theta = degrees(getRotation(odomMsg))
                        robot_in_pos = True
                        print('\r\nInitial position:')
                        print('x = %.2f [m]' % x)
                        print('y = %.2f [m]' % y)
                        print('theta = %.2f [degrees]' % theta)
                        print('')
                    else:
                        # Teleport the model in Gazebo to the start pose.
                        if RANDOM_INIT_POS:
                            ( x_init , y_init , theta_init ) = robotSetRandomPos(setPosPub)
                        else:
                            ( x_init , y_init , theta_init ) = robotSetPos(setPosPub, X_INIT, Y_INIT, THETA_INIT)
                        # check init pos: re-read odometry and only start the
                        # run once the teleport actually took effect
                        # (5 cm position / 2 degree orientation tolerance).
                        odomMsg = rospy.wait_for_message('/odom', Odometry)
                        ( x , y ) = getPosition(odomMsg)
                        theta = degrees(getRotation(odomMsg))
                        print(theta, theta_init)
                        if abs(x-x_init) < 0.05 and abs(y-y_init) < 0.05 and abs(theta-theta_init) < 2:
                            robot_in_pos = True
                            print('\r\nInitial position:')
                            print('x = %.2f [m]' % x)
                            print('y = %.2f [m]' % y)
                            print('theta = %.2f [degrees]' % theta)
                            print('')
                            sleep(1)
                        else:
                            robot_in_pos = False
                else:
                    count += 1
                    text = '\r\nStep %d , Step time %.2f s' % (count, step_time)

                    # Get robot position and orientation
                    ( x , y ) = getPosition(odomMsg)
                    theta = getRotation(odomMsg)

                    # Get lidar scan and its discretized state index
                    ( lidar, angles ) = lidarScan(msgScan)
                    ( state_ind, x1, x2 ,x3 ,x4 ) = scanDiscretization(state_space, lidar)

                    # Check for objects nearby
                    crash = checkCrash(lidar)
                    object_nearby = checkObjectNearby(lidar)
                    goal_near = checkGoalNear(x, y, X_GOAL, Y_GOAL)
                    enable_feedback_control = True

                    # Stop the simulation
                    if crash:
                        robotStop(velPub)
                        rospy.signal_shutdown('End of testing!')
                        text = text + ' ==> Crash! End of simulation!'
                        status = 'Crash! End of simulation!'
                    # Feedback control algorithm: used in free space or when
                    # the goal is close enough that obstacle avoidance must yield.
                    elif enable_feedback_control and ( not object_nearby or goal_near ):
                        status = robotFeedbackControl(velPub, x, y, theta, X_GOAL, Y_GOAL, radians(THETA_GOAL))
                        text = text + ' ==> Feedback control algorithm '
                        if goal_near:
                            text = text + '(goal near)'
                    # Q-learning algorithm: obstacle nearby -> act greedily
                    # from the learned Q-table.
                    else:
                        ( action, status ) = getBestAction(Q_table, state_ind, actions)
                        if not status == 'getBestAction => OK':
                            print('\r\n', status, '\r\n')
                        status = robotDoAction(velPub, action)
                        if not status == 'robotDoAction => OK':
                            print('\r\n', status, '\r\n')
                        text = text + ' ==> Q-learning algorithm'

                    text = text + '\r\nx : %.2f -> %.2f [m]' % (x, X_GOAL)
                    text = text + '\r\ny : %.2f -> %.2f [m]' % (y, Y_GOAL)
                    text = text + '\r\ntheta : %.2f -> %.2f [degrees]' % (degrees(theta), THETA_GOAL)

                    if status == 'Goal position reached!':
                        robotStop(velPub)
                        rospy.signal_shutdown('End of testing!')
                        text = text + '\r\n\r\nGoal position reached! End of simulation!'

                    print(text)

    except rospy.ROSInterruptException:
        # BUGFIX: if shutdown is requested during node setup, velPub may not
        # exist yet; calling robotStop unconditionally raised NameError and
        # masked the clean-termination message.
        if 'velPub' in locals():
            robotStop(velPub)
        print('Simulation terminated!')