# urb.conf
[Service]
service_monitor_handler = service_monitor_handler.ServiceMonitorHandler('urb.service.monitor')
mesos_handler = mesos_handler.MesosHandler('urb.endpoint.0.mesos', 60, 5)
[Http]
scheduler_port = 5060
heartbeat_interval_seconds = 30
executor_port = 5061
[MesosHandler]
executor_runner_config_file = urb.executor_runner.conf
[DBManager]
db_client = urb_mongo_client.URBMongoClient('mongodb://localhost.localdomain:27017/','urb')
[ChannelFactory]
# The following shows how you can configure redis sentinel support
#message_broker = redis_sentinel_message_broker.RedisSentinelMessageBroker(sentinel_servers=[('localhost',26379)],sentinel_master="mymaster")
message_broker = redis_message_broker.RedisMessageBroker(host='localhost', port=6379)
# Adapter path property name should be of the form
# mesoshandler_adapter_path = path
# the path can be absolute or relative; this property is optional, and if not specified
# the adapter is expected to be in path/to/urb/../name_adapter/name_adapter.py
# Adapter property name should be of the form
# <handlername>_adapter = adapter_module.AdapterClass(arg1, arg2, ...)
[AdapterManager]
#mesoshandler_adapter_path = .
#mesoshandler_adapter = localhost_adapter.LocalhostAdapter()
#mesoshandler_adapter = k8s_adapter.K8SAdapter("local")
mesoshandler_adapter = k8s_adapter.K8SAdapter()
#mesoshandler_adapter = k8s_adapter.K8SAdapter("gcr.io/univa-stereshchenko")
[JobMonitor]
monitor_poll_period_in_seconds = 15.0
max_job_status_retrieval_error_count = 3
[ExecutorRunner]
service_monitor_endpoint = urb.service.monitor
mesos_master_endpoint = urb.endpoint.0.mesos
mesos_work_dir = %(tmp)s/urb
[ExecutorHandler]
urb_lib_path = /urb/lib/liburb.so
fetcher_path = /urb/bin/fetcher
command_executor_path = /urb/bin/command-executor
ld_library_path = /urb/lib
# default values for all frameworks
[DefaultFrameworkConfig]
# amount of memory each resource has to offer in megabytes
mem = 4096
# number of cores each resource has to offer
cpus = 2
# amount of disk space each resource has to offer
disk = 81924
# free ports each resource has to offer
ports = [(30000,31000)]
# number of Executor Runner offers a framework can refuse before the executor runner is shut down
max_rejected_offers = 4
# maximum number of pending and running jobs allowed
max_tasks = 5000
# should a TASK_LOST message be sent when a framework launches a task on a speculative resource
send_task_lost = False
# how many additional jobs to submit whenever a framework starts an executor runner
scale_count = 1
# how many speculative resources to include when generating a speculative offer
initial_tasks = 1
# how many jobs to submit when a framework starts a task on a speculative resource
concurrent_tasks = 1
# maximum wait time in seconds for the resource to be offered to the framework;
# the default is 10 seconds
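# for example, to make the documented default explicit (the value shown is an assumption based on the note above):
#offer_period = 10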
offer_period =
# map resources from the task's accepted offer to Kubernetes resource requests;
# the default is False; other possible configuration settings: "cpu" - only cpu is mapped,
# "mem" - only memory is mapped, True or "cpu;mem" - both cpu and memory are mapped
# it is also possible to specify a scaling factor for each resource or for both:
# "cpu/8;mem*1.2"
resource_mapping =
# persistent volume claims in the form "claim_name:directory_path_inside_container_to_mount_claim_content";
# multiple entries should be separated by semicolons
#persistent_volume_claims = spark-pvc:/opt/spark;data-pvc:/opt/data
persistent_volume_claims =
# Any framework can have a custom configuration with all or a subset of the fields from DefaultFrameworkConfig.
# A framework configuration section name consists of the framework name used in framework registration
# (with special characters and whitespace removed) followed by "FrameworkConfig". Simple globbing with the
# asterisk character * can be used to map multiple frameworks to the same framework configuration section.
# C++ example framework
[TestFrameworkC*FrameworkConfig]
mem = 2048
cpus = 1
disk = 16384
ports = [(31000,32000),(41000,42000)]
custom_resources = dfsio_spindles:4;disk_ids:[(0,5),(10,15)]
attributes = host:hname;rack:a
max_rejected_offers = 10
max_tasks = 1000
send_task_lost = False
scale_count = 1
concurrent_tasks = 1
offer_period = 2
persistent_volume_claims = example-pvc:/opt/example
# Python example framework
[TestFrameworkPy*FrameworkConfig]
mem = 2048
cpus = 1
disk = 16384
ports = [(31000,32000),(41000,42000)]
max_rejected_offers = 10
max_tasks = 1000
send_task_lost = False
scale_count = 1
concurrent_tasks = 1
executor_runner = local/python-executor-runner
[marathonFrameworkConfig]
send_task_lost = False
scale_count = 0
[chronosFrameworkConfig]
send_task_lost = False
scale_count = 0
# Spark examples with names starting with "Spark"
[Spark*FrameworkConfig]
mem = 12288
cpus = 2
disk = 81924
ports = [(30000,31000)]
max_rejected_offers = 4
max_tasks = 5000
send_task_lost = True
scale_count = 1
initial_tasks = 1
concurrent_tasks = 1
persistent_volume_claims = spark-pvc:/opt/spark-2.1.0-bin-hadoop2.7
# pyspark shell
[PySparkShellFrameworkConfig]
mem = 12288
cpus = 2
disk = 81924
ports = [(30000,31000)]
max_rejected_offers = 4
max_tasks = 5000
send_task_lost = True
scale_count = 1
initial_tasks = 1
concurrent_tasks = 1
executor_runner = local/spark-exec
persistent_volume_claims = spark-pvc:/opt/spark-2.1.0-bin-hadoop2.7;scratch-pvc:/scratch
# some Python Spark examples with names starting with Custom
# with custom Spark executor runner docker image and scratch volume for data
[Custom*FrameworkConfig]
mem = 12288
cpus = 2
disk = 81924
ports = [(30000,31000)]
max_rejected_offers = 4
max_tasks = 5000
send_task_lost = True
scale_count = 1
initial_tasks = 1
concurrent_tasks = 1
executor_runner = local/spark-exec
#resource_mapping = cpu/5; mem
persistent_volume_claims = scratch-pvc:/scratch
# ###########################################################
# Logging Configuration
#
# The LoggerLevels section lists regular expressions to match against
# configured loggers. Expressions are matched top down, with the last
# match setting the effective level. The root key describes the root
# logger level, which is the default level for all loggers.
#
# The lower sections describe configuration for specific log file handlers.
# In order for a message to be emitted by a specific handler, the level of
# the message must be greater than or equal to both the logger level and
# the handler level.
#
# Examples of setting LoggerLevels:
#
# 1) Trace level debugging for BrokerDbHandler, debug for everything else
# other than root.
#
# [LoggerLevels]
# root=error
# expressions: ^.*$=debug
# ^BrokerDbHandler$=trace
#
# 2) Trace level debugging for any logger beginning with Broker and debug for
# everything else other than root.
#
# [LoggerLevels]
# root=error
# expressions: ^.*$=debug
# ^Broker.*$=trace
#
# Some expression examples:
#
# expressions: ^.*$=debug
# ^Broker.*$=trace
# DbManager=info
# urb\.*=critical
#
# expressions: Brokers=debug
#
# Remember to set the level in the "Handlers" section if you want to see log
# messages of a certain level on a specific handler.
#
[LoggerLevels]
root=error
expressions: ^.*$=trace
# ^.*MesosHandler.*$=trace
[ConsoleLogging]
handler=stream_log_handler.StreamLogHandler(sys.stdout,)
level=trace
format=%(asctime)s.%(msecs)03d|%(levelname)s|%(threadName)s|%(filename)s:%(funcName)s:%(lineno)d|%(message)s
datefmt=%Y-%m-%d %H:%M:%S
# Custom log files can be set up here.
# A new config section named "FileLogging<something>"
# will be read and enacted by the logger.
# NOTE: If you wish to use a filter you must make
# sure that the modules you want to log are logging to the appropriate logger.
# In other words, if you create a filter for "urb.broker", you should make
# sure that all of the modules in urb/broker actually create their loggers
# with "urb.broker.<classname>".
[FileLogging]
handler=timed_rotating_file_log_handler.TimedRotatingFileLogHandler('/urb/urb.log')
level=error
format=%(asctime)s.%(msecs)03d|%(levelname)s|%(threadName)s|%(filename)s:%(funcName)s:%(lineno)d|%(message)s
datefmt=%Y-%m-%d %H:%M:%S
#filter=urb