server.rb
libdir = File.expand_path(File.join(File.dirname(__FILE__), 'src'))
$LOAD_PATH.unshift(libdir) unless $LOAD_PATH.include?(libdir)
require 'loader'
config = Confstruct::Configuration.new(
YAML.load_file(
File.expand_path(
File.join(File.dirname(__FILE__), 'config.yaml')
)
)
)
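# The keys referenced below (redis.host, redis.port, logging.level) suggest a
# config.yaml roughly like the following; this is a sketch inferred from usage
# in this file, not a canonical example:
#
#   redis:
#     host: localhost
#     port: 6379
#   logging:
#     level: info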
# Allocate a logger object; we will initialize it per job.
logger = nil
# create a new decomposer. Note we create it once and then just use its
# processing method.
decomposer = Decomposer::Tokens.new
# find all metrics available for processing, minus the default.
# note we initialize them once and then use their processing method.
# (Module#constants returns strings on Ruby 1.8 and symbols on 1.9+, so we
# subtract both forms of "Default".)
metrics = Metrics.constants - ["Default", :Default]
metrics = metrics.collect { |metric|
"Metrics::#{metric}".constantize.new(config)
}
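# From the way metrics are used here (constructed with the config, then asked
# for #get_name and #process), each Metrics class is assumed to follow roughly
# this duck type; WordCount is a hypothetical example name:
#
#   module Metrics
#     class WordCount
#       def initialize(config)
#         @config = config
#       end
#
#       def get_name
#         'word_count'
#       end
#
#       def process(article)
#         # annotate the article with this metric's result
#       end
#     end
#   end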
# Create separate pub and sub connections to Redis: a connection that has
# entered subscribe mode cannot issue other commands, so publishing (and the
# INCR below) needs its own connection.
@pub = Redis.new(:host => config.redis.host, :port => config.redis.port)
@sub = Redis.new(:host => config.redis.host, :port => config.redis.port)
@sub.subscribe('process_article', 'new_job') do |on|
on.message do |channel, message|
case channel
# Handle a new job request.
when 'new_job'
puts "new job requested"
# Create a new job id and send it back to the client.
# All processing requests should now come with this id.
# This isn't a guarantee of anything at this point, but we may
# start associating files with their job_ids (as well as additional
# information).
job_request_id = message
job_id = UUID.generate
@pub.publish job_request_id, job_id
logger = Logger.new("logs/#{Time.now.strftime('%Y%m%d-%H%M')}#{job_id}.log")
logger.level = Logger.const_get((ENV['LOGLEVEL'] || config.logging.level).upcase)
# Handle a process-article request:
# decompose the article and run all available metrics.
# TODO: when we have more intricate decompositions
# we will need to generalize this as well.
when 'process_article'
message = JSON.parse(message)
job_id = message["job_id"]
# did we get a file path to the article or the full body of it?
article_path = message["path"]
article_body = message["article"]
article = nil
begin
logger.debug "#{job_id}: processing #{article_path}"
if (article_path)
article = Article.new({ :path => article_path })
elsif (article_body)
article = Article.new({ :article => article_body })
end
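# From its usage here and in the ensure block below, Article is assumed to
# wrap either a file path or an inline body, accumulate decomposition and
# metric results, and expose #save and #to_json. A rough sketch of that
# interface (not the actual implementation):
#
#   class Article
#     def initialize(opts)
#       @path = opts[:path]
#       @body = opts[:article]
#     end
#
#     def save;    end  # persist results when backed by a file path
#     def to_json; end  # serialize results when the body came inline
#   end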
logger.debug "decomposing"
decomposer.process(article)
metrics.each do |metric|
logger.debug "metric: #{metric.get_name}"
metric.process(article)
end
# Generic catch-all for now. We probably need to do a better job of
# error handling here.
rescue Exception => e
logger.error "Error: #{e.message}\n#{e.backtrace.join("\n")}"
# Always make sure we increment the appropriate counters, regardless of
# whether our processing succeeded here.
ensure
@pub.incr job_id
if (article_path)
article.save
@pub.publish "process_article_done", { :path => article_path, :job_id => job_id }.to_json()
elsif (article_body)
@pub.publish "process_article_done", { :article => article.to_json, :job_id => job_id }.to_json()
end
end
end
end
end
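# A minimal client sketch against the protocol above. Channel names and
# message shapes are taken from this file; host, port, and the article path
# are placeholder assumptions:
#
#   pub = Redis.new(:host => 'localhost', :port => 6379)
#   sub = Redis.new(:host => 'localhost', :port => 6379)
#   request_id = UUID.generate
#   sub.subscribe(request_id) do |on|
#     # Only ask for a job id once we are actually listening for the reply.
#     on.subscribe do |_channel, _count|
#       pub.publish 'new_job', request_id
#     end
#     on.message do |_channel, job_id|
#       pub.publish 'process_article',
#                   { :job_id => job_id, :path => '/path/to/article.txt' }.to_json
#       sub.unsubscribe
#     end
#   end
#
# Completions are announced on 'process_article_done', and the server INCRs a
# per-job counter under the job id.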