# Process.py
import pandas as pd
import time
from datetime import datetime
import urllib.request
import json
from dateutil.relativedelta import relativedelta
from dateutil import parser
from Init import Init
from collections import defaultdict
import os
import re
import xml.sax.saxutils as saxutils


def cleanhtml(raw_html):
    """Strip HTML tags, URLs, ANSI escapes, and punctuation from raw_html."""
    cleanr = re.compile('<.*?>')  # anything that looks like an HTML tag
    cleantext = re.sub(cleanr, '', raw_html)
    # Drop URLs of the form scheme://host.domain/path...
    cleantext = re.sub(r'\w+:\/{2}[\d\w-]+(\.[\d\w-]+)*(?:(?:\/[^\s/]*))*', '', cleantext)
    # Drop ANSI terminal escape sequences.
    cleantext = re.sub(r'(\x9B|\x1B\[)[0-?]*[ -\/]*[@-~]', '', cleantext.strip())
    # Drop newlines, tabs, and carriage returns.
    cleantext = re.sub('[\n\t\r]', '', cleantext)
    # Collapse every remaining run of non-alphanumerics into a single space.
    cleantext = re.sub(r'[^A-Za-z0-9]+', ' ', cleantext)
    return cleantext
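
# A quick illustrative check of cleanhtml (the sample string is made up):
# tags, URLs, and punctuation are reduced to plain space-separated words.
#   cleanhtml('<p>See <b>this</b> at https://example.com/x?y=1 now!</p>')
#   -> 'See this at now '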


def Find(string):
    """Return every http/https URL found in string via re.findall."""
    url = re.findall(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', string)
    return url
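
# Illustrative only (Find is not called elsewhere in this file):
#   Find('read https://example.com/a and http://test.org first')
#   -> ['https://example.com/a', 'http://test.org']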


init = Init()  # project setup; the instance itself is not used below
start_date = parser.parse('May 20 2017 01:00AM')
init_date = parser.parse('May 15 2017 12:00AM')  # the hard start date
# The hard end date: every endo user's first post should fall between
# Jan 01 2018 and the current date.
cut_off_date = parser.parse('Jan 01 2018 12:00AM')
# A 3-month buffer for negative users. For example, if a user posts on
# Nov 1st and today is Nov 12th, only 12 days have passed, so we cannot call
# the user a non-endo user just because they have not yet had a chance to
# post to the endo subreddit.
cut_off_date_3months = parser.parse('Aug 01 2018 12:00AM')


def Process():
    endo_users = pd.read_csv('endoUsers_2018.csv')
    non_endo_subreddits = pd.read_csv('FinalReddits.csv').iloc[:, 0]
    # Map each user name to its remaining columns; the first entry is the
    # timestamp of that user's first post to the endo subreddit.
    endo_users = endo_users.set_index('user_name').T.to_dict('list')
    pos_users = defaultdict(list)
    neg_users = defaultdict(list)
    outside_users = defaultdict(list)
    d = os.getcwd()
    os.makedirs(os.path.join(d, 'pos3'), exist_ok=True)
    os.makedirs(os.path.join(d, 'neg3'), exist_ok=True)
    for sub_red in non_endo_subreddits:
        pos_users, neg_users, outside_users = getEndoBatchUsers(
            endo_users, sub_red, start_date, init_date,
            pos_users, neg_users, outside_users)
        print('len of pos users is: {0}'.format(len(pos_users)))
        print('len of neg users is: {0}'.format(len(neg_users)))
        print('len of outside users is: {0}'.format(len(outside_users)))
        print('completed')
        # Running per-subreddit tally: subreddit,pos,neg,outside.
        with open('usercount.csv', 'a') as f:
            f.write('{0},{1},{2},{3}\n'.format(sub_red, len(pos_users),
                                               len(neg_users), len(outside_users)))
        writeUsers(pos_users, os.path.join(d, 'pos3'))
        writeUsers(neg_users, os.path.join(d, 'neg3'))


def writeUsers(users, out_dir):
    """Append every collected message for each user to out_dir/<user>.txt."""
    for name, messages in users.items():
        try:
            file_name = os.path.join(out_dir, name + '.txt')
            with open(file_name, 'a', encoding='utf-8') as f:
                count = len(messages)
                if count > 1:
                    # Log users who contributed more than one message.
                    with open(os.path.join(out_dir, 'count_greaterthan1.csv'),
                              'a', encoding='utf-8') as g:
                        g.write('{0}|{1}\n'.format(name, count))
                for sub_red, body, created_utc, _ in messages:
                    # Convert the epoch timestamp to a readable local date.
                    msg_created_time = datetime.fromtimestamp(created_utc).strftime('%c')
                    f.write('{0}|{1}|{2}|{3}\n'.format(sub_red, msg_created_time,
                                                       len(body), body))
        except Exception as ex:
            print(ex)
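
# With made-up values, a line appended to <user>.txt looks like:
#   AskReddit|Sat May 20 00:37:12 2017|24|I think this helps a lot
# i.e. subreddit|local timestamp|text length|cleaned text.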


def getEndoBatchUsers(endo_users, sub_red, start_date, init_date,
                      pos_users, neg_users, outside_users):
    """Walk backwards through sub_red in one-hour windows and bucket authors.

    An author who later posted to the endo subreddit is positive when the
    message falls between nine and three months before their first endo post,
    and an outside user otherwise. Any other author is negative, but only
    when the message shares a timestamp or a text length with some positive
    message (a crude matching heuristic) and predates cut_off_date_3months.
    """
    start_date_epoch = time.mktime(start_date.timetuple())
    end_date = start_date - relativedelta(hours=1)
    end_date_epoch = time.mktime(end_date.timetuple())
    dates = set()           # timestamps of positive messages seen so far
    comment_length = set()  # text lengths of positive messages seen so far
    while end_date > init_date:
        try:
            # Comments posted in the current one-hour window.
            api_comment_url = ('https://api.pushshift.io/reddit/search/comment/?subreddit='
                               + sub_red + '&before=' + str(int(start_date_epoch))
                               + '&after=' + str(int(end_date_epoch))
                               + '&size=5000&sort=desc')
            url = urllib.request.urlopen(api_comment_url)
            user_data = json.loads(url.read().decode())
            count = 0
            for user_detail in user_data['data']:
                try:
                    t = user_detail['created_utc']
                    msg_created_time = datetime.fromtimestamp(t).strftime('%c')
                    if 'author' in user_detail and 'body' in user_detail:
                        key = user_detail['author']
                        value = cleanhtml(saxutils.unescape(user_detail['body']))
                        count += 1
                        if len(value) > 0:
                            if key in endo_users:
                                endo_first_comment_time = endo_users[key]
                                three_months = parser.parse(endo_first_comment_time[0]) - relativedelta(months=3)
                                nine_months = parser.parse(endo_first_comment_time[0]) - relativedelta(months=9)
                                if nine_months < parser.parse(msg_created_time) < three_months:
                                    # pos_users is a defaultdict(list), so a
                                    # plain append covers first-time keys too.
                                    pos_users[key].append((sub_red, value, t, len(value)))
                                    dates.add(t)
                                    comment_length.add(len(value))
                                else:
                                    outside_users[key].append((sub_red, value, t, len(value)))
                            elif t in dates or len(value) in comment_length:
                                if parser.parse(msg_created_time) < cut_off_date_3months:
                                    neg_users[key].append((sub_red, value, t, len(value)))
                except Exception as ex:
                    print(ex)
            # Submissions posted in the same window; titles stand in for bodies.
            api_submission_url = ('https://api.pushshift.io/reddit/search/submission/?subreddit='
                                  + sub_red + '&before=' + str(int(start_date_epoch))
                                  + '&after=' + str(int(end_date_epoch))
                                  + '&size=5000&sort=desc')
            url = urllib.request.urlopen(api_submission_url)
            user_data = json.loads(url.read().decode())
            print('*****')
            print('comment count of {0} batch is {1} between {2} and {3}'.format(
                sub_red, count, end_date, start_date))
            for user_detail in user_data['data']:
                try:
                    t = user_detail['created_utc']
                    if 'author' in user_detail and 'title' in user_detail:
                        key = user_detail['author']
                        value = cleanhtml(saxutils.unescape(user_detail['title']))
                        msg_created_time = datetime.fromtimestamp(t).strftime('%c')
                        if len(value) > 0:
                            if key in endo_users:
                                endo_first_comment_time = endo_users[key]
                                three_months = parser.parse(endo_first_comment_time[0]) - relativedelta(months=3)
                                nine_months = parser.parse(endo_first_comment_time[0]) - relativedelta(months=9)
                                if nine_months < parser.parse(msg_created_time) < three_months:
                                    pos_users[key].append((sub_red, value, t, len(value)))
                                    dates.add(t)
                                    comment_length.add(len(value))
                                else:
                                    outside_users[key].append((sub_red, value, t, len(value)))
                            elif t in dates or len(value) in comment_length:
                                if parser.parse(msg_created_time) < cut_off_date_3months:
                                    neg_users[key].append((sub_red, value, t, len(value)))
                except Exception as ex:
                    print(ex)
            print('pos users: {0}'.format(len(pos_users)))
            print('neg users: {0}'.format(len(neg_users)))
            print('outside users: {0}'.format(len(outside_users)))
        except Exception as e:
            print(e)
        # Advance the window even after a failed request so that one bad
        # batch cannot stall the loop forever.
        start_date = end_date
        start_date_epoch = end_date_epoch
        end_date = start_date - relativedelta(hours=1)
        end_date_epoch = time.mktime(end_date.timetuple())
    return pos_users, neg_users, outside_users
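
# For reference, each element of the Pushshift 'data' array consumed above is
# a JSON object along these (abbreviated, illustrative) lines:
#   {"author": "some_user", "body": "comment text ...", "created_utc": 1495240000}
# Submission records carry "title" in place of "body".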


if __name__ == '__main__':
    Process()