-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathjson2csv.py
More file actions
130 lines (114 loc) · 3.73 KB
/
json2csv.py
File metadata and controls
130 lines (114 loc) · 3.73 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
import cgi
import codecs
import collections
import copy
import cStringIO
import csv
import json
import os
import sys

import requests  # was used below but never imported (NameError at runtime)
import scraperwiki
def getpaths(fragment, sofar=None):
    """Yield key-paths to every leaf of a nested JSON structure.

    Each yielded path is a list of keys/indices; e.g. a yield of
    [1, 'a'] means fragment[1]['a'] is a leaf (non-container) value.

    fragment -- parsed JSON data (dict/list, possibly nested) or a scalar
    sofar -- path prefix accumulated during recursion (internal use)
    """
    # Fresh list per call: avoids the mutable-default-argument pitfall.
    if sofar is None:
        sofar = []
    # Scalars have no sub-paths. This is a generator, so a bare `return`
    # simply ends the iteration (yields nothing).
    # isinstance also accepts dict subclasses such as OrderedDict.
    if not isinstance(fragment, (dict, list)):
        return
    try:
        keys = fragment.keys()
    except AttributeError:  # not a dict, so it's a list: index by position
        keys = range(len(fragment))
    for key in keys:
        path = list(sofar)  # copy so sibling branches don't share state
        path.append(key)
        if isinstance(fragment[key], (dict, list)):
            for subpath in getpaths(fragment[key], path):
                yield subpath
        else:
            yield path
def pullout(item, commands):
    """Recursively fetch the value addressed by a getpaths-style path.

    Follows the keys/indices listed in `commands` down into `item`.
    A missing key/index, or any falsy value along the way, yields ''.
    """
    # Base case: path exhausted -- return the value itself, or '' if falsy.
    if not commands:
        return item or ''
    head, tail = commands[0], commands[1:]
    try:
        child = item[head]
    except (IndexError, KeyError):
        # This particular record doesn't contain the path.
        return ''
    return pullout(child, tail) or ''
class UnicodeCSVWriter:
    """
    A CSV writer which will write rows to CSV file "f",
    which is encoded in the given encoding.
    """
    # NOTE(review): this is the UnicodeWriter recipe from the Python 2.x
    # csv module documentation -- csv cannot handle unicode directly, so
    # each row is encoded to UTF-8, run through csv into an in-memory
    # queue, then transcoded to the target encoding and written out.
    def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
        # Redirect output to a queue
        self.queue = cStringIO.StringIO()
        self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
        self.stream = f
        # Incremental encoder so multi-byte sequences survive across rows.
        self.encoder = codecs.getincrementalencoder(encoding)()
    def writerow(self, row):
        # Python 2 csv only accepts byte strings: coerce every cell to
        # unicode, then encode as UTF-8 before handing it to csv.
        self.writer.writerow([unicode(s).encode("utf-8") for s in row])
        # Fetch UTF-8 output from the queue ...
        data = self.queue.getvalue()
        data = data.decode("utf-8")
        # ... and reencode it into the target encoding
        data = self.encoder.encode(data)
        # write to the target stream
        self.stream.write(data)
        # empty queue
        self.queue.truncate(0)
    def writerows(self, rows):
        # Convenience wrapper: one writerow() call per row.
        for row in rows:
            self.writerow(row)
def jsoncsv(data, nicenames, colnames, filename='unnamed', encoding=None):
    """Stream a collection of JSON documents to stdout as CSV.

    data -- iterable of raw JSON strings, one document per item
    nicenames -- human-readable column labels, parallel to colnames
    colnames -- top-level keys to export, in the desired column order
    filename -- basename used in the Content-Disposition header
    encoding -- if given, each raw document is decoded from it first
    """
    scraperwiki.utils.httpresponseheader("Content-Type", "text/csv")
    scraperwiki.utils.httpresponseheader("Content-Disposition", "attachment;filename=%s.csv" % filename)
    writer = UnicodeCSVWriter(sys.stdout)
    # Decode each raw document when an input encoding was specified.
    if encoding:
        raw_docs = (doc.decode(encoding) for doc in data)
    else:
        raw_docs = (doc for doc in data)
    # OrderedDict preserves each document's key order, so nested columns
    # come out in the order the JSON presented them.
    docs = [json.loads(raw, object_pairs_hook=collections.OrderedDict)
            for raw in raw_docs]
    raw_docs = None
    # Collect every distinct leaf path seen across all documents.
    seen_paths = []
    for doc in docs:
        for path in getpaths(doc):
            if path not in seen_paths:
                seen_paths.append(path)
    # Keep only paths rooted at a requested column, grouped in colnames order.
    wanted = []
    for col in colnames:
        for path in seen_paths:
            if path[0] == col:
                wanted.append(path)
    # Header labels: swap each root key for its nice name, then join the
    # path segments with underscores.
    relabelled = copy.deepcopy(wanted)
    for path in relabelled:
        path[0] = nicenames[colnames.index(path[0])]
    header = [u'_'.join([unicode(seg) for seg in path]) for path in relabelled]
    writer.writerow(header)
    # One CSV row per document, cells pulled out in column order.
    for doc in docs:
        writer.writerow([pullout(doc, path) for path in wanted])
if __name__=='scraper':
    # Parse the query string into a plain dict; tolerate a missing or
    # malformed QUERY_STRING by falling back to no parameters.
    try:
        qs = dict(cgi.parse_qsl(os.getenv("QUERY_STRING")))
    except Exception:
        qs = {}
    paramlist = ['url', 'nicenames', 'colnames', 'encoding', 'filename', 'data']
    jqs = {}
    for item in paramlist:
        # BUG FIX: the original tested `item in jqs` against the freshly
        # created empty dict, which is always False -- every parameter
        # decoded to None. Look the parameter up in the query dict instead.
        if item in qs:
            jqs[item] = json.loads(qs.get(item))
        else:
            jqs[item] = None
    if jqs['url'] and not jqs['data']:
        # BUG FIX: json.loads needs the response body, not the Response
        # object requests.get() returns (which raised TypeError before).
        jqs['data'] = json.loads(requests.get(jqs['url']).content)