Version 0.2.0

Parent: 312b985229
Commit: 26376eaaec

43 changed files with 4699 additions and 2668 deletions
@@ -19,9 +19,6 @@ update_queries = [
    ALTER TABLE requests ADD COLUMN is_ssl INTEGER;
    """,

    """
    UPDATE schema_meta SET version=2;
    """,
]

@defer.inlineCallbacks
@@ -29,9 +26,30 @@ def update(dbpool):
    for query in update_queries:
        yield dbpool.runQuery(query)

    # Load each request and save it again for any request that specified a port
    # or protocol in the host header.
    http.init(dbpool)
    reqs = yield http.Request.load_from_filters([])
    for req in reqs:
        yield req.deep_save()
    # Update metadata for each request
    reqrows = yield dbpool.runQuery(
        """
        SELECT id, full_request
        FROM requests;
        """,
        )

    # Create an object and get its port/is_ssl
    for reqrow in reqrows:
        reqid = reqrow[0]
        fullreq = reqrow[1]
        r = http.Request(fullreq)
        port = r.port
        is_ssl = r.is_ssl
        yield dbpool.runQuery(
            """
            UPDATE requests SET port=?,is_ssl=? WHERE id=?;
            """,
            (port, is_ssl, reqid)
            )

    yield dbpool.runQuery(
        """
        UPDATE schema_meta SET version=2;
        """
        )
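Each schema module in this commit follows the same shape: a list of raw SQL statements in update_queries plus an inlineCallbacks-style update(dbpool) generator. A minimal sketch of driving one of these migrations by hand; the schema_2 module name and data.db path are illustrative assumptions, not taken from this diff:

from twisted.enterprise import adbapi
from twisted.internet import defer, reactor

from pappyproxy.schema import schema_2  # illustrative module name

@defer.inlineCallbacks
def run_migration():
    # adbapi runs the blocking sqlite3 driver on a thread pool and
    # returns Deferreds, which is what update() yields on
    dbpool = adbapi.ConnectionPool("sqlite3", "data.db", check_same_thread=False)
    yield schema_2.update(dbpool)  # executes each entry in update_queries
    reactor.stop()

reactor.callWhenRunning(run_migration)
reactor.run()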
pappyproxy/schema/schema_4.py (new file, 50 lines)

@@ -0,0 +1,50 @@
from pappyproxy import http
from twisted.internet import defer

"""
Schema v4

Description:
Adds additional metadata to the database for requests. Mainly it stores the host
that a request was sent to so that pappy doesn't have to guess from the host
header.
"""

update_queries = [
    """
    ALTER TABLE requests ADD COLUMN host TEXT;
    """,
]

@defer.inlineCallbacks
def update(dbpool):
    for query in update_queries:
        yield dbpool.runQuery(query)

    # Update metadata for each request
    reqrows = yield dbpool.runQuery(
        """
        SELECT id, full_request
        FROM requests;
        """,
        )

    # Create an object that will parse the host from the request
    for reqrow in reqrows:
        reqid = reqrow[0]
        fullreq = reqrow[1]
        r = http.Request(fullreq)
        host = r.host
        if r.host:
            yield dbpool.runQuery(
                """
                UPDATE requests SET host=? WHERE id=?;
                """,
                (host, reqid)
                )

    yield dbpool.runQuery(
        """
        UPDATE schema_meta SET version=4;
        """
        )
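The backfill above leans on http.Request parsing the Host header out of the stored request bytes. The rule it encodes looks roughly like this stdlib-only sketch; it is illustrative, not pappy's actual implementation:

# Illustrative sketch of the Host-header rule schema v4 depends on.
def parse_host(full_request):
    # Skip the request line, then scan header lines until the blank separator
    for line in full_request.split('\r\n')[1:]:
        if not line:
            break
        key, _, value = line.partition(':')
        if key.strip().lower() == 'host':
            return value.strip()
    return None

print parse_host('GET / HTTP/1.1\r\nHost: example.com:8443\r\n\r\n')
# example.com:8443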
pappyproxy/schema/schema_5.py (new file, 29 lines)

@@ -0,0 +1,29 @@
from pappyproxy import http
from twisted.internet import defer

"""
Schema v5

Description:
Adds a column to the requests table which will store a dict that plugins can
use to store metadata about requests.
"""

update_queries = [
    """
    ALTER TABLE requests ADD COLUMN plugin_data TEXT;
    """,

    """
    UPDATE requests SET plugin_data="{}";
    """,

    """
    UPDATE schema_meta SET version=5;
    """
]

@defer.inlineCallbacks
def update(dbpool):
    for query in update_queries:
        yield dbpool.runQuery(query)
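Because plugin_data is a TEXT column seeded with "{}", plugins presumably serialize a dict through JSON to use it. A hypothetical round-trip; the helper names are mine, not pappy's API:

import json

def load_plugin_data(text):
    # The v5 migration seeds the column with "{}", so empty rows still parse
    return json.loads(text or '{}')

def dump_plugin_data(data):
    return json.dumps(data)

data = load_plugin_data('{}')
data['myplugin'] = {'note': 'hit on /login'}
print dump_plugin_data(data)  # {"myplugin": {"note": "hit on /login"}}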
@ -1,6 +1,7 @@
|
|||
import os
|
||||
import glob
|
||||
import imp
|
||||
import random
|
||||
|
||||
from twisted.internet import reactor
|
||||
from twisted.enterprise import adbapi
|
||||
|
@@ -29,9 +30,28 @@ def add_schema_files(schemas):
        module_name = os.path.basename(os.path.splitext(mod)[0])
        newmod = imp.load_source('%s'%module_name, mod)
        schemas.append( (module_name, newmod) )

def copy_file(a, b):
    a_bytes = a.read(1024)
    while a_bytes:
        b.write(a_bytes)
        a_bytes = a.read(1024)

def create_backup(filename):
    backup = filename + '.bak'
    while os.path.isfile(backup):
        backup = '%s.%d' % (backup, random.randint(0, 9999999999))
    # Make sure the backup file has secure permissions
    with os.fdopen(os.open(backup, os.O_CREAT, 0o0600), 'r') as f:
        pass
    # Copy the data file
    with open(filename, 'r') as a:
        with open(backup, 'w') as b:
            copy_file(a, b)
    return backup
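The os.fdopen(os.open(...)) dance in create_backup exists so the backup file is created with 0600 permissions before any data lands in it; a plain open(backup, 'w') would briefly expose the file with default permissions. The same pattern in isolation, with example.bak as an illustrative path:

import os

# Create the file with restrictive permissions up front, then close it;
# the later open(..., 'w') only truncates an already-0600 file.
fd = os.open('example.bak', os.O_CREAT, 0o0600)
os.close(fd)
print oct(os.stat('example.bak').st_mode & 0o777)  # 0600 (modulo umask)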
@defer.inlineCallbacks
-def update_schema(dbpool):
+def update_schema(dbpool, filename):
    # Update the database schema to the latest version
    schema_version = yield get_schema_version(dbpool)
    if schema_version == 0:
@@ -41,11 +61,32 @@ def update_schema(dbpool):
    schemas = []
    add_schema_files(schemas)
    schemas = sorted(schemas, key=lambda tup: tup[0])
-    for i in range(schema_version, len(schemas)):
-        # schemas[0] is v1, schemas[1] is v2, etc
-        if verbose_update:
-            print "Updating datafile schema to version %d" % (i+1)
-        yield schemas[i][1].update(dbpool)
+    to_run = range(schema_version, len(schemas))
+    if len(to_run) > 0:
+        # Back up data file
+        print 'Backing up data file'
+        backup = create_backup(filename)
+        if verbose_update:
+            print 'Backed up to %s' % backup
+        try:
+            for i in to_run:
+                # schemas[0] is v1, schemas[1] is v2, etc
+                if verbose_update:
+                    print "Updating datafile schema to version %d" % (i+1)
+                yield schemas[i][1].update(dbpool)
+            # Delete backup
+            os.remove(backup)
+            if verbose_update:
+                print 'Update successful! Deleted backup'
+        except Exception as e:
+            # restore the backup
+            print 'Update failed, restoring backup'
+            with open(filename, 'w') as a:
+                with open(backup, 'r') as b:
+                    copy_file(b, a)
+            os.remove(backup)
+            raise e

@defer.inlineCallbacks
def main():
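update_schema pivots on get_schema_version, which is outside this hunk; given the "UPDATE schema_meta SET version=N;" statements above, it presumably reads that single-row table, roughly like this sketch (the function body here is an assumption, not code from this commit):

from twisted.internet import defer

@defer.inlineCallbacks
def get_schema_version_sketch(dbpool):
    # Assumed shape: schema_meta holds one row whose version column the
    # migrations above bump after each successful upgrade.
    rows = yield dbpool.runQuery("SELECT version FROM schema_meta;")
    defer.returnValue(rows[0][0] if rows else 0)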