diff --git a/README.md b/README.md
index 39a8101..2ca2ef8 100644
--- a/README.md
+++ b/README.md
@@ -1,15 +1,15 @@
 # Description
 JMeter Control center.
-Online web-application for CI Load testing with JMeter, future replacement for Jenkins + Plugins + Jmeter combination. Now it is possible to analyze JMeter test results and monitoring the running tests which were started in console mode (incl. distribution testing). Currently the part where is possible to run the tests is under development.
-
-Currently it`s designed for integration with Jenkins. In post-job script need to include script datagenerator_linux.py which will populate database with all necessary data.
+Online web application for CI load testing with JMeter, a replacement for the Jenkins + plugins + JMeter combination.
+It is now possible to run tests, analyze JMeter test results (including comparing results between runs) and monitor running tests that were started in console mode (including distributed testing).
+It can be used in integration with Jenkins CI: the post-job script must include datagenerator_linux.py, which populates the database with all the necessary data.
 
 Consist of several modules:
 1. Analyzer - build reports, analyze results and compare results with another.
 2. Online - online monitoring for running tests
-3. Controller - central part for configuration and starting tests (currently ready ~ 60%)
+3. Controller - central part for configuration and starting tests
 
 # [ANALYZER]
 Create dynamic report for tests
diff --git a/analyzer/templates/overall_configure_page.html b/analyzer/templates/overall_configure_page.html
index d424107..49c4d00 100644
--- a/analyzer/templates/overall_configure_page.html
+++ b/analyzer/templates/overall_configure_page.html
@@ -54,12 +54,10 @@
         $("input[type=checkbox]").on("change", function() {
             show = this.checked;
             edit_test_id = this.value;
-            new_display_name = $('#display_name_' + edit_test_id).text();
             $.ajax({
                 url: "/analyzer/test/" + edit_test_id + "/change/",
                 type: "post",
                 data: {
-                    display_name: new_display_name,
                     show: show,
                     csrfmiddlewaretoken: '{{ csrf_token }}'
                 },
diff --git a/analyzer/views.py b/analyzer/views.py
index 3f41f6d..5d8ddf5 100644
--- a/analyzer/views.py
+++ b/analyzer/views.py
@@ -167,21 +167,17 @@ def test_servers(request, test_id):
     return JsonResponse(list(servers_list), safe=False)
 
+
 def test_change(request, test_id):
     test = Test.objects.get(id=test_id)
     response = []
     if request.method == 'POST':
         if 'show' in request.POST:
             show = request.POST.get('show', '')
-            display_name = request.POST.get('display_name', '')
-            print show
-            print display_name
             test.show = True if show == 'true' else False
-            test.display_name = display_name
             test.save()
-        else:
+        elif 'display_name' in request.POST:
             display_name = request.POST.get('display_name', '')
-            print display_name
             test.display_name = display_name
             test.save()
     response = [{"message": "Test data was changed"}]
diff --git a/controller/datagenerator.py b/controller/datagenerator.py
new file mode 100644
index 0000000..f2afda4
--- /dev/null
+++ b/controller/datagenerator.py
@@ -0,0 +1,193 @@
+from collections import defaultdict, OrderedDict
+import json
+
+from pandas import DataFrame
+from pylab import *
+import pandas as pd
+import matplotlib.font_manager
+import sys
+import re
+import os
+import zipfile
+from os.path import basename
+from django.conf import settings
+from controller.models import TestRunning
+from analyzer.models import Project, Test, Action, \
+    TestActionData, TestAggregate, TestData
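+# Usage sketch (an assumption about the calling convention; the caller is not
+# shown in this diff): generate_data() takes the id of a TestRunning row and is
+# meant to run once a console-mode JMeter test has finished writing its result
+# file, e.g. from a Django shell:
+#     from controller.datagenerator import generate_data
+#     generate_data(running_test.id)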
+reload(sys)
+sys.setdefaultencoding('utf-8')
+
+def percentile(n):
+    def percentile_(x):
+        return np.percentile(x, n)
+    percentile_.__name__ = 'percentile_%s' % n
+    return percentile_
+
+def mask(df, f):
+    return df[f(df)]
+
+def ord_to_char(v, p=None):
+    return chr(int(v))
+
+def get_dir_size(path):
+    total_size = 0
+    for dirpath, dirnames, filenames in os.walk(path):
+        for f in filenames:
+            if f != 'checksum':
+                fp = os.path.join(dirpath, f)
+                total_size += os.path.getsize(fp)
+    return total_size
+
+def zip_results_file(file):
+    if os.path.exists(file + '.zip'):
+        os.remove(file + '.zip')
+    print "Move results file " + file + " to zip archive"
+    with zipfile.ZipFile(file + ".zip", "w", zipfile.ZIP_DEFLATED, allowZip64=True) as zip_file:
+        zip_file.write(file, basename(file))
+    os.remove(file)
+    print "File was packed, original file was deleted"
+
+dateconv = np.vectorize(datetime.datetime.fromtimestamp)
+
+def generate_data(t_id):
+    print "Parse and generate test data: " + str(t_id)
+    test_running = TestRunning.objects.get(id=t_id)
+    jmeter_results_file = test_running.result_file_dest
+    if os.path.exists(jmeter_results_file):
+        if not Test.objects.filter(path=test_running.workspace).exists():
+            test = Test(
+                project_id=test_running.project_id,
+                path=test_running.workspace,
+                display_name=test_running.display_name,
+                start_time=test_running.start_time,
+                build_number=0,
+                show=True
+            )
+            test.save()
+        else:
+            test = Test.objects.get(path=test_running.workspace)
+        project_id = test.project_id
+        test_id = test.id
+        df = pd.DataFrame()
+        if os.stat(jmeter_results_file).st_size > 1000007777:
+            print "Executing a parse for a huge file"
+            chunks = pd.read_table(jmeter_results_file, sep=',', index_col=0, chunksize=3000000)
+            for chunk in chunks:
+                chunk.columns = ['average', 'url', 'responseCode', 'success', 'threadName', 'failureMessage', 'grpThreads', 'allThreads']
+                # drop samplers whose label starts with 'exclude_'
+                chunk = chunk[~chunk['url'].str.contains('exclude_')]
+                df = df.append(chunk)
+            print "Parsing a huge file, size: " + str(df.size)
+        else:
+            df = pd.read_csv(jmeter_results_file, index_col=0, low_memory=False)
+            df.columns = ['average', 'url', 'responseCode', 'success', 'threadName', 'failureMessage', 'grpThreads', 'allThreads']
+            df = df[~df['url'].str.contains('exclude_')]
+
+        # convert timestamps to normal date/time
+        df.index = pd.to_datetime(dateconv((df.index.values / 1000)))
+        num_lines = df['average'].count()
+        print "Number of lines in file: %d." % num_lines
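+        # Assumed result-file layout (inferred from the column renames above,
+        # not verified against this project's jmeter.properties): a headerless
+        # JMeter CSV whose first field is the epoch-millisecond timestamp used
+        # as the index, e.g. (illustrative values):
+        #     1467216594123,330,/home,200,true,Thread Group 1-1,,10,10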
+        unique_urls = df['url'].unique()
+        for url in unique_urls:
+            if not Action.objects.filter(url=url, project_id=project_id).exists():
+                print "Adding new action: " + url + " project_id: " + str(project_id)
+                a = Action(url=url,
+                           project_id=project_id)
+                a.save()
+            a = Action.objects.get(url=url, project_id=project_id)
+            action_id = a.id
+            if not TestActionData.objects.filter(action_id=action_id, test_id=test_id).exists():
+                print "Adding action data: " + url
+                df_url = df[(df.url == url)]
+                url_data = pd.DataFrame()
+                df_url_gr_by_ts = df_url.groupby(pd.TimeGrouper(freq='1Min'))
+                url_data['avg'] = df_url_gr_by_ts.average.mean()
+                url_data['median'] = df_url_gr_by_ts.average.median()
+                url_data['count'] = df_url_gr_by_ts.success.count()
+                df_url_gr_by_ts_only_errors = df_url[(df_url.success == False)].groupby(pd.TimeGrouper(freq='1Min'))
+                url_data['errors'] = df_url_gr_by_ts_only_errors.success.count()
+                url_data['test_id'] = test_id
+                url_data['url'] = url
+                output_json = json.loads(url_data.
+                                         to_json(orient='index', date_format='iso'),
+                                         object_pairs_hook=OrderedDict)
+                for row in output_json:
+                    data = {'timestamp': row,
+                            'avg': output_json[row]['avg'],
+                            'median': output_json[row]['median'],
+                            'count': output_json[row]['count'],
+                            'url': output_json[row]['url'],
+                            'errors': output_json[row]['errors'],
+                            'test_id': output_json[row]['test_id'],
+                            }
+                    test_action_data = TestActionData(
+                        test_id=output_json[row]['test_id'],
+                        action_id=action_id,
+                        data=data
+                    )
+                    test_action_data.save()
+        try:
+            by_url = df.groupby('url')
+            agg = by_url.aggregate({'average': np.mean}).round(1)
+            agg['median'] = by_url.average.median().round(1)
+            agg['percentile_75'] = by_url.average.quantile(.75).round(1)
+            agg['percentile_90'] = by_url.average.quantile(.90).round(1)
+            agg['percentile_99'] = by_url.average.quantile(.99).round(1)
+            agg['maximum'] = by_url.average.max().round(1)
+            agg['minimum'] = by_url.average.min().round(1)
+            agg['cnt'] = by_url.success.count().round(1)
+            agg['errors'] = ((1 - df[(df.success == True)].groupby('url')['success'].count() / by_url['success'].count()) * 100).round(1)
+            agg['weight'] = by_url.average.sum()
+            agg['test_id'] = test_id
+            action_df = DataFrame(list(Action.objects.values('id', 'url').filter(project_id=project_id)))
+            action_df.columns = ['action_id', 'url']
+            action_df = action_df.set_index('url')
+            agg.index.names = ['url']
+            agg = pd.merge(action_df, agg, left_index=True, right_index=True)
+            #agg = agg.set_index('action_id')
+            print agg.columns
+            for index, row in agg.iterrows():
+                print "add row:" + str(row)
+                aggr = TestAggregate(
+                    test_id=row['test_id'],
+                    action_id=int(row['action_id']),
+                    average=row['average'],
+                    median=row['median'],
+                    percentile_75=row['percentile_75'],
+                    percentile_90=row['percentile_90'],
+                    percentile_99=row['percentile_99'],
+                    maximum=row['maximum'],
+                    minimum=row['minimum'],
+                    count=int(row['cnt']),
+                    errors=row['errors'],
+                    weight=row['weight']
+                )
+                aggr.save()
+            zip_results_file(jmeter_results_file)
+        except ValueError as e:
+            print "error", e
+
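+        # Overall rollup: each TestData row stores one one-minute bucket as a
+        # JSON blob shaped like this (values are illustrative only):
+        #     {"timestamp": "2016-06-29T16:09:00.000Z",
+        #      "avg": 412.3, "median": 380.0, "count": 1270}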
+        test_overall_data = pd.DataFrame()
+        df_gr_by_ts = df.groupby(pd.TimeGrouper(freq='1Min'))
+        test_overall_data['avg'] = df_gr_by_ts.average.mean()
+        test_overall_data['median'] = df_gr_by_ts.average.median()
+        test_overall_data['count'] = df_gr_by_ts.average.count()
+        test_overall_data['test_id'] = test_id
+        output_json = json.loads(test_overall_data.
+                                 to_json(orient='index', date_format='iso'),
+                                 object_pairs_hook=OrderedDict)
+        for row in output_json:
+            data = {'timestamp': row, 'avg': output_json[row]['avg'],
+                    'median': output_json[row]['median'],
+                    'count': output_json[row]['count']}
+            test_data = TestData(
+                test_id=output_json[row]['test_id'],
+                data=data
+            )
+            test_data.save()
+    else:
+        print "Result file does not exist"
+
+    return True
\ No newline at end of file
diff --git a/controller/datagenerator_linux.py b/controller/datagenerator_linux.py
deleted file mode 100644
index 5b4d366..0000000
--- a/controller/datagenerator_linux.py
+++ /dev/null
@@ -1,420 +0,0 @@
-from collections import defaultdict, OrderedDict
-import json
-from pylab import *
-import numpy as na
-import pandas as pd
-import matplotlib.font_manager
-import csv
-import sys
-import re
-import os
-import zipfile
-import sqlalchemy
-from xml.etree.ElementTree import ElementTree
-from os.path import basename
-from sqlalchemy import create_engine, Table, Column, Index, Integer, String, ForeignKey
-from sqlalchemy.sql.expression import func
-from sqlalchemy.sql import select, delete
-from sqlalchemy.orm import sessionmaker
-from sqlalchemy.engine import reflection
-from sqlalchemy.dialects.postgresql import DOUBLE_PRECISION, TEXT, TIMESTAMP,BIGINT
-
-db_engine = create_engine('postgresql://postgres:postgres@localhost:5432/postgres')
-db_connection = db_engine.connect()
-meta = sqlalchemy.MetaData(bind=db_connection, reflect=True, schema="jltom")
-insp = reflection.Inspector.from_engine(db_engine)
-schema = 'jltom'
-
-project = meta.tables['jltom.project']
-test = meta.tables['jltom.test']
-test_data = meta.tables['jltom.test_data']
-action = meta.tables['jltom.action']
-test_action_data = meta.tables['jltom.test_action_data']
-server = meta.tables['jltom.server']
-aggregate = meta.tables['jltom.aggregate']
-server_monitoring_data = meta.tables['jltom.server_monitoring_data']
-test_aggregate = meta.tables['jltom.test_aggregate']
-
-
-
-
-
-Session = sessionmaker(bind=db_engine)
-
-db_session = Session()
-#stm = server_monitoring_data.delete()
-#result = db_session.execute(stm)
-#stm = test_aggregate.delete()
-#result = db_session.execute(stm)
-#stm = test_action_data.delete()
-#result = db_session.execute(stm)
-#stm = test_data.delete()
-#result = db_session.execute(stm)
-#stm = aggregate.delete()
-#result = db_session.execute(stm)
-#stm = test.delete()
-#result = db_session.execute(stm)
-#stm = action.delete()
-#result = db_session.execute(stm)
-#stm = server.delete()
-#result = db_session.execute(stm)
-#stm = project.delete()
-#result = db_session.execute(stm)
-
-db_session.commit()
-
-reload(sys)
-sys.setdefaultencoding('utf-8')
-matplotlib.style.use('bmh')
-
-
-
-def percentile(n):
-    def percentile_(x):
-        return np.percentile(x, n)
-    percentile_.__name__ = 'percentile_%s' % n
-    return percentile_
-
-def mask(df, f):
-    return df[f(df)]
-
-def getIndex(item):
-    return int(re.search('(\d+)/', item[0]).group(1))
-
-def ord_to_char(v, p=None):
-    return chr(int(v))
-
-def get_dir_size(path):
-    total_size = 0
-    for dirpath, dirnames, filenames in os.walk(path):
-        for f in filenames:
-            if not f=='checksum':
-                fp = os.path.join(dirpath, f)
-                total_size += os.path.getsize(fp)
-    return total_size
-
-def zip_results_file(file):
-    if os.path.exists(file+'.zip'):
-        os.remove(file+'.zip')
-    print "Move results file " + file + " to zip archive"
-    with zipfile.ZipFile(file + ".zip", "w", zipfile.ZIP_DEFLATED,allowZip64 = True) as zip_file:
-        zip_file.write(file, basename(file))
-    os.remove(file)
-    print "File was packed, original file was deleted"
-
-jtl_files = []
-
-builds_dir="/var/lib/jenkins/jobs"
-
-jtl_files = []
-releases = []
-
-
-build_xml = ElementTree()
-for root, dirs, files in os.walk(builds_dir):
-    for file in files:
-        if "jmeter.jtl" in file:
-            if os.stat(os.path.join(root, file)).st_size>0:
-                build_parameters = []
-                displayName = "unknown"
-                startTime = 0
-                monitoring_data = os.path.join(root.replace('JMeterCSV','').replace('performance-reports',''), "monitoring.data")
-                build_xml_path = os.path.join(root.replace('JMeterCSV','').replace('performance-reports',''), "build.xml")
-
-                if os.path.isfile(build_xml_path):
-                    build_xml.parse(build_xml_path)
-                    build_tag = build_xml.getroot()
-
-                    for params in build_tag:
-                        if params.tag == 'actions':
-                            parameters = params.find('.//parameters')
-                            for parameter in parameters:
-                                name = parameter.find('name')
-                                value = parameter.find('value')
-                                build_parameters.append([name.text,value.text])
-                        elif params.tag == 'startTime':
-                            startTime = int(params.text)
-                        elif params.tag == 'displayName':
-                            displayName = params.text
-
-                if "Performance_HTML_Report" not in os.path.join(root, file):
-                    jtl_files.append([os.path.join(root, file),monitoring_data,displayName, build_parameters,root])
-                    project_name = re.search('/([^/]+)/builds', root).group(1)
-                    if db_session.query(project.c.id). \
-                            filter(project.c.project_name == project_name).count() == 0:
-                        print "Adding new project: " + project_name;
-                        stm = project.insert().values(project_name=project_name)
-                        result = db_connection.execute(stm)
-
-                    project_id = db_session.query(project.c.id). \
-                        filter(project.c.project_name == project_name).scalar()
-                    print "Project_id: " + str(project_id)
-                    if db_session.query(test.c.path).filter(test.c.path==root).count()==0:
-                        build_number = int(re.search('/builds/(\d+)', root).group(1))
-                        stm = test.insert().values(path=root,
-                                                   display_name=displayName,
-                                                   project_id=project_id,
-                                                   start_time=startTime,
-                                                   build_number=build_number)
-                        result = db_connection.execute(stm)
-jtl_files = sorted(jtl_files, key=getIndex,reverse=True)
-
-
-releases.sort();
-
-
-dateconv = np.vectorize(datetime.datetime.fromtimestamp)
-
-
-aggregate_table='aggregate_table'
-monitor_table='monitor_table'
-
-
-agg = {}
-mon = {}
-
-rtot_over_releases = [];
-cpu_over_releases = [];
-
-
-file_index = 0
-print "Trying to open CSV-files"
-
-build_roots = [jtl_files[i][4] for i in xrange(0,len(jtl_files))]
-
-#dataframe to compare with:
-
-for build_root in build_roots:
-
-    print "Current build directory:" + build_root
-    test_id = db_session.query(test.c.id).filter(test.c.path==build_root).scalar()
-    project_id = db_session.query(test.c.project_id).filter(test.c.id == test_id).scalar()
-
-
-    checksum = -1
-    PARSED_DATA_ROOT = build_root + "/parsed_data/"
-    if db_session.query(aggregate.c.test_id).filter(aggregate.c.test_id==test_id).count()==0:
-
-        df = pd.DataFrame()
-        jmeter_results_file = build_root + "/jmeter.jtl"
-        if not os.path.exists(jmeter_results_file):
-            print "Results file does not exists, try to check archive"
-            jmeter_results_zip = jmeter_results_file + ".zip"
-            if os.path.exists(jmeter_results_zip):
-                print "Archive file was found " + jmeter_results_zip
-                with zipfile.ZipFile(jmeter_results_zip, "r") as z:
-                    z.extractall(build_root)
-        print "Executing a new parse: " + jmeter_results_file + " size: "+ str(os.stat(jmeter_results_file).st_size)
-        if os.stat(jmeter_results_file).st_size > 1000007777:
-            print "Executing a parse for a huge file"
-            chunks = pd.read_table(jmeter_results_file,sep=',',index_col=0,chunksize=3000000);
-            for chunk in chunks:
-                chunk.columns = ['average', 'url','responseCode','success','threadName','failureMessage','grpThreads','allThreads']
-                chunk=chunk[~chunk['URL'].str.contains('exclude_')]
-                df = df.append(chunk);
-            print "Parsing a huge file,size: " + str(df.size)
-        else:
-            df = pd.read_csv(jmeter_results_file,index_col=0,low_memory=False)
-            df.columns = ['average', 'url','responseCode','success','threadName','failureMessage','grpThreads','allThreads']
-            df=df[~df['url'].str.contains('exclude_')]
-
-        df.columns = ['average', 'url','responseCode','success','threadName','failureMessage','grpThreads','allThreads']
-        #convert timestamps to normal date/time
-        df.index=pd.to_datetime(dateconv((df.index.values/1000)))
-        num_lines = df['average'].count()
-        print "Number of lines in file 1: %d." % num_lines
-
-        unique_urls = df['url'].unique()
-        for url in unique_urls:
-            if db_session.query(action.c.id).filter(action.c.url == url).\
-                    filter(action.c.project_id == project_id).count() == 0:
-                print "Adding new action: " + url
-                stm = action.insert().values(url=url,
-                                             project_id=project_id,
-                                             )
-                result = db_connection.execute(stm)
-
-            action_id = db_session.query(action.c.id).filter(action.c.url == url). \
-                filter(action.c.project_id == project_id).scalar()
-            print "Adding action data: " + url
-            df_url = df[(df.url == url)]
-            url_data = pd.DataFrame()
-            df_url_gr_by_ts = df_url.groupby(pd.TimeGrouper(freq='1Min'))
-            url_data['avg'] = df_url_gr_by_ts.average.mean()
-            url_data['median'] = df_url_gr_by_ts.average.median()
-            url_data['count'] = df_url_gr_by_ts.success.count()
-            df_url_gr_by_ts_only_errors = df_url[(df_url.success == False)].groupby(pd.TimeGrouper(freq='1Min'))
-            url_data['errors'] = df_url_gr_by_ts_only_errors.success.count()
-            url_data['test_id'] = test_id
-            url_data['url'] = url
-            output_json = json.loads(url_data.
-                                     to_json(orient='index',date_format='iso'),
-                                     object_pairs_hook=OrderedDict)
-            for row in output_json:
-                data = {'timestamp': row,
-                        'avg': output_json[row]['avg'],
-                        'median': output_json[row]['median'],
-                        'count': output_json[row]['count'],
-                        'url': output_json[row]['url'],
-                        'errors': output_json[row]['errors'],
-                        'test_id': output_json[row]['test_id'],
-                        }
-                stm = test_action_data.insert().values(test_id=output_json[row]['test_id'],
-                                                       action_id=action_id,
-                                                       data=data
-                                                       )
-                result = db_connection.execute(stm)
-
-        try:
-            by_url = df.groupby('url')
-            agg[file_index] = by_url.aggregate({'average':np.mean}).round(1)
-            agg[file_index]['median'] = by_url.average.median().round(1)
-            agg[file_index]['percentile_75'] = by_url.average.quantile(.75).round(1)
-            agg[file_index]['percentile_90'] = by_url.average.quantile(.90).round(1)
-            agg[file_index]['percentile_99'] = by_url.average.quantile(.99).round(1)
-            agg[file_index]['maximum'] = by_url.average.max().round(1)
-            agg[file_index]['minimum'] = by_url.average.min().round(1)
-            agg[file_index]['count'] = by_url.success.count().round(1)
-            agg[file_index]['errors'] = ((1-df[(df.success == True)].groupby('url')['success'].count()/by_url['success'].count())*100).round(1)
-            agg[file_index]['weight'] = by_url.average.sum()
-            agg[file_index]['test_id'] = test_id
-            action_df = pd.read_sql(db_session.query(action.c.id,action.c.url).
-                                    filter(action.c.project_id==project_id).statement,
-                                    con = db_session.bind)
-            action_df.columns = ['action_id', 'url']
-            action_df = action_df.set_index('url')
-            agg[file_index].index.names = ['url']
-            agg[file_index] = pd.merge(action_df, agg[file_index]
-                                       ,left_index=True, right_index=True)
-            agg[file_index] = agg[file_index].set_index('action_id')
-            print agg[file_index].columns
-            agg[file_index].to_sql("aggregate", schema='jltom', con=db_engine, if_exists='append')
-            zip_results_file(jmeter_results_file)
-        except ValueError,e:
-            print "error",e
-
-
-        #print df.groupby(pd.TimeGrouper(freq='1Min')).average.agg(lambda x: x.to_json(orient='records'))
-        test_overall_data = pd.DataFrame()
-        df_gr_by_ts = df.groupby(pd.TimeGrouper(freq='1Min'))
-        test_overall_data['avg'] = df_gr_by_ts.average.mean()
-        test_overall_data['median'] = df_gr_by_ts.average.median()
-        test_overall_data['count'] = df_gr_by_ts.average.count()
-        test_overall_data['test_id'] = test_id
-        output_json = json.loads(test_overall_data.
-                                 to_json(orient='index',date_format='iso'),
-                                 object_pairs_hook=OrderedDict)
-        for row in output_json:
-            data = {'timestamp': row, 'avg': output_json[row]['avg'],
-                    'median': output_json[row]['median'],
-                    'count': output_json[row]['count']}
-            stm = test_data.insert().values(test_id=output_json[row]['test_id'],
-                                            data=data
-                                            )
-            result = db_connection.execute(stm)
-
-    file_index += 1
-
-
-num = 0
-GRAPHS = ""
-for build_root in build_roots:
-    uniqueURL = []
-    PARSED_DATA_ROOT = build_root + "/parsed_data/"
-
-    rownum = 0
-
-    if os.path.isfile(jtl_files[num][1]) and os.stat(jtl_files[num][1]).st_size != 0:
-        test_id = db_session.query(test.c.id).filter(test.c.path==build_root).scalar()
-        f = open(jtl_files[num][1],"r")
-        lines = f.readlines()
-        f.close()
-        f = open(jtl_files[num][1],"w")
-        for line in lines:
-            if not ('start' in line):
-                f.write(line)
-
-        f.close()
-        monitoring_df = pd.read_csv(jtl_files[num][1],index_col=1, sep=";")
-
-        monitoring_df.columns = ['server_name',
-                                 'Memory_used',
-                                 'Memory_free',
-                                 'Memory_buff',
-                                 'Memory_cached',
-                                 'Net_recv',
-                                 'Net_send',
-                                 'Disk_read',
-                                 'Disk_write',
-                                 'System_la1',
-                                 'CPU_user',
-                                 'CPU_system',
-                                 'CPU_iowait']
-        monitoring_df.index=pd.to_datetime(dateconv((monitoring_df.index.values)))
-        monitoring_df.index.names = ['timestamp']
-
-        unique_servers = monitoring_df['server_name'].unique()
-        for server_ in unique_servers:
-            if db_session.query(server.c.id).\
-                    filter(server.c.server_name == server_).count() == 0:
-                print "Adding new server: " + server_
-                stm = server.insert().values(server_name=server_
-                                             )
-                result = db_connection.execute(stm)
-
-            server_id = db_session.query(server.c.id).\
-                filter(server.c.server_name == server_).scalar()
-
-            if db_session.query(server_monitoring_data.c.test_id).\
-                    filter(server_monitoring_data.c.test_id==test_id).\
-                    filter(server_monitoring_data.c.server_id==server_id).count()==0:
-                df_server = monitoring_df[(monitoring_df.server_name == server_)]
-                output_json = json.loads(df_server.
-                                         to_json(orient='index',date_format='iso'),
-                                         object_pairs_hook=OrderedDict)
-                for row in output_json:
-                    data = {'timestamp': row,
-                            'Memory_used': output_json[row]['Memory_used'],
-                            'Memory_free': output_json[row]['Memory_free'],
-                            'Memory_buff': output_json[row]['Memory_buff'],
-                            'Memory_cached': output_json[row]['Memory_cached'],
-                            'Net_recv': output_json[row]['Net_recv'],
-                            'Net_send': output_json[row]['Net_send'],
-                            'Disk_read': output_json[row]['Disk_read'],
-                            'Disk_write': output_json[row]['Disk_write'],
-                            'System_la1': output_json[row]['System_la1'],
-                            'CPU_user': output_json[row]['CPU_user'],
-                            'CPU_system': output_json[row]['CPU_system'],
-                            'CPU_iowait': output_json[row]['CPU_iowait']
-                            }
-                    stm = server_monitoring_data.insert().values(test_id=test_id, server_id=server_id, data=data)
-                    result = db_connection.execute(stm)
-
-    else:
-        print "Monitoring data is not exist"
-    num+=1
-
-stmt = select([
-    test.c.id, test.c.path
-])
-query_result = db_engine.execute(stmt)
-
-print "Cleanup obsolete test results"
-for q in query_result:
-    test_id = q.id
-    test_path = q.path
-    print "Check: " + test_path
-    if not os.path.exists(q.path):
-        print "Deleting test_id:" + str(test_id) + " path:" + test_path
-        stm1 = aggregate.delete().where(aggregate.c.test_id == test_id)
-        stm2 = server_monitoring_data.delete().where(server_monitoring_data.c.test_id == test_id)
-        stm3 = test_action_data.delete().where(test_action_data.c.test_id == test_id)
-        stm4 = test_data.delete().where(test_data.c.test_id == test_id)
-        stm5 = test.delete().where(test.c.id == test_id)
-
-        result1 = db_connection.execute(stm1)
-        result2 = db_connection.execute(stm2)
-        result3 = db_connection.execute(stm3)
-        result4 = db_connection.execute(stm4)
-        result5 = db_connection.execute(stm5)
-
diff --git a/controller/models.py b/controller/models.py
index 5bd13c2..140bf47 100644
--- a/controller/models.py
+++ b/controller/models.py
@@ -18,7 +18,7 @@ class Meta:
 
 class TestRunning(models.Model):
     project = models.ForeignKey(Project, on_delete=models.CASCADE)
-    result_file_path = models.CharField(max_length=200, default = "")
+    result_file_dest = models.CharField(max_length=200, default = "")
     log_file_dest = models.CharField(max_length=200, default = "")
     display_name = models.CharField(max_length=100, default = "")
     start_time = models.BigIntegerField()
diff --git a/controller/proxy_linux.py b/controller/proxy_linux.py
deleted file mode 100644
index de12404..0000000
--- a/controller/proxy_linux.py
+++ /dev/null
@@ -1,235 +0,0 @@
-import socket
-import SocketServer
-import threading
-
-import select
-import sqlalchemy
-import time
-import errno
-from time import sleep
-
-import sys
-from socket import _fileobject, timeout
-
-from select import poll, POLLIN, POLLOUT, POLLERR
-from select import epoll, EPOLLIN, EPOLLOUT, EPOLLERR
-
-from sqlalchemy import create_engine, Table, Column, Index, Integer, String, ForeignKey
-from sqlalchemy.dialects.postgresql import DOUBLE_PRECISION, TEXT, TIMESTAMP,BIGINT
-from sqlalchemy import create_engine
-from sqlalchemy.orm import sessionmaker
-from socket import error as SocketError
-from sqlalchemy.engine import reflection
-
-
-PORT_ = sys.argv[1]
-DESTINATION_ = sys.argv[2]
-PROXY_ID_ =sys.argv[3]
-
-print "PORT_:" + PORT_
-print "DESTINATION_:" + DESTINATION_
-print "PROXY_ID_:" + PROXY_ID_
-
-db_engine = create_engine('postgresql://postgres:postgres@localhost:5432/postgres')
-db_connection = db_engine.connect()
-meta = sqlalchemy.MetaData(bind=db_connection, reflect=True, schema="jltom")
-insp = reflection.Inspector.from_engine(db_engine)
-Session = sessionmaker(bind=db_engine)
-db_session = Session()
-
-
-def _poll(reads, writes, exceptions, timeout):
-    pollfunc = epoll
-    read_flag = EPOLLIN
-    write_flag = EPOLLOUT
-    exc_flag = EPOLLERR
-    p = pollfunc()
-    for fd in reads:
-        p.register(fd, read_flag)
-    for fd in writes:
-        p.register(fd, write_flag)
-    for fd in exceptions:
-        p.register(fd, exc_flag)
-    return p.poll(timeout)
-
-
-def _select(readlist, writelist, exceptionallist, timeout):
-    return _poll(readlist, writelist, exceptionallist, timeout)
-
-
-def wait_to_read_data(socket, timeout=0.0):
-    sockets = _select([socket], [], [], timeout)
-    if isinstance(sockets, tuple):
-        return sockets[0]
-    return sockets
-
-
-def wait_to_write_data(socket, timeout=0.0):
-    sockets = _select([], [socket], [], timeout)
-    if isinstance(sockets, tuple):
-        return sockets[1]
-    return sockets
-
-if not db_engine.dialect.has_table(db_engine.connect(), "delay_table"):
-    delay_table = Table('delay_table', meta,
-                        Column('value', DOUBLE_PRECISION),
-                        )
-    meta.create_all(db_connection)
-
-proxy = meta.tables['jltom.proxy']
-
-
-def get_delay(proxy_id):
-    statement = sqlalchemy.sql.select([
-        proxy.c.delay
-    ]).where(proxy.c.id == proxy_id)
-    x = execute_statement(statement, False)[0][0]
-    return float(x)
-
-
-def execute_statement(statement, with_header):
-    #log.debug("Executing SQL-query: " + str(statement))
-    q = db_engine.execute(statement)
-    output = []
-    fieldnames = []
-
-    for fieldname in q.keys():
-        fieldnames.append(fieldname)
-
-    if with_header:
-        output.append(fieldnames)
-
-    for row in q.fetchall():
-        values = []
-        for fieldname in fieldnames:
-            values.append(row[fieldname])
-
-        output.append(values)
-    return output
-
-
-current_incomings = []
-current_forwarders = []
-BUFFER_SIZE = 4096
-
-
-class Forwarder(threading.Thread):
-    def __init__(self, source):
-        threading.Thread.__init__(self)
-        self._stop = threading.Event()
-        self.source = source
-        self.destination = \
-            socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-        self.destination.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
-        self.destination.connect((DESTINATION_, 443))
-        self.connection_string = str(self.destination.getpeername())
-        print "[+] New forwarder: " + self.connection_string
-
-    def run(self):
-        try:
-            while 1:
-                r = wait_to_read_data(self.destination, 1.0)
-                if not r:
-                    print "[<] Timeout to wait the data from destination"
-                else:
-                    data = self.destination.recv(BUFFER_SIZE)
-                    if len(data) == BUFFER_SIZE:
-                        print "[<] Trying to get data from destination"
-                        while 1:
-                            try:
-                                data += self.destination.recv(BUFFER_SIZE, socket.MSG_DONTWAIT)
-                            except:
-                                break
-                    if data == "":
-                        self.close_connection()
-                        break;
-                    print "[<] Received from destination: " + str(len(data))
-                    self.source.write_to_source(data)
-        except SocketError as e:
-            if e.errno != errno.ECONNRESET:
-                raise
-            pass
-        print "[-] Closed destination"
-
-
-    def write_to_dest(self, data):
-        print "[>] Sending to destination: " + str(len(data))
-        wlist = wait_to_write_data(self.destination, 1.0)
-        if not wlist:
-            raise timeout()
-        self.destination.send(data)
-
-    def close_connection(self):
-        try:
-            self.source.request.shutdown(socket.SHUT_RDWR)
-        except socket.error:
-            pass
-        #self.source.request.close()
-
-
-class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler):
-    def handle(self):
-        delay = get_delay(PROXY_ID_)
-        print "[**] Delay: " + str(delay)
-        time.sleep(delay)
-        self.connection_string = str(self.request.getpeername())
-        self.request.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
-        print "[+] Incoming connection:" + str(self.connection_string)
-        f = Forwarder(self)
-        f.start()
-        try:
-            while 1:
-                r = wait_to_read_data(self.request, 1.0)
-                if not r:
-                    print "[>] Timeout to wait the data from incoming connection"
-                else:
-                    print "[>] Trying to get data from incoming connection"
-                    data = self.request.recv(BUFFER_SIZE)
-                    if (len(data)==BUFFER_SIZE):
-                        while 1:
-                            try: #error means no more data
-                                data += self.request.recv(BUFFER_SIZE, socket.MSG_DONTWAIT)
-                            except:
-                                break
-                    f.write_to_dest(data)
-                    if data == "":
-                        #f.close_connection()
-                        break;
-                    print "[>] Data from incoming connection: " + str(len(data))
-
-        except SocketError as e:
-            if e.errno != errno.ECONNRESET:
-                raise
-            pass
-        print "[-] Close incoming connection"
-
-    def write_to_source(self, data):
-        print "[<] Sending to incoming connect: " + str(len(data))
-        wlist = wait_to_write_data(self.request, 1.0)
-        if not wlist:
-            raise timeout()
-        self.request.send(data)
-
-
-
-class ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
-    pass
-
-if __name__ == "__main__":
-    HOST, PORT = "", PORT_
-    server = ThreadedTCPServer((HOST, int(PORT)), ThreadedTCPRequestHandler)
-    ip, port = server.server_address
-    server_thread = threading.Thread(target=server.serve_forever)
-    server_thread.daemon = True
-    server_thread.start()
-    print "[*] LINUX Starting proxy on port: ", port
-    try:
-        while True:
-            sleep(1)
-    except:
-        pass
-    print "[*] Stopping proxy..."
-    server.shutdown()
-
-
-
diff --git a/controller/templates/configure_test_page.html b/controller/templates/configure_test_page.html
index e7d5f56..b681fdb 100644
--- a/controller/templates/configure_test_page.html
+++ b/controller/templates/configure_test_page.html
@@ -18,6 +18,10 @@