2023-04-05 16:28:39 -05:00
|
|
|
from flask import Flask
|
|
|
|
from flask_restful import Resource, Api, reqparse
|
2023-04-05 20:36:19 -05:00
|
|
|
from datetime import date, timedelta
|
|
|
|
import configparser
|
2023-04-06 14:34:59 -05:00
|
|
|
# TODO Can we do it without pandas and numpy? They are so big.
|
|
|
|
#import pandas as pd
|
|
|
|
#import numpy as np
|
|
|
|
import csv
|
2023-04-05 16:28:39 -05:00
|
|
|
import ast
|
2023-04-05 18:21:37 -05:00
|
|
|
import glob
|
2023-04-05 22:43:35 -05:00
|
|
|
import json
|
2023-04-05 16:28:39 -05:00
|
|
|
# Create the Flask application and wrap it with Flask-RESTful's Api object
# so that Resource classes can be registered against URL routes below.
app = Flask(__name__)

api = Api(app)
|
|
|
|
|
2023-04-05 20:36:19 -05:00
|
|
|
def read_config(path='config.ini'):
    """Load application settings from an INI file.

    Parameters
    ----------
    path : str, optional
        Path to the INI file to read (default ``'config.ini'`` in the
        current working directory, matching the original behavior).

    Returns
    -------
    configparser.ConfigParser
        The parsed configuration.  Note that ``ConfigParser.read`` silently
        ignores a missing file, so a bad path yields an *empty* config and
        the failure only surfaces later as a KeyError at the lookup site.
    """
    config = configparser.ConfigParser()
    config.read(path)
    return config
|
|
|
|
|
|
|
|
def read_logs(log_folder):
    """Read recent log CSV files and return their rows as a list of dicts.

    Log file names are expected to start with an ISO date (YYYY-MM-DD).
    Because log timestamps are in Zulu (UTC) time, files named for
    yesterday, today, and tomorrow (relative to local time) are all
    collected so no boundary-crossing entries are missed.

    Parameters
    ----------
    log_folder : str
        Path prefix (including trailing separator) where log files live;
        it is concatenated directly with the date string for globbing.

    Returns
    -------
    list[dict]
        One dict per CSV row, keyed by each file's CSV header fields.
    """
    # TODO Load new files into a database on a schedule?
    today = date.today()
    days = (today - timedelta(days=1), today, today + timedelta(days=1))

    # Collect matching files for each of the three dates, in date order.
    file_list = []
    for day in days:
        file_list.extend(glob.glob(log_folder + str(day) + "*"))

    # CSV -> list-of-dicts approach from https://stackoverflow.com/a/66071962
    json_array = []
    for path in file_list:
        with open(path, encoding='utf-8') as csvf:
            reader = csv.DictReader(csvf)
            # DictReader yields one dict per data row; accumulate them all.
            json_array.extend(reader)

    return json_array
|
2023-04-05 20:36:19 -05:00
|
|
|
|
2023-04-06 14:34:59 -05:00
|
|
|
# class Users(Resource):
|
|
|
|
# def get(self):
|
|
|
|
# data = pd.read_csv('users.csv') # read CSV
|
|
|
|
# data = data.to_dict(orient = 'records') # convert dataframe to dictionary
|
|
|
|
# return {'data': data}, 200 # return data and 200 OK code
|
2023-04-05 17:36:16 -05:00
|
|
|
|
2023-04-06 14:34:59 -05:00
|
|
|
# class Locations(Resource):
|
|
|
|
# def get(self):
|
|
|
|
# data = pd.read_csv('locations.csv') # read CSV
|
|
|
|
# data = data.to_dict(orient = 'records') # convert dataframe to dictionary
|
|
|
|
# return {'data': data}, 200 # return data and 200 OK code
|
2023-04-05 17:36:16 -05:00
|
|
|
|
2023-04-05 18:21:37 -05:00
|
|
|
class Packets(Resource):
    """REST resource exposing the packet log rows loaded at startup."""

    def get(self):
        """Return the cached packet data and an HTTP 200 status.

        NOTE(review): this serves the module-level ``data`` loaded once at
        import time; re-reading the log files on every request was
        deliberately disabled, so a restart is needed to pick up new logs.
        """
        payload = {'data': data}
        return payload, 200  # return data and 200 OK code
|
|
|
|
|
2023-04-05 20:36:19 -05:00
|
|
|
# Read config
# Pull the log folder location from the [Settings] section of config.ini.
config = read_config()

log_folder = config['Settings']['log_folder']

# Load logs first (just to check for errors before page loads)
# NOTE(review): `data` is read once at import time and served as-is by
# Packets.get — the app must be restarted to pick up new log files.
data = read_logs(log_folder)
|
|
|
|
|
|
|
|
|
2023-04-06 14:34:59 -05:00
|
|
|
#api.add_resource(Users, '/users') # '/users' is our entry point for Users

#api.add_resource(Locations, '/locations') # and '/locations' is our entry point for Locations

api.add_resource(Packets, '/packets') # '/packets' is our entry point for Packets
|
2023-04-05 16:58:35 -05:00
|
|
|
|
|
|
|
# Only start the development server when run as a script, not when imported.
if __name__ == '__main__':

    app.run(debug=True) # run our Flask app; debug=True is for development only
|