diff options
| author | hc <hc@email.ch> | 2024-09-12 11:46:51 +0800 |
|---|---|---|
| committer | hc <hc@email.ch> | 2024-09-12 11:46:51 +0800 |
| commit | fabefacd8da4932c9a5e8b4aec33d196c290d33b (patch) | |
| tree | 58f775cff291903a091ed3d4a63265ad44705614 | |
| -rw-r--r-- | btcdashboard/App.js | 81 | ||||
| -rw-r--r-- | btcdashboard/DOCS/react1 | 21 | ||||
| -rw-r--r-- | btcdashboard/pulltoredis.py | 56 | ||||
| -rw-r--r-- | btcdashboard/server3.js | 43 | ||||
| -rw-r--r-- | tuffy/tuffycommand1.py | 379 |
5 files changed, 580 insertions, 0 deletions
diff --git a/btcdashboard/App.js b/btcdashboard/App.js
new file mode 100644
index 0000000..838ab4f
--- /dev/null
+++ b/btcdashboard/App.js
@@ -0,0 +1,81 @@
+import React, { useState, useEffect } from 'react';
+import { LineChart, Line, XAxis, YAxis, CartesianGrid, Tooltip, Legend, ResponsiveContainer } from 'recharts';
+
+const App = () => {
+  const [prices, setPrices] = useState([]);
+  const [rawData, setRawData] = useState('');
+  const [error, setError] = useState(null);
+
+  const testingString = "hiiii";
+
+  useEffect(() => {
+    const fetchPrices = async () => {
+      try {
+        console.log('Fetching prices...');
+        const response = await fetch('/api/prices', {
+          method: 'GET',
+          headers: {
+            'Content-Type': 'application/json',
+          },
+        });
+        console.log('Response status:', response.status);
+        console.log('Response headers:', response.headers);
+        if (!response.ok) {
+          throw new Error(`HTTP error! status: ${response.status}`);
+        }
+        const data = await response.json();
+        console.log('Fetched data:', data);
+        setPrices(data);
+        setRawData(JSON.stringify(data, null, 2));
+        setError(null);
+      } catch (error) {
+        console.error('Failed to fetch prices:', error);
+        setError(error.toString());
+        setRawData('');
+      }
+    };
+    fetchPrices();
+    const interval = setInterval(fetchPrices, 5000);
+    return () => clearInterval(interval);
+  }, []);
+
+  // prep data
+  const chartData = prices.map(item => {
+    const parsedItem = JSON.parse(item);
+    return {
+      price: parsedItem.price,
+      time: new Date(parsedItem.timestamp * 1000).toLocaleTimeString()
+    };
+  }).reverse(); // show oldest data first
+
+  return (
+    <div>
+      <h1>price</h1>
+      <p>test stuff: {testingString}</p>
+      {error && <p>Error: {error}</p>}
+      <pre>{rawData}</pre>
+
+      <h2>Price Graph</h2>
+      <ResponsiveContainer width="100%" height={400}>
+        <LineChart
+          data={chartData}
+          margin={{
+            top: 5,
+            right: 30,
+            left: 20,
+            bottom: 5,
+          }}
+        >
+          <CartesianGrid strokeDasharray="3 3" />
+          <XAxis dataKey="time" />
+          <YAxis domain={['auto', 'auto']} />
+          <Tooltip />
+          <Legend />
+          <Line type="monotone" dataKey="price" stroke="#8884d8" activeDot={{ r: 8 }} />
+        </LineChart>
+      </ResponsiveContainer>
+    </div>
+  );
+};
+
+export default App;
diff --git a/btcdashboard/DOCS/react1 b/btcdashboard/DOCS/react1
new file mode 100644
index 0000000..9f8da04
--- /dev/null
+++ b/btcdashboard/DOCS/react1
@@ -0,0 +1,21 @@
+
+sudo dnf update -y
+sudo dnf install nodejs npm -y
+mkdir prices && cd prices
+
+npx create-react-app client
+cd client
+cd ..
+npm install axios recharts
+
+mkdir server
+cd server
+npm init -y
+cd ..
+npm install express redis cors
+
+sudo dnf install -y redis
+sudo systemctl start redis
+sudo systemctl enable redis
+sudo systemctl status redis
+
diff --git a/btcdashboard/pulltoredis.py b/btcdashboard/pulltoredis.py
new file mode 100644
index 0000000..0a82fa7
--- /dev/null
+++ b/btcdashboard/pulltoredis.py
@@ -0,0 +1,56 @@
+import asyncio
+import aiohttp
+import redis
+import json
+import logging
+import time
+
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+# redis default port
+redis_client = redis.Redis(host='localhost', port=6379, db=0)
+
+URL = 'https://api.kraken.com/0/public/Ticker?pair=BTCUSD'
+HEADERS = {'Accept': 'application/json'}
+
+MAX_PRICES = 10
+UPDATE_INTERVAL = 3 # seconds
+
+async def fetch_btc_price(session):
+    try:
+        async with session.get(URL, headers=HEADERS, ssl=False) as response:
+            data = await response.json()
+            return float(data['result']['XXBTZUSD']['c'][0])
+    except Exception as e:
+        logger.error(f"error getting price: {e}")
+        return None
+
+async def update_price():
+    async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl=False)) as session:
+        while True:
+            price = await fetch_btc_price(session)
+            if price:
+                timestamp = int(time.time()) # this is unix
+                price_data = json.dumps({"price": price, "timestamp": timestamp})
+
+                redis_client.lpush('btc_prices', price_data)
+
+                redis_client.ltrim('btc_prices', 0, MAX_PRICES - 1)
+
+                logger.info(f"updated btc price: ${price}")
+
+                all_prices = redis_client.lrange('btc_prices', 0, -1)
+                logger.info("prices:")
+                for i, p in enumerate(all_prices, 1):
+                    p_data = json.loads(p)
+                    logger.info(f"{i}. ${p_data['price']} at {p_data['timestamp']}")
+
+            await asyncio.sleep(UPDATE_INTERVAL)
+
+async def main():
+    logger.info("btc price update")
+    await update_price()
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/btcdashboard/server3.js b/btcdashboard/server3.js
new file mode 100644
index 0000000..72bfb8d
--- /dev/null
+++ b/btcdashboard/server3.js
@@ -0,0 +1,43 @@
+const express = require('express');
+const cors = require('cors');
+const redis = require('redis');
+
+const app = express();
+const PORT = 3001;
+
+const redisClient = redis.createClient({
+  url: 'redis://localhost:6379' // this is default port
+});
+
+// CORS
+app.use((req, res, next) => {
+  res.header('Access-Control-Allow-Origin', '*');
+  res.header('Access-Control-Allow-Methods', 'GET, POST, OPTIONS, PUT, PATCH, DELETE');
+  res.header('Access-Control-Allow-Headers', 'Origin, X-Requested-With, Content-Type, Accept');
+  next();
+});
+
+app.get('/api/prices', async (req, res) => {
+  try {
+    const prices = await redisClient.lRange('btc_prices', 0, -1);
+
+    res.header('Access-Control-Allow-Origin', '*');
+    res.header('Access-Control-Allow-Methods', 'GET, POST, OPTIONS, PUT, PATCH, DELETE');
+    res.header('Access-Control-Allow-Headers', 'Origin, X-Requested-With, Content-Type, Accept');
+
+    res.json(prices);
+  } catch (error) {
+    console.error('Error fetching prices:', error);
+    res.status(500).json({ error: 'Internal Server Error' });
+  }
+});
+
+async function startServer() {
+  await redisClient.connect();
+
+  app.listen(PORT, () => {
+    console.log(`Server running on http://localhost:${PORT}`);
+  });
+}
+
+startServer().catch(console.error);
diff --git a/tuffy/tuffycommand1.py b/tuffy/tuffycommand1.py
new file mode 100644
index 0000000..5b069ea
--- /dev/null
+++
b/tuffy/tuffycommand1.py
@@ -0,0 +1,379 @@
+#!/usr/bin/env python3
+import argparse
+import os
+import io
+import os
+import shutil
+import fileinput
+import subprocess
+#import paramiko #install pip3, paramiko, zip
+import sys
+import time
+
+"""
+External dependencies:
+https://api.ipify.org
+ to get the pubic facing ip address of this server
+
+ NOTES
+ -before deploying, make sure template files are present and pub keys installed
+ -at each step of the process, describe what you want to do
+
+ next steps
+ -hardcode aws and terraform installation and their credentials for ease of testing on new servers
+ -can test exfil and infil
+ -
+
+"""
+#global variables
+working_user="ubuntu"
+working_directory=f"/home/{working_user}/tuffycommand1"
+
+def deploy(args):
+    #print(f"Hello {args.name}!")
+    #print(f"{args.name}, {args.cloudcomputeprovider}, {args.location}, {args.serverid}, {args.clients}!")
+
+    #prerequisite for this code block to run is to have the lowpriv pub in the user dir, rest works
+    #this is to initialise the low priv user to put files in.
+    #store the variable 'username' and install ssh keys if they don't exist for remote user login
+    lpusername = "lowpriv"
+    pubkey_file = f"{working_directory}/pubkey/lowpriv.pub"
+
+    file_names = [f"{pubkey_file}",
+                  f"{working_directory}/server_exec-template/comp208.pub",
+                  f"{working_directory}/server_exec-template/exfil.sh",
+                  f"{working_directory}/server_exec-template/init.sh",
+                  f"{working_directory}/server_exec-template/lowpriv",
+                  f"{working_directory}/server_provisioning-template/template.tf",
+                  f"{working_directory}/server_provisioning-template/tf-start.sh",
+                  f"{working_directory}/server_provisioning-template/tf-end.sh"]
+    for file_name in file_names:
+        if os.path.exists(file_name):
+            continue
+        else:
+            print("one of the prerequisite files isn't present.")
+            print("exiting program")
+            sys.exit()
+
+    if os.system("grep -q '^" + lpusername + ":' /etc/passwd") == 0:
+        print("user already exist, pub key is assumed to be installed")
+    else:
+        print("user does not exist, creating...")
+        #create the new user with low-privileges!!
+        subprocess.run(["useradd", "-m", "-s", "/bin/bash", lpusername])
+        subprocess.run(["mkdir", "-p", f"/home/{lpusername}/.ssh"])
+        subprocess.run(["chmod", "700", f"/home/{lpusername}/.ssh"])
+        subprocess.run(["touch", f"/home/{lpusername}/.ssh/authorized_keys"])
+        subprocess.run(["chmod", "600", f"/home/{lpusername}/.ssh/authorized_keys"])
+        subprocess.run(["chown", "-R", f"{lpusername}:{lpusername}", f"/home/{lpusername}/.ssh"])
+        with open(pubkey_file, "r") as f:
+            pubkey_file = f.read()
+        with open(f"/home/{lpusername}/.ssh/authorized_keys", "a") as auth_file:
+            auth_file.write(pubkey_file)
+        print(f"User '{lpusername}' has been created with the provided public key")
+
+    cmd = "curl ifconfig.me"
+    result = subprocess.run(cmd, stdout=subprocess.PIPE, shell=True)
+    local_public_ip = result.stdout.decode().strip()
+    print(local_public_ip)
+
+
+    #you need these 3 variables and serverid to configure the template for terraform!
+    location_aws=""
+    ami=""
+    ccp=""
+    if args.location == "London":
+        location_aws = "eu-west-2"
+        ami="ami-038d76c4d28805c09"
+        ccp="aws"
+    elif args.location == "Paris":
+        location_aws = "eu-west-3"
+        ami="ami-0dfb6769e523bf035"
+        ccp="aws"
+    else:
+        print("Error: invalid cloud compute location. Allowed locations are 'London' and 'Paris'")
+        print("exiting program")
+        sys.exit()
+
+    #create unique directory
+    path = os.path.join(working_directory,"instances", args.name, ccp, args.location, args.serverid)
+    print(f"unique server path is {path}")
+    if os.path.exists(os.path.expanduser(path)):
+        print("Error: Directory already exists, nothing will happen, program will continue to run")
+    else:
+        os.makedirs(os.path.expanduser(path))
+        print("hi new directory will be made")
+
+    # copy file from source to destination directory
+    source_dir = f"{working_directory}/server_provisioning-template/"
+    dest_dir = f"{path}/server_provisioning/"
+    if not os.path.exists(dest_dir):
+        #if directory does not exist, create it
+        os.makedirs(dest_dir)
+    for filename in os.listdir(source_dir):
+        src_file = os.path.join(source_dir, filename)
+        dest_file = os.path.join(dest_dir, filename)
+        #overwrite the file if it already exists
+        shutil.copy(src_file, dest_file)
+
+
+    # Replace string in template.tf file
+    filename_template = os.path.join(path, "server_provisioning/template.tf")
+    with fileinput.FileInput(os.path.join(path, filename_template), inplace=True, backup="", mode="r") as file:
+        #these strings are already in the template
+        old_string_location = "LOCATION-TEMPLATE"
+        old_string_ami = "AMI-TEMPLATE"
+        old_string_serverid = "SERVERID-TEMPLATE"
+        #do note the below 3 commands do not work on mac due to a bug in sed
+        #verified it works in linux
+        subprocess.run(f"sed -i 's/{old_string_location}/{location_aws}/g' {filename_template}", shell=True)
+        subprocess.run(f"sed -i 's/{old_string_ami}/{ami}/g' {filename_template}", shell=True)
+        subprocess.run(f"sed -i 's/{old_string_serverid}/{args.serverid}/g' {filename_template}", shell=True)
+        print("template change success")
+
+    #commands to execute in the particular instance's directory
+    print("path changed to the server path")
+    path_server_provisioning=os.path.join(f"{path}","server_provisioning/")
+    os.chdir(path_server_provisioning)
+    commands = [
+        f"chmod +x {path_server_provisioning}tf-start.sh",
+        f"chmod +x {path_server_provisioning}tf-end.sh",
+        f"sudo {path_server_provisioning}tf-start.sh"
+    ]
+
+
+    # Iterate over the commands and execute them
+    for command in commands:
+        # Build the full SSH command to execute
+        #full_command = f"{ssh_command} '{command}'"
+        # Start the subprocess and capture the output
+        process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        # Print the output as it is generated
+        while True:
+            # Read a line of output from the process's stdout stream
+            output = process.stdout.readline()
+            # If there is no more output, break out of the loop
+            if not output:
+                break
+            # Decode the output and print it to the console
+            print(output.decode().strip())
+        # Print any error messages that were generated
+        stderr_output = process.stderr.read().decode()
+        if stderr_output:
+            print(f"Error: {stderr_output.strip()}")
+        # Wait for the process to finish
+        process.wait()
+    subprocess.run(f"mv terraform-output.txt ../", shell=True)
+    os.chdir(working_directory)
+
+
+    #after tf-start.sh is executed, output.txt is produced
+    #store each variable of "output.txt"
+    filename_output = os.path.join(path, "terraform-output.txt")
+    instance_id=""
+    instance_public_ip=""
+    instance_labelled_serverid=""
+    with open(filename_output) as file:
+        lines = [line.strip() for line in file.readlines()]
+        instance_id=lines[0]
+        instance_public_ip=lines[1]
+        instance_labelled_serverid=lines[2]
+    #check if the created server id, labelled in terraform is the same as the one given as input. it should be the same.
+    if args.serverid == instance_labelled_serverid:
+        pass
+        print("server initialisation success!!")
+    else:
+        print('ERROR! \nThe instance you created does not have the same server id as the one you assigned it.\n(The problem is due to a mistake made by the programmer)')
+
+
+    print("\nserver initialisation success! now we are going to configure and install the vpn on the server\n")
+
+    #set source and destination directory
+    source_dir = "server_exec-template/"
+    dest_dir = os.path.join(path, "server_exec")
+    if not os.path.exists(dest_dir):
+        #if directory does not exist, create it
+        os.makedirs(dest_dir)
+    #copy all files from the source destination
+    for filename in os.listdir(source_dir):
+        src_file = os.path.join(source_dir, filename)
+        dest_file = os.path.join(dest_dir, filename)
+        #overwrite the file if it already exists
+        shutil.copy(src_file, dest_file)
+
+
+
+
+    #now make the .sh file to be executed in the user environment
+
+    #create edited input file, line seperated
+    with io.open(os.path.expanduser(f"{path}/server-info-all.txt"), "w", encoding="utf-8") as f:
+        f.write(f"{args.name}\n")
+        f.write(f"{ccp}\n")
+        f.write(f"{args.location}\n")
+        f.write(f"{args.clients}\n")
+        f.write(f"{args.serverid}\n")
+        f.write(f"{location_aws}\n")
+        f.write(f"{ami}\n")
+        f.write(f"\n")
+        f.write(f"{local_public_ip}\n")
+        f.write(f"{instance_public_ip}\n")
+        f.write(f"{lpusername}\n")
+        f.write(f"{pubkey_file}\n")
+
+
+
+    #generate a list of client names
+    clients = [f'client{i}' for i in range(1, args.clients+1)]
+    #concatenate the client names into a comma-separated string
+    client_string = ','.join(clients)
+    print(f"client string is {client_string}")
+
+
+    filename_template = os.path.join(f"{path}","server_exec", "exfil.sh")
+    #subprocess.run(f"cat {filename_template}", shell=True)
+    with fileinput.FileInput(os.path.join(path, filename_template), inplace=True, backup="", mode="r") as file:
+        #these strings are already in the template
+        old_string_iip = "instance_ip_TEMPLATE"
+        old_string_clientstring = "allclients_TEMPLATE"
+        old_string_lpusername = "lowpriv_TEMPLATE"
+        old_string_serverip = "exfil_ip_TEMPLATE"
+        lowpriv_serverdir=f"/home/lowpriv{path}"
+        old_string_serverdir = "exfil_dir_TEMPLATE"
+        #do note the below 3 commands do not work on mac due to a bug in sed
+        #verified it works in linux
+        subprocess.run(f"sed -i 's/{old_string_iip}/{instance_public_ip}/g' {filename_template}", shell=True)
+        subprocess.run(f"sed -i 's/{old_string_clientstring}/{client_string}/g' {filename_template}", shell=True)
+        subprocess.run(f"sed -i 's/{old_string_lpusername}/{lpusername}/g' {filename_template}", shell=True)
+        subprocess.run(f"sed -i 's/{old_string_serverip}/{local_public_ip}/g' {filename_template}", shell=True)
+        subprocess.run(f"sed -i 's+{old_string_serverdir}+{lowpriv_serverdir}+g' {filename_template}", shell=True)
+
+
+    #instance_public_ip="10.211.55.18"
+    print("sleeping 10")
+    time.sleep(10)
+    #if not, ssh will fail, aws needs a bit of time for their ssh to work
+    username='ubuntu'
+    key_filename='/home/ubuntu/awscomp208.pem'
+    print(instance_public_ip)
+
+    # make the zipped file from the folder that already contains the preconfigured files first
+    local_folder_name = os.path.join(path, "server_exec")
+    local_zipped_file = f'{local_folder_name}.zip'
+    path_to_zipped_file = os.path.join(path,local_zipped_file)
+    #print("hiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii")
+    #print(path_to_zipped_file)
+    print("zipping local configured initialisation files for transfer")
+    subprocess.run(f"cp {path}/server-info-all.txt {local_folder_name}/", shell=True)
+    subprocess.run(f"zip -r {local_folder_name}.zip {local_folder_name}/", shell=True)
+    print("")
+
+    time.sleep(1)
+    print("transferring zipped file over to the other(vpn) server")
+    #EDIT SSH AUTH FILE AND USERNAME HERE
+    print(local_zipped_file)
+    print(instance_public_ip)
+    subprocess.run(f"scp -i /home/ubuntu/awscomp208.pem -o StrictHostKeyChecking=no {local_zipped_file} ubuntu@{instance_public_ip}:/home/ubuntu/", shell=True)
+    subprocess.run(f"scp -i /home/ubuntu/awscomp208.pem -o StrictHostKeyChecking=no {path}/server_exec/init.sh ubuntu@{instance_public_ip}:/home/ubuntu/", shell=True)
+    # unzip file on remote server
+
+    remote_folder_name = local_folder_name[1:]
+
+    # Define the SSH command to use
+    ssh_command = f"ssh -i {key_filename} {username}@{instance_public_ip}"
+
+    # Define the commands to execute
+    commands = [
+        #f"sudo apt-get update",
+        #f"echo 'update completed, sleeping for 20 seconds'",
+        #f"sleep 20",
+        #f"sudo apt-get install zip unzip tree tmux vim htop mlocate mosh net-tools -y",
+        f"sudo rm -rf /var/lib/apt/lists/*",
+        f"sudo chmod +x init.sh",
+        f"sudo ./init.sh",
+        #f"sudo apt-get update && sudo apt-get install zip unzip tree -y",
+        f"unzip -o /home/ubuntu/server_exec.zip -d /home/ubuntu/",
+        f"cp {remote_folder_name}/* .",
+        f"rm -rf home/ ",
+        f"chmod +x *",
+        f"sudo ./exfil.sh"
+    ]
+
+    # Iterate over the commands and execute them
+    for command in commands:
+        # Build the full SSH command to execute
+        full_command = f"{ssh_command} '{command}'"
+        # Start the subprocess and capture the output
+        process = subprocess.Popen(full_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        # Print the output as it is generated
+        while True:
+            # Read a line of output from the process's stdout stream
+            output = process.stdout.readline()
+            # If there is no more output, break out of the loop
+            if not output:
+                break
+            # Decode the output and print it to the console
+            print(output.decode().strip())
+        # Print any error messages that were generated
+        stderr_output = process.stderr.read().decode()
+        if stderr_output:
+            print(f"Error: {stderr_output.strip()}")
+        # Wait for the process to finish
+        process.wait()
+
+    subprocess.run(f"tree /home/lowpriv/", shell=True)
+    #subprocess.run(f"sudo mkdir {path}/files/", shell=True)
+    subprocess.run(f"sudo unzip /home/{lpusername}{path}/t33-exfil.zip -d {path}", shell=True)
+
+
+    path2 = os.path.join("/opt/lampp/htdocs/",args.name, ccp, args.location, args.serverid)
+    if not os.path.exists(path2):
+        os.makedirs(path2, mode=0o777, exist_ok=True)
+    print(path2)
+    subprocess.run(f"sudo mv {path}/t33-exfil/* {path2}", shell=True)
+    subprocess.run("pwd",shell=True)
+    subprocess.run(f"sudo zip -j {path2}/file.zip {path2}/*", shell=True)
+    subprocess.run(f"sudo chmod 777 {path2}/file.zip ", shell=True)
+#    subprocess.run(f"sudo rm -rf /home/ubuntu/tuffycommand1/instances/", shell=True)
+    #subprocess.run(f"tree /home/ubuntu/tuffycommand1", shell=True)
+
+
+
+#    ssh.close()
+
+
+
+
+
+
+
+
+
+def destroy(args):
+    print(f"currently in development!")
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    subparsers = parser.add_subparsers()
+
+    deploy_p = subparsers.add_parser('deploy')
+    deploy_p.add_argument('name')
+    deploy_p.add_argument('cloudcomputeprovider')
+    deploy_p.add_argument('location')
+    deploy_p.add_argument('serverid')
+    deploy_p.add_argument('clients',type=int)
+    deploy_p.set_defaults(func=deploy)
+
+    destroy_p = subparsers.add_parser('destroy')
+    destroy_p.add_argument('name')
+    destroy_p.set_defaults(func=destroy)
+
+    try:
+        args = parser.parse_args()
+        args.func(args)
+    except AttributeError:
+        print("Error: no sub-command specified.")
+        parser.print_help()
