Dank der derzeitigen total kranken Transaktionsgebühren liegt die Gesamtvergütung bei locker 10-16 BTC pro Block!
Ich würde mich nicht mehr einkriegen, wenn wir mit unseren derzeitigen knapp 85TH das jetzt hinbekommen
It was the Bitcointalk forum that inspired us to create Bitcointalksearch.org - Bitcointalk is an excellent site that should be the default page for anybody dealing in cryptocurrency, since it is a virtual gold-mine of data. However, our experience and user feedback led us to create our site; Bitcointalk's search is slow, and it is difficult to get the results you need, because you need to log in first to find anything useful - furthermore, there are rate limiters for their search functionality.
The aim of our project is to create a faster website that yields more results, without requiring you to create an account or log in - your personal data, therefore, will never be in jeopardy, since we do not ask for any of it and you do not need to provide it to use our site with all of its capabilities.
We created this website with the sole purpose of users being able to search quickly and efficiently in the field of cryptocurrency so they will have access to the latest and most accurate information and thereby assisting the crypto-community at large.
import requests
from influxdb import InfluxDBClient
from datetime import datetime
import pytz
# Multipliers that convert a ckpool hashrate suffix to GH/s.
_HASHRATE_FACTORS = {"P": 1_000_000.0, "T": 1_000.0, "G": 1.0, "M": 0.001}


def parse_hashrate(hashrate_str):
    """Convert a ckpool hashrate string to a float in GH/s.

    ckpool reports hashrates as a number with a single-letter unit suffix,
    e.g. "85.3T", "500G", "250M", "1.2P".

    Args:
        hashrate_str: Hashrate string with an optional M/G/T/P suffix.

    Returns:
        The hashrate in GH/s as a float. A bare number is interpreted as
        GH/s; unparseable input yields 0.0 instead of raising, so a single
        malformed field cannot abort the whole import run.
    """
    # endswith() instead of a substring check: the suffix is always the
    # last character, and a substring match is order-dependent and fragile.
    for suffix, factor in _HASHRATE_FACTORS.items():
        if hashrate_str.endswith(suffix):
            try:
                return float(hashrate_str[: -len(suffix)]) * factor
            except ValueError:
                return 0.0
    # No recognised suffix: try to parse it as a plain GH/s number rather
    # than silently dropping the value (the original returned 0.0 here).
    try:
        return float(hashrate_str)
    except ValueError:
        return 0.0
# --- Configuration ---
url = "https://solo.ckpool.org/users/bc1qeuupt2tgerfum8jclt8aklu9cdmzzkwml9lg7c"
database_name = "willipool"
influxdb_host = "localhost"  # adjust to match your InfluxDB installation

# Fetch the pool statistics JSON. A timeout prevents the script from
# hanging forever on a dead connection, and raise_for_status() fails fast
# on HTTP errors instead of producing a confusing JSON decode error later.
response = requests.get(url, timeout=30)
response.raise_for_status()
data = response.json()

# Timezone used to convert the pool's UTC timestamps to local time.
local_tz = pytz.timezone("Europe/Berlin")  # adjust to your timezone

# InfluxDB client (default HTTP port 8086).
client = InfluxDBClient(host=influxdb_host, port=8086, database=database_name)
# Build the InfluxDB point for the aggregate pool statistics.
# datetime.fromtimestamp(..., tz=pytz.utc) is the timezone-aware
# replacement for the deprecated utcfromtimestamp(); the result is
# identical after conversion to the local timezone.
influx_data = [
    {
        "measurement": "pool_stats",
        "tags": {},
        "time": datetime.fromtimestamp(data["lastshare"], tz=pytz.utc).astimezone(local_tz),
        "fields": {
            # Hashrates are normalised to GH/s so Grafana & co. can graph
            # them on one axis regardless of the reported unit.
            "hashrate1m": parse_hashrate(data["hashrate1m"]),
            "hashrate5m": parse_hashrate(data["hashrate5m"]),
            "hashrate1hr": parse_hashrate(data["hashrate1hr"]),
            "hashrate1d": parse_hashrate(data["hashrate1d"]),
            "hashrate7d": parse_hashrate(data["hashrate7d"]),
            "workers": data["workers"],
            "shares": data["shares"],
            "bestshare": data["bestshare"],
            "bestever": data["bestever"],
        },
    }
]
# Append one point per worker, tagged by worker name so each rig can be
# queried and graphed individually.
for worker in data["worker"]:
    influx_data.append(
        {
            "measurement": "worker_stats",
            "tags": {"workername": worker["workername"]},
            # Timezone-aware replacement for the deprecated
            # utcfromtimestamp(); same resulting local time.
            "time": datetime.fromtimestamp(worker["lastshare"], tz=pytz.utc).astimezone(local_tz),
            "fields": {
                "hashrate1m": parse_hashrate(worker["hashrate1m"]),
                "hashrate5m": parse_hashrate(worker["hashrate5m"]),
                "hashrate1hr": parse_hashrate(worker["hashrate1hr"]),
                "hashrate1d": parse_hashrate(worker["hashrate1d"]),
                "hashrate7d": parse_hashrate(worker["hashrate7d"]),
                "lastshare": worker["lastshare"],
                "shares": worker["shares"],
                "bestshare": worker["bestshare"],
                "bestever": worker["bestever"],
            },
        }
    )
# Write all collected points and always release the connection, even if
# the write fails (the original leaked the client on error).
try:
    client.write_points(influx_data)
finally:
    client.close()