
Topic: My journey from user, to cyborg, to maybe coding a bot. - page 2. (Read 1091 times)

legendary
Activity: 3290
Merit: 16489
Thick-Skinned Gang Leader and Golden Feather 2021
5.Archive All files and folders are now in the format dd-mm-yyyy, for easier auto sorting.
Suggestion: yyyy-mm-dd is much easier to sort.
Example:
Code:
2024_02_23_Fri_10.34h
2024_02_27_Tue_10.34h
2024_03_01_Fri_10.34h
2024_03_05_Tue_07.33h
2024_03_05_Tue_10.34h
2024_03_08_Fri_10.34h
2024_03_12_Tue_10.34h
Even ls shows everything in chronological order now.
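A quick sketch of why that works: strftime zero-pads, so an ISO-style prefix makes a plain alphabetical sort come out chronological (the timestamps below are made up for illustration):
Code:
from datetime import datetime

# hypothetical archive timestamps, deliberately out of order
stamps = [
    datetime(2024, 3, 1, 10, 34),
    datetime(2024, 2, 23, 10, 34),
    datetime(2024, 3, 12, 7, 33),
]

# yyyy-mm-dd prefix: a plain alphabetical sort is now also chronological
names = [d.strftime("%Y-%m-%d_%a_%H.%Mh") for d in stamps]
for name in sorted(names):   # same order ls would show
    print(name)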
sr. member
Activity: 114
Merit: 93
Fly free sweet Mango.
OMGosh.  It finally worked twice in a row.  I've got the script set for pyautogui to hit Preview, not Post, but one little '#' will change everything.  Unless of course one small thing outside of my control changes and then...another crash and burn.  But you know what?  It's gonna be okay Smiley
1.Download no change
2.Import  terminal updates about loading the next page and skipping unneeded links
3.Export I figured out a better way to present the big gif link with '''url={imgur_big_gif + "v"}'''
4.Posting Realized a way to control the posting environment for pyautogui was to open Chrome and then hit F11 for fullscreen.  Soon to be posted on a timer maybe?  (A rough sketch of that follows the list below.)
5.Archive All files and folders are now in the format dd-mm-yyyy, for easier auto sorting.
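A rough sketch of the timer idea from item 4: sleep until the next UTC midnight, then run the daily script once (the script path here is hypothetical):
Code:
import subprocess, time
from datetime import datetime, timedelta, timezone

# wait until the next UTC midnight
now = datetime.now(timezone.utc)
next_midnight = (now + timedelta(days=1)).replace(hour=0, minute=0, second=0, microsecond=0)
time.sleep((next_midnight - now).total_seconds())

# hypothetical path; point this at the real daily script
subprocess.run(["python", "C:/PyProjects/FullCB.py"], check=True)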
Code:
import csv, os, pyautogui, pyperclip, re, requests, shutil, time, urllib.request, webbrowser
from datetime import timedelta, date
from os import rename

startTime = time.perf_counter()

# set dates for links and new folder
today = date.today()
tomorrow = today + timedelta(1)

# name newfolder with date
directory = f"{today:%m}-{today:%d}"
parent_dir = "C:/Users/Games/CB/images/"


# get the final 20 gif layers in reverse order, starting with 24
number = 24
url4 = 'https://bitcointalk.org/index.php?action=profile;u=110685;sa=showPosts;start=0'
time.sleep(20)
response = requests.get(url4)

# turn response into textfile of the source code.
source_code = response.text

# read the source code, save it, and turn it into a string.  
textfile = open('C:/Users/Games/CB/Temp/CBSource.txt', 'a+')
textfile.write(source_code)
textfile.seek(0)
filetext = textfile.read()
textfile.close()

# find matches using regex, and for every match download the image and number it.  resorted to asking copilot for help with my regex
matches = re.findall(r'https:\/\/www\.talkimg\.com\/images\/\w{4}\/\w{2}\/\w{2}\/\w{5}\.png', filetext)
for link in matches:
    dl_number = f"{number:02d}"
    print(number, link)
    urllib.request.urlretrieve(link, 'C:/Users/Games/CB/images/download ({}).png'.format(dl_number))
    number = number - 1
    time.sleep(2)
os.remove('C:/Users/Games/CB/Temp/CBSource.txt')
print("going on")

# get the first 4 images in reverse order, i copied my own code and changed the link.  Should have made a function and then fed it the links probably.
url5 = 'https://bitcointalk.org/index.php?action=profile;u=110685;sa=showPosts;start=20'
time.sleep(20)
response5 = requests.get(url5)
source_code = response5.text
textfile5 = open('C:/Users/Games/CB/Temp/CBSource2.txt', 'a+')
textfile5.write(source_code)
textfile5.seek(0)
filetext2 = textfile5.read()
textfile5.close()

# find matches using regex, and for first 4 matches download the image and number it
matches = re.findall(r'https:\/\/www\.talkimg\.com\/images\/\w{4}\/\w{2}\/\w{2}\/\w{5}\.png', filetext2)
for link in matches:
    if number >= 1:
        dl_number = f"{number:02d}"
        print(number, link)
        urllib.request.urlretrieve(link, 'C:/Users/Games/CB/images/download ({}).png'.format(dl_number))
        number = number - 1
        time.sleep(2)
    if number <= 0:
        print("skipping link")
os.remove('C:/Users/Games/CB/Temp/CBSource2.txt')

# hot keys to open gimp and then the plugin that load layers, export, scale, export gifs, quit, agree to not save
time.sleep(5)
pyautogui.click(1, 1)
time.sleep(5)
pyautogui.hotkey('ctrl', 'alt', 'g')
time.sleep(40)
pyautogui.click(820, 446)
time.sleep(20)
pyautogui.hotkey('ctrl', 'alt', 'l')
time.sleep(5)
pyautogui.hotkey('tab')
time.sleep(5)
pyautogui.hotkey('tab')
time.sleep(5)
pyautogui.hotkey('tab')
time.sleep(5)
pyautogui.hotkey('enter')
time.sleep(20)
pyautogui.hotkey('ctrl', 'q')
time.sleep(10)
pyautogui.hotkey('shift', 'tab')
time.sleep(5)
pyautogui.hotkey('enter')
time.sleep(60)

# uploading big gif and getting link to use later,
url = "https://api.imgur.com/3/image"
payload = {'name': f'b{today:%m}-{today:%d}-{today.year}'}
files=[('image',('gif.gif',open('C:/Users/Games/CB/Temp/gif.gif','rb'),'image/gif'))]
headers = {'Authorization': 'Bearer xXxXxXxXxXxXx'}
response = requests.post(url, headers=headers, data=payload, files=files)
data = response.json()
imgur_big_gif = data.get("data", {}).get("link")

# uploading talkimg gif and getting link to use later, cle
url = "https://talkimg.com/api/1/upload"
headers = {"X-API-Key": "uvwxXxXxXxXxXxXxyz"}
files = {"source": open("C:/Users/Games/CB/Temp/gif2.gif", "rb")}
payload = {"title": f'b{today:%m}-{today:%d}-{today.year}', "album_id": "UFbj"}
response = requests.post(url, headers=headers, data=payload, files=files)
data = response.json()
talkimg_gif = data["image"]["url"]

# add post to clipboard for btctalk
pyperclip.copy(f"ChartBuddy's 24 hour Wall Observation recap\n[url={imgur_big_gif + "v"}].[img]{talkimg_gif}[/img].[/url]\nAll Credit to [url=https://bitcointalk.org/index.php?topic=178336.msg10084622#msg10084622]ChartBuddy[/url]")

# can use this link for the reply button
url7 = 'https://bitcointalk.org/index.php?action=post;topic=178336.0'
webbrowser.open(url7)
time.sleep(20)
pyautogui.hotkey('f11')
time.sleep(10)
pyautogui.hotkey('tab')
time.sleep(5)
pyautogui.hotkey('tab')
time.sleep(5)
pyautogui.hotkey('ctrl', 'v')
time.sleep(5)
pyautogui.hotkey('tab')
time.sleep(5)
# we're doing it live if the next command is #ed out
pyautogui.hotkey('tab')
time.sleep(5)
pyautogui.hotkey('enter')

#runtime is calculated
stopTime = time.perf_counter()
runtime = [stopTime - startTime]  # a list, so csv.writer writes it as one ordered row

# save to csv file
f = open('C:/PyProjects/runtimes.csv', 'a', newline='')
writer = csv.writer(f)
writer.writerow(runtime)

time.sleep(20)

# prepare to store downloads
newf = os.path.join(parent_dir, directory)
os.mkdir(newf)
src = "C:/Users/Games/CB/images"
dest = "C:/Users/Games/CB/images/{}".format(directory)
files = os.listdir(src)
os.chdir(src)

# only move numbered png files
for file in files:
    if file.endswith(").png"):
        shutil.move(file, dest)  

# gifs are stored
rename ("C:/Users/Games/CB/Temp/gif.gif", f"C:/Users/Games/CB/{today.year}/{today.month}-{today.year}/b{today:%m}-{today:%d}.gif")
rename (f"C:/Users/Games/CB/Temp/gif2.gif", f"C:/Users/Games/CB/{today.year}/{today.month}-{today.year}/{today:%m}-{today:%d}.gif")

So exciting on the Top100 vwap list.  I executed a successful post from my phone using Chrome Remote Desktop back to the ole home PC.  I think I've figured out a way to make the green coloring automatic with a second tuple sort, but don't have it ready yet.  We failed many ways, until I asked copilot to use tuples to keep the date rank and the vwap rank separate before sorting.  Then I could later flag the most recent item on the list, which is red and underlined.  I'm really leaning on dooglus' code and copilot for this
Code:
import requests, pyautogui, pyperclip, time, webbrowser
from datetime import datetime, timezone

current_unix_time = int(time.time())
unix_time_month = 60 * 60 * 24 * 31
unix_time_day = 60 * 60 * 24

def fetch_bitcoin_data(days=1200, top=100, currency='USD'):
    url = f"http://bitcoincharts.com/charts/chart.json?m=bitstampUSD&r={days}&i=Daily"
    response = requests.get(url, verify=False)
    data = response.json()
    rank = 1
    rows = [(entry[0], entry[7]) for entry in data]
    rows = rows[:-1]
    tuples = [(timestamp, vwap, i +1) for i, (timestamp, vwap) in enumerate(rows)]
    sorted_tuples = sorted(tuples, key=lambda x: float(x[1]), reverse=True)

# opens file to store the top 100 vwaps
    with open('C:/PyProjects/VWAP_USD/top100usd.txt', 'w') as top100:

        # sorts the daily VWAP by highest average price, and ranks them
        for timestamp, vwap, rows in sorted_tuples[:top]:
            adjusted_timestamp = int(timestamp)
            utc_date = datetime.fromtimestamp(adjusted_timestamp, tz=timezone.utc).strftime('%Y-%m-%d')
            
            # this is to make the columns line up
            if rank <= 99:
                spacing = "  "
            if rank == 100:
                spacing = " "
          
            # this is to make top 100 vwaps within the last 31 days bold
            if timestamp >= current_unix_time - unix_time_month:
                bolding = "[b]"
                unbolding = "[/b]"
            if timestamp <= current_unix_time - (unix_time_month + 1):
                bolding = ""
                unbolding = ""

            # i noticed the most recent result, the red, underline one, if it makes the list was always the last
            # being reverse sorted, from a list of 1200, and never forget Python starts counting at zed :)
            if rows == 1199:
                redcoloring = "[red][u]"
                reduncoloring = "[/u][/red]"
            if rows != 1199:
                redcoloring = ""
                reduncoloring = ""
            print(f"{spacing}{redcoloring}{bolding}{rank:2d}  {utc_date}  {vwap:.0f} {currency}{unbolding}{reduncoloring}")
            formatted_output = f"{spacing}{redcoloring}{bolding}{rank:2d}  {utc_date}  {vwap:.0f} {currency}{unbolding}{reduncoloring}"
            top100.write(formatted_output + '\n')
            rank += 1

    # putting the post on the clipboard
    with open('C:/PyProjects/VWAP_USD/top100usd.txt', 'r') as top100:
        list = top100.read()
        prelude = "[pre][size=10pt][url=https://bitcoincharts.com/charts/bitstampUSD]Rank   BitStamp  USD/BTC[/url]"
        explanation = "[url=https://bitcointalk.org/index.php?topic=138109.msg54917391#msg54917391][size=8pt]     * * Chart Explanation * *[/size][/url][/pre]"
        full_post = f"{prelude}\n{list}{explanation}"
        pyperclip.copy(full_post)

    # can use this link for the reply page to top20 thread
    url = 'https://bitcointalk.org/index.php?action=post;topic=138109.0'
    webbrowser.open(url)
    time.sleep(5)
    pyautogui.hotkey('f11')
    time.sleep(5)
    pyautogui.hotkey('tab')
    time.sleep(2)
    pyautogui.hotkey('tab')
    time.sleep(2)
    pyautogui.hotkey('ctrl', 'v')
    time.sleep(2)
    pyautogui.hotkey('tab')
    time.sleep(2)
    # we're doing it live if the next command is #ed out
    # pyautogui.hotkey('tab')
    time.sleep(20)
    pyautogui.hotkey('enter')
    # open("top100.txt", 'w').close()
              
if __name__ == "__main__":
    fetch_bitcoin_data()

EDIT: Forgot I hadn't posted in so long, and forgot the previous update was pre auto red underlining, pre f11ing
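One way to avoid hard-coding the 1199 check when flagging the newest entry would be to compare each row's timestamp against the maximum timestamp in the data, so the length of the download no longer matters. A small self-contained sketch with made-up numbers:
Code:
# made-up (timestamp, vwap) rows; in the real script these come from bitcoincharts
rows = [(1709251200, 51000.0), (1709337600, 62000.0), (1709424000, 61000.0)]

newest_ts = max(ts for ts, _ in rows)            # most recent day in the data
top = sorted(rows, key=lambda r: r[1], reverse=True)

for rank, (ts, vwap) in enumerate(top, start=1):
    mark = "[u]" if ts == newest_ts else ""
    unmark = "[/u]" if ts == newest_ts else ""
    print(f"{mark}{rank:2d}  {vwap:.0f} USD{unmark}")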
legendary
Activity: 3290
Merit: 16489
Thick-Skinned Gang Leader and Golden Feather 2021
But of course, just change the regex to only include today's date?
You'll need some from yesterday too. I usually convert the "Today" on the forum to a real date first, then get everything from the last 24 hours.
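A rough sketch of that idea, assuming the post-history timestamps look like "Today at 10:34:56 PM" or "March 13, 2024, 10:34:56 PM" (the real format may differ slightly):
Code:
from datetime import datetime, timedelta

def to_datetime(stamp: str) -> datetime:
    # assumed forum format; swap "Today" for the current date before parsing
    if stamp.startswith("Today at"):
        stamp = stamp.replace("Today at", datetime.now().strftime("%B %d, %Y,"))
    return datetime.strptime(stamp, "%B %d, %Y, %I:%M:%S %p")

# made-up examples for illustration
stamps = ["Today at 10:34:56 PM", "March 13, 2024, 10:34:56 PM"]
cutoff = datetime.now() - timedelta(hours=24)
recent = [s for s in stamps if to_datetime(s) > cutoff]
print(recent)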
sr. member
Activity: 114
Merit: 93
Fly free sweet Mango.
One thing to fix is it currently downloads the last 24 images ChartBuddy posted, not necessarily only the posts from the last 24 hours.  I think I can figure out a way to request from Ninjastic Space, the number of posts in the last day, then fix the script to only download that many images for the day.
Why don't you use the time stamps on ChartBuddy's post history (and the second page)?
But of course, just change the regex to only include today's date?  Maybe... with error handling for the second page?  I'll see what I can do.  Thanks again.  Smiley
legendary
Activity: 3290
Merit: 16489
Thick-Skinned Gang Leader and Golden Feather 2021
One thing to fix is it currently downloads the last 24 images ChartBuddy posted, not necessarily only the posts from the last 24 hours.  I think I can figure out a way to request from Ninjastic Space, the number of posts in the last day, then fix the script to only download that many images for the day.
Why don't you use the time stamps on ChartBuddy's post history (and the second page)?
sr. member
Activity: 114
Merit: 93
Fly free sweet Mango.
ChartBuddy Daily Recap Storylog:
1.Download This is one of the last things I really figured out, and it's been one of the least error-prone parts of the whole shebang. One thing to fix is it currently downloads the last 24 images ChartBuddy posted, not necessarily only the posts from the last 24 hours.  I think I can figure out a way to request from Ninjastic Space the number of posts in the last day, then fix the script to only download that many images for the day.
2.Import My main goal now is to get rid of pyautogui and figure out how to run GIMP and post comments without having to worry about whether the window is maximized, whether it is on the correct monitor...  (A Pillow-based sketch of one alternative follows this list.)
3.Export Big progress has been made on the talkimg and imgur front.  I have now been given a talkimg.com account, so I can use the API.  It took me a while to figure out the payload bit, but I eventually got it working.  I wonder how many people are racing through the code, right now, to see if I was foolish enough to post my secret API key on a public forum again.  Nope, not today, and I hope not in the future.  I was getting a bit down, because I would try something with imgur and it would work, and then try it again and things would be different.  So if anyone was messing with me, thank you for not really doing any damage, but who am I kidding?  I'm sure it was just me.  Embarrassed  I remember back in the day when people would unknowingly expose their btc private key on the news or something, and zip, there go the cornz.  Shocked
4.Posting See above.
5.Archive Starting to date everything with 2 digit days and months for easier sorting.
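On item 2, one possible way to drop pyautogui and GIMP for the gif step would be to assemble the gif in Python with Pillow instead; a minimal sketch, assuming the numbered downloads sit in one folder (the paths, glob pattern, and frame duration are assumptions):
Code:
from pathlib import Path
from PIL import Image  # pip install pillow

# hypothetical folder of numbered frames: download (1).png ... download (24).png
frames_dir = Path("C:/Users/Games/CB/images")
paths = sorted(frames_dir.glob("download (*).png"),
               key=lambda p: int(p.stem.split("(")[1].rstrip(")")))

frames = [Image.open(p).convert("RGB") for p in paths]
frames[0].save("C:/Users/Games/CB/Temp/gif.gif",
               save_all=True, append_images=frames[1:],
               duration=500, loop=0)  # 500 ms per frame, loop forever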

Full03_08CB.py
Code:
import csv, json, os, pyautogui, pyperclip, re, requests, shutil, time, urllib.request, webbrowser
from datetime import timedelta, date
from os import rename

# on your marks, get set, go!
startTime = time.perf_counter()

# set 2 digit dates for links and new folder
today = date.today()
tomorrow = today + timedelta(1)

# get the final 20 gif layers in reverse order, starting with 24
number = 24
url4 = 'https://bitcointalk.org/index.php?action=profile;u=110685;sa=showPosts;start=0'
time.sleep(30)
response = requests.get(url4)

# turn response into textfile of the source code.
source_code = response.text

# read the source code, save it, and turn it into a string.  
textfile = open('C:/Users/Games/CBSource.txt', 'a+')
textfile.write(source_code)
textfile.seek(0)
filetext = textfile.read()
textfile.close()

# find matches using regex, and for every match download the image and number it.  resorted to asking copilot for help with my regex
matches = re.findall(r'https:\/\/www\.talkimg\.com\/images\/\w{4}/\w{2}\/\w{2}\/\w{5}\.png', filetext)
for link in matches:
    print(number, link)
    urllib.request.urlretrieve(link, 'C:/Users/Games/CB/images/download ({}).png'.format(number))
    number = number - 1
    time.sleep(5)
os.remove('C:/Users/Games/CBSource.txt')

# get the first 4 images in reverse order, i copied my own code and changed the link.  Should have made a function and then fed it the links probably.
url5 = 'https://bitcointalk.org/index.php?action=profile;u=110685;sa=showPosts;start=20'
time.sleep(30)
response5 = requests.get(url5)
source_code = response5.text
textfile5 = open('C:/Users/Games/CBSource2.txt', 'a+')
textfile5.write(source_code)
textfile5.seek(0)
filetext = textfile5.read()
textfile5.close()

# find matches using regex, and for first 4 matches download the image and number it
matches = re.findall(r'https:\/\/www\.talkimg\.com\/images\/\w{4}/\w{2}\/\w{2}\/\w{5}\.png', filetext)
for link in matches:
    if number >= 1:
        urllib.request.urlretrieve(link, 'C:/Users/Games/CB/images/download ({}).png'.format(number))
        print(number, link)
        number = number - 1
        time.sleep(5)
os.remove('C:/Users/Games/CBSource2.txt')

# hot keys to open gimp and then the plugin that load layers, export, scale, export gifs, quit, agree to not save
time.sleep(5)
pyautogui.click(1, 1)
time.sleep(5)
pyautogui.hotkey('ctrl', 'alt', 'g')
time.sleep(40)
pyautogui.click(820, 446)
time.sleep(20)
pyautogui.hotkey('ctrl', 'alt', 'l')
time.sleep(5)
pyautogui.hotkey('tab')
time.sleep(1)
pyautogui.hotkey('tab')
time.sleep(1)
pyautogui.hotkey('tab')
time.sleep(1)
pyautogui.hotkey('enter')
time.sleep(10)
pyautogui.hotkey('ctrl', 'q')
time.sleep(5)
pyautogui.hotkey('shift', 'tab')
time.sleep(1)
pyautogui.hotkey('enter')
time.sleep(20)

# uploading big gif and getting link to use later,
url = "https://api.imgur.com/3/image"
payload = {'name': f'b{today.month:02d}-{today.day:02d}-{today.year}'}
files=[('image',('gif.gif',open('C:/Users/Games/gif.gif','rb'),'image/gif'))]
headers = {'Authorization': 'Bearer **********************************'}
response = requests.post(url, headers=headers, data=payload, files=files)
data = response.json()
imgur_big_gif = data.get("data", {}).get("link")

# uploading talkimg gif and getting link to use later,
url = "https://talkimg.com/api/1/upload"
headers = {"X-API-Key": "chv_e*************************************************************"}
files = {"source": open("C:/Users/Games/gif2.gif", "rb")}
payload = {"title": f'b{today.month:02d}-{today.day:02d}-{today.year}', "album_id": "UFbj"}
response = requests.post(url, headers=headers, data=payload, files=files)
data = response.json()
talkimg_gif = data["image"]["url"]

# add post to clipboard for btctalk
pyperclip.copy(f"ChartBuddy's 24 hour Wall Observation recap\n[url={imgur_big_gif}].[img]{talkimg_gif}[/img].[/url]\nAll Credit to [url=https://bitcointalk.org/index.php?topic=178336.msg10084622#msg10084622]ChartBuddy[/url]")

# can use this link for the reply button
url7 = 'https://bitcointalk.org/index.php?action=post;topic=178336.0'
webbrowser.open(url7)
time.sleep(20)
pyautogui.hotkey('tab')
time.sleep(5)
pyautogui.hotkey('tab')
time.sleep(5)
pyautogui.hotkey('ctrl', 'v')
time.sleep(5)
pyautogui.hotkey('tab')
time.sleep(5)
# we're doing it live if the next command is #ed out
pyautogui.hotkey('tab')
time.sleep(5)
pyautogui.hotkey('enter')

# name newfolder with date
directory = f"{today.month:02d}-{today.day:02d}"
parent_dir = "C:/Users/Games/CB/images/"
newf = os.path.join(parent_dir, directory)
os.mkdir(newf)

# prepare to store downloads
src = "C:/Users/Games/CB/images"
dest = "C:/Users/Games/CB/images/{}".format(directory)
files = os.listdir(src)
os.chdir(src)

# only move numbered png files
for file in files:
    if file.endswith(").png"):
        shutil.move(file, dest)  

# gifs are stored:  NEED NEW MONTHLY FOLDER CREATION CODE
rename ("C:/Users/Games/gif.gif", f"C:/Users/Games/CB/{today.year}/{today.month:02d}-{today.year}/b{today.month:02d}-{today.day:02d}.gif")
rename (f"C:/Users/Games/gif2.gif", f"C:/Users/Games/CB/{today.year}/{today.month:02d}-{today.year}/{today.month:02d}-{today.day:02d}.gif")

#runtime is calculated
stopTime = time.perf_counter()
runtime = {stopTime - startTime}

# save to csv file
f = open('C:/PyProjects/runtimes.csv', 'a', newline='')
writer = csv.writer(f)
writer.writerow(runtime)

And then we have this new job, posting in the top 100 days of the volume weighted average price of BTC thread, née Top 20 days for Bitcoin, in the Speculation board.
Storylog:  The main challenge with this code was being able to run it from the road, which is where I'm usually located at UTC midnight.  So I have worked out how to put the top 100 vwaps on the clipboard, with bolding for the vwaps within the last 31 days.  So all this week I have left my computer on (but not the monitors of course), and used my phone with Chrome Remote Desktop to run the script.  Chrome Remote Desktop also has the shared-clipboard feature, so I can paste the list using my phone, which is so much easier than trying to control the home PC.  Then I can paste into bitcointalk, change the colors for the latest and oldest top 100 vwap, and hit post.  I haven't yet figured out how to script the colors.  I plan to work on that tomorrow.

Top100
Code:
import json, requests, pyautogui, pyperclip, time, webbrowser
from datetime import datetime, timezone

current_unix_time = int(time.time())
unix_time_month = 60 * 60 * 24 * 31
unix_time_day = 60 * 60 * 24

def fetch_bitcoin_data(days=1200, top=100, currency='USD'):
    url = f"http://bitcoincharts.com/charts/chart.json?m=bitstampUSD&r={days}&i=Daily"
    response = requests.get(url, verify=False)
    data = response.json()
    number = 1
    
    rows = [(entry[0], entry[7]) for entry in data]
    rows = rows[:-1]
    sorted_rows = sorted(rows, key=lambda x: float(x[1]), reverse=True)

# opens file to store the top 100 vwaps
    with open('top100test.txt', 'w') as top100:

        # sorts the daily VWAP by highest average price, and numbers them
        for timestamp, vwap in sorted_rows[:top]:
            adjusted_timestamp = int(timestamp)
            utc_date = datetime.fromtimestamp(adjusted_timestamp, tz=timezone.utc).strftime('%Y-%m-%d')
            
            # this is to make the columns look pretty
            if number <= 99:
                spacing = "  "
            if number == 100:
                spacing = " "
            # this is to make top 100 vwaps within the last 31 days bold
            if timestamp >= current_unix_time - unix_time_month:
                bolding = "[b]"
                unbolding = "[/b]"
            if timestamp <= current_unix_time - (unix_time_month + 1):
                bolding = ""
                unbolding = ""
            formatted_output = f"{spacing}{bolding}{number:2d}  {utc_date}  {vwap:.0f} {currency}{unbolding}"
            top100.write(formatted_output + '\n')
            # this gives them the rank number
            number += 1

    # putting the post on the clipboard
    with open('C:/Users/Games/top100Test.txt', 'r') as top100:  # note: the write above used the relative path 'top100test.txt'; both should point at the same file
        list = top100.read()
        prelude = "[pre][size=10pt][url=https://bitcoincharts.com/charts/bitstampUSD]Rank   BitStamp  USD/BTC[/url]"
        explanation = "[url=https://bitcointalk.org/index.php?topic=138109.msg54917391#msg54917391][size=8pt]     * * Chart Explanation * *[/size][/url][/pre]"
        full_post = f"{prelude}\n{list}{explanation}"
        pyperclip.copy(full_post)

Because of course it did.   Cheesy  The current folder name is 3-2024
Code:
 File "c:\PyProjects\Full3_8CB.py", line 140, in
    rename ("C:/Users/Games/gif.gif", f"C:/Users/Games/CB/{today.year}/{today.month:02d}-{today.year}/b{today.month:02d}-{today.day:02d}.gif")
FileNotFoundError: [WinError 3] The system cannot find the path specified: 'C:/Users/Games/gif.gif' -> 'C:/Users/Games/CB/2024/03-2024/b03-08.gif'
sr. member
Activity: 114
Merit: 93
Fly free sweet Mango.
It's amazing how many things can go wrong, right?  I opened up GIMP, admittedly to prepare for the daily, one click, do-push-ups special.  Because sometimes the first time opening GIMP takes a lot longer than subsequent times per restart, probably.  This time though, upon CTRL-ALT-Ging my way to opening GIMP, there was an update being proffered.  Well hell, I thought, yet another thing that would have poked through my travesty of a tapestry of code.  I remember wondering if the keyboard shortcut to open would carry over, but quickly stored that thought.

So...GIMP never opened, gifs were never made to upload, and fail.  But I only had to change the properties of the GIMP desktop shortcut, # out the download-files part, and remember to delete the empty current-date folder before rerunning so I don't get the 'folder already exists' error again, because I apparently refuse to deal with error cases yet.

But I did come up with some useful code for the Top20 job.  It fixes the weird justification of the number 100, and bolds any top 100 daily volume weighted average prices set within the last 31 days.  I have not yet figured out how to automatically highlight the most recent top 100 vwap.
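For the justification part, a format-spec width would also line up ranks 1 through 100 without the spacing branches; a tiny sketch:
Code:
# right-align the rank in a 3-character field, so 1..100 all line up
for rank in (1, 9, 10, 99, 100):
    print(f"{rank:3d}  2024-03-08  61000 USD")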
Current Top20 code:
Code:
import requests, json, time
from datetime import datetime, timezone
# python Top20Current.py > vwap_ordered_list.txt

# setting time variables
current_unix_time = int(time.time())
unix_time_month = 60 * 60 * 24 * 31
unix_time_day = 60 * 60 * 24

#grabs json data, and sorts it by descending vwap
def fetch_bitcoin_data(days=1200, top=100, currency='USD'):
    url = f"http://bitcoincharts.com/charts/chart.json?m=bitstampUSD&r={days}&i=Daily"
    response = requests.get(url, verify=False)
    data = response.json()

#used for setting the rank, should be called rank not number.  todo, change number to rank
    number = 1

# we only want the date and vwap items from the full json return
    rows = [(entry[0], entry[7]) for entry in data]
    rows = rows[:-1]
    sorted_rows = sorted(rows, key=lambda x: float(x[1]), reverse=True)

# building the post in the terminal; I need to figure out how to write it to a file
    print("[pre][size=10pt]")
    print("[url=https://bitcoincharts.com/charts/bitstampUSD]Rank   BitStamp  USD/BTC[/url]")              
    for timestamp, vwap in sorted_rows[:top]:
        adjusted_timestamp = int(timestamp)
        utc_date = datetime.fromtimestamp(adjusted_timestamp, tz=timezone.utc).strftime('%Y-%m-%d')
        if number <= 99:
            spacing = "  "
        if number == 100:
            spacing = " "
        if timestamp >= current_unix_time - unix_time_month:
            bolding = "[b]"
            unbolding = "[/b]"
        if timestamp <= current_unix_time - (unix_time_month + 1):
            bolding = ""
            unbolding = ""
        print(f"{spacing}{bolding}{number:2d}  {utc_date}  {vwap:.0f} {currency}{unbolding}")
        number += 1
    print("[url=https://bitcointalk.org/index.php?topic=138109.msg54917391#msg54917391][size=8pt]     * * Chart Explanation * *[/size][/url]")
    print("[/size][/pre]")
              
if __name__ == "__main__":
    fetch_bitcoin_data()

EDIT: changed 'file' to 'folder' because that is what I meant, then changed 'edit' to 'error' for the same reason.
sr. member
Activity: 114
Merit: 93
Fly free sweet Mango.
Ok thank you for that advice.  Maybe you've got some more.  Smiley  Apparently I've taken on another posting job in the Top 20 days for Bitcoin thread, while still retaining my amateur status.   Grin   Luckily the code that was being used was available, but it is a bash script.  I spent yesterday fumbling about with Linux and WSL, and then a VirtualBox Ubuntu install, trying to get it to work.  Copilot walked me through the steps as the errors rolled in: have to be in the same directory, set the environment, set permissions, set it to execute.  But in the end it would run and give no results.  Just on to the next prompt.  I do really like Python though, and after an initial translation by copilot that didn't work, we hacked out a partial solution.  The Visual Studio integration with WSL does seem pretty useful, like ImageMagick, and warrants further study.

Here is the base bash script from user dooglus, which I believe yefi used and might have modified, to include underlining for example.
Code:
vwap() {
    days=1200
    top=20
    currency=USD
    rows=$(wget -o/dev/null -O- "http://bitcoincharts.com/charts/chart.json?m=bitstampUSD&r=$days&i=Daily" |
                  sed 's/], \[/\n/g'   |
                  head -n $((days-1))  |
                  tr -d '[],'          |
                  awk '{print $1, $8}' |
                  sort -k2nr           |
                  head -$top
        )
    newest=$(echo "$rows" | sort -n | tail -1 | awk '{print $1}')
    printf "Update:\n[pre]\n"
    n=1
    month_ago=$(($(date +%s) - 60*60*24*32))
    echo "$rows" |
        while read t p
        do
            if ((t > month_ago)); then b1="[b]"            ; b2="[/b]"    ; else b1=""; b2=""; fi
            if ((t == newest))   ; then c1="[color=#7F0000]"; c2="[/color]"; else c1=""; c2=""; fi
            printf "%s%s%2d  %s  %7.2f $currency%s%s\n" "$b1" "$c1" $n "$(TZ= date -d @$t +%Y-%m-%d)" $p "$c2" "$b2"
            ((n++))
        done
    printf "[/pre]\n"
}

And here is the current Python code I'm using.  It's got a function in it, so you know I had help.   Cheesy  Again, I can read it, but not write it.  I did know what to change to get the price to the nearest dollar, and not penny.  Smiley
Code:
import requests
from datetime import datetime

#getting the last 1200 days of btc volume weighted average price
def fetch_bitcoin_data(days=1200, top=100, currency='USD'):
    url = f"http://bitcoincharts.com/charts/chart.json?m=bitstampUSD&r={days}&i=Daily"
    response = requests.get(url, verify=False)
    data = response.json()
    number = 1
    rows = [(entry[0], entry[7]) for entry in data]
    
    # Exclude the most recent entry (today's data)
    rows = rows[:-1]
    
    sorted_rows = sorted(rows, key=lambda x: x[1], reverse=True)
    
    for timestamp, vwap in sorted_rows[:top]:
        adjusted_timestamp = int(timestamp)
        utc_date = datetime.utcfromtimestamp(adjusted_timestamp).strftime('%Y-%m-%d')
        print(f"{number:2d}  {utc_date}  {vwap:.0f} {currency}")
        number += 1
        if number > top:
            break

if __name__ == "__main__":
    fetch_bitcoin_data()
legendary
Activity: 3290
Merit: 16489
Thick-Skinned Gang Leader and Golden Feather 2021
I need to look into what you meant by numerical sort, in terms of possible commands.
In sort, it's this:
Code:
       -n, --numeric-sort
              compare according to string numerical value
If you're going to move your code to Linux anyway, maybe it helps.

Quote
I'm really surprised the datetime module doesn't return double digit hours, days, months, all that stuff.
Isn't that an option you can toggle?
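For reference: the date attributes are plain ints, but strftime and f-string format specs do zero-pad, e.g.:
Code:
from datetime import date

today = date(2024, 3, 5)
print(today.month, today.day)                # 3 5  (plain ints, no padding)
print(f"{today:%m}-{today:%d}")              # 03-05
print(f"{today.month:02d}-{today.day:02d}")  # 03-05
print(today.strftime("%Y-%m-%d"))            # 2024-03-05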
sr. member
Activity: 114
Merit: 93
Fly free sweet Mango.
Okay.  Nobody told me this bloody machine can't even count.  You tell it to load numbered files in order and it goes, 1, 10, 11-19, 2, 20, 21...  Grin
Lol. Been there, done that Smiley It's not counting, it's sorting. Easy fix: use leading zeros, or numerical sort.
I need to look into what you meant by numerical sort, in terms of possible commands.  But yeah, I guess it alphabetically sorts the list, not numerically.  But when I add the leading zeros it breaks all my plugins.  Smiley  Such is the way of progress.  I'm really surprised the datetime module doesn't return double digit hours, days, months, all that stuff.
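If leading zeros break the plugins, another route is to sort numerically in Python: pull the number out of each filename and use it as the sort key. A small sketch with made-up names:
Code:
import re

files = ["download (1).png", "download (10).png", "download (2).png", "download (24).png"]

# alphabetical order: 1, 10, 2, 24 ...
print(sorted(files))

# numeric order: extract the digits and sort on them as an int
numeric = sorted(files, key=lambda f: int(re.search(r"\((\d+)\)", f).group(1)))
print(numeric)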
legendary
Activity: 3290
Merit: 16489
Thick-Skinned Gang Leader and Golden Feather 2021
Okay.  Nobody told me this bloody machine can't even count.  You tell it to load numbered files in order and it goes, 1, 10, 11-19, 2, 20, 21...  Grin
Lol. Been there, done that Smiley It's not counting, it's sorting. Easy fix: use leading zeros, or numerical sort.
sr. member
Activity: 114
Merit: 93
Fly free sweet Mango.
Okay.  Nobody told me this bloody machine can't even count.  You tell it to load numbered files in order and it goes, 1, 10, 11-19, 2, 20, 21...  Grin Luckily I could see something was amiss.
Here's the code I finally squeezed out.  I did have to do a quick Brave search to recall the method of stating a range and getting the length of a list.  Also some hard coding of dates that needs to be fixed.  I figured the easiest way I knew to get all these images into GIMP in the right order would be to number them as they were placed in a single folder, 674 of them this previous month.  Then my existing GIMP gif-making plugin could easily be modified, but I think I actually just hit ctrl-a to select them all and then dragged them into GIMP with the title page already loaded, used the reverse-layers plugin, exported, and Bob's your uncle.
Code:
import os, shutil, time
from os import rename

#making a backup
shutil.copytree('C:/Users/Games/CB/CBuddyDaily', 'C:/Users/Games/Backup')

# set dates and variables for folders and files
# today = date.today()
# tomorrow = today + timedelta(1)
destination = "C:/Users/Games/CB/2024/2-2024/Monthly"
hour_number = 1
day_number= 1

# for 29 days this year
for i in range(1, 30):
    file_number = 1
    src = f"C:/Users/Games/CB/CBuddyDaily/2-{day_number:02d}"
    files = os.listdir(src)
    CB_daily_post_total = len(files) + 1
    os.chdir(src)
    time.sleep(1)

    for m in range(1, CB_daily_post_total):
        rename (f'C:/Users/Games/CB/CBuddyDaily/2-{day_number:02d}/download ({m}).png', f"C:/Users/Games/CB/2024/2-2024/Monthly/download ({hour_number}).png")
        print(hour_number, file_number, m)
        hour_number += 1
        file_number += 1
    day_number += 1
    print(day_number)
There might be some good error checking code in there.  If I knew how many posts ChartBuddy made that day before starting the whole process, that would be helpful.

Still waiting for that perfect run, but things really went smoothly this run, only because there were exactly 24 images to download.  Gonna work on that.
1.Download Left click
2.Import  Do pushups.  Y'all hear about the 100 pushups a day till 100k btc challenge?  https://bitcointalksearch.org/topic/--5484350
3.Export  
4.Posting  I have it set to skip the Post button and tab one more time to the Preview button, for now.  This time I had to add the monthly recap to the post.
5.Archive  Here, it finally errored out.  On the penultimate command, because I didn't have a new 3-2024 folder to store the gifs in.

2_29CB.py errr 3_1CB.py EDIT: I'm so distressed I couldn't post the monthly, I don't even know what day it is.  Just like my code sometimes.  Cry
Code:
import csv, os, pyautogui, pyperclip, re, requests, shutil, time, urllib.request, webbrowser
from datetime import timedelta, date
from os import rename

startTime = time.perf_counter()

# set dates for links and new folder
today = date.today()
tomorrow = today + timedelta(1)

# name newfolder with date
directory = f"{today.month}-{today.day}"
parent_dir = "C:/Users/Games/CB/images/"
newf = os.path.join(parent_dir, directory)
os.mkdir(newf)

# get the final 20 gif layers in reverse order, starting with 24
number = 24
url4 = 'https://bitcointalk.org/index.php?action=profile;u=110685;sa=showPosts;start=0'
response = requests.get(url4)

# turn response into textfile of the source code.
source_code = response.text

# read the source code, save it, and turn it into a string.  
textfile = open('C:/Users/Games/CBSource.txt', 'a+')
textfile.write(source_code)
textfile.seek(0)
filetext = textfile.read()
textfile.close()

# find matches using regex, and for every match download the image and number it.  resorted to asking copilot for help with my regex
matches = re.findall(r'https:\/\/www\.talkimg\.com\/images\/\w{4}/\w{2}\/\w{2}\/\w{5}\.png', filetext)
for link in matches:
    print(number, link)
    urllib.request.urlretrieve(link, 'C:/Users/Games/CB/images/download ({}).png'.format(number))
    number = number - 1
    time.sleep(5)
os.remove('C:/Users/Games/CBSource.txt')

# get the first 4 images in reverse order, i copied my own code and changed the link.  Should have made a function and then fed it the links probably.
url5 = 'https://bitcointalk.org/index.php?action=profile;u=110685;sa=showPosts;start=20'
response5 = requests.get(url5)
source_code = response5.text
textfile5 = open('C:/Users/Games/CBSource2.txt', 'a+')
textfile5.write(source_code)
textfile5.seek(0)
filetext = textfile5.read()
textfile5.close()

# find matches using regex, and for first 4 matches download the image and number it
matches = re.findall(r'https:\/\/www\.talkimg\.com\/images\/\w{4}/\w{2}\/\w{2}\/\w{5}\.png', filetext)
for link in matches:
    if number >= 1:
        urllib.request.urlretrieve(link, 'C:/Users/Games/CB/images/download ({}).png'.format(number))
        print(number, link)
        number = number - 1
        time.sleep(5)
os.remove('C:/Users/Games/CBSource2.txt')

# hot keys to open gimp and then the plugin that load layers, export, scale, export gifs, quit, agree to not save
time.sleep(5)
pyautogui.click(1, 1)
time.sleep(5)
pyautogui.hotkey('ctrl', 'alt', 'g')
time.sleep(20)
pyautogui.click(820, 446)
time.sleep(20)
pyautogui.hotkey('ctrl', 'alt', 'l')
time.sleep(5)
pyautogui.hotkey('tab')
time.sleep(1)
pyautogui.hotkey('tab')
time.sleep(1)
pyautogui.hotkey('tab')
time.sleep(1)
pyautogui.hotkey('enter')
time.sleep(10)
pyautogui.hotkey('ctrl', 'q')
time.sleep(5)
pyautogui.hotkey('shift', 'tab')
time.sleep(1)
pyautogui.hotkey('enter')
time.sleep(10)

# uploading big gif and getting link to use later,
url = "https://api.imgur.com/3/image"
payload = {'name': f'b{today.month}-{today.day}'}
files=[('image',('C:/Users/Games/gif.gif',open('C:/Users/Games/gif.gif','rb'),'image/gif'))]
headers = {'Authorization': 'Bearer f0e27b94e6f8ead1480763e666c8587b73365850'}
response = requests.request("POST", url, headers=headers, data=payload, files=files)

# looking for the link
imgur_return = response.text
linkfile = open('C:/Users/Games/imgurlink.txt', 'a+')
linkfile.write(imgur_return)
linkfile.seek(0)
filetext = linkfile.read()
linkfile.close()
imgurlink = re.findall(r'https:\/\/i\.imgur\.com\/.*\.gif', filetext)

# and the following only works  i think because it's the only link in the JSON response
for imgur in imgurlink:
    imgur_big_gif = imgur
os.remove('C:/Users/Games/imgurlink.txt')

# open imgtalk to upload gif2
url3 = "https://www.talkimg.com/"
webbrowser.open(url3)
time.sleep(30)
pyautogui.click(953, 590)
time.sleep(5)
pyautogui.click(221, 479)
time.sleep(5)
pyautogui.typewrite("gif2.gif")
time.sleep(5)
pyautogui.hotkey('tab')
time.sleep(1)
pyautogui.hotkey('tab')
time.sleep(10)
pyautogui.hotkey('enter')
time.sleep(5)
pyautogui.click(949, 645)
time.sleep(5)
pyautogui.click(1276, 625)
time.sleep(5)
imgtalklink = pyperclip.paste()

# add post to clipboard for btctalk
pyperclip.copy(f"ChartBuddy's 24 hour Wall Observation recap\n[url={imgur_big_gif}].{imgtalklink}.[/url]\nAll Credit to [url=https://bitcointalk.org/index.php?topic=178336.msg10084622#msg10084622]ChartBuddy[/url]")

# can use this link for the reply button
url7 = 'https://bitcointalk.org/index.php?action=post;topic=178336.0'
webbrowser.open(url7)
time.sleep(10)
pyautogui.hotkey('tab')
time.sleep(5)
pyautogui.hotkey('tab')
time.sleep(5)
pyautogui.hotkey('ctrl', 'v')
time.sleep(5)
pyautogui.hotkey('tab')
time.sleep(5)
# we're doing it live if the next command is #ed out
pyautogui.hotkey('tab')
time.sleep(5)
pyautogui.hotkey('enter')

#runtime is calculated
stopTime = time.perf_counter()
runtime = {stopTime - startTime}

# save to csv file
f = open('C:/PyProjects/runtimes.csv', 'a', newline='')
writer = csv.writer(f)
writer.writerow(runtime)

time.sleep(20)

# prepare to store downloads
src = "C:/Users/Games/CB/images"
dest = "C:/Users/Games/CB/images/{}".format(directory)
files = os.listdir(src)
os.chdir(src)

# only move numbered png files
for file in files:
    if file.endswith(").png"):
        shutil.move(file, dest)  

# big gif is stored
rename ("C:/Users/Games/gif.gif", f"C:/Users/Games/CB/{today.year}/{today.month}-{today.year}/b{today.month}-{today.day}.gif")

# little gif is stored
rename (f"C:/Users/Games/gif2.gif", f"C:/Users/Games/CB/{today.year}/{today.month}-{today.year}/{today.month}-{today.day}.gif")

Next moves: Handling errors, exceptions, and elses.  What a save!

EDIT:  Runtime was 364.1 s, of which 220 s were sleep commands to make sure things weren't happening too fast.  Very nice.
sr. member
Activity: 114
Merit: 93
Fly free sweet Mango.
PermissionError: [WinError 32] The process cannot access the file because it is being used by another process:
~
I'm going to install linux on a virtual machine, which i do have some very limited experience with, and see how things go there.
Linux will never tell you you can't move or delete a file because it's in use. It just does what you tell it to do. If you would delete a movie while it's playing, it keeps playing until the end anyway.

That would seem to solve that problem, because if I understand how things are happening, GIMP should be done with the file once GIMP is closed, but obviously not.  But here's a crazy thing.  Tonight the upload failed while the file move succeeded.  Huh  I did move the file-move command to the very end of the script, but it had previously failed all day in testing with the same changes.   Huh  I need to pay closer attention to the finer details.  Oh.  Yeah.  My attempt at an auto monthly recap resulted in... not that.  Going to try again tomorrow.  The code for the monthly recap is below, but honestly, of course I might have messed up my own code by backing up empty folders by accident. Whoops.
Whoopsie.
Code:
import os, shutil
from os import rename

#making a backup
shutil.copytree('C:/Users/Games/CB/CBuddyDaily', 'C:/Users/Games/Backup')

# set dates and variables for file numbering
# today = date.today()
# tomorrow = today + timedelta(1)
destination = "C:/Users/Games/CB/2024/2-2024/Monthly"
hour_number = 1
day_number= 1

for i in range(1, 30):
    src = f"C:/Users/Games/CB/CBuddyDaily/02-{day_number:02d}"
    files = os.listdir(src)
    os.chdir(src)
    print(src)
    for file in files:
        rename (file, f"C:/Users/Games/CB/2024/2-2024/Monthly/download ({hour_number}).png")
        hour_number += 1
        print(hour_number)
        print(day_number)
    day_number += 1
    print(day_number)

def play_game(Rocket_League):
    pass
legendary
Activity: 3290
Merit: 16489
Thick-Skinned Gang Leader and Golden Feather 2021
PermissionError: [WinError 32] The process cannot access the file because it is being used by another process:
~
I'm going to install linux on a virtual machine, which i do have some very limited experience with, and see how things go there.
Linux will never tell you you can't move or delete a file because it's in use. It just does what you tell it to do. If you would delete a movie while it's playing, it keeps playing until the end anyway.
sr. member
Activity: 114
Merit: 93
Fly free sweet Mango.
Last time was another head scratcher.  I figured out the hard parts, downloading the images, making the gifs, composing and posting the post, but now one of the first things I was doing, moving files around, kept erroring out.

PermissionError: [WinError 32] The process cannot access the file because it is being used by another process:

I commented out all of the downloading below, because that part of the code worked the first time, and also all the file-transfer parts that stopped working.  Just in time for the end of the month recap.  Brilliant.   Grin  That's what I'm having fun with now: I'm going to try and figure it out just looking at snippets I've grabbed, and then I'm going to see what copilot says, without providing any of my code, only telling it what I want the script to do.  Oh, and it is probably my mistake somehow, but the copilot solution to pulling the link out of the API using JSON didn't work.  I think I imported everything required, json, and api from requests?  Can't fool around with that now, I'm sticking to what works.
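For the JSON route: requests can decode the response directly with response.json(), so there's no need to write it to a text file and regex the link back out.  A minimal sketch, using the same data -> link shape the newer version of the script reads (the token is a placeholder and the name field is just an example):
Code:
import requests

url = "https://api.imgur.com/3/image"
headers = {"Authorization": "Bearer <token>"}          # placeholder token
with open("C:/Users/Games/Postman/files/gif.gif", "rb") as fh:
    response = requests.post(url, headers=headers,
                             data={"name": "b2-28"},   # example name
                             files={"image": fh})

data = response.json()                # parsed JSON as a dict
imgur_big_gif = data["data"]["link"]  # assumed response shape
print(imgur_big_gif)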

Code:
import csv, os, pyautogui, pyperclip, re, requests, shutil, time, urllib.request, webbrowser
from datetime import timedelta, date
from os import rename

startTime = time.perf_counter()

# set dates for links and new folder
today = date.today()
tomorrow = today + timedelta(1)

# # get the final 20 gif layers in reverse order, starting with 24
# number = 24
# url4 = 'https://bitcointalk.org/index.php?action=profile;u=110685;sa=showPosts;start=0'
# response = requests.get(url4)

# # turn response into textfile of the source code.
# source_code = response.text

# # read the source code, save it, and turn it into a string.  
# textfile = open('C:/Users/Games/CBSource.txt', 'a+')
# textfile.write(source_code)
# textfile.seek(0)
# filetext = textfile.read()
# textfile.close()

# # find matches using regex, and for every match download the image and number it.  resorted to asking copilot for help with my regex
# matches = re.findall(r'https:\/\/www\.talkimg\.com\/images\/\w{4}/\w{2}\/\w{2}\/\w{5}\.png', filetext)
# for link in matches:
#     print(number, link)
#     urllib.request.urlretrieve(link, 'C:/Users/Games/CB/images/download ({}).png'.format(number))
#     number = number - 1
#     time.sleep(5)
# os.remove('C:/Users/Games/CBSource.txt')

# # get the first 4 images in reverse order, i copied my own code and changed the link.  Should have made a function and then fed it the links probably.
# url5 = 'https://bitcointalk.org/index.php?action=profile;u=110685;sa=showPosts;start=20'
# response5 = requests.get(url5)
# source_code = response5.text
# textfile5 = open('C:/Users/Games/CBSource2.txt', 'a+')
# textfile5.write(source_code)
# textfile5.seek(0)
# filetext = textfile5.read()
# textfile5.close()

# # find matches using regex, and for first 4 matches download the image and number it
# matches = re.findall(r'https:\/\/www\.talkimg\.com\/images\/\w{4}/\w{2}\/\w{2}\/\w{5}\.png', filetext)
# for link in matches:
#     if number >= 1:
#         urllib.request.urlretrieve(link, 'C:/Users/Games/CB/images/download ({}).png'.format(number))
#         print(number, link)
#         number = number - 1
#         time.sleep(5)
# os.remove('C:/Users/Games/CBSource2.txt')

# name newfolder with date
directory = f"{today.month}-{today.day}"
parent_dir = "C:/Users/Games/CB/images/"
newf = os.path.join(parent_dir, directory)
os.mkdir(newf)

# command for show desktop, and clicking an empty region on the proper monitor
time.sleep(5)
pyautogui.hotkey('win', 'd')
time.sleep(5)
pyautogui.click(1, 1)
time.sleep(5)

# hot keys to open gimp and then the plugin that load layers, export, scale, export gifs, quit, agree to not save
pyautogui.hotkey('ctrl', 'alt', 'g')
time.sleep(10)
pyautogui.click(820, 446)
time.sleep(5)
pyautogui.hotkey('ctrl', 'alt', 'l')
time.sleep(5)
pyautogui.hotkey('tab')
time.sleep(1)
pyautogui.hotkey('tab')
time.sleep(1)
pyautogui.hotkey('tab')
time.sleep(1)
pyautogui.hotkey('enter')
time.sleep(10)
pyautogui.hotkey('ctrl', 'q')
time.sleep(5)
pyautogui.hotkey('shift', 'tab')
time.sleep(1)
pyautogui.hotkey('enter')
time.sleep(5)
print('gif done')

# uploading big gif and getting link to use later,
url = "https://api.imgur.com/3/image"
payload = {'name': f'b{today.month}-{today.day}'}
files=[('image',('C:/Users/Games/Postman/files/gif.gif',open('C:/Users/Games/Postman/files/gif.gif','rb'),'image/gif'))]
headers = {'Authorization': 'Bearer f0e27b94e6f8ead1480763e666c8587b73365850'}
response = requests.request("POST", url, headers=headers, data=payload, files=files)

# looking for the link
imgur_return = response.text
linkfile = open('C:/Users/Games/imgurlink.txt', 'a+')
linkfile.write(imgur_return)
linkfile.seek(0)
filetext = linkfile.read()
linkfile.close()
imgurlink = re.findall(r'https:\/\/i\.imgur\.com\/.*\.gif', filetext)
# ibg = imgurlink
# print (ibg)

# if i don't do it the following way, the link comes out with ['brackets and quotes']
# that's probably because what i've been 're turned' is a list
# and the following only works because it's the only link in the JSON response
for imgur in imgurlink:
    imgur_big_gif = imgur
os.remove('C:/Users/Games/imgurlink.txt')

# big gif is stored, hmm cancelling all file movements, both these methods have worked.  I think i need to close the file or something
# PermissionError: [WinError 32] The process cannot access the file because it is being used by another process: 'C:/Users/Games/Postman/files/gif.gif'
# src = "C:/Users/Games/Postman/files/gif.gif"
# dest = f"C:/Users/Games/CB/{today.year}/{today.month}-{today.year}/"
# shutil.move('C:/Users/Games/Postman/files/gif.gif', dest)
# rename ("C:/Users/Games/CB/2024/2-2024/gif.gif", f"C:/Users/Games/CB/{today.year}/{today.month}-{today.year}/b{today.month}-{today.day}.gif")

# OR
# look at me turning 4 lines of code into 1
# rename ("C:/Users/Games/Postman/files/gif.gif", f"C:/Users/Games/CB/{today.year}/{today.month}-{today.year}/b{today.month}-{today.day}.gif")
# rename ("C:/Users/Games/Postman/files/gif.gif", f"C:/Users/Games/CB/{today.year}/{today.month}-{today.year}/b{today.month}-{today.day}.gif")

# open imgtalk to upload gif2
url3 = "https://www.talkimg.com/"
webbrowser.open(url3)
time.sleep(10)
pyautogui.click(953, 590)
time.sleep(5)
pyautogui.click(221, 479)
time.sleep(5)
pyautogui.typewrite("gif2.gif")
time.sleep(5)
pyautogui.hotkey('tab')
time.sleep(1)
pyautogui.hotkey('tab')
time.sleep(10)
pyautogui.hotkey('enter')
time.sleep(5)
pyautogui.click(949, 645)
time.sleep(5)
pyautogui.click(1276, 625)
time.sleep(5)
imgtalklink = pyperclip.paste()

# little gif is stored
#  rename (f"C:/Users/Games/Postman/files/gif2.gif", f"C:/Users/Games/CB/{today.year}/{today.month}-{today.year}/{today.month}-{today.day}.gif")

# # prepare to store downloads
# src = "C:/Users/Games/CB/images"
# dest = "C:/Users/Games/CB/images/{}".format(directory)
# files = os.listdir(src)
# os.chdir(src)

# # only move numbered png files
# for file in files:
#     if file.endswith(").png"):
#         shutil.move(file, dest)  

# add post to clipboard for btctalk
pyperclip.copy(f"ChartBuddy's 24 hour Wall Observation recap\n[url={imgur_big_gif}].{imgtalklink}.[/url]\nAll Credit to [url=https://bitcointalk.org/index.php?topic=178336.msg10084622#msg10084622]ChartBuddy[/url]")

# can use this link for the reply button
url7 = 'https://bitcointalk.org/index.php?action=post;topic=178336.0'
webbrowser.open(url7)
time.sleep(10)
pyautogui.hotkey('tab')
time.sleep(5)
pyautogui.hotkey('tab')
time.sleep(5)
pyautogui.hotkey('ctrl', 'v')
time.sleep(5)
pyautogui.hotkey('tab')
time.sleep(5)
# we're doing it live if the next command is #ed out
pyautogui.hotkey('tab')
time.sleep(5)
pyautogui.hotkey('enter')

#runtime is calculated
stopTime = time.perf_counter()
runtime = {stopTime - startTime}

# save to csv file
f = open('C:/PyProjects/runtimes.csv', 'a', newline='')
writer = csv.writer(f)
writer.writerow(runtime)
Crash and burn again, with the permissions.  I thought changing the GIMP exports from Postman's folder to my own folder would solve the problem.  Which it did, but apparently only for one run.  I understand Buddy needs space, so I'm going to install Linux on a virtual machine, which I do have some very limited experience with, and see how things go there.  See you on the other side.
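On that WinError 32: one likely culprit is that the gif handle opened inside the files= dict is never closed, so the same Python process still has the file open when the move runs.  A sketch of the pattern that avoids it (paths as in the script above; the token and destination name are placeholders):
Code:
import shutil
import requests

gif_path = "C:/Users/Games/Postman/files/gif.gif"

# the with-block closes the handle as soon as the upload returns
with open(gif_path, "rb") as fh:
    response = requests.post(
        "https://api.imgur.com/3/image",
        headers={"Authorization": "Bearer <token>"},   # placeholder token
        files={"image": fh},
    )

# nothing in this script holds the file open any more, so the move can succeed
shutil.move(gif_path, "C:/Users/Games/CB/2024/2-2024/b2-28.gif")  # example destination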
sr. member
Activity: 114
Merit: 93
Fly free sweet Mango.
Fingers of lead tonight mates.  Sad  I couldn't leave well enough alone.  Not that it worked completely last night, but here's a lesson that I know but didn't implement: test after each change to one's code, because if one changes 10 things and it doesn't work, which thing is the problem?  I still can't figure out what happened with the imgur API last night, and how it uploaded an old gif.  But now I can't get it to work with Python at all; I can upload using the Postman desktop app, but the Python script it gives me kept erroring out tonight, even though I didn't touch that part of the code.  Last night something didn't work with imgur, but my code did, somehow, stumble the rest of the way to post the talkimg-hosted gif onto the bct forum.
At least I've been saving daily versions of the script, so tonight I went back a few days and had a runtime of 342.5 s.   Embarrassed
sr. member
Activity: 114
Merit: 93
Fly free sweet Mango.
One click! One click! One click!  No, not 3 clicks, I said One click!, but it's actually much, much more!  How's that you ask?  Well, in one click, sorry, in One click!, I got the post, with the proper gif, posted to the WO thread.  But for reasons I can't figure out yet, the imgur.com clickable link came out using 2-23 data.  Huh  So I needed clicks to fix it.  Must be something to do with testing, but the code I ran is below.  I also successfully failed, sailed, in step 4, when the 'my way' multi-step solution to a simple answer went wrong, yet showed the way.  Went wrong in the sense that there are many parts to solving a problem; it's not just the solving part.  Effort should be put into identifying the variables, and how the variables affect the outcome.  If I had mapped out all the outcomes, I might have seen from step one that just applying my pyautogui commands to the static link I've hard-coded would have been much easier.

1.Download
2.Import  
3.Export
I need to figure out what to do on days with fewer than 24 downloads.  Also I'm going to work on less file movement: just put each file where it is going and call it from there when needed.  Before that I should understand virtual environments better.

4.Posting  
I'm looking forward to removing all the pyautogui and using the command line.  But if I'm logged into BCT and imgur, everything should work with one click. Cheesy  I had a plan and I executed it, but it ain't pretty.

I figured out my way to get the most recent page of the Wall Observer thread by looking at the first page's source, which is a static link, to find the link to the last page of the thread at the bottom of the page.  I had some fun with that, because of my ignorance of the finer differences between strings and lists.  But with a little help from copilot we powered on.  I now see one could also find the link to the most current page number on the reply page to that thread. ATTN: the code error produced 1000, not the desired 661080, or I guess it should have been 661060, since this particular post is the first on a new page.  OR, ya big doofus (that's me), just hit reply on the first page and let the forum code handle how to post it as the most recent post.  Roll Eyes  I realized this when my solution failed and sent me to some other page rather than the last one, but since pyautogui mindlessly went on to hit reply, everything worked out okay, for that part.  Phew, at least I was in the proper thread.  Smiley

5.Archive
I just learned that pyautogui can also click and drag, so it should be possible, for fun, to drag all the images from every day this month into GIMP.  Maybe set up another race... Smiley  It would be good practice for loops and functions maybe.  I'm seeing a function that auto double-clicks the first day's folder at (x, y), then selects all with ctrl + a.  This way, there is no need to know how many images are in each day to move.  Then click and drag into GIMP, go back to the previous folder, and loop around with (x, y + 20) or wherever the next day's folder is.  Hmm.  (A rough sketch of that loop follows below.)

I would need to know how many folders there are, or I could change the name of the folder to include a variable number that goes up by one each time a new day is added, then just cut it out of the directory name to plug into the mouse moving function, at the end of the month.  
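A rough sketch of that click-and-drag loop; every coordinate here is hypothetical and would have to be read off the real screen layout:
Code:
import pyautogui, time

# hypothetical screen positions
first_folder = (400, 300)    # first day's folder icon in the file manager
gimp_canvas = (1400, 500)    # somewhere on the GIMP window
row_height = 20              # vertical spacing between folder rows

for day in range(29):        # 29 days this February
    x, y = first_folder[0], first_folder[1] + day * row_height
    pyautogui.doubleClick(x, y)          # open the day's folder
    time.sleep(2)
    pyautogui.hotkey('ctrl', 'a')        # select every image inside
    time.sleep(1)
    pyautogui.moveTo(x, y)
    pyautogui.dragTo(*gimp_canvas, duration=1.5, button='left')  # drag the selection into GIMP
    time.sleep(5)
    pyautogui.hotkey('alt', 'left')      # back to the parent folder
    time.sleep(2)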

2-25 run code runtime = 313.0 s
Code:
import csv, os, pyautogui, pyperclip, re, requests, shutil, time, urllib.request, webbrowser
from datetime import timedelta, date
from os import rename

startTime = time.perf_counter()

# set dates for links and new folder
today = date.today()
tomorrow = today + timedelta(1)

# get the final 20 gif layers in reverse order, starting with 24
number = 24
url4 = 'https://bitcointalk.org/index.php?action=profile;u=110685;sa=showPosts;start=0'
response = requests.get(url4)

# turn response into textfile of the source code.
source_code = response.text

# read the source code, save it, and turn it into a string.  
textfile = open('C:/Users/Games/CBSource.txt', 'a+')
textfile.write(source_code)
textfile.seek(0)
filetext = textfile.read()
textfile.close()

# find matches using regex, and for every match download the image and number it.  resorted to asking copilot for help with my regex
matches = re.findall(r'https:\/\/www\.talkimg\.com\/images\/\w{4}/\w{2}\/\w{2}\/\w{5}\.png', filetext)
for link in matches:
    print(number, link)
    urllib.request.urlretrieve(link, 'download ({}).png'.format(number))
    number = number - 1
    time.sleep(5)
os.remove('C:/Users/Games/CBSource.txt')

# get the first 4 images in reverse order, i copied my own code and changed the link.  Should have made a function and then fed it the links probably.
url5 = 'https://bitcointalk.org/index.php?action=profile;u=110685;sa=showPosts;start=20'
response5 = requests.get(url5)
source_code = response5.text
textfile5 = open('C:/Users/Games/CBSource2.txt', 'a+')
textfile5.write(source_code)
textfile5.seek(0)
filetext = textfile5.read()
textfile5.close()

# find matches using regex, and for first 4 matches download the image and number it
matches = re.findall(r'https:\/\/www\.talkimg\.com\/images\/\w{4}/\w{2}\/\w{2}\/\w{5}\.png', filetext)
for link in matches:
    if number >= 1:
        urllib.request.urlretrieve(link, 'download ({}).png'.format(number))
        print(number, link)
        number = number - 1
        time.sleep(5)
os.remove('C:/Users/Games/CBSource2.txt')

# move em where they usually go, repurposing code
src = "C:/Users/Games/"
dest = "C:/Users/Games/Downloads/"
files = os.listdir(src)
os.chdir(src)

# only move numbered png files
for file in files:
    if os.path.isfile(file): # probably don't need this because of the next if?
        if file.endswith(").png"):
            shutil.move(file, dest)

# name newfolder with date
directory = f"{today.month}-{today.day}"
parent_dir = "C:/Users/Games/Downloads"
newf = os.path.join(parent_dir, directory)
os.mkdir(newf)

# make sure everything is in the right place, no need to rush.  yet :)
# command for show desktop, and clicking an empty region on the proper monitor
time.sleep(5)
pyautogui.hotkey('win', 'd')
time.sleep(5)
pyautogui.click(1, 1)
time.sleep(5)

# hot keys to open gimp and then the plugin that load layers, export, scale, export gifs, quit, agree to not save
pyautogui.hotkey('ctrl', 'alt', 'g')
time.sleep(10)
pyautogui.hotkey('ctrl', 'alt', 'l')
time.sleep(5)
pyautogui.hotkey('tab')
time.sleep(1)
pyautogui.hotkey('tab')
time.sleep(1)
pyautogui.hotkey('tab')
time.sleep(1)
pyautogui.hotkey('enter')
time.sleep(10)
pyautogui.hotkey('ctrl', 'q')
time.sleep(5)
pyautogui.hotkey('shift', 'tab')
time.sleep(1)
pyautogui.hotkey('enter')
time.sleep(5)

# uploading big gif and getting link to use later,
url = "https://api.imgur.com/3/image"
payload = {'name': f'b{today.month}-{today.day}'}
files=[('image',('gif.gif',open('gif.gif','rb'),'image/gif'))]
headers = {'Authorization': 'Bearer f0e27b94e6f8ead1480763e666c8587b73365850'}
response = requests.request("POST", url, headers=headers, data=payload, files=files)

# find imgur url from api response
imgur_return = response.text
linkfile = open('C:/Users/Games/imgurlink.txt', 'a+')
linkfile.write(imgur_return)
linkfile.seek(0)
filetext = linkfile.read()
linkfile.close()
imgurlink = re.findall(r'https:\/\/i\.imgur\.com\/.*\.gif', filetext)
# ibg = imgurlink
# print (ibg)

# if i don't do it this way, the link comes out with ['brackets and quotes']
# that's probably because what i've been 're turned' is a list
# and the following only works because it's the only link in the JSON response
for imgur in imgurlink:
    ibg = imgur
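# (same result here: ibg = imgurlink[0], since findall() returns a list and there's only one match)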
os.remove('C:/Users/Games/imgurlink.txt')

# big gif is stored
src = "C:/Users/Games/Postman/files/gif.gif"
dest = f"C:/PyProjects/GMIP/{today.year}/{today.month}-{today.year}/"
shutil.move("C:/Users/Games/Postman/files/gif.gif", dest)
rename (f"C:/PyProjects/GMIP/{today.year}/{today.month}-{today.year}/gif.gif", f"C:/PyProjects/GMIP/{today.year}/{today.month}-{today.year}/b{today.month}-{today.day}.gif")

# open imgtalk to upload gif2
url3 = "https://www.talkimg.com/"
webbrowser.open(url3)

# pyautogui to the rescue
time.sleep(10)
pyautogui.click(953, 590)
time.sleep(5)
pyautogui.click(221, 479)
time.sleep(5)
pyautogui.typewrite("gif2.gif")
time.sleep(5)
pyautogui.hotkey('tab')
time.sleep(1)
pyautogui.hotkey('tab')
time.sleep(10)
pyautogui.hotkey('enter')
time.sleep(5)
pyautogui.click(949, 645)
time.sleep(5)
pyautogui.click(1276, 625)
time.sleep(5)
imgtalklink = pyperclip.paste()

# little gif is stored
src = "C:/Users/Games/Postman/files/"
dest = f"C:/PyProjects/GMIP/{today.year}/{today.month}-{today.year}/"
shutil.move("C:/Users/Games/Postman/files/gif2.gif", dest)
rename (f"C:/PyProjects/GMIP/{today.year}/{today.month}-{today.year}/gif2.gif", f"C:/PyProjects/GMIP/{today.year}/{today.month}-{today.year}/{today.month}-{today.day}.gif")

# make a list of files in downloads folder
src = "C:/Users/Games/Downloads"
dest = "C:/Users/Games/Downloads/{}".format(directory)
files = os.listdir(src)
os.chdir(src)

# only move numbered png files
for file in files:
    if os.path.isfile(file):
        if file.endswith(").png"):
            shutil.move(file, dest)  

# add post to clipboard for btctalk
pyperclip.copy(f"ChartBuddy's 24 hour Wall Observation recap\n[url={ibg}].{imgtalklink}.[/url]\nAll Credit to [url=https://bitcointalk.org/index.php?topic=178336.msg10084622#msg10084622]ChartBuddy[/url]")

# what kind of newb posts their own post? ;)
# this is one way to get the most current wall observer page through the page source
url7 = 'https://bitcointalk.org/index.php?topic=178336.0'
response = requests.get(url7)

# turn response into textfile of the source code.
source_code = response.text

# read the source code, save it, and turn it into a string.  
textfile = open('C:/Users/Games/CBSource.txt', 'a+')
textfile.write(source_code)
textfile.seek(0)
filetext = textfile.read()
textfile.close()
os.remove('C:/Users/Games/CBSource.txt')

# look for all the links again
matches = re.findall(r'https:\/\/bitcointalk\.org\/index\.php\?topic=178336\.[0-9]+', filetext)

# start an empty list and fill it with all the links and split off the thread numbers
pages = []
for hit in matches:
    res = hit.rsplit('.', 1)[-1]
    # res2 = res[1:2:1], oh boy, i just found this comment before runtime, but i searched for res2 and this is the only instance so i'm going for it like this
    # “The Times 2/26/2024 07:24 utc DK on brink of second crash.”  let's do this
    pages.append(res)

# i knew link i wanted was 5 from the end, so start at -5 for, until -6 is hit, -1 each time
# what I figured out to do, but it was coming back [['what I want']]
theone = (pages[-5:-6:-1])

# getting tired so didn't even try, copilot said do this
theone = theone[0].strip('[]')
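# (pages[-5] on its own would give the same bare string, without the slice or the strip)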

# I'm back!, time for more autopygui
url7 = 'https://bitcointalk.org/index.php?topic=178336.{}'.format(theone)
webbrowser.open(url7)
time.sleep(5)
pyautogui.click(1171, 347)
time.sleep(5)
pyautogui.hotkey('end')
time.sleep(5)
pyautogui.click(1627, 829)
time.sleep(5)
pyautogui.hotkey('tab')
time.sleep(5)
pyautogui.hotkey('tab')
time.sleep(5)
pyautogui.hotkey('ctrl', 'v')
time.sleep(5)
# hit post!
pyautogui.click(914, 1008)

#runtime is calculated
stopTime = time.perf_counter()
runtime = {stopTime - startTime}

# save to csv file
f = open('C:/PyProjects/runtimes.csv', 'a', newline='')
writer = csv.writer(f)
writer.writerow(runtime)

EDIT: added details about 'semicrash' circumstances
sr. member
Activity: 114
Merit: 93
Fly free sweet Mango.
It didn't work, it didn't work, we. didn't. work.  Big crash, BUT, huge breakthrough, although I need more testing to make sure it even worked.  Huge crashing progress!  With enough pyautoguis we could take over the world!  Firstly, the 2_22 build has worked great the last 2 days, with runtimes of 239.2 s and 252.5 s.  But we need fewer keystrokes.   Grin

Storylog:
1.Download
Works with a click Smiley

2.Import
Son of a batch.  Gonna take the L on this one for now.  Even copilot said it should be working.  Examples follow
Code:
PATH=%PATH%;"C:\Program Files\GIMP 2\bin"
gimp-2.10 --batch-interpreter python-fu-eval --pdb-compat-mode="on" -b "pdb.python_fu_loadlay" -b pdb.file_gif_save2 (image, drawable, "C:/PyProjects/tmp/gif.gif", "C:/PyProjects/tmp/gif.gif", 0, 1, 1000, 0, 0, 0, 0)

OR

PATH=%PATH%;"C:\Program Files\GIMP 2\bin"
gimp-2.10 --batch-interpreter python-fu-eval -b "pdb.python_fu_loadlay" -b "pdb.file_gif_save2" (image, drawable, "C:/PyProjects/tmp/gif.gif", "gif.gif", 0, 1, 1000, 0, 0, 0, 0)

OR

"C:\Program Files\GIMP 2\bin\gimp-console-2.10.exe" -i --batch-interpreter python-fu-eval -b "pdb.python_fu_loadlay" -b "(gimp-quit 1)"

But with pyautogui, I could just program in the mindless tabs and clicks.  Amazing!  So that I did.
 
3.Export
See above re: pyautogui's cool factor

4.Posting  
Figured out some of imgur's API, and how to crudely hack the link out of the imgur JSON response, I think, to get the link after uploading.  Full disclosure: I gave up and asked copilot, and then figured out how to make 'my' way work.  Then there is my attempt to use pyautogui to upload a talkimg image and collect the link with no API.  I believe it would have worked if I had put the script to sleep for longer before moving the created gifs.  We'll see tomorrow...
Edge's copilot way
Code:
import json

# assuming 'api' holds the imgur upload response object from requests
response_data = json.loads(api.text)
image_link = response_data.get('data', {}).get('link')
ibg = image_link
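
Since the upload already goes through requests, the response object can parse the JSON itself; a one-line equivalent of the snippet above, still assuming 'api' holds the imgur upload response:
Code:
ibg = api.json().get('data', {}).get('link')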

5.Archive
tick tock

Code with two fails follows.  One, I believe, is easier to spot if you check the imgur filename.  The other, which I may be wrong about, is the need for a delay before moving the created gifs.

2_24 production build:
Code:
import csv, os, pyautogui, pyperclip, re, requests, shutil, subprocess, time, urllib.request, webbrowser
from datetime import timedelta, date
from os import rename
from tkinter import Tk

# start runtimer
startTime = time.perf_counter()

# set dates for links and new folder
today = date.today()
tomorrow = today + timedelta(1)
      
# learn to scrape 24 images 1 second at a time, yatta!
# get the final 20 gif layers in reverse order, starting with 24
number = 24
url4 = 'https://bitcointalk.org/index.php?action=profile;u=110685;sa=showPosts;start=0'
response = requests.get(url4)

# turn response into textfile of the source code.
source_code = response.text

# read the source code, save it, and turn it into a string.  
textfile = open('C:/Users/Games/CBSource.txt', 'a+')
textfile.write(source_code)
textfile.seek(0)
filetext = textfile.read()
textfile.close()

# find matches using regex, and for every match download the image and number it.  resorted to asking copilot for help with my regex
matches = re.findall(r'https:\/\/www\.talkimg\.com\/images\/\w{4}/\w{2}\/\w{2}\/\w{5}\.png', filetext)
for link in matches:
    print(link)
    urllib.request.urlretrieve(link, 'download ({}).png'.format(number))
    number = number - 1
    time.sleep(3)

#delete the source code
os.remove('C:/Users/Games/CBSource.txt')

# get the first 4 images in reverse order, i copied my own code and changed the link.  Should have made a function and then fed it the links probably.
url5 = 'https://bitcointalk.org/index.php?action=profile;u=110685;sa=showPosts;start=20'
response5 = requests.get(url5)
source_code = response5.text
textfile5 = open('C:/Users/Games/CBSource2.txt', 'a+')
textfile5.write(source_code)
textfile5.seek(0)
filetext = textfile5.read()
textfile5.close()

# find matches using regex, and for first 4 matches download the image and number it
matches = re.findall(r'https:\/\/www\.talkimg\.com\/images\/\w{4}/\w{2}\/\w{2}\/\w{5}\.png', filetext)
for link in matches:
    if number >=1:
        urllib.request.urlretrieve(link, 'download ({}).png'.format(number))
        number = number - 1
        time.sleep(3)
        print(link)
    
# delete the soure code
os.remove('C:/Users/Games/CBSource2.txt')

# move em where they usually go, repurposing code
src = "C:/Users/Games/"
dest = "C:/Users/Games/Downloads/"
files = os.listdir(src)
os.chdir(src)

# only move numbered png files
for file in files:
    if os.path.isfile(file): # probably don't need this because of the next if?
        if file.endswith(").png"):
            shutil.move(file, dest)  

# name newfolder with date
directory = f"{today.month}-{today.day}"
parent_dir = "C:/Users/Games/Downloads"
newf = os.path.join(parent_dir, directory)
os.mkdir(newf)
print("Directory '%s' created" %directory)

# hot keys for confirming plugin to open, load, export, scale, export gif, gif2, quit, agree to not save
# bye, bye, batch (for now :)
pyautogui.hotkey('ctrl', 'alt', 'g')
time.sleep(10)
pyautogui.hotkey('ctrl', 'alt', 'l')
time.sleep(5)
pyautogui.hotkey('tab')
time.sleep(1)
pyautogui.hotkey('tab')
time.sleep(1)
pyautogui.hotkey('tab')
time.sleep(1)
pyautogui.hotkey('enter')
time.sleep(10)
pyautogui.hotkey('ctrl', 'q')
time.sleep(5)
pyautogui.hotkey('shift', 'tab')
time.sleep(1)
pyautogui.hotkey('enter')

# uploading big gif and getting link to use later,
url = "https://api.imgur.com/3/image"
payload = {'name': 'b{today.month}-{today.day}'}
files=[('image',('gif.gif',open('gif.gif','rb'),'image/gif'))]
headers = {'Authorization': 'Bearer f0e27b94e6f8ead1480763e666c8587b73365850'}
response = requests.request("POST", url, headers=headers, data=payload, files=files)

# repurposing some code, that means define a function, right?
# find imgur url from api response
imgur_return = response.text
linkfile = open('C:/Users/Games/imgurlink.txt', 'a+')
linkfile.write(imgur_return)
linkfile.seek(0)
filetext = linkfile.read()
linkfile.close()

# delete the link.txt
os.remove('C:/Users/Games/imgurlink.txt')

imgurlink = re.findall(r'https:\/\/i\.imgur\.com\/.*\.gif', filetext)
# ibg = imgurlink
# print (ibg)

# if i don't do it this way the link comes out with ['brackets and quotes']
for imgur in imgurlink:
    ibg = imgur

# big gif is moved
src = "C:/Users/Games/Postman/files/gif.gif"
dest = "C:/PyProjects/GMIP/2024/2-2024/"
shutil.move("C:/Users/Games/Postman/files/gif.gif", dest)
rename ("C:/PyProjects/GMIP/2024/2-2024/gif.gif", f"C:/PyProjects/GMIP/2024/2-2024/b{today.month}-{today.day}.gif")

# little gif is moved
src = "C:/Users/Games/Postman/files/"
dest = "C:/PyProjects/GMIP/2024/2-2024/"
shutil.move("C:/Users/Games/Postman/files/gif2.gif", dest)
rename ("C:/PyProjects/GMIP/2024/2-2024/gif2.gif", f"C:/PyProjects/GMIP/2024/2-2024/{today.month}-{today.day}.gif")

# ID files
src = "C:/Users/Games/Downloads"
dest = "C:/Users/Games/Downloads/{}".format(directory)
files = os.listdir(src)
os.chdir(src)

# only move numbered png files
for file in files:
    if os.path.isfile(file):
        if file.endswith(").png"):
            shutil.move(file, dest)  

# open websites to upload gifs
url3 = "https://www.talkimg.com/"
webbrowser.open(url3)

# pyautogui to the rescue
time.sleep(5)

# click start uploading
pyautogui.click(953, 590)
time.sleep(5)

# click file enter box
pyautogui.click(221, 479)
time.sleep(5)

# type name of small gif
pyautogui.typewrite("gif2.gif")
time.sleep(5)

# move selection to save
pyautogui.hotkey('tab')
time.sleep(1)
pyautogui.hotkey('tab')
time.sleep(1)
pyautogui.hotkey('enter')
time.sleep(5)
pyautogui.click(949, 645)
time.sleep(5)

# click mouse to copy the talkimg link
pyautogui.click(1276, 625)

imgtalklink = pyperclip.paste()

# add post to clipboard for btctalk
r = Tk()
r.withdraw()
r.clipboard_clear()
r.clipboard_append(f"ChartBuddy's 24 hour Wall Observation recap\n[url={ibg}].{imgtalklink}.[/url]\nAll Credit to [url=https://bitcointalk.org/index.php?topic=178336.msg10084622#msg10084622]ChartBuddy[/url]")
r.update()

#this holds the post on the clipboard until posted
print("All done?")
input()

#runtime is calculated
stopTime = time.perf_counter()
runtime = {stopTime - startTime}

# save to csv file
f = open('C:/PyProjects/runtimes.csv', 'a', newline='')
writer = csv.writer(f)
writer.writerow(runtime)

EDIT: added comments to code about talkimg, pyautogui.  phrasing
sr. member
Activity: 114
Merit: 93
Fly free sweet Mango.
It worked, it worked, we worked! Man and machine in harmony, like the ending of...well I don't want to spoil that one.   Wink The runtime was surprisingly high at 288.3 s, which I guess I have no reason to doubt, but I should manually test the accuracy of my method.  I did set a delay of 5 seconds between downloads, whereas manually it was probably in the 2-3 second range.   But the real savings is in keystrokes.  Let's see, graciously estimating:
1. Scroll through the day and download each ChartBuddy post.  Scrolling through usually around 4 pages to right click, left click, enter, 24 times over: (3*24)+3
2. Drag each downloaded image into GIMP as a new layer.  Click and drag 24 images (I hope I only did that a few times): 2*24 more actions
3. Export the full size gif to imgur for the clickable link, and export the optimized gif for the in-thread talkimg hosted one.  Oh boy, let's remember: export as, 2 clicks; type the name.gif, 5; timing the frames, 4 actions; click scale and choose a size, 5 actions; export again: (2+5+4+5)*2
4. Put together the post and post it.  Not counting the typing: clicking on 2 bookmarks and 2 click and drags, 6
5. Archive images for later use in a monthly replay.  Creating a new folder and naming it by date, 7 clicks and keystrokes, plus a click and drag: 9
Roughly 170 actions in total.

And this time it was.  
1.Download: Click run, sit back and relax, 1
2.Import: loading, ctrl-alt-l, 3 tabs, enter, 7
3.Export: ctrl-alt-b, 2 tabs, enter, ctrl-alt-s, 2 tabs, enter, enter to script, 13
4.Post: same, 6
5.Archive: 0
27 actions, most of them mindless clicking through GIMP which I believe can do everything I need it to, all from the command line.  Exciting stuff!
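
Just to double-check the two tallies above with the same per-step numbers (a throwaway snippet, nothing the script needs):
Code:
manual = (3 * 24 + 3) + (2 * 24) + ((2 + 5 + 4 + 5) * 2) + 6 + 9   # the hand-count estimate
scripted = 1 + 7 + 13 + 6 + 0                                       # the new workflow
print(manual, scripted)   # 170 vs 27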

Changelog:
1.Download Wow.  I am now auto downloading the images from BCT, and naming them in the process.

3.Export  I changed the GIMP gif-saving plugin by adding the resize of the gif to the same plugin.  So instead of save, resize, save (which all have keyboard shortcuts), it's now just save, save2.  I also duplicated the plugin so I don't have to wait to move the big gif, called gif.gif, before the small gif is exported, which was also called gif.gif; the small gif is now gif2.gif.  That also means I get to get rid of 2 user inputs, which were only there to stop the code while it waited for me to tell it to proceed.  Well, it still has to wait once, I guess, but it will be so much faster, one less keystroke for sure. Smiley  

Working on.
2.Import Work in Progress:  I've got a code example of using the command line to start a Python script that calls GIMP plug-ins.  I'm trying to modify it for this purpose.
4.Post I guess I should have been working on that runtime posting bot after all.  Smiley
5.Archive  Blessed with an extra day this month to figure out the code for the monthly recap.  

Current Code:
Code:
from datetime import timedelta, date
import time
import shutil
import os
from os import rename
import webbrowser
import subprocess
from tkinter import Tk
import csv
import re
import urllib.request
import requests

# start runtimer
startTime = time.perf_counter()

# set dates for links and new folder
today = date.today()
tomorrow = today + timedelta(1)
      
# open websites to upload gifs
url2 = "https://imgur.com/upload"
url3 = "https://www.talkimg.com/"
webbrowser.open(url2)
webbrowser.open(url3)

# get the final 20 gif layers in reverse order, starting with 24
number = 24
url4 = 'https://bitcointalk.org/index.php?action=profile;u=110685;sa=showPosts;start=0'
response = requests.get(url4)

# turn response into textfile of the source code.
source_code = response.text

# read the source code, save it, and turn it into a string.  
textfile = open('C:/Users/Games/CBSource.txt', 'a+')
textfile.write(source_code)
textfile.seek(0)
filetext = textfile.read()
textfile.close()

# find matches using regex, and for every match download the image and number it.  resorted to asking copilot for help with my regex
matches = re.findall(r'https:\/\/www\.talkimg\.com\/images\/\w{4}/\w{2}\/\w{2}\/\w{5}\.png', filetext)
for link in matches:
    print(link)
    urllib.request.urlretrieve(link, 'download ({}).png'.format(number))
    number = number - 1
    time.sleep(5)

#delete the source code
os.remove('C:/Users/Games/CBSource.txt')

# get the first 4 images in reverse order, i copied my own code and changed the link.  Should have made a function and then fed it the links probably.
url5 = 'https://bitcointalk.org/index.php?action=profile;u=110685;sa=showPosts;start=20'
response5 = requests.get(url5)
source_code = response5.text
textfile5 = open('C:/Users/Games/CBSource2.txt', 'a+')
textfile5.write(source_code)
textfile5.seek(0)
filetext = textfile5.read()
textfile5.close()

# find matches using regex, and for first 4 matches download the image and number it
matches = re.findall(r'https:\/\/www\.talkimg\.com\/images\/\w{4}/\w{2}\/\w{2}\/\w{5}\.png', filetext)
for link in matches:
    if number >=1:
        urllib.request.urlretrieve(link, 'download ({}).png'.format(number))
        number = number - 1
        time.sleep(5)
        print(link)
    
# delete the soure code
os.remove('C:/Users/Games/CBSource2.txt')

# move em where they usually go, repurposing code
src = "C:/Users/Games/"
dest = "C:/Users/Games/Downloads/"
files = os.listdir(src)
os.chdir(src)

# only move numbered png files
for file in files:
    if os.path.isfile(file): # probably don't need this because of the next if?
        if file.endswith(").png"):
            shutil.move(file, dest)  

# name newfolder with date
directory = f"{today.month}-{today.day}"
parent_dir = "C:/Users/Games/Downloads"
newf = os.path.join(parent_dir, directory)
os.mkdir(newf)
print("Directory '%s' created" %directory)

# learn to scrape 24 images 1 second at a time, yatta!
# else manually download each file

# automatically open gimp, then filter to load all images
subprocess.Popen([r'C:/Program Files/GIMP 2/bin/gimp-2.10.exe'])

# export gifs press the any key then enter
print("Are the gifs exported?")
input()
print("Movin' on...")

# big gif is moved
src = "C:/PyProjects/tmp/"
dest = "C:/PyProjects/GMIP/2024/2-2024/"
shutil.move("C:/PyProjects/tmp/gif.gif", dest)
rename ("C:/PyProjects/GMIP/2024/2-2024/gif.gif", f"C:/PyProjects/GMIP/2024/2-2024/b{today.month}-{today.day}.gif")

# little gif is moved
src = "C:/PyProjects/tmp/"
dest = "C:/PyProjects/GMIP/2024/2-2024/"
shutil.move("C:/PyProjects/tmp/gif2.gif", dest)
rename ("C:/PyProjects/GMIP/2024/2-2024/gif2.gif", f"C:/PyProjects/GMIP/2024/2-2024/{today.month}-{today.day}.gif")

# ID files
src = "C:/Users/Games/Downloads"
dest = "C:/Users/Games/Downloads/{}".format(directory)
files = os.listdir(src)
os.chdir(src)

# only move numbered png files
for file in files:
    if os.path.isfile(file):
        if file.endswith(").png"):
            shutil.move(file, dest)  

# upload to two sites, gather links to input into console
ibg = input("imgur big gif link here")
imgtalk = input("imgtalk little gif link here")

# add post to clipboard for btctalk
r = Tk()
r.withdraw()
r.clipboard_clear()
r.clipboard_append(f"ChartBuddy's 24 hour Wall Observation recap\n[url={ibg}].{imgtalk}.[/url]\nAll Credit to [url=https://bitcointalk.org/index.php?topic=178336.msg10084622#msg10084622]ChartBuddy[/url]")
r.update()

#this holds the post on the clipboard until posted
print("All done?")
input()

#runtime is calculated
stopTime = time.perf_counter()
runtime = {stopTime - startTime}

# save to csv file
f = open('C:/PyProjects/runtimes.csv', 'a', newline='')
writer = csv.writer(f)
writer.writerow(runtime)
sr. member
Activity: 114
Merit: 93
Fly free sweet Mango.
It was the best of times, it was the worst of times.
Three days with no runtimes.  The first two were because I knew the code would fail.  I have no error handling for what happens if ChartBuddy decides to take an hour off, to maybe go out for a pizza.  So I didn't even try.  
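
For when error handling does make it in, a minimal sketch of the kind of guard this needs, placed right after the regex collects the image links (the threshold and message are just illustrative):
Code:
# bail out early instead of building a gif from an incomplete day
if len(matches) < 20:
    print("only found", len(matches), "ChartBuddy images, skipping today's gif")
    raise SystemExit(1)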

But now!  Such progress!  I found the pages on bitcointalk.org that have ChartBuddy's last 20 posts, and I have the auto download code, and it worked!  It's not the best way or the fastest way, probably, and I needed copilot's help, but it is definitely my way. Smiley Because it then crashed.  Huh  But here is the relevant new code, and with no BeautifulSoup; I only needed the page source to grab the links.  I knew the links would be in the form https://www.talkimg.com/images/(4 digit year)/(2 digit month)/(2 digit date)/*****.png, where only the last 5 characters of the filename would differ.  I tried and tried, but had to resort to copilot to get my regex correct.
Code:
# get the last 20 images in reverse order, starting with 24, ChartBuddy is user 110685
url = 'https://bitcointalk.org/index.php?action=profile;u=110685;sa=showPosts;start=0'

# send a get request and get the response object
response = requests.get(url)

# turn response into textfile of the source code, not sure if needed, what else did I request?
source_code = response.text

# read the source code, save it, and turn it into a string.  Why am i saving it if I'm going to delete it, can probably skip this instruction, but it might help with error handling?  
textfile = open('C:/Users/Games/CBSource.txt', 'a+')
textfile.write(source_code)
textfile.seek(0)
filetext = textfile.read()
textfile.close()

number = 24

# find matches using regex, and for every match download the image, and number it.  i resorted to asking copilot for help with my regex
matches = re.findall(r'https:\/\/www\.talkimg\.com\/images\/\w{4}/\w{2}\/\w{2}\/\w{5}\.png', filetext)
for link in matches:
    print(link)
    urllib.request.urlretrieve(link, 'download({}).png'.format(number))
    number = number - 1
    time.sleep(5)

#delete the source code
os.remove('C:/Users/Games/CBSource.txt')

One thing I'm still ignorant of, which is okay, is why I can sometimes pass variables directly, sometimes have to use curly brackets, and other times use empty curly brackets with the defining at the end, but learning is living.
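
Those three spellings are just different string-formatting styles; a tiny comparison in plain Python, nothing specific to this script:
Code:
name = "gif2.gif"

# f-string: the variable goes directly inside the curly brackets
print(f"uploading {name}")

# str.format(): empty curly brackets are placeholders, filled in at the end
print("uploading {}".format(name))

# passing the variable directly: plain concatenation, no brackets at all
print("uploading " + name)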

Here is the full script I ran today, which I know is going to work tomorrow, after making a tiny change.  Or maybe it won't. Cheesy  If it does, I am going to try and work on error handling.  
If you can see why this script crashed you are an awesome debugger!  (hint: it happens early  Grin)
Code:
from datetime import timedelta, date
import time
import shutil
import os
from os import rename
import webbrowser
import subprocess
from tkinter import Tk
import csv
import re
import urllib.request
import requests

# start runtimer
startTime = time.perf_counter()

#set dates for links and new folder
today = date.today()
tomorrow = today + timedelta(1)
      
# open links to download and upload images if only I could get out of the way
# url1 = f"https://injastic.space/search?after_date={today.strftime("%Y")}-{today.strftime("%m")}-{today.strftime("%d")}T07%3A55%3A00&author=ChartBuddy&before_date={tomorrow.strftime("%Y")}-{tomorrow.strftime("%m")}-{tomorrow.strftime("%d")}T07%3A55%3A00"
url2 = "https://imgur.com/upload"
url3 = "https://www.talkimg.com/"
webbrowser.open(url1)
webbrowser.open(url2)
webbrowser.open(url3)

# holy cow is this going to work all together on the first try
# get the last 20 images in reverse order, starting with 24
# set the file numbering start
number = 24

# get the last 20 images in reverse order, starting with 24
url = 'https://bitcointalk.org/index.php?action=profile;u=110685;sa=showPosts;start=0'

# Send a get request and get the response object
response = requests.get(url)

# turn response into textfile of the source code, not sure if needed, what else did I request?
source_code = response.text

# read the source code, save it, and turn it into a string.  Why am i saving it if I'm going to delete it?  
textfile = open('C:/Users/Games/CBSource.txt', 'a+')
textfile.write(source_code)
textfile.seek(0)
filetext = textfile.read()
textfile.close()

# find matches using regex, and for every match download the image and number it.  resorted to asking copilot for help with my regex
matches = re.findall(r'https:\/\/www\.talkimg\.com\/images\/\w{4}/\w{2}\/\w{2}\/\w{5}\.png', filetext)
for link in matches:
    print(link)
    urllib.request.urlretrieve(link, 'download({}).png'.format(number))
    number = number - 1
    time.sleep(5)

#delete the source code
os.remove('C:/Users/Games/CBSource.txt')

# get the first 4 images in reverse order, i copied my own code and changed the link.  Should have made a function and then fed it the links probably.
# i renamed everything here with 2, i'm not sure i needed to, but I think I did it correctly
url2 = 'https://bitcointalk.org/index.php?action=profile;u=110685;sa=showPosts;start=20'

# Send a GET request and get the response object
response2 = requests.get(url2)

# turn response into textfile of source
source_code = response2.text

# read the source code and turn it into a string
textfile2 = open('C:/Users/Games/CBSource2.txt', 'a+')
textfile2.write(source_code)
textfile2.seek(0)
filetext = textfile2.read()
textfile2.close()

# find matches using regex, and for first 4 matches download the image and number it
# tried using finditer i think it was to set a limit for the first four results, but i was getting another string, so this seemed like a workaround
matches = re.findall(r'https:\/\/www\.talkimg\.com\/images\/\w{4}/\w{2}\/\w{2}\/\w{5}\.png', filetext)
for link in matches:
    if number >=1:
        urllib.request.urlretrieve(link, 'download({}).png'.format(number))
        number = number - 1
        time.sleep(5)
        print(link)
    
# delete the soure code
os.remove('C:/Users/Games/CBSource2.txt')

# move em where they usually repurposing code
# ID files
src = "C:/Users/Games/"
dest = "C:/Users/Games/Downloads/"
files = os.listdir(src)
os.chdir(src)

# i have named the new downloads to look like the old manual downloads
# only move numbered png files
for file in files:
    if os.path.isfile(file): # probably don't need this because of the next if?
        if file.endswith(").png"):
            shutil.move(file, dest)  

#back to the old code
# name newfolder with date
directory = f"{today.month}-{today.day}"
parent_dir = "C:/Users/Games/Downloads"
newf = os.path.join(parent_dir, directory)
os.mkdir(newf)
print("Directory '%s' created" %directory)

# learn to scrape 24 images 1 second at a time,
# else manually download each file

# automatically open gimp, then filter to load all images
subprocess.Popen([r'C:/Program Files/GIMP 2/bin/gimp-2.10.exe'])

# export big gif press the any key then enter
print("Is big gif exported?")
input()
print("Movin' on...")

# big gif is moved
src = "C:/PyProjects/tmp/"
dest = "C:/PyProjects/GMIP/2024/2-2024/"
shutil.move("C:/PyProjects/tmp/gif.gif", dest)
rename ("C:/PyProjects/GMIP/2024/2-2024/gif.gif", f"C:/PyProjects/GMIP/2024/2-2024/b{today.month}-{today.day}.gif")

# scale image and export little gif
print("Is little gif exported?")
input()
print("Movin' on...")

# little gif is moved
src = "C:/PyProjects/tmp/"
dest = "C:/PyProjects/GMIP/2024/2-2024/"
shutil.move("C:/PyProjects/tmp/gif.gif", dest)
rename ("C:/PyProjects/GMIP/2024/2-2024/gif.gif", f"C:/PyProjects/GMIP/2024/2-2024/{today.month}-{today.day}.gif")

# ID files
src = "C:/Users/Games/Downloads"
dest = "C:/Users/Games/Downloads/{}".format(directory)
files = os.listdir(src)
os.chdir(src)

# i have a dummy file present so new downloads look like download(*).png
# only move numbered png files
for file in files:
    if os.path.isfile(file):
        if file.endswith(").png"):
            shutil.move(file, dest)  

# upload to two sites, gather links to input into console
ibg = input("imgur big gif link here")
imgtalk = input("imgtalk little gif link here")

# add post to clipboard for btctalk
r = Tk()
r.withdraw()
r.clipboard_clear()
r.clipboard_append(f"ChartBuddy's 24 hour Wall Observation recap\n[url={ibg}].{imgtalk}.[/url]\nAll Credit to [url=https://bitcointalk.org/index.php?topic=178336.msg10084622#msg10084622]ChartBuddy[/url]")
r.update()

#this holds the post on the clipboard until posted
print("All done?")
input()

#runtime is calculated
stopTime = time.perf_counter()
runtime = {stopTime - startTime}

# save to csv file
f = open('C:/PyProjects/runtimes.csv', 'a', newline='')
writer = csv.writer(f)
writer.writerow(runtime)

EDIT: added details about regex, added a line of code defining number to first snippet, changelog soon Smiley  