
Topic: [Contest] - Win 2 BTC for the best retargeting algorithm. Python testbed inside! - page 3.

newbie
Activity: 34
Merit: 0
Indeed, that's a good idea. Here is my version: I count the number of consecutive full blocks and divide the adjustment by 2^FullBlockCount. I also skip this when only the single previous block is full, as that induces too much oscillation.

Code:
import datetime
import random
import numpy as np
import matplotlib.pyplot as plt

# sudo apt-get install python-tk
# pip2 install numpy matplotlib

def create_block(timestamp, num_pow):
return {'time_stamp' : timestamp, 'num_pow' : num_pow, 'first_work_factor':0}

def create_work(idx, factor, target):
return {'id': idx, 'base_executions_per_second' : factor, 'target' : target}

def addSecs(tm, secs):
    fulldate = tm + datetime.timedelta(seconds=secs)
    return fulldate

def randomDuration():
if do_not_randomize_block_times_but_do_always_60_sec:
return 60
else:
return int(random.uniform(25, 120))

current_time = datetime.datetime.now()

# experiment with the number of work packages
works_to_create = 3

generate_blocks = 100
current_height = 0
blockchain = []
work_packages = []
base_target = 0x000000ffffffffffffffffffffffffff
poisson_distribution = np.random.poisson(5, generate_blocks)
stretch_number_pows = True
do_not_randomize_block_times_but_do_always_60_sec = False
new_miner_every_xth_second = 10
how_many_miners_come_or_go = 70242
initial_miners = 199381

def currently_active_miners(current_height):
# get the current active number of miners in relation of blockchain height,
# but the number of miners increases by 1 every 10 blocks
increases = int(current_height/new_miner_every_xth_second) * how_many_miners_come_or_go
return initial_miners+increases

# for now, leave poisson distributed variable miner count out and assume only one miner
ret = poisson_distribution[current_height]
if ret > 0:
return ret
else:
return 1

def miner_pows_based_on_target(work, height, dur):
current_target = work["target"]
factor = (current_target / base_target) * 1.0*dur/60.0
actual_pow_mined = work["base_executions_per_second"]
# random jitter
actual_pow_mined = abs((actual_pow_mined - 1) + random.uniform(1,2)) * currently_active_miners(height)
actual_pow_mined = actual_pow_mined *factor
# rate limit to 20 pows per block
        if actual_pow_mined > 20:
            actual_pow_mined = 20
if actual_pow_mined < 0:
actual_pow_mined = 0
if actual_pow_mined == 0:
print "mined",actual_pow_mined,work["base_executions_per_second"]*factor,currently_active_miners(height)
return actual_pow_mined
def kimoto(x):
    return  1 + (0.7084 * pow(((x)/(144)), -1.228));
def retarget_work(block, x):
    targetI = x["target"]
    pastMass = 0
    counter = 0
    current_block = block
    current_block_timestamp = blockchain[current_block]["time_stamp"]
    adjustment = 0
    isFull = True
    fullCnt = 0
    isEmpty = True
    emptyCnt = 0
    while isFull or isEmpty:
        if isFull and blockchain[current_block]["num_pow"][x["id"]] == 20:
            fullCnt += 1
        else:
            isFull = False
        if isEmpty and blockchain[current_block]["num_pow"][x["id"]] == 0:
            emptyCnt += 1
        else:
            isEmpty = False
        current_block -= 1
        if current_block < 1:
            break
    current_block = block
    while True:
        counter += 1
        pastMass += blockchain[current_block]["num_pow"][x["id"]]
        seconds_passed = (current_block_timestamp - blockchain[current_block-1]["time_stamp"]).seconds
        current_block -= 1
        if seconds_passed < 1:
            seconds_passed = 1
        trs_per_second = float(pastMass) / float(seconds_passed)
        target_per_second = 10.0 / 60.0
        adjustment = target_per_second / trs_per_second
        kim = kimoto(pastMass * 30)
        #print("kim : " + str(kim) + " adjustment : " + str(adjustment))
        if adjustment > kim or adjustment < (1.0/kim):
            break
        if current_block < 1:
            break
    if fullCnt > 1:
        adjustment = adjustment / (1 << (fullCnt))
    if emptyCnt > 1:
        adjustment = adjustment * (1 << (emptyCnt))
    targetI = targetI * adjustment
    if targetI>base_target:
            targetI = base_target
    if x["id"] == 0:
            blockchain[block]["first_work_factor"] = adjustment
    x["target"] = targetI


def retarget_works(block):
for x in work_packages:
retarget_work(block,x)

# Here we create up to three different work objects
if works_to_create>=1:
work_packages.append(create_work(0, 20, base_target))
if works_to_create>=2:
work_packages.append(create_work(1, 60, base_target))
if works_to_create>=3:
work_packages.append(create_work(2, 35, base_target))

while current_height < generate_blocks:
dur = randomDuration()
current_time = addSecs(current_time,dur) # random block generation time
block_pows = {}
for x in work_packages:
num_pow = miner_pows_based_on_target(x, current_height, dur) # mine some POW depending on the current difficulty
block_pows[x["id"]] = num_pow
blockchain.append(create_block(current_time, block_pows))
retarget_works(current_height) # This retargeting method is the "critical part here"
current_height = current_height + 1


values = []
target_factors = []
ideal = []
for idx in range(len(blockchain)):
if idx == 0:
continue
x = blockchain[idx]
x_minus_one = blockchain[idx-1]
time_passed = (x["time_stamp"] - x_minus_one["time_stamp"]).seconds
strech_normalizer = time_passed / 60.0
if stretch_number_pows == False:
ideal.append(works_to_create*10*strech_normalizer)
else:
ideal.append(works_to_create*10)
sum_x = 0
for y in x["num_pow"]:
sum_x += x["num_pow"][y]
if stretch_number_pows == False:
values.append(sum_x)
else:
values.append(sum_x/strech_normalizer)
x = range(generate_blocks)[1:]
y = values

#fig = plt.figure()
ax0 = plt.subplot(211)
if stretch_number_pows:
ax0.set_ylabel('POW rate per 60s', color='b')
else:
ax0.set_ylabel('POWs per Block', color='b')
ax0.set_xlabel('Block height')
ax0.plot(x,y,'-o',x,ideal,'r--')
values = []
ideal = []
target_factors = []
for idx in range(len(blockchain)):
if idx == 0:
continue
x = blockchain[idx]
x_minus_one = blockchain[idx-1]
time_passed = (x["time_stamp"] - x_minus_one["time_stamp"]).seconds
strech_normalizer = time_passed / 60.0
if stretch_number_pows == False:
ideal.append(10*strech_normalizer)
else:
ideal.append(10)
sum_x = 0
sum_x += x["num_pow"][0]
#print "sumx",sum_x
if stretch_number_pows == False:
values.append(sum_x)
else:
values.append(sum_x/strech_normalizer)
x = range(generate_blocks)[1:]
y = values
plt.title('All Works: Total POWs')

ax1 = plt.subplot(212)
ax1.plot(x,y,'-o',x,ideal,'r--')
ax1.set_xlabel('Block Height')
# Make the y-axis label and tick labels match the line color.
if stretch_number_pows:
ax1.set_ylabel('POW rate per 60s', color='b')
else:
ax1.set_ylabel('POWs per Block', color='b')

for tl in ax1.get_yticklabels():
    tl.set_color('b')



ax2 = ax1.twinx()
ax2.set_ylim(0.4, 1.6)
ax2.bar(x,[x["first_work_factor"] for x in blockchain][1:],0.45,color='#deb0b0', alpha=0.2)
ax2.set_ylabel('Retargeting Factor', color='r')
for tl in ax2.get_yticklabels():
    tl.set_color('r')
plt.title('First Work: POWs + Retargeting Factor')

plt.show()

With random block time:

https://i.imgur.com/dqPTtAG.png

With fixed block time:

https://i.imgur.com/f1Rml4j.png
legendary
Activity: 1260
Merit: 1168
Okay, I see, but like I said, this is due to the fact that we can't tell that there are that many miners on the network. All we see is 20 transactions per block. Because of this 20-transaction limit, we can't know how many transactions would really have been mined, so we can't know the hashing power of the network.

Well, this is the tricky question. See this submission here:

I have just added an "optimistic" down-throttling clause into the while loop:
Code:
        # throttle hard if the 20-PoW limit was reached in the most recent block
        if counter == 1 and pastMass == 20:
            adjustment_speed_up = 0.05

So we do some "hard-core" throttling, which will likely result in far too few transactions per block; KGW can then "fix things" in the next block.
This approach is crude and not perfect, but it shows how greedy approaches could work.

Maybe some "intelligent exponential backoff, just as implemented in TCP Vegas"?
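
To make the backoff idea concrete, here is a minimal sketch of an AIMD-style rule (additive increase, multiplicative decrease, in the spirit of TCP congestion control) applied to a per-work target. This is not part of the testbed above; the function name and the constants are illustrative guesses, not tuned values.

Code:
# Hypothetical sketch, not the testbed's retargeting code:
# back off hard when the 20-PoW block limit is hit (we cannot see the real
# rate then), and recover in small bounded steps otherwise.
def backoff_retarget(target, base_target, pows_last_block, desired_pows=10):
    if pows_last_block >= 20:
        # limit reached: multiplicative decrease of the target
        target = target * 0.5
    elif pows_last_block < desired_pows:
        # below the desired rate: gentle recovery, capped at base_target
        target = min(base_target, target * 1.05)
    return target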

Before, your code:



After, changed code:



Code:
import datetime
import random
import numpy as np
import matplotlib.pyplot as plt

# sudo apt-get install python-tk
# pip2 install numpy matplotlib

def create_block(timestamp, num_pow):
return {'time_stamp' : timestamp, 'num_pow' : num_pow, 'first_work_factor':0}

def create_work(idx, factor, target):
return {'id': idx, 'base_executions_per_second' : factor, 'target' : target}

def addSecs(tm, secs):
    fulldate = tm + datetime.timedelta(seconds=secs)
    return fulldate

def randomDuration():
if do_not_randomize_block_times_but_do_always_60_sec:
return 60
else:
return int(random.uniform(25, 120))

current_time = datetime.datetime.now()

# experiment with the number of work packages
works_to_create = 3

generate_blocks = 100
current_height = 0
blockchain = []
work_packages = []
base_target = 0x000000ffffffffffffffffffffffffff
poisson_distribution = np.random.poisson(5, generate_blocks)
stretch_number_pows = True
do_not_randomize_block_times_but_do_always_60_sec = True
new_miner_every_xth_second = 10
how_many_miners_come_or_go = 500
significant_miner_drop_to_100_on_block = 60
initial_miners = 700000000

def currently_active_miners(current_height):
if current_height>=significant_miner_drop_to_100_on_block:
return 100
# get the current active number of miners in relation of blockchain height,
# but the number of miners increases by 1 every 10 blocks
increases = int(current_height/new_miner_every_xth_second) * how_many_miners_come_or_go
return initial_miners+increases

# for now, leave poisson distributed variable miner count out and assume only one miner
ret = poisson_distribution[current_height]
if ret > 0:
return ret
else:
return 1

def miner_pows_based_on_target(work, height, dur):
current_target = work["target"]
factor = (current_target / base_target) * 1.0*dur/60.0
actual_pow_mined = work["base_executions_per_second"]
# random jitter
actual_pow_mined = abs((actual_pow_mined - 1) + random.uniform(1,2)) * currently_active_miners(height)
actual_pow_mined = actual_pow_mined *factor
# rate limit to 20 pows per block
if actual_pow_mined>20:
actual_pow_mined = 20
if actual_pow_mined < 0:
actual_pow_mined = 0
if actual_pow_mined == 0:
print "mined",actual_pow_mined,work["base_executions_per_second"]*factor,currently_active_miners(height)
return actual_pow_mined
def kimoto(x):
    return  1 + (0.7084 * pow(((x)/(144)), -1.228));
def retarget_work(block, x):
    targetI = x["target"]
    pastMass = 0
    counter = 0
    current_block = block
    current_block_timestamp = blockchain[current_block]["time_stamp"]
    adjustment = 0
    adjustment_speed_up = 1
    while True:
        counter += 1
        pastMass += blockchain[current_block]["num_pow"][x["id"]]

        # throttle hard if the 20-PoW limit was reached in the most recent block
        if counter == 1 and pastMass == 20:
            adjustment_speed_up = 0.05

        seconds_passed = (current_block_timestamp - blockchain[current_block-1]["time_stamp"]).seconds
        current_block -= 1
        if seconds_passed < 1:
            seconds_passed = 1
        trs_per_second = float(pastMass) / float(seconds_passed)
        target_per_second = 10.0 / 60.0
        adjustment = target_per_second / trs_per_second
        kim = kimoto(pastMass * 30)
        #print("kim : " + str(kim) + " adjustment : " + str(adjustment))
        if adjustment > kim or adjustment < (1.0/kim):
            print("SPEEDUP: " + str(adjustment_speed_up) + ", count: " + str(counter) + " kim : " + str(kim) + " 1/kim : " + str(1.0/kim) + " adj : " + str(adjustment))
            break
        if current_block < 1:
            break
    targetI = targetI * adjustment * adjustment_speed_up
    if targetI>base_target:
            targetI = base_target
    if x["id"] == 0:
            blockchain[block]["first_work_factor"] = adjustment
    x["target"] = targetI


def retarget_works(block):
for x in work_packages:
retarget_work(block,x)

# Here we create up to three different work objects
if works_to_create>=1:
work_packages.append(create_work(0, 20, base_target))
if works_to_create>=2:
work_packages.append(create_work(1, 60, base_target))
if works_to_create>=3:
work_packages.append(create_work(2, 35, base_target))

while current_height < generate_blocks:
dur = randomDuration()
current_time = addSecs(current_time,dur) # random block generation time
block_pows = {}
for x in work_packages:
num_pow = miner_pows_based_on_target(x, current_height, dur) # mine some POW depending on the current difficulty
block_pows[x["id"]] = num_pow
blockchain.append(create_block(current_time, block_pows))
retarget_works(current_height) # This retargeting method is the "critical part here"
current_height = current_height + 1


values = []
target_factors = []
ideal = []
for idx in range(len(blockchain)):
if idx == 0:
continue
x = blockchain[idx]
x_minus_one = blockchain[idx-1]
time_passed = (x["time_stamp"] - x_minus_one["time_stamp"]).seconds
strech_normalizer = time_passed / 60.0
if stretch_number_pows == False:
ideal.append(works_to_create*10*strech_normalizer)
else:
ideal.append(works_to_create*10)
sum_x = 0
for y in x["num_pow"]:
sum_x += x["num_pow"][y]
if stretch_number_pows == False:
values.append(sum_x)
else:
values.append(sum_x/strech_normalizer)
x = range(generate_blocks)[1:]
y = values

#fig = plt.figure()
ax0 = plt.subplot(211)
if stretch_number_pows:
ax0.set_ylabel('POW rate per 60s', color='b')
else:
ax0.set_ylabel('POWs per Block', color='b')
ax0.set_xlabel('Block height')
ax0.plot(x,y,'-o',x,ideal,'r--')
values = []
ideal = []
target_factors = []
for idx in range(len(blockchain)):
if idx == 0:
continue
x = blockchain[idx]
x_minus_one = blockchain[idx-1]
time_passed = (x["time_stamp"] - x_minus_one["time_stamp"]).seconds
strech_normalizer = time_passed / 60.0
if stretch_number_pows == False:
ideal.append(10*strech_normalizer)
else:
ideal.append(10)
sum_x = 0
sum_x += x["num_pow"][0]
#print "sumx",sum_x
if stretch_number_pows == False:
values.append(sum_x)
else:
values.append(sum_x/strech_normalizer)
x = range(generate_blocks)[1:]
y = values
plt.title('All Works: Total POWs')

ax1 = plt.subplot(212)
ax1.plot(x,y,'-o',x,ideal,'r--')
ax1.set_xlabel('Block Height')
# Make the y-axis label and tick labels match the line color.
if stretch_number_pows:
ax1.set_ylabel('POW rate per 60s', color='b')
else:
ax1.set_ylabel('POWs per Block', color='b')

for tl in ax1.get_yticklabels():
    tl.set_color('b')



ax2 = ax1.twinx()
ax2.set_ylim(0.4, 1.6)
ax2.bar(x,[x["first_work_factor"] for x in blockchain][1:],0.45,color='#deb0b0', alpha=0.2)
ax2.set_ylabel('Retargeting Factor', color='r')
for tl in ax2.get_yticklabels():
    tl.set_color('r')
plt.title('First Work: POWs + Retargeting Factor')

plt.show()
hero member
Activity: 994
Merit: 513
Ok, I probably look extremely stupid for even asking this (I certainly feel like it), but here it is anyway:

Can't we use Elastic itself for this?

It seems we have all it needs to at least give it a try:

- we have an array of possible variables.
- we have at least some kind of algorithm.

Couldn't we "just" brute-force this to find factors per set of special cases that can be applied? As in: play through as many combinations as possible, find the optimal factor per combination, then either take a median or find a range in which the same factor can be used?

I realize that an algorithm is more elegant, but it seems like a library of conditional factors may work as well. I don't mean having a factor ready for every single case, more like a factor per range. My hope is that if we play through enough combinations to derive these factors (like the spike every few blocks in the graphs shown: if you know when to expect such a spike and apply at least a little bit of stochastics, you may be able to level them out just by reasoning "I see condition X, the most likely scenario is condition Y, so that's what I'm going for").

Does that make any sense at all?

Now, to actually use Elastic to find these conditional factors, I'd suggest making a test run on testnet with greatly simplified conditions. If this works, we could "contract" miners to work on a special testnet version whose only purpose is to find these conditional factors. Since we would know the power and number of miners on this network, we wouldn't need an optimal algorithm yet. The miners would be paid in BTC.
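
For what it's worth, here is a rough sketch of how that brute-force search could look when run against the testbed. The simulate callback is an assumption (it would rerun the testbed with a given factor table and return the per-block PoW counts for one work); the condition buckets, candidate factors and squared-error score are illustrative choices, not a worked-out proposal.

Code:
import itertools

def score(factor_table, simulate):
    # simulate(factor_table) is assumed to rerun the testbed and return the
    # per-block PoW counts for one work package
    pows = simulate(factor_table)
    return sum((p - 10.0) ** 2 for p in pows)

def brute_force_factors(simulate, buckets=(0, 5, 10, 15, 20),
                        candidates=(0.5, 0.75, 0.9, 1.0, 1.1, 1.25, 1.5)):
    # exhaustively try every assignment of a candidate factor to each bucket
    best_table, best_err = None, float('inf')
    for combo in itertools.product(candidates, repeat=len(buckets)):
        table = dict(zip(buckets, combo))
        err = score(table, simulate)
        if err < best_err:
            best_table, best_err = table, err
    return best_table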
newbie
Activity: 34
Merit: 0
Okay, I see, but like I said, this is due to the fact that we can't tell that there are that many miners on the network. All we see is 20 transactions per block. Because of this 20-transaction limit, we can't know how many transactions would really have been mined, so we can't know the hashing power of the network.
legendary
Activity: 1260
Merit: 1168
What do you mean by slow initialization?
If I understand your code correctly, the huge spikes are due to the fact that, in the beginning, the number of miners is multiplied by 6, then by 2, then by 0.68, and so on, so you get this ripple effect. You can't do much about the spikes; you have to wait for the retargeting to happen for them to be corrected.
In the latest code (the one I attached to your reply) we start with 199381 miners, and 70242 more are added every 10 blocks.
The problem is the poor initial "difficulty", which is very, very low. It takes a few iterations to "converge"; until we converge, we always max out at the 20 possible transactions per block.

The fewer initial miners we have, the faster it converges.

But since "pool hopping" may cause these very effects (huge loads of miners come suddenly) I think there are more elegant ways to level such spikes out. Pure KGW does not do that sufficiently, yet it does it good!

It might be wise to exceed the minimum possible factor of 0.5 (which I observed during this phase) for some edge cases.

Code:
how_many_miners_come_or_go = 70242
initial_miners = 199381
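
As a back-of-the-envelope illustration of why the start is so slow: if each block can shrink the target by at most the factor 0.5 observed above, then converging from an overshoot roughly proportional to the initial miner count takes on the order of log2 of that count in blocks. The sketch below is assumption-laden (constant hash power, a strict 0.5 floor, a base target tuned for a single miner) and is not part of the testbed.

Code:
import math

def blocks_to_converge(initial_miners, base_miners=1, min_factor=0.5):
    # overshoot ~ ratio of actual hash power to what the starting target assumes
    overshoot = float(initial_miners) / base_miners
    # each block multiplies the target by at least min_factor, so we need
    # about log_{1/min_factor}(overshoot) blocks to catch up
    return int(math.ceil(math.log(overshoot, 1.0 / min_factor)))

print blocks_to_converge(199381)   # roughly 18 blocks at best with a 0.5 floor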
newbie
Activity: 34
Merit: 0
What do you mean by slow initialization?
If I understand your code correctly, the huge spikes are due to the fact that, in the beginning, the number of miners is multiplied by 6, then by 2, then by 0.68, and so on, so you get this ripple effect. You can't do much about the spikes; you have to wait for the retargeting to happen for them to be corrected.
legendary
Activity: 1260
Merit: 1168
I meant something like a Python example using the code above.
Your entry will later be tested automatically for its standard error compared to the ideal tx rate.
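
For reference, the sketch below illustrates one reasonable interpretation of that check (a root-mean-square deviation from the ideal rate) against the testbed's blockchain structure; the exact metric is not fixed here and this is not the official scoring code.

Code:
import math

def standard_error_vs_ideal(blockchain, works_to_create, per_work_ideal=10):
    # compares the 60s-normalized PoW rate of each block against the ideal
    # works_to_create * per_work_ideal, mirroring the testbed's plotting code
    errors = []
    for idx in range(1, len(blockchain)):
        secs = (blockchain[idx]["time_stamp"]
                - blockchain[idx - 1]["time_stamp"]).seconds
        normalizer = max(secs, 1) / 60.0
        total = sum(blockchain[idx]["num_pow"].values()) / normalizer
        errors.append(total - works_to_create * per_work_ideal)
    if not errors:
        return 0.0
    return math.sqrt(sum(e * e for e in errors) / len(errors))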
newbie
Activity: 3
Merit: 0
* Kimoto's Gravity Well
   - Auroracoin implementation (from MegaCoin).
   - Dash implementation (the above plus handling of negative uint256 and a parameter change).
   - Vulnerable to the timewarp attack (which has been carried out on an altcoin). Timewarp attacks attempt to decrease the difficulty in order to mine many coins quickly, or, combined with a 51% attack, to mine a new chain from the genesis block.

* Nite's Gravity Well
   - Implements a fix for the KGW timewarp attack.
   - I can't find any particular reference to the other changes notsofast refers to, and the AuroraCoin source doesn't even appear to use it (they changed to a different calculation for a multi-PoW-algorithm setup AFAICT).

* DigiShield
   - DigiByte implementation of v3 (there are four versions; see above and below that function).
   - Designed to overcome the issues the Kimoto Gravity Well algorithm has with recovering from large multipool engagements.
   - Asymmetric (allows difficulty to decrease faster than it can increase).
   - Possibly vulnerable to timewarp attacks, but no proof yet.

* Dark Gravity Wave
   - Dash implementation.
   - Combines multiple exponential and simple moving averages to smooth difficulty readjustments and mitigate exploits in the Kimoto Gravity Well (a minimal sketch of the moving-average idea follows this list).
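
A minimal sketch of the moving-average idea behind Dark Gravity Wave, translated to the testbed's per-work PoW counts. The real Dash implementation works on difficulty bits and block times and blends several averages; the window size and clamp bounds here are illustrative only.

Code:
def dgw_like_retarget(recent_pow_counts, target, base_target,
                      desired_per_block=10, window=24):
    # average the PoW counts of the last `window` blocks and nudge the target
    # towards the desired per-block rate, with a clamped per-block step
    window_counts = recent_pow_counts[-window:]
    if not window_counts:
        return target
    avg = float(sum(window_counts)) / len(window_counts)
    if avg <= 0:
        # nothing mined in the window: relax the target (factor is illustrative)
        return min(base_target, target * 2)
    adjustment = desired_per_block / avg
    adjustment = max(1.0 / 3.0, min(3.0, adjustment))  # smooth/clamp, DGW-style
    return min(base_target, target * adjustment)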
legendary
Activity: 1260
Merit: 1168
hendryrodriguez1990, thanks for the suggestion. If you want to take a run at the 2 BTC, you need to provide a short proof of concept. Wink
newbie
Activity: 3
Merit: 0
A better choice than the kimoto function is the DigiShield implementation. In summary, DigiShield is a balanced, asymmetrical approach to difficulty retargeting. You don't want to let the difficulty go too high too fast, but you need to give it enough room to catch up quickly. The same goes for downswings: since it takes longer to discover new blocks, you need to give it more room to go down, but not enough to send it to the floor.

The DigiShield code can be found here, between lines 833 and 1007: https://github.com/digibyte/DigiByteProject/blob/master/src/main.cpp
Take a look at the Dogecoin difficulty chart: http://www.coinwarz.com/difficulty-charts/dogecoin-difficulty-chart.
You can see how multipools were really mining most of the coins, leaving the dedicated Doge miners to pick up the slack and get the short end of the stick when it comes to new coins. You can also see when DigiShield took effect and that this no longer occurs.
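
To illustrate the asymmetry described above, here is a tiny hedged sketch of an asymmetric clamp on a per-block adjustment factor, where a factor above 1 relaxes the target (difficulty down) and a factor below 1 tightens it (difficulty up). The percentages are illustrative and are not DigiByte's actual DigiShield parameters.

Code:
def asymmetric_clamp(raw_adjustment, max_up=1.20, max_down=0.92):
    # allow the target to rise (difficulty fall) faster than it may fall
    if raw_adjustment > max_up:
        return max_up
    if raw_adjustment < max_down:
        return max_down
    return raw_adjustment

# usage sketch: new_target = min(base_target, target * asymmetric_clamp(raw))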
legendary
Activity: 1260
Merit: 1168
loracle, correction: yours works better for randomized block times. So I guess it's a valid submission for now!  Wink
But I expect that over the next days people will try to level out the "slow initialization phase" at the beginning. Maybe it's worth taking a look at this!
legendary
Activity: 1260
Merit: 1168
...

Thanks, but in the example below this approach underperforms relative to the solution in the submission just above yours.

Yours:



Other Submission:




Code (your code, modified):

Code:
import datetime
import random
import numpy as np
import matplotlib.pyplot as plt

# sudo apt-get install python-tk
# pip2 install numpy matplotlib

def create_block(timestamp, num_pow):
return {'time_stamp' : timestamp, 'num_pow' : num_pow, 'first_work_factor':0}

def create_work(idx, factor, target):
return {'id': idx, 'base_executions_per_second' : factor, 'target' : target}

def addSecs(tm, secs):
    fulldate = tm + datetime.timedelta(seconds=secs)
    return fulldate

def randomDuration():
if do_not_randomize_block_times_but_do_always_60_sec:
return 60
else:
return int(random.uniform(25, 120))

current_time = datetime.datetime.now()

# experiment with the number of work packages
works_to_create = 3

generate_blocks = 100
current_height = 0
blockchain = []
work_packages = []
base_target = 0x000000ffffffffffffffffffffffffff
poisson_distribution = np.random.poisson(5, generate_blocks)
stretch_number_pows = True
do_not_randomize_block_times_but_do_always_60_sec = True
new_miner_every_xth_second = 10
how_many_miners_come_or_go = 70242
initial_miners = 199381

def currently_active_miners(current_height):
# get the current active number of miners in relation of blockchain height,
# but the number of miners increases by 1 every 10 blocks
increases = int(current_height/new_miner_every_xth_second) * how_many_miners_come_or_go
return initial_miners+increases

# for now, leave poisson distributed variable miner count out and assume only one miner
ret = poisson_distribution[current_height]
if ret > 0:
return ret
else:
return 1

def miner_pows_based_on_target(work, height, dur):
current_target = work["target"]
factor = (current_target / base_target) * 1.0*dur/60.0
actual_pow_mined = work["base_executions_per_second"]
# random jitter
actual_pow_mined = abs((actual_pow_mined - 1) + random.uniform(1,2)) * currently_active_miners(height)
actual_pow_mined = actual_pow_mined *factor
# rate limit to 20 pows per block
if actual_pow_mined>20:
actual_pow_mined = 20
if actual_pow_mined < 0:
actual_pow_mined = 0
if actual_pow_mined == 0:
print "mined",actual_pow_mined,work["base_executions_per_second"]*factor,currently_active_miners(height)
return actual_pow_mined
def kimoto(x):
    return  1 + (0.7084 * pow(((x)/(144)), -1.228));
def retarget_work(block, x):
    targetI = x["target"]
    pastMass = 0
    counter = 0
    current_block = block
    current_block_timestamp = blockchain[current_block]["time_stamp"]
    adjustment = 0
    while True:
        counter += 1
        pastMass += blockchain[current_block]["num_pow"][x["id"]]
        seconds_passed = (current_block_timestamp - blockchain[current_block-1]["time_stamp"]).seconds
        current_block -= 1
        if seconds_passed < 1:
            seconds_passed = 1
        trs_per_second = float(pastMass) / float(seconds_passed)
        target_per_second = 10.0 / 60.0
        adjustment = target_per_second / trs_per_second
        kim = kimoto(pastMass * 30)
        #print("kim : " + str(kim) + " adjustment : " + str(adjustment))
        if adjustment > kim or adjustment < (1.0/kim):
            print("kim : " + str(kim) + " 1/kim : " + str(1.0/kim) + " adj : " + str(adjustment))
            break
        if current_block < 1:
            break
    targetI = targetI * adjustment
    if targetI>base_target:
            targetI = base_target
    if x["id"] == 0:
            blockchain[block]["first_work_factor"] = adjustment
    x["target"] = targetI


def retarget_works(block):
for x in work_packages:
retarget_work(block,x)

# Here we create up to three different work objects
if works_to_create>=1:
work_packages.append(create_work(0, 20, base_target))
if works_to_create>=2:
work_packages.append(create_work(1, 60, base_target))
if works_to_create>=3:
work_packages.append(create_work(2, 35, base_target))

while current_height < generate_blocks:
dur = randomDuration()
current_time = addSecs(current_time,dur) # random block generation time
block_pows = {}
for x in work_packages:
num_pow = miner_pows_based_on_target(x, current_height, dur) # mine some POW depending on the current difficulty
block_pows[x["id"]] = num_pow
blockchain.append(create_block(current_time, block_pows))
retarget_works(current_height) # This retargeting method is the "critical part here"
current_height = current_height + 1


values = []
target_factors = []
ideal = []
for idx in range(len(blockchain)):
if idx == 0:
continue
x = blockchain[idx]
x_minus_one = blockchain[idx-1]
time_passed = (x["time_stamp"] - x_minus_one["time_stamp"]).seconds
strech_normalizer = time_passed / 60.0
if stretch_number_pows == False:
ideal.append(works_to_create*10*strech_normalizer)
else:
ideal.append(works_to_create*10)
sum_x = 0
for y in x["num_pow"]:
sum_x += x["num_pow"][y]
if stretch_number_pows == False:
values.append(sum_x)
else:
values.append(sum_x/strech_normalizer)
x = range(generate_blocks)[1:]
y = values

#fig = plt.figure()
ax0 = plt.subplot(211)
if stretch_number_pows:
ax0.set_ylabel('POW rate per 60s', color='b')
else:
ax0.set_ylabel('POWs per Block', color='b')
ax0.set_xlabel('Block height')
ax0.plot(x,y,'-o',x,ideal,'r--')
values = []
ideal = []
target_factors = []
for idx in range(len(blockchain)):
if idx == 0:
continue
x = blockchain[idx]
x_minus_one = blockchain[idx-1]
time_passed = (x["time_stamp"] - x_minus_one["time_stamp"]).seconds
strech_normalizer = time_passed / 60.0
if stretch_number_pows == False:
ideal.append(10*strech_normalizer)
else:
ideal.append(10)
sum_x = 0
sum_x += x["num_pow"][0]
#print "sumx",sum_x
if stretch_number_pows == False:
values.append(sum_x)
else:
values.append(sum_x/strech_normalizer)
x = range(generate_blocks)[1:]
y = values
plt.title('All Works: Total POWs')

ax1 = plt.subplot(212)
ax1.plot(x,y,'-o',x,ideal,'r--')
ax1.set_xlabel('Block Height')
# Make the y-axis label and tick labels match the line color.
if stretch_number_pows:
ax1.set_ylabel('POW rate per 60s', color='b')
else:
ax1.set_ylabel('POWs per Block', color='b')

for tl in ax1.get_yticklabels():
    tl.set_color('b')



ax2 = ax1.twinx()
ax2.set_ylim(0.4, 1.6)
ax2.bar(x,[x["first_work_factor"] for x in blockchain][1:],0.45,color='#deb0b0', alpha=0.2)
ax2.set_ylabel('Retargeting Factor', color='r')
for tl in ax2.get_yticklabels():
    tl.set_color('r')
plt.title('First Work: POWs + Retargeting Factor')

plt.show()


I'll test it in other settings soon ;-)
newbie
Activity: 34
Merit: 0
Results when we randomize the block time:

newbie
Activity: 34
Merit: 0
The kimoto function was designed to take only the last few blocks into account when the hashrate changes too much in a short amount of time. The block mass needs to be quite high for kimoto to reach reasonable values (kimoto(144) = 1.7084), so a lot of blocks will be taken into account even when there is a big change in mining power, which slows down the adjustment.
So I think you should scale up the blockMass argument. A few tests lead me to think that blockMass * 30 is a good choice: with this parameter, the number of blocks taken into account (assuming 10 transactions per block) quickly decreases as the blockMass increases, and it works OK in your case where there are a lot of sudden changes in the number of transactions per second, which I don't think would happen that much in production. Well, I don't really know, because I haven't quite understood what you are trying to do.
Also, because we are limited to 20 trs/min, we don't know whether the actual rate is 21 or 300, so in case of a big spike it will take time to readjust the difficulty.

Finally, do you want to adjust the difficulty at every block?
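
A quick numeric check of that point, using a float-safe copy of the testbed's kimoto(): with the raw mass the allowed band stays very wide for a long time, while with mass * 30 it narrows after only a few blocks' worth of PoWs.

Code:
def kimoto(x):
    # same formula as the testbed, with 144.0 to avoid integer division
    return 1 + (0.7084 * pow(x / 144.0, -1.228))

for mass in (10, 20, 50, 144, 300):
    # print the band width for the raw mass and for mass * 30
    print mass, round(kimoto(mass), 3), round(kimoto(mass * 30), 3)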




Code:
def retarget_work_2(block, x):
    targetI = x["target"]
    pastMass = 0
    counter = 0
    current_block = block
    current_block_timestamp = blockchain[current_block]["time_stamp"]
    adjustment = 0
    while True:
        counter += 1
        pastMass += blockchain[current_block]["num_pow"][x["id"]]
        seconds_passed = (current_block_timestamp - blockchain[current_block-1]["time_stamp"]).seconds
        current_block -= 1
        if seconds_passed < 1:
            seconds_passed = 1
        trs_per_second = float(pastMass) / float(seconds_passed)
        target_per_second = 10.0 / 60.0
        adjustment = target_per_second / trs_per_second
        kim = kimoto(pastMass * 30)
        #print("kim : " + str(kim) + " adjustment : " + str(adjustment))
        if adjustment > kim or adjustment < (1.0/kim):
            print("kim : " + str(kim) + " 1/kim : " + str(1.0/kim) + " adj : " + str(adjustment))
            break
        if current_block < 1:
            break
    targetI = targetI * adjustment
    if targetI>base_target:
            targetI = base_target
    if x["id"] == 0:
            blockchain[block]["first_work_factor"] = adjustment
    x["target"] = targetI

legendary
Activity: 1260
Merit: 1168
A submission of my own:


I added two special cases: when the limit of 20 is reached, a "massive retarget" takes place, and when the last block showed a deviation from the desired value that was "too high", we do a different retarget.

BEFORE AND AFTER:

Peaks are now eliminated in at most 2 blocks. It's better, but it still performs poorly when randomizing the block length (here, we use a fixed 60 seconds).

Before:


After:


Also, when we start with MANY miners upfront, the retarget is too slow in the beginning:



Code:
import datetime
import random
import numpy as np
import matplotlib.pyplot as plt

# sudo apt-get install python-tk
# pip2 install numpy matplotlib

def create_block(timestamp, num_pow):
return {'time_stamp' : timestamp, 'num_pow' : num_pow, 'first_work_factor':0}

def create_work(idx, factor, target):
return {'id': idx, 'base_executions_per_second' : factor, 'target' : target}

def addSecs(tm, secs):
    fulldate = tm + datetime.timedelta(seconds=secs)
    return fulldate

def randomDuration():
if do_not_randomize_block_times_but_do_always_60_sec:
return 60
else:
return int(random.uniform(25, 120))

current_time = datetime.datetime.now()

# experiment with the number of work packages
works_to_create = 3

generate_blocks = 100
current_height = 0
blockchain = []
work_packages = []
base_target = 0x000000ffffffffffffffffffffffffff
poisson_distribution = np.random.poisson(5, generate_blocks)
stretch_number_pows = True
do_not_randomize_block_times_but_do_always_60_sec = True
new_miner_every_xth_second = 10
how_many_miners_come = 7

def currently_active_miners(current_height):
# get the current active number of miners in relation of blockchain height,
# but the number of miners increases by 1 every 10 blocks
increases = int(current_height/new_miner_every_xth_second) * how_many_miners_come
return 1+increases

# for now, leave poisson distributed variable miner count out and assume only one miner
ret = poisson_distribution[current_height]
if ret > 0:
return ret
else:
return 1

def miner_pows_based_on_target(work, height, dur):
current_target = work["target"]
factor = (current_target / base_target) * 1.0*dur/60.0
actual_pow_mined = work["base_executions_per_second"]
# random jitter
actual_pow_mined = abs((actual_pow_mined - 1) + random.uniform(1,2)) * currently_active_miners(height)
actual_pow_mined = actual_pow_mined *factor
# rate limit to 20 pows per block
if actual_pow_mined>20:
actual_pow_mined = 20
if actual_pow_mined < 0:
actual_pow_mined = 0
if actual_pow_mined == 0:
print "mined",actual_pow_mined,work["base_executions_per_second"]*factor,currently_active_miners(height)
return actual_pow_mined

def retarget_work(block, x):
targetI = x["target"]
pastMass = 0
account_for_block_max = 10
seconds_passed = 0
totalMass = 0
counter = 0
current_block = block
current_block_timestamp = blockchain[current_block]["time_stamp"]

massive_retarget = False
deviation_too_high = False
last_two_deviation = 0.0

while True:
counter = counter + 1
curmass = blockchain[current_block]["num_pow"][x["id"]]
pastMass += curmass


# if the maximum block-tx-limit of 20 was reached, do massive retargeting
if counter == 1 and pastMass == 20:
massive_retarget = True
break

# Also if deviation of last two block was too high, do some "magic"
if counter == 1 and curmass > 0:
last_two_deviation = curmass / 10
if last_two_deviation > 1.25 or last_two_deviation < -0.75:  #deviation of over 25% is bad
print "last two deviation",last_two_deviation,"at block",block
deviation_too_high = True
break




for y in blockchain[current_block]["num_pow"]:
totalMass += blockchain[current_block]["num_pow"][y]
seconds_passed = (current_block_timestamp - blockchain[current_block-1]["time_stamp"]).seconds
current_block = current_block - 1

if current_block < 1 or seconds_passed >= 60: # retarget every 120 seconds ~ 1 block on average
break

factor = 1
if massive_retarget == True:
factor = 0.4 # lower to just 40%
elif deviation_too_high == True:
factor = 1/last_two_deviation
else:
if seconds_passed < 1:
seconds_passed = 1

pows_per_360_seconds = ((pastMass * 360.0) / seconds_passed)
if pows_per_360_seconds>0 and pows_per_360_seconds<1:
pows_per_360_seconds = 1

factor = 1
if pows_per_360_seconds > 0:
factor = 10*6.0/pows_per_360_seconds
if factor<0.9:
factor = 0.9
if factor>1.1:
factor=1.1
elif pows_per_360_seconds == 0 and totalMass == 0:
factor = 1.05
else:
factor = 1

#print "seconds",seconds_passed,"blocks",counter,"actual pows",pastMass,"per 360s:",pows_per_360_seconds,"wanted:",60,"factor",factor

targetI = targetI * factor
if targetI>base_target:
targetI = base_target
if x["id"]==0:
blockchain[block]["first_work_factor"] = factor
x["target"] = targetI


def retarget_works(block):
for x in work_packages:
retarget_work(block,x)

# Here we create up to three different work objects
if works_to_create>=1:
work_packages.append(create_work(0, 20, base_target))
if works_to_create>=2:
work_packages.append(create_work(1, 60, base_target))
if works_to_create>=3:
work_packages.append(create_work(2, 35, base_target))

while current_height < generate_blocks:
dur = randomDuration()
current_time = addSecs(current_time,dur) # random block generation time
block_pows = {}
for x in work_packages:
num_pow = miner_pows_based_on_target(x, current_height, dur) # mine some POW depending on the current difficulty
block_pows[x["id"]] = num_pow
blockchain.append(create_block(current_time, block_pows))
retarget_works(current_height) # This retargeting method is the "critical part here"
current_height = current_height + 1

values = []
target_factors = []
ideal = []
for idx in range(len(blockchain)):
if idx == 0:
continue
x = blockchain[idx]
x_minus_one = blockchain[idx-1]
time_passed = (x["time_stamp"] - x_minus_one["time_stamp"]).seconds
strech_normalizer = time_passed / 60.0
if stretch_number_pows == False:
ideal.append(works_to_create*10*strech_normalizer)
else:
ideal.append(works_to_create*10)
sum_x = 0
for y in x["num_pow"]:
sum_x += x["num_pow"][y]
if stretch_number_pows == False:
values.append(sum_x)
else:
values.append(sum_x/strech_normalizer)
x = range(generate_blocks)[1:]
y = values

#fig = plt.figure()
ax0 = plt.subplot(211)
if stretch_number_pows:
ax0.set_ylabel('POW rate per 60s', color='b')
else:
ax0.set_ylabel('POWs per Block', color='b')
ax0.set_xlabel('Block height')
ax0.plot(x,y,'-o',x,ideal,'r--')
values = []
ideal = []
target_factors = []
for idx in range(len(blockchain)):
if idx == 0:
continue
x = blockchain[idx]
x_minus_one = blockchain[idx-1]
time_passed = (x["time_stamp"] - x_minus_one["time_stamp"]).seconds
strech_normalizer = time_passed / 60.0
if stretch_number_pows == False:
ideal.append(10*strech_normalizer)
else:
ideal.append(10)
sum_x = 0
sum_x += x["num_pow"][0]
#print "sumx",sum_x
if stretch_number_pows == False:
values.append(sum_x)
else:
values.append(sum_x/strech_normalizer)
x = range(generate_blocks)[1:]
y = values
plt.title('All Works: Total POWs')

ax1 = plt.subplot(212)
ax1.plot(x,y,'-o',x,ideal,'r--')
ax1.set_xlabel('Block Height')
# Make the y-axis label and tick labels match the line color.
if stretch_number_pows:
ax1.set_ylabel('POW rate per 60s', color='b')
else:
ax1.set_ylabel('POWs per Block', color='b')

for tl in ax1.get_yticklabels():
    tl.set_color('b')



ax2 = ax1.twinx()
ax2.set_ylim(0.4, 1.6)
ax2.bar(x,[x["first_work_factor"] for x in blockchain][1:],0.45,color='#deb0b0', alpha=0.2)
ax2.set_ylabel('Retargeting Factor', color='r')
for tl in ax2.get_yticklabels():
    tl.set_color('r')
plt.title('First Work: POWs + Retargeting Factor')

plt.show()
legendary
Activity: 1260
Merit: 1168
@klintay:

Neither method is suitable.

DigiShield just rate-limits with an upper bound! It does not "converge" to a desired unbounded value. We rate-limit at 20 per 60 seconds but want to converge to 10 per 60 seconds; that doesn't work with this approach.

Also, my KGW seems not to work at all, at least my coding of it! I had to edit it a bit (bugs are possible), but in KGW it is not possible to have a pastBlockMass of zero, because every block counts as at least one block. In our scheme, there may be blocks with ZERO transactions.

If I coded the KGW wrong, feel free to correct it and submit a working proof of concept along with plots.
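
Before the full listing below, here is one hypothetical guard for the zero-PoW case (my own sketch, not a fix taken from KGW itself): treat an empty window as "far too slow" and return the maximum relaxing step instead of dividing by zero. The max_relax cap is an illustrative value.

Code:
def safe_adjustment(past_mass, seconds_passed, rate_target_seconds=6.0,
                    max_relax=1.1):
    if past_mass <= 0:
        # no PoWs at all in the window: relax the target by the largest allowed step
        return max_relax
    rate_actual_seconds = float(seconds_passed) / past_mass
    return rate_target_seconds / rate_actual_seconds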
Code:
import datetime
import random
import numpy as np
import matplotlib.pyplot as plt

# sudo apt-get install python-tk
# pip2 install numpy matplotlib

def create_block(timestamp, num_pow):
return {'time_stamp' : timestamp, 'num_pow' : num_pow, 'first_work_factor':0}

def create_work(idx, factor, target):
return {'id': idx, 'base_executions_per_second' : factor, 'target' : target}

def addSecs(tm, secs):
    fulldate = tm + datetime.timedelta(seconds=secs)
    return fulldate

def randomDuration():
if do_not_randomize_block_times_but_do_always_60_sec:
return 60
else:
return int(random.uniform(25, 120))

current_time = datetime.datetime.now()

# experiment with the number of work packages
works_to_create = 3

generate_blocks = 100
current_height = 0
blockchain = []
work_packages = []
base_target = 0x000000ffffffffffffffffffffffffff
poisson_distribution = np.random.poisson(5, generate_blocks)
stretch_number_pows = True
do_not_randomize_block_times_but_do_always_60_sec = True
new_miner_every_xth_second = 10
how_many_miners_come = 5

def currently_active_miners(current_height):
# get the current active number of miners in relation of blockchain height,
# but the number of miners increases by 1 every 10 blocks
increases = int(current_height/new_miner_every_xth_second) * how_many_miners_come
return 1+increases

# for now, leave poisson distributed variable miner count out and assume only one miner
ret = poisson_distribution[current_height]
if ret > 0:
return ret
else:
return 1

def miner_pows_based_on_target(work, height, dur):
current_target = work["target"]
factor = (current_target / base_target) * 1.0*dur/60.0
actual_pow_mined = work["base_executions_per_second"]
# random jitter
actual_pow_mined = abs((actual_pow_mined - 1) + random.uniform(1,2)) * currently_active_miners(height)
actual_pow_mined = actual_pow_mined *factor
# rate limit to 20 pows per block
if actual_pow_mined>20:
actual_pow_mined = 20
if actual_pow_mined < 0:
actual_pow_mined = 0
if actual_pow_mined == 0:
print "mined",actual_pow_mined,work["base_executions_per_second"]*factor,currently_active_miners(height)
return actual_pow_mined

def kimoto(x):
    return  1 + (0.7084 * pow(((x)/(144)), -1.228));

def retarget_work(block, x):
targetI = x["target"]
pastMass = 0
account_for_block_max = 10
seconds_passed = 0
totalMass = 0
counter = 0
current_block = block
adjustmentRatio=0
current_block_timestamp = blockchain[current_block]["time_stamp"]
while True:
counter = counter + 1
pastMass += blockchain[current_block]["num_pow"][x["id"]]
for y in blockchain[current_block]["num_pow"]:
totalMass += blockchain[current_block]["num_pow"][y]
seconds_passed = (current_block_timestamp - blockchain[current_block-1]["time_stamp"]).seconds
current_block = current_block - 1
#print "iter",seconds_passed
rateActualSeconds = seconds_passed*1.0/pastMass
rateTargetSeconds = 6 # ten per minute, 6 seconds per pow

print "time passed",seconds_passed,"seen pows",pastMass,"actual seconds per pow",rateActualSeconds,"wanted seconds",rateTargetSeconds
adjustmentRatio = 1
if rateActualSeconds <0:
rateActualSeconds = 0
if rateActualSeconds!=0 and rateTargetSeconds != 0:
adjustmentRatio = rateTargetSeconds / rateActualSeconds
if pastMass>0:
horizonDeviation = kimoto(pastMass*1.0)
horizonDeviationSlow = 1/kimoto(pastMass*1.0)
else:
horizonDeviation = 1
horizonDeviationSlow = 1



if pastMass >= 50:
if adjustmentRatio<=horizonDeviationSlow or adjustmentRatio>=horizonDeviation:
break

if current_block < 1 or counter == account_for_block_max:
break

if seconds_passed < 1:
seconds_passed = 1




factor = adjustmentRatio


#print "seconds",seconds_passed,"blocks",counter,"actual pows",pastMass,"per 360s:",pows_per_360_seconds,"wanted:",60,"factor",factor

targetI = targetI * factor
if targetI>base_target:
targetI = base_target
if x["id"]==0:
blockchain[block]["first_work_factor"] = factor
x["target"] = targetI


def retarget_works(block):
for x in work_packages:
retarget_work(block,x)

# Here we create up to three different work objects
if works_to_create>=1:
work_packages.append(create_work(0, 20, base_target))
if works_to_create>=2:
work_packages.append(create_work(1, 60, base_target))
if works_to_create>=3:
work_packages.append(create_work(2, 35, base_target))

while current_height < generate_blocks:
dur = randomDuration()
current_time = addSecs(current_time,dur) # random block generation time
block_pows = {}
for x in work_packages:
num_pow = miner_pows_based_on_target(x, current_height, dur) # mine some POW depending on the current difficulty
block_pows[x["id"]] = num_pow
blockchain.append(create_block(current_time, block_pows))
retarget_works(current_height) # This retargeting method is the "critical part here"
current_height = current_height + 1

values = []
target_factors = []
ideal = []
for idx in range(len(blockchain)):
if idx == 0:
continue
x = blockchain[idx]
x_minus_one = blockchain[idx-1]
time_passed = (x["time_stamp"] - x_minus_one["time_stamp"]).seconds
strech_normalizer = time_passed / 60.0
if stretch_number_pows == False:
ideal.append(works_to_create*10*strech_normalizer)
else:
ideal.append(works_to_create*10)
sum_x = 0
for y in x["num_pow"]:
sum_x += x["num_pow"][y]
if stretch_number_pows == False:
values.append(sum_x)
else:
values.append(sum_x/strech_normalizer)
x = range(generate_blocks)[1:]
y = values

#fig = plt.figure()
ax0 = plt.subplot(211)
if stretch_number_pows:
ax0.set_ylabel('POW rate per 60s', color='b')
else:
ax0.set_ylabel('POWs per Block', color='b')
ax0.set_xlabel('Block height')
ax0.plot(x,y,'-o',x,ideal,'r--')
values = []
ideal = []
target_factors = []
for idx in range(len(blockchain)):
if idx == 0:
continue
x = blockchain[idx]
x_minus_one = blockchain[idx-1]
time_passed = (x["time_stamp"] - x_minus_one["time_stamp"]).seconds
strech_normalizer = time_passed / 60.0
if stretch_number_pows == False:
ideal.append(10*strech_normalizer)
else:
ideal.append(10)
sum_x = 0
sum_x += x["num_pow"][0]
#print "sumx",sum_x
if stretch_number_pows == False:
values.append(sum_x)
else:
values.append(sum_x/strech_normalizer)
x = range(generate_blocks)[1:]
y = values
plt.title('All Works: Total POWs')

ax1 = plt.subplot(212)
ax1.plot(x,y,'-o',x,ideal,'r--')
ax1.set_xlabel('Block Height')
# Make the y-axis label and tick labels match the line color.
if stretch_number_pows:
ax1.set_ylabel('POW rate per 60s', color='b')
else:
ax1.set_ylabel('POWs per Block', color='b')

for tl in ax1.get_yticklabels():
    tl.set_color('b')



ax2 = ax1.twinx()
ax2.set_ylim(0.85, 1.15)
ax2.bar(x,[x["first_work_factor"] for x in blockchain][1:],0.45,color='#deb0b0', alpha=0.2)
ax2.set_ylabel('Retargeting Factor', color='r')
for tl in ax2.get_yticklabels():
    tl.set_color('r')
plt.title('First Work: POWs + Retargeting Factor')

plt.show()
legendary
Activity: 1260
Merit: 1168
I will implement klintay's suggestions now and test them! But I can see that this takes far too much time, so I will not accept any more non-proof-of-concept submissions without plots.  Wink
legendary
Activity: 1775
Merit: 1032
Value will be measured in sats
Which crypto will this algorithm be used for?

Elastic Coin!


Solution 1: Kimoto Gravity Well (from Megacoin)

KGW = 1 + (0.7084 * pow((double(PastBlocksMass)/double(144)), -1.228));

The Kimoto Gravity Well (KGW) retargets after every block and adjusts very quickly, e.g. when multipools add and withdraw mining power from a smaller coin's network.

http://bitcoin.stackexchange.com/questions/21730/how-does-the-kimoto-gravity-well-regulate-difficulty

Can we use a variant of this algorithm?
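
For reference, here is a compact, simplified sketch of how that KGW band is typically used in the altcoin implementations: walk back over past blocks and stop as soon as the observed-versus-desired rate ratio leaves the band [1/KGW, KGW]. The variable names and the timestamp format are assumptions, not Elastic's or Bitcoin's actual code, and the returned value is the band-test ratio (target seconds over actual seconds); how it is applied to the target is left to the caller.

Code:
def kgw(past_blocks_mass):
    return 1 + 0.7084 * pow(past_blocks_mass / 144.0, -1.228)

def kgw_adjustment(block_times, desired_spacing=60.0):
    # block_times: block timestamps in seconds, newest first (assumed)
    adjustment = 1.0
    for i in range(1, len(block_times)):
        past_mass = i
        # average spacing over the last i blocks
        actual_spacing = (block_times[0] - block_times[i]) / float(i)
        adjustment = desired_spacing / max(actual_spacing, 1.0)
        band = kgw(past_mass)
        if adjustment > band or adjustment < 1.0 / band:
            break
    return adjustment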

Solution 2: DigiShield Retargeting (from DigiByte)

   // * If we are creating a transaction we allow transactions up to 1,000 bytes
      //   to be considered safe and assume they can likely make it into this section.
      if (nBytes < (mode == GMF_SEND ? 1000 : (DEFAULT_BLOCK_PRIORITY_SIZE - 1000)))
         nMinFee = 0;
   }

   // This code can be removed after enough miners have upgraded to version 0.9.
   // Until then, be safe when sending and require a fee if any output
   // is less than CENT:
   if (nMinFee < nBaseFee && mode == GMF_SEND)
   {
      BOOST_FOREACH(const CTxOut& txout, tx.vout)
                      if (txout.nValue < CENT)
                         nMinFee = nBaseFee;
   }

   if (!MoneyRange(nMinFee))
      nMinFee = MAX_MONEY;
   return nMinFee;
}


bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransaction &tx, bool fLimitFree,
      bool* pfMissingInputs, bool fRejectInsaneFee)
{
   AssertLockHeld(cs_main);
   if (pfMissingInputs)
      *pfMissingInputs = false;

   if (!CheckTransaction(tx, state))
      return error("AcceptToMemoryPool: : CheckTransaction failed");

   // Coinbase is only valid in a block, not as a loose transaction
   if (tx.IsCoinBase())
      return state.DoS(100, error("AcceptToMemoryPool: : coinbase as individual tx"),
            REJECT_INVALID, "coinbase");

   // Rather not work on nonstandard transactions (unless -testnet/-regtest)
   string reason;
   if (Params().NetworkID() == CChainParams::MAIN && !IsStandardTx(tx, reason))
      return state.DoS(0, error("AcceptToMemoryPool : nonstandard transaction: %s", reason), REJECT_NONSTANDARD, reason);

   // is it already in the memory pool?
   uint256 hash = tx.GetHash();
   if (pool.exists(hash))
      return false;

   // Check for conflicts with in-memory transactions
   {
      LOCK(pool.cs); // protect pool.mapNextTx
      for (unsigned int i = 0; i < tx.vin.size(); i++)
      {
          COutPoint outpoint = tx.vin[i].prevout;
         if (pool.mapNextTx.count(outpoint))
         {
            // Disable replacement feature for now
            return false;
         }
      }
   }

   {
      CCoinsView dummy;
      CCoinsViewCache view(dummy);

      {
         LOCK(pool.cs);
         CCoinsViewMemPool viewMemPool(*pcoinsTip, pool);
         view.SetBackend(viewMemPool);

         // do we already have it?
         if (view.HaveCoins(hash))
            return false;

         // do all inputs exist?
         // Note that this does not check for the presence of actual outputs (see the next check for that),
         // only helps filling in pfMissingInputs (to determine missing vs spent).
         BOOST_FOREACH(const CTxIn txin, tx.vin) {
            if (!view.HaveCoins(txin.prevout.hash)) {
               if (pfMissingInputs)
                  *pfMissingInputs = true;
               return false;
            }
         }

         // are the actual inputs available?
         if (!view.HaveInputs(tx))
            return state.Invalid(error("AcceptToMemoryPool : inputs already spent"),REJECT_DUPLICATE, "bad-txns-inputs-spent");

         // Bring the best block into scope
         view.GetBestBlock();

         // we have all inputs cached now, so switch back to dummy, so we don't need to keep lock on mempool
         view.SetBackend(dummy);
      }

      // Check for non-standard pay-to-script-hash in inputs
      if (Params().NetworkID() == CChainParams::MAIN && !AreInputsStandard(tx, view))
         return error("AcceptToMemoryPool: : nonstandard transaction input");

      // Note: if you modify this code to accept non-standard transactions, then
      // you should add code here to check that the transaction does a
      // reasonable number of ECDSA signature verifications.

      int64_t nValueIn = view.GetValueIn(tx);
      int64_t nValueOut = tx.GetValueOut();
      int64_t nFees = nValueIn-nValueOut;
      double dPriority = view.GetPriority(tx, chainActive.Height());

      CTxMemPoolEntry entry(tx, nFees, GetTime(), dPriority, chainActive.Height());
      unsigned int nSize = entry.GetTxSize();

      // Don't accept it if it can't get into a block
      int64_t txMinFee = GetMinFee(tx, nSize, true, GMF_RELAY);
      if (fLimitFree && nFees < txMinFee)
         return state.DoS(0, error("AcceptToMemoryPool : not enough fees %s, %d < %d", hash.ToString(), nFees, txMinFee), REJECT_INSUFFICIENTFEE, "insufficient fee");

      // Continuously rate-limit free transactions
      // This mitigates 'penny-flooding' -- sending thousands of free transactions just to
      // be annoying or make others' transactions take longer to confirm.
      if (fLimitFree && nFees < CTransaction::nMinRelayTxFee)
      {
         static CCriticalSection csFreeLimiter;
         static double dFreeCount;
         static int64_t nLastTime;
         int64_t nNow = GetTime();

         LOCK(csFreeLimiter);

         // Use an exponentially decaying ~10-minute window:
         dFreeCount *= pow(1.0 - 1.0/600.0, (double)(nNow - nLastTime));
         nLastTime = nNow;
         // -limitfreerelay unit is thousand-bytes-per-minute
         // At default rate it would take over a month to fill 1GB
         if (dFreeCount >= GetArg("-limitfreerelay", 15)*10*1000)
            return state.DoS(0, error("AcceptToMemoryPool : free transaction rejected by rate limiter"), REJECT_INSUFFICIENTFEE, "insufficient priority");
         LogPrint("mempool", "Rate limit dFreeCount: %g => %g\n", dFreeCount, dFreeCount+nSize);
         dFreeCount += nSize;
      }


      if (fRejectInsaneFee && nFees > CTransaction::nMinRelayTxFee * 10000)
         return error("AcceptToMemoryPool: : insane fees %s, %d > %d", hash.ToString(), nFees, CTransaction::nMinRelayTxFee * 10000);

      // Check against previous transactions
      // This is done last to help prevent CPU exhaustion denial-of-service attacks.
      if (!CheckInputs(tx, state, view, true, SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_STRICTENC))
      {
         return error("AcceptToMemoryPool: : ConnectInputs failed %s", hash.ToString());
      }
      // Store transaction in memory
      pool.addUnchecked(hash, entry);
   }

   g_signals.SyncTransaction(hash, tx, NULL);

   return true;
}


int CMerkleTx::GetDepthInMainChainINTERNAL(CBlockIndex* &pindexRet) const
{
   if (hashBlock == 0 || nIndex == -1)
      return 0;
   AssertLockHeld(cs_main);

   // Find the block it claims to be in
   map<uint256, CBlockIndex*>::iterator mi = mapBlockIndex.find(hashBlock);
   if (mi == mapBlockIndex.end())
      return 0;
   CBlockIndex* pindex = (*mi).second;
   if (!pindex || !chainActive.Contains(pindex))
      return 0;

   // Make sure the merkle branch connects to this block

https://github.com/digibyte/digibyte/blob/master/src/main.cpp
https://www.reddit.com/r/Digibyte/comments/213t7b/what_is_digishield_how_it_works_to_retarget/
hero member
Activity: 812
Merit: 500
Which crypto will this algorithm be used for?
hero member
Activity: 661
Merit: 500
Working my way through the code. I don't see myself submitting any solutions, but I know some people who like Bitcoin.