1) a sharelog
cpuminer can run with debug output; however, that output is often not captured
a share log is needed, i.e. for the shares submitted to a mining pool it would be helpful to log the
- time
- disposition (accept or reject)
- target
- hash
- data
and perhaps some other info in a text sharelog file. this would help with verifying from historical submitted entries if there are issues
As I've already said, this could indeed be useful, but the changes required to implement it properly are nontrivial (cpuminer does not currently keep track of submitted shares). I don't have much free time in this period, but I will put this on my to-do list.
hi pooler,
thanks very much for the quick response. Here is some code — I'm not sure if this is optimal, but the changes are something like this:
i moved struct work into miner.h and added hash field which is the winning hash that scanhash has found
/* One unit of mining work; also carries the result back for share logging. */
struct work {
uint32_t data[32]; /* block header the miner hashes (hex-dumped whole in share_log) */
uint32_t target[8]; /* difficulty target compared against in fulltest() */
uint32_t hash[8]; /* new field: winning hash found by scanhash_* (copied via memcpy there) */
char job_id[128]; /* presumably the stratum job identifier — TODO confirm against stratum code */
size_t xnonce2_len; /* number of valid bytes in xnonce2 below */
unsigned char xnonce2[32]; /* NOTE(review): looks like the stratum extranonce2 — confirm */
};
i modified scanhash* function so that scanhash can pass the winning hash back
e.g.
miner_thread calls scanhash as such:
/* scan nonces for a proof-of-work hash */
switch (opt_algo) {
case ALGO_SCRYPT:
rc = scanhash_scrypt(thr_id, work.data, scratchbuf, work.target,
max_nonce, &hashes_done, work.hash);
break;
...
then in scanhash:
int scanhash_scrypt(int thr_id, uint32_t *pdata,
unsigned char *scratchbuf, const uint32_t *ptarget,
uint32_t max_nonce, unsigned long *hashes_done,
uint32_t *phash); {
...
for (i = 0; i < throughput; i++) {
if (hash[i * 8 + 7] <= Htarg && fulltest(hash + i * 8, ptarget)) {
*hashes_done = n - pdata[19] + 1;
pdata[19] = data[i * 20 + 19];
memcpy(phash, hash + i * 8, 32); // <- copy the winning hash into struct work
return 1;
}
}
that goes back to miner_thread, which calls submit_work
that in turns gets picked up by workio_thread -> workio_submit_work -> submit_upstream_work
then i modify submit_upstream_work
if (have_stratum) {
.... codes that build that rpc call ...
/* place work in the queue for share_result*/
pwork = malloc(sizeof(struct work));
if (!pwork)
goto err_out;
memcpy(pwork, work, sizeof(struct work));
if (!tq_push(tq_result_work, pwork))
goto err_out;
/* existing code
if (unlikely(!stratum_send_line(&stratum, s))) {
applog(LOG_ERR, "submit_upstream_work stratum_send_line failed");
goto out;
}
} else { // no stratum
again before that rpc call
/* place work in the queue for share_result*/
pwork = malloc(sizeof(struct work));
if (!pwork)
goto err_out;
memcpy(pwork, work, sizeof(struct work));
if (!tq_push(tq_result_work, pwork))
goto err_out;
/* build JSON-RPC request */
sprintf(s,
"{\"method\": \"getwork\", \"params\": [ \"%s\" ], \"id\":1}\r\n",
str);
/* issue JSON-RPC request */
val = json_rpc_call(curl, rpc_url, rpc_userpass, s, false, false, NULL);
if (unlikely(!val)) {
applog(LOG_ERR, "submit_upstream_work json_rpc_call failed");
goto out;
}
tq_result_work is actually a "thread queue" in util.c i.e.
struct thread_q *tq_result_work;
i simply make tq_result_work a global variable in cpu-miner.c and let the mutexes in thread_q co-ordinate access to that data
- not sure if a deadlock is possible
it is initialised shortly after workio thread is initialised in main
/* init workio thread info */
work_thr_id = opt_n_threads;
thr = &thr_info[work_thr_id];
thr->id = work_thr_id;
thr->q = tq_new();
if (!thr->q)
return 1;
/* init work result q
* used to pass work between workio - submit_upstream_work and
* and share_result*/
tq_result_work = tq_new();
if (!tq_result_work)
return 1;
i simply make a copy of struct work in submit_upstream_work and push the struct work into the tq_result_work queue. this queue is used to pass work to share_result as follows:
when stratum_thread process the return results it calls share_result
then i modify share_result
/*
 * Report an accepted/rejected share: update counters, print the running
 * accept ratio and hashrate, and (if enabled) append the share to the log.
 *
 * result  - nonzero if the pool accepted the share, zero if rejected
 * reason  - optional reject reason string from the pool (may be NULL)
 */
static void share_result(int result, const char *reason)
{
	char s[345];
	double hashrate;
	int i;
	struct work *pwork;

	hashrate = 0.;
	pthread_mutex_lock(&stats_lock);
	for (i = 0; i < opt_n_threads; i++)
		hashrate += thr_hashrates[i];
	result ? accepted_count++ : rejected_count++;
	pthread_mutex_unlock(&stats_lock);

	sprintf(s, hashrate >= 1e6 ? "%.0f" : "%.2f", 1e-3 * hashrate);
	applog(LOG_INFO, "accepted: %lu/%lu (%.2f%%), %s khash/s %s",
	       accepted_count,
	       accepted_count + rejected_count,
	       100. * accepted_count / (accepted_count + rejected_count),
	       s,
	       result ? "(yay!!!)" : "(booooo)");

	/* Retrieve the work queued by submit_upstream_work and log the share.
	 * NOTE(review): tq_pop with a NULL timeout presumably blocks if the
	 * queue is empty (e.g. a response with no matching push) — confirm;
	 * a deadlock is possible if request/response ever get out of step. */
	pwork = tq_pop(tq_result_work, NULL);
	if (pwork) {
		if (opt_sharelog)
			share_log(result ? "accept" : "reject", pwork);
		/* free unconditionally: previously leaked when sharelog was off */
		free(pwork);
	}

	if (opt_debug && reason)
		applog(LOG_DEBUG, "DEBUG: reject reason: %s", reason);
}
then i create the supporting functions for share_log:
/*
 * Open the share log file in append mode and initialize its mutex.
 * On failure the error (including the OS reason) is logged and
 * sharelog_file stays NULL, which share_log() treats as "logging off".
 *
 * filename - path of the log file to append shares to
 */
void sharelog_init(char *filename) {
	pthread_mutex_init(&sharelog_lock, NULL);
	sharelog_file = fopen(filename, "a");
	if (!sharelog_file)
		/* include strerror so the operator can tell *why* it failed */
		applog(LOG_ERR, "Failed to open %s for share log: %s",
		       filename, strerror(errno));
}
/*
 * Append one share record to the share log as a CSV line:
 *   timestamp,disposition,target,sharehash,sharedata
 * All multi-word values are hex strings; target and hash are byte-reversed
 * into big-endian display order before dumping.
 *
 * disposition - "accept" or "reject"
 * pwork       - the submitted work (data, target, and winning hash)
 *
 * No-op if sharelog_init() failed to open the file. Thread-safe via
 * sharelog_lock, which also serializes the non-reentrant localtime().
 */
void share_log(const char *disposition, const struct work *pwork)
{
	char *target, *hash, *data;
	char s[1024];
	char ts[50];
	int len;
	time_t now;
	struct tm tm, *tm_p;
	uint32_t hash_be[8], target_be[8];

	/* sharelog file stream cannot be null */
	if (!sharelog_file)
		return;

	time(&now);
	pthread_mutex_lock(&sharelog_lock);
	tm_p = localtime(&now);
	memcpy(&tm, tm_p, sizeof(tm));
	snprintf(ts, sizeof(ts), "%d-%02d-%02d %02d:%02d:%02d",
		tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
		tm.tm_hour, tm.tm_min, tm.tm_sec);

	/* reverse word order so the hex reads as a big-endian number */
	for (int i = 0; i < 8; i++) {
		be32enc(hash_be + i, pwork->hash[7 - i]);
		be32enc(target_be + i, pwork->target[7 - i]);
	}
	target = bin2hex((unsigned char *) target_be, sizeof(pwork->target));
	hash = bin2hex((unsigned char *) hash_be, sizeof(pwork->hash));
	data = bin2hex((unsigned char *) pwork->data, sizeof(pwork->data));

	/* bin2hex allocates; a NULL here would be UB when fed to %s */
	if (!target || !hash || !data) {
		applog(LOG_ERR, "sharelog: out of memory");
		goto out;
	}

	/* timestamp,disposition,target,sharehash,sharedata */
	len = snprintf(s, sizeof(s), "%s,%s,%s,%s,%s\n", ts, disposition,
		target, hash, data);
	if (len < 0 || len >= (int) sizeof(s)) {
		applog(LOG_ERR, "sharelog entry too long, not logged");
		goto out;
	}
	if (fwrite(s, 1, len, sharelog_file) != (size_t) len ||
	    fflush(sharelog_file) || ferror(sharelog_file))
		applog(LOG_ERR, "sharelog write error");

out:
	free(target);
	free(hash);
	free(data);
	pthread_mutex_unlock(&sharelog_lock);
}
the remaining is to create the opt_sharelog
case 'L':
free(opt_sharelog);
opt_sharelog = strdup(arg);
applog(LOG_DEBUG, "logging to sharelog: %s", opt_sharelog);
sharelog_init(opt_sharelog);
break;
static struct option const options[] = {
...
{ "sharelog", 1, NULL, 'L' },
...
and to update the help text
note that this is more of a 'hack' than a proper solution as the request-response protocol for submit work (e.g. stratum) needs to be strictly honoured. there is no error handling such as if a response is missing or some other permutations occurs, that'd throw things out of order. perhaps a more robust solution may need some re-design to be done in stratum_thread or workio_thread e.g. for one of them to handle the request / response of submit work rather than currently for stratum the request is send in workio_thread but response processed in stratum_thread
hope the above is useful
thanks for the link !
thanks and cheers