IPFS Pub Sub
Description
Processing pub/sub
A node that creates a processing job splits it into micro jobs, publishes each micro job to an assigned peer over pub/sub, and each peer publishes its hashed results back on the job's results channel.
// This comes from the DApp; randomSeed is used to choose the verifier block.
typedef struct _JOB {
    UUID ipfsBlock;                // source block data to be processed
    unsigned long BlockLen;        // the IPFS block's length in bytes
    unsigned long BlockStride;     // stride to use for the access pattern
    unsigned long BlockLineStride; // line stride in bytes to get to the next block start
    float randomSeed;              // used to randomly choose the verifier block
    PUBSUBCHANNEL resultsChannel;  // which channel to publish results to
} JOB;
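For illustration, here is a minimal sketch of how a DApp-side caller might fill in a JOB before handing it to the processing node. BuildJob, PinBlockLength and OpenResultsChannel are hypothetical helper names, and the UUID/PUBSUBCHANNEL types are assumed to be supplied by the surrounding runtime.

// Hypothetical sketch: PinBlockLength and OpenResultsChannel are assumed helpers,
// not part of the documented interface.
JOB BuildJob(UUID ipfsBlock, unsigned long stride, unsigned long lineStride, float randomSeed) {
    JOB job;
    job.ipfsBlock = ipfsBlock;
    job.BlockLen = PinBlockLength(ipfsBlock);    // total block length in bytes
    job.BlockStride = stride;                    // stride to use for the access pattern
    job.BlockLineStride = lineStride;            // line stride in bytes to get to the next block start
    job.randomSeed = randomSeed;                 // later used to choose the verifier block
    job.resultsChannel = OpenResultsChannel();   // channel the results will be published to
    return job;
}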
typedef struct _PROCESSSUBCHUNKS {
    UUID chunkID;                 // unique process chunk ID
    unsigned long Offset;         // offset into the data
    unsigned long SubChunkWidth;  // width of the sub-chunk/sub-block
    unsigned long SubChunkHeight; // height of the sub-chunk/sub-block
    unsigned long Stride;         // stride to use for the overall data chunk
    unsigned long LineStride;     // stride of one line of data
    unsigned long nSubChunks;     // number of sub-chunks to process
} PROCESSSUBCHUNKS;
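The addressing implied by these fields is not spelled out here. As one assumed reading, the sketch below walks a single sub-chunk line by line, treating LineStride as the byte distance between line starts and Stride as the byte distance between consecutive elements within a line.

// Assumed interpretation of the sub-chunk fields; WalkSubChunk is illustrative only.
void WalkSubChunk(const unsigned char *blockData, const PROCESSSUBCHUNKS *sc) {
    for (unsigned long line = 0; line < sc->SubChunkHeight; line++) {
        unsigned long lineStart = sc->Offset + line * sc->LineStride;   // start of this line
        for (unsigned long col = 0; col < sc->SubChunkWidth; col++) {
            unsigned long byteIndex = lineStart + col * sc->Stride;     // element within the line
            // process blockData[byteIndex] here
        }
    }
}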
typedef struct _MICROJOB {
    UUID ipfsBlock;                     // source block data to be processed
    unsigned long datalen;              // length of the IPFS block in bytes
    PUBSUBCHANNEL resultsChannel;       // channel to publish results to
    PROCESSSUBCHUNKS chunksToProcess[]; // sub-chunks to process (flexible array member, kept last)
} MICROJOB;
typedef struct _RESULTSMICROJOB {
    unsigned long resultHash;    // hash of the combined results
    UUID ipfsResultsData;        // UUID of the results data on IPFS
    unsigned long chunkHashes[]; // hash of each processed chunk (flexible array member, kept last)
} RESULTSMICROJOB;
// node that created the processing channel
DoProcessing(JOB *job) {
    unsigned long nMicroJobs;
    MICROJOB *microJobs = SplitJob(job, &nMicroJobs);
    for (unsigned long i = 0; i < nMicroJobs; i++) {
        // uniquePeerIDs[i]: the peer assigned to micro job i
        PublishMessage(DoProcessing, microJobs[i], uniquePeerIDs[i]);
    }
}
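SplitJob is not defined in this section. A plausible sketch, assuming one micro job per participating peer and a contiguous run of lines per micro job, is shown below; GetSubscribedPeerCount is a hypothetical helper.

// Hypothetical sketch of SplitJob, matching the call above.
// In real code each micro job would also need storage allocated for its
// chunksToProcess entries; that step is only indicated in comments here.
MICROJOB *SplitJob(JOB *job, unsigned long *nMicroJobs) {
    unsigned long nPeers = GetSubscribedPeerCount(job->resultsChannel); // hypothetical helper
    unsigned long linesTotal = job->BlockLen / job->BlockLineStride;
    unsigned long linesPerJob = (linesTotal + nPeers - 1) / nPeers;     // ceiling division
    MICROJOB *microJobs = calloc(nPeers, sizeof(MICROJOB));
    for (unsigned long i = 0; i < nPeers; i++) {
        microJobs[i].ipfsBlock = job->ipfsBlock;
        microJobs[i].datalen = job->BlockLen;
        microJobs[i].resultsChannel = job->resultsChannel;
        // chunksToProcess for micro job i would describe the lines in
        // [i * linesPerJob, (i + 1) * linesPerJob), clamped to linesTotal.
    }
    *nMicroJobs = nPeers;
    return microJobs;
}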
// each peer ID
DoProcessing(MICROJOB *mjob, UUID peerID) {
    if (peerID != myUUID) {
        // not addressed to this peer; its result won't validate because the hash will be incorrect
        return;
    }
    RESULTSMICROJOB *resultsMicroJob = ProcessSubChunks(mjob->chunksToProcess, peerID);
    unsigned long hashCode = peerID;
    for (unsigned long i = 0; i < resultsMicroJob->chunkHashes.size; i++) {
        hashCode ^= resultsMicroJob->chunkHashes[i];
    }
    resultsMicroJob->resultHash = hashCode;
    AddSGNUSPendingBlock(resultsMicroJob);
    PublishResults(resultsMicroJob);
}
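Because resultHash is just the worker's peer ID XORed with every chunk hash, any subscriber on the results channel can cheaply re-check a published result before accepting it. A minimal sketch, mirroring the pseudocode above (ValidateResults is an assumed name, and the chunk-hash count is passed in because the RESULTSMICROJOB struct does not store it explicitly):

// Recompute the XOR aggregate exactly as the worker did and compare it
// against the published resultHash. Returns 1 if the result is consistent.
int ValidateResults(RESULTSMICROJOB *r, UUID publisherPeerID, unsigned long nChunkHashes) {
    unsigned long expected = publisherPeerID;   // same pseudocode treatment of the peer ID as above
    for (unsigned long i = 0; i < nChunkHashes; i++) {
        expected ^= r->chunkHashes[i];
    }
    return expected == r->resultHash;
}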
Handling of processing grid structure changes
Processing Chunks