IPFS node in the browser, pin to a cluster

I’m implementing IPFS at https://blurt.world. I’d like our web application to run an IPFS node in the browser, and to use a global IPFS cluster, run collaboratively by our witnesses (validators), for persistence.

I would like to know whether it’s possible to trigger pins on a cluster, in an authenticated fashion, from a browser-based IPFS node.

Maybe pubsub with a helper app?
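
Something like this is what I’m picturing on the browser side -- just a rough sketch, assuming js-ipfs 0.48+ (where ipfs.add returns a single result); the topic name, message shape, and account name are made up:

// Hedged sketch: a browser js-ipfs node publishing a signed pin request.
// Assumes an ESM module with top-level await; 'blurt-pin-requests' and the
// message fields are hypothetical.
import IPFS from 'ipfs';

const ipfs = await IPFS.create();

// add the file to the local (in-browser) node first
const { cid } = await ipfs.add(fileBytes); // fileBytes: a Uint8Array of the upload

// then ask the cluster-side helper to pin it
const request = {
  cid: cid.toString(),
  username: 'alice', // hypothetical account name
  sig: sigHex,       // hex signature over the file bytes, produced elsewhere
};
await ipfs.pubsub.publish(
  'blurt-pin-requests',
  new TextEncoder().encode(JSON.stringify(request))
);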

Thanks for any and all ideas.

We’re planning to add a similar feature to our ipfs persistence module.

I’ll follow this topic; it’s an interesting problem.

If you decide to go with pubsub, check out our qDesk.org and, more specifically, our library https://github.com/QuestNetwork/quest-os-js -- it might help you avoid common pitfalls.


Thank you for the link, you’re doing really interesting work.

Let me show you what I’ll likely deploy tonight:

require('dotenv').config();
let express = require('express');
let bodyParser = require('body-parser');
let chainLib = require('@blurtfoundation/blurtjs');
let { PrivateKey, PublicKey, Signature } = require('@blurtfoundation/blurtjs/lib/auth/ecc');
const RIPEMD160 = require('ripemd160');
const AWS = require('aws-sdk');
const {RateLimiterMemory} = require('rate-limiter-flexible');
const ipfsClient = require('ipfs-http-client');
const ipfsCluster = require('ipfs-cluster-api');



// Set up blurtjs
chainLib.api.setOptions({
  url: "http://localhost:8091",
  retry: true,
  useAppbaseApi: true,
});


// Connect to the local ipfs node
const ipfs = ipfsClient('http://localhost:5001');


// connect to the ipfs-cluster API (port 9094 by default)
const cluster = ipfsCluster(); 


// Setup AWS
const s3 = new AWS.S3({
  accessKeyId: process.env.S3_ACCESS_KEY,
  secretAccessKey: process.env.S3_SECRET_KEY,
  region: process.env.S3_REGION,
  endpoint: process.env.S3_ENDPOINT,
  signatureVersion: "v4"
});

// Set up rate limiting
const rate_limit_opts = {
  points: Number(process.env.RATE_LIMIT_POINTS), // e.g. 3 images
  duration: 600,                                 // every ten minutes
};
const rateLimiter = new RateLimiterMemory(rate_limit_opts);

let app = express();

const port = process.env.PORT || 7070;        // set our port

const hdl_upload_s3 = async (req, res) => { // now writes to IPFS rather than S3
  try {
    const { username, sig } = req.params;

    // const username = this.session.a;
    if ((username === undefined) || (username === null)) {
      throw new Error("invalid user");
    }

    const jsonBody = req.body;
    // console.log(`jsonBody.data.length=${jsonBody.data.length}`);
    if (jsonBody.data.length > process.env.MAX_JSON_BODY_IN_BYTES) {
      throw new Error("File size too big!");
    }

    // data:image/jpeg;base64,<payload> -- split on the first comma
    const indexData = jsonBody.data.indexOf(',');
    if (indexData === -1) {
      throw new Error("could not find index of [,]");
    }

    let prefix_data = jsonBody.data.substring(0, indexData);
    let base64_data = jsonBody.data.substring(indexData + 1);

    // extract content type
    let file_ext = null;
    if (prefix_data.startsWith('data:image/jpeg;')) file_ext = 'jpeg';
    else if (prefix_data.startsWith('data:image/jpg;')) file_ext = 'jpg';
    else if (prefix_data.startsWith('data:image/png;')) file_ext = 'png';
    else if (prefix_data.startsWith('data:image/gif;')) file_ext = 'gif';
    else throw new Error("invalid content type");

    const content_type = `image/${file_ext}`;

    let buffer = Buffer.from(base64_data, 'base64');
    // console.log(`buffer.length=${buffer.length}`);
    if (buffer.length > process.env.MAX_IMAGE_SIZE_IN_BYTES) {
      throw new Error("File size too big!");
    }

    // generate a hash // no longer writing to s3
    //const hash_buffer = (new RIPEMD160().update(buffer).digest('hex'));
    //const s3_file_path = `${username}/${hash_buffer}.${file_ext}`;

    { // verifying sig
      // validateAccountName returns an error string when the name is
      // invalid and null when it is valid
      let nameError = chainLib.utils.validateAccountName(username);
      if (nameError) {
        throw new Error("Invalid username");
      }

      let existingAccs = await chainLib.api.getAccountsAsync([username]);
      if (existingAccs.length !== 1) {
        throw new Error('Invalid username.');
      }

      let sign_data = Signature.fromBuffer(Buffer.from(sig, 'hex'));
      const sigPubKey = sign_data.recoverPublicKeyFromBuffer(buffer).toString();

      const postingPubKey = existingAccs[0].posting.key_auths[0][0];
      const activePubKey = existingAccs[0].active.key_auths[0][0];
      const ownerPubKey = existingAccs[0].owner.key_auths[0][0];

      switch (sigPubKey) {
        case postingPubKey:
        case activePubKey:
        case ownerPubKey:
          // key matched, do nothing
          break;
        default:
          throw new Error('Invalid key.');
      }

      let is_verified = sign_data.verifyBuffer(buffer, PublicKey.fromString(sigPubKey));
      if (!is_verified) {
        throw new Error('Invalid signature.');
      }
    }

    await rateLimiter.consume(username, 1);

 //   await s3.putObject({
 //     ACL: 'public-read',
 //     Bucket: process.env.S3_BUCKET,
 //     Key: s3_file_path,
 //     Body: buffer,
 //     ContentType: content_type
 //   }).promise();


    // add the image to the local IPFS node; ipfs.add returns an async
    // iterable of { path, cid, size } entries
    let cid;
    for await (const result of ipfs.add(buffer)) {
      // The CID (Content IDentifier) uniquely addresses the data
      // and can be used to get it again.
      cid = result.cid;
      console.log(cid.toString());
    }

    // ask the cluster to pin the new CID for persistence
    cluster.pin.add(cid.toString(), (err) => {
      err ? console.error(err) : console.log('pin added');
    });

    res.json({status: 'ok', message: 'success', data: cid.toString()});
  } catch (e) {
    // console.error('Error in /imageupload api call', this.session.uid, error);
    res.json({status: 'error', message: e.message, data: e});
  }
};



// Configure the express server
const serverStart = () => {
  app.use(bodyParser.json({type: 'application/json', limit: '10mb'}));

  let router = express.Router();
  router.post('/:username/:sig', hdl_upload_s3);
  router.get('/test_cors', async (req, res) => {
    res.json({status: 'ok', message: 'success', data: null});
  });

  app.use('/', router);

  app.listen(port);
  console.log('serverStart on port ' + port);
};


// Start the express server
serverStart();

module.exports = app;

Basically that’ll write to IPFS instead of the S3 bucket that came before it.
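
On the client side, the browser just POSTs the data URI, with a signature over the decoded image bytes in the URL; a rough sketch (the host name and the username/sigHex variables are hypothetical, and the signature has to match what recoverPublicKeyFromBuffer checks in the handler above):

// Hypothetical browser-side call to the upload route above.
const dataUri = canvas.toDataURL('image/png'); // 'data:image/png;base64,...'
const res = await fetch(`https://images.example.com/${username}/${sigHex}`, {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({ data: dataUri }),
});
const { status, message, data: cid } = await res.json();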

Seems cleaner to run a node in the browser, though -- I just have to make sure that the images make it to the cluster. There’s also the hairy question of validation to deal with: how do we ensure that only our users are uploading to our cluster?

Could have them sign a message and pipe that over pubsub, then check the sig before pinning, I suppose.
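
Roughly, the helper sitting next to the cluster could look like this -- just a sketch: the topic name and message shape are made up, and here the signature is assumed to be over the CID string rather than the file bytes:

// Hedged sketch of the pubsub helper: check the request's signature against
// the account's keys before asking the cluster to pin. The topic name
// 'blurt-pin-requests' and the message fields are hypothetical.
const ipfsClient = require('ipfs-http-client');
const ipfsCluster = require('ipfs-cluster-api');
const chainLib = require('@blurtfoundation/blurtjs');
const { Signature } = require('@blurtfoundation/blurtjs/lib/auth/ecc');

const ipfs = ipfsClient('http://localhost:5001');
const cluster = ipfsCluster();

ipfs.pubsub.subscribe('blurt-pin-requests', async (msg) => {
  try {
    const { cid, username, sig } = JSON.parse(new TextDecoder().decode(msg.data));

    // recover the signing key from a signature over the CID string
    const signature = Signature.fromBuffer(Buffer.from(sig, 'hex'));
    const sigPubKey = signature
      .recoverPublicKeyFromBuffer(Buffer.from(cid))
      .toString();

    // accept any of the account's posting/active/owner keys
    const [account] = await chainLib.api.getAccountsAsync([username]);
    const accountKeys = [
      account.posting.key_auths[0][0],
      account.active.key_auths[0][0],
      account.owner.key_auths[0][0],
    ];
    if (!accountKeys.includes(sigPubKey)) return; // drop unauthenticated requests

    cluster.pin.add(cid, (err) => {
      err ? console.error(err) : console.log(`pinned ${cid} for ${username}`);
    });
  } catch (e) {
    console.error(e);
  }
});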

Anyhow, yeah -- happy to hear your thoughts, and I’ll go through your work in more depth later :).

My impression is that you would need a server-side service that collects “pin requests” from the users, authenticates them, and then forwards the pin requests to Cluster.

Check out https://github.com/sindresorhus/file-type for testing whether an upload is actually a valid content type.
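
For example (a hedged sketch using file-type’s v16-style CommonJS API; it sniffs the magic bytes instead of trusting the data-URI prefix):

// Sniff the real content type from the buffer's magic bytes.
const FileType = require('file-type');

const type = await FileType.fromBuffer(buffer); // e.g. { ext: 'png', mime: 'image/png' }
if (!type || !['jpg', 'png', 'gif'].includes(type.ext)) {
  throw new Error('invalid content type');
}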


To your questions: yeah, it’s a good approach. I’m emulating Google DataStore with my bee service and want to offer the same interface for IPFS persistence.

If you want to make sure only authenticated users can “upload”, maybe it even makes sense to use my channels. We have a participant list, and entry can be gated by a captcha and/or invite codes. To be honest, though, wherever you can avoid using pubsub, it’s best to do so -- it doesn’t scale super well with too many people in the same channel. So, what @hector said.

“Doesn’t scale super well with too many people in the same channel”

Is it possible for you to put a number(ish) on that for my education?

E.g., maybe this doesn’t work for a global swarm of 5000 Raspberry Pi devices?

Thank you,

-Jacob

Depends on the activity and the stats, but on a 4 GB RAM, 2-CPU Ubuntu machine running js-webrtc-star, it can get more than rocky at not even 500 peers.