Tags: javascript, node.js, web-scraping, server, nightmare

Nightmare.js web-scraping not working on server


For my open-source Node.js project bundeszirkus.de I need to implement a web scraper that finds certain URLs and downloads the files behind them. Currently I am using Nightmare.js like this (link to full file):

// Dependencies used below (required at the top of the full file)
const Nightmare = require('nightmare')
const cheerio = require('cheerio')

// logger, BT_LINK, checkDocumentLink and downloadFileFromHref are defined elsewhere in the full file
exports.scrape = function(cb) {
    _callback = cb
    _downloadedLinks = 0

    let nightmare = new Nightmare({ show: false })
    const url = 'https://www.bundestag.de/services/opendata'

    // We ask Nightmare to browse to the bundestag.de URL and return the body's inner HTML
    nightmare
        .goto(url)
        .wait('body')
        .evaluate(() => document.querySelector('body').innerHTML)
        .end()
        .then(response => {
            _downloadedLinks = 0
            let validLinks = extractLinks(response)
            _foundLinks = validLinks.length
            logger.info("[scraper] found " + validLinks.length + " valid links.")
            if (validLinks.length > 0) {
                validLinks.forEach(href => {
                    downloadFileFromHref(BT_LINK + href)
                })
            } else {
                logger.info("[scraper] did not download any files.")
                _callback()
            }
        }).catch(err => {
            logger.info("[scraper] did not download any files.")
            _callback()
        })

    // Extract the hrefs of the document links from the page's HTML
    let extractLinks = html => {
        let data = []
        const $ = cheerio.load(html)
        $('.bt-link-dokument').each(function() {
            data.push(this.attribs.href)
        })
        return data.filter(checkDocumentLink)
    }
}
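
For context, scrape() takes a single callback that is invoked once scraping has finished (or failed). A minimal call site could look like the sketch below; the module path ./scraper and the callback body are assumptions, not taken from the actual project:

// Hypothetical call site -- module path and callback body are assumptions.
const scraper = require('./scraper')

scraper.scrape(() => {
    // runs once scraping (or the error path) has finished
    console.log('scraping done, loading data ...')
})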

This works perfectly when run on my local machine. However, there seems to be a problem when running it on my Ubuntu server (AWS). I have read that this can happen because there is no graphical interface available on the server, so I am trying to run Xvfb on it.

Here is my ecosystem.config.js file.
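
The contents of that file are not reproduced in this post. For illustration only, a pm2 config that starts Xvfb next to the server app might look roughly like the sketch below; the script paths, the display number :99 and the env block are assumptions, not the actual file:

// Sketch of an ecosystem.config.js for pm2 -- not the original file.
// Script paths, the display number :99 and the env block are assumptions.
module.exports = {
  apps: [
    {
      name: 'Xvfb',
      script: '/usr/bin/Xvfb',
      args: ':99 -screen 0 1280x720x24',
      interpreter: 'none'              // run the Xvfb binary directly, not through node
    },
    {
      name: 'bundeszirkus-server',
      script: './server.js',
      env: {
        DISPLAY: ':99'                 // let Electron/Nightmare render into the virtual display
      }
    }
  ]
}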

When running pm2 ls I can see that Xvfb and my server are both running:

ubuntu@ip-XXX-XX-XX-XXX:~/bundeszirkus-server/current$ pm2 ls
┌─────────────────────┬────┬─────────┬──────┬───────┬────────┬─────────┬────────┬─────┬────────────┬────────┬──────────┐
│ App name            │ id │ version │ mode │ pid   │ status │ restart │ uptime │ cpu │ mem        │ user   │ watching │
├─────────────────────┼────┼─────────┼──────┼───────┼────────┼─────────┼────────┼─────┼────────────┼────────┼──────────┤
│ Xvfb                │ 1  │ N/A     │ fork │ 26063 │ online │ 6       │ 14m    │ 0%  │ 17.5 MB    │ ubuntu │ disabled │
│ bundeszirkus-server │ 0  │ 1.0.0   │ fork │ 26057 │ online │ 6       │ 14m    │ 0%  │ 246.4 MB   │ ubuntu │ disabled │
└─────────────────────┴────┴─────────┴──────┴───────┴────────┴─────────┴────────┴─────┴────────────┴────────┴──────────┘
 Use `pm2 show <id|name>` to get more details about an app

As far as I can tell everything seems to be set up correctly, but the log entries from the server show that no files are being downloaded:

{"message":"Starting server!","level":"info","timestamp":"2020-01-11 11:56:38"}
{"message":"Starting initial scraping.","level":"info","timestamp":"2020-01-11 11:56:38"}
{"message":"[scraper] found 0 valid links.","level":"info","timestamp":"2020-01-11 11:56:42"}
{"message":"[scraper] did not download any files.","level":"info","timestamp":"2020-01-11 11:56:42"}
{"message":"Loading data.","level":"info","timestamp":"2020-01-11 11:56:42"}
{"message":"[loader] loading data ...","level":"info","timestamp":"2020-01-11 11:56:42"}

Meanwhile, it works when run on my local (Ubuntu) machine:

{"message":"Starting server!","level":"info","timestamp":"2020-01-11 12:52:47"}
{"message":"Starting initial scraping.","level":"info","timestamp":"2020-01-11 12:52:47"}
{"message":"[scraper] found 5 valid links.","level":"info","timestamp":"2020-01-11 12:52:49"}
{"message":"[scraper] downloading file: 19138-data.xml from href: http://www.bundestag.de/resource/blob/674998/86249f57e79b8308e820d6581e7e2a95/19138-data.xml","level":"info","timestamp":"2020-01-11 12:52:49"}
{"message":"[scraper] downloading file: 19136-data.xml from href: http://www.bundestag.de/resource/blob/674328/0e9d258d50d08923fe6d6ad1381bdb3f/19136-data.xml","level":"info","timestamp":"2020-01-11 12:52:49"}
{"message":"[scraper] downloading file: 19137-data.xml from href: http://www.bundestag.de/resource/blob/674730/2bc751b619488227c9267e3cbe12c4c3/19137-data.xml","level":"info","timestamp":"2020-01-11 12:52:49"}
{"message":"[scraper] downloading file: 19135-data.xml from href: http://www.bundestag.de/resource/blob/673576/147b80c74d6d681833568cfcf36f9670/19135-data.xml","level":"info","timestamp":"2020-01-11 12:52:49"}
{"message":"[scraper] downloading file: 19134-data.xml from href: http://www.bundestag.de/resource/blob/673116/982f9d0ec845b85bddd289ede4a589fd/19134-data.xml","level":"info","timestamp":"2020-01-11 12:52:49"}
{"message":"[scraper] finished downloading  all 5 files.","level":"info","timestamp":"2020-01-11 12:52:51"}
{"message":"Loading data.","level":"info","timestamp":"2020-01-11 12:52:51"}

I am a bit lost on how to track down the missing piece here. Any help is greatly appreciated!


Solution

  • It now works after doing the following:

    1. Adding xvfb (via the xvfb npm package) to the code like this:

    const Xvfb = require('xvfb');

    let xvfb = new Xvfb();
    try {
      xvfb.startSync();   // start a virtual framebuffer before scraping
    }
    catch (e) {
      console.log(e);
    }
    // scraping
    xvfb.stopSync();      // stop it again when done

    2. Changing this line: .wait('body') to .wait(2000).
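
Putting both changes together, the relevant part of the scraper ends up looking roughly like the sketch below. This is a reconstruction based on the two steps above rather than the final code from the repository; in particular, calling xvfb.stopSync() inside then/catch (instead of directly after the chain) is an assumption made here because the Nightmare chain is asynchronous.

const Xvfb = require('xvfb');
const Nightmare = require('nightmare');

exports.scrape = function(cb) {
    // Start a virtual framebuffer so Electron has a display on the headless server.
    let xvfb = new Xvfb();
    try {
        xvfb.startSync();
    } catch (e) {
        console.log(e);
    }

    let nightmare = new Nightmare({ show: false });
    const url = 'https://www.bundestag.de/services/opendata';

    nightmare
        .goto(url)
        .wait(2000)           // fixed delay instead of .wait('body')
        .evaluate(() => document.querySelector('body').innerHTML)
        .end()
        .then(response => {
            xvfb.stopSync();  // tear the virtual display down again
            // ... extract links and download the files as before ...
        })
        .catch(err => {
            xvfb.stopSync();
            // ... log the error and invoke the callback (cb) as before ...
        });
}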