diff --git a/scrapyd/Dockerfile b/scrapyd/Dockerfile index 4986a69..5eb112b 100644 --- a/scrapyd/Dockerfile +++ b/scrapyd/Dockerfile @@ -37,6 +37,7 @@ RUN apt-get update \ && curl -sSL https://bootstrap.pypa.io/get-pip.py | python \ && pip install git+https://github.com/scrapy/scrapy.git \ git+https://github.com/scrapy/scrapyd.git \ + git+https://github.com/scrapy/scrapyd-client.git \ && curl -sSL https://github.com/scrapy/scrapy/raw/master/extras/scrapy_bash_completion -o /etc/bash_completion.d/scrapy_bash_completion \ && echo 'source /etc/bash_completion.d/scrapy_bash_completion' >> /root/.bashrc \ && apt-get purge -y --auto-remove autoconf \ diff --git a/scrapyd/README.md b/scrapyd/README.md index 767dfd6..6bead20 100644 --- a/scrapyd/README.md +++ b/scrapyd/README.md @@ -7,11 +7,15 @@ data you need from websites. In a fast, simple, yet extensible way. [Scrapyd][2] is a service for running Scrapy spiders. It allows you to deploy your Scrapy projects and control their spiders using a HTTP JSON API. +[Scrapyd-client][3] is a client for Scrapyd. It provides the scrapyd-deploy +utility, which allows you to deploy your project to a Scrapyd server. + This image is based on `debian:jessie` without any useless packages installed. -Only two latest python packages are installed: +Only the three latest Python packages are installed: - `scrapy`: git+https://github.com/scrapy/scrapy.git - `scrapyd`: git+https://github.com/scrapy/scrapyd.git +- `scrapyd-client`: git+https://github.com/scrapy/scrapyd-client.git Please use this image as base for your own project. @@ -55,3 +59,4 @@ $ docker run -it --rm -v `pwd`:/code -w /code vimagick/scrapyd bash [1]: https://github.com/scrapy/scrapy [2]: https://github.com/scrapy/scrapyd [3]: https://github.com/scrapy/scrapyd-client