Compare commits
159 Commits
Author | SHA1 | Date |
---|---|---|
Inex Code | f3cb4ce73c | |
Inex Code | 8fc4ebbcc5 | |
Inex Code | d7cc4d1d88 | |
Inex Code | a2fe689cab | |
Inex Code | 950e03fc35 | |
Inex Code | fb9ed11b5d | |
Inex Code | 6177cc4d43 | |
Inex Code | dde5fa4896 | |
Inex Code | 428af93950 | |
Inex Code | 1c5faa1ee1 | |
Inex Code | 88f1c54f8e | |
Inex Code | e127ac3596 | |
Tao Bojlén | 2e87b53024 | |
Tao Bojlén | 0dd39e6984 | |
Tao Bojlén | 15ad5f1615 | |
Tao Bojlén | 153af37ccf | |
Tao Bojlén | 5f8bd7f891 | |
Tao Bojlén | e1d48e70c4 | |
Tao Bojlén | db04c3a67a | |
Tao Bojlén | 55994be8bc | |
Tao Bojlén | 4691b8dfd7 | |
Tao Bojlén | 3148578a7d | |
Tao Bojlén | 41f5461386 | |
Tao Bojlén | a276eccba4 | |
Tao Bojlén | 49b74c189b | |
Tao Bojlén | 6f75565061 | |
Tao Bojlén | c1d939e7b3 | |
Tao Bojlén | 2eb7ea98d1 | |
Tao Bojlén | f5daa648f7 | |
Tao Bojlén | 3cdc0dc49a | |
Tao Bojlén | 15e0d982e9 | |
Tao Bojlén | e51edef22d | |
Tao Bojlén | a8874c82ba | |
Tao Bojlén | 9078c0315d | |
Tao Bojlén | 1c251866ff | |
Tao Bojlén | 3db98cbfa0 | |
Tao Bojlén | 94034ee538 | |
Tao Bojlén | dd2b43a9bf | |
Tao Bojlén | e532173322 | |
Tao Bojlén | 9b9dec818a | |
Tao Bojlén | d7b3cf8932 | |
Tao Bojlén | 51daf3efae | |
Tao Bojlén | 5d8b8c6dbd | |
Tao Bojlén | 8912ccc6f8 | |
Tao Bror Bojlén | 37c00908ec | |
Tao Bror Bojlén | 41ac4ca9a8 | |
Tao Bror Bojlén | 99f2b247dc | |
Tao Bror Bojlén | 4b332ba980 | |
Tao Bror Bojlén | 5b54e65827 | |
Tao Bror Bojlén | 902358a022 | |
Tao Bror Bojlén | b525a08521 | |
Tao Bror Bojlén | 493ee2778d | |
Tao Bror Bojlén | 074f649f26 | |
Tao Bror Bojlén | deda156837 | |
Tao Bror Bojlén | 1c76a38dbc | |
Tao Bror Bojlén | 3725a22c1c | |
Tao Bror Bojlén | 21d0e523dd | |
Tao Bror Bojlén | 30c5154d16 | |
Tao Bror Bojlén | 6e826f153a | |
Tao Bror Bojlén | 6715d9395f | |
Tao Bror Bojlén | 8558f96635 | |
Tao Bror Bojlén | 44040abd1b | |
Tao Bror Bojlén | 637278ad74 | |
Tao Bror Bojlén | 8c83e5fcf9 | |
Tao Bror Bojlén | de22141a05 | |
Tao Bror Bojlén | 5525bcb204 | |
Tao Bojlén | 1e9ec13221 | |
Tao Bror Bojlén | a80ba972aa | |
Tao Bror Bojlén | b653e1a22a | |
Tao Bror Bojlén | c6b6144d2a | |
Tao Bror Bojlén | b51ddddbeb | |
Tao Bror Bojlén | 7d97e2e397 | |
Tao Bror Bojlén | e3ab73529c | |
Tao Bror Bojlén | f2bd81ca4e | |
Tao Bror Bojlén | c538b7a4b7 | |
Tao Bror Bojlén | 5a9508f3c9 | |
Tao Bror Bojlén | 1f62f0efd5 | |
Tao Bror Bojlén | fbc8e11722 | |
Tao Bror Bojlén | d9b3024fae | |
Tao Bror Bojlén | 50968f37b5 | |
Tao Bror Bojlén | ee48bc8d10 | |
Tao Bror Bojlén | 19b3a3806d | |
Tao Bror Bojlén | 1c1ef37df9 | |
Tao Bror Bojlén | 3b28803bfa | |
Tao Bror Bojlén | f134941eb2 | |
Tao Bror Bojlén | f572cd937e | |
Tao Bror Bojlén | 8935872df9 | |
Tao Bror Bojlén | f95d2dd9e9 | |
Tao Bror Bojlén | 02dbab3d17 | |
Tao Bror Bojlén | c2f842263c | |
Tao Bojlén | 693cf2b2d9 | |
Tao Bror Bojlén | 4d333dd14c | |
Tao Bror Bojlén | 82734947f1 | |
Tao Bror Bojlén | 53bc0d3090 | |
Tao Bror Bojlén | 93ef16589a | |
Tao Bror Bojlén | 1bf600b1ac | |
Tao Bror Bojlén | cee52de603 | |
Tao Bojlén | 6972237d21 | |
Tao Bror Bojlén | 528d4e4386 | |
Tao Bror Bojlén | 0a3e1e638c | |
Tao Bror Bojlén | 55f55216d6 | |
Tao Bror Bojlén | 2f1a654520 | |
Tao Bror Bojlén | c2124468a7 | |
Tao Bror Bojlén | 67baa0ec04 | |
Tao Bror Bojlén | d195cc302d | |
Tao Bror Bojlén | acfa38505f | |
Tao Bojlén | 3360f4c28e | |
Tao Bror Bojlén | fc7b698a81 | |
Tao Bror Bojlén | d5d4a5f0f0 | |
Tao Bror Bojlén | 7427a78b65 | |
Tao Bror Bojlén | 617e2f43ee | |
Tao Bror Bojlén | 34d4f58ff7 | |
Tao Bror Bojlén | 92ebdc5580 | |
Tao Bror Bojlén | 7469a97769 | |
Tao Bror Bojlén | adde4810ba | |
Tao Bror Bojlén | cc167d748e | |
Tao Bror Bojlén | ec19fe2fcf | |
Tao Bror Bojlén | f033b1eec0 | |
Tao Bror Bojlén | 4a7804d987 | |
Tao Bror Bojlén | fc836256ba | |
Tao Bror Bojlén | b0cdab2fbd | |
Tao Bror Bojlén | 2c035892d4 | |
Tao Bror Bojlén | 144a6e842f | |
Tao Bror Bojlén | 992ed6b9bb | |
Tao Bror Bojlén | e8b951485e | |
Tao Bror Bojlén | 271c67ea8e | |
Tao Bror Bojlén | 71b76a4332 | |
Tao Bror Bojlén | 83b21de97b | |
Tao Bror Bojlén | b2e849cf9f | |
Tao Bojlén | 3320e050c8 | |
Tao Bror Bojlén | 9a0bbbb7d9 | |
Tao Bojlén | d1b3f3f023 | |
Tao Bojlén | 8fbfb1ce69 | |
Tao Bror Bojlén | 186abbd9cf | |
Tao Bror Bojlén | 30c5bbe2b9 | |
Tao Bror Bojlén | d9b9081ec3 | |
Tao Bror Bojlén | 012fc1d85a | |
Tao Bojlén | f0f3f76f6c | |
Tao Bror Bojlén | 1e4cebb26b | |
Tao Bojlén | cf9ac30b1e | |
Tao Bror Bojlén | c01e324e91 | |
Tao Bror Bojlén | e4f98a2b2f | |
Tao Bror Bojlén | decdadf76e | |
Tao Bojlén | 75e66affe3 | |
Tao Bror Bojlén | cc541b9ddf | |
Tao Bror Bojlén | a4e02e6f76 | |
Tao Bror Bojlén | 2c036a9cf1 | |
Tao Bror Bojlén | de6d997976 | |
Tao Bror Bojlén | 9bcec30dd6 | |
Tao Bojlén | 5a10c6c9e3 | |
Tao Bror Bojlén | 57393cc381 | |
Tao Bror Bojlén | 3a50e5cb4d | |
Tao Bror Bojlén | 80c1462866 | |
Tao Bror Bojlén | 76ebf50c40 | |
Tao Bror Bojlén | 62bb309df7 | |
Tao Bror Bojlén | 7324669322 | |
Tao Bror Bojlén | a39887856d | |
Tao Bror Bojlén | cc95d19ee8 | |
Tao Bror Bojlén | 287c7b5624 |
|
@ -0,0 +1,43 @@
|
|||
name: Elixir CI
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ master ]
|
||||
pull_request:
|
||||
branches: [ master ]
|
||||
|
||||
jobs:
|
||||
build:
|
||||
|
||||
name: Build and test
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Set up Elixir
|
||||
uses: erlef/setup-elixir@885971a72ed1f9240973bd92ab57af8c1aa68f24
|
||||
with:
|
||||
elixir-version: '1.12.2' # Define the elixir version [required]
|
||||
otp-version: '24.0.4' # Define the OTP version [required]
|
||||
- name: Restore dependencies cache
|
||||
uses: actions/cache@v2
|
||||
with:
|
||||
working-directory: ./backend
|
||||
path: deps
|
||||
key: ${{ runner.os }}-mix-${{ hashFiles('**/mix.lock') }}
|
||||
restore-keys: ${{ runner.os }}-mix-
|
||||
- name: Install dependencies
|
||||
working-directory: ./backend
|
||||
run: |
|
||||
mix local.hex --force
|
||||
mix local.rebar --force
|
||||
mix deps.get
|
||||
- name: Compile dependencies
|
||||
working-directory: ./backend
|
||||
run: mix deps.compile
|
||||
- name: Run Credo
|
||||
working-directory: ./backend
|
||||
run: mix credo --strict
|
||||
- name: Run sobelow
|
||||
working-directory: ./backend
|
||||
run: mix sobelow --config
|
|
@ -0,0 +1,37 @@
|
|||
# This is a basic workflow to help you get started with Actions
|
||||
|
||||
name: CI
|
||||
|
||||
# Controls when the workflow will run
|
||||
on:
|
||||
# Triggers the workflow on push or pull request events but only for the master branch
|
||||
push:
|
||||
branches: [ master ]
|
||||
pull_request:
|
||||
branches: [ master ]
|
||||
|
||||
# Allows you to run this workflow manually from the Actions tab
|
||||
workflow_dispatch:
|
||||
|
||||
# A workflow run is made up of one or more jobs that can run sequentially or in parallel
|
||||
jobs:
|
||||
# This workflow contains a single job called "build"
|
||||
build:
|
||||
# The type of runner that the job will run on
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
# Steps represent a sequence of tasks that will be executed as part of the job
|
||||
steps:
|
||||
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- name: Setup Node.js environment
|
||||
uses: actions/setup-node@v2.3.0
|
||||
|
||||
- name: Setup deps
|
||||
working-directory: ./frontend
|
||||
run: yarn install
|
||||
|
||||
- name: Lint
|
||||
working-directory: ./frontend
|
||||
run: yarn lint
|
|
@ -1,10 +1,3 @@
|
|||
include:
|
||||
template: Dependency-Scanning.gitlab-ci.yml
|
||||
|
||||
dependency_scanning:
|
||||
except:
|
||||
- develop
|
||||
|
||||
test-frontend:
|
||||
image: node:lts-alpine
|
||||
stage: test
|
||||
|
@ -20,17 +13,16 @@ test-frontend:
|
|||
- frontend/.yarn
|
||||
only:
|
||||
changes:
|
||||
- frontend/*
|
||||
- frontend/**/*
|
||||
|
||||
backend-sobelow:
|
||||
test-backend:
|
||||
stage: test
|
||||
image: elixir:1.9
|
||||
image: elixir:1.10
|
||||
variables:
|
||||
MIX_ENV: test
|
||||
only:
|
||||
changes:
|
||||
- backend/*
|
||||
except:
|
||||
- develop
|
||||
- master
|
||||
- backend/**/*
|
||||
before_script:
|
||||
- cd backend
|
||||
script:
|
||||
|
@ -38,33 +30,13 @@ backend-sobelow:
|
|||
- mix local.rebar --force
|
||||
- mix deps.get
|
||||
- mix deps.compile
|
||||
- mix credo --strict
|
||||
- mix sobelow --config
|
||||
cache:
|
||||
paths:
|
||||
- backend/deps/
|
||||
- backend/_build/
|
||||
|
||||
deploy-backend-develop:
|
||||
stage: deploy
|
||||
environment:
|
||||
name: develop
|
||||
url: https://phoenix.api-develop.fediverse.space
|
||||
image: ilyasemenov/gitlab-ci-git-push
|
||||
only:
|
||||
- develop
|
||||
script:
|
||||
- git-push dokku@api-develop.fediverse.space:phoenix develop
|
||||
|
||||
deploy-gephi-develop:
|
||||
stage: deploy
|
||||
image: ilyasemenov/gitlab-ci-git-push
|
||||
environment:
|
||||
name: develop
|
||||
only:
|
||||
- develop
|
||||
script:
|
||||
- git-push dokku@api-develop.fediverse.space:gephi develop
|
||||
|
||||
deploy-backend-production:
|
||||
stage: deploy
|
||||
environment:
|
||||
|
@ -73,6 +45,8 @@ deploy-backend-production:
|
|||
image: ilyasemenov/gitlab-ci-git-push
|
||||
only:
|
||||
- master
|
||||
except:
|
||||
- schedules
|
||||
script:
|
||||
- git-push dokku@api.fediverse.space:phoenix master
|
||||
|
||||
|
@ -83,5 +57,7 @@ deploy-gephi-production:
|
|||
name: production
|
||||
only:
|
||||
- master
|
||||
except:
|
||||
- schedules
|
||||
script:
|
||||
- git-push dokku@api.fediverse.space:gephi master
|
|
@ -0,0 +1,6 @@
|
|||
Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
|
||||
|
||||
Files: *
|
||||
Copyright: 2018-2019 Tao Bojlén
|
||||
License: AGPL-3.0-or-later
|
||||
|
|
@ -1,7 +1,6 @@
|
|||
{
|
||||
"recommendations": [
|
||||
"jakebecker.elixir-ls",
|
||||
"ms-vscode.vscode-typescript-tslint-plugin",
|
||||
"kevinmcgowan.typescriptimport",
|
||||
"msjsdiag.debugger-for-chrome"
|
||||
]
|
||||
|
|
|
@ -5,12 +5,15 @@
|
|||
"version": "0.2.0",
|
||||
"configurations": [
|
||||
{
|
||||
"type": "chrome",
|
||||
"type": "mix_task",
|
||||
"request": "launch",
|
||||
"name": "Launch Chrome",
|
||||
"url": "http://localhost:3000",
|
||||
"webRoot": "${workspaceFolder}/frontend/src",
|
||||
"runtimeExecutable": "/usr/bin/chromium-browser"
|
||||
}
|
||||
"name": "phx.server",
|
||||
"task": "phx.server",
|
||||
"taskArgs": [],
|
||||
"projectDir": "${workspaceRoot}/backend",
|
||||
"env": {
|
||||
"SKIP_CRAWL": "1"
|
||||
}
|
||||
},
|
||||
]
|
||||
}
|
||||
}
|
234
CHANGELOG.md
234
CHANGELOG.md
|
@ -19,6 +19,240 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
|
|||
|
||||
### Security
|
||||
|
||||
## [2.9.6 - 2020-10-13]
|
||||
|
||||
### Added
|
||||
|
||||
- Added link to personal website on About page.
|
||||
|
||||
### Fixed
|
||||
|
||||
- Allow `data:` images in Netlify CSP.
|
||||
- Fix inability to DM login links in releases (#104).
|
||||
|
||||
## [2.9.5 - 2020-10-11]
|
||||
|
||||
### Fixed
|
||||
|
||||
- Fixed crawler not finding API in some cases
|
||||
|
||||
## [2.9.4 - 2020-10-09]
|
||||
|
||||
### Fixed
|
||||
|
||||
- Fix CSP issues for Plausible analytics
|
||||
|
||||
## [2.9.3 - 2020-10-09]
|
||||
|
||||
### Added
|
||||
|
||||
- Allow Plausible privacy-preserving analytics in CSP
|
||||
|
||||
### Changed
|
||||
|
||||
- Update dependencies
|
||||
- Update to Elixir 1.10
|
||||
|
||||
### Fixed
|
||||
|
||||
- Fixed CSP headers for data: images
|
||||
|
||||
## [2.9.2 - 2020-08-31]
|
||||
|
||||
### Removed
|
||||
|
||||
- Remove staging server
|
||||
|
||||
## [2.9.1 - 2020-08-31]
|
||||
|
||||
### Fixed
|
||||
|
||||
- Added AppSignal logo to "Special thanks" section
|
||||
|
||||
## [2.9.0 - 2020-06-19]
|
||||
|
||||
### Changed
|
||||
|
||||
- Bring back `develop` staging backup (now managed in DNS)
|
||||
- Increase default number of concurrent crawlers to 100
|
||||
- Accessibility improvements (according to axe review)
|
||||
- Update dependencies
|
||||
|
||||
### Security
|
||||
- Add rate limiting of auth endpoints
|
||||
- Added security headers to netlify frontend
|
||||
- Sanitize crawled HTML in the backend
|
||||
|
||||
## [2.8.6 - 2020-01-16]
|
||||
|
||||
### Changed
|
||||
|
||||
- Update default number of concurrent crawlers
|
||||
- Crawl every 30 minutes instead of every hour
|
||||
|
||||
## [2.8.5 - 2019-12-25]
|
||||
|
||||
### Fixed
|
||||
|
||||
- Fixed link to Mastodon account
|
||||
|
||||
## [2.8.4 - 2019-11-21]
|
||||
|
||||
### Changed
|
||||
|
||||
- Update links to @fediversespace Mastodon account
|
||||
|
||||
### Removed
|
||||
|
||||
- Remove staging backend server
|
||||
|
||||
### Fixed
|
||||
|
||||
- Fixed frontend crash when instance node missing
|
||||
|
||||
## [2.8.3 - 2019-11-19]
|
||||
|
||||
### Changed
|
||||
|
||||
- Update dependencies
|
||||
|
||||
### Fixed
|
||||
|
||||
- Fixed some unsuccessful crawls being saved without their errors
|
||||
|
||||
## [2.8.2 - 2019-08-31]
|
||||
|
||||
### Fixed
|
||||
|
||||
- Fix insularity score only working for > 0
|
||||
|
||||
## [2.8.1 - 2019-08-31]
|
||||
|
||||
### Fixed
|
||||
|
||||
- Fixed error when viewing some uncrawlable instances
|
||||
- Fix navbar z-index
|
||||
- Optimize query used for generating status rate
|
||||
|
||||
## [2.8.0 - 2019-08-29]
|
||||
|
||||
### Added
|
||||
|
||||
- Add support for logging in via an ActivityPub direct message to the instance admin.
|
||||
- Added option to hide edges between instances if there are only mentions in one direction (off by default).
|
||||
- Added note to neighbors tab to make it explicit that blocked instances may appear.
|
||||
- Added federation tab that shows federation restrictions (only available for some Pleroma instances).
|
||||
- Add tabular view of instances.
|
||||
|
||||
### Changed
|
||||
|
||||
- Edges are no longer shown between instances where one blocks the other (based on the federation list in nodeinfo).
|
||||
|
||||
## [2.7.1 - 2018-08-23]
|
||||
|
||||
### Added
|
||||
|
||||
- Add caching to graph + instance endpoints to better handle traffic spikes.
|
||||
|
||||
### Fixed
|
||||
|
||||
- Added ON DELETE to `most_recent_crawl` table, such that it can handle previously-crawled but now-dead instances.
|
||||
- You can now login to the admin view by clicking, not just by pressing enter.
|
||||
- Add handling for weirdly-formatted Friendica peers.
|
||||
- If the details of an instance fail to load, it's now easy to dismiss the error.
|
||||
|
||||
## [2.7.0 - 2018-08-18]
|
||||
|
||||
### Added
|
||||
|
||||
- Add Friendica crawler (only supports peers; there's no timeline API endpoint.)
|
||||
- Color more server types on the map -- Hubzilla, Plume, Pixelfed, and Wordpress.
|
||||
|
||||
### Changed
|
||||
|
||||
- Cleaned up ElasticSearch configuration in backend.
|
||||
|
||||
### Removed
|
||||
|
||||
- Remove color-coding by activity per user. The vast majority of instances had the exact same color so this wasn't very useful.
|
||||
|
||||
## [2.6.1 - 2019-08-10]
|
||||
|
||||
### Changed
|
||||
|
||||
- Added missing indices on `crawls` and `crawl_interactions` tables.
|
||||
- Added table to store most recent crawl. This speeds up the instance view by a lot!
|
||||
|
||||
## [2.6.0 - 2019-08-10]
|
||||
|
||||
### Added
|
||||
|
||||
- Add nodeinfo and GNU Social crawler.
|
||||
- Thanks to nodeinfo, Peertube and Writefreely are now also displayed on the map.
|
||||
- Note that the information about connections comes from other instances.
|
||||
|
||||
### Changed
|
||||
|
||||
- You can now zoom slightly further out on the map to see more of the fediverse at once.
|
||||
|
||||
### Fixed
|
||||
|
||||
- Database deletions are now properly handled with `ON DELETE CASCADE` where necessary.
|
||||
|
||||
## [2.5.0 - 2019-08-08]
|
||||
|
||||
### Added
|
||||
|
||||
- Added Misskey crawler.
|
||||
|
||||
### Changed
|
||||
|
||||
- Crawl instances that are down or unrecognized less often.
|
||||
|
||||
### Fixed
|
||||
|
||||
- Fixed broken instance view on mobile devices.
|
||||
- Increased database connection timeout - required as the database grows!
|
||||
|
||||
## [2.4.1 - 2019-08-04]
|
||||
|
||||
### Fixed
|
||||
|
||||
- Fixed a wonky search UI when there are no results.
|
||||
|
||||
## [2.4.0 - 2019-08-04]
|
||||
|
||||
### Added
|
||||
|
||||
- You can now click a button in the search bar to search (you can also still just press enter, of course).
|
||||
- You can now filter searches by instance type.
|
||||
- Added toggle to show/hide edges on graph.
|
||||
- Full-text search across instance descriptions now supports the following languages: arabic, armenian, basque,
|
||||
bengali, brazilian, bulgarian, catalan, cjk (i.e. chinese, japanese, korean), czech, danish, dutch, english, finnish,
|
||||
french, galician, german, greek, hindi, hungarian, indonesian, irish, italian, latvian, lithuanian, norwegian,
|
||||
persian, romanian, russian, sorani, spanish, swedish, turkish, thai.
|
||||
|
||||
## [2.3.1 - 2019-08-03]
|
||||
|
||||
### Added
|
||||
|
||||
- Added a warning on mobile devices suggesting to view the site on a larger computer.
|
||||
|
||||
### Changed
|
||||
|
||||
- Performance improvements when opening the app on something that isn't the graph.
|
||||
- There are now fewer irrelevant search results.
|
||||
- Clarify that the admin page only works for Mastodon and Pleroma instances.
|
||||
|
||||
### Fixed
|
||||
|
||||
- Fixed some instances being duplicated (due to un-normalized data).
|
||||
- Fixed mobile instance view erroring for uncrawled instances.
|
||||
- Improved error handling in admin login page.
|
||||
- Instances that opt-out will no longer show up in search results ever, nor are they accessible through the API.
|
||||
|
||||
### Security
|
||||
|
||||
## [2.3.0 - 2019-08-02]
|
||||
|
||||
### Added
|
||||
|
|
8
LICENSE
8
LICENSE
|
@ -629,12 +629,12 @@ to attach them to the start of each source file to most effectively
|
|||
state the exclusion of warranty; and each file should have at least
|
||||
the "copyright" line and a pointer to where the full notice is found.
|
||||
|
||||
<one line to give the program's name and a brief idea of what it does.>
|
||||
Copyright (C) <year> <name of author>
|
||||
fediverse.space
|
||||
Copyright (C) 2019 fediverse.space
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU Affero General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
it under the terms of the GNU Affero General Public License as published
|
||||
by the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
|
|
|
@ -0,0 +1,613 @@
|
|||
GNU AFFERO GENERAL PUBLIC LICENSE
|
||||
|
||||
Version 3, 19 November 2007
|
||||
|
||||
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
|
||||
|
||||
Everyone is permitted to copy and distribute verbatim copies of this license
|
||||
document, but changing it is not allowed.
|
||||
|
||||
Preamble
|
||||
|
||||
The GNU Affero General Public License is a free, copyleft license for software
|
||||
and other kinds of works, specifically designed to ensure cooperation with
|
||||
the community in the case of network server software.
|
||||
|
||||
The licenses for most software and other practical works are designed to take
|
||||
away your freedom to share and change the works. By contrast, our General
|
||||
Public Licenses are intended to guarantee your freedom to share and change
|
||||
all versions of a program--to make sure it remains free software for all its
|
||||
users.
|
||||
|
||||
When we speak of free software, we are referring to freedom, not price. Our
|
||||
General Public Licenses are designed to make sure that you have the freedom
|
||||
to distribute copies of free software (and charge for them if you wish), that
|
||||
you receive source code or can get it if you want it, that you can change
|
||||
the software or use pieces of it in new free programs, and that you know you
|
||||
can do these things.
|
||||
|
||||
Developers that use our General Public Licenses protect your rights with two
|
||||
steps: (1) assert copyright on the software, and (2) offer you this License
|
||||
which gives you legal permission to copy, distribute and/or modify the software.
|
||||
|
||||
A secondary benefit of defending all users' freedom is that improvements made
|
||||
in alternate versions of the program, if they receive widespread use, become
|
||||
available for other developers to incorporate. Many developers of free software
|
||||
are heartened and encouraged by the resulting cooperation. However, in the
|
||||
case of software used on network servers, this result may fail to come about.
|
||||
The GNU General Public License permits making a modified version and letting
|
||||
the public access it on a server without ever releasing its source code to
|
||||
the public.
|
||||
|
||||
The GNU Affero General Public License is designed specifically to ensure that,
|
||||
in such cases, the modified source code becomes available to the community.
|
||||
It requires the operator of a network server to provide the source code of
|
||||
the modified version running there to the users of that server. Therefore,
|
||||
public use of a modified version, on a publicly accessible server, gives the
|
||||
public access to the source code of the modified version.
|
||||
|
||||
An older license, called the Affero General Public License and published by
|
||||
Affero, was designed to accomplish similar goals. This is a different license,
|
||||
not a version of the Affero GPL, but Affero has released a new version of
|
||||
the Affero GPL which permits relicensing under this license.
|
||||
|
||||
The precise terms and conditions for copying, distribution and modification
|
||||
follow.
|
||||
|
||||
TERMS AND CONDITIONS
|
||||
|
||||
0. Definitions.
|
||||
|
||||
"This License" refers to version 3 of the GNU Affero General Public License.
|
||||
|
||||
"Copyright" also means copyright-like laws that apply to other kinds of works,
|
||||
such as semiconductor masks.
|
||||
|
||||
"The Program" refers to any copyrightable work licensed under this License.
|
||||
Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals
|
||||
or organizations.
|
||||
|
||||
To "modify" a work means to copy from or adapt all or part of the work in
|
||||
a fashion requiring copyright permission, other than the making of an exact
|
||||
copy. The resulting work is called a "modified version" of the earlier work
|
||||
or a work "based on" the earlier work.
|
||||
|
||||
A "covered work" means either the unmodified Program or a work based on the
|
||||
Program.
|
||||
|
||||
To "propagate" a work means to do anything with it that, without permission,
|
||||
would make you directly or secondarily liable for infringement under applicable
|
||||
copyright law, except executing it on a computer or modifying a private copy.
|
||||
Propagation includes copying, distribution (with or without modification),
|
||||
making available to the public, and in some countries other activities as
|
||||
well.
|
||||
|
||||
To "convey" a work means any kind of propagation that enables other parties
|
||||
to make or receive copies. Mere interaction with a user through a computer
|
||||
network, with no transfer of a copy, is not conveying.
|
||||
|
||||
An interactive user interface displays "Appropriate Legal Notices" to the
|
||||
extent that it includes a convenient and prominently visible feature that
|
||||
(1) displays an appropriate copyright notice, and (2) tells the user that
|
||||
there is no warranty for the work (except to the extent that warranties are
|
||||
provided), that licensees may convey the work under this License, and how
|
||||
to view a copy of this License. If the interface presents a list of user commands
|
||||
or options, such as a menu, a prominent item in the list meets this criterion.
|
||||
|
||||
1. Source Code.
|
||||
|
||||
The "source code" for a work means the preferred form of the work for making
|
||||
modifications to it. "Object code" means any non-source form of a work.
|
||||
|
||||
A "Standard Interface" means an interface that either is an official standard
|
||||
defined by a recognized standards body, or, in the case of interfaces specified
|
||||
for a particular programming language, one that is widely used among developers
|
||||
working in that language.
|
||||
|
||||
The "System Libraries" of an executable work include anything, other than
|
||||
the work as a whole, that (a) is included in the normal form of packaging
|
||||
a Major Component, but which is not part of that Major Component, and (b)
|
||||
serves only to enable use of the work with that Major Component, or to implement
|
||||
a Standard Interface for which an implementation is available to the public
|
||||
in source code form. A "Major Component", in this context, means a major essential
|
||||
component (kernel, window system, and so on) of the specific operating system
|
||||
(if any) on which the executable work runs, or a compiler used to produce
|
||||
the work, or an object code interpreter used to run it.
|
||||
|
||||
The "Corresponding Source" for a work in object code form means all the source
|
||||
code needed to generate, install, and (for an executable work) run the object
|
||||
code and to modify the work, including scripts to control those activities.
|
||||
However, it does not include the work's System Libraries, or general-purpose
|
||||
tools or generally available free programs which are used unmodified in performing
|
||||
those activities but which are not part of the work. For example, Corresponding
|
||||
Source includes interface definition files associated with source files for
|
||||
the work, and the source code for shared libraries and dynamically linked
|
||||
subprograms that the work is specifically designed to require, such as by
|
||||
intimate data communication or control flow between those
|
||||
|
||||
subprograms and other parts of the work.
|
||||
|
||||
The Corresponding Source need not include anything that users can regenerate
|
||||
automatically from other parts of the Corresponding Source.
|
||||
|
||||
The Corresponding Source for a work in source code form is that same work.
|
||||
|
||||
2. Basic Permissions.
|
||||
|
||||
All rights granted under this License are granted for the term of copyright
|
||||
on the Program, and are irrevocable provided the stated conditions are met.
|
||||
This License explicitly affirms your unlimited permission to run the unmodified
|
||||
Program. The output from running a covered work is covered by this License
|
||||
only if the output, given its content, constitutes a covered work. This License
|
||||
acknowledges your rights of fair use or other equivalent, as provided by copyright
|
||||
law.
|
||||
|
||||
You may make, run and propagate covered works that you do not convey, without
|
||||
conditions so long as your license otherwise remains in force. You may convey
|
||||
covered works to others for the sole purpose of having them make modifications
|
||||
exclusively for you, or provide you with facilities for running those works,
|
||||
provided that you comply with the terms of this License in conveying all material
|
||||
for which you do not control copyright. Those thus making or running the covered
|
||||
works for you must do so exclusively on your behalf, under your direction
|
||||
and control, on terms that prohibit them from making any copies of your copyrighted
|
||||
material outside their relationship with you.
|
||||
|
||||
Conveying under any other circumstances is permitted solely under the conditions
|
||||
stated below. Sublicensing is not allowed; section 10 makes it unnecessary.
|
||||
|
||||
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
|
||||
|
||||
No covered work shall be deemed part of an effective technological measure
|
||||
under any applicable law fulfilling obligations under article 11 of the WIPO
|
||||
copyright treaty adopted on 20 December 1996, or similar laws prohibiting
|
||||
or restricting circumvention of such measures.
|
||||
|
||||
When you convey a covered work, you waive any legal power to forbid circumvention
|
||||
of technological measures to the extent such circumvention is effected by
|
||||
exercising rights under this License with respect to the covered work, and
|
||||
you disclaim any intention to limit operation or modification of the work
|
||||
as a means of enforcing, against the work's users, your or third parties'
|
||||
legal rights to forbid circumvention of technological measures.
|
||||
|
||||
4. Conveying Verbatim Copies.
|
||||
|
||||
You may convey verbatim copies of the Program's source code as you receive
|
||||
it, in any medium, provided that you conspicuously and appropriately publish
|
||||
on each copy an appropriate copyright notice; keep intact all notices stating
|
||||
that this License and any non-permissive terms added in accord with section
|
||||
7 apply to the code; keep intact all notices of the absence of any warranty;
|
||||
and give all recipients a copy of this License along with the Program.
|
||||
|
||||
You may charge any price or no price for each copy that you convey, and you
|
||||
may offer support or warranty protection for a fee.
|
||||
|
||||
5. Conveying Modified Source Versions.
|
||||
|
||||
You may convey a work based on the Program, or the modifications to produce
|
||||
it from the Program, in the form of source code under the terms of section
|
||||
4, provided that you also meet all of these conditions:
|
||||
|
||||
a) The work must carry prominent notices stating that you modified it, and
|
||||
giving a relevant date.
|
||||
|
||||
b) The work must carry prominent notices stating that it is released under
|
||||
this License and any conditions added under section 7. This requirement modifies
|
||||
the requirement in section 4 to "keep intact all notices".
|
||||
|
||||
c) You must license the entire work, as a whole, under this License to anyone
|
||||
who comes into possession of a copy. This License will therefore apply, along
|
||||
with any applicable section 7 additional terms, to the whole of the work,
|
||||
and all its parts, regardless of how they are packaged. This License gives
|
||||
no permission to license the work in any other way, but it does not invalidate
|
||||
such permission if you have separately received it.
|
||||
|
||||
d) If the work has interactive user interfaces, each must display Appropriate
|
||||
Legal Notices; however, if the Program has interactive interfaces that do
|
||||
not display Appropriate Legal Notices, your work need not make them do so.
|
||||
|
||||
A compilation of a covered work with other separate and independent works,
|
||||
which are not by their nature extensions of the covered work, and which are
|
||||
not combined with it such as to form a larger program, in or on a volume of
|
||||
a storage or distribution medium, is called an "aggregate" if the compilation
|
||||
and its resulting copyright are not used to limit the access or legal rights
|
||||
of the compilation's users beyond what the individual works permit. Inclusion
|
||||
of a covered work in an aggregate does not cause this License to apply to
|
||||
the other parts of the aggregate.
|
||||
|
||||
6. Conveying Non-Source Forms.
|
||||
|
||||
You may convey a covered work in object code form under the terms of sections
|
||||
4 and 5, provided that you also convey the machine-readable Corresponding
|
||||
Source under the terms of this License, in one of these ways:
|
||||
|
||||
a) Convey the object code in, or embodied in, a physical product (including
|
||||
a physical distribution medium), accompanied by the Corresponding Source fixed
|
||||
on a durable physical medium customarily used for software interchange.
|
||||
|
||||
b) Convey the object code in, or embodied in, a physical product (including
|
||||
a physical distribution medium), accompanied by a written offer, valid for
|
||||
at least three years and valid for as long as you offer spare parts or customer
|
||||
support for that product model, to give anyone who possesses the object code
|
||||
either (1) a copy of the Corresponding Source for all the software in the
|
||||
product that is covered by this License, on a durable physical medium customarily
|
||||
used for software interchange, for a price no more than your reasonable cost
|
||||
of physically performing this conveying of source, or (2) access to copy the
|
||||
Corresponding Source from a network server at no charge.
|
||||
|
||||
c) Convey individual copies of the object code with a copy of the written
|
||||
offer to provide the Corresponding Source. This alternative is allowed only
|
||||
occasionally and noncommercially, and only if you received the object code
|
||||
with such an offer, in accord with subsection 6b.
|
||||
|
||||
d) Convey the object code by offering access from a designated place (gratis
|
||||
or for a charge), and offer equivalent access to the Corresponding Source
|
||||
in the same way through the same place at no further charge. You need not
|
||||
require recipients to copy the Corresponding Source along with the object
|
||||
code. If the place to copy the object code is a network server, the Corresponding
|
||||
Source may be on a different server (operated by you or a third party) that
|
||||
supports equivalent copying facilities, provided you maintain clear directions
|
||||
next to the object code saying where to find the Corresponding Source. Regardless
|
||||
of what server hosts the Corresponding Source, you remain obligated to ensure
|
||||
that it is available for as long as needed to satisfy these requirements.
|
||||
|
||||
e) Convey the object code using peer-to-peer transmission, provided you inform
|
||||
other peers where the object code and Corresponding Source of the work are
|
||||
being offered to the general public at no charge under subsection 6d.
|
||||
|
||||
A separable portion of the object code, whose source code is excluded from
|
||||
the Corresponding Source as a System Library, need not be included in conveying
|
||||
the object code work.
|
||||
|
||||
A "User Product" is either (1) a "consumer product", which means any tangible
|
||||
personal property which is normally used for personal, family, or household
|
||||
purposes, or (2) anything designed or sold for incorporation into a dwelling.
|
||||
In determining whether a product is a consumer product, doubtful cases shall
|
||||
be resolved in favor of coverage. For a particular product received by a particular
|
||||
user, "normally used" refers to a typical or common use of that class of product,
|
||||
regardless of the status of the particular user or of the way in which the
|
||||
particular user actually uses, or expects or is expected to use, the product.
|
||||
A product is a consumer product regardless of whether the product has substantial
|
||||
commercial, industrial or non-consumer uses, unless such uses represent the
|
||||
only significant mode of use of the product.
|
||||
|
||||
"Installation Information" for a User Product means any methods, procedures,
|
||||
authorization keys, or other information required to install and execute modified
|
||||
versions of a covered work in that User Product from a modified version of
|
||||
its Corresponding Source. The information must suffice to ensure that the
|
||||
continued functioning of the modified object code is in no case prevented
|
||||
or interfered with solely because modification has been made.
|
||||
|
||||
If you convey an object code work under this section in, or with, or specifically
|
||||
for use in, a User Product, and the conveying occurs as part of a transaction
|
||||
in which the right of possession and use of the User Product is transferred
|
||||
to the recipient in perpetuity or for a fixed term (regardless of how the
|
||||
transaction is characterized), the Corresponding Source conveyed under this
|
||||
section must be accompanied by the Installation Information. But this requirement
|
||||
does not apply if neither you nor any third party retains the ability to install
|
||||
modified object code on the User Product (for example, the work has been installed
|
||||
in ROM).
|
||||
|
||||
The requirement to provide Installation Information does not include a requirement
|
||||
to continue to provide support service, warranty, or updates for a work that
|
||||
has been modified or installed by the recipient, or for the User Product in
|
||||
which it has been modified or installed. Access to a network may be denied
|
||||
when the modification itself materially and adversely affects the operation
|
||||
of the network or violates the rules and protocols for communication across
|
||||
the network.
|
||||
|
||||
Corresponding Source conveyed, and Installation Information provided, in accord
|
||||
with this section must be in a format that is publicly documented (and with
|
||||
an implementation available to the public in source code form), and must require
|
||||
no special password or key for unpacking, reading or copying.
|
||||
|
||||
7. Additional Terms.
|
||||
|
||||
"Additional permissions" are terms that supplement the terms of this License
|
||||
by making exceptions from one or more of its conditions. Additional permissions
|
||||
that are applicable to the entire Program shall be treated as though they
|
||||
were included in this License, to the extent that they are valid under applicable
|
||||
law. If additional permissions apply only to part of the Program, that part
|
||||
may be used separately under those permissions, but the entire Program remains
|
||||
governed by this License without regard to the additional permissions.
|
||||
|
||||
When you convey a copy of a covered work, you may at your option remove any
|
||||
additional permissions from that copy, or from any part of it. (Additional
|
||||
permissions may be written to require their own removal in certain cases when
|
||||
you modify the work.) You may place additional permissions on material, added
|
||||
by you to a covered work, for which you have or can give appropriate copyright
|
||||
permission.
|
||||
|
||||
Notwithstanding any other provision of this License, for material you add
|
||||
to a covered work, you may (if authorized by the copyright holders of that
|
||||
material) supplement the terms of this License with terms:
|
||||
|
||||
a) Disclaiming warranty or limiting liability differently from the terms of
|
||||
sections 15 and 16 of this License; or
|
||||
|
||||
b) Requiring preservation of specified reasonable legal notices or author
|
||||
attributions in that material or in the Appropriate Legal Notices displayed
|
||||
by works containing it; or
|
||||
|
||||
c) Prohibiting misrepresentation of the origin of that material, or requiring
|
||||
that modified versions of such material be marked in reasonable ways as different
|
||||
from the original version; or
|
||||
|
||||
d) Limiting the use for publicity purposes of names of licensors or authors
|
||||
of the material; or
|
||||
|
||||
e) Declining to grant rights under trademark law for use of some trade names,
|
||||
trademarks, or service marks; or
|
||||
|
||||
f) Requiring indemnification of licensors and authors of that material by
|
||||
anyone who conveys the material (or modified versions of it) with contractual
|
||||
assumptions of liability to the recipient, for any liability that these contractual
|
||||
assumptions directly impose on those licensors and authors.
|
||||
|
||||
All other non-permissive additional terms are considered "further restrictions"
|
||||
within the meaning of section 10. If the Program as you received it, or any
|
||||
part of it, contains a notice stating that it is governed by this License
|
||||
along with a term that is a further restriction, you may remove that term.
|
||||
If a license document contains a further restriction but permits relicensing
|
||||
or conveying under this License, you may add to a covered work material governed
|
||||
by the terms of that license document, provided that the further restriction
|
||||
does not survive such relicensing or conveying.
|
||||
|
||||
If you add terms to a covered work in accord with this section, you must place,
|
||||
in the relevant source files, a statement of the additional terms that apply
|
||||
to those files, or a notice indicating where to find the applicable terms.
|
||||
|
||||
Additional terms, permissive or non-permissive, may be stated in the form
|
||||
of a separately written license, or stated as exceptions; the above requirements
|
||||
apply either way.
|
||||
|
||||
8. Termination.
|
||||
|
||||
You may not propagate or modify a covered work except as expressly provided
|
||||
under this License. Any attempt otherwise to propagate or modify it is void,
|
||||
and will automatically terminate your rights under this License (including
|
||||
any patent licenses granted under the third paragraph of section 11).
|
||||
|
||||
However, if you cease all violation of this License, then your license from
|
||||
a particular copyright holder is reinstated (a) provisionally, unless and
|
||||
until the copyright holder explicitly and finally terminates your license,
|
||||
and (b) permanently, if the copyright holder fails to notify you of the violation
|
||||
by some reasonable means prior to 60 days after the cessation.
|
||||
|
||||
Moreover, your license from a particular copyright holder is reinstated permanently
|
||||
if the copyright holder notifies you of the violation by some reasonable means,
|
||||
this is the first time you have received notice of violation of this License
|
||||
(for any work) from that copyright holder, and you cure the violation prior
|
||||
to 30 days after your receipt of the notice.
|
||||
|
||||
Termination of your rights under this section does not terminate the licenses
|
||||
of parties who have received copies or rights from you under this License.
|
||||
If your rights have been terminated and not permanently reinstated, you do
|
||||
not qualify to receive new licenses for the same material under section 10.
|
||||
|
||||
9. Acceptance Not Required for Having Copies.
|
||||
|
||||
You are not required to accept this License in order to receive or run a copy
|
||||
of the Program. Ancillary propagation of a covered work occurring solely as
|
||||
a consequence of using peer-to-peer transmission to receive a copy likewise
|
||||
does not require acceptance. However, nothing other than this License grants
|
||||
you permission to propagate or modify any covered work. These actions infringe
|
||||
copyright if you do not accept this License. Therefore, by modifying or propagating
|
||||
a covered work, you indicate your acceptance of this License to do so.
|
||||
|
||||
10. Automatic Licensing of Downstream Recipients.
|
||||
|
||||
Each time you convey a covered work, the recipient automatically receives
|
||||
a license from the original licensors, to run, modify and propagate that work,
|
||||
subject to this License. You are not responsible for enforcing compliance
|
||||
by third parties with this License.
|
||||
|
||||
An "entity transaction" is a transaction transferring control of an organization,
|
||||
or substantially all assets of one, or subdividing an organization, or merging
|
||||
organizations. If propagation of a covered work results from an entity transaction,
|
||||
each party to that transaction who receives a copy of the work also receives
|
||||
whatever licenses to the work the party's predecessor in interest had or could
|
||||
give under the previous paragraph, plus a right to possession of the Corresponding
|
||||
Source of the work from the predecessor in interest, if the predecessor has
|
||||
it or can get it with reasonable efforts.
|
||||
|
||||
You may not impose any further restrictions on the exercise of the rights
|
||||
granted or affirmed under this License. For example, you may not impose a
|
||||
license fee, royalty, or other charge for exercise of rights granted under
|
||||
this License, and you may not initiate litigation (including a cross-claim
|
||||
or counterclaim in a lawsuit) alleging that any patent claim is infringed
|
||||
by making, using, selling, offering for sale, or importing the Program or
|
||||
any portion of it.
|
||||
|
||||
11. Patents.
|
||||
|
||||
A "contributor" is a copyright holder who authorizes use under this License
|
||||
of the Program or a work on which the Program is based. The work thus licensed
|
||||
is called the contributor's "contributor version".
|
||||
|
||||
A contributor's "essential patent claims" are all patent claims owned or controlled
|
||||
by the contributor, whether already acquired or hereafter acquired, that would
|
||||
be infringed by some manner, permitted by this License, of making, using,
|
||||
or selling its contributor version, but do not include claims that would be
|
||||
infringed only as a consequence of further modification of the contributor
|
||||
version. For purposes of this definition, "control" includes the right to
|
||||
grant patent sublicenses in a manner consistent with the requirements of this
|
||||
License.
|
||||
|
||||
Each contributor grants you a non-exclusive, worldwide, royalty-free patent
|
||||
license under the contributor's essential patent claims, to make, use, sell,
|
||||
offer for sale, import and otherwise run, modify and propagate the contents
|
||||
of its contributor version.
|
||||
|
||||
In the following three paragraphs, a "patent license" is any express agreement
|
||||
or commitment, however denominated, not to enforce a patent (such as an express
|
||||
permission to practice a patent or covenant not to sue for patent infringement).
|
||||
To "grant" such a patent license to a party means to make such an agreement
|
||||
or commitment not to enforce a patent against the party.
|
||||
|
||||
If you convey a covered work, knowingly relying on a patent license, and the
|
||||
Corresponding Source of the work is not available for anyone to copy, free
|
||||
of charge and under the terms of this License, through a publicly available
|
||||
network server or other readily accessible means, then you must either (1)
|
||||
cause the Corresponding Source to be so available, or (2) arrange to deprive
|
||||
yourself of the benefit of the patent license for this particular work, or
|
||||
(3) arrange, in a manner consistent with the requirements of this License,
|
||||
to extend the patent
|
||||
|
||||
license to downstream recipients. "Knowingly relying" means you have actual
|
||||
knowledge that, but for the patent license, your conveying the covered work
|
||||
in a country, or your recipient's use of the covered work in a country, would
|
||||
infringe one or more identifiable patents in that country that you have reason
|
||||
to believe are valid.
|
||||
|
||||
If, pursuant to or in connection with a single transaction or arrangement,
|
||||
you convey, or propagate by procuring conveyance of, a covered work, and grant
|
||||
a patent license to some of the parties receiving the covered work authorizing
|
||||
them to use, propagate, modify or convey a specific copy of the covered work,
|
||||
then the patent license you grant is automatically extended to all recipients
|
||||
of the covered work and works based on it.
|
||||
|
||||
A patent license is "discriminatory" if it does not include within the scope
|
||||
of its coverage, prohibits the exercise of, or is conditioned on the non-exercise
|
||||
of one or more of the rights that are specifically granted under this License.
|
||||
You may not convey a covered work if you are a party to an arrangement with
|
||||
a third party that is in the business of distributing software, under which
|
||||
you make payment to the third party based on the extent of your activity of
|
||||
conveying the work, and under which the third party grants, to any of the
|
||||
parties who would receive the covered work from you, a discriminatory patent
|
||||
license (a) in connection with copies of the covered work conveyed by you
|
||||
(or copies made from those copies), or (b) primarily for and in connection
|
||||
with specific products or compilations that contain the covered work, unless
|
||||
you entered into that arrangement, or that patent license was granted, prior
|
||||
to 28 March 2007.
|
||||
|
||||
Nothing in this License shall be construed as excluding or limiting any implied
|
||||
license or other defenses to infringement that may otherwise be available
|
||||
to you under applicable patent law.
|
||||
|
||||
12. No Surrender of Others' Freedom.
|
||||
|
||||
If conditions are imposed on you (whether by court order, agreement or otherwise)
|
||||
that contradict the conditions of this License, they do not excuse you from
|
||||
the conditions of this License. If you cannot convey a covered work so as
|
||||
to satisfy simultaneously your obligations under this License and any other
|
||||
pertinent obligations, then as a consequence you may
|
||||
|
||||
not convey it at all. For example, if you agree to terms that obligate you
|
||||
to collect a royalty for further conveying from those to whom you convey the
|
||||
Program, the only way you could satisfy both those terms and this License
|
||||
would be to refrain entirely from conveying the Program.
|
||||
|
||||
13. Remote Network Interaction; Use with the GNU General Public License.
|
||||
|
||||
Notwithstanding any other provision of this License, if you modify the Program,
|
||||
your modified version must prominently offer all users interacting with it
|
||||
remotely through a computer network (if your version supports such interaction)
|
||||
an opportunity to receive the Corresponding Source of your version by providing
|
||||
access to the Corresponding Source from a network server at no charge, through
|
||||
some standard or customary means of facilitating copying of software. This
|
||||
Corresponding Source shall include the Corresponding Source for any work covered
|
||||
by version 3 of the GNU General Public License that is incorporated pursuant
|
||||
to the following paragraph.
|
||||
|
||||
Notwithstanding any other provision of this License, you have permission to
|
||||
link or combine any covered work with a work licensed under version 3 of the
|
||||
GNU General Public License into a single combined work, and to convey the
|
||||
resulting work. The terms of this License will continue to apply to the part
|
||||
which is the covered work, but the work with which it is combined will remain
|
||||
governed by version 3 of the GNU General Public License.
|
||||
|
||||
14. Revised Versions of this License.
|
||||
|
||||
The Free Software Foundation may publish revised and/or new versions of the
|
||||
GNU Affero General Public License from time to time. Such new versions will
|
||||
be similar in spirit to the present version, but may differ in detail to address
|
||||
new problems or concerns.
|
||||
|
||||
Each version is given a distinguishing version number. If the Program specifies
|
||||
that a certain numbered version of the GNU Affero General Public License "or
|
||||
any later version" applies to it, you have the option of following the terms
|
||||
and conditions either of that numbered version or of any later version published
|
||||
by the Free Software Foundation. If the Program does not specify a version
|
||||
number of the GNU Affero General Public License, you may choose any version
|
||||
ever published by the Free Software Foundation.
|
||||
|
||||
If the Program specifies that a proxy can decide which future versions of
|
||||
the GNU Affero General Public License can be used, that proxy's public statement
|
||||
of acceptance of a version permanently authorizes you to choose that version
|
||||
for the Program.
|
||||
|
||||
Later license versions may give you additional or different permissions. However,
|
||||
no additional obligations are imposed on any author or copyright holder as
|
||||
a result of your choosing to follow a later version.
|
||||
|
||||
15. Disclaimer of Warranty.
|
||||
|
||||
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE
|
||||
LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
|
||||
OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER
|
||||
EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
|
||||
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
|
||||
TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM
|
||||
PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR
|
||||
CORRECTION.
|
||||
|
||||
16. Limitation of Liability.
|
||||
|
||||
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL
|
||||
ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM
|
||||
AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL,
|
||||
INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO
|
||||
USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED
|
||||
INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE
|
||||
PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER
|
||||
PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
|
||||
|
||||
17. Interpretation of Sections 15 and 16.
|
||||
|
||||
If the disclaimer of warranty and limitation of liability provided above cannot
|
||||
be given local legal effect according to their terms, reviewing courts shall
|
||||
apply local law that most closely approximates an absolute waiver of all civil
|
||||
liability in connection with the Program, unless a warranty or assumption
|
||||
of liability accompanies a copy of the Program in return for a fee. END OF
|
||||
TERMS AND CONDITIONS
|
||||
|
||||
How to Apply These Terms to Your New Programs
|
||||
|
||||
If you develop a new program, and you want it to be of the greatest possible
|
||||
use to the public, the best way to achieve this is to make it free software
|
||||
which everyone can redistribute and change under these terms.
|
||||
|
||||
To do so, attach the following notices to the program. It is safest to attach
|
||||
them to the start of each source file to most effectively state the exclusion
|
||||
of warranty; and each file should have at least the "copyright" line and a
|
||||
pointer to where the full notice is found.
|
||||
|
||||
<one line to give the program's name and a brief idea of what it does.>
|
||||
|
||||
Copyright (C) <year> <name of author>
|
||||
|
||||
This program is free software: you can redistribute it and/or modify it under
|
||||
the terms of the GNU Affero General Public License as published by the Free
|
||||
Software Foundation, either version 3 of the License, or (at your option)
|
||||
any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
|
||||
FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
|
||||
details.
|
||||
|
||||
You should have received a copy of the GNU Affero General Public License along
|
||||
with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
Also add information on how to contact you by electronic and paper mail.
|
||||
|
||||
If your software can interact with users remotely through a computer network,
|
||||
you should also make sure that it provides a way for users to get its source.
|
||||
For example, if your program is a web application, its interface could display
|
||||
a "Source" link that leads users to an archive of the code. There are many
|
||||
ways you could offer source, and different solutions will be better for different
|
||||
programs; see section 13 for the specific requirements.
|
||||
|
||||
You should also get your employer (if you work as a programmer) or school,
|
||||
if any, to sign a "copyright disclaimer" for the program, if necessary. For
|
||||
more information on this, and how to apply and follow the GNU AGPL, see <https://www.gnu.org/licenses/>.
|
41
README.md
41
README.md
|
@ -1,23 +1,30 @@
|
|||
# fediverse.space 🌐
|
||||
# index.community 🌐
|
||||
|
||||
The map of the fediverse that you always wanted.
|
||||
|
||||
Read the latest updates on Mastodon: [@fediversespace](https://cursed.technology/@fediversespace)
|
||||
Read the latest updates on Mastodon: [@indexCommunity](https://social.inex.rocks/@indexCommunity)
|
||||
|
||||
![A screenshot of fediverse.space](screenshot.png)
|
||||
|
||||
1. [Requirements](#requirements)
|
||||
2. [Running it](#running-it)
|
||||
3. [Commands](#commands)
|
||||
4. [Privacy](#privacy)
|
||||
5. [Deployment](#deployment)
|
||||
6. [Acknowledgements](#acknowledgements)
|
||||
- [index.community 🌐](#indexcommunity-%f0%9f%8c%90)
|
||||
- [Requirements](#requirements)
|
||||
- [Running it](#running-it)
|
||||
- [Backend](#backend)
|
||||
- [Frontend](#frontend)
|
||||
- [Commands](#commands)
|
||||
- [Backend](#backend-1)
|
||||
- [Frontend](#frontend-1)
|
||||
- [Privacy](#privacy)
|
||||
- [Deployment](#deployment)
|
||||
- [Acknowledgements](#acknowledgements)
|
||||
|
||||
## Requirements
|
||||
|
||||
Though dockerized, backend development is easiest if you have the following installed.
|
||||
Note: examples here use `podman`. In most cases you should be able to replace `podman` with `docker`.
|
||||
|
||||
- For the scraper + API:
|
||||
Though containerized, backend development is easiest if you have the following installed.
|
||||
|
||||
- For the crawler + API:
|
||||
- Elixir
|
||||
- Postgres
|
||||
- For laying out the graph:
|
||||
|
@ -31,9 +38,11 @@ Though dockerized, backend development is easiest if you have the following inst
|
|||
### Backend
|
||||
|
||||
- `cp example.env .env` and modify environment variables as required
|
||||
- `docker-compose build`
|
||||
- `docker-compose up -d phoenix`
|
||||
- if you don't specify `phoenix`, it'll also start `gephi` which should only be run as a regular one-off job
|
||||
- `podman build gephi && podman build phoenix`
|
||||
- `podman run --name elasticsearch -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" elasticsearch:6.8.9`
|
||||
- If you've `run` this container previously, use `podman start elasticsearch`
|
||||
- `podman run --name postgres -e "POSTGRES_USER=postgres" -e "POSTGRES_PASSWORD=postgres" -p 5432:5432 postgres:12`
|
||||
- `podman-compose -f compose.backend-services.yml -f compose.phoenix.yml`
|
||||
- Create the elasticsearch index:
|
||||
- `iex -S mix app.start`
|
||||
- `Elasticsearch.Index.hot_swap(Backend.Elasticsearch.Cluster, :instances)`
|
||||
|
@ -48,10 +57,6 @@ Though dockerized, backend development is easiest if you have the following inst
|
|||
### Backend
|
||||
|
||||
`./gradlew shadowJar` compiles the graph layout program. `java -Xmx1g -jar build/libs/graphBuilder.jar` runs it.
|
||||
If running in docker, this means you run
|
||||
|
||||
- `docker-compose build gephi`
|
||||
- `docker-compose run gephi java -Xmx1g -jar build/libs/graphBuilder.jar` lays out the graph
|
||||
|
||||
### Frontend
|
||||
|
||||
|
@ -97,8 +102,6 @@ SHELL=/bin/bash
|
|||
0 2 * * * /usr/bin/dokku run gephi java -Xmx1g -jar build/libs/graphBuilder.jar
|
||||
```
|
||||
|
||||
10. (Optional) Set up caching with something like [dokku-nginx-cache](https://github.com/Aluxian/dokku-nginx-cache)
|
||||
|
||||
Before the app starts running, make sure that the Elasticsearch index exists -- otherwise it'll create one called
|
||||
`instances`, which should be the name of the alias. Then it won't be able to hot swap if you reindex in the future.
|
||||
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
FROM elixir:1.9.0-alpine as build
|
||||
FROM elixir:1.12-alpine as build
|
||||
|
||||
# install build dependencies
|
||||
RUN apk add --update git build-base
|
||||
RUN apk add --update git build-base
|
||||
|
||||
# prepare build dir
|
||||
RUN mkdir /app
|
||||
|
@ -37,7 +37,7 @@ RUN mix release
|
|||
|
||||
# prepare release image
|
||||
FROM alpine:3.9 AS app
|
||||
RUN apk add --update bash openssl
|
||||
RUN apk add --update bash openssl libstdc++ build-base
|
||||
|
||||
RUN mkdir /app
|
||||
WORKDIR /app
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
## Notes
|
||||
|
||||
- This project requires Elixir >= 1.9.
|
||||
- This project requires Elixir >= 1.10.
|
||||
- Run with `SKIP_CRAWL=true` to just run the server (useful for working on the API without also crawling)
|
||||
- This project is automatically scanned for potential vulnerabilities with [Sobelow](https://sobelow.io/).
|
||||
|
||||
|
@ -24,6 +24,8 @@ There are several environment variables you can set to configure how the crawler
|
|||
- `FRONTEND_DOMAIN` (required). Used to generate login links for instance admins.
|
||||
- Don't enter `https://`, this is added automatically.
|
||||
- `SENDGRID_API_KEY`. Needed to send emails to the admin, or to instance admins who want to opt in/out.
|
||||
- `MASTODON_DOMAIN`. The domain (e.g. `mastodon.social`) that your bot login account is hosted on.
|
||||
- `MASTODON_TOKEN`. The access token for the bot login account.
|
||||
|
||||
## Deployment
|
||||
|
||||
|
|
|
@ -13,31 +13,15 @@ config :backend,
|
|||
# Configures the endpoint
|
||||
config :backend, BackendWeb.Endpoint,
|
||||
url: [host: "localhost"],
|
||||
secret_key_base: "XL4NKGBN9lZMrQbMEI1KJOlwAt8S7younVJl90TdAgzmwyapr3g7BRYSNYvX0sZ9",
|
||||
render_errors: [view: BackendWeb.ErrorView, accepts: ~w(json)],
|
||||
pubsub: [name: Backend.PubSub, adapter: Phoenix.PubSub.PG2],
|
||||
instrumenters: [Appsignal.Phoenix.Instrumenter]
|
||||
secret_key_base: System.get_env("SECRET_KEY_BASE"),
|
||||
render_errors: [view: BackendWeb.ErrorView, accepts: ~w(json)]
|
||||
|
||||
config :backend, Backend.Repo, queue_target: 5000
|
||||
|
||||
instances_config_path =
|
||||
if System.get_env("MIX_ENV") == "prod",
|
||||
do: "lib/backend-2.3.0/priv/elasticsearch/instances.json",
|
||||
else: "priv/elasticsearch/instances.json"
|
||||
|
||||
config :backend, Backend.Elasticsearch.Cluster,
|
||||
url: "http://localhost:9200",
|
||||
url: "http://elastic:9200",
|
||||
api: Elasticsearch.API.HTTP,
|
||||
json_library: Jason,
|
||||
indexes: %{
|
||||
instances: %{
|
||||
settings: instances_config_path,
|
||||
store: Backend.Elasticsearch.Store,
|
||||
sources: [Backend.Instance],
|
||||
bulk_page_size: 1000,
|
||||
bulk_wait_interval: 1_000
|
||||
}
|
||||
}
|
||||
json_library: Jason
|
||||
|
||||
# Configures Elixir's Logger
|
||||
config :logger, :console,
|
||||
|
@ -47,29 +31,51 @@ config :logger, :console,
|
|||
# Use Jason for JSON parsing in Phoenix
|
||||
config :phoenix, :json_library, Jason
|
||||
|
||||
config :ex_twilio,
|
||||
account_sid: System.get_env("TWILIO_ACCOUNT_SID"),
|
||||
auth_token: System.get_env("TWILIO_AUTH_TOKEN")
|
||||
config :gollum,
|
||||
# 24 hrs
|
||||
refresh_secs: 86_400,
|
||||
lazy_refresh: true,
|
||||
user_agent: "index.community crawler"
|
||||
|
||||
config :backend, Graph.Cache,
|
||||
# 1 hour
|
||||
gc_interval: 3600
|
||||
|
||||
|
||||
config :backend, Backend.Mailer,
|
||||
adapter: Swoosh.Adapters.Sendgrid,
|
||||
api_key: System.get_env("SENDGRID_API_KEY")
|
||||
adapter: Swoosh.Adapters.SMTP,
|
||||
relay: System.get_env("MAILER_RELAY"),
|
||||
username: System.get_env("MAILER_USERNAME"),
|
||||
password: System.get_env("MAILER_PASSWORD"),
|
||||
ssl: true,
|
||||
tls: :always,
|
||||
auth: :always,
|
||||
port: 465
|
||||
|
||||
config :backend, Mastodon.Messenger,
|
||||
domain: System.get_env("MASTODON_DOMAIN"),
|
||||
token: System.get_env("MASTODON_TOKEN")
|
||||
|
||||
config :backend, :crawler,
|
||||
status_age_limit_days: 28,
|
||||
status_count_limit: 5000,
|
||||
personal_instance_threshold: 10,
|
||||
crawl_interval_mins: 60,
|
||||
crawl_interval_mins: 30,
|
||||
crawl_workers: 100,
|
||||
blacklist: [
|
||||
# spam
|
||||
"gab.best",
|
||||
"4chan.icu"
|
||||
# spam
|
||||
"4chan.icu",
|
||||
# *really* doesn't want to be listed on fediverse.space
|
||||
"pleroma.site",
|
||||
# dummy instances used for pleroma CI
|
||||
"pleroma.online"
|
||||
],
|
||||
user_agent: "fediverse.space crawler",
|
||||
user_agent: "index.community crawler",
|
||||
require_bidirectional_mentions: false,
|
||||
admin_phone: System.get_env("ADMIN_PHONE"),
|
||||
twilio_phone: System.get_env("TWILIO_PHONE"),
|
||||
admin_email: System.get_env("ADMIN_EMAIL"),
|
||||
frontend_domain: "https://www.fediverse.space"
|
||||
admin_email: System.get_env("ADMIN_EMAIL")
|
||||
|
||||
config :backend, Backend.Scheduler,
|
||||
jobs: [
|
||||
|
@ -85,6 +91,10 @@ config :backend, Backend.Scheduler,
|
|||
{"0 */3 * * *", {Backend.Scheduler, :check_for_spam_instances, []}}
|
||||
]
|
||||
|
||||
config :phoenix, :template_engines,
|
||||
eex: Appsignal.Phoenix.Template.EExEngine,
|
||||
exs: Appsignal.Phoenix.Template.ExsEngine
|
||||
|
||||
# Import environment specific config. This must remain at the bottom
|
||||
# of this file so it overrides the configuration defined above.
|
||||
import_config "#{Mix.env()}.exs"
|
||||
|
|
|
@ -7,7 +7,7 @@ import Config
|
|||
# watchers to your application. For example, we use it
|
||||
# with webpack to recompile .js and .css sources.
|
||||
config :backend, BackendWeb.Endpoint,
|
||||
http: [port: 4000],
|
||||
http: [port: 4001],
|
||||
debug_errors: true,
|
||||
code_reloader: true,
|
||||
check_origin: false,
|
||||
|
@ -53,16 +53,13 @@ config :backend, Backend.Repo,
|
|||
username: "postgres",
|
||||
password: "postgres",
|
||||
database: "backend_dev",
|
||||
hostname: "localhost",
|
||||
hostname: "127.0.0.1:5435",
|
||||
pool_size: 10
|
||||
|
||||
config :backend, :crawler,
|
||||
status_age_limit_days: 28,
|
||||
status_count_limit: 100,
|
||||
status_age_limit_days: 14,
|
||||
status_count_limit: 500,
|
||||
personal_instance_threshold: 5,
|
||||
crawl_interval_mins: 60,
|
||||
crawl_workers: 1,
|
||||
blacklist: [
|
||||
"gab.best",
|
||||
"4chan.icu"
|
||||
]
|
||||
crawl_workers: 10,
|
||||
frontend_domain: "localhost:3000"
|
||||
|
|
|
@ -19,7 +19,7 @@ config :backend, Backend.Elasticsearch.Cluster,
|
|||
|
||||
config :appsignal, :config, revision: System.get_env("GIT_REV")
|
||||
|
||||
port = String.to_integer(System.get_env("PORT") || "4000")
|
||||
port = String.to_integer(System.get_env("PORT") || "4001")
|
||||
|
||||
config :backend, BackendWeb.Endpoint,
|
||||
http: [:inet6, port: port],
|
||||
|
@ -28,16 +28,20 @@ config :backend, BackendWeb.Endpoint,
|
|||
secret_key_base: System.get_env("SECRET_KEY_BASE"),
|
||||
server: true
|
||||
|
||||
config :ex_twilio,
|
||||
account_sid: System.get_env("TWILIO_ACCOUNT_SID"),
|
||||
auth_token: System.get_env("TWILIO_AUTH_TOKEN")
|
||||
|
||||
config :backend, :crawler,
|
||||
admin_phone: System.get_env("ADMIN_PHONE"),
|
||||
twilio_phone: System.get_env("TWILIO_PHONE"),
|
||||
admin_email: System.get_env("ADMIN_EMAIL"),
|
||||
frontend_domain: System.get_env("FRONTEND_DOMAIN")
|
||||
|
||||
config :backend, Backend.Mailer,
|
||||
adapter: Swoosh.Adapters.Sendgrid,
|
||||
api_key: System.get_env("SENDGRID_API_KEY")
|
||||
adapter: Swoosh.Adapters.SMTP,
|
||||
relay: System.get_env("MAILER_RELAY"),
|
||||
username: System.get_env("MAILER_USERNAME"),
|
||||
password: System.get_env("MAILER_PASSWORD"),
|
||||
ssl: true,
|
||||
auth: :always,
|
||||
port: 465
|
||||
|
||||
config :backend, Mastodon.Messenger,
|
||||
domain: System.get_env("MASTODON_DOMAIN"),
|
||||
token: System.get_env("MASTODON_TOKEN")
|
||||
|
|
|
@ -0,0 +1,37 @@
|
|||
version: "2"
|
||||
|
||||
networks:
|
||||
space:
|
||||
external: false
|
||||
|
||||
services:
|
||||
server:
|
||||
build: .
|
||||
restart: unless-stopped
|
||||
networks:
|
||||
- space
|
||||
volumes:
|
||||
- /home/gitea/data:/data
|
||||
depends_on:
|
||||
- db
|
||||
|
||||
db:
|
||||
image: postgres:12-alpine
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
- POSTGRES_PASSWORD: postgres
|
||||
- POSTGRES_USER: postgres
|
||||
networks:
|
||||
- space
|
||||
volumes:
|
||||
- /var/lib/postgresql/data
|
||||
|
||||
elastic:
|
||||
image: elasticsearch:6.8.9
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
- discovery.type: single-node
|
||||
networks:
|
||||
- space
|
||||
|
||||
|
|
@ -1,13 +1,56 @@
|
|||
defmodule Backend.Api do
|
||||
@moduledoc """
|
||||
Functions used in the API controllers. Most of these simply return data from the database.
|
||||
"""
|
||||
alias Backend.{Edge, Instance, Repo}
|
||||
import Backend.Util
|
||||
import Ecto.Query
|
||||
|
||||
@spec get_instance!(String.t()) :: Instance.t()
|
||||
def get_instance!(domain) do
|
||||
@type instance_sort_field :: :name | :user_count | :status_count | :insularity
|
||||
@type sort_direction :: :asc | :desc
|
||||
@spec get_instances(Integer.t() | nil, instance_sort_field | nil, sort_direction | nil) ::
|
||||
Scrivener.Page.t()
|
||||
def get_instances(page \\ nil, sort_field \\ nil, sort_direction \\ nil) do
|
||||
Instance
|
||||
|> where([i], not is_nil(i.type) and not i.opt_out)
|
||||
|> maybe_order_by(sort_field, sort_direction)
|
||||
|> Repo.paginate(page: page)
|
||||
end
|
||||
|
||||
defp maybe_order_by(query, sort_field, sort_direction) do
|
||||
cond do
|
||||
sort_field == nil and sort_direction != nil ->
|
||||
query
|
||||
|
||||
sort_field != nil and sort_direction == nil ->
|
||||
query
|
||||
|> order_by(desc: ^sort_field)
|
||||
|
||||
sort_direction == :asc ->
|
||||
query
|
||||
|> order_by(asc_nulls_last: ^sort_field)
|
||||
|
||||
sort_direction == :desc ->
|
||||
query
|
||||
|> order_by(desc_nulls_last: ^sort_field)
|
||||
|
||||
true ->
|
||||
query
|
||||
end
|
||||
end
|
||||
|
||||
@spec get_instance(String.t()) :: Instance.t() | nil
|
||||
def get_instance(domain) do
|
||||
Instance
|
||||
|> Repo.get_by(domain: domain)
|
||||
end
|
||||
|
||||
@spec get_instance_with_relationships(String.t()) :: Instance.t() | nil
|
||||
def get_instance_with_relationships(domain) do
|
||||
Instance
|
||||
|> preload(:peers)
|
||||
|> Repo.get_by!(domain: domain)
|
||||
|> preload(:federation_restrictions)
|
||||
|> Repo.get_by(domain: domain)
|
||||
end
|
||||
|
||||
def update_instance(instance) do
|
||||
|
@ -24,7 +67,8 @@ defmodule Backend.Api do
|
|||
* the user count is > the threshold
|
||||
* have x and y coordinates
|
||||
|
||||
If `domain` is passed, then this function only returns nodes that are neighbors of that instance.
|
||||
If `domain` is passed, then this function only returns nodes that are neighbors of that
|
||||
instance.
|
||||
"""
|
||||
@spec list_nodes() :: [Instance.t()]
|
||||
def list_nodes(domain \\ nil) do
|
||||
|
@ -34,7 +78,7 @@ defmodule Backend.Api do
|
|||
|> where(
|
||||
[i],
|
||||
not is_nil(i.x) and not is_nil(i.y) and not is_nil(i.user_count) and
|
||||
i.user_count >= ^user_threshold and not i.opt_out
|
||||
(i.user_count >= ^user_threshold or i.opt_in) and not i.opt_out
|
||||
)
|
||||
|> maybe_filter_nodes_to_neighborhood(domain)
|
||||
|> select([c], [:domain, :user_count, :x, :y, :type, :statuses_per_day])
|
||||
|
@ -61,6 +105,7 @@ defmodule Backend.Api do
|
|||
end
|
||||
|
||||
@spec list_edges() :: [Edge.t()]
|
||||
# credo:disable-for-next-line Credo.Check.Refactor.CyclomaticComplexity
|
||||
def list_edges(domain \\ nil) do
|
||||
user_threshold = get_config(:personal_instance_threshold)
|
||||
|
||||
|
@ -73,7 +118,8 @@ defmodule Backend.Api do
|
|||
[e, i1, i2],
|
||||
not is_nil(i1.x) and not is_nil(i1.y) and
|
||||
not is_nil(i2.x) and not is_nil(i2.y) and
|
||||
i1.user_count >= ^user_threshold and i2.user_count >= ^user_threshold and
|
||||
(i1.user_count >= ^user_threshold or i1.opt_in) and
|
||||
(i2.user_count >= ^user_threshold or i2.opt_in) and
|
||||
not i1.opt_out and not i2.opt_out
|
||||
)
|
||||
|> Repo.all()
|
||||
|
@ -101,50 +147,15 @@ defmodule Backend.Api do
|
|||
end
|
||||
end
|
||||
|
||||
def search_instances(query, from \\ 0) do
|
||||
def search_instances(query, filters, from \\ 0) do
|
||||
page_size = 50
|
||||
|
||||
search_response =
|
||||
Elasticsearch.post(Backend.Elasticsearch.Cluster, "/instances/_search", %{
|
||||
"sort" => "_score",
|
||||
"from" => from,
|
||||
"size" => page_size,
|
||||
"query" => %{
|
||||
"bool" => %{
|
||||
"should" => [
|
||||
%{
|
||||
"multi_match" => %{
|
||||
"query" => query,
|
||||
"fields" => [
|
||||
"description.english"
|
||||
]
|
||||
}
|
||||
},
|
||||
%{
|
||||
"wildcard" => %{
|
||||
"domain.keyword" => %{
|
||||
"value" => query,
|
||||
"boost" => 100
|
||||
}
|
||||
}
|
||||
},
|
||||
%{
|
||||
"wildcard" => %{
|
||||
"domain.keyword" => %{
|
||||
"value" => "*#{query}*",
|
||||
"boost" => 1
|
||||
}
|
||||
}
|
||||
},
|
||||
%{
|
||||
"match" => %{
|
||||
"domain.ngram^0.5" => query
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
})
|
||||
Elasticsearch.post(
|
||||
Backend.Elasticsearch.Cluster,
|
||||
"/instances/_search",
|
||||
build_es_query(query, filters, page_size, from)
|
||||
)
|
||||
|
||||
with {:ok, result} <- search_response do
|
||||
hits =
|
||||
|
@ -164,4 +175,51 @@ defmodule Backend.Api do
|
|||
}
|
||||
end
|
||||
end
|
||||
|
||||
defp build_es_query(query, filters, page_size, from) do
|
||||
opt_out_filter = %{"term" => %{"opt_out" => "false"}}
|
||||
filters = [opt_out_filter | filters]
|
||||
|
||||
%{
|
||||
"sort" => "_score",
|
||||
"from" => from,
|
||||
"size" => page_size,
|
||||
# This must be >0, otherwise all documents will be returned
|
||||
"min_score" => 1,
|
||||
"query" => %{
|
||||
"bool" => %{
|
||||
"filter" => filters,
|
||||
"should" => [
|
||||
%{
|
||||
"multi_match" => %{
|
||||
"query" => query,
|
||||
"fields" => [
|
||||
"description.*",
|
||||
"domain.english"
|
||||
]
|
||||
}
|
||||
},
|
||||
%{
|
||||
# If the query exactly matches a domain, that instance should always be the first result.
|
||||
"wildcard" => %{
|
||||
"domain.keyword" => %{
|
||||
"value" => query,
|
||||
"boost" => 100
|
||||
}
|
||||
}
|
||||
},
|
||||
%{
|
||||
# Give substring matches in domains a large boost, too.
|
||||
"wildcard" => %{
|
||||
"domain.keyword" => %{
|
||||
"value" => "*#{query}*",
|
||||
"boost" => 10
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
end
|
||||
end
|
||||
|
|
|
@ -7,6 +7,7 @@ defmodule Backend.Application do
|
|||
import Backend.Util
|
||||
|
||||
def start(_type, _args) do
|
||||
|
||||
:telemetry.attach(
|
||||
"appsignal-ecto",
|
||||
[:backend, :repo, :query],
|
||||
|
@ -22,7 +23,7 @@ defmodule Backend.Application do
|
|||
# Start the endpoint when the application starts
|
||||
BackendWeb.Endpoint,
|
||||
# Crawler children
|
||||
:hackney_pool.child_spec(:crawler, timeout: 15000, max_connections: crawl_worker_count),
|
||||
:hackney_pool.child_spec(:crawler, timeout: 15_000, max_connections: crawl_worker_count),
|
||||
Supervisor.child_spec(
|
||||
{Task,
|
||||
fn ->
|
||||
|
@ -33,7 +34,8 @@ defmodule Backend.Application do
|
|||
),
|
||||
Supervisor.child_spec({Task, fn -> HTTPoison.start() end}, id: :start_httpoison),
|
||||
Backend.Scheduler,
|
||||
Backend.Elasticsearch.Cluster
|
||||
Backend.Elasticsearch.Cluster,
|
||||
Graph.Cache
|
||||
]
|
||||
|
||||
children =
|
||||
|
|
|
@ -1,4 +1,7 @@
|
|||
defmodule Backend.Auth do
|
||||
@moduledoc """
|
||||
Functions related to authentication.
|
||||
"""
|
||||
alias Phoenix.Token
|
||||
import Backend.Util
|
||||
|
||||
|
@ -12,6 +15,6 @@ defmodule Backend.Auth do
|
|||
|
||||
def verify_token(token) do
|
||||
# tokens are valid for 12 hours
|
||||
Token.verify(BackendWeb.Endpoint, @salt, token, max_age: 43200)
|
||||
Token.verify(BackendWeb.Endpoint, @salt, token, max_age: 43_200)
|
||||
end
|
||||
end
|
||||
|
|
|
@ -1,4 +1,7 @@
|
|||
defmodule Backend.Crawl do
|
||||
@moduledoc """
|
||||
Stores aggregate data about a single crawl (i.e. not individual statuses, but the number of statuses seen etc.)
|
||||
"""
|
||||
use Ecto.Schema
|
||||
import Ecto.Changeset
|
||||
|
||||
|
@ -11,16 +14,13 @@ defmodule Backend.Crawl do
|
|||
field :interactions_seen, :integer
|
||||
field :statuses_seen, :integer
|
||||
|
||||
# if something went wrong, otherwise null
|
||||
field :error, :string
|
||||
|
||||
timestamps()
|
||||
end
|
||||
|
||||
@doc false
|
||||
def changeset(crawl, attrs) do
|
||||
crawl
|
||||
|> cast(attrs, [:instance, :statuses_seen, :interactions_seen, :error])
|
||||
|> cast(attrs, [:instance, :statuses_seen, :interactions_seen])
|
||||
|> validate_required([:instance])
|
||||
end
|
||||
end
|
||||
|
|
|
@ -1,4 +1,8 @@
|
|||
defmodule Backend.CrawlInteraction do
|
||||
@moduledoc """
|
||||
Model for tracking interactions between instances. Stores the source and target instance, as well as the number
|
||||
of mentions seen in the given crawl.
|
||||
"""
|
||||
use Ecto.Schema
|
||||
import Ecto.Changeset
|
||||
|
||||
|
|
|
@ -11,10 +11,14 @@ defmodule Backend.Crawler.ApiCrawler do
|
|||
* Make sure to check the most recent crawl of the instance so you don't re-crawl old statuses
|
||||
"""
|
||||
|
||||
alias Backend.Crawler.Crawlers.Nodeinfo
|
||||
|
||||
# {domain_mentioned, count}
|
||||
@type instance_interactions :: %{String.t() => integer}
|
||||
# {domain, type} e.g. {"gab.com", "reject"}
|
||||
@type federation_restriction :: {String.t(), String.t()}
|
||||
|
||||
@type instance_type :: :mastodon | :pleroma | :gab
|
||||
@type instance_type :: :mastodon | :pleroma | :gab | :misskey | :gnusocial | :smithereen
|
||||
|
||||
defstruct [
|
||||
:version,
|
||||
|
@ -24,24 +28,39 @@ defmodule Backend.Crawler.ApiCrawler do
|
|||
:peers,
|
||||
:interactions,
|
||||
:statuses_seen,
|
||||
:instance_type
|
||||
:instance_type,
|
||||
:federation_restrictions
|
||||
]
|
||||
|
||||
@type t() :: %__MODULE__{
|
||||
version: String.t(),
|
||||
description: String.t(),
|
||||
user_count: integer,
|
||||
status_count: integer,
|
||||
version: String.t() | nil,
|
||||
description: String.t() | nil,
|
||||
user_count: integer | nil,
|
||||
status_count: integer | nil,
|
||||
peers: [String.t()],
|
||||
interactions: instance_interactions,
|
||||
statuses_seen: integer,
|
||||
instance_type: instance_type
|
||||
instance_type: instance_type | nil,
|
||||
federation_restrictions: [federation_restriction]
|
||||
}
|
||||
|
||||
@empty_result %{
|
||||
version: nil,
|
||||
description: nil,
|
||||
user_count: nil,
|
||||
status_count: nil,
|
||||
peers: [],
|
||||
interactions: %{},
|
||||
statuses_seen: 0,
|
||||
instance_type: nil,
|
||||
federation_restrictions: []
|
||||
}
|
||||
|
||||
@doc """
|
||||
Check whether the instance at the given domain is of the type that this ApiCrawler implements.
|
||||
Arguments are the instance domain and the nodeinfo results.
|
||||
"""
|
||||
@callback is_instance_type?(String.t()) :: boolean()
|
||||
@callback is_instance_type?(String.t(), ApiCrawler.t()) :: boolean()
|
||||
|
||||
@doc """
|
||||
Check whether the instance allows crawling according to its robots.txt or otherwise.
|
||||
|
@ -50,6 +69,14 @@ defmodule Backend.Crawler.ApiCrawler do
|
|||
|
||||
@doc """
|
||||
Crawl the instance at the given domain.
|
||||
Takes two arguments: the domain to crawl and the existing results (from nodeinfo).
|
||||
"""
|
||||
@callback crawl(String.t()) :: t()
|
||||
@callback crawl(String.t(), Nodeinfo.t()) :: t()
|
||||
|
||||
@doc """
|
||||
Returns the default, empty state
|
||||
"""
|
||||
def get_default do
|
||||
@empty_result
|
||||
end
|
||||
end
|
||||
|
|
|
@ -4,11 +4,23 @@ defmodule Backend.Crawler do
|
|||
"""
|
||||
|
||||
alias __MODULE__
|
||||
alias Backend.Crawler.Crawlers.Mastodon
|
||||
|
||||
alias Backend.{
|
||||
Crawl,
|
||||
CrawlInteraction,
|
||||
FederationRestriction,
|
||||
Instance,
|
||||
InstancePeer,
|
||||
MostRecentCrawl,
|
||||
Repo
|
||||
}
|
||||
|
||||
alias Backend.Crawler.ApiCrawler
|
||||
alias Backend.{Crawl, CrawlInteraction, Repo, Instance, InstancePeer}
|
||||
alias Backend.Crawler.Crawlers.{Friendica, GnuSocial, Mastodon, Misskey, Nodeinfo}
|
||||
|
||||
import Ecto.Query
|
||||
import Backend.Util
|
||||
|
||||
require Logger
|
||||
|
||||
defstruct [
|
||||
|
@ -16,8 +28,8 @@ defmodule Backend.Crawler do
|
|||
:domain,
|
||||
# a list of ApiCrawlers that will be attempted
|
||||
:api_crawlers,
|
||||
:found_api?,
|
||||
:allows_crawling?,
|
||||
:found_api?,
|
||||
:result,
|
||||
:error
|
||||
]
|
||||
|
@ -25,8 +37,8 @@ defmodule Backend.Crawler do
|
|||
@type t() :: %__MODULE__{
|
||||
domain: String.t(),
|
||||
api_crawlers: [ApiCrawler.t()],
|
||||
found_api?: boolean,
|
||||
allows_crawling?: boolean,
|
||||
found_api?: boolean,
|
||||
result: ApiCrawler.t() | nil,
|
||||
error: String.t() | nil
|
||||
}
|
||||
|
@ -37,15 +49,19 @@ defmodule Backend.Crawler do
|
|||
state = %Crawler{
|
||||
domain: domain,
|
||||
api_crawlers: [],
|
||||
found_api?: false,
|
||||
allows_crawling?: true,
|
||||
found_api?: false,
|
||||
result: nil,
|
||||
error: nil
|
||||
}
|
||||
|
||||
state
|
||||
# register APICrawlers here
|
||||
# These crawlers are run in the order they're registered. Nodeinfo should be the first one.
|
||||
|> register(Nodeinfo)
|
||||
|> register(Mastodon)
|
||||
|> register(Misskey)
|
||||
|> register(GnuSocial)
|
||||
|> register(Friendica)
|
||||
# go!
|
||||
|> crawl()
|
||||
|> save()
|
||||
|
@ -55,33 +71,57 @@ defmodule Backend.Crawler do
|
|||
|
||||
# Adds a new ApiCrawler that run/1 will check.
|
||||
defp register(%Crawler{api_crawlers: crawlers} = state, api_crawler) do
|
||||
Map.put(state, :api_crawlers, [api_crawler | crawlers])
|
||||
Map.put(state, :api_crawlers, crawlers ++ [api_crawler])
|
||||
end
|
||||
|
||||
# Recursive function to check whether `domain` has an API that the head of the api_crawlers list can read.
|
||||
# If so, crawls it. If not, continues with the tail of the api_crawlers list.
|
||||
defp crawl(%Crawler{api_crawlers: [], domain: domain} = state) do
|
||||
Logger.debug("Found no compatible API for #{domain}")
|
||||
Map.put(state, :found_api?, false)
|
||||
state
|
||||
end
|
||||
|
||||
defp crawl(%Crawler{domain: domain, api_crawlers: [curr | remaining_crawlers]} = state) do
|
||||
if curr.is_instance_type?(domain) do
|
||||
# Nodeinfo is distinct from other crawlers in that
|
||||
# a) it should always be run first
|
||||
# b) it passes the results on to the next crawlers (e.g. user_count)
|
||||
defp crawl(%Crawler{api_crawlers: [Nodeinfo | remaining_crawlers], domain: domain} = state) do
|
||||
if Nodeinfo.allows_crawling?(domain) do
|
||||
nodeinfo = Nodeinfo.crawl(domain, nil)
|
||||
|
||||
if nodeinfo != nil do
|
||||
Logger.debug("Found nodeinfo for #{domain}.")
|
||||
|
||||
crawl(%Crawler{
|
||||
state
|
||||
| result: nodeinfo,
|
||||
found_api?: true,
|
||||
api_crawlers: remaining_crawlers
|
||||
})
|
||||
else
|
||||
Logger.debug("Did not find nodeinfo for #{domain}.")
|
||||
crawl(%Crawler{state | api_crawlers: remaining_crawlers})
|
||||
end
|
||||
else
|
||||
crawl(%Crawler{state | api_crawlers: remaining_crawlers, allows_crawling?: false})
|
||||
end
|
||||
end
|
||||
|
||||
defp crawl(
|
||||
%Crawler{domain: domain, result: result, api_crawlers: [curr | remaining_crawlers]} =
|
||||
state
|
||||
) do
|
||||
if curr.is_instance_type?(domain, result) do
|
||||
Logger.debug("Found #{curr} instance")
|
||||
state = Map.put(state, :found_api?, true)
|
||||
|
||||
if curr.allows_crawling?(domain) do
|
||||
try do
|
||||
%Crawler{state | result: curr.crawl(domain), api_crawlers: []}
|
||||
%Crawler{state | result: curr.crawl(domain, result), found_api?: true}
|
||||
rescue
|
||||
e in HTTPoison.Error ->
|
||||
Map.put(state, :error, "HTTPoison error: " <> HTTPoison.Error.message(e))
|
||||
|
||||
e in Jason.DecodeError ->
|
||||
Map.put(state, :error, "Jason DecodeError: " <> Jason.DecodeError.message(e))
|
||||
|
||||
e in _ ->
|
||||
Map.put(state, :error, "Unknown error: " <> inspect(e))
|
||||
end
|
||||
else
|
||||
Logger.debug("#{domain} does not allow crawling.")
|
||||
|
@ -94,13 +134,19 @@ defmodule Backend.Crawler do
|
|||
end
|
||||
end
|
||||
|
||||
# Save the state (after crawling) to the database.
|
||||
## Save the state (after crawling) to the database. ##
|
||||
|
||||
# If we didn't get a server type, the crawl wasn't successful.
|
||||
defp save(%Crawler{result: %{type: nil}} = state) do
|
||||
save_error(state)
|
||||
end
|
||||
|
||||
defp save(%Crawler{
|
||||
domain: domain,
|
||||
result: result,
|
||||
found_api?: true,
|
||||
error: nil,
|
||||
allows_crawling?: true
|
||||
allows_crawling?: true,
|
||||
found_api?: true
|
||||
}) do
|
||||
now = get_now()
|
||||
|
||||
|
@ -113,33 +159,60 @@ defmodule Backend.Crawler do
|
|||
## Update the instance we crawled ##
|
||||
instance = %Instance{
|
||||
domain: domain,
|
||||
description: result.description,
|
||||
version: result.version,
|
||||
description: HtmlSanitizeEx.basic_html(result.description),
|
||||
version: HtmlSanitizeEx.basic_html(result.version),
|
||||
user_count: result.user_count,
|
||||
status_count: result.status_count,
|
||||
type: instance_type,
|
||||
base_domain: get_base_domain(domain)
|
||||
base_domain: get_base_domain(domain),
|
||||
next_crawl: NaiveDateTime.add(now, get_config(:crawl_interval_mins) * 60, :second),
|
||||
crawl_error: nil,
|
||||
crawl_error_count: 0
|
||||
}
|
||||
|
||||
Repo.insert!(
|
||||
instance,
|
||||
on_conflict:
|
||||
{:replace,
|
||||
[:description, :version, :user_count, :status_count, :type, :base_domain, :updated_at]},
|
||||
[
|
||||
:description,
|
||||
:version,
|
||||
:user_count,
|
||||
:status_count,
|
||||
:type,
|
||||
:base_domain,
|
||||
:updated_at,
|
||||
:next_crawl,
|
||||
:crawl_error,
|
||||
:crawl_error_count
|
||||
]},
|
||||
conflict_target: :domain
|
||||
)
|
||||
|
||||
Elasticsearch.put_document(Backend.Elasticsearch.Cluster, instance, "instances/_doc")
|
||||
Elasticsearch.put_document!(Backend.Elasticsearch.Cluster, instance, "instances/_doc")
|
||||
|
||||
# Save details of a new crawl
|
||||
## Save details of a new crawl ##
|
||||
curr_crawl =
|
||||
Repo.insert!(%Crawl{
|
||||
instance_domain: domain,
|
||||
interactions_seen:
|
||||
result.interactions |> Map.values() |> Enum.reduce(0, fn count, acc -> count + acc end),
|
||||
result.interactions
|
||||
|> Map.values()
|
||||
|> Enum.reduce(0, fn count, acc -> count + acc end),
|
||||
statuses_seen: result.statuses_seen
|
||||
})
|
||||
|
||||
Repo.insert!(
|
||||
%MostRecentCrawl{
|
||||
instance_domain: domain,
|
||||
crawl_id: curr_crawl.id,
|
||||
inserted_at: now,
|
||||
updated_at: now
|
||||
},
|
||||
on_conflict: {:replace, [:crawl_id, :updated_at]},
|
||||
conflict_target: :instance_domain
|
||||
)
|
||||
|
||||
# We get a list of peers from two places:
|
||||
# * the official peers endpoint (which may be disabled)
|
||||
# * the interactions
|
||||
|
@ -148,13 +221,25 @@ defmodule Backend.Crawler do
|
|||
|> Map.keys()
|
||||
|> list_union(result.peers)
|
||||
|> Enum.filter(fn domain -> domain != nil and not is_blacklisted?(domain) end)
|
||||
|> Enum.map(&clean_domain(&1))
|
||||
|> Enum.filter(fn peer_domain ->
|
||||
if is_valid_domain?(peer_domain) do
|
||||
true
|
||||
else
|
||||
Logger.info("Found invalid peer domain from #{domain}: #{peer_domain}")
|
||||
false
|
||||
end
|
||||
end)
|
||||
|
||||
peers =
|
||||
new_instances =
|
||||
peers_domains
|
||||
|> Enum.map(&%{domain: &1, inserted_at: now, updated_at: now})
|
||||
|> list_union(
|
||||
Enum.map(result.federation_restrictions, fn {domain, _restriction_type} -> domain end)
|
||||
)
|
||||
|> Enum.map(&%{domain: &1, inserted_at: now, updated_at: now, next_crawl: now})
|
||||
|
||||
Instance
|
||||
|> Repo.insert_all(peers, on_conflict: :nothing, conflict_target: :domain)
|
||||
|> Repo.insert_all(new_instances, on_conflict: :nothing, conflict_target: :domain)
|
||||
|
||||
Repo.transaction(fn ->
|
||||
## Save peer relationships ##
|
||||
|
@ -196,6 +281,55 @@ defmodule Backend.Crawler do
|
|||
|> Repo.insert_all(new_instance_peers)
|
||||
end)
|
||||
|
||||
## Save federation restrictions ##
|
||||
Repo.transaction(fn ->
|
||||
current_restrictions =
|
||||
FederationRestriction
|
||||
|> select([fr], {fr.target_domain, fr.type})
|
||||
|> where(source_domain: ^domain)
|
||||
|> Repo.all()
|
||||
|
||||
wanted_restrictions_set =
|
||||
result.federation_restrictions
|
||||
|> MapSet.new()
|
||||
|
||||
current_restrictions_set = MapSet.new(current_restrictions)
|
||||
|
||||
# Delete the ones we don't want
|
||||
restrictions_to_delete =
|
||||
current_restrictions_set
|
||||
|> MapSet.difference(wanted_restrictions_set)
|
||||
|> MapSet.to_list()
|
||||
|> Enum.map(fn {target_domain, _type} -> target_domain end)
|
||||
|
||||
if length(restrictions_to_delete) > 0 do
|
||||
FederationRestriction
|
||||
|> where(
|
||||
[fr],
|
||||
fr.source_domain == ^domain and fr.target_domain in ^restrictions_to_delete
|
||||
)
|
||||
|> Repo.delete_all()
|
||||
end
|
||||
|
||||
# Save the new ones
|
||||
new_restrictions =
|
||||
wanted_restrictions_set
|
||||
|> MapSet.difference(current_restrictions_set)
|
||||
|> MapSet.to_list()
|
||||
|> Enum.map(fn {target_domain, type} ->
|
||||
%{
|
||||
source_domain: domain,
|
||||
target_domain: target_domain,
|
||||
type: type,
|
||||
inserted_at: now,
|
||||
updated_at: now
|
||||
}
|
||||
end)
|
||||
|
||||
FederationRestriction
|
||||
|> Repo.insert_all(new_restrictions)
|
||||
end)
|
||||
|
||||
## Save interactions ##
|
||||
interactions =
|
||||
result.interactions
|
||||
|
@ -217,29 +351,46 @@ defmodule Backend.Crawler do
|
|||
Appsignal.increment_counter("crawler.success", 1)
|
||||
end
|
||||
|
||||
defp save(%{domain: domain, error: error, allows_crawling?: allows_crawling}) do
|
||||
defp save(state) do
|
||||
save_error(state)
|
||||
end
|
||||
|
||||
defp save_error(%{domain: domain, error: error, allows_crawling?: allows_crawling}) do
|
||||
now = get_now()
|
||||
|
||||
error =
|
||||
cond do
|
||||
not allows_crawling -> "robots.txt"
|
||||
error == nil -> "no api found"
|
||||
true -> "unknown error"
|
||||
true -> error
|
||||
end
|
||||
|
||||
Repo.transaction(fn ->
|
||||
Repo.insert!(
|
||||
%Instance{
|
||||
domain: domain,
|
||||
base_domain: get_base_domain(domain)
|
||||
},
|
||||
on_conflict: {:replace, [:base_domain]},
|
||||
conflict_target: :domain
|
||||
)
|
||||
# The "+1" is this error!
|
||||
error_count =
|
||||
Instance
|
||||
|> Repo.get_by!(domain: domain)
|
||||
|> Map.get(:crawl_error_count)
|
||||
|> Kernel.+(1)
|
||||
|
||||
Repo.insert!(%Crawl{
|
||||
instance_domain: domain,
|
||||
error: error
|
||||
})
|
||||
end)
|
||||
# The crawl interval grows exponentially at first but never goes above 24 hours
|
||||
crawl_interval_mins =
|
||||
min(get_config(:crawl_interval_mins) * round(:math.pow(2, error_count)), 1440)
|
||||
|
||||
next_crawl = NaiveDateTime.add(now, crawl_interval_mins * 60, :second)
|
||||
|
||||
Repo.insert!(
|
||||
%Instance{
|
||||
domain: domain,
|
||||
base_domain: get_base_domain(domain),
|
||||
crawl_error: error,
|
||||
crawl_error_count: error_count,
|
||||
next_crawl: next_crawl,
|
||||
updated_at: now
|
||||
},
|
||||
on_conflict:
|
||||
{:replace, [:base_domain, :crawl_error, :crawl_error_count, :next_crawl, :updated_at]},
|
||||
conflict_target: :domain
|
||||
)
|
||||
|
||||
Appsignal.increment_counter("crawler.failure", 1)
|
||||
end
|
||||
|
|
|
@ -0,0 +1,89 @@
|
|||
defmodule Backend.Crawler.Crawlers.Friendica do
|
||||
@moduledoc """
|
||||
A crawler for Friendica servers.
|
||||
These don't expose a public list of statuses. This crawler combines nodeinfo data with the /statistics.json endpoint
|
||||
in Friendica, and gets a list of peers from /poco/@server.
|
||||
"""
|
||||
alias Backend.Crawler.ApiCrawler
|
||||
import Backend.Crawler.Util
|
||||
import Backend.Util
|
||||
require Logger
|
||||
|
||||
@behaviour ApiCrawler
|
||||
|
||||
@impl ApiCrawler
|
||||
def is_instance_type?(domain, nodeinfo_result) do
|
||||
if nodeinfo_result != nil do
|
||||
Map.get(nodeinfo_result, :instance_type) == :friendica
|
||||
else
|
||||
case get_statistics(domain) do
|
||||
{:ok, stats} -> Map.get(stats, "network") |> String.downcase() == "friendica"
|
||||
{:error, _other} -> false
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
@impl ApiCrawler
|
||||
def allows_crawling?(domain) do
|
||||
[
|
||||
"/statistics.json",
|
||||
"/poco/@server"
|
||||
]
|
||||
|> Enum.map(fn endpoint -> "https://#{domain}#{endpoint}" end)
|
||||
|> urls_are_crawlable?()
|
||||
end
|
||||
|
||||
@impl ApiCrawler
|
||||
def crawl(domain, nodeinfo_result) do
|
||||
details =
|
||||
case get_statistics(domain) do
|
||||
{:ok, s} -> s
|
||||
{:error, _err} -> %{}
|
||||
end
|
||||
|> convert_keys_to_atoms()
|
||||
|> (fn m ->
|
||||
%{
|
||||
version: m.version,
|
||||
user_count: m.total_users,
|
||||
status_count: m.local_posts
|
||||
}
|
||||
end).()
|
||||
|> Map.merge(nodeinfo_result)
|
||||
|
||||
peers =
|
||||
case get_and_decode("https://#{domain}/poco/@server") do
|
||||
{:ok, p} -> p
|
||||
{:error, _err} -> []
|
||||
end
|
||||
|> Enum.map(fn peer ->
|
||||
peer
|
||||
|> Map.get("url")
|
||||
|> to_domain()
|
||||
end)
|
||||
|
||||
if details |> Map.get(:user_count, 0) |> is_above_user_threshold?() do
|
||||
ApiCrawler.get_default()
|
||||
|> Map.merge(%{peers: peers, instance_type: :friendica})
|
||||
|> Map.merge(Map.take(details, [:description, :version, :user_count, :status_count]))
|
||||
else
|
||||
Map.merge(ApiCrawler.get_default(), nodeinfo_result)
|
||||
end
|
||||
end
|
||||
|
||||
defp get_statistics(domain) do
|
||||
get_and_decode("https://#{domain}/statistics.json")
|
||||
end
|
||||
|
||||
defp to_domain(url) do
|
||||
url
|
||||
|> String.replace_prefix("http://", "")
|
||||
|> String.replace_prefix("https://", "")
|
||||
|> strip_username()
|
||||
end
|
||||
|
||||
# Sometimes a url at the poco/@server endpoint has the form username@domain.tld, in which case we only want domain.tld
|
||||
defp strip_username(string) do
|
||||
[_match, _username, domain] = Regex.run(~r/([\w\-_]+@)?([\w\.\-_]+)/, string)
|
||||
domain
|
||||
end
|
||||
end
|
|
@ -0,0 +1,191 @@
|
|||
defmodule Backend.Crawler.Crawlers.GnuSocial do
  @moduledoc """
  Crawler for GNU Social servers.
  """
  alias Backend.Crawler.ApiCrawler
  import Backend.Crawler.Util
  import Backend.Util
  require Logger

  @behaviour ApiCrawler

  @impl ApiCrawler
  def is_instance_type?(domain, nodeinfo_result) do
    if nodeinfo_result != nil do
      Map.get(nodeinfo_result, :instance_type) == :gnusocial
    else
      # Without nodeinfo, probe the GNU Social public timeline: a JSON array
      # response is taken as "this is GNU Social".
      case get_and_decode("https://#{domain}/api/statuses/public_timeline.json") do
        {:ok, statuses} -> is_list(statuses)
        {:error, _other} -> false
      end
    end
  end

  @impl ApiCrawler
  def allows_crawling?(domain) do
    [
      "/api/statuses/public_timeline.json"
    ]
    |> Enum.map(fn endpoint -> "https://#{domain}#{endpoint}" end)
    |> urls_are_crawlable?()
  end

  @impl ApiCrawler
  def crawl(domain, nodeinfo) do
    if nodeinfo == nil or
         nodeinfo |> Map.get(:user_count) |> is_above_user_threshold?() do
      # `nodeinfo` is nil on the first branch of the `or` above; Map.merge/2
      # raises BadMapError on nil, so substitute an empty map.
      Map.merge(nodeinfo || %{}, crawl_large_instance(domain))
    else
      Map.merge(ApiCrawler.get_default(), nodeinfo)
    end
  end

  # Full crawl of the public timeline for instances above the user threshold.
  @spec crawl_large_instance(String.t()) :: ApiCrawler.t()
  defp crawl_large_instance(domain) do
    status_datetime_threshold =
      NaiveDateTime.utc_now()
      |> NaiveDateTime.add(get_config(:status_age_limit_days) * 24 * 3600 * -1, :second)

    # Don't get any statuses older than this
    min_timestamp = max_datetime(get_last_crawl_timestamp(domain), status_datetime_threshold)

    {interactions, statuses_seen} = get_interactions(domain, min_timestamp)

    Map.merge(
      ApiCrawler.get_default(),
      %{
        interactions: interactions,
        statuses_seen: statuses_seen,
        instance_type: :gnusocial
      }
    )
  end

  # Recursively pages backwards through the public timeline (via max_id),
  # accumulating mention counts per peer domain and the number of eligible
  # statuses seen.
  @spec get_interactions(
          String.t(),
          NaiveDateTime.t(),
          String.t() | nil,
          ApiCrawler.instance_interactions(),
          integer()
        ) :: {ApiCrawler.instance_interactions(), integer()}
  defp get_interactions(
         domain,
         min_timestamp,
         max_id \\ nil,
         interactions \\ %{},
         statuses_seen \\ 0
       ) do
    endpoint = "https://#{domain}/api/statuses/public_timeline.json"

    # After the first page, paginate backwards with max_id.
    endpoint =
      if max_id != nil do
        endpoint <> "?max_id=#{max_id}"
      else
        endpoint
      end

    Logger.debug("Crawling #{endpoint}")

    statuses = get_and_decode!(endpoint)

    # Filter to statuses that are in the correct timeframe
    filtered_statuses =
      statuses
      |> Enum.filter(fn s ->
        s["created_at"]
        |> parse_timestamp()
        |> is_after?(min_timestamp)
      end)

    if length(filtered_statuses) > 0 do
      # Filter down further to statuses that a) aren't faves and b) aren't from #nobot users
      eligible_statuses =
        filtered_statuses |> Enum.filter(fn s -> not is_fave?(s) and not has_nobot?(s) end)

      # get statuses that are eligible (i.e. users don't have #nobot in their profile), have mentions, and are not faves
      interactions =
        eligible_statuses
        |> statuses_to_interactions()
        |> merge_count_maps(interactions)

      statuses_seen =
        eligible_statuses
        |> Kernel.length()
        |> Kernel.+(statuses_seen)

      oldest_status = Enum.at(filtered_statuses, -1)

      oldest_status_datetime =
        oldest_status
        |> Map.get("created_at")
        |> parse_timestamp()

      # Keep paginating while the oldest status on this page is still newer
      # than the cutoff, we're under the configured status limit, and the page
      # wasn't already truncated by our timeframe filter.
      if NaiveDateTime.compare(oldest_status_datetime, min_timestamp) == :gt and
           statuses_seen < get_config(:status_count_limit) and
           length(filtered_statuses) == length(statuses) do
        get_interactions(domain, min_timestamp, oldest_status["id"], interactions, statuses_seen)
      else
        {interactions, statuses_seen}
      end
    else
      {interactions, statuses_seen}
    end
  end

  @spec statuses_to_interactions(any()) :: ApiCrawler.instance_interactions()
  defp statuses_to_interactions(statuses) do
    statuses
    |> Enum.filter(fn status -> is_mention?(status) end)
    |> Enum.map(fn status -> extract_mentions_from_status(status) end)
    # Sum the per-status count maps. A plain Map.merge/2 (as before) would
    # overwrite the count for a domain mentioned in more than one status
    # instead of adding the counts together.
    |> Enum.reduce(%{}, &merge_count_maps/2)
  end

  # Checks whether the status contains one or more mentions
  @spec is_mention?(any()) :: boolean()
  defp is_mention?(%{"attentions" => []}) do
    false
  end

  defp is_mention?(_status) do
    true
  end

  @spec is_fave?(any()) :: boolean()
  defp is_fave?(status) do
    # Fave notices carry "fave" as one of the colon-separated segments of
    # their URI.
    uri_elements = status |> Map.get("uri") |> String.split(":")
    Enum.member?(uri_elements, "fave")
  end

  # True when the author's profile description contains "#nobot" (case-insensitive).
  @spec has_nobot?(any()) :: boolean()
  defp has_nobot?(status) do
    case get_in(status, ["user", "description"]) do
      nil ->
        false

      description ->
        description
        |> String.downcase()
        |> String.contains?("nobot")
    end
  end

  # Counts mentioned peer domains in a single status via its "attentions" list.
  @spec extract_mentions_from_status(any()) :: ApiCrawler.instance_interactions()
  defp extract_mentions_from_status(status) do
    status["attentions"]
    |> Enum.map(fn mention -> get_domain(mention["profileurl"]) end)
    |> Enum.reduce(%{}, fn domain, acc ->
      Map.update(acc, domain, 1, &(&1 + 1))
    end)
  end

  # Parses the messed-up time format that GNU social uses
  # Like seriously, it's 2019, why *wouldn't* you use iso8601?
  @spec parse_timestamp(String.t()) :: NaiveDateTime.t()
  defp parse_timestamp(timestamp) do
    timestamp
    |> Timex.parse!("{WDshort} {Mshort} {0D} {h24}:{0m}:{0s} {0Z} {YYYY}")
    |> Timex.to_naive_datetime()
  end
end
|
|
@ -1,74 +1,58 @@
|
|||
defmodule Backend.Crawler.Crawlers.Mastodon do
|
||||
@moduledoc """
|
||||
Crawler for the Mastodon API (used by Mastodon, its forks like Gab or Glitch, and Pleroma).
|
||||
"""
|
||||
require Logger
|
||||
import Backend.Crawler.Util
|
||||
import Backend.Util
|
||||
import Ecto.Query
|
||||
alias Backend.Crawler.ApiCrawler
|
||||
alias Backend.{Instance, Repo}
|
||||
|
||||
@behaviour ApiCrawler
|
||||
|
||||
@impl ApiCrawler
|
||||
def is_instance_type?(domain) do
|
||||
case get("https://#{domain}/api/v1/instance") do
|
||||
{:ok, response} -> if is_http_200?(response), do: has_title?(response.body), else: false
|
||||
{:error, _error} -> false
|
||||
def is_instance_type?(domain, result) do
|
||||
# We might already know that this is a Pleroma instance from nodeinfo
|
||||
if result != nil and (Map.get(result, :instance_type) == :pleroma or Map.get(result, :instance_type) == :smithereen) do
|
||||
true
|
||||
else
|
||||
case get_and_decode("https://#{domain}/api/v1/instance") do
|
||||
{:ok, %{"title" => _title}} -> true
|
||||
_other -> false
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
@impl ApiCrawler
|
||||
def allows_crawling?(domain) do
|
||||
endpoints = [
|
||||
[
|
||||
"/api/v1/instance",
|
||||
"/api/v1/instance/peers",
|
||||
"/api/v1/timelines/public"
|
||||
]
|
||||
|
||||
user_agent = get_config(:user_agent)
|
||||
|
||||
endpoints
|
||||
|> Enum.map(fn endpoint -> "https://#{domain}#{endpoint}" end)
|
||||
|> Enum.all?(fn endpoint -> Gollum.crawlable?(user_agent, endpoint) != :uncrawlable end)
|
||||
|> urls_are_crawlable?()
|
||||
end
|
||||
|
||||
@impl ApiCrawler
|
||||
def crawl(domain) do
|
||||
instance = Jason.decode!(get!("https://#{domain}/api/v1/instance").body)
|
||||
def crawl(domain, nodeinfo) do
|
||||
instance = get_and_decode!("https://#{domain}/api/v1/instance")
|
||||
user_count = get_in(instance, ["stats", "user_count"])
|
||||
|
||||
has_opted_in =
|
||||
case Instance |> select([:opt_in]) |> Repo.get_by(domain: domain) do
|
||||
%{opt_in: true} -> true
|
||||
_ -> false
|
||||
end
|
||||
|
||||
if get_in(instance, ["stats", "user_count"]) > get_config(:personal_instance_threshold) or
|
||||
has_opted_in do
|
||||
crawl_large_instance(domain, instance)
|
||||
if is_above_user_threshold?(user_count) or has_opted_in?(domain) do
|
||||
Map.merge(nodeinfo, crawl_large_instance(domain, instance))
|
||||
else
|
||||
Map.merge(
|
||||
Map.take(instance["stats"], ["user_count"])
|
||||
|> convert_keys_to_atoms(),
|
||||
%{
|
||||
peers: [],
|
||||
interactions: %{},
|
||||
statuses_seen: 0,
|
||||
instance_type: nil,
|
||||
description: nil,
|
||||
version: nil,
|
||||
status_count: nil
|
||||
}
|
||||
)
|
||||
ApiCrawler.get_default()
|
||||
|> Map.merge(nodeinfo)
|
||||
|> Map.merge(%{
|
||||
instance_type: get_instance_type(instance),
|
||||
user_count: get_in(instance, ["stats", "user_count"])
|
||||
})
|
||||
end
|
||||
end
|
||||
|
||||
@spec crawl_large_instance(String.t(), any()) :: ApiCrawler.t()
|
||||
defp crawl_large_instance(domain, instance) do
|
||||
# servers may not publish peers
|
||||
peers =
|
||||
case get("https://#{domain}/api/v1/instance/peers") do
|
||||
{:ok, response} -> if is_http_200?(response), do: Jason.decode!(response.body), else: []
|
||||
{:error, _error} -> []
|
||||
end
|
||||
peers = get_peers(domain)
|
||||
|
||||
Logger.debug("Found #{length(peers)} peers.")
|
||||
|
||||
|
@ -80,13 +64,6 @@ defmodule Backend.Crawler.Crawlers.Mastodon do
|
|||
} mentions in #{statuses_seen} statuses."
|
||||
)
|
||||
|
||||
instance_type =
|
||||
cond do
|
||||
Map.get(instance, "version") |> String.downcase() =~ "pleroma" -> :pleroma
|
||||
is_gab?(instance) -> :gab
|
||||
true -> :mastodon
|
||||
end
|
||||
|
||||
Map.merge(
|
||||
Map.merge(
|
||||
Map.take(instance, ["version", "description"]),
|
||||
|
@ -97,7 +74,7 @@ defmodule Backend.Crawler.Crawlers.Mastodon do
|
|||
peers: peers,
|
||||
interactions: interactions,
|
||||
statuses_seen: statuses_seen,
|
||||
instance_type: instance_type
|
||||
instance_type: get_instance_type(instance)
|
||||
}
|
||||
)
|
||||
end
|
||||
|
@ -120,7 +97,7 @@ defmodule Backend.Crawler.Crawlers.Mastodon do
|
|||
# most recent status we have.
|
||||
min_timestamp =
|
||||
if statuses_seen == 0 do
|
||||
get_last_successful_crawl_timestamp(domain)
|
||||
get_last_crawl_timestamp(domain)
|
||||
else
|
||||
min_timestamp
|
||||
end
|
||||
|
@ -136,19 +113,23 @@ defmodule Backend.Crawler.Crawlers.Mastodon do
|
|||
|
||||
Logger.debug("Crawling #{endpoint}")
|
||||
|
||||
statuses =
|
||||
endpoint
|
||||
|> get!()
|
||||
|> Map.get(:body)
|
||||
|> Jason.decode!()
|
||||
statuses = get_and_decode!(endpoint)
|
||||
|
||||
filtered_statuses =
|
||||
statuses
|
||||
|> Enum.filter(fn s -> is_after?(s["created_at"], min_timestamp) end)
|
||||
|> Enum.filter(fn s ->
|
||||
s["created_at"]
|
||||
|> NaiveDateTime.from_iso8601!()
|
||||
|> is_after?(min_timestamp)
|
||||
end)
|
||||
|
||||
if length(filtered_statuses) > 0 do
|
||||
# get statuses that are eligible (i.e. users don't have #nobot in their profile) and have mentions
|
||||
interactions = Map.merge(interactions, statuses_to_interactions(filtered_statuses))
|
||||
interactions =
|
||||
filtered_statuses
|
||||
|> statuses_to_interactions()
|
||||
|> merge_count_maps(interactions)
|
||||
|
||||
statuses_seen = statuses_seen + length(filtered_statuses)
|
||||
|
||||
status_datetime_threshold =
|
||||
|
@ -174,12 +155,11 @@ defmodule Backend.Crawler.Crawlers.Mastodon do
|
|||
end
|
||||
end
|
||||
|
||||
# To check if the endpoint works as expected
|
||||
@spec has_title?(String.t()) :: boolean
|
||||
defp has_title?(body) do
|
||||
case Jason.decode(body) do
|
||||
{:ok, decoded} -> Map.has_key?(decoded, "title")
|
||||
{:error, _error} -> false
|
||||
# Fetches the list of peer domains; servers that don't publish peers (or
# whose endpoint fails) yield an empty list.
defp get_peers(domain) do
  with {:ok, peers} <- get_and_decode("https://#{domain}/api/v1/instance/peers") do
    peers
  else
    {:error, _err} -> []
  end
end
|
||||
|
||||
|
@ -246,4 +226,13 @@ defmodule Backend.Crawler.Crawlers.Mastodon do
|
|||
title_is_gab
|
||||
end
|
||||
end
|
||||
|
||||
# Determines the server implementation from the /api/v1/instance response.
# Falls back to :mastodon when the version string is absent or unrecognized.
defp get_instance_type(instance_stats) do
  # Default to "" so a missing "version" key cannot crash String.downcase/1;
  # also downcase once instead of once per branch.
  version = instance_stats |> Map.get("version", "") |> String.downcase()

  cond do
    version =~ "pleroma" -> :pleroma
    version =~ "smithereen" -> :smithereen
    is_gab?(instance_stats) -> :gab
    true -> :mastodon
  end
end
|
||||
end
|
||||
|
|
|
@ -0,0 +1,210 @@
|
|||
defmodule Backend.Crawler.Crawlers.Misskey do
  @moduledoc """
  Crawler for Misskey servers.
  """
  alias Backend.Crawler.ApiCrawler

  @behaviour ApiCrawler
  import Backend.Crawler.Util
  import Backend.Util
  require Logger

  @impl ApiCrawler
  def is_instance_type?(domain, result) do
    # We may already know that this is a Misskey instance from nodeinfo
    if result != nil and Map.get(result, :instance_type) == :misskey do
      true
    else
      case get_version_and_description(domain) do
        {:ok, _} -> true
        {:error, _} -> false
      end
    end
  end

  @impl ApiCrawler
  def allows_crawling?(domain) do
    [
      "/api/meta",
      "/api/stats",
      "/api/notes/local-timeline",
      "/api/v1/instance/peers"
    ]
    |> Enum.map(fn endpoint -> "https://#{domain}#{endpoint}" end)
    |> urls_are_crawlable?()
  end

  @impl ApiCrawler
  def crawl(domain, nodeinfo) do
    # NOTE(review): this `with` has no else clause, so if /api/stats fails the
    # {:error, _} tuple is returned as-is instead of an ApiCrawler.t() —
    # confirm the crawler framework treats that as a failed crawl.
    with {:ok, %{"originalUsersCount" => user_count, "originalNotesCount" => status_count}} <-
           post_and_decode("https://#{domain}/api/stats") do
      if is_above_user_threshold?(user_count) or has_opted_in?(domain) do
        # `nodeinfo` may be nil (nodeinfo discovery can fail); Map.merge/2
        # raises BadMapError on nil, so substitute an empty map.
        Map.merge(nodeinfo || %{}, crawl_large_instance(domain, user_count, status_count))
      else
        ApiCrawler.get_default()
        |> Map.merge(nodeinfo || %{})
        |> Map.merge(%{
          user_count: user_count,
          # Use the same :instance_type key as every other crawler (and as
          # crawl_large_instance/3 below); the original wrote `type:`, which
          # left :instance_type at its default.
          instance_type: :misskey
        })
      end
    end
  end

  # Full crawl for instances above the user threshold (or that opted in).
  @spec crawl_large_instance(String.t(), integer(), integer()) :: ApiCrawler.t()
  defp crawl_large_instance(domain, user_count, status_count) do
    status_datetime_threshold =
      NaiveDateTime.utc_now()
      |> NaiveDateTime.add(get_config(:status_age_limit_days) * 24 * 3600 * -1, :second)

    # Don't get any statuses older than this
    min_timestamp = max_datetime(get_last_crawl_timestamp(domain), status_datetime_threshold)

    {interactions, statuses_seen} = get_interactions(domain, min_timestamp)
    {:ok, {version, description}} = get_version_and_description(domain)
    {:ok, peers} = get_peers(domain)

    %{
      instance_type: :misskey,
      # From stats endpoint
      user_count: user_count,
      status_count: status_count,
      # From meta endpoint
      version: version,
      description: description,
      # From timeline
      interactions: interactions,
      statuses_seen: statuses_seen,
      # From peers endpoint
      peers: peers
    }
  end

  # Recursively pages backwards through the local timeline (via untilId),
  # accumulating mention counts per peer domain and the number of original
  # (non-renote) statuses seen.
  @spec get_interactions(
          String.t(),
          NaiveDateTime.t(),
          String.t() | nil,
          ApiCrawler.instance_interactions(),
          integer()
        ) :: {ApiCrawler.instance_interactions(), integer()}
  defp get_interactions(
         domain,
         min_timestamp,
         until_id \\ nil,
         interactions \\ %{},
         statuses_seen \\ 0
       ) do
    endpoint = "https://#{domain}/api/notes/local-timeline"

    params = %{
      limit: 20
    }

    # After the first page, paginate backwards with untilId.
    params =
      if until_id != nil do
        Map.put(params, :untilId, until_id)
      else
        params
      end

    Logger.debug("Crawling #{endpoint} with untilId=#{until_id}")

    statuses = post_and_decode!(endpoint, Jason.encode!(params))

    # Only consider statuses created after min_timestamp.
    filtered_statuses =
      statuses
      |> Enum.filter(fn s ->
        s["createdAt"]
        |> NaiveDateTime.from_iso8601!()
        |> is_after?(min_timestamp)
      end)

    if length(filtered_statuses) > 0 do
      # get statuses that are eligible (i.e. users don't have #nobot in their profile) and have mentions
      interactions =
        filtered_statuses
        |> statuses_to_interactions()
        |> merge_count_maps(interactions)

      # Don't count renotes in the # of statuses seen
      statuses_seen =
        filtered_statuses
        |> Enum.filter(&is_original_status?(&1))
        |> Kernel.length()
        |> Kernel.+(statuses_seen)

      oldest_status = Enum.at(filtered_statuses, -1)

      oldest_status_datetime =
        oldest_status
        |> Map.get("createdAt")
        |> NaiveDateTime.from_iso8601!()

      # Keep paginating while the oldest status on this page is still newer
      # than the cutoff, we're under the configured status limit, and the page
      # wasn't already truncated by our timeframe filter.
      if NaiveDateTime.compare(oldest_status_datetime, min_timestamp) == :gt and
           statuses_seen < get_config(:status_count_limit) and
           length(filtered_statuses) == length(statuses) do
        get_interactions(domain, min_timestamp, oldest_status["id"], interactions, statuses_seen)
      else
        {interactions, statuses_seen}
      end
    else
      {interactions, statuses_seen}
    end
  end

  @spec get_version_and_description(String.t()) ::
          {:ok, {String.t(), String.t()}} | {:error, Jason.DecodeError.t() | HTTPoison.Error.t()}
  defp get_version_and_description(domain) do
    case post_and_decode("https://#{domain}/api/meta") do
      {:ok, %{"version" => version, "description" => description}} ->
        {:ok, {version, description}}

      {:error, err} ->
        {:error, err}
    end
  end

  # Misskey also serves the Mastodon-compatible peers endpoint; any failure is
  # treated as "no peers published".
  @spec get_peers(String.t()) :: {:ok, [String.t()]} | {:error, Jason.DecodeError.t()}
  defp get_peers(domain) do
    case get_and_decode("https://#{domain}/api/v1/instance/peers") do
      {:ok, peers} -> {:ok, peers}
      {:error, _} -> {:ok, []}
    end
  end

  @spec statuses_to_interactions(any()) :: ApiCrawler.instance_interactions()
  defp statuses_to_interactions(statuses) do
    statuses
    |> Enum.filter(fn status -> is_mention?(status) end)
    |> Enum.map(fn status -> extract_mentions_from_status(status) end)
    # Sum the per-status count maps. A plain Map.merge/2 (as before) would
    # overwrite the count for a domain mentioned in more than one status
    # instead of adding the counts together.
    |> Enum.reduce(%{}, &merge_count_maps/2)
  end

  # Checks whether
  # * it's not a renote (a.k.a. a boost)
  # * the status contains one or more mentions
  @spec is_mention?(any()) :: boolean()
  defp is_mention?(status) do
    has_mentions = Map.get(status, "mentions") != nil
    is_original_status?(status) and has_mentions
  end

  # Checks whether it's not a renote (a.k.a. a boost)
  @spec is_original_status?(any()) :: boolean()
  defp is_original_status?(status) do
    Map.get(status, "renoteId") == nil
  end

  # Counts mentioned peer domains by scanning the note text for @user@domain
  # handles.
  @spec extract_mentions_from_status(any()) :: ApiCrawler.instance_interactions()
  defp extract_mentions_from_status(status) do
    # "text" can be null on some notes; Regex.scan/2 would raise on nil.
    status_content = Map.get(status, "text") || ""

    Regex.scan(~r/@\w+@([\w\._\-]+)/, status_content)
    |> Enum.map(fn [_match, domain] -> domain end)
    |> Enum.reduce(%{}, fn domain, acc ->
      Map.update(acc, domain, 1, &(&1 + 1))
    end)
  end
end
|
|
@ -0,0 +1,139 @@
|
|||
defmodule Backend.Crawler.Crawlers.Nodeinfo do
  @moduledoc """
  This module is slightly different from the other crawlers. It's run before all the others and its
  result is included in theirs.
  """

  alias Backend.Crawler.ApiCrawler
  require Logger
  import Backend.Util
  import Backend.Crawler.Util
  @behaviour ApiCrawler

  @impl ApiCrawler
  def allows_crawling?(domain) do
    [
      ".well-known/nodeinfo"
    ]
    |> Enum.map(fn endpoint -> "https://#{domain}#{endpoint}" end)
    |> urls_are_crawlable?()
  end

  @impl ApiCrawler
  def is_instance_type?(_domain, _nodeinfo) do
    # This crawler is used slightly differently from the others -- we always check for nodeinfo.
    true
  end

  @impl ApiCrawler
  def crawl(domain, _curr_result) do
    # Discover the nodeinfo document URL, then fetch and normalize it.
    # Any failure falls back to the default (empty) crawl result.
    with {:ok, nodeinfo_url} <- get_nodeinfo_url(domain),
         {:ok, nodeinfo} <- get_nodeinfo(nodeinfo_url) do
      nodeinfo
    else
      _other -> ApiCrawler.get_default()
    end
  end

  @spec get_nodeinfo_url(String.t()) ::
          {:ok, String.t()} | {:error, Jason.DecodeError.t() | HTTPoison.Error.t()}
  defp get_nodeinfo_url(domain) do
    case get_and_decode("https://#{domain}/.well-known/nodeinfo") do
      {:ok, response} -> {:ok, process_nodeinfo_url(response)}
      {:error, err} -> {:error, err}
    end
  end

  # Picks the href of the first link advertising a nodeinfo schema version we
  # support.
  # NOTE(review): hd/1 raises on [] when no compatible version is listed, and
  # the `with` in crawl/2 does not catch raises — confirm the crawler
  # framework handles exceptions from crawlers.
  @spec process_nodeinfo_url(any()) :: String.t()
  defp process_nodeinfo_url(response) do
    response
    |> Map.get("links")
    |> Enum.filter(fn %{"rel" => rel} -> is_compatible_nodeinfo_version?(rel) end)
    |> Kernel.hd()
    |> Map.get("href")
  end

  # Corrected @spec: this function returns an ok/error tuple (see both case
  # branches below); the previous spec claimed a bare ApiCrawler.t().
  @spec get_nodeinfo(String.t()) ::
          {:ok, ApiCrawler.t()} | {:error, Jason.DecodeError.t() | HTTPoison.Error.t()}
  defp get_nodeinfo(nodeinfo_url) do
    case get_and_decode(nodeinfo_url) do
      {:ok, nodeinfo} -> {:ok, process_nodeinfo(nodeinfo)}
      {:error, err} -> {:error, err}
    end
  end

  # Normalizes a decoded nodeinfo document into the common crawl-result shape.
  # NOTE(review): if the document has no usage stats, `user_count` is nil and
  # nil compares greater than any integer in is_above_user_threshold?/1 —
  # confirm whether that "treat unknown as large" behavior is intended.
  @spec process_nodeinfo(any()) :: ApiCrawler.t()
  defp process_nodeinfo(nodeinfo) do
    user_count = get_in(nodeinfo, ["usage", "users", "total"])

    if is_above_user_threshold?(user_count) do
      # Both of these are used, depending on the server implementation
      description =
        [
          get_in(nodeinfo, ["metadata", "description"]),
          get_in(nodeinfo, ["metadata", "nodeDescription"])
        ]
        |> Enum.find(fn d -> d != nil end)

      # NOTE(review): String.to_atom/1 on a remote-controlled string can grow
      # the atom table without bound; consider a whitelist or String.to_existing_atom/1.
      type = nodeinfo |> get_in(["software", "name"]) |> String.downcase() |> String.to_atom()

      Map.merge(
        ApiCrawler.get_default(),
        %{
          description: description,
          user_count: user_count,
          status_count: get_in(nodeinfo, ["usage", "localPosts"]),
          instance_type: type,
          version: get_in(nodeinfo, ["software", "version"]),
          federation_restrictions: get_federation_restrictions(nodeinfo)
        }
      )
    else
      Map.merge(
        ApiCrawler.get_default(),
        %{
          user_count: user_count
        }
      )
    end
  end

  @spec is_compatible_nodeinfo_version?(String.t()) :: boolean()
  defp is_compatible_nodeinfo_version?(schema_url) do
    # The schema URL ends in its version number, e.g. ".../schema/2.0".
    version = String.slice(schema_url, (String.length(schema_url) - 3)..-1)
    Enum.member?(["1.0", "1.1", "2.0"], version)
  end

  # Extracts {domain, restriction_type} pairs from Pleroma-style MRF metadata
  # plus the quarantined-instances list.
  @spec get_federation_restrictions(any()) :: [ApiCrawler.federation_restriction()]
  defp get_federation_restrictions(nodeinfo) do
    mrf_simple = get_in(nodeinfo, ["metadata", "federation", "mrf_simple"])
    quarantined_domains = get_in(nodeinfo, ["metadata", "federation", "quarantined_instances"])

    quarantined_domains =
      if quarantined_domains == nil do
        []
      else
        Enum.map(quarantined_domains, fn domain -> {domain, "quarantine"} end)
      end

    if mrf_simple != nil do
      mrf_simple
      |> Map.take([
        "report_removal",
        "reject",
        "media_removal",
        "media_nsfw",
        "federated_timeline_removal",
        "banner_removal",
        "avatar_removal",
        "accept"
      ])
      |> Enum.flat_map(fn {type, domains} ->
        Enum.map(domains, fn domain -> {domain, type} end)
      end)
      |> Enum.concat(quarantined_domains)
    else
      quarantined_domains
    end
  end
end
|
|
@ -1,15 +1,15 @@
|
|||
defmodule Backend.Crawler.StaleInstanceManager do
|
||||
use GenServer
|
||||
alias Backend.{Crawl, Instance, Repo}
|
||||
import Ecto.Query
|
||||
import Backend.Util
|
||||
require Logger
|
||||
|
||||
@moduledoc """
|
||||
This module regularly finds stale instances (i.e. instances that haven't been updated for longer than the crawl
|
||||
interval) and adds them to the job queue. It runs once a minute.
|
||||
"""
|
||||
|
||||
use GenServer
|
||||
alias Backend.{Instance, Repo}
|
||||
import Ecto.Query
|
||||
import Backend.Util
|
||||
require Logger
|
||||
|
||||
# Starts the manager as a locally named singleton GenServer.
def start_link(_opts), do: GenServer.start_link(__MODULE__, [], name: __MODULE__)
|
||||
|
@ -20,14 +20,13 @@ defmodule Backend.Crawler.StaleInstanceManager do
|
|||
|
||||
instance_count =
|
||||
Instance
|
||||
|> where([i], not is_nil(i.version))
|
||||
|> select([i], count(i.domain))
|
||||
|> Repo.one()
|
||||
|
||||
case instance_count do
|
||||
# Add m.s. as the seed and schedule the next add
|
||||
0 ->
|
||||
add_to_queue("mastodon.social")
|
||||
add_to_queue("mastodon.ml")
|
||||
schedule_add()
|
||||
|
||||
# Start immediately
|
||||
|
@ -45,52 +44,22 @@ defmodule Backend.Crawler.StaleInstanceManager do
|
|||
{:noreply, state}
|
||||
end
|
||||
|
||||
defp schedule_add() do
|
||||
defp schedule_add do
|
||||
Process.send_after(self(), :queue_stale_domains, 60_000)
|
||||
end
|
||||
|
||||
# TODO: crawl instances with a blocking robots.txt less often (daily?)
|
||||
defp queue_stale_domains() do
|
||||
interval = -1 * get_config(:crawl_interval_mins)
|
||||
|
||||
# Get domains that have never been crawled and where the last crawl is past the threshold
|
||||
crawls_subquery =
|
||||
Crawl
|
||||
|> select([c], %{
|
||||
instance_domain: c.instance_domain,
|
||||
most_recent_crawl: max(c.inserted_at),
|
||||
crawl_count: count(c.id)
|
||||
})
|
||||
|> where([c], is_nil(c.error))
|
||||
|> group_by([c], c.instance_domain)
|
||||
defp queue_stale_domains do
|
||||
now = get_now()
|
||||
|
||||
stale_domains =
|
||||
Instance
|
||||
|> join(:left, [i], c in subquery(crawls_subquery), on: i.domain == c.instance_domain)
|
||||
|> where(
|
||||
[i, c],
|
||||
(c.most_recent_crawl < datetime_add(^NaiveDateTime.utc_now(), ^interval, "minute") or
|
||||
is_nil(c.crawl_count)) and not i.opt_out
|
||||
)
|
||||
|> select([i], i.domain)
|
||||
|> where([i], i.next_crawl < ^now and not i.opt_out)
|
||||
|> Repo.all()
|
||||
|> MapSet.new()
|
||||
|
||||
# Don't add a domain that's already in the queue
|
||||
domains_in_queue =
|
||||
Honeydew.filter(:crawl_queue, fn job ->
|
||||
is_pending_crawl_job = match?(%Honeydew.Job{completed_at: nil, task: {:run, [_]}}, job)
|
||||
|
||||
if is_pending_crawl_job do
|
||||
%Honeydew.Job{completed_at: nil, task: {:run, [d]}} = job
|
||||
MapSet.member?(stale_domains, d)
|
||||
else
|
||||
false
|
||||
end
|
||||
end)
|
||||
|> Enum.map(fn %Honeydew.Job{task: {:run, [d]}} -> d end)
|
||||
|> MapSet.new()
|
||||
|
||||
domains_in_queue = get_domains_in_queue(stale_domains)
|
||||
domains_to_queue = MapSet.difference(stale_domains, domains_in_queue)
|
||||
|
||||
Logger.debug("Adding #{MapSet.size(domains_to_queue)} stale domains to queue.")
|
||||
|
@ -102,4 +71,20 @@ defmodule Backend.Crawler.StaleInstanceManager do
|
|||
# Enqueues a crawl job for the given domain on the Honeydew crawl queue.
defp add_to_queue(domain) do
  Honeydew.async({:run, [domain]}, :crawl_queue)
end
|
||||
|
||||
@spec get_domains_in_queue(MapSet.t()) :: MapSet.t()
# Returns the subset of `domains` that already have a pending (uncompleted)
# crawl job sitting in the Honeydew queue.
defp get_domains_in_queue(domains) do
  :crawl_queue
  |> Honeydew.filter(fn
    # Pending crawl jobs carry a single-domain :run task and no completion time.
    %Honeydew.Job{completed_at: nil, task: {:run, [d]}} -> MapSet.member?(domains, d)
    _other -> false
  end)
  |> Enum.map(fn %Honeydew.Job{task: {:run, [d]}} -> d end)
  |> MapSet.new()
end
|
||||
end
|
||||
|
|
|
@ -1,32 +1,27 @@
|
|||
defmodule Backend.Crawler.Util do
|
||||
@moduledoc false
|
||||
require Logger
|
||||
alias Backend.{Instance, Repo}
|
||||
import Backend.Util
|
||||
import Ecto.Query
|
||||
|
||||
# Gets the domain from a Mastodon/Pleroma account URL
|
||||
# (e.g. https://mastodon.social/@demouser or https://pleroma.site/users/demouser)
|
||||
@spec get_domain(String.t()) :: String.t()
|
||||
def get_domain(url) do
|
||||
String.slice(url, 8..-1)
|
||||
|> String.split("/")
|
||||
|> Enum.at(0)
|
||||
[_match, domain] = Regex.run(~r/https?:\/\/([\w\.\-_]+)\/.*/, url)
|
||||
domain
|
||||
end
|
||||
|
||||
@spec is_http_200?(HTTPoison.Response.t()) :: boolean
# True only when the response carries HTTP status 200.
def is_http_200?(response) do
  match?(%{status_code: 200}, response)
end
|
||||
|
||||
@spec is_after?(String.t(), NaiveDateTime.t() | nil) :: boolean()
|
||||
@doc """
|
||||
Returns true if the first argument is after the second.
|
||||
"""
|
||||
@spec is_after?(NaiveDateTime.t(), NaiveDateTime.t() | nil) :: boolean()
|
||||
def is_after?(timestamp, threshold) do
|
||||
if threshold == nil do
|
||||
true
|
||||
else
|
||||
timestamp
|
||||
|> NaiveDateTime.from_iso8601!()
|
||||
# :second is the granularity used in the database
|
||||
|> NaiveDateTime.truncate(:second)
|
||||
|> NaiveDateTime.compare(threshold)
|
||||
|
@ -34,30 +29,24 @@ defmodule Backend.Crawler.Util do
|
|||
end
|
||||
end
|
||||
|
||||
def get(url) do
|
||||
# TODO: add version number to user agent?
|
||||
HTTPoison.get(url, [{"User-Agent", get_config(:user_agent)}],
|
||||
hackney: [pool: :crawler],
|
||||
recv_timeout: 15000,
|
||||
timeout: 15000
|
||||
)
|
||||
@spec urls_are_crawlable?([String.t()]) :: boolean()
# Checks robots.txt (via Gollum) for every URL; true if none of them is
# explicitly uncrawlable for our configured user agent.
def urls_are_crawlable?(urls) do
  agent = get_config(:user_agent)
  Enum.all?(urls, fn url -> Gollum.crawlable?(agent, url) != :uncrawlable end)
end
|
||||
|
||||
@spec get!(binary) :: %{
|
||||
:__struct__ => HTTPoison.AsyncResponse | HTTPoison.Response,
|
||||
optional(:body) => any,
|
||||
optional(:headers) => [any],
|
||||
optional(:id) => reference,
|
||||
optional(:request) => HTTPoison.Request.t(),
|
||||
optional(:request_url) => any,
|
||||
optional(:status_code) => integer
|
||||
}
|
||||
def get!(url) do
|
||||
# TODO: add version number to user agent?
|
||||
HTTPoison.get!(url, [{"User-Agent", get_config(:user_agent)}],
|
||||
hackney: [pool: :crawler],
|
||||
recv_timeout: 15000,
|
||||
timeout: 15000
|
||||
)
|
||||
@spec has_opted_in?(String.t()) :: boolean()
# True if the instance row exists and has explicitly opted in to crawling.
def has_opted_in?(domain) do
  result = Instance |> select([:opt_in]) |> Repo.get_by(domain: domain)
  match?(%{opt_in: true}, result)
end
|
||||
|
||||
@spec is_above_user_threshold?(integer) :: boolean()
# Instances larger than the configured personal-instance threshold get a full
# crawl; smaller ones are skipped unless they opt in.
def is_above_user_threshold?(user_count),
  do: user_count > get_config(:personal_instance_threshold)
|
||||
end
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
defmodule Backend.Edge do
|
||||
@moduledoc false
|
||||
use Ecto.Schema
|
||||
import Ecto.Changeset
|
||||
|
||||
|
|
|
@ -1,3 +1,22 @@
|
|||
defmodule Backend.Elasticsearch.Cluster do
  @moduledoc false
  use Elasticsearch.Cluster, otp_app: :backend

  # Injects the index configuration at runtime so the settings-file path is
  # resolved via Application.app_dir/2 (it differs between dev and releases).
  def init(config) do
    index_config = %{
      instances: %{
        settings: Application.app_dir(:backend, "priv/elasticsearch/instances.json"),
        store: Backend.Elasticsearch.Store,
        sources: [Backend.Instance],
        bulk_page_size: 1000,
        bulk_wait_interval: 1000
      }
    }

    {:ok, Map.put(config, :indexes, index_config)}
  end
end
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
defmodule Backend.Elasticsearch.Store do
|
||||
@moduledoc false
|
||||
@behaviour Elasticsearch.Store
|
||||
|
||||
alias Backend.Repo
|
||||
|
|
|
@ -0,0 +1,28 @@
|
|||
defmodule Backend.FederationRestriction do
  @moduledoc false
  use Ecto.Schema
  import Ecto.Changeset

  schema "federation_restrictions" do
    belongs_to :source, Backend.Instance,
      references: :domain,
      type: :string,
      foreign_key: :source_domain

    belongs_to :target, Backend.Instance,
      references: :domain,
      type: :string,
      foreign_key: :target_domain

    field :type, :string

    timestamps()
  end

  @doc false
  def changeset(federation_restriction, attrs) do
    federation_restriction
    # Cast the foreign-key fields, not the association names: :source and
    # :target are associations, and Ecto.Changeset.cast/3 only accepts schema
    # fields (it raises on association names).
    |> cast(attrs, [:source_domain, :target_domain, :type])
    |> validate_required([:source_domain, :target_domain, :type])
  end
end
|
|
@ -1,4 +1,8 @@
|
|||
defmodule Backend.Instance do
|
||||
@moduledoc """
|
||||
Model for storing everything related to an instance: not only the data from crawls, but also statistics, the time
|
||||
of the next scheduled crawl, X and Y coordinates on the graph, and so on.
|
||||
"""
|
||||
use Ecto.Schema
|
||||
import Ecto.Changeset
|
||||
|
||||
|
@ -14,6 +18,9 @@ defmodule Backend.Instance do
|
|||
field :base_domain, :string
|
||||
field :opt_in, :boolean
|
||||
field :opt_out, :boolean
|
||||
field :next_crawl, :naive_datetime
|
||||
field :crawl_error, :string
|
||||
field :crawl_error_count, :integer
|
||||
|
||||
many_to_many :peers, Backend.Instance,
|
||||
join_through: Backend.InstancePeer,
|
||||
|
@ -25,6 +32,10 @@ defmodule Backend.Instance do
|
|||
foreign_key: :source_domain,
|
||||
references: :domain
|
||||
|
||||
has_many :federation_restrictions, Backend.FederationRestriction,
|
||||
foreign_key: :source_domain,
|
||||
references: :domain
|
||||
|
||||
timestamps()
|
||||
end
|
||||
|
||||
|
@ -43,7 +54,10 @@ defmodule Backend.Instance do
|
|||
:statuses_per_day,
|
||||
:base_domain,
|
||||
:opt_in,
|
||||
:opt_out
|
||||
:opt_out,
|
||||
:next_crawl,
|
||||
:crawl_error,
|
||||
:crawl_error_count
|
||||
])
|
||||
|> validate_required([:domain])
|
||||
|> put_assoc(:peers, attrs.peers)
|
||||
|
@ -59,7 +73,8 @@ defmodule Backend.Instance do
|
|||
domain: instance.domain,
|
||||
description: instance.description,
|
||||
type: instance.type,
|
||||
user_count: instance.user_count
|
||||
user_count: instance.user_count,
|
||||
opt_out: instance.opt_out
|
||||
}
|
||||
end
|
||||
end
|
||||
|
|
|
@ -1,4 +1,8 @@
|
|||
defmodule Backend.InstancePeer do
|
||||
@moduledoc """
|
||||
Model for tracking which other instances a given instance knows of
|
||||
(the data returned from /api/v1/instance/peers from Mastodon, for example)
|
||||
"""
|
||||
use Ecto.Schema
|
||||
import Ecto.Changeset
|
||||
|
||||
|
|
|
@ -0,0 +1,26 @@
|
|||
defmodule Backend.MostRecentCrawl do
|
||||
@moduledoc """
|
||||
Model for fast access to the most recent crawl ID for a given domain.
|
||||
You could also just look this up in the crawls table, but that table gets very large so this is much faster.
|
||||
"""
|
||||
use Ecto.Schema
|
||||
import Ecto.Changeset
|
||||
|
||||
schema "most_recent_crawl" do
|
||||
belongs_to :instance, Backend.Instance,
|
||||
references: :domain,
|
||||
type: :string,
|
||||
foreign_key: :instance_domain
|
||||
|
||||
belongs_to :crawl, Backend.Crawl
|
||||
|
||||
timestamps()
|
||||
end
|
||||
|
||||
@doc false
|
||||
def changeset(edge, attrs) do
|
||||
edge
|
||||
|> cast(attrs, [:instance, :crawl])
|
||||
|> validate_required([:instance, :crawl])
|
||||
end
|
||||
end
|
|
@ -1,4 +1,8 @@
|
|||
defmodule Backend.Release do
|
||||
@moduledoc """
|
||||
Functions related to releases. Can be run against the compiled binary with e.g.
|
||||
`/bin/backend eval "Backend.Release.migrate()"`
|
||||
"""
|
||||
@app :backend
|
||||
@start_apps [
|
||||
:crypto,
|
||||
|
@ -10,7 +14,7 @@ defmodule Backend.Release do
|
|||
]
|
||||
|
||||
# Ecto repos to start, if any
|
||||
@repos Application.get_env(:backend, :ecto_repos, [])
|
||||
@repos Application.compile_env(:backend, :ecto_repos, [])
|
||||
# Elasticsearch clusters to start
|
||||
@clusters [Backend.Elasticsearch.Cluster]
|
||||
# Elasticsearch indexes to build
|
||||
|
@ -31,7 +35,7 @@ defmodule Backend.Release do
|
|||
{:ok, _, _} = Ecto.Migrator.with_repo(repo, &Ecto.Migrator.run(&1, :down, to: version))
|
||||
end
|
||||
|
||||
def build_elasticsearch_indexes() do
|
||||
def build_elasticsearch_indexes do
|
||||
start_services()
|
||||
IO.puts("Building indexes...")
|
||||
Enum.each(@indexes, &Elasticsearch.Index.hot_swap(Backend.Elasticsearch.Cluster, &1))
|
||||
|
|
|
@ -1,9 +1,10 @@
|
|||
defmodule Backend.Repo do
|
||||
use Ecto.Repo,
|
||||
otp_app: :backend,
|
||||
adapter: Ecto.Adapters.Postgres
|
||||
adapter: Ecto.Adapters.Postgres,
|
||||
timeout: 25_000
|
||||
|
||||
use Paginator
|
||||
use Scrivener, page_size: 20
|
||||
|
||||
def init(_type, config) do
|
||||
{:ok, Keyword.put(config, :url, System.get_env("DATABASE_URL"))}
|
||||
|
|
|
@ -3,11 +3,14 @@ defmodule Backend.Scheduler do
|
|||
This module runs recurring tasks.
|
||||
"""
|
||||
|
||||
use Quantum.Scheduler, otp_app: :backend
|
||||
use Quantum, otp_app: :backend
|
||||
|
||||
alias Backend.{Crawl, CrawlInteraction, Edge, FederationRestriction, Instance, Repo}
|
||||
alias Backend.Mailer.AdminEmail
|
||||
|
||||
alias Backend.{Crawl, Edge, CrawlInteraction, Instance, Repo}
|
||||
import Backend.Util
|
||||
import Ecto.Query
|
||||
|
||||
require Logger
|
||||
|
||||
@doc """
|
||||
|
@ -25,7 +28,7 @@ defmodule Backend.Scheduler do
|
|||
i.inserted_at <
|
||||
datetime_add(^NaiveDateTime.utc_now(), -1 * ^amount, ^unit)
|
||||
)
|
||||
|> Repo.delete_all()
|
||||
|> Repo.delete_all(timeout: :infinity)
|
||||
|
||||
Logger.info("Pruned #{deleted_num} old crawls.")
|
||||
end
|
||||
|
@ -34,36 +37,58 @@ defmodule Backend.Scheduler do
|
|||
Calculates every instance's "insularity score" -- that is, the percentage of mentions that are among users on the
|
||||
instance, rather than at other instances.
|
||||
"""
|
||||
def generate_insularity_scores() do
|
||||
def generate_insularity_scores do
|
||||
now = get_now()
|
||||
|
||||
crawls_subquery =
|
||||
Crawl
|
||||
|> select([c], %{
|
||||
instance_domain: c.instance_domain,
|
||||
statuses_seen: sum(c.statuses_seen),
|
||||
interactions_seen: sum(c.interactions_seen)
|
||||
})
|
||||
|> where([c], is_nil(c.error))
|
||||
|> group_by([c], c.instance_domain)
|
||||
|
||||
scores =
|
||||
self_mentions_subquery =
|
||||
CrawlInteraction
|
||||
|> join(:left, [ci], c in subquery(crawls_subquery),
|
||||
on: ci.source_domain == c.instance_domain
|
||||
)
|
||||
|> where([ci], ci.source_domain == ci.target_domain)
|
||||
|> group_by([ci], ci.source_domain)
|
||||
|> select([ci, c], %{
|
||||
|> select([ci], %{
|
||||
domain: ci.source_domain,
|
||||
mentions: sum(ci.mentions),
|
||||
# we can take min() because every row is the same
|
||||
interactions: min(c.interactions_seen)
|
||||
self_mentions: sum(ci.mentions)
|
||||
})
|
||||
|> Repo.all()
|
||||
|> group_by([ci], ci.source_domain)
|
||||
|
||||
scores =
|
||||
Instance
|
||||
|> join(:inner, [i], c in subquery(crawls_subquery), on: i.domain == c.instance_domain)
|
||||
|> join(:left, [i, c], ci in subquery(self_mentions_subquery), on: i.domain == ci.domain)
|
||||
# don't generate insularity scores for instances where we haven't seen any activity
|
||||
# (e.g. server types where the timeline isn't crawled)
|
||||
|> where([i, c, ci], c.statuses_seen > 0)
|
||||
|> select([i, c, ci], %{
|
||||
domain: i.domain,
|
||||
mentions: ci.self_mentions,
|
||||
interactions: c.interactions_seen
|
||||
})
|
||||
|> Repo.all(timeout: :infinity)
|
||||
|> Enum.map(fn %{domain: domain, mentions: mentions, interactions: interactions} ->
|
||||
insularity =
|
||||
cond do
|
||||
# if we haven't seen any self mentions, but there are interactions, it means that users on the instance
|
||||
# only mentions others, i.e. insularity is 0
|
||||
mentions == nil and interactions != 0 ->
|
||||
0.0
|
||||
|
||||
interactions > 0 ->
|
||||
mentions / interactions
|
||||
|
||||
true ->
|
||||
nil
|
||||
end
|
||||
|
||||
%{
|
||||
domain: domain,
|
||||
insularity: mentions / interactions,
|
||||
insularity: insularity,
|
||||
inserted_at: now,
|
||||
updated_at: now
|
||||
}
|
||||
|
@ -72,19 +97,20 @@ defmodule Backend.Scheduler do
|
|||
Instance
|
||||
|> Repo.insert_all(scores,
|
||||
on_conflict: {:replace, [:insularity, :updated_at]},
|
||||
conflict_target: :domain
|
||||
conflict_target: :domain,
|
||||
timeout: :infinity
|
||||
)
|
||||
end
|
||||
|
||||
@doc """
|
||||
This function calculates the average number of statuses per hour over the last month.
|
||||
"""
|
||||
def generate_status_rate() do
|
||||
def generate_status_rate do
|
||||
now = get_now()
|
||||
# We want the earliest sucessful crawl so that we can exclude it from the statistics.
|
||||
# This is because the first crawl goes up to one month into the past -- this would mess up the counts!
|
||||
# The statistics from here assume that all statuses were written at exactly the crawl's inserted_at timestamp.
|
||||
earliest_successful_crawl_subquery =
|
||||
earliest_crawl_subquery =
|
||||
Crawl
|
||||
|> group_by([c], c.instance_domain)
|
||||
|> select([c], %{
|
||||
|
@ -93,23 +119,24 @@ defmodule Backend.Scheduler do
|
|||
})
|
||||
|
||||
instances =
|
||||
Crawl
|
||||
|> join(:inner, [c], c2 in subquery(earliest_successful_crawl_subquery),
|
||||
on: c.instance_domain == c2.instance_domain
|
||||
Instance
|
||||
|> join(:inner, [i], c in Crawl, on: i.domain == c.instance_domain)
|
||||
|> join(:inner, [i], c2 in subquery(earliest_crawl_subquery),
|
||||
on: i.domain == c2.instance_domain
|
||||
)
|
||||
|> where(
|
||||
[c, c2],
|
||||
c.inserted_at > c2.earliest_crawl and not is_nil(c.statuses_seen) and is_nil(c.error)
|
||||
[i, c, c2],
|
||||
c.inserted_at > c2.earliest_crawl and c.statuses_seen > 0
|
||||
)
|
||||
|> select([c], %{
|
||||
instance_domain: c.instance_domain,
|
||||
|> select([i, c], %{
|
||||
domain: i.domain,
|
||||
status_count: sum(c.statuses_seen),
|
||||
second_earliest_crawl: min(c.inserted_at)
|
||||
})
|
||||
|> group_by([c], c.instance_domain)
|
||||
|> Repo.all()
|
||||
|> group_by([i], i.domain)
|
||||
|> Repo.all(timeout: :infinity)
|
||||
|> Enum.map(fn %{
|
||||
instance_domain: domain,
|
||||
domain: domain,
|
||||
status_count: status_count,
|
||||
second_earliest_crawl: oldest_timestamp
|
||||
} ->
|
||||
|
@ -128,16 +155,20 @@ defmodule Backend.Scheduler do
|
|||
Instance
|
||||
|> Repo.insert_all(instances,
|
||||
on_conflict: {:replace, [:statuses_per_day, :updated_at]},
|
||||
conflict_target: :domain
|
||||
conflict_target: :domain,
|
||||
timeout: :infinity
|
||||
)
|
||||
end
|
||||
|
||||
@doc """
|
||||
This function aggregates statistics from the interactions in the database.
|
||||
It calculates the strength of edges between nodes. Self-edges are not generated.
|
||||
Edges are only generated if both instances have been succesfully crawled.
|
||||
Edges are only generated if
|
||||
* both instances have been succesfully crawled
|
||||
* neither of the instances have blocked each other
|
||||
* there are interactions in each direction (if :require_bidirectional_edges is true in config)
|
||||
"""
|
||||
def generate_edges() do
|
||||
def generate_edges do
|
||||
now = get_now()
|
||||
|
||||
crawls_subquery =
|
||||
|
@ -146,7 +177,6 @@ defmodule Backend.Scheduler do
|
|||
instance_domain: c.instance_domain,
|
||||
statuses_seen: sum(c.statuses_seen)
|
||||
})
|
||||
|> where([c], is_nil(c.error))
|
||||
|> group_by([c], c.instance_domain)
|
||||
|
||||
interactions =
|
||||
|
@ -157,70 +187,61 @@ defmodule Backend.Scheduler do
|
|||
|> join(:inner, [ci], c_target in subquery(crawls_subquery),
|
||||
on: ci.target_domain == c_target.instance_domain
|
||||
)
|
||||
|> where([ci], ci.source_domain != ci.target_domain)
|
||||
|> group_by([ci], [ci.source_domain, ci.target_domain])
|
||||
|> select([ci, c_source, c_target], %{
|
||||
|> join(:inner, [ci], i_source in Instance, on: ci.source_domain == i_source.domain)
|
||||
|> join(:inner, [ci], i_target in Instance, on: ci.target_domain == i_target.domain)
|
||||
|> select([ci, c_source, c_target, i_source, i_target], %{
|
||||
source_domain: ci.source_domain,
|
||||
target_domain: ci.target_domain,
|
||||
mentions: sum(ci.mentions),
|
||||
# we can take min() because every row is the same
|
||||
source_type: min(i_source.type),
|
||||
target_type: min(i_target.type),
|
||||
source_statuses_seen: min(c_source.statuses_seen),
|
||||
target_statuses_seen: min(c_target.statuses_seen)
|
||||
})
|
||||
|> where([ci], ci.source_domain != ci.target_domain)
|
||||
|> group_by([ci], [ci.source_domain, ci.target_domain])
|
||||
|> Repo.all(timeout: :infinity)
|
||||
|
||||
federation_blocks =
|
||||
FederationRestriction
|
||||
|> select([fr], {fr.source_domain, fr.target_domain})
|
||||
|> where([fr], fr.type == "reject")
|
||||
|> Repo.all()
|
||||
|> MapSet.new()
|
||||
|
||||
new_edges =
|
||||
interactions
|
||||
|> filter_to_eligible_interactions(federation_blocks)
|
||||
|> combine_mention_directions()
|
||||
|> Enum.map(fn {{source_domain, target_domain}, {mention_count, statuses_seen}} ->
|
||||
%{
|
||||
source_domain: source_domain,
|
||||
target_domain: target_domain,
|
||||
weight: mention_count / statuses_seen,
|
||||
inserted_at: now,
|
||||
updated_at: now
|
||||
}
|
||||
end)
|
||||
|
||||
# Get edges and their weights
|
||||
Repo.transaction(fn ->
|
||||
Edge
|
||||
|> Repo.delete_all()
|
||||
Repo.transaction(
|
||||
fn ->
|
||||
Edge
|
||||
|> Repo.delete_all(timeout: :infinity)
|
||||
|
||||
edges =
|
||||
interactions
|
||||
# Get a map of %{{source, target} => {total_mention_count, total_statuses_seen}}
|
||||
|> Enum.reduce(%{}, fn
|
||||
%{
|
||||
source_domain: source_domain,
|
||||
target_domain: target_domain,
|
||||
mentions: mentions,
|
||||
source_statuses_seen: source_statuses_seen,
|
||||
target_statuses_seen: target_statuses_seen
|
||||
},
|
||||
acc ->
|
||||
key = get_interaction_key(source_domain, target_domain)
|
||||
|
||||
# target_statuses_seen might be nil if that instance was never crawled. default to 0.
|
||||
target_statuses_seen =
|
||||
case target_statuses_seen do
|
||||
nil -> 0
|
||||
_ -> target_statuses_seen
|
||||
end
|
||||
|
||||
statuses_seen = source_statuses_seen + target_statuses_seen
|
||||
|
||||
Map.update(acc, key, {mentions, statuses_seen}, fn {curr_mentions, curr_statuses_seen} ->
|
||||
{curr_mentions + mentions, curr_statuses_seen}
|
||||
end)
|
||||
end)
|
||||
|> Enum.map(fn {{source_domain, target_domain}, {mention_count, statuses_seen}} ->
|
||||
%{
|
||||
source_domain: source_domain,
|
||||
target_domain: target_domain,
|
||||
weight: mention_count / statuses_seen,
|
||||
inserted_at: now,
|
||||
updated_at: now
|
||||
}
|
||||
end)
|
||||
|
||||
Edge
|
||||
|> Repo.insert_all(edges)
|
||||
end)
|
||||
Edge
|
||||
|> Repo.insert_all(new_edges, timeout: :infinity)
|
||||
end,
|
||||
timeout: :infinity
|
||||
)
|
||||
end
|
||||
|
||||
@doc """
|
||||
This function checks to see if a lot of instances on the same base domain have been created recently. If so,
|
||||
notifies the server admin over SMS.
|
||||
"""
|
||||
def check_for_spam_instances() do
|
||||
def check_for_spam_instances do
|
||||
hour_range = 3
|
||||
|
||||
count_subquery =
|
||||
|
@ -259,10 +280,90 @@ defmodule Backend.Scheduler do
|
|||
end).()
|
||||
|
||||
Logger.info(message)
|
||||
send_admin_sms(message)
|
||||
Backend.Mailer.AdminEmail.send("Potential spam", message)
|
||||
AdminEmail.send("Potential spam", message)
|
||||
else
|
||||
Logger.debug("Did not find potential spam instances.")
|
||||
end
|
||||
end
|
||||
|
||||
# Takes a list of Interactions
|
||||
# Returns a map of %{{source, target} => {total_mention_count, total_statuses_seen}}
|
||||
defp combine_mention_directions(interactions) do
|
||||
Enum.reduce(interactions, %{}, fn
|
||||
%{
|
||||
source_domain: source_domain,
|
||||
target_domain: target_domain,
|
||||
mentions: mentions,
|
||||
source_statuses_seen: source_statuses_seen,
|
||||
target_statuses_seen: target_statuses_seen
|
||||
},
|
||||
acc ->
|
||||
key = get_interaction_key(source_domain, target_domain)
|
||||
|
||||
# target_statuses_seen might be nil if that instance was never crawled. default to 0.
|
||||
target_statuses_seen =
|
||||
case target_statuses_seen do
|
||||
nil -> 0
|
||||
_ -> target_statuses_seen
|
||||
end
|
||||
|
||||
statuses_seen = source_statuses_seen + target_statuses_seen
|
||||
|
||||
Map.update(acc, key, {mentions, statuses_seen}, fn {curr_mentions, curr_statuses_seen} ->
|
||||
{curr_mentions + mentions, curr_statuses_seen}
|
||||
end)
|
||||
end)
|
||||
end
|
||||
|
||||
defp filter_to_eligible_interactions(interactions, federation_blocks) do
|
||||
# A map of {source_domain, target_domain} => mention_count. Used to find out whether a mention in the reverse
|
||||
# direction has been seen.
|
||||
mention_directions =
|
||||
interactions
|
||||
|> Enum.reduce(%{}, fn %{source_domain: source, target_domain: target, mentions: mentions},
|
||||
acc ->
|
||||
Map.put(acc, {source, target}, mentions)
|
||||
end)
|
||||
|
||||
interactions
|
||||
|> Enum.filter(&is_eligible_interaction?(&1, mention_directions, federation_blocks))
|
||||
end
|
||||
|
||||
# Returns true if
|
||||
# * there's no federation block in either direction between the two instances
|
||||
# * there are mentions in both directions (if enabled in configuration)
|
||||
defp is_eligible_interaction?(
|
||||
%{
|
||||
source_domain: source,
|
||||
target_domain: target,
|
||||
mentions: mention_count,
|
||||
source_type: source_type,
|
||||
target_type: target_type
|
||||
},
|
||||
mention_directions,
|
||||
federation_blocks
|
||||
) do
|
||||
mentions_were_seen = mention_count > 0
|
||||
|
||||
# If :require_bidirectional_edges is set to `true` in the config, then an edge is only created if both instances
|
||||
# have mentioned each other
|
||||
opposite_mention_exists =
|
||||
if get_config(:require_bidirectional_mentions) and is_timeline_crawlable_type?(source_type) and
|
||||
is_timeline_crawlable_type?(target_type) do
|
||||
Map.has_key?(mention_directions, {target, source}) and
|
||||
Map.get(mention_directions, {target, source}) > 0
|
||||
else
|
||||
true
|
||||
end
|
||||
|
||||
federation_block_exists =
|
||||
MapSet.member?(federation_blocks, {source, target}) or
|
||||
MapSet.member?(federation_blocks, {target, source})
|
||||
|
||||
mentions_were_seen and opposite_mention_exists and not federation_block_exists
|
||||
end
|
||||
|
||||
defp is_timeline_crawlable_type?(type) do
|
||||
Enum.member?(["mastodon", "gab", "pleroma", "gnusocial", "misskey"], type)
|
||||
end
|
||||
end
|
||||
|
|
|
@ -1,7 +1,8 @@
|
|||
defmodule Backend.Util do
|
||||
@moduledoc false
|
||||
import Ecto.Query
|
||||
require Logger
|
||||
alias Backend.{Crawl, Repo}
|
||||
alias Backend.{Crawl, MostRecentCrawl, Repo}
|
||||
|
||||
@doc """
|
||||
Returns the given key from :backend, :crawler in the config.
|
||||
|
@ -30,7 +31,7 @@ defmodule Backend.Util do
|
|||
blacklist =
|
||||
case get_config(:blacklist) do
|
||||
nil -> []
|
||||
_ -> get_config(:blacklist)
|
||||
other -> other
|
||||
end
|
||||
|
||||
blacklist
|
||||
|
@ -53,7 +54,7 @@ defmodule Backend.Util do
|
|||
@doc """
|
||||
Gets the current UTC time as a NaiveDateTime in a format that can be inserted into the database.
|
||||
"""
|
||||
def get_now() do
|
||||
def get_now do
|
||||
NaiveDateTime.truncate(NaiveDateTime.utc_now(), :second)
|
||||
end
|
||||
|
||||
|
@ -78,38 +79,22 @@ defmodule Backend.Util do
|
|||
|
||||
@spec get_last_crawl(String.t()) :: Crawl.t() | nil
|
||||
def get_last_crawl(domain) do
|
||||
crawls =
|
||||
Crawl
|
||||
|> select([c], c)
|
||||
|> where([c], c.instance_domain == ^domain)
|
||||
|> order_by(desc: :id)
|
||||
|> limit(1)
|
||||
|> Repo.all()
|
||||
most_recent_crawl_subquery =
|
||||
MostRecentCrawl
|
||||
|> select([mrc], %{
|
||||
most_recent_id: mrc.crawl_id
|
||||
})
|
||||
|> where([mrc], mrc.instance_domain == ^domain)
|
||||
|
||||
case length(crawls) do
|
||||
1 -> hd(crawls)
|
||||
0 -> nil
|
||||
end
|
||||
Crawl
|
||||
|> join(:inner, [c], mrc in subquery(most_recent_crawl_subquery),
|
||||
on: c.id == mrc.most_recent_id
|
||||
)
|
||||
|> Repo.one()
|
||||
end
|
||||
|
||||
@spec get_last_successful_crawl(String.t()) :: Crawl.t() | nil
|
||||
def get_last_successful_crawl(domain) do
|
||||
crawls =
|
||||
Crawl
|
||||
|> select([c], c)
|
||||
|> where([c], is_nil(c.error) and c.instance_domain == ^domain)
|
||||
|> order_by(desc: :id)
|
||||
|> limit(1)
|
||||
|> Repo.all()
|
||||
|
||||
case length(crawls) do
|
||||
1 -> hd(crawls)
|
||||
0 -> nil
|
||||
end
|
||||
end
|
||||
|
||||
@spec get_last_successful_crawl_timestamp(String.t()) :: NaiveDateTime.t() | nil
|
||||
def get_last_successful_crawl_timestamp(domain) do
|
||||
@spec get_last_crawl_timestamp(String.t()) :: NaiveDateTime.t() | nil
|
||||
def get_last_crawl_timestamp(domain) do
|
||||
crawl = get_last_crawl(domain)
|
||||
|
||||
case crawl do
|
||||
|
@ -128,26 +113,16 @@ defmodule Backend.Util do
|
|||
end)
|
||||
end
|
||||
|
||||
@doc """
|
||||
Sends an SMS to the admin phone number if configured.
|
||||
"""
|
||||
def send_admin_sms(body) do
|
||||
if get_config(:admin_phone) != nil and get_config(:twilio_phone) != nil do
|
||||
ExTwilio.Message.create(
|
||||
to: get_config(:admin_phone),
|
||||
from: get_config(:twilio_phone),
|
||||
body: body
|
||||
)
|
||||
else
|
||||
Logger.info("Could not send SMS to admin; not configured.")
|
||||
end
|
||||
end
|
||||
|
||||
@spec clean_domain(String.t()) :: String.t()
|
||||
def clean_domain(domain) do
|
||||
domain
|
||||
|> String.replace_prefix("https://", "")
|
||||
|> String.trim_trailing("/")
|
||||
|> String.downcase()
|
||||
cleaned =
|
||||
domain
|
||||
|> String.replace_prefix("https://", "")
|
||||
|> String.trim_trailing("/")
|
||||
|> String.trim()
|
||||
|> String.downcase()
|
||||
|
||||
Regex.replace(~r/(:\d+|\.)$/, cleaned, "")
|
||||
end
|
||||
|
||||
def get_account(username, domain) do
|
||||
|
@ -167,4 +142,61 @@ defmodule Backend.Util do
|
|||
def convert_keys_to_atoms(map) do
|
||||
map |> Map.new(fn {k, v} -> {String.to_atom(k), v} end)
|
||||
end
|
||||
|
||||
@doc """
|
||||
Gets and decodes a HTTP response.
|
||||
"""
|
||||
@spec get_and_decode(String.t(), Atom.t(), Integer.t()) ::
|
||||
{:ok, any()} | {:error, Jason.DecodeError.t() | HTTPoison.Error.t()}
|
||||
def get_and_decode(url, pool \\ :crawler, timeout \\ 15_000) do
|
||||
case HTTPoison.get(url, [{"User-Agent", get_config(:user_agent)}],
|
||||
hackney: [pool: pool],
|
||||
recv_timeout: timeout,
|
||||
timeout: timeout
|
||||
) do
|
||||
{:ok, %{status_code: 200, body: body}} -> Jason.decode(body)
|
||||
{:ok, %{status_code: 401}} -> Jason.decode("[]")
|
||||
{:ok, %{status_code: 404}} -> Jason.decode("[]")
|
||||
{:ok, %{body: body}} -> {:error, %HTTPoison.Error{reason: "Non-200 response. Body: #{body}"}}
|
||||
{:error, err} -> {:error, err}
|
||||
end
|
||||
end
|
||||
|
||||
@spec get_and_decode!(String.t()) :: any()
|
||||
def get_and_decode!(url) do
|
||||
case get_and_decode(url) do
|
||||
{:ok, decoded} -> decoded
|
||||
{:error, error} -> raise error
|
||||
end
|
||||
end
|
||||
|
||||
@doc """
|
||||
POSTS to a HTTP endpoint and decodes the JSON response.
|
||||
"""
|
||||
@spec post_and_decode(String.t(), String.t()) ::
|
||||
{:ok, any()} | {:error, Jason.DecodeError.t() | HTTPoison.Error.t()}
|
||||
def post_and_decode(url, body \\ "") do
|
||||
case HTTPoison.post(url, body, [{"User-Agent", get_config(:user_agent)}],
|
||||
hackney: [pool: :crawler],
|
||||
recv_timeout: 15_000,
|
||||
timeout: 15_000
|
||||
) do
|
||||
{:ok, %{status_code: 200, body: response_body}} -> Jason.decode(response_body)
|
||||
{:ok, _} -> {:error, %HTTPoison.Error{reason: "Non-200 response"}}
|
||||
{:error, err} -> {:error, err}
|
||||
end
|
||||
end
|
||||
|
||||
@spec post_and_decode!(String.t(), String.t()) :: any()
|
||||
def post_and_decode!(url, body \\ "") do
|
||||
case post_and_decode(url, body) do
|
||||
{:ok, decoded} -> decoded
|
||||
{:error, error} -> raise error
|
||||
end
|
||||
end
|
||||
|
||||
@spec is_valid_domain?(String.t()) :: boolean
|
||||
def is_valid_domain?(domain) do
|
||||
Regex.match?(~r/^[\pL\d\.\-_]+\.[a-zA-Z]+$/, domain)
|
||||
end
|
||||
end
|
||||
|
|
|
@ -1,7 +1,6 @@
|
|||
defmodule BackendWeb.AdminController do
|
||||
alias Backend.{Api, Auth, Instance}
|
||||
use BackendWeb, :controller
|
||||
alias Backend.{Auth, Api, Instance}
|
||||
require Logger
|
||||
|
||||
action_fallback BackendWeb.FallbackController
|
||||
|
||||
|
@ -9,7 +8,7 @@ defmodule BackendWeb.AdminController do
|
|||
[token] = get_req_header(conn, "token")
|
||||
|
||||
with {:ok, domain} <- Auth.verify_token(token) do
|
||||
instance = Api.get_instance!(domain)
|
||||
instance = Api.get_instance(domain)
|
||||
render(conn, "show.json", instance: instance)
|
||||
end
|
||||
end
|
||||
|
@ -20,13 +19,21 @@ defmodule BackendWeb.AdminController do
|
|||
with {:ok, domain} <- Auth.verify_token(token) do
|
||||
%{"optIn" => opt_in, "optOut" => opt_out} = params
|
||||
|
||||
instance = %Instance{
|
||||
# Make sure to update ElasticSearch so that the instance is no longer returned in search results
|
||||
es_instance =
|
||||
Api.get_instance(domain)
|
||||
|> Map.put(:opt_in, opt_in)
|
||||
|> Map.put(:opt_out, opt_out)
|
||||
|
||||
Elasticsearch.put_document!(Backend.Elasticsearch.Cluster, es_instance, "instances")
|
||||
|
||||
ecto_instance = %Instance{
|
||||
domain: domain,
|
||||
opt_in: opt_in,
|
||||
opt_out: opt_out
|
||||
}
|
||||
|
||||
with {:ok, updated_instance} <- Api.update_instance(instance) do
|
||||
with {:ok, updated_instance} <- Api.update_instance(ecto_instance) do
|
||||
render(conn, "show.json", instance: updated_instance)
|
||||
end
|
||||
end
|
||||
|
|
|
@ -1,7 +1,9 @@
|
|||
defmodule BackendWeb.AdminLoginController do
|
||||
use BackendWeb, :controller
|
||||
import Backend.Util
|
||||
alias Backend.Api
|
||||
alias Backend.Mailer.UserEmail
|
||||
alias Mastodon.Messenger
|
||||
|
||||
action_fallback BackendWeb.FallbackController
|
||||
|
||||
|
@ -10,28 +12,44 @@ defmodule BackendWeb.AdminLoginController do
|
|||
choose one or the other by POSTing back.
|
||||
"""
|
||||
def show(conn, %{"id" => domain}) do
|
||||
# TODO: this should really be handled in a more async manner
|
||||
# TODO: this assumes mastodon/pleroma API
|
||||
cleaned_domain = clean_domain(domain)
|
||||
instance = Api.get_instance(domain)
|
||||
|
||||
instance_data =
|
||||
HTTPoison.get!("https://#{cleaned_domain}/api/v1/instance")
|
||||
|> Map.get(:body)
|
||||
|> Jason.decode!()
|
||||
keyword_args =
|
||||
cond do
|
||||
instance == nil or instance.type == nil ->
|
||||
[error: "We have not seen this instance before. Please check for typos."]
|
||||
|
||||
render(conn, "show.json", instance_data: instance_data, cleaned_domain: cleaned_domain)
|
||||
not Enum.member?(["mastodon", "pleroma", "gab"], instance.type) ->
|
||||
[error: "It is only possible to administer Mastodon and Pleroma instances."]
|
||||
|
||||
true ->
|
||||
case get_and_decode("https://#{cleaned_domain}/api/v1/instance") do
|
||||
{:ok, instance_data} ->
|
||||
[instance_data: instance_data, cleaned_domain: cleaned_domain]
|
||||
|
||||
{:error, _err} ->
|
||||
[error: "Unable to get instance details. Is it currently live?"]
|
||||
end
|
||||
end
|
||||
|
||||
render(conn, "show.json", keyword_args)
|
||||
end
|
||||
|
||||
def create(conn, %{"domain" => domain, "type" => type}) do
|
||||
cleaned_domain = clean_domain(domain)
|
||||
|
||||
instance_data =
|
||||
HTTPoison.get!("https://#{cleaned_domain}/api/v1/instance")
|
||||
|> Map.get(:body)
|
||||
|> Jason.decode!()
|
||||
{data_state, instance_data} =
|
||||
get_and_decode("https://#{cleaned_domain}/api/v1/instance",
|
||||
pool: :admin_login,
|
||||
timeout: 20_000
|
||||
)
|
||||
|
||||
error =
|
||||
cond do
|
||||
data_state == :error ->
|
||||
"Unable to get instance details. Is it currently live?"
|
||||
|
||||
type == "email" ->
|
||||
email = Map.get(instance_data, "email")
|
||||
|
||||
|
@ -40,8 +58,10 @@ defmodule BackendWeb.AdminLoginController do
|
|||
{:error, _} -> "Failed to send email."
|
||||
end
|
||||
|
||||
# type == "fediverseAccount" ->
|
||||
# account = nil
|
||||
type == "fediverseAccount" ->
|
||||
username = get_in(instance_data, ["contact_account", "username"])
|
||||
_status = Messenger.dm_login_link(username, cleaned_domain)
|
||||
nil
|
||||
|
||||
true ->
|
||||
"Invalid account type. Must be 'email' or 'fediverseAccount'."
|
||||
|
|
|
@ -1,19 +1,16 @@
|
|||
defmodule BackendWeb.GraphController do
|
||||
use BackendWeb, :controller
|
||||
|
||||
alias Backend.Api
|
||||
alias Graph.Cache
|
||||
|
||||
action_fallback BackendWeb.FallbackController
|
||||
|
||||
def index(conn, _params) do
|
||||
nodes = Api.list_nodes()
|
||||
edges = Api.list_edges()
|
||||
%{nodes: nodes, edges: edges} = Cache.get_graph()
|
||||
render(conn, "index.json", nodes: nodes, edges: edges)
|
||||
end
|
||||
|
||||
def show(conn, %{"id" => domain}) do
|
||||
nodes = Api.list_nodes(domain)
|
||||
edges = Api.list_edges(domain)
|
||||
%{nodes: nodes, edges: edges} = Cache.get_graph(domain)
|
||||
render(conn, "index.json", nodes: nodes, edges: edges)
|
||||
end
|
||||
end
|
||||
|
|
|
@ -1,15 +1,96 @@
|
|||
defmodule BackendWeb.InstanceController do
|
||||
use BackendWeb, :controller
|
||||
|
||||
import Backend.Util
|
||||
alias Backend.Api
|
||||
alias Backend.{Api, Instance, Repo}
|
||||
alias Graph.Cache
|
||||
import Ecto.Query
|
||||
|
||||
action_fallback(BackendWeb.FallbackController)
|
||||
|
||||
# sobelow_skip ["DOS.StringToAtom"]
|
||||
def index(conn, params) do
|
||||
page = Map.get(params, "page")
|
||||
sort_field = Map.get(params, "sortField")
|
||||
sort_direction = Map.get(params, "sortDirection")
|
||||
|
||||
cond do
|
||||
not Enum.member?([nil, "domain", "userCount", "statusCount", "insularity"], sort_field) ->
|
||||
render(conn, "error.json", error: "Invalid sort field")
|
||||
|
||||
not Enum.member?([nil, "asc", "desc"], sort_direction) ->
|
||||
render(conn, "error.json", error: "Invalid sort direction")
|
||||
|
||||
true ->
|
||||
sort_field =
|
||||
if sort_field != nil do
|
||||
sort_field
|
||||
|> Recase.to_snake()
|
||||
|> String.to_atom()
|
||||
else
|
||||
nil
|
||||
end
|
||||
|
||||
sort_direction =
|
||||
if sort_direction != nil do
|
||||
sort_direction
|
||||
|> Recase.to_snake()
|
||||
|> String.to_atom()
|
||||
else
|
||||
nil
|
||||
end
|
||||
|
||||
%{
|
||||
entries: instances,
|
||||
total_pages: total_pages,
|
||||
page_number: page_number,
|
||||
total_entries: total_entries,
|
||||
page_size: page_size
|
||||
} = Api.get_instances(page, sort_field, sort_direction)
|
||||
|
||||
render(conn, "index.json",
|
||||
instances: instances,
|
||||
total_pages: total_pages,
|
||||
page_number: page_number,
|
||||
total_entries: total_entries,
|
||||
page_size: page_size
|
||||
)
|
||||
end
|
||||
end
|
||||
|
||||
def show(conn, %{"id" => domain}) do
|
||||
instance = Api.get_instance!(domain)
|
||||
last_crawl = get_last_crawl(domain)
|
||||
render(conn, "show.json", instance: instance, crawl: last_crawl)
|
||||
instance = Cache.get_instance_with_relationships(domain)
|
||||
|
||||
if instance == nil or instance.opt_out == true do
|
||||
send_resp(conn, 404, "Not found")
|
||||
else
|
||||
last_crawl = Cache.get_last_crawl(domain)
|
||||
|
||||
restricted_domains =
|
||||
instance.federation_restrictions
|
||||
|> Enum.map(fn %{target_domain: domain} -> domain end)
|
||||
|
||||
opted_out_instances =
|
||||
Instance
|
||||
|> select([i], i.domain)
|
||||
|> where([i], i.opt_out and i.domain in ^restricted_domains)
|
||||
|> Repo.all()
|
||||
|
||||
# convert from a list of {domain, restriction_type} to a map of %{restriction_type => list_of_domains}
|
||||
federation_restrictions =
|
||||
instance.federation_restrictions
|
||||
|> Enum.filter(fn %{target_domain: domain} ->
|
||||
not Enum.member?(opted_out_instances, domain)
|
||||
end)
|
||||
|> Enum.reduce(%{}, fn %{target_domain: domain, type: type}, acc ->
|
||||
Map.update(acc, type, [domain], fn curr_domains -> [domain | curr_domains] end)
|
||||
end)
|
||||
|> Recase.Enumerable.convert_keys(&Recase.to_camel(&1))
|
||||
|
||||
render(conn, "show.json",
|
||||
instance: instance,
|
||||
crawl: last_crawl,
|
||||
federation_restrictions: federation_restrictions
|
||||
)
|
||||
end
|
||||
end
|
||||
|
||||
# def update(conn, %{"id" => id, "instance" => instance_params}) do
|
||||
|
|
|
@ -7,7 +7,46 @@ defmodule BackendWeb.SearchController do
|
|||
def index(conn, params) do
|
||||
query = Map.get(params, "query")
|
||||
from = Map.get(params, "after", "0") |> String.to_integer()
|
||||
%{hits: hits, next: next} = Api.search_instances(query, from)
|
||||
|
||||
# Filters
|
||||
filter_keys =
|
||||
params
|
||||
|> Map.keys()
|
||||
|> Enum.filter(fn key -> key !== "query" and key !== "after" end)
|
||||
|
||||
filters =
|
||||
params
|
||||
|> Map.take(filter_keys)
|
||||
|> Map.to_list()
|
||||
|> Enum.map(&convert_to_es_filter(&1))
|
||||
|
||||
%{hits: hits, next: next} = Api.search_instances(query, filters, from)
|
||||
render(conn, "index.json", hits: hits, next: next)
|
||||
end
|
||||
|
||||
defp convert_to_es_filter(url_param) do
|
||||
{key, value} = url_param
|
||||
# Key has the form e.g. "type_eq" or "user_count_gte"
|
||||
key_components = String.split(key, "_")
|
||||
# The field to filter on
|
||||
field = Enum.take(key_components, length(key_components) - 1) |> Enum.join("_")
|
||||
# The filter relation -- one of eq, gt, gte, lt, lte
|
||||
relation = Enum.take(key_components, -1)
|
||||
|
||||
case field do
|
||||
"type" ->
|
||||
%{
|
||||
"term" => %{"type" => value}
|
||||
}
|
||||
|
||||
"user_count" ->
|
||||
%{
|
||||
"range" => %{
|
||||
"user_count" => %{
|
||||
relation => value
|
||||
}
|
||||
}
|
||||
}
|
||||
end
|
||||
end
|
||||
end
|
||||
|
|
|
@ -46,7 +46,7 @@ defmodule BackendWeb.Endpoint do
|
|||
)
|
||||
|
||||
plug(Corsica,
|
||||
origins: ["http://localhost:3000", ~r{^https?://(.*\.?)fediverse\.space$}],
|
||||
origins: ["http://localhost:3001", ~r{^https://(.*\.?)index\.community$}, ~r{^https://(.*\.?)fediverse\.space$}],
|
||||
allow_headers: ["content-type", "token"]
|
||||
)
|
||||
|
||||
|
|
|
@ -0,0 +1,53 @@
|
|||
defmodule BackendWeb.RateLimiter do
|
||||
@moduledoc """
|
||||
Functions used to rate limit:
|
||||
* all endpoints by IP/endpoint
|
||||
* authentication endpoints by domain
|
||||
"""
|
||||
|
||||
import Phoenix.Controller, only: [json: 2]
|
||||
import Plug.Conn, only: [put_status: 2]
|
||||
use Plug.Builder
|
||||
|
||||
def rate_limit(conn, options \\ []) do
|
||||
case check_rate(conn, options) do
|
||||
{:ok, _count} -> conn # Do nothing, allow execution to continue
|
||||
{:error, _count} -> render_error(conn)
|
||||
end
|
||||
end
|
||||
|
||||
def rate_limit_authentication(conn, options \\ []) do
|
||||
domain =
|
||||
if Map.has_key?(conn.params, "id") do
|
||||
Map.get(conn.params, "id")
|
||||
else
|
||||
Map.get(conn.params, "domain")
|
||||
end
|
||||
options = Keyword.put(options, :bucket_name, "authorization: #{domain}")
|
||||
rate_limit(conn, options)
|
||||
end
|
||||
|
||||
defp check_rate(conn, options) do
|
||||
interval_milliseconds = options[:interval_seconds] * 1000
|
||||
max_requests = options[:max_requests]
|
||||
bucket_name = options[:bucket_name] || bucket_name(conn)
|
||||
|
||||
ExRated.check_rate(bucket_name, interval_milliseconds, max_requests)
|
||||
end
|
||||
|
||||
# Bucket name should be a combination of ip address and request path, like so:
|
||||
#
|
||||
# "127.0.0.1:/api/v1/authorizations"
|
||||
defp bucket_name(conn) do
|
||||
path = Enum.join(conn.path_info, "/")
|
||||
ip = conn.remote_ip |> Tuple.to_list |> Enum.join(".")
|
||||
"#{ip}:#{path}"
|
||||
end
|
||||
|
||||
defp render_error(conn) do
|
||||
conn
|
||||
|> put_status(:forbidden)
|
||||
|> json(%{error: "Rate limit exceeded."})
|
||||
|> halt # Stop execution of further plugs, return response now
|
||||
end
|
||||
end
|
|
@ -1,19 +1,29 @@
|
|||
defmodule BackendWeb.Router do
|
||||
use BackendWeb, :router
|
||||
import BackendWeb.RateLimiter
|
||||
|
||||
pipeline :api do
|
||||
plug(:accepts, ["json"])
|
||||
plug(:rate_limit, max_requests: 5, interval_seconds: 10) # requests to the same endpoint
|
||||
end
|
||||
|
||||
pipeline :api_admin do
|
||||
plug(:rate_limit_authentication, max_requests: 5, interval_seconds: 60)
|
||||
end
|
||||
|
||||
scope "/api", BackendWeb do
|
||||
pipe_through(:api)
|
||||
|
||||
resources("/instances", InstanceController, only: [:show])
|
||||
resources("/instances", InstanceController, only: [:index, :show])
|
||||
resources("/graph", GraphController, only: [:index, :show])
|
||||
resources("/search", SearchController, only: [:index])
|
||||
|
||||
resources("/admin/login", AdminLoginController, only: [:show, :create])
|
||||
get "/admin", AdminController, :show
|
||||
post "/admin", AdminController, :update
|
||||
scope "/admin" do
|
||||
pipe_through :api_admin
|
||||
|
||||
resources("/login", AdminLoginController, only: [:show, :create])
|
||||
get "/", AdminController, :show
|
||||
post "/", AdminController, :update
|
||||
end
|
||||
end
|
||||
end
|
||||
|
|
|
@ -2,9 +2,17 @@ defmodule BackendWeb.AdminLoginView do
|
|||
use BackendWeb, :view
|
||||
import Backend.Util
|
||||
|
||||
def render("show.json", %{instance_data: instance_data, cleaned_domain: cleaned_domain}) do
|
||||
username = get_in(instance_data, ["contact_account", "username"])
|
||||
def render("show.json", %{error: error}) do
|
||||
%{
|
||||
error: error
|
||||
}
|
||||
end
|
||||
|
||||
def render("show.json", %{
|
||||
instance_data: instance_data,
|
||||
cleaned_domain: cleaned_domain
|
||||
}) do
|
||||
username = get_in(instance_data, ["contact_account", "username"])
|
||||
fedi_account = get_account(username, cleaned_domain)
|
||||
|
||||
%{
|
||||
|
|
|
@ -8,15 +8,6 @@ defmodule BackendWeb.GraphView do
|
|||
|> Enum.map(fn %{statuses_per_day: statuses_per_day} -> statuses_per_day end)
|
||||
|> Enum.filter(fn s -> s != nil end)
|
||||
|
||||
statuses_per_user_per_day =
|
||||
nodes
|
||||
|> Enum.filter(fn %{statuses_per_day: statuses_per_day, user_count: user_count} ->
|
||||
statuses_per_day != nil and user_count != nil and user_count > 0
|
||||
end)
|
||||
|> Enum.map(fn %{statuses_per_day: statuses_per_day, user_count: user_count} ->
|
||||
statuses_per_day / user_count
|
||||
end)
|
||||
|
||||
%{
|
||||
graph: %{
|
||||
nodes: render_many(nodes, GraphView, "node.json", as: :node),
|
||||
|
@ -26,12 +17,8 @@ defmodule BackendWeb.GraphView do
|
|||
ranges: %{
|
||||
# Make sure that these keys match what's in the "node.json" render function.
|
||||
statusesPerDay: [
|
||||
Enum.min(statuses_per_day),
|
||||
Enum.max(statuses_per_day)
|
||||
],
|
||||
statusesPerUserPerDay: [
|
||||
Enum.min(statuses_per_user_per_day),
|
||||
Enum.max(statuses_per_user_per_day)
|
||||
Enum.min(statuses_per_day, fn -> nil end),
|
||||
Enum.max(statuses_per_day, fn -> nil end)
|
||||
]
|
||||
}
|
||||
}
|
||||
|
@ -45,13 +32,6 @@ defmodule BackendWeb.GraphView do
|
|||
false -> 1
|
||||
end
|
||||
|
||||
statuses_per_user_per_day =
|
||||
if node.statuses_per_day != nil and node.user_count != nil and node.user_count > 0 do
|
||||
node.statuses_per_day / node.user_count
|
||||
else
|
||||
nil
|
||||
end
|
||||
|
||||
# This is the format that cytoscape.js expects.
|
||||
%{
|
||||
data: %{
|
||||
|
@ -59,8 +39,7 @@ defmodule BackendWeb.GraphView do
|
|||
label: node.domain,
|
||||
size: size,
|
||||
type: node.type,
|
||||
statusesPerDay: node.statuses_per_day,
|
||||
statusesPerUserPerDay: statuses_per_user_per_day
|
||||
statusesPerDay: node.statuses_per_day
|
||||
},
|
||||
position: %{
|
||||
x: node.x,
|
||||
|
|
|
@ -3,60 +3,118 @@ defmodule BackendWeb.InstanceView do
|
|||
alias BackendWeb.InstanceView
|
||||
import Backend.Util
|
||||
|
||||
def render("show.json", %{instance: instance, crawl: crawl}) do
|
||||
def render("index.json", %{
|
||||
instances: instances,
|
||||
total_pages: total_pages,
|
||||
page_number: page_number,
|
||||
total_entries: total_entries,
|
||||
page_size: page_size
|
||||
}) do
|
||||
%{
|
||||
instances: render_many(instances, InstanceView, "index_instance.json"),
|
||||
pageNumber: page_number,
|
||||
totalPages: total_pages,
|
||||
totalEntries: total_entries,
|
||||
pageSize: page_size
|
||||
}
|
||||
end
|
||||
|
||||
@doc """
|
||||
Used when rendering the index of all instances (the difference from show.json is primarily that
|
||||
it does not include peers).
|
||||
"""
|
||||
def render("index_instance.json", %{instance: instance}) do
|
||||
%{
|
||||
name: instance.domain,
|
||||
description: instance.description,
|
||||
version: instance.version,
|
||||
userCount: instance.user_count,
|
||||
insularity: instance.insularity,
|
||||
statusCount: instance.status_count,
|
||||
type: instance.type,
|
||||
statusesPerDay: instance.statuses_per_day,
|
||||
statusesPerUserPerDay: get_statuses_per_user_per_day(instance)
|
||||
}
|
||||
end
|
||||
|
||||
def render("show.json", %{
|
||||
instance: instance,
|
||||
crawl: crawl,
|
||||
federation_restrictions: federation_restrictions
|
||||
}) do
|
||||
user_threshold = get_config(:personal_instance_threshold)
|
||||
|
||||
[status, last_updated] =
|
||||
case crawl do
|
||||
nil ->
|
||||
["not crawled", nil]
|
||||
|
||||
_ ->
|
||||
case crawl.error do
|
||||
nil -> ["success", crawl.inserted_at]
|
||||
err -> [err, crawl.inserted_at]
|
||||
end
|
||||
end
|
||||
|
||||
cond do
|
||||
instance.user_count < user_threshold and not instance.opt_in ->
|
||||
%{
|
||||
name: instance.domain,
|
||||
status: "personal instance"
|
||||
}
|
||||
render_personal_instance(instance)
|
||||
|
||||
instance.crawl_error == "robots.txt" ->
|
||||
render_domain_and_error(instance)
|
||||
|
||||
instance.crawl_error != nil and instance.type == nil ->
|
||||
render_domain_and_error(instance)
|
||||
|
||||
crawl == nil ->
|
||||
render_domain_and_error(instance)
|
||||
|
||||
true ->
|
||||
filtered_peers =
|
||||
instance.peers
|
||||
|> Enum.filter(fn peer -> not peer.opt_out end)
|
||||
|
||||
statuses_per_user_per_day =
|
||||
if instance.statuses_per_day != nil and instance.user_count != nil and
|
||||
instance.user_count > 0 do
|
||||
instance.statuses_per_day / instance.user_count
|
||||
else
|
||||
nil
|
||||
end
|
||||
|
||||
%{
|
||||
name: instance.domain,
|
||||
description: instance.description,
|
||||
version: instance.version,
|
||||
userCount: instance.user_count,
|
||||
insularity: instance.insularity,
|
||||
statusCount: instance.status_count,
|
||||
domainCount: length(instance.peers),
|
||||
peers: render_many(filtered_peers, InstanceView, "instance.json"),
|
||||
lastUpdated: last_updated,
|
||||
status: status,
|
||||
type: instance.type,
|
||||
statusesPerDay: instance.statuses_per_day,
|
||||
statusesPerUserPerDay: statuses_per_user_per_day
|
||||
}
|
||||
render_instance(instance, crawl, federation_restrictions)
|
||||
end
|
||||
end
|
||||
|
||||
def render("instance.json", %{instance: instance}) do
|
||||
def render("peer.json", %{instance: instance}) do
|
||||
%{name: instance.domain}
|
||||
end
|
||||
|
||||
def render("error.json", %{error: error}) do
|
||||
%{error: error}
|
||||
end
|
||||
|
||||
defp render_personal_instance(instance) do
|
||||
%{
|
||||
name: instance.domain,
|
||||
status: "personal instance"
|
||||
}
|
||||
end
|
||||
|
||||
defp render_domain_and_error(instance) do
|
||||
%{
|
||||
name: instance.domain,
|
||||
status: instance.crawl_error
|
||||
}
|
||||
end
|
||||
|
||||
defp render_instance(instance, crawl, federation_restrictions) do
|
||||
last_updated = max_datetime(crawl.inserted_at, instance.updated_at)
|
||||
|
||||
filtered_peers =
|
||||
instance.peers
|
||||
|> Enum.filter(fn peer -> not peer.opt_out end)
|
||||
|
||||
%{
|
||||
name: instance.domain,
|
||||
description: instance.description,
|
||||
version: instance.version,
|
||||
userCount: instance.user_count,
|
||||
insularity: instance.insularity,
|
||||
statusCount: instance.status_count,
|
||||
domainCount: length(instance.peers),
|
||||
peers: render_many(filtered_peers, InstanceView, "peer.json"),
|
||||
federationRestrictions: federation_restrictions,
|
||||
lastUpdated: last_updated,
|
||||
status: "success",
|
||||
type: instance.type,
|
||||
statusesPerDay: instance.statuses_per_day,
|
||||
statusesPerUserPerDay: get_statuses_per_user_per_day(instance)
|
||||
}
|
||||
end
|
||||
|
||||
defp get_statuses_per_user_per_day(instance) do
|
||||
if instance.statuses_per_day != nil and instance.user_count != nil and
|
||||
instance.user_count > 0 do
|
||||
instance.statuses_per_day / instance.user_count
|
||||
else
|
||||
nil
|
||||
end
|
||||
end
|
||||
end
|
||||
|
|
|
@ -0,0 +1,93 @@
|
|||
defmodule Graph.Cache do
|
||||
@moduledoc false
|
||||
use Nebulex.Cache,
|
||||
otp_app: :backend,
|
||||
adapter: Nebulex.Adapters.Local
|
||||
|
||||
alias Backend.{Api, Crawl, Edge, Instance, MostRecentCrawl, Repo}
|
||||
alias __MODULE__
|
||||
require Logger
|
||||
import Ecto.Query
|
||||
|
||||
@spec get_graph(String.t() | nil) :: %{
|
||||
nodes: [Instance.t()],
|
||||
edges: [Edge.t()]
|
||||
}
|
||||
def get_graph(domain \\ nil) do
|
||||
key =
|
||||
if domain != nil do
|
||||
"graph_" <> domain
|
||||
else
|
||||
"graph"
|
||||
end
|
||||
|
||||
case Cache.get(key) do
|
||||
nil ->
|
||||
Appsignal.increment_counter("graph_cache.misses", 1)
|
||||
Logger.debug("Graph cache: miss")
|
||||
nodes = Api.list_nodes(domain)
|
||||
edges = Api.list_edges(domain)
|
||||
# Cache for 10 minutes
|
||||
Cache.set(key, %{nodes: nodes, edges: edges}, ttl: 600)
|
||||
%{nodes: nodes, edges: edges}
|
||||
|
||||
data ->
|
||||
Appsignal.increment_counter("graph_cache.hits", 1)
|
||||
Logger.debug("Graph cache: hit")
|
||||
data
|
||||
end
|
||||
end
|
||||
|
||||
@spec get_instance_with_relationships(String.t()) :: Instance.t()
|
||||
def get_instance_with_relationships(domain) do
|
||||
key = "instance_" <> domain
|
||||
|
||||
case Cache.get(key) do
|
||||
nil ->
|
||||
Appsignal.increment_counter("instance_cache.misses", 1)
|
||||
Logger.debug("Instance cache: miss")
|
||||
instance = Api.get_instance_with_relationships(domain)
|
||||
# Cache for five minutes
|
||||
Cache.set(key, instance, ttl: 300)
|
||||
instance
|
||||
|
||||
data ->
|
||||
Appsignal.increment_counter("instance_cache.hits", 1)
|
||||
Logger.debug("Instance cache: hit")
|
||||
data
|
||||
end
|
||||
end
|
||||
|
||||
@spec get_last_crawl(String.t()) :: Crawl.t() | nil
|
||||
def get_last_crawl(domain) do
|
||||
key = "most_recent_crawl_" <> domain
|
||||
|
||||
most_recent_crawl_subquery =
|
||||
MostRecentCrawl
|
||||
|> select([mrc], %{
|
||||
most_recent_id: mrc.crawl_id
|
||||
})
|
||||
|> where([mrc], mrc.instance_domain == ^domain)
|
||||
|
||||
case Cache.get(key) do
|
||||
nil ->
|
||||
Appsignal.increment_counter("most_recent_crawl_cache.misses", 1)
|
||||
Logger.debug("Most recent crawl cache: miss")
|
||||
|
||||
crawl =
|
||||
Crawl
|
||||
|> join(:inner, [c], mrc in subquery(most_recent_crawl_subquery),
|
||||
on: c.id == mrc.most_recent_id
|
||||
)
|
||||
|> Repo.one()
|
||||
|
||||
# Cache for five minutes
|
||||
Cache.set(key, crawl, ttl: 300)
|
||||
|
||||
data ->
|
||||
Appsignal.increment_counter("most_recent_crawl_cache.hits", 1)
|
||||
Logger.debug("Most recent crawl cache: hit")
|
||||
data
|
||||
end
|
||||
end
|
||||
end
|
|
@ -1,4 +1,7 @@
|
|||
defmodule Backend.Mailer.AdminEmail do
|
||||
@moduledoc """
|
||||
Module for sending emails to the server administrator.
|
||||
"""
|
||||
import Swoosh.Email
|
||||
import Backend.Util
|
||||
require Logger
|
||||
|
@ -9,7 +12,7 @@ defmodule Backend.Mailer.AdminEmail do
|
|||
if admin_email != nil do
|
||||
new()
|
||||
|> to(admin_email)
|
||||
|> from("noreply@fediverse.space")
|
||||
|> from("noreply@index.community")
|
||||
|> subject(subject)
|
||||
|> text_body(body)
|
||||
|> Backend.Mailer.deliver!()
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
defmodule Backend.Mailer do
|
||||
@moduledoc false
|
||||
use Swoosh.Mailer, otp_app: :backend
|
||||
end
|
||||
|
|
|
@ -1,4 +1,7 @@
|
|||
defmodule Backend.Mailer.UserEmail do
|
||||
@moduledoc """
|
||||
Module for sending emails to users.
|
||||
"""
|
||||
import Swoosh.Email
|
||||
import Backend.{Auth, Util}
|
||||
require Logger
|
||||
|
@ -13,8 +16,8 @@ defmodule Backend.Mailer.UserEmail do
|
|||
|
||||
new()
|
||||
|> to(address)
|
||||
|> from("noreply@fediverse.space")
|
||||
|> subject("Login to fediverse.space")
|
||||
|> from("noreply@index.community")
|
||||
|> subject("Login to index.community")
|
||||
|> text_body(body)
|
||||
|> Backend.Mailer.deliver()
|
||||
end
|
||||
|
|
|
@ -0,0 +1,25 @@
|
|||
defmodule Mastodon.Messenger do
|
||||
@moduledoc """
|
||||
Module for interacting with a Mastodon account. In our case, it's only used to DM login links.
|
||||
"""
|
||||
import Backend.{Auth, Util}
|
||||
require Logger
|
||||
|
||||
def dm_login_link(username, user_domain) do
|
||||
mastodon_domain = Application.get_env(:backend, __MODULE__)[:domain]
|
||||
token = Application.get_env(:backend, __MODULE__)[:token]
|
||||
frontend_domain = get_config(:frontend_domain)
|
||||
|
||||
conn = Hunter.new(base_url: "https://#{mastodon_domain}", bearer_token: token)
|
||||
Logger.info(inspect(conn))
|
||||
|
||||
status_text =
|
||||
"@#{username}@#{user_domain} " <>
|
||||
"Someone tried to log in to #{user_domain} on https://#{frontend_domain}.\n" <>
|
||||
"If it was you, click here to confirm:\n" <>
|
||||
"#{get_login_link(user_domain)} " <>
|
||||
"This link will be valid for 12 hours."
|
||||
|
||||
Hunter.Status.create_status(conn, status_text, visibility: :direct)
|
||||
end
|
||||
end
|
|
@ -1,13 +0,0 @@
|
|||
defmodule Mix.Tasks.Crawl do
|
||||
alias Backend.Crawler
|
||||
use Mix.Task
|
||||
|
||||
@shortdoc "Crawl a given instance."
|
||||
|
||||
def run(domain) do
|
||||
Mix.Task.run("app.start")
|
||||
# Application.ensure_all_started(:timex)
|
||||
# Mix.Task.run("loadconfig")
|
||||
Crawler.run(domain)
|
||||
end
|
||||
end
|
|
@ -4,7 +4,7 @@ defmodule Backend.MixProject do
|
|||
def project do
|
||||
[
|
||||
app: :backend,
|
||||
version: "2.3.0",
|
||||
version: "2.8.2",
|
||||
elixir: "~> 1.5",
|
||||
elixirc_paths: elixirc_paths(Mix.env()),
|
||||
compilers: [:phoenix, :gettext] ++ Mix.compilers(),
|
||||
|
@ -23,11 +23,11 @@ defmodule Backend.MixProject do
|
|||
extra_applications: [
|
||||
:logger,
|
||||
:runtime_tools,
|
||||
:mnesia,
|
||||
:gollum,
|
||||
:ex_twilio,
|
||||
:elasticsearch,
|
||||
:appsignal
|
||||
:appsignal,
|
||||
:swoosh,
|
||||
:gen_smtp
|
||||
]
|
||||
]
|
||||
end
|
||||
|
@ -41,28 +41,33 @@ defmodule Backend.MixProject do
|
|||
# Type `mix help deps` for examples and options.
|
||||
defp deps do
|
||||
[
|
||||
{:phoenix, "~> 1.4.3"},
|
||||
{:phoenix_pubsub, "~> 1.1"},
|
||||
{:phoenix, "~> 1.5"},
|
||||
{:phoenix_pubsub, "~> 2.0"},
|
||||
{:phoenix_ecto, "~> 4.0"},
|
||||
{:ecto_sql, "~> 3.0"},
|
||||
{:postgrex, ">= 0.0.0"},
|
||||
{:gettext, "~> 0.11"},
|
||||
{:jason, "~> 1.0"},
|
||||
{:plug_cowboy, "~> 2.0"},
|
||||
{:httpoison, "~> 1.5"},
|
||||
{:plug_cowboy, "~> 2.1"},
|
||||
{:httpoison, "~> 1.7", override: true},
|
||||
{:timex, "~> 3.5"},
|
||||
{:honeydew, "~> 1.4.3"},
|
||||
{:quantum, "~> 2.3"},
|
||||
{:honeydew, "~> 1.5.0"},
|
||||
{:quantum, "~> 3.3"},
|
||||
{:corsica, "~> 1.1.2"},
|
||||
{:sobelow, "~> 0.8", only: :dev},
|
||||
{:sobelow, "~> 0.8", only: [:dev, :test]},
|
||||
{:gollum, "~> 0.3.2"},
|
||||
{:paginator, "~> 0.6.0"},
|
||||
{:public_suffix, "~> 0.6.0"},
|
||||
{:idna, "~> 5.1.2", override: true},
|
||||
{:swoosh, "~> 0.23.3"},
|
||||
{:ex_twilio, "~> 0.7.0"},
|
||||
{:public_suffix, git: "https://github.com/axelson/publicsuffix-elixir"},
|
||||
{:swoosh, "~> 1.0"},
|
||||
{:gen_smtp, "~> 1.1"},
|
||||
{:elasticsearch, "~> 1.0"},
|
||||
{:appsignal, "~> 1.10.1"}
|
||||
{:appsignal, "~> 1.0"},
|
||||
{:credo, "~> 1.1", only: [:dev, :test], runtime: false},
|
||||
{:nebulex, "~> 1.1"},
|
||||
{:hunter, "~> 0.5.1"},
|
||||
{:scrivener_ecto, "~> 2.2"},
|
||||
{:recase, "~> 0.7"},
|
||||
{:ex_rated, "~> 2.0"},
|
||||
{:html_sanitize_ex, "~> 1.4"}
|
||||
]
|
||||
end
|
||||
|
||||
|
|
120
backend/mix.lock
120
backend/mix.lock
|
@ -1,58 +1,66 @@
|
|||
%{
|
||||
"appsignal": {:hex, :appsignal, "1.10.11", "5df2546d6ea15e392a4384b175ebc1bb33f4ccf8fe9872c11542d3ae2043ff88", [:make, :mix], [{:decorator, "~> 1.2.3", [hex: :decorator, repo: "hexpm", optional: false]}, {:hackney, "~> 1.6", [hex: :hackney, repo: "hexpm", optional: false]}, {:phoenix, ">= 1.2.0", [hex: :phoenix, repo: "hexpm", optional: true]}, {:plug, ">= 1.1.0", [hex: :plug, repo: "hexpm", optional: true]}, {:poison, ">= 1.3.0", [hex: :poison, repo: "hexpm", optional: false]}], "hexpm"},
|
||||
"artificery": {:hex, :artificery, "0.4.2", "3ded6e29e13113af52811c72f414d1e88f711410cac1b619ab3a2666bbd7efd4", [:mix], [], "hexpm"},
|
||||
"base64url": {:hex, :base64url, "0.0.1", "36a90125f5948e3afd7be97662a1504b934dd5dac78451ca6e9abf85a10286be", [:rebar], [], "hexpm"},
|
||||
"certifi": {:hex, :certifi, "2.5.1", "867ce347f7c7d78563450a18a6a28a8090331e77fa02380b4a21962a65d36ee5", [:rebar3], [{:parse_trans, "~>3.3", [hex: :parse_trans, repo: "hexpm", optional: false]}], "hexpm"},
|
||||
"combine": {:hex, :combine, "0.10.0", "eff8224eeb56498a2af13011d142c5e7997a80c8f5b97c499f84c841032e429f", [:mix], [], "hexpm"},
|
||||
"connection": {:hex, :connection, "1.0.4", "a1cae72211f0eef17705aaededacac3eb30e6625b04a6117c1b2db6ace7d5976", [:mix], [], "hexpm"},
|
||||
"corsica": {:hex, :corsica, "1.1.2", "5ad8b9dcbeeda4762d78a57c0c8c2f88e1eef8741508517c98cb79e0db1f107d", [:mix], [{:plug, "~> 1.0", [hex: :plug, repo: "hexpm", optional: false]}], "hexpm"},
|
||||
"cowboy": {:hex, :cowboy, "2.6.3", "99aa50e94e685557cad82e704457336a453d4abcb77839ad22dbe71f311fcc06", [:rebar3], [{:cowlib, "~> 2.7.3", [hex: :cowlib, repo: "hexpm", optional: false]}, {:ranch, "~> 1.7.1", [hex: :ranch, repo: "hexpm", optional: false]}], "hexpm"},
|
||||
"cowlib": {:hex, :cowlib, "2.7.3", "a7ffcd0917e6d50b4d5fb28e9e2085a0ceb3c97dea310505f7460ff5ed764ce9", [:rebar3], [], "hexpm"},
|
||||
"crontab": {:hex, :crontab, "1.1.7", "b9219f0bdc8678b94143655a8f229716c5810c0636a4489f98c0956137e53985", [:mix], [{:ecto, "~> 1.0 or ~> 2.0 or ~> 3.0", [hex: :ecto, repo: "hexpm", optional: true]}], "hexpm"},
|
||||
"db_connection": {:hex, :db_connection, "2.1.0", "122e2f62c4906bf2e49554f1e64db5030c19229aa40935f33088e7d543aa79d0", [:mix], [{:connection, "~> 1.0.2", [hex: :connection, repo: "hexpm", optional: false]}], "hexpm"},
|
||||
"decimal": {:hex, :decimal, "1.8.0", "ca462e0d885f09a1c5a342dbd7c1dcf27ea63548c65a65e67334f4b61803822e", [:mix], [], "hexpm"},
|
||||
"decorator": {:hex, :decorator, "1.2.4", "31dfff6143d37f0b68d0bffb3b9f18ace14fea54d4f1b5e4f86ead6f00d9ff6e", [:mix], [], "hexpm"},
|
||||
"distillery": {:hex, :distillery, "2.1.1", "f9332afc2eec8a1a2b86f22429e068ef35f84a93ea1718265e740d90dd367814", [:mix], [{:artificery, "~> 0.2", [hex: :artificery, repo: "hexpm", optional: false]}], "hexpm"},
|
||||
"ecto": {:hex, :ecto, "3.1.7", "fa21d06ef56cdc2fdaa62574e8c3ba34a2751d44ea34c30bc65f0728421043e5", [:mix], [{:decimal, "~> 1.6", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}], "hexpm"},
|
||||
"ecto_sql": {:hex, :ecto_sql, "3.1.6", "1e80e30d16138a729c717f73dcb938590bcdb3a4502f3012414d0cbb261045d8", [:mix], [{:db_connection, "~> 2.0", [hex: :db_connection, repo: "hexpm", optional: false]}, {:ecto, "~> 3.1.0", [hex: :ecto, repo: "hexpm", optional: false]}, {:mariaex, "~> 0.9.1", [hex: :mariaex, repo: "hexpm", optional: true]}, {:myxql, "~> 0.2.0", [hex: :myxql, repo: "hexpm", optional: true]}, {:postgrex, "~> 0.14.0 or ~> 0.15.0", [hex: :postgrex, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm"},
|
||||
"elasticsearch": {:hex, :elasticsearch, "1.0.0", "626d3fb8e7554d9c93eb18817ae2a3d22c2a4191cc903c4644b1334469b15374", [:mix], [{:httpoison, ">= 0.0.0", [hex: :httpoison, repo: "hexpm", optional: false]}, {:poison, ">= 0.0.0", [hex: :poison, repo: "hexpm", optional: true]}, {:sigaws, "~> 0.7", [hex: :sigaws, repo: "hexpm", optional: true]}, {:vex, "~> 0.6.0", [hex: :vex, repo: "hexpm", optional: false]}], "hexpm"},
|
||||
"ex_twilio": {:hex, :ex_twilio, "0.7.0", "d7ce624ef4661311ae28c3e3aa060ecb66a9f4843184d7400c29072f7d3f5a4a", [:mix], [{:httpoison, ">= 0.9.0", [hex: :httpoison, repo: "hexpm", optional: false]}, {:inflex, "~> 1.0", [hex: :inflex, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}, {:joken, "~> 2.0", [hex: :joken, repo: "hexpm", optional: false]}, {:poison, "~> 3.0", [hex: :poison, repo: "hexpm", optional: false]}], "hexpm"},
|
||||
"gen_stage": {:hex, :gen_stage, "0.14.2", "6a2a578a510c5bfca8a45e6b27552f613b41cf584b58210f017088d3d17d0b14", [:mix], [], "hexpm"},
|
||||
"gen_state_machine": {:hex, :gen_state_machine, "2.0.5", "9ac15ec6e66acac994cc442dcc2c6f9796cf380ec4b08267223014be1c728a95", [:mix], [], "hexpm"},
|
||||
"gettext": {:hex, :gettext, "0.17.0", "abe21542c831887a2b16f4c94556db9c421ab301aee417b7c4fbde7fbdbe01ec", [:mix], [], "hexpm"},
|
||||
"gollum": {:hex, :gollum, "0.3.3", "25ebb47700b9236bc4e5382bf91b72e4cdaf9bae3556172eff27e770735a198f", [:mix], [{:httpoison, "~> 1.5.1", [hex: :httpoison, repo: "hexpm", optional: false]}], "hexpm"},
|
||||
"hackney": {:hex, :hackney, "1.15.1", "9f8f471c844b8ce395f7b6d8398139e26ddca9ebc171a8b91342ee15a19963f4", [:rebar3], [{:certifi, "2.5.1", [hex: :certifi, repo: "hexpm", optional: false]}, {:idna, "6.0.0", [hex: :idna, repo: "hexpm", optional: false]}, {:metrics, "1.0.1", [hex: :metrics, repo: "hexpm", optional: false]}, {:mimerl, "~>1.1", [hex: :mimerl, repo: "hexpm", optional: false]}, {:ssl_verify_fun, "1.1.4", [hex: :ssl_verify_fun, repo: "hexpm", optional: false]}], "hexpm"},
|
||||
"honeydew": {:hex, :honeydew, "1.4.3", "f2d976aaf8b9b914a635d2d483f1a71d2f6d8651809474dd5db581953cbebb30", [:mix], [{:ecto, "~> 3.0", [hex: :ecto, repo: "hexpm", optional: true]}], "hexpm"},
|
||||
"httpoison": {:hex, :httpoison, "1.5.1", "0f55b5b673b03c5c327dac7015a67cb571b99b631acc0bc1b0b98dcd6b9f2104", [:mix], [{:hackney, "~> 1.8", [hex: :hackney, repo: "hexpm", optional: false]}], "hexpm"},
|
||||
"idna": {:hex, :idna, "5.1.2", "e21cb58a09f0228a9e0b95eaa1217f1bcfc31a1aaa6e1fdf2f53a33f7dbd9494", [:rebar3], [{:unicode_util_compat, "0.3.1", [hex: :unicode_util_compat, repo: "hexpm", optional: false]}], "hexpm"},
|
||||
"inflex": {:hex, :inflex, "1.10.0", "8366a7696e70e1813aca102e61274addf85d99f4a072b2f9c7984054ea1b9d29", [:mix], [], "hexpm"},
|
||||
"jason": {:hex, :jason, "1.1.2", "b03dedea67a99223a2eaf9f1264ce37154564de899fd3d8b9a21b1a6fd64afe7", [:mix], [{:decimal, "~> 1.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm"},
|
||||
"joken": {:hex, :joken, "2.1.0", "bf21a73105d82649f617c5e59a7f8919aa47013d2519ebcc39d998d8d12adda9", [:mix], [{:jose, "~> 1.9", [hex: :jose, repo: "hexpm", optional: false]}], "hexpm"},
|
||||
"jose": {:hex, :jose, "1.9.0", "4167c5f6d06ffaebffd15cdb8da61a108445ef5e85ab8f5a7ad926fdf3ada154", [:mix, :rebar3], [{:base64url, "~> 0.0.1", [hex: :base64url, repo: "hexpm", optional: false]}], "hexpm"},
|
||||
"libring": {:hex, :libring, "1.4.0", "41246ba2f3fbc76b3971f6bce83119dfec1eee17e977a48d8a9cfaaf58c2a8d6", [:mix], [], "hexpm"},
|
||||
"metrics": {:hex, :metrics, "1.0.1", "25f094dea2cda98213cecc3aeff09e940299d950904393b2a29d191c346a8486", [:rebar3], [], "hexpm"},
|
||||
"mime": {:hex, :mime, "1.3.1", "30ce04ab3175b6ad0bdce0035cba77bba68b813d523d1aac73d9781b4d193cf8", [:mix], [], "hexpm"},
|
||||
"mimerl": {:hex, :mimerl, "1.2.0", "67e2d3f571088d5cfd3e550c383094b47159f3eee8ffa08e64106cdf5e981be3", [:rebar3], [], "hexpm"},
|
||||
"paginator": {:hex, :paginator, "0.6.0", "bc2c01abdd98281ff39b6a7439cf540091122a7927bdaabc167c61d4508f9cbb", [:mix], [{:ecto, "~> 3.0", [hex: :ecto, repo: "hexpm", optional: false]}, {:ecto_sql, "~> 3.0", [hex: :ecto_sql, repo: "hexpm", optional: false]}, {:postgrex, "~> 0.13", [hex: :postgrex, repo: "hexpm", optional: true]}], "hexpm"},
|
||||
"parse_trans": {:hex, :parse_trans, "3.3.0", "09765507a3c7590a784615cfd421d101aec25098d50b89d7aa1d66646bc571c1", [:rebar3], [], "hexpm"},
|
||||
"phoenix": {:hex, :phoenix, "1.4.9", "746d098e10741c334d88143d3c94cab1756435f94387a63441792e66ec0ee974", [:mix], [{:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:phoenix_pubsub, "~> 1.1", [hex: :phoenix_pubsub, repo: "hexpm", optional: false]}, {:plug, "~> 1.8.1 or ~> 1.9", [hex: :plug, repo: "hexpm", optional: false]}, {:plug_cowboy, "~> 1.0 or ~> 2.0", [hex: :plug_cowboy, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm"},
|
||||
"phoenix_ecto": {:hex, :phoenix_ecto, "4.0.0", "c43117a136e7399ea04ecaac73f8f23ee0ffe3e07acfcb8062fe5f4c9f0f6531", [:mix], [{:ecto, "~> 3.0", [hex: :ecto, repo: "hexpm", optional: false]}, {:phoenix_html, "~> 2.9", [hex: :phoenix_html, repo: "hexpm", optional: true]}, {:plug, "~> 1.0", [hex: :plug, repo: "hexpm", optional: false]}], "hexpm"},
|
||||
"phoenix_pubsub": {:hex, :phoenix_pubsub, "1.1.2", "496c303bdf1b2e98a9d26e89af5bba3ab487ba3a3735f74bf1f4064d2a845a3e", [:mix], [], "hexpm"},
|
||||
"plug": {:hex, :plug, "1.8.2", "0bcce1daa420f189a6491f3940cc77ea7fb1919761175c9c3b59800d897440fc", [:mix], [{:mime, "~> 1.0", [hex: :mime, repo: "hexpm", optional: false]}, {:plug_crypto, "~> 1.0", [hex: :plug_crypto, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4", [hex: :telemetry, repo: "hexpm", optional: true]}], "hexpm"},
|
||||
"plug_cowboy": {:hex, :plug_cowboy, "2.1.0", "b75768153c3a8a9e8039d4b25bb9b14efbc58e9c4a6e6a270abff1cd30cbe320", [:mix], [{:cowboy, "~> 2.5", [hex: :cowboy, repo: "hexpm", optional: false]}, {:plug, "~> 1.7", [hex: :plug, repo: "hexpm", optional: false]}], "hexpm"},
|
||||
"plug_crypto": {:hex, :plug_crypto, "1.0.0", "18e49317d3fa343f24620ed22795ec29d4a5e602d52d1513ccea0b07d8ea7d4d", [:mix], [], "hexpm"},
|
||||
"poison": {:hex, :poison, "3.1.0", "d9eb636610e096f86f25d9a46f35a9facac35609a7591b3be3326e99a0484665", [:mix], [], "hexpm"},
|
||||
"postgrex": {:hex, :postgrex, "0.14.3", "5754dee2fdf6e9e508cbf49ab138df964278700b764177e8f3871e658b345a1e", [:mix], [{:connection, "~> 1.0", [hex: :connection, repo: "hexpm", optional: false]}, {:db_connection, "~> 2.0", [hex: :db_connection, repo: "hexpm", optional: false]}, {:decimal, "~> 1.5", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}], "hexpm"},
|
||||
"public_suffix": {:hex, :public_suffix, "0.6.0", "100cfe86f13f9f6f0cf67e743b1b83c78dd1223a2c422fa03ebf4adff514cbc3", [:mix], [{:idna, ">= 1.2.0 and < 6.0.0", [hex: :idna, repo: "hexpm", optional: false]}], "hexpm"},
|
||||
"quantum": {:hex, :quantum, "2.3.4", "72a0e8855e2adc101459eac8454787cb74ab4169de6ca50f670e72142d4960e9", [:mix], [{:calendar, "~> 0.17", [hex: :calendar, repo: "hexpm", optional: true]}, {:crontab, "~> 1.1", [hex: :crontab, repo: "hexpm", optional: false]}, {:gen_stage, "~> 0.12", [hex: :gen_stage, repo: "hexpm", optional: false]}, {:swarm, "~> 3.3", [hex: :swarm, repo: "hexpm", optional: false]}, {:timex, "~> 3.1", [hex: :timex, repo: "hexpm", optional: true]}], "hexpm"},
|
||||
"ranch": {:hex, :ranch, "1.7.1", "6b1fab51b49196860b733a49c07604465a47bdb78aa10c1c16a3d199f7f8c881", [:rebar3], [], "hexpm"},
|
||||
"sobelow": {:hex, :sobelow, "0.8.0", "a3ec73e546dfde19f14818e5000c418e3f305d9edb070e79dd391de0ae1cd1ea", [:mix], [], "hexpm"},
|
||||
"ssl_verify_fun": {:hex, :ssl_verify_fun, "1.1.4", "f0eafff810d2041e93f915ef59899c923f4568f4585904d010387ed74988e77b", [:make, :mix, :rebar3], [], "hexpm"},
|
||||
"swarm": {:hex, :swarm, "3.4.0", "64f8b30055d74640d2186c66354b33b999438692a91be275bb89cdc7e401f448", [:mix], [{:gen_state_machine, "~> 2.0", [hex: :gen_state_machine, repo: "hexpm", optional: false]}, {:libring, "~> 1.0", [hex: :libring, repo: "hexpm", optional: false]}], "hexpm"},
|
||||
"swoosh": {:hex, :swoosh, "0.23.3", "750a6d4e2b72e4307e2ff53209fd990cebb46edbf7cb4479678d4e68eb17fe98", [:mix], [{:cowboy, "~> 1.0.1 or ~> 1.1 or ~> 2.4", [hex: :cowboy, repo: "hexpm", optional: true]}, {:gen_smtp, "~> 0.13", [hex: :gen_smtp, repo: "hexpm", optional: true]}, {:hackney, "~> 1.9", [hex: :hackney, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}, {:mail, "~> 0.2", [hex: :mail, repo: "hexpm", optional: true]}, {:mime, "~> 1.1", [hex: :mime, repo: "hexpm", optional: false]}, {:plug_cowboy, ">= 1.0.0", [hex: :plug_cowboy, repo: "hexpm", optional: true]}], "hexpm"},
|
||||
"telemetry": {:hex, :telemetry, "0.4.0", "8339bee3fa8b91cb84d14c2935f8ecf399ccd87301ad6da6b71c09553834b2ab", [:rebar3], [], "hexpm"},
|
||||
"timex": {:hex, :timex, "3.6.1", "efdf56d0e67a6b956cc57774353b0329c8ab7726766a11547e529357ffdc1d56", [:mix], [{:combine, "~> 0.10", [hex: :combine, repo: "hexpm", optional: false]}, {:gettext, "~> 0.10", [hex: :gettext, repo: "hexpm", optional: false]}, {:tzdata, "~> 0.1.8 or ~> 0.5 or ~> 1.0.0", [hex: :tzdata, repo: "hexpm", optional: false]}], "hexpm"},
|
||||
"tzdata": {:hex, :tzdata, "1.0.1", "f6027a331af7d837471248e62733c6ebee86a72e57c613aa071ebb1f750fc71a", [:mix], [{:hackney, "~> 1.0", [hex: :hackney, repo: "hexpm", optional: false]}], "hexpm"},
|
||||
"unicode_util_compat": {:hex, :unicode_util_compat, "0.3.1", "a1f612a7b512638634a603c8f401892afbf99b8ce93a45041f8aaca99cadb85e", [:rebar3], [], "hexpm"},
|
||||
"vex": {:hex, :vex, "0.6.0", "4e79b396b2ec18cd909eed0450b19108d9631842598d46552dc05031100b7a56", [:mix], [], "hexpm"},
|
||||
"appsignal": {:hex, :appsignal, "1.13.5", "153ebe929fae8f637d43bf66058efecbb4affc4037caa466d31a236cb3f2e788", [:make, :mix], [{:decorator, "~> 1.2.3 or ~> 1.3", [hex: :decorator, repo: "hexpm", optional: false]}, {:hackney, "~> 1.6", [hex: :hackney, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:phoenix, ">= 1.2.0", [hex: :phoenix, repo: "hexpm", optional: true]}, {:phoenix_live_view, "~> 0.9", [hex: :phoenix_live_view, repo: "hexpm", optional: true]}, {:plug, ">= 1.1.0", [hex: :plug, repo: "hexpm", optional: true]}, {:poison, ">= 1.3.0", [hex: :poison, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "b8b6847c0d7f8ad03523be0fa6fdd670679ad42d62e2a8b74e599eba0247096c"},
|
||||
"bunt": {:hex, :bunt, "0.2.0", "951c6e801e8b1d2cbe58ebbd3e616a869061ddadcc4863d0a2182541acae9a38", [:mix], [], "hexpm", "7af5c7e09fe1d40f76c8e4f9dd2be7cebd83909f31fee7cd0e9eadc567da8353"},
|
||||
"certifi": {:hex, :certifi, "2.6.1", "dbab8e5e155a0763eea978c913ca280a6b544bfa115633fa20249c3d396d9493", [:rebar3], [], "hexpm", "524c97b4991b3849dd5c17a631223896272c6b0af446778ba4675a1dff53bb7e"},
|
||||
"combine": {:hex, :combine, "0.10.0", "eff8224eeb56498a2af13011d142c5e7997a80c8f5b97c499f84c841032e429f", [:mix], [], "hexpm", "1b1dbc1790073076580d0d1d64e42eae2366583e7aecd455d1215b0d16f2451b"},
|
||||
"connection": {:hex, :connection, "1.1.0", "ff2a49c4b75b6fb3e674bfc5536451607270aac754ffd1bdfe175abe4a6d7a68", [:mix], [], "hexpm", "722c1eb0a418fbe91ba7bd59a47e28008a189d47e37e0e7bb85585a016b2869c"},
|
||||
"corsica": {:hex, :corsica, "1.1.3", "5f1de40bc9285753aa03afbdd10c364dac79b2ddbf2ba9c5c9c47b397ec06f40", [:mix], [{:plug, "~> 1.0", [hex: :plug, repo: "hexpm", optional: false]}], "hexpm", "8156b3a14a114a346262871333a931a1766b2597b56bf994fcfcb65443a348ad"},
|
||||
"cowboy": {:hex, :cowboy, "2.9.0", "865dd8b6607e14cf03282e10e934023a1bd8be6f6bacf921a7e2a96d800cd452", [:make, :rebar3], [{:cowlib, "2.11.0", [hex: :cowlib, repo: "hexpm", optional: false]}, {:ranch, "1.8.0", [hex: :ranch, repo: "hexpm", optional: false]}], "hexpm", "2c729f934b4e1aa149aff882f57c6372c15399a20d54f65c8d67bef583021bde"},
|
||||
"cowboy_telemetry": {:hex, :cowboy_telemetry, "0.3.1", "ebd1a1d7aff97f27c66654e78ece187abdc646992714164380d8a041eda16754", [:rebar3], [{:cowboy, "~> 2.7", [hex: :cowboy, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "3a6efd3366130eab84ca372cbd4a7d3c3a97bdfcfb4911233b035d117063f0af"},
|
||||
"cowlib": {:hex, :cowlib, "2.11.0", "0b9ff9c346629256c42ebe1eeb769a83c6cb771a6ee5960bd110ab0b9b872063", [:make, :rebar3], [], "hexpm", "2b3e9da0b21c4565751a6d4901c20d1b4cc25cbb7fd50d91d2ab6dd287bc86a9"},
|
||||
"credo": {:hex, :credo, "1.5.6", "e04cc0fdc236fefbb578e0c04bd01a471081616e741d386909e527ac146016c6", [:mix], [{:bunt, "~> 0.2.0", [hex: :bunt, repo: "hexpm", optional: false]}, {:file_system, "~> 0.2.8", [hex: :file_system, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "4b52a3e558bd64e30de62a648518a5ea2b6e3e5d2b164ef5296244753fc7eb17"},
|
||||
"crontab": {:hex, :crontab, "1.1.10", "dc9bb1f4299138d47bce38341f5dcbee0aa6c205e864fba7bc847f3b5cb48241", [:mix], [{:ecto, "~> 1.0 or ~> 2.0 or ~> 3.0", [hex: :ecto, repo: "hexpm", optional: true]}], "hexpm", "1347d889d1a0eda997990876b4894359e34bfbbd688acbb0ba28a2795ca40685"},
|
||||
"db_connection": {:hex, :db_connection, "2.4.0", "d04b1b73795dae60cead94189f1b8a51cc9e1f911c234cc23074017c43c031e5", [:mix], [{:connection, "~> 1.0", [hex: :connection, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "ad416c21ad9f61b3103d254a71b63696ecadb6a917b36f563921e0de00d7d7c8"},
|
||||
"decimal": {:hex, :decimal, "2.0.0", "a78296e617b0f5dd4c6caf57c714431347912ffb1d0842e998e9792b5642d697", [:mix], [], "hexpm", "34666e9c55dea81013e77d9d87370fe6cb6291d1ef32f46a1600230b1d44f577"},
|
||||
"decorator": {:hex, :decorator, "1.4.0", "a57ac32c823ea7e4e67f5af56412d12b33274661bb7640ec7fc882f8d23ac419", [:mix], [], "hexpm", "0a07cedd9083da875c7418dea95b78361197cf2bf3211d743f6f7ce39656597f"},
|
||||
"ecto": {:hex, :ecto, "3.6.2", "efdf52acfc4ce29249bab5417415bd50abd62db7b0603b8bab0d7b996548c2bc", [:mix], [{:decimal, "~> 1.6 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "efad6dfb04e6f986b8a3047822b0f826d9affe8e4ebdd2aeedbfcb14fd48884e"},
|
||||
"ecto_sql": {:hex, :ecto_sql, "3.6.2", "9526b5f691701a5181427634c30655ac33d11e17e4069eff3ae1176c764e0ba3", [:mix], [{:db_connection, "~> 2.2", [hex: :db_connection, repo: "hexpm", optional: false]}, {:ecto, "~> 3.6.2", [hex: :ecto, repo: "hexpm", optional: false]}, {:myxql, "~> 0.4.0 or ~> 0.5.0", [hex: :myxql, repo: "hexpm", optional: true]}, {:postgrex, "~> 0.15.0 or ~> 1.0", [hex: :postgrex, repo: "hexpm", optional: true]}, {:tds, "~> 2.1.1", [hex: :tds, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "5ec9d7e6f742ea39b63aceaea9ac1d1773d574ea40df5a53ef8afbd9242fdb6b"},
|
||||
"elasticsearch": {:hex, :elasticsearch, "1.0.1", "8339538d90af6b280f10ecd02b1eae372f09373e629b336a13461babf7366495", [:mix], [{:httpoison, ">= 0.0.0", [hex: :httpoison, repo: "hexpm", optional: false]}, {:poison, ">= 0.0.0", [hex: :poison, repo: "hexpm", optional: true]}, {:sigaws, "~> 0.7", [hex: :sigaws, repo: "hexpm", optional: true]}, {:vex, "~> 0.6", [hex: :vex, repo: "hexpm", optional: false]}], "hexpm", "83e7d8b8bee3e7e19a06ab4d357d24845ac1da894e79678227fd52c0b7f71867"},
|
||||
"ex2ms": {:hex, :ex2ms, "1.6.0", "f39bbd9ff1b0f27b3f707bab2d167066dd8965e7df1149b962d94c74615d0e09", [:mix], [], "hexpm", "0d1ab5e08421af5cd69146efb408dbb1ff77f38a2f4df5f086f2512dc8cf65bf"},
|
||||
"ex_rated": {:hex, :ex_rated, "2.0.1", "49b4c170039fc62fa93ea28df16e3586e98c2fe0aec10f75e6717fba8039637f", [:mix], [{:ex2ms, "~> 1.5", [hex: :ex2ms, repo: "hexpm", optional: false]}], "hexpm", "2f675b649f74028842ae3d1f0c5090f8a664682df98c82836db6f1d321eaa42a"},
|
||||
"file_system": {:hex, :file_system, "0.2.10", "fb082005a9cd1711c05b5248710f8826b02d7d1784e7c3451f9c1231d4fc162d", [:mix], [], "hexpm", "41195edbfb562a593726eda3b3e8b103a309b733ad25f3d642ba49696bf715dc"},
|
||||
"gen_smtp": {:hex, :gen_smtp, "1.1.1", "bf9303c31735100631b1d708d629e4c65944319d1143b5c9952054f4a1311d85", [:rebar3], [{:hut, "1.3.0", [hex: :hut, repo: "hexpm", optional: false]}, {:ranch, ">= 1.7.0", [hex: :ranch, repo: "hexpm", optional: false]}], "hexpm", "51bc50cc017efd4a4248cbc39ea30fb60efa7d4a49688986fafad84434ff9ab7"},
|
||||
"gen_stage": {:hex, :gen_stage, "1.1.0", "dd0c0f8d2f3b993fdbd3d58e94abbe65380f4e78bdee3fa93d5618d7d14abe60", [:mix], [], "hexpm", "7f2b36a6d02f7ef2ba410733b540ec423af65ec9c99f3d1083da508aca3b9305"},
|
||||
"gettext": {:hex, :gettext, "0.18.2", "7df3ea191bb56c0309c00a783334b288d08a879f53a7014341284635850a6e55", [:mix], [], "hexpm", "f9f537b13d4fdd30f3039d33cb80144c3aa1f8d9698e47d7bcbcc8df93b1f5c5"},
|
||||
"gollum": {:hex, :gollum, "0.3.3", "25ebb47700b9236bc4e5382bf91b72e4cdaf9bae3556172eff27e770735a198f", [:mix], [{:httpoison, "~> 1.5.1", [hex: :httpoison, repo: "hexpm", optional: false]}], "hexpm", "39268eeaf4f0adb6fdebe4f8c36b10a277881ab2eee3419c9b6727759e2f5a5d"},
|
||||
"hackney": {:hex, :hackney, "1.17.4", "99da4674592504d3fb0cfef0db84c3ba02b4508bae2dff8c0108baa0d6e0977c", [:rebar3], [{:certifi, "~>2.6.1", [hex: :certifi, repo: "hexpm", optional: false]}, {:idna, "~>6.1.0", [hex: :idna, repo: "hexpm", optional: false]}, {:metrics, "~>1.0.0", [hex: :metrics, repo: "hexpm", optional: false]}, {:mimerl, "~>1.1", [hex: :mimerl, repo: "hexpm", optional: false]}, {:parse_trans, "3.3.1", [hex: :parse_trans, repo: "hexpm", optional: false]}, {:ssl_verify_fun, "~>1.1.0", [hex: :ssl_verify_fun, repo: "hexpm", optional: false]}, {:unicode_util_compat, "~>0.7.0", [hex: :unicode_util_compat, repo: "hexpm", optional: false]}], "hexpm", "de16ff4996556c8548d512f4dbe22dd58a587bf3332e7fd362430a7ef3986b16"},
|
||||
"honeydew": {:hex, :honeydew, "1.5.0", "53088c1d87399efa5c0939adc8d32a9713b8fe6ce00a77c6769d2d363abac6bc", [:mix], [{:ecto, "~> 3.0", [hex: :ecto, repo: "hexpm", optional: true]}], "hexpm", "f71669e25f6a972e970ecbd79c34c4ad4b28369be78e4f8164fe8d0c5a674907"},
|
||||
"html_sanitize_ex": {:hex, :html_sanitize_ex, "1.4.1", "e8a67da405fe9f0d1be121a40a60f70811192033a5b8d00a95dddd807f5e053e", [:mix], [{:mochiweb, "~> 2.15", [hex: :mochiweb, repo: "hexpm", optional: false]}], "hexpm", "68d92656f47cd73598c45ad2394561f025c8c65d146001b955fd7b517858962a"},
|
||||
"httpoison": {:hex, :httpoison, "1.8.0", "6b85dea15820b7804ef607ff78406ab449dd78bed923a49c7160e1886e987a3d", [:mix], [{:hackney, "~> 1.17", [hex: :hackney, repo: "hexpm", optional: false]}], "hexpm", "28089eaa98cf90c66265b6b5ad87c59a3729bea2e74e9d08f9b51eb9729b3c3a"},
|
||||
"hunter": {:hex, :hunter, "0.5.1", "374dc4a800e2c340659657f8875e466075c7ea532e0d7a7787665f272b410150", [:mix], [{:httpoison, "~> 1.5", [hex: :httpoison, repo: "hexpm", optional: false]}, {:poison, "~> 4.0", [hex: :poison, repo: "hexpm", optional: false]}], "hexpm", "209b2cca7e4d51d5ff7ee4a0ab6cdc4c6ad23ddd61c9e12ceeee6f7ffbeae9c8"},
|
||||
"hut": {:hex, :hut, "1.3.0", "71f2f054e657c03f959cf1acc43f436ea87580696528ca2a55c8afb1b06c85e7", [:"erlang.mk", :rebar, :rebar3], [], "hexpm", "7e15d28555d8a1f2b5a3a931ec120af0753e4853a4c66053db354f35bf9ab563"},
|
||||
"idna": {:hex, :idna, "6.1.1", "8a63070e9f7d0c62eb9d9fcb360a7de382448200fbbd1b106cc96d3d8099df8d", [:rebar3], [{:unicode_util_compat, "~>0.7.0", [hex: :unicode_util_compat, repo: "hexpm", optional: false]}], "hexpm", "92376eb7894412ed19ac475e4a86f7b413c1b9fbb5bd16dccd57934157944cea"},
|
||||
"inflex": {:hex, :inflex, "2.1.0", "a365cf0821a9dacb65067abd95008ca1b0bb7dcdd85ae59965deef2aa062924c", [:mix], [], "hexpm", "14c17d05db4ee9b6d319b0bff1bdf22aa389a25398d1952c7a0b5f3d93162dd8"},
|
||||
"jason": {:hex, :jason, "1.2.2", "ba43e3f2709fd1aa1dce90aaabfd039d000469c05c56f0b8e31978e03fa39052", [:mix], [{:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm", "18a228f5f0058ee183f29f9eae0805c6e59d61c3b006760668d8d18ff0d12179"},
|
||||
"joken": {:hex, :joken, "2.3.0", "62a979c46f2c81dcb8ddc9150453b60d3757d1ac393c72bb20fc50a7b0827dc6", [:mix], [{:jose, "~> 1.10", [hex: :jose, repo: "hexpm", optional: false]}], "hexpm", "57b263a79c0ec5d536ac02d569c01e6b4de91bd1cb825625fe90eab4feb7bc1e"},
|
||||
"jose": {:hex, :jose, "1.11.1", "59da64010c69aad6cde2f5b9248b896b84472e99bd18f246085b7b9fe435dcdb", [:mix, :rebar3], [], "hexpm", "078f6c9fb3cd2f4cfafc972c814261a7d1e8d2b3685c0a76eb87e158efff1ac5"},
|
||||
"metrics": {:hex, :metrics, "1.0.1", "25f094dea2cda98213cecc3aeff09e940299d950904393b2a29d191c346a8486", [:rebar3], [], "hexpm", "69b09adddc4f74a40716ae54d140f93beb0fb8978d8636eaded0c31b6f099f16"},
|
||||
"mime": {:hex, :mime, "1.6.0", "dabde576a497cef4bbdd60aceee8160e02a6c89250d6c0b29e56c0dfb00db3d2", [:mix], [], "hexpm", "31a1a8613f8321143dde1dafc36006a17d28d02bdfecb9e95a880fa7aabd19a7"},
|
||||
"mimerl": {:hex, :mimerl, "1.2.0", "67e2d3f571088d5cfd3e550c383094b47159f3eee8ffa08e64106cdf5e981be3", [:rebar3], [], "hexpm", "f278585650aa581986264638ebf698f8bb19df297f66ad91b18910dfc6e19323"},
|
||||
"mochiweb": {:hex, :mochiweb, "2.21.0", "3fe5c3403606726d7bc6dabbf36f9d634d5364ce7f33ce73442937fa54feec37", [:rebar3], [], "hexpm", "f848bfa1b75c32d56da9d2730245e34df4b39079c5d45d7b966b072ba53f8a13"},
|
||||
"nebulex": {:hex, :nebulex, "1.2.2", "5b2bb7420a103b2a4278f354c9bd239bc77cd3bbdeddcebc4cc1d6ee656f126c", [:mix], [{:decorator, "~> 1.3", [hex: :decorator, repo: "hexpm", optional: false]}, {:shards, "~> 0.6", [hex: :shards, repo: "hexpm", optional: false]}], "hexpm", "6804ddd7660fd4010a5af5957316ab7471c2db003189dba79dc3dd7b3f0aabf6"},
|
||||
"parse_trans": {:hex, :parse_trans, "3.3.1", "16328ab840cc09919bd10dab29e431da3af9e9e7e7e6f0089dd5a2d2820011d8", [:rebar3], [], "hexpm", "07cd9577885f56362d414e8c4c4e6bdf10d43a8767abb92d24cbe8b24c54888b"},
|
||||
"phoenix": {:hex, :phoenix, "1.5.9", "a6368d36cfd59d917b37c44386e01315bc89f7609a10a45a22f47c007edf2597", [:mix], [{:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:phoenix_html, "~> 2.13 or ~> 3.0", [hex: :phoenix_html, repo: "hexpm", optional: true]}, {:phoenix_pubsub, "~> 2.0", [hex: :phoenix_pubsub, repo: "hexpm", optional: false]}, {:plug, "~> 1.10", [hex: :plug, repo: "hexpm", optional: false]}, {:plug_cowboy, "~> 1.0 or ~> 2.2", [hex: :plug_cowboy, repo: "hexpm", optional: true]}, {:plug_crypto, "~> 1.1.2 or ~> 1.2", [hex: :plug_crypto, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "7e4bce20a67c012f1fbb0af90e5da49fa7bf0d34e3a067795703b74aef75427d"},
|
||||
"phoenix_ecto": {:hex, :phoenix_ecto, "4.3.0", "2c69a452c2e0ee8c93345ae1cdc1696ef4877ff9cbb15c305def41960c3c4ebf", [:mix], [{:ecto, "~> 3.0", [hex: :ecto, repo: "hexpm", optional: false]}, {:phoenix_html, "~> 2.14.2 or ~> 3.0", [hex: :phoenix_html, repo: "hexpm", optional: true]}, {:plug, "~> 1.0", [hex: :plug, repo: "hexpm", optional: false]}], "hexpm", "0ac491924217550c8f42c81c1f390b5d81517d12ceaf9abf3e701156760a848e"},
|
||||
"phoenix_pubsub": {:hex, :phoenix_pubsub, "2.0.0", "a1ae76717bb168cdeb10ec9d92d1480fec99e3080f011402c0a2d68d47395ffb", [:mix], [], "hexpm", "c52d948c4f261577b9c6fa804be91884b381a7f8f18450c5045975435350f771"},
|
||||
"plug": {:hex, :plug, "1.12.0", "39dc7f1ef8c46bb1bf6dd8f6a49f526c45b4b92ce553687fd885b559a46d0230", [:mix], [{:mime, "~> 1.0 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:plug_crypto, "~> 1.1.1 or ~> 1.2", [hex: :plug_crypto, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "5282c76e89efdf43f2e04bd268ca99d738039f9518137f02ff468cee3ba78096"},
|
||||
"plug_cowboy": {:hex, :plug_cowboy, "2.5.1", "7cc96ff645158a94cf3ec9744464414f02287f832d6847079adfe0b58761cbd0", [:mix], [{:cowboy, "~> 2.7", [hex: :cowboy, repo: "hexpm", optional: false]}, {:cowboy_telemetry, "~> 0.3", [hex: :cowboy_telemetry, repo: "hexpm", optional: false]}, {:plug, "~> 1.7", [hex: :plug, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "107d0a5865fa92bcb48e631cc0729ae9ccfa0a9f9a1bd8f01acb513abf1c2d64"},
|
||||
"plug_crypto": {:hex, :plug_crypto, "1.2.2", "05654514ac717ff3a1843204b424477d9e60c143406aa94daf2274fdd280794d", [:mix], [], "hexpm", "87631c7ad914a5a445f0a3809f99b079113ae4ed4b867348dd9eec288cecb6db"},
|
||||
"poison": {:hex, :poison, "4.0.1", "bcb755a16fac91cad79bfe9fc3585bb07b9331e50cfe3420a24bcc2d735709ae", [:mix], [], "hexpm", "ba8836feea4b394bb718a161fc59a288fe0109b5006d6bdf97b6badfcf6f0f25"},
|
||||
"postgrex": {:hex, :postgrex, "0.15.10", "2809dee1b1d76f7cbabe570b2a9285c2e7b41be60cf792f5f2804a54b838a067", [:mix], [{:connection, "~> 1.0", [hex: :connection, repo: "hexpm", optional: false]}, {:db_connection, "~> 2.1", [hex: :db_connection, repo: "hexpm", optional: false]}, {:decimal, "~> 1.5 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}], "hexpm", "1560ca427542f6b213f8e281633ae1a3b31cdbcd84ebd7f50628765b8f6132be"},
|
||||
"public_suffix": {:git, "https://github.com/axelson/publicsuffix-elixir", "89372422ab8b433de508519ef474e39699fd11ca", []},
|
||||
"quantum": {:hex, :quantum, "3.3.0", "e8f6b9479728774288c5f426b11a6e3e8f619f3c226163a7e18bccfe543b714d", [:mix], [{:crontab, "~> 1.1", [hex: :crontab, repo: "hexpm", optional: false]}, {:gen_stage, "~> 0.14 or ~> 1.0", [hex: :gen_stage, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "3b83ef137ab3887e783b013418b5ce3e847d66b71c4ef0f233b0321c84b72f67"},
|
||||
"ranch": {:hex, :ranch, "1.8.0", "8c7a100a139fd57f17327b6413e4167ac559fbc04ca7448e9be9057311597a1d", [:make, :rebar3], [], "hexpm", "49fbcfd3682fab1f5d109351b61257676da1a2fdbe295904176d5e521a2ddfe5"},
|
||||
"recase": {:hex, :recase, "0.7.0", "3f2f719f0886c7a3b7fe469058ec539cb7bbe0023604ae3bce920e186305e5ae", [:mix], [], "hexpm", "36f5756a9f552f4a94b54a695870e32f4e72d5fad9c25e61bc4a3151c08a4e0c"},
|
||||
"scrivener": {:hex, :scrivener, "2.7.2", "1d913c965ec352650a7f864ad7fd8d80462f76a32f33d57d1e48bc5e9d40aba2", [:mix], [], "hexpm", "7866a0ec4d40274efbee1db8bead13a995ea4926ecd8203345af8f90d2b620d9"},
|
||||
"scrivener_ecto": {:hex, :scrivener_ecto, "2.7.0", "cf64b8cb8a96cd131cdbcecf64e7fd395e21aaa1cb0236c42a7c2e34b0dca580", [:mix], [{:ecto, "~> 3.3", [hex: :ecto, repo: "hexpm", optional: false]}, {:scrivener, "~> 2.4", [hex: :scrivener, repo: "hexpm", optional: false]}], "hexpm", "e809f171687806b0031129034352f5ae44849720c48dd839200adeaf0ac3e260"},
|
||||
"shards": {:hex, :shards, "0.6.2", "e05d05537883220c3b8a8f9d40d5c8ba7ff6064c63ebb6b23046972f6863b2d1", [:make, :rebar3], [], "hexpm", "58afa3712f1f1256a2a15e39fa95b7cd758087aaa7a25beaf786daabd87890f0"},
|
||||
"sobelow": {:hex, :sobelow, "0.11.1", "23438964486f8112b41e743bbfd402da3e5b296fdc9eacab29914b79c48916dd", [:mix], [{:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "9897363a7eff96f4809304a90aad819e2ad5e5d24db547af502885146746a53c"},
|
||||
"ssl_verify_fun": {:hex, :ssl_verify_fun, "1.1.6", "cf344f5692c82d2cd7554f5ec8fd961548d4fd09e7d22f5b62482e5aeaebd4b0", [:make, :mix, :rebar3], [], "hexpm", "bdb0d2471f453c88ff3908e7686f86f9be327d065cc1ec16fa4540197ea04680"},
|
||||
"swoosh": {:hex, :swoosh, "1.5.0", "2be4cfc1be10f2203d1854c85b18d8c7be0321445a782efd53ef0b2b88f03ce4", [:mix], [{:cowboy, "~> 1.1 or ~> 2.4", [hex: :cowboy, repo: "hexpm", optional: true]}, {:finch, "~> 0.6", [hex: :finch, repo: "hexpm", optional: true]}, {:gen_smtp, "~> 0.13 or ~> 1.0", [hex: :gen_smtp, repo: "hexpm", optional: true]}, {:hackney, "~> 1.9", [hex: :hackney, repo: "hexpm", optional: true]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}, {:mail, "~> 0.2", [hex: :mail, repo: "hexpm", optional: true]}, {:mime, "~> 1.1 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:plug_cowboy, ">= 1.0.0", [hex: :plug_cowboy, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4.2 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "b53891359e3ddca263ece784051243de84c9244c421a0dee1bff1d52fc5ca420"},
|
||||
"telemetry": {:hex, :telemetry, "0.4.3", "a06428a514bdbc63293cd9a6263aad00ddeb66f608163bdec7c8995784080818", [:rebar3], [], "hexpm", "eb72b8365ffda5bed68a620d1da88525e326cb82a75ee61354fc24b844768041"},
|
||||
"timex": {:hex, :timex, "3.7.5", "3eca56e23bfa4e0848f0b0a29a92fa20af251a975116c6d504966e8a90516dfd", [:mix], [{:combine, "~> 0.10", [hex: :combine, repo: "hexpm", optional: false]}, {:gettext, "~> 0.10", [hex: :gettext, repo: "hexpm", optional: false]}, {:tzdata, "~> 1.0", [hex: :tzdata, repo: "hexpm", optional: false]}], "hexpm", "a15608dca680f2ef663d71c95842c67f0af08a0f3b1d00e17bbd22872e2874e4"},
|
||||
"tzdata": {:hex, :tzdata, "1.1.0", "72f5babaa9390d0f131465c8702fa76da0919e37ba32baa90d93c583301a8359", [:mix], [{:hackney, "~> 1.17", [hex: :hackney, repo: "hexpm", optional: false]}], "hexpm", "18f453739b48d3dc5bcf0e8906d2dc112bb40baafe2c707596d89f3c8dd14034"},
|
||||
"unicode_util_compat": {:hex, :unicode_util_compat, "0.7.0", "bc84380c9ab48177092f43ac89e4dfa2c6d62b40b8bd132b1059ecc7232f9a78", [:rebar3], [], "hexpm", "25eee6d67df61960cf6a794239566599b09e17e668d3700247bc498638152521"},
|
||||
"vex": {:hex, :vex, "0.9.0", "613ea5eb3055662e7178b83e25b2df0975f68c3d8bb67c1645f0573e1a78d606", [:mix], [], "hexpm", "c69fff44d5c8aa3f1faee71bba1dcab05dd36364c5a629df8bb11751240c857f"},
|
||||
}
|
||||
|
|
|
@ -1,21 +1,7 @@
|
|||
{
|
||||
"settings": {
|
||||
"number_of_shards": 1,
|
||||
"number_of_replicas": 0,
|
||||
"analysis": {
|
||||
"analyzer": {
|
||||
"ngramAnalyzer": {
|
||||
"tokenizer": "ngramTokenizer"
|
||||
}
|
||||
},
|
||||
"tokenizer": {
|
||||
"ngramTokenizer": {
|
||||
"type": "ngram",
|
||||
"min_gram": 5,
|
||||
"max_gram": 5
|
||||
}
|
||||
}
|
||||
}
|
||||
"number_of_replicas": 0
|
||||
},
|
||||
"mappings": {
|
||||
"_doc": {
|
||||
|
@ -23,21 +9,149 @@
|
|||
"domain": {
|
||||
"type": "text",
|
||||
"fields": {
|
||||
"ngram": {
|
||||
"type": "text",
|
||||
"analyzer": "ngramAnalyzer"
|
||||
},
|
||||
"keyword": {
|
||||
"type": "keyword"
|
||||
},
|
||||
"english": {
|
||||
"type": "text",
|
||||
"analyzer": "english"
|
||||
}
|
||||
}
|
||||
},
|
||||
"description": {
|
||||
"type": "text",
|
||||
"fields": {
|
||||
"arabic": {
|
||||
"type": "text",
|
||||
"analyzer": "arabic"
|
||||
},
|
||||
"armenian": {
|
||||
"type": "text",
|
||||
"analyzer": "armenian"
|
||||
},
|
||||
"basque": {
|
||||
"type": "text",
|
||||
"analyzer": "basque"
|
||||
},
|
||||
"bengali": {
|
||||
"type": "text",
|
||||
"analyzer": "bengali"
|
||||
},
|
||||
"brazilian": {
|
||||
"type": "text",
|
||||
"analyzer": "brazilian"
|
||||
},
|
||||
"bulgarian": {
|
||||
"type": "text",
|
||||
"analyzer": "bulgarian"
|
||||
},
|
||||
"catalan": {
|
||||
"type": "text",
|
||||
"analyzer": "catalan"
|
||||
},
|
||||
"cjk": {
|
||||
"type": "text",
|
||||
"analyzer": "cjk"
|
||||
},
|
||||
"czech": {
|
||||
"type": "text",
|
||||
"analyzer": "czech"
|
||||
},
|
||||
"danish": {
|
||||
"type": "text",
|
||||
"analyzer": "danish"
|
||||
},
|
||||
"dutch": {
|
||||
"type": "text",
|
||||
"analyzer": "dutch"
|
||||
},
|
||||
"english": {
|
||||
"type": "text",
|
||||
"analyzer": "english"
|
||||
},
|
||||
"finnish": {
|
||||
"type": "text",
|
||||
"analyzer": "finnish"
|
||||
},
|
||||
"french": {
|
||||
"type": "text",
|
||||
"analyzer": "french"
|
||||
},
|
||||
"galician": {
|
||||
"type": "text",
|
||||
"analyzer": "galician"
|
||||
},
|
||||
"german": {
|
||||
"type": "text",
|
||||
"analyzer": "german"
|
||||
},
|
||||
"greek": {
|
||||
"type": "text",
|
||||
"analyzer": "greek"
|
||||
},
|
||||
"hindi": {
|
||||
"type": "text",
|
||||
"analyzer": "hindi"
|
||||
},
|
||||
"hungarian": {
|
||||
"type": "text",
|
||||
"analyzer": "hungarian"
|
||||
},
|
||||
"indonesian": {
|
||||
"type": "text",
|
||||
"analyzer": "indonesian"
|
||||
},
|
||||
"irish": {
|
||||
"type": "text",
|
||||
"analyzer": "irish"
|
||||
},
|
||||
"italian": {
|
||||
"type": "text",
|
||||
"analyzer": "italian"
|
||||
},
|
||||
"latvian": {
|
||||
"type": "text",
|
||||
"analyzer": "latvian"
|
||||
},
|
||||
"lithuanian": {
|
||||
"type": "text",
|
||||
"analyzer": "lithuanian"
|
||||
},
|
||||
"norwegian": {
|
||||
"type": "text",
|
||||
"analyzer": "norwegian"
|
||||
},
|
||||
"persian": {
|
||||
"type": "text",
|
||||
"analyzer": "persian"
|
||||
},
|
||||
"romanian": {
|
||||
"type": "text",
|
||||
"analyzer": "romanian"
|
||||
},
|
||||
"russian": {
|
||||
"type": "text",
|
||||
"analyzer": "russian"
|
||||
},
|
||||
"sorani": {
|
||||
"type": "text",
|
||||
"analyzer": "sorani"
|
||||
},
|
||||
"spanish": {
|
||||
"type": "text",
|
||||
"analyzer": "spanish"
|
||||
},
|
||||
"swedish": {
|
||||
"type": "text",
|
||||
"analyzer": "swedish"
|
||||
},
|
||||
"turkish": {
|
||||
"type": "text",
|
||||
"analyzer": "turkish"
|
||||
},
|
||||
"thai": {
|
||||
"type": "text",
|
||||
"analyzer": "thai"
|
||||
}
|
||||
}
|
||||
},
|
||||
|
@ -46,6 +160,9 @@
|
|||
},
|
||||
"user_count": {
|
||||
"type": "integer"
|
||||
},
|
||||
"opt_out": {
|
||||
"type": "boolean"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -13,7 +13,6 @@ defmodule Backend.Repo.Migrations.CreateCrawls do
|
|||
timestamps()
|
||||
end
|
||||
|
||||
# TODO: does this actually make WHERE error IS NULL queries faster? if not, drop it
|
||||
create index(:crawls, [:error])
|
||||
create index(:crawls, [:inserted_at])
|
||||
end
|
||||
|
|
|
@ -0,0 +1,11 @@
|
|||
defmodule Backend.Repo.Migrations.AddNextCrawlToInstances do
|
||||
use Ecto.Migration
|
||||
|
||||
def change do
|
||||
alter table(:instances) do
|
||||
add :next_crawl, :naive_datetime
|
||||
end
|
||||
|
||||
create index(:instances, [:next_crawl])
|
||||
end
|
||||
end
|
|
@ -0,0 +1,18 @@
|
|||
defmodule Backend.Repo.Migrations.RemoveCrawlError do
|
||||
use Ecto.Migration
|
||||
|
||||
def change do
|
||||
execute("ALTER TABLE crawls DISABLE TRIGGER ALL", "ALTER TABLE crawls ENABLE TRIGGER ALL")
|
||||
execute("DELETE FROM crawls WHERE error IS NOT NULL", "")
|
||||
execute("ALTER TABLE crawls ENABLE TRIGGER ALL", "")
|
||||
|
||||
alter table(:crawls) do
|
||||
remove :error, :string
|
||||
end
|
||||
|
||||
alter table(:instances) do
|
||||
add :crawl_error, :string
|
||||
add :crawl_error_count, :integer, default: 0, null: false
|
||||
end
|
||||
end
|
||||
end
|
|
@ -0,0 +1,40 @@
|
|||
defmodule Backend.Repo.Migrations.AddOnDeletes do
|
||||
use Ecto.Migration
|
||||
|
||||
def change do
|
||||
# Add ON DELETE CASCADE to foreign key relations.
|
||||
|
||||
# crawls -> instances
|
||||
execute("ALTER TABLE crawls DROP CONSTRAINT crawls_instance_domain_fkey")
|
||||
|
||||
execute(
|
||||
"ALTER TABLE crawls ADD CONSTRAINT crawls_instance_domain_fkey FOREIGN KEY (instance_domain) REFERENCES instances(domain) ON DELETE CASCADE"
|
||||
)
|
||||
|
||||
# instance_peers -> instances
|
||||
execute("ALTER TABLE instance_peers DROP CONSTRAINT instance_peers_source_domain_fkey")
|
||||
|
||||
execute(
|
||||
"ALTER TABLE instance_peers ADD CONSTRAINT instance_peers_source_domain_fkey FOREIGN KEY (source_domain) REFERENCES instances(domain) ON DELETE CASCADE"
|
||||
)
|
||||
|
||||
execute("ALTER TABLE instance_peers DROP CONSTRAINT instance_peers_target_domain_fkey")
|
||||
|
||||
execute(
|
||||
"ALTER TABLE instance_peers ADD CONSTRAINT instance_peers_target_domain_fkey FOREIGN KEY (target_domain) REFERENCES instances(domain) ON DELETE CASCADE"
|
||||
)
|
||||
|
||||
# edges -> instances
|
||||
execute("ALTER TABLE edges DROP CONSTRAINT edges_source_domain_fkey")
|
||||
|
||||
execute(
|
||||
"ALTER TABLE edges ADD CONSTRAINT edges_source_domain_fkey FOREIGN KEY (source_domain) REFERENCES instances(domain) ON DELETE CASCADE"
|
||||
)
|
||||
|
||||
execute("ALTER TABLE edges DROP CONSTRAINT edges_target_domain_fkey")
|
||||
|
||||
execute(
|
||||
"ALTER TABLE edges ADD CONSTRAINT edges_target_domain_fkey FOREIGN KEY (target_domain) REFERENCES instances(domain) ON DELETE CASCADE"
|
||||
)
|
||||
end
|
||||
end
|
|
@ -0,0 +1,9 @@
|
|||
defmodule Backend.Repo.Migrations.AddCrawlsIndex do
|
||||
use Ecto.Migration
|
||||
|
||||
def change do
|
||||
create index(:crawls, [:instance_domain])
|
||||
create index(:crawl_interactions, [:source_domain])
|
||||
create index(:crawl_interactions, [:target_domain])
|
||||
end
|
||||
end
|
|
@ -0,0 +1,32 @@
|
|||
defmodule Backend.Repo.Migrations.AddMostRecentCrawlTable do
|
||||
use Ecto.Migration
|
||||
|
||||
def change do
|
||||
create table(:most_recent_crawl) do
|
||||
add :instance_domain, references(:instances, column: :domain, type: :string)
|
||||
add :crawl_id, references(:crawls)
|
||||
|
||||
timestamps()
|
||||
end
|
||||
|
||||
create unique_index(:most_recent_crawl, [:instance_domain])
|
||||
|
||||
flush()
|
||||
|
||||
execute(
|
||||
"
|
||||
INSERT INTO most_recent_crawl (instance_domain, crawl_id, updated_at, inserted_at)
|
||||
SELECT
|
||||
c.instance_domain,
|
||||
MAX(c.id) AS crawl_id,
|
||||
(SELECT NOW()) AS updated_at,
|
||||
(SELECT NOW()) AS inserted_at
|
||||
FROM
|
||||
crawls c
|
||||
GROUP BY
|
||||
c.instance_domain
|
||||
",
|
||||
"DELETE FROM most_recent_crawl"
|
||||
)
|
||||
end
|
||||
end
|
|
@ -0,0 +1,25 @@
|
|||
defmodule Backend.Repo.Migrations.AddMostRecentCrawlOnDelete do
|
||||
use Ecto.Migration
|
||||
|
||||
def change do
|
||||
execute(
|
||||
"ALTER TABLE most_recent_crawl DROP CONSTRAINT most_recent_crawl_crawl_id_fkey",
|
||||
"ALTER TABLE most_recent_crawl ADD CONSTRAINT most_recent_crawl_crawl_id_fkey FOREIGN KEY (crawl_id) REFERENCES crawls(id)"
|
||||
)
|
||||
|
||||
execute(
|
||||
"ALTER TABLE most_recent_crawl ADD CONSTRAINT most_recent_crawl_crawl_id_fkey FOREIGN KEY (crawl_id) REFERENCES crawls(id) ON DELETE CASCADE",
|
||||
"ALTER TABLE most_recent_crawl DROP CONSTRAINT most_recent_crawl_crawl_id_fkey"
|
||||
)
|
||||
|
||||
execute(
|
||||
"ALTER TABLE most_recent_crawl DROP CONSTRAINT most_recent_crawl_instance_domain_fkey",
|
||||
"ALTER TABLE most_recent_crawl ADD CONSTRAINT most_recent_crawl_instance_domain_fkey FOREIGN KEY (instance_domain) REFERENCES instances(domain)"
|
||||
)
|
||||
|
||||
execute(
|
||||
"ALTER TABLE most_recent_crawl ADD CONSTRAINT most_recent_crawl_instance_domain_fkey FOREIGN KEY (instance_domain) REFERENCES instances(domain) ON DELETE CASCADE",
|
||||
"ALTER TABLE most_recent_crawl DROP CONSTRAINT most_recent_crawl_instance_domain_fkey"
|
||||
)
|
||||
end
|
||||
end
|
|
@ -0,0 +1,25 @@
|
|||
defmodule Backend.Repo.Migrations.AddCrawlInteractionOnDelete do
|
||||
use Ecto.Migration
|
||||
|
||||
def change do
|
||||
execute(
|
||||
"ALTER TABLE crawl_interactions DROP CONSTRAINT crawl_interactions_source_domain_fkey",
|
||||
"ALTER TABLE crawl_interactions ADD CONSTRAINT crawl_interactions_source_domain_fkey FOREIGN KEY (source_domain) REFERENCES instances(domain)"
|
||||
)
|
||||
|
||||
execute(
|
||||
"ALTER TABLE crawl_interactions ADD CONSTRAINT crawl_interactions_source_domain_fkey FOREIGN KEY (source_domain) REFERENCES instances(domain) ON DELETE CASCADE",
|
||||
"ALTER TABLE crawl_interactions DROP CONSTRAINT crawl_interactions_source_domain_fkey"
|
||||
)
|
||||
|
||||
execute(
|
||||
"ALTER TABLE crawl_interactions DROP CONSTRAINT crawl_interactions_target_domain_fkey",
|
||||
"ALTER TABLE crawl_interactions ADD CONSTRAINT crawl_interactions_target_domain_fkey FOREIGN KEY (target_domain) REFERENCES instances(domain)"
|
||||
)
|
||||
|
||||
execute(
|
||||
"ALTER TABLE crawl_interactions ADD CONSTRAINT crawl_interactions_target_domain_fkey FOREIGN KEY (target_domain) REFERENCES instances(domain) ON DELETE CASCADE",
|
||||
"ALTER TABLE crawl_interactions DROP CONSTRAINT crawl_interactions_target_domain_fkey"
|
||||
)
|
||||
end
|
||||
end
|
|
@ -0,0 +1,22 @@
|
|||
defmodule Backend.Repo.Migrations.CreateFederationRestrictions do
|
||||
use Ecto.Migration
|
||||
|
||||
def change do
|
||||
create table(:federation_restrictions) do
|
||||
add :source_domain,
|
||||
references(:instances, column: :domain, type: :string, on_delete: :delete_all),
|
||||
null: false
|
||||
|
||||
add :target_domain,
|
||||
references(:instances, column: :domain, type: :string, on_delete: :delete_all),
|
||||
null: false
|
||||
|
||||
add :type, :string, null: false
|
||||
|
||||
timestamps()
|
||||
end
|
||||
|
||||
create index(:federation_restrictions, [:source_domain])
|
||||
create index(:federation_restrictions, [:target_domain])
|
||||
end
|
||||
end
|
|
@ -50,7 +50,10 @@ defmodule BackendWeb.InstanceControllerTest do
|
|||
describe "update instance" do
|
||||
setup [:create_instance]
|
||||
|
||||
test "renders instance when data is valid", %{conn: conn, instance: %Instance{id: id} = instance} do
|
||||
test "renders instance when data is valid", %{
|
||||
conn: conn,
|
||||
instance: %Instance{id: id} = instance
|
||||
} do
|
||||
conn = put(conn, Routes.instance_path(conn, :update, instance), instance: @update_attrs)
|
||||
assert %{"id" => ^id} = json_response(conn, 200)["data"]
|
||||
|
||||
|
|
|
@ -14,6 +14,7 @@ defmodule BackendWeb.ChannelCase do
|
|||
"""
|
||||
|
||||
use ExUnit.CaseTemplate
|
||||
alias Ecto.Adapters.SQL.Sandbox
|
||||
|
||||
using do
|
||||
quote do
|
||||
|
@ -26,10 +27,10 @@ defmodule BackendWeb.ChannelCase do
|
|||
end
|
||||
|
||||
setup tags do
|
||||
:ok = Ecto.Adapters.SQL.Sandbox.checkout(Backend.Repo)
|
||||
:ok = Sandbox.checkout(Backend.Repo)
|
||||
|
||||
unless tags[:async] do
|
||||
Ecto.Adapters.SQL.Sandbox.mode(Backend.Repo, {:shared, self()})
|
||||
Sandbox.mode(Backend.Repo, {:shared, self()})
|
||||
end
|
||||
|
||||
:ok
|
||||
|
|
|
@ -14,6 +14,7 @@ defmodule BackendWeb.ConnCase do
|
|||
"""
|
||||
|
||||
use ExUnit.CaseTemplate
|
||||
alias Ecto.Adapters.SQL.Sandbox
|
||||
|
||||
using do
|
||||
quote do
|
||||
|
@ -27,10 +28,10 @@ defmodule BackendWeb.ConnCase do
|
|||
end
|
||||
|
||||
setup tags do
|
||||
:ok = Ecto.Adapters.SQL.Sandbox.checkout(Backend.Repo)
|
||||
:ok = Sandbox.checkout(Backend.Repo)
|
||||
|
||||
unless tags[:async] do
|
||||
Ecto.Adapters.SQL.Sandbox.mode(Backend.Repo, {:shared, self()})
|
||||
Sandbox.mode(Backend.Repo, {:shared, self()})
|
||||
end
|
||||
|
||||
{:ok, conn: Phoenix.ConnTest.build_conn()}
|
||||
|
|
|
@ -13,6 +13,7 @@ defmodule Backend.DataCase do
|
|||
"""
|
||||
|
||||
use ExUnit.CaseTemplate
|
||||
alias Ecto.Adapters.SQL.Sandbox
|
||||
|
||||
using do
|
||||
quote do
|
||||
|
@ -26,10 +27,10 @@ defmodule Backend.DataCase do
|
|||
end
|
||||
|
||||
setup tags do
|
||||
:ok = Ecto.Adapters.SQL.Sandbox.checkout(Backend.Repo)
|
||||
:ok = Sandbox.checkout(Backend.Repo)
|
||||
|
||||
unless tags[:async] do
|
||||
Ecto.Adapters.SQL.Sandbox.mode(Backend.Repo, {:shared, self()})
|
||||
Sandbox.mode(Backend.Repo, {:shared, self()})
|
||||
end
|
||||
|
||||
:ok
|
||||
|
|
|
@ -1,65 +0,0 @@
|
|||
version: "3"
|
||||
services:
|
||||
db:
|
||||
image: postgres
|
||||
environment:
|
||||
- DATABASE_URL
|
||||
ports:
|
||||
- "5432:5432"
|
||||
volumes:
|
||||
- pgdata:/var/lib/postgresql/data
|
||||
networks:
|
||||
- database_network
|
||||
elasticsearch:
|
||||
image: elasticsearch:6.8.1
|
||||
ports:
|
||||
- "9200:9200"
|
||||
volumes:
|
||||
- esdata:/usr/share/elasticsearch/data
|
||||
networks:
|
||||
- phoenix_network
|
||||
- es_network
|
||||
# Kibana is just for development, really
|
||||
kibana:
|
||||
image: kibana:6.8.1
|
||||
networks:
|
||||
- es_network
|
||||
ports:
|
||||
- "5601:5601"
|
||||
# This is for running the occasional graph layout task. It's in docker-compose.yml so that it's built at the same time
|
||||
# as everything else, but it should be run regularly with a cron job or similar.
|
||||
gephi:
|
||||
environment:
|
||||
- DATABASE_URL
|
||||
build: ./gephi
|
||||
volumes:
|
||||
- gradle-cache:/code/.gradle
|
||||
depends_on:
|
||||
- db
|
||||
networks:
|
||||
- database_network
|
||||
phoenix:
|
||||
build: ./backend
|
||||
networks:
|
||||
- database_network
|
||||
- phoenix_network
|
||||
depends_on:
|
||||
- db
|
||||
ports:
|
||||
- "${PORT}:${PORT}"
|
||||
environment:
|
||||
- DATABASE_URL
|
||||
- SECRET_KEY_BASE
|
||||
- PORT
|
||||
- BACKEND_HOSTNAME
|
||||
volumes:
|
||||
pgdata:
|
||||
esdata:
|
||||
gradle-cache:
|
||||
networks:
|
||||
database_network:
|
||||
driver: bridge
|
||||
phoenix_network:
|
||||
driver: bridge
|
||||
es_network:
|
||||
driver: bridge
|
|
@ -0,0 +1,4 @@
|
|||
node_modules
|
||||
dist
|
||||
build
|
||||
coverage
|
|
@ -0,0 +1,24 @@
|
|||
module.exports = {
|
||||
root: true,
|
||||
parser: "@typescript-eslint/parser",
|
||||
parserOptions: {
|
||||
tsconfigRootDir: __dirname,
|
||||
project: ["./tsconfig.json"],
|
||||
},
|
||||
plugins: ["@typescript-eslint", "prettier"],
|
||||
extends: [
|
||||
"plugin:@typescript-eslint/recommended-requiring-type-checking",
|
||||
"plugin:@typescript-eslint/eslint-recommended",
|
||||
"plugin:@typescript-eslint/recommended",
|
||||
"plugin:react/recommended",
|
||||
"plugin:react-hooks/recommended",
|
||||
"prettier/@typescript-eslint",
|
||||
"prettier",
|
||||
],
|
||||
rules: {
|
||||
"@typescript-eslint/no-explicit-any": 0,
|
||||
"@typescript-eslint/explicit-function-return-type": 0,
|
||||
"react/prop-types": 0,
|
||||
"@typescript-eslint/no-non-null-assertion": 0
|
||||
},
|
||||
};
|
|
@ -0,0 +1,3 @@
|
|||
module.exports = {
|
||||
printWidth: 100
|
||||
}
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,37 @@
|
|||
server {
|
||||
listen 80;
|
||||
listen [::]:80;
|
||||
server_name fediverse.space;
|
||||
|
||||
gzip on;
|
||||
gzip_comp_level 5;
|
||||
gzip_min_length 256;
|
||||
gzip_proxied any;
|
||||
gzip_vary on;
|
||||
|
||||
gzip_types
|
||||
application/javascript
|
||||
application/vnd.geo+json
|
||||
application/vnd.ms-fontobject
|
||||
application/x-font-ttf
|
||||
application/x-web-app-manifest+json
|
||||
font/opentype
|
||||
image/bmp
|
||||
image/svg+xml
|
||||
image/x-icon
|
||||
text/cache-manifest
|
||||
text/css
|
||||
text/plain
|
||||
text/vcard
|
||||
text/vnd.rim.location.xloc
|
||||
text/vtt
|
||||
text/x-component
|
||||
text/x-cross-domain-policy;
|
||||
|
||||
root /website;
|
||||
index index.html;
|
||||
|
||||
location / {
|
||||
try_files $uri /index.html;
|
||||
}
|
||||
}
|
File diff suppressed because it is too large
Load Diff
|
@ -1,16 +1,18 @@
|
|||
{
|
||||
"name": "frontend",
|
||||
"version": "2.3.0",
|
||||
"version": "2.8.2",
|
||||
"private": true,
|
||||
"scripts": {
|
||||
"start": "NODE_ENV=development react-scripts start",
|
||||
"build": "react-scripts build",
|
||||
"typecheck": "tsc --noemit",
|
||||
"lint": "yarn typecheck && tslint -p tsconfig.json -c tslint.json \"src/**/*.{ts,tsx}\"",
|
||||
"lint": "yarn typecheck && yarn eslint src/ --ext .js,.jsx,.ts,.tsx",
|
||||
"lint:fix": "yarn lint --fix",
|
||||
"pretty": "prettier --write \"src/**/*.{ts,tsx}\"",
|
||||
"test": "yarn lint && react-scripts test",
|
||||
"eject": "react-scripts eject"
|
||||
"test": "yarn lint && react-scripts test --ci",
|
||||
"eject": "react-scripts eject",
|
||||
"snyk-protect": "snyk protect",
|
||||
"prepare": "yarn run snyk-protect"
|
||||
},
|
||||
"husky": {
|
||||
"hooks": {
|
||||
|
@ -20,68 +22,74 @@
|
|||
"lint-staged": {
|
||||
"src/**/*.{ts,tsx}": [
|
||||
"yarn pretty",
|
||||
"yarn lint:fix",
|
||||
"git add"
|
||||
"yarn lint:fix"
|
||||
]
|
||||
},
|
||||
"prettier": {
|
||||
"printWidth": 120
|
||||
},
|
||||
"dependencies": {
|
||||
"@blueprintjs/core": "^3.17.1",
|
||||
"@blueprintjs/icons": "^3.9.1",
|
||||
"@blueprintjs/select": "^3.9.0",
|
||||
"@blueprintjs/core": "^3.33.0",
|
||||
"@blueprintjs/icons": "^3.22.0",
|
||||
"@blueprintjs/select": "^3.14.2",
|
||||
"classnames": "^2.2.6",
|
||||
"connected-react-router": "^6.5.2",
|
||||
"cross-fetch": "^3.0.4",
|
||||
"cytoscape": "^3.8.1",
|
||||
"cytoscape-popper": "^1.0.4",
|
||||
"cross-fetch": "^3.0.6",
|
||||
"cytoscape": "^3.16.1",
|
||||
"cytoscape-popper": "^1.0.7",
|
||||
"inflection": "^1.12.0",
|
||||
"lodash": "^4.17.15",
|
||||
"moment": "^2.22.2",
|
||||
"lodash": "^4.17.20",
|
||||
"moment": "^2.29.1",
|
||||
"normalize.css": "^8.0.0",
|
||||
"numeral": "^2.0.6",
|
||||
"react": "^16.8.0",
|
||||
"react-dom": "^16.8.0",
|
||||
"react-redux": "^7.1.0",
|
||||
"react-router-dom": "^5.0.1",
|
||||
"react-scripts": "^3.0.1",
|
||||
"react-sigma": "^1.2.30",
|
||||
"react-virtualized": "^9.21.1",
|
||||
"react": "^16.10.2",
|
||||
"react-dom": "^16.10.2",
|
||||
"react-redux": "^7.2.1",
|
||||
"react-router-dom": "^5.2.0",
|
||||
"react-sigma": "^1.2.35",
|
||||
"react-virtualized": "^9.22.2",
|
||||
"redux": "^4.0.4",
|
||||
"redux-thunk": "^2.3.0",
|
||||
"sanitize-html": "^1.20.1",
|
||||
"styled-components": "^4.3.2",
|
||||
"tippy.js": "^4.3.4"
|
||||
"sanitize-html": "^2.0.0",
|
||||
"snyk": "^1.410.1",
|
||||
"styled-components": "^5.2.0",
|
||||
"tippy.js": "^4.3.5"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@blueprintjs/tslint-config": "^1.8.1",
|
||||
"@types/classnames": "^2.2.9",
|
||||
"@types/cytoscape": "^3.8.0",
|
||||
"@types/cytoscape": "^3.14.7",
|
||||
"@types/inflection": "^1.5.28",
|
||||
"@types/jest": "^24.0.15",
|
||||
"@types/lodash": "^4.14.136",
|
||||
"@types/node": "^12.6.8",
|
||||
"@types/numeral": "^0.0.25",
|
||||
"@types/react": "^16.8.23",
|
||||
"@types/react-dom": "^16.8.4",
|
||||
"@types/react-redux": "^7.1.1",
|
||||
"@types/react-router-dom": "^4.3.4",
|
||||
"@types/react-virtualized": "^9.21.3",
|
||||
"@types/sanitize-html": "^1.20.1",
|
||||
"@types/styled-components": "4.1.18",
|
||||
"husky": "^3.0.1",
|
||||
"lint-staged": "^9.2.0",
|
||||
"react-axe": "^3.2.0",
|
||||
"tslint": "^5.18.0",
|
||||
"tslint-config-security": "^1.16.0",
|
||||
"tslint-eslint-rules": "^5.4.0",
|
||||
"typescript": "^3.5.3"
|
||||
"@types/jest": "^26.0.14",
|
||||
"@types/lodash": "^4.14.161",
|
||||
"@types/node": "^14.11.5",
|
||||
"@types/numeral": "^0.0.28",
|
||||
"@types/react": "^16.9.51",
|
||||
"@types/react-axe": "^3.1.0",
|
||||
"@types/react-dom": "^16.9.8",
|
||||
"@types/react-redux": "^7.1.9",
|
||||
"@types/react-router-dom": "^5.1.6",
|
||||
"@types/sanitize-html": "^1.27.0",
|
||||
"@types/styled-components": "5.1.3",
|
||||
"@typescript-eslint/eslint-plugin": "^2.24.0",
|
||||
"@typescript-eslint/parser": "^2.34.0",
|
||||
"eslint-config-airbnb-typescript": "^7.2.1",
|
||||
"eslint-config-prettier": "^6.12.0",
|
||||
"eslint-plugin-import": "^2.22.1",
|
||||
"eslint-plugin-jsx-a11y": "^6.3.1",
|
||||
"eslint-plugin-prettier": "^3.1.4",
|
||||
"eslint-plugin-react": "^7.21.3",
|
||||
"eslint-plugin-react-hooks": "^4.1.2",
|
||||
"husky": "^4.3.0",
|
||||
"lint-staged": "^10.4.0",
|
||||
"prettier": "^2.1.2",
|
||||
"react-scripts": "3.4.3",
|
||||
"typescript": "^3.9.2"
|
||||
},
|
||||
"browserslist": [
|
||||
">0.2%",
|
||||
"not dead",
|
||||
"not ie <= 11",
|
||||
"not op_mini all"
|
||||
]
|
||||
],
|
||||
"snyk": true
|
||||
}
|
||||
|
|
|
@ -4,20 +4,30 @@ import { Classes } from "@blueprintjs/core";
|
|||
|
||||
import { ConnectedRouter } from "connected-react-router";
|
||||
import { Route } from "react-router-dom";
|
||||
import { Nav } from "./components/organisms/";
|
||||
import { AboutScreen, AdminScreen, GraphScreen, LoginScreen, VerifyLoginScreen } from "./components/screens/";
|
||||
import { Nav } from "./components/organisms";
|
||||
import {
|
||||
AboutScreen,
|
||||
AdminScreen,
|
||||
GraphScreen,
|
||||
LoginScreen,
|
||||
TableScreen,
|
||||
VerifyLoginScreen,
|
||||
} from "./components/screens";
|
||||
import { history } from "./index";
|
||||
|
||||
const AppRouter: React.FC = () => (
|
||||
<ConnectedRouter history={history}>
|
||||
<div className={`${Classes.DARK} App`}>
|
||||
<Nav />
|
||||
<Route path="/about" exact={true} component={AboutScreen} />
|
||||
<Route path="/admin/login" exact={true} component={LoginScreen} />
|
||||
<Route path="/admin/verify" exact={true} component={VerifyLoginScreen} />
|
||||
<Route path="/admin" exact={true} component={AdminScreen} />
|
||||
{/* We always want the GraphScreen to be rendered (since un- and re-mounting it is expensive */}
|
||||
<GraphScreen />
|
||||
<main role="main">
|
||||
<Route path="/instances" exact component={TableScreen} />
|
||||
<Route path="/about" exact component={AboutScreen} />
|
||||
<Route path="/admin/login" exact component={LoginScreen} />
|
||||
<Route path="/admin/verify" exact component={VerifyLoginScreen} />
|
||||
<Route path="/admin" exact component={AdminScreen} />
|
||||
{/* We always want the GraphScreen to be rendered (since un- and re-mounting it is expensive */}
|
||||
<GraphScreen />
|
||||
</main>
|
||||
</div>
|
||||
</ConnectedRouter>
|
||||
);
|
||||
|
|
Binary file not shown.
After Width: | Height: | Size: 5.2 KiB |
|
@ -0,0 +1 @@
|
|||
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 170.08 170.08"><defs><style>.cls-1{fill:#04246e;}.cls-2{fill:#fff;}</style></defs><title>square-mark-white</title><g id="Layer_2" data-name="Layer 2"><g id="Layer_1-2" data-name="Layer 1"><rect class="cls-1" width="170.08" height="170.08"/><path class="cls-2" d="M127.34,58c-8.4,0-14.41,7.4-20.6,15.65-3.84-17.51-8.11-35-21.7-35s-17.86,17.5-21.71,35C57.15,65.37,51.13,58,42.74,58c-5.35,0-14.39,3.63-14.39,17.25L28.41,100c0,13.36,7.14,16.64,11.42,17.76h0c7.39,1.91,25.17,3.69,45.18,3.69s37.8-1.78,45.18-3.69h0c4.28-1.12,11.42-4.4,11.42-17.76l.06-24.79c0-13.62-9-17.25-14.39-17.25M85,46.33c8.15,0,11.24,16.06,15.57,35.33C95.86,87.5,90.83,92.21,85,92.21S74.21,87.5,69.47,81.66c4-18,7.17-35.33,15.57-35.33M36.13,100l-.06-24.79c0-8.6,4.67-9.53,6.67-9.53,5.92,0,12.28,9.88,18.36,17.83-4.29,18-8.83,29-19.56,26.72-2.78-.77-5.41-2.53-5.41-10.23m21,12.53c4.75-5.16,7.78-13.21,10.08-21.73,5,5.19,10.69,9.13,17.8,9.13S97.86,96,102.83,90.81c2.31,8.52,5.33,16.57,10.07,21.73-8.29.75-18.28,1.21-27.86,1.21s-19.59-.46-27.88-1.21M134,100c0,7.7-2.63,9.46-5.42,10.23-10.73,2.27-15.26-8.72-19.56-26.72,6.08-7.94,12.45-17.83,18.37-17.83,2,0,6.67.93,6.67,9.53Z"/></g></g></svg>
|
After Width: | Height: | Size: 1.2 KiB |
Binary file not shown.
After Width: | Height: | Size: 22 KiB |
Binary file not shown.
Before Width: | Height: | Size: 4.8 KiB After Width: | Height: | Size: 11 KiB |
|
@ -8,10 +8,10 @@ const FloatingCardRow = styled.div`
|
|||
`;
|
||||
const FloatingCardElement = styled(Card)`
|
||||
margin: 0 0 10px 10px;
|
||||
z-index: 20;
|
||||
z-index: 2;
|
||||
`;
|
||||
|
||||
const FloatingCard: React.FC<ICardProps> = props => (
|
||||
const FloatingCard: React.FC<ICardProps> = (props) => (
|
||||
<FloatingCardRow>
|
||||
<FloatingCardElement elevation={Elevation.ONE} {...props} />
|
||||
</FloatingCardRow>
|
||||
|
|
|
@ -0,0 +1,19 @@
|
|||
import { Switch } from "@blueprintjs/core";
|
||||
import * as React from "react";
|
||||
import styled from "styled-components";
|
||||
import FloatingCard from "./FloatingCard";
|
||||
|
||||
const StyledSwitch = styled(Switch)`
|
||||
margin: 0;
|
||||
`;
|
||||
|
||||
interface GraphHideEdgesButtonProps {
|
||||
isShowingEdges: boolean;
|
||||
toggleEdges: () => void;
|
||||
}
|
||||
const GraphHideEdgesButton: React.FC<GraphHideEdgesButtonProps> = ({ isShowingEdges, toggleEdges }) => (
|
||||
<FloatingCard>
|
||||
<StyledSwitch checked={isShowingEdges} label="Show connections" onChange={toggleEdges} tabIndex={-1} />
|
||||
</FloatingCard>
|
||||
);
|
||||
export default GraphHideEdgesButton;
|
|
@ -6,9 +6,9 @@ import React from "react";
|
|||
import styled from "styled-components";
|
||||
import { FloatingCard, InstanceType } from ".";
|
||||
import { QUANTITATIVE_COLOR_SCHEME } from "../../constants";
|
||||
import { IColorScheme } from "../../types";
|
||||
import { ColorScheme } from "../../types";
|
||||
|
||||
const ColorSchemeSelect = Select.ofType<IColorScheme>();
|
||||
const ColorSchemeSelect = Select.ofType<ColorScheme>();
|
||||
|
||||
const StyledLi = styled.li`
|
||||
margin-top: 2px;
|
||||
|
@ -27,12 +27,12 @@ const ColorBarContainer = styled.div`
|
|||
flex-direction: column;
|
||||
margin-right: 10px;
|
||||
`;
|
||||
interface IColorBarProps {
|
||||
interface ColorBarProps {
|
||||
color: string;
|
||||
}
|
||||
const ColorBar = styled.div<IColorBarProps>`
|
||||
const ColorBar = styled.div<ColorBarProps>`
|
||||
width: 10px;
|
||||
background-color: ${props => props.color};
|
||||
background-color: ${(props) => props.color};
|
||||
flex: 1;
|
||||
`;
|
||||
const TextContainer = styled.div`
|
||||
|
@ -41,13 +41,46 @@ const TextContainer = styled.div`
|
|||
justify-content: space-between;
|
||||
`;
|
||||
|
||||
interface IGraphKeyProps {
|
||||
current?: IColorScheme;
|
||||
colorSchemes: IColorScheme[];
|
||||
const renderItem: ItemRenderer<ColorScheme> = (colorScheme, { handleClick, modifiers }) => {
|
||||
if (!modifiers.matchesPredicate) {
|
||||
return null;
|
||||
}
|
||||
return <MenuItem active={modifiers.active} key={colorScheme.name} onClick={handleClick} text={colorScheme.name} />;
|
||||
};
|
||||
|
||||
const renderQualitativeKey = (values: string[]) => (
|
||||
<ul className={Classes.LIST_UNSTYLED}>
|
||||
{values.map((v) => (
|
||||
<StyledLi key={v}>
|
||||
<InstanceType type={v} />
|
||||
</StyledLi>
|
||||
))}
|
||||
</ul>
|
||||
);
|
||||
|
||||
const renderQuantitativeKey = (range: number[]) => {
|
||||
const [min, max] = range;
|
||||
return (
|
||||
<ColorKeyContainer>
|
||||
<ColorBarContainer>
|
||||
{QUANTITATIVE_COLOR_SCHEME.map((color) => (
|
||||
<ColorBar color={color} key={color} />
|
||||
))}
|
||||
</ColorBarContainer>
|
||||
<TextContainer>
|
||||
<span className={Classes.TEXT_SMALL}>{numeral.default(min).format("0")}</span>
|
||||
<span className={Classes.TEXT_SMALL}>{numeral.default(max).format("0")}</span>
|
||||
</TextContainer>
|
||||
</ColorKeyContainer>
|
||||
);
|
||||
};
|
||||
interface GraphKeyProps {
|
||||
current?: ColorScheme;
|
||||
colorSchemes: ColorScheme[];
|
||||
ranges?: { [key: string]: [number, number] };
|
||||
onItemSelect: (colorScheme?: IColorScheme) => void;
|
||||
onItemSelect: (colorScheme?: ColorScheme) => void;
|
||||
}
|
||||
const GraphKey: React.FC<IGraphKeyProps> = ({ current, colorSchemes, ranges, onItemSelect }) => {
|
||||
const GraphKey: React.FC<GraphKeyProps> = ({ current, colorSchemes, ranges, onItemSelect }) => {
|
||||
const unsetColorScheme = () => {
|
||||
onItemSelect(undefined);
|
||||
};
|
||||
|
@ -74,8 +107,9 @@ const GraphKey: React.FC<IGraphKeyProps> = ({ current, colorSchemes, ranges, onI
|
|||
text={(current && current.name) || "Select..."}
|
||||
icon={IconNames.TINT}
|
||||
rightIcon={IconNames.CARET_DOWN}
|
||||
tabIndex={-1}
|
||||
/>
|
||||
<Button icon={IconNames.SMALL_CROSS} minimal={true} onClick={unsetColorScheme} disabled={!current} />
|
||||
<Button icon={IconNames.SMALL_CROSS} minimal onClick={unsetColorScheme} disabled={!current} tabIndex={-1} />
|
||||
</ColorSchemeSelect>
|
||||
<br />
|
||||
{!!current && !!key && (
|
||||
|
@ -88,38 +122,4 @@ const GraphKey: React.FC<IGraphKeyProps> = ({ current, colorSchemes, ranges, onI
|
|||
);
|
||||
};
|
||||
|
||||
const renderItem: ItemRenderer<IColorScheme> = (colorScheme, { handleClick, modifiers }) => {
|
||||
if (!modifiers.matchesPredicate) {
|
||||
return null;
|
||||
}
|
||||
return <MenuItem active={modifiers.active} key={colorScheme.name} onClick={handleClick} text={colorScheme.name} />;
|
||||
};
|
||||
|
||||
const renderQualitativeKey = (values: string[]) => (
|
||||
<ul className={Classes.LIST_UNSTYLED}>
|
||||
{values.map(v => (
|
||||
<StyledLi key={v}>
|
||||
<InstanceType type={v} />
|
||||
</StyledLi>
|
||||
))}
|
||||
</ul>
|
||||
);
|
||||
|
||||
const renderQuantitativeKey = (range: number[]) => {
|
||||
const [min, max] = range;
|
||||
return (
|
||||
<ColorKeyContainer>
|
||||
<ColorBarContainer>
|
||||
{QUANTITATIVE_COLOR_SCHEME.map((color, idx) => (
|
||||
<ColorBar color={color} key={color} />
|
||||
))}
|
||||
</ColorBarContainer>
|
||||
<TextContainer>
|
||||
<span className={Classes.TEXT_SMALL}>{numeral.default(min).format("0")}</span>
|
||||
<span className={Classes.TEXT_SMALL}>{numeral.default(max).format("0")}</span>
|
||||
</TextContainer>
|
||||
</ColorKeyContainer>
|
||||
);
|
||||
};
|
||||
|
||||
export default GraphKey;
|
||||
|
|
|
@ -2,12 +2,12 @@ import { Button } from "@blueprintjs/core";
|
|||
import * as React from "react";
|
||||
import FloatingCard from "./FloatingCard";
|
||||
|
||||
interface IGraphResetButtonProps {
|
||||
interface GraphResetButtonProps {
|
||||
onClick: () => void;
|
||||
}
|
||||
const GraphResetButton: React.FC<IGraphResetButtonProps> = ({ onClick }) => (
|
||||
const GraphResetButton: React.FC<GraphResetButtonProps> = ({ onClick }) => (
|
||||
<FloatingCard>
|
||||
<Button icon="compass" title="Reset graph view" onClick={onClick} />
|
||||
<Button icon="compass" title="Reset graph view" onClick={onClick} tabIndex={-1} />
|
||||
</FloatingCard>
|
||||
);
|
||||
export default GraphResetButton;
|
||||
|
|
|
@ -3,9 +3,9 @@ import { IconNames } from "@blueprintjs/icons";
|
|||
import React from "react";
|
||||
import { QUALITATIVE_COLOR_SCHEME } from "../../constants";
|
||||
import { typeColorScheme } from "../../types";
|
||||
import { capitalize } from "../../util";
|
||||
import { getTypeDisplayString } from "../../util";
|
||||
|
||||
interface IInstanceTypeProps {
|
||||
interface InstanceTypeProps {
|
||||
type: string;
|
||||
colorAfterName?: boolean;
|
||||
}
|
||||
|
@ -13,9 +13,9 @@ interface IInstanceTypeProps {
|
|||
* By default, renders the color followed by the name of the instance type.
|
||||
* You can change this by passing `colorAfterName={true}`.
|
||||
*/
|
||||
const InstanceType: React.FC<IInstanceTypeProps> = ({ type, colorAfterName }) => {
|
||||
const InstanceType: React.FC<InstanceTypeProps> = ({ type, colorAfterName }) => {
|
||||
const idx = typeColorScheme.values.indexOf(type);
|
||||
const name = " " + capitalize(type);
|
||||
const name = ` ${getTypeDisplayString(type)}`;
|
||||
return (
|
||||
<>
|
||||
{!!colorAfterName && name}
|
||||
|
|
|
@ -8,18 +8,24 @@ const Backdrop = styled.div`
|
|||
left: 0;
|
||||
right: 0;
|
||||
background-color: #293742;
|
||||
z-index: 100;
|
||||
z-index: 3;
|
||||
`;
|
||||
|
||||
const Container = styled.div`
|
||||
max-width: 800px;
|
||||
interface ContainerProps {
|
||||
fullWidth?: boolean;
|
||||
}
|
||||
const Container = styled.div<ContainerProps>`
|
||||
max-width: ${(props) => (props.fullWidth ? "100%" : "800px")};
|
||||
margin: auto;
|
||||
padding: 2em;
|
||||
`;
|
||||
|
||||
const Page: React.FC = ({ children }) => (
|
||||
interface PageProps {
|
||||
fullWidth?: boolean;
|
||||
}
|
||||
const Page: React.FC<PageProps> = ({ children, fullWidth }) => (
|
||||
<Backdrop>
|
||||
<Container>{children}</Container>
|
||||
<Container fullWidth={fullWidth}>{children}</Container>
|
||||
</Backdrop>
|
||||
);
|
||||
|
||||
|
|
|
@ -2,4 +2,5 @@ export { default as Page } from "./Page";
|
|||
export { default as FloatingCard } from "./FloatingCard";
|
||||
export { default as GraphKey } from "./GraphKey";
|
||||
export { default as GraphResetButton } from "./GraphResetButton";
|
||||
export { default as GraphHideEdgesButton } from "./GraphHideEdgesButton";
|
||||
export { default as InstanceType } from "./InstanceType";
|
||||
|
|
|
@ -10,10 +10,10 @@ import {
|
|||
QUALITATIVE_COLOR_SCHEME,
|
||||
QUANTITATIVE_COLOR_SCHEME,
|
||||
SEARCH_RESULT_COLOR,
|
||||
SELECTED_NODE_COLOR
|
||||
SELECTED_NODE_COLOR,
|
||||
} from "../../constants";
|
||||
import { IColorScheme } from "../../types";
|
||||
import { getBuckets } from "../../util";
|
||||
import { ColorScheme } from "../../types";
|
||||
import { getBuckets, getTypeDisplayString } from "../../util";
|
||||
|
||||
const CytoscapeContainer = styled.div`
|
||||
width: 100%;
|
||||
|
@ -21,51 +21,53 @@ const CytoscapeContainer = styled.div`
|
|||
flex: 1;
|
||||
`;
|
||||
|
||||
interface ICytoscapeProps {
|
||||
colorScheme?: IColorScheme;
|
||||
interface CytoscapeProps {
|
||||
colorScheme?: ColorScheme;
|
||||
currentNodeId: string | null;
|
||||
elements: cytoscape.ElementsDefinition;
|
||||
hoveringOver?: string;
|
||||
ranges?: { [key: string]: [number, number] };
|
||||
searchResultIds?: string[];
|
||||
showEdges: boolean;
|
||||
navigateToInstancePath?: (domain: string) => void;
|
||||
navigateToRoot?: () => void;
|
||||
}
|
||||
class Cytoscape extends React.PureComponent<ICytoscapeProps> {
|
||||
class Cytoscape extends React.PureComponent<CytoscapeProps> {
|
||||
private cy?: cytoscape.Core;
|
||||
|
||||
public componentDidMount() {
|
||||
// eslint-disable-next-line react/no-find-dom-node
|
||||
const container = ReactDOM.findDOMNode(this);
|
||||
this.cy = cytoscape({
|
||||
autoungrabify: true,
|
||||
container: container as any,
|
||||
elements: this.props.elements,
|
||||
elements: this.cleanElements(this.props.elements),
|
||||
hideEdgesOnViewport: true,
|
||||
hideLabelsOnViewport: true,
|
||||
layout: {
|
||||
name: "preset"
|
||||
name: "preset",
|
||||
},
|
||||
maxZoom: 2,
|
||||
minZoom: 0.02,
|
||||
minZoom: 0.01,
|
||||
pixelRatio: 1.0,
|
||||
selectionType: "single"
|
||||
selectionType: "single",
|
||||
});
|
||||
|
||||
// Setup node tooltip on hover
|
||||
this.cy.nodes().forEach(n => {
|
||||
const domain = n.data("id");
|
||||
this.cy.nodes().forEach((n) => {
|
||||
const tooltipContent = `${n.data("id")} (${getTypeDisplayString(n.data("type"))})`;
|
||||
const ref = (n as any).popperRef();
|
||||
const t = tippy(ref, {
|
||||
animateFill: false,
|
||||
animation: "fade",
|
||||
content: domain,
|
||||
content: tooltipContent,
|
||||
duration: 100,
|
||||
trigger: "manual"
|
||||
trigger: "manual",
|
||||
});
|
||||
n.on("mouseover", e => {
|
||||
n.on("mouseover", () => {
|
||||
(t as Instance).show();
|
||||
});
|
||||
n.on("mouseout", e => {
|
||||
n.on("mouseout", () => {
|
||||
(t as Instance).hide();
|
||||
});
|
||||
});
|
||||
|
@ -77,25 +79,25 @@ class Cytoscape extends React.PureComponent<ICytoscapeProps> {
|
|||
.style({
|
||||
"curve-style": "haystack", // fast edges
|
||||
"line-color": DEFAULT_NODE_COLOR,
|
||||
width: "mapData(weight, 0, 0.5, 1, 20)"
|
||||
width: "mapData(weight, 0, 0.5, 1, 20)",
|
||||
})
|
||||
.selector("node[label]")
|
||||
.style({
|
||||
color: DEFAULT_NODE_COLOR,
|
||||
"font-size": "mapData(size, 1, 6, 10, 100)",
|
||||
"min-zoomed-font-size": 16
|
||||
"min-zoomed-font-size": 16,
|
||||
})
|
||||
.selector(".hidden") // used to hide nodes not in the neighborhood of the selected
|
||||
.selector(".hidden") // used to hide nodes not in the neighborhood of the selected, or to hide edges
|
||||
.style({
|
||||
display: "none"
|
||||
display: "none",
|
||||
})
|
||||
.selector(".thickEdge") // when a node is selected, make edges thicker so you can actually see them
|
||||
.style({
|
||||
width: 2
|
||||
width: 2,
|
||||
});
|
||||
this.resetNodeColorScheme(style); // this function also called `update()`
|
||||
|
||||
this.cy.nodes().on("select", e => {
|
||||
this.cy.nodes().on("select", (e) => {
|
||||
const instanceId = e.target.data("id");
|
||||
if (instanceId && instanceId !== this.props.currentNodeId) {
|
||||
if (this.props.navigateToInstancePath) {
|
||||
|
@ -109,21 +111,19 @@ class Cytoscape extends React.PureComponent<ICytoscapeProps> {
|
|||
this.cy!.nodes().removeClass("hidden");
|
||||
this.cy!.edges().removeClass("thickEdge");
|
||||
// Then hide everything except neighborhood
|
||||
this.cy!.nodes()
|
||||
.diff(neighborhood)
|
||||
.left.addClass("hidden");
|
||||
this.cy!.nodes().diff(neighborhood).left.addClass("hidden");
|
||||
neighborhood.connectedEdges().addClass("thickEdge");
|
||||
});
|
||||
});
|
||||
this.cy.nodes().on("unselect", e => {
|
||||
this.cy.nodes().on("unselect", () => {
|
||||
this.cy!.batch(() => {
|
||||
this.cy!.nodes().removeClass("hidden");
|
||||
this.cy!.edges().removeClass("thickEdge");
|
||||
});
|
||||
});
|
||||
this.cy.on("click", e => {
|
||||
this.cy.on("click", (e) => {
|
||||
// Clicking on the background should also deselect
|
||||
const target = e.target;
|
||||
const { target } = e;
|
||||
if (!target || target === this.cy || target.isEdge()) {
|
||||
if (this.props.navigateToRoot) {
|
||||
// Go to the URL "/"
|
||||
|
@ -135,7 +135,7 @@ class Cytoscape extends React.PureComponent<ICytoscapeProps> {
|
|||
this.setNodeSelection();
|
||||
}
|
||||
|
||||
public componentDidUpdate(prevProps: ICytoscapeProps) {
|
||||
public componentDidUpdate(prevProps: CytoscapeProps) {
|
||||
this.setNodeSelection(prevProps.currentNodeId);
|
||||
if (prevProps.colorScheme !== this.props.colorScheme) {
|
||||
this.updateColorScheme();
|
||||
|
@ -146,6 +146,13 @@ class Cytoscape extends React.PureComponent<ICytoscapeProps> {
|
|||
if (!isEqual(prevProps.searchResultIds, this.props.searchResultIds)) {
|
||||
this.updateSearchResultNodeClass();
|
||||
}
|
||||
if (prevProps.showEdges !== this.props.showEdges) {
|
||||
if (this.props.showEdges) {
|
||||
this.showEdges();
|
||||
} else {
|
||||
this.hideEdges();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public componentWillUnmount() {
|
||||
|
@ -166,12 +173,12 @@ class Cytoscape extends React.PureComponent<ICytoscapeProps> {
|
|||
if (currentNodeId) {
|
||||
this.cy.zoom({
|
||||
level: 0.2,
|
||||
position: this.cy.$id(currentNodeId).position()
|
||||
position: this.cy.$id(currentNodeId).position(),
|
||||
});
|
||||
} else {
|
||||
this.cy.zoom({
|
||||
level: 0.2,
|
||||
position: { x: 0, y: 0 }
|
||||
position: { x: 0, y: 0 },
|
||||
});
|
||||
}
|
||||
}
|
||||
|
@ -213,7 +220,7 @@ class Cytoscape extends React.PureComponent<ICytoscapeProps> {
|
|||
// quite good as it is, so...
|
||||
height: "mapData(size, 1, 6, 20, 200)",
|
||||
label: "data(id)",
|
||||
width: "mapData(size, 1, 6, 20, 200)"
|
||||
width: "mapData(size, 1, 6, 20, 200)",
|
||||
});
|
||||
|
||||
this.setNodeSearchColorScheme(style);
|
||||
|
@ -232,16 +239,16 @@ class Cytoscape extends React.PureComponent<ICytoscapeProps> {
|
|||
"background-color": SEARCH_RESULT_COLOR,
|
||||
"border-color": SEARCH_RESULT_COLOR,
|
||||
"border-opacity": 0.7,
|
||||
"border-width": 250
|
||||
"border-width": 250,
|
||||
})
|
||||
.selector("node.hovered")
|
||||
.style({
|
||||
"border-color": HOVERED_NODE_COLOR,
|
||||
"border-width": 1000
|
||||
"border-width": 1000,
|
||||
})
|
||||
.selector("node:selected")
|
||||
.style({
|
||||
"background-color": SELECTED_NODE_COLOR
|
||||
"background-color": SELECTED_NODE_COLOR,
|
||||
})
|
||||
.update();
|
||||
};
|
||||
|
@ -255,10 +262,11 @@ class Cytoscape extends React.PureComponent<ICytoscapeProps> {
|
|||
if (!colorScheme) {
|
||||
this.resetNodeColorScheme();
|
||||
return;
|
||||
} else if (colorScheme.type === "qualitative") {
|
||||
}
|
||||
if (colorScheme.type === "qualitative") {
|
||||
colorScheme.values.forEach((v, idx) => {
|
||||
style = style.selector(`node[${colorScheme.cytoscapeDataKey} = '${v}']`).style({
|
||||
"background-color": QUALITATIVE_COLOR_SCHEME[idx]
|
||||
"background-color": QUALITATIVE_COLOR_SCHEME[idx],
|
||||
});
|
||||
});
|
||||
} else if (colorScheme.type === "quantitative") {
|
||||
|
@ -276,7 +284,7 @@ class Cytoscape extends React.PureComponent<ICytoscapeProps> {
|
|||
const max = idx === QUANTITATIVE_COLOR_SCHEME.length - 1 ? maxVal + 1 : buckets[idx + 1];
|
||||
const selector = `node[${dataKey} >= ${min}][${dataKey} < ${max}]`;
|
||||
style = style.selector(selector).style({
|
||||
"background-color": color
|
||||
"background-color": color,
|
||||
});
|
||||
});
|
||||
}
|
||||
|
@ -296,10 +304,10 @@ class Cytoscape extends React.PureComponent<ICytoscapeProps> {
|
|||
}
|
||||
const { hoveringOver } = this.props;
|
||||
|
||||
if (!!prevHoveredId) {
|
||||
if (prevHoveredId) {
|
||||
this.cy.$id(prevHoveredId).removeClass("hovered");
|
||||
}
|
||||
if (!!hoveringOver) {
|
||||
if (hoveringOver) {
|
||||
this.cy.$id(hoveringOver).addClass("hovered");
|
||||
}
|
||||
};
|
||||
|
@ -314,11 +322,35 @@ class Cytoscape extends React.PureComponent<ICytoscapeProps> {
|
|||
this.cy!.nodes().removeClass("searchResult");
|
||||
|
||||
if (!!searchResultIds && searchResultIds.length > 0) {
|
||||
const currentResultSelector = searchResultIds.map(id => `node[id = "${id}"]`).join(", ");
|
||||
const currentResultSelector = searchResultIds.map((id) => `node[id = "${id}"]`).join(", ");
|
||||
this.cy!.$(currentResultSelector).addClass("searchResult");
|
||||
}
|
||||
});
|
||||
};
|
||||
|
||||
private showEdges = () => {
|
||||
if (!this.cy) {
|
||||
throw new Error("Expected cytoscape, but there wasn't one!");
|
||||
}
|
||||
this.cy.edges().removeClass("hidden");
|
||||
};
|
||||
|
||||
private hideEdges = () => {
|
||||
if (!this.cy) {
|
||||
throw new Error("Expected cytoscape, but there wasn't one!");
|
||||
}
|
||||
this.cy.edges().addClass("hidden");
|
||||
};
|
||||
|
||||
/* Helper function to remove edges if source or target node is missing */
|
||||
private cleanElements = (elements: cytoscape.ElementsDefinition): cytoscape.ElementsDefinition => {
|
||||
const domains = new Set(elements.nodes.map((n) => n.data.id));
|
||||
const edges = elements.edges.filter((e) => domains.has(e.data.source) && domains.has(e.data.target));
|
||||
return {
|
||||
edges,
|
||||
nodes: elements.nodes,
|
||||
};
|
||||
};
|
||||
}
|
||||
|
||||
export default Cytoscape;
|
||||
|
|
|
@ -2,11 +2,11 @@ import { NonIdealState } from "@blueprintjs/core";
|
|||
import { IconNames } from "@blueprintjs/icons";
|
||||
import * as React from "react";
|
||||
|
||||
interface IErrorStateProps {
|
||||
interface ErrorStateProps {
|
||||
description?: string;
|
||||
}
|
||||
const ErrorState: React.FC<IErrorStateProps> = ({ description }) => (
|
||||
<NonIdealState icon={IconNames.ERROR} title={"Something went wrong."} description={description} />
|
||||
const ErrorState: React.FC<ErrorStateProps> = ({ description }) => (
|
||||
<NonIdealState icon={IconNames.ERROR} title="Something went wrong." description={description} />
|
||||
);
|
||||
|
||||
export default ErrorState;
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
import React from "react";
|
||||
import styled from "styled-components";
|
||||
import { IColorScheme } from "../../types";
|
||||
import { GraphKey, GraphResetButton } from "../atoms";
|
||||
import { ColorScheme } from "../../types";
|
||||
import { GraphHideEdgesButton, GraphKey, GraphResetButton } from "../atoms";
|
||||
|
||||
const GraphToolsContainer = styled.div`
|
||||
position: absolute;
|
||||
|
@ -11,30 +11,33 @@ const GraphToolsContainer = styled.div`
|
|||
flex-direction: column;
|
||||
`;
|
||||
|
||||
interface IGraphToolsProps {
|
||||
currentColorScheme?: IColorScheme;
|
||||
colorSchemes: IColorScheme[];
|
||||
interface GraphToolsProps {
|
||||
currentColorScheme?: ColorScheme;
|
||||
colorSchemes: ColorScheme[];
|
||||
isShowingEdges: boolean;
|
||||
ranges?: { [key: string]: [number, number] };
|
||||
onColorSchemeSelect: (colorScheme?: IColorScheme) => void;
|
||||
onColorSchemeSelect: (colorScheme?: ColorScheme) => void;
|
||||
onResetButtonClick: () => void;
|
||||
toggleEdges: () => void;
|
||||
}
|
||||
const GraphTools: React.FC<IGraphToolsProps> = ({
|
||||
const GraphTools: React.FC<GraphToolsProps> = ({
|
||||
currentColorScheme,
|
||||
colorSchemes,
|
||||
isShowingEdges,
|
||||
ranges,
|
||||
onColorSchemeSelect,
|
||||
onResetButtonClick
|
||||
}) => {
|
||||
return (
|
||||
<GraphToolsContainer>
|
||||
<GraphResetButton onClick={onResetButtonClick} />
|
||||
<GraphKey
|
||||
current={currentColorScheme}
|
||||
colorSchemes={colorSchemes}
|
||||
onItemSelect={onColorSchemeSelect}
|
||||
ranges={ranges}
|
||||
/>
|
||||
</GraphToolsContainer>
|
||||
);
|
||||
};
|
||||
onResetButtonClick,
|
||||
toggleEdges,
|
||||
}) => (
|
||||
<GraphToolsContainer>
|
||||
<GraphResetButton onClick={onResetButtonClick} />
|
||||
<GraphHideEdgesButton isShowingEdges={isShowingEdges} toggleEdges={toggleEdges} />
|
||||
<GraphKey
|
||||
current={currentColorScheme}
|
||||
colorSchemes={colorSchemes}
|
||||
onItemSelect={onColorSchemeSelect}
|
||||
ranges={ranges}
|
||||
/>
|
||||
</GraphToolsContainer>
|
||||
);
|
||||
export default GraphTools;
|
||||
|
|
|
@ -4,12 +4,12 @@ import * as numeral from "numeral";
|
|||
import React from "react";
|
||||
import sanitize from "sanitize-html";
|
||||
import styled from "styled-components";
|
||||
import { ISearchResultInstance } from "../../redux/types";
|
||||
import { SearchResultInstance } from "../../redux/types";
|
||||
import { InstanceType } from "../atoms";
|
||||
|
||||
const StyledCard = styled(Card)`
|
||||
width: 80%;
|
||||
margin: 1em auto;
|
||||
margin: 0.5em auto;
|
||||
background-color: #394b59 !important;
|
||||
text-align: left;
|
||||
`;
|
||||
|
@ -32,18 +32,18 @@ const StyledUserCount = styled.div`
|
|||
const StyledDescription = styled.div`
|
||||
margin-top: 10px;
|
||||
`;
|
||||
interface ISearchResultProps {
|
||||
result: ISearchResultInstance;
|
||||
interface SearchResultProps {
|
||||
result: SearchResultInstance;
|
||||
onClick: () => void;
|
||||
onMouseEnter: () => void;
|
||||
onMouseLeave: () => void;
|
||||
}
|
||||
const SearchResult: React.FC<ISearchResultProps> = ({ result, onClick, onMouseEnter, onMouseLeave }) => {
|
||||
const SearchResult: React.FC<SearchResultProps> = ({ result, onClick, onMouseEnter, onMouseLeave }) => {
|
||||
let shortenedDescription;
|
||||
if (result.description) {
|
||||
shortenedDescription = result.description && sanitize(result.description);
|
||||
if (shortenedDescription.length > 100) {
|
||||
shortenedDescription = shortenedDescription.substring(0, 100) + "...";
|
||||
shortenedDescription = `${shortenedDescription.substring(0, 100)}...`;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -59,7 +59,7 @@ const SearchResult: React.FC<ISearchResultProps> = ({ result, onClick, onMouseEn
|
|||
return (
|
||||
<StyledCard
|
||||
elevation={Elevation.ONE}
|
||||
interactive={true}
|
||||
interactive
|
||||
key={result.name}
|
||||
onClick={onClick}
|
||||
onMouseEnter={onMouseEnter}
|
||||
|
|
|
@ -0,0 +1,89 @@
|
|||
import { Classes, H3 } from "@blueprintjs/core";
|
||||
import React from "react";
|
||||
import { Link } from "react-router-dom";
|
||||
import { FederationRestrictions } from "../../redux/types";
|
||||
|
||||
const maybeGetList = (domains?: string[]) =>
|
||||
domains && (
|
||||
<ul>
|
||||
{domains.sort().map((domain) => (
|
||||
<li key={domain}>
|
||||
<Link to={`/instance/${domain}`} className={`${Classes.BUTTON} ${Classes.MINIMAL}`} role="button">
|
||||
{domain}
|
||||
</Link>
|
||||
</li>
|
||||
))}
|
||||
</ul>
|
||||
);
|
||||
|
||||
interface FederationTabProps {
|
||||
restrictions?: FederationRestrictions;
|
||||
}
|
||||
const FederationTab: React.FC<FederationTabProps> = ({ restrictions }) => {
|
||||
if (!restrictions) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const reportsRemovalList = maybeGetList(restrictions.reportRemoval);
|
||||
const rejectsList = maybeGetList(restrictions.reject);
|
||||
const mediaRemovalsList = maybeGetList(restrictions.mediaRemoval);
|
||||
const mediaNsfwsList = maybeGetList(restrictions.mediaNsfw);
|
||||
const federatedTimelineRemovalsList = maybeGetList(restrictions.federatedTimelineRemoval);
|
||||
const bannerRemovalsList = maybeGetList(restrictions.bannerRemoval);
|
||||
const avatarRemovalsList = maybeGetList(restrictions.avatarRemoval);
|
||||
const acceptedList = maybeGetList(restrictions.accept);
|
||||
|
||||
return (
|
||||
<>
|
||||
{rejectsList && (
|
||||
<>
|
||||
<H3>Blocked instances</H3>
|
||||
{rejectsList}
|
||||
</>
|
||||
)}
|
||||
{reportsRemovalList && (
|
||||
<>
|
||||
<H3>Reports ignored</H3>
|
||||
{reportsRemovalList}
|
||||
</>
|
||||
)}
|
||||
{mediaRemovalsList && (
|
||||
<>
|
||||
<H3>Media removed</H3>
|
||||
{mediaRemovalsList}
|
||||
</>
|
||||
)}
|
||||
{mediaNsfwsList && (
|
||||
<>
|
||||
<H3>Media marked as NSFW</H3>
|
||||
{mediaNsfwsList}
|
||||
</>
|
||||
)}
|
||||
{federatedTimelineRemovalsList && (
|
||||
<>
|
||||
<H3>Hidden from federated timeline</H3>
|
||||
{federatedTimelineRemovalsList}
|
||||
</>
|
||||
)}
|
||||
{bannerRemovalsList && (
|
||||
<>
|
||||
<H3>Banners removed</H3>
|
||||
{bannerRemovalsList}
|
||||
</>
|
||||
)}
|
||||
{avatarRemovalsList && (
|
||||
<>
|
||||
<H3>Avatars removed</H3>
|
||||
{avatarRemovalsList}
|
||||
</>
|
||||
)}
|
||||
{acceptedList && (
|
||||
<>
|
||||
<H3>Whitelisted</H3>
|
||||
{acceptedList}
|
||||
</>
|
||||
)}
|
||||
</>
|
||||
);
|
||||
};
|
||||
export default FederationTab;
|
|
@ -6,35 +6,36 @@ import { push } from "connected-react-router";
|
|||
import { Dispatch } from "redux";
|
||||
import styled from "styled-components";
|
||||
import { fetchGraph } from "../../redux/actions";
|
||||
import { IAppState, IGraphResponse } from "../../redux/types";
|
||||
import { colorSchemes, IColorScheme } from "../../types";
|
||||
import { AppState, GraphResponse } from "../../redux/types";
|
||||
import { colorSchemes, ColorScheme } from "../../types";
|
||||
import { domainMatchSelector } from "../../util";
|
||||
import { Cytoscape, ErrorState, GraphTools } from "../molecules/";
|
||||
import { Cytoscape, ErrorState, GraphTools } from "../molecules";
|
||||
|
||||
const GraphDiv = styled.div`
|
||||
flex: 2;
|
||||
`;
|
||||
|
||||
interface IGraphProps {
|
||||
interface GraphProps {
|
||||
currentInstanceName: string | null;
|
||||
fetchGraph: () => void;
|
||||
graphResponse?: IGraphResponse;
|
||||
graphResponse?: GraphResponse;
|
||||
graphLoadError: boolean;
|
||||
hoveringOverResult?: string;
|
||||
isLoadingGraph: boolean;
|
||||
searchResultDomains: string[];
|
||||
navigate: (path: string) => void;
|
||||
}
|
||||
interface IGraphState {
|
||||
colorScheme?: IColorScheme;
|
||||
interface GraphState {
|
||||
colorScheme?: ColorScheme;
|
||||
isShowingEdges: boolean;
|
||||
}
|
||||
class GraphImpl extends React.PureComponent<IGraphProps, IGraphState> {
|
||||
class GraphImpl extends React.PureComponent<GraphProps, GraphState> {
|
||||
private cytoscapeComponent: React.RefObject<Cytoscape>;
|
||||
|
||||
public constructor(props: IGraphProps) {
|
||||
public constructor(props: GraphProps) {
|
||||
super(props);
|
||||
this.cytoscapeComponent = React.createRef();
|
||||
this.state = { colorScheme: undefined };
|
||||
this.state = { colorScheme: undefined, isShowingEdges: true };
|
||||
}
|
||||
|
||||
public componentDidMount() {
|
||||
|
@ -59,20 +60,23 @@ class GraphImpl extends React.PureComponent<IGraphProps, IGraphState> {
|
|||
navigateToInstancePath={this.navigateToInstancePath}
|
||||
navigateToRoot={this.navigateToRoot}
|
||||
searchResultIds={this.props.searchResultDomains}
|
||||
showEdges={this.state.isShowingEdges}
|
||||
ref={this.cytoscapeComponent}
|
||||
/>
|
||||
<GraphTools
|
||||
onResetButtonClick={this.resetGraphPosition}
|
||||
currentColorScheme={this.state.colorScheme}
|
||||
colorSchemes={colorSchemes}
|
||||
isShowingEdges={this.state.isShowingEdges}
|
||||
onColorSchemeSelect={this.setColorScheme}
|
||||
ranges={this.props.graphResponse.metadata.ranges}
|
||||
toggleEdges={this.toggleEdges}
|
||||
/>
|
||||
</>
|
||||
);
|
||||
}
|
||||
|
||||
return <GraphDiv>{content}</GraphDiv>;
|
||||
return <GraphDiv aria-hidden>{content}</GraphDiv>;
|
||||
}
|
||||
|
||||
private loadGraph = () => {
|
||||
|
@ -87,7 +91,11 @@ class GraphImpl extends React.PureComponent<IGraphProps, IGraphState> {
|
|||
}
|
||||
};
|
||||
|
||||
private setColorScheme = (colorScheme?: IColorScheme) => {
|
||||
private toggleEdges = () => {
|
||||
this.setState({ isShowingEdges: !this.state.isShowingEdges });
|
||||
};
|
||||
|
||||
private setColorScheme = (colorScheme?: ColorScheme) => {
|
||||
this.setState({ colorScheme });
|
||||
};
|
||||
|
||||
|
@ -99,23 +107,20 @@ class GraphImpl extends React.PureComponent<IGraphProps, IGraphState> {
|
|||
this.props.navigate("/");
|
||||
};
|
||||
}
|
||||
const mapStateToProps = (state: IAppState) => {
|
||||
const mapStateToProps = (state: AppState) => {
|
||||
const match = domainMatchSelector(state);
|
||||
return {
|
||||
currentInstanceName: match && match.params.domain,
|
||||
graphLoadError: state.data.error,
|
||||
graphLoadError: state.data.graphLoadError,
|
||||
graphResponse: state.data.graphResponse,
|
||||
hoveringOverResult: state.search.hoveringOverResult,
|
||||
isLoadingGraph: state.data.isLoadingGraph,
|
||||
searchResultDomains: state.search.results.map(r => r.name)
|
||||
searchResultDomains: state.search.results.map((r) => r.name),
|
||||
};
|
||||
};
|
||||
const mapDispatchToProps = (dispatch: Dispatch) => ({
|
||||
fetchGraph: () => dispatch(fetchGraph() as any),
|
||||
navigate: (path: string) => dispatch(push(path))
|
||||
navigate: (path: string) => dispatch(push(path)),
|
||||
});
|
||||
const Graph = connect(
|
||||
mapStateToProps,
|
||||
mapDispatchToProps
|
||||
)(GraphImpl);
|
||||
const Graph = connect(mapStateToProps, mapDispatchToProps)(GraphImpl);
|
||||
export default Graph;
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue