empty gh-pages branch, skip Circle CI

Daniel Holbach
2020-07-28 13:32:32 +02:00
parent 727c51756e
commit 714a170a32
4046 changed files with 9 additions and 1245825 deletions


@@ -1,204 +1,16 @@
version: 2
defaults: &defaults
working_directory: /go/src/github.com/weaveworks/scope
docker:
- image: weaveworks/scope-backend-build:master-14d4ecc3
client-defaults: &client-defaults
working_directory: /home/weave/scope
docker:
- image: weaveworks/scope-ui-build:master-aee14088
version: 2.1
jobs:
build:
machine: true
steps:
- run: "echo skip"
workflows:
version: 2
test_and_deploy:
ignore:
jobs:
- lint
- unit-test
- client-build
- client-test:
requires:
- client-build
- xplatform-build:
requires:
- build
- build:
requires:
- client-build
- integration-tests:
requires:
- lint
- unit-test
- build
- deploy:
filters:
branches:
only: master
requires:
- client-test
- integration-tests
jobs:
lint:
<<: *defaults
steps:
- checkout
- run: make BUILD_IN_CONTAINER=false lint
unit-test:
<<: *defaults
parallelism: 1
steps:
- checkout
- run: COVERDIR=./coverage make BUILD_IN_CONTAINER=false CODECGEN_UID=23 tests
- persist_to_workspace:
root: .
paths:
- coverage
# Create client/build/index.html
client-build:
<<: *client-defaults
steps:
- checkout
- restore_cache:
name: Restoring Yarn Cache
key: yarn-cache-2-{{ checksum "client/yarn.lock" }}
- restore_cache:
name: Restoring client/node_modules
key: node-modules-{{ checksum "client/yarn.lock" }}-{{ checksum ".circleci/config.yml" }}
- run: cd client; yarn install
- save_cache:
name: Saving Yarn Cache
key: yarn-cache-2-{{ checksum "client/yarn.lock" }}
paths:
- "/home/weave/scope/.cache/yarn"
- save_cache:
name: Saving client/node_modules
# include the CI config in the checksum because it will change when the docker image changes
key: node-modules-{{ checksum "client/yarn.lock" }}-{{ checksum ".circleci/config.yml" }}
paths:
- "/home/weave/scope/client/node_modules"
- run: |
cd client
yarn run build
yarn run build-external
yarn run bundle
- persist_to_workspace:
root: /home/weave/scope
paths:
- client/build/
- client/build-external/
- client/bundle/weave-scope.tgz
client-test:
<<: *client-defaults
steps:
- checkout
- restore_cache:
name: Restoring Yarn Cache
key: yarn-cache-2-{{ checksum "client/yarn.lock" }}
- restore_cache:
name: Restoring client/node_modules
key: node-modules-{{ checksum "client/yarn.lock" }}-{{ checksum ".circleci/config.yml" }}
- run: |
cd client
yarn install
yarn run lint
yarn test
xplatform-build:
<<: *defaults
steps:
- checkout
- run: GOARCH=arm make BUILD_IN_CONTAINER=false GO_BUILD_INSTALL_DEPS= prog/scope
- run: GOOS=darwin make BUILD_IN_CONTAINER=false GO_BUILD_INSTALL_DEPS= prog/scope
build:
<<: *defaults
steps:
- checkout
- setup_remote_docker
- attach_workspace:
at: .
- run: make BUILD_IN_CONTAINER=false SUDO= static all
- run: cd extras; make BUILD_IN_CONTAINER=false
- run: make -C tools/runner
- persist_to_workspace:
root: .
paths:
- scope.tar
- cloud-agent.tar
- tools/runner/runner
- prog/externalui/
- prog/staticui/
- report/report.codecgen.go
- render/detailed/detailed.codecgen.go
integration-tests:
machine:
image: circleci/classic:201709-01
working_directory: /home/circleci/src/github.com/weaveworks/scope
environment:
CIRCLE_ARTIFACTS: /tmp/artifacts
CLOUDSDK_CORE_DISABLE_PROMPTS: 1
GOPATH: /home/circleci/
parallelism: 2
steps:
- checkout
- attach_workspace:
at: .
- run: |
sudo apt-get update
sudo apt-get install python-pip jq pv
- run: mkdir $CIRCLE_ARTIFACTS
# kick off creation of test VMs
- run: test -z "$SECRET_PASSWORD" || bin/setup-circleci-secrets "$SECRET_PASSWORD"
- run: test -z "$SECRET_PASSWORD" || (cd integration; ./gce.sh make_template)
- run: test -z "$SECRET_PASSWORD" || (cd integration; ./gce.sh setup && eval $(./gce.sh hosts); ./setup.sh)
- run: make deps; touch tools/runner/runner
# Run all integration tests
- run:
command: test -z "$SECRET_PASSWORD" || (cd integration; eval $(./gce.sh hosts); ./run_all.sh)
no_output_timeout: 5m
# Destroy testing VMs:
- run:
command: test -z "$SECRET_PASSWORD" || (cd integration; ./gce.sh destroy)
background: true
# Code coverage
- run: ./tools/cover/gather_coverage.sh ./coverage
- run: goveralls -repotoken $COVERALLS_REPO_TOKEN -coverprofile=profile.cov -service=circleci
- run: cp coverage.* */*.codecgen.go $CIRCLE_ARTIFACTS
- store_artifacts:
path: /tmp/artifacts
deploy:
<<: *defaults
environment:
IMAGES: scope cloud-agent
steps:
- checkout
- setup_remote_docker
- attach_workspace:
at: .
- run: |
pip install awscli
docker load -i scope.tar
docker load -i cloud-agent.tar
- run: |
test -z "${DOCKER_USER}" && exit 0
docker login -u $DOCKER_USER -p $DOCKER_PASS
for IMAGE in $IMAGES; do
test "${DOCKER_ORGANIZATION:-$DOCKER_USER}" = "weaveworks" || docker tag weaveworks/$IMAGE:latest ${DOCKER_ORGANIZATION:-$DOCKER_USER}/$IMAGE:latest
docker tag weaveworks/$IMAGE:latest ${DOCKER_ORGANIZATION:-$DOCKER_USER}/$IMAGE:$(./tools/image-tag)
docker push ${DOCKER_ORGANIZATION:-$DOCKER_USER}/$IMAGE:latest
docker push ${DOCKER_ORGANIZATION:-$DOCKER_USER}/$IMAGE:$(./tools/image-tag)
done
- run: |
test -z "${QUAY_USER}" && exit 0
docker login -e '.' -u "$QUAY_USER" -p "$QUAY_PASSWORD" quay.io
docker tag weaveworks/scope:$(./tools/image-tag) "quay.io/${QUAY_ORGANIZATION}/scope:$(./tools/image-tag)"
docker push "quay.io/${QUAY_ORGANIZATION}/scope:$(./tools/image-tag)"
- run: test -z "${UI_BUCKET_KEY_ID}" || (make BUILD_IN_CONTAINER=false ui-upload ui-pkg-upload)
ignore:
- gh-pages

.github/CODEOWNERS vendored (5 lines changed)

@@ -1,5 +0,0 @@
# Lines starting with '#' are comments.
# Each line is a file pattern followed by one or more owners.
# These owners will be the default owners for everything in the repo.
* @bboreham @fbarl @satyamz @qiell


@@ -1,58 +0,0 @@
<!--
Hi, thank you for opening an issue!
Before hitting the button...
** Is this a REQUEST FOR HELP? **
If so, please have a look at:
- How Weave Scope works: https://www.weave.works/docs/scope/latest/how-it-works/
- our troubleshooting page: https://www.weave.works/docs/scope/latest/building/
- our help page, to choose the best channel (Slack, etc.) to reach out: https://weave-community.slack.com/messages/scope/
** Is this a FEATURE REQUEST? **
If so, please search existing feature requests, and if you find a similar one, up-vote it and/or add your comments to it instead.
If you did not find a similar one, please describe in detail:
- why: your use-case, specific constraints you may have, etc.
- what: the feature/behaviour/change you would like to see in Weave Scope
Do not hesitate, when appropriate, to share the exact commands or API you would like, and/or to share a diagram (e.g.: asciiflow.com): "a picture is worth a thousand words".
** Is this a BUG REPORT? **
Please fill in as much of the template below as you can.
Thank you!
-->
## What did you expect to happen?
## What happened?
<!-- Error message, actual behaviour, etc. -->
## How to reproduce it?
<!-- Specific steps, as minimally and precisely as possible. -->
## Anything else we need to know?
<!-- Cloud provider? Hardware? How did you configure your cluster? Kubernetes YAML, KOPS, etc. -->
## Versions:
<!-- Please paste in the output of these commands; 'kubectl' only if using Kubernetes -->
```
$ scope version
$ docker version
$ uname -a
$ kubectl version
```
## Logs:
```
$ docker logs weavescope
```
or, if using Kubernetes:
```
$ kubectl logs <weave-scope-pod> -n <namespace>
```
<!-- (If output is long, please consider a Gist.) -->
<!-- Anything interesting or unusual output by the below, potentially relevant, commands?
$ journalctl -u docker.service --no-pager
$ journalctl -u kubelet --no-pager
$ kubectl get events
-->

.gitignore vendored (69 lines changed)

@@ -1,69 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
.vagrant
releases
tmp
.cache
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
.DS_Store
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof
profile.cov
coverage.html
# Emacs backup files
*~
# ctags files
tags
# Project specific
.*.uptodate
.uptodate
scope.tar
cloud-agent.tar
.pkg
prog/scope
docker/scope
docker/docker.tgz
docker/docker
docker/weave
docker/weaveutil
docker/runsvinit
extras/fixprobe/fixprobe
extras/fixprobe/*.json
extras/copyreport/copyreport
*sublime-project
*sublime-workspace
*npm-debug.log
*yarn-error.log
app/static.go
vendor/github.com/ugorji/go/codec/codecgen/bin/*
*.codecgen.go
client/build-external/*
prog/staticui/staticui.go
prog/externalui/externalui.go
client/build-pkg
client/bundle
# Website
site-build


@@ -1 +0,0 @@
2.6.2

File diff suppressed because it is too large


@@ -1,7 +0,0 @@
## Community Code of Conduct
Weaveworks follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
Instances of abusive, harassing, or otherwise unacceptable behavior
may be reported by contacting a Weaveworks project maintainer, or
Alexis Richardson alexis@weave.works.


@@ -1,132 +0,0 @@
# How to Contribute
Scope is [Apache 2.0 licensed](LICENSE) and accepts contributions via GitHub
pull requests. This document outlines some of the conventions on development
workflow, commit message formatting, contact points and other resources to make
it easier to get your contribution accepted.
We gratefully welcome improvements to documentation as well as to code.
# Certificate of Origin
By contributing to this project you agree to the Developer Certificate of
Origin (DCO). This document was created by the Linux Kernel community and is a
simple statement that you, as a contributor, have the legal right to make the
contribution. No action from you is required, but it's a good idea to see the
[DCO](DCO) file for details before you start contributing code to Scope.
# Email, Chat and Community Meetings
The project uses the scope-community email list and Slack:
- Email: [scope-community](https://groups.google.com/forum/#!forum/scope-community)
- Chat: Join the [Weave community](https://weaveworks.github.io/community-slack/) Slack workspace and use the [#scope](https://weave-community.slack.com/messages/scope/) channel
When sending email, it's usually best to use the mailing list. The maintainers are usually quite busy, and the mailing list will more easily reach somebody who can reply quickly. You will also potentially be helping others who have the same question.
We also meet regularly at the [Scope community meeting](https://docs.google.com/document/d/103_60TuEkfkhz_h2krrPJH8QOx-vRnPpbcCZqrddE1s/). Don't be discouraged from attending just because you are not a developer. Everybody is welcome!
# Getting Started
- Fork the repository on GitHub
- Read the [README](README.md) for getting started as a user and learn how/where to ask for help
- If you want to contribute as a developer, continue reading this document for further instructions
- Play with the project, submit bugs, submit pull requests!
## Contribution workflow
This is a rough outline of how to prepare a contribution:
- Create a topic branch from where you want to base your work (usually branched from master).
- Make commits of logical units.
- Make sure your commit messages are in the proper format (see below).
- Push your changes to a topic branch in your fork of the repository.
- If you changed code:
- add automated tests to cover your changes
- Submit a pull request to the original repository.
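For illustration, here is a minimal command-line sketch of that workflow (the branch name and commit message are hypothetical examples, not project requirements):
```bash
# create a topic branch based on master
git checkout -b fix-probe-timeout master
# ... edit code and add automated tests ...
# commit in logical units; the subject follows "<subsystem>: <what changed>"
git commit -a -m "probe: fix timeout handling"
# push the topic branch to your fork and open a pull request on GitHub
git push origin fix-probe-timeout
```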
## How to build and run the project
```bash
make && ./scope stop && ./scope launch
```
After every change you make to the Go code, you will need to rerun the command above (recompiling and restarting Scope) and refresh the browser tab to see your changes.
**Tip**: If you are only making changes to Scope frontend code, you can speed up the development cycle by additionally starting up the Webpack server, which will automatically recompile and hot-reload your browser tab at http://localhost:4042 on every change:
```bash
cd client && yarn install && yarn start
```
## How to run the test suite
### Backend
You can run the Go linting and unit tests by simply doing
```bash
make tests
```
There are integration tests for Scope, but unfortunately it's hard to set them up in forked repositories and the setup is not documented. Help is needed to improve this situation: [#2192](https://github.com/weaveworks/scope/issues/2192)
### Frontend
Use `yarn` to run all JavaScript tests and linting checks:
```bash
cd client && yarn install && yarn test && yarn lint
```
# Acceptance policy
These things will make a PR more likely to be accepted:
* a well-described requirement
* tests for new code
* tests for old code!
* new code and tests follow the conventions in old code and tests
* a good commit message (see below)
In general, we will merge a PR once two maintainers have endorsed it.
Trivial changes (e.g., corrections to spelling) may get waved through.
For substantial changes, more people may become involved, and you might get asked to resubmit the PR or divide the changes into more than one PR.
### Format of the Commit Message
We follow a rough convention for commit messages that is designed to answer two
questions: what changed and why. The subject line should feature the what and
the body of the commit should describe the why.
```
scripts: add the test-cluster command
this uses tmux to setup a test cluster that you can easily kill and
start for debugging.
Fixes #38
```
The format can be described more formally as follows:
```
<subsystem>: <what changed>
<BLANK LINE>
<why this change was made>
<BLANK LINE>
<footer>
```
The first line is the subject and should be no longer than 70 characters, the
second line is always blank, and other lines should be wrapped at 80 characters.
This allows the message to be easier to read on GitHub as well as in various
git tools.
## 3rd party plugins
So you've built a Scope plugin. Where should it live?
Until it matures, it should live in your own repo. You are encouraged to announce your plugin on the [mailing list](https://groups.google.com/forum/#!forum/scope-community) and to demo it at a [community meeting](https://docs.google.com/document/d/103_60TuEkfkhz_h2krrPJH8QOx-vRnPpbcCZqrddE1s/).
If you have a good reason why the Scope maintainers should take custody of your
plugin, please open an issue so that it can potentially be promoted to the [Scope plugins](https://github.com/weaveworks-plugins/) organization.


@@ -1,175 +0,0 @@
./tools/integration/assert.sh is a copy of
https://github.com/lehmannro/assert.sh/blob/master/assert.sh
Since it was imported from its original source, it has only received
cosmetic modifications. As it is licensed under the LGPL-3, here's the
license text in its entirety:
GNU LESSER GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
This version of the GNU Lesser General Public License incorporates
the terms and conditions of version 3 of the GNU General Public
License, supplemented by the additional permissions listed below.
0. Additional Definitions.
As used herein, "this License" refers to version 3 of the GNU Lesser
General Public License, and the "GNU GPL" refers to version 3 of the GNU
General Public License.
"The Library" refers to a covered work governed by this License,
other than an Application or a Combined Work as defined below.
An "Application" is any work that makes use of an interface provided
by the Library, but which is not otherwise based on the Library.
Defining a subclass of a class defined by the Library is deemed a mode
of using an interface provided by the Library.
A "Combined Work" is a work produced by combining or linking an
Application with the Library. The particular version of the Library
with which the Combined Work was made is also called the "Linked
Version".
The "Minimal Corresponding Source" for a Combined Work means the
Corresponding Source for the Combined Work, excluding any source code
for portions of the Combined Work that, considered in isolation, are
based on the Application, and not on the Linked Version.
The "Corresponding Application Code" for a Combined Work means the
object code and/or source code for the Application, including any data
and utility programs needed for reproducing the Combined Work from the
Application, but excluding the System Libraries of the Combined Work.
1. Exception to Section 3 of the GNU GPL.
You may convey a covered work under sections 3 and 4 of this License
without being bound by section 3 of the GNU GPL.
2. Conveying Modified Versions.
If you modify a copy of the Library, and, in your modifications, a
facility refers to a function or data to be supplied by an Application
that uses the facility (other than as an argument passed when the
facility is invoked), then you may convey a copy of the modified
version:
a) under this License, provided that you make a good faith effort to
ensure that, in the event an Application does not supply the
function or data, the facility still operates, and performs
whatever part of its purpose remains meaningful, or
b) under the GNU GPL, with none of the additional permissions of
this License applicable to that copy.
3. Object Code Incorporating Material from Library Header Files.
The object code form of an Application may incorporate material from
a header file that is part of the Library. You may convey such object
code under terms of your choice, provided that, if the incorporated
material is not limited to numerical parameters, data structure
layouts and accessors, or small macros, inline functions and templates
(ten or fewer lines in length), you do both of the following:
a) Give prominent notice with each copy of the object code that the
Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the object code with a copy of the GNU GPL and this license
document.
4. Combined Works.
You may convey a Combined Work under terms of your choice that,
taken together, effectively do not restrict modification of the
portions of the Library contained in the Combined Work and reverse
engineering for debugging such modifications, if you also do each of
the following:
a) Give prominent notice with each copy of the Combined Work that
the Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the Combined Work with a copy of the GNU GPL and this license
document.
c) For a Combined Work that displays copyright notices during
execution, include the copyright notice for the Library among
these notices, as well as a reference directing the user to the
copies of the GNU GPL and this license document.
d) Do one of the following:
0) Convey the Minimal Corresponding Source under the terms of this
License, and the Corresponding Application Code in a form
suitable for, and under terms that permit, the user to
recombine or relink the Application with a modified version of
the Linked Version to produce a modified Combined Work, in the
manner specified by section 6 of the GNU GPL for conveying
Corresponding Source.
1) Use a suitable shared library mechanism for linking with the
Library. A suitable mechanism is one that (a) uses at run time
a copy of the Library already present on the user's computer
system, and (b) will operate properly with a modified version
of the Library that is interface-compatible with the Linked
Version.
e) Provide Installation Information, but only if you would otherwise
be required to provide such information under section 6 of the
GNU GPL, and only to the extent that such information is
necessary to install and execute a modified version of the
Combined Work produced by recombining or relinking the
Application with a modified version of the Linked Version. (If
you use option 4d0, the Installation Information must accompany
the Minimal Corresponding Source and Corresponding Application
Code. If you use option 4d1, you must provide the Installation
Information in the manner specified by section 6 of the GNU GPL
for conveying Corresponding Source.)
5. Combined Libraries.
You may place library facilities that are a work based on the
Library side by side in a single library together with other library
facilities that are not Applications and are not covered by this
License, and convey such a combined library under terms of your
choice, if you do both of the following:
a) Accompany the combined library with a copy of the same work based
on the Library, uncombined with any other library facilities,
conveyed under the terms of this License.
b) Give prominent notice with the combined library that part of it
is a work based on the Library, and explaining where to find the
accompanying uncombined form of the same work.
6. Revised Versions of the GNU Lesser General Public License.
The Free Software Foundation may publish revised and/or new versions
of the GNU Lesser General Public License from time to time. Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns.
Each version is given a distinguishing version number. If the
Library as you received it specifies that a certain numbered version
of the GNU Lesser General Public License "or any later version"
applies to it, you have the option of following the terms and
conditions either of that published version or of any later version
published by the Free Software Foundation. If the Library as you
received it does not specify a version number of the GNU Lesser
General Public License, you may choose any version of the GNU Lesser
General Public License ever published by the Free Software Foundation.
If the Library as you received it specifies that a proxy can decide
whether future versions of the GNU Lesser General Public License shall
apply, that proxy's public statement of acceptance of any version is
permanent authorization for you to choose that version for the
Library.

DCO (36 lines changed)

@@ -1,36 +0,0 @@
Developer Certificate of Origin
Version 1.1
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
Developer's Certificate of Origin 1.1
By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or
(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or
(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.
(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.


@@ -1,58 +0,0 @@
# Scope Governance
This document defines project governance for the project. This is (and probably will always be) a Work in Progress.
## Goals and Principles
Scope's community goals and principles:
1. Transition from a primarily Weaveworks project to a true community-driven project with autonomous governance. Weaveworks noticed interest from various actors and decided to nurture a community and see where it can lead the project.
2. Fill in the needs of the community with a chop-wood-and-carry-water attitude: expect to give back before you take if you want to make an impact. Demands and suggestions from community members will be taken into account, but actions and help will be more highly appreciated.
## Code of Conduct
The Scope community abides by the CNCF [code of conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). Here is an excerpt:
_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._
As a member of the Scope project, you represent the project and your fellow contributors.
We value our community tremendously and we'd like to keep cultivating a friendly and collaborative
environment for our contributors and users. We want everyone in the community to have
[positive experiences](https://www.cncf.io/blog/2016/12/14/diversity-scholarship-series-one-software-engineers-unexpected-cloudnativecon-kubecon-experience).
## Voting
The Scope project aims to employ "organization voting" to ensure no single organization can dominate the project. [Alfonso Acosta](https://github.com/2opremio) will take care of the initial maintenance until enough voters join the community. Once the community reaches critical mass and sufficient maintainers are designated, voting-based governance will start.
Individuals not associated with or employed by a company or organization are allowed one organization vote.
Each company or organization (regardless of the number of maintainers associated with or employed by that company/organization) receives one organization vote.
In other words, if two maintainers are employed by Company X, two by Company Y, two by Company Z, and one maintainer is an un-affiliated individual, a total of four "organization votes" are possible; one for X, one for Y, one for Z, and one for the un-affiliated individual.
Any maintainer from an organization may cast the vote for that organization.
For formal votes, a specific statement of what is being voted on should be added to the relevant github issue or PR, and a link to that issue or PR added to the maintainers meeting agenda document.
Maintainers should indicate their yes/no vote on that issue or PR, and after a suitable period of time, the votes will be tallied and the outcome noted.
## Changes in Maintainership
New maintainers are proposed by an existing maintainer and are elected by a 2/3 majority organization vote.
Maintainers can be removed by a 2/3 majority organization vote.
## Approving PRs
Non-specification-related PRs may be merged after receiving at least two organization votes.
## GitHub Project Administration
Maintainers will be given write access to the [weaveworks/scope](https://github.com/weaveworks/scope) GitHub repository.
## Changes in Governance
All changes in Governance require a 2/3 majority organization vote.
## Other Changes
Unless specified above, all other changes to the project require a 2/3 majority organization vote.
Additionally, any maintainer may request that any change require a 2/3 majority organization vote.

LICENSE (191 lines changed)

@@ -1,191 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2014-2019 Weaveworks Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@@ -1,5 +0,0 @@
Alfonso Acosta <fons@syntacticsugar.consulting> (@2opremio)
Filip Barl <filip.barl@gmail.com> (@fbarl)
Bryan Boreham <bryan@weave.works> (@bboreham)
Satyam Zode <satyamzode@gmail.com> (@satyamz)
Akash Srivastava <akash.srivastava@openebs.io> (@qiell)

Makefile (301 lines changed)

@@ -1,301 +0,0 @@
.PHONY: all cri deps static clean realclean client-lint client-test client-sync backend frontend shell lint ui-upload
# If you can use Docker without being root, you can `make SUDO= <target>`
SUDO=$(shell docker info >/dev/null 2>&1 || echo "sudo -E")
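# For example, a hypothetical invocation on a setup with rootless Docker:
#   make SUDO= scope.tar
# builds the scope image tarball without prefixing docker commands with sudo.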
DOCKERHUB_USER=weaveworks
SCOPE_EXE=prog/scope
SCOPE_EXPORT=scope.tar
CLOUD_AGENT_EXPORT=cloud-agent.tar
SCOPE_UI_BUILD_IMAGE=$(DOCKERHUB_USER)/scope-ui-build
SCOPE_UI_BUILD_UPTODATE=.scope_ui_build.uptodate
SCOPE_BACKEND_BUILD_IMAGE=$(DOCKERHUB_USER)/scope-backend-build
SCOPE_BACKEND_BUILD_UPTODATE=.scope_backend_build.uptodate
SCOPE_VERSION=$(shell git rev-parse --short HEAD)
GIT_REVISION=$(shell git rev-parse HEAD)
WEAVENET_VERSION=2.1.3
RUNSVINIT=vendor/github.com/peterbourgon/runsvinit/runsvinit
CODECGEN_DIR=vendor/github.com/ugorji/go/codec/codecgen
CODECGEN_EXE=$(CODECGEN_DIR)/bin/codecgen_$(shell go env GOHOSTOS)_$(shell go env GOHOSTARCH)
CODECGEN_UID=0
GET_CODECGEN_DEPS=$(shell find $(1) -maxdepth 1 -type f -name '*.go' -not -name '*_test.go' -not -name '*.codecgen.go' -not -name '*.generated.go')
CODECGEN_TARGETS=report/report.codecgen.go render/detailed/detailed.codecgen.go
RM=--rm
RUN_FLAGS=-ti
BUILD_IN_CONTAINER=true
GO_ENV=GOGC=off
GO_BUILD_TAGS='netgo unsafe'
GO_BUILD_FLAGS=-mod vendor -ldflags "-extldflags \"-static\" -X main.version=$(SCOPE_VERSION) -s -w" -tags $(GO_BUILD_TAGS)
GOOS=$(shell go tool dist env | grep GOOS | sed -e 's/GOOS="\(.*\)"/\1/')
ifeq ($(GOOS),linux)
GO_ENV+=CGO_ENABLED=1
endif
ifeq ($(GOARCH),arm)
ARM_CC=CC=/usr/bin/arm-linux-gnueabihf-gcc
endif
GO=env $(GO_ENV) $(ARM_CC) go
NO_CROSS_COMP=unset GOOS GOARCH
GO_HOST=$(NO_CROSS_COMP); env $(GO_ENV) go
WITH_GO_HOST_ENV=$(NO_CROSS_COMP); $(GO_ENV)
IMAGE_TAG=$(shell ./tools/image-tag)
all: $(SCOPE_EXPORT)
update-cri:
curl https://raw.githubusercontent.com/kubernetes/kubernetes/master/pkg/kubelet/apis/cri/runtime/v1alpha2/api.proto > cri/runtime/api.proto
protoc-gen-gofast:
@go get -u -v github.com/gogo/protobuf/protoc-gen-gofast
# Use cri target to download latest cri proto files and regenerate CRI runtime files.
cri: update-cri protoc-gen-gofast
@cd $(GOPATH)/src;protoc --proto_path=$(GOPATH)/src --gofast_out=plugins=grpc:. github.com/weaveworks/scope/cri/runtime/api.proto
docker/weave:
curl -L https://github.com/weaveworks/weave/releases/download/v$(WEAVENET_VERSION)/weave -o docker/weave
chmod u+x docker/weave
docker/weaveutil:
$(SUDO) docker run --rm --entrypoint=cat weaveworks/weaveexec:$(WEAVENET_VERSION) /usr/bin/weaveutil > $@
chmod +x $@
docker/%: %
cp $* docker/
%.tar: docker/Dockerfile.%
$(SUDO) docker build --build-arg=revision=$(GIT_REVISION) -t $(DOCKERHUB_USER)/$* -f $< docker/
$(SUDO) docker tag $(DOCKERHUB_USER)/$* $(DOCKERHUB_USER)/$*:$(IMAGE_TAG)
$(SUDO) docker save $(DOCKERHUB_USER)/$*:latest > $@
$(CLOUD_AGENT_EXPORT): docker/Dockerfile.cloud-agent docker/$(SCOPE_EXE) docker/weave docker/weaveutil
$(SCOPE_EXPORT): docker/Dockerfile.scope $(CLOUD_AGENT_EXPORT) docker/$(RUNSVINIT) docker/demo.json docker/run-app docker/run-probe docker/entrypoint.sh
$(RUNSVINIT): vendor/github.com/peterbourgon/runsvinit/*.go
$(SCOPE_EXE): $(shell find ./ -path ./vendor -prune -o -type f -name '*.go') prog/staticui/staticui.go prog/externalui/externalui.go $(CODECGEN_TARGETS)
report/report.codecgen.go: $(call GET_CODECGEN_DEPS,report/)
render/detailed/detailed.codecgen.go: $(call GET_CODECGEN_DEPS,render/detailed/)
static: prog/staticui/staticui.go prog/externalui/externalui.go
prog/staticui/staticui.go: client/build/index.html
prog/externalui/externalui.go: client/build-external/index.html
ifeq ($(BUILD_IN_CONTAINER),true)
$(SCOPE_EXE) $(RUNSVINIT) lint tests shell prog/staticui/staticui.go prog/externalui/externalui.go: $(SCOPE_BACKEND_BUILD_UPTODATE)
@mkdir -p $(shell pwd)/.pkg
$(SUDO) docker run $(RM) $(RUN_FLAGS) \
-v $(shell pwd):/go/src/github.com/weaveworks/scope \
-v $(shell pwd)/.pkg:/go/pkg \
--net=host \
-e GOARCH -e GOOS -e CIRCLECI -e CIRCLE_BUILD_NUM -e CIRCLE_NODE_TOTAL \
-e CIRCLE_NODE_INDEX -e COVERDIR -e SLOW -e TESTDIRS \
$(SCOPE_BACKEND_BUILD_IMAGE) SCOPE_VERSION=$(SCOPE_VERSION) CODECGEN_UID=$(CODECGEN_UID) $@
else
$(SCOPE_EXE):
time $(GO) build $(GO_BUILD_FLAGS) -o $@ ./$(@D)
%.codecgen.go: $(CODECGEN_EXE)
rm -f $@; $(GO_HOST) build $(GO_BUILD_FLAGS) ./$(@D) # workaround for https://github.com/ugorji/go/issues/145
cd $(@D) && $(WITH_GO_HOST_ENV) GO111MODULE=off $(shell pwd)/$(CODECGEN_EXE) -d $(CODECGEN_UID) -rt $(GO_BUILD_TAGS) -u -o $(@F) $(notdir $(call GET_CODECGEN_DEPS,$(@D)))
$(CODECGEN_EXE): $(CODECGEN_DIR)/*.go
mkdir -p $(@D)
$(GO_HOST) build $(GO_BUILD_FLAGS) -o $@ ./$(CODECGEN_DIR)
$(RUNSVINIT):
time $(GO) build $(GO_BUILD_FLAGS) -o $@ ./$(@D)
shell:
/bin/bash
tests: $(CODECGEN_TARGETS) prog/staticui/staticui.go prog/externalui/externalui.go
./tools/test -no-go-get -tags $(GO_BUILD_TAGS)
lint:
./tools/lint
prog/staticui/staticui.go:
mkdir -p prog/staticui
esc -o $@ -pkg staticui -prefix client/build client/build
prog/externalui/externalui.go:
mkdir -p prog/externalui
esc -o $@ -pkg externalui -prefix client/build-external -include '\.html$$' client/build-external
endif
ifeq ($(BUILD_IN_CONTAINER),true)
SCOPE_UI_TOOLCHAIN=.cache/build_node_modules
SCOPE_UI_TOOLCHAIN_UPTODATE=$(SCOPE_UI_TOOLCHAIN)/.uptodate
$(SCOPE_UI_TOOLCHAIN_UPTODATE): client/yarn.lock $(SCOPE_UI_BUILD_UPTODATE)
mkdir -p $(SCOPE_UI_TOOLCHAIN) client/node_modules
if test "true" != "$(SCOPE_SKIP_UI_ASSETS)"; then \
$(SUDO) docker run $(RM) $(RUN_FLAGS) \
-v $(shell pwd)/.cache:/home/weave/scope/.cache \
-v $(shell pwd)/client:/home/weave/scope/client \
-v $(shell pwd)/$(SCOPE_UI_TOOLCHAIN):/home/weave/scope/client/node_modules \
-w /home/weave/scope/client \
-u $(shell id -u ${USER}):$(shell id -g ${USER}) \
$(SCOPE_UI_BUILD_IMAGE) yarn install; \
fi
touch $(SCOPE_UI_TOOLCHAIN_UPTODATE)
client/build/index.html: $(shell find client/app -type f) $(SCOPE_UI_TOOLCHAIN_UPTODATE)
mkdir -p client/build
if test "true" != "$(SCOPE_SKIP_UI_ASSETS)"; then \
$(SUDO) docker run $(RM) $(RUN_FLAGS) \
-v $(shell pwd)/.cache:/home/weave/scope/.cache \
-v $(shell pwd)/client:/home/weave/scope/client \
-v $(shell pwd)/$(SCOPE_UI_TOOLCHAIN):/home/weave/scope/client/node_modules \
-w /home/weave/scope/client \
-u $(shell id -u ${USER}):$(shell id -g ${USER}) \
$(SCOPE_UI_BUILD_IMAGE) yarn run build; \
fi
client/build-external/index.html: $(shell find client/app -type f) $(SCOPE_UI_TOOLCHAIN_UPTODATE)
mkdir -p client/build-external
if test "true" != "$(SCOPE_SKIP_UI_ASSETS)"; then \
$(SUDO) docker run $(RM) $(RUN_FLAGS) \
-v $(shell pwd)/.cache:/home/weave/scope/.cache \
-v $(shell pwd)/client:/home/weave/scope/client \
-v $(shell pwd)/$(SCOPE_UI_TOOLCHAIN):/home/weave/scope/client/node_modules \
-w /home/weave/scope/client \
-u $(shell id -u ${USER}):$(shell id -g ${USER}) \
$(SCOPE_UI_BUILD_IMAGE) yarn run build-external; \
fi
client-test: $(shell find client/app/scripts -type f) $(SCOPE_UI_TOOLCHAIN_UPTODATE)
$(SUDO) docker run $(RM) $(RUN_FLAGS) \
-v $(shell pwd)/.cache:/home/weave/scope/.cache \
-v $(shell pwd)/client:/home/weave/scope/client \
-v $(shell pwd)/$(SCOPE_UI_TOOLCHAIN):/home/weave/scope/client/node_modules \
-w /home/weave/scope/client \
-u $(id -u ${USER}):$(id -g ${USER}) \
$(SCOPE_UI_BUILD_IMAGE) yarn test
client-lint: $(SCOPE_UI_TOOLCHAIN_UPTODATE)
$(SUDO) docker run $(RM) $(RUN_FLAGS) \
-v $(shell pwd)/.cache:/home/weave/scope/.cache \
-v $(shell pwd)/client:/home/weave/scope/client \
-v $(shell pwd)/$(SCOPE_UI_TOOLCHAIN):/home/weave/scope/client/node_modules \
-w /home/weave/scope/client \
-u $(shell id -u ${USER}):$(shell id -g ${USER}) \
$(SCOPE_UI_BUILD_IMAGE) yarn run lint
client-start: $(SCOPE_UI_TOOLCHAIN_UPTODATE)
$(SUDO) docker run $(RM) $(RUN_FLAGS) --net=host \
-v $(shell pwd)/.cache:/home/weave/scope/.cache \
-v $(shell pwd)/client:/home/weave/scope/client \
-v $(shell pwd)/$(SCOPE_UI_TOOLCHAIN):/home/weave/scope/client/node_modules \
-e WEBPACK_SERVER_HOST \
-w /home/weave/scope/client \
-u $(shell id -u ${USER}):$(shell id -g ${USER}) \
$(SCOPE_UI_BUILD_IMAGE) yarn start
client/bundle/weave-scope.tgz: $(shell find client/app -type f) $(SCOPE_UI_TOOLCHAIN_UPTODATE)
$(SUDO) docker run $(RUN_FLAGS) \
-v $(shell pwd)/.cache:/home/weave/scope/.cache \
-v $(shell pwd)/client:/home/weave/scope/client \
-v $(shell pwd)/$(SCOPE_UI_TOOLCHAIN):/home/weave/scope/client/node_modules \
-v $(shell pwd)/tmp:/home/weave/tmp \
-w /home/weave/scope/client \
-u $(shell id -u ${USER}):$(shell id -g ${USER}) \
$(SCOPE_UI_BUILD_IMAGE) yarn run bundle
else
SCOPE_UI_TOOLCHAIN=client/node_modules
SCOPE_UI_TOOLCHAIN_UPTODATE=$(SCOPE_UI_TOOLCHAIN)/.uptodate
$(SCOPE_UI_TOOLCHAIN_UPTODATE): client/yarn.lock
if test "true" = "$(SCOPE_SKIP_UI_ASSETS)"; then mkdir -p $(SCOPE_UI_TOOLCHAIN); else cd client && yarn install; fi
touch $(SCOPE_UI_TOOLCHAIN_UPTODATE)
client/build/index.html: $(SCOPE_UI_TOOLCHAIN_UPTODATE)
mkdir -p client/build
if test "true" != "$(SCOPE_SKIP_UI_ASSETS)"; then cd client && yarn run build; fi
client/build-external/index.html: $(SCOPE_UI_TOOLCHAIN_UPTODATE)
mkdir -p client/build-external
if test "true" != "$(SCOPE_SKIP_UI_ASSETS)"; then cd client && yarn run build-external; fi
endif
$(SCOPE_UI_BUILD_UPTODATE): client/Dockerfile client/package.json client/webpack.local.config.js client/webpack.production.config.js client/server.js client/.eslintrc
$(SUDO) docker build -t $(SCOPE_UI_BUILD_IMAGE) client
$(SUDO) docker tag $(SCOPE_UI_BUILD_IMAGE) $(SCOPE_UI_BUILD_IMAGE):$(IMAGE_TAG)
touch $@
$(SCOPE_BACKEND_BUILD_UPTODATE): backend/*
$(SUDO) docker build -t $(SCOPE_BACKEND_BUILD_IMAGE) backend
$(SUDO) docker tag $(SCOPE_BACKEND_BUILD_IMAGE) $(SCOPE_BACKEND_BUILD_IMAGE):$(IMAGE_TAG)
touch $@
ui-upload: client/build-external/index.html
AWS_ACCESS_KEY_ID=$$UI_BUCKET_KEY_ID \
AWS_SECRET_ACCESS_KEY=$$UI_BUCKET_KEY_SECRET \
aws s3 cp client/build-external/ s3://static.weave.works/scope-ui/ --recursive --exclude '*.html'
ui-pkg-upload: client/bundle/weave-scope.tgz
AWS_ACCESS_KEY_ID=$$UI_BUCKET_KEY_ID \
AWS_SECRET_ACCESS_KEY=$$UI_BUCKET_KEY_SECRET \
aws s3 cp client/bundle/weave-scope.tgz s3://weaveworks-js-modules/weave-scope/$(shell echo $(SCOPE_VERSION))/weave-scope.tgz
# We don't rmi images here; rm'ing the .uptodate files is enough to
# get the build images rebuilt, and rm'ing the scope exe is enough to
# get the main images rebuilt.
#
# rmi'ng images is desirable sometimes. Invoke `realclean` for that.
clean:
$(GO) clean ./...
rm -rf $(SCOPE_EXPORT) $(SCOPE_UI_BUILD_UPTODATE) $(SCOPE_UI_TOOLCHAIN_UPTODATE) $(SCOPE_BACKEND_BUILD_UPTODATE) \
$(SCOPE_EXE) $(RUNSVINIT) prog/staticui/staticui.go prog/externalui/externalui.go client/build/*.js client/build-external/*.js docker/weave .pkg \
$(CODECGEN_TARGETS) $(CODECGEN_DIR)/bin
clean-codecgen:
rm -rf $(CODECGEN_TARGETS) $(CODECGEN_DIR)/bin
# clean + rmi
#
# Removal of the main images ensures that a subsequent build rebuilds
# all their layers, in particular layers installing packages.
# Crucially, we also remove the *base* images, so their latest
# versions will be pulled.
#
# Doing this is important for release builds.
realclean: clean
rm -rf $(SCOPE_UI_TOOLCHAIN)
$(SUDO) docker rmi -f $(SCOPE_UI_BUILD_IMAGE) $(SCOPE_BACKEND_BUILD_IMAGE) \
$(DOCKERHUB_USER)/scope $(DOCKERHUB_USER)/cloud-agent \
$(DOCKERHUB_USER)/scope:$(IMAGE_TAG) $(DOCKERHUB_USER)/cloud-agent:$(IMAGE_TAG) \
weaveworks/weaveexec:$(WEAVENET_VERSION) \
ubuntu:yakkety alpine:3.5 node:6.9.0 2>/dev/null || true
# Dependencies are intentionally built without enforcing any tags
# since they are built on the host
deps:
$(GO) get -u -f \
github.com/FiloSottile/gvt \
github.com/mattn/goveralls \
github.com/weaveworks/github-release \
github.com/2opremio/trifles/wscat
# This target is only intended for use in the Netlify CI environment, for generating preview pages on feature branches and pull requests.
# We need to obtain the website repository (checked out under `site-build`) and place the `site` directory into the context (`site-build/_weave_scope_docs`).
# We then run make in `site-build` and Netlify will publish the output (`site-build/_site`).
netlify-site-preview:
@mkdir -p site-build
@curl --user $(WEBSITE_GITHUB_USER) --silent 'https://codeload.github.com/weaveworks/website-next/tar.gz/$(WEBSITE_BRANCH)' \
| tar --extract --gunzip --directory site-build --strip 1
@cp -r site site-build/_weave_scope_docs
@$(MAKE) -C site-build netlify_ensure_install
@$(MAKE) -C site-build BUILD_ENV=netlify

NOTICE (3 lines changed)

@@ -1,3 +0,0 @@
Weave
Copyright 2016 Weaveworks Ltd.
This product includes software developed at Weaveworks Ltd.


@@ -1,83 +0,0 @@
# Weave Scope - Troubleshooting & Monitoring for Docker & Kubernetes
[![Circle CI](https://circleci.com/gh/weaveworks/scope/tree/master.svg?style=shield)](https://circleci.com/gh/weaveworks/scope/tree/master)
[![Coverage Status](https://coveralls.io/repos/weaveworks/scope/badge.svg)](https://coveralls.io/r/weaveworks/scope)
[![Go Report Card](https://goreportcard.com/badge/github.com/weaveworks/scope)](https://goreportcard.com/report/github.com/weaveworks/scope)
[![Docker Pulls](https://img.shields.io/docker/pulls/weaveworks/scope.svg?maxAge=604800)](https://hub.docker.com/r/weaveworks/scope/)
[![GoDoc](https://godoc.org/github.com/weaveworks/scope?status.svg)](https://godoc.org/github.com/weaveworks/scope)
[![Good first issues](https://img.shields.io/github/issues/weaveworks/scope/good-first-issue.svg?color=blueviolet&label=good%20first%20issues)](https://github.com/weaveworks/scope/issues?q=is%3Aissue+is%3Aopen+label%3Agood-first-issue)
Weave Scope automatically generates a map of your application, enabling you to
intuitively understand, monitor, and control your containerized, microservices-based application.
### Understand your Docker containers in real time
<img src="imgs/topology.png" width="200" alt="Map you architecture" align="right">
Choose an overview of your container infrastructure, or focus on a specific microservice. Easily identify and correct issues to ensure the stability and performance of your containerized applications.
### Contextual details and deep linking
<img src="imgs/selected.png" width="200" alt="Focus on a single container" align="right">
View contextual metrics, tags, and metadata for your containers. Effortlessly navigate from processes inside your container to the hosts your containers run on, arranged in expandable, sortable tables. Easily find the container using the most CPU or memory for a given host or service.
### Interact with and manage containers
<img src="imgs/terminals.png" width="200" alt="Launch a command line." align="right">
Interact with your containers directly: pause, restart, and stop containers. Launch a command line. All without leaving the Scope browser window.
### Extend and customize via plugins
Add custom details or interactions for your hosts, containers, and/or processes by creating Scope plugins. Or, just choose from some that others have already written at the GitHub [Weaveworks Scope Plugins](https://github.com/weaveworks-plugins/) organization.
## Who is using Scope in production
- [Apester](https://apester.com/)
- [MayaData](https://mayadata.io/) in [MayaOnline / MayaOnPrem](https://mayadata.io/products)
- [Weaveworks](https://www.weave.works/) in [Weave Cloud](https://cloud.weave.works)
If you would like to see your name added, let us know on Slack, or please send a PR.
## <a name="getting-started"></a>Getting Started
**Ensure your computer is behind a firewall that blocks port 4040** then,
```console
sudo curl -L git.io/scope -o /usr/local/bin/scope
sudo chmod a+x /usr/local/bin/scope
scope launch
```
This script downloads and runs a recent Scope image from Docker Hub.
Now, open your web browser to **http://localhost:4040**.
For instructions on installing Scope on [Kubernetes](https://www.weave.works/docs/scope/latest/installing/#k8s), [DCOS](https://www.weave.works/docs/scope/latest/installing/#dcos), or [ECS](https://www.weave.works/docs/scope/latest/installing/#ecs), see [the docs](https://www.weave.works/docs/scope/latest/introducing/).
## <a name="help"></a>Reach Out
We are a very friendly community and love questions, help and feedback.
If you have any questions, feedback, or problems with Scope:
- Docs
- Read [the Weave Scope docs](https://www.weave.works/docs/scope/latest/introducing/)
- Check out the [frequently asked questions](/site/faq.md)
- Learn more about how the [Scope community operates](GOVERNANCE.md)
- Join the discussion
- Invite yourself to the <a href="https://slack.weave.works/" target="_blank">Weave community</a> Slack
- Ask a question on the [#scope](https://weave-community.slack.com/messages/scope/) Slack channel
- Send an email to [Scope community group](https://groups.google.com/forum/#!forum/scope-community)
- Meetings and events
- Join the [Weave User Group](https://www.meetup.com/pro/Weave/) and get invited to online talks, hands-on training and meetups in your area
- Join (and read up on) the regular [Scope community meetings](https://docs.google.com/document/d/103_60TuEkfkhz_h2krrPJH8QOx-vRnPpbcCZqrddE1s/edit)
- Contributing
- Find out how to [contribute to Scope](CONTRIBUTING.md)
- [File an issue](https://github.com/weaveworks/scope/issues/new) or make a pull request for one of our [good first issues](https://github.com/weaveworks/scope/issues?q=is%3Aissue+is%3Aopen+label%3Agood-first-issue)
Your feedback is always welcome!
## License
Scope is licensed under the Apache License, Version 2.0. See [LICENSE](LICENSE) for the full license text.
Find more details about the licenses of vendored code in [VENDORED_CODE.md](VENDORED_CODE.md).


@@ -1,25 +0,0 @@
# Use of vendored code in Weave Scope
Weave Scope is licensed under the [Apache 2.0 license](LICENSE).
Some vendored code is under different licenses, though; all of it ships with the entire text of the license it is under.
- https://github.com/weaveworks/go-checkpoint
  https://github.com/weaveworks/go-cleanhttp
  https://github.com/certifi/gocertifi
  can be found in the ./vendor/ directory and are under MPL-2.0.
- Pulled in by dependencies are
https://github.com/hashicorp/go-version (MPL-2.0)
https://github.com/hashicorp/golang-lru (MPL-2.0)
- One file pulled in by a dependency is under CDDL:
./vendor/github.com/howeyc/gopass/terminal_solaris.go
- The docs of a dependency that's pulled in by a dependency
are under CC-BY 4.0:
./vendor/github.com/docker/go-units/
[One file used in tests](COPYING.LGPL-3) is under the LGPL-3; that is why we ship the license text in this repository.


@@ -1,65 +0,0 @@
package app
import (
"net/http"
"time"
"context"
"github.com/weaveworks/scope/report"
)
// Raw report handler
func makeRawReportHandler(rep Reporter) CtxHandlerFunc {
return func(ctx context.Context, w http.ResponseWriter, r *http.Request) {
timestamp := deserializeTimestamp(r.URL.Query().Get("timestamp"))
rawReport, err := rep.Report(ctx, timestamp)
if err != nil {
respondWith(ctx, w, http.StatusInternalServerError, err)
return
}
censorCfg := report.GetCensorConfigFromRequest(r)
respondWith(ctx, w, http.StatusOK, report.CensorRawReport(rawReport, censorCfg))
}
}
type probeDesc struct {
ID string `json:"id"`
Hostname string `json:"hostname"`
Version string `json:"version"`
LastSeen time.Time `json:"lastSeen"`
}
// Probe handler
func makeProbeHandler(rep Reporter) CtxHandlerFunc {
return func(ctx context.Context, w http.ResponseWriter, r *http.Request) {
r.ParseForm()
if _, sparse := r.Form["sparse"]; sparse {
// if we have reports, we must have connected probes
hasProbes, err := rep.HasReports(ctx, time.Now())
if err != nil {
respondWith(ctx, w, http.StatusInternalServerError, err)
return
}
respondWith(ctx, w, http.StatusOK, hasProbes)
return
}
rpt, err := rep.Report(ctx, time.Now())
if err != nil {
respondWith(ctx, w, http.StatusInternalServerError, err)
return
}
result := []probeDesc{}
for _, n := range rpt.Host.Nodes {
id, _ := n.Latest.Lookup(report.ControlProbeID)
hostname, _ := n.Latest.Lookup(report.HostName)
version, dt, _ := n.Latest.LookupEntry(report.ScopeVersion)
result = append(result, probeDesc{
ID: id,
Hostname: hostname,
Version: version,
LastSeen: dt,
})
}
respondWith(ctx, w, http.StatusOK, result)
}
}
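// Illustrative only (not part of the original file): assuming this handler
// is mounted on a probes endpoint, a plain GET returns the JSON list of
// connected probes, while adding ?sparse short-circuits to a bare boolean:
//
//	GET <probes endpoint>         -> [{"id": ..., "hostname": ...}, ...]
//	GET <probes endpoint>?sparse  -> true or false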


@@ -1,35 +0,0 @@
package app_test
import (
"net/http/httptest"
"testing"
"github.com/gorilla/mux"
"github.com/ugorji/go/codec"
"github.com/weaveworks/scope/app"
"github.com/weaveworks/scope/report"
"github.com/weaveworks/scope/test/fixture"
)
func topologyServer() *httptest.Server {
router := mux.NewRouter().SkipClean(true)
app.RegisterTopologyRoutes(router, app.StaticCollector(fixture.Report), map[string]bool{"foo_capability": true})
return httptest.NewServer(router)
}
func TestAPIReport(t *testing.T) {
ts := topologyServer()
defer ts.Close()
is404(t, ts, "/api/report/foobar")
var body = getRawJSON(t, ts, "/api/report")
// fmt.Printf("Body: %v\n", string(body))
var r report.Report
decoder := codec.NewDecoderBytes(body, &codec.JsonHandle{})
if err := decoder.Decode(&r); err != nil {
t.Fatalf("JSON parse error: %s", err)
}
}


@@ -1,590 +0,0 @@
package app
import (
"context"
"fmt"
"net/http"
"net/url"
"sort"
"strings"
"sync"
"time"
"github.com/gorilla/mux"
opentracing "github.com/opentracing/opentracing-go"
log "github.com/sirupsen/logrus"
"github.com/weaveworks/scope/probe/docker"
"github.com/weaveworks/scope/probe/kubernetes"
"github.com/weaveworks/scope/render"
"github.com/weaveworks/scope/report"
)
const (
apiTopologyURL = "/api/topology/"
processesID = "processes"
processesByNameID = "processes-by-name"
systemGroupID = "system"
containersID = "containers"
containersByHostnameID = "containers-by-hostname"
containersByImageID = "containers-by-image"
podsID = "pods"
kubeControllersID = "kube-controllers"
servicesID = "services"
hostsID = "hosts"
weaveID = "weave"
ecsTasksID = "ecs-tasks"
ecsServicesID = "ecs-services"
swarmServicesID = "swarm-services"
)
var (
topologyRegistry = MakeRegistry()
unmanagedFilter = APITopologyOptionGroup{
ID: "pseudo",
Default: "hide",
Options: []APITopologyOption{
{Value: "show", Label: "Show unmanaged", filter: nil, filterPseudo: false},
{Value: "hide", Label: "Hide unmanaged", filter: render.IsNotPseudo, filterPseudo: true},
},
}
storageFilter = APITopologyOptionGroup{
ID: "storage",
Default: "hide",
Options: []APITopologyOption{
{Value: "show", Label: "Show storage", filter: nil, filterPseudo: false},
{Value: "hide", Label: "Hide storage", filter: render.IsPodComponent, filterPseudo: false},
},
}
snapshotFilter = APITopologyOptionGroup{
ID: "snapshot",
Default: "hide",
Options: []APITopologyOption{
{Value: "show", Label: "Show snapshots", filter: nil, filterPseudo: false},
{Value: "hide", Label: "Hide snapshots", filter: render.IsNonSnapshotComponent, filterPseudo: false},
},
}
)
// namespaceFilters generates a namespace selector option group based on the given namespaces
func namespaceFilters(namespaces []string, noneLabel string) APITopologyOptionGroup {
options := APITopologyOptionGroup{ID: "namespace", Default: "", SelectType: "union", NoneLabel: noneLabel}
for _, namespace := range namespaces {
options.Options = append(options.Options, APITopologyOption{
Value: namespace, Label: namespace, filter: render.IsNamespace(namespace), filterPseudo: false,
})
}
return options
}
// updateFilters updates the available filters based on the current report.
func updateFilters(rpt report.Report, topologies []APITopologyDesc) []APITopologyDesc {
topologies = updateKubeFilters(rpt, topologies)
topologies = updateSwarmFilters(rpt, topologies)
return topologies
}
func updateSwarmFilters(rpt report.Report, topologies []APITopologyDesc) []APITopologyDesc {
namespaces := map[string]struct{}{}
for _, n := range rpt.SwarmService.Nodes {
if namespace, ok := n.Latest.Lookup(docker.StackNamespace); ok {
namespaces[namespace] = struct{}{}
}
}
if len(namespaces) == 0 {
// We only want to apply filters when we have swarm-related nodes,
// so if we don't then return early
return topologies
}
ns := []string{}
for namespace := range namespaces {
ns = append(ns, namespace)
}
topologies = append([]APITopologyDesc{}, topologies...) // Make a copy so we can make changes safely
for i, t := range topologies {
if t.id == containersID || t.id == swarmServicesID {
topologies[i] = mergeTopologyFilters(t, []APITopologyOptionGroup{
namespaceFilters(ns, "All Stacks"),
})
}
}
return topologies
}
func updateKubeFilters(rpt report.Report, topologies []APITopologyDesc) []APITopologyDesc {
ns := []string{}
for _, n := range rpt.Namespace.Nodes {
name, ok := n.Latest.Lookup(kubernetes.Name)
if !ok {
continue
}
ns = append(ns, name)
}
if len(ns) == 0 {
return topologies
}
sort.Strings(ns)
topologies = append([]APITopologyDesc{}, topologies...) // Make a copy so we can make changes safely
for i, t := range topologies {
if t.id == containersID || t.id == podsID || t.id == servicesID || t.id == kubeControllersID {
topologies[i] = mergeTopologyFilters(t, []APITopologyOptionGroup{
namespaceFilters(ns, "All Namespaces"),
})
}
}
return topologies
}
// mergeTopologyFilters recursively merges in new options on a topology description
func mergeTopologyFilters(t APITopologyDesc, options []APITopologyOptionGroup) APITopologyDesc {
t.Options = append(append([]APITopologyOptionGroup{}, t.Options...), options...)
newSubTopologies := make([]APITopologyDesc, len(t.SubTopologies))
for i, sub := range t.SubTopologies {
newSubTopologies[i] = mergeTopologyFilters(sub, options)
}
t.SubTopologies = newSubTopologies
return t
}
// MakeAPITopologyOption provides an external interface to the package for creating an APITopologyOption.
func MakeAPITopologyOption(value string, label string, filterFunc render.FilterFunc, pseudo bool) APITopologyOption {
return APITopologyOption{Value: value, Label: label, filter: filterFunc, filterPseudo: pseudo}
}
// Registry is a threadsafe store of the available topologies
type Registry struct {
sync.RWMutex
items map[string]APITopologyDesc
}
// MakeRegistry returns a new Registry
func MakeRegistry() *Registry {
registry := &Registry{
items: map[string]APITopologyDesc{},
}
containerFilters := []APITopologyOptionGroup{
{
ID: systemGroupID,
Default: "application",
Options: []APITopologyOption{
{Value: "all", Label: "All", filter: nil, filterPseudo: false},
{Value: "system", Label: "System containers", filter: render.IsSystem, filterPseudo: false},
{Value: "application", Label: "Application containers", filter: render.IsApplication, filterPseudo: false}},
},
{
ID: "stopped",
Default: "running",
Options: []APITopologyOption{
{Value: "stopped", Label: "Stopped containers", filter: render.IsStopped, filterPseudo: false},
{Value: "running", Label: "Running containers", filter: render.IsRunning, filterPseudo: false},
{Value: "both", Label: "Both", filter: nil, filterPseudo: false},
},
},
{
ID: "pseudo",
Default: "hide",
Options: []APITopologyOption{
{Value: "show", Label: "Show uncontained", filter: nil, filterPseudo: false},
{Value: "hide", Label: "Hide uncontained", filter: render.IsNotPseudo, filterPseudo: true},
},
},
}
unconnectedFilter := []APITopologyOptionGroup{
{
ID: "unconnected",
Default: "hide",
Options: []APITopologyOption{
{Value: "show", Label: "Show unconnected", filter: nil, filterPseudo: false},
{Value: "hide", Label: "Hide unconnected", filter: render.IsConnected, filterPseudo: false},
},
},
}
// Topology option labels should describe the current state. Each label must
// start with the verb that gets to that state.
registry.Add(
APITopologyDesc{
id: processesID,
renderer: render.ConnectedProcessRenderer,
Name: "Processes",
Rank: 1,
Options: unconnectedFilter,
HideIfEmpty: true,
},
APITopologyDesc{
id: processesByNameID,
parent: processesID,
renderer: render.ProcessNameRenderer,
Name: "by name",
Options: unconnectedFilter,
HideIfEmpty: true,
},
APITopologyDesc{
id: containersID,
renderer: render.ContainerWithImageNameRenderer,
Name: "Containers",
Rank: 2,
Options: containerFilters,
},
APITopologyDesc{
id: containersByHostnameID,
parent: containersID,
renderer: render.ContainerHostnameRenderer,
Name: "by DNS name",
Options: containerFilters,
},
APITopologyDesc{
id: containersByImageID,
parent: containersID,
renderer: render.ContainerImageRenderer,
Name: "by image",
Options: containerFilters,
},
APITopologyDesc{
id: podsID,
renderer: render.PodRenderer,
Name: "Pods",
Rank: 3,
Options: []APITopologyOptionGroup{snapshotFilter, storageFilter, unmanagedFilter},
HideIfEmpty: true,
},
APITopologyDesc{
id: kubeControllersID,
parent: podsID,
renderer: render.KubeControllerRenderer,
Name: "Controllers",
Options: []APITopologyOptionGroup{unmanagedFilter},
HideIfEmpty: true,
},
APITopologyDesc{
id: servicesID,
parent: podsID,
renderer: render.PodServiceRenderer,
Name: "Services",
Options: []APITopologyOptionGroup{unmanagedFilter},
HideIfEmpty: true,
},
APITopologyDesc{
id: ecsTasksID,
renderer: render.ECSTaskRenderer,
Name: "Tasks",
Rank: 3,
Options: []APITopologyOptionGroup{unmanagedFilter},
HideIfEmpty: true,
},
APITopologyDesc{
id: ecsServicesID,
parent: ecsTasksID,
renderer: render.ECSServiceRenderer,
Name: "Services",
Options: []APITopologyOptionGroup{unmanagedFilter},
HideIfEmpty: true,
},
APITopologyDesc{
id: swarmServicesID,
renderer: render.SwarmServiceRenderer,
Name: "Services",
Rank: 3,
Options: []APITopologyOptionGroup{unmanagedFilter},
HideIfEmpty: true,
},
APITopologyDesc{
id: hostsID,
renderer: render.HostRenderer,
Name: "Hosts",
Rank: 4,
},
APITopologyDesc{
id: weaveID,
parent: hostsID,
renderer: render.WeaveRenderer,
Name: "Weave Net",
},
)
return registry
}
// APITopologyDesc is returned in a list by the /api/topology handler.
type APITopologyDesc struct {
id string
parent string
renderer render.Renderer
Name string `json:"name"`
Rank int `json:"rank"`
HideIfEmpty bool `json:"hide_if_empty"`
Options []APITopologyOptionGroup `json:"options"`
URL string `json:"url"`
SubTopologies []APITopologyDesc `json:"sub_topologies,omitempty"`
Stats topologyStats `json:"stats,omitempty"`
}
type byName []APITopologyDesc
func (a byName) Len() int { return len(a) }
func (a byName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a byName) Less(i, j int) bool { return a[i].Name < a[j].Name }
// APITopologyOptionGroup describes a group of APITopologyOptions
type APITopologyOptionGroup struct {
ID string `json:"id"`
// Default value for the option. Used if the value is omitted; not used if the value is ""
Default string `json:"defaultValue"`
Options []APITopologyOption `json:"options,omitempty"`
// SelectType describes how options can be picked. Currently defined values:
// "one": Default if empty. Exactly one option may be picked from the list.
// "union": Any number of options may be picked. Nodes matching any option filter selected are displayed.
// Value and Default should be a ","-separated list.
SelectType string `json:"selectType,omitempty"`
// For "union" type, this is the label the UI should use to represent the case where nothing is selected
NoneLabel string `json:"noneLabel,omitempty"`
}
// filter returns the render filter to apply for the given value of this option group, or nil if none applies.
func (g APITopologyOptionGroup) filter(value string) render.FilterFunc {
var values []string
switch g.SelectType {
case "", "one":
values = []string{value}
case "union":
values = strings.Split(value, ",")
default:
log.Errorf("Invalid select type %s for option group %s, ignoring option", g.SelectType, g.ID)
return nil
}
filters := []render.FilterFunc{}
for _, opt := range g.Options {
for _, v := range values {
if v != opt.Value {
continue
}
var filter render.FilterFunc
if opt.filter == nil {
// No filter means match everything (pseudo doesn't matter)
filter = func(n report.Node) bool { return true }
} else if opt.filterPseudo {
// Apply filter to pseudo topologies also
filter = opt.filter
} else {
// Allow all pseudo topology nodes, only apply filter to non-pseudo
filter = render.AnyFilterFunc(render.IsPseudoTopology, opt.filter)
}
filters = append(filters, filter)
}
}
if len(filters) == 0 {
return nil
}
return render.AnyFilterFunc(filters...)
}
// APITopologyOption describes a &param=value to a given topology.
type APITopologyOption struct {
Value string `json:"value"`
Label string `json:"label"`
filter render.FilterFunc
filterPseudo bool
}
type topologyStats struct {
NodeCount int `json:"node_count"`
NonpseudoNodeCount int `json:"nonpseudo_node_count"`
EdgeCount int `json:"edge_count"`
FilteredNodes int `json:"filtered_nodes"`
}
// deserializeTimestamp converts the RFC 3339 timestamp query param into a proper timestamp.
func deserializeTimestamp(timestamp string) time.Time {
if timestamp != "" {
result, err := time.Parse(time.RFC3339, timestamp)
if err != nil {
log.Errorf("Error parsing timestamp '%s' - make sure the time format is correct", timestamp)
}
return result
}
// Default to current time if no timestamp is provided.
return time.Now()
}
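// Illustrative only (not part of the original file): the parameter must be
// RFC 3339, e.g.
//
//	deserializeTimestamp("2020-07-28T13:32:32Z") // that instant
//	deserializeTimestamp("")                     // defaults to time.Now()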
// AddContainerFilters adds to the default Registry (topologyRegistry)'s containerFilters
func AddContainerFilters(newFilters ...APITopologyOption) {
topologyRegistry.AddContainerFilters(newFilters...)
}
// AddContainerFilters adds container filters to this Registry
func (r *Registry) AddContainerFilters(newFilters ...APITopologyOption) {
r.Lock()
defer r.Unlock()
for _, key := range []string{containersID, containersByHostnameID, containersByImageID} {
for i := range r.items[key].Options {
if r.items[key].Options[i].ID == systemGroupID {
r.items[key].Options[i].Options = append(r.items[key].Options[i].Options, newFilters...)
break
}
}
}
}
// Add inserts a topologyDesc to the Registry's items map
func (r *Registry) Add(ts ...APITopologyDesc) {
r.Lock()
defer r.Unlock()
for _, t := range ts {
t.URL = apiTopologyURL + t.id
t.renderer = render.Memoise(t.renderer)
if t.parent != "" {
parent := r.items[t.parent]
parent.SubTopologies = append(parent.SubTopologies, t)
r.items[t.parent] = parent
}
r.items[t.id] = t
}
}
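// Illustrative only (not part of the original file), mirroring the calls in
// MakeRegistry: adding a child after its parent nests it under the parent's
// SubTopologies, and both stay addressable by id:
//
//	r.Add(APITopologyDesc{id: "pods", renderer: render.PodRenderer, Name: "Pods"})
//	r.Add(APITopologyDesc{id: "services", parent: "pods", renderer: render.PodServiceRenderer, Name: "Services"})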
func (r *Registry) get(name string) (APITopologyDesc, bool) {
r.RLock()
defer r.RUnlock()
t, ok := r.items[name]
return t, ok
}
func (r *Registry) walk(f func(APITopologyDesc)) {
r.RLock()
defer r.RUnlock()
descs := []APITopologyDesc{}
for _, desc := range r.items {
if desc.parent != "" {
continue
}
descs = append(descs, desc)
}
sort.Sort(byName(descs))
for _, desc := range descs {
f(desc)
}
}
// makeTopologyList returns a handler that yields an APITopologyList.
func (r *Registry) makeTopologyList(rep Reporter) CtxHandlerFunc {
return func(ctx context.Context, w http.ResponseWriter, req *http.Request) {
timestamp := deserializeTimestamp(req.URL.Query().Get("timestamp"))
report, err := rep.Report(ctx, timestamp)
if err != nil {
respondWith(ctx, w, http.StatusInternalServerError, err)
return
}
respondWith(ctx, w, http.StatusOK, r.renderTopologies(ctx, report, req))
}
}
func (r *Registry) renderTopologies(ctx context.Context, rpt report.Report, req *http.Request) []APITopologyDesc {
span, ctx := opentracing.StartSpanFromContext(ctx, "app.renderTopologies")
defer span.Finish()
topologies := []APITopologyDesc{}
req.ParseForm()
r.walk(func(desc APITopologyDesc) {
if ctx.Err() != nil {
return
}
renderer, filter, _ := r.RendererForTopology(desc.id, req.Form, rpt)
desc.Stats = computeStats(ctx, rpt, renderer, filter)
for i, sub := range desc.SubTopologies {
renderer, filter, _ := r.RendererForTopology(sub.id, req.Form, rpt)
desc.SubTopologies[i].Stats = computeStats(ctx, rpt, renderer, filter)
}
topologies = append(topologies, desc)
})
return updateFilters(rpt, topologies)
}
func computeStats(ctx context.Context, rpt report.Report, renderer render.Renderer, transformer render.Transformer) topologyStats {
span, ctx := opentracing.StartSpanFromContext(ctx, "app.computeStats")
defer span.Finish()
var (
nodes int
realNodes int
edges int
)
r := render.Render(ctx, rpt, renderer, transformer)
for _, n := range r.Nodes {
nodes++
if n.Topology != render.Pseudo {
realNodes++
}
edges += len(n.Adjacency)
}
return topologyStats{
NodeCount: nodes,
NonpseudoNodeCount: realNodes,
EdgeCount: edges,
FilteredNodes: r.Filtered,
}
}
// RendererForTopology returns the renderer and transformer for the given topology, applying any filters specified in the URL values.
func (r *Registry) RendererForTopology(topologyID string, values url.Values, rpt report.Report) (render.Renderer, render.Transformer, error) {
topology, ok := r.get(topologyID)
if !ok {
return nil, nil, fmt.Errorf("topology not found: %s", topologyID)
}
topology = updateFilters(rpt, []APITopologyDesc{topology})[0]
if len(values) == 0 {
// if no options were provided, only apply the base filter
return topology.renderer, render.FilterUnconnectedPseudo, nil
}
var filters []render.FilterFunc
for _, group := range topology.Options {
value := group.Default
if vs := values[group.ID]; len(vs) > 0 {
value = vs[0]
}
if filter := group.filter(value); filter != nil {
filters = append(filters, filter)
}
}
if len(filters) > 0 {
return topology.renderer, render.Transformers([]render.Transformer{render.ComposeFilterFuncs(filters...), render.FilterUnconnectedPseudo}), nil
}
return topology.renderer, render.FilterUnconnectedPseudo, nil
}
type reporterHandler func(context.Context, Reporter, http.ResponseWriter, *http.Request)
func captureReporter(rep Reporter, f reporterHandler) CtxHandlerFunc {
return func(ctx context.Context, w http.ResponseWriter, r *http.Request) {
f(ctx, rep, w, r)
}
}
func (r *Registry) captureRenderer(rep Reporter, f rendererHandler) CtxHandlerFunc {
return func(ctx context.Context, w http.ResponseWriter, req *http.Request) {
var (
topologyID = mux.Vars(req)["topology"]
timestamp = deserializeTimestamp(req.URL.Query().Get("timestamp"))
)
if _, ok := r.get(topologyID); !ok {
http.NotFound(w, req)
return
}
rpt, err := rep.Report(ctx, timestamp)
if err != nil {
respondWith(ctx, w, http.StatusInternalServerError, err)
return
}
req.ParseForm()
renderer, filter, err := r.RendererForTopology(topologyID, req.Form, rpt)
if err != nil {
respondWith(ctx, w, http.StatusInternalServerError, err)
return
}
f(ctx, renderer, filter, RenderContextForReporter(rep, rpt), w, req)
}
}


@@ -1,252 +0,0 @@
package app_test
import (
"bytes"
"context"
"net/http/httptest"
"net/url"
"testing"
"time"
"github.com/gorilla/mux"
"github.com/ugorji/go/codec"
apiv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/weaveworks/common/test"
"github.com/weaveworks/scope/app"
"github.com/weaveworks/scope/probe/docker"
"github.com/weaveworks/scope/probe/kubernetes"
"github.com/weaveworks/scope/render"
"github.com/weaveworks/scope/render/detailed"
"github.com/weaveworks/scope/render/expected"
"github.com/weaveworks/scope/report"
"github.com/weaveworks/scope/test/fixture"
"github.com/weaveworks/scope/test/reflect"
"github.com/weaveworks/scope/test/utils"
)
const (
systemGroupID = "system"
customAPITopologyOptionFilterID = "containerLabelFilter0"
)
func TestAPITopology(t *testing.T) {
ts := topologyServer()
defer ts.Close()
body := getRawJSON(t, ts, "/api/topology")
var topologies []app.APITopologyDesc
decoder := codec.NewDecoderBytes(body, &codec.JsonHandle{})
if err := decoder.Decode(&topologies); err != nil {
t.Fatalf("JSON parse error: %s", err)
}
equals(t, 6, len(topologies))
for _, topology := range topologies {
is200(t, ts, topology.URL)
for _, subTopology := range topology.SubTopologies {
is200(t, ts, subTopology.URL)
}
// TODO: add ECS nodes in report fixture
if topology.Name == "Tasks" || topology.Name == "Services" {
continue
}
if have := topology.Stats.EdgeCount; have <= 0 {
t.Errorf("EdgeCount isn't positive for %s: %d", topology.Name, have)
}
if have := topology.Stats.NodeCount; have <= 0 {
t.Errorf("NodeCount isn't positive for %s: %d", topology.Name, have)
}
if have := topology.Stats.NonpseudoNodeCount; have <= 0 {
t.Errorf("NonpseudoNodeCount isn't positive for %s: %d", topology.Name, have)
}
}
}
func TestContainerLabelFilter(t *testing.T) {
topologySummaries, err := getTestContainerLabelFilterTopologySummary(t, false)
if err != nil {
t.Fatalf("Topology Registry Report error: %s", err)
}
// only the filtered container with fixture.TestLabelKey1 should be present
equals(t, 1, len(topologySummaries))
for key := range topologySummaries {
equals(t, report.MakeContainerNodeID(fixture.ClientContainerID), key)
}
}
func TestContainerLabelFilterExclude(t *testing.T) {
topologySummaries, err := getTestContainerLabelFilterTopologySummary(t, true)
if err != nil {
t.Fatalf("Topology Registry Report error: %s", err)
}
// all containers but the excluded container should be present
for key := range topologySummaries {
id := report.MakeContainerNodeID(fixture.ServerContainerID)
if id == key {
t.Errorf("Didn't expect to find %q in report", id)
}
}
}
func TestRendererForTopologyWithFiltering(t *testing.T) {
ts := topologyServer()
defer ts.Close()
topologyRegistry := app.MakeRegistry()
option := app.MakeAPITopologyOption(customAPITopologyOptionFilterID, "title", render.IsApplication, false)
topologyRegistry.AddContainerFilters(option)
urlvalues := url.Values{}
urlvalues.Set(systemGroupID, customAPITopologyOptionFilterID)
urlvalues.Set("stopped", "running")
urlvalues.Set("pseudo", "hide")
renderer, filter, err := topologyRegistry.RendererForTopology("containers", urlvalues, fixture.Report)
if err != nil {
t.Fatalf("Topology Registry Report error: %s", err)
}
input := fixture.Report.Copy()
input.Container.Nodes[fixture.ClientContainerNodeID] = input.Container.Nodes[fixture.ClientContainerNodeID].WithLatests(map[string]string{
docker.LabelPrefix + "works.weave.role": "system",
})
have := utils.Prune(render.Render(context.Background(), input, renderer, filter).Nodes)
want := utils.Prune(expected.RenderedContainers.Copy())
delete(want, fixture.ClientContainerNodeID)
delete(want, render.MakePseudoNodeID(render.UncontainedID, fixture.ServerHostID))
delete(want, render.OutgoingInternetID)
if !reflect.DeepEqual(want, have) {
t.Error(test.Diff(want, have))
}
}
func TestRendererForTopologyNoFiltering(t *testing.T) {
ts := topologyServer()
defer ts.Close()
topologyRegistry := app.MakeRegistry()
option := app.MakeAPITopologyOption(customAPITopologyOptionFilterID, "title", nil, false)
topologyRegistry.AddContainerFilters(option)
urlvalues := url.Values{}
urlvalues.Set(systemGroupID, customAPITopologyOptionFilterID)
urlvalues.Set("stopped", "running")
urlvalues.Set("pseudo", "hide")
renderer, filter, err := topologyRegistry.RendererForTopology("containers", urlvalues, fixture.Report)
if err != nil {
t.Fatalf("Topology Registry Report error: %s", err)
}
input := fixture.Report.Copy()
input.Container.Nodes[fixture.ClientContainerNodeID] = input.Container.Nodes[fixture.ClientContainerNodeID].WithLatests(map[string]string{
docker.LabelPrefix + "works.weave.role": "system",
})
have := utils.Prune(render.Render(context.Background(), input, renderer, filter).Nodes)
want := utils.Prune(expected.RenderedContainers.Copy())
delete(want, render.MakePseudoNodeID(render.UncontainedID, fixture.ServerHostID))
delete(want, render.OutgoingInternetID)
if !reflect.DeepEqual(want, have) {
t.Error(test.Diff(want, have))
}
}
func getTestContainerLabelFilterTopologySummary(t *testing.T, exclude bool) (detailed.NodeSummaries, error) {
ts := topologyServer()
defer ts.Close()
var (
topologyRegistry = app.MakeRegistry()
filterFunc render.FilterFunc
)
if exclude {
filterFunc = render.DoesNotHaveLabel(fixture.TestLabelKey2, fixture.ApplicationLabelValue2)
} else {
filterFunc = render.HasLabel(fixture.TestLabelKey1, fixture.ApplicationLabelValue1)
}
option := app.MakeAPITopologyOption(customAPITopologyOptionFilterID, "title", filterFunc, false)
topologyRegistry.AddContainerFilters(option)
urlvalues := url.Values{}
urlvalues.Set(systemGroupID, customAPITopologyOptionFilterID)
urlvalues.Set("stopped", "running")
urlvalues.Set("pseudo", "hide")
renderer, filter, err := topologyRegistry.RendererForTopology("containers", urlvalues, fixture.Report)
if err != nil {
return nil, err
}
ctx := context.Background()
return detailed.Summaries(ctx, detailed.RenderContext{Report: fixture.Report}, render.Render(ctx, fixture.Report, renderer, filter).Nodes), nil
}
func TestAPITopologyAddsKubernetes(t *testing.T) {
router := mux.NewRouter()
c := app.NewCollector(1 * time.Minute)
app.RegisterReportPostHandler(c, router)
app.RegisterTopologyRoutes(router, c, map[string]bool{"foo_capability": true})
ts := httptest.NewServer(router)
defer ts.Close()
body := getRawJSON(t, ts, "/api/topology")
var topologies []app.APITopologyDesc
decoder := codec.NewDecoderBytes(body, &codec.JsonHandle{})
if err := decoder.Decode(&topologies); err != nil {
t.Fatalf("JSON parse error: %s", err)
}
equals(t, 6, len(topologies))
// Enable the kubernetes topologies
rpt := report.MakeReport()
rpt.Pod = report.MakeTopology()
rpt.Pod.Nodes[fixture.ClientPodNodeID] = kubernetes.NewPod(&apiv1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pong-a",
Namespace: "ping",
Labels: map[string]string{"ponger": "true"},
},
Status: apiv1.PodStatus{
HostIP: "1.2.3.4",
ContainerStatuses: []apiv1.ContainerStatus{
{ContainerID: "container1"},
{ContainerID: "container2"},
},
},
Spec: apiv1.PodSpec{
SecurityContext: &apiv1.PodSecurityContext{},
},
}).GetNode("")
buf := &bytes.Buffer{}
encoder := codec.NewEncoder(buf, &codec.MsgpackHandle{})
if err := encoder.Encode(rpt); err != nil {
t.Fatalf("Msgpack encoding error: %s", err)
}
checkRequest(t, ts, "POST", "/api/report", buf.Bytes())
body = getRawJSON(t, ts, "/api/topology")
decoder = codec.NewDecoderBytes(body, &codec.JsonHandle{})
if err := decoder.Decode(&topologies); err != nil {
t.Fatalf("JSON parse error: %s", err)
}
equals(t, 6, len(topologies))
found := false
for _, topology := range topologies {
if topology.Name == "Pods" {
found = true
break
}
}
if !found {
t.Error("Could not find pods topology")
}
}


@@ -1,208 +0,0 @@
package app
import (
"net/http"
"net/url"
"time"
"context"
"github.com/gorilla/mux"
ot "github.com/opentracing/opentracing-go"
otlog "github.com/opentracing/opentracing-go/log"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"github.com/weaveworks/scope/common/xfer"
"github.com/weaveworks/scope/render"
"github.com/weaveworks/scope/render/detailed"
"github.com/weaveworks/scope/report"
)
const (
websocketLoop = 1 * time.Second
)
// APITopology is returned by the /api/topology/{name} handler.
type APITopology struct {
Nodes detailed.NodeSummaries `json:"nodes"`
}
// APINode is returned by the /api/topology/{name}/{id} handler.
type APINode struct {
Node detailed.Node `json:"node"`
}
// RenderContextForReporter creates the rendering context for the given reporter.
func RenderContextForReporter(rep Reporter, r report.Report) detailed.RenderContext {
rc := detailed.RenderContext{Report: r}
if wrep, ok := rep.(WebReporter); ok {
rc.MetricsGraphURL = wrep.MetricsGraphURL
}
return rc
}
type rendererHandler func(context.Context, render.Renderer, render.Transformer, detailed.RenderContext, http.ResponseWriter, *http.Request)
// Full topology.
func handleTopology(ctx context.Context, renderer render.Renderer, transformer render.Transformer, rc detailed.RenderContext, w http.ResponseWriter, r *http.Request) {
censorCfg := report.GetCensorConfigFromRequest(r)
nodeSummaries := detailed.Summaries(ctx, rc, render.Render(ctx, rc.Report, renderer, transformer).Nodes)
respondWith(ctx, w, http.StatusOK, APITopology{
Nodes: detailed.CensorNodeSummaries(nodeSummaries, censorCfg),
})
}
// Individual nodes.
func handleNode(ctx context.Context, renderer render.Renderer, transformer render.Transformer, rc detailed.RenderContext, w http.ResponseWriter, r *http.Request) {
var (
censorCfg = report.GetCensorConfigFromRequest(r)
vars = mux.Vars(r)
topologyID = vars["topology"]
nodeID = vars["id"]
)
// We must not lose the node during filtering. We achieve that by
// (1) rendering the report with the base renderer, without
// filtering, which gives us the node (if it exists at all), and
// then (2) applying the filter separately to that result. If the
// node is lost in the second step, we simply put it back.
nodes := renderer.Render(ctx, rc.Report)
node, ok := nodes.Nodes[nodeID]
if !ok {
http.NotFound(w, r)
return
}
nodes = transformer.Transform(nodes)
if filteredNode, ok := nodes.Nodes[nodeID]; ok {
node = filteredNode
} else { // we've lost the node during filtering; put it back
nodes.Nodes[nodeID] = node
nodes.Filtered--
}
rawNode := detailed.MakeNode(topologyID, rc, nodes.Nodes, node)
respondWith(ctx, w, http.StatusOK, APINode{Node: detailed.CensorNode(rawNode, censorCfg)})
}
// Websocket for the full topology.
func handleWebsocket(
ctx context.Context,
rep Reporter,
w http.ResponseWriter,
r *http.Request,
) {
if err := r.ParseForm(); err != nil {
respondWith(ctx, w, http.StatusInternalServerError, err)
return
}
loop := websocketLoop
if t := r.Form.Get("t"); t != "" {
var err error
if loop, err = time.ParseDuration(t); err != nil {
respondWith(ctx, w, http.StatusBadRequest, t)
return
}
}
conn, err := xfer.Upgrade(w, r, nil)
if err != nil {
// log.Info("Upgrade:", err)
return
}
defer conn.Close()
quit := make(chan struct{})
go func(c xfer.Websocket) {
for { // just discard everything the browser sends
if _, _, err := c.ReadMessage(); err != nil {
if !xfer.IsExpectedWSCloseError(err) {
log.Error("err:", err)
}
close(quit)
break
}
}
}(conn)
wc := websocketState{
rep: rep,
values: r.Form,
conn: conn,
topologyID: mux.Vars(r)["topology"],
startReportingAt: deserializeTimestamp(r.Form.Get("timestamp")),
censorCfg: report.GetCensorConfigFromRequest(r),
channelOpenedAt: time.Now(),
}
wait := make(chan struct{}, 1)
rep.WaitOn(ctx, wait)
defer rep.UnWait(ctx, wait)
tick := time.Tick(loop)
for {
if err := wc.update(ctx); err != nil {
log.Errorf("%v", err)
return
}
select {
case <-wait:
case <-tick:
case <-quit:
return
}
}
}
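// Illustrative only (not part of the original file): the update interval
// defaults to websocketLoop and can be overridden per connection with the
// "t" form value, parsed by time.ParseDuration, e.g.
//
//	/api/topology/processes/ws?t=5s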
type websocketState struct {
rep Reporter
values url.Values
conn xfer.Websocket
previousTopo detailed.NodeSummaries
topologyID string
startReportingAt time.Time
reportTimestamp time.Time
censorCfg report.CensorConfig
channelOpenedAt time.Time
}
func (wc *websocketState) update(ctx context.Context) error {
span := ot.StartSpan("websocket.Render", ot.Tag{"topology", wc.topologyID})
defer span.Finish()
ctx = ot.ContextWithSpan(ctx, span)
// We measure how much time has passed since the channel was opened
// and add it to the initial report timestamp to get the timestamp
// of the snapshot we want to report right now.
// NOTE: Multiplying `timestampDelta` by a constant factor here
// would have an effect of fast-forward, which is something we
// might be interested in implementing in the future.
timestampDelta := time.Since(wc.channelOpenedAt)
reportTimestamp := wc.startReportingAt.Add(timestampDelta)
span.LogFields(otlog.String("opened-at", wc.channelOpenedAt.String()),
otlog.String("timestamp", reportTimestamp.String()))
re, err := wc.rep.Report(ctx, reportTimestamp)
if err != nil {
return errors.Wrap(err, "Error generating report")
}
renderer, filter, err := topologyRegistry.RendererForTopology(wc.topologyID, wc.values, re)
if err != nil {
return errors.Wrap(err, "Error getting renderer for topology")
}
newTopo := detailed.CensorNodeSummaries(
detailed.Summaries(
ctx,
RenderContextForReporter(wc.rep, re),
render.Render(ctx, re, renderer, filter).Nodes,
),
wc.censorCfg,
)
diff := detailed.TopoDiff(wc.previousTopo, newTopo)
wc.previousTopo = newTopo
if err := wc.conn.WriteJSON(diff); err != nil {
if !xfer.IsExpectedWSCloseError(err) {
return errors.Wrap(err, "cannot serialize topology diff")
}
}
return nil
}
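// Hypothetical sketch (not in the original): the fast-forward idea from the
// NOTE in update would scale the elapsed wall-clock time before adding it,
// e.g. replaying history at double speed:
//
//	reportTimestamp := wc.startReportingAt.Add(2 * timestampDelta)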


@@ -1,155 +0,0 @@
package app_test
import (
"fmt"
"net/url"
"testing"
"github.com/gorilla/websocket"
"github.com/ugorji/go/codec"
"github.com/weaveworks/scope/app"
"github.com/weaveworks/scope/render/detailed"
"github.com/weaveworks/scope/render/expected"
"github.com/weaveworks/scope/test/fixture"
)
func TestAll(t *testing.T) {
ts := topologyServer()
defer ts.Close()
body := getRawJSON(t, ts, "/api/topology")
var topologies []app.APITopologyDesc
decoder := codec.NewDecoderBytes(body, &codec.JsonHandle{})
if err := decoder.Decode(&topologies); err != nil {
t.Fatalf("JSON parse error: %s", err)
}
getTopology := func(topologyURL string) {
body := getRawJSON(t, ts, topologyURL)
var topology app.APITopology
decoder := codec.NewDecoderBytes(body, &codec.JsonHandle{})
if err := decoder.Decode(&topology); err != nil {
t.Fatalf("JSON parse error: %s", err)
}
for _, node := range topology.Nodes {
body := getRawJSON(t, ts, fmt.Sprintf("%s/%s", topologyURL, url.QueryEscape(node.ID)))
var node app.APINode
decoder = codec.NewDecoderBytes(body, &codec.JsonHandle{})
if err := decoder.Decode(&node); err != nil {
t.Fatalf("JSON parse error: %s", err)
}
}
}
for _, topology := range topologies {
getTopology(topology.URL)
for _, subTopology := range topology.SubTopologies {
getTopology(subTopology.URL)
}
}
}
func TestAPITopologyProcesses(t *testing.T) {
ts := topologyServer()
defer ts.Close()
is404(t, ts, "/api/topology/processes/foobar")
{
body := getRawJSON(t, ts, "/api/topology/processes/"+fixture.ServerProcessNodeID)
var node app.APINode
decoder := codec.NewDecoderBytes(body, &codec.JsonHandle{})
if err := decoder.Decode(&node); err != nil {
t.Fatal(err)
}
equals(t, fixture.ServerProcessNodeID, node.Node.ID)
equals(t, "apache", node.Node.Label)
equals(t, false, node.Node.Pseudo)
// Let's not unit-test the specific content of the detail tables
}
{
body := getRawJSON(t, ts, "/api/topology/processes-by-name/"+
url.QueryEscape(fixture.Client1Name))
var node app.APINode
decoder := codec.NewDecoderBytes(body, &codec.JsonHandle{})
if err := decoder.Decode(&node); err != nil {
t.Fatal(err)
}
equals(t, fixture.Client1Name, node.Node.ID)
equals(t, fixture.Client1Name, node.Node.Label)
equals(t, false, node.Node.Pseudo)
// Let's not unit-test the specific content of the detail tables
}
}
func TestAPITopologyHosts(t *testing.T) {
ts := topologyServer()
defer ts.Close()
is404(t, ts, "/api/topology/hosts/foobar")
{
body := getRawJSON(t, ts, "/api/topology/hosts")
var topo app.APITopology
decoder := codec.NewDecoderBytes(body, &codec.JsonHandle{})
if err := decoder.Decode(&topo); err != nil {
t.Fatal(err)
}
// Should have the rendered host nodes
for id := range expected.RenderedHosts {
if _, ok := topo.Nodes[id]; !ok {
t.Errorf("Expected output to include node: %s, but wasn't found", id)
}
}
}
{
body := getRawJSON(t, ts, "/api/topology/hosts/"+fixture.ServerHostNodeID)
var node app.APINode
decoder := codec.NewDecoderBytes(body, &codec.JsonHandle{})
if err := decoder.Decode(&node); err != nil {
t.Fatal(err)
}
equals(t, fixture.ServerHostNodeID, node.Node.ID)
equals(t, "server", node.Node.Label)
equals(t, false, node.Node.Pseudo)
// Let's not unit-test the specific content of the detail tables
}
}
// Basic websocket test
func TestAPITopologyWebsocket(t *testing.T) {
ts := topologyServer()
defer ts.Close()
url := "/api/topology/processes/ws"
// Not a websocket request
res, _ := checkGet(t, ts, url)
if have := res.StatusCode; have != 400 {
t.Fatalf("Expected status %d, got %d.", 400, have)
}
// Proper websocket request
ts.URL = "ws" + ts.URL[len("http"):]
dialer := &websocket.Dialer{}
ws, res, err := dialer.Dial(ts.URL+url, nil)
ok(t, err)
defer ws.Close()
if want, have := 101, res.StatusCode; want != have {
t.Fatalf("want %d, have %d", want, have)
}
_, p, err := ws.ReadMessage()
ok(t, err)
var d detailed.Diff
decoder := codec.NewDecoderBytes(p, &codec.JsonHandle{})
if err := decoder.Decode(&d); err != nil {
t.Fatalf("JSON parse error: %s", err)
}
equals(t, 6, len(d.Add))
equals(t, 0, len(d.Update))
equals(t, 0, len(d.Remove))
}
func newu64(value uint64) *uint64 { return &value }


@@ -1,176 +0,0 @@
package app
import (
"context"
"flag"
"math/rand"
"net/http"
"net/url"
"os"
"path/filepath"
"testing"
"github.com/weaveworks/scope/render"
"github.com/weaveworks/scope/render/detailed"
"github.com/weaveworks/scope/report"
"github.com/weaveworks/scope/test/fixture"
)
var (
benchReportPath = flag.String("bench-report-path", "", "report file, or dir with files, to use for benchmarking (relative to this package)")
)
func readReportFiles(b *testing.B, path string) []report.Report {
reports := []report.Report{}
if err := filepath.Walk(path,
func(p string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() {
return nil
}
rpt, err := report.MakeFromFile(context.Background(), p)
if err != nil {
return err
}
reports = append(reports, *rpt)
return nil
}); err != nil {
b.Fatal(err)
}
return reports
}
func BenchmarkReportUnmarshal(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
readReportFiles(b, *benchReportPath)
}
}
func upgradeReports(reports []report.Report) []report.Report {
upgraded := make([]report.Report, len(reports))
for i, r := range reports {
upgraded[i] = r.Upgrade()
}
return upgraded
}
func BenchmarkReportUpgrade(b *testing.B) {
reports := readReportFiles(b, *benchReportPath)
b.ResetTimer()
for i := 0; i < b.N; i++ {
upgradeReports(reports)
}
}
func BenchmarkReportMerge(b *testing.B) {
reports := upgradeReports(readReportFiles(b, *benchReportPath))
rand.Shuffle(len(reports), func(i, j int) {
reports[i], reports[j] = reports[j], reports[i]
})
merger := NewFastMerger()
b.ResetTimer()
for i := 0; i < b.N; i++ {
merger.Merge(reports)
}
}
func getReport(b *testing.B) report.Report {
r := fixture.Report
if *benchReportPath != "" {
r = NewFastMerger().Merge(upgradeReports(readReportFiles(b, *benchReportPath)))
}
return r
}
func benchmarkRender(b *testing.B, f func(report.Report)) {
r := getReport(b)
b.ResetTimer()
for i := 0; i < b.N; i++ {
b.StopTimer()
render.ResetCache()
b.StartTimer()
f(r)
}
}
func renderForTopology(b *testing.B, topologyID string, report report.Report) report.Nodes {
renderer, filter, err := topologyRegistry.RendererForTopology(topologyID, url.Values{}, report)
if err != nil {
b.Fatal(err)
}
return render.Render(context.Background(), report, renderer, filter).Nodes
}
func benchmarkRenderTopology(b *testing.B, topologyID string) {
benchmarkRender(b, func(report report.Report) {
renderForTopology(b, topologyID, report)
})
}
func BenchmarkRenderList(b *testing.B) {
benchmarkRender(b, func(report report.Report) {
topologyRegistry.renderTopologies(context.Background(), report, &http.Request{Form: url.Values{}})
})
}
func BenchmarkRenderHosts(b *testing.B) {
benchmarkRenderTopology(b, "hosts")
}
func BenchmarkRenderControllers(b *testing.B) {
benchmarkRenderTopology(b, "kube-controllers")
}
func BenchmarkRenderPods(b *testing.B) {
benchmarkRenderTopology(b, "pods")
}
func BenchmarkRenderContainers(b *testing.B) {
benchmarkRenderTopology(b, "containers")
}
func BenchmarkRenderProcesses(b *testing.B) {
benchmarkRenderTopology(b, "processes")
}
func BenchmarkRenderProcessNames(b *testing.B) {
benchmarkRenderTopology(b, "processes-by-name")
}
func benchmarkSummarizeTopology(b *testing.B, topologyID string) {
ctx := context.Background()
r := getReport(b)
rc := detailed.RenderContext{Report: r}
nodes := renderForTopology(b, topologyID, r)
b.ResetTimer()
for i := 0; i < b.N; i++ {
detailed.Summaries(ctx, rc, nodes)
}
}
func BenchmarkSummarizeHosts(b *testing.B) {
benchmarkSummarizeTopology(b, "hosts")
}
func BenchmarkSummarizeControllers(b *testing.B) {
benchmarkSummarizeTopology(b, "kube-controllers")
}
func BenchmarkSummarizePods(b *testing.B) {
benchmarkSummarizeTopology(b, "pods")
}
func BenchmarkSummarizeContainers(b *testing.B) {
benchmarkSummarizeTopology(b, "containers")
}
func BenchmarkSummarizeProcesses(b *testing.B) {
benchmarkSummarizeTopology(b, "processes")
}
func BenchmarkSummarizeProcessNames(b *testing.B) {
benchmarkSummarizeTopology(b, "processes-by-name")
}


@@ -1,366 +0,0 @@
package app
import (
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"time"
"context"
"github.com/weaveworks/common/mtime"
"github.com/weaveworks/scope/report"
)
// We merge all reports received within the specified interval, and
// discard the originals. Higher figures improve the performance of
// Report(), but at the expense of lower time resolution, since time
// is effectively advancing in quanta.
//
// The current figure is identical to the default
// probe.publishInterval, which results in performance improvements
// as soon as there is more than one probe.
const reportQuantisationInterval = 3 * time.Second
// Reporter is something that can produce reports on demand. It's a convenient
// interface for parts of the app, and several experimental components.
type Reporter interface {
Report(context.Context, time.Time) (report.Report, error)
HasReports(context.Context, time.Time) (bool, error)
HasHistoricReports() bool
AdminSummary(context.Context, time.Time) (string, error)
WaitOn(context.Context, chan struct{})
UnWait(context.Context, chan struct{})
}
// WebReporter is a reporter that creates reports whose data is eventually
// displayed on websites. It carries fields that will be forwarded to the
// detailed.RenderContext
type WebReporter struct {
Reporter
MetricsGraphURL string
}
// Adder is something that can accept reports. It's a convenient interface for
// parts of the app, and several experimental components. It takes the following
// arguments:
// - context.Context: the request context
// - report.Report: the deserialised report
// - []byte: the serialised report (as gzip'd msgpack)
type Adder interface {
Add(context.Context, report.Report, []byte) error
}
// A Collector is a Reporter and an Adder
type Collector interface {
Reporter
Adder
Close()
}
// Collector receives published reports from multiple producers. It yields a
// single merged report, representing all collected reports.
type collector struct {
mtx sync.Mutex
reports []report.Report
timestamps []time.Time
window time.Duration
cached *report.Report
merger Merger
waitableCondition
}
type waitableCondition struct {
sync.Mutex
waiters map[chan struct{}]struct{}
}
func (wc *waitableCondition) WaitOn(_ context.Context, waiter chan struct{}) {
wc.Lock()
wc.waiters[waiter] = struct{}{}
wc.Unlock()
}
func (wc *waitableCondition) UnWait(_ context.Context, waiter chan struct{}) {
wc.Lock()
delete(wc.waiters, waiter)
wc.Unlock()
}
func (wc *waitableCondition) Broadcast() {
wc.Lock()
for waiter := range wc.waiters {
// Non-blocking write to channel
select {
case waiter <- struct{}{}:
default:
}
}
wc.Unlock()
}
// NewCollector returns a collector ready for use.
func NewCollector(window time.Duration) Collector {
return &collector{
window: window,
waitableCondition: waitableCondition{
waiters: map[chan struct{}]struct{}{},
},
merger: NewFastMerger(),
}
}
// Close is a no-op for the regular collector
func (c *collector) Close() {}
// Add adds a report to the collector's internal state. It implements Adder.
func (c *collector) Add(_ context.Context, rpt report.Report, _ []byte) error {
c.mtx.Lock()
defer c.mtx.Unlock()
c.reports = append(c.reports, rpt)
c.timestamps = append(c.timestamps, mtime.Now())
c.clean()
c.cached = nil
if rpt.Shortcut {
c.Broadcast()
}
return nil
}
// Report returns a merged report over all added reports. It implements
// Reporter.
func (c *collector) Report(_ context.Context, timestamp time.Time) (report.Report, error) {
c.mtx.Lock()
defer c.mtx.Unlock()
// If the oldest report is still within range,
// and there is a cached report, return that.
if c.cached != nil && len(c.reports) > 0 {
oldest := timestamp.Add(-c.window)
if c.timestamps[0].After(oldest) {
return *c.cached, nil
}
}
c.clean()
c.quantise()
for i := range c.reports {
c.reports[i] = c.reports[i].Upgrade()
}
rpt := c.merger.Merge(c.reports)
c.cached = &rpt
return rpt, nil
}
// HasReports indicates whether the collector contains reports between
// timestamp-app.window and timestamp.
func (c *collector) HasReports(ctx context.Context, timestamp time.Time) (bool, error) {
c.mtx.Lock()
defer c.mtx.Unlock()
if len(c.timestamps) < 1 {
return false, nil
}
return !c.timestamps[0].After(timestamp) && !c.timestamps[len(c.timestamps)-1].Before(timestamp.Add(-c.window)), nil
}
// HasHistoricReports indicates whether the collector contains reports
// older than now-app.window.
func (c *collector) HasHistoricReports() bool {
return false
}
// AdminSummary returns a string with some internal information about
// the collected reports, which may be useful for troubleshooting.
func (c *collector) AdminSummary(ctx context.Context, timestamp time.Time) (string, error) {
c.mtx.Lock()
defer c.mtx.Unlock()
var b strings.Builder
for i := range c.reports {
fmt.Fprintf(&b, "%v: ", c.timestamps[i].Format(time.StampMilli))
b.WriteString(c.reports[i].Summary())
b.WriteByte('\n')
}
return b.String(), nil
}
// remove reports older than the app.window
func (c *collector) clean() {
var (
cleanedReports = make([]report.Report, 0, len(c.reports))
cleanedTimestamps = make([]time.Time, 0, len(c.timestamps))
oldest = mtime.Now().Add(-c.window)
)
for i, r := range c.reports {
if c.timestamps[i].After(oldest) {
cleanedReports = append(cleanedReports, r)
cleanedTimestamps = append(cleanedTimestamps, c.timestamps[i])
}
}
c.reports = cleanedReports
c.timestamps = cleanedTimestamps
}
// Merge reports received within the same reportQuantisationInterval.
//
// Quantisation is relative to the time of the first report in a given
// interval, rather than absolute time. So, for example, with a
// reportQuantisationInterval of 3s and reports with timestamps [0, 1,
// 2, 5, 6, 7], the result contains merged reports with
// timestamps/content of [0:{0,1,2}, 5:{5,6,7}].
func (c *collector) quantise() {
if len(c.reports) == 0 {
return
}
var (
quantisedReports = make([]report.Report, 0, len(c.reports))
quantisedTimestamps = make([]time.Time, 0, len(c.timestamps))
)
quantumStartIdx := 0
quantumStartTimestamp := c.timestamps[0]
for i, t := range c.timestamps {
if t.Sub(quantumStartTimestamp) < reportQuantisationInterval {
continue
}
quantisedReports = append(quantisedReports, c.merger.Merge(c.reports[quantumStartIdx:i]))
quantisedTimestamps = append(quantisedTimestamps, quantumStartTimestamp)
quantumStartIdx = i
quantumStartTimestamp = t
}
c.reports = append(quantisedReports, c.merger.Merge(c.reports[quantumStartIdx:]))
c.timestamps = append(quantisedTimestamps, c.timestamps[quantumStartIdx])
}
// StaticCollector always returns the given report.
type StaticCollector report.Report
// Report returns a merged report over all added reports. It implements
// Reporter.
func (c StaticCollector) Report(context.Context, time.Time) (report.Report, error) {
return report.Report(c), nil
}
// Close is a no-op for the static collector
func (c StaticCollector) Close() {}
// HasReports indicates whether the collector contains reports between
// timestamp-app.window and timestamp.
func (c StaticCollector) HasReports(context.Context, time.Time) (bool, error) {
return true, nil
}
// HasHistoricReports indicates whether the collector contains reports
// older than now-app.window.
func (c StaticCollector) HasHistoricReports() bool {
return false
}
// AdminSummary implements Reporter
func (c StaticCollector) AdminSummary(ctx context.Context, timestamp time.Time) (string, error) {
return "not implemented", nil
}
// Add adds a report to the collector's internal state. It implements Adder.
func (c StaticCollector) Add(context.Context, report.Report, []byte) error { return nil }
// WaitOn lets other components wait on a new report being received. It
// implements Reporter.
func (c StaticCollector) WaitOn(context.Context, chan struct{}) {}
// UnWait lets other components stop waiting on a new report being received. It
// implements Reporter.
func (c StaticCollector) UnWait(context.Context, chan struct{}) {}
// NewFileCollector reads and parses the files at path (a file or
// directory) as reports. If there are multiple files, and they all
// have names representing "nanoseconds since epoch" timestamps,
// e.g. "1488557088545489008.msgpack.gz", then the collector will
// return merged reports resulting from replaying the file reports in
// a loop at a sequence and speed determined by the timestamps.
// Otherwise the collector always returns the merger of all reports.
func NewFileCollector(path string, window time.Duration) (Collector, error) {
var (
timestamps []time.Time
reports []report.Report
)
allTimestamped := true
if err := filepath.Walk(path,
func(p string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() {
return nil
}
t, err := timestampFromFilepath(p)
if err != nil {
allTimestamped = false
}
timestamps = append(timestamps, t)
rpt, err := report.MakeFromFile(context.Background(), p)
if err != nil {
return err
}
reports = append(reports, *rpt)
return nil
}); err != nil {
return nil, err
}
if len(reports) > 1 && allTimestamped {
collector := NewCollector(window)
go replay(collector, timestamps, reports)
return collector, nil
}
return StaticCollector(NewFastMerger().Merge(reports).Upgrade()), nil
}
func timestampFromFilepath(path string) (time.Time, error) {
name := filepath.Base(path)
for {
ext := filepath.Ext(name)
if ext == "" {
break
}
name = strings.TrimSuffix(name, ext)
}
nanosecondsSinceEpoch, err := strconv.ParseInt(name, 10, 64)
if err != nil {
return time.Time{}, fmt.Errorf("filename '%s' is not a number (representing nanoseconds since epoch): %v", name, err)
}
return time.Unix(0, nanosecondsSinceEpoch), nil
}
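// Illustrative only (not part of the original file):
//
//	timestampFromFilepath("1488557088545489008.msgpack.gz")
//
// strips both extensions and returns time.Unix(0, 1488557088545489008); a
// non-numeric name such as "report.msgpack.gz" returns an error, which makes
// NewFileCollector fall back to a merged StaticCollector.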
func replay(a Adder, timestamps []time.Time, reports []report.Report) {
// calculate delays between report n and n+1
l := len(timestamps)
delays := make([]time.Duration, l)
for i, t := range timestamps[0 : l-1] {
delays[i] = timestamps[i+1].Sub(t)
if delays[i] < 0 {
panic(fmt.Errorf("replay timestamps are not in order! %v", timestamps))
}
}
// We don't know how long to wait before looping round, so make a
// good guess.
delays[l-1] = timestamps[l-1].Sub(timestamps[0]) / time.Duration(l)
due := time.Now()
for {
for i, r := range reports {
a.Add(nil, r, nil)
due = due.Add(delays[i])
delay := due.Sub(time.Now())
if delay > 0 {
time.Sleep(delay)
}
}
}
}


@@ -1,132 +0,0 @@
package app_test
import (
"testing"
"time"
"context"
"github.com/weaveworks/common/mtime"
"github.com/weaveworks/common/test"
"github.com/weaveworks/scope/app"
"github.com/weaveworks/scope/report"
"github.com/weaveworks/scope/test/reflect"
)
func TestCollector(t *testing.T) {
ctx := context.Background()
window := 10 * time.Second
c := app.NewCollector(window)
now := time.Now()
mtime.NowForce(now)
defer mtime.NowReset()
r1 := report.MakeReport()
r1.Endpoint.AddNode(report.MakeNode("foo"))
r2 := report.MakeReport()
r2.Endpoint.AddNode(report.MakeNode("foo"))
have, err := c.Report(ctx, mtime.Now())
if err != nil {
t.Error(err)
}
if want := report.MakeReport(); !reflect.DeepEqual(want, have) {
t.Error(test.Diff(want, have))
}
c.Add(ctx, r1, nil)
have, err = c.Report(ctx, mtime.Now())
if err != nil {
t.Error(err)
}
if want := r1; !reflect.DeepEqual(want, have) {
t.Error(test.Diff(want, have))
}
timeBefore := mtime.Now()
mtime.NowForce(now.Add(time.Second))
c.Add(ctx, r2, nil)
merged := report.MakeReport()
merged.UnsafeMerge(r1)
merged.UnsafeMerge(r2)
have, err = c.Report(ctx, mtime.Now())
if err != nil {
t.Error(err)
}
if want := merged; !reflect.DeepEqual(want, have) {
t.Error(test.Diff(want, have))
}
// Since the timestamp given is before r2 was added,
// it shouldn't be included in the final report.
have, err = c.Report(ctx, timeBefore)
if err != nil {
t.Error(err)
}
if want := r1; !reflect.DeepEqual(want, have) {
t.Error(test.Diff(want, have))
}
}
func TestCollectorExpire(t *testing.T) {
now := time.Now()
mtime.NowForce(now)
defer mtime.NowReset()
ctx := context.Background()
window := 10 * time.Second
c := app.NewCollector(window)
// 1st check the collector is empty
have, err := c.Report(ctx, mtime.Now())
if err != nil {
t.Error(err)
}
if want := report.MakeReport(); !reflect.DeepEqual(want, have) {
t.Error(test.Diff(want, have))
}
// Now check an added report is returned
r1 := report.MakeReport()
r1.Endpoint.AddNode(report.MakeNode("foo"))
c.Add(ctx, r1, nil)
have, err = c.Report(ctx, mtime.Now())
if err != nil {
t.Error(err)
}
if want := r1; !reflect.DeepEqual(want, have) {
t.Error(test.Diff(want, have))
}
// Finally move time forward to expire the report
mtime.NowForce(now.Add(window))
have, err = c.Report(ctx, mtime.Now())
if err != nil {
t.Error(err)
}
if want := report.MakeReport(); !reflect.DeepEqual(want, have) {
t.Error(test.Diff(want, have))
}
}
func TestCollectorWait(t *testing.T) {
ctx := context.Background()
window := time.Millisecond
c := app.NewCollector(window)
waiter := make(chan struct{}, 1)
c.WaitOn(ctx, waiter)
defer c.UnWait(ctx, waiter)
c.(interface {
Broadcast()
}).Broadcast()
select {
case <-waiter:
default:
t.Fatal("Didn't unblock")
}
}


@@ -1,69 +0,0 @@
package app
import (
"fmt"
"math/rand"
"sync"
"context"
"github.com/weaveworks/scope/common/xfer"
)
// ControlRouter is a thing that can route control requests and responses
// between the UI and a probe.
type ControlRouter interface {
Handle(ctx context.Context, probeID string, req xfer.Request) (xfer.Response, error)
Register(ctx context.Context, probeID string, handler xfer.ControlHandlerFunc) (int64, error)
Deregister(ctx context.Context, probeID string, id int64) error
}
// NewLocalControlRouter creates a new ControlRouter that does everything
// locally, in memory.
func NewLocalControlRouter() ControlRouter {
return &localControlRouter{
probes: map[string]probe{},
}
}
type localControlRouter struct {
sync.Mutex
probes map[string]probe
}
type probe struct {
id int64
handler xfer.ControlHandlerFunc
}
func (l *localControlRouter) Handle(_ context.Context, probeID string, req xfer.Request) (xfer.Response, error) {
l.Lock()
probe, ok := l.probes[probeID]
l.Unlock()
if !ok {
return xfer.Response{}, fmt.Errorf("probe %s is not connected right now", probeID)
}
return probe.handler(req), nil
}
func (l *localControlRouter) Register(_ context.Context, probeID string, handler xfer.ControlHandlerFunc) (int64, error) {
l.Lock()
defer l.Unlock()
id := rand.Int63()
l.probes[probeID] = probe{
id: id,
handler: handler,
}
return id, nil
}
func (l *localControlRouter) Deregister(_ context.Context, probeID string, id int64) error {
l.Lock()
defer l.Unlock()
// NB the probe might have reconnected in the meantime; make sure we do not
// delete the new connection! It might also have connected and then
// deregistered itself!
if l.probes[probeID].id == id {
delete(l.probes, probeID)
}
return nil
}
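// Illustrative lifecycle (not part of the original file), with a
// hypothetical probe ID and handler:
//
//	id, _ := router.Register(ctx, "probe-1", handler)
//	resp, _ := router.Handle(ctx, "probe-1", xfer.Request{NodeID: "node-1", Control: "restart"})
//	_ = router.Deregister(ctx, "probe-1", id)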


@@ -1,103 +0,0 @@
package app
import (
"net/http"
"net/rpc"
"context"
"github.com/gorilla/mux"
log "github.com/sirupsen/logrus"
"github.com/ugorji/go/codec"
"github.com/weaveworks/scope/common/xfer"
)
// RegisterControlRoutes registers the various control routes with a http mux.
func RegisterControlRoutes(router *mux.Router, cr ControlRouter) {
router.
Methods("GET").
Path("/api/control/ws").
HandlerFunc(requestContextDecorator(handleProbeWS(cr)))
router.
Methods("POST").
Name("api_control_probeid_nodeid_control").
MatcherFunc(URLMatcher("/api/control/{probeID}/{nodeID}/{control}")).
HandlerFunc(requestContextDecorator(handleControl(cr)))
}
// handleControl routes control requests from the client to the appropriate
// probe. It is blocking.
func handleControl(cr ControlRouter) CtxHandlerFunc {
return func(ctx context.Context, w http.ResponseWriter, r *http.Request) {
var (
vars = mux.Vars(r)
probeID = vars["probeID"]
nodeID = vars["nodeID"]
control = vars["control"]
controlArgs map[string]string
)
if r.ContentLength > 0 {
err := codec.NewDecoder(r.Body, &codec.JsonHandle{}).Decode(&controlArgs)
defer r.Body.Close()
if err != nil {
respondWith(ctx, w, http.StatusBadRequest, err)
return
}
}
result, err := cr.Handle(ctx, probeID, xfer.Request{
NodeID: nodeID,
Control: control,
ControlArgs: controlArgs,
})
if err != nil {
respondWith(ctx, w, http.StatusBadRequest, err.Error())
return
}
if result.Error != "" {
respondWith(ctx, w, http.StatusBadRequest, result.Error)
return
}
respondWith(ctx, w, http.StatusOK, result)
}
}
// handleProbeWS accepts websocket connections from the probe and registers
// them in the control router, such that HandleControl calls can find them.
func handleProbeWS(cr ControlRouter) CtxHandlerFunc {
return func(ctx context.Context, w http.ResponseWriter, r *http.Request) {
probeID := r.Header.Get(xfer.ScopeProbeIDHeader)
if probeID == "" {
respondWith(ctx, w, http.StatusBadRequest, xfer.ScopeProbeIDHeader)
return
}
conn, err := xfer.Upgrade(w, r, nil)
if err != nil {
log.Printf("Error upgrading control websocket: %v", err)
return
}
defer conn.Close()
codec := xfer.NewJSONWebsocketCodec(conn)
client := rpc.NewClientWithCodec(codec)
defer client.Close()
id, err := cr.Register(ctx, probeID, func(req xfer.Request) xfer.Response {
var res xfer.Response
if err := client.Call("control.Handle", req, &res); err != nil {
return xfer.ResponseError(err)
}
return res
})
if err != nil {
respondWith(ctx, w, http.StatusBadRequest, err)
return
}
defer cr.Deregister(ctx, probeID, id)
if err := codec.WaitForReadError(); err != nil && !xfer.IsExpectedWSCloseError(err) {
log.Errorf("Error on websocket: %v", err)
}
}
}

View File

@@ -1,79 +0,0 @@
package app_test
import (
"net"
"net/http"
"net/http/httptest"
"net/url"
"strings"
"testing"
"time"
"github.com/gorilla/mux"
"github.com/ugorji/go/codec"
"github.com/weaveworks/scope/app"
"github.com/weaveworks/scope/common/xfer"
"github.com/weaveworks/scope/probe/appclient"
)
func TestControl(t *testing.T) {
router := mux.NewRouter()
app.RegisterControlRoutes(router, app.NewLocalControlRouter())
server := httptest.NewServer(router)
defer server.Close()
ip, port, err := net.SplitHostPort(strings.TrimPrefix(server.URL, "http://"))
if err != nil {
t.Fatal(err)
}
probeConfig := appclient.ProbeConfig{
ProbeID: "foo",
}
controlHandler := xfer.ControlHandlerFunc(func(req xfer.Request) xfer.Response {
if req.NodeID != "nodeid" {
t.Fatalf("'%s' != 'nodeid'", req.NodeID)
}
if req.Control != "control" {
t.Fatalf("'%s' != 'control'", req.Control)
}
return xfer.Response{
Value: "foo",
}
})
url := url.URL{Scheme: "http", Host: ip + ":" + port}
client, err := appclient.NewAppClient(probeConfig, ip+":"+port, url, controlHandler)
if err != nil {
t.Fatal(err)
}
client.ControlConnection()
defer client.Stop()
time.Sleep(100 * time.Millisecond)
httpClient := http.Client{
Timeout: 1 * time.Second,
}
resp, err := httpClient.Post(
server.URL+"/api/control/foo/nodeid/control",
"application/json",
strings.NewReader("{}"),
)
if err != nil {
t.Fatal(err)
}
defer resp.Body.Close()
var response xfer.Response
decoder := codec.NewDecoder(resp.Body, &codec.JsonHandle{})
if err := decoder.Decode(&response); err != nil {
t.Fatal(err)
}
if response.Value != "foo" {
t.Fatalf("'%s' != 'foo'", response.Value)
}
}

View File

@@ -1,32 +0,0 @@
package app
import (
"fmt"
"github.com/spaolacci/murmur3"
"github.com/weaveworks/scope/report"
)
// Merger is the type for a thing that can merge reports.
type Merger interface {
Merge([]report.Report) report.Report
}
type fastMerger struct{}
// NewFastMerger makes a Merger which merges together reports, mutating the one we are building up
func NewFastMerger() Merger {
return fastMerger{}
}
func (fastMerger) Merge(reports []report.Report) report.Report {
rpt := report.MakeReport()
id := murmur3.New64()
for _, r := range reports {
rpt.UnsafeMerge(r)
id.Write([]byte(r.ID))
}
rpt.ID = fmt.Sprintf("%x", id.Sum64())
return rpt
}
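// Sketch of typical use (assumed; r1..r3 are placeholder reports):
//
// merger := NewFastMerger()
// merged := merger.Merge([]report.Report{r1, r2, r3})
//
// merged.ID is the hex of the murmur3-64 hash over r1.ID, r2.ID and r3.ID in
// order, so merging the same reports in the same order yields the same ID.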

View File

@@ -1,81 +0,0 @@
package app_test
import (
"fmt"
"math/rand"
"testing"
"github.com/weaveworks/common/test"
"github.com/weaveworks/scope/app"
"github.com/weaveworks/scope/report"
"github.com/weaveworks/scope/test/reflect"
)
func TestMerger(t *testing.T) {
// Use 3 reports to check pair-wise merging
report1 := report.MakeReport()
report1.Endpoint.AddNode(report.MakeNode("foo"))
report2 := report.MakeReport()
report2.Endpoint.AddNode(report.MakeNode("bar"))
report3 := report.MakeReport()
report3.Endpoint.AddNode(report.MakeNode("baz"))
reports := []report.Report{
report1, report2, report3,
}
want := report.MakeReport()
want.Endpoint.AddNode(report.MakeNode("foo"))
want.Endpoint.AddNode(report.MakeNode("bar"))
want.Endpoint.AddNode(report.MakeNode("baz"))
for _, merger := range []app.Merger{app.NewFastMerger()} {
// Test the empty list case
if have := merger.Merge([]report.Report{}); !reflect.DeepEqual(have, report.MakeReport()) {
t.Errorf("Bad merge: %s", test.Diff(have, want))
}
if have := merger.Merge(reports); !reflect.DeepEqual(have, want) {
t.Errorf("Bad merge: %s", test.Diff(have, want))
}
// Repeat the above test to ensure caching works
if have := merger.Merge(reports); !reflect.DeepEqual(have, want) {
t.Errorf("Bad merge: %s", test.Diff(have, want))
}
}
}
func BenchmarkFastMerger(b *testing.B) {
benchmarkMerger(b, app.NewFastMerger())
}
const numHosts = 15
func benchmarkMerger(b *testing.B, merger app.Merger) {
makeReport := func() report.Report {
rpt := report.MakeReport()
for i := 0; i < 100; i++ {
rpt.Endpoint.AddNode(report.MakeNode(fmt.Sprintf("%x", rand.Int63())))
}
return rpt
}
reports := []report.Report{}
for i := 0; i < numHosts*5; i++ {
reports = append(reports, makeReport())
}
replacements := []report.Report{}
for i := 0; i < numHosts/3; i++ {
replacements = append(replacements, makeReport())
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
// replace 1/3 of the hosts' worth of reports & merge them all
for i := 0; i < len(replacements); i++ {
reports[rand.Intn(len(reports))] = replacements[i]
}
merger.Merge(reports)
}
}

View File

@@ -1,820 +0,0 @@
package multitenant
import (
"crypto/md5"
"fmt"
"io"
"strconv"
"strings"
"sync"
"time"
"context"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/bluele/gcache"
"github.com/nats-io/nats"
opentracing "github.com/opentracing/opentracing-go"
otlog "github.com/opentracing/opentracing-go/log"
"github.com/prometheus/client_golang/prometheus"
log "github.com/sirupsen/logrus"
"github.com/weaveworks/common/instrument"
"github.com/weaveworks/scope/app"
"github.com/weaveworks/scope/report"
)
const (
hourField = "hour"
tsField = "ts"
reportField = "report"
natsTimeout = 10 * time.Second
reportQuantisationInterval = 3 * time.Second
// Grace period allows for some gap between the timestamp on reports
// (assigned when they arrive at the collector) and their appearance in DynamoDB query results
gracePeriod = 500 * time.Millisecond
)
var (
dynamoRequestDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{
Namespace: "scope",
Name: "dynamo_request_duration_seconds",
Help: "Time in seconds spent doing DynamoDB requests.",
Buckets: prometheus.DefBuckets,
}, []string{"method", "status_code"})
dynamoConsumedCapacity = prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: "scope",
Name: "dynamo_consumed_capacity_total",
Help: "Total count of capacity units consumed per operation.",
}, []string{"method"})
dynamoValueSize = prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: "scope",
Name: "dynamo_value_size_bytes_total",
Help: "Total size of data read / written from DynamoDB in bytes.",
}, []string{"method"})
inProcessCacheRequests = prometheus.NewCounter(prometheus.CounterOpts{
Namespace: "scope",
Name: "in_process_cache_requests_total",
Help: "Total count of reports requested from the in-process cache.",
})
inProcessCacheHits = prometheus.NewCounter(prometheus.CounterOpts{
Namespace: "scope",
Name: "in_process_cache_hits_total",
Help: "Total count of reports found in the in-process cache.",
})
reportSizeHistogram = prometheus.NewHistogram(prometheus.HistogramOpts{
Namespace: "scope",
Name: "report_size_bytes",
Help: "Distribution of memcache report sizes",
Buckets: prometheus.ExponentialBuckets(4096, 2.0, 10),
})
reportsPerUser = prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: "scope",
Name: "reports_stored_total",
Help: "Total count of stored reports per user.",
}, []string{"user"})
reportSizePerUser = prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: "scope",
Name: "reports_bytes_total",
Help: "Total bytes stored in reports per user.",
}, []string{"user"})
topologiesDropped = prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: "scope",
Name: "topologies_dropped_total",
Help: "Total count of topologies dropped for being over limit.",
}, []string{"user", "topology"})
natsRequests = prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: "scope",
Name: "nats_requests_total",
Help: "Total count of NATS requests.",
}, []string{"method", "status_code"})
flushDuration = instrument.NewHistogramCollectorFromOpts(prometheus.HistogramOpts{
Namespace: "scope",
Name: "flush_duration_seconds",
Help: "Time in seconds spent flushing merged reports.",
Buckets: prometheus.DefBuckets,
})
)
func registerAWSCollectorMetrics() {
prometheus.MustRegister(dynamoRequestDuration)
prometheus.MustRegister(dynamoConsumedCapacity)
prometheus.MustRegister(dynamoValueSize)
prometheus.MustRegister(inProcessCacheRequests)
prometheus.MustRegister(inProcessCacheHits)
prometheus.MustRegister(reportSizeHistogram)
prometheus.MustRegister(reportsPerUser)
prometheus.MustRegister(reportSizePerUser)
prometheus.MustRegister(topologiesDropped)
prometheus.MustRegister(natsRequests)
flushDuration.Register()
}
var registerAWSCollectorMetricsOnce sync.Once
// AWSCollector is a Collector which can also CreateTables
type AWSCollector interface {
app.Collector
CreateTables() error
}
// ReportStore is a thing that we can get reports from.
type ReportStore interface {
FetchReports(context.Context, []string) (map[string]report.Report, []string, error)
}
// AWSCollectorConfig has everything we need to make an AWS collector.
type AWSCollectorConfig struct {
UserIDer UserIDer
DynamoDBConfig *aws.Config
DynamoTable string
S3Store *S3Store
StoreInterval time.Duration
NatsHost string
MemcacheClient *MemcacheClient
Window time.Duration
MaxTopNodes int
}
// if StoreInterval is set, reports are merged into here and held until flushed to store
type pendingEntry struct {
sync.Mutex
report report.Report
count int
}
type awsCollector struct {
cfg AWSCollectorConfig
db *dynamodb.DynamoDB
merger app.Merger
inProcess inProcessStore
pending sync.Map
ticker *time.Ticker
nats *nats.Conn
waitersLock sync.Mutex
waiters map[watchKey]*nats.Subscription
}
// Shortcut reports:
// When the UI connects a WS to the query service, a goroutine periodically
// publishes rendered reports to that ws. This process can be interrupted by
// "shortcut" reports, causing the query service to push a rendered report
// immediately. This whole process is controlled by the aforementioned
// goroutine registering a channel with the collector. We store these
// registered channels in a map keyed by the userid and the channel itself,
// which in go is hashable. We then listen on a NATS topic for any shortcut
// reports coming from the collection service.
type watchKey struct {
userid string
c chan struct{}
}
// NewAWSCollector creates a new AWS collector, the elastic reaper of souls
// https://github.com/aws/aws-sdk-go/wiki/common-examples
func NewAWSCollector(config AWSCollectorConfig) (AWSCollector, error) {
registerAWSCollectorMetricsOnce.Do(registerAWSCollectorMetrics)
var nc *nats.Conn
if config.NatsHost != "" {
if config.MemcacheClient == nil {
return nil, fmt.Errorf("Must supply memcache client when using nats")
}
var err error
nc, err = nats.Connect(config.NatsHost)
if err != nil {
return nil, err
}
}
// (window * report rate) * number of hosts per user * number of users
reportCacheSize := (int(config.Window.Seconds()) / 3) * 10 * 5
c := &awsCollector{
cfg: config,
db: dynamodb.New(session.New(config.DynamoDBConfig)),
merger: app.NewFastMerger(),
inProcess: newInProcessStore(reportCacheSize, config.Window+reportQuantisationInterval),
nats: nc,
waiters: map[watchKey]*nats.Subscription{},
}
if config.StoreInterval != 0 {
c.ticker = time.NewTicker(config.StoreInterval)
go c.flushLoop()
}
return c, nil
}
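// Hypothetical construction, for illustration only (field values invented;
// S3Store, MemcacheClient and NATS setup omitted):
//
// c, err := NewAWSCollector(AWSCollectorConfig{
// UserIDer: NoopUserIDer,
// DynamoDBConfig: aws.NewConfig().WithRegion("us-east-1"),
// DynamoTable: "reports",
// Window: 15 * time.Second,
// StoreInterval: 3 * time.Second, // 0 persists each report as it arrives
// })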
func (c *awsCollector) flushLoop() {
for range c.ticker.C {
c.flushPending(context.Background())
}
}
// Range over all users (instances) that have pending reports and send to store
func (c *awsCollector) flushPending(ctx context.Context) {
instrument.CollectedRequest(ctx, "FlushPending", flushDuration, nil, func(ctx context.Context) error {
type queueEntry struct {
userid string
buf []byte
}
queue := make(chan queueEntry)
const numParallel = 10
var group sync.WaitGroup
group.Add(numParallel)
// Run n parallel goroutines fetching reports from the queue and flushing them
for i := 0; i < numParallel; i++ {
go func() {
for entry := range queue {
rowKey, colKey, reportKey := calculateReportKeys(entry.userid, time.Now())
err := c.persistReport(ctx, entry.userid, rowKey, colKey, reportKey, entry.buf)
if err != nil {
log.Errorf("Could not persist combined report: %v", err)
}
}
group.Done()
}()
}
c.pending.Range(func(key, value interface{}) bool {
userid := key.(string)
entry := value.(*pendingEntry)
entry.Lock()
rpt, count := entry.report, entry.count
entry.report, entry.count = report.MakeReport(), 0
entry.Unlock()
if count > 0 {
// serialise reports on one goroutine to limit CPU usage
buf, err := rpt.WriteBinary()
if err != nil {
log.Errorf("Could not serialise combined report: %v", err)
return true
}
queue <- queueEntry{userid: userid, buf: buf.Bytes()}
}
return true
})
close(queue)
group.Wait()
return nil
})
}
// Close will flush pending data
func (c *awsCollector) Close() {
if c.ticker != nil { // ticker is nil when StoreInterval == 0
c.ticker.Stop() // note this doesn't close the chan; goroutine keeps running
}
c.flushPending(context.Background())
}
// CreateTables creates the required tables in dynamodb
func (c *awsCollector) CreateTables() error {
// see if the table already exists
resp, err := c.db.ListTables(&dynamodb.ListTablesInput{
Limit: aws.Int64(10),
})
if err != nil {
return err
}
for _, s := range resp.TableNames {
if *s == c.cfg.DynamoTable {
return nil
}
}
params := &dynamodb.CreateTableInput{
TableName: aws.String(c.cfg.DynamoTable),
AttributeDefinitions: []*dynamodb.AttributeDefinition{
{
AttributeName: aws.String(hourField),
AttributeType: aws.String("S"),
},
{
AttributeName: aws.String(tsField),
AttributeType: aws.String("N"),
},
// Don't need to specify non-key attributes in schema
//{
// AttributeName: aws.String(reportField),
// AttributeType: aws.String("S"),
//},
},
KeySchema: []*dynamodb.KeySchemaElement{
{
AttributeName: aws.String(hourField),
KeyType: aws.String("HASH"),
},
{
AttributeName: aws.String(tsField),
KeyType: aws.String("RANGE"),
},
},
ProvisionedThroughput: &dynamodb.ProvisionedThroughput{
ReadCapacityUnits: aws.Int64(10),
WriteCapacityUnits: aws.Int64(5),
},
}
log.Infof("Creating table %s", c.cfg.DynamoTable)
_, err = c.db.CreateTable(params)
return err
}
type keyInfo struct {
key string
ts int64
}
// reportKeysInRange returns the S3 keys for reports in the specified range
func (c *awsCollector) reportKeysInRange(ctx context.Context, userid string, row int64, start, end time.Time) ([]keyInfo, error) {
rowKey := fmt.Sprintf("%s-%s", userid, strconv.FormatInt(row, 10))
var resp *dynamodb.QueryOutput
err := instrument.TimeRequestHistogram(ctx, "DynamoDB.Query", dynamoRequestDuration, func(_ context.Context) error {
var err error
resp, err = c.db.Query(&dynamodb.QueryInput{
TableName: aws.String(c.cfg.DynamoTable),
KeyConditions: map[string]*dynamodb.Condition{
hourField: {
AttributeValueList: []*dynamodb.AttributeValue{
{S: aws.String(rowKey)},
},
ComparisonOperator: aws.String("EQ"),
},
tsField: {
AttributeValueList: []*dynamodb.AttributeValue{
{N: aws.String(strconv.FormatInt(start.UnixNano(), 10))},
{N: aws.String(strconv.FormatInt(end.UnixNano(), 10))},
},
ComparisonOperator: aws.String("BETWEEN"),
},
},
ReturnConsumedCapacity: aws.String(dynamodb.ReturnConsumedCapacityTotal),
})
return err
})
// check the error before touching resp, which may be nil on failure
if err != nil {
return nil, err
}
if resp.ConsumedCapacity != nil {
dynamoConsumedCapacity.WithLabelValues("Query").
Add(float64(*resp.ConsumedCapacity.CapacityUnits))
}
result := []keyInfo{}
for _, item := range resp.Items {
reportKey := item[reportField].S
tsValue := item[tsField].N
if reportKey == nil || tsValue == nil {
log.Errorf("Empty row!")
continue
}
dynamoValueSize.WithLabelValues("BatchGetItem").
Add(float64(len(*reportKey)))
ts, _ := strconv.ParseInt(*tsValue, 10, 64)
result = append(result, keyInfo{key: *reportKey, ts: ts})
}
return result, nil
}
// getReportKeys returns the S3 keys for reports in the interval [start, end].
func (c *awsCollector) getReportKeys(ctx context.Context, userid string, start, end time.Time) ([]keyInfo, error) {
var (
rowStart = start.UnixNano() / time.Hour.Nanoseconds()
rowEnd = end.UnixNano() / time.Hour.Nanoseconds()
err error
)
// Queries will only ever span 2 rows max.
var reportKeys []keyInfo
if rowStart != rowEnd {
reportKeys1, err := c.reportKeysInRange(ctx, userid, rowStart, start, end)
if err != nil {
return nil, err
}
reportKeys2, err := c.reportKeysInRange(ctx, userid, rowEnd, start, end)
if err != nil {
return nil, err
}
reportKeys = append(reportKeys, reportKeys1...)
reportKeys = append(reportKeys, reportKeys2...)
} else {
if reportKeys, err = c.reportKeysInRange(ctx, userid, rowEnd, start, end); err != nil {
return nil, err
}
}
return reportKeys, nil
}
func (c *awsCollector) getReports(ctx context.Context, userid string, reportKeys []string) ([]report.Report, error) {
missing := reportKeys
stores := []ReportStore{c.inProcess}
if c.cfg.MemcacheClient != nil {
stores = append(stores, c.cfg.MemcacheClient)
}
stores = append(stores, c.cfg.S3Store)
var reports []report.Report
for _, store := range stores {
if store == nil {
continue
}
found, newMissing, err := store.FetchReports(ctx, missing)
missing = newMissing
if err != nil {
log.Warningf("Error fetching from cache: %v", err)
}
for key, report := range found {
report = c.massageReport(userid, report)
c.inProcess.StoreReport(key, report)
reports = append(reports, report)
}
if len(missing) == 0 {
return reports, nil
}
}
if len(missing) > 0 {
return nil, fmt.Errorf("Error fetching from s3, still have missing reports: %v", missing)
}
return reports, nil
}
// massageReport processes a report from a probe, which may be at an older version or overloaded
func (c *awsCollector) massageReport(userid string, report report.Report) report.Report {
if c.cfg.MaxTopNodes > 0 {
max := c.cfg.MaxTopNodes
if len(report.Host.Nodes) > 1 {
max = max * len(report.Host.Nodes) // higher limit for merged reports
}
var dropped []string
report, dropped = report.DropTopologiesOver(max)
for _, name := range dropped {
topologiesDropped.WithLabelValues(userid, name).Inc()
}
}
report = report.Upgrade()
return report
}
/*
S3 stores original reports from one probe at the timestamp they arrived at collector.
Collector also sends every report to memcached.
The in-memory cache stores:
- individual reports deserialised, under S3 key for report
- sets of reports in interval [t,t+3) merged, under key "instance:t"
- so to check the cache for reports from 14:31:00 to 14:31:15 you would request 5 keys 3 seconds apart
*/
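// Worked example (times illustrative): querying a 15s window ending at
// 14:31:15 sets start to 14:31:00, which is already a multiple of the 3s
// quantisation interval; merged reports are looked up (or built and cached)
// for the quanta starting 14:31:00, :03, :06 and :09, and the remainder from
// 14:31:12 onwards is served from individual report keys, since that last
// quantum plus the grace period has not yet fully elapsed.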
func (c *awsCollector) Report(ctx context.Context, timestamp time.Time) (report.Report, error) {
span, ctx := opentracing.StartSpanFromContext(ctx, "awsCollector.Report")
defer span.Finish()
userid, err := c.cfg.UserIDer(ctx)
if err != nil {
return report.MakeReport(), err
}
span.SetTag("userid", userid)
end := timestamp
start := end.Add(-c.cfg.Window)
reportKeys, err := c.getReportKeys(ctx, userid, start, end)
if err != nil {
return report.MakeReport(), err
}
span.LogFields(otlog.Int("keys", len(reportKeys)), otlog.String("timestamp", timestamp.String()))
var reports []report.Report
// Fetch a merged report for each time quantum covering the window
startTS, endTS := start.UnixNano(), end.UnixNano()
ts := startTS - (startTS % reportQuantisationInterval.Nanoseconds())
for ; ts+(reportQuantisationInterval+gracePeriod).Nanoseconds() < endTS; ts += reportQuantisationInterval.Nanoseconds() {
quantumReport, err := c.reportForQuantum(ctx, userid, reportKeys, ts)
if err != nil {
return report.MakeReport(), err
}
reports = append(reports, quantumReport)
}
// Fetch individual reports for the period after the last quantum
last, err := c.reportsForKeysInRange(ctx, userid, reportKeys, ts, endTS)
if err != nil {
return report.MakeReport(), err
}
reports = append(reports, last...)
span.LogFields(otlog.Int("merging", len(reports)))
return c.merger.Merge(reports), nil
}
// Fetch a merged report either from cache or from store which we put in cache
func (c *awsCollector) reportForQuantum(ctx context.Context, userid string, reportKeys []keyInfo, start int64) (report.Report, error) {
key := fmt.Sprintf("%s:%d", userid, start)
cached, _, _ := c.inProcess.FetchReports(ctx, []string{key}) // the in-process store never returns an error
if len(cached) == 1 {
return cached[key], nil
}
reports, err := c.reportsForKeysInRange(ctx, userid, reportKeys, start, start+reportQuantisationInterval.Nanoseconds())
if err != nil {
return report.MakeReport(), err
}
merged := c.merger.Merge(reports)
c.inProcess.StoreReport(key, merged)
return merged, nil
}
// Find the keys relating to this time period then fetch from memcached and/or S3
func (c *awsCollector) reportsForKeysInRange(ctx context.Context, userid string, reportKeys []keyInfo, start, end int64) ([]report.Report, error) {
var keys []string
for _, k := range reportKeys {
if k.ts >= start && k.ts < end {
keys = append(keys, k.key)
}
}
if span := opentracing.SpanFromContext(ctx); span != nil {
span.LogFields(otlog.Int("fetching", len(keys)), otlog.Int64("start", start), otlog.Int64("end", end))
}
log.Debugf("Fetching %d reports from %v to %v", len(keys), start, end)
return c.getReports(ctx, userid, keys)
}
func (c *awsCollector) HasReports(ctx context.Context, timestamp time.Time) (bool, error) {
userid, err := c.cfg.UserIDer(ctx)
if err != nil {
return false, err
}
start := timestamp.Add(-c.cfg.Window)
reportKeys, err := c.getReportKeys(ctx, userid, start, timestamp)
return len(reportKeys) > 0, err
}
func (c *awsCollector) HasHistoricReports() bool {
return true
}
// AdminSummary returns a string with some internal information about
// the report, which may be useful to troubleshoot.
func (c *awsCollector) AdminSummary(ctx context.Context, timestamp time.Time) (string, error) {
span, ctx := opentracing.StartSpanFromContext(ctx, "awsCollector.Report")
defer span.Finish()
userid, err := c.cfg.UserIDer(ctx)
if err != nil {
return "", err
}
end := timestamp
start := end.Add(-c.cfg.Window)
reportKeys, err := c.getReportKeys(ctx, userid, start, end)
if err != nil {
return "", err
}
reports, err := c.reportsForKeysInRange(ctx, userid, reportKeys, start.UnixNano(), end.UnixNano())
if err != nil {
return "", err
}
var b strings.Builder
for i := range reports {
// TODO: print the key - note reports may be in a different order from reportKeys
b.WriteString(reports[i].Summary())
b.WriteByte('\n')
}
return b.String(), nil
}
// calculateDynamoKeys generates the row & column keys for Dynamo.
func calculateDynamoKeys(userid string, now time.Time) (string, string) {
rowKey := fmt.Sprintf("%s-%s", userid, strconv.FormatInt(now.UnixNano()/time.Hour.Nanoseconds(), 10))
colKey := strconv.FormatInt(now.UnixNano(), 10)
return rowKey, colKey
}
// calculateReportKeys returns DynamoDB row & col keys, and S3/memcached key that we will use for a report
func calculateReportKeys(userid string, now time.Time) (string, string, string) {
rowKey, colKey := calculateDynamoKeys(userid, now)
rowKeyHash := md5.New()
_, _ = io.WriteString(rowKeyHash, rowKey) // hash write doesn't error
return rowKey, colKey, fmt.Sprintf("%x/%s", rowKeyHash.Sum(nil), colKey)
}
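// Worked example (values illustrative): for userid "foo" at Unix time
// 1500000000s, now.UnixNano()/time.Hour.Nanoseconds() = 416666, so the
// Dynamo rowKey is "foo-416666", the colKey is "1500000000000000000", and
// the S3/memcached key is "<hex md5 of rowKey>/1500000000000000000".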
func (c *awsCollector) persistReport(ctx context.Context, userid, rowKey, colKey, reportKey string, buf []byte) error {
// Put in S3 and cache before index, so it is fetchable before it is discoverable
reportSize, err := c.cfg.S3Store.StoreReportBytes(ctx, reportKey, buf)
if err != nil {
return err
}
if c.cfg.MemcacheClient != nil {
_, err = c.cfg.MemcacheClient.StoreReportBytes(ctx, reportKey, buf)
if err != nil {
// NOTE: We don't abort here because failing to store in memcache
// doesn't actually break anything else -- it's just an
// optimization.
log.Warningf("Could not store %v in memcache: %v", reportKey, err)
}
}
dynamoValueSize.WithLabelValues("PutItem").Add(float64(len(reportKey)))
err = instrument.TimeRequestHistogram(ctx, "DynamoDB.PutItem", dynamoRequestDuration, func(_ context.Context) error {
resp, err := c.putItemInDynamo(rowKey, colKey, reportKey)
if resp != nil && resp.ConsumedCapacity != nil {
dynamoConsumedCapacity.WithLabelValues("PutItem").
Add(float64(*resp.ConsumedCapacity.CapacityUnits))
}
return err
})
if err != nil {
return err
}
reportSizeHistogram.Observe(float64(reportSize))
reportSizePerUser.WithLabelValues(userid).Add(float64(reportSize))
reportsPerUser.WithLabelValues(userid).Inc()
return nil
}
func (c *awsCollector) putItemInDynamo(rowKey, colKey, reportKey string) (*dynamodb.PutItemOutput, error) {
// Back off on ProvisionedThroughputExceededException
const (
maxRetries = 5
throughputExceededError = "ProvisionedThroughputExceededException"
)
var (
resp *dynamodb.PutItemOutput
err error
retries = 0
backoff = 50 * time.Millisecond
)
for {
resp, err = c.db.PutItem(&dynamodb.PutItemInput{
TableName: aws.String(c.cfg.DynamoTable),
Item: map[string]*dynamodb.AttributeValue{
hourField: {
S: aws.String(rowKey),
},
tsField: {
N: aws.String(colKey),
},
reportField: {
S: aws.String(reportKey),
},
},
ReturnConsumedCapacity: aws.String(dynamodb.ReturnConsumedCapacityTotal),
})
if err != nil && retries < maxRetries {
if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == throughputExceededError {
time.Sleep(backoff)
retries++
backoff *= 2
continue
}
}
break
}
return resp, err
}
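// With the constants above, a throughput-limited PutItem is retried after
// sleeping 50ms, 100ms, 200ms, 400ms and 800ms (doubling each time, at most
// 5 retries) before the last response and error are returned.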
func (c *awsCollector) Add(ctx context.Context, rep report.Report, buf []byte) error {
userid, err := c.cfg.UserIDer(ctx)
if err != nil {
return err
}
// Shortcut reports are published to nats but not persisted -
// we'll get a full report from the same probe in a few seconds
if rep.Shortcut {
if c.nats != nil {
_, _, reportKey := calculateReportKeys(userid, time.Now())
_, err = c.cfg.MemcacheClient.StoreReportBytes(ctx, reportKey, buf)
if err != nil {
log.Warningf("Could not store shortcut %v in memcache: %v", reportKey, err)
// No point publishing on nats if cache store failed
return nil
}
err := c.nats.Publish(userid, []byte(reportKey))
natsRequests.WithLabelValues("Publish", instrument.ErrorCode(err)).Add(1)
if err != nil {
log.Errorf("Error sending shortcut report: %v", err)
}
}
return nil
}
if c.cfg.StoreInterval == 0 {
rowKey, colKey, reportKey := calculateReportKeys(userid, time.Now())
err = c.persistReport(ctx, userid, rowKey, colKey, reportKey, buf)
if err != nil {
return err
}
} else {
rep = c.massageReport(userid, rep)
entry := &pendingEntry{report: report.MakeReport()}
if e, found := c.pending.LoadOrStore(userid, entry); found {
entry = e.(*pendingEntry)
}
entry.Lock()
entry.report.UnsafeMerge(rep)
entry.count++
entry.Unlock()
}
return nil
}
func (c *awsCollector) WaitOn(ctx context.Context, waiter chan struct{}) {
userid, err := c.cfg.UserIDer(ctx)
if err != nil {
log.Errorf("Error getting user id in WaitOn: %v", err)
return
}
if c.nats == nil {
return
}
sub, err := c.nats.SubscribeSync(userid)
natsRequests.WithLabelValues("SubscribeSync", instrument.ErrorCode(err)).Add(1)
if err != nil {
log.Errorf("Error subscribing for shortcuts: %v", err)
return
}
c.waitersLock.Lock()
c.waiters[watchKey{userid, waiter}] = sub
c.waitersLock.Unlock()
go func() {
for {
_, err := sub.NextMsg(natsTimeout)
if err == nats.ErrTimeout {
continue
}
natsRequests.WithLabelValues("NextMsg", instrument.ErrorCode(err)).Add(1)
if err != nil {
log.Debugf("NextMsg error: %v", err)
return
}
select {
case waiter <- struct{}{}:
default:
}
}
}()
}
func (c *awsCollector) UnWait(ctx context.Context, waiter chan struct{}) {
userid, err := c.cfg.UserIDer(ctx)
if err != nil {
log.Errorf("Error getting user id in WaitOn: %v", err)
return
}
if c.nats == nil {
return
}
c.waitersLock.Lock()
key := watchKey{userid, waiter}
sub := c.waiters[key]
delete(c.waiters, key)
c.waitersLock.Unlock()
err = sub.Unsubscribe()
natsRequests.WithLabelValues("Unsubscribe", instrument.ErrorCode(err)).Add(1)
if err != nil {
log.Errorf("Error on unsubscribe: %v", err)
}
}
type inProcessStore struct {
cache gcache.Cache
}
// newInProcessStore creates an in-process store for reports.
func newInProcessStore(size int, expiration time.Duration) inProcessStore {
return inProcessStore{gcache.New(size).LRU().Expiration(expiration).Build()}
}
// FetchReports retrieves the given reports from the store.
func (c inProcessStore) FetchReports(_ context.Context, keys []string) (map[string]report.Report, []string, error) {
found := map[string]report.Report{}
missing := []string{}
for _, key := range keys {
rpt, err := c.cache.Get(key)
if err == nil {
found[key] = rpt.(report.Report)
} else {
missing = append(missing, key)
}
}
inProcessCacheHits.Add(float64(len(found)))
inProcessCacheRequests.Add(float64(len(keys)))
return found, missing, nil
}
// StoreReport stores a report in the store.
func (c inProcessStore) StoreReport(key string, report report.Report) {
c.cache.Set(key, report)
}

View File

@@ -1,203 +0,0 @@
package multitenant
import (
"context"
"crypto/sha256"
"encoding/base64"
"flag"
"math"
"strings"
"sync"
"time"
log "github.com/sirupsen/logrus"
billing "github.com/weaveworks/billing-client"
"github.com/weaveworks/scope/app"
"github.com/weaveworks/scope/report"
)
// BillingEmitterConfig has everything we need to make a billing emitter
type BillingEmitterConfig struct {
Enabled bool
DefaultInterval time.Duration
UserIDer UserIDer
}
// RegisterFlags registers the billing emitter flags with the main flag set.
func (cfg *BillingEmitterConfig) RegisterFlags(f *flag.FlagSet) {
f.BoolVar(&cfg.Enabled, "app.billing.enabled", false, "enable emitting billing info")
f.DurationVar(&cfg.DefaultInterval, "app.billing.default-publish-interval", 3*time.Second, "default publish interval to assume for reports")
}
// BillingEmitter is the billing emitter
type BillingEmitter struct {
app.Collector
BillingEmitterConfig
billing *billing.Client
sync.Mutex
intervalCache map[string]time.Duration
rounding map[string]float64
}
// NewBillingEmitter creates a new billing emitter which emits billing events
func NewBillingEmitter(upstream app.Collector, billingClient *billing.Client, cfg BillingEmitterConfig) (*BillingEmitter, error) {
return &BillingEmitter{
Collector: upstream,
billing: billingClient,
BillingEmitterConfig: cfg,
intervalCache: make(map[string]time.Duration),
rounding: make(map[string]float64),
}, nil
}
// Add implements app.Collector
func (e *BillingEmitter) Add(ctx context.Context, rep report.Report, buf []byte) error {
now := time.Now().UTC()
userID, err := e.UserIDer(ctx)
if err != nil {
// Underlying collector needs to get userID too, so it's OK to abort
// here. If this fails, so will underlying collector so no point
// proceeding.
return err
}
rowKey, colKey := calculateDynamoKeys(userID, now)
interval := e.reportInterval(rep)
// Cache the last-known value of interval for this user, and use
// it if we didn't find one in this report.
e.Lock()
if interval != 0 {
e.intervalCache[userID] = interval
} else {
if lastKnown, found := e.intervalCache[userID]; found {
interval = lastKnown
} else {
interval = e.DefaultInterval
}
}
// Billing takes an integer number of seconds, so keep track of the amount lost to rounding
nodeSeconds := interval.Seconds()*float64(len(rep.Host.Nodes)) + e.rounding[userID]
rounding := nodeSeconds - math.Floor(nodeSeconds)
e.rounding[userID] = rounding
e.Unlock()
hasher := sha256.New()
hasher.Write(buf)
hash := "sha256:" + base64.URLEncoding.EncodeToString(hasher.Sum(nil))
weaveNetCount := 0
if hasWeaveNet(rep) {
weaveNetCount = 1
}
amounts := billing.Amounts{
billing.ContainerSeconds: int64(interval/time.Second) * int64(len(rep.Container.Nodes)),
billing.NodeSeconds: int64(nodeSeconds),
billing.WeaveNetSeconds: int64(interval/time.Second) * int64(weaveNetCount),
}
metadata := map[string]string{
"row_key": rowKey,
"col_key": colKey,
}
err = e.billing.AddAmounts(
hash,
userID,
now,
amounts,
metadata,
)
if err != nil {
// No return, because we want to proceed even if we fail to emit
// billing data, so that defects in the billing system don't break
// report collection. Just log the fact & carry on.
log.Errorf("Failed emitting billing data: %v", err)
}
return e.Collector.Add(ctx, rep, buf)
}
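// Worked example of the rounding carry (numbers illustrative): at a 4500ms
// interval with 3 host nodes, one report is worth 13.5 node-seconds; 13 are
// billed and 0.5 is carried in e.rounding, so the next such report bills
// int64(13.5 + 0.5) = 14 node-seconds and nothing is lost to truncation.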
func commandParameter(cmd, flag string) (string, bool) {
i := strings.Index(cmd, flag)
if i != -1 {
// here we expect the command to look like `-foo=bar` or `-foo bar`
aft := strings.Fields(cmd[i+len(flag):])
if len(aft) > 0 && len(aft[0]) > 0 {
if aft[0][0] == '=' {
return aft[0][1:], true
}
return aft[0], true
}
}
return "", false
}
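// Examples (commands abridged and invented for illustration):
//
// commandParameter("scope --probe.publish.interval=5s", "probe.publish.interval") // ("5s", true)
// commandParameter("scope --probe.publish.interval 5s", "probe.publish.interval") // ("5s", true)
// commandParameter("prometheus --web.listen-address=:8080", "probe.publish.interval") // ("", false)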
func intervalFromCommand(cmd string) string {
if strings.Contains(cmd, "scope") {
if publishInterval, ok := commandParameter(cmd, "probe.publish.interval"); ok {
// If spy interval is higher than publish interval, some reports will have no process data
if spyInterval, ok := commandParameter(cmd, "spy.interval"); ok {
pubDuration, err1 := time.ParseDuration(publishInterval)
spyDuration, err2 := time.ParseDuration(spyInterval)
if err1 == nil && err2 == nil && spyDuration > pubDuration {
return spyInterval
}
}
return publishInterval
}
}
return ""
}
// reportInterval tries to find the custom report interval of this report. If
// it is malformed, or not set, it returns zero.
func (e *BillingEmitter) reportInterval(r report.Report) time.Duration {
if r.Window != 0 {
return r.Window
}
var inter string
for _, c := range r.Container.Nodes {
if cmd, ok := c.Latest.Lookup(report.DockerContainerCommand); ok {
if inter = intervalFromCommand(cmd); inter != "" {
break
}
}
}
if inter == "" { // not found in containers: look in processes
for _, c := range r.Process.Nodes {
if cmd, ok := c.Latest.Lookup(report.Cmdline); ok {
if inter = intervalFromCommand(cmd); inter != "" {
break
}
}
}
}
if inter == "" {
return 0
}
d, err := time.ParseDuration(inter)
if err != nil {
return 0
}
return d
}
// Tries to determine if this report came from a host running Weave Net
func hasWeaveNet(r report.Report) bool {
for _, n := range r.Overlay.Nodes {
overlayType, _ := report.ParseOverlayNodeID(n.ID)
if overlayType == report.WeaveOverlayPeerPrefix {
return true
}
}
return false
}
// Close shuts down the billing emitter, flushing pending events via the billing client.
func (e *BillingEmitter) Close() {
e.Collector.Close()
_ = e.billing.Close()
}

View File

@@ -1,29 +0,0 @@
package multitenant
import "testing"
func Test_intervalFromCommand(t *testing.T) {
tests := []struct {
name string
cmd string
want string
}{
{cmd: "/home/weave/scope --mode=probe --probe-only --probe.kubernetes=true --probe.spy.interval=3s --probe.publish.interval=5s --probe.processes=false --probe.conntrack=false --probe.ebpf.connections=false --probe.docker.bridge=docker0 --probe.docker=true https://redacted@cloud.weave.works.", want: "5s", name: "seconds"},
{cmd: "/home/weave/scope --mode=probe --probe-only --probe.kubernetes=true --probe.spy.interval=3s --probe.publish.interval 5s --probe.processes=false --probe.conntrack=false --probe.ebpf.connections=false --probe.docker.bridge=docker0 --probe.docker=true https://redacted@cloud.weave.works.", want: "5s", name: "space"},
{cmd: "/home/weave/scope --mode=probe --no-app --probe.docker=true --probe.kubernetes.role=host --weave=false --probe.publish.interval=4500ms --probe.spy.interval=2s --probe.http.listen=:4041 --probe.conntrack.buffersize=4194304 https://redacted@cloud.weave.works scope.weave.svc.cluster.local:80", want: "4500ms", name: "miliseconds"},
{cmd: "/home/weave/scope --mode=probe --no-app --probe.docker=true --probe.kubernetes.role=host --weave=false --probe.spy.interval=2s --probe.http.listen=:4041 --probe.conntrack.buffersize=4194304 https://redacted@cloud.weave.works scope.weave.svc.cluster.local:80", want: "", name: "notset"},
{cmd: "/home/weave/scope --mode=probe --probe-only --probe.kubernetes.role=host --probe.publish.interval=4500ms --probe.spy.interval=10s --probe.docker.bridge=docker0 --probe.docker=true --probe.ebpf.connections=false --probe.conntrack=false https://redacted@cloud.weave.works.", want: "10s", name: "higher-spy-interval"},
{cmd: "/bin/prometheus --config.file=/etc/prometheus/prometheus.yml --web.listen-address=:8080 --storage.tsdb.retention.time=2h --web.enable-lifecycle", want: "", name: "notscope"},
{cmd: "", want: "", name: "blank"},
{cmd: "/home/weave/scope --probe.publish.interval=3s", want: "3s", name: "at-end"},
{cmd: "/home/weave/scope --probe.publish.interval=", want: "", name: "equals-blank"},
{cmd: "/home/weave/scope --probe.publish.interval", want: "", name: "no-value"},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := intervalFromCommand(tt.cmd); got != tt.want {
t.Errorf("intervalFromCommand() = %v, want %v", got, tt.want)
}
})
}
}

View File

@@ -1,187 +0,0 @@
package multitenant
import (
"bytes"
"context"
"encoding/json"
"fmt"
"time"
consul "github.com/hashicorp/consul/api"
opentracing "github.com/opentracing/opentracing-go"
log "github.com/sirupsen/logrus"
)
const (
longPollDuration = 10 * time.Second
)
// ConsulClient is a high-level client for Consul that exposes operations
// such as CAS and Watch, which take callbacks. It also deals with serialisation.
type ConsulClient interface {
Get(ctx context.Context, key string, out interface{}) error
CAS(ctx context.Context, key string, out interface{}, f CASCallback) error
WatchPrefix(prefix string, out interface{}, done chan struct{}, f func(string, interface{}) bool)
}
// CASCallback is the type of the callback to CAS. If err is nil, out must be non-nil.
type CASCallback func(in interface{}) (out interface{}, retry bool, err error)
// NewConsulClient returns a new ConsulClient
func NewConsulClient(addr string) (ConsulClient, error) {
client, err := consul.NewClient(&consul.Config{
Address: addr,
Scheme: "http",
})
if err != nil {
return nil, err
}
return &consulClient{client.KV()}, nil
}
var (
queryOptions = &consul.QueryOptions{
RequireConsistent: true,
}
writeOptions = &consul.WriteOptions{}
// ErrNotFound is returned by ConsulClient.Get
ErrNotFound = fmt.Errorf("Not found")
)
type kv interface {
CAS(p *consul.KVPair, q *consul.WriteOptions) (bool, *consul.WriteMeta, error)
Get(key string, q *consul.QueryOptions) (*consul.KVPair, *consul.QueryMeta, error)
List(prefix string, q *consul.QueryOptions) (consul.KVPairs, *consul.QueryMeta, error)
}
type consulClient struct {
kv kv
}
// Get and deserialise a JSON value from consul.
func (c *consulClient) Get(ctx context.Context, key string, out interface{}) error {
span, ctx := opentracing.StartSpanFromContext(ctx, "Consul Get", opentracing.Tag{"key", key})
defer span.Finish()
kvp, _, err := c.kv.Get(key, queryOptions)
if err != nil {
return err
}
if kvp == nil {
return ErrNotFound
}
return json.NewDecoder(bytes.NewReader(kvp.Value)).Decode(out)
}
// CAS atomically modifies a value via a callback.
// If the value doesn't exist you'll get nil as the argument to your callback.
func (c *consulClient) CAS(ctx context.Context, key string, out interface{}, f CASCallback) error {
span, ctx := opentracing.StartSpanFromContext(ctx, "Consul CAS", opentracing.Tag{"key", key})
defer span.Finish()
var (
index = uint64(0)
retries = 10
retry = true
intermediate interface{}
)
for i := 0; i < retries; i++ {
kvp, _, err := c.kv.Get(key, queryOptions)
if err != nil {
log.Errorf("Error getting %s: %v", key, err)
continue
}
if kvp != nil {
if err := json.NewDecoder(bytes.NewReader(kvp.Value)).Decode(out); err != nil {
log.Errorf("Error deserialising %s: %v", key, err)
continue
}
index = kvp.ModifyIndex // if key doesn't exist, index will be 0
intermediate = out
}
intermediate, retry, err = f(intermediate)
if err != nil {
log.Errorf("Error CASing %s: %v", key, err)
if !retry {
return err
}
continue
}
if intermediate == nil {
panic("Callback must instantiate value!")
}
value := bytes.Buffer{}
if err := json.NewEncoder(&value).Encode(intermediate); err != nil {
log.Errorf("Error serialising value for %s: %v", key, err)
continue
}
ok, _, err := c.kv.CAS(&consul.KVPair{
Key: key,
Value: value.Bytes(),
ModifyIndex: index,
}, writeOptions)
if err != nil {
log.Errorf("Error CASing %s: %v", key, err)
continue
}
if !ok {
log.Errorf("Error CASing %s, trying again %d", key, index)
continue
}
return nil
}
return fmt.Errorf("Failed to CAS %s", key)
}
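// Usage sketch (hypothetical key and value type): read-modify-write an int
// counter under a consul key. The loop above already retries CAS conflicts;
// the returned bool says whether an error from the callback may be retried.
//
// var n int
// err := client.CAS(ctx, "example/counter", &n, func(in interface{}) (interface{}, bool, error) {
// next := 1
// if in != nil {
// next = *in.(*int) + 1
// }
// return &next, false, nil
// })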
func (c *consulClient) WatchPrefix(prefix string, out interface{}, done chan struct{}, f func(string, interface{}) bool) {
const (
initialBackoff = 1 * time.Second
maxBackoff = 1 * time.Minute
)
var (
backoff = initialBackoff / 2
index = uint64(0)
)
for {
select {
case <-done:
return
default:
}
kvps, meta, err := c.kv.List(prefix, &consul.QueryOptions{
RequireConsistent: true,
WaitIndex: index,
WaitTime: longPollDuration,
})
if err != nil {
log.Errorf("Error getting path %s: %v", prefix, err)
backoff = backoff * 2
if backoff > maxBackoff {
backoff = maxBackoff
}
select {
case <-done:
return
case <-time.After(backoff):
continue
}
}
backoff = initialBackoff
if index == meta.LastIndex {
continue
}
index = meta.LastIndex
for _, kvp := range kvps {
if err := json.NewDecoder(bytes.NewReader(kvp.Value)).Decode(out); err != nil {
log.Errorf("Error deserialising %s: %v", kvp.Key, err)
continue
}
if !f(kvp.Key, out) {
return
}
}
}
}

View File

@@ -1,454 +0,0 @@
package multitenant
import (
"fmt"
"io"
"net/http"
"net/url"
"sync"
"time"
"context"
"github.com/gorilla/mux"
"github.com/gorilla/websocket"
opentracing "github.com/opentracing/opentracing-go"
log "github.com/sirupsen/logrus"
"github.com/weaveworks/common/middleware"
"github.com/weaveworks/common/mtime"
"github.com/weaveworks/scope/app"
"github.com/weaveworks/scope/common/xfer"
)
const (
gcInterval = 30 * time.Second // we check all the pipes every 30s
pipeTimeout = 1 * time.Minute // pipes are closed when a client hasn't been connected for 1 minute
gcTimeout = 10 * time.Minute // after another 10 minutes, tombstoned pipes are forgotten
)
var (
wsDialer = &websocket.Dialer{}
)
// TODO deal with garbage collection
type consulPipe struct {
CreatedAt, DeletedAt time.Time
UIAddr, ProbeAddr string // Addrs where each end is connected
UIRef, ProbeRef int // Ref counts
}
func (c *consulPipe) setAddrFor(e app.End, addr string) {
if e == app.UIEnd {
c.UIAddr = addr
} else {
c.ProbeAddr = addr
}
}
func (c *consulPipe) addrFor(e app.End) string {
if e == app.UIEnd {
return c.UIAddr
}
return c.ProbeAddr
}
func (c *consulPipe) eitherEndFor(addr string) bool {
return c.addrFor(app.UIEnd) == addr || c.addrFor(app.ProbeEnd) == addr
}
func (c *consulPipe) acquire(e app.End) int {
if e == app.UIEnd {
c.UIRef++
return c.UIRef
}
c.ProbeRef++
return c.ProbeRef
}
func (c *consulPipe) release(e app.End) int {
if e == app.UIEnd {
c.UIRef--
return c.UIRef
}
c.ProbeRef--
return c.ProbeRef
}
type consulPipeRouter struct {
prefix string
advertise string // Address of this pipe router to advertise in consul
client ConsulClient
userIDer UserIDer
activePipes map[string]xfer.Pipe
bridges map[string]*bridgeConnection
actorChan chan func()
pipeWaiters map[string][]chan xfer.Pipe
// Used by Stop()
quit chan struct{}
wait sync.WaitGroup
}
// NewConsulPipeRouter returns a new consul based router
func NewConsulPipeRouter(client ConsulClient, prefix, advertise string, userIDer UserIDer) app.PipeRouter {
pipeRouter := &consulPipeRouter{
prefix: prefix,
advertise: advertise,
client: client,
userIDer: userIDer,
activePipes: map[string]xfer.Pipe{},
bridges: map[string]*bridgeConnection{},
actorChan: make(chan func()),
pipeWaiters: map[string][]chan xfer.Pipe{},
quit: make(chan struct{}),
}
pipeRouter.wait.Add(2)
go pipeRouter.watchAll()
go pipeRouter.actor()
go pipeRouter.privateAPI()
return pipeRouter
}
func (pr *consulPipeRouter) Stop() {
close(pr.quit)
pr.wait.Wait()
}
func (pr *consulPipeRouter) actor() {
defer pr.wait.Done()
for {
select {
case f := <-pr.actorChan:
f()
case <-pr.quit:
return
}
}
}
// watchAll listens to all pipe updates from consul.
// This is effectively a distributed, consistent actor routine.
// All state changes for this pipe router happen in this loop,
// and all the methods are implemented as CAS's on consul, to
// trigger an event in this loop.
func (pr *consulPipeRouter) watchAll() {
defer pr.wait.Done()
pr.client.WatchPrefix(pr.prefix, &consulPipe{}, pr.quit, func(key string, value interface{}) bool {
cp := *value.(*consulPipe)
select {
case pr.actorChan <- func() { pr.handlePipeUpdate(key, cp) }:
return true
case <-pr.quit:
return false
}
})
}
func (pr *consulPipeRouter) handlePipeUpdate(key string, cp consulPipe) {
// 1. If this pipe is closed, or we're not one of the ends, we
// should ensure our local pipe (and bridge) is closed.
if !cp.DeletedAt.IsZero() || !cp.eitherEndFor(pr.advertise) {
pipe, ok := pr.activePipes[key]
delete(pr.activePipes, key)
if ok {
log.Infof("Deleting pipe %s", key)
pipe.Close()
}
bridge, ok := pr.bridges[key]
delete(pr.bridges, key)
if ok {
bridge.stop()
}
return
}
// 2. If this pipe is for us, we should have a pipe for it.
pipe, ok := pr.activePipes[key]
if !ok {
log.Infof("Creating pipe %s", key)
pipe = xfer.NewPipe()
pr.activePipes[key] = pipe
for _, pw := range pr.pipeWaiters[key] {
pw <- pipe
}
delete(pr.pipeWaiters, key)
}
// 3. Ensure there is a bridging connection for this pipe.
// Semantics are that the owner of the UIEnd connects to the owner of the ProbeEnd
shouldBridge := cp.DeletedAt.IsZero() &&
cp.addrFor(app.UIEnd) != cp.addrFor(app.ProbeEnd) &&
cp.addrFor(app.UIEnd) == pr.advertise &&
cp.addrFor(app.ProbeEnd) != ""
bridge, ok := pr.bridges[key]
// If we shouldn't be bridging but are, or we should be bridging but are pointing
// at the wrong place, stop the current bridge.
if (!shouldBridge && ok) || (shouldBridge && ok && bridge.addr != cp.addrFor(app.ProbeEnd)) {
delete(pr.bridges, key)
bridge.stop()
ok = false
}
// If we should be bridging and are not, start a new bridge
if shouldBridge && !ok {
bridge = newBridgeConnection(key, cp.addrFor(app.ProbeEnd), pipe)
pr.bridges[key] = bridge
}
}
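// Concrete illustration (addresses invented): if the UI end of a pipe is
// owned by replica 10.0.0.1:4444 and the probe end by 10.0.0.2:4444, only
// 10.0.0.1 starts a bridgeConnection, dialling 10.0.0.2's private API; the
// probe-end owner just serves its end of the pipe over that websocket.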
func (pr *consulPipeRouter) getPipe(key string) xfer.Pipe {
pc := make(chan xfer.Pipe)
select {
case pr.actorChan <- func() { pc <- pr.activePipes[key] }:
return <-pc
case <-pr.quit:
return nil
}
}
func (pr *consulPipeRouter) waitForPipe(key string) xfer.Pipe {
pc := make(chan xfer.Pipe)
select {
case pr.actorChan <- func() {
pipe, ok := pr.activePipes[key]
if ok {
pc <- pipe
} else {
pr.pipeWaiters[key] = append(pr.pipeWaiters[key], pc)
}
}:
return <-pc
case <-pr.quit:
return nil
}
}
func (pr *consulPipeRouter) privateAPI() {
router := mux.NewRouter()
router.Methods("GET").
MatcherFunc(app.URLMatcher("/private/api/pipe/{key}")).
HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
key := mux.Vars(r)["key"]
log.Infof("%s: Server bridge connection started", key)
defer log.Infof("%s: Server bridge connection stopped", key)
pipe := pr.getPipe(key)
if pipe == nil {
log.Errorf("%s: Server bridge connection; Unknown pipe!", key)
w.WriteHeader(http.StatusNotFound)
return
}
conn, err := xfer.Upgrade(w, r, nil)
if err != nil {
log.Errorf("%s: Server bridge connection; Error upgrading to websocket: %v", key, err)
return
}
defer conn.Close()
end, _ := pipe.Ends()
if _, err := pipe.CopyToWebsocket(end, conn); err != nil {
log.Errorf("%s: Server bridge connection; Error copying pipe to websocket: %v", key, err)
}
})
handler := middleware.Tracer{RouteMatcher: router}.Wrap(router)
log.Infof("Serving private API on endpoint %s.", pr.advertise)
log.Infof("Private API terminated: %v", http.ListenAndServe(pr.advertise, handler))
}
func (pr *consulPipeRouter) Exists(ctx context.Context, id string) (bool, error) {
userID, err := pr.userIDer(ctx)
if err != nil {
return false, err
}
key := fmt.Sprintf("%s%s-%s", pr.prefix, userID, id)
consulPipe := consulPipe{}
err = pr.client.Get(ctx, key, &consulPipe)
if err == ErrNotFound {
return false, nil
} else if err != nil {
return false, err
}
return consulPipe.DeletedAt.IsZero(), nil
}
func (pr *consulPipeRouter) Get(ctx context.Context, id string, e app.End) (xfer.Pipe, io.ReadWriter, error) {
userID, err := pr.userIDer(ctx)
if err != nil {
return nil, nil, err
}
key := fmt.Sprintf("%s%s-%s", pr.prefix, userID, id)
log.Infof("Get %s:%s", key, e)
span, ctx := opentracing.StartSpanFromContext(ctx, "PipeRouter Get", opentracing.Tag{"key", key})
defer span.Finish()
// Try to ensure the given end of the given pipe
// is 'owned' by this pipe service replica in consul.
err = pr.client.CAS(ctx, key, &consulPipe{}, func(in interface{}) (interface{}, bool, error) {
var pipe *consulPipe
if in == nil {
pipe = &consulPipe{
CreatedAt: mtime.Now(),
}
} else {
pipe = in.(*consulPipe)
}
if !pipe.DeletedAt.IsZero() {
return nil, false, fmt.Errorf("Pipe %s has been deleted", key)
}
end := pipe.addrFor(e)
if end != "" && end != pr.advertise {
return nil, true, fmt.Errorf("Error: Pipe %s has existing connection to %s", key, end)
}
pipe.setAddrFor(e, pr.advertise)
pipe.acquire(e)
return pipe, false, nil
})
if err != nil {
return nil, nil, err
}
pipe := pr.waitForPipe(key)
myEnd, _ := pipe.Ends()
if e == app.ProbeEnd {
_, myEnd = pipe.Ends()
}
return pipe, myEnd, nil
}
func (pr *consulPipeRouter) Release(ctx context.Context, id string, e app.End) error {
userID, err := pr.userIDer(ctx)
if err != nil {
return err
}
key := fmt.Sprintf("%s%s-%s", pr.prefix, userID, id)
log.Infof("Release %s:%s", key, e)
span, ctx := opentracing.StartSpanFromContext(ctx, "PipeRouter Release", opentracing.Tag{"key", key})
defer span.Finish()
// atomically clear my end of the pipe in consul
return pr.client.CAS(ctx, key, &consulPipe{}, func(in interface{}) (interface{}, bool, error) {
if in == nil {
return nil, false, fmt.Errorf("pipe %s not found", id)
}
p := in.(*consulPipe)
if p.addrFor(e) != pr.advertise {
return nil, false, fmt.Errorf("pipe %s not owned by us", id)
}
refs := p.release(e)
if refs == 0 {
p.setAddrFor(e, "")
}
return p, true, nil
})
}
func (pr *consulPipeRouter) Delete(ctx context.Context, id string) error {
userID, err := pr.userIDer(ctx)
if err != nil {
return err
}
key := fmt.Sprintf("%s%s-%s", pr.prefix, userID, id)
log.Infof("Delete %s", key)
span, ctx := opentracing.StartSpanFromContext(ctx, "PipeRouter Delete", opentracing.Tag{"key", key})
defer span.Finish()
return pr.client.CAS(ctx, key, &consulPipe{}, func(in interface{}) (interface{}, bool, error) {
if in == nil {
return nil, false, fmt.Errorf("Pipe %s not found", id)
}
p := in.(*consulPipe)
p.DeletedAt = mtime.Now()
return p, false, nil
})
}
// A bridgeConnection represents a connection between two pipe router replicas.
// They are created & destroyed in response to events from consul, which in turn
// are triggered when UIs or Probes connect to various pipe routers.
type bridgeConnection struct {
key string
addr string // address to connect to
pipe xfer.Pipe
mtx sync.Mutex
conn xfer.Websocket
stopped bool
wait sync.WaitGroup
}
func newBridgeConnection(key, addr string, pipe xfer.Pipe) *bridgeConnection {
log.Infof("%s: Starting client bridge connection", key)
result := &bridgeConnection{
key: key,
addr: addr,
pipe: pipe,
}
result.wait.Add(1)
go result.loop()
return result
}
func (bc *bridgeConnection) stop() {
log.Infof("%s: Stopping client bridge connection", bc.key)
bc.mtx.Lock()
bc.stopped = true
if bc.conn != nil {
bc.conn.Close()
end, _ := bc.pipe.Ends()
end.Write(nil) // this will cause the other end to wake up and exit
}
bc.mtx.Unlock()
bc.wait.Wait()
}
func (bc *bridgeConnection) loop() {
log.Infof("%s: Client bridge connection started", bc.key)
defer bc.wait.Done()
defer log.Infof("%s: Client bridge connection stopped", bc.key)
_, end := bc.pipe.Ends()
url := fmt.Sprintf("ws://%s/private/api/pipe/%s", bc.addr, url.QueryEscape(bc.key))
for {
bc.mtx.Lock()
bc.conn = nil
if bc.stopped {
bc.mtx.Unlock()
return
}
bc.mtx.Unlock()
// connect to the other pipe router instance
conn, _, err := xfer.DialWS(wsDialer, url, http.Header{})
if err != nil {
log.Errorf("%s: Client bridge connection; Error connecting to %s: %v", bc.key, url, err)
time.Sleep(time.Second) // TODO backoff
continue
}
bc.mtx.Lock()
if bc.stopped {
bc.mtx.Unlock()
conn.Close()
return
}
bc.conn = conn
bc.mtx.Unlock()
if _, err := bc.pipe.CopyToWebsocket(end, conn); err != nil {
log.Errorf("%s: Client bridge connection; Error copying pipe to websocket: %v", bc.key, err)
}
conn.Close()
}
}

View File

@@ -1,206 +0,0 @@
package multitenant
import (
"bytes"
"fmt"
"io"
"log"
"math/rand"
"sync"
"testing"
"context"
"github.com/weaveworks/scope/app"
"github.com/weaveworks/scope/common/xfer"
"github.com/weaveworks/scope/probe/appclient"
)
type adapter struct {
c appclient.AppClient
}
func (a adapter) PipeConnection(_, pipeID string, pipe xfer.Pipe) error {
a.c.PipeConnection(pipeID, pipe)
return nil
}
func (a adapter) PipeClose(_, pipeID string) error {
return a.c.PipeClose(pipeID)
}
type pipeconn struct {
id string
uiPR, probePR app.PipeRouter
uiPipe, probePipe xfer.Pipe
uiIO, probeIO io.ReadWriter
}
func (p *pipeconn) test(t *testing.T) {
msg := []byte("hello " + p.id)
wait := sync.WaitGroup{}
wait.Add(2)
go func() {
defer wait.Done()
// write something to the probe end
_, err := p.probeIO.Write(msg)
if err != nil {
t.Fatal(err)
}
}()
go func() {
defer wait.Done()
// read it back off the other end
buf := make([]byte, len(msg))
n, err := p.uiIO.Read(buf)
if n != len(buf) {
t.Fatalf("only read %d", n)
}
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(buf, msg) {
t.Fatalf("Got: %v, Expected: %v", buf, msg)
}
}()
wait.Wait()
}
type pipeTest struct {
prs []app.PipeRouter
pipes []*pipeconn
}
func (pt *pipeTest) newPipe(t *testing.T) {
// make a new pipe id
id := fmt.Sprintf("pipe-%d", rand.Int63())
log.Printf(">>>> newPipe %s", id)
// pick a random PR to connect app to
uiIndex := rand.Intn(len(pt.prs))
uiPR := pt.prs[uiIndex]
uiPipe, uiIO, err := uiPR.Get(context.Background(), id, app.UIEnd)
if err != nil {
t.Fatal(err)
}
// pick a random PR to connect probe to
probeIndex := rand.Intn(len(pt.prs))
for probeIndex == uiIndex {
probeIndex = rand.Intn(len(pt.prs))
}
probePR := pt.prs[probeIndex]
probePipe, probeIO, err := probePR.Get(context.Background(), id, app.ProbeEnd)
if err != nil {
t.Fatal(err)
}
pipe := &pipeconn{
id: id,
uiPR: uiPR,
uiPipe: uiPipe,
uiIO: uiIO,
probePR: probePR,
probePipe: probePipe,
probeIO: probeIO,
}
pipe.test(t)
pt.pipes = append(pt.pipes, pipe)
}
func (pt *pipeTest) deletePipe(t *testing.T) {
// pick a random pipe
i := rand.Intn(len(pt.pipes))
pipe := pt.pipes[i]
log.Printf(">>>> deletePipe %s", pipe.id)
if err := pipe.uiPR.Release(context.Background(), pipe.id, app.UIEnd); err != nil {
t.Fatal(err)
}
if err := pipe.probePR.Release(context.Background(), pipe.id, app.ProbeEnd); err != nil {
t.Fatal(err)
}
// remove from list
pt.pipes = pt.pipes[:i+copy(pt.pipes[i:], pt.pipes[i+1:])]
}
func (pt *pipeTest) reconnectPipe(t *testing.T) {
// pick a random pipe
pipe := pt.pipes[rand.Intn(len(pt.pipes))]
log.Printf(">>>> reconnectPipe %s", pipe.id)
// pick a random PR to connect to
newPR := pt.prs[rand.Intn(len(pt.prs))]
// pick a random end
if rand.Float32() < 0.5 {
if err := pipe.uiPR.Release(context.Background(), pipe.id, app.UIEnd); err != nil {
t.Fatal(err)
}
uiPipe, uiIO, err := newPR.Get(context.Background(), pipe.id, app.UIEnd)
if err != nil {
t.Fatal(err)
}
pipe.uiPR, pipe.uiPipe, pipe.uiIO = newPR, uiPipe, uiIO
} else {
if err := pipe.probePR.Release(context.Background(), pipe.id, app.ProbeEnd); err != nil {
t.Fatal(err)
}
probePipe, probeIO, err := newPR.Get(context.Background(), pipe.id, app.ProbeEnd)
if err != nil {
t.Fatal(err)
}
pipe.probePR, pipe.probePipe, pipe.probeIO = newPR, probePipe, probeIO
}
}
func TestPipeRouter(t *testing.T) {
var (
consul = newMockConsulClient()
replicas = 2
iterations = 10
pt = pipeTest{}
)
for i := 0; i < replicas; i++ {
pr := NewConsulPipeRouter(consul, "", fmt.Sprintf("127.0.0.1:44%02d", i), NoopUserIDer)
defer pr.Stop()
pt.prs = append(pt.prs, pr)
}
for i := 0; i < iterations; i++ {
log.Printf("Iteration %d", i)
pt.newPipe(t)
pt.deletePipe(t)
}
}
//func TestPipeHard(t *testing.T) {
// if len(pipes) <= 0 {
// newPipe()
// continue
// } else if len(pipes) >= 2 {
// deletePipe()
// continue
// }
// r := rand.Float32()
// switch {
// case 0.0 < r && r <= 0.3:
// newPipe()
// case 0.3 < r && r <= 0.6:
// deletePipe()
// case 0.6 < r && r <= 1.0:
// reconnectPipe()
// }
//}

View File

@@ -1,224 +0,0 @@
package multitenant
import (
"bytes"
"fmt"
"net"
"sort"
"sync"
"time"
"context"
"github.com/bradfitz/gomemcache/memcache"
opentracing "github.com/opentracing/opentracing-go"
otlog "github.com/opentracing/opentracing-go/log"
"github.com/prometheus/client_golang/prometheus"
log "github.com/sirupsen/logrus"
"github.com/weaveworks/common/instrument"
"github.com/weaveworks/scope/report"
)
var (
memcacheRequests = prometheus.NewCounter(prometheus.CounterOpts{
Namespace: "scope",
Name: "memcache_requests_total",
Help: "Total count of reports requested from memcache that were not found in our in-memory cache.",
})
memcacheHits = prometheus.NewCounter(prometheus.CounterOpts{
Namespace: "scope",
Name: "memcache_hits_total",
Help: "Total count of reports found in memcache that were not found in our in-memory cache.",
})
memcacheRequestDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{
Namespace: "scope",
Name: "memcache_request_duration_seconds",
Help: "Total time spent in seconds doing memcache requests.",
Buckets: prometheus.DefBuckets,
}, []string{"method", "status_code"})
)
func registerMemcacheClientMetrics() {
prometheus.MustRegister(memcacheRequests)
prometheus.MustRegister(memcacheHits)
prometheus.MustRegister(memcacheRequestDuration)
}
var registerMemcacheClientMetricsOnce sync.Once
// MemcacheClient is a memcache client that gets its server list from SRV
// records, and periodically updates that ServerList.
type MemcacheClient struct {
client *memcache.Client
serverList *memcache.ServerList
expiration int32
hostname string
service string
compressionLevel int
quit chan struct{}
wait sync.WaitGroup
}
// MemcacheConfig defines how a MemcacheClient should be constructed.
type MemcacheConfig struct {
Host string
Service string
Timeout time.Duration
UpdateInterval time.Duration
Expiration time.Duration
CompressionLevel int
}
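// A minimal sketch of wiring up a client, assuming a memcached service
// discoverable via SRV records; the host, service name and tuning values
// below are illustrative, not defaults of this package:
//
//	mc := NewMemcacheClient(MemcacheConfig{
//		Host:             "memcached.default.svc.cluster.local",
//		Service:          "memcached",
//		Timeout:          100 * time.Millisecond,
//		UpdateInterval:   1 * time.Minute,
//		Expiration:       2 * time.Hour,
//		CompressionLevel: 6,
//	})
//	defer mc.Stop()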
// NewMemcacheClient creates a new MemcacheClient that gets its server list
// from SRV and updates the server list on a regular basis.
func NewMemcacheClient(config MemcacheConfig) *MemcacheClient {
registerMemcacheClientMetricsOnce.Do(registerMemcacheClientMetrics)
var servers memcache.ServerList
client := memcache.NewFromSelector(&servers)
client.Timeout = config.Timeout
newClient := &MemcacheClient{
client: client,
serverList: &servers,
expiration: int32(config.Expiration.Seconds()),
hostname: config.Host,
service: config.Service,
compressionLevel: config.CompressionLevel,
quit: make(chan struct{}),
}
err := newClient.updateMemcacheServers()
if err != nil {
log.Errorf("Error setting memcache servers to '%v': %v", config.Host, err)
}
newClient.wait.Add(1)
go newClient.updateLoop(config.UpdateInterval)
return newClient
}
// Stop the memcache client.
func (c *MemcacheClient) Stop() {
close(c.quit)
c.wait.Wait()
}
func (c *MemcacheClient) updateLoop(updateInterval time.Duration) error {
defer c.wait.Done()
ticker := time.NewTicker(updateInterval)
var err error
for {
select {
case <-ticker.C:
err = c.updateMemcacheServers()
if err != nil {
log.Warningf("Error updating memcache servers: %v", err)
}
case <-c.quit:
ticker.Stop()
// return so the deferred wait.Done() runs; otherwise the closed quit channel
// fires on every iteration and Stop() blocks forever on wait.Wait()
return nil
}
}
}
// updateMemcacheServers sets a memcache server list from SRV records. SRV
// priority & weight are ignored.
func (c *MemcacheClient) updateMemcacheServers() error {
_, addrs, err := net.LookupSRV(c.service, "tcp", c.hostname)
if err != nil {
return err
}
var servers []string
for _, srv := range addrs {
servers = append(servers, fmt.Sprintf("%s:%d", srv.Target, srv.Port))
}
// ServerList deterministically maps keys to _index_ of the server list.
// Since DNS returns records in different order each time, we sort to
// guarantee best possible match between nodes.
sort.Strings(servers)
return c.serverList.SetServers(servers...)
}
func memcacheStatusCode(err error) string {
// See https://godoc.org/github.com/bradfitz/gomemcache/memcache#pkg-variables
switch err {
case nil:
return "200"
case memcache.ErrCacheMiss:
return "404"
case memcache.ErrMalformedKey:
return "400"
default:
return "500"
}
}
// FetchReports gets reports from memcache.
func (c *MemcacheClient) FetchReports(ctx context.Context, keys []string) (map[string]report.Report, []string, error) {
span, ctx := opentracing.StartSpanFromContext(ctx, "Memcache.FetchReports")
defer span.Finish()
defer memcacheRequests.Add(float64(len(keys)))
var found map[string]*memcache.Item
err := instrument.TimeRequestHistogramStatus(ctx, "Memcache.GetMulti", memcacheRequestDuration, memcacheStatusCode, func(_ context.Context) error {
var err error
found, err = c.client.GetMulti(keys)
return err
})
span.LogFields(otlog.Int("keys", len(keys)), otlog.Int("hits", len(found)))
if err != nil {
return nil, keys, err
}
// Decode all the reports in parallel.
type result struct {
key string
report *report.Report
}
ch := make(chan result, len(keys))
var missing []string
for _, key := range keys {
item, ok := found[key]
if !ok {
missing = append(missing, key)
continue
}
go func(key string) {
rep, err := report.MakeFromBinary(ctx, bytes.NewBuffer(item.Value), true, true)
if err != nil {
log.Warningf("Corrupt report in memcache %v: %v", key, err)
ch <- result{key: key}
return
}
ch <- result{key: key, report: rep}
}(key)
}
reports := map[string]report.Report{}
lenFound := len(keys) - len(missing)
for i := 0; i < lenFound; i++ {
r := <-ch
if r.report == nil {
missing = append(missing, r.key)
} else {
reports[r.key] = *r.report
}
}
if len(missing) > 0 {
sort.Strings(missing)
log.Warningf("Missing %d reports from memcache: %v", len(missing), missing)
}
memcacheHits.Add(float64(len(reports)))
return reports, missing, nil
}
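// A sketch of how a caller might combine this with a slower backing store;
// "backing" is an assumed secondary store, not part of this package:
//
//	reports, missing, err := mc.FetchReports(ctx, keys)
//	if err == nil && len(missing) > 0 {
//		more, _, _ := backing.FetchReports(ctx, missing)
//		for k, v := range more {
//			reports[k] = v
//		}
//	}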
// StoreReportBytes stores a report.
func (c *MemcacheClient) StoreReportBytes(ctx context.Context, key string, rpt []byte) (int, error) {
err := instrument.TimeRequestHistogramStatus(ctx, "Memcache.Put", memcacheRequestDuration, memcacheStatusCode, func(_ context.Context) error {
item := memcache.Item{Key: key, Value: rpt, Expiration: c.expiration}
return c.client.Set(&item)
})
return len(rpt), err
}

View File

@@ -1,98 +0,0 @@
package multitenant
import (
"sync"
"time"
consul "github.com/hashicorp/consul/api"
)
type mockKV struct {
mtx sync.Mutex
cond *sync.Cond
kvps map[string]*consul.KVPair
next uint64 // the next update will have this 'index in the log'
}
func newMockConsulClient() ConsulClient {
m := mockKV{
kvps: map[string]*consul.KVPair{},
}
m.cond = sync.NewCond(&m.mtx)
go m.loop()
return &consulClient{&m}
}
func copyKVPair(in *consul.KVPair) *consul.KVPair {
value := make([]byte, len(in.Value))
copy(value, in.Value)
return &consul.KVPair{
Key: in.Key,
CreateIndex: in.CreateIndex,
ModifyIndex: in.ModifyIndex,
LockIndex: in.LockIndex,
Flags: in.Flags,
Value: value,
Session: in.Session,
}
}
// periodic loop to wake people up, so they can honour timeouts
func (m *mockKV) loop() {
for range time.Tick(1 * time.Second) {
m.mtx.Lock()
m.cond.Broadcast()
m.mtx.Unlock()
}
}
func (m *mockKV) CAS(p *consul.KVPair, q *consul.WriteOptions) (bool, *consul.WriteMeta, error) {
m.mtx.Lock()
defer m.mtx.Unlock()
existing, ok := m.kvps[p.Key]
if ok && existing.ModifyIndex != p.ModifyIndex {
return false, nil, nil
}
if ok {
existing.Value = p.Value
} else {
m.kvps[p.Key] = copyKVPair(p)
}
m.kvps[p.Key].ModifyIndex++
m.kvps[p.Key].LockIndex = m.next
m.next++
m.cond.Broadcast()
return true, nil, nil
}
func (m *mockKV) Get(key string, q *consul.QueryOptions) (*consul.KVPair, *consul.QueryMeta, error) {
m.mtx.Lock()
defer m.mtx.Unlock()
value, ok := m.kvps[key]
if !ok {
return nil, nil, nil
}
for q.WaitIndex >= value.ModifyIndex {
m.cond.Wait()
}
return copyKVPair(value), nil, nil
}
func (m *mockKV) List(prefix string, q *consul.QueryOptions) (consul.KVPairs, *consul.QueryMeta, error) {
m.mtx.Lock()
defer m.mtx.Unlock()
deadline := time.Now().Add(q.WaitTime)
for m.next <= q.WaitIndex && time.Now().Before(deadline) {
m.cond.Wait()
}
if time.Now().After(deadline) {
return nil, &consul.QueryMeta{LastIndex: q.WaitIndex}, nil
}
result := consul.KVPairs{}
for _, kvp := range m.kvps {
if kvp.LockIndex >= q.WaitIndex {
result = append(result, copyKVPair(kvp))
}
}
return result, &consul.QueryMeta{LastIndex: m.next}, nil
}

View File

@@ -1,104 +0,0 @@
package multitenant
import (
"bytes"
"sync"
"context"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/prometheus/client_golang/prometheus"
"github.com/weaveworks/common/instrument"
"github.com/weaveworks/scope/report"
)
var (
s3RequestDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{
Namespace: "scope",
Name: "s3_request_duration_seconds",
Help: "Time in seconds spent doing S3 requests.",
Buckets: prometheus.DefBuckets,
}, []string{"method", "status_code"})
)
// S3Store is an S3 client that stores and retrieves Reports.
type S3Store struct {
s3 *s3.S3
bucketName string
}
func registerS3ClientMetrics() {
prometheus.MustRegister(s3RequestDuration)
}
var registerS3ClientMetricsOnce sync.Once
// NewS3Client creates a new S3 client.
func NewS3Client(config *aws.Config, bucketName string) S3Store {
registerS3ClientMetricsOnce.Do(registerS3ClientMetrics)
return S3Store{
s3: s3.New(session.New(config)),
bucketName: bucketName,
}
}
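// A minimal usage sketch; the region and bucket name are assumptions:
//
//	store := NewS3Client(aws.NewConfig().WithRegion("us-east-1"), "scope-reports")
//	if _, err := store.StoreReportBytes(ctx, "some-report-key", buf); err != nil {
//		log.Errorf("store failed: %v", err)
//	}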
// FetchReports fetches multiple reports in parallel from S3.
func (store *S3Store) FetchReports(ctx context.Context, keys []string) (map[string]report.Report, []string, error) {
type result struct {
key string
report *report.Report
err error
}
ch := make(chan result, len(keys))
for _, key := range keys {
go func(key string) {
r := result{key: key}
r.report, r.err = store.fetchReport(ctx, key)
ch <- r
}(key)
}
reports := map[string]report.Report{}
for range keys {
r := <-ch
if r.err != nil {
return nil, []string{}, r.err
}
reports[r.key] = *r.report
}
return reports, []string{}, nil
}
func (store *S3Store) fetchReport(ctx context.Context, key string) (*report.Report, error) {
var resp *s3.GetObjectOutput
err := instrument.TimeRequestHistogram(ctx, "S3.Get", s3RequestDuration, func(_ context.Context) error {
var err error
resp, err = store.s3.GetObject(&s3.GetObjectInput{
Bucket: aws.String(store.bucketName),
Key: aws.String(key),
})
return err
})
if err != nil {
return nil, err
}
defer resp.Body.Close()
return report.MakeFromBinary(ctx, resp.Body, true, true)
}
// StoreReportBytes stores a report.
func (store *S3Store) StoreReportBytes(ctx context.Context, key string, buf []byte) (int, error) {
err := instrument.TimeRequestHistogram(ctx, "S3.Put", s3RequestDuration, func(_ context.Context) error {
_, err := store.s3.PutObject(&s3.PutObjectInput{
Body: bytes.NewReader(buf),
Bucket: aws.String(store.bucketName),
Key: aws.String(key),
})
return err
})
return len(buf), err
}

View File

@@ -1,372 +0,0 @@
package multitenant
import (
"bytes"
"encoding/json"
"fmt"
"math/rand"
"sync"
"time"
"context"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/sqs"
"github.com/prometheus/client_golang/prometheus"
log "github.com/sirupsen/logrus"
"github.com/weaveworks/common/instrument"
"github.com/weaveworks/scope/app"
"github.com/weaveworks/scope/common/xfer"
)
var (
longPollTime = aws.Int64(10)
sqsRequestDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{
Namespace: "scope",
Name: "sqs_request_duration_seconds",
Help: "Time in seconds spent doing SQS requests.",
Buckets: prometheus.DefBuckets,
}, []string{"method", "status_code"})
)
func registerSQSMetrics() {
prometheus.MustRegister(sqsRequestDuration)
}
var registerSQSMetricsOnce sync.Once
// sqsControlRouter:
// Creates a queue for every probe that connects to it, and a queue for
// responses back to it. When it receives a request, it posts the request to
// the probe's queue. When the probe receives a request, it handles it and
// posts the response back to the response queue.
type sqsControlRouter struct {
service *sqs.SQS
responseQueueURL *string
userIDer UserIDer
prefix string
rpcTimeout time.Duration
mtx sync.Mutex
responses map[string]chan xfer.Response
probeWorkers map[int64]*probeWorker
}
type sqsRequestMessage struct {
ID string
Request xfer.Request
ResponseQueueURL string
}
type sqsResponseMessage struct {
ID string
Response xfer.Response
}
// NewSQSControlRouter creates an SQS-backed control router and starts its receive loop.
func NewSQSControlRouter(config *aws.Config, userIDer UserIDer, prefix string, rpcTimeout time.Duration) app.ControlRouter {
registerSQSMetricsOnce.Do(registerSQSMetrics)
result := &sqsControlRouter{
service: sqs.New(session.New(config)),
responseQueueURL: nil,
userIDer: userIDer,
prefix: prefix,
rpcTimeout: rpcTimeout,
responses: map[string]chan xfer.Response{},
probeWorkers: map[int64]*probeWorker{},
}
go result.loop()
return result
}
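// A sketch of constructing the router; the region, header name, prefix and
// timeout are assumptions for illustration:
//
//	cr := NewSQSControlRouter(aws.NewConfig().WithRegion("us-east-1"),
//		UserIDHeader("X-Scope-OrgID"), "scope-", 10*time.Second)
//	defer cr.Stop()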
func (cr *sqsControlRouter) Stop() error {
return nil
}
func (cr *sqsControlRouter) setResponseQueueURL(url *string) {
cr.mtx.Lock()
defer cr.mtx.Unlock()
cr.responseQueueURL = url
}
func (cr *sqsControlRouter) getResponseQueueURL() *string {
cr.mtx.Lock()
defer cr.mtx.Unlock()
return cr.responseQueueURL
}
func (cr *sqsControlRouter) getOrCreateQueue(ctx context.Context, name string) (*string, error) {
// CreateQueue creates the queue or, if it already exists, returns its URL
var createQueueRes *sqs.CreateQueueOutput
var err error
err = instrument.TimeRequestHistogram(ctx, "SQS.CreateQueue", sqsRequestDuration, func(_ context.Context) error {
createQueueRes, err = cr.service.CreateQueue(&sqs.CreateQueueInput{
QueueName: aws.String(name),
})
return err
})
if err != nil {
return nil, err
}
return createQueueRes.QueueUrl, nil
}
func (cr *sqsControlRouter) loop() {
var (
responseQueueURL *string
err error
ctx = context.Background()
)
for {
// This app has a random id and uses this as a return path for all responses from probes.
name := fmt.Sprintf("%scontrol-app-%d", cr.prefix, rand.Int63())
responseQueueURL, err = cr.getOrCreateQueue(ctx, name)
if err != nil {
log.Errorf("Failed to create queue: %v", err)
time.Sleep(1 * time.Second)
continue
}
cr.setResponseQueueURL(responseQueueURL)
break
}
for {
var res *sqs.ReceiveMessageOutput
var err error
err = instrument.TimeRequestHistogram(ctx, "SQS.ReceiveMessage", sqsRequestDuration, func(_ context.Context) error {
res, err = cr.service.ReceiveMessage(&sqs.ReceiveMessageInput{
QueueUrl: responseQueueURL,
WaitTimeSeconds: longPollTime,
})
return err
})
if err != nil {
log.Errorf("Error receiving message from %s: %v", *responseQueueURL, err)
continue
}
if len(res.Messages) == 0 {
continue
}
if err := cr.deleteMessages(ctx, responseQueueURL, res.Messages); err != nil {
log.Errorf("Error deleting message from %s: %v", *responseQueueURL, err)
}
cr.handleResponses(res)
}
}
func (cr *sqsControlRouter) deleteMessages(ctx context.Context, queueURL *string, messages []*sqs.Message) error {
entries := []*sqs.DeleteMessageBatchRequestEntry{}
for _, message := range messages {
entries = append(entries, &sqs.DeleteMessageBatchRequestEntry{
ReceiptHandle: message.ReceiptHandle,
Id: message.MessageId,
})
}
return instrument.TimeRequestHistogram(ctx, "SQS.DeleteMessageBatch", sqsRequestDuration, func(_ context.Context) error {
_, err := cr.service.DeleteMessageBatch(&sqs.DeleteMessageBatchInput{
QueueUrl: queueURL,
Entries: entries,
})
return err
})
}
func (cr *sqsControlRouter) handleResponses(res *sqs.ReceiveMessageOutput) {
cr.mtx.Lock()
defer cr.mtx.Unlock()
for _, message := range res.Messages {
var sqsResponse sqsResponseMessage
if err := json.NewDecoder(bytes.NewBufferString(*message.Body)).Decode(&sqsResponse); err != nil {
log.Errorf("Error decoding message: %v", err)
continue
}
waiter, ok := cr.responses[sqsResponse.ID]
if !ok {
log.Errorf("Dropping response %s - no one waiting for it!", sqsResponse.ID)
continue
}
waiter <- sqsResponse.Response
}
}
func (cr *sqsControlRouter) sendMessage(ctx context.Context, queueURL *string, message interface{}) error {
buf := bytes.Buffer{}
if err := json.NewEncoder(&buf).Encode(message); err != nil {
return err
}
log.Debugf("sendMessage to %s: %s", *queueURL, buf.String())
return instrument.TimeRequestHistogram(ctx, "SQS.SendMessage", sqsRequestDuration, func(_ context.Context) error {
_, err := cr.service.SendMessage(&sqs.SendMessageInput{
QueueUrl: queueURL,
MessageBody: aws.String(buf.String()),
})
return err
})
}
func (cr *sqsControlRouter) Handle(ctx context.Context, probeID string, req xfer.Request) (xfer.Response, error) {
// Make sure we know the user
userID, err := cr.userIDer(ctx)
if err != nil {
return xfer.Response{}, err
}
// Get the queue url for the local (control app) queue, and for the probe.
responseQueueURL := cr.getResponseQueueURL()
if responseQueueURL == nil {
return xfer.Response{}, fmt.Errorf("no SQS queue yet")
}
var probeQueueURL *sqs.GetQueueUrlOutput
err = instrument.TimeRequestHistogram(ctx, "SQS.GetQueueUrl", sqsRequestDuration, func(_ context.Context) error {
probeQueueName := fmt.Sprintf("%sprobe-%s-%s", cr.prefix, userID, probeID)
probeQueueURL, err = cr.service.GetQueueUrl(&sqs.GetQueueUrlInput{
QueueName: aws.String(probeQueueName),
})
return err
})
if err != nil {
return xfer.Response{}, err
}
// Add a response channel before we send the request, to prevent races
id := fmt.Sprintf("request-%s-%d", userID, rand.Int63())
waiter := make(chan xfer.Response, 1)
cr.mtx.Lock()
cr.responses[id] = waiter
cr.mtx.Unlock()
defer func() {
cr.mtx.Lock()
delete(cr.responses, id)
cr.mtx.Unlock()
}()
// Next, send the request to that queue
if err := instrument.TimeRequestHistogram(ctx, "SQS.SendMessage", sqsRequestDuration, func(ctx context.Context) error {
return cr.sendMessage(ctx, probeQueueURL.QueueUrl, sqsRequestMessage{
ID: id,
Request: req,
ResponseQueueURL: *responseQueueURL,
})
}); err != nil {
return xfer.Response{}, err
}
// Finally, wait for a response on our queue
select {
case response := <-waiter:
return response, nil
case <-time.After(cr.rpcTimeout):
return xfer.Response{}, fmt.Errorf("request timed out")
}
}
func (cr *sqsControlRouter) Register(ctx context.Context, probeID string, handler xfer.ControlHandlerFunc) (int64, error) {
userID, err := cr.userIDer(ctx)
if err != nil {
return 0, err
}
name := fmt.Sprintf("%sprobe-%s-%s", cr.prefix, userID, probeID)
queueURL, err := cr.getOrCreateQueue(ctx, name)
if err != nil {
return 0, err
}
pwID := rand.Int63()
pw := &probeWorker{
ctx: ctx,
router: cr,
requestQueueURL: queueURL,
handler: handler,
quit: make(chan struct{}),
}
pw.done.Add(1)
go pw.loop()
cr.mtx.Lock()
defer cr.mtx.Unlock()
cr.probeWorkers[pwID] = pw
return pwID, nil
}
func (cr *sqsControlRouter) Deregister(_ context.Context, probeID string, id int64) error {
cr.mtx.Lock()
pw, ok := cr.probeWorkers[id]
delete(cr.probeWorkers, id)
cr.mtx.Unlock()
if ok {
pw.stop()
}
return nil
}
// a probeWorker encapsulates a goroutine serving a probe's websocket connection.
type probeWorker struct {
ctx context.Context
router *sqsControlRouter
requestQueueURL *string
handler xfer.ControlHandlerFunc
quit chan struct{}
done sync.WaitGroup
}
func (pw *probeWorker) stop() {
close(pw.quit)
pw.done.Wait()
}
func (pw *probeWorker) loop() {
defer pw.done.Done()
for {
// have we been stopped?
select {
case <-pw.quit:
return
default:
}
var res *sqs.ReceiveMessageOutput
var err error
err = instrument.TimeRequestHistogram(pw.ctx, "SQS.ReceiveMessage", sqsRequestDuration, func(_ context.Context) error {
res, err = pw.router.service.ReceiveMessage(&sqs.ReceiveMessageInput{
QueueUrl: pw.requestQueueURL,
WaitTimeSeconds: longPollTime,
})
return err
})
if err != nil {
log.Errorf("Error receiving message: %v", err)
continue
}
if len(res.Messages) == 0 {
continue
}
if err := pw.router.deleteMessages(pw.ctx, pw.requestQueueURL, res.Messages); err != nil {
log.Errorf("Error deleting message from %s: %v", *pw.requestQueueURL, err)
}
for _, message := range res.Messages {
var sqsRequest sqsRequestMessage
if err := json.NewDecoder(bytes.NewBufferString(*message.Body)).Decode(&sqsRequest); err != nil {
log.Errorf("Error decoding message from: %v", err)
continue
}
response := pw.handler(sqsRequest.Request)
if err := pw.router.sendMessage(pw.ctx, &sqsRequest.ResponseQueueURL, sqsResponseMessage{
ID: sqsRequest.ID,
Response: response,
}); err != nil {
log.Errorf("Error sending response: %v", err)
}
}
}
}

View File

@@ -1,37 +0,0 @@
package multitenant
import (
"fmt"
"net/http"
"context"
"github.com/weaveworks/scope/app"
)
// ErrUserIDNotFound should be returned by a UserIDer when it fails to ID the
// user for a request.
var ErrUserIDNotFound = fmt.Errorf("User ID not found")
// UserIDer identifies users given a request context.
type UserIDer func(context.Context) (string, error)
// UserIDHeader returns a UserIDer that reads the user ID from the request header with the supplied name.
func UserIDHeader(headerName string) UserIDer {
return func(ctx context.Context) (string, error) {
request, ok := ctx.Value(app.RequestCtxKey).(*http.Request)
if !ok || request == nil {
return "", ErrUserIDNotFound
}
userID := request.Header.Get(headerName)
if userID == "" {
return "", ErrUserIDNotFound
}
return userID, nil
}
}
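// A sketch of use, assuming the conventional X-Scope-OrgID header name; the
// context must carry the *http.Request under app.RequestCtxKey:
//
//	userIDer := UserIDHeader("X-Scope-OrgID")
//	userID, err := userIDer(ctx)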
// NoopUserIDer always returns the empty user ID.
func NoopUserIDer(context.Context) (string, error) {
return "", nil
}

View File

@@ -1,201 +0,0 @@
package app
import (
"fmt"
"io"
"sync"
"time"
"context"
log "github.com/sirupsen/logrus"
"github.com/weaveworks/common/mtime"
"github.com/weaveworks/scope/common/xfer"
)
const (
gcInterval = 30 * time.Second // we check all the pipes every 30s
pipeTimeout = 1 * time.Minute // pipes are closed when a client hasn't been connected for 1 minute
gcTimeout = 10 * time.Minute // after another 10 minutes, tombstoned pipes are forgotten
)
// End is an enum for either end of the pipe.
type End int
// Valid values of type End
const (
UIEnd = iota
ProbeEnd
)
func (e End) String() string {
if e == UIEnd {
return "ui"
}
return "probe"
}
// PipeRouter stores pipes and allows you to connect to either end of them.
type PipeRouter interface {
Exists(context.Context, string) (bool, error)
Get(context.Context, string, End) (xfer.Pipe, io.ReadWriter, error)
Release(context.Context, string, End) error
Delete(context.Context, string) error
Stop()
}
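// A sketch of connecting to the UI end of a pipe; "pipe-1" is an assumed ID:
//
//	p, rw, err := router.Get(ctx, "pipe-1", UIEnd)
//	if err == nil {
//		defer router.Release(ctx, "pipe-1", UIEnd)
//		io.WriteString(rw, "hello")
//	}
//	_ = p // the xfer.Pipe handle also exposes Close() and CopyToWebsocket()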
// localPipeRouter is an in-memory PipeRouter connecting incoming and outgoing pipes.
type localPipeRouter struct {
sync.Mutex
wait sync.WaitGroup
quit chan struct{}
pipes map[string]*pipe
}
// for each end of the pipe, we keep a reference count & lastUsedTime,
// such that we can time out pipes when either end is inactive.
type pipe struct {
xfer.Pipe
tombstoneTime time.Time
ui, probe end
}
type end struct {
refCount int
lastUsedTime time.Time
}
func (p *pipe) end(end End) (*end, io.ReadWriter) {
ui, probe := p.Ends()
if end == UIEnd {
return &p.ui, ui
}
return &p.probe, probe
}
// NewLocalPipeRouter returns a new local (in-memory) pipe router.
func NewLocalPipeRouter() PipeRouter {
pipeRouter := &localPipeRouter{
quit: make(chan struct{}),
pipes: map[string]*pipe{},
}
pipeRouter.wait.Add(1)
go pipeRouter.gcLoop()
return pipeRouter
}
func (pr *localPipeRouter) Exists(_ context.Context, id string) (bool, error) {
pr.Lock()
defer pr.Unlock()
p, ok := pr.pipes[id]
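// pipes are created on demand in Get, so an ID we have not seen yet is
// reported as existing - it can still be connected to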
if !ok {
return true, nil
}
return !p.Closed(), nil
}
func (pr *localPipeRouter) Get(_ context.Context, id string, e End) (xfer.Pipe, io.ReadWriter, error) {
pr.Lock()
defer pr.Unlock()
p, ok := pr.pipes[id]
if !ok {
log.Debugf("Creating pipe id %s", id)
p = &pipe{
ui: end{lastUsedTime: mtime.Now()},
probe: end{lastUsedTime: mtime.Now()},
Pipe: xfer.NewPipe(),
}
pr.pipes[id] = p
}
if p.Closed() {
return nil, nil, fmt.Errorf("Pipe %s closed", id)
}
end, endIO := p.end(e)
end.refCount++
return p, endIO, nil
}
func (pr *localPipeRouter) Release(_ context.Context, id string, e End) error {
pr.Lock()
defer pr.Unlock()
p, ok := pr.pipes[id]
if !ok {
return fmt.Errorf("Pipe %s not found", id)
}
end, _ := p.end(e)
end.refCount--
if end.refCount > 0 {
return nil
}
if !p.Closed() {
end.lastUsedTime = mtime.Now()
}
return nil
}
func (pr *localPipeRouter) Delete(_ context.Context, id string) error {
pr.Lock()
defer pr.Unlock()
p, ok := pr.pipes[id]
if !ok {
return nil
}
p.Close()
p.tombstoneTime = mtime.Now()
return nil
}
func (pr *localPipeRouter) Stop() {
close(pr.quit)
pr.wait.Wait()
}
func (pr *localPipeRouter) gcLoop() {
defer pr.wait.Done()
ticker := time.Tick(gcInterval)
for {
select {
case <-pr.quit:
return
case <-ticker:
}
pr.timeout()
pr.garbageCollect()
}
}
func (pr *localPipeRouter) timeout() {
pr.Lock()
defer pr.Unlock()
now := mtime.Now()
for id, pipe := range pr.pipes {
if pipe.Closed() || (pipe.ui.refCount > 0 && pipe.probe.refCount > 0) {
continue
}
if (pipe.ui.refCount == 0 && now.Sub(pipe.ui.lastUsedTime) >= pipeTimeout) ||
(pipe.probe.refCount == 0 && now.Sub(pipe.probe.lastUsedTime) >= pipeTimeout) {
log.Infof("Timing out pipe %s", id)
pipe.Close()
pipe.tombstoneTime = now
}
}
}
func (pr *localPipeRouter) garbageCollect() {
pr.Lock()
defer pr.Unlock()
now := mtime.Now()
for pipeID, pipe := range pr.pipes {
if pipe.Closed() && now.Sub(pipe.tombstoneTime) >= gcTimeout {
delete(pr.pipes, pipeID)
}
}
}

View File

@@ -1,87 +0,0 @@
package app
import (
"net/http"
"context"
"github.com/gorilla/mux"
opentracing "github.com/opentracing/opentracing-go"
log "github.com/sirupsen/logrus"
"github.com/weaveworks/scope/common/xfer"
)
// RegisterPipeRoutes registers the pipe routes
func RegisterPipeRoutes(router *mux.Router, pr PipeRouter) {
router.Methods("GET").
Name("api_pipe_pipeid_check").
Path("/api/pipe/{pipeID}/check").
HandlerFunc(requestContextDecorator(checkPipe(pr)))
router.Methods("GET").
Name("api_pipe_pipeid").
Path("/api/pipe/{pipeID}").
HandlerFunc(requestContextDecorator(handlePipeWs(pr, UIEnd)))
router.Methods("GET").
Name("api_pipe_pipeid_probe").
Path("/api/pipe/{pipeID}/probe").
HandlerFunc(requestContextDecorator(handlePipeWs(pr, ProbeEnd)))
router.Methods("DELETE", "POST").
Name("api_pipe_pipeid").
Path("/api/pipe/{pipeID}").
HandlerFunc(requestContextDecorator(deletePipe(pr)))
}
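// A minimal sketch of serving these routes; the listen address is an
// assumption:
//
//	r := mux.NewRouter()
//	RegisterPipeRoutes(r, NewLocalPipeRouter())
//	log.Fatal(http.ListenAndServe(":4040", r))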
func checkPipe(pr PipeRouter) CtxHandlerFunc {
return func(ctx context.Context, w http.ResponseWriter, r *http.Request) {
id := mux.Vars(r)["pipeID"]
exists, err := pr.Exists(ctx, id)
if err != nil {
respondWith(ctx, w, http.StatusInternalServerError, err)
} else if exists {
w.WriteHeader(http.StatusNoContent)
} else {
http.NotFound(w, r)
}
}
}
func handlePipeWs(pr PipeRouter, end End) CtxHandlerFunc {
return func(ctx context.Context, w http.ResponseWriter, r *http.Request) {
id := mux.Vars(r)["pipeID"]
pipe, endIO, err := pr.Get(ctx, id, end)
if err != nil {
// this usually means the pipe has been closed
log.Debugf("Error getting pipe %s: %v", id, err)
http.NotFound(w, r)
return
}
defer pr.Release(ctx, id, end)
conn, err := xfer.Upgrade(w, r, nil)
if err != nil {
log.Errorf("Error upgrading pipe %s (%d) websocket: %v", id, end, err)
return
}
defer conn.Close()
if _, err := pipe.CopyToWebsocket(endIO, conn); err != nil {
if span := opentracing.SpanFromContext(ctx); span != nil {
span.LogKV("error", err.Error())
}
log.Errorf("Error copying to pipe %s (%d) websocket: %v", id, end, err)
}
}
}
func deletePipe(pr PipeRouter) CtxHandlerFunc {
return func(ctx context.Context, w http.ResponseWriter, r *http.Request) {
pipeID := mux.Vars(r)["pipeID"]
log.Debugf("Deleting pipe %s", pipeID)
if err := pr.Delete(ctx, pipeID); err != nil {
respondWith(ctx, w, http.StatusInternalServerError, err)
}
}
}

View File

@@ -1,143 +0,0 @@
package app
import (
"bytes"
"fmt"
"net"
"net/http"
"net/http/httptest"
"net/url"
"strings"
"testing"
"time"
"context"
"github.com/gorilla/mux"
"github.com/gorilla/websocket"
"github.com/weaveworks/common/mtime"
"github.com/weaveworks/scope/common/xfer"
"github.com/weaveworks/scope/probe/appclient"
"github.com/weaveworks/scope/probe/controls"
"github.com/weaveworks/scope/test"
)
func TestPipeTimeout(t *testing.T) {
router := mux.NewRouter()
pr := NewLocalPipeRouter().(*localPipeRouter)
RegisterPipeRoutes(router, pr)
pr.Stop() // we don't want the loop running in the background
mtime.NowForce(time.Now())
defer mtime.NowReset()
// create a new pipe.
id := "foo"
ctx := context.Background()
pipe, _, err := pr.Get(ctx, id, UIEnd)
if err != nil {
t.Fatalf("not ok: %v", err)
}
// move time forward such that the new pipe should timeout
mtime.NowForce(mtime.Now().Add(pipeTimeout))
pr.timeout()
if !pipe.Closed() {
t.Fatalf("pipe didn't timeout")
}
// move time forward such that the pipe should be GCd
mtime.NowForce(mtime.Now().Add(gcTimeout))
pr.garbageCollect()
if _, ok := pr.pipes[id]; ok {
t.Fatalf("pipe not gc'd")
}
}
type adapter struct {
c appclient.AppClient
}
func (a adapter) PipeConnection(_, pipeID string, pipe xfer.Pipe) error {
a.c.PipeConnection(pipeID, pipe)
return nil
}
func (a adapter) PipeClose(_, pipeID string) error {
return a.c.PipeClose(pipeID)
}
func TestPipeClose(t *testing.T) {
router := mux.NewRouter()
pr := NewLocalPipeRouter()
RegisterPipeRoutes(router, pr)
defer pr.Stop()
server := httptest.NewServer(router)
defer server.Close()
ip, port, err := net.SplitHostPort(strings.TrimPrefix(server.URL, "http://"))
if err != nil {
t.Fatal(err)
}
probeConfig := appclient.ProbeConfig{
ProbeID: "foo",
}
url := url.URL{Scheme: "http", Host: ip + ":" + port}
client, err := appclient.NewAppClient(probeConfig, ip+":"+port, url, nil)
if err != nil {
t.Fatal(err)
}
defer client.Stop()
// this is the probe end of the pipe
pipeID, pipe, err := controls.NewPipe(adapter{client}, "appid")
if err != nil {
t.Fatal(err)
}
// this is a client to the app
pipeURL := fmt.Sprintf("ws://%s:%s/api/pipe/%s", ip, port, pipeID)
conn, _, err := websocket.DefaultDialer.Dial(pipeURL, http.Header{})
if err != nil {
t.Fatal(err)
}
// Send something from pipe -> app -> conn
local, _ := pipe.Ends()
msg := []byte("hello world")
if _, err := local.Write(msg); err != nil {
t.Fatal(err)
}
if _, buf, err := conn.ReadMessage(); err != nil {
t.Fatal(err)
} else if !bytes.Equal(buf, msg) {
t.Fatalf("%v != %v", buf, msg)
}
// Send something from conn -> app -> probe
msg = []byte("goodbye, cruel world")
if err := conn.WriteMessage(websocket.BinaryMessage, msg); err != nil {
t.Fatal(err)
}
buf := make([]byte, 1024)
if n, err := local.Read(buf); err != nil {
t.Fatal(err)
} else if !bytes.Equal(msg, buf[:n]) {
t.Fatalf("%v != %v", buf, msg)
}
// Now delete the pipe
if err := pipe.Close(); err != nil {
t.Fatal(err)
}
// the client backs off for 1 second before trying to reconnect the pipe,
// so we need to wait for longer.
test.Poll(t, 2*time.Second, true, func() interface{} {
return pipe.Closed()
})
}

View File

@@ -1,205 +0,0 @@
package app
import (
"bytes"
"compress/gzip"
"fmt"
"io"
"net/http"
"net/url"
"strings"
"sync"
"time"
"context"
"github.com/NYTimes/gziphandler"
"github.com/gorilla/mux"
log "github.com/sirupsen/logrus"
"github.com/weaveworks/scope/common/hostname"
"github.com/weaveworks/scope/common/xfer"
"github.com/weaveworks/scope/report"
)
var (
// Version - set at buildtime.
Version = "dev"
// UniqueID - set at runtime.
UniqueID = "0"
)
// contextKey is a wrapper type for use in context.WithValue() to satisfy golint
// https://github.com/golang/go/issues/17293
// https://github.com/golang/lint/pull/245
type contextKey string
// RequestCtxKey is key used for request entry in context
const RequestCtxKey contextKey = contextKey("request")
// CtxHandlerFunc is a http.HandlerFunc, with added contexts
type CtxHandlerFunc func(context.Context, http.ResponseWriter, *http.Request)
func requestContextDecorator(f CtxHandlerFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
ctx := context.WithValue(r.Context(), RequestCtxKey, r)
f(ctx, w, r)
}
}
// URLMatcher uses request.RequestURI (the raw, unparsed request) to attempt
// to match pattern. It does this as go's URL.Parse method is broken, and
// mistakenly unescapes the Path before parsing it. This breaks %2F (encoded
// forward slashes) in the paths.
func URLMatcher(pattern string) mux.MatcherFunc {
return func(r *http.Request, rm *mux.RouteMatch) bool {
vars, match := matchURL(r, pattern)
if match {
rm.Vars = vars
}
return match
}
}
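// For example (illustrative), with pattern "/api/topology/{topology}/{id}",
// the raw URI "/api/topology/hosts/a%2Fb" matches and yields
// vars["id"] == "a/b", which mux's default URL-parsing matcher would mangle.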
func matchURL(r *http.Request, pattern string) (map[string]string, bool) {
matchParts := strings.Split(pattern, "/")
path := strings.SplitN(r.RequestURI, "?", 2)[0]
parts := strings.Split(path, "/")
if len(parts) != len(matchParts) {
return nil, false
}
vars := map[string]string{}
for i, part := range parts {
unescaped, err := url.QueryUnescape(part)
if err != nil {
return nil, false
}
match := matchParts[i]
if strings.HasPrefix(match, "{") && strings.HasSuffix(match, "}") {
vars[strings.Trim(match, "{}")] = unescaped
} else if matchParts[i] != unescaped {
return nil, false
}
}
return vars, true
}
func gzipHandler(h http.HandlerFunc) http.Handler {
return gziphandler.GzipHandler(h)
}
// RegisterTopologyRoutes registers the various topology routes with a http mux.
func RegisterTopologyRoutes(router *mux.Router, r Reporter, capabilities map[string]bool) {
get := router.Methods("GET").Subrouter()
get.Handle("/api",
gzipHandler(requestContextDecorator(apiHandler(r, capabilities))))
get.Handle("/api/topology",
gzipHandler(requestContextDecorator(topologyRegistry.makeTopologyList(r))))
get.Handle("/api/topology/{topology}",
gzipHandler(requestContextDecorator(topologyRegistry.captureRenderer(r, handleTopology)))).
Name("api_topology_topology")
get.Handle("/api/topology/{topology}/ws",
requestContextDecorator(captureReporter(r, handleWebsocket))). // NB not gzip!
Name("api_topology_topology_ws")
get.MatcherFunc(URLMatcher("/api/topology/{topology}/{id}")).Handler(
gzipHandler(requestContextDecorator(topologyRegistry.captureRenderer(r, handleNode)))).
Name("api_topology_topology_id")
get.Handle("/api/report",
gzipHandler(requestContextDecorator(makeRawReportHandler(r))))
get.Handle("/api/probes",
gzipHandler(requestContextDecorator(makeProbeHandler(r))))
}
// RegisterReportPostHandler registers the handler for report submission
func RegisterReportPostHandler(a Adder, router *mux.Router) {
post := router.Methods("POST").Subrouter()
post.HandleFunc("/api/report", requestContextDecorator(func(ctx context.Context, w http.ResponseWriter, r *http.Request) {
var (
buf = &bytes.Buffer{}
reader = io.TeeReader(r.Body, buf)
)
gzipped := strings.Contains(r.Header.Get("Content-Encoding"), "gzip")
if !gzipped {
reader = io.TeeReader(r.Body, gzip.NewWriter(buf))
}
contentType := r.Header.Get("Content-Type")
var isMsgpack bool
switch {
case strings.HasPrefix(contentType, "application/msgpack"):
isMsgpack = true
case strings.HasPrefix(contentType, "application/json"):
isMsgpack = false
default:
respondWith(ctx, w, http.StatusBadRequest, fmt.Errorf("Unsupported Content-Type: %v", contentType))
return
}
rpt, err := report.MakeFromBinary(ctx, reader, gzipped, isMsgpack)
if err != nil {
respondWith(ctx, w, http.StatusBadRequest, err)
return
}
// a.Add(..., buf) assumes buf is gzip'd msgpack
if !isMsgpack {
buf, _ = rpt.WriteBinary()
}
if err := a.Add(ctx, *rpt, buf.Bytes()); err != nil {
log.Errorf("Error Adding report: %v", err)
respondWith(ctx, w, http.StatusInternalServerError, err)
return
}
w.WriteHeader(http.StatusOK)
}))
}
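// A sketch of a probe-style submission; the URL and body variable are
// assumptions:
//
//	req, _ := http.NewRequest("POST", "http://localhost:4040/api/report", gzippedMsgpack)
//	req.Header.Set("Content-Type", "application/msgpack")
//	req.Header.Set("Content-Encoding", "gzip")
//	resp, err := http.DefaultClient.Do(req)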
// RegisterAdminRoutes registers routes for admin calls with a http mux.
func RegisterAdminRoutes(router *mux.Router, reporter Reporter) {
get := router.Methods("GET").Subrouter()
get.Handle("/admin/summary", requestContextDecorator(func(ctx context.Context, w http.ResponseWriter, r *http.Request) {
summary, err := reporter.AdminSummary(ctx, time.Now())
if err != nil {
respondWith(ctx, w, http.StatusBadRequest, err)
}
fmt.Fprintln(w, summary)
}))
}
var newVersion = struct {
sync.Mutex
*xfer.NewVersionInfo
}{}
// NewVersion is called to expose new version information to /api
func NewVersion(version, downloadURL string) {
newVersion.Lock()
defer newVersion.Unlock()
newVersion.NewVersionInfo = &xfer.NewVersionInfo{
Version: version,
DownloadURL: downloadURL,
}
}
func apiHandler(rep Reporter, capabilities map[string]bool) CtxHandlerFunc {
return func(ctx context.Context, w http.ResponseWriter, r *http.Request) {
report, err := rep.Report(ctx, time.Now())
if err != nil {
respondWith(ctx, w, http.StatusInternalServerError, err)
return
}
newVersion.Lock()
defer newVersion.Unlock()
respondWith(ctx, w, http.StatusOK, xfer.Details{
ID: UniqueID,
Version: Version,
Hostname: hostname.Get(),
Plugins: report.Plugins,
Capabilities: capabilities,
NewVersion: newVersion.NewVersionInfo,
})
}
}

View File

@@ -1,96 +0,0 @@
package app_test
import (
"bytes"
"io/ioutil"
"net/http"
"net/http/httptest"
"reflect"
"testing"
"time"
"context"
"github.com/gorilla/mux"
"github.com/ugorji/go/codec"
"github.com/weaveworks/common/test"
"github.com/weaveworks/scope/app"
"github.com/weaveworks/scope/test/fixture"
)
type v map[string]string
func TestURLMatcher(t *testing.T) {
test := func(pattern, path string, match bool, vars v) {
routeMatch := &mux.RouteMatch{}
if app.URLMatcher(pattern)(&http.Request{RequestURI: path}, routeMatch) != match {
t.Fatalf("'%s' '%s'", pattern, path)
}
if match && !reflect.DeepEqual(v(routeMatch.Vars), vars) {
t.Fatalf("%v != %v", v(routeMatch.Vars), vars)
}
}
test("/a/b/c", "/a/b/c", true, v{})
test("/a/b/c", "/c/b/a", false, v{})
test("/{a}/b/c", "/b/b/c", true, v{"a": "b"})
test("/{a}/b/c", "/b/b/b", false, v{})
test("/a/b/{c}", "/a/b/b", true, v{"c": "b"})
test("/a/b/{c}", "/a/b/b%2Fb", true, v{"c": "b/b"})
}
func TestReportPostHandler(t *testing.T) {
test := func(contentType string, encoder func(interface{}) ([]byte, error)) {
router := mux.NewRouter()
c := app.NewCollector(1 * time.Minute)
app.RegisterReportPostHandler(c, router)
ts := httptest.NewServer(router)
defer ts.Close()
b, err := encoder(fixture.Report)
if err != nil {
t.Fatalf("Content-Type %s: %s", contentType, err)
}
req, err := http.NewRequest("POST", ts.URL+"/api/report", bytes.NewReader(b))
if err != nil {
t.Fatalf("Error posting report: %v", err)
}
req.Header.Set("Content-Type", contentType)
resp, err := http.DefaultClient.Do(req)
if err != nil {
t.Fatalf("Error posting report %v", err)
}
_, err = ioutil.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
t.Fatalf("Error posting report: %v", err)
}
if resp.StatusCode != http.StatusOK {
t.Fatalf("Error posting report: %d", resp.StatusCode)
}
ctx := context.Background()
report, err := c.Report(ctx, time.Now())
if err != nil {
t.Error(err)
}
if want, have := fixture.Report.Endpoint.Nodes, report.Endpoint.Nodes; len(have) == 0 || len(want) != len(have) {
t.Fatalf("Content-Type %s: %v", contentType, test.Diff(have, want))
}
}
test("application/json", func(v interface{}) ([]byte, error) {
buf := &bytes.Buffer{}
err := codec.NewEncoder(buf, &codec.JsonHandle{}).Encode(v)
return buf.Bytes(), err
})
test("application/msgpack", func(v interface{}) ([]byte, error) {
buf := &bytes.Buffer{}
err := codec.NewEncoder(buf, &codec.MsgpackHandle{}).Encode(v)
return buf.Bytes(), err
})
}

View File

@@ -1,119 +0,0 @@
package app_test
import (
"bytes"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"path/filepath"
"reflect"
"runtime"
"testing"
)
// assert fails the test if the condition is false.
func assert(tb testing.TB, condition bool, msg string, v ...interface{}) {
if !condition {
_, file, line, _ := runtime.Caller(1)
tb.Fatalf("%s:%d: "+msg, append([]interface{}{filepath.Base(file), line}, v...)...)
}
}
// ok errors the test if an err is not nil.
func ok(tb testing.TB, err error) {
if err != nil {
_, file, line, _ := runtime.Caller(1)
tb.Errorf("%s:%d: unexpected error: %v", filepath.Base(file), line, err)
}
}
// equals errors the test if want is not equal to have.
func equals(tb testing.TB, want, have interface{}) {
if !reflect.DeepEqual(want, have) {
_, file, line, _ := runtime.Caller(1)
tb.Errorf("%s:%d: want %#v, have %#v", filepath.Base(file), line, want, have)
}
}
// checkGet does a GET and returns the response and the body
func checkGet(t *testing.T, ts *httptest.Server, path string) (*http.Response, []byte) {
return checkRequest(t, ts, "GET", path, nil)
}
// checkRequest does a 'method'-request (e.g. 'GET') and returns the response and the body
func checkRequest(t *testing.T, ts *httptest.Server, method, path string, body []byte) (*http.Response, []byte) {
fullPath := ts.URL + path
var bodyReader io.Reader
if len(body) > 0 {
bodyReader = bytes.NewReader(body)
}
req, err := http.NewRequest(method, fullPath, bodyReader)
if err != nil {
t.Fatalf("Error getting %s: %s %s", method, path, err)
}
req.Header.Set("Content-Type", "application/msgpack")
client := &http.Client{}
res, err := client.Do(req)
if err != nil {
t.Fatalf("Error getting %s %s: %s", method, path, err)
}
body, err = ioutil.ReadAll(res.Body)
res.Body.Close()
if err != nil {
t.Fatalf("%s %s body read error: %s", method, path, err)
}
return res, body
}
// getRawJSON GETs a file, checks it is JSON, and returns the non-parsed body
func getRawJSON(t *testing.T, ts *httptest.Server, path string) []byte {
res, body := checkGet(t, ts, path)
_, file, line, _ := runtime.Caller(1)
file = filepath.Base(file)
if res.StatusCode != 200 {
t.Fatalf("%s:%d: Expected status %d, got %d. Path: %s", file, line, 200, res.StatusCode, path)
}
foundCtype := res.Header.Get("content-type")
if foundCtype != "application/json" {
t.Errorf("%s:%d: Wrong Content-type for JSON: %s", file, line, foundCtype)
}
if len(body) == 0 {
t.Errorf("%s:%d: No response body", file, line)
}
// fmt.Printf("Body: %s", body)
return body
}
// is200 GETs path and verifies the status code. Returns the body
func is200(t *testing.T, ts *httptest.Server, path string) []byte {
res, body := checkGet(t, ts, path)
if res.StatusCode != 200 {
t.Fatalf("Expected status %d, got %d. Path: %s", 200, res.StatusCode, path)
}
return body
}
// is404 GETs path and verifies it returns a 404 status code. Returns the body
func is404(t *testing.T, ts *httptest.Server, path string) []byte {
res, body := checkGet(t, ts, path)
if res.StatusCode != 404 {
t.Fatalf("Expected status %d, got %d", 404, res.StatusCode)
}
return body
}
// is400 GETs path and verifies it returns a 400 status code. Returns the body
func is400(t *testing.T, ts *httptest.Server, path string) []byte {
res, body := checkGet(t, ts, path)
if res.StatusCode != 400 {
t.Fatalf("Expected status %d, got %d", 400, res.StatusCode)
}
return body
}

View File

@@ -1,35 +0,0 @@
package app
import (
"context"
"net/http"
opentracing "github.com/opentracing/opentracing-go"
"github.com/ugorji/go/codec"
log "github.com/sirupsen/logrus"
)
func respondWith(ctx context.Context, w http.ResponseWriter, code int, response interface{}) {
if err, ok := response.(error); ok {
log.Errorf("Error %d: %v", code, err)
response = err.Error()
} else if 500 <= code && code < 600 {
log.Errorf("Non-error %d: %v", code, response)
} else if ctx.Err() != nil {
log.Debugf("Context error %v", ctx.Err())
code = 499
response = nil
}
if span := opentracing.SpanFromContext(ctx); span != nil {
span.LogKV("response-code", code)
}
w.Header().Set("Content-Type", "application/json")
w.Header().Add("Cache-Control", "no-cache")
w.WriteHeader(code)
encoder := codec.NewEncoder(w, &codec.JsonHandle{})
if err := encoder.Encode(response); err != nil {
log.Errorf("Error encoding response: %v", err)
}
}

View File

@@ -1,148 +0,0 @@
package app
import (
"fmt"
"net"
"strings"
fsouza "github.com/fsouza/go-dockerclient"
"github.com/weaveworks/common/backoff"
)
// Default values for weave app integration
const (
DefaultHostname = "scope.weave.local."
DefaultWeaveURL = "http://127.0.0.1:6784"
DefaultContainerName = "weavescope"
)
// WeavePublisher is a thing which periodically registers this app with WeaveDNS.
type WeavePublisher struct {
containerName string
hostname string
dockerClient DockerClient
weaveClient WeaveClient
backoff backoff.Interface
interfaces InterfaceFunc
}
// DockerClient is the little bit of the docker client we need.
type DockerClient interface {
ListContainers(fsouza.ListContainersOptions) ([]fsouza.APIContainers, error)
}
// WeaveClient is the little bit of the weave client we need.
type WeaveClient interface {
AddDNSEntry(hostname, containerid string, ip net.IP) error
Expose() error
}
// Interface is because net.Interface isn't mockable.
type Interface struct {
Name string
Addrs []net.Addr
}
// InterfaceFunc is the type of Interfaces()
type InterfaceFunc func() ([]Interface, error)
// Interfaces returns the list of Interfaces on the machine.
func Interfaces() ([]Interface, error) {
ifaces, err := net.Interfaces()
if err != nil {
return nil, err
}
result := []Interface{}
for _, i := range ifaces {
addrs, err := i.Addrs()
if err != nil {
continue
}
result = append(result, Interface{
Name: i.Name,
Addrs: addrs,
})
}
return result, nil
}
// NewWeavePublisher makes a new Weave.
func NewWeavePublisher(weaveClient WeaveClient, dockerClient DockerClient, interfaces InterfaceFunc, hostname, containerName string) *WeavePublisher {
w := &WeavePublisher{
containerName: containerName,
hostname: hostname,
dockerClient: dockerClient,
weaveClient: weaveClient,
interfaces: interfaces,
}
w.backoff = backoff.New(w.updateDNS, "updating weaveDNS")
go w.backoff.Start()
return w
}
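// A sketch of wiring the publisher up with the package defaults; the client
// values are assumed to be real implementations:
//
//	w := NewWeavePublisher(weaveClient, dockerClient, Interfaces,
//		DefaultHostname, DefaultContainerName)
//	defer w.Stop()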
// Stop the Weave.
func (w *WeavePublisher) Stop() {
w.backoff.Stop()
}
func (w *WeavePublisher) updateDNS() (bool, error) {
// 0. expose this host
if err := w.weaveClient.Expose(); err != nil {
return false, err
}
// 1. work out my IP addresses
ifaces, err := w.interfaces()
if err != nil {
return false, err
}
ips := []net.IP{}
for _, i := range ifaces {
if strings.HasPrefix(i.Name, "lo") ||
strings.HasPrefix(i.Name, "docker") ||
strings.HasPrefix(i.Name, "veth") {
continue
}
for _, addr := range i.Addrs {
var ip net.IP
switch v := addr.(type) {
case *net.IPAddr:
ip = v.IP
case *net.IPNet:
ip = v.IP
}
if ip != nil && ip.To4() != nil {
ips = append(ips, ip)
}
}
}
// 2. work out my container name
containers, err := w.dockerClient.ListContainers(fsouza.ListContainersOptions{})
if err != nil {
return false, err
}
containerID := ""
outer:
for _, container := range containers {
for _, name := range container.Names {
if name == "/"+w.containerName {
containerID = container.ID
break outer
}
}
}
if containerID == "" {
return false, fmt.Errorf("Container %s not found", w.containerName)
}
// 3. Register these with weave dns
for _, ip := range ips {
if err := w.weaveClient.AddDNSEntry(w.hostname, containerID, ip); err != nil {
return false, err
}
}
return false, nil
}

View File

@@ -1,103 +0,0 @@
package app_test
import (
"net"
"sync"
"testing"
"time"
fsouza "github.com/fsouza/go-dockerclient"
"github.com/weaveworks/scope/app"
"github.com/weaveworks/scope/test"
)
type mockDockerClient struct{}
func (mockDockerClient) ListContainers(fsouza.ListContainersOptions) ([]fsouza.APIContainers, error) {
return []fsouza.APIContainers{
{
Names: []string{"/" + containerName},
ID: containerID,
},
{
Names: []string{"/notme"},
ID: "1234abcd",
},
}, nil
}
type entry struct {
containerid string
ip net.IP
}
type mockWeaveClient struct {
sync.Mutex
published map[string]entry
}
func (m *mockWeaveClient) AddDNSEntry(hostname, containerid string, ip net.IP) error {
m.Lock()
defer m.Unlock()
m.published[hostname] = entry{containerid, ip}
return nil
}
func (m *mockWeaveClient) Expose() error {
return nil
}
const (
hostname = "foo.weave"
containerName = "bar"
containerID = "a1b2c3d4"
)
var (
ip = net.ParseIP("1.2.3.4")
)
func TestWeave(t *testing.T) {
weaveClient := &mockWeaveClient{
published: map[string]entry{},
}
dockerClient := mockDockerClient{}
interfaces := func() ([]app.Interface, error) {
return []app.Interface{
{
Name: "eth0",
Addrs: []net.Addr{
&net.IPAddr{
IP: ip,
},
},
},
{
Name: "docker0",
Addrs: []net.Addr{
&net.IPAddr{
IP: net.ParseIP("4.3.2.1"),
},
},
},
}, nil
}
publisher := app.NewWeavePublisher(
weaveClient, dockerClient, interfaces,
hostname, containerName)
defer publisher.Stop()
want := map[string]entry{
hostname: {containerID, ip},
}
test.Poll(t, 100*time.Millisecond, want, func() interface{} {
weaveClient.Lock()
defer weaveClient.Unlock()
result := map[string]entry{}
for k, v := range weaveClient.published {
result[k] = v
}
return result
})
}

View File

@@ -1,65 +0,0 @@
FROM golang:1.14.2
ENV SCOPE_SKIP_UI_ASSETS true
RUN set -eux; \
export arch_val="$(dpkg --print-architecture)"; \
apt-get update && \
if [ "$arch_val" = "amd64" ]; then \
apt-get install -y libpcap-dev time file shellcheck git gcc-arm-linux-gnueabihf curl build-essential python-pip; \
else \
apt-get install -y libpcap-dev time file shellcheck git curl build-essential python-pip; \
fi; \
\
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
RUN go clean -i net && \
go install -tags netgo std && \
export arch_val="$(dpkg --print-architecture)"; \
if [ "$arch_val" != "ppc64el" ]; then \
go install -race -tags netgo std; \
fi; \
go get -tags netgo \
github.com/fzipp/gocyclo \
golang.org/x/lint/golint \
github.com/kisielk/errcheck \
github.com/fatih/hclfmt \
github.com/mjibson/esc \
github.com/client9/misspell/cmd/misspell && \
chmod a+wr --recursive /usr/local/go && \
rm -rf /go/pkg/ /go/src/
# Only install shfmt on amd64: v1.3.0 isn't supported on ppc64le, and later
# versions of shfmt don't work well with this project
RUN export arch_val="$(dpkg --print-architecture)"; \
if [ "$arch_val" = "amd64" ]; then \
curl -fsSL -o shfmt https://github.com/mvdan/sh/releases/download/v1.3.0/shfmt_v1.3.0_linux_amd64 && \
chmod +x shfmt && \
mv shfmt /usr/bin; \
fi;
RUN pip install yapf==0.16.2 flake8==3.3.0 requests==2.19.1
# Install Docker (client only)
ENV DOCKERVERSION=17.09.1-ce
RUN export arch_val="$(dpkg --print-architecture)"; \
if [ "$arch_val" = "arm64" ]; then \
curl -fsSLO https://download.docker.com/linux/static/stable/aarch64/docker-${DOCKERVERSION}.tgz; \
elif [ "$arch_val" = "amd64" ]; then \
curl -fsSLO https://download.docker.com/linux/static/stable/x86_64/docker-${DOCKERVERSION}.tgz; \
elif [ "$arch_val" = "ppc64el" ]; then \
curl -fsSLO https://download.docker.com/linux/static/stable/ppc64le/docker-${DOCKERVERSION}.tgz; \
else \
echo "No Docker client found for architecture $(arch_val)." && \
exit 1; \
fi; \
tar xzvf docker-${DOCKERVERSION}.tgz --strip 1 -C /usr/local/bin docker/docker && \
rm docker-${DOCKERVERSION}.tgz;
COPY build.sh /
ENTRYPOINT ["/build.sh"]
ARG revision
LABEL maintainer="Weaveworks <help@weave.works>" \
org.opencontainers.image.title="backend" \
org.opencontainers.image.source="https://github.com/weaveworks/scope/tree/master/backend" \
org.opencontainers.image.revision="${revision}" \
org.opencontainers.image.vendor="Weaveworks"

View File

@@ -1,20 +0,0 @@
#!/bin/sh
set -eu
SCOPE_SRC=$GOPATH/src/github.com/weaveworks/scope
# Mount the scope repo:
# -v $(pwd):/go/src/github.com/weaveworks/scope
# If we run make directly, any files created on the bind mount
# will have awkward ownership. So we switch to a user with the
# same user and group IDs as source directory. We have to set a
# few things up so that sudo works without complaining later on.
uid=$(stat --format="%u" "$SCOPE_SRC")
gid=$(stat --format="%g" "$SCOPE_SRC")
echo "weave:x:$uid:$gid::$SCOPE_SRC:/bin/sh" >>/etc/passwd
echo "weave:*:::::::" >>/etc/shadow
echo "weave ALL=(ALL) NOPASSWD: ALL" >>/etc/sudoers
su weave -c "PATH=$PATH make -C $SCOPE_SRC BUILD_IN_CONTAINER=false $*"

View File

@@ -1,292 +0,0 @@
#! /bin/bash
set -e
SUDO=${SUDO-sudo}
GITHUB_USER=${GITHUB_USER:-weaveworks}
DOCKERHUB_USER=${DOCKERHUB_USER:-weaveworks}
RELEASE_NAME=${RELEASE_NAME:-"Weave Scope"}
RELEASE_DESCRIPTION=${RELEASE_DESCRIPTION:-"Container Visibility"}
PWD=$(pwd)
infer_release_type() {
if echo "$1" | grep -qE '^v[0-9]+\.[0-9]+\.0+$'; then
echo MAINLINE
elif echo "$1" | grep -qE '^v[0-9]+\.[0-9]+\.[0-9]+$'; then
echo BRANCH
else
echo PRERELEASE
fi
}
setup() {
# Ensure we have exactly one annotated tag pointing at HEAD
HEAD_TAGS=$(git tag --points-at HEAD)
# shellcheck disable=SC2116
# shellcheck disable=SC2005
TAG_COUNT=$(echo "$(echo "$HEAD_TAGS" | wc -w)") # mac hack
if [ "$TAG_COUNT" -eq 1 ]; then
if [ "$HEAD_TAGS" != "latest_release" ]; then
LATEST_TAG=$HEAD_TAGS
else
echo "Cannot determine version - latest_release points at HEAD" >&2
exit 1
fi
elif [ "$TAG_COUNT" -eq 0 ]; then
echo "Cannot determine version - no tags point at HEAD" >&2
exit 1
else
echo "Cannot determine version - multiple tags point at HEAD:" >&2
for TAG in $HEAD_TAGS; do
echo -e "\t$TAG" >&2
done
exit 1
fi
RELEASE_TYPE=$(infer_release_type "$LATEST_TAG")
echo "== Inferred release type $RELEASE_TYPE from tag $LATEST_TAG"
LATEST_TAG_SHA=$(git rev-parse "$LATEST_TAG")
LATEST_TAG_COMMIT_SHA=$(git rev-list -1 "$LATEST_TAG")
LATEST_RELEASE_SHA=$(git rev-parse latest_release)
LATEST_RELEASE_COMMIT_SHA=$(git rev-list -1 latest_release)
if [ "$RELEASE_TYPE" != 'PRERELEASE' ]; then
VERSION=${LATEST_TAG#v}
else
VERSION=${LATEST_TAG}
fi
# NB does not check that this tag is on master
RELEASE_DIR=./releases/$LATEST_TAG
}
build() {
setup
echo "== Clone repo at $LATEST_TAG for version $VERSION"
if [ -d "$RELEASE_DIR" ]; then
echo -e "\u2757 Release directory $RELEASE_DIR already exists, you may want to" >&2
echo -e "\trm -rf $RELEASE_DIR" >&2
exit 1
fi
## Clone the repo at the tag and go there
mkdir -p releases
git clone -q -b "$LATEST_TAG" . "$RELEASE_DIR" 2>/dev/null
cd "$RELEASE_DIR"
## Check that the top changelog entry is this version
if ! latest_changelog=$(perl -nle'print $& if m{(?<=^## Release ).*}' ./CHANGELOG.md | head -1) \
|| ! [ "$latest_changelog" = "$VERSION" ]; then
echo -e "\u2757 Latest changelog entry \"$latest_changelog\" does not match the release version $VERSION" >&2
exit 1
fi
echo
echo "== Build and test"
## Inject the version numbers and build the distributables
## (library versions?)
sed -i.tmp "s/SCRIPT_VERSION=\"[^\"]*\"/SCRIPT_VERSION=\"$VERSION\"/" ./scope
make SUDO="$SUDO" SCOPE_VERSION="$VERSION" DOCKERHUB_USER="$DOCKERHUB_USER" realclean all
if make tests SUDO="$SUDO"; then
echo -e '\u2713 Tests pass'
else
echo -e "\u2757 Tests failed, probably best not publish this one" >&2
exit 1
fi
## Run tests with the distributables, including version check
#v=$(./scope version | grep -o '[0-9].*')
#if ! [ "$v" == "$VERSION" ]; then
# echo "Version of distributable "$v" does not match release version $VERSION" >&2
# exit 1
#fi
tag_images
echo -e '\u2713 Build OK'
echo '** Release artefacts in' "$RELEASE_DIR"
}
draft() {
setup
cd "$PWD"/"$RELEASE_DIR"
echo "== Sanity checks"
## Check that the tag exists by looking at github
if ! curl -sSf "https://api.github.com/repos/$GITHUB_USER/scope/git/tags/$LATEST_TAG_SHA" >/dev/null 2>&1; then
echo -e "\u2757 Tag $LATEST_TAG is not on GitHub, or is not the same as the local tag" >&2
echo -e "\thttps://github.com/$GITHUB_USER/scope/tags" >&2
echo "You may need to" >&2
echo -e "\tgit push git@github.com:$GITHUB_USER/scope $LATEST_TAG"
exit 1
fi
echo -e "\u2713 Tag $LATEST_TAG exists in GitHub repo $GITHUB_USER/scope"
## Check that the version does not already exist by looking at github
## releases
if github-release info --user "$GITHUB_USER" --repo scope --tag "$LATEST_TAG" >/dev/null 2>&1; then
echo -e "\u2757 Release $LATEST_TAG already exists on GitHub" >&2
echo -e "\thttps://github.com/$GITHUB_USER/scope/releases/$LATEST_TAG" >&2
exit 1
fi
echo '** Sanity checks OK for publishing tag' "$LATEST_TAG" as "$DOCKERHUB_USER/scope:$VERSION"
RELEASE_ARGS="--draft"
if [ "$RELEASE_TYPE" = 'PRERELEASE' ]; then
RELEASE_ARGS="$RELEASE_ARGS --pre-release"
fi
echo "== Creating GitHub release $RELEASE_ARGS $RELEASE_NAME $VERSION"
github-release release $RELEASE_ARGS \
--user "$GITHUB_USER" \
--repo scope \
--tag "$LATEST_TAG" \
--name "$RELEASE_NAME $VERSION" \
--description "$RELEASE_DESCRIPTION"
github-release upload \
--user "$GITHUB_USER" \
--repo scope \
--tag "$LATEST_TAG" \
--name "scope" \
--file "./scope"
echo "** Draft $TYPE $RELEASE_NAME $VERSION created at"
echo -e "\thttps://github.com/$GITHUB_USER/scope/releases/$LATEST_TAG"
}
publish() {
setup
cd "$PWD"/"$RELEASE_DIR"
if [ "$RELEASE_TYPE" = 'PRERELEASE' ]; then
push_images
echo "== Publishing pre-release on GitHub"
github-release publish \
--user "$GITHUB_USER" \
--repo scope \
--tag "$LATEST_TAG"
echo "** Pre-release $RELEASE_NAME $VERSION published at"
echo -e "\thttps://github.com/$GITHUB_USER/scope/releases/$LATEST_TAG"
else
echo "== Sanity checks"
if ! [ "$LATEST_TAG_COMMIT_SHA" == "$LATEST_RELEASE_COMMIT_SHA" ]; then
echo -e "\u2757 The tag latest_release does not point to the same commit as $LATEST_TAG" >&2
echo "You may need to" >&2
echo -e "\tgit tag -af latest_release $LATEST_TAG" >&2
exit 1
fi
## Check that the 'latest_release' tag exists by looking at github
if ! curl -sSf "https://api.github.com/repos/$GITHUB_USER/scope/git/tags/$LATEST_RELEASE_SHA" >/dev/null 2>&1; then
echo -e "\u2757 Tag latest_release is not on GitHub, or is not the same as the local tag" >&2
echo -e "\thttps://github.com/$GITHUB_USER/scope/tags" >&2
echo "You may need to" >&2
echo -e "\tgit push -f git@github.com:$GITHUB_USER/scope latest_release" >&2
exit 1
fi
echo '** Sanity checks OK for publishing tag' "$LATEST_TAG" as "$DOCKERHUB_USER/scope:$VERSION"
push_images
echo "== Publishing release on GitHub"
github-release publish \
--user "$GITHUB_USER" \
--repo scope \
--tag "$LATEST_TAG"
if github-release info --user "$GITHUB_USER" --repo scope \
--tag latest_release >/dev/null 2>&1; then
github-release delete \
--user "$GITHUB_USER" \
--repo scope \
--tag latest_release
fi
github-release release \
--user "$GITHUB_USER" \
--repo scope \
--tag latest_release \
--name "$RELEASE_NAME latest ($VERSION)" \
--description "[Release Notes](https://github.com/$GITHUB_USER/scope/releases/$LATEST_TAG)"
github-release upload \
--user "$GITHUB_USER" \
--repo scope \
--tag latest_release \
--name "scope" \
--file "./scope"
echo "** Release $RELEASE_NAME $VERSION published at"
echo -e "\thttps://github.com/$GITHUB_USER/scope/releases/$LATEST_TAG"
echo -e "\thttps://github.com/$GITHUB_USER/scope/releases/latest_release"
fi
}
tag_images() {
echo "== Tagging images for docker hub as user $DOCKERHUB_USER"
for IMAGE in "scope" "cloud-agent"; do
$SUDO docker tag "$DOCKERHUB_USER/$IMAGE" "$DOCKERHUB_USER/$IMAGE:$VERSION"
$SUDO docker tag "$DOCKERHUB_USER/$IMAGE:$VERSION" "$DOCKERHUB_USER/$IMAGE:latest_release"
done
echo "** Docker images tagged"
}
push_images() {
echo "== Pushing images on docker hub as user $DOCKERHUB_USER"
for IMAGE in "scope" "cloud-agent"; do
$SUDO docker push "$DOCKERHUB_USER/$IMAGE:$VERSION"
$SUDO docker push "$DOCKERHUB_USER/$IMAGE:latest_release"
done
echo "** Docker images pushed"
}
usage() {
echo "Usage:"
echo -e "\t./bin/release build"
echo "-- Build artefacts for the latest version tag"
echo -e "\t./bin/release draft"
echo "-- Create draft release with artefacts in GitHub"
echo -e "\t./bin/release publish"
echo "-- Publish the GitHub release and update DockerHub"
exit 1
}
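# A typical end-to-end flow might look like this (a sketch, not prescriptive;
# it assumes a version tag such as v1.2.3 has already been created and pushed):
#   ./bin/release build     # build artefacts for the latest version tag
#   ./bin/release draft     # create a draft GitHub release with the artefacts
#   ./bin/release publish   # publish the release and push images to DockerHub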
# Ensure required tooling is installed
if ! which github-release >/dev/null; then
echo "Please install git-release:" >&2
echo -e "\tgo get github.com/weaveworks/github-release" >&2
echo "and create a git token per https://github.com/weaveworks/github-release" >&2
exit 1
fi
[ $# -eq 0 ] && usage
COMMAND=$1
shift
case "$COMMAND" in
build)
build "$@"
;;
draft)
draft "$@"
;;
publish)
publish "$@"
;;
*)
echo "Unknown command \"$COMMAND\""
usage
;;
esac

View File

@@ -1,113 +0,0 @@
#!/bin/bash
set -eu
# openssl enc -in do-setup-circleci-secrets.orig -out setup-circleci-secrets.orig -e -aes256 -pass stdin
# openssl base64 < setup-circleci-secrets.orig
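# Round-trip sketch (the plaintext path and $PASSPHRASE below are illustrative
# assumptions, not part of this script): encrypt and base64-encode with
#   openssl enc -in bin/do-setup-circleci-secrets.orig -e -aes256 -pass pass:"$PASSPHRASE" \
#     | openssl base64
# The pipeline at the bottom of this script reverses it: base64 -d, then
# openssl enc -d -aes256.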
if [ -z "${SECRET_SCRIPT+x}" ]; then
# "read" return a non-zero value when reading until EOF, see "help read"
read -r -d '' SECRET_SCRIPT <<EOF || true
U2FsdGVkX193YHZJXNzxU9GqigQaXWrA0AKd+BIjRcx7bmmKn/zSgOv+FfApRRjn
KGBd2ulZw9CwsftX0HWHzVdtpgqbJUW+FEma8eNldau4/f+T+yWTVpCNQXGc3DvB
cWYhmkoTfGWmI2v/0/Bv2TYkw7MAfjCocdluFAv7sSvYnSgIjoYxD4XXkTjLWy1P
KUMUBPSRgQlyY01fTHmh+mvY/pLAF+Cfe+vvBNbCUVj2IfplWbN/K3ewco8UHJQI
9KZO36dvevExXfIhCa0FS1A9cxjTRl+wX6pi9d6XCbfmJ727CPOBz1Cla2+Yr3Pr
yXuTb/oIgKZ2tbmFrABRmui69NLqfi7AwkWM7W8Xs3oxrY09TRtyuN6gc3X+vlhu
2ylNFgVQEA4KfWDewfl8zILYwzO5gMpEpkduFMjSTn3v170s7IYjUtNyq9FFA/GK
kfh/LCPYIXyk6qoRLNEOFw2DkA04D3dG6Qm0np2TacBdEBchYQhZB1wqKNCOj6rZ
ROBgBvn5DwiE51WOGdnekWF1chbFReVXcW8Nl4XWZTQ8HoRRx7F0JgV+PRMqvxpn
Qp5Iv/TTvYwQUToM0XK7GdQObDvC6vnuCQXLl+iBOXwsEp1jzf/3Zb45vp+L5Qwi
p2HaT+cvW1SkSwktO34mnGVMRRb8/TqtEKCVCLlNXNiZPIDyKr0HLOs3sdXT844f
K8h9qmStr9bRpa7QBMXvQtwdnuI3V4cFU00AVief6V5gwfHzsMO6cWxBUtQudRr1
EysuvemGtsNNBR1UTtLzbWRtTc2pC3bPQe/ZLW5KMgwTvehBGsCEs7k9lgedXZLj
5bg5aF9OxeOAfpOj49AKDOtYf6KlOrBydO6/3q9XXijaEsX8cFI4ynwgGztYIAjZ
O/2KyD6Tm4s/3RmfR1IHRW4Spb9ApeA9IC+FZVFn4rhwCmyu5ujU6+PQFpAPM0M3
W7QYxnaY/Jhh7/rhMj+dy4/aEnMVVi0hF71DPew73ZZRiD13d5y0FkjrV++xIMiu
emuE9z57r0RrC0/I6/S+TDVRXmhWAerNJ03evcg1o3BqVs72lF24iAMFA3IRBugH
uW1Nm22aqi2OfMu0rewnRS0PwOgPccq5c2BOJTm2/XPHF2T4v5jDBD4FXDenScZM
3aW8vRRcmFdvivWE08uTJQkajX0O74StIRr0FPRB8+tr3yqIVWtiDB2o2hn/vfVM
cjck/l37qQzjwZ/5UyBl80BsDz5PENRFWQGi0lwAT3HNvnvWChoIk/bma5htttqO
iSOiZ1IlElnhLERmIIoeolB9b2dXPFJ1eKYovEJyIdSxKCrwa5lzxYw7Mc8CApv5
DzCCUTqZwh2e0+e7C4ywIRZreZ7t4kTJOSGNMaALaQ1KgM6RCVH3y8FRMJ2Q8alH
kjZsI9wDZRVE6/U8qP+m+kb3MIRpSIRm2r8jyoPRwYVjpjGtU36wFFZNA55yEJWx
/qiVilHymIA8dcItAybinf+87VA3bIEPXDLhP4LMPkmu4ZQTzOF4bejA5lKk/vMs
4Hz0YmHW8BU8m+Pm8QRKfPYsEpre4FpiaGB3xfrzcIg7NIT5/dqQODAn21B/Dl8E
mZHU4bVsyccwEhgF/vXogZiAroiZbEXvz8LE+A1wC+G3UW28P8OSh7GYTW9D1yQK
MkTSmFmEjxdvZNXV3MCmjuBwqBRQa+3f0rZqmKu7aA+U9vA5uoMjKW5Ed52mX74d
gF7EbmDniIN5BunmbEpsKFEs/AXOR6+/tM2kKqm0jTSzEJjM0sglmLlfiEYV9vNK
maHYoiIQWhO5mlhosSABeYFf3268O2O8EUnzbV2xoqtpREbfjlvSylb1VeVe5/bE
NRKL8wC+HIXYZVOBO2AcZyCgLk6SPpaDaA7Gi5LoVMglmKRLRuxzV53eYjjl9bkz
q2xwee3cfk8waAc37BEVbJGmRC4LCmaiCqiSkPyOlXYIc4I6zttojx1rWskVk/Fj
2V3G2S5aEXhdTOhVykY4U8dl5MEimne+On4Nj7yUbQT6CKO/FCiHHZ33G1XGZMtM
3orvtpy5t7VIh7y8jmqMzI2KUJIWyYT4znLZY4RCDR5I8mlG5L83HfqNy72fC0pl
KrRh14s5uPt3Lo5CdMCiJaqB0EFYPfsUN8xV6k9MtKMT/CgKDZdoYgaJR3HTf0IO
Ubqp7YYXAdfY2IZA9DcImYTlM5kgj79gxh0ZRMKOLjbHLH6wVDPjmdLDKebFWiTq
lmyKbhvJOGZW2WW7lD23yZkyxCWU4PVoXyRiDHWi/kMtwFPAq9DRoeLZT/se2elF
nC7Fp5F3b0AljeqTyuLLRgTmuCLetk6jLGVEri26VB0gM9Jj67jv1tmRytOHjxDX
QgvQ999SCtv3zJvUGp0PfAhzejIynBPlXZu59Eo8eP7Ti0ooLzw/zFfLGdMkBrII
LBOHE5CL6Gk73mLQb8K6hVAivhcZHthzaiZCk4XLIzRCg4IpEhg12gr62ofM+tLJ
32sfdRRPDY0J73+6HPO/j1j2lFUWRx9eXkKewEmtC3Y3VUq/dFDrCGTcdWKyVzyb
jwvEEv605kJea1z7/FPzC++nXwleFzkaK/essmCYymQOsw01gRrnIauFalj1c2pY
ioqpWfVjWvk5tij6T40tTQllGV9zSQAw07cTB0x5TWWrJpPp+/SxzwwkC5jyXyGS
zIxVyglh20U4d4BTNXwQJbXRzxJeMQhAcfiU1xMSNTXrQ3if1wbj8KwHcrMDq2eI
Y3FNSnXGlpfiGMCC0xhwubmwBO+OKzQESy2VZMLo7DahaCov0PnpKu/jeZFRZuLu
5vsz//bRlK2rouAeP3C+0U1EcN51hRbEuU7Y4XhMGgzH0oOJHDVK79pYQ3uS8f/2
cHIlq9kUrm38kvPvvwcSEikmAmNHIswCskBkh1v+kILB5tlMhBBgV3eW29BoNiKk
vpCD+70GeE9KuWX4fioDWeC1DBPLeNlEL/hIrIjl1CxmBRvgQmtCG7BbGRgdxZ3c
VSyG4usPPLK1pjMl9jl0S5Y1B5qG3njEl1vmpdxBjjsWIw2x/UqLJs2vP5v0s7Ai
ZcQ2npTP9n3M9Dj49bZrxOclPzsG8Cuwu3c4MhfsQPh22Ao6J6G1dtjz/GvPkEEH
24Yu+z8Iv2fnRYbXOnkDWlnoaJTZqkXd2KN4c+Hy6zBLlq0DWXTeQIgQR19coKDn
UfhWT9GzW/OsrzBeba002jQayOM3KVt+cOk7+JTK0krx7ZlodUXa0D9NzWsx5GuC
Zx9K6mGE+RuOir1x0x7Z1XH6UBKDnmzHTe545fa5I03SIB0k02YrtxuO8UYPODac
X9T3JRD4EhH2o5uPkktEdzIJT+8oVaU+xW+fa5hgiEfatpVXvIhT8aNzyxFwYbHr
gd6TZcqe04/CQYfUUCPDjwSJimeoDm8zVQttcjA2q4SUOtV9zODqkQOrpQ+sx0SJ
ky15Y40dJ98toj/xlLUe2HDzodofOI0+y99lpI/Ym1OqZV09wjPDAkue5yLCa+H1
rSCKAtFOP68MerPIOEcbU7lL18tBY65p+/0UrdOkjcHl9fbN3mjDpNGl+NRNGVWY
Ly8J247AXaeaFBGMNuyD5bhKHB8INFSbmWqOxkmjNiyPCnDX5oLdibay94cMbf5D
BobCDdz/SSnHGI+B+R15i6TjC/mdBrPd6LHIcxaC4107FB2n0urCQE0ovd5AFdPP
i8W6VBDkZuz5KpfPPkTjrOOH8cRVu971gIHSSfpiNbHlFgH6ewySPo41wd3TbSuk
TqWeeLmfDwHPIikBGQXax6lhNUIUrCyeAlPalkN2skQd1D+EyP+2siiGUP1VPHiq
lrUY1/3+qLafGEtovOoqcUsE3wZqbjJfZq5filXrVdt/ZSRIKfWuJ+eU2avfpy6n
y88+yp5nhZjHIdmpwtlbuIKo1SgZA1oqI0pBXA1q8T2xhHBUhyBg7wgofDvo/8Lu
fF6e/+u1cgaRDnLilL0UVzWB4C5GYncLOSuuvCFArmehR7MiK2LPNinFpDml40C7
QMDuhj/3Ox+HTrOn9VjZfo8Xh+xRn8K1TmXSq8OGh1+q/OYMZJJLbRVCkv6YPL0U
UEBhRN3eXjUWaOKAIBhBgjhEcMEueN/8ijklmkUQ08i677Lvb+uSc/bBmnsbEisZ
n2LccJiGcmmKj8LqEfwSBylfbIZNN70tvWe64RZ60YepGEOaPX69E7LDg8cg7V/Z
87XeZXPiADcYrb36b5WzlxVlvTV66xkfjOOmBz+BJ5JBmJTWQSerARmxFqKIkaF0
gatay37z+GVsSDV5tI/OONcTnamvFDn5hTFKDFjcPtzSlXRV9ZnjOeLNnjz4vPrv
GAPFAYO2eE7T/YGxiYPwy6fEvN+dQ4aIK9A3ZR0nzdBTvjw03VSNcSLQYlUXhoEO
nfWuEtZeWycMabC8OPIuaKWajMZlUBLnxIp/+Pv/oer8/LIkFWtY0wQ7WtEbesLb
w6OaPx2Gr0o4yxc2NiAl82wfkkxgX6J9MpcRB3d0SoLnopFwzzLRbq9nr9tglc4J
ps95F+89SBb5PY9o3eV7ixmDFvykRQ6E/r+gO0WQ5ToIeZfIasnyQyPKMUA1/l0z
W/m9Ibi0VwdKO7BAvnOKJBglsW8TqMwYDFpecY8jw6y9MTkwEryIWh7l/uIiu4J+
wfLAq8u4ltlILow5BiuuyvPsKrohZIC3NHwHcFKzw62/aprfcXITyfnlsrrqfT5A
COulRUYWlY8XstsWSTlEsqa5OzTdx9uprO0e6tce3YpCSSwYwZYxQvf8wlyaD6iq
eFECRFbbV4BGvQTxf/f+WORgUgcULbrObeJV4b8biJaJeqRKZpMVvFTxLPWZgXzJ
wQ64KTE71KMNRapxlx2CB4Fh465iS9rjaQEc4wOmC2v3gyG+cD+8QXnJFs/l741g
8WXQK2O/QnPzRx0sL9R54J9OnJAyMZGsjD3spV6Pr4Vo3BlEv8UzhK4teDakvrHk
WwAzvZMqJIgI/XWiFIZq76Hk11d7fIW9G2ELst982iAHXzmCqMZD0VLbozIcZel5
mExz+6I3NSGL4Rbbew/ZRhLuGnc0WlzNp96sCmrv0cl1I0ZXyg8i6PaT/VV33VXh
Cz21WO4lQxmZulO4mUY7ms1kkvWjGRRjThRtc+PErexGDpkCBQHxs033+WAdG83Z
CuOL6Hhk9tV4JzT+YWcwQXs1MvzSaP1vtHsEaCO8WgR5FIQtvNEuGPA2js//aVPS
abYO2U7LCM5OSygbAVBIumi3wW4f51kIXtwhuiwEnsgVlnIdr4bPZikLuSiuA2Ly
bYFcBZ7UJC1jQ4Ar1Tn8H+V8IzXSzmKLi1A0bPwg25X4RYE6YWSsb7Pg+aGlWglw
LR8soIKQdxN5phjzFuzzBeolvAlknqMpL5/u91XiFW5R3JAu3RC8s1/kzzTX8lCz
M+mfR6bN7VSMHVYwCpkyGeHY8isivbkkNi8RSTgx+jeokQBCmhWcoNl2znjtJwm1
CkoeAsgpNGiDbXSGEAN2TJfJbUpP3BB9VQZc+bDD+I6gp4wRYcyV5yD156lbBQKd
VPFvGNc6CttfqanQ6hpN03yDWNTOjltQPyxrtlIou8GAKgJkszJUhCW15c5DGndZ
u+XeqQYBf5/wDLwEXFEc5n5qfPIfO9j9ETyw8/EkeJQVig5wnmYDz2BRTklY+swB
dzO2aTTaJRaWCEbXbTiM9puKZrtb/fLJLTJj58rxzy6+Gjp1k0Dm2eUYSavSCFNO
34SX3KVaAe/oQUzJtrWrZgFPgMbFr7NNowalyXReFQnuISWaicuLnvcj09WXqIw4
kUkJh9/Wa0h0V0dZJhOyKqJvPhvxt88cOpxrgdaHwkL+QfgidaBAecXrjmhJetaQ
LrFefj0OWhRx4w7pblAnZqUSYunhhhUYimEG40GkM1ZI9b0vDmbgQP/UxMj1M2yB
aVSSW69rmjO7xjVuJnm+wNq2P3H3MFB5MaswXrn2Ah83K4oegpannt7H1nG+mWh/
DJcoVj5UyCTFEyZMdtVWloF4TOVODNNxA+zAE9fUgrI=
EOF
fi
echo "${SECRET_SCRIPT}" | base64 -d | openssl enc \
-out bin/do-setup-circleci-secrets \
-d -aes256 -pass pass:"$1"
exec sh bin/do-setup-circleci-secrets

View File

@@ -1,43 +0,0 @@
{
"presets": [
[
"@babel/preset-env",
{
"modules": "commonjs"
}
],
"@babel/preset-react"
],
"plugins": [
[
"@babel/plugin-proposal-object-rest-spread",
{
"useBuiltIns": true
}
],
"@babel/plugin-proposal-class-properties",
"lodash",
],
"env": {
"test": {
"presets": [
[
"@babel/preset-env",
{
"modules": "commonjs"
}
],
"@babel/preset-react"
],
"plugins": [
[
"@babel/plugin-proposal-object-rest-spread",
{
"useBuiltIns": true
}
],
"@babel/plugin-proposal-class-properties",
]
}
}
}

View File

@@ -1,2 +0,0 @@
node_modules
dist

View File

@@ -1,22 +0,0 @@
# EditorConfig helps developers define and maintain consistent
# coding styles between different editors and IDEs
# editorconfig.org
root = true
[*]
# Change these settings to your own preference
indent_style = space
indent_size = 2
# We recommend you keep these unchanged
end_of_line = lf
charset = utf-8
trim_trailing_whitespace = true
insert_final_newline = true
[*.md]
trim_trailing_whitespace = false

View File

@@ -1 +0,0 @@
app/scripts/vendor/term.js

View File

@@ -1,51 +0,0 @@
{
"extends": "airbnb",
"parser": "babel-eslint",
"env": {
"browser": true,
"jest": true,
"node": true
},
"rules": {
"no-debugger": 1,
"comma-dangle": 0,
"global-require": 0,
"sort-keys": [
"error",
"asc",
{
"caseSensitive": false,
"natural": true
}
],
"import/no-extraneous-dependencies": [
"error",
{
"devDependencies": true,
"optionalDependencies": true,
"peerDependencies": true
}
],
"import/prefer-default-export": 0,
"jsx-a11y/no-static-element-interactions": 0,
"no-param-reassign": 0,
"no-restricted-properties": 0,
"object-curly-spacing": 0,
"react/destructuring-assignment": 0,
"react/jsx-closing-bracket-location": 0,
"react/jsx-filename-extension": [
2,
{
"extensions": [
".js",
".jsx"
]
}
],
"react/prefer-stateless-function": 0,
"react/sort-comp": 0,
"react/prop-types": 0,
"jsx-a11y/click-events-have-key-events": 0,
"jsx-a11y/mouse-events-have-key-events": 0
}
}

View File

@@ -1 +0,0 @@
* text=auto

client/.gitignore
View File

@@ -1,6 +0,0 @@
node_modules
build/
build-external/
coverage/
test/*png
weave-scope.tgz

View File

@@ -1 +0,0 @@
v10.19.0

View File

@@ -1,31 +0,0 @@
{
"processors": ["stylelint-processor-styled-components"],
"extends": [
"stylelint-config-styled-components",
"stylelint-config-recommended",
],
"plugins": ["stylelint-declaration-use-variable"],
"rules": {
"block-no-empty": null,
"color-named": "never",
"color-no-hex": true,
"function-blacklist": ["/^rgb/", "/^hsl/"],
"no-empty-source": null,
"no-descending-specificity": null,
"no-duplicate-selectors": null,
"property-no-vendor-prefix": [true, {
"ignoreProperties": ["tab-size", "hyphens"],
}],
"selector-type-no-unknown": null,
"sh-waqar/declaration-use-variable": [[
"border-radius",
"border-top-left-radius",
"border-top-right-radius",
"border-bottom-left-radius",
"border-bottom-right-radius",
"font-family",
"font-size",
"z-index"
]]
}
}

View File

@@ -1,14 +0,0 @@
# Changes to this file will not take effect in CI
# until the image version in the CI config is updated. See
# https://github.com/weaveworks/scope/blob/master/.circleci/config.yml#L11
FROM node:10.19
ENV NPM_CONFIG_LOGLEVEL=warn
ENV NPM_CONFIG_PROGRESS=false
ENV XDG_CACHE_HOME=/home/weave/scope/.cache
ARG revision
LABEL maintainer="Weaveworks <help@weave.works>" \
org.opencontainers.image.title="scope-ui-build" \
org.opencontainers.image.source="https://github.com/weaveworks/scope" \
org.opencontainers.image.revision="${revision}" \
org.opencontainers.image.vendor="Weaveworks"

View File

@@ -1,50 +0,0 @@
# Scope UI
## Getting Started (using local node)
- You need at least Node.js 6.9.0 and a running `weavescope` container
- Get Yarn: `npm install -g yarn`
- Setup: `yarn install`
- Develop: `BACKEND_HOST=<dockerhost-ip> yarn start` and then open `http://localhost:4042/`
This will start a webpack-dev-server that serves the UI and proxies API requests to the container.
## Getting Started (using node in a container)
- You need a running `weavescope` container
- Develop: `make WEBPACK_SERVER_HOST=<dockerhost-ip> client-start` and then open `http://<dockerhost-ip>:4042/`
This will start a webpack-dev-server that serves the UI from the UI build container and proxies API requests to the weavescope container.
## Test Production Bundles Locally
- Build: `yarn run build`, output will be in `build/`
- Serve files from `build/`: `BACKEND_HOST=<dockerhost-ip> yarn run start-production` and then open `http://localhost:4042/`
## Coding
This directory has a `.eslintrc`; make sure your editor supports linter hints.
You can also run the linter with `yarn run lint`.
## Logging
To enable logging in the console, activate it via `localStorage` in the dev tools console:
```
localStorage["debug"] = "scope:*"
```
The Scope UI uses [debug](https://www.npmjs.com/package/debug) for logging, e.g.:
```
const debug = require('debug')('scope:app-store');
debug('Store log message');
```
## Gotchas
Got a blank screen when loading `http://localhost:4042`?
Make sure you are accessing the right machine:
If you're running `yarn start` on a virtual machine with IP 10.0.0.8, you need to point your browser to `http://10.0.0.8:4042`.
Also, you may need to manually configure the virtual machine to expose ports 4041 (webpack-dev-server) and 4042 (express proxy).

View File

@@ -1,18 +0,0 @@
<!doctype html>
<html class="no-js">
<head>
<meta charset="utf-8">
<title>Weave Scope</title>
<meta name="description" content="">
<meta name="viewport" content="width=device-width, initial-scale=1">
<script language="javascript">window.__WEAVEWORKS_CSRF_TOKEN = "$__CSRF_TOKEN_PLACEHOLDER__";</script>
</head>
<body>
<!--[if lt IE 10]>
<p class="browsehappy">You are using an <strong>outdated</strong> browser. Please <a href="http://browsehappy.com/">upgrade your browser</a> to improve your experience.</p>
<![endif]-->
<div class="wrap">
<div id="app"></div>
</div>
</body>
</html>

Binary file not shown (deleted image, 741 B).

View File

@@ -1 +0,0 @@
module.exports = require('./actions/app-actions');

View File

@@ -1,463 +0,0 @@
import ActionTypes from '../constants/action-types';
import { saveGraph } from '../utils/file-utils';
import { clearStoredViewState, updateRoute } from '../utils/router-utils';
import { isPausedSelector } from '../selectors/time-travel';
import {
nextPinnedMetricTypeSelector,
previousPinnedMetricTypeSelector,
} from '../selectors/node-metric';
import { isResourceViewModeSelector } from '../selectors/topology';
import {
GRAPH_VIEW_MODE,
TABLE_VIEW_MODE,
} from '../constants/naming';
export function showHelp() {
return { type: ActionTypes.SHOW_HELP };
}
export function hideHelp() {
return { type: ActionTypes.HIDE_HELP };
}
export function toggleHelp() {
return (dispatch, getState) => {
if (getState().get('showingHelp')) {
dispatch(hideHelp());
} else {
dispatch(showHelp());
}
};
}
export function sortOrderChanged(sortedBy, sortedDesc) {
return (dispatch, getState) => {
dispatch({
sortedBy,
sortedDesc,
type: ActionTypes.SORT_ORDER_CHANGED
});
updateRoute(getState);
};
}
//
// Networks
//
export function showNetworks(visible) {
return (dispatch, getState) => {
dispatch({
type: ActionTypes.SHOW_NETWORKS,
visible
});
updateRoute(getState);
};
}
export function selectNetwork(networkId) {
return {
networkId,
type: ActionTypes.SELECT_NETWORK
};
}
export function pinNetwork(networkId) {
return (dispatch, getState) => {
dispatch({
networkId,
type: ActionTypes.PIN_NETWORK,
});
updateRoute(getState);
};
}
export function unpinNetwork(networkId) {
return (dispatch, getState) => {
dispatch({
networkId,
type: ActionTypes.UNPIN_NETWORK,
});
updateRoute(getState);
};
}
//
// Metrics
//
export function hoverMetric(metricType) {
return {
metricType,
type: ActionTypes.HOVER_METRIC,
};
}
export function unhoverMetric() {
return {
type: ActionTypes.UNHOVER_METRIC,
};
}
export function pinMetric(metricType) {
return (dispatch, getState) => {
dispatch({
metricType,
type: ActionTypes.PIN_METRIC,
});
updateRoute(getState);
};
}
export function unpinMetric() {
return (dispatch, getState) => {
// We always have to keep metrics pinned in the resource view.
if (!isResourceViewModeSelector(getState())) {
dispatch({
type: ActionTypes.UNPIN_METRIC,
});
updateRoute(getState);
}
};
}
export function pinNextMetric() {
return (dispatch, getState) => {
const nextPinnedMetricType = nextPinnedMetricTypeSelector(getState());
dispatch(pinMetric(nextPinnedMetricType));
};
}
export function pinPreviousMetric() {
return (dispatch, getState) => {
const previousPinnedMetricType = previousPinnedMetricTypeSelector(getState());
dispatch(pinMetric(previousPinnedMetricType));
};
}
export function updateSearch(searchQuery = '', pinnedSearches = []) {
return (dispatch, getState) => {
dispatch({
pinnedSearches,
searchQuery,
type: ActionTypes.UPDATE_SEARCH,
});
updateRoute(getState);
};
}
export function blurSearch() {
return { type: ActionTypes.BLUR_SEARCH };
}
export function clickBackground() {
return (dispatch, getState) => {
dispatch({
type: ActionTypes.CLICK_BACKGROUND
});
updateRoute(getState);
};
}
export function closeTerminal(pipeId) {
return (dispatch, getState) => {
dispatch({
pipeId,
type: ActionTypes.CLOSE_TERMINAL
});
updateRoute(getState);
};
}
export function clickDownloadGraph() {
return (dispatch) => {
dispatch({ exporting: true, type: ActionTypes.SET_EXPORTING_GRAPH });
saveGraph();
dispatch({ exporting: false, type: ActionTypes.SET_EXPORTING_GRAPH });
};
}
export function clickForceRelayout() {
return (dispatch) => {
dispatch({
forceRelayout: true,
type: ActionTypes.CLICK_FORCE_RELAYOUT
});
// fire only once, reset after dispatch
setTimeout(() => {
dispatch({
forceRelayout: false,
type: ActionTypes.CLICK_FORCE_RELAYOUT
});
}, 100);
};
}
export function setViewportDimensions(width, height) {
return (dispatch) => {
dispatch({ height, type: ActionTypes.SET_VIEWPORT_DIMENSIONS, width });
};
}
export function setGraphView() {
return (dispatch, getState) => {
dispatch({
type: ActionTypes.SET_VIEW_MODE,
viewMode: GRAPH_VIEW_MODE,
});
updateRoute(getState);
};
}
export function setTableView() {
return (dispatch, getState) => {
dispatch({
type: ActionTypes.SET_VIEW_MODE,
viewMode: TABLE_VIEW_MODE,
});
updateRoute(getState);
};
}
export function cacheZoomState(zoomState) {
return {
type: ActionTypes.CACHE_ZOOM_STATE,
// Make sure only proper numerical values are cached.
zoomState: zoomState.filter(value => !window.isNaN(value)),
};
}
export function openWebsocket() {
return {
type: ActionTypes.OPEN_WEBSOCKET
};
}
export function clearControlError(nodeId) {
return {
nodeId,
type: ActionTypes.CLEAR_CONTROL_ERROR
};
}
export function closeWebsocket() {
return {
type: ActionTypes.CLOSE_WEBSOCKET
};
}
export function enterEdge(edgeId) {
return {
edgeId,
type: ActionTypes.ENTER_EDGE
};
}
export function enterNode(nodeId) {
return {
nodeId,
type: ActionTypes.ENTER_NODE
};
}
export function hitEsc() {
return (dispatch, getState) => {
const state = getState();
const controlPipe = state.get('controlPipes').last();
if (controlPipe && controlPipe.get('status') === 'PIPE_DELETED') {
dispatch({
pipeId: controlPipe.get('id'),
type: ActionTypes.CLOSE_TERMINAL
});
updateRoute(getState);
} else if (state.get('showingHelp')) {
dispatch(hideHelp());
} else if (state.get('nodeDetails').last() && !controlPipe) {
dispatch({ type: ActionTypes.DESELECT_NODE });
updateRoute(getState);
}
};
}
export function leaveEdge(edgeId) {
return {
edgeId,
type: ActionTypes.LEAVE_EDGE
};
}
export function leaveNode(nodeId) {
return {
nodeId,
type: ActionTypes.LEAVE_NODE
};
}
export function receiveControlError(nodeId, err) {
return {
error: err,
nodeId,
type: ActionTypes.DO_CONTROL_ERROR
};
}
export function receiveControlSuccess(nodeId) {
return {
nodeId,
type: ActionTypes.DO_CONTROL_SUCCESS
};
}
export function receiveNodeDetails(details, requestTimestamp) {
return {
details,
requestTimestamp,
type: ActionTypes.RECEIVE_NODE_DETAILS
};
}
export function receiveNodesDelta(delta) {
return (dispatch, getState) => {
if (!isPausedSelector(getState())) {
// Allow css-animation to run smoothly by scheduling it to run on the
// next tick after any potentially expensive canvas re-draws have been
// completed.
setTimeout(() => dispatch({ type: ActionTypes.SET_RECEIVED_NODES_DELTA }), 0);
// When moving in time, we will consider the transition complete
// only when the first batch of nodes delta has been received. We
// do that because we want to keep the previous state blurred instead
// of transitioning over an empty state like when switching topologies.
if (getState().get('timeTravelTransitioning')) {
dispatch({ type: ActionTypes.FINISH_TIME_TRAVEL_TRANSITION });
}
const hasChanges = delta.add || delta.update || delta.remove || delta.reset;
if (hasChanges) {
dispatch({
delta,
type: ActionTypes.RECEIVE_NODES_DELTA
});
}
}
};
}
export function receiveNodes(nodes) {
return {
nodes,
type: ActionTypes.RECEIVE_NODES,
};
}
export function receiveNodesForTopology(nodes, topologyId) {
return {
nodes,
topologyId,
type: ActionTypes.RECEIVE_NODES_FOR_TOPOLOGY
};
}
export function receiveControlNodeRemoved(nodeId) {
return (dispatch, getState) => {
dispatch({
nodeId,
type: ActionTypes.RECEIVE_CONTROL_NODE_REMOVED
});
updateRoute(getState);
};
}
export function receiveControlPipeFromParams(pipeId, rawTty, resizeTtyControl) {
// TODO add nodeId
return {
pipeId,
rawTty,
resizeTtyControl,
type: ActionTypes.RECEIVE_CONTROL_PIPE
};
}
export function receiveControlPipeStatus(pipeId, status) {
return {
pipeId,
status,
type: ActionTypes.RECEIVE_CONTROL_PIPE_STATUS
};
}
export function receiveError(errorUrl) {
return {
errorUrl,
type: ActionTypes.RECEIVE_ERROR
};
}
export function receiveNotFound(nodeId, requestTimestamp) {
return {
nodeId,
requestTimestamp,
type: ActionTypes.RECEIVE_NOT_FOUND,
};
}
export function setContrastMode(enabled) {
return (dispatch, getState) => {
dispatch({
enabled,
type: ActionTypes.TOGGLE_CONTRAST_MODE,
});
updateRoute(getState);
};
}
export function resetLocalViewState() {
return (dispatch) => {
dispatch({ type: ActionTypes.RESET_LOCAL_VIEW_STATE });
clearStoredViewState();
// eslint-disable-next-line prefer-destructuring
window.location.href = window.location.href.split('#')[0];
};
}
export function toggleTroubleshootingMenu(ev) {
if (ev) { ev.preventDefault(); ev.stopPropagation(); }
return {
type: ActionTypes.TOGGLE_TROUBLESHOOTING_MENU
};
}
export function changeInstance() {
return (dispatch, getState) => {
dispatch({
type: ActionTypes.CHANGE_INSTANCE
});
updateRoute(getState);
};
}
export function setMonitorState(monitor) {
return {
monitor,
type: ActionTypes.MONITOR_STATE
};
}
export function setStoreViewState(storeViewState) {
return {
storeViewState,
type: ActionTypes.SET_STORE_VIEW_STATE
};
}

View File

@@ -1,608 +0,0 @@
/*
This file consists of functions that both dispatch actions to Redux and also make API requests.
TODO: Refactor all the methods below so that the split between actions and
requests is clearer, and have user components call requests explicitly and
dispatch actions when handling request promises.
*/
import debug from 'debug';
import { fromJS } from 'immutable';
import ActionTypes from '../constants/action-types';
import { RESOURCE_VIEW_MODE } from '../constants/naming';
import {
API_REFRESH_INTERVAL,
TOPOLOGY_REFRESH_INTERVAL,
} from '../constants/timer';
import { updateRoute } from '../utils/router-utils';
import { getCurrentTopologyUrl } from '../utils/topology-utils';
import {
doRequest,
getApiPath,
getAllNodes,
getNodesOnce,
deletePipe,
getNodeDetails,
getResourceViewNodesSnapshot,
topologiesUrl,
buildWebsocketUrl,
} from '../utils/web-api-utils';
import {
availableMetricTypesSelector,
pinnedMetricSelector,
} from '../selectors/node-metric';
import {
isResourceViewModeSelector,
resourceViewAvailableSelector,
activeTopologyOptionsSelector,
} from '../selectors/topology';
import { isPausedSelector } from '../selectors/time-travel';
import {
receiveControlNodeRemoved,
receiveControlPipeStatus,
receiveControlSuccess,
receiveControlError,
receiveError,
pinMetric,
openWebsocket,
closeWebsocket,
receiveNodesDelta,
clearControlError,
blurSearch,
} from './app-actions';
const log = debug('scope:app-actions');
const reconnectTimerInterval = 5000;
const FIRST_RENDER_TOO_LONG_THRESHOLD = 100; // ms
let socket;
let topologyTimer = 0;
let controlErrorTimer = 0;
let reconnectTimer = 0;
let apiDetailsTimer = 0;
let continuePolling = true;
let firstMessageOnWebsocketAt = null;
let createWebsocketAt = null;
let currentUrl = null;
function createWebsocket(websocketUrl, getState, dispatch) {
if (socket) {
socket.onclose = null;
socket.onerror = null;
socket.close();
// onclose() is not called, but that's fine since we're opening a new one
// right away
}
// profiling
createWebsocketAt = new Date();
firstMessageOnWebsocketAt = null;
socket = new WebSocket(websocketUrl);
socket.onopen = () => {
log(`Opening websocket to ${websocketUrl}`);
dispatch(openWebsocket());
};
socket.onclose = () => {
clearTimeout(reconnectTimer);
log(`Closing websocket to ${websocketUrl}`, socket.readyState);
socket = null;
dispatch(closeWebsocket());
if (continuePolling && !isPausedSelector(getState())) {
reconnectTimer = setTimeout(() => {
createWebsocket(websocketUrl, getState, dispatch);
}, reconnectTimerInterval);
}
};
socket.onerror = () => {
log(`Error in websocket to ${websocketUrl}`);
dispatch(receiveError(websocketUrl));
};
socket.onmessage = (event) => {
const msg = JSON.parse(event.data);
dispatch(receiveNodesDelta(msg));
// profiling (receiveNodesDelta triggers synchronous render)
if (!firstMessageOnWebsocketAt) {
firstMessageOnWebsocketAt = new Date();
const timeToFirstMessage = firstMessageOnWebsocketAt - createWebsocketAt;
if (timeToFirstMessage > FIRST_RENDER_TOO_LONG_THRESHOLD) {
log(
'Time (ms) to first nodes render after websocket was created',
firstMessageOnWebsocketAt - createWebsocketAt
);
}
}
};
}
function teardownWebsockets() {
clearTimeout(reconnectTimer);
if (socket) {
socket.onerror = null;
socket.onclose = null;
socket.onmessage = null;
socket.onopen = null;
socket.close();
socket = null;
currentUrl = null;
}
}
function updateWebsocketChannel(getState, dispatch, forceRequest) {
const topologyUrl = getCurrentTopologyUrl(getState());
const topologyOptions = activeTopologyOptionsSelector(getState());
const websocketUrl = buildWebsocketUrl(topologyUrl, topologyOptions, getState());
// Only recreate websocket if url changed or if forced (weave cloud instance reload);
const isNewUrl = websocketUrl !== currentUrl;
// `topologyUrl` can be undefined initially, so only create a socket if it is truthy
// and no socket exists, or if we get a new url.
if (topologyUrl && (!socket || isNewUrl || forceRequest)) {
createWebsocket(websocketUrl, getState, dispatch);
currentUrl = websocketUrl;
}
}
function getNodes(getState, dispatch, forceRequest = false) {
if (isPausedSelector(getState())) {
getNodesOnce(getState, dispatch);
} else {
updateWebsocketChannel(getState, dispatch, forceRequest);
}
getNodeDetails(getState, dispatch);
}
export function pauseTimeAtNow() {
return (dispatch, getState) => {
dispatch({
type: ActionTypes.PAUSE_TIME_AT_NOW
});
updateRoute(getState);
if (!getState().get('nodesLoaded')) {
getNodes(getState, dispatch);
if (isResourceViewModeSelector(getState())) {
getResourceViewNodesSnapshot(getState(), dispatch);
}
}
};
}
function receiveTopologies(topologies) {
return (dispatch, getState) => {
const firstLoad = !getState().get('topologiesLoaded');
dispatch({
topologies,
type: ActionTypes.RECEIVE_TOPOLOGIES
});
getNodes(getState, dispatch);
// Populate search matches on first load
const state = getState();
// Fetch all the relevant nodes once on first load
if (firstLoad && isResourceViewModeSelector(state)) {
getResourceViewNodesSnapshot(state, dispatch);
}
};
}
function getTopologiesOnce(getState, dispatch) {
const url = topologiesUrl(getState());
doRequest({
error: (req) => {
log(`Error in topology request: ${req.responseText}`);
dispatch(receiveError(url));
},
success: (res) => {
dispatch(receiveTopologies(res));
},
url
});
}
function pollTopologies(getState, dispatch, initialPoll = false) {
// Used to resume polling when navigating between pages in Weave Cloud.
continuePolling = initialPoll === true ? true : continuePolling;
clearTimeout(topologyTimer);
// NOTE: getState is called every time to make sure the up-to-date state is used.
const url = topologiesUrl(getState());
doRequest({
error: (req) => {
log(`Error in topology request: ${req.responseText}`);
dispatch(receiveError(url));
// Only retry in stand-alone mode
if (continuePolling && !isPausedSelector(getState())) {
topologyTimer = setTimeout(() => {
pollTopologies(getState, dispatch);
}, TOPOLOGY_REFRESH_INTERVAL);
}
},
success: (res) => {
if (continuePolling && !isPausedSelector(getState())) {
dispatch(receiveTopologies(res));
topologyTimer = setTimeout(() => {
pollTopologies(getState, dispatch);
}, TOPOLOGY_REFRESH_INTERVAL);
}
},
url
});
}
function getTopologies(getState, dispatch, forceRequest) {
if (isPausedSelector(getState())) {
getTopologiesOnce(getState, dispatch);
} else {
pollTopologies(getState, dispatch, forceRequest);
}
}
export function jumpToTime(timestamp) {
return (dispatch, getState) => {
dispatch({
timestamp,
type: ActionTypes.JUMP_TO_TIME,
});
updateRoute(getState);
getTopologies(getState, dispatch);
if (!getState().get('nodesLoaded')) {
getNodes(getState, dispatch);
if (isResourceViewModeSelector(getState())) {
getResourceViewNodesSnapshot(getState(), dispatch);
}
} else {
// Get most recent details before freezing the state.
getNodeDetails(getState, dispatch);
}
};
}
export function receiveApiDetails(apiDetails) {
return (dispatch, getState) => {
const isFirstTime = !getState().get('version');
const pausedAt = getState().get('pausedAt');
dispatch({
capabilities: fromJS(apiDetails.capabilities || {}),
hostname: apiDetails.hostname,
newVersion: apiDetails.newVersion,
plugins: apiDetails.plugins,
type: ActionTypes.RECEIVE_API_DETAILS,
version: apiDetails.version,
});
// On initial load, if time travelling is enabled, start time travelling at the
// pausedAt timestamp (if it was given as a URL param); otherwise simply pause
// at the present time, which is arguably the next best thing we could do.
// NOTE: We can't make this decision before API details are received because
// we have no prior info on whether time travel would be available.
if (isFirstTime && pausedAt) {
if (apiDetails.capabilities && apiDetails.capabilities.historic_reports) {
dispatch(jumpToTime(pausedAt));
} else {
dispatch(pauseTimeAtNow());
}
}
};
}
export function getApiDetails(dispatch) {
clearTimeout(apiDetailsTimer);
const url = `${getApiPath()}/api`;
doRequest({
error: (req) => {
log(`Error in api details request: ${req.responseText}`);
receiveError(url);
if (continuePolling) {
apiDetailsTimer = setTimeout(() => {
getApiDetails(dispatch);
}, API_REFRESH_INTERVAL / 2);
}
},
success: (res) => {
dispatch(receiveApiDetails(res));
if (continuePolling) {
apiDetailsTimer = setTimeout(() => {
getApiDetails(dispatch);
}, API_REFRESH_INTERVAL);
}
},
url
});
}
function stopPolling() {
clearTimeout(apiDetailsTimer);
clearTimeout(topologyTimer);
continuePolling = false;
}
export function focusSearch() {
return (dispatch, getState) => {
dispatch({ type: ActionTypes.FOCUS_SEARCH });
// update nodes cache to allow search across all topologies,
// wait a second until animation is over
// NOTE: This will cause matching recalculation (and rerendering)
// of all the nodes in the topology, instead of applying it only on
// the nodes delta. The solution would be to implement deeper
// search selectors with per-node caching instead of per-topology.
setTimeout(() => {
getAllNodes(getState(), dispatch);
}, 1200);
};
}
export function getPipeStatus(pipeId, dispatch) {
const url = `${getApiPath()}/api/pipe/${encodeURIComponent(pipeId)}/check`;
doRequest({
complete: (res) => {
const status = {
204: 'PIPE_ALIVE',
404: 'PIPE_DELETED'
}[res.status];
if (!status) {
log('Unexpected pipe status:', res.status);
return;
}
dispatch(receiveControlPipeStatus(pipeId, status));
},
method: 'GET',
url
});
}
export function receiveControlPipe(pipeId, nodeId, rawTty, resizeTtyControl, control) {
return (dispatch, getState) => {
const state = getState();
if (state.get('nodeDetails').last()
&& nodeId !== state.get('nodeDetails').last().id) {
log('Node was deselected before we could set up control!');
deletePipe(pipeId, dispatch);
return;
}
const controlPipe = state.get('controlPipes').last();
if (controlPipe && controlPipe.get('id') !== pipeId) {
deletePipe(controlPipe.get('id'), dispatch);
}
dispatch({
control,
nodeId,
pipeId,
rawTty,
resizeTtyControl,
type: ActionTypes.RECEIVE_CONTROL_PIPE
});
updateRoute(getState);
};
}
function doControlRequest(nodeId, control, dispatch) {
clearTimeout(controlErrorTimer);
const url = `${getApiPath()}/api/control/${encodeURIComponent(control.probeId)}/`
+ `${encodeURIComponent(control.nodeId)}/${control.id}`;
doRequest({
error: (err) => {
dispatch(receiveControlError(nodeId, err.response));
controlErrorTimer = setTimeout(() => {
dispatch(clearControlError(nodeId));
}, 10000);
},
method: 'POST',
success: (res) => {
dispatch(receiveControlSuccess(nodeId));
if (res) {
if (res.pipe) {
dispatch(blurSearch());
const resizeTtyControl = res.resize_tty_control
&& { id: res.resize_tty_control, nodeId: control.nodeId, probeId: control.probeId };
dispatch(receiveControlPipe(
res.pipe,
nodeId,
res.raw_tty,
resizeTtyControl,
control
));
}
if (res.removedNode) {
dispatch(receiveControlNodeRemoved(nodeId));
}
}
},
url
});
}
export function doControl(nodeId, control) {
return (dispatch) => {
dispatch({
control,
nodeId,
type: ActionTypes.DO_CONTROL
});
doControlRequest(nodeId, control, dispatch);
};
}
export function shutdown() {
return (dispatch) => {
stopPolling();
teardownWebsockets();
dispatch({
type: ActionTypes.SHUTDOWN
});
};
}
export function setResourceView() {
return (dispatch, getState) => {
if (resourceViewAvailableSelector(getState())) {
dispatch({
type: ActionTypes.SET_VIEW_MODE,
viewMode: RESOURCE_VIEW_MODE,
});
// Pin the first metric if none of the visible ones is pinned.
const state = getState();
if (!pinnedMetricSelector(state)) {
const firstAvailableMetricType = availableMetricTypesSelector(state).first();
dispatch(pinMetric(firstAvailableMetricType));
}
getResourceViewNodesSnapshot(getState(), dispatch);
updateRoute(getState);
}
};
}
export function changeTopologyOption(option, value, topologyId, addOrRemove) {
return (dispatch, getState) => {
dispatch({
addOrRemove,
option,
topologyId,
type: ActionTypes.CHANGE_TOPOLOGY_OPTION,
value
});
updateRoute(getState);
// update all request workers with new options
getTopologies(getState, dispatch);
getNodes(getState, dispatch);
};
}
export function getTopologiesWithInitialPoll() {
return (dispatch, getState) => {
getTopologies(getState, dispatch, true);
};
}
export function resumeTime() {
return (dispatch, getState) => {
if (isPausedSelector(getState())) {
dispatch({
type: ActionTypes.RESUME_TIME
});
updateRoute(getState);
// After unpausing, all of the following calls will re-activate polling.
getTopologies(getState, dispatch);
getNodes(getState, dispatch, true);
if (isResourceViewModeSelector(getState())) {
getResourceViewNodesSnapshot(getState(), dispatch);
}
}
};
}
export function route(urlState) {
return (dispatch, getState) => {
dispatch({
state: urlState,
type: ActionTypes.ROUTE_TOPOLOGY
});
// Handle Time Travel state update through separate actions as it's more complex.
// This is mostly to handle switching contexts Explore <-> Monitor in WC while
// the timestamp keeps changing - e.g. if we were Time Travelling in Scope and
// then went live in Monitor, switching back to Explore should properly close
// the Time Travel etc, not just update the pausedAt state directly.
if (!urlState.pausedAt) {
dispatch(resumeTime());
} else {
dispatch(jumpToTime(urlState.pausedAt));
}
// update all request workers with new options
getTopologies(getState, dispatch);
getNodes(getState, dispatch);
// If we are landing on the resource view page, we need to fetch not only all the
// nodes for the current topology, but also the nodes of all the topologies that make
// the layers in the resource view.
const state = getState();
if (isResourceViewModeSelector(state)) {
getResourceViewNodesSnapshot(state, dispatch);
}
};
}
export function clickCloseDetails(nodeId) {
return (dispatch, getState) => {
dispatch({
nodeId,
type: ActionTypes.CLICK_CLOSE_DETAILS
});
// Pull the most recent details for the next details panel that comes into focus.
getNodeDetails(getState, dispatch);
updateRoute(getState);
};
}
export function clickNode(nodeId, label, origin, topologyId = null) {
return (dispatch, getState) => {
dispatch({
label,
nodeId,
origin,
topologyId,
type: ActionTypes.CLICK_NODE,
});
updateRoute(getState);
getNodeDetails(getState, dispatch);
};
}
export function clickRelative(nodeId, topologyId, label, origin) {
return (dispatch, getState) => {
dispatch({
label,
nodeId,
origin,
topologyId,
type: ActionTypes.CLICK_RELATIVE
});
updateRoute(getState);
getNodeDetails(getState, dispatch);
};
}
function updateTopology(dispatch, getState) {
const state = getState();
// If we're in the resource view, get the snapshot of all the relevant node topologies.
if (isResourceViewModeSelector(state)) {
getResourceViewNodesSnapshot(state, dispatch);
}
updateRoute(getState);
// NOTE: This is currently not needed for our static resource
// view, but we'll need it here later and it's simpler to just
// keep it than to redo the nodes delta updating logic.
getNodes(getState, dispatch);
}
export function clickShowTopologyForNode(topologyId, nodeId) {
return (dispatch, getState) => {
dispatch({
nodeId,
topologyId,
type: ActionTypes.CLICK_SHOW_TOPOLOGY_FOR_NODE
});
updateTopology(dispatch, getState);
};
}
export function clickTopology(topologyId) {
return (dispatch, getState) => {
dispatch({
topologyId,
type: ActionTypes.CLICK_TOPOLOGY
});
updateTopology(dispatch, getState);
};
}

View File

@@ -1,489 +0,0 @@
import { fromJS, Map } from 'immutable';
import { constructEdgeId as edge } from '../../utils/layouter-utils';
const makeMap = Map;
describe('NodesLayout', () => {
const NodesLayout = require('../nodes-layout');
function getNodeCoordinates(nodes) {
const coords = [];
nodes
.sortBy(node => node.get('id'))
.forEach((node) => {
coords.push(node.get('x'));
coords.push(node.get('y'));
});
return coords;
}
let options;
let nodes;
let coords;
let resultCoords;
const nodeSets = {
initial4: {
edges: fromJS({
[edge('n1', 'n3')]: {id: edge('n1', 'n3'), source: 'n1', target: 'n3'},
[edge('n1', 'n4')]: {id: edge('n1', 'n4'), source: 'n1', target: 'n4'},
[edge('n2', 'n4')]: {id: edge('n2', 'n4'), source: 'n2', target: 'n4'}
}),
nodes: fromJS({
n1: {id: 'n1'},
n2: {id: 'n2'},
n3: {id: 'n3'},
n4: {id: 'n4'}
})
},
layoutProps: {
edges: fromJS({}),
nodes: fromJS({
n1: {
id: 'n1', label: 'lold', labelMinor: 'lmold', rank: 'rold'
},
})
},
layoutProps2: {
edges: fromJS({}),
nodes: fromJS({
n1: {
id: 'n1', label: 'lnew', labelMinor: 'lmnew', rank: 'rnew', x: 111, y: 109
},
})
},
rank4: {
edges: fromJS({
[edge('n1', 'n3')]: {id: edge('n1', 'n3'), source: 'n1', target: 'n3'},
[edge('n1', 'n4')]: {id: edge('n1', 'n4'), source: 'n1', target: 'n4'},
[edge('n2', 'n4')]: {id: edge('n2', 'n4'), source: 'n2', target: 'n4'}
}),
nodes: fromJS({
n1: {id: 'n1', rank: 'A'},
n2: {id: 'n2', rank: 'A'},
n3: {id: 'n3', rank: 'B'},
n4: {id: 'n4', rank: 'B'}
})
},
rank6: {
edges: fromJS({
[edge('n1', 'n3')]: {id: edge('n1', 'n3'), source: 'n1', target: 'n3'},
[edge('n1', 'n4')]: {id: edge('n1', 'n4'), source: 'n1', target: 'n4'},
[edge('n1', 'n5')]: {id: edge('n1', 'n5'), source: 'n1', target: 'n5'},
[edge('n2', 'n4')]: {id: edge('n2', 'n4'), source: 'n2', target: 'n4'},
[edge('n2', 'n6')]: {id: edge('n2', 'n6'), source: 'n2', target: 'n6'},
}),
nodes: fromJS({
n1: {id: 'n1', rank: 'A'},
n2: {id: 'n2', rank: 'A'},
n3: {id: 'n3', rank: 'B'},
n4: {id: 'n4', rank: 'B'},
n5: {id: 'n5', rank: 'A'},
n6: {id: 'n6', rank: 'B'},
})
},
removeEdge24: {
edges: fromJS({
[edge('n1', 'n3')]: {id: edge('n1', 'n3'), source: 'n1', target: 'n3'},
[edge('n1', 'n4')]: {id: edge('n1', 'n4'), source: 'n1', target: 'n4'}
}),
nodes: fromJS({
n1: {id: 'n1'},
n2: {id: 'n2'},
n3: {id: 'n3'},
n4: {id: 'n4'}
})
},
removeNode2: {
edges: fromJS({
[edge('n1', 'n3')]: {id: edge('n1', 'n3'), source: 'n1', target: 'n3'},
[edge('n1', 'n4')]: {id: edge('n1', 'n4'), source: 'n1', target: 'n4'}
}),
nodes: fromJS({
n1: {id: 'n1'},
n3: {id: 'n3'},
n4: {id: 'n4'}
})
},
removeNode23: {
edges: fromJS({
[edge('n1', 'n4')]: {id: edge('n1', 'n4'), source: 'n1', target: 'n4'}
}),
nodes: fromJS({
n1: {id: 'n1'},
n4: {id: 'n4'}
})
},
single3: {
edges: fromJS({}),
nodes: fromJS({
n1: {id: 'n1'},
n2: {id: 'n2'},
n3: {id: 'n3'}
})
},
singlePortrait: {
edges: fromJS({
[edge('n1', 'n4')]: {id: edge('n1', 'n4'), source: 'n1', target: 'n4'}
}),
nodes: fromJS({
n1: {id: 'n1'},
n2: {id: 'n2'},
n3: {id: 'n3'},
n4: {id: 'n4'},
n5: {id: 'n5'}
})
},
singlePortrait6: {
edges: fromJS({
[edge('n1', 'n4')]: {id: edge('n1', 'n4'), source: 'n1', target: 'n4'}
}),
nodes: fromJS({
n1: {id: 'n1'},
n2: {id: 'n2'},
n3: {id: 'n3'},
n4: {id: 'n4'},
n5: {id: 'n5'},
n6: {id: 'n6'}
})
}
};
beforeEach(() => {
// clear feature flags
window.localStorage.clear();
options = {
edgeCache: makeMap(),
nodeCache: makeMap()
};
});
it('detects unseen nodes', () => {
const set1 = fromJS({
n1: {id: 'n1'}
});
const set12 = fromJS({
n1: {id: 'n1'},
n2: {id: 'n2'}
});
const set13 = fromJS({
n1: {id: 'n1'},
n3: {id: 'n3'}
});
let hasUnseen;
hasUnseen = NodesLayout.hasUnseenNodes(set12, set1);
expect(hasUnseen).toBeTruthy();
hasUnseen = NodesLayout.hasUnseenNodes(set13, set1);
expect(hasUnseen).toBeTruthy();
hasUnseen = NodesLayout.hasUnseenNodes(set1, set12);
expect(hasUnseen).toBeFalsy();
hasUnseen = NodesLayout.hasUnseenNodes(set1, set13);
expect(hasUnseen).toBeFalsy();
hasUnseen = NodesLayout.hasUnseenNodes(set12, set13);
expect(hasUnseen).toBeTruthy();
});
it('lays out initial nodeset in a rectangle', () => {
const result = NodesLayout.doLayout(
nodeSets.initial4.nodes,
nodeSets.initial4.edges
);
// console.log('initial', result.get('nodes'));
nodes = result.nodes.toJS();
expect(nodes.n1.x).toBeLessThan(nodes.n2.x);
expect(nodes.n1.y).toEqual(nodes.n2.y);
expect(nodes.n1.x).toEqual(nodes.n3.x);
expect(nodes.n1.y).toBeLessThan(nodes.n3.y);
expect(nodes.n3.x).toBeLessThan(nodes.n4.x);
expect(nodes.n3.y).toEqual(nodes.n4.y);
});
it('keeps nodes in rectangle after removing one edge', () => {
let result = NodesLayout.doLayout(
nodeSets.initial4.nodes,
nodeSets.initial4.edges
);
options.cachedLayout = result;
options.nodeCache = options.nodeCache.merge(result.nodes);
options.edgeCache = options.edgeCache.merge(result.edge);
coords = getNodeCoordinates(result.nodes);
result = NodesLayout.doLayout(
nodeSets.removeEdge24.nodes,
nodeSets.removeEdge24.edges,
options
);
nodes = result.nodes.toJS();
// console.log('remove 1 edge', nodes, result);
resultCoords = getNodeCoordinates(result.nodes);
expect(resultCoords).toEqual(coords);
});
it('keeps nodes in rectangle after removed edge reappears', () => {
let result = NodesLayout.doLayout(
nodeSets.initial4.nodes,
nodeSets.initial4.edges
);
coords = getNodeCoordinates(result.nodes);
options.cachedLayout = result;
options.nodeCache = options.nodeCache.merge(result.nodes);
options.edgeCache = options.edgeCache.merge(result.edge);
result = NodesLayout.doLayout(
nodeSets.removeEdge24.nodes,
nodeSets.removeEdge24.edges,
options
);
options.cachedLayout = result;
options.nodeCache = options.nodeCache.merge(result.nodes);
options.edgeCache = options.edgeCache.merge(result.edge);
result = NodesLayout.doLayout(
nodeSets.initial4.nodes,
nodeSets.initial4.edges,
options
);
nodes = result.nodes.toJS();
// console.log('re-add 1 edge', nodes, result);
resultCoords = getNodeCoordinates(result.nodes);
expect(resultCoords).toEqual(coords);
});
it('keeps nodes in rectangle after node disappears', () => {
let result = NodesLayout.doLayout(
nodeSets.initial4.nodes,
nodeSets.initial4.edges
);
coords = getNodeCoordinates(result.nodes);
options.cachedLayout = result;
options.nodeCache = options.nodeCache.merge(result.nodes);
options.edgeCache = options.edgeCache.merge(result.edge);
result = NodesLayout.doLayout(
nodeSets.removeNode2.nodes,
nodeSets.removeNode2.edges,
options
);
nodes = result.nodes.toJS();
resultCoords = getNodeCoordinates(result.nodes);
expect(resultCoords.slice(0, 2)).toEqual(coords.slice(0, 2));
expect(resultCoords.slice(2, 6)).toEqual(coords.slice(4, 8));
});
it('keeps nodes in rectangle after removed node reappears', () => {
let result = NodesLayout.doLayout(
nodeSets.initial4.nodes,
nodeSets.initial4.edges
);
nodes = result.nodes.toJS();
coords = getNodeCoordinates(result.nodes);
options.cachedLayout = result;
options.nodeCache = options.nodeCache.merge(result.nodes);
options.edgeCache = options.edgeCache.merge(result.edge);
result = NodesLayout.doLayout(
nodeSets.removeNode23.nodes,
nodeSets.removeNode23.edges,
options
);
nodes = result.nodes.toJS();
expect(nodes.n1.x).toBeLessThan(nodes.n4.x);
expect(nodes.n1.y).toBeLessThan(nodes.n4.y);
options.cachedLayout = result;
options.nodeCache = options.nodeCache.merge(result.nodes);
options.edgeCache = options.edgeCache.merge(result.edge);
result = NodesLayout.doLayout(
nodeSets.removeNode2.nodes,
nodeSets.removeNode2.edges,
options
);
nodes = result.nodes.toJS();
// console.log('re-add 1 node', nodes);
resultCoords = getNodeCoordinates(result.nodes);
expect(resultCoords.slice(0, 2)).toEqual(coords.slice(0, 2));
expect(resultCoords.slice(2, 6)).toEqual(coords.slice(4, 8));
});
it('renders single nodes in a square', () => {
const result = NodesLayout.doLayout(
nodeSets.single3.nodes,
nodeSets.single3.edges
);
nodes = result.nodes.toJS();
expect(nodes.n1.x).toEqual(nodes.n3.x);
expect(nodes.n1.y).toEqual(nodes.n2.y);
expect(nodes.n1.x).toBeLessThan(nodes.n2.x);
expect(nodes.n1.y).toBeLessThan(nodes.n3.y);
});
it('renders single nodes next to portrait graph', () => {
const result = NodesLayout.doLayout(
nodeSets.singlePortrait.nodes,
nodeSets.singlePortrait.edges,
{ noCache: true }
);
nodes = result.nodes.toJS();
// first square row on same level as top-most other node
expect(nodes.n1.y).toEqual(nodes.n2.y);
expect(nodes.n1.y).toEqual(nodes.n3.y);
expect(nodes.n4.y).toEqual(nodes.n5.y);
// all singles right to other nodes
expect(nodes.n1.x).toEqual(nodes.n4.x);
expect(nodes.n1.x).toBeLessThan(nodes.n2.x);
expect(nodes.n1.x).toBeLessThan(nodes.n3.x);
expect(nodes.n1.x).toBeLessThan(nodes.n5.x);
expect(nodes.n2.x).toEqual(nodes.n5.x);
});
it('renders an additional single node in single nodes group', () => {
let result = NodesLayout.doLayout(
nodeSets.singlePortrait.nodes,
nodeSets.singlePortrait.edges,
{ noCache: true }
);
nodes = result.nodes.toJS();
// first square row on same level as top-most other node
expect(nodes.n1.y).toEqual(nodes.n2.y);
expect(nodes.n1.y).toEqual(nodes.n3.y);
expect(nodes.n4.y).toEqual(nodes.n5.y);
// all singles right to other nodes
expect(nodes.n1.x).toEqual(nodes.n4.x);
expect(nodes.n1.x).toBeLessThan(nodes.n2.x);
expect(nodes.n1.x).toBeLessThan(nodes.n3.x);
expect(nodes.n1.x).toBeLessThan(nodes.n5.x);
expect(nodes.n2.x).toEqual(nodes.n5.x);
options.cachedLayout = result;
options.nodeCache = options.nodeCache.merge(result.nodes);
options.edgeCache = options.edgeCache.merge(result.edge);
result = NodesLayout.doLayout(
nodeSets.singlePortrait6.nodes,
nodeSets.singlePortrait6.edges,
options
);
nodes = result.nodes.toJS();
expect(nodes.n1.x).toBeLessThan(nodes.n2.x);
expect(nodes.n1.x).toBeLessThan(nodes.n3.x);
expect(nodes.n1.x).toBeLessThan(nodes.n5.x);
expect(nodes.n1.x).toBeLessThan(nodes.n6.x);
});
it('adds a new node to existing layout in a line', () => {
// feature flag
window.localStorage.setItem('scope-experimental:layout-dance', true);
let result = NodesLayout.doLayout(
nodeSets.rank4.nodes,
nodeSets.rank4.edges,
{ noCache: true }
);
nodes = result.nodes.toJS();
coords = getNodeCoordinates(result.nodes);
options.cachedLayout = result;
options.nodeCache = options.nodeCache.merge(result.nodes);
options.edgeCache = options.edgeCache.merge(result.edge);
expect(NodesLayout.hasNewNodesOfExistingRank(
nodeSets.rank6.nodes,
nodeSets.rank6.edges,
result.nodes
)).toBeTruthy();
result = NodesLayout.doLayout(
nodeSets.rank6.nodes,
nodeSets.rank6.edges,
options
);
nodes = result.nodes.toJS();
expect(nodes.n5.x).toBeGreaterThan(nodes.n1.x);
expect(nodes.n5.y).toEqual(nodes.n1.y);
expect(nodes.n6.x).toBeGreaterThan(nodes.n3.x);
expect(nodes.n6.y).toEqual(nodes.n3.y);
});
it('rerenders the nodes completely after the coordinates have been messed up', () => {
// Take an initial setting
let result = NodesLayout.doLayout(
nodeSets.rank4.nodes,
nodeSets.rank4.edges,
);
// Cache the result layout
options.cachedLayout = result;
options.nodeCache = options.nodeCache.merge(result.nodes);
options.edgeCache = options.edgeCache.merge(result.edge);
// Shrink the coordinates of all the nodes 2x to make them closer to one another
options.nodeCache = options.nodeCache.update(cache => cache.map(node => node.merge({
x: node.get('x') / 2,
y: node.get('y') / 2,
})));
// Rerun the initial layout to get a trivial diff and skip all the advanced layout logic.
result = NodesLayout.doLayout(
nodeSets.rank4.nodes,
nodeSets.rank4.edges,
options
);
// The layout should have been recomputed after hitting our last 'integration testing' criterion
coords = getNodeCoordinates(options.nodeCache);
resultCoords = getNodeCoordinates(result.nodes);
expect(resultCoords).not.toEqual(coords);
});
it('only caches layout-related properties', () => {
// populate cache by doing a full layout run, this stores layout values in cache
const first = NodesLayout.doLayout(
nodeSets.layoutProps.nodes,
nodeSets.layoutProps.edges,
{ noCache: true }
);
// now pass updated nodes with modified values
const second = NodesLayout.doLayout(
nodeSets.layoutProps2.nodes,
nodeSets.layoutProps2.edges,
{}
);
// new labels should not be overwritten by cache
nodes = second.nodes.toJS();
expect(nodes.n1.label).toEqual('lnew');
expect(nodes.n1.labelMinor).toEqual('lmnew');
// but layout values should be preferred from cache
expect(nodes.n1.rank).toEqual('rold');
expect(nodes.n1.x).toEqual(first.nodes.getIn(['n1', 'x']));
expect(nodes.n1.y).toEqual(first.nodes.getIn(['n1', 'y']));
});
});

View File

@@ -1,115 +0,0 @@
import React from 'react';
import { Motion } from 'react-motion';
import { Repeat, fromJS, Map as makeMap } from 'immutable';
import { line, curveBasis } from 'd3-shape';
import { times } from 'lodash';
import { weakSpring } from 'weaveworks-ui-components/lib/utils/animation';
import { NODE_BASE_SIZE, EDGE_WAYPOINTS_CAP } from '../constants/styles';
import Edge from './edge';
const spline = line()
.curve(curveBasis)
.x(d => d.x)
.y(d => d.y);
const transformedEdge = (props, path, thickness) => (
<Edge {...props} path={spline(path)} thickness={thickness} />
);
// Converts a waypoints map of the format { x0: 11, y0: 22, x1: 33, y1: 44 }
// that is used by Motion to an array of waypoints in the format
// [{ x: 11, y: 22 }, { x: 33, y: 44 }] that can be used by D3.
const waypointsMapToArray = (waypointsMap) => {
const waypointsArray = times(EDGE_WAYPOINTS_CAP, () => ({}));
waypointsMap.forEach((value, key) => {
const [axis, index] = [key[0], key.slice(1)];
waypointsArray[index][axis] = value;
});
return waypointsArray;
};
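// A minimal sketch of the conversion, assuming EDGE_WAYPOINTS_CAP >= 2:
//   waypointsMapToArray(fromJS({ x0: 11, y0: 22, x1: 33, y1: 44 }))
//   // => [{ x: 11, y: 22 }, { x: 33, y: 44 }, {}, ...]
// with empty objects padding the array up to EDGE_WAYPOINTS_CAP entries.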
// Converts a waypoints array of the input format [{ x: 11, y: 22 }, { x: 33, y: 44 }]
// to an array of waypoints that is used by Motion in the format { x0: 11, y0: 22, x1: 33, y1: 44 }.
const waypointsArrayToMap = (waypointsArray) => {
let waypointsMap = makeMap();
waypointsArray.forEach((point, index) => {
waypointsMap = waypointsMap.set(`x${index}`, weakSpring(point.get('x')));
waypointsMap = waypointsMap.set(`y${index}`, weakSpring(point.get('y')));
});
return waypointsMap;
};
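// The inverse direction, sketched: waypointsArrayToMap(fromJS([{ x: 11, y: 22 }]))
// yields a Map { x0: weakSpring(11), y0: weakSpring(22) }, ready to be spread
// into a Motion style object (see render() below).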
export default class EdgeContainer extends React.PureComponent {
constructor(props, context) {
super(props, context);
this.state = {
thickness: 1,
waypointsMap: makeMap(),
};
}
componentWillMount() {
this.prepareWaypointsForMotion(this.props);
}
componentWillReceiveProps(nextProps) {
// immutablejs allows us to `===`! \o/
const waypointsChanged = this.props.waypoints !== nextProps.waypoints;
const animationChanged = this.props.isAnimated !== nextProps.isAnimated;
if (waypointsChanged || animationChanged) {
this.prepareWaypointsForMotion(nextProps);
}
// Edge thickness will reflect the zoom scale.
const baseScale = (nextProps.scale * 0.01) * NODE_BASE_SIZE;
const thickness = (nextProps.focused ? 3 : 1) * baseScale;
this.setState({ thickness });
}
render() {
const {
isAnimated, waypoints, scale, ...forwardedProps
} = this.props;
const { thickness, waypointsMap } = this.state;
if (!isAnimated) {
return transformedEdge(forwardedProps, waypoints.toJS(), thickness);
}
return (
// For the Motion interpolation to work, the waypoints need to be in a map format like
// { x0: 11, y0: 22, x1: 33, y1: 44 } that we convert to the array format when rendering.
<Motion style={{
interpolatedThickness: weakSpring(thickness),
...waypointsMap.toJS(),
}}>
{
({ interpolatedThickness, ...interpolatedWaypoints }) => transformedEdge(
forwardedProps,
waypointsMapToArray(fromJS(interpolatedWaypoints)),
interpolatedThickness
)
}
</Motion>
);
}
prepareWaypointsForMotion({ waypoints, isAnimated }) {
// Don't update if the edges are not animated.
if (!isAnimated) return;
// The Motion library requires the number of waypoints to be constant, so we fill in for
// the missing ones by reusing the edge source point, which doesn't affect the edge shape
// because of how the curveBasis interpolation is done.
const waypointsMissing = EDGE_WAYPOINTS_CAP - waypoints.size;
if (waypointsMissing > 0) {
waypoints = Repeat(waypoints.get(0), waypointsMissing).concat(waypoints);
}
this.setState({ waypointsMap: waypointsArrayToMap(waypoints) });
}
}

View File

@@ -1,82 +0,0 @@
import React from 'react';
import { connect } from 'react-redux';
import classNames from 'classnames';
import { enterEdge, leaveEdge } from '../actions/app-actions';
import { encodeIdAttribute, decodeIdAttribute } from '../utils/dom-utils';
function isStorageComponent(id) {
const storageComponents = ['<persistent_volume>', '<storage_class>', '<persistent_volume_claim>', '<volume_snapshot>', '<volume_snapshot_data>'];
return storageComponents.includes(id);
}
// getAdjacencyClass takes an id that encodes the edge's source and target
// nodes along with their topologies.
// For example: id is of form "nodeA;<storage_class>---nodeB;<persistent_volume_claim>"
function getAdjacencyClass(id) {
const topologyId = id.split('---');
const fromNode = topologyId[0].split(';');
const toNode = topologyId[1].split(';');
if (fromNode[1] !== undefined && toNode[1] !== undefined) {
if (isStorageComponent(fromNode[1]) || isStorageComponent(toNode[1])) {
return 'link-storage';
}
}
return 'link-none';
}
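// For instance (hypothetical ids): "pvc-1;<persistent_volume_claim>---sc-1;<storage_class>"
// yields 'link-storage', while "nodeA---nodeB" (no topology suffix) yields 'link-none'.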
class Edge extends React.Component {
constructor(props, context) {
super(props, context);
this.handleMouseEnter = this.handleMouseEnter.bind(this);
this.handleMouseLeave = this.handleMouseLeave.bind(this);
}
render() {
const {
id, path, highlighted, focused, thickness, source, target
} = this.props;
const shouldRenderMarker = (focused || highlighted) && (source !== target);
const className = classNames('edge', { highlighted });
return (
<g
id={encodeIdAttribute(id)}
className={className}
onMouseEnter={this.handleMouseEnter}
onMouseLeave={this.handleMouseLeave}
>
<path className="shadow" d={path} style={{ strokeWidth: 10 * thickness }} />
<path
className={getAdjacencyClass(id)}
d={path}
style={{ strokeWidth: 5 }}
/>
<path
className="link"
d={path}
markerEnd={shouldRenderMarker ? 'url(#end-arrow)' : null}
style={{ strokeWidth: thickness }}
/>
</g>
);
}
handleMouseEnter(ev) {
this.props.enterEdge(decodeIdAttribute(ev.currentTarget.id));
}
handleMouseLeave(ev) {
this.props.leaveEdge(decodeIdAttribute(ev.currentTarget.id));
}
}
function mapStateToProps(state) {
return {
contrastMode: state.get('contrastMode')
};
}
export default connect(
mapStateToProps,
{ enterEdge, leaveEdge }
)(Edge);

View File

@@ -1,105 +0,0 @@
import React from 'react';
import { connect } from 'react-redux';
import { List as makeList } from 'immutable';
import { GraphNode } from 'weaveworks-ui-components';
import {
getMetricValue,
getMetricColor,
} from '../utils/metric-utils';
import { clickNode } from '../actions/request-actions';
import { enterNode, leaveNode } from '../actions/app-actions';
import { trackAnalyticsEvent } from '../utils/tracking-utils';
import { getNodeColor } from '../utils/color-utils';
import MatchedResults from '../components/matched-results';
import { GRAPH_VIEW_MODE } from '../constants/naming';
import NodeNetworksOverlay from './node-networks-overlay';
class NodeContainer extends React.Component {
saveRef = (ref) => {
this.ref = ref;
};
handleMouseClick = (nodeId, ev) => {
ev.stopPropagation();
trackAnalyticsEvent('scope.node.click', {
layout: GRAPH_VIEW_MODE,
parentTopologyId: this.props.currentTopology.get('parentId'),
topologyId: this.props.currentTopology.get('id'),
});
this.props.clickNode(nodeId, this.props.label, this.ref.getBoundingClientRect());
};
renderPrependedInfo = () => {
const { showingNetworks, networks } = this.props;
if (!showingNetworks) return null;
return (
<NodeNetworksOverlay networks={networks} />
);
};
renderAppendedInfo = () => {
const matchedMetadata = this.props.matches.get('metadata', makeList());
const matchedParents = this.props.matches.get('parents', makeList());
const matchedDetails = matchedMetadata.concat(matchedParents);
return (
<MatchedResults matches={matchedDetails} searchTerms={this.props.searchTerms} />
);
};
render() {
const {
rank, label, pseudo, metric, showingNetworks, networks
} = this.props;
const { hasMetric, height, formattedValue } = getMetricValue(metric);
const metricFormattedValue = !pseudo && hasMetric ? formattedValue : '';
const labelOffset = (showingNetworks && networks) ? 10 : 0;
return (
<GraphNode
id={this.props.id}
shape={this.props.shape}
tag={this.props.tag}
label={this.props.label}
labelMinor={this.props.labelMinor}
labelOffset={labelOffset}
stacked={this.props.stacked}
highlighted={this.props.highlighted}
color={getNodeColor(rank, label, pseudo)}
size={this.props.size}
isAnimated={this.props.isAnimated}
contrastMode={this.props.contrastMode}
forceSvg={this.props.exportingGraph}
searchTerms={this.props.searchTerms}
metricColor={getMetricColor(metric)}
metricFormattedValue={metricFormattedValue}
metricNumericValue={height}
renderPrependedInfo={this.renderPrependedInfo}
renderAppendedInfo={this.renderAppendedInfo}
onMouseEnter={this.props.enterNode}
onMouseLeave={this.props.leaveNode}
onClick={this.handleMouseClick}
graphNodeRef={this.saveRef}
x={this.props.x}
y={this.props.y}
/>
);
}
}
function mapStateToProps(state) {
return {
contrastMode: state.get('contrastMode'),
currentTopology: state.get('currentTopology'),
exportingGraph: state.get('exportingGraph'),
searchTerms: [state.get('searchQuery')],
showingNetworks: state.get('showingNetworks'),
};
}
export default connect(
mapStateToProps,
{ clickNode, enterNode, leaveNode }
)(NodeContainer);

View File

@@ -1,53 +0,0 @@
import React from 'react';
import { scaleBand } from 'd3-scale';
import { List as makeList } from 'immutable';
import { connect } from 'react-redux';
import { getNetworkColor } from '../utils/color-utils';
// Min size is about a quarter of the width, feels about right.
const minBarWidth = 0.25;
const barHeight = 0.08;
const innerPadding = 0.04;
const borderRadius = 0.01;
const offset = 0.67;
const x = scaleBand();
function NodeNetworksOverlay({ networks = makeList() }) {
const barWidth = Math.max(1, minBarWidth * networks.size);
const yPosition = offset - (barHeight * 0.5);
// Update singleton scale.
x.domain(networks.map((n, i) => i).toJS());
x.range([barWidth * -0.5, barWidth * 0.5]);
x.paddingInner(innerPadding);
const bandwidth = x.bandwidth();
const bars = networks.map((n, i) => (
<rect
className="node-network"
key={n.get('id')}
x={x(i)}
y={yPosition}
width={bandwidth}
height={barHeight}
rx={borderRadius}
ry={borderRadius}
style={{ fill: getNetworkColor(n.get('colorKey', n.get('id'))) }}
/>
));
return (
<g transform="translate(0, -5) scale(60)">
{bars.toJS()}
</g>
);
}
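// For illustration: with 4 networks, barWidth = max(1, 0.25 * 4) = 1, so the
// domain [0, 1, 2, 3] maps onto [-0.5, 0.5]; d3 then uses a step of
// 1 / (4 - innerPadding) and a bandwidth of step * (1 - innerPadding) ~= 0.24,
// all in node-size-relative units that the <g> transform scales up by 60.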
function mapStateToProps(state) {
return {
contrastMode: state.get('contrastMode')
};
}
export default connect(mapStateToProps)(NodeNetworksOverlay);

View File

@@ -1,291 +0,0 @@
import React from 'react';
import classNames from 'classnames';
import { connect } from 'react-redux';
import { fromJS, Map as makeMap, List as makeList } from 'immutable';
import theme from 'weaveworks-ui-components/lib/theme';
import NodeContainer from './node-container';
import EdgeContainer from './edge-container';
import { getAdjacentNodes, hasSelectedNode as hasSelectedNodeFn } from '../utils/topology-utils';
import { graphExceedsComplexityThreshSelector } from '../selectors/topology';
import { nodeNetworksSelector, selectedNetworkNodesIdsSelector } from '../selectors/node-networks';
import { searchNodeMatchesSelector } from '../selectors/search';
import { nodeMetricSelector } from '../selectors/node-metric';
import {
highlightedNodeIdsSelector,
highlightedEdgeIdsSelector
} from '../selectors/graph-view/decorators';
import {
selectedScaleSelector,
layoutNodesSelector,
layoutEdgesSelector
} from '../selectors/graph-view/layout';
import { NODE_BASE_SIZE } from '../constants/styles';
import {
BLURRED_EDGES_LAYER,
BLURRED_NODES_LAYER,
NORMAL_EDGES_LAYER,
NORMAL_NODES_LAYER,
HIGHLIGHTED_EDGES_LAYER,
HIGHLIGHTED_NODES_LAYER,
HOVERED_EDGES_LAYER,
HOVERED_NODES_LAYER,
} from '../constants/naming';
class NodesChartElements extends React.Component {
constructor(props, context) {
super(props, context);
this.renderNode = this.renderNode.bind(this);
this.renderEdge = this.renderEdge.bind(this);
this.renderElement = this.renderElement.bind(this);
this.nodeDisplayLayer = this.nodeDisplayLayer.bind(this);
this.edgeDisplayLayer = this.edgeDisplayLayer.bind(this);
// Node decorators
this.nodeHighlightedDecorator = this.nodeHighlightedDecorator.bind(this);
this.nodeFocusedDecorator = this.nodeFocusedDecorator.bind(this);
this.nodeBlurredDecorator = this.nodeBlurredDecorator.bind(this);
this.nodeMatchesDecorator = this.nodeMatchesDecorator.bind(this);
this.nodeNetworksDecorator = this.nodeNetworksDecorator.bind(this);
this.nodeMetricDecorator = this.nodeMetricDecorator.bind(this);
this.nodeScaleDecorator = this.nodeScaleDecorator.bind(this);
// Edge decorators
this.edgeFocusedDecorator = this.edgeFocusedDecorator.bind(this);
this.edgeBlurredDecorator = this.edgeBlurredDecorator.bind(this);
this.edgeHighlightedDecorator = this.edgeHighlightedDecorator.bind(this);
this.edgeScaleDecorator = this.edgeScaleDecorator.bind(this);
}
nodeDisplayLayer(node) {
if (node.get('id') === this.props.mouseOverNodeId) {
return HOVERED_NODES_LAYER;
} if (node.get('blurred') && !node.get('focused')) {
return BLURRED_NODES_LAYER;
} if (node.get('highlighted')) {
return HIGHLIGHTED_NODES_LAYER;
}
return NORMAL_NODES_LAYER;
}
edgeDisplayLayer(edge) {
if (edge.get('id') === this.props.mouseOverEdgeId) {
return HOVERED_EDGES_LAYER;
} if (edge.get('blurred') && !edge.get('focused')) {
return BLURRED_EDGES_LAYER;
} if (edge.get('highlighted')) {
return HIGHLIGHTED_EDGES_LAYER;
}
return NORMAL_EDGES_LAYER;
}
nodeHighlightedDecorator(node) {
const nodeSelected = (this.props.selectedNodeId === node.get('id'));
const nodeHighlighted = this.props.highlightedNodeIds.has(node.get('id'));
return node.set('highlighted', nodeHighlighted || nodeSelected);
}
nodeFocusedDecorator(node) {
const nodeSelected = (this.props.selectedNodeId === node.get('id'));
const isNeighborOfSelected = this.props.neighborsOfSelectedNode.includes(node.get('id'));
return node.set('focused', nodeSelected || isNeighborOfSelected);
}
nodeBlurredDecorator(node) {
const belongsToNetwork = this.props.selectedNetworkNodesIds.contains(node.get('id'));
const noMatches = this.props.searchNodeMatches.get(node.get('id'), makeMap()).isEmpty();
const notMatched = (this.props.searchQuery && !node.get('highlighted') && noMatches);
const notFocused = (this.props.selectedNodeId && !node.get('focused'));
const notInNetwork = (this.props.selectedNetwork && !belongsToNetwork);
return node.set('blurred', notMatched || notFocused || notInNetwork);
}
nodeMatchesDecorator(node) {
return node.set('matches', this.props.searchNodeMatches.get(node.get('id')));
}
nodeNetworksDecorator(node) {
return node.set('networks', this.props.nodeNetworks.get(node.get('id')));
}
nodeMetricDecorator(node) {
return node.set('metric', this.props.nodeMetric.get(node.get('id')));
}
nodeScaleDecorator(node) {
return node.set('scale', node.get('focused') ? this.props.selectedScale : 1);
}
edgeHighlightedDecorator(edge) {
return edge.set('highlighted', this.props.highlightedEdgeIds.has(edge.get('id')));
}
edgeFocusedDecorator(edge) {
const sourceSelected = (this.props.selectedNodeId === edge.get('source'));
const targetSelected = (this.props.selectedNodeId === edge.get('target'));
return edge.set('focused', this.props.hasSelectedNode && (sourceSelected || targetSelected));
}
edgeBlurredDecorator(edge) {
const { selectedNodeId, searchNodeMatches, selectedNetworkNodesIds } = this.props;
const sourceSelected = (selectedNodeId === edge.get('source'));
const targetSelected = (selectedNodeId === edge.get('target'));
const otherNodesSelected = this.props.hasSelectedNode && !sourceSelected && !targetSelected;
const sourceNoMatches = searchNodeMatches.get(edge.get('source'), makeMap()).isEmpty();
const targetNoMatches = searchNodeMatches.get(edge.get('target'), makeMap()).isEmpty();
const notMatched = this.props.searchQuery && (sourceNoMatches || targetNoMatches);
const sourceInNetwork = selectedNetworkNodesIds.contains(edge.get('source'));
const targetInNetwork = selectedNetworkNodesIds.contains(edge.get('target'));
const notInNetwork = this.props.selectedNetwork && (!sourceInNetwork || !targetInNetwork);
return edge.set('blurred', !edge.get('highlighted') && !edge.get('focused')
&& (otherNodesSelected || notMatched || notInNetwork));
}
edgeScaleDecorator(edge) {
return edge.set('scale', edge.get('focused') ? this.props.selectedScale : 1);
}
renderNode(node) {
const { isAnimated } = this.props;
// Old versions of Scope reports use the node shape `storagesheet`;
// if so, normalise it to `sheet`.
const shape = node.get('shape') === 'storagesheet' ? 'sheet' : node.get('shape');
return (
<NodeContainer
matches={node.get('matches')}
networks={node.get('networks')}
metric={node.get('metric')}
focused={node.get('focused')}
highlighted={node.get('highlighted')}
shape={shape}
tag={node.get('tag')}
stacked={node.get('stack')}
key={node.get('id')}
id={node.get('id')}
label={node.get('label')}
labelMinor={node.get('labelMinor')}
pseudo={node.get('pseudo')}
rank={node.get('rank')}
x={node.get('x')}
y={node.get('y')}
size={node.get('scale') * NODE_BASE_SIZE}
isAnimated={isAnimated}
/>
);
}
renderEdge(edge) {
const { isAnimated } = this.props;
return (
<EdgeContainer
key={edge.get('id')}
id={edge.get('id')}
source={edge.get('source')}
target={edge.get('target')}
waypoints={edge.get('points')}
highlighted={edge.get('highlighted')}
focused={edge.get('focused')}
scale={edge.get('scale')}
isAnimated={isAnimated}
/>
);
}
renderOverlay(element) {
// NOTE: This piece of code is a bit hacky: as we can't set the absolute coords for the
// SVG element, we set the zoom level high enough to be sure it covers the screen.
const className = classNames('nodes-chart-overlay', { active: element.get('isActive') });
const scale = (this.props.selectedScale || 1) * 100000;
return (
<rect
className={className}
key="nodes-chart-overlay"
transform={`scale(${scale})`}
fill={theme.colors.purple25}
x={-1}
y={-1}
width={2}
height={2}
/>
);
}
renderElement(element) {
if (element.get('isOverlay')) {
return this.renderOverlay(element);
}
// This heuristic is not ideal, but it works.
return element.get('points') ? this.renderEdge(element) : this.renderNode(element);
}
render() {
const nodes = this.props.layoutNodes.toIndexedSeq()
.map(this.nodeHighlightedDecorator)
.map(this.nodeFocusedDecorator)
.map(this.nodeBlurredDecorator)
.map(this.nodeMatchesDecorator)
.map(this.nodeNetworksDecorator)
.map(this.nodeMetricDecorator)
.map(this.nodeScaleDecorator)
.groupBy(this.nodeDisplayLayer);
const edges = this.props.layoutEdges.toIndexedSeq()
.map(this.edgeHighlightedDecorator)
.map(this.edgeFocusedDecorator)
.map(this.edgeBlurredDecorator)
.map(this.edgeScaleDecorator)
.groupBy(this.edgeDisplayLayer);
// NOTE: The elements need to be arranged into a single array outside
// of the DOM structure for the React rendering engine to do smart rearrangements
// without unnecessary re-rendering of the elements themselves. So e.g.
// rendering the element layers individually below would be significantly slower.
const orderedElements = makeList([
edges.get(BLURRED_EDGES_LAYER, makeList()),
nodes.get(BLURRED_NODES_LAYER, makeList()),
fromJS([{ isActive: !!nodes.get(BLURRED_NODES_LAYER), isOverlay: true }]),
edges.get(NORMAL_EDGES_LAYER, makeList()),
nodes.get(NORMAL_NODES_LAYER, makeList()),
edges.get(HIGHLIGHTED_EDGES_LAYER, makeList()),
nodes.get(HIGHLIGHTED_NODES_LAYER, makeList()),
edges.get(HOVERED_EDGES_LAYER, makeList()),
nodes.get(HOVERED_NODES_LAYER, makeList()),
]).flatten(true);
return (
<g className="tour-step-anchor nodes-chart-elements">
{orderedElements.map(this.renderElement)}
</g>
);
}
}
function mapStateToProps(state) {
return {
contrastMode: state.get('contrastMode'),
hasSelectedNode: hasSelectedNodeFn(state),
highlightedEdgeIds: highlightedEdgeIdsSelector(state),
highlightedNodeIds: highlightedNodeIdsSelector(state),
isAnimated: !graphExceedsComplexityThreshSelector(state),
layoutEdges: layoutEdgesSelector(state),
layoutNodes: layoutNodesSelector(state),
mouseOverEdgeId: state.get('mouseOverEdgeId'),
mouseOverNodeId: state.get('mouseOverNodeId'),
neighborsOfSelectedNode: getAdjacentNodes(state),
nodeMetric: nodeMetricSelector(state),
nodeNetworks: nodeNetworksSelector(state),
searchNodeMatches: searchNodeMatchesSelector(state),
searchQuery: state.get('searchQuery'),
selectedNetwork: state.get('selectedNetwork'),
selectedNetworkNodesIds: selectedNetworkNodesIdsSelector(state),
selectedNodeId: state.get('selectedNodeId'),
selectedScale: selectedScaleSelector(state),
};
}
export default connect(mapStateToProps)(NodesChartElements);

View File

@@ -1,85 +0,0 @@
import React from 'react';
import { connect } from 'react-redux';
import NodesChartElements from './nodes-chart-elements';
import ZoomableCanvas from '../components/zoomable-canvas';
import { transformToString } from '../utils/transform-utils';
import { clickBackground } from '../actions/app-actions';
import {
graphLimitsSelector,
graphZoomStateSelector,
} from '../selectors/graph-view/zoom';
import { CONTENT_INCLUDED } from '../constants/naming';
const EdgeMarkerDefinition = ({ selectedNodeId }) => {
const markerOffset = selectedNodeId ? '35' : '40';
const markerSize = selectedNodeId ? '10' : '30';
return (
<defs>
<marker
className="edge-marker"
id="end-arrow"
viewBox="1 0 10 10"
refX={markerOffset}
refY="3.5"
markerWidth={markerSize}
markerHeight={markerSize}
orient="auto">
<polygon className="link" points="0 0, 10 3.5, 0 7" />
</marker>
</defs>
);
};
class NodesChart extends React.Component {
constructor(props, context) {
super(props, context);
this.handleMouseClick = this.handleMouseClick.bind(this);
}
handleMouseClick() {
if (this.props.selectedNodeId) {
this.props.clickBackground();
}
}
renderContent(transform) {
return (
<g transform={transformToString(transform)}>
<EdgeMarkerDefinition selectedNodeId={this.props.selectedNodeId} />
<NodesChartElements />
</g>
);
}
render() {
return (
<div className="nodes-chart">
<ZoomableCanvas
onClick={this.handleMouseClick}
boundContent={CONTENT_INCLUDED}
limitsSelector={graphLimitsSelector}
zoomStateSelector={graphZoomStateSelector}
disabled={this.props.selectedNodeId}>
{transform => this.renderContent(transform)}
</ZoomableCanvas>
</div>
);
}
}
function mapStateToProps(state) {
return {
selectedNodeId: state.get('selectedNodeId'),
};
}
export default connect(
mapStateToProps,
{ clickBackground }
)(NodesChart);

View File

@@ -1,23 +0,0 @@
import React from 'react';
import classnames from 'classnames';
const NodesError = ({
children, faIconClass, hidden, mainClassName = 'nodes-chart-error'
}) => {
const className = classnames(mainClassName, {
hide: hidden
});
return (
<div className={className}>
<div className="nodes-chart-error-icon-container">
<div className="nodes-chart-error-icon">
<span className={faIconClass} />
</div>
</div>
{children}
</div>
);
};
export default NodesError;

View File

@@ -1,207 +0,0 @@
/* eslint react/jsx-no-bind: "off" */
import React from 'react';
import styled from 'styled-components';
import { connect } from 'react-redux';
import { List as makeList, Map as makeMap } from 'immutable';
import capitalize from 'lodash/capitalize';
import NodeDetailsTable from '../components/node-details/node-details-table';
import { clickNode } from '../actions/request-actions';
import { sortOrderChanged } from '../actions/app-actions';
import { shownNodesSelector } from '../selectors/node-filters';
import { trackAnalyticsEvent } from '../utils/tracking-utils';
import { findTopologyById } from '../utils/topology-utils';
import { TABLE_VIEW_MODE } from '../constants/naming';
import { windowHeightSelector } from '../selectors/canvas';
import { searchNodeMatchesSelector } from '../selectors/search';
import { getNodeColor } from '../utils/color-utils';
const IGNORED_COLUMNS = ['docker_container_ports', 'docker_container_id', 'docker_image_id',
'docker_container_command', 'docker_container_networks'];
const Icon = styled.span`
border-radius: ${props => props.theme.borderRadius.soft};
background-color: ${props => props.color};
margin-top: 3px;
display: block;
height: 12px;
width: 12px;
`;
function topologyLabel(topologies, id) {
const topology = findTopologyById(topologies, id);
if (!topology) {
return capitalize(id);
}
return topology.get('fullName');
}
function getColumns(nodes, topologies) {
const metricColumns = nodes
.toList()
.flatMap((n) => {
const metrics = (n.get('metrics') || makeList())
.filter(m => !m.get('valueEmpty'))
.map(m => makeMap({ dataType: 'number', id: m.get('id'), label: m.get('label') }));
return metrics;
})
.toSet()
.toList()
.sortBy(m => m.get('label'));
const metadataColumns = nodes
.toList()
.flatMap((n) => {
const metadata = (n.get('metadata') || makeList())
.map(m => makeMap({
dataType: m.get('dataType'),
id: m.get('id'),
label: m.get('label')
}));
return metadata;
})
.toSet()
.filter(n => !IGNORED_COLUMNS.includes(n.get('id')))
.toList()
.sortBy(m => m.get('label'));
const relativesColumns = nodes
.toList()
.flatMap((n) => {
const metadata = (n.get('parents') || makeList())
.map(m => makeMap({
id: m.get('topologyId'),
label: topologyLabel(topologies, m.get('topologyId'))
}));
return metadata;
})
.toSet()
.toList()
.sortBy(m => m.get('label'));
return relativesColumns.concat(metadataColumns, metricColumns).toJS();
}
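// For illustration: the resulting column order is relatives (parent
// topologies) first, then metadata, then metrics, each group sorted by label.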
function renderIdCell({
rank, label, labelMinor, pseudo
}) {
const showSubLabel = Boolean(pseudo) && labelMinor;
const title = showSubLabel ? `${label} (${labelMinor})` : label;
return (
<div title={title} className="nodes-grid-id-column">
<div style={{ flex: 'none', width: 16 }}>
<Icon color={getNodeColor(rank, label)} />
</div>
<div className="truncate">
{label}
{' '}
{showSubLabel && <span className="nodes-grid-label-minor">{labelMinor}</span>}
</div>
</div>
);
}
class NodesGrid extends React.Component {
constructor(props, context) {
super(props, context);
this.onClickRow = this.onClickRow.bind(this);
this.onSortChange = this.onSortChange.bind(this);
this.saveTableRef = this.saveTableRef.bind(this);
}
onClickRow(ev, node) {
trackAnalyticsEvent('scope.node.click', {
layout: TABLE_VIEW_MODE,
parentTopologyId: this.props.currentTopology.get('parentId'),
topologyId: this.props.currentTopology.get('id'),
});
this.props.clickNode(node.id, node.label, ev.target.getBoundingClientRect());
}
onSortChange(sortedBy, sortedDesc) {
this.props.sortOrderChanged(sortedBy, sortedDesc);
}
saveTableRef(ref) {
this.tableRef = ref;
}
render() {
const {
nodes, gridSortedBy, gridSortedDesc, searchNodeMatches, searchQuery, windowHeight, topologies
} = this.props;
const height = this.tableRef
? windowHeight - this.tableRef.getBoundingClientRect().top - 30
: 0;
const cmpStyle = {
height,
paddingLeft: 40,
paddingRight: 40,
};
// TODO: What are 24 and 18? Use a comment or extract into constants.
const tbodyHeight = height - 24 - 18;
const className = 'tour-step-anchor scroll-body';
const tbodyStyle = {
height: `${tbodyHeight}px`,
};
const detailsData = {
columns: getColumns(nodes, topologies),
id: '',
label: this.props.currentTopology && this.props.currentTopology.get('fullName'),
nodes: nodes
.toList()
.filter(n => !(searchQuery && searchNodeMatches.get(n.get('id'), makeMap()).isEmpty()))
.toJS()
};
return (
<div className="nodes-grid" ref={this.saveTableRef}>
{nodes.size > 0 && (
<NodeDetailsTable
style={cmpStyle}
className={className}
renderIdCell={renderIdCell}
tbodyStyle={tbodyStyle}
topologyId={this.props.currentTopologyId}
onSortChange={this.onSortChange}
onClickRow={this.onClickRow}
sortedBy={gridSortedBy}
sortedDesc={gridSortedDesc}
selectedNodeId={this.props.selectedNodeId}
limit={1000}
{...detailsData}
/>
)}
</div>
);
}
}
function mapStateToProps(state) {
return {
currentTopology: state.get('currentTopology'),
currentTopologyId: state.get('currentTopologyId'),
gridSortedBy: state.get('gridSortedBy'),
gridSortedDesc: state.get('gridSortedDesc'),
nodes: shownNodesSelector(state),
searchNodeMatches: searchNodeMatchesSelector(state),
searchQuery: state.get('searchQuery'),
selectedNodeId: state.get('selectedNodeId'),
topologies: state.get('topologies'),
windowHeight: windowHeightSelector(state),
};
}
export default connect(
mapStateToProps,
{ clickNode, sortOrderChanged }
)(NodesGrid);

View File

@@ -1,499 +0,0 @@
import dagre from 'dagre';
import debug from 'debug';
import { fromJS, Map as makeMap, Set as ImmSet } from 'immutable';
import pick from 'lodash/pick';
import { NODE_BASE_SIZE, EDGE_WAYPOINTS_CAP } from '../constants/styles';
import { EDGE_ID_SEPARATOR } from '../constants/naming';
import { trackAnalyticsEvent } from '../utils/tracking-utils';
import { featureIsEnabledAny } from '../utils/feature-utils';
import { buildTopologyCacheId, updateNodeDegrees } from '../utils/topology-utils';
import { minEuclideanDistanceBetweenPoints } from '../utils/math-utils';
import { uniformSelect } from '../utils/array-utils';
const log = debug('scope:nodes-layout');
const topologyCaches = {};
export const DEFAULT_MARGINS = { left: 0, top: 0 };
// Pretend the nodes are bigger than they are so that edges do not enter
// them at a high curvature, which would misplace the arrow heads.
const NODE_SIZE_FACTOR = 1.5 * NODE_BASE_SIZE;
const NODE_SEPARATION_FACTOR = 1 * NODE_BASE_SIZE;
const RANK_SEPARATION_FACTOR = 2 * NODE_BASE_SIZE;
const NODE_CENTERS_SEPARATION_FACTOR = NODE_SIZE_FACTOR + NODE_SEPARATION_FACTOR;
let layoutRuns = 0;
let layoutRunsTrivial = 0;
function graphNodeId(id) {
return id.replace('.', '<DOT>');
}
function fromGraphNodeId(encodedId) {
return encodedId.replace('<DOT>', '.');
}
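// NB: with a string (non-regex) pattern, String.prototype.replace only swaps
// the first occurrence, e.g. graphNodeId('a.b.c') === 'a<DOT>b.c'.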
// Adds some additional waypoints to the edge to make sure that it connects the node
// centers and that the edge enters the target node relatively straight so that the
// arrow is drawn correctly. The total number of waypoints is capped to EDGE_WAYPOINTS_CAP.
function correctedEdgePath(waypoints, source, target) {
// Get the relevant waypoints that will be added/replicated.
const sourcePoint = fromJS({ x: source.get('x'), y: source.get('y') });
const targetPoint = fromJS({ x: target.get('x'), y: target.get('y') });
const entrancePoint = waypoints.last();
if (target !== source) {
// The strategy for the non-loop edges is the following:
// * Uniformly select at most CAP - 4 of the central waypoints ignoring the target node
// entrance point. Such a selection will ensure that both the source node exit point and
// the point before the target node entrance point are taken as boundaries of the interval.
// * Now manually add those 4 points that we always want to have included in the edge path -
// centers of source/target nodes and twice the target node entrance point to ensure the
// edge path actually goes through it and thus doesn't miss the arrow element.
// * In the end, what matters for the arrow is that the last 4 points of the array are always
// fixed regardless of the total number of waypoints. That way we ensure the arrow is drawn
// correctly, but also that the edge path enters the target node smoothly.
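// The resulting path is [sourceCenter, ...selected, entrance, entrance,
// targetCenter], i.e. at most (EDGE_WAYPOINTS_CAP - 4) + 4 points in total.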
waypoints = fromJS(uniformSelect(waypoints.butLast().toJS(), EDGE_WAYPOINTS_CAP - 4));
waypoints = waypoints.unshift(sourcePoint);
waypoints = waypoints.push(entrancePoint);
waypoints = waypoints.push(entrancePoint);
waypoints = waypoints.push(targetPoint);
} else {
// For loops we simply set the endpoints at the center of source/target node to
// make them smoother and, of course, we cap the total number of waypoints.
waypoints = fromJS(uniformSelect(waypoints.toJS(), EDGE_WAYPOINTS_CAP));
waypoints = waypoints.set(0, sourcePoint);
waypoints = waypoints.set(waypoints.size - 1, targetPoint);
}
return waypoints;
}
/**
* Add coordinates to 0-degree nodes using a square layout
* Depending on the previous layout run's graph aspect ratio, the square will be
* placed on the right side or below the graph.
* @param {Object} layout Layout with nodes and edges
* @param {Object} opts Options with node distances
* @return {Object} modified layout
*/
function layoutSingleNodes(layout, opts) {
const result = Object.assign({}, layout);
const options = opts || {};
const margins = options.margins || DEFAULT_MARGINS;
const ranksep = RANK_SEPARATION_FACTOR / 2; // dagre splits it in half
const nodesep = NODE_SEPARATION_FACTOR;
const nodeWidth = NODE_SIZE_FACTOR;
const nodeHeight = NODE_SIZE_FACTOR;
const graphHeight = layout.graphHeight || layout.height;
const graphWidth = layout.graphWidth || layout.width;
const aspectRatio = graphHeight ? graphWidth / graphHeight : 1;
let { nodes } = layout;
// 0-degree nodes
const singleNodes = nodes.filter(node => node.get('degree') === 0);
if (singleNodes.size) {
let offsetX;
let offsetY;
const nonSingleNodes = nodes.filter(node => node.get('degree') !== 0);
if (nonSingleNodes.size > 0) {
if (aspectRatio < 1) {
log('laying out single nodes to the right', aspectRatio);
offsetX = nonSingleNodes.maxBy(node => node.get('x')).get('x');
offsetY = nonSingleNodes.minBy(node => node.get('y')).get('y');
if (offsetX) {
offsetX += nodeWidth + nodesep;
}
} else {
log('laying out single nodes below', aspectRatio);
offsetX = nonSingleNodes.minBy(node => node.get('x')).get('x');
offsetY = nonSingleNodes.maxBy(node => node.get('y')).get('y');
if (offsetY) {
offsetY += nodeHeight + ranksep;
}
}
}
// default margins
offsetX = offsetX || (margins.left + nodeWidth) / 2;
offsetY = offsetY || (margins.top + nodeHeight) / 2;
const columns = Math.ceil(Math.sqrt(singleNodes.size));
let row = 0;
let col = 0;
let singleX;
let singleY;
nodes = nodes.sortBy(node => node.get('rank')).map((node) => {
if (singleNodes.has(node.get('id'))) {
if (col === columns) {
col = 0;
row += 1;
}
singleX = (col * (nodesep + nodeWidth)) + offsetX;
singleY = (row * (ranksep + nodeHeight)) + offsetY;
col += 1;
return node.merge({
x: singleX,
y: singleY
});
}
return node;
});
// adjust layout dimensions if graph is now bigger
result.width = Math.max(layout.width, singleX + (nodeWidth / 2) + nodesep);
result.height = Math.max(layout.height, singleY + (nodeHeight / 2) + ranksep);
result.nodes = nodes;
}
return result;
}
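// For illustration: 5 single nodes give columns = Math.ceil(Math.sqrt(5)) = 3,
// so they fill a 3-column grid row by row (3 nodes, then 2), placed to the
// right of or below the connected part of the graph.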
/**
* Layout engine runner
* After the layout engine run, nodes and edges have x-y coordinates. The engine is
* not run if the number of nodes is bigger than `MAX_NODES`.
* @param {Object} graph dagre graph instance
* @param {Map} imNodes new node set
* @param {Map} imEdges new edge set
* @param {Object} opts Options with nodes layout
* @return {Object} Layout with nodes, edges, dimensions
*/
function runLayoutEngine(graph, imNodes, imEdges, opts) {
let nodes = imNodes;
let edges = imEdges;
const ranksep = RANK_SEPARATION_FACTOR;
const nodesep = NODE_SEPARATION_FACTOR;
const nodeWidth = NODE_SIZE_FACTOR;
const nodeHeight = NODE_SIZE_FACTOR;
// configure node and rank separation
graph.setGraph({
nodesep,
ranksep
});
// add nodes to the graph if not already there
nodes.forEach((node) => {
const gNodeId = graphNodeId(node.get('id'));
if (!graph.hasNode(gNodeId)) {
graph.setNode(gNodeId, {
height: nodeHeight,
width: nodeWidth
});
}
});
// remove nodes that are no longer there or are 0-degree nodes
graph.nodes().forEach((gNodeId) => {
const nodeId = fromGraphNodeId(gNodeId);
if (!nodes.has(nodeId) || nodes.get(nodeId).get('degree') === 0) {
graph.removeNode(gNodeId);
}
});
// add edges to the graph if not already there
edges.forEach((edge) => {
const s = graphNodeId(edge.get('source'));
const t = graphNodeId(edge.get('target'));
if (!graph.hasEdge(s, t)) {
const virtualNodes = s === t ? 1 : 0;
graph.setEdge(s, t, {id: edge.get('id'), minlen: virtualNodes});
}
});
// remove edges that are no longer there
graph.edges().forEach((edgeObj) => {
const edge = [fromGraphNodeId(edgeObj.v), fromGraphNodeId(edgeObj.w)];
const edgeId = edge.join(EDGE_ID_SEPARATOR);
if (!edges.has(edgeId)) {
graph.removeEdge(edgeObj.v, edgeObj.w);
}
});
dagre.layout(graph, { debugTiming: false });
// apply coordinates to nodes and edges
graph.nodes().forEach((gNodeId) => {
const graphNode = graph.node(gNodeId);
const nodeId = fromGraphNodeId(gNodeId);
nodes = nodes.setIn([nodeId, 'x'], graphNode.x);
nodes = nodes.setIn([nodeId, 'y'], graphNode.y);
});
graph.edges().forEach((graphEdge) => {
const graphEdgeMeta = graph.edge(graphEdge);
const edge = edges.get(graphEdgeMeta.id);
const source = nodes.get(fromGraphNodeId(edge.get('source')));
const target = nodes.get(fromGraphNodeId(edge.get('target')));
const waypoints = correctedEdgePath(fromJS(graphEdgeMeta.points), source, target);
edges = edges.setIn([graphEdgeMeta.id, 'points'], waypoints);
});
const { width, height } = graph.graph();
let layout = {
edges,
graphHeight: height,
graphWidth: width,
height,
nodes,
width
};
// layout the single nodes
layout = layoutSingleNodes(layout, opts);
// return object with the width and height of layout
return layout;
}
/**
* Adds `points` array to edge based on location of source and target
* @param {Map} edge new edge
* @param {Map} nodeCache all nodes
* @returns {Map} modified edge
*/
function setSimpleEdgePoints(edge, nodeCache) {
const source = nodeCache.get(edge.get('source'));
const target = nodeCache.get(edge.get('target'));
return edge.set('points', fromJS([
{x: source.get('x'), y: source.get('y')},
{x: target.get('x'), y: target.get('y')}
]));
}
/**
* Lays out nodes whose rank already exists in the cached layout.
* Relies on the assumption that every added node is connected to an existing
* node and has the rank of an existing node. New nodes are laid out in the
* same line as the latter, with a direct connection between the existing and the new node.
* @param {object} layout Layout with nodes and edges
* @param {Map} nodeCache previous nodes
* @return {object} new layout object
*/
export function doLayoutNewNodesOfExistingRank(layout, nodeCache) {
const result = Object.assign({}, layout);
const nodesep = NODE_SEPARATION_FACTOR;
const nodeWidth = NODE_SIZE_FACTOR;
// determine new nodes
const oldNodes = ImmSet.fromKeys(nodeCache);
const newNodes = ImmSet.fromKeys(layout.nodes.filter(n => n.get('degree') > 0))
.subtract(oldNodes);
result.nodes = layout.nodes.map((n) => {
if (newNodes.contains(n.get('id'))) {
const nodesSameRank = nodeCache.filter(nn => nn.get('rank') === n.get('rank'));
if (nodesSameRank.size > 0) {
const y = nodesSameRank.first().get('y');
const x = nodesSameRank.maxBy(nn => nn.get('x')).get('x') + nodesep + nodeWidth;
return n.merge({ x, y });
}
return n;
}
return n;
});
result.edges = layout.edges.map((edge) => {
if (!edge.has('points')) {
return setSimpleEdgePoints(edge, layout.nodes);
}
return edge;
});
return result;
}
/**
* Determine if nodes were added between node sets
* @param {Map} nodes new Map of nodes
* @param {Map} cache old Map of nodes
* @return {Boolean} True if `nodes` contains node ids that are not in `cache`
*/
export function hasUnseenNodes(nodes, cache) {
const hasUnseen = nodes.size > cache.size
|| !ImmSet.fromKeys(nodes).isSubset(ImmSet.fromKeys(cache));
if (hasUnseen) {
log('unseen nodes:', ...ImmSet.fromKeys(nodes).subtract(ImmSet.fromKeys(cache)).toJS());
}
return hasUnseen;
}
/**
* Determine if all new nodes are 0-degree nodes
* Requires cached nodes (implies a previous layout run).
* @param {Map} nodes new Map of nodes
* @param {Map} cache old Map of nodes
* @return {Boolean} True if all new nodes are 0-degree nodes
*/
function hasNewSingleNode(nodes, cache) {
const oldNodes = ImmSet.fromKeys(cache);
const newNodes = ImmSet.fromKeys(nodes).subtract(oldNodes);
const hasNewSingleNodes = newNodes.every(key => nodes.getIn([key, 'degree']) === 0);
return oldNodes.size > 0 && hasNewSingleNodes;
}
/**
* Determine if all new nodes are of existing ranks
* Requires cached nodes (implies a previous layout run).
* @param {Map} nodes new Map of nodes
* @param {Map} edges new Map of edges
* @param {Map} cache old Map of nodes
* @return {Boolean} True if all new nodes have a rank that already exists
*/
export function hasNewNodesOfExistingRank(nodes, edges, cache) {
const oldNodes = ImmSet.fromKeys(cache);
const newNodes = ImmSet.fromKeys(nodes).subtract(oldNodes);
// if there are edges that connect two new nodes, a full layout is needed
const bothNodesNew = edges.find(edge => newNodes.contains(edge.get('source'))
&& newNodes.contains(edge.get('target')));
if (bothNodesNew) {
return false;
}
const oldRanks = cache.filter(n => n.get('rank')).map(n => n.get('rank')).toSet();
const hasNewNodesOfExistingRankOrSingle = newNodes.every(key => nodes.getIn([key, 'degree']) === 0
|| oldRanks.contains(nodes.getIn([key, 'rank'])));
return oldNodes.size > 0 && hasNewNodesOfExistingRankOrSingle;
}
/**
* Determine if a cached edge has the same endpoint coordinates in the new node set
* @param {Map} cachedEdge Edge with source and target
* @param {Map} nodes new node set
* @return {Boolean} True if old and new endpoints have same coordinates
*/
function hasSameEndpoints(cachedEdge, nodes) {
const oldPoints = cachedEdge.get('points');
const oldSourcePoint = oldPoints.first();
const oldTargetPoint = oldPoints.last();
const newSource = nodes.get(cachedEdge.get('source'));
const newTarget = nodes.get(cachedEdge.get('target'));
return (oldSourcePoint && oldTargetPoint && newSource && newTarget
&& oldSourcePoint.get('x') === newSource.get('x')
&& oldSourcePoint.get('y') === newSource.get('y')
&& oldTargetPoint.get('x') === newTarget.get('x')
&& oldTargetPoint.get('y') === newTarget.get('y'));
}
/**
* Clones a previous layout
* @param {Object} layout Layout object
* @param {Map} nodes new nodes
* @param {Map} edges new edges
* @return {Object} layout clone
*/
function cloneLayout(layout, nodes, edges) {
const clone = Object.assign({}, layout, {edges, nodes});
return clone;
}
/**
* Copies node properties from previous layout runs to new nodes.
* This assumes the cache has data for all new nodes.
* @param {Object} layout Layout
* @param {Object} nodeCache cache of all old nodes
* @param {Object} edgeCache cache of all old edges
* @return {Object} modified layout
*/
function copyLayoutProperties(layout, nodeCache, edgeCache) {
const result = Object.assign({}, layout);
result.nodes = layout.nodes.map(node => (nodeCache.has(node.get('id'))
? node.merge(nodeCache.get(node.get('id'))) : node));
result.edges = layout.edges.map((edge) => {
if (edgeCache.has(edge.get('id'))
&& hasSameEndpoints(edgeCache.get(edge.get('id')), result.nodes)) {
return edge.merge(edgeCache.get(edge.get('id')));
} if (nodeCache.get(edge.get('source')) && nodeCache.get(edge.get('target'))) {
return setSimpleEdgePoints(edge, nodeCache);
}
return edge;
});
return result;
}
/**
* Layout of nodes and edges
* If a previous layout was given and not too much has changed, the previous
* layout is adjusted and returned. Otherwise a new layout engine run is performed.
* @param {Map} immNodes All nodes
* @param {Map} immEdges All edges
* @param {object} opts width, height, margins, etc...
* @return {object} graph object with nodes, edges, dimensions
*/
export function doLayout(immNodes, immEdges, opts) {
const options = opts || {};
const cacheId = buildTopologyCacheId(options.topologyId, options.topologyOptions);
// one engine plus node and edge caches per topology, to keep renderings similar
if (options.noCache || !topologyCaches[cacheId]) {
topologyCaches[cacheId] = {
edgeCache: makeMap(),
graph: new dagre.graphlib.Graph({}),
nodeCache: makeMap()
};
}
const cache = topologyCaches[cacheId];
const cachedLayout = options.cachedLayout || cache.cachedLayout;
const nodeCache = options.nodeCache || cache.nodeCache;
const edgeCache = options.edgeCache || cache.edgeCache;
const useCache = !options.forceRelayout && cachedLayout && nodeCache && edgeCache;
const nodesWithDegrees = updateNodeDegrees(immNodes, immEdges);
let layout;
layoutRuns += 1;
if (useCache && !hasUnseenNodes(immNodes, nodeCache)) {
layoutRunsTrivial += 1;
// trivial case: no new nodes have been added
log('skip layout, trivial adjustment', layoutRunsTrivial, layoutRuns);
layout = cloneLayout(cachedLayout, immNodes, immEdges);
layout = copyLayoutProperties(layout, nodeCache, edgeCache);
} else if (useCache
&& featureIsEnabledAny('layout-dance', 'layout-dance-single')
&& hasNewSingleNode(nodesWithDegrees, nodeCache)) {
// special case: new nodes are 0-degree nodes, no need for layout run,
// they will be laid out further below
log('skip layout, only 0-degree node(s) added');
layout = cloneLayout(cachedLayout, nodesWithDegrees, immEdges);
layout = copyLayoutProperties(layout, nodeCache, edgeCache);
layout = layoutSingleNodes(layout, opts);
} else if (useCache
&& featureIsEnabledAny('layout-dance', 'layout-dance-rank')
&& hasNewNodesOfExistingRank(nodesWithDegrees, immEdges, nodeCache)) {
// special case: a few new nodes were added, no need for a layout run,
// they will be inserted according to their ranks
log('skip layout, used rank-based insertion');
layout = cloneLayout(cachedLayout, nodesWithDegrees, immEdges);
layout = copyLayoutProperties(layout, nodeCache, edgeCache);
layout = doLayoutNewNodesOfExistingRank(layout, nodeCache);
layout = layoutSingleNodes(layout, opts);
} else {
// default case: the new layout is too different and refreshing is required
layout = runLayoutEngine(cache.graph, nodesWithDegrees, immEdges, opts);
}
if (layout) {
// Last line of defense - re-render everything if two nodes are too close to one another.
if (minEuclideanDistanceBetweenPoints(layout.nodes) < NODE_CENTERS_SEPARATION_FACTOR) {
layout = runLayoutEngine(cache.graph, nodesWithDegrees, immEdges, opts);
trackAnalyticsEvent('scope.layout.graph.overlap');
}
// cache results
cache.cachedLayout = layout;
// only cache layout-related properties
// NB: These properties must be immutable wrt a given node because properties of updated nodes
// will be overwritten with the cached values, see copyLayoutProperties()
cache.nodeCache = cache.nodeCache.merge(layout.nodes.map(n => fromJS(pick(n.toJS(), ['x', 'y', 'rank']))));
cache.edgeCache = cache.edgeCache.merge(layout.edges);
}
return layout;
}
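// A minimal usage sketch (option values are illustrative; see the option
// names read above):
//
//   const layout = doLayout(nodes, edges, {
//     topologyId: 'containers',
//     topologyOptions: makeMap(),
//     forceRelayout: false, // set true to bypass all caches
//     margins: DEFAULT_MARGINS,
//   });
//   // layout.nodes now carry x/y and layout.edges carry 'points' waypoints.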

View File

@@ -1 +0,0 @@
module.exports = require('./components/app').default;

View File

@@ -1,51 +0,0 @@
import React from 'react';
import Immutable from 'immutable';
import TestUtils from 'react-dom/test-utils';
import { Provider } from 'react-redux';
import configureStore from '../../stores/configureStore';
// need ES5 require to keep automocking off
const NodeDetails = require('../node-details.js').default.WrappedComponent;
describe('NodeDetails', () => {
let nodes;
let nodeId;
let details;
const makeMap = Immutable.OrderedMap;
beforeEach(() => {
nodes = makeMap();
nodeId = 'n1';
});
it('shows n/a when node was not found', () => {
const c = TestUtils.renderIntoDocument((
<Provider store={configureStore()}>
<NodeDetails notFound />
</Provider>
));
const notFound = TestUtils.findRenderedDOMComponentWithClass(
c,
'node-details-header-notavailable'
);
expect(notFound).toBeDefined();
});
it('shows label of node with title', () => {
nodes = nodes.set(nodeId, Immutable.fromJS({id: nodeId}));
details = {label: 'Node 1'};
const c = TestUtils.renderIntoDocument((
<Provider store={configureStore()}>
<NodeDetails
nodes={nodes}
topologyId="containers"
nodeId={nodeId}
details={details}
/>
</Provider>
));
const title = TestUtils.findRenderedDOMComponentWithClass(c, 'node-details-header-label');
expect(title.title).toBe('Node 1');
});
});

View File

@@ -1,305 +0,0 @@
import debug from 'debug';
import React from 'react';
import PropTypes from 'prop-types';
import classNames from 'classnames';
import { connect } from 'react-redux';
import { debounce, isEqual } from 'lodash';
import { ThemeProvider } from 'styled-components';
import commonTheme from 'weaveworks-ui-components/lib/theme';
import GlobalStyle from './global-style';
import Logo from './logo';
import Footer from './footer';
import Sidebar from './sidebar';
import HelpPanel from './help-panel';
import TroubleshootingMenu from './troubleshooting-menu';
import Search from './search';
import Status from './status';
import Topologies from './topologies';
import TopologyOptions from './topology-options';
import Overlay from './overlay';
import {
pinNextMetric,
pinPreviousMetric,
hitEsc,
unpinMetric,
toggleHelp,
setGraphView,
setMonitorState,
setTableView,
setStoreViewState,
setViewportDimensions,
} from '../actions/app-actions';
import {
focusSearch,
getApiDetails,
setResourceView,
getTopologiesWithInitialPoll,
shutdown,
} from '../actions/request-actions';
import Details from './details';
import Nodes from './nodes';
import TimeControl from './time-control';
import TimeTravelWrapper from './time-travel-wrapper';
import ViewModeSelector from './view-mode-selector';
import NetworkSelector from './networks-selector';
import DebugToolbar, { showingDebugToolbar, toggleDebugToolbar } from './debug-toolbar';
import { getUrlState } from '../utils/router-utils';
import { getRouter } from '../router';
import { trackAnalyticsEvent } from '../utils/tracking-utils';
import { availableNetworksSelector } from '../selectors/node-networks';
import { timeTravelSupportedSelector } from '../selectors/time-travel';
import {
isResourceViewModeSelector,
isTableViewModeSelector,
isGraphViewModeSelector,
} from '../selectors/topology';
import defaultTheme from '../themes/default';
import contrastTheme from '../themes/contrast';
import { VIEWPORT_RESIZE_DEBOUNCE_INTERVAL } from '../constants/timer';
import {
ESC_KEY_CODE,
} from '../constants/key-codes';
const keyPressLog = debug('scope:app-key-press');
class App extends React.Component {
constructor(props, context) {
super(props, context);
this.props.dispatch(setMonitorState(this.props.monitor));
this.props.dispatch(setStoreViewState(!this.props.disableStoreViewState));
this.setViewportDimensions = this.setViewportDimensions.bind(this);
this.handleResize = debounce(this.setViewportDimensions, VIEWPORT_RESIZE_DEBOUNCE_INTERVAL);
this.handleRouteChange = debounce(props.onRouteChange, 50);
this.saveAppRef = this.saveAppRef.bind(this);
this.onKeyPress = this.onKeyPress.bind(this);
this.onKeyUp = this.onKeyUp.bind(this);
}
componentDidMount() {
this.setViewportDimensions();
window.addEventListener('resize', this.handleResize);
window.addEventListener('keypress', this.onKeyPress);
window.addEventListener('keyup', this.onKeyUp);
this.router = this.props.dispatch(getRouter(this.props.urlState));
this.router.start({ hashbang: true });
if (!this.props.routeSet || process.env.WEAVE_CLOUD) {
// don't request topologies when already done via the router.
// If running as a component, always request topologies when the app mounts.
this.props.dispatch(getTopologiesWithInitialPoll());
}
getApiDetails(this.props.dispatch);
}
componentWillUnmount() {
window.removeEventListener('resize', this.handleResize);
window.removeEventListener('keypress', this.onKeyPress);
window.removeEventListener('keyup', this.onKeyUp);
this.props.dispatch(shutdown());
this.router.stop();
}
componentWillReceiveProps(nextProps) {
if (nextProps.monitor !== this.props.monitor) {
this.props.dispatch(setMonitorState(nextProps.monitor));
}
if (nextProps.disableStoreViewState !== this.props.disableStoreViewState) {
this.props.dispatch(setStoreViewState(!nextProps.disableStoreViewState));
}
// Debounce-notify about the route change if the URL state changes its content.
if (!isEqual(nextProps.urlState, this.props.urlState)) {
this.handleRouteChange(nextProps.urlState);
}
}
onKeyUp(ev) {
const { showingTerminal } = this.props;
keyPressLog('onKeyUp', 'keyCode', ev.keyCode, ev);
// don't get esc in onKeyPress
if (ev.keyCode === ESC_KEY_CODE) {
this.props.dispatch(hitEsc());
} else if (ev.code === 'KeyD' && ev.ctrlKey && !showingTerminal) {
toggleDebugToolbar();
this.forceUpdate();
}
}
onKeyPress(ev) {
const { dispatch, searchFocused, showingTerminal } = this.props;
//
// keyup gives 'key'
// keypress gives 'char'
// The distinction is important for international keyboard layouts, where
// there is often a different {key: char} mapping.
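// Shortcuts handled below: '<'/'>' pin the previous/next metric, 'g'/'t'/'r'
// switch to graph/table/resource view, 'q' unpins the metric, '/' focuses
// search and '?' toggles the help panel.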
if (!searchFocused && !showingTerminal) {
keyPressLog('onKeyPress', 'keyCode', ev.keyCode, ev);
const char = String.fromCharCode(ev.charCode);
if (char === '<') {
dispatch(pinPreviousMetric());
this.trackEvent('scope.metric.selector.pin.previous.keypress', {
metricType: this.props.pinnedMetricType
});
} else if (char === '>') {
dispatch(pinNextMetric());
this.trackEvent('scope.metric.selector.pin.next.keypress', {
metricType: this.props.pinnedMetricType
});
} else if (char === 'g') {
dispatch(setGraphView());
this.trackEvent('scope.layout.selector.keypress');
} else if (char === 't') {
dispatch(setTableView());
this.trackEvent('scope.layout.selector.keypress');
} else if (char === 'r') {
dispatch(setResourceView());
this.trackEvent('scope.layout.selector.keypress');
} else if (char === 'q') {
this.trackEvent('scope.metric.selector.unpin.keypress', {
metricType: this.props.pinnedMetricType
});
dispatch(unpinMetric());
} else if (char === '/') {
ev.preventDefault();
dispatch(focusSearch());
} else if (char === '?') {
dispatch(toggleHelp());
}
}
}
trackEvent(eventName, additionalProps = {}) {
trackAnalyticsEvent(eventName, {
layout: this.props.topologyViewMode,
parentTopologyId: this.props.currentTopology.get('parentId'),
topologyId: this.props.currentTopology.get('id'),
...additionalProps,
});
}
setViewportDimensions() {
if (this.appRef) {
const { width, height } = this.appRef.getBoundingClientRect();
this.props.dispatch(setViewportDimensions(width, height));
}
}
saveAppRef(ref) {
this.appRef = ref;
}
render() {
const {
isTableViewMode, isGraphViewMode, isResourceViewMode, showingDetails,
showingHelp, showingNetworkSelector, showingTroubleshootingMenu,
timeTravelTransitioning, timeTravelSupported, contrastMode,
} = this.props;
const className = classNames('scope-app', {
'contrast-mode': contrastMode,
'time-travel-open': timeTravelSupported,
});
const isIframe = window !== window.top;
return (
<ThemeProvider theme={{...commonTheme, scope: contrastMode ? contrastTheme : defaultTheme }}>
<>
<GlobalStyle />
<div className={className} ref={this.saveAppRef}>
{showingDebugToolbar() && <DebugToolbar />}
{showingHelp && <HelpPanel />}
{showingTroubleshootingMenu && <TroubleshootingMenu />}
{showingDetails && (
<Details
renderNodeDetailsExtras={this.props.renderNodeDetailsExtras}
/>
)}
<div className="header">
{timeTravelSupported && this.props.renderTimeTravel()}
<div className="selectors">
<div className="logo">
{!isIframe
&& (
<svg width="100%" height="100%" viewBox="0 0 1089 217">
<Logo />
</svg>
)
}
</div>
<Search />
<Topologies />
<ViewModeSelector />
<TimeControl />
</div>
</div>
<Nodes />
<Sidebar classNames={isTableViewMode ? 'sidebar-gridmode' : ''}>
{showingNetworkSelector && isGraphViewMode && <NetworkSelector />}
{!isResourceViewMode && <Status />}
{!isResourceViewMode && <TopologyOptions />}
</Sidebar>
<Footer />
<Overlay faded={timeTravelTransitioning} />
</div>
</>
</ThemeProvider>
);
}
}
function mapStateToProps(state) {
return {
contrastMode: state.get('contrastMode'),
currentTopology: state.get('currentTopology'),
isGraphViewMode: isGraphViewModeSelector(state),
isResourceViewMode: isResourceViewModeSelector(state),
isTableViewMode: isTableViewModeSelector(state),
pinnedMetricType: state.get('pinnedMetricType'),
routeSet: state.get('routeSet'),
searchFocused: state.get('searchFocused'),
searchQuery: state.get('searchQuery'),
showingDetails: state.get('nodeDetails').size > 0,
showingHelp: state.get('showingHelp'),
showingNetworkSelector: availableNetworksSelector(state).count() > 0,
showingTerminal: state.get('controlPipes').size > 0,
showingTroubleshootingMenu: state.get('showingTroubleshootingMenu'),
timeTravelSupported: timeTravelSupportedSelector(state),
timeTravelTransitioning: state.get('timeTravelTransitioning'),
topologyViewMode: state.get('topologyViewMode'),
urlState: getUrlState(state)
};
}
App.propTypes = {
disableStoreViewState: PropTypes.bool,
monitor: PropTypes.bool,
onRouteChange: PropTypes.func,
renderNodeDetailsExtras: PropTypes.func,
renderTimeTravel: PropTypes.func,
};
App.defaultProps = {
disableStoreViewState: false,
monitor: false,
onRouteChange: () => null,
renderNodeDetailsExtras: () => null,
renderTimeTravel: () => <TimeTravelWrapper />,
};
export default connect(mapStateToProps)(App);

View File

@@ -1,43 +0,0 @@
import React from 'react';
import PropTypes from 'prop-types';
import { connect } from 'react-redux';
class CloudFeature extends React.Component {
getChildContext() {
return {
store: this.context.serviceStore || this.context.store
};
}
render() {
if (process.env.WEAVE_CLOUD) {
return React.cloneElement(React.Children.only(this.props.children), {
params: this.context.router.params,
router: this.context.router
});
}
// also show if not in weave cloud?
if (this.props.alwaysShow) {
return React.cloneElement(React.Children.only(this.props.children));
}
return null;
}
}
/* eslint-disable react/forbid-prop-types */
// TODO: Remove this component as part of https://github.com/weaveworks/scope/issues/3278.
CloudFeature.contextTypes = {
router: PropTypes.object,
serviceStore: PropTypes.object,
store: PropTypes.object.isRequired
};
CloudFeature.childContextTypes = {
router: PropTypes.object,
store: PropTypes.object
};
/* eslint-enable react/forbid-prop-types */
export default connect()(CloudFeature);

View File

@@ -1,72 +0,0 @@
import React from 'react';
import { connect } from 'react-redux';
import filterInvalidDOMProps from 'filter-invalid-dom-props';
import CloudFeature from './cloud-feature';
/**
* CloudLink provides an anchor that allows setting a target
* composed of Weave Cloud related pieces.
*
* Relative links with a leading `/` that rewrite the browser URL are
* supported here, as are cloud-related placeholders (:instanceId).
*
* If no `url` is given, only the children are rendered (no anchor).
*
* If you want to render the content even if not on the cloud, set
* the `alwaysShow` property. A location redirect will be made for
* clicks instead.
*/
const CloudLink = ({ alwaysShow, ...props }) => (
<CloudFeature alwaysShow={alwaysShow}>
<LinkWrapper {...props} />
</CloudFeature>
);
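// A usage sketch (the URL is illustrative; ':instanceId' is rewritten from
// the router params by buildHref below):
//
//   <CloudLink alwaysShow url="/instances/:instanceId/monitor">
//     Open in Weave Cloud
//   </CloudLink>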
class LinkWrapper extends React.Component {
constructor(props, context) {
super(props, context);
this.handleClick = this.handleClick.bind(this);
this.buildHref = this.buildHref.bind(this);
}
handleClick(ev, href) {
ev.preventDefault();
if (!href) return;
const { router, onClick } = this.props;
if (onClick) {
onClick();
}
if (router && href[0] === '/') {
router.push(href);
} else {
window.location.href = href;
}
}
buildHref(url) {
const { params } = this.props;
if (!url || !params || !params.instanceId) return url;
return url.replace(/:instanceid/gi, encodeURIComponent(params.instanceId));
}
render() {
const { url, children, ...props } = this.props;
if (!url) {
return React.isValidElement(children) ? children : (<span>{children}</span>);
}
const href = this.buildHref(url);
return (
<a {...filterInvalidDOMProps(props)} href={href} onClick={e => this.handleClick(e, href)}>
{children}
</a>
);
}
}
export default connect()(CloudLink);

View File

@@ -1,382 +0,0 @@
/* eslint react/jsx-no-bind: "off" */
import React from 'react';
import { connect } from 'react-redux';
import {
sampleSize, sample, random, range, flattenDeep, times
} from 'lodash';
import { fromJS, Set as makeSet } from 'immutable';
import { hsl } from 'd3-color';
import debug from 'debug';
import ActionTypes from '../constants/action-types';
import { receiveNodesDelta } from '../actions/app-actions';
import { getNodeColor, getNodeColorDark, text2degree } from '../utils/color-utils';
import { availableMetricsSelector } from '../selectors/node-metric';
const SHAPES = ['square', 'hexagon', 'heptagon', 'circle'];
const STACK_VARIANTS = [false, true];
const METRIC_FILLS = [0, 0.1, 50, 99.9, 100];
const NETWORKS = [
'be', 'fe', 'zb', 'db', 're', 'gh', 'jk', 'lol', 'nw'
].map(n => ({ colorKey: n, id: n, label: n }));
const INTERNET = 'the-internet';
const LOREM = `Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor
incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in
voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non
proident, sunt in culpa qui officia deserunt mollit anim id est laborum.`;
const sampleArray = (collection, n = 4) => sampleSize(collection, random(n));
const log = debug('scope:debug-panel');
const shapeTypes = {
circle: ['Host', 'Hosts'],
heptagon: ['Pod', 'Pods'],
hexagon: ['Container', 'Containers'],
square: ['Process', 'Processes']
};
const LABEL_PREFIXES = range('A'.charCodeAt(), 'Z'.charCodeAt() + 1)
.map(n => String.fromCharCode(n));
const deltaAdd = (name, adjacency = [], shape = 'circle', stack = false, networks = NETWORKS) => ({
adjacency,
controls: {},
id: name,
label: name,
labelMinor: name,
latest: {},
networks,
origins: [],
rank: name,
shape,
stack
});
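// e.g. deltaAdd('node-1', ['node-2'], 'hexagon') produces a minimal node delta
// that receiveNodesDelta can merge into the store.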
function addMetrics(availableMetrics, node, v) {
const metrics = availableMetrics.size > 0 ? availableMetrics : fromJS([
{ id: 'host_cpu_usage_percent', label: 'CPU' }
]);
return Object.assign({}, node, {
metrics: metrics.map(m => Object.assign({}, m, {
id: 'zing', label: 'zing', max: 100, value: v
})).toJS()
});
}
function label(shape, stacked) {
const type = shapeTypes[shape];
return stacked ? `Group of ${type[1]}` : type[0];
}
function addAllVariants(dispatch) {
const newNodes = flattenDeep(STACK_VARIANTS.map(stack => (SHAPES.map((s) => {
if (!stack) return [deltaAdd(label(s, stack), [], s, stack)];
return times(3).map(() => deltaAdd(label(s, stack), [], s, stack));
}))));
dispatch(receiveNodesDelta({
add: newNodes
}));
}
function addAllMetricVariants(availableMetrics) {
const newNodes = flattenDeep(METRIC_FILLS.map((v, i) => (
SHAPES.map(s => [addMetrics(availableMetrics, deltaAdd(label(s) + i, [], s), v)])
)));
return (dispatch) => {
dispatch(receiveNodesDelta({
add: newNodes
}));
};
}
export function showingDebugToolbar() {
return (('debugToolbar' in localStorage && JSON.parse(localStorage.debugToolbar))
|| window.location.pathname.indexOf('debug') > -1);
}
export function toggleDebugToolbar() {
if ('debugToolbar' in localStorage) {
localStorage.debugToolbar = !showingDebugToolbar();
}
}
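// NB: the toggle only takes effect once a `debugToolbar` key exists in
// localStorage, e.g. after running `localStorage.debugToolbar = true` in the
// browser console.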
function enableLog(ns) {
debug.enable(`scope:${ns}`);
window.location.reload();
}
function disableLog() {
debug.disable();
window.location.reload();
}
function setAppState(fn) {
return (dispatch) => {
dispatch({
fn,
type: ActionTypes.DEBUG_TOOLBAR_INTERFERING
});
};
}
class DebugToolbar extends React.Component {
constructor(props, context) {
super(props, context);
this.onChange = this.onChange.bind(this);
this.toggleColors = this.toggleColors.bind(this);
this.addNodes = this.addNodes.bind(this);
this.intermittentTimer = null;
this.intermittentNodes = makeSet();
this.shortLivedTimer = null;
this.shortLivedNodes = makeSet();
this.state = {
nodesToAdd: 30,
showColors: false
};
}
onChange(ev) {
this.setState({ nodesToAdd: parseInt(ev.target.value, 10) });
}
toggleColors() {
this.setState(prevState => ({
showColors: !prevState.showColors
}));
}
asyncDispatch(v) {
setTimeout(() => this.props.dispatch(v), 0);
}
setLoading(loading) {
this.asyncDispatch(setAppState(state => state.set('topologiesLoaded', !loading)));
}
setIntermittent() {
// simulate ephemeral nodes
if (this.intermittentTimer) {
clearInterval(this.intermittentTimer);
this.intermittentTimer = null;
} else {
this.intermittentTimer = setInterval(() => {
// add new node
this.addNodes(1);
// remove random node
const ns = this.props.nodes;
const nodeNames = ns.keySeq().toJS();
const randomNode = sample(nodeNames);
this.asyncDispatch(receiveNodesDelta({
remove: [randomNode]
}));
}, 1000);
}
}
setShortLived() {
// simulate nodes with same ID popping in and out
if (this.shortLivedTimer) {
clearInterval(this.shortLivedTimer);
this.shortLivedTimer = null;
} else {
this.shortLivedTimer = setInterval(() => {
// filter random node
const ns = this.props.nodes;
const nodeNames = ns.keySeq().toJS();
const randomNode = sample(nodeNames);
if (randomNode) {
let nextNodes = ns.setIn([randomNode, 'filtered'], true);
this.shortLivedNodes = this.shortLivedNodes.add(randomNode);
// bring nodes back after a bit
if (this.shortLivedNodes.size > 5) {
const returningNode = this.shortLivedNodes.first();
this.shortLivedNodes = this.shortLivedNodes.rest();
nextNodes = nextNodes.setIn([returningNode, 'filtered'], false);
}
this.asyncDispatch(setAppState(state => state.set('nodes', nextNodes)));
}
}, 1000);
}
}
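// Stress-tests relayout: adds a few nodes, removes one, and randomly rewires
// the adjacency lists of a sample of the existing nodes.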
updateAdjacencies() {
const ns = this.props.nodes;
const nodeNames = ns.keySeq().toJS();
this.asyncDispatch(receiveNodesDelta({
add: this.createRandomNodes(7),
remove: this.randomExistingNode(),
update: sampleArray(nodeNames).map(n => ({
  adjacency: sampleArray(nodeNames),
  id: n,
})),
}));
}
createRandomNodes(n, prefix = 'zing') {
const ns = this.props.nodes;
const nodeNames = ns.keySeq().toJS();
const newNodeNames = range(ns.size, ns.size + n).map(i => (
// `${randomLetter()}${randomLetter()}-zing`
`${prefix}${i}`
));
const allNodes = nodeNames.concat(newNodeNames);
return newNodeNames.map(name => deltaAdd(
name,
sampleArray(allNodes),
sample(SHAPES),
sample(STACK_VARIANTS),
sampleArray(NETWORKS, 10)
));
}
addInternetNode() {
setTimeout(() => {
this.asyncDispatch(receiveNodesDelta({
add: [{
id: INTERNET, label: INTERNET, labelMinor: 'Outgoing packets', pseudo: true, shape: 'cloud'
}]
}));
}, 0);
}
addNodes(n, prefix = 'zing') {
setTimeout(() => {
this.asyncDispatch(receiveNodesDelta({
add: this.createRandomNodes(n, prefix)
}));
log('added nodes', n);
}, 0);
}
randomExistingNode() {
const ns = this.props.nodes;
const nodeNames = ns.keySeq().toJS();
return [nodeNames[random(nodeNames.length - 1)]];
}
removeNode() {
this.asyncDispatch(receiveNodesDelta({
remove: this.randomExistingNode()
}));
}
render() {
const { availableMetrics } = this.props;
return (
<div className="debug-panel">
<div>
<strong>Add nodes </strong>
<button type="button" onClick={() => this.addNodes(1)}>+1</button>
<button type="button" onClick={() => this.addNodes(10)}>+10</button>
<input type="number" onChange={this.onChange} value={this.state.nodesToAdd} />
<button type="button" onClick={() => this.addNodes(this.state.nodesToAdd)}>+</button>
<button type="button" onClick={() => this.asyncDispatch(addAllVariants)}>
Variants
</button>
<button
type="button"
onClick={() => this.asyncDispatch(addAllMetricVariants(availableMetrics))}>
Metric Variants
</button>
<button type="button" onClick={() => this.addNodes(1, LOREM)}>Long name</button>
<button type="button" onClick={() => this.addInternetNode()}>Internet</button>
<button type="button" onClick={() => this.removeNode()}>Remove node</button>
<button type="button" onClick={() => this.updateAdjacencies()}>Update adj.</button>
</div>
<div>
<strong>Logging </strong>
<button type="button" onClick={() => enableLog('*')}>scope:*</button>
<button type="button" onClick={() => enableLog('dispatcher')}>scope:dispatcher</button>
<button type="button" onClick={() => enableLog('app-key-press')}>
scope:app-key-press
</button>
<button type="button" onClick={() => enableLog('terminal')}>scope:terminal</button>
<button type="button" onClick={() => disableLog()}>Disable log</button>
</div>
<div>
<strong>Colors </strong>
<button type="button" onClick={this.toggleColors}>toggle</button>
</div>
{this.state.showColors
&& (
<table>
<tbody>
{LABEL_PREFIXES.map(r => (
<tr key={r}>
<td
title={r}
style={{ backgroundColor: hsl(text2degree(r), 0.5, 0.5).toString() }} />
</tr>
))}
</tbody>
</table>
)}
{this.state.showColors && [getNodeColor, getNodeColorDark].map(fn => (
<table key={fn.name}>
<tbody>
{LABEL_PREFIXES.map(r => (
<tr key={r}>
{LABEL_PREFIXES.map(c => (
<td key={c} title={`(${r}, ${c})`} style={{ backgroundColor: fn(r, c) }} />
))}
</tr>
))}
</tbody>
</table>
))}
<div>
<strong>State </strong>
<button type="button" onClick={() => this.setLoading(true)}>
Set doing initial load
</button>
<button type="button" onClick={() => this.setLoading(false)}>Stop</button>
</div>
<div>
<strong>Short-lived nodes </strong>
<button type="button" onClick={() => this.setShortLived()}>
Toggle short-lived nodes
</button>
<button type="button" onClick={() => this.setIntermittent()}>
Toggle intermittent nodes
</button>
</div>
</div>
);
}
}
function mapStateToProps(state) {
return {
availableMetrics: availableMetricsSelector(state),
nodes: state.get('nodes'),
};
}
export default connect(mapStateToProps)(DebugToolbar);
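For orientation, a minimal sketch of how this panel might be mounted in an app shell; the import path and the wrapper component are hypothetical, while DebugToolbar and showingDebugToolbar are the exports above:

import React from 'react';
import DebugToolbar, { showingDebugToolbar } from './debug-toolbar'; // hypothetical path

// Render the debug panel only when the localStorage flag or URL marker is set.
const DebugToolbarMount = () => (showingDebugToolbar() ? <DebugToolbar /> : null);

export default DebugToolbarMount;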

@@ -1,80 +0,0 @@
import React from 'react';
import { connect } from 'react-redux';
import NodeDetails from './node-details';
import EmbeddedTerminal from './embedded-terminal';
import {
DETAILS_PANEL_WIDTH as WIDTH,
DETAILS_PANEL_OFFSET as OFFSET,
DETAILS_PANEL_MARGINS as MARGINS
} from '../constants/styles';
class DetailsCard extends React.Component {
constructor(props, context) {
super(props, context);
this.state = {
mounted: null
};
}
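// Flip `mounted` on the next tick so the origin-sized transform paints first
// and the CSS transition to the full-size panel can run.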
componentDidMount() {
  this.mountedTimeout = setTimeout(() => {
    this.setState({mounted: true});
  });
}
componentWillUnmount() {
  clearTimeout(this.mountedTimeout);
}
render() {
let transform;
const { origin, showingTerminal } = this.props;
const panelHeight = window.innerHeight - MARGINS.bottom - MARGINS.top;
if (origin && !this.state.mounted) {
// render small panel near origin, will transition into normal panel after being mounted
const scaleY = origin.height / (window.innerHeight - MARGINS.bottom - MARGINS.top) / 2;
const scaleX = origin.width / WIDTH / 2;
const centerX = window.innerWidth - MARGINS.right - (WIDTH / 2);
const centerY = (panelHeight / 2) + MARGINS.top;
const dx = (origin.left + (origin.width / 2)) - centerX;
const dy = (origin.top + (origin.height / 2)) - centerY;
transform = `translate(${dx}px, ${dy}px) scale(${scaleX},${scaleY})`;
} else {
// stack effect: shift top cards to the left, shrink lower cards vertically
const shiftX = -1 * this.props.index * OFFSET;
const position = this.props.cardCount - this.props.index - 1; // reverse index
const scaleY = (position === 0) ? 1 : (panelHeight - (2 * OFFSET * position)) / panelHeight;
if (scaleY !== 1) {
transform = `translateX(${shiftX}px) scaleY(${scaleY})`;
} else {
// scale(1) is sometimes blurry
transform = `translateX(${shiftX}px)`;
}
}
const style = {
left: showingTerminal ? MARGINS.right : null,
transform,
width: showingTerminal ? null : WIDTH
};
return (
<div className="details-wrapper" style={style}>
{showingTerminal && <EmbeddedTerminal />}
<NodeDetails
key={this.props.id}
nodeId={this.props.id}
mounted={this.state.mounted}
renderNodeDetailsExtras={this.props.renderNodeDetailsExtras}
{...this.props}
/>
</div>
);
}
}
function mapStateToProps(state, props) {
const pipe = state.get('controlPipes').last();
return {
showingTerminal: pipe && pipe.get('nodeId') === props.id,
};
}
export default connect(mapStateToProps)(DetailsCard);

@@ -1,34 +0,0 @@
import React from 'react';
import { connect } from 'react-redux';
import DetailsCard from './details-card';
class Details extends React.Component {
render() {
const { controlStatus, details } = this.props;
// render all details as cards, later cards go on top
return (
<div className="details">
{details.toIndexedSeq().map((obj, index) => (
<DetailsCard
key={obj.id}
index={index}
cardCount={details.size}
nodeControlStatus={controlStatus.get(obj.id)}
renderNodeDetailsExtras={this.props.renderNodeDetailsExtras}
{...obj}
/>
))}
</div>
);
}
}
function mapStateToProps(state) {
return {
controlStatus: state.get('controlStatus'),
details: state.get('nodeDetails')
};
}
export default connect(mapStateToProps)(Details);

@@ -1,80 +0,0 @@
import React from 'react';
import { connect } from 'react-redux';
import { brightenColor, getNodeColorDark } from '../utils/color-utils';
import { DETAILS_PANEL_WIDTH, DETAILS_PANEL_MARGINS } from '../constants/styles';
import Terminal from './terminal';
class EmbeddedTerminal extends React.Component {
constructor(props, context) {
super(props, context);
this.state = {
animated: null,
mounted: null,
};
this.handleTransitionEnd = this.handleTransitionEnd.bind(this);
}
componentDidMount() {
this.mountedTimeout = setTimeout(() => {
this.setState({mounted: true});
});
this.animationTimeout = setTimeout(() => {
this.setState({ animated: true });
}, 2000);
}
componentWillUnmount() {
clearTimeout(this.mountedTimeout);
clearTimeout(this.animationTimeout);
}
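// Before `mounted` flips, offset the terminal to the details panel's
// x-position; afterwards slide it to 0 so it appears to expand out of the panel.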
getTransform() {
const dx = this.state.mounted ? 0
: window.innerWidth - DETAILS_PANEL_WIDTH - DETAILS_PANEL_MARGINS.right;
return `translateX(${dx}px)`;
}
handleTransitionEnd() {
this.setState({ animated: true });
}
render() {
const { pipe, details } = this.props;
const nodeId = pipe.get('nodeId');
const node = details.get(nodeId);
const d = node && node.details;
const titleBarColor = d && getNodeColorDark(d.rank, d.label, d.pseudo);
const statusBarColor = d && brightenColor(titleBarColor);
const title = d && d.label;
// React unmount/remounts when key changes, this is important for cleaning up
// the term.js and creating a new one for the new pipe.
return (
<div className="tour-step-anchor terminal-embedded">
<div
onTransitionEnd={this.handleTransitionEnd}
className="terminal-animation-wrapper"
style={{transform: this.getTransform()}}>
<Terminal
key={pipe.get('id')}
pipe={pipe}
connect={this.state.animated}
titleBarColor={titleBarColor}
statusBarColor={statusBarColor}
title={title} />
</div>
</div>
);
}
}
function mapStateToProps(state) {
return {
details: state.get('nodeDetails'),
pipe: state.get('controlPipes').last()
};
}
export default connect(mapStateToProps)(EmbeddedTerminal);

@@ -1,126 +0,0 @@
import React from 'react';
import { connect } from 'react-redux';
import Plugins from './plugins';
import { trackAnalyticsEvent } from '../utils/tracking-utils';
import {
clickDownloadGraph,
clickForceRelayout,
toggleHelp,
toggleTroubleshootingMenu,
setContrastMode
} from '../actions/app-actions';
class Footer extends React.Component {
handleContrastClick = (ev) => {
ev.preventDefault();
this.props.setContrastMode(!this.props.contrastMode);
}
handleRelayoutClick = (ev) => {
ev.preventDefault();
trackAnalyticsEvent('scope.layout.refresh.click', {
layout: this.props.topologyViewMode,
});
this.props.clickForceRelayout();
}
render() {
const {
hostname, version, versionUpdate, contrastMode
} = this.props;
const otherContrastModeTitle = contrastMode
? 'Switch to normal contrast' : 'Switch to high contrast';
const forceRelayoutTitle = 'Force re-layout (might reduce edge crossings, '
+ 'but may shift nodes around)';
const versionUpdateTitle = versionUpdate
  ? `New version available: ${versionUpdate.get('version')}. Click to download.`
  : '';
return (
<div className="footer">
<div className="footer-status">
{versionUpdate
&& (
<a
className="footer-versionupdate"
title={versionUpdateTitle}
href={versionUpdate.get('downloadUrl')}
target="_blank"
rel="noopener noreferrer">
Update available:
{' '}
{versionUpdate.get('version')}
</a>
)
}
<span className="footer-label">Version</span>
{version || '...'}
<span className="footer-label">on</span>
{hostname}
</div>
<div className="footer-plugins">
<Plugins />
</div>
<div className="footer-tools">
<button
type="button"
className="footer-icon"
onClick={this.handleRelayoutClick}
title={forceRelayoutTitle}>
<i className="fa fa-sync" />
</button>
<button
type="button"
onClick={this.handleContrastClick}
className="footer-icon"
title={otherContrastModeTitle}>
<i className="fa fa-adjust" />
</button>
<button
type="button"
onClick={this.props.toggleTroubleshootingMenu}
className="footer-icon"
title="Open troubleshooting menu"
href=""
>
<i className="fa fa-bug" />
</button>
<button
type="button"
className="footer-icon"
onClick={this.props.toggleHelp}
title="Show help">
<i className="fa fa-question" />
</button>
</div>
</div>
);
}
}
function mapStateToProps(state) {
return {
contrastMode: state.get('contrastMode'),
hostname: state.get('hostname'),
topologyViewMode: state.get('topologyViewMode'),
version: state.get('version'),
versionUpdate: state.get('versionUpdate'),
};
}
export default connect(
mapStateToProps,
{
clickDownloadGraph,
clickForceRelayout,
setContrastMode,
toggleHelp,
toggleTroubleshootingMenu
}
)(Footer);

File diff suppressed because it is too large

@@ -1,232 +0,0 @@
import React from 'react';
import { connect } from 'react-redux';
import { searchableFieldsSelector } from '../selectors/search';
import { canvasMarginsSelector } from '../selectors/canvas';
import { hideHelp } from '../actions/app-actions';
const GENERAL_SHORTCUTS = [
{ key: 'esc', label: 'Close active panel' },
{ key: '/', label: 'Activate search field' },
{ key: '?', label: 'Toggle shortcut menu' },
{ key: 'g', label: 'Switch to Graph view' },
{ key: 't', label: 'Switch to Table view' },
{ key: 'r', label: 'Switch to Resources view' },
];
const CANVAS_METRIC_SHORTCUTS = [
{ key: '<', label: 'Select and pin previous metric' },
{ key: '>', label: 'Select and pin next metric' },
{ key: 'q', label: 'Unpin current metric' },
];
function renderShortcuts(cuts) {
return (
<div>
{cuts.map(({ key, label }) => (
<div key={key} className="help-panel-shortcuts-shortcut">
<div className="key"><kbd>{key}</kbd></div>
<div className="label">{label}</div>
</div>
))}
</div>
);
}
function renderShortcutPanel() {
return (
<div className="help-panel-shortcuts">
<h2>Shortcuts</h2>
<h3>General</h3>
{renderShortcuts(GENERAL_SHORTCUTS)}
<h3>Canvas Metrics</h3>
{renderShortcuts(CANVAS_METRIC_SHORTCUTS)}
</div>
);
}
const BASIC_SEARCHES = [
{ label: 'All fields for foo', term: 'foo' },
{
label: (
<span>
Any field matching
<b>pid</b>
{' '}
for the value 12345
</span>
),
term: 'pid: 12345'
},
];
const REGEX_SEARCHES = [
{
label: 'All fields for foo or bar',
term: 'foo|bar'
},
{
label: (
<span>
<b>command</b>
{' '}
field for foobar or foobaz
</span>
),
term: 'command: foo(bar|baz)'
},
];
const METRIC_SEARCHES = [
{
label: (
<span>
<b>CPU</b>
{' '}
greater than 4%
</span>
),
term: 'cpu > 4%'
},
{
label: (
<span>
<b>Memory</b>
{' '}
less than 10 megabytes
</span>
),
term: 'memory < 10mb'
},
];
function renderSearches(searches) {
return (
<div>
{searches.map(({ term, label }) => (
<div key={term} className="help-panel-search-row">
<div className="help-panel-search-row-term">
<i className="fa fa-search search-label-icon" />
{term}
</div>
<div className="help-panel-search-row-term-label">{label}</div>
</div>
))}
</div>
);
}
function renderSearchPanel() {
return (
<div className="help-panel-search">
<h2>Search</h2>
<h3>Basics</h3>
{renderSearches(BASIC_SEARCHES)}
<h3>Regular expressions</h3>
{renderSearches(REGEX_SEARCHES)}
<h3>Metrics</h3>
{renderSearches(METRIC_SEARCHES)}
</div>
);
}
function renderFieldsPanel(currentTopologyName, searchableFields) {
const none = (
<span style={{ fontStyle: 'italic' }}>None</span>
);
const currentTopology = (
<span className="help-panel-fields-current-topology">
{currentTopologyName}
</span>
);
return (
<div className="help-panel-fields">
<h2>Fields and Metrics</h2>
<p>
Searchable fields and metrics in the
{' '}
<br />
currently selected
{' '}
{currentTopology}
{' '}
topology:
</p>
<div className="help-panel-fields-fields">
<div className="help-panel-fields-fields-column">
<h3>Fields</h3>
<div className="help-panel-fields-fields-column-content">
{searchableFields.get('fields').map(f => (
<div key={f}>{f}</div>
))}
{searchableFields.get('fields').size === 0 && none}
</div>
</div>
<div className="help-panel-fields-fields-column">
<h3>Metrics</h3>
<div className="help-panel-fields-fields-column-content">
{searchableFields.get('metrics').map(m => (
<div key={m}>{m}</div>
))}
{searchableFields.get('metrics').size === 0 && none}
</div>
</div>
</div>
</div>
);
}
function HelpPanel({
currentTopologyName, searchableFields, onClickClose, canvasMargins
}) {
return (
<div className="help-panel-wrapper">
<div className="help-panel" style={{ marginTop: canvasMargins.top }}>
<div className="help-panel-header">
<h2>Help</h2>
</div>
<div className="help-panel-main">
{renderShortcutPanel()}
{renderSearchPanel()}
{renderFieldsPanel(currentTopologyName, searchableFields)}
</div>
<div className="help-panel-tools">
<i
title="Close details"
className="fa fa-times"
onClick={onClickClose}
/>
</div>
</div>
</div>
);
}
function mapStateToProps(state) {
return {
canvasMargins: canvasMarginsSelector(state),
currentTopologyName: state.getIn(['currentTopology', 'fullName']),
searchableFields: searchableFieldsSelector(state)
};
}
export default connect(mapStateToProps, {
onClickClose: hideHelp
})(HelpPanel);

@@ -1,56 +0,0 @@
import React from 'react';
import { sample } from 'lodash';
import { findTopologyById } from '../utils/topology-utils';
import NodesError from '../charts/nodes-error';
const LOADING_TEMPLATES = [
'Loading THINGS',
'Verifying THINGS',
'Fetching THINGS',
'Processing THINGS',
'Reticulating THINGS',
'Locating THINGS',
'Optimizing THINGS',
'Transporting THINGS',
];
export function getNodeType(topology, topologies) {
if (!topology || topologies.size === 0) {
return '';
}
let name = topology.get('name');
if (topology.get('parentId')) {
const parentTopology = findTopologyById(topologies, topology.get('parentId'));
name = parentTopology.get('name');
}
return name.toLowerCase();
}
function renderTemplate(nodeType, template) {
return template.replace('THINGS', nodeType);
}
export class Loading extends React.Component {
constructor(props, context) {
super(props, context);
this.state = {
template: sample(LOADING_TEMPLATES)
};
}
render() {
const { itemType, show } = this.props;
const message = renderTemplate(itemType, this.state.template);
return (
<NodesError mainClassName="nodes-chart-loading" faIconClass="far fa-circle" hidden={!show}>
<div className="heading">{message}</div>
</NodesError>
);
}
}

@@ -1,72 +0,0 @@
/* eslint react/jsx-first-prop-new-line: "off" */
/* eslint max-len: "off" */
import React from 'react';
export default function Logo({ transform = '' }) {
return (
<g className="logo" transform={transform}>
<path fill="#32324B" d="M114.937,118.165l75.419-67.366c-5.989-4.707-12.71-8.52-19.981-11.211l-55.438,49.52V118.165z" />
<path fill="#32324B" d="M93.265,108.465l-20.431,18.25c1.86,7.57,4.88,14.683,8.87,21.135l11.561-10.326V108.465z" />
<path fill="#00D2FF"
d="M155.276,53.074V35.768C151.815,35.27,148.282,35,144.685,35c-3.766,0-7.465,0.286-11.079,0.828v36.604
L155.276,53.074z" />
<path fill="#00D2FF"
d="M155.276,154.874V82.133l-21.671,19.357v80.682c3.614,0.543,7.313,0.828,11.079,0.828
c4.41,0,8.723-0.407,12.921-1.147l58.033-51.838c1.971-6.664,3.046-13.712,3.046-21.015c0-3.439-0.254-6.817-0.708-10.132
L155.276,154.874z" />
<path fill="#FF4B19" d="M155.276,133.518l58.14-51.933c-2.77-6.938-6.551-13.358-11.175-19.076l-46.965,41.951V133.518z" />
<path fill="#FF4B19"
d="M133.605,123.817l-18.668,16.676V41.242c-8.086,3.555-15.409,8.513-21.672,14.567V162.19
c4.885,4.724,10.409,8.787,16.444,12.03l23.896-21.345V123.817z" />
<polygon fill="#32324B"
points="325.563,124.099 339.389,72.22 357.955,72.22 337.414,144.377 315.556,144.377 303.311,95.79
291.065,144.377 269.207,144.377 248.666,72.22 267.232,72.22 281.058,124.099 294.752,72.22 311.869,72.22 " />
<path fill="#32324B"
d="M426.429,120.676c-2.106,14.352-13.167,24.623-32.128,24.623c-20.146,0-35.025-12.114-35.025-36.605
c0-24.622,15.406-37.395,35.025-37.395c21.726,0,33.182,15.933,33.182,37.263v3.819h-49.772c0,8.031,3.291,18.17,16.327,18.17
c7.242,0,12.904-3.555,14.353-10.27L426.429,120.676z M408.654,99.608c-0.659-10.008-7.11-13.694-14.484-13.694
c-8.427,0-14.879,5.135-15.801,13.694H408.654z" />
<path fill="#32324B"
d="M480.628,97.634v-2.502c0-5.662-2.37-9.351-13.036-9.351c-13.298,0-13.694,7.375-13.694,9.877h-17.117
c0-10.666,4.477-24.359,31.338-24.359c25.676,0,30.285,12.771,30.285,23.174v39.766c0,2.897,0.131,5.267,0.395,7.11l0.527,3.028
h-18.172v-7.241c-5.134,5.134-12.245,8.163-22.384,8.163c-14.221,0-25.018-8.296-25.018-22.648c0-16.59,15.67-20.146,21.99-21.199
L480.628,97.634z M480.628,111.195l-6.979,1.054c-3.819,0.658-8.427,1.315-11.192,1.843c-3.029,0.527-5.662,1.186-7.637,2.765
c-1.844,1.449-2.765,3.425-2.765,5.926c0,2.107,0.79,8.69,10.666,8.69c5.793,0,10.928-2.105,13.693-4.872
c3.556-3.555,4.214-8.032,4.214-11.587V111.195z" />
<polygon fill="#32324B"
points="549.495,144.377 525.399,144.377 501.698,72.221 521.186,72.221 537.775,127.392 554.499,72.221
573.459,72.221 " />
<path fill="#32324B"
d="M641.273,120.676c-2.106,14.352-13.167,24.623-32.128,24.623c-20.146,0-35.025-12.114-35.025-36.605
c0-24.622,15.406-37.395,35.025-37.395c21.726,0,33.182,15.933,33.182,37.263v3.819h-49.772c0,8.031,3.291,18.17,16.327,18.17
c7.242,0,12.904-3.555,14.354-10.27L641.273,120.676z M623.498,99.608c-0.659-10.008-7.109-13.694-14.483-13.694
c-8.428,0-14.88,5.135-15.802,13.694H623.498z" />
<path fill="#32324B"
d="M682.976,80.873c-7.524,0-16.896,2.376-16.896,10.692c0,17.952,46.201,1.452,46.201,30.229
c0,9.637-5.676,22.309-30.229,22.309c-19.009,0-27.721-9.636-28.249-22.44h11.881c0.264,7.788,5.147,13.332,17.688,13.332
c14.52,0,17.952-6.204,17.952-12.54c0-13.332-24.421-7.788-37.753-15.181c-4.885-2.771-8.316-7.128-8.316-15.048
c0-11.616,10.824-20.461,27.853-20.461c20.989,0,27.193,12.145,27.589,20.196h-11.484
C698.685,83.381,691.556,80.873,682.976,80.873z" />
<path fill="#32324B"
d="M756.233,134.994c10.429,0,17.953-5.939,19.009-16.632h10.957c-1.98,17.028-13.597,25.74-29.966,25.74
c-18.744,0-32.076-12.012-32.076-35.905c0-23.76,13.464-36.433,32.209-36.433c16.104,0,27.721,8.712,29.568,25.213h-10.956
c-1.452-11.353-9.24-16.104-18.877-16.104c-12.012,0-20.856,8.448-20.856,27.324C735.245,127.471,744.485,134.994,756.233,134.994z
" />
<path fill="#32324B"
d="M830.418,144.103c-19.141,0-32.341-12.145-32.341-36.169c0-23.893,13.2-36.169,32.341-36.169
c19.009,0,32.209,12.145,32.209,36.169C862.627,132.091,849.427,144.103,830.418,144.103z M830.418,134.994
c12.145,0,21.12-7.392,21.12-27.061c0-19.536-8.976-27.061-21.12-27.061c-12.276,0-21.253,7.393-21.253,27.061
C809.165,127.603,818.142,134.994,830.418,134.994z" />
<path fill="#32324B"
d="M888.629,72.688v10.692c3.96-6.732,12.54-11.616,22.969-11.616c19.009,0,30.757,12.673,30.757,36.169
c0,23.629-12.145,36.169-31.152,36.169c-10.429,0-18.745-4.224-22.573-11.22v35.641h-10.824V72.688H888.629z M910.409,134.994
c12.145,0,20.857-7.392,20.857-27.061c0-19.536-8.713-27.061-20.857-27.061c-12.275,0-21.912,7.393-21.912,27.061
C888.497,127.603,898.134,134.994,910.409,134.994z" />
<path fill="#32324B"
d="M1016.801,119.022c-1.452,12.408-10.032,25.08-30.229,25.08c-18.745,0-32.341-12.804-32.341-36.037
c0-21.912,13.464-36.301,32.209-36.301c19.8,0,30.757,14.784,30.757,38.018h-51.878c0.265,13.332,5.809,25.212,21.385,25.212
c11.484,0,18.217-7.128,19.141-16.104L1016.801,119.022z M1005.448,101.201c-1.056-14.916-9.636-20.328-19.272-20.328
c-10.824,0-19.141,7.26-20.46,20.328H1005.448z" />
</g>
);
}

@@ -1,56 +0,0 @@
import React from 'react';
import { MatchedText } from 'weaveworks-ui-components';
const SHOW_ROW_COUNT = 2;
const Match = (searchTerms, match) => (
<div className="matched-results-match" key={match.label}>
<div className="matched-results-match-wrapper">
<span className="matched-results-match-label">
{match.label}
:
</span>
<MatchedText
text={match.text}
matches={searchTerms}
/>
</div>
</div>
);
export default class MatchedResults extends React.PureComponent {
render() {
const { matches, searchTerms, style } = this.props;
if (!matches) {
return null;
}
let moreFieldMatches;
let moreFieldMatchesTitle;
if (matches.size > SHOW_ROW_COUNT) {
moreFieldMatches = matches
.valueSeq()
.skip(SHOW_ROW_COUNT)
.map(field => field.label);
moreFieldMatchesTitle = `More matches:\n${moreFieldMatches.join(',\n')}`;
}
return (
<div className="matched-results" style={style}>
{matches
.keySeq()
.take(SHOW_ROW_COUNT)
.map(fieldId => Match(searchTerms, matches.get(fieldId)))
}
{moreFieldMatches
&& (
<div className="matched-results-more" title={moreFieldMatchesTitle}>
{`${moreFieldMatches.size} more matches`}
</div>
)
}
</div>
);
}
}

@@ -1,111 +0,0 @@
import React from 'react';
const TRUNCATE_CONTEXT = 6;
const TRUNCATE_ELLIPSIS = '…';
/**
* Returns an array with chunks that cover the whole text via {start, length}
* objects.
*
* `('text', {start: 2, length: 1}) => [{text: 'te'}, {text: 'x', match: true}, {text: 't'}]`
*/
function chunkText(text, { start, length }) {
if (text && !window.isNaN(start) && !window.isNaN(length)) {
const chunks = [];
// text chunk before match
if (start > 0) {
chunks.push({text: text.substr(0, start)});
}
// matching chunk
chunks.push({match: true, offset: start, text: text.substr(start, length)});
// text after match
const remaining = start + length;
if (remaining < text.length) {
chunks.push({text: text.substr(remaining)});
}
return chunks;
}
return [{ text }];
}
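// Worked example (values chosen for illustration):
//   chunkText('application', { start: 3, length: 4 })
//   => [{ text: 'app' }, { match: true, offset: 3, text: 'lica' }, { text: 'tion' }]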
/**
* Truncates chunks with ellipsis
*
* First chunk is truncated from left, second chunk (match) is truncated in the
* middle, last chunk is truncated at the end, e.g.
* `[{text: "...cation is a "}, {text: "useful...or not"}, {text: "tool..."}]`
*/
function truncateChunks(chunks, text, maxLength) {
if (chunks && chunks.length === 3 && maxLength && text && text.length > maxLength) {
const res = chunks.map(c => Object.assign({}, c));
let needToCut = text.length - maxLength;
// truncate end
const end = res[2];
if (end.text.length > TRUNCATE_CONTEXT) {
needToCut -= end.text.length - TRUNCATE_CONTEXT;
end.text = `${end.text.substr(0, TRUNCATE_CONTEXT)}${TRUNCATE_ELLIPSIS}`;
}
if (needToCut) {
// truncate front
const start = res[0];
if (start.text.length > TRUNCATE_CONTEXT) {
needToCut -= start.text.length - TRUNCATE_CONTEXT;
start.text = `${TRUNCATE_ELLIPSIS}`
+ `${start.text.substr(start.text.length - TRUNCATE_CONTEXT)}`;
}
}
if (needToCut) {
// truncate match
const middle = res[1];
if (middle.text.length > 2 * TRUNCATE_CONTEXT) {
middle.text = `${middle.text.substr(0, TRUNCATE_CONTEXT)}`
+ `${TRUNCATE_ELLIPSIS}`
+ `${middle.text.substr(middle.text.length - TRUNCATE_CONTEXT)}`;
}
}
return res;
}
return chunks;
}
/**
* Renders text with highlighted search match.
*
* A match object is of shape `{text, label, match}`.
* `match` is a text match object of shape `{start, length}`
* that delimit text matches in `text`. `label` shows the origin of the text.
*/
export default class MatchedText extends React.PureComponent {
render() {
const {
match, text, truncate, maxLength
} = this.props;
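// Show the full text when truncation is off, or when truncating would clip
// the matched range.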
const showFullValue = !truncate || (match && (match.start + match.length) > truncate);
const displayText = showFullValue ? text : text.slice(0, truncate);
if (!match) {
return <span>{displayText}</span>;
}
const chunks = chunkText(displayText, match);
return (
<span className="matched-text" title={text}>
{truncateChunks(chunks, displayText, maxLength).map((chunk) => {
if (chunk.match) {
return (
<span className="match" key={chunk.offset}>
{chunk.text}
</span>
);
}
return chunk.text;
})}
</span>
);
}
}
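A minimal usage sketch; the prop values are illustrative, not taken from the codebase:

<MatchedText
  text="application"
  match={{ start: 3, length: 4 }}
  truncate={20}
  maxLength={12} />

With these values the component renders 'app', a highlighted 'lica', and 'tion'; ellipsis truncation would only kick in if the text were longer than maxLength.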

@@ -1,79 +0,0 @@
import React from 'react';
import classNames from 'classnames';
import { connect } from 'react-redux';
import { hoverMetric, pinMetric, unpinMetric } from '../actions/app-actions';
import { selectedMetricTypeSelector } from '../selectors/node-metric';
import { trackAnalyticsEvent } from '../utils/tracking-utils';
class MetricSelectorItem extends React.Component {
constructor(props, context) {
super(props, context);
this.onMouseOver = this.onMouseOver.bind(this);
this.onMouseClick = this.onMouseClick.bind(this);
}
trackEvent(eventName) {
trackAnalyticsEvent(eventName, {
layout: this.props.topologyViewMode,
metricType: this.props.metric.get('label'),
parentTopologyId: this.props.currentTopology.get('parentId'),
topologyId: this.props.currentTopology.get('id'),
});
}
onMouseOver() {
const metricType = this.props.metric.get('label');
this.props.hoverMetric(metricType);
}
onMouseClick() {
const metricType = this.props.metric.get('label');
const { pinnedMetricType } = this.props;
if (metricType !== pinnedMetricType) {
this.trackEvent('scope.metric.selector.pin.click');
this.props.pinMetric(metricType);
} else {
this.trackEvent('scope.metric.selector.unpin.click');
this.props.unpinMetric();
}
}
render() {
const { metric, selectedMetricType, pinnedMetricType } = this.props;
const type = metric.get('label');
const isPinned = (type === pinnedMetricType);
const isSelected = (type === selectedMetricType);
const className = classNames('metric-selector-action', {
'metric-selector-action-selected': isSelected
});
return (
<div
key={type}
className={className}
onMouseOver={this.onMouseOver}
onClick={this.onMouseClick}>
{type}
{isPinned && <i className="fa fa-thumbtack" />}
</div>
);
}
}
function mapStateToProps(state) {
return {
currentTopology: state.get('currentTopology'),
pinnedMetricType: state.get('pinnedMetricType'),
selectedMetricType: selectedMetricTypeSelector(state),
topologyViewMode: state.get('topologyViewMode'),
};
}
export default connect(
mapStateToProps,
{ hoverMetric, pinMetric, unpinMetric }
)(MetricSelectorItem);

Some files were not shown because too many files have changed in this diff