Mirror of https://github.com/weaveworks/scope.git (synced 2026-03-03 02:00:43 +00:00)
Merge pull request #652 from weaveworks/release-0.10.0
Changelog for 0.10.0
CHANGELOG.md (36 lines changed)
@@ -1,3 +1,39 @@
## Release 0.10.0

Notes:
- Due to the Scope UI now being able to start/stop/restart Docker
  containers, it is not wise to have it accessible to untrusted
  parties.

New features:
- Add lifecycle controls (start/stop/restart) for Docker containers
  [#598](https://github.com/weaveworks/scope/pull/598)
  [#642](https://github.com/weaveworks/scope/pull/642)
- Add sparklines to the UI for some metrics
  [#622](https://github.com/weaveworks/scope/pull/622)
- Show a message when the selected topology is empty
  [#505](https://github.com/weaveworks/scope/pull/505)

Bug fixes:
- Change node layout incrementally to reduce re-layouts
  [#593](https://github.com/weaveworks/scope/pull/593)
- Improve responsiveness of UI updates to container state changes
  [#628](https://github.com/weaveworks/scope/pull/628)
  [#640](https://github.com/weaveworks/scope/pull/640)
- Handle DNS resolution to a set of names
  [#639](https://github.com/weaveworks/scope/pull/639)
- Correctly show node counts for sub-topologies
  [#621](https://github.com/weaveworks/scope/issues/621)
- Allow scope to start after being upgraded
  [#617](https://github.com/weaveworks/scope/pull/617)
- Prevent stranded pseudo-nodes from appearing in the container view
  [#627](https://github.com/weaveworks/scope/pull/627)
  [#674](https://github.com/weaveworks/scope/pull/674)
- Parallelise and improve the testing infrastructure
  [#614](https://github.com/weaveworks/scope/pull/614)
  [#618](https://github.com/weaveworks/scope/pull/618)
  [#644](https://github.com/weaveworks/scope/pull/644)

## Release 0.9.0

New features:
app/static.go (7897 lines changed): file diff suppressed because it is too large
@@ -103,7 +103,7 @@ while true; do
             shift
         fi
         PROBE_ARGS="$PROBE_ARGS -token=$ARG_VALUE"
-        echo "scope.weave.works:443" >/etc/weave/apps
+        echo "scope.weave.works:80" >/etc/weave/apps
         touch /etc/service/app/down
         ;;
     --no-app)
@@ -84,6 +84,7 @@ type Container interface {
 	PID() int
 	Hostname() string
 	GetNode(string, []net.IP) report.Node
+	State() string

 	StartGatheringStats() error
 	StopGatheringStats()
@@ -131,6 +132,15 @@ func (c *container) Hostname() string {
 		c.container.Config.Domainname)
 }

+func (c *container) State() string {
+	if c.container.State.Paused {
+		return StatePaused
+	} else if c.container.State.Running {
+		return StateRunning
+	}
+	return StateStopped
+}
+
 func (c *container) StartGatheringStats() error {
 	c.Lock()
 	defer c.Unlock()
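The new State() accessor above resolves Docker's state flags with a fixed precedence: Paused wins over Running, and anything else reports as stopped. Below is a minimal, self-contained sketch of that precedence; the containerState struct and the constant string values are stand-ins (assumptions), not Scope's or go-dockerclient's actual definitions.

```go
package main

import "fmt"

// Stand-ins for the docker package's state constants; the string values
// here are assumptions, only the precedence matters.
const (
	StatePaused  = "paused"
	StateRunning = "running"
	StateStopped = "stopped"
)

// containerState mirrors the two flags consulted by the new State() method.
type containerState struct {
	Paused  bool
	Running bool
}

// state reproduces the precedence from the diff: Paused beats Running,
// everything else is reported as stopped.
func state(s containerState) string {
	if s.Paused {
		return StatePaused
	} else if s.Running {
		return StateRunning
	}
	return StateStopped
}

func main() {
	for _, s := range []containerState{
		{Paused: true, Running: true}, // a paused container may still report Running
		{Running: true},
		{},
	} {
		fmt.Printf("%+v -> %s\n", s, state(s))
	}
}
```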
@@ -297,14 +307,7 @@ func (c *container) GetNode(hostID string, localAddrs []net.IP) report.Node {
 		ipsWithScopes = append(ipsWithScopes, report.MakeScopedAddressNodeID(hostID, ip))
 	}

-	var state string
-	if c.container.State.Paused {
-		state = StatePaused
-	} else if c.container.State.Running {
-		state = StateRunning
-	} else {
-		state = StateStopped
-	}
+	state := c.State()

 	result := report.MakeNodeWith(map[string]string{
 		ContainerID: c.ID(),
@@ -105,8 +105,8 @@ func TestContainer(t *testing.T) {
 	if c.Image() != "baz" {
 		t.Errorf("%s != baz", c.Image())
 	}
-	if c.PID() != 1 {
-		t.Errorf("%d != 1", c.PID())
+	if c.PID() != 2 {
+		t.Errorf("%d != 2", c.PID())
 	}
 	if have := docker.ExtractContainerIPs(c.GetNode("", []net.IP{})); !reflect.DeepEqual(have, []string{"1.2.3.4"}) {
 		t.Errorf("%v != %v", have, []string{"1.2.3.4"})
@@ -252,11 +252,17 @@ func (r *registry) updateContainerState(containerID string) {
 	if !ok {
 		c = NewContainerStub(dockerContainer)
 		r.containers[containerID] = c
-		r.containersByPID[dockerContainer.State.Pid] = c
 	} else {
+		// potentially remove existing pid mapping.
+		delete(r.containersByPID, c.PID())
 		c.UpdateState(dockerContainer)
 	}

+	// Update PID index
+	if c.PID() > 1 {
+		r.containersByPID[c.PID()] = c
+	}
+
 	// Trigger anyone watching for updates
 	for _, f := range r.watchers {
 		f(c)
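The registry hunk above re-indexes a container by PID on every state update: any stale PID key is deleted first, and the container is re-inserted only when it has a usable PID (the `> 1` guard keeps out stopped containers, which no longer have a meaningful PID, and anything that would collide with the host's init). A minimal sketch of that pattern with generic types, not Scope's registry or its locking:

```go
package main

import "fmt"

// item is a stand-in for a container whose PID can change across state updates.
type item struct {
	id  string
	pid int
}

// reindexByPID drops any stale PID key for the item and re-inserts it under
// its current PID, skipping PIDs <= 1 (a stopped container has no usable PID).
// This mirrors the update pattern in the diff above, with generic types.
func reindexByPID(byPID map[int]*item, oldPID int, it *item) {
	delete(byPID, oldPID)
	if it.pid > 1 {
		byPID[it.pid] = it
	}
}

func main() {
	c := &item{id: "ping", pid: 2}
	byPID := map[int]*item{2: c}

	// The container stops: it leaves the PID index.
	oldPID := c.pid
	c.pid = 0
	reindexByPID(byPID, oldPID, c)
	fmt.Println(len(byPID)) // 0

	// It restarts with a fresh PID and is indexed again.
	c.pid = 42
	reindexByPID(byPID, 0, c)
	fmt.Println(byPID[42].id) // ping
}
```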
@@ -38,6 +38,10 @@ func (c *mockContainer) Hostname() string {
 	return ""
 }

+func (c *mockContainer) State() string {
+	return docker.StateRunning
+}
+
 func (c *mockContainer) StartGatheringStats() error {
 	return nil
 }
@@ -133,7 +137,7 @@ var (
 		ID:    "ping",
 		Name:  "pong",
 		Image: "baz",
-		State: client.State{Pid: 1, Running: true},
+		State: client.State{Pid: 2, Running: true},
 		NetworkSettings: &client.NetworkSettings{
 			IPAddress: "1.2.3.4",
 			Ports: map[client.Port][]client.PortBinding{
@@ -249,7 +253,7 @@ func TestLookupByPID(t *testing.T) {
 	test.Poll(t, 100*time.Millisecond, want, func() interface{} {
 		var have docker.Container
 		registry.LockedPIDLookup(func(lookup func(int) docker.Container) {
-			have = lookup(1)
+			have = lookup(2)
 		})
 		return have
 	})
@@ -41,7 +41,7 @@ func (r *mockRegistry) WatchContainerUpdates(_ docker.ContainerUpdateWatcher) {}
 var (
 	mockRegistryInstance = &mockRegistry{
 		containersByPID: map[int]docker.Container{
-			1: &mockContainer{container1},
+			2: &mockContainer{container1},
 		},
 		images: map[string]*client.APIImages{
 			"baz": &apiImage1,
@@ -78,7 +78,7 @@ func (t *Tagger) tag(tree process.Tree, topology *report.Topology) {
 		}
 	})

-	if c == nil {
+	if c == nil || c.State() == StateStopped || c.PID() == 1 {
 		continue
 	}

@@ -28,22 +28,22 @@ func TestTagger(t *testing.T) {
 	defer func() { docker.NewProcessTreeStub = oldProcessTree }()

 	docker.NewProcessTreeStub = func(_ process.Walker) (process.Tree, error) {
-		return &mockProcessTree{map[int]int{2: 1}}, nil
+		return &mockProcessTree{map[int]int{3: 2}}, nil
 	}

 	var (
-		pid1NodeID = report.MakeProcessNodeID("somehost.com", "1")
-		pid2NodeID = report.MakeProcessNodeID("somehost.com", "2")
+		pid1NodeID = report.MakeProcessNodeID("somehost.com", "2")
+		pid2NodeID = report.MakeProcessNodeID("somehost.com", "3")
 		wantNode   = report.MakeNodeWith(map[string]string{docker.ContainerID: "ping"})
 	)

 	input := report.MakeReport()
-	input.Process.AddNode(pid1NodeID, report.MakeNodeWith(map[string]string{"pid": "1"}))
-	input.Process.AddNode(pid2NodeID, report.MakeNodeWith(map[string]string{"pid": "2"}))
+	input.Process.AddNode(pid1NodeID, report.MakeNodeWith(map[string]string{process.PID: "2"}))
+	input.Process.AddNode(pid2NodeID, report.MakeNodeWith(map[string]string{process.PID: "3"}))

 	want := report.MakeReport()
-	want.Process.AddNode(pid1NodeID, report.MakeNodeWith(map[string]string{"pid": "1"}).Merge(wantNode))
-	want.Process.AddNode(pid2NodeID, report.MakeNodeWith(map[string]string{"pid": "2"}).Merge(wantNode))
+	want.Process.AddNode(pid1NodeID, report.MakeNodeWith(map[string]string{process.PID: "2"}).Merge(wantNode))
+	want.Process.AddNode(pid2NodeID, report.MakeNodeWith(map[string]string{process.PID: "3"}).Merge(wantNode))

 	tagger := docker.NewTagger(mockRegistryInstance, nil)
 	have, err := tagger.Tag(input)
@@ -101,21 +101,19 @@ var ContainerRenderer = MakeReduce(
 	// We need to be careful to ensure we only include each edge once. Edges brought in
 	// by the above renders will have a pid, so its enough to filter out any nodes with
 	// pids.
-	Map{
+	FilterUnconnected(Map{
 		MapFunc: MapIP2Container,
-		Renderer: FilterUnconnected(
-			MakeReduce(
-				Map{
-					MapFunc:  MapContainer2IP,
-					Renderer: SelectContainer,
-				},
-				Map{
-					MapFunc:  MapEndpoint2IP,
-					Renderer: SelectEndpoint,
-				},
-			),
-		),
-	},
+		Renderer: MakeReduce(
+			Map{
+				MapFunc:  MapContainer2IP,
+				Renderer: SelectContainer,
+			},
+			Map{
+				MapFunc:  MapEndpoint2IP,
+				Renderer: SelectEndpoint,
+			},
+		),
+	}),
 )

 type containerWithImageNameRenderer struct {
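The renderer hunk above moves FilterUnconnected from around the inner MakeReduce to around the whole MapIP2Container Map, so nodes left without edges after the IP-to-container mapping are dropped as well; this appears to be the stranded pseudo-node fix referenced in the changelog (#627, #674). As a conceptual illustration only, in plain Go rather than Scope's render types, filtering unconnected nodes amounts to dropping every node whose total degree is zero:

```go
package main

import "fmt"

// filterUnconnected drops nodes that have no edges at all, assuming the
// adjacency map lists outgoing edges and every node appears as a key.
// This is only a conceptual stand-in for Scope's FilterUnconnected renderer.
func filterUnconnected(adj map[string][]string) map[string][]string {
	// Count degree (in + out) for every node.
	degree := map[string]int{}
	for node, targets := range adj {
		degree[node] += len(targets)
		for _, t := range targets {
			degree[t]++
		}
	}
	out := map[string][]string{}
	for node, targets := range adj {
		if degree[node] > 0 {
			out[node] = targets
		}
	}
	return out
}

func main() {
	adj := map[string][]string{
		"container-a":     {"container-b"},
		"container-b":     {},
		"stranded-pseudo": {}, // no edges in either direction: dropped
	}
	fmt.Println(filterUnconnected(adj)) // keeps container-a and container-b only
}
```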