Merge remote-tracking branch 'origin/develop' into store_event_actions

pull/4/merge
David Baker 9 years ago
commit 928c575c6f
CHANGES.rst                           | 28
synapse/__init__.py                   |  2
synapse/handlers/message.py           | 25
synapse/notifier.py                   |  2
synapse/rest/client/v1/login.py       |  2
synapse/util/caches/snapshot_cache.py | 93
tests/util/test_snapshot_cache.py     | 60

@@ -1,3 +1,31 @@
+Changes in synapse v0.12.0-rc4 (unreleased)
+===========================================
+
+* Fix C-S API to expose ``/login`` under ``r0`` (PR #459)
+
+Changes in synapse v0.12.0-rc3 (2015-12-23)
+===========================================
+
+* Allow guest accounts access to ``/sync`` (PR #455)
+* Allow filters to include/exclude rooms at the room level
+  rather than just from the components of the sync for each
+  room. (PR #454)
+* Include urls for room avatars in the response to ``/publicRooms`` (PR #453)
+* Don't set an identicon as the avatar for a user when they register (PR #450)
+* Add a ``display_name`` to third-party invites (PR #449)
+* Send more information to the identity server for third-party invites so that
+  it can send richer messages to the invitee (PR #446)
+* Cache the responses to ``/initialSync`` for 5 minutes. If a client retries
+  a request to ``/initialSync`` before a response was computed for the first
+  request, then the same response is used for both requests (PR #457)
+* Fix a bug where synapse would always request the signing keys of
+  remote servers even when the key was cached locally (PR #452)
+* Fix 500 when paginating search results (PR #447)
+* Fix a bug where synapse was leaking raw email addresses in third-party
+  invites (PR #448)
+
 Changes in synapse v0.12.0-rc2 (2015-12-14)
 ===========================================

@@ -16,4 +16,4 @@
 """ This is a reference implementation of a Matrix home server.
 """
-__version__ = "0.12.0-rc2"
+__version__ = "0.12.0-rc3"

@@ -22,6 +22,7 @@ from synapse.events.utils import serialize_event
 from synapse.events.validator import EventValidator
 from synapse.util import unwrapFirstError
 from synapse.util.logcontext import PreserveLoggingContext
+from synapse.util.caches.snapshot_cache import SnapshotCache
 from synapse.types import UserID, RoomStreamToken, StreamToken
 from ._base import BaseHandler
@@ -45,6 +46,7 @@ class MessageHandler(BaseHandler):
         self.state = hs.get_state_handler()
         self.clock = hs.get_clock()
         self.validator = EventValidator()
+        self.snapshot_cache = SnapshotCache()
 
     @defer.inlineCallbacks
     def get_message(self, msg_id=None, room_id=None, sender_id=None,
@@ -326,7 +328,6 @@ class MessageHandler(BaseHandler):
             [serialize_event(c, now) for c in room_state.values()]
         )
 
-    @defer.inlineCallbacks
     def snapshot_all_rooms(self, user_id=None, pagin_config=None,
                            as_client_event=True, include_archived=False):
         """Retrieve a snapshot of all rooms the user is invited or has joined.
@@ -346,6 +347,28 @@
         is joined on, may return a "messages" key with messages, depending
         on the specified PaginationConfig.
         """
+        key = (
+            user_id,
+            pagin_config.from_token,
+            pagin_config.to_token,
+            pagin_config.direction,
+            pagin_config.limit,
+            as_client_event,
+            include_archived,
+        )
+        now_ms = self.clock.time_msec()
+        result = self.snapshot_cache.get(now_ms, key)
+        if result is not None:
+            return result
+
+        return self.snapshot_cache.set(now_ms, key, self._snapshot_all_rooms(
+            user_id, pagin_config, as_client_event, include_archived
+        ))
+
+    @defer.inlineCallbacks
+    def _snapshot_all_rooms(self, user_id=None, pagin_config=None,
+                            as_client_event=True, include_archived=False):
+
         memberships = [Membership.INVITE, Membership.JOIN]
         if include_archived:
             memberships.append(Membership.LEAVE)

@@ -349,7 +349,7 @@ class Notifier(object):
         room_ids = []
         if is_guest:
             if guest_room_id:
-                if not self._is_world_readable(guest_room_id):
+                if not (yield self._is_world_readable(guest_room_id)):
                     raise AuthError(403, "Guest access not allowed")
                 room_ids = [guest_room_id]
             else:
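
The added yield above matters because _is_world_readable returns a Twisted Deferred, not a boolean: a Deferred object is always truthy, so without the yield the guard could never reject a guest. A minimal standalone sketch of the difference, not taken from the commit (is_world_readable and check_guest_access are hypothetical stand-ins for the real notifier code):

from twisted.internet import defer

def is_world_readable(room_id):
    # Hypothetical stand-in for the real storage lookup, which returns a Deferred.
    return defer.succeed(False)

# Without a yield, the guard would test the Deferred object itself, and a
# Deferred is always truthy, so the guest would never be rejected:
assert bool(is_world_readable("!room:example.com")) is True

@defer.inlineCallbacks
def check_guest_access(room_id):
    # Yielding unwraps the Deferred into the underlying boolean, so a room
    # that is not world readable is rejected as intended.
    if not (yield is_world_readable(room_id)):
        raise RuntimeError("Guest access not allowed")

d = check_guest_access("!room:example.com")
# `d` has errbacked with the RuntimeError instead of silently letting the
# guest through; consume the expected failure so it is not logged.
d.addErrback(lambda failure: failure.trap(RuntimeError))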

@@ -35,7 +35,7 @@ logger = logging.getLogger(__name__)
 class LoginRestServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns("/login$", releases=(), include_in_unstable=False)
+    PATTERNS = client_path_patterns("/login$")
     PASS_TYPE = "m.login.password"
     SAML2_TYPE = "m.login.saml2"
     CAS_TYPE = "m.login.cas"

@@ -0,0 +1,93 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from synapse.util.async import ObservableDeferred


class SnapshotCache(object):
    """Cache for snapshots like the response of /initialSync.

    The response of initialSync only has to be a recent snapshot of the
    server state. It shouldn't matter to clients if it is a few minutes out
    of date.

    This caches a deferred response. Until the deferred completes it will be
    returned from the cache. This means that if the client retries the request
    while the response is still being computed, that original response will be
    used rather than trying to compute a new response.

    Once the deferred completes it will be removed from the cache after
    5 minutes. We delay removing it from the cache because a client retrying
    its request could race with us finishing computing the response.

    Rather than tracking precisely how long something has been in the cache we
    keep two generations of completed responses. Every 5 minutes discard the
    old generation, move the new generation to the old generation, and set the
    new generation to be empty. This means that a result will be in the cache
    somewhere between 5 and 10 minutes.
    """

    DURATION_MS = 5 * 60 * 1000  # Cache results for 5 minutes.

    def __init__(self):
        self.pending_result_cache = {}  # Requests that haven't finished yet.
        self.prev_result_cache = {}  # The older requests that have finished.
        self.next_result_cache = {}  # The newer requests that have finished.
        self.time_last_rotated_ms = 0

    def rotate(self, time_now_ms):
        # Rotate once if the cache duration has passed since the last rotation.
        if time_now_ms - self.time_last_rotated_ms >= self.DURATION_MS:
            self.prev_result_cache = self.next_result_cache
            self.next_result_cache = {}
            self.time_last_rotated_ms += self.DURATION_MS

        # Rotate again if the cache duration has passed twice since the last
        # rotation.
        if time_now_ms - self.time_last_rotated_ms >= self.DURATION_MS:
            self.prev_result_cache = self.next_result_cache
            self.next_result_cache = {}
            self.time_last_rotated_ms = time_now_ms

    def get(self, time_now_ms, key):
        self.rotate(time_now_ms)
        # This cache is intended to deduplicate requests, so we expect it to be
        # missed most of the time. So we just lookup the key in all of the
        # dictionaries rather than trying to short circuit the lookup if the
        # key is found.
        result = self.prev_result_cache.get(key)
        result = self.next_result_cache.get(key, result)
        result = self.pending_result_cache.get(key, result)

        if result is not None:
            return result.observe()
        else:
            return None

    def set(self, time_now_ms, key, deferred):
        self.rotate(time_now_ms)

        result = ObservableDeferred(deferred)

        self.pending_result_cache[key] = result

        def shuffle_along(r):
            # When the deferred completes we shuffle it along to the first
            # generation of the result cache. So that it will eventually
            # expire from the rotation of that cache.
            self.next_result_cache[key] = result
            self.pending_result_cache.pop(key, None)

        result.observe().addBoth(shuffle_along)

        return result.observe()
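
The cache relies on ObservableDeferred: every caller gets its own deferred from observe(), all of which fire when the single underlying computation completes, and the completed entry then survives for between one and two rotation periods. A brief usage sketch, not taken from the commit (the string key and millisecond timestamps are illustrative; the message handler above keys on a tuple of the request parameters):

from twisted.internet.defer import Deferred

from synapse.util.caches.snapshot_cache import SnapshotCache

cache = SnapshotCache()
computation = Deferred()  # stands in for the real /initialSync work

# The first request starts the computation; a retry one second later
# observes the same pending deferred instead of starting a second one.
first = cache.set(0, "initial_sync:@alice:example.com", computation)
second = cache.get(1000, "initial_sync:@alice:example.com")

computation.callback({"rooms": []})  # the single computation finishes once
# Both `first` and `second` have now fired with the same snapshot.

# One rotation period later the completed result is still served from the
# previous generation...
assert cache.get(SnapshotCache.DURATION_MS + 1,
                 "initial_sync:@alice:example.com") is not None
# ...but roughly two periods after that it has been rotated out entirely.
assert cache.get(3 * SnapshotCache.DURATION_MS,
                 "initial_sync:@alice:example.com") is None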

@@ -0,0 +1,60 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .. import unittest

from synapse.util.caches.snapshot_cache import SnapshotCache
from twisted.internet.defer import Deferred


class SnapshotCacheTestCase(unittest.TestCase):

    def setUp(self):
        self.cache = SnapshotCache()
        self.cache.DURATION_MS = 1

    def test_get_set(self):
        # Check that getting a missing key returns None
        self.assertEquals(self.cache.get(0, "key"), None)

        # Check that setting a key with a deferred returns
        # a deferred that resolves when the initial deferred does
        d = Deferred()
        set_result = self.cache.set(0, "key", d)
        self.assertIsNotNone(set_result)
        self.assertFalse(set_result.called)

        # Check that getting the key before the deferred has resolved
        # returns a deferred that resolves when the initial deferred does.
        get_result_at_10 = self.cache.get(10, "key")
        self.assertIsNotNone(get_result_at_10)
        self.assertFalse(get_result_at_10.called)

        # Check that the returned deferreds resolve when the initial
        # deferred does.
        d.callback("v")
        self.assertTrue(set_result.called)
        self.assertTrue(get_result_at_10.called)

        # Check that getting the key after the deferred has resolved
        # before the cache expires returns a resolved deferred.
        get_result_at_11 = self.cache.get(11, "key")
        self.assertIsNotNone(get_result_at_11)
        self.assertTrue(get_result_at_11.called)

        # Check that getting the key after the deferred has resolved
        # after the cache expires returns None
        get_result_at_12 = self.cache.get(12, "key")
        self.assertIsNone(get_result_at_12)