Fix load more in the new room history and fast sync

With the removal of the initial sync without a since param and the
storing of the prev_batch for each room, loading more messages isn't
working correctly.

I've detected two issues that this patch fixes:

1. The first problem is that a room can lack a prev_batch. I'm solving
this with a new backend call that loads more using the last message's
id; in the case of a new room that doesn't have any messages yet, we
can use the last sync batch instead. The resulting fallback order is
sketched after this list.

2. The second problem shows up when you load more, close Fractal and
open it again: if you then load more, the prev_batch is still the last
token fetched before closing, so we don't load the messages that match
what is shown; we keep paginating from that old point. This happens
because on close we store the last batch in the cache but only the last
40 messages, so the stored token can point far behind the oldest stored
message, and loading from it would skip everything in between. To solve
this, I'm setting the prev_batch to None when storing the cache, so the
first load more will fetch messages using the last message id instead.
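
To make the resulting behaviour concrete, here is a minimal sketch of
the load-more fallback order described above. This is an illustration,
not code from this patch: Room and Message are stripped-down stand-ins
for Fractal's real types. Note that after restoring from the cache
prev_batch is always None, so the message-id path is taken.

// Illustration only: `Room` and `Message` are trimmed stand-ins for
// Fractal's own types.
struct Message { id: String }
struct Room { prev_batch: Option<String>, messages: Vec<Message> }

enum LoadMoreFrom {
    PrevBatch(String),  // a valid pagination token from a previous call
    Message(String),    // let the backend derive a token from a message id
    SinceToken(String), // empty new room: reuse the last sync batch
    Nothing,            // nothing to paginate from
}

fn load_more_source(room: &Room, since: Option<String>) -> LoadMoreFrom {
    if let Some(tok) = room.prev_batch.clone() {
        LoadMoreFrom::PrevBatch(tok)
    } else if let Some(msg) = room.messages.iter().next() {
        // after a cache restore prev_batch is None, so we land here
        LoadMoreFrom::Message(msg.id.clone())
    } else if let Some(tok) = since {
        LoadMoreFrom::SinceToken(tok)
    } else {
        LoadMoreFrom::Nothing
    }
}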
Daniel García Moreno 2018-09-26 10:35:12 +02:00
parent 18262cb937
commit c1e84ba973
5 changed files with 34 additions and 1 deletion


@@ -471,9 +471,15 @@ impl AppOp {
            self.internal.send(InternalCommand::LoadMoreNormal).unwrap();
        } else if let Some(prev_batch) = r.prev_batch.clone() {
            self.backend.send(BKCommand::GetRoomMessages(r.id.clone(), prev_batch)).unwrap();
        } else if let Some(msg) = r.messages.iter().next() {
            // no prev_batch, so the backend derives one from this message's id
            self.backend.send(BKCommand::GetRoomMessagesFromMsg(r.id.clone(), msg.clone())).unwrap();
        } else if let Some(from) = self.since.clone() {
            // no messages and no prev_batch, so we use the last sync token
            self.backend.send(BKCommand::GetRoomMessages(r.id.clone(), from)).unwrap();
        } else {
            self.loading_more = false;
            self.load_more_spn.stop();
        }
    }
}


@@ -47,6 +47,9 @@ pub fn store(
            _ => 0,
        };
        r.messages = r.messages.iter().skip(skip).cloned().collect();
        // set prev_batch to None: we're dropping some messages, so the stored
        // token no longer points right before the oldest message we kept
        r.prev_batch = None;
    }

    let data = CacheData {


@@ -198,6 +198,10 @@ impl Backend {
            Ok(BKCommand::GetRoomMessages(room, from)) => {
                let r = room::get_room_messages(self, room, from);
                bkerror!(r, tx, BKResponse::RoomMessagesError);
            }
            Ok(BKCommand::GetRoomMessagesFromMsg(room, from)) => {
                let r = room::get_room_messages_from_msg(self, room, from);
                bkerror!(r, tx, BKResponse::RoomMessagesError);
            }
            Ok(BKCommand::GetMessageContext(message)) => {
                let r = room::get_message_context(self, message);
                bkerror!(r, tx, BKResponse::RoomMessagesError);


@@ -147,6 +147,25 @@ pub fn get_room_messages(bk: &Backend, roomid: String, from: String) -> Result<(
    Ok(())
}

pub fn get_room_messages_from_msg(bk: &Backend, roomid: String, msg: Message) -> Result<(), Error> {
    // first we calculate the from param using the context API, then we call
    // the normal get_room_messages
    let baseu = bk.get_base_url()?;
    let tk = bk.data.lock().unwrap().access_token.clone();
    let id = msg.id.unwrap_or("".to_string());
    let tx = bk.internal_tx.clone();
    thread::spawn(move || {
        if let Ok(from) = util::get_prev_batch_from(&baseu, tk, roomid.clone(), id) {
            if let Some(t) = tx {
                t.send(BKCommand::GetRoomMessages(roomid, from)).unwrap();
            }
        }
    });
    Ok(())
}

fn parse_context(tx: Sender<BKResponse>, tk: String, baseu: Url, roomid: String, eid: String, limit: i32) -> Result<(), Error> {
    let url = client_url(&baseu, &format!("rooms/{}/context/{}", roomid, eid),
                         vec![("limit", format!("{}", limit)), ("access_token", tk.clone())])?;


@@ -41,6 +41,7 @@ pub enum BKCommand {
    SyncForced,
    GetRoomMembers(String),
    GetRoomMessages(String, String),
    GetRoomMessagesFromMsg(String, Message),
    GetMessageContext(Message),
    GetRoomAvatar(String),
    GetThumbAsync(String, Sender<String>),