first push, simple setup to use opengl

It's all inspired, in copy/paste style, by: https://github.com/Nercury/rust-and-opengl-lessons
This commit is contained in:
Lapin 2021-07-21 19:39:59 +02:00
commit cf64bec257
24 changed files with 3139 additions and 0 deletions

23
lib/resources/Cargo.toml Normal file
View file

@ -0,0 +1,23 @@
[package]
name = "resources"
version = "0.1.0"
authors = ["Nerijus Arlauskas <nercury@gmail.com>"]

[features]
# No backend is enabled by default; consumers opt in per feature.
default = []
backend_in_memory = []
# The miniz backend pulls in the optional miniz_oxide dependency.
backend_miniz = ["miniz_oxide"]
backend_filesystem = []
# Filesystem watching builds on the plain filesystem backend and needs notify.
backend_filesystem_watch = ["backend_filesystem", "notify"]

[dependencies]
failure = "0.1.3"
slab = "0.4"
twox-hash = "1.1"
log = "0.4.6"
# Optional: only compiled in when the matching backend feature is enabled.
miniz_oxide = { version = "0.1", optional = true }
notify = { version = "4.0.0", optional = true }

[dev-dependencies]
# Tests exercise the miniz backend unconditionally.
miniz_oxide = "0.1"

View file

@ -0,0 +1,217 @@
use crate::backend::{Backend, BackendSyncPoint, Modification};
use std::path::{Path, PathBuf};
use std::{fs, io};
use std::collections::VecDeque;
use crate::{Error, ResourcePath};
use std::sync::Mutex;
#[cfg(feature = "backend_filesystem_watch")]
mod watch_impl {
    use std::collections::VecDeque;
    use std::path::{Path, PathBuf};
    use std::sync::mpsc::{channel, Receiver, TryRecvError};
    use std::time::{Duration, Instant};
    use notify::{RecommendedWatcher, Watcher as NotifyWatcher, RecursiveMode, DebouncedEvent};
    use crate::backend::{BackendSyncPoint, Modification};
    use crate::{ResourcePathBuf};

    /// Wraps a `notify` filesystem watcher and translates its debounced
    /// events into backend `Modification`s.
    pub struct Watcher {
        // Watched root directory; event paths are relativized against it.
        root_path: PathBuf,
        // Kept alive so events keep flowing; never accessed directly.
        _watcher: RecommendedWatcher,
        // Receiving end of the notify event channel.
        receiver: Receiver<DebouncedEvent>,
        // Instant of the newest batch of changes not yet acknowledged via
        // notify_changes_synced.
        outdated_at: Option<Instant>,
    }

    impl Watcher {
        /// Start watching `root_path` recursively with a 50 ms debounce.
        /// Returns `None` (after logging) if the watcher cannot be created
        /// or the path cannot be watched.
        pub fn new(root_path: &Path) -> Option<Watcher> {
            let (tx, rx) = channel();
            let mut watcher: RecommendedWatcher = NotifyWatcher::new(tx, Duration::from_millis(50))
                .map_err(|e| error!("failed to create watcher for {:?}, {:?}", root_path, e))
                .ok()?;
            watcher.watch(root_path, RecursiveMode::Recursive).ok()?;
            Some(Watcher {
                root_path: root_path.into(),
                _watcher: watcher,
                receiver: rx,
                outdated_at: None,
            })
        }

        /// Clear the outdated marker, but only if no newer changes arrived
        /// after the sync point being acknowledged.
        pub fn notify_changes_synced(&mut self, point: BackendSyncPoint) {
            if let Some(last_outdated) = self.outdated_at {
                if point.instant == last_outdated {
                    self.outdated_at = None;
                }
            }
        }

        /// Drain all pending filesystem events into `queue` as
        /// `Modification`s. Returns a fresh sync point if at least one event
        /// mapped to a resource path; unmappable paths are logged and dropped.
        pub fn new_changes(&mut self, queue: &mut VecDeque<Modification>) -> Option<BackendSyncPoint> {
            let mut something_outdated = false;
            loop {
                match self.receiver.try_recv() {
                    Ok(event) => {
                        match event {
                            DebouncedEvent::Create(path) => {
                                if let Some(resource_path) = ResourcePathBuf::from_filesystem_path(&self.root_path, &path) {
                                    queue.push_back(Modification::Create(resource_path));
                                    something_outdated = true;
                                } else {
                                    warn!("unrecognised resource path {:?} for {} event", path, "Create")
                                }
                            },
                            DebouncedEvent::Write(path) => {
                                if let Some(resource_path) = ResourcePathBuf::from_filesystem_path(&self.root_path, &path) {
                                    queue.push_back(Modification::Write(resource_path));
                                    something_outdated = true;
                                } else {
                                    warn!("unrecognised resource path {:?} for {} event", path, "Write")
                                }
                            },
                            // Chmod (metadata change) is treated the same as a
                            // content write.
                            DebouncedEvent::Chmod(path) => {
                                if let Some(resource_path) = ResourcePathBuf::from_filesystem_path(&self.root_path, &path) {
                                    queue.push_back(Modification::Write(resource_path));
                                    something_outdated = true;
                                } else {
                                    warn!("unrecognised resource path {:?} for {} event", path, "Write")
                                }
                            },
                            DebouncedEvent::Remove(path) => {
                                if let Some(resource_path) = ResourcePathBuf::from_filesystem_path(&self.root_path, &path) {
                                    queue.push_back(Modification::Remove(resource_path));
                                    something_outdated = true;
                                } else {
                                    warn!("unrecognised resource path {:?} for {} event", path, "Remove")
                                }
                            },
                            // Rename only produces a modification when both
                            // endpoints map back into the resource tree.
                            DebouncedEvent::Rename(from_path, to_path) => {
                                match (ResourcePathBuf::from_filesystem_path(&self.root_path, &from_path), ResourcePathBuf::from_filesystem_path(&self.root_path, &to_path)) {
                                    (Some(from), Some(to)) => {
                                        queue.push_back(Modification::Rename { from, to });
                                        something_outdated = true;
                                    },
                                    (None, Some(_)) => warn!("unrecognised resource path {:?} for {} event", from_path, "Rename"),
                                    (Some(_), None) => warn!("unrecognised resource path {:?} for {} event", to_path, "Rename"),
                                    (None, None) => warn!("unrecognised resource paths {:?} and {:?} for Rename event", from_path, to_path),
                                }
                            },
                            // Other notify events (NoticeWrite, Rescan, …) are ignored.
                            _ => (),
                        }
                    },
                    Err(TryRecvError::Empty) => break,
                    Err(TryRecvError::Disconnected) => {
                        error!("filesystem watcher disconnected");
                        break;
                    },
                }
            }
            if something_outdated {
                let outdated_at = Instant::now();
                self.outdated_at = Some(outdated_at);
                Some(BackendSyncPoint { instant: outdated_at })
            } else {
                None
            }
        }
    }
}
#[cfg(not(feature = "backend_filesystem_watch"))]
mod watch_impl {
    use crate::backend::{BackendSyncPoint, Modification};
    use std::collections::VecDeque;

    /// No-op stand-in for the filesystem watcher, compiled when the
    /// `backend_filesystem_watch` feature is disabled.
    pub struct Watcher {}

    impl Watcher {
        /// Without a real watcher there is never anything to acknowledge.
        pub fn notify_changes_synced(&mut self, _point: BackendSyncPoint) {}

        /// Always reports that no modifications are pending.
        pub fn new_changes(&mut self, _queue: &mut VecDeque<Modification>) -> Option<BackendSyncPoint> {
            None
        }
    }
}
/// Backend that serves resources from a directory on the local filesystem.
pub struct FileSystem {
    // Root directory all resource paths are resolved against.
    root_path: PathBuf,
    // Value reported by Backend::can_write(); off by default, see with_write().
    can_write: bool,
    // Optional change watcher; set by with_watch() and only functional when
    // the backend_filesystem_watch feature is compiled in.
    watch: Option<Mutex<watch_impl::Watcher>>,
}
impl FileSystem {
pub fn from_rel_path<P: AsRef<Path>, RP: AsRef<ResourcePath>>(
root_path: P,
rel_path: RP,
) -> FileSystem {
FileSystem::from_path(resource_name_to_path(root_path.as_ref(), rel_path.as_ref()))
}
pub fn from_path<P: AsRef<Path>>(root_path: P) -> FileSystem {
FileSystem {
root_path: root_path.as_ref().into(),
can_write: false,
watch: None,
}
}
pub fn with_write(mut self) -> Self {
self.can_write = true;
self
}
#[cfg(feature = "backend_filesystem_watch")]
pub fn with_watch(mut self) -> Self {
self.watch = watch_impl::Watcher::new(&self.root_path).map(|v| Mutex::new(v));
self
}
}
impl Backend for FileSystem {
    /// Reports the flag configured via with_write().
    fn can_write(&self) -> bool {
        self.can_write
    }

    /// A resource exists if its mapped filesystem path exists.
    fn exists(&self, path: &ResourcePath) -> bool {
        resource_name_to_path(&self.root_path, path).exists()
    }

    /// Forward the acknowledgment to the watcher, if one is active.
    fn notify_changes_synced(&mut self, point: BackendSyncPoint) {
        if let Some(watch) = self.watch.as_mut() {
            watch.lock().unwrap().notify_changes_synced(point);
        }
    }

    /// Ask the watcher (if any) for pending filesystem modifications.
    fn new_changes(&mut self, queue: &mut VecDeque<Modification>) -> Option<BackendSyncPoint> {
        match self.watch.as_mut() {
            Some(watch) => watch.lock().unwrap().new_changes(queue),
            None => None,
        }
    }

    /// Stream the file mapped from `path` into `output`.
    fn read_into(&mut self, path: &ResourcePath, mut output: &mut io::Write) -> Result<(), Error> {
        let file_path = resource_name_to_path(&self.root_path, path);
        let file = fs::File::open(file_path)?;
        let mut reader = io::BufReader::new(file);
        io::copy(&mut reader, &mut output)?;
        Ok(())
    }

    /// Writing to the filesystem backend is not implemented yet.
    fn write_from(&mut self, _path: &ResourcePath, _buffer: &mut io::Read) -> Result<(), Error> {
        unimplemented!()
    }
}
/// Map a resource path onto a filesystem path by joining each of its
/// `/`-separated items onto `root_dir`.
fn resource_name_to_path(root_dir: &Path, location: &ResourcePath) -> PathBuf {
    location
        .items()
        .fold(root_dir.to_path_buf(), |path, part| path.join(part))
}

View file

@ -0,0 +1,106 @@
use crate::backend::{Backend, BackendSyncPoint};
use std::collections::HashMap;
use std::hash::BuildHasherDefault;
use std::io;
use std::sync::{Arc, RwLock};
use twox_hash::XxHash;
use crate::{Error, ResourcePath, ResourcePathBuf};
/// Mutable state behind the `InMemory` backend: the resource map plus the
/// sync point of the last write that has not been acknowledged yet.
#[derive(Debug)]
struct Shared {
    // Resource bytes keyed by path; XxHash is used as a fast hasher.
    map: HashMap<ResourcePathBuf, Vec<u8>, BuildHasherDefault<XxHash>>,
    // Sync point of the most recent unacknowledged write, if any.
    unsynced_change_time: Option<BackendSyncPoint>,
}
impl Shared {
    /// Create an empty store with no pending change notification.
    pub fn new() -> Shared {
        Shared {
            unsynced_change_time: None,
            map: HashMap::default(),
        }
    }

    /// Copy `value` into the map under `key`, replacing any previous entry.
    /// Note: does not touch `unsynced_change_time`.
    pub fn insert(&mut self, key: &ResourcePath, value: &[u8]) {
        self.map.insert(key.as_ref().into(), value.to_vec());
    }
}
/// In-memory resource backend; state lives behind `Arc<RwLock<..>>` so the
/// backend can be shared safely.
#[derive(Debug)]
pub struct InMemory {
    shared: Arc<RwLock<Shared>>,
}
impl InMemory {
    /// Create an empty in-memory backend.
    pub fn new() -> InMemory {
        let shared = Shared::new();
        InMemory {
            shared: Arc::new(RwLock::new(shared)),
        }
    }

    /// Builder-style helper: store `value` under `key` and return `self`.
    pub fn with<P: AsRef<ResourcePath>>(self, key: P, value: &[u8]) -> Self {
        {
            let mut guard = self
                .shared
                .write()
                .expect("failed to lock InMemory for write");
            guard.insert(key.as_ref(), value);
        }
        self
    }
}
impl Backend for InMemory {
fn can_write(&self) -> bool {
true
}
fn exists(&self, path: &ResourcePath) -> bool {
self.shared
.read()
.expect("failed to lock InMemory for read")
.map
.contains_key::<ResourcePath>(path.as_clean_str().as_ref())
}
fn notify_changes_synced(&mut self, point: BackendSyncPoint) {
let mut shared_ref = self
.shared
.write()
.expect("failed to lock InMemory for write");
if shared_ref.unsynced_change_time == Some(point) {
shared_ref.unsynced_change_time = None;
}
}
fn new_changes(&mut self) -> Option<BackendSyncPoint> {
self.shared
.read()
.expect("failed to lock InMemory for read")
.unsynced_change_time
}
fn read_into(&mut self, path: &ResourcePath, output: &mut io::Write) -> Result<(), Error> {
let shared = self
.shared
.read()
.expect("failed to lock InMemory for read");
let item_ref = match shared.map.get(path) {
None => return Err(Error::NotFound),
Some(val) => val,
};
output.write_all(&item_ref)?;
Ok(())
}
fn write_from(&mut self, path: &ResourcePath, buffer: &mut io::Read) -> Result<(), Error> {
let mut data = Vec::new();
buffer.read_to_end(&mut data)?;
let mut shared = self
.shared
.write()
.expect("failed to lock InMemory for write");
shared.map.insert(path.into(), data);
shared.unsynced_change_time = Some(BackendSyncPoint::now());
Ok(())
}
}

View file

@ -0,0 +1,90 @@
extern crate miniz_oxide as miniz;
use crate::backend::{Backend, BackendSyncPoint};
use failure;
use std::io;
use crate::{Error, ResourcePath, ResourcePathBuf};
/// Backend decorator that zlib-compresses data written to — and
/// decompresses data read from — the wrapped backend `T`.
#[derive(Debug)]
pub struct Miniz<T>
where
    T: Backend,
{
    // Backend actually storing the (compressed) bytes.
    inner: T,
    // Compression level forwarded to miniz_oxide's compress_to_vec_zlib.
    level: u8,
}
impl<T> Miniz<T>
where
    T: Backend,
{
    /// Wrap `inner`, compressing writes at the given miniz `level`.
    pub fn new(inner: T, level: u8) -> Miniz<T> {
        Miniz { level, inner }
    }
}
impl<T> Backend for Miniz<T>
where
T: Backend,
{
fn can_write(&self) -> bool {
self.inner.can_write()
}
fn exists(&self, path: &ResourcePath) -> bool {
self.inner.exists(path)
}
fn notify_changes_synced(&mut self, point: BackendSyncPoint) {
self.inner.notify_changes_synced(point);
}
fn new_changes(&mut self) -> Option<BackendSyncPoint> {
self.inner.new_changes()
}
fn read_into(&mut self, path: &ResourcePath, output: &mut io::Write) -> Result<(), Error> {
let mut input_data = Vec::new();
self.inner.read_into(path, &mut input_data)?;
let output_data =
self::miniz::inflate::decompress_to_vec_zlib(&mut input_data).map_err(write_error)?;
output.write_all(&output_data[..])?;
Ok(())
}
fn write_from(&mut self, path: &ResourcePath, buffer: &mut io::Read) -> Result<(), Error> {
let mut input_data = Vec::new();
buffer.read_to_end(&mut input_data)?;
let output_data = self::miniz::deflate::compress_to_vec_zlib(&mut input_data, self.level);
let mut cursor = io::Cursor::new(output_data);
Ok(self.inner.write_from(path, &mut cursor)?)
}
}
/// Error raised when miniz_oxide fails to inflate stored data.
#[derive(Fail, Debug)]
pub enum MinizError {
    /// Wraps the raw status code returned by the inflate routines.
    #[fail(display = "Miniz error {:?}", _0)]
    ErrorCode(self::miniz::inflate::TINFLStatus),
}
fn write_error(miniz_error: self::miniz::inflate::TINFLStatus) -> Error {
Error::BackendFailedToWrite {
path: ResourcePathBuf::from(String::from("")),
inner: failure::Error::from(MinizError::ErrorCode(miniz_error)),
}
}
#[cfg(test)]
mod test {
    use crate::backend::{Backend, InMemory, Miniz};

    // Round-trip: bytes written through the compressing decorator must come
    // back unchanged when read.
    #[test]
    fn test_can_write_and_read() {
        let mut be = Miniz::new(InMemory::new(), 9);
        be.write("x".into(), b"hello world").unwrap();
        let result = be.read_vec("x".into()).unwrap();
        assert_eq!(b"hello world", &result[..]);
    }
}

View file

@ -0,0 +1,62 @@
use crate::path::{ResourcePath, ResourcePathBuf};
use std::collections::VecDeque;
use std::io;
use std::time::Instant;
use crate::Error;
#[cfg(any(test, feature = "backend_in_memory"))]
mod in_memory;
#[cfg(any(test, feature = "backend_in_memory"))]
pub use self::in_memory::InMemory;
#[cfg(any(test, feature = "backend_miniz"))]
mod miniz;
#[cfg(any(test, feature = "backend_miniz"))]
pub use self::miniz::Miniz;
#[cfg(any(test, feature = "backend_filesystem"))]
mod filesystem;
#[cfg(any(test, feature = "backend_filesystem"))]
pub use self::filesystem::FileSystem;
/// Opaque marker for the moment at which backend changes were observed;
/// compared by equality to tell whether a sync acknowledgment is current.
#[derive(Eq, PartialEq, Copy, Clone, Debug)]
pub struct BackendSyncPoint {
    pub (crate) instant: Instant,
}
/// A single change to a resource, as reported by a backend (e.g. the
/// filesystem watcher).
#[derive(Eq, PartialEq, Clone, Debug)]
pub enum Modification {
    Create(ResourcePathBuf),
    Write(ResourcePathBuf),
    Remove(ResourcePathBuf),
    /// A resource moved from one path to another.
    Rename { from: ResourcePathBuf, to: ResourcePathBuf },
}
impl BackendSyncPoint {
    /// Sync point marking the current moment.
    pub fn now() -> BackendSyncPoint {
        let instant = Instant::now();
        BackendSyncPoint { instant }
    }
}
/// Common interface for all resource storage backends.
///
/// Implementations must be shareable across threads (`Send + Sync`).
pub trait Backend: Send + Sync {
    /// True if this backend accepts writes.
    fn can_write(&self) -> bool;
    /// Check whether a resource currently exists at `path`.
    fn exists(&self, path: &ResourcePath) -> bool;
    /// Acknowledge that the changes observed at `point` have been handled.
    fn notify_changes_synced(&mut self, point: BackendSyncPoint);
    /// Collect modifications since the last sync into `queue`; returns the
    /// sync point of the newest change, or None if nothing changed.
    fn new_changes(&mut self, queue: &mut VecDeque<Modification>) -> Option<BackendSyncPoint>;
    /// Stream the resource at `path` into `output`.
    fn read_into(&mut self, path: &ResourcePath, output: &mut io::Write) -> Result<(), Error>;
    /// Convenience wrapper over `read_into` that collects into a `Vec`.
    fn read_vec(&mut self, path: &ResourcePath) -> Result<Vec<u8>, Error> {
        let mut output = Vec::new();
        self.read_into(path, &mut output)?;
        Ok(output)
    }
    /// Store everything readable from `buffer` at `path`.
    fn write_from(&mut self, path: &ResourcePath, buffer: &mut io::Read) -> Result<(), Error>;
    /// Convenience wrapper over `write_from` for byte slices.
    fn write(&mut self, path: &ResourcePath, mut value: &[u8]) -> Result<(), Error> {
        self.write_from(path, &mut value)?;
        Ok(())
    }
}

View file

@ -0,0 +1,36 @@
use failure;
use std::io;
use crate::ResourcePathBuf;
/// Errors returned by resource backends and the `Resources` front-end.
#[derive(Debug, Fail)]
pub enum Error {
    #[fail(display = "I/O error")]
    Io(#[cause] io::Error),
    #[fail(display = "Item not found")]
    NotFound,
    #[fail(display = "Backend can not write")]
    NotWritable,
    /// A backend accepted the write but failed while performing it.
    #[fail(display = "Failed to write {}, {}", path, inner)]
    BackendFailedToWrite {
        path: ResourcePathBuf,
        inner: failure::Error,
    },
}
impl From<io::Error> for Error {
fn from(other: io::Error) -> Self {
Error::Io(other)
}
}
impl ::std::cmp::PartialEq for Error {
    /// Structural equality by variant only: the payloads (`io::Error`,
    /// `failure::Error`) are not comparable, so two errors of the same
    /// simple variant are considered equal.
    fn eq(&self, other: &Error) -> bool {
        match (self, other) {
            (Error::Io(_), Error::Io(_)) => true,
            (Error::NotFound, Error::NotFound) => true,
            (Error::NotWritable, Error::NotWritable) => true,
            // BUGFIX: the previous fallback arm `(a, b) if a == b => true`
            // invoked this `eq` on itself and recursed without bound (stack
            // overflow) for any pair not matched above. Everything else —
            // including BackendFailedToWrite, whose payload cannot be
            // compared — is simply not equal.
            _ => false,
        }
    }
}

372
lib/resources/src/lib.rs Normal file
View file

@ -0,0 +1,372 @@
#[macro_use]
extern crate failure;
extern crate slab;
extern crate twox_hash;
#[macro_use]
extern crate log;
#[cfg(feature = "backend_filesystem_watch")]
extern crate notify;
mod path;
pub use self::path::{ResourcePath, ResourcePathBuf};
mod shared;
use self::shared::{InternalSyncPoint, SharedResources, UserKey};
pub mod backend;
mod error;
pub use self::error::Error;
use std::sync::Arc;
use std::sync::RwLock;
use std::time::Instant;
/// Opaque token representing a batch of observed changes; hand it back to
/// `Resources::notify_changes_synced` once the changes are handled.
pub struct SyncPoint(InternalSyncPoint);
/// Cheaply clonable front-end over the shared resource loader state.
#[derive(Clone)]
pub struct Resources {
    // All loaders and resource bookkeeping, shared between clones.
    shared: Arc<RwLock<SharedResources>>,
}
impl Resources {
pub fn new() -> Resources {
Resources {
shared: Arc::new(RwLock::new(SharedResources::new())),
}
}
pub fn loaded_from<L: backend::Backend + 'static>(
self,
loader_id: &str,
order: isize,
backend: L,
) -> Resources {
self.insert_loader(loader_id, order, backend);
self
}
pub fn insert_loader<L: backend::Backend + 'static>(
&self,
loader_id: &str,
order: isize,
backend: L,
) {
let mut resources = self.shared.write().expect("failed to lock for write");
resources.insert_loader(loader_id, order, backend);
}
pub fn remove_loader(&self, loader_id: &str) {
let mut resources = self.shared.write().expect("failed to lock for write");
resources.remove_loader(loader_id);
}
pub fn resource<P: AsRef<ResourcePath>>(&self, path: P) -> Resource {
Resource {
shared: self.shared.clone(),
key: self
.shared
.write()
.expect("failed to lock for write")
.new_resource_user(path),
}
}
pub fn new_changes(&self) -> Option<SyncPoint> {
self.shared
.write()
.expect("failed to lock for write")
.new_changes()
.map(|p| SyncPoint(p))
}
pub fn notify_changes_synced(&self, sync_point: SyncPoint) {
self.shared
.write()
.expect("failed to lock for write")
.notify_changes_synced(sync_point.0)
}
}
/// Handle to a single resource; registers itself as a user of the resource
/// and deregisters on drop.
pub struct Resource {
    shared: Arc<RwLock<SharedResources>>,
    // Identifies this particular user of the resource inside SharedResources.
    key: UserKey,
}
impl Resource {
    /// Full resource path of this handle, for display/diagnostics.
    /// Panics if the resource was removed from the shared registry.
    pub fn name(&self) -> String {
        let shared_ref = &self.shared;
        let resources = shared_ref.read().expect("failed to lock for read");
        resources
            .get_resource_path(self.key)
            .map(|p| p.to_string())
            .expect("expected resource to have access to the name")
    }

    /// Check if this resource exists.
    ///
    /// This unreliable command can tell if at least one backend can return the resource at this moment.
    /// Note that the next moment the resource can be gone.
    pub fn exists(&self) -> bool {
        let shared_ref = &self.shared;
        let resources = shared_ref.read().expect("failed to lock for read");
        resources
            .get_resource_path_backend_containing_resource(self.key)
            .map(|(path, _, b)| b.exists(path))
            .unwrap_or(false)
    }

    /// Read value from the backend that has highest order number and contains the resource.
    pub fn get(&self) -> Result<Vec<u8>, Error> {
        let shared_ref = &self.shared;
        let mut resources = shared_ref.write().expect("failed to lock for write");
        let mut did_read = None;
        // Inner scope: the backend iteration borrows `resources`, which must
        // end before notify_did_read below can borrow it mutably again.
        {
            for (path, modification_time, backend) in resources.resource_backends(self.key) {
                match backend.read_vec(path) {
                    Ok(result) => {
                        did_read = Some((modification_time, result));
                        break;
                    }
                    // Missing in this backend — fall through to the next one.
                    Err(Error::NotFound) => continue,
                    Err(e) => return Err(e),
                }
            }
        }
        if let Some((modification_time, result)) = did_read {
            // Record the read so is_modified() is cleared for this user.
            resources.notify_did_read(self.key, modification_time);
            return Ok(result);
        }
        Err(Error::NotFound)
    }

    /// Write value to the backend that has highest order number and can write.
    pub fn write(&self, data: &[u8]) -> Result<(), Error> {
        let shared_ref = &self.shared;
        let mut resources = shared_ref.write().expect("failed to lock for write");
        let mut did_write = false;
        // Same borrow-scoping trick as in get().
        {
            for (path, _, backend) in resources.resource_backends(self.key) {
                match backend.write(path, data) {
                    Ok(()) => {
                        did_write = true;
                        break;
                    }
                    // Read-only backend — try the next one.
                    Err(Error::NotWritable) => continue,
                    Err(e) => return Err(e),
                }
            }
        }
        if did_write {
            // Record the write moment so other users see the resource as modified.
            resources.notify_did_write(self.key, Instant::now());
            return Ok(());
        }
        Err(Error::NotWritable)
    }

    /// True when the resource changed since this handle last read it.
    pub fn is_modified(&self) -> bool {
        let resources = self.shared.read().expect("failed to lock for read");
        resources
            .get_path_user_metadata(self.key)
            .map(|m| m.outdated_at.is_some())
            .unwrap_or(false)
    }
}
impl Clone for Resource {
    /// Cloning registers a brand-new user of the same resource, so every
    /// clone tracks its own modification state.
    fn clone(&self) -> Self {
        let key = self
            .shared
            .write()
            .expect("failed to lock for write")
            .append_resource_user(self.key.resource_id);
        Resource {
            key,
            shared: self.shared.clone(),
        }
    }
}
impl Drop for Resource {
    /// Deregister this user so the shared registry can clean up.
    fn drop(&mut self) {
        self.shared
            .write()
            .expect("failed to lock for write")
            .remove_resource_user(self.key);
    }
}
#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn with_no_loaders_should_have_no_reader() {
        let res = Resources::new();
        assert!(!res.resource("a").exists());
    }

    #[test]
    fn should_read_value() {
        let res =
            Resources::new().loaded_from("a", 0, backend::InMemory::new().with("name", b"hello"));
        assert_eq!(&res.resource("name").get().unwrap(), b"hello");
    }

    // Creating and cloning proxies must not count as a change.
    #[test]
    fn there_should_be_no_changes_and_resources_should_not_be_modified_at_start() {
        let res =
            Resources::new().loaded_from("a", 0, backend::InMemory::new().with("name", b"hello"));
        assert!(res.new_changes().is_none());
        let resource_proxy_a = res.resource("name");
        let resource_proxy_b = res.resource("name");
        let resource_proxy_clone_a = resource_proxy_a.clone();
        let resource_proxy_clone_b = resource_proxy_b.clone();
        assert!(res.new_changes().is_none());
        assert!(!resource_proxy_a.is_modified());
        assert!(!resource_proxy_b.is_modified());
        assert!(!resource_proxy_clone_a.is_modified());
        assert!(!resource_proxy_clone_b.is_modified());
    }

    // A write marks every other proxy of the same resource as modified,
    // except the proxy that performed the write.
    #[test]
    fn writing_resource_should_produce_change_sync_point_and_other_resource_proxies_should_see_it_as_modified() {
        let res =
            Resources::new().loaded_from("a", 0, backend::InMemory::new().with("name", b"hello"));
        let resource_proxy_a = res.resource("name");
        let resource_proxy_b = res.resource("name");
        let resource_proxy_clone_a = resource_proxy_a.clone();
        let resource_proxy_clone_b = resource_proxy_b.clone();
        assert!(resource_proxy_b.write(b"world").is_ok());
        assert!(res.new_changes().is_some());
        assert!(resource_proxy_a.is_modified());
        assert!(
            !resource_proxy_b.is_modified(),
            "the most recent written item is assumed to be up to date"
        );
        assert!(resource_proxy_clone_a.is_modified());
        assert!(resource_proxy_clone_b.is_modified());
    }

    #[test]
    fn notifying_changes_synced_should_clear_syn_point() {
        let res =
            Resources::new().loaded_from("a", 0, backend::InMemory::new().with("name", b"hello"));
        let resource_proxy_a = res.resource("name");
        let resource_proxy_b = res.resource("name");
        resource_proxy_b.write(b"world").unwrap();
        assert!(res.new_changes().is_some());
        let point = res.new_changes().unwrap();
        res.notify_changes_synced(point);
        assert!(
            resource_proxy_a.is_modified(),
            "resources remain marked as modified until read"
        );
        assert!(
            !resource_proxy_b.is_modified(),
            "last written resource looses modified state"
        );
        assert!(res.new_changes().is_none());
    }

    // Acknowledging an old sync point must not hide changes that happened
    // after that point was taken.
    #[test]
    fn notifying_changes_synced_should_not_clear_syn_point_if_there_were_new_writes() {
        let res =
            Resources::new().loaded_from("a", 0, backend::InMemory::new().with("name", b"hello"));
        let resource_proxy_a = res.resource("name");
        let resource_proxy_b = res.resource("name");
        resource_proxy_b.write(b"world").unwrap();
        assert!(res.new_changes().is_some());
        let point = res.new_changes().unwrap();
        resource_proxy_a.write(b"world2").unwrap();
        res.notify_changes_synced(point);
        assert!(
            resource_proxy_b.is_modified(),
            "resources remain marked as modified until read"
        );
        assert!(
            !resource_proxy_a.is_modified(),
            "last written resource looses modified state"
        );
        assert!(res.new_changes().is_some());
    }

    #[test]
    fn removing_the_loader_should_invalidate_resource() {
        let res =
            Resources::new().loaded_from("a", 0, backend::InMemory::new().with("name", b"hello"));
        let resource_proxy_a = res.resource("name");
        res.remove_loader("a");
        assert!(res.new_changes().is_some());
        let point = res.new_changes().unwrap();
        assert!(
            resource_proxy_a.is_modified(),
            "removed loader should trigger modified flag on resource"
        );
        res.notify_changes_synced(point);
        assert!(res.new_changes().is_none());
    }

    // A loader with a higher order shadows existing values for the same path.
    #[test]
    fn adding_the_loader_should_override_resource_and_invalidate_it() {
        let res =
            Resources::new().loaded_from("a", 0, backend::InMemory::new().with("name", b"hello"));
        let resource_proxy_a = res.resource("name");
        res.insert_loader("b", 1, backend::InMemory::new().with("name", b"world"));
        assert!(res.new_changes().is_some());
        let point = res.new_changes().unwrap();
        assert!(
            resource_proxy_a.is_modified(),
            "adding loader should trigger modified flag on resource"
        );
        assert_eq!(&resource_proxy_a.get().unwrap(), b"world");
        assert!(
            !resource_proxy_a.is_modified(),
            "reading resouce should mark it read"
        );
        res.notify_changes_synced(point);
        assert!(res.new_changes().is_none());
    }
}

849
lib/resources/src/path.rs Normal file
View file

@ -0,0 +1,849 @@
/*!
Resource path implementation.
Universal resource paths help to query resources the same way across different platforms and backends.
*/
/// Owned resource path (the `String` analogue of `ResourcePath`).
#[derive(Clone, Hash, Eq, PartialEq, Ord, PartialOrd)]
pub struct ResourcePathBuf {
    inner: String,
}
/// Borrowed resource path slice (the `str` analogue of `ResourcePathBuf`);
/// components are separated by `/` regardless of platform.
#[derive(Hash, Eq, PartialEq, Ord, PartialOrd)]
pub struct ResourcePath {
    inner: str,
}
impl ResourcePath {
    /// Reinterpret a `&str` as a `&ResourcePath` without copying.
    fn from_inner(inner: &str) -> &ResourcePath {
        // SAFETY: ResourcePath wraps a single unsized `str` field, so the
        // pointer cast relies on both having the same layout.
        // NOTE(review): consider #[repr(transparent)] on ResourcePath to
        // make that layout guarantee explicit.
        unsafe { &*(inner as *const str as *const ResourcePath) }
    }
}
impl ::std::ops::Deref for ResourcePathBuf {
    type Target = ResourcePath;

    fn deref(&self) -> &ResourcePath {
        // FIX: `from_inner` already returns `&ResourcePath`; the previous
        // extra `&` produced `&&ResourcePath` that only compiled via deref
        // coercion (clippy::needless_borrow).
        ResourcePath::from_inner(&self.inner[..])
    }
}

impl AsRef<ResourcePath> for str {
    fn as_ref(&self) -> &ResourcePath {
        ResourcePath::from_inner(self)
    }
}

impl AsRef<ResourcePath> for String {
    fn as_ref(&self) -> &ResourcePath {
        ResourcePath::from_inner(&self)
    }
}

impl AsRef<ResourcePath> for ResourcePathBuf {
    fn as_ref(&self) -> &ResourcePath {
        ResourcePath::from_inner(&self.inner)
    }
}
impl<'a> From<&'a ResourcePath> for ResourcePathBuf {
    fn from(other: &ResourcePath) -> Self {
        ResourcePathBuf {
            inner: other.inner.into(),
        }
    }
}

impl<'a> From<&'a str> for &'a ResourcePath {
    fn from(other: &'a str) -> Self {
        // FIX: dropped the redundant `&` in front of `from_inner`
        // (clippy::needless_borrow); the function already returns a reference.
        ResourcePath::from_inner(other)
    }
}

impl From<String> for ResourcePathBuf {
    fn from(other: String) -> Self {
        ResourcePathBuf { inner: other }
    }
}

// Allows HashMap<ResourcePathBuf, _> lookups by &ResourcePath.
impl ::std::borrow::Borrow<ResourcePath> for ResourcePathBuf {
    fn borrow(&self) -> &ResourcePath {
        ResourcePath::from_inner(&self.inner)
    }
}

impl AsRef<ResourcePath> for ResourcePath {
    fn as_ref(&self) -> &ResourcePath {
        self
    }
}
// ---- IMPL ----
impl ResourcePath {
    /// Path of the parent "directory": `Some("")` for top-level items,
    /// `None` only for the empty (root) path itself.
    pub fn parent(&self) -> Option<&ResourcePath> {
        match self.inner.rfind('/') {
            Some(index) => Some(ResourcePath::from_inner(&self.inner[..index])),
            None => if &self.inner == "" {
                None
            } else {
                Some(ResourcePath::from_inner(""))
            },
        }
    }

    /// Owned copy of the path text.
    pub fn to_string(&self) -> String {
        self.inner.into()
    }

    /// Iterate over the `/`-separated path components.
    pub fn items(&self) -> impl Iterator<Item = &str> {
        self.inner.split('/')
    }

    /// Returns path as str and ensures that the returned str does not have a leading or trailing slash
    pub fn as_clean_str(&self) -> &str {
        let mut result = &self.inner;
        if result.starts_with('/') {
            result = &result[1..];
        }
        if result.ends_with('/') {
            // BUGFIX: was `&result[..1]`, which truncated the whole path to
            // its first byte instead of only dropping the trailing slash.
            result = &result[..result.len() - 1];
        }
        result
    }

    /// Join two paths with a single `/`; an empty side is ignored.
    pub fn join<P: AsRef<ResourcePath>>(&self, other: P) -> ResourcePathBuf {
        let left = self.as_clean_str();
        let right = other.as_ref().as_clean_str();
        if left.is_empty() {
            return ResourcePathBuf::from(right.as_ref());
        }
        if right.is_empty() {
            return ResourcePathBuf::from(left.as_ref());
        }
        ResourcePathBuf {
            inner: [left, "/", right].concat(),
        }
    }

    /// Map this resource path to a real filesystem path under `root_dir`,
    /// sanitizing each component so it is a valid file name.
    pub fn to_filesystem_path(&self, root_dir: &::std::path::Path) -> ::std::path::PathBuf {
        let mut path: ::std::path::PathBuf = root_dir.into();
        for part in self.items() {
            path = path.join(sanitize_path_component(part).as_ref());
        }
        path
    }
}
impl ResourcePathBuf {
    /// Reconstruct a resource path from a real filesystem path, undoing
    /// component sanitization. Returns `None` if `path` is not under
    /// `root_dir` or a component cannot be un-sanitized.
    pub fn from_filesystem_path(root_dir: &::std::path::Path, path: &::std::path::Path) -> Option<Self> {
        let relative_dir = path.strip_prefix(root_dir).ok()?;
        // Reserve a little headroom for growth during un-sanitization.
        let capacity = relative_dir.as_os_str().len() + 32;
        let mut result = ResourcePathBuf { inner: String::with_capacity(capacity) };
        for part in relative_dir.components() {
            let unsanitized_part = unsanitize_path_component(part.as_os_str())?;
            result = result.join(unsanitized_part.as_ref());
        }
        Some(result)
    }
}
// ---- Formatting ----
use std::fmt;
// Debug prints as a tuple, e.g. ResourcePath("a/b"); Display prints the
// bare path text.
impl fmt::Debug for ResourcePath {
    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        // Extra `&` because debug_tuple's field() needs a &dyn Debug.
        f.debug_tuple("ResourcePath").field(&&self.inner).finish()
    }
}

impl fmt::Display for ResourcePath {
    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        fmt::Display::fmt(&self.inner, f)
    }
}

impl fmt::Debug for ResourcePathBuf {
    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        f.debug_tuple("ResourcePathBuf").field(&self.inner).finish()
    }
}

impl fmt::Display for ResourcePathBuf {
    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        fmt::Display::fmt(&self.inner, f)
    }
}
// ---- Other utils ---
use std::borrow::Cow;
/// One sanitization step: how many input bytes it consumes and what to
/// emit in their place.
struct FixSolution<'s> {
    // Number of input bytes replaced by this fix.
    problematic_sequence_len: usize,
    fix: FixOutput<'s>,
}
enum FixOutput<'s> {
    /// Insert a slice to the byte output
    Single(&'s [u8]),
    /// Insert 3 slices to byte output
    Triple(&'s [u8], &'s [u8], &'s [u8]),
}
/// Check if the subsequent string requires a fix
///
/// The fixes here should be reversible. It should be possible to reconstruct the
/// resource name from the sanitized output.
///
/// The first group of checks escapes Windows-reserved device names (CON,
/// COM1-9, PRN, AUX, NUL, LPT1-9) appearing as a whole component or as its
/// dot-prefixed stem; the second escapes individual characters that are
/// invalid in file names.
fn check_for_sanitize_fix(previous_len: usize, remainder: &[u8]) -> Option<FixSolution> {
    let next_char = remainder[0];
    // Reserved device names only matter at the very start of a component.
    if previous_len == 0 && remainder.len() >= 3 {
        match next_char {
            b'C' | b'c' => {
                let c1 = remainder[1].to_ascii_lowercase();
                let c2 = remainder[2].to_ascii_lowercase();
                // FIX: `.iter().skip(n).next().cloned()` replaced with the
                // idiomatic and equivalent `.get(n).cloned()`.
                let c3 = remainder.get(3).cloned();
                let c4 = remainder.get(4).cloned();
                match (c1, c2, c3, c4) {
                    // "CON" as the whole component
                    (b'o', b'n', None, None) => {
                        return Some(FixSolution {
                            problematic_sequence_len: 3,
                            fix: FixOutput::Triple(b"+r", remainder, b"+"),
                        })
                    }
                    // "CON." stem
                    (b'o', b'n', Some(b'.'), _) => {
                        return Some(FixSolution {
                            problematic_sequence_len: 3,
                            fix: FixOutput::Triple(b"+r", &remainder[..3], b"+"),
                        })
                    }
                    // FIX: deprecated `...` range patterns replaced with
                    // `..=`, consistent with the `1..=31` arm below.
                    // "COM1".."COM9" as the whole component
                    (b'o', b'm', Some(b'1'..=b'9'), None) => {
                        return Some(FixSolution {
                            problematic_sequence_len: 4,
                            fix: FixOutput::Triple(b"+r", remainder, b"+"),
                        })
                    }
                    // "COM1.".."COM9." stem
                    (b'o', b'm', Some(b'1'..=b'9'), Some(b'.')) => {
                        return Some(FixSolution {
                            problematic_sequence_len: 4,
                            fix: FixOutput::Triple(b"+r", &remainder[..4], b"+"),
                        })
                    }
                    _ => (),
                }
            }
            b'P' | b'p' => {
                let c1 = remainder[1].to_ascii_lowercase();
                let c2 = remainder[2].to_ascii_lowercase();
                let c3 = remainder.get(3).cloned();
                match (c1, c2, c3) {
                    // "PRN"
                    (b'r', b'n', None) => {
                        return Some(FixSolution {
                            problematic_sequence_len: 3,
                            fix: FixOutput::Triple(b"+r", remainder, b"+"),
                        })
                    }
                    (b'r', b'n', Some(b'.')) => {
                        return Some(FixSolution {
                            problematic_sequence_len: 3,
                            fix: FixOutput::Triple(b"+r", &remainder[..3], b"+"),
                        })
                    }
                    _ => (),
                }
            }
            b'A' | b'a' => {
                let c1 = remainder[1].to_ascii_lowercase();
                let c2 = remainder[2].to_ascii_lowercase();
                let c3 = remainder.get(3).cloned();
                match (c1, c2, c3) {
                    // "AUX"
                    (b'u', b'x', None) => {
                        return Some(FixSolution {
                            problematic_sequence_len: 3,
                            fix: FixOutput::Triple(b"+r", remainder, b"+"),
                        })
                    }
                    (b'u', b'x', Some(b'.')) => {
                        return Some(FixSolution {
                            problematic_sequence_len: 3,
                            fix: FixOutput::Triple(b"+r", &remainder[..3], b"+"),
                        })
                    }
                    _ => (),
                }
            }
            b'N' | b'n' => {
                let c1 = remainder[1].to_ascii_lowercase();
                let c2 = remainder[2].to_ascii_lowercase();
                let c3 = remainder.get(3).cloned();
                match (c1, c2, c3) {
                    // "NUL"
                    (b'u', b'l', None) => {
                        return Some(FixSolution {
                            problematic_sequence_len: 3,
                            fix: FixOutput::Triple(b"+r", remainder, b"+"),
                        })
                    }
                    (b'u', b'l', Some(b'.')) => {
                        return Some(FixSolution {
                            problematic_sequence_len: 3,
                            fix: FixOutput::Triple(b"+r", &remainder[..3], b"+"),
                        })
                    }
                    _ => (),
                }
            }
            b'L' | b'l' if remainder.len() >= 4 => {
                let c1 = remainder[1].to_ascii_lowercase();
                let c2 = remainder[2].to_ascii_lowercase();
                let c3 = remainder[3];
                let c4 = remainder.get(4).cloned();
                match (c1, c2, c3, c4) {
                    // "LPT1".."LPT9"
                    (b'p', b't', b'1'..=b'9', None) => {
                        return Some(FixSolution {
                            problematic_sequence_len: 4,
                            fix: FixOutput::Triple(b"+r", remainder, b"+"),
                        })
                    }
                    (b'p', b't', b'1'..=b'9', Some(b'.')) => {
                        return Some(FixSolution {
                            problematic_sequence_len: 4,
                            fix: FixOutput::Triple(b"+r", &remainder[..4], b"+"),
                        })
                    }
                    _ => (),
                }
            }
            _ => (),
        }
    }
    // Single characters that are invalid in file names, each escaped with a
    // reversible `+…+` sequence.
    match next_char {
        b'\\' => Some(FixSolution {
            problematic_sequence_len: 1,
            fix: FixOutput::Single(b"+b+"),
        }),
        b'+' => Some(FixSolution {
            problematic_sequence_len: 1,
            fix: FixOutput::Single(b"++"),
        }),
        b'<' => Some(FixSolution {
            problematic_sequence_len: 1,
            fix: FixOutput::Single(b"+lt+"),
        }),
        b'>' => Some(FixSolution {
            problematic_sequence_len: 1,
            fix: FixOutput::Single(b"+gt+"),
        }),
        b':' => Some(FixSolution {
            problematic_sequence_len: 1,
            fix: FixOutput::Single(b"+c+"),
        }),
        b'\"' => Some(FixSolution {
            problematic_sequence_len: 1,
            fix: FixOutput::Single(b"+q+"),
        }),
        b'/' => Some(FixSolution {
            problematic_sequence_len: 1,
            fix: FixOutput::Single(b"+sl+"),
        }),
        b'|' => Some(FixSolution {
            problematic_sequence_len: 1,
            fix: FixOutput::Single(b"+p+"),
        }),
        b'?' => Some(FixSolution {
            problematic_sequence_len: 1,
            fix: FixOutput::Single(b"+m+"),
        }),
        b'*' => Some(FixSolution {
            problematic_sequence_len: 1,
            fix: FixOutput::Single(b"+a+"),
        }),
        // ASCII control characters are escaped by their numeric value; the
        // replacements must be static slices, hence the explicit table.
        i @ 1..=31 => Some(FixSolution {
            problematic_sequence_len: 1,
            fix: match i {
                1 => FixOutput::Single(b"+i1+"),
                2 => FixOutput::Single(b"+i2+"),
                3 => FixOutput::Single(b"+i3+"),
                4 => FixOutput::Single(b"+i4+"),
                5 => FixOutput::Single(b"+i5+"),
                6 => FixOutput::Single(b"+i6+"),
                7 => FixOutput::Single(b"+i7+"),
                8 => FixOutput::Single(b"+i8+"),
                9 => FixOutput::Single(b"+i9+"),
                10 => FixOutput::Single(b"+i10+"),
                11 => FixOutput::Single(b"+i11+"),
                12 => FixOutput::Single(b"+i12+"),
                13 => FixOutput::Single(b"+i13+"),
                14 => FixOutput::Single(b"+i14+"),
                15 => FixOutput::Single(b"+i15+"),
                16 => FixOutput::Single(b"+i16+"),
                17 => FixOutput::Single(b"+i17+"),
                18 => FixOutput::Single(b"+i18+"),
                19 => FixOutput::Single(b"+i19+"),
                20 => FixOutput::Single(b"+i20+"),
                21 => FixOutput::Single(b"+i21+"),
                22 => FixOutput::Single(b"+i22+"),
                23 => FixOutput::Single(b"+i23+"),
                24 => FixOutput::Single(b"+i24+"),
                25 => FixOutput::Single(b"+i25+"),
                26 => FixOutput::Single(b"+i26+"),
                27 => FixOutput::Single(b"+i27+"),
                28 => FixOutput::Single(b"+i28+"),
                29 => FixOutput::Single(b"+i29+"),
                30 => FixOutput::Single(b"+i30+"),
                31 => FixOutput::Single(b"+i31+"),
                _ => unreachable!("should be in range 1 - 31"),
            },
        }),
        // A trailing dot or space is invalid on Windows.
        b'.' if remainder.len() == 1 => Some(FixSolution {
            problematic_sequence_len: 1,
            fix: FixOutput::Single(b"+d+"),
        }),
        b' ' if remainder.len() == 1 => Some(FixSolution {
            problematic_sequence_len: 1,
            fix: FixOutput::Single(b"+s+"),
        }),
        _ => None,
    }
}
/// Scanner state used by `sanitize_path_component`.
enum SanitizeState {
    /// Nothing was encountered that would need fixing; `position` counts how
    /// many leading bytes have been scanned and found good so far.
    Good { position: usize },
    /// Something was fixed, and the buffer for fixes was allocated; `buffer`
    /// holds the sanitized output produced so far.
    Fixed { buffer: Vec<u8> },
}
/// Apply the fix based on previous sanitization state and fix output that was returned from
/// `check_for_sanitize_fix`.
///
/// Advances `remainder` past the problematic input bytes and returns the `Fixed`
/// state; on the first fix the output buffer is allocated and back-filled with
/// the already-scanned good prefix of `all_bytes`.
fn apply_sanitize_fix(
    problematic_sequence_len: usize,
    replacement: FixOutput,
    remainder: &mut &[u8],
    state: SanitizeState,
    all_bytes: &[u8],
) -> SanitizeState {
    // Obtain the output buffer: reuse it if a fix already happened, otherwise
    // allocate it and copy over the good prefix scanned so far.
    let mut buffer = match state {
        SanitizeState::Fixed { buffer } => buffer,
        SanitizeState::Good { position } => {
            let mut buffer = Vec::with_capacity(1024);
            buffer.extend_from_slice(&all_bytes[..position]);
            buffer
        }
    };
    // Append the replacement bytes for the problematic sequence.
    match replacement {
        FixOutput::Single(replacement) => buffer.extend_from_slice(replacement),
        FixOutput::Triple(a, b, c) => {
            buffer.extend_from_slice(a);
            buffer.extend_from_slice(b);
            buffer.extend_from_slice(c);
        }
    }
    // Skip the problematic input bytes; they are now represented by the fix.
    *remainder = &remainder[problematic_sequence_len..];
    SanitizeState::Fixed { buffer }
}
/// Create a version of path string that is safe to use as filesystem path component,
/// provided that it is not empty.
///
/// Returns the input borrowed and unchanged when no byte needs escaping;
/// otherwise builds an escaped copy. Problems are detected by
/// `check_for_sanitize_fix` and written out by `apply_sanitize_fix`.
pub fn sanitize_path_component(component: &str) -> Cow<str> {
    let bytes = component.as_bytes();
    // `remainder` is the not-yet-scanned tail of `bytes`.
    let mut remainder = bytes;
    let mut state = SanitizeState::Good { position: 0 };
    'main: loop {
        state = match state {
            // Fast path: nothing fixed yet; scan forward until a problem is found.
            SanitizeState::Good { .. } => {
                let mut index = 0;
                loop {
                    if remainder.len() == 0 {
                        // Scanned everything without a fix: borrow the input as-is.
                        return Cow::from(component);
                    }
                    if let Some(s) = check_for_sanitize_fix(index, remainder) {
                        state = apply_sanitize_fix(
                            s.problematic_sequence_len,
                            s.fix,
                            &mut remainder,
                            SanitizeState::Good { position: index },
                            bytes,
                        );
                        continue 'main;
                    }
                    index += 1;
                    remainder = &remainder[1..];
                }
            }
            // Slow path: a fix already happened; copy bytes into `buffer` one by
            // one, applying further fixes as they are found.
            SanitizeState::Fixed { mut buffer } => {
                if remainder.len() == 0 {
                    return Cow::from(
                        String::from_utf8(buffer).expect("expected valid utf8 sequence"),
                    );
                }
                // NOTE(review): the index passed here is the output buffer length,
                // not the position in the source string — presumably
                // `check_for_sanitize_fix` only uses it for start/end-of-name
                // checks; confirm against its implementation.
                if let Some(s) = check_for_sanitize_fix(buffer.len(), remainder) {
                    apply_sanitize_fix(
                        s.problematic_sequence_len,
                        s.fix,
                        &mut remainder,
                        SanitizeState::Fixed { buffer },
                        bytes,
                    )
                } else {
                    // Current byte is fine: copy it through verbatim.
                    buffer.extend_from_slice(&remainder[..1]);
                    remainder = &remainder[1..];
                    SanitizeState::Fixed { buffer }
                }
            }
        };
    }
}
use std::ffi::OsStr;
/// Reverses `sanitize_path_component`: decodes `+…+` escape sequences back into
/// the original path component.
///
/// Supported escapes: `++` → `+`, `+rNAME+` → reserved NAME (3 or 4 chars),
/// `+iN+`/`+iNN+` → control byte N (1..=31), two-letter escapes such as
/// `+d+` → `.`, and three-letter escapes such as `+lt+` → `<`.
///
/// Returns `None` when the component contains a malformed or truncated escape
/// sequence. (Previously, a truncated escape at the very end of the input was
/// silently dropped instead of rejected; it is now consistently rejected.)
pub fn unsanitize_path_component(component: &OsStr) -> Option<Cow<str>> {
    #[derive(Copy, Clone)]
    enum FixState {
        /// Just consumed a `+`; the next bytes must form a valid escape.
        Underscore,
        /// Copying plain bytes through.
        Scan,
    }
    enum UnsanitizeState {
        /// An escape was found; decoding continues into `bytes` from `position`.
        Fixed { bytes: Vec<u8>, state: FixState, position: usize },
        /// No `+` anywhere: the input can be returned as-is.
        ReuseSameString,
    }
    let part = component.to_string_lossy();
    if part.is_empty() {
        return Some(part);
    }
    // First pass: find the first `+`, or bail out with the borrowed input.
    let state = {
        let bytes = part.as_ref().as_bytes();
        let bytes_len = bytes.len();
        let mut position = 0;
        loop {
            if bytes[position] == b'+' {
                let mut ok_data = Vec::with_capacity(bytes_len);
                ok_data.extend(bytes.iter().take(position));
                break UnsanitizeState::Fixed {
                    bytes: ok_data,
                    state: FixState::Underscore,
                    position: position + 1,
                };
            }
            position += 1;
            if position >= bytes_len {
                break UnsanitizeState::ReuseSameString;
            }
        }
    };
    match state {
        UnsanitizeState::ReuseSameString => return Some(part),
        UnsanitizeState::Fixed { mut bytes, mut state, mut position } => {
            let src_bytes = part.as_ref().as_bytes();
            let src_bytes_len = src_bytes.len();
            loop {
                match state {
                    FixState::Underscore => {
                        let remaining_len = src_bytes_len - position;
                        if remaining_len == 0 {
                            // Input ended right after `+`: truncated escape.
                            return None;
                        }
                        let next_char = src_bytes[position];
                        if next_char == b'+' {
                            // `++` decodes to a literal `+`.
                            bytes.push(b'+');
                            position += 1;
                            state = FixState::Scan;
                        } else if remaining_len > 4 && next_char == b'r' && src_bytes[position + 4] == b'+' {
                            // `+rXXX+`: 3-letter reserved name (CON, AUX, ...).
                            // NOTE(review): slicing raw bytes could split a multi-byte
                            // UTF-8 char and panic at the final `expect` — presumably
                            // sanitized names are ASCII here; confirm.
                            bytes.extend_from_slice(&src_bytes[position + 1..position + 4]);
                            position += 5;
                            state = FixState::Scan;
                        } else if remaining_len > 5 && next_char == b'r' && src_bytes[position + 5] == b'+' {
                            // `+rXXXX+`: 4-letter reserved name (COM1, LPT1, ...).
                            bytes.extend_from_slice(&src_bytes[position + 1..position + 5]);
                            position += 6;
                            state = FixState::Scan;
                        } else if remaining_len > 2 && next_char == b'i' {
                            // `+iN+` / `+iNN+`: control byte with value N in 1..=31.
                            let next_char2 = src_bytes[position + 1];
                            let next_char3 = src_bytes[position + 2];
                            match (next_char2, next_char3) {
                                (b'1', b'+') => bytes.push(1),
                                (b'2', b'+') => bytes.push(2),
                                (b'3', b'+') => bytes.push(3),
                                (b'4', b'+') => bytes.push(4),
                                (b'5', b'+') => bytes.push(5),
                                (b'6', b'+') => bytes.push(6),
                                (b'7', b'+') => bytes.push(7),
                                (b'8', b'+') => bytes.push(8),
                                (b'9', b'+') => bytes.push(9),
                                _ => {
                                    // Two-digit control codes: `+i10+` .. `+i31+`.
                                    if remaining_len > 3 {
                                        let next_char4 = src_bytes[position + 3];
                                        match (next_char2, next_char3, next_char4) {
                                            (b'1', b'0', b'+') => bytes.push(10),
                                            (b'1', b'1', b'+') => bytes.push(11),
                                            (b'1', b'2', b'+') => bytes.push(12),
                                            (b'1', b'3', b'+') => bytes.push(13),
                                            (b'1', b'4', b'+') => bytes.push(14),
                                            (b'1', b'5', b'+') => bytes.push(15),
                                            (b'1', b'6', b'+') => bytes.push(16),
                                            (b'1', b'7', b'+') => bytes.push(17),
                                            (b'1', b'8', b'+') => bytes.push(18),
                                            (b'1', b'9', b'+') => bytes.push(19),
                                            (b'2', b'0', b'+') => bytes.push(20),
                                            (b'2', b'1', b'+') => bytes.push(21),
                                            (b'2', b'2', b'+') => bytes.push(22),
                                            (b'2', b'3', b'+') => bytes.push(23),
                                            (b'2', b'4', b'+') => bytes.push(24),
                                            (b'2', b'5', b'+') => bytes.push(25),
                                            (b'2', b'6', b'+') => bytes.push(26),
                                            (b'2', b'7', b'+') => bytes.push(27),
                                            (b'2', b'8', b'+') => bytes.push(28),
                                            (b'2', b'9', b'+') => bytes.push(29),
                                            (b'3', b'0', b'+') => bytes.push(30),
                                            (b'3', b'1', b'+') => bytes.push(31),
                                            _ => return None,
                                        }
                                        position += 1;
                                    } else {
                                        // FIX: a truncated `+iNN` at end of input used to be
                                        // silently consumed; reject it like other bad escapes.
                                        return None;
                                    }
                                }
                            }
                            position += 3;
                            state = FixState::Scan;
                        } else if remaining_len > 1 {
                            let next_char2 = src_bytes[position + 1];
                            match (next_char, next_char2) {
                                (b'd', b'+') => bytes.push(b'.'),
                                (b'b', b'+') => bytes.push(b'\\'),
                                (b'c', b'+') => bytes.push(b':'),
                                (b'q', b'+') => bytes.push(b'\"'),
                                (b'p', b'+') => bytes.push(b'|'),
                                (b'm', b'+') => bytes.push(b'?'),
                                (b'a', b'+') => bytes.push(b'*'),
                                (b's', b'+') => bytes.push(b' '),
                                _ => {
                                    // Three-letter escapes: `+lt+`, `+gt+`, `+sl+`.
                                    if remaining_len > 2 {
                                        let next_char3 = src_bytes[position + 2];
                                        match (next_char, next_char2, next_char3) {
                                            (b'l', b't', b'+') => bytes.push(b'<'),
                                            (b'g', b't', b'+') => bytes.push(b'>'),
                                            (b's', b'l', b'+') => bytes.push(b'/'),
                                            _ => return None,
                                        }
                                        position += 1;
                                    } else {
                                        // FIX: a truncated two-byte escape at end of input
                                        // used to be silently consumed; reject it instead.
                                        return None;
                                    }
                                }
                            }
                            position += 2;
                            state = FixState::Scan;
                        } else {
                            return None;
                        }
                    }
                    FixState::Scan => {
                        if position == src_bytes_len {
                            break;
                        }
                        let next_char = src_bytes[position];
                        if next_char == b'+' {
                            state = FixState::Underscore;
                        } else {
                            bytes.push(next_char);
                        }
                        position += 1;
                    }
                }
            }
            Some(Cow::from(
                String::from_utf8(bytes).expect("bytes already undergone lossy conversion to utf8"),
            ))
        }
    }
}
#[cfg(test)]
mod normalize_path_tests {
    use super::{sanitize_path_component, unsanitize_path_component};
    use std::ffi::OsString;

    /// Asserts that `unsanitized` escapes to `sanitized` and that `sanitized`
    /// decodes back to `unsanitized` (a full round-trip).
    ///
    /// FIX: the previous version compared `&str` against `Option<&Cow<str>>`,
    /// which has no `PartialEq` impl; unwrap the `Option` explicitly.
    fn check(sanitized: &str, unsanitized: &str) {
        assert_eq!(sanitized, sanitize_path_component(unsanitized).as_ref());
        let restored = unsanitize_path_component(&OsString::from(sanitized))
            .expect("expected sanitized component to unsanitize");
        assert_eq!(unsanitized, restored.as_ref());
    }

    #[test]
    fn test_common() {
        // this is not a valid path, but that is not a concern of this function
        check("", "");
        // + is the start of the escape sequence, so this escapes the escape sequence
        check("++", "+");
        check("++++", "++");
        // kill path traversing
        check("+d+", ".");
        check(".+d+", "..");
        // simple unsanitized names
        check("hello world", "hello world");
        check("hello-world", "hello-world");
        check("hello_world", "hello_world");
        // a `+` that does not begin a valid escape sequence fails to decode
        assert_eq!(
            None,
            unsanitize_path_component(&OsString::from("quad+.vert"))
        );
    }

    #[test]
    fn test_windows() {
        // single and repeated escapes for characters invalid on windows
        check("+b+", "\\");
        check("+b++b+", "\\\\");
        check("+lt+", "<");
        check("+lt++lt+", "<<");
        check("+gt+", ">");
        check("+gt++gt+", ">>");
        check("+c+", ":");
        check("+c++c+", "::");
        check("+q+", "\"");
        check("+q++q+", "\"\"");
        check("+sl+", "/");
        check("+sl++sl+", "//");
        check("+p+", "|");
        check("+p++p+", "||");
        check("+m+", "?");
        check("+m++m+", "??");
        check("+a+", "*");
        check("+a++a+", "**");
        // control characters 1..=31 escape to `+iN+`
        for i in 1u8..=31 {
            let escaped = format!("+i{}+", i);
            let raw = (i as char).to_string();
            check(&escaped, &raw);
            check(&escaped.repeat(2), &raw.repeat(2));
        }
        // only the trailing space / dot of a name is escaped
        check("hello+s+", "hello ");
        check("hello+d+", "hello.");
        check("hello +s+", "hello  ");
        check("hello.+d+", "hello..");
        check(" hello +s+", " hello  ");
        check(".hello.+d+", ".hello..");
        // reserved device names are escaped in any casing
        for reserved_name in &[
            "CON", "PRN", "AUX", "NUL", "COM1", "COM2", "COM3", "COM4", "COM5", "COM6", "COM7",
            "COM8", "COM9", "LPT1", "LPT2", "LPT3", "LPT4", "LPT5", "LPT6", "LPT7", "LPT8", "LPT9",
        ] {
            check(&format!("+r{}+", reserved_name), reserved_name);
            let lowercase = reserved_name.to_lowercase();
            check(&format!("+r{}+", lowercase), &lowercase);
            let titlecase = title_case(reserved_name);
            check(&format!("+r{}+", titlecase), &titlecase);
            // reserved name with an extension is still reserved
            check(
                &format!("+r{}+.txt", reserved_name),
                &format!("{}.txt", reserved_name),
            );
            // reserved name with a trailing dot needs both escapes
            check(
                &format!("+r{}++d+", reserved_name),
                &format!("{}.", reserved_name),
            );
            check(
                &format!("+r{}+.a", reserved_name),
                &format!("{}.a", reserved_name),
            );
            // reserved name embedded in a longer name is left untouched
            let embedded = format!("hi {} and bye", reserved_name);
            check(&embedded, &embedded);
        }
    }

    /// Upper-cases the first character and lower-cases the rest ("CON" -> "Con").
    fn title_case(value: &str) -> String {
        let mut chars = value.chars();
        match chars.next() {
            Some(first) => first
                .to_uppercase()
                .chain(chars.flat_map(|c| c.to_lowercase()))
                .collect(),
            None => String::new(),
        }
    }
}

View file

@ -0,0 +1,371 @@
use crate::backend::{Backend, BackendSyncPoint, Modification};
use crate::path::{ResourcePath, ResourcePathBuf};
use slab::Slab;
use std::collections::{HashMap, BTreeMap, VecDeque};
use std::hash::BuildHasherDefault;
use std::time::Instant;
use twox_hash::XxHash;
mod resource_metadata;
use self::resource_metadata::{ResourceMetadata, ResourceUserMetadata};
/// Key identifying a backend in the ordered backend map.
///
/// Ordering is defined by `order` first and `id` second (see the `Ord` impl),
/// giving backends a deterministic priority sequence.
#[derive(Clone, Debug, Eq, PartialEq)]
struct LoaderKey {
    // Loader id as passed to `insert_loader`/`remove_loader`.
    id: String,
    // Priority; primary sort key.
    order: isize,
}
impl Ord for LoaderKey {
    /// Orders loaders by `order` first, falling back to `id` so that the
    /// ordering is total and deterministic for equal priorities.
    fn cmp(&self, other: &LoaderKey) -> ::std::cmp::Ordering {
        self.order
            .cmp(&other.order)
            .then_with(|| self.id.cmp(&other.id))
    }
}
impl PartialOrd for LoaderKey {
    /// Delegates to the total order defined by `Ord`.
    fn partial_cmp(&self, other: &LoaderKey) -> Option<::std::cmp::Ordering> {
        Some(Ord::cmp(self, other))
    }
}
/// Handle identifying a single user (proxy) of a single resource.
#[derive(Copy, Clone)]
pub struct UserKey {
    // Slab index of the resource metadata.
    pub resource_id: usize,
    // Slab index of this user inside the resource's `users` slab.
    user_id: usize,
}
/// Marker for a batch of observed changes that must be acknowledged later via
/// `notify_changes_synced`.
#[derive(Eq, PartialEq, Copy, Clone)]
pub enum InternalSyncPoint {
    /// Changes reported by a single backend, identified by the hash of its id.
    Backend {
        backend_hash: u64,
        sync_point: BackendSyncPoint,
    },
    /// A loader was added or removed at `time`; everything is outdated.
    Everything {
        time: Instant,
    },
}
pub struct SharedResources {
resource_metadata: Slab<ResourceMetadata>,
path_resource_ids: HashMap<ResourcePathBuf, usize, BuildHasherDefault<XxHash>>,
backends: BTreeMap<LoaderKey, Box<Backend>>,
outdated_at: Option<Instant>,
modification_queue: VecDeque<Modification>,
}
/// Stable 64-bit identifier for a backend, derived from its loader id string.
///
/// The seed is arbitrary but must stay fixed, since hashes are compared across
/// separate calls within the same process.
fn backend_hash(id: &str) -> u64 {
    use std::hash::Hasher;
    let mut state = XxHash::with_seed(8745287);
    state.write(id.as_bytes());
    state.finish()
}
impl SharedResources {
    /// Creates an empty resource registry with no backends attached.
    pub fn new() -> SharedResources {
        SharedResources {
            resource_metadata: Slab::with_capacity(1024), // 1024 files is enough for everyone
            path_resource_ids: HashMap::default(),
            backends: BTreeMap::new(),
            outdated_at: None,
            modification_queue: VecDeque::new(),
        }
    }

    /// Polls backends for new modifications and returns a sync point if any
    /// tracked resource became outdated.
    ///
    /// Sync points whose modifications touch no tracked resource are
    /// acknowledged to the backend immediately so they are not reported again.
    pub fn new_changes(&mut self) -> Option<InternalSyncPoint> {
        // A loader was recently added/removed: everything is outdated at once.
        if let Some(instant) = self.outdated_at {
            return Some(InternalSyncPoint::Everything { time: instant });
        }
        let mut new_change_point = None;
        // Take the queue out of `self` so backends can fill it while we hold
        // a mutable borrow of `self.backends`; it is put back below.
        let mut mod_queue = ::std::mem::replace(&mut self.modification_queue, VecDeque::new());
        for (key, backend) in self.backends.iter_mut() {
            mod_queue.clear();
            if let Some(sync_point) = backend.new_changes(&mut mod_queue) {
                new_change_point = Some(InternalSyncPoint::Backend {
                    backend_hash: backend_hash(&key.id),
                    sync_point,
                });
                // Only one backend is processed per call; the rest are picked
                // up by subsequent calls.
                break;
            }
        }
        if let Some(InternalSyncPoint::Backend { backend_hash: bh, sync_point }) = new_change_point {
            let mut some_resource_is_modified = false;
            while let Some(modification) = mod_queue.pop_front() {
                match modification {
                    // Create, write and remove all invalidate the one affected path.
                    Modification::Create(p)
                    | Modification::Write(p)
                    | Modification::Remove(p) => {
                        if let Some(resource_id) = self.path_resource_ids.get(&p) {
                            if let Some(ref mut meta) = self.resource_metadata.get_mut(*resource_id) {
                                meta.everyone_should_reload(sync_point.instant);
                                some_resource_is_modified = true;
                            }
                        }
                    }
                    // A rename invalidates both endpoints, but only when both
                    // of them are tracked.
                    Modification::Rename { from, to } => {
                        if let (Some(resource_id), Some(resource_id_to)) = (
                            self.path_resource_ids.get(&from),
                            self.path_resource_ids.get(&to),
                        ) {
                            if let Some(ref mut meta) = self.resource_metadata.get_mut(*resource_id) {
                                meta.everyone_should_reload(sync_point.instant);
                                some_resource_is_modified = true;
                            }
                            if let Some(ref mut meta) = self.resource_metadata.get_mut(*resource_id_to) {
                                meta.everyone_should_reload(sync_point.instant);
                                some_resource_is_modified = true;
                            }
                        }
                    }
                }
            }
            if !some_resource_is_modified {
                // Nothing we track changed: acknowledge the backend right away
                // so it does not keep reporting the same sync point.
                for (key, backend) in self.backends.iter_mut() {
                    if backend_hash(&key.id) == bh {
                        backend.notify_changes_synced(sync_point);
                        break;
                    }
                }
                new_change_point = None;
            }
        }
        // Put the queue back so its allocation is reused on the next call.
        self.modification_queue = mod_queue;
        new_change_point
    }

    /// Acknowledges that a previously returned sync point has been handled.
    pub fn notify_changes_synced(&mut self, sync_point: InternalSyncPoint) {
        match sync_point {
            InternalSyncPoint::Everything { time } => {
                // Only clear the flag if nothing newer was flagged meanwhile.
                if self.outdated_at == Some(time) {
                    self.outdated_at = None;
                }
            }
            InternalSyncPoint::Backend {
                backend_hash: bh,
                sync_point: sp,
            } => {
                for (key, backend) in self.backends.iter_mut() {
                    if backend_hash(&key.id) == bh {
                        backend.notify_changes_synced(sp);
                        break;
                    }
                }
            }
        }
    }

    /// Registers a user for the resource at `path`, creating the resource
    /// metadata on first use.
    pub fn new_resource_user<P: AsRef<ResourcePath>>(&mut self, path: P) -> UserKey {
        let clean_path_str: &ResourcePath = path.as_ref().as_clean_str().into();
        let maybe_id = self.path_resource_ids.get(clean_path_str).cloned();
        match maybe_id {
            Some(id) => self.append_resource_user(id),
            None => {
                let mut metadata = ResourceMetadata::new(clean_path_str);
                let user_id = metadata.new_user();
                let resource_id = self.resource_metadata.insert(metadata);
                self.path_resource_ids
                    .insert(ResourcePathBuf::from(clean_path_str), resource_id);
                UserKey {
                    resource_id,
                    user_id,
                }
            }
        }
    }

    /// Appends user to resource, the resource id must exist.
    ///
    /// # Panics
    ///
    /// Panics when `resource_id` does not exist.
    pub fn append_resource_user(&mut self, resource_id: usize) -> UserKey {
        UserKey {
            resource_id,
            user_id: self
                .resource_metadata
                .get_mut(resource_id)
                .expect("expected resource_id to exist when appending new user")
                .new_user(),
        }
    }

    /// Removes a resource user; the resource metadata itself is dropped when
    /// the last user is gone.
    pub fn remove_resource_user(&mut self, key: UserKey) {
        let has_users = {
            if let Some(metadata) = self.resource_metadata.get_mut(key.resource_id) {
                metadata.remove_user(key.user_id);
                Some(metadata.has_users())
            } else {
                None
            }
        };
        if let Some(false) = has_users {
            let metadata = self.resource_metadata.remove(key.resource_id);
            self.path_resource_ids.remove(&metadata.path);
        }
    }

    /// Returns the per-user metadata for `key`, if the resource and user exist.
    pub fn get_path_user_metadata(&self, key: UserKey) -> Option<&ResourceUserMetadata> {
        self.resource_metadata
            .get(key.resource_id)
            .and_then(|path_metadata| path_metadata.get_user_metadata(key.user_id))
    }

    // Mutable counterpart of `get_path_user_metadata`, internal only.
    fn get_path_user_metadata_mut(&mut self, key: UserKey) -> Option<&mut ResourceUserMetadata> {
        self.resource_metadata
            .get_mut(key.resource_id)
            .and_then(|path_metadata| path_metadata.get_user_metadata_mut(key.user_id))
    }

    /// Adds a backend; every tracked resource that exists in it is marked
    /// outdated so users reload from the new source set.
    pub fn insert_loader<L: Backend + 'static>(
        &mut self,
        loader_id: &str,
        order: isize,
        backend: L,
    ) {
        let outdated_at = Instant::now();
        for (path, resource_id) in self.path_resource_ids.iter() {
            if backend.exists(&path) {
                if let Some(metadata) = self.resource_metadata.get_mut(*resource_id) {
                    metadata.everyone_should_reload(outdated_at);
                }
            }
        }
        self.backends.insert(
            LoaderKey {
                id: loader_id.into(),
                order,
            },
            Box::new(backend) as Box<dyn Backend>,
        );
        if !self.path_resource_ids.is_empty() {
            self.outdated_at = Some(outdated_at);
        }
    }

    /// Removes all backends registered under `loader_id`; tracked resources
    /// that existed in them are marked outdated.
    pub fn remove_loader(&mut self, loader_id: &str) {
        let outdated_at = Instant::now();
        let remove_keys: Vec<_> = self
            .backends
            .keys()
            .filter(|k| k.id == loader_id)
            .cloned()
            .collect();
        for removed_key in remove_keys {
            if let Some(removed_backend) = self.backends.remove(&removed_key) {
                for (path, resource_id) in self.path_resource_ids.iter() {
                    if removed_backend.exists(&path) {
                        if let Some(metadata) = self.resource_metadata.get_mut(*resource_id) {
                            metadata.everyone_should_reload(outdated_at);
                        }
                    }
                }
            }
        }
        if !self.path_resource_ids.is_empty() {
            self.outdated_at = Some(outdated_at);
        }
    }

    /// Iterates over all backends (highest priority first), each paired with
    /// the resource path and the user's outdated-at timestamp for `key`.
    pub fn resource_backends(
        &mut self,
        key: UserKey,
    ) -> impl Iterator<Item = (&ResourcePath, Option<Instant>, &mut Box<dyn Backend>)> {
        let path_with_modification_time =
            self.resource_metadata.get(key.resource_id).and_then(|m| {
                m.users
                    .get(key.user_id)
                    .map(|u| (m.path.as_ref(), u.outdated_at))
            });
        self.backends.iter_mut().rev().filter_map(move |(_, b)| {
            path_with_modification_time.map(move |(path, instant)| (path, instant, b))
        })
    }

    /// Looks up the backend whose loader id is exactly `backend_id`, paired
    /// with the resource path and outdated-at timestamp for `key`.
    #[allow(dead_code)] // kept for symmetry with the *_containing_resource variant
    pub fn get_resource_path_backend(
        &self,
        backend_id: &str,
        key: UserKey,
    ) -> Option<(&ResourcePath, Option<Instant>, &Box<dyn Backend>)> {
        let path_with_modification_time =
            self.resource_metadata.get(key.resource_id).and_then(|m| {
                m.users
                    .get(key.user_id)
                    .map(|u| (m.path.as_ref(), u.outdated_at))
            });
        if let (Some((path, modification_time)), Some((_, backend))) = (
            path_with_modification_time,
            self.backends.iter().find(|(k, _)| &k.id == backend_id),
        ) {
            return Some((path, modification_time, backend));
        }
        None
    }

    /// Returns the resource path for `key`, if the resource still exists.
    pub fn get_resource_path(&self, key: UserKey) -> Option<&ResourcePath> {
        self.resource_metadata
            .get(key.resource_id)
            .map(|m| m.path.as_ref())
    }

    /// Finds the highest-priority backend that contains the resource for `key`.
    pub fn get_resource_path_backend_containing_resource(
        &self,
        key: UserKey,
    ) -> Option<(&ResourcePath, Option<Instant>, &Box<dyn Backend>)> {
        let path_with_modification_time =
            self.resource_metadata.get(key.resource_id).and_then(|m| {
                m.users
                    .get(key.user_id)
                    .map(|u| (m.path.as_ref(), u.outdated_at))
            });
        if let Some((path, modification_time)) = path_with_modification_time {
            for backend in self.backends.values().rev() {
                if backend.exists(path) {
                    return Some((path, modification_time, backend));
                }
            }
        }
        None
    }

    /// Marks the user's copy as up to date, unless a newer modification was
    /// recorded since the read started.
    pub fn notify_did_read(&mut self, key: UserKey, modified_time: Option<Instant>) {
        if let Some(metadata) = self.get_path_user_metadata_mut(key) {
            if metadata.outdated_at == modified_time {
                metadata.outdated_at = None;
            }
        }
    }

    /// Marks the resource outdated for every user except the writer.
    pub fn notify_did_write(&mut self, key: UserKey, modified_time: Instant) {
        if let Some(metadata) = self.resource_metadata.get_mut(key.resource_id) {
            metadata.everyone_should_reload_except(key.user_id, modified_time)
        }
    }
}

View file

@ -0,0 +1,69 @@
use slab::Slab;
use std::time::Instant;
use crate::{ResourcePath, ResourcePathBuf};
/// Information about the latest resource update.
///
/// If it is `None`, there are no updates; otherwise it contains a timestamp of
/// the latest update that this user has not yet reloaded.
pub struct ResourceUserMetadata {
    pub outdated_at: Option<Instant>,
}
/// Shared information about the resource.
///
/// Each resource can be owned by multiple proxies (called `Resource`). In that case, every proxy
/// gets an identifier from the `users` slab, and can check for resource updates in
/// `ResourceUserMetadata`.
pub struct ResourceMetadata {
    // Canonical resource path this metadata describes.
    pub path: ResourcePathBuf,
    // Per-user (per-proxy) reload state, keyed by slab id.
    pub users: Slab<ResourceUserMetadata>,
}
impl ResourceMetadata {
    /// Creates metadata for `path` with room for a couple of users, which is
    /// the common case.
    pub fn new(path: &ResourcePath) -> ResourceMetadata {
        ResourceMetadata {
            path: ResourcePathBuf::from(path),
            users: Slab::with_capacity(2),
        }
    }

    /// Registers a new user and returns its id; the user starts up to date.
    pub fn new_user(&mut self) -> usize {
        self.users
            .insert(ResourceUserMetadata { outdated_at: None })
    }

    /// Removes the user `id`, shrinking the slab when it is large and mostly empty.
    pub fn remove_user(&mut self, id: usize) {
        self.users.remove(id);
        // Only bother shrinking when the slab is big and less than half full.
        if self.users.len() > 8 && self.users.capacity() / self.users.len() > 2 {
            self.users.shrink_to_fit()
        }
    }

    /// Returns the metadata of user `id`, if it exists.
    pub fn get_user_metadata(&self, id: usize) -> Option<&ResourceUserMetadata> {
        self.users.get(id)
    }

    /// Mutable counterpart of `get_user_metadata`.
    pub fn get_user_metadata_mut(&mut self, id: usize) -> Option<&mut ResourceUserMetadata> {
        self.users.get_mut(id)
    }

    /// Returns `true` while at least one user still references this resource.
    /// (Takes `&self` now — no mutation happens; callers with `&mut` still work.)
    pub fn has_users(&self) -> bool {
        !self.users.is_empty()
    }

    /// Marks the resource outdated for every user except `id` (typically the
    /// user that has just written the new contents); `id` itself is marked clean.
    pub fn everyone_should_reload_except(&mut self, id: usize, outdated_at: Instant) {
        for (user_id, user) in self.users.iter_mut() {
            user.outdated_at = if user_id != id {
                Some(outdated_at)
            } else {
                None
            };
        }
    }

    /// Marks the resource outdated for every user.
    pub fn everyone_should_reload(&mut self, outdated_at: Instant) {
        for (_, user) in self.users.iter_mut() {
            user.outdated_at = Some(outdated_at);
        }
    }
}