13 Commits

Author SHA1 Message Date
timvisee
5a5fa785b7 Bump version to 0.1.3 2021-11-15 15:18:53 +01:00
timvisee
78e9abec59 Fix binary release on GitLab CI 2021-11-15 15:18:19 +01:00
timvisee
dde9fdeab4 Bump version to 0.1.2 2021-11-15 14:53:05 +01:00
timvisee
901fb62f25 Use future tokio supported sync types throughout server handling logic 2021-11-15 14:41:01 +01:00
timvisee
dabeabeff4 Increase server monitoring timeouts to 20 seconds
Should improve polling reliability for overloaded servers.
2021-11-15 13:59:51 +01:00
timvisee
96d7fc9dec Grab exclusive lock for RCON invocations to server 2021-11-15 13:57:41 +01:00
timvisee
5ffc6ee911 Add RCON cooldown, do not require active PID to stop server
This hopefully improves server stopping reliability.
2021-11-15 13:42:58 +01:00
timvisee
b71d0d1013 Add delay between RCON commands, hopefully improve reliability
The Minecraft RCON implementation is very broken/brittle. With this we
hope to improve reliability.
2021-11-15 13:21:12 +01:00
Tim Visée
261acafab0 Merge branch 'ci-arm' into 'master'
Add ARMv7 and aarch64 builds to CI

See merge request timvisee/lazymc!2
2021-11-15 10:07:30 +00:00
timvisee
a04a5f93e9 Add ARMv7 and aarch64 builds to release job 2021-11-15 00:40:52 +01:00
timvisee
10c57f87ea Add GitLab CI builds for ARMv7 and aarch64 2021-11-15 00:20:23 +01:00
timvisee
88fc5892a3 Simplify waiting for server logic even further 2021-11-14 16:37:09 +01:00
timvisee
38d90681c7 Improve waiting for server when holding client
Instead of constantly polling the server state until it is ready, this
now subscribes to server state changes and uses a proper timeout.
2021-11-14 16:21:54 +01:00
9 changed files with 305 additions and 111 deletions

View File

@@ -106,6 +106,76 @@ build-x86_64-linux-musl:
- lazymc-$TARGET
expire_in: 1 month
# Build using Rust stable on Linux for ARMv7
build-armv7-linux-gnu:
stage: build
image: ubuntu
needs: []
variables:
TARGET: armv7-unknown-linux-gnueabihf
cache:
<<: *rust-build-cache
before_script:
- apt-get update
- apt-get install -y --no-install-recommends build-essential
- |
apt-get install -y curl
curl https://sh.rustup.rs -sSf | sh -s -- -y
source $HOME/.cargo/env
- |
rustc --version
cargo --version
script:
- apt-get install -y gcc-arm-linux-gnueabihf
- rustup target add $TARGET
- mkdir -p ~/.cargo
- 'echo "[target.$TARGET]" >> ~/.cargo/config'
- 'echo "linker = \"arm-linux-gnueabihf-gcc\"" >> ~/.cargo/config'
- cargo build --target=$TARGET --release --locked --verbose
- mv target/$TARGET/release/lazymc ./lazymc-$TARGET
artifacts:
name: lazymc-armv7-linux-gnu
paths:
- lazymc-$TARGET
expire_in: 1 month
# Build using Rust stable on Linux for aarch64
build-aarch64-linux-gnu:
stage: build
image: ubuntu
needs: []
variables:
TARGET: aarch64-unknown-linux-gnu
cache:
<<: *rust-build-cache
before_script:
- apt-get update
- apt-get install -y --no-install-recommends build-essential
- |
apt-get install -y curl
curl https://sh.rustup.rs -sSf | sh -s -- -y
source $HOME/.cargo/env
- |
rustc --version
cargo --version
script:
- apt-get install -y gcc-aarch64-linux-gnu
- rustup target add $TARGET
- mkdir -p ~/.cargo
- 'echo "[target.$TARGET]" >> ~/.cargo/config'
- 'echo "linker = \"aarch64-linux-gnu-gcc\"" >> ~/.cargo/config'
- cargo build --target=$TARGET --release --locked --verbose
- mv target/$TARGET/release/lazymc ./lazymc-$TARGET
artifacts:
name: lazymc-aarch64-linux-gnu
paths:
- lazymc-$TARGET
expire_in: 1 month
# Build using Rust stable on macOS
build-macos:
stage: build
@@ -178,6 +248,8 @@ release-gitlab-generic-package:
dependencies:
- build-x86_64-linux-gnu
- build-x86_64-linux-musl
- build-armv7-linux-gnu
- build-aarch64-linux-gnu
- build-macos
- build-x86_64-windows
only:
@@ -185,6 +257,8 @@ release-gitlab-generic-package:
variables:
LINUX_GNU_BIN: "lazymc-x86_64-unknown-linux-gnu"
LINUX_MUSL_BIN: "lazymc-x86_64-unknown-linux-musl"
LINUX_ARMV7_GNU_BIN: "lazymc-armv7-unknown-linux-gnueabihf"
LINUX_AARCH64_GNU_BIN: "lazymc-aarch64-unknown-linux-gnu"
MACOS_BIN: "lazymc-x86_64-apple-darwin"
WINDOWS_BIN: "lazymc-x86_64-pc-windows-msvc.exe"
before_script: []
@@ -198,6 +272,10 @@ release-gitlab-generic-package:
curl --header "JOB-TOKEN: ${CI_JOB_TOKEN}" --upload-file ${LINUX_GNU_BIN} ${PACKAGE_REGISTRY_URL}/${LINUX_GNU_BIN}
- |
curl --header "JOB-TOKEN: ${CI_JOB_TOKEN}" --upload-file ${LINUX_MUSL_BIN} ${PACKAGE_REGISTRY_URL}/${LINUX_MUSL_BIN}
- |
curl --header "JOB-TOKEN: ${CI_JOB_TOKEN}" --upload-file ${LINUX_ARMV7_GNU_BIN} ${PACKAGE_REGISTRY_URL}/${LINUX_ARMV7_GNU_BIN}
- |
curl --header "JOB-TOKEN: ${CI_JOB_TOKEN}" --upload-file ${LINUX_AARCH64_GNU_BIN} ${PACKAGE_REGISTRY_URL}/${LINUX_AARCH64_GNU_BIN}
- |
curl --header "JOB-TOKEN: ${CI_JOB_TOKEN}" --upload-file ${MACOS_BIN} ${PACKAGE_REGISTRY_URL}/${MACOS_BIN}
- |
@@ -212,6 +290,8 @@ release-gitlab-release:
variables:
LINUX_GNU_BIN: "lazymc-x86_64-unknown-linux-gnu"
LINUX_MUSL_BIN: "lazymc-x86_64-unknown-linux-musl"
LINUX_ARMV7_GNU_BIN: "lazymc-armv7-unknown-linux-gnueabihf"
LINUX_AARCH64_GNU_BIN: "lazymc-aarch64-unknown-linux-gnu"
MACOS_BIN: "lazymc-x86_64-apple-darwin"
WINDOWS_BIN: "lazymc-x86_64-pc-windows-msvc.exe"
before_script: []
@@ -225,6 +305,8 @@ release-gitlab-release:
release-cli create --name "lazymc $CI_COMMIT_TAG" --tag-name $CI_COMMIT_TAG \
--assets-link "{\"name\":\"${LINUX_GNU_BIN}\",\"url\":\"${PACKAGE_REGISTRY_URL}/${LINUX_GNU_BIN}\"}" \
--assets-link "{\"name\":\"${LINUX_MUSL_BIN}\",\"url\":\"${PACKAGE_REGISTRY_URL}/${LINUX_MUSL_BIN}\"}" \
--assets-link "{\"name\":\"${LINUX_ARMV7_GNU_BIN}\",\"url\":\"${PACKAGE_REGISTRY_URL}/${LINUX_ARMV7_GNU_BIN}\"}" \
--assets-link "{\"name\":\"${LINUX_AARCH64_GNU_BIN}\",\"url\":\"${PACKAGE_REGISTRY_URL}/${LINUX_AARCH64_GNU_BIN}\"}" \
--assets-link "{\"name\":\"${MACOS_BIN}\",\"url\":\"${PACKAGE_REGISTRY_URL}/${MACOS_BIN}\"}" \
--assets-link "{\"name\":\"${WINDOWS_BIN}\",\"url\":\"${PACKAGE_REGISTRY_URL}/${WINDOWS_BIN}\"}"
@@ -236,6 +318,8 @@ release-github:
dependencies:
- build-x86_64-linux-gnu
- build-x86_64-linux-musl
- build-armv7-linux-gnu
- build-aarch64-linux-gnu
- build-macos
- build-x86_64-windows
before_script: []
@@ -253,5 +337,7 @@ release-github:
- ./github-release release --token "$GITHUB_TOKEN" --owner timvisee --repo lazymc --tag "$CI_COMMIT_REF_NAME" --title "lazymc $CI_COMMIT_REF_NAME"
- ./github-release upload --token "$GITHUB_TOKEN" --owner timvisee --repo lazymc --tag "$CI_COMMIT_REF_NAME" --file ./lazymc-x86_64-unknown-linux-gnu --name lazymc-$CI_COMMIT_REF_NAME-linux-x64
- ./github-release upload --token "$GITHUB_TOKEN" --owner timvisee --repo lazymc --tag "$CI_COMMIT_REF_NAME" --file ./lazymc-x86_64-unknown-linux-musl --name lazymc-$CI_COMMIT_REF_NAME-linux-x64-static
- ./github-release upload --token "$GITHUB_TOKEN" --owner timvisee --repo lazymc --tag "$CI_COMMIT_REF_NAME" --file ./lazymc-armv7-unknown-linux-gnueabihf --name lazymc-$CI_COMMIT_REF_NAME-linux-armv7
- ./github-release upload --token "$GITHUB_TOKEN" --owner timvisee --repo lazymc --tag "$CI_COMMIT_REF_NAME" --file ./lazymc-aarch64-unknown-linux-gnu --name lazymc-$CI_COMMIT_REF_NAME-linux-aarch64
- ./github-release upload --token "$GITHUB_TOKEN" --owner timvisee --repo lazymc --tag "$CI_COMMIT_REF_NAME" --file ./lazymc-x86_64-apple-darwin --name lazymc-$CI_COMMIT_REF_NAME-macos
- ./github-release upload --token "$GITHUB_TOKEN" --owner timvisee --repo lazymc --tag "$CI_COMMIT_REF_NAME" --file ./lazymc-x86_64-pc-windows-msvc.exe --name lazymc-$CI_COMMIT_REF_NAME-windows.exe

View File

@@ -1,5 +1,18 @@
# Changelog
## 0.1.3 (2021-11-15)
- Fix binary release
## 0.1.2 (2021-11-15)
- Add Linux ARMv7 and aarch64 releases
- RCON now works if server is running while server command already quit
- Various RCON tweaks in an attempt to make it more robust and reliable (cooldown, exclusive lock, invocation spacing)
- Increase server monitoring timeout to 20 seconds
- Improve waiting for server logic when holding client
- Various fixes and improvements
## 0.1.1 (2021-11-14)
- Make server sleeping errors more descriptive

2
Cargo.lock generated
View File

@@ -614,7 +614,7 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
[[package]]
name = "lazymc"
version = "0.1.1"
version = "0.1.3"
dependencies = [
"anyhow",
"bytes",

View File

@@ -1,6 +1,6 @@
[package]
name = "lazymc"
version = "0.1.1"
version = "0.1.3"
authors = ["Tim Visee <3a4fb3964f@sinenomine.email>"]
license = "GPL-3.0"
readme = "README.md"
@@ -37,7 +37,7 @@ rand = "0.8"
serde = "1.0"
shlex = "1.1"
thiserror = "1.0"
tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "io-util", "net", "macros", "time", "process", "signal"] }
tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "io-util", "net", "macros", "time", "process", "signal", "sync"] }
toml = "0.5"
# Feature: rcon

View File

@@ -1,4 +1,14 @@
use std::time::Duration;
use rust_rcon::{Connection, Error as RconError};
use tokio::time;
/// Minecraft RCON quirk.
///
/// Wait this time between RCON operations.
/// The Minecraft RCON implementation is very broken and brittle, this is used in the hopes to
/// improve reliability.
const QUIRK_RCON_GRACE_TIME: Duration = Duration::from_millis(200);
/// An RCON client.
pub struct Rcon {
@@ -19,7 +29,17 @@ impl Rcon {
/// Send command over RCON.
pub async fn cmd(&mut self, cmd: &str) -> Result<String, RconError> {
// Minecraft quirk
time::sleep(QUIRK_RCON_GRACE_TIME).await;
// Actually send RCON command
debug!(target: "lazymc::rcon", "Sending RCON: {}", cmd);
self.con.cmd(cmd).await
}
/// Close connection.
pub async fn close(self) {
// Minecraft quirk
time::sleep(QUIRK_RCON_GRACE_TIME).await;
}
}

View File

@@ -23,10 +23,10 @@ use crate::server::{Server, State};
const MONITOR_POLL_INTERVAL: Duration = Duration::from_secs(2);
/// Status request timeout in seconds.
const STATUS_TIMEOUT: u64 = 8;
const STATUS_TIMEOUT: u64 = 20;
/// Ping request timeout in seconds.
const PING_TIMEOUT: u64 = 10;
const PING_TIMEOUT: u64 = 20;
/// Monitor server.
pub async fn monitor_server(config: Arc<Config>, server: Arc<Server>) {
@@ -36,15 +36,17 @@ pub async fn monitor_server(config: Arc<Config>, server: Arc<Server>) {
let mut poll_interval = time::interval(MONITOR_POLL_INTERVAL);
loop {
poll_interval.tick().await;
// Poll server state and update internal status
trace!(target: "lazymc::monitor", "Fetching status for {} ... ", addr);
let status = poll_server(&config, &server, addr).await;
match status {
// Got status, update
Ok(Some(status)) => server.update_status(&config, Some(status)),
Ok(Some(status)) => server.update_status(&config, Some(status)).await,
// Error, reset status
Err(_) => server.update_status(&config, None),
Err(_) => server.update_status(&config, None).await,
// Didn't get status, but ping fallback worked, leave as-is, show warning
Ok(None) => {
@@ -53,20 +55,18 @@ pub async fn monitor_server(config: Arc<Config>, server: Arc<Server>) {
}
// Sleep server when it's bedtime
if server.should_sleep(&config) {
if server.should_sleep(&config).await {
info!(target: "lazymc::montior", "Server has been idle, sleeping...");
server.stop(&config).await;
}
// Check whether we should force kill server
if server.should_kill() {
if server.should_kill().await {
error!(target: "lazymc::montior", "Force killing server, took too long to start or stop");
if !server.force_kill().await {
warn!(target: "lazymc", "Failed to force kill server");
}
}
poll_interval.tick().await;
}
}

View File

@@ -1,10 +1,14 @@
use std::sync::atomic::{AtomicU8, Ordering};
use std::sync::{Arc, Mutex, RwLock, RwLockReadGuard};
use std::sync::Arc;
use std::time::{Duration, Instant};
use futures::FutureExt;
use minecraft_protocol::data::server_status::ServerStatus;
use tokio::process::Command;
use tokio::sync::watch;
#[cfg(feature = "rcon")]
use tokio::sync::Semaphore;
use tokio::sync::{Mutex, RwLock, RwLockReadGuard};
use tokio::time;
use crate::config::Config;
@@ -14,6 +18,13 @@ use crate::os;
/// Used to give it some more time to quit forgotten threads, such as for RCON.
const SERVER_QUIT_COOLDOWN: Duration = Duration::from_millis(2500);
/// RCON cooldown. Required period between RCON invocations.
///
/// The Minecraft RCON implementation is very broken and brittle, this is used in the hopes to
/// improve reliability.
#[cfg(feature = "rcon")]
const RCON_COOLDOWN: Duration = Duration::from_secs(15);
/// Server state.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum State {
@@ -61,6 +72,12 @@ pub struct Server {
/// Matches `State`, utilzes AtomicU8 for better performance.
state: AtomicU8,
/// State watch sender, broadcast state changes.
state_watch_sender: watch::Sender<State>,
/// State watch receiver, subscribe to state changes.
state_watch_receiver: watch::Receiver<State>,
/// Server process PID.
///
/// Set if a server process is running.
@@ -84,6 +101,14 @@ pub struct Server {
///
/// Used as starting/stopping timeout.
kill_at: RwLock<Option<Instant>>,
/// Lock for exclusive RCON operations.
#[cfg(feature = "rcon")]
rcon_lock: Semaphore,
/// Last time server was stopped over RCON.
#[cfg(feature = "rcon")]
rcon_last_stop: Mutex<Option<Instant>>,
}
impl Server {
@@ -92,13 +117,18 @@ impl Server {
State::from_u8(self.state.load(Ordering::Relaxed))
}
/// Get state receiver to subscribe on server state changes.
pub fn state_receiver(&self) -> watch::Receiver<State> {
self.state_watch_receiver.clone()
}
/// Set a new state.
///
/// This updates various other internal things depending on how the state changes.
///
/// Returns false if the state didn't change, in which case nothing happens.
fn update_state(&self, state: State, config: &Config) -> bool {
self.update_state_from(None, state, config)
async fn update_state(&self, state: State, config: &Config) -> bool {
self.update_state_from(None, state, config).await
}
/// Set new state, from a current state.
@@ -106,7 +136,7 @@ impl Server {
/// This updates various other internal things depending on how the state changes.
///
/// Returns false if current state didn't match `from` or if nothing changed.
fn update_state_from(&self, from: Option<State>, new: State, config: &Config) -> bool {
async fn update_state_from(&self, from: Option<State>, new: State, config: &Config) -> bool {
// Atomically swap state to new, return if from doesn't match
let old = State::from_u8(match from {
Some(from) => match self.state.compare_exchange(
@@ -128,8 +158,11 @@ impl Server {
trace!("Change server state from {:?} to {:?}", old, new);
// Broadcast change
let _ = self.state_watch_sender.send(new);
// Update kill at time for starting/stopping state
*self.kill_at.write().unwrap() = match new {
*self.kill_at.write().await = match new {
State::Starting if config.time.start_timeout > 0 => {
Some(Instant::now() + Duration::from_secs(config.time.start_timeout as u64))
}
@@ -148,25 +181,26 @@ impl Server {
// If Starting -> Started, update active time and keep it online for configured time
if old == State::Starting && new == State::Started {
self.update_last_active();
self.keep_online_for(Some(config.time.min_online_time));
self.update_last_active().await;
self.keep_online_for(Some(config.time.min_online_time))
.await;
}
true
}
/// Update status as polled from the server.
/// Update status as obtained from the server.
///
/// This updates various other internal things depending on the current state and the given
/// status.
pub fn update_status(&self, config: &Config, status: Option<ServerStatus>) {
pub async fn update_status(&self, config: &Config, status: Option<ServerStatus>) {
// Update state based on curren
match (self.state(), &status) {
(State::Stopped | State::Starting, Some(_)) => {
self.update_state(State::Started, config);
self.update_state(State::Started, config).await;
}
(State::Started, None) => {
self.update_state(State::Stopped, config);
self.update_state(State::Stopped, config).await;
}
_ => {}
}
@@ -175,19 +209,22 @@ impl Server {
if let Some(status) = status {
// Update last active time if there are online players
if status.players.online > 0 {
self.update_last_active();
self.update_last_active().await;
}
self.status.write().unwrap().replace(status);
self.status.write().await.replace(status);
}
}
/// Try to start the server.
///
/// Does nothing if currently not in stopped state.
pub fn start(config: Arc<Config>, server: Arc<Server>, username: Option<String>) -> bool {
pub async fn start(config: Arc<Config>, server: Arc<Server>, username: Option<String>) -> bool {
// Must set state from stopped to starting
if !server.update_state_from(Some(State::Stopped), State::Starting, &config) {
if !server
.update_state_from(Some(State::Stopped), State::Starting, &config)
.await
{
return false;
}
@@ -197,23 +234,24 @@ impl Server {
None => info!(target: "lazymc", "Starting server..."),
}
// Invoke server command in separate task
tokio::spawn(invoke_server_cmd(config, server).map(|_| ()));
// Spawn server in new task
Self::spawn_server_task(config, server);
true
}
/// Spawn the server task.
///
/// This should not be called directly.
fn spawn_server_task(config: Arc<Config>, server: Arc<Server>) {
tokio::spawn(invoke_server_cmd(config, server).map(|_| ()));
}
/// Stop running server.
///
/// This requires the server PID to be known.
/// This will attempt to stop the server with all available methods.
#[allow(unused_variables)]
pub async fn stop(&self, config: &Config) -> bool {
// We must have a running process
let has_process = self.pid.lock().unwrap().is_some();
if !has_process {
debug!(target: "lazymc", "Tried to stop server, while no PID is known");
return false;
}
// Try to stop through RCON if started
#[cfg(feature = "rcon")]
if self.state() == State::Started && stop_server_rcon(config, self).await {
@@ -222,7 +260,7 @@ impl Server {
// Try to stop through signal
#[cfg(unix)]
if stop_server_signal(config, self) {
if stop_server_signal(config, self).await {
return true;
}
@@ -234,7 +272,7 @@ impl Server {
///
/// This requires the server PID to be known.
pub async fn force_kill(&self) -> bool {
if let Some(pid) = *self.pid.lock().unwrap() {
if let Some(pid) = *self.pid.lock().await {
return os::force_kill(pid);
}
false
@@ -243,7 +281,7 @@ impl Server {
/// Decide whether the server should sleep.
///
/// Always returns false if it is currently not online.
pub fn should_sleep(&self, config: &Config) -> bool {
pub async fn should_sleep(&self, config: &Config) -> bool {
// Server must be online
if self.state() != State::Started {
return false;
@@ -253,7 +291,7 @@ impl Server {
let players_online = self
.status
.read()
.unwrap()
.await
.as_ref()
.map(|status| status.players.online > 0)
.unwrap_or(false);
@@ -266,7 +304,7 @@ impl Server {
let keep_online = self
.keep_online_until
.read()
.unwrap()
.await
.map(|i| i >= Instant::now())
.unwrap_or(false);
if keep_online {
@@ -275,7 +313,7 @@ impl Server {
}
// Last active time must have passed sleep threshold
if let Some(last_idle) = self.last_active.read().unwrap().as_ref() {
if let Some(last_idle) = self.last_active.read().await.as_ref() {
return last_idle.elapsed() >= Duration::from_secs(config.time.sleep_after as u64);
}
@@ -283,27 +321,27 @@ impl Server {
}
/// Decide whether to force kill the server process.
pub fn should_kill(&self) -> bool {
pub async fn should_kill(&self) -> bool {
self.kill_at
.read()
.unwrap()
.await
.map(|t| t <= Instant::now())
.unwrap_or(false)
}
/// Read last known server status.
pub fn status(&self) -> RwLockReadGuard<Option<ServerStatus>> {
self.status.read().unwrap()
pub async fn status<'a>(&'a self) -> RwLockReadGuard<'a, Option<ServerStatus>> {
self.status.read().await
}
/// Update the last active time.
fn update_last_active(&self) {
self.last_active.write().unwrap().replace(Instant::now());
async fn update_last_active(&self) {
self.last_active.write().await.replace(Instant::now());
}
/// Force the server to be online for the given number of seconds.
fn keep_online_for(&self, duration: Option<u32>) {
*self.keep_online_until.write().unwrap() = duration
async fn keep_online_for(&self, duration: Option<u32>) {
*self.keep_online_until.write().await = duration
.filter(|d| *d > 0)
.map(|d| Instant::now() + Duration::from_secs(d as u64));
}
@@ -311,13 +349,21 @@ impl Server {
impl Default for Server {
fn default() -> Self {
let (state_watch_sender, state_watch_receiver) = watch::channel(State::Stopped);
Self {
state: AtomicU8::new(State::Stopped.to_u8()),
state_watch_sender,
state_watch_receiver,
pid: Default::default(),
status: Default::default(),
last_active: Default::default(),
keep_online_until: Default::default(),
kill_at: Default::default(),
#[cfg(feature = "rcon")]
rcon_lock: Semaphore::new(1),
#[cfg(feature = "rcon")]
rcon_last_stop: Default::default(),
}
}
}
@@ -327,7 +373,7 @@ pub async fn invoke_server_cmd(
config: Arc<Config>,
state: Arc<Server>,
) -> Result<(), Box<dyn std::error::Error>> {
// Build command
// Configure command
let args = shlex::split(&config.server.command).expect("invalid server command");
let mut cmd = Command::new(&args[0]);
cmd.args(args.iter().skip(1));
@@ -351,7 +397,7 @@ pub async fn invoke_server_cmd(
state
.pid
.lock()
.unwrap()
.await
.replace(child.id().expect("unknown server PID"));
// Wait for process to exit, handle status
@@ -372,18 +418,18 @@ pub async fn invoke_server_cmd(
};
// Forget server PID
state.pid.lock().unwrap().take();
state.pid.lock().await.take();
// Give server a little more time to quit forgotten threads
time::sleep(SERVER_QUIT_COOLDOWN).await;
// Set server state to stopped
state.update_state(State::Stopped, &config);
state.update_state(State::Stopped, &config).await;
// Restart on crash
if crashed && config.server.wake_on_crash {
warn!(target: "lazymc", "Server crashed, restarting...");
Server::start(config, state, None);
Server::start(config, state, None).await;
}
Ok(())
@@ -400,6 +446,21 @@ async fn stop_server_rcon(config: &Config, server: &Server) -> bool {
return false;
}
// Grab RCON lock
let rcon_lock = server.rcon_lock.acquire().await.unwrap();
// Ensure RCON has cooled down
let rcon_cooled_down = server
.rcon_last_stop
.lock()
.await
.map(|t| t.elapsed() >= RCON_COOLDOWN)
.unwrap_or(true);
if !rcon_cooled_down {
debug!(target: "lazymc", "Not using RCON to stop server, in cooldown, used too recently");
return false;
}
// RCON address
let mut addr = config.server.address;
addr.set_port(config.rcon.port);
@@ -420,9 +481,14 @@ async fn stop_server_rcon(config: &Config, server: &Server) -> bool {
return false;
}
// Set server to stopping state
// TODO: set before stop command, revert state on failure
server.update_state(State::Stopping, config);
// Set server to stopping state, update last RCON time
server.rcon_last_stop.lock().await.replace(Instant::now());
server.update_state(State::Stopping, config).await;
drop(rcon_lock);
// Gracefully close connection
rcon.close().await;
true
}
@@ -431,9 +497,9 @@ async fn stop_server_rcon(config: &Config, server: &Server) -> bool {
///
/// Only available on Unix.
#[cfg(unix)]
fn stop_server_signal(config: &Config, server: &Server) -> bool {
async fn stop_server_signal(config: &Config, server: &Server) -> bool {
// Grab PID
let pid = match *server.pid.lock().unwrap() {
let pid = match *server.pid.lock().await {
Some(pid) => pid,
None => {
debug!(target: "lazymc", "Could not send stop signal to server process, PID unknown");
@@ -448,8 +514,12 @@ fn stop_server_signal(config: &Config, server: &Server) -> bool {
}
// Update from starting/started to stopping
server.update_state_from(Some(State::Starting), State::Stopping, config);
server.update_state_from(Some(State::Started), State::Stopping, config);
server
.update_state_from(Some(State::Starting), State::Stopping, config)
.await;
server
.update_state_from(Some(State::Started), State::Stopping, config)
.await;
true
}

View File

@@ -44,7 +44,7 @@ pub async fn service(config: Arc<Config>) -> Result<(), ()> {
// Initiate server start
if config.server.wake_on_start {
Server::start(config.clone(), server.clone(), None);
Server::start(config.clone(), server.clone(), None).await;
}
// Route all incomming connections

View File

@@ -1,5 +1,6 @@
use std::ops::Deref;
use std::sync::Arc;
use std::time::{Duration, Instant};
use std::time::Duration;
use crate::server::State;
use bytes::BytesMut;
@@ -20,9 +21,6 @@ use crate::proto::{self, Client, ClientState, RawPacket};
use crate::server::{self, Server};
use crate::service;
/// Client holding server state poll interval.
const HOLD_POLL_INTERVAL: Duration = Duration::from_millis(500);
/// Proxy the given inbound stream to a target address.
// TODO: do not drop error here, return Box<dyn Error>
pub async fn serve(
@@ -81,7 +79,7 @@ pub async fn serve(
// Hijack server status packet
if client_state == ClientState::Status && packet.id == proto::STATUS_PACKET_ID_STATUS {
let server_status = server_status(&config, &server);
let server_status = server_status(&config, &server).await;
let packet = StatusResponse { server_status };
let mut data = Vec::new();
@@ -107,7 +105,7 @@ pub async fn serve(
.map(|p| p.name);
// Start server if not starting yet
Server::start(config.clone(), server.clone(), username);
Server::start(config.clone(), server.clone(), username).await;
// Hold client if enabled and starting
if config.time.hold() && server.state() == State::Starting {
@@ -159,56 +157,63 @@ pub async fn hold<'a>(
) -> Result<(), ()> {
trace!(target: "lazymc", "Started holding client");
// Set up polling interval, get timeout
let mut poll_interval = time::interval(HOLD_POLL_INTERVAL);
let since = Instant::now();
let timeout = config.time.hold_client_for as u64;
// A task to wait for suitable server state
// Waits for started state, errors if stopping/stopped state is reached
let task_wait = async {
let mut state = server.state_receiver();
loop {
// TODO: wait for start signal over channel instead of polling
poll_interval.tick().await;
// Wait for state change
state.changed().await.unwrap();
trace!("Polling server state for holding client...");
match server.state() {
match state.borrow().deref() {
// Still waiting on server start
State::Starting => {
trace!(target: "lazymc", "Server not ready, holding client for longer");
// If hold timeout is reached, kick client
if since.elapsed().as_secs() >= timeout {
warn!(target: "lazymc", "Held client reached timeout of {}s, disconnecting", timeout);
kick(&config.messages.login_starting, &mut inbound.split().1).await?;
return Ok(());
}
continue;
}
// Server started, start relaying and proxy
State::Started => {
// TODO: drop client if already disconnected
// Relay client to proxy
info!(target: "lazymc", "Server ready for held client, relaying to server");
service::server::route_proxy_queue(inbound, config, hold_queue);
return Ok(());
break true;
}
// Server stopping, this shouldn't happen, kick
State::Stopping => {
warn!(target: "lazymc", "Server stopping for held client, disconnecting");
kick(&config.messages.login_stopping, &mut inbound.split().1).await?;
break;
break false;
}
// Server stopped, this shouldn't happen, disconnect
State::Stopped => {
error!(target: "lazymc", "Server stopped for held client, disconnecting");
break;
break false;
}
}
}
};
// Wait for server state with timeout
let timeout = Duration::from_secs(config.time.hold_client_for as u64);
match time::timeout(timeout, task_wait).await {
// Relay client to proxy
Ok(true) => {
info!(target: "lazymc", "Server ready for held client, relaying to server");
service::server::route_proxy_queue(inbound, config, hold_queue);
return Ok(());
}
// Server stopping/stopped, this shouldn't happen, kick
Ok(false) => {
warn!(target: "lazymc", "Server stopping for held client, disconnecting");
kick(&config.messages.login_stopping, &mut inbound.split().1).await?;
}
// Timeout reached, kick with starting message
Err(_) => {
warn!(target: "lazymc", "Held client reached timeout of {}s, disconnecting", config.time.hold_client_for);
kick(&config.messages.login_starting, &mut inbound.split().1).await?;
}
}
// Gracefully close connection
match inbound.shutdown().await {
@@ -236,8 +241,8 @@ async fn kick(msg: &str, writer: &mut WriteHalf<'_>) -> Result<(), ()> {
}
/// Build server status object to respond to client with.
fn server_status(config: &Config, server: &Server) -> ServerStatus {
let status = server.status();
async fn server_status(config: &Config, server: &Server) -> ServerStatus {
let status = server.status().await;
// Select version and player max from last known server status
let (version, max) = match status.as_ref() {