DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
Grimoire
These are notes on tricks and workarounds that are easy to forget, not sufficiently easy to re-discover, or easy to remember incorrectly.
This page is not an educational resource; it is intentionally opaque due to its use for personal reference. See the official docs or your preferred chatbot.
bash
Get dir where the script file is placed
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
Default variable value
CRAWL=${CRAWL:-crawl.tar.gz}
Integer math
echo "Freed up $(($OLD_MB - $REM_MB)) MiB"
Common test flags
| Flag | Description |
| --- | --- |
| -z | String is empty |
| -e | String denotes a file (or dir) that exists |
| -f | String denotes a file that exists |
| -d | String denotes a dir |
Special variables
| Variable | Description |
| --- | --- |
| $? | Exit status of last command |
| $! | PID of last background command |
| $0 | argv[0] |
| $1 | argv[1] |
| … | … |
| $n | argv[n] |
| $@ | All arguments |
| "$@" | All arguments, each expanded as a single quoted word |
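A quick sketch (hypothetical values) of why the last two rows differ when arguments contain spaces:

```bash
count() { echo "$#"; }
set -- "a b" c   # two arguments; the first contains a space
count $@         # prints 3: unquoted expansion re-splits words
count "$@"       # prints 2: each argument stays a single word
```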
function
name() {
    SOURCE=${1:-default.c}
    test -e "$SOURCE" || \
        exit 1 # exit FROM THE SHELL, not the function
    return 0   # exit code of function
}
Get absolute file path (should work on macOS)
get_abs_filename() {
    echo "$(cd "$(dirname "$1")" && pwd)/$(basename "$1")"
}
Rotate file (appending an ISO timestamp as suffix)
rotate() {
    if [ -e "$1" ]; then
        SUFF="$(date -Iminutes)"
        echo "$1 exists, moving to \"$1.$SUFF\""
        if ! mv "$1" "$1.$SUFF"; then
            echo "Failed to rotate $1. Will ignore"
        fi
    fi
}
Handle command line options (getopts)
# d: means -d takes an argument
while getopts "hd:z:K" o; do
    case $o in
        d)
            DELAY_MS=${OPTARG}
            ;;
        z)
            TARGZ_FILE="$(get_abs_filename "${OPTARG}")"
            ;;
        K)
            KEEP=no
            ;;
        h)
            usage
            exit 0
            ;;
        *)
            usage
            exit 1
            ;;
    esac
done
CRAWL_DIR=${@:$OPTIND:1}
NON_EXISTING=${@:$OPTIND+1:1}
if [ -z "$CRAWL_DIR" ]; then
    echo "Positional argument CRAWL_DIR is required!"; usage; exit 1
else
    CRAWL_DIR="$(get_abs_filename "$CRAWL_DIR")"
fi
if [ ! -z "$NON_EXISTING" ]; then
    echo "Too many arguments!"; usage; exit 1
fi
Docker
See this page
Kubernetes
See this page
Linux networking
UFW
ufw default deny incoming
ufw default allow outgoing
ufw limit proto tcp from 187.0.0.0/8 to any port 22 comment MHNNET
ufw limit proto tcp from 186.0.0.0/8 to any port 22 comment MHNNET
ufw limit proto tcp from 191.0.0.0/8 to any port 22 comment MHNNET
ufw limit proto tcp from 179.0.0.0/8 to any port 22 comment MHNNET
ufw limit proto tcp from 177.0.0.0/8 to any port 22 comment MHNNET
ufw limit proto tcp from 170.0.0.0/8 to any port 22 comment MHNNET
ufw limit proto tcp from 168.0.0.0/8 to any port 22 comment MHNNET
ufw limit proto tcp from 143.0.0.0/8 to any port 22 comment MHNNET
ufw limit proto tcp from 138.0.0.0/8 to any port 22 comment MHNNET
ufw limit proto tcp from 45.0.0.0/8 to any port 22 comment MHNNET
limit means that after 6 connections within 30 seconds, further connections from the same origin IP will be rejected. This will affect bck-liv.sh and other scripts that call rsync/ssh/scp multiple times. To exempt a specific IP address from the 6-connection limit:
ufw prepend allow from 187.45.109.4 to any comment home
Logging all connection attempts matched by a rule
ufw allow log 80,443/tcp comment nginx
Get the logs:
dmesg --time-format iso | grep -E 'UFW ALLOW.*DPT=(443|80)'
Count unique source addresses that connected to nginx:
dmesg --time-format iso | grep -E 'UFW ALLOW.*DPT=(443|80)' | sed -nE 's/^.*SRC=([^ ]+).*$/\1/p' | sort | uniq | wc -l
Create CSV with timestamp, IP and source port:
fill_csv() {
    test -f "$1" || echo "iso_time;src_addr;src_port;dst_port" > "$1"
    dmesg --time-format iso |
        grep -E 'UFW ALLOW.*SRC=177.*DPT=(443|80)' |
        sed -nE 's/^([^ ]+).*UFW ALLOW.*SRC=([^ ]+).*SPT=([^ ]+).*DPT=([^ ]+).*$/\1;\2;\3;\4/p' >> "$1"
}
fill_csv /tmp/ips.csv
wireguard server
References:
- https://www.wireguard.com/quickstart/
- https://www.digitalocean.com/community/tutorials/how-to-set-up-wireguard-on-ubuntu-20-04
Private subnets:
- Private IPv4 subnet: 10.0.23.0/24
- Private IPv6 subnet: xxxx:xxxx:xxxx::/64 (RFC 4193)
Assigned IPs:
| IPv4 | device | comment |
| --- | --- | --- |
| 10.0.23.1 | liv | wg server |
| 10.0.23.2 | raido | wgliv-global |
| 10.0.23.3 | raido | wgliv-whitelist |
| 10.0.23.4 | samsung-a03s | adguard@10.0.23.1 |
| 10.0.23.5 | fs-experiments | hetzner |
Server-side configuration behaving as NAT, in /etc/wireguard/wg0.conf:
[Interface]
PrivateKey = ###############################
Address = 10.0.23.1/24, xxxx:xxxx:xxxx::1/64
ListenPort = 99123
PostUp = ufw route allow in on wg0 out on eth0
PostUp = iptables -t nat -I POSTROUTING -o eth0 -j MASQUERADE
PostUp = ip6tables -t nat -I POSTROUTING -o eth0 -j MASQUERADE
PreDown = ufw route delete allow in on wg0 out on eth0
PreDown = iptables -t nat -D POSTROUTING -o eth0 -j MASQUERADE
PreDown = ip6tables -t nat -D POSTROUTING -o eth0 -j MASQUERADE
[Peer]
PublicKey = ###########################################=
AllowedIPs = 10.0.23.2, xxxx:xxxx:xxxx::2
[Peer]
PublicKey = ###########################################=
AllowedIPs = 10.0.23.3, xxxx:xxxx:xxxx::3
- Persistently enable IP forwarding in /etc/sysctl.conf by setting net.ipv4.ip_forward=1 and net.ipv6.conf.all.forwarding=1
- Apply changes to sysctl.conf: sysctl -p
- Open wireguard port on firewall: ufw allow from 187.45.109.235 to any port 99123 proto udp comment "wg-home"
- Enable & start the wg-quick@wg0 systemd unit (see the command after this list)
- Allow SSH from peers: sudo ufw prepend allow from 10.0.23.0/24 to any comment "wg peers"
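Enabling and starting the unit is a one-liner (standard systemd syntax for the wg-quick template unit):

```bash
systemctl enable --now wg-quick@wg0
```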
raido:/etc/wireguard/wgliv-global.conf:
[Interface]
PrivateKey = ##################################3
Address = 10.0.23.2/24, xxxx:xxxx:xxxx::2/64
DNS = 10.0.23.1
Table = 99123
[Peer]
Endpoint = 149.999.999.252:99123
PublicKey = ############################################
AllowedIPs = 0.0.0.0/0, ::/0
Route only mullvad and other specific destinations through liv:
raido:/etc/wireguard/wgliv-whitelist.conf:
[Interface]
PrivateKey = ############################
Address = 10.0.23.3/24, xxxx:xxxx:xxxx::3/64
DNS = 10.0.23.1
Table = 99123
[Peer]
Endpoint = 149.999.999.252:99123
PublicKey = ############################################
AllowedIPs = __BIG_IP_LIST__
Replace AllowedIPs = __BIG_IP_LIST__ with the result of the wgliv-gen-whitelist script. It will add CIDR blocks for proton, tutanota and the mullvad website, followed by a list of (nearly) all mullvad wireguard servers.
Containerize connections via mullvad
This does not rely specifically on wgliv-global/wgliv-whitelist, but if either of them is enabled the wireguard tunnel will be established inside the already existing tunnel to liv. The outline is described here and relies on creating a wireguard interface in the root network namespace, creating a new network namespace, moving the wireguard interface into it and configuring it from inside the namespace, and then moving or spawning the applications to be routed via mullvad.
See scripts for implementation:
- Create netns and connect wireguard to mullvad: mvd-ns
- Kill all processes in the namespace and delete it: mvd-ns-killall
- Run firefox in the netns: mvd-ns-exec-with-pulse-firefox
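The scripts are not reproduced here; a minimal sketch of the namespace plumbing they implement. Interface name, namespace name, address and config path are all hypothetical, and mvd.conf must be a wg(8)-style config (no wg-quick-only keys like Address or DNS):

```bash
ip link add wgmvd type wireguard                            # create iface in the root netns
ip netns add mvd                                            # create the new namespace
ip link set wgmvd netns mvd                                 # move the iface into it
ip netns exec mvd wg setconf wgmvd /etc/wireguard/mvd.conf  # configure from inside
ip -n mvd address add 10.99.0.2/32 dev wgmvd                # address assigned by mullvad
ip -n mvd link set lo up
ip -n mvd link set wgmvd up
ip -n mvd route add default dev wgmvd                       # route everything via the tunnel
ip netns exec mvd firefox                                   # spawn the app inside the netns
```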
AdGuardHome
“Install” with:
docker pull adguard/adguardhome
mkdir -p /opt/adguard/work /opt/adguard/conf
Ubuntu uses systemd-resolved, which must be disabled in order to release port 53:
- Set DNSStubListener=no in /etc/systemd/resolved.conf
- stop and disable the systemd-resolved unit
- rm /etc/resolv.conf
- echo nameserver 9.9.9.9 > /etc/resolv.conf
Create a systemd unit: /etc/systemd/system/adguard.service
[Unit]
Description=AdGuardHome
Requires=docker.service
After=docker.service
[Service]
Type=exec
Restart=on-abnormal
ExecStartPre=-/usr/bin/docker stop adguard
ExecStartPre=-/usr/bin/docker rm adguard
ExecStart=/usr/bin/docker run --name adguard --rm -p 53:53/udp -v /opt/adguard/work:/opt/adguardhome/work -v /opt/adguard/conf:/opt/adguardhome/conf --network host adguard/adguardhome
ExecStop=/usr/bin/docker stop adguard
[Install]
WantedBy=multi-user.target
Enable & start: systemctl enable adguard && systemctl start adguard
Due to the use of DNS resolvers in DDoS amplification attacks, the DNS ports should not be exposed in ufw. Therefore, clients must use it over wireguard.
Updating
docker pull adguard/adguardhome
systemctl restart adguard
letsencrypt
- In the Cloudflare dashboard, create new DNS A records for liv (pointing to the public IP) and wg.liv (pointing to 10.0.23.1)
- Create a Zone:DNS:Edit API Token in the Cloudflare dashboard. The token must be saved in a file as dns_cloudflare_api_token = ...
- Install certbot-dns-cloudflare anywhere
- Fetch certificates:
sudo chmod 600 token
sudo chown root:root token
sudo certbot certonly --dns-cloudflare \
    --dns-cloudflare-propagation-seconds 60 \
    --dns-cloudflare-credentials token \
    -d liv.argosware.com -d '*.liv.argosware.com'
- Certificate files will be in /etc/letsencrypt/live/liv.argosware.com
- Use fullchain.pem as certificate and privkey.pem as private key in AdGuard settings
UFSC VPN
Install L2TP support for NetworkManager, and strongswan for IPSec:
pacman -S networkmanager-l2tp strongswan
systemctl restart NetworkManager
Without strongswan, the "IPSec settings" button in the NetworkManager VPN settings will be grayed out.
Create a new L2TP VPN connection (using the GUI):
- Gateway: vpn.ufsc.br
- User: user@ufsc.br
- On IPSec settings, enable and set PSK to ufsc
If using UFW, two rules must be added (if not already covered):
- ufw allow out from any to 150.162.0.0/16: allow direct output to UFSC
- ufw allow out on ppp0 from any to any: allow outbound over L2TP
Java
Run a child JVM with same classpath
Use ProcessBuilder with the java.home and java.class.path system properties:
String separator = System.getProperty("file.separator");
String classpath = System.getProperty("java.class.path");
String java = System.getProperty("java.home")
        + separator + "bin" + separator + "java";
List<String> args = new ArrayList<>(asList(java, "-cp", classpath,
        /*"-agentlib:jdwp=transport=dt_socket,server=y,address=5005", */
        mainClassName));
args.addAll(mainClassArgs);
try {
    Process child = new ProcessBuilder().command(args)
            .redirectError(ProcessBuilder.Redirect.INHERIT)
            .redirectOutput(ProcessBuilder.Redirect.INHERIT)
            .start();
    int exitCode = child.waitFor();
    if (exitCode != 0)
        throw new RuntimeException("Child did not execute correctly. code=" + exitCode);
} catch (IOException | InterruptedException e) {
    throw new RuntimeException(e);
}
Get free TCP port
Start a listening socket on a random port, read the port, then close the socket. This is moderately racy.
int port = 3331; // fallback if the probe fails
try (ServerSocket serverSocket = new ServerSocket(0, 50, getLocalHost())) {
    port = serverSocket.getLocalPort();
} catch (IOException ignored) { }
try { // give the OS some time to fully release the port
    Thread.sleep(100);
} catch (InterruptedException ignored) { }
Maven
Maven wrapper
mvn wrapper:wrapper
git add .mvn mvnw*
Embed jars in VCS and use as a maven repo
Source from stack overflow. Add a repository to the pom.xml:
<repositories>
  <repository>
    <id>3rd-party</id>
    <url>file://${basedir}/3rd-party</url>
  </repository>
</repositories>
Create the 3rd-party dir on the project base dir and install jar files to it:
mvn org.apache.maven.plugins:maven-install-plugin:2.3.1:install-file \
-Dfile=<path-to-file> -DgroupId=<myGroup> \
-DartifactId=<myArtifactId> -Dversion=<myVersion> \
-Dpackaging=<myPackaging> -DlocalRepositoryPath=3rd-party
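After installing, the artifact can be declared as a normal dependency, reusing the placeholder coordinates from the command above:

```xml
<dependency>
  <groupId>myGroup</groupId>
  <artifactId>myArtifactId</artifactId>
  <version>myVersion</version>
</dependency>
```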
submodules
Parent pom must have pom <packaging> and must list each submodule within <modules>:
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
                             http://maven.apache.org/maven-v4_0_0.xsd">
  <modelVersion>4.0.0</modelVersion>

  <groupId>org.sonatype.mavenbook.multi</groupId>
  <artifactId>simple-parent</artifactId>
  <packaging>pom</packaging>
  <version>1.0</version>
  <name>Multi Chapter Simple Parent Project</name>

  <modules>
    <module>simple-weather</module>
    <module>simple-webapp</module>
  </modules>
  <!-- plugins dependencies configurations -->
</project>
Plugins and dependencies in the parent will only apply to sub-modules that list the parent pom as a parent:
<parent>
  <groupId>org.sonatype.mavenbook.multi</groupId>
  <artifactId>simple-parent</artifactId>
  <version>1.0</version>
</parent>
Fat/über Jar
Using maven-shade:
<plugin>
  <groupId>org.apache.maven.plugins</groupId>
  <artifactId>maven-shade-plugin</artifactId>
  <version>3.2.1</version>
  <executions>
    <execution>
      <phase>package</phase>
      <goals><goal>shade</goal></goals>
      <configuration>
        <transformers>
          <transformer implementation="org.apache.maven.plugins.shade.resource.ServicesResourceTransformer" />
          <transformer implementation="org.apache.maven.plugins.shade.resource.ApacheLicenseResourceTransformer" />
          <transformer implementation="org.apache.maven.plugins.shade.resource.ApacheNoticeResourceTransformer">
            <addHeader>false</addHeader>
          </transformer>
          <transformer implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
            <manifestEntries>
              <Main-Class>br.ufsc.lapesd.riefederator.ExperimentsApp</Main-Class>
              <Multi-Release>true</Multi-Release>
            </manifestEntries>
          </transformer>
        </transformers>
        <filters>
          <filter>
            <artifact>*:*</artifact>
            <!-- Some jars are signed but shading breaks that.
                 Don't include signing files. -->
            <excludes>
              <exclude>META-INF/*.SF</exclude>
              <exclude>META-INF/*.DSA</exclude>
              <exclude>META-INF/*.RSA</exclude>
            </excludes>
          </filter>
        </filters>
      </configuration>
    </execution>
  </executions>
</plugin>
Role of transformers:
- ServicesResourceTransformer: merges SPI files under META-INF/services/ of included jars into a single directory
- ApacheLicenseResourceTransformer: does not ship LICENSE files of deps as if the LICENSE were of the packaged jar
- ApacheNoticeResourceTransformer: creates a NOTICE file as required by jars licensed under the APL
The <Multi-Release> manifest entry fixes this warning when using log4j2 from a fat jar on Java 11+:
WARNING: sun.reflect.Reflection.getCallerClass is not supported. This will impact performance.
The <filter> removing signature files is not always necessary. Apache Jena does require such filtering.
Really executable JAR (really-executable-jar)
Use the shade plugin for creating a fat jar and use this maven plugin to create an executable shell script wrapping the fat jar:
<plugin>
  <groupId>org.skife.maven</groupId>
  <artifactId>really-executable-jar-maven-plugin</artifactId>
  <version>1.5.0</version>
  <configuration>
    <programFile>executable</programFile>
  </configuration>
  <executions>
    <execution>
      <phase>package</phase>
      <goals><goal>really-executable-jar</goal></goals>
    </execution>
  </executions>
</plugin>
The <programFile> configuration is optional. If omitted, the .jar file itself will have the shell script prepended. To replace the default script code use the <scriptFile> configuration.
Error prone static analysis
<plugins>
  <plugin>
    <groupId>org.apache.maven.plugins</groupId>
    <artifactId>maven-compiler-plugin</artifactId>
    <configuration>
      <source>${maven.compiler.source}</source>
      <target>${maven.compiler.target}</target>
      <compilerArgs>
        <arg>-XDcompilePolicy=simple</arg>
        <arg>-Xplugin:ErrorProne</arg>
      </compilerArgs>
      <annotationProcessorPaths>
        <path>
          <groupId>com.google.errorprone</groupId>
          <artifactId>error_prone_core</artifactId>
          <version>2.3.4</version>
        </path>
      </annotationProcessorPaths>
    </configuration>
  </plugin>
</plugins>
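For reference, the kind of bug this catches at compile time; Error Prone's StringEquality check flags the reference comparison below:

```java
class Example {
    static boolean sameName(String a, String b) {
        return a == b; // flagged: compares references, not contents
    }
}
```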
Tolerate Java 9 in maven tests
<profiles>
  <profile>
    <id>java9-onward</id>
    <activation>
      <jdk>[1.9,)</jdk>
    </activation>
    <build>
      <plugins>
        <plugin>
          <artifactId>maven-surefire-plugin</artifactId>
          <configuration>
            <argLine>
              --add-opens java.base/java.lang=ALL-UNNAMED
            </argLine>
          </configuration>
        </plugin>
      </plugins>
    </build>
  </profile>
</profiles>
Copy files during/after build
Use the copy-rename-maven-plugin:
Reminders:
- Do not use on the parent pom: its phases run before the modules, thus sourceFiles will be stale or missing
- The plugin runs at the end of the phase
- Plugins run in the order declared on the pom.xml. Put it after plugins that generate sourceFiles
<plugin>
  <groupId>com.coderplus.maven.plugins</groupId>
  <artifactId>copy-rename-maven-plugin</artifactId>
  <version>1.0.1</version>
  <executions>
    <execution>
      <phase>package</phase>
      <goals><goal>copy</goal></goals>
      <configuration>
        <sourceFile>${project.build.directory}/freqel-driver</sourceFile>
        <destinationFile>${project.basedir}/../docker/driver/freqel-driver</destinationFile>
      </configuration>
    </execution>
  </executions>
</plugin>
Publishing to Maven central
OSSRH, run by Sonatype, allows open source projects to publish their artifacts to the Maven central repository. To request access:
- Create a user at the JIRA instance
- Create a new project ticket. Within the GitHub user that hosts the project, create an OSSRH-XYZ repository, where XYZ is the ticket number. This proves ownership/control of the GitHub user holding the project repository.
Full instructions are available here.
In order to publish binaries, they must be signed with GPG. It is better to create a key specifically for signing and publish it to a keyserver:
gpg --full-generate-key
Before publishing the key, add a photo (optional) and sign it with your main key:
$ gpg --list-keys --keyid-format short alexishuf@gmail.com
$ gpg --edit-key KEYID
addphoto
sign
$ gpg --send-keys KEYID
Setting up the pom.xml
Make sure the POM has the following metadata:
- name
- description
- url
- licenses/license/{name,url}
- developers/developer/{name,email}
- scm/{connection,developerConnection,url,tag}
Add and enable the source, javadoc and gpg maven plugins in a release profile (to avoid slowing down development builds):
Notice that:
- javadocExecutable fixes the plugin complaining about JAVA_HOME not being set
- Setting source=8 for the javadoc plugin fixes "The code being documented uses modules but the packages defined in https://docs.oracle.com/javase/8/docs/api/ are in the unnamed module"
- ${gpg.keyname} is set in settings.xml and says which key to sign with
- autoReleaseAfterClose=false allows inspection in the staging repository after mvn clean deploy
<profile>
  <id>release</id>
  <build>
    <plugins>
      <plugin>
        <artifactId>maven-source-plugin</artifactId>
        <executions>
          <execution>
            <id>attach-sources</id>
            <goals><goal>jar-no-fork</goal></goals>
          </execution>
        </executions>
      </plugin>
      <plugin>
        <artifactId>maven-javadoc-plugin</artifactId>
        <configuration>
          <javadocExecutable>${java.home}/bin/javadoc</javadocExecutable>
          <source>8</source>
        </configuration>
        <executions>
          <execution>
            <id>attach-javadoc</id>
            <goals><goal>jar</goal></goals>
          </execution>
        </executions>
      </plugin>
      <plugin>
        <artifactId>maven-gpg-plugin</artifactId>
        <executions>
          <execution>
            <id>sign-artifacts</id>
            <goals><goal>sign</goal></goals>
            <configuration>
              <!--suppress MavenModelInspection: property set in settings.xml -->
              <keyname>${gpg.keyname}</keyname>
            </configuration>
          </execution>
        </executions>
      </plugin>
      <plugin>
        <groupId>org.apache.maven.plugins</groupId>
        <artifactId>maven-release-plugin</artifactId>
        <version>2.5.3</version>
        <configuration>
          <autoVersionSubmodules>true</autoVersionSubmodules>
          <useReleaseProfile>false</useReleaseProfile>
          <releaseProfiles>release</releaseProfiles>
          <goals>deploy</goals>
        </configuration>
      </plugin>
      <plugin>
        <groupId>org.sonatype.plugins</groupId>
        <artifactId>nexus-staging-maven-plugin</artifactId>
        <extensions>true</extensions>
        <configuration>
          <serverId>ossrh</serverId>
          <nexusUrl>https://oss.sonatype.org/</nexusUrl>
          <autoReleaseAfterClose>false</autoReleaseAfterClose>
        </configuration>
      </plugin>
    </plugins>
  </build>
</profile>
Credentials should be stored at ~/.m2/settings.xml, but it may be worthwhile to encrypt them instead of storing them in plain text.
Create a master password with mvn --encrypt-master-password and paste the base64 (including the surrounding {}) into ~/.m2/settings-security.xml:
<settingsSecurity>
  <master>{MASTER_PASSWORD_BASE64}</master>
</settingsSecurity>
New encrypted passwords can then be generated using mvn --encrypt-password.
Edit the settings.xml file at ~/.m2/settings.xml to include the following:
<?xml version="1.0" encoding="utf-8" ?>
<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0">
  <servers>
    <server>
      <id>ossrh</id>
      <username>alexishuf</username>
      <password>{BASE64_FOR_JIRA_PASSWORD}</password>
    </server>
  </servers>
  <profiles>
    <profile>
      <id>ossrh</id>
      <activation>
        <activeByDefault>true</activeByDefault>
      </activation>
      <properties>
        <gpg.keyname>D2BEC7E7</gpg.keyname>
      </properties>
    </profile>
  </profiles>
</settings>
Performing a release
- Use the release plugin to set the release version: mvn clean release:prepare (docs)
- Deploy to the staging repository (since autoReleaseAfterClose=false): mvn release:perform
- Promote from staging to the release repository: mvn nexus-staging:release
- To abort a staged release, use mvn nexus-staging:drop instead
Dependency management
To get a tree of all dependencies:
mvn dependency:tree
On a multi-module root:
mvn compile dependency:tree
Using BOMs
Bill-of-materials POMs set version for a set of related artifacts. This is useful for multi-module projects where different dependencies may pull only some of the modules and each dependency is tied to a different version. Importing a BOM from the root pom will cause all modules to use versions known to be compatible.
<dependencyManagement>
  <dependencies>
    <dependency>
      <groupId>com.squareup.okhttp3</groupId>
      <artifactId>okhttp-bom</artifactId>
      <version>4.6.0</version>
      <type>pom</type>
      <scope>import</scope>
    </dependency>
  </dependencies>
</dependencyManagement>
After importing the BOM, import specific modules as usual.
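For example, with the okhttp BOM imported above, a module can be declared without a version (it comes from the BOM):

```xml
<dependency>
  <groupId>com.squareup.okhttp3</groupId>
  <artifactId>okhttp</artifactId>
  <!-- no <version>: managed by the imported BOM -->
</dependency>
```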
Picocli (CLI Options)
Add the dependency:
<dependency>
  <groupId>info.picocli</groupId>
  <artifactId>picocli</artifactId>
  <version>4.6.1</version>
</dependency>
<!-- ... -->
<plugin>
  <groupId>org.apache.maven.plugins</groupId>
  <artifactId>maven-compiler-plugin</artifactId>
  <configuration>
    <annotationProcessorPaths>
      <path>
        <groupId>info.picocli</groupId>
        <artifactId>picocli-codegen</artifactId>
        <version>4.6.1</version>
      </path>
    </annotationProcessorPaths>
    <compilerArgs>
      <arg>-Aproject=${project.groupId}</arg>
    </compilerArgs>
  </configuration>
</plugin>
How-to:
- Annotate a Callable<Void>/Runnable with @Command
- Annotate fields or setter methods with @Option/@Parameters
- Implement call()/run()
  - To report invalid arg combinations, declare a @Spec CommandSpec spec field and throw a ParameterException(spec.commandLine(), msg)
- On the main(String[]) method, write System.exit(new CommandLine(new App()).execute(args))
- List @Command-annotated classes in @Command(subcommands = {...})
- Annotate a method with @Command (method args receive @Option and @Parameters)
- Add a help subcommand with CommandLine.HelpCommand.class
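A minimal sketch of these steps (hypothetical greet command; Callable<Integer> is used so call() doubles as the exit code):

```java
import picocli.CommandLine;
import picocli.CommandLine.Command;
import picocli.CommandLine.Option;
import picocli.CommandLine.Parameters;

import java.util.concurrent.Callable;

@Command(name = "greet", mixinStandardHelpOptions = true)
public class App implements Callable<Integer> {
    @Option(names = {"-c", "--count"}, defaultValue = "1")
    int count;

    @Parameters(index = "0", arity = "0..1", defaultValue = "world")
    String name; // optional positional argument

    @Override public Integer call() {
        for (int i = 0; i < count; i++)
            System.out.println("Hello, " + name + "!");
        return 0; // exit code
    }

    public static void main(String[] args) {
        System.exit(new CommandLine(new App()).execute(args));
    }
}
```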
Main features:
- -h/--help options with @Command(mixinStandardHelpOptions = true)
- Custom parsing: pass an implementation of ITypeConverter to converter=
- @Mixin imports all @Options, @Parameters and the @Command attributes of the mixed-in @Command-annotated class
- Negated --no-… options with @Option(negatable=true)
- Do actions and validations when options are set by annotating a setter method. To reject a set value, throw a ParameterException (see above)
- Defaults are taken from the field initializer or from the defaultValue attribute
- Defaults are appended to descriptions only if showDefaultValues=true is set on the @Command or @Option
- Missing options are null unless Optional<T>
- @Parameters index="0" (bind to first position), 1 (bind to second), 2..* (bind to third and everything after). Default is * (all positionals)
- @Option can take 0..* parameters (arity="0..*")
  - For List<T>/T[] types, arity can be above 1
  - For boolean, arity=0
  - If arity for -o can be 0, fallbackValue=x sets the value for when -o is given without a parameter. defaultValue kicks in when -o is not given at all.
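A sketch of negatable, fallbackValue and showDefaultValues from the list above (hypothetical fetch command):

```java
import picocli.CommandLine.Command;
import picocli.CommandLine.Option;

@Command(name = "fetch", showDefaultValues = true)
class Fetch implements Runnable {
    @Option(names = "--cache", negatable = true, defaultValue = "true")
    boolean cache; // picocli also generates a --no-cache option

    @Option(names = "-o", arity = "0..1",
            fallbackValue = "out.txt", defaultValue = "-")
    String output; // "-o" alone -> out.txt; "-o" absent -> "-"

    @Override public void run() {
        System.out.println("cache=" + cache + " output=" + output);
    }
}
```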
R
Custom position for %>% argument
|> and %>% inject the left-side result as the first argument of the right-side call. With %>%, if there is a ., it will be replaced with the left-side value:
prof.st$conf %>% gsub('^.*-NN\\+.*$', 'NN', .)
Bootstrap
library(Rmisc);
library(boot);
R <- 1000
compute.ci <- function(df) {
  if (nrow(df) <= 1) {
    list(lo=NA, hi=NA)
  } else {
    b <- boot(df$ms, \(data, indices) mean(data[indices]), R)
    bca <- boot.ci(b, type=c("bca"))$bca
    list(lo=bca[4], hi=bca[5])
  }
}
Format numbers with thousands separator
format(numbers, digits=2, big.mark=',') will insert the thousands separator, but will ignore digits=2 in favor of giving all numbers in numbers the same width after formatting. The solution is to use %.2f in sprintf and then insert the thousands separator with a regex:
gsub('([0-9])([0-9]{3}[,.])', '\\1,\\2', sprintf("%.2f", numbers))
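A quick check of the regex on a literal value (note that numbers with 7+ digits would need the gsub applied repeatedly, since matches cannot overlap):

```r
gsub('([0-9])([0-9]{3}[,.])', '\\1,\\2', sprintf("%.2f", 1234.5))
# [1] "1,234.50"
```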
Miscellaneous
Edit PDF metadata
- Dump metadata with pdftk:
pdftk file.pdf dump_data data.txt
- Edit
data.txt
- Apply metadata with
pdftk file.pdf update_info data.txt output out.pdf
Drop kernel disk read caches
sudo su -c 'echo 3 > /proc/sys/vm/drop_caches'
CACHEDIR.TAG for rsync
The closest one can get to tar --exclude-caches is rsync -F. To exclude some/dir/ from a backup, create a some/.rsync-filter file:
- /dir
Important:
- To avoid files matching excluded paths from being deleted at the receiver, use --delete-after
- Using -n (--dry-run) will report excluded files as deleted, since the receiver has not received the .rsync-filter file. In a "wet" run such deletes would not occur
- Using * in place of dir/file names in the .rsync-filter will not work
- /dir only matches some/dir, while just dir would also match some/sub/dir
- .rsync-filter files only have effect with rsync -F
Bootable Arch USB stick with encrypted root
Relevant pages on the wiki:
Required partitions:
- EFI system partition (mkfs.fat -F32 /dev/sdx1)
- Root partition (/boot has no dedicated partition)
To create an encrypted partition on /dev/sdx2 with LUKS:
cryptsetup -y -v luksFormat /dev/sdx2
cryptsetup open /dev/sdx2 cryptroot
mkfs.ext4 /dev/mapper/cryptroot
mount /dev/mapper/cryptroot /mnt/root
Follow the installation guide after mounting the root partition. Install base system & chroot:
pacstrap /mnt/root base linux linux-firmware base-devel sudo git tar unrar unzip python nano grub efibootmgr
genfstab -U /mnt/root >> /mnt/root/etc/fstab
arch-chroot /mnt/root
Mount the EFI partition over /boot by adding this /etc/fstab entry:
UUID=98F9-A1A8 /boot vfat rw,relatime 0 2
Configure locale, time, hostname & root password
ln -sf /usr/share/zoneinfo/America/Sao_Paulo /etc/localtime
hwclock --systohc
locale-gen
echo 'LANG=en_US.UTF-8' > /etc/locale.conf
echo 'KEYMAP=br-abnt2' > /etc/vconsole.conf
echo zeta-crypt2 > /etc/hostname
cat > /etc/hosts <<EOF
127.0.0.1 localhost
::1 localhost
127.0.1.1 zeta-crypt2.localdomain zeta-crypt2
EOF
passwd
The initramfs requires special configuration for encryption. Add the keyboard, keymap and encrypt hooks to HOOKS in /etc/mkinitcpio.conf and then recreate the initramfs:
HOOKS=(base udev autodetect keyboard keymap consolefont modconf block encrypt filesystems fsck)
mkinitcpio -P
Before installing GRUB, find the UUID of /dev/sdx2 with lsblk -f
. Add the kernel parameters to /etc/default/grub:
cat > /etc/default/grub <<EOF
GRUB_CMDLINE_LINUX_DEFAULT="cryptdevice=UUID=device-UUID:cryptroot root=/dev/mapper/cryptroot"
EOF
Generate the grub configuration file and install to the EFI partition (/dev/sdx1):
mkdir /boot/grub
grub-mkconfig -o /boot/grub/grub.cfg
grub-install --target=x86_64-efi --efi-directory=/boot --bootloader-id=GRUB --removable
To avoid disk writes from systemd (journal stored in RAM):
mkdir -p /etc/systemd/journald.conf.d/
cat > /etc/systemd/journald.conf.d/usbstick.conf <<EOF
[Journal]
Storage=volatile
RuntimeMaxUse=30M
EOF