mirror of https://github.com/apache/cloudstack.git
build/deploy added to 2.1.x. These files are only used for ant builds -- waf does not use them at all
parent 13bf7242d1
commit 5cc1b66603
5 binary image files added (1.4 KiB, 5.2 KiB, 3.0 KiB, 2.7 KiB, 2.8 KiB); contents not shown.
@ -0,0 +1,103 @@
|
|||
#!/usr/bin/env bash
|
||||
# deploy-db.sh -- deploys the database configuration.
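#
# Illustrative invocation (the additional-sql file name is a placeholder; the
# templates file and the MySQL root password are optional second/third arguments):
#   ./deploy-db.sh my-additional.sql templates.sql <mysql root password>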
|
||||
|
||||
# set -x
|
||||
|
||||
if [ "$1" == "" ]; then
|
||||
printf "Usage: %s [path to additional sql] [root password]\n" $(basename $0) >&2
|
||||
exit 1;
|
||||
fi
|
||||
|
||||
if [ ! -f $1 ]; then
|
||||
echo "Error: Unable to find $1"
|
||||
exit 2
|
||||
fi
|
||||
|
||||
if [ "$2" != "" ]; then
|
||||
if [ ! -f $2 ]; then
|
||||
echo "Error: Unable to find $2"
|
||||
exit 3
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ ! -f create-database.sql ]; then
|
||||
printf "Error: Unable to find create-database.sql\n"
|
||||
exit 4
|
||||
fi
|
||||
|
||||
if [ ! -f create-schema.sql ]; then
|
||||
printf "Error: Unable to find create-schema.sql\n"
|
||||
exit 5
|
||||
fi
|
||||
|
||||
if [ ! -f create-index-fk.sql ]; then
|
||||
printf "Error: Unable to find create-index-fk.sql\n"
|
||||
exit 6;
|
||||
fi
|
||||
|
||||
PATHSEP=':'
|
||||
if [[ $OSTYPE == "cygwin" ]] ; then
|
||||
export CATALINA_HOME=`cygpath -m $CATALINA_HOME`
|
||||
PATHSEP=';'
|
||||
else
|
||||
mysql="mysql"
|
||||
service mysql status > /dev/null 2>/dev/null
|
||||
if [ $? -eq 1 ]; then
|
||||
mysql="mysqld"
|
||||
service mysqld status > /dev/null 2>/dev/null
|
||||
if [ $? -ne 0 ]; then
|
||||
printf "Unable to find mysql daemon\n"
|
||||
exit 7
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "Starting mysql"
|
||||
service $mysql start > /dev/null 2>/dev/null
|
||||
|
||||
fi
|
||||
|
||||
echo "Recreating Database."
|
||||
mysql --user=root --password=$3 < create-database.sql > /dev/null 2>/dev/null
|
||||
mysqlout=$?
|
||||
if [ $mysqlout -eq 1 ]; then
|
||||
printf "Please enter root password for MySQL.\n"
|
||||
mysql --user=root --password < create-database.sql
|
||||
if [ $? -ne 0 ]; then
|
||||
printf "Error: Cannot execute create-database.sql\n"
|
||||
exit 10
|
||||
fi
|
||||
elif [ $mysqlout -ne 0 ]; then
|
||||
printf "Error: Cannot execute create-database.sql\n"
|
||||
exit 11
|
||||
fi
|
||||
|
||||
mysql --user=cloud --password=cloud cloud < create-schema.sql
|
||||
if [ $? -ne 0 ]; then
|
||||
printf "Error: Cannot execute create-schema.sql\n"
|
||||
exit 11
|
||||
fi
|
||||
|
||||
if [ "$1" != "" ]; then
|
||||
mysql --user=cloud --password=cloud cloud < $1
|
||||
if [ $? -ne 0 ]; then
|
||||
printf "Error: Cannot execute $1\n"
|
||||
exit 12
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ "$2" != "" ]; then
|
||||
echo "Adding Templates"
|
||||
mysql --user=cloud --password=cloud cloud < $2
|
||||
if [ $? -ne 0 ]; then
|
||||
printf "Error: Cannot execute $2\n"
|
||||
exit 12
|
||||
fi
|
||||
fi
|
||||
|
||||
|
||||
echo "Creating Indice and Foreign Keys"
|
||||
mysql --user=cloud --password=cloud cloud < create-index-fk.sql
|
||||
if [ $? -ne 0 ]; then
|
||||
printf "Error: Cannot execute create-index-fk.sql\n"
|
||||
exit 13
|
||||
fi
|
||||
|
|
@ -0,0 +1,7 @@
|
|||
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
|
||||
log4j.appender.stdout.Target=System.out
|
||||
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
|
||||
log4j.appender.stdout.layout.ConversionPattern=%d{ABSOLUTE} %5p %c{1}:%L - %m%n
|
||||
log4j.appender.stdout.threshold=ERROR
|
||||
log4j.rootLogger=INFO, stdout
|
||||
log4j.category.org.apache=INFO, stdout
|
||||
|
|
@ -0,0 +1,217 @@
|
|||
#!/usr/bin/env bash
|
||||
# install.sh -- installs an agent
|
||||
#
|
||||
#
|
||||
|
||||
usage() {
|
||||
printf "Usage: %s: -d [directory to deploy to] -t [routing|storage|computing] -z [zip file] -h [host] -p [pod] -c [data center] -m [expert|novice|setup]\n" $(basename $0) >&2
|
||||
}
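# Illustrative example (all values below are placeholders, not defaults):
#   ./install.sh -t computing -z agent.zip -h 192.168.1.1 -p pod1 -c zone1 -d /usr/local/vmops/agent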
|
||||
|
||||
mode=
|
||||
host=
|
||||
pod=
|
||||
zone=
|
||||
|
||||
deploydir=
|
||||
confdir=
|
||||
zipfile=
|
||||
typ=
|
||||
|
||||
#set -x
|
||||
|
||||
while getopts 'd:z:t:x:m:h:p:c:' OPTION
|
||||
do
|
||||
case "$OPTION" in
|
||||
d) deploydir="$OPTARG"
|
||||
;;
|
||||
z) zipfile="$OPTARG"
|
||||
;;
|
||||
t) typ="$OPTARG"
|
||||
;;
|
||||
m) mode="$OPTARG"
|
||||
;;
|
||||
h) host="$OPTARG"
|
||||
;;
|
||||
p) pod="$OPTARG"
|
||||
;;
|
||||
c) zone="$OPTARG"
|
||||
;;
|
||||
?) usage
|
||||
exit 2
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
printf "NOTE: You must have root privileges to install and run this program.\n"
|
||||
|
||||
if [ "$typ" == "" ]; then
|
||||
if [ "$mode" != "expert" ]
|
||||
then
|
||||
printf "Type of agent to install [routing|computing|storage]: "
|
||||
read typ
|
||||
fi
|
||||
fi
|
||||
if [ "$typ" != "computing" ] && [ "$typ" != "routing" ] && [ "$typ" != "storage" ]
|
||||
then
|
||||
printf "ERROR: The choices are computing, routing, or storage.\n"
|
||||
exit 4
|
||||
fi
|
||||
|
||||
if [ "$host" == "" ]; then
|
||||
if [ "$mode" != "expert" ]
|
||||
then
|
||||
printf "Host name or ip address of management server [Required]: "
|
||||
read host
|
||||
if [ "$host" == "" ]; then
|
||||
printf "ERROR: Host is required\n"
|
||||
exit 23;
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
port=
|
||||
if [ "$mode" != "expert" ]
|
||||
then
|
||||
printf "Port number of management server [defaults to 8250]: "
|
||||
read port
|
||||
fi
|
||||
if [ "$port" == "" ]
|
||||
then
|
||||
port=8250
|
||||
fi
|
||||
|
||||
if [ "$zone" == "" ]; then
|
||||
if [ "$mode" != "expert" ]; then
|
||||
printf "Availability Zone [Required]: "
|
||||
read zone
|
||||
if [ "$zone" == "" ]; then
|
||||
printf "ERROR: Zone is required\n";
|
||||
exit 21;
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ "$pod" == "" ]; then
|
||||
if [ "$mode" != "expert" ]; then
|
||||
printf "Pod [Required]: "
|
||||
read pod
|
||||
if [ "$pod" == "" ]; then
|
||||
printf "ERROR: Pod is required\n";
|
||||
exit 22;
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
workers=
|
||||
if [ "$mode" != "expert" ]; then
|
||||
printf "# of workers to start [defaults to 3]: "
|
||||
read workers
|
||||
fi
|
||||
if [ "$workers" == "" ]; then
|
||||
workers=3
|
||||
fi
|
||||
|
||||
if [ "$deploydir" == "" ]; then
|
||||
if [ "$mode" != "expert" ]; then
|
||||
printf "Directory to deploy to [defaults to /usr/local/vmops/agent]: "
|
||||
read deploydir
|
||||
fi
|
||||
if [ "$deploydir" == "" ]; then
|
||||
deploydir="/usr/local/vmops/agent"
|
||||
fi
|
||||
fi
|
||||
if ! mkdir -p $deploydir
|
||||
then
|
||||
printf "ERROR: Unable to create $deploydir\n"
|
||||
exit 5
|
||||
fi
|
||||
|
||||
if [ "$zipfile" == "" ]; then
|
||||
if [ "$mode" != "expert" ]; then
|
||||
printf "Path of the zip file [defaults to agent.zip]: "
|
||||
read zipfile
|
||||
fi
|
||||
if [ "$zipfile" == "" ]; then
|
||||
zipfile="agent.zip"
|
||||
fi
|
||||
|
||||
fi
|
||||
if ! unzip -o $zipfile -d $deploydir
|
||||
then
|
||||
printf "ERROR: Unable to unzip $zipfile to $deploydir\n"
|
||||
exit 6
|
||||
fi
|
||||
|
||||
#if ! chmod -R +x $deploydir/scripts/*.sh
|
||||
#then
|
||||
# printf "ERROR: Unable to change scripts to executable.\n"
|
||||
# exit 7
|
||||
#fi
|
||||
#if ! chmod -R +x $deploydir/scripts/iscsi/*.sh
|
||||
#then
|
||||
# printf "ERROR: Unable to change scripts to executable.\n"
|
||||
# exit 8
|
||||
#fi
|
||||
#if ! chmod -R +x $deploydir/*.sh
|
||||
#then
|
||||
# printf "ERROR: Unable to change scripts to executable.\n"
|
||||
# exit 9
|
||||
#fi
|
||||
|
||||
if [ "$mode" == "setup" ]; then
|
||||
mode="expert"
|
||||
deploydir="/usr/local/vmops/agent"
|
||||
confdir="/etc/vmops"
|
||||
/bin/cp -f $deploydir/conf/agent.properties $confdir/agent.properties
|
||||
if [ $? -gt 0 ]; then
|
||||
printf "ERROR: Failed to copy the agent.properties file into the right place."
|
||||
exit 10;
|
||||
fi
|
||||
else
|
||||
confdir="$deploydir/conf"
|
||||
fi
|
||||
|
||||
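# agent.properties ships with @TYPE@/@HOST@/@PORT@/@POD@/@ZONE@/@WORKERS@ placeholders;
# the sed passes below substitute the values gathered above into that file.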
if [ "$typ" != "" ]; then
|
||||
sed s/@TYPE@/"$typ"/ $confdir/agent.properties > $confdir/tmp
|
||||
/bin/mv -f $confdir/tmp $confdir/agent.properties
|
||||
else
|
||||
printf "INFO: Type is not set\n"
|
||||
fi
|
||||
|
||||
if [ "$host" != "" ]; then
|
||||
sed s/@HOST@/"$host"/ $confdir/agent.properties > $confdir/tmp
|
||||
/bin/mv -f $confdir/tmp $confdir/agent.properties
|
||||
else
|
||||
printf "INFO: host is not set\n"
|
||||
fi
|
||||
|
||||
if [ "$port" != "" ]; then
|
||||
sed s/@PORT@/"$port"/ $confdir/agent.properties > $confdir/tmp
|
||||
/bin/mv -f $confdir/tmp $confdir/agent.properties
|
||||
else
|
||||
printf "INFO: Port is not set\n"
|
||||
fi
|
||||
|
||||
if [ "$pod" != "" ]; then
|
||||
sed s/@POD@/"$pod"/ $confdir/agent.properties > $confdir/tmp
|
||||
/bin/mv -f $confdir/tmp $confdir/agent.properties
|
||||
else
|
||||
printf "INFO: Pod is not set\n"
|
||||
fi
|
||||
|
||||
if [ "$zone" != "" ]; then
|
||||
sed s/@ZONE@/"$zone"/ $confdir/agent.properties > $confdir/tmp
|
||||
/bin/mv -f $confdir/tmp $confdir/agent.properties
|
||||
else
|
||||
printf "INFO: Zone is not set\n"
|
||||
fi
|
||||
|
||||
if [ "$workers" != "" ]; then
|
||||
sed s/@WORKERS@/"$workers"/ $confdir/agent.properties > $confdir/tmp
|
||||
/bin/mv -f $confdir/tmp $confdir/agent.properties
|
||||
else
|
||||
printf "INFO: Workers is not set\n"
|
||||
fi
|
||||
|
||||
printf "SUCCESS: Installation is now complete. If you like to make changes, edit $confdir/agent.properties\n"
|
||||
exit 0
|
||||
|
|
@ -0,0 +1,73 @@
|
|||
#!/usr/bin/env bash
|
||||
# Deploy console proxy package to an existing VM template
|
||||
#
|
||||
usage() {
|
||||
printf "Usage: %s: -d [work directory to deploy to] -z [zip file]" $(basename $0) >&2
|
||||
}
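# Illustrative example (the work directory path is a placeholder; it must already
# contain the existing consoleproxy.tar.gz template being patched):
#   <this script> -d /root/cp-work -z console-proxy.zip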
|
||||
|
||||
deploydir=
|
||||
zipfile=
|
||||
|
||||
#set -x
|
||||
|
||||
while getopts 'd:z:' OPTION
|
||||
do
|
||||
case "$OPTION" in
|
||||
d) deploydir="$OPTARG"
|
||||
;;
|
||||
z) zipfile="$OPTARG"
|
||||
;;
|
||||
?) usage
|
||||
exit 2
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
printf "NOTE: You must have root privileges to install and run this program.\n"
|
||||
|
||||
if [ "$deploydir" == "" ]; then
|
||||
printf "ERROR: Unable to find deployment work directory $deploydir\n"
|
||||
exit 3;
|
||||
fi
|
||||
if [ ! -f $deploydir/consoleproxy.tar.gz ]
|
||||
then
|
||||
printf "ERROR: Unable to find existing console proxy template file (consoleproxy.tar.gz) to work on at $deploydir\n"
|
||||
exit 5
|
||||
fi
|
||||
|
||||
if [ "$zipfile" == "" ]; then
|
||||
zipfile="console-proxy.zip"
|
||||
fi
|
||||
|
||||
if ! mkdir -p /mnt/consoleproxy
|
||||
then
|
||||
printf "ERROR: Unable to create /mnt/consoleproxy for mounting template image\n"
|
||||
exit 5
|
||||
fi
|
||||
|
||||
tar xvfz $deploydir/consoleproxy.tar.gz -C $deploydir
|
||||
mount -o loop $deploydir/vmi-root-fc8-x86_64-domP /mnt/consoleproxy
|
||||
|
||||
if ! unzip -o $zipfile -d /mnt/consoleproxy/usr/local/vmops/consoleproxy
|
||||
then
|
||||
printf "ERROR: Unable to unzip $zipfile to $deploydir\n"
|
||||
exit 6
|
||||
fi
|
||||
|
||||
umount /mnt/consoleproxy
|
||||
|
||||
pushd $deploydir
|
||||
tar cvf consoleproxy.tar vmi-root-fc8-x86_64-domP
|
||||
|
||||
mv -f consoleproxy.tar.gz consoleproxy.tar.gz.old
|
||||
gzip consoleproxy.tar
|
||||
popd
|
||||
|
||||
if [ ! -f $deploydir/consoleproxy.tar.gz ]
|
||||
then
|
||||
mv consoleproxy.tar.gz.old consoleproxy.tar.gz
|
||||
printf "ERROR: failed to deploy and recreate the template at $deploydir\n"
|
||||
fi
|
||||
|
||||
printf "SUCCESS: Installation is now complete. please go to $deploydir to review it\n"
|
||||
exit 0
|
||||
|
|
@ -0,0 +1,106 @@
|
|||
#!/usr/bin/env bash
|
||||
# deploy.sh -- deploys a management server
|
||||
#
|
||||
#
|
||||
|
||||
usage() {
|
||||
printf "Usage: %s: -d [tomcat directory to deploy to] -z [zip file to use]\n" $(basename $0) >&2
|
||||
}
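# Illustrative example (the Tomcat path is a placeholder; $CATALINA_HOME is used
# when -d is omitted):
#   ./deploy.sh -d /usr/local/tomcat/current -z client.zip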
|
||||
|
||||
dflag=
|
||||
zflag=
|
||||
tflag=
|
||||
iflag=
|
||||
|
||||
deploydir=
|
||||
zipfile="client.zip"
|
||||
typ=
|
||||
|
||||
#set -x
|
||||
|
||||
while getopts 'd:z:x:h:' OPTION
|
||||
do
|
||||
case "$OPTION" in
|
||||
d) dflag=1
|
||||
deploydir="$OPTARG"
|
||||
;;
|
||||
z) zflag=1
|
||||
zipfile="$OPTARG"
|
||||
;;
|
||||
h) iflag="$OPTARG"
|
||||
;;
|
||||
?) usage
|
||||
exit 2
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
if [ "$deploydir" == "" ]
|
||||
then
|
||||
if [ "$CATALINA_HOME" == "" ]
|
||||
then
|
||||
printf "Tomcat Directory to deploy to: "
|
||||
read deploydir
|
||||
else
|
||||
deploydir="$CATALINA_HOME"
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ "$deploydir" == "" ]
|
||||
then
|
||||
printf "Tomcat directory was not specified\n";
|
||||
exit 15;
|
||||
fi
|
||||
|
||||
printf "Check to see if the Tomcat directory exist: $deploydir\n"
|
||||
if [ ! -d $deploydir ]
|
||||
then
|
||||
printf "Tomcat directory does not exist\n";
|
||||
exit 16;
|
||||
fi
|
||||
|
||||
if [ "$zipfile" == "" ]
|
||||
then
|
||||
printf "Path of the zip file [defaults to client.zip]: "
|
||||
read zipfile
|
||||
if [ "$zipfile" == "" ]
|
||||
then
|
||||
zipfile="client.zip"
|
||||
fi
|
||||
fi
|
||||
if ! unzip -o $zipfile client.war
|
||||
then
|
||||
exit 6
|
||||
fi
|
||||
|
||||
rm -fr $deploydir/webapps/client
|
||||
|
||||
if ! unzip -o ./client.war -d $deploydir/webapps/client
|
||||
then
|
||||
exit 10;
|
||||
fi
|
||||
|
||||
rm -f ./client.war
|
||||
|
||||
if ! unzip -o $zipfile lib/* -d $deploydir
|
||||
then
|
||||
exit 11;
|
||||
fi
|
||||
|
||||
if ! unzip -o $zipfile conf/* -d $deploydir
|
||||
then
|
||||
exit 12;
|
||||
fi
|
||||
|
||||
if ! unzip -o $zipfile bin/* -d $deploydir
|
||||
then
|
||||
exit 13;
|
||||
fi
|
||||
|
||||
printf "Adding the conf directory to the class loader for tomcat\n"
|
||||
sed 's/shared.loader=$/shared.loader=\$\{catalina.home\},\$\{catalina.home\}\/conf\
|
||||
/' $deploydir/conf/catalina.properties > $deploydir/conf/catalina.properties.tmp
|
||||
mv $deploydir/conf/catalina.properties.tmp $deploydir/conf/catalina.properties
|
||||
|
||||
printf "Installation is now complete\n"
|
||||
exit 0
|
||||
|
|
@ -0,0 +1,185 @@
|
|||
#!/usr/bin/env bash
|
||||
# install.sh -- installs an agent
|
||||
#
|
||||
#
|
||||
|
||||
usage() {
|
||||
printf "Usage: %s: -d [directory to deploy to] -z [zip file] -h [host] -p [pod] -c [data center] -m [expert|novice|setup]\n" $(basename $0) >&2
|
||||
}
|
||||
|
||||
mode=
|
||||
host=
|
||||
pod=
|
||||
zone=
|
||||
|
||||
deploydir=
|
||||
confdir=
|
||||
zipfile=
|
||||
typ=
|
||||
|
||||
#set -x
|
||||
|
||||
while getopts 'd:z:x:m:h:p:c:' OPTION
|
||||
do
|
||||
case "$OPTION" in
|
||||
d) deploydir="$OPTARG"
|
||||
;;
|
||||
z) zipfile="$OPTARG"
|
||||
;;
|
||||
m) mode="$OPTARG"
|
||||
;;
|
||||
h) host="$OPTARG"
|
||||
;;
|
||||
p) pod="$OPTARG"
|
||||
;;
|
||||
c) zone="$OPTARG"
|
||||
;;
|
||||
?) usage
|
||||
exit 2
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
printf "NOTE: You must have root privileges to install and run this program.\n"
|
||||
|
||||
if [ "$mode" == "setup" ]; then
|
||||
mode="expert"
|
||||
deploydir="/usr/local/vmops/agent-simulator"
|
||||
confdir="/etc/vmops"
|
||||
/bin/cp -f $deploydir/conf/agent.properties $confdir/agent.properties
|
||||
if [ $? -gt 0 ]; then
|
||||
printf "ERROR: Failed to copy the agent.properties file into the right place."
|
||||
exit 10;
|
||||
fi
|
||||
else
|
||||
confdir="$deploydir/conf"
|
||||
fi
|
||||
|
||||
if [ "$host" == "" ]; then
|
||||
if [ "$mode" != "expert" ]
|
||||
then
|
||||
printf "Host name or ip address of management server [Required]: "
|
||||
read host
|
||||
if [ "$host" == "" ]; then
|
||||
printf "ERROR: Host is required\n"
|
||||
exit 23;
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
port=
|
||||
if [ "$mode" != "expert" ]
|
||||
then
|
||||
printf "Port number of management server [defaults to 8250]: "
|
||||
read port
|
||||
fi
|
||||
if [ "$port" == "" ]
|
||||
then
|
||||
port=8250
|
||||
fi
|
||||
|
||||
if [ "$zone" == "" ]; then
|
||||
if [ "$mode" != "expert" ]; then
|
||||
printf "Availability Zone [Required]: "
|
||||
read zone
|
||||
if [ "$zone" == "" ]; then
|
||||
printf "ERROR: Zone is required\n";
|
||||
exit 21;
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ "$pod" == "" ]; then
|
||||
if [ "$mode" != "expert" ]; then
|
||||
printf "Pod [Required]: "
|
||||
read pod
|
||||
if ["$pod" == ""]; then
|
||||
printf "ERROR: Pod is required\n";
|
||||
exit 22;
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
workers=
|
||||
if [ "$mode" != "expert" ]; then
|
||||
printf "# of workers to start [defaults to 3]: "
|
||||
read workers
|
||||
fi
|
||||
if [ "$workers" == "" ]; then
|
||||
workers=3
|
||||
fi
|
||||
|
||||
if [ "$deploydir" == "" ]; then
|
||||
if [ "$mode" != "expert" ]; then
|
||||
printf "Directory to deploy to [defaults to /usr/local/vmops/agent-simulator]: "
|
||||
read deploydir
|
||||
fi
|
||||
if [ "$deploydir" == "" ]; then
|
||||
deploydir="/usr/local/vmops/agent-simulator"
|
||||
fi
|
||||
fi
|
||||
if ! mkdir -p $deploydir
|
||||
then
|
||||
printf "ERROR: Unable to create $deploydir\n"
|
||||
exit 5
|
||||
fi
|
||||
|
||||
if [ "$zipfile" == "" ]; then
|
||||
if [ "$mode" != "expert" ]; then
|
||||
printf "Path of the zip file [defaults to agent-simulator.zip]: "
|
||||
read zipfile
|
||||
fi
|
||||
if [ "$zipfile" == "" ]; then
|
||||
zipfile="agent-simulator.zip"
|
||||
fi
|
||||
|
||||
fi
|
||||
if ! unzip -o $zipfile -d $deploydir
|
||||
then
|
||||
printf "ERROR: Unable to unzip $zipfile to $deploydir\n"
|
||||
exit 6
|
||||
fi
|
||||
|
||||
if ! chmod +x $deploydir/*.sh
|
||||
then
|
||||
printf "ERROR: Unable to change scripts to executable.\n"
|
||||
exit 9
|
||||
fi
|
||||
|
||||
if [ "$host" != "" ]; then
|
||||
sed s/@HOST@/"$host"/ $confdir/agent.properties > $confdir/tmp
|
||||
/bin/mv -f $confdir/tmp $confdir/agent.properties
|
||||
else
|
||||
printf "INFO: host is not set\n"
|
||||
fi
|
||||
|
||||
if [ "$port" != "" ]; then
|
||||
sed s/@PORT@/"$port"/ $confdir/agent.properties > $confdir/tmp
|
||||
/bin/mv -f $confdir/tmp $confdir/agent.properties
|
||||
else
|
||||
printf "INFO: Port is not set\n"
|
||||
fi
|
||||
|
||||
if [ "$pod" != "" ]; then
|
||||
sed s/@POD@/"$pod"/ $confdir/agent.properties > $confdir/tmp
|
||||
/bin/mv -f $confdir/tmp $confdir/agent.properties
|
||||
else
|
||||
printf "INFO: Pod is not set\n"
|
||||
fi
|
||||
|
||||
if [ "$zone" != "" ]; then
|
||||
sed s/@ZONE@/"$zone"/ $confdir/agent.properties > $confdir/tmp
|
||||
/bin/mv -f $confdir/tmp $confdir/agent.properties
|
||||
else
|
||||
printf "INFO: Zone is not set\n"
|
||||
fi
|
||||
|
||||
if [ "$workers" != "" ]; then
|
||||
sed s/@WORKERS@/"$workers"/ $confdir/agent.properties > $confdir/tmp
|
||||
/bin/mv -f $confdir/tmp $confdir/agent.properties
|
||||
else
|
||||
printf "INFO: Workers is not set\n"
|
||||
fi
|
||||
|
||||
printf "SUCCESS: Installation is now complete. If you like to make changes, edit $confdir/agent.properties\n"
|
||||
exit 0
|
||||
|
|
@ -0,0 +1,133 @@
|
|||
#!/usr/bin/env bash
|
||||
# install-storage-server.sh: Installs a VMOps Storage Server
|
||||
#
|
||||
|
||||
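# choose_correct_filename: echo the user-specified file if it exists, otherwise the
# default file if that exists; return non-zero when neither file is present.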
choose_correct_filename() {
|
||||
local default_filename=$1
|
||||
local user_specified_filename=$2
|
||||
|
||||
if [ -f "$user_specified_filename" ]
|
||||
then
|
||||
echo $user_specified_filename
|
||||
return 0
|
||||
else
|
||||
if [ -f "$default_filename" ]
|
||||
then
|
||||
echo $default_filename
|
||||
return 0
|
||||
else
|
||||
echo ""
|
||||
return 1
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
install_opensolaris_package() {
|
||||
pkg_name=$1
|
||||
|
||||
pkg info $pkg_name >> /dev/null
|
||||
|
||||
if [ $? -gt 0 ]
|
||||
then
|
||||
# The package is not installed, so install it
|
||||
pkg install $pkg_name
|
||||
return $?
|
||||
else
|
||||
# The package is already installed
|
||||
return 0
|
||||
fi
|
||||
}
|
||||
|
||||
exit_if_error() {
|
||||
return_code=$1
|
||||
msg=$2
|
||||
|
||||
if [ $return_code -gt 0 ]
|
||||
then
|
||||
echo $msg
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
usage() {
|
||||
printf "Usage: ./install-storage-server.sh <path to agent.zip> <path to templates.tar.gz>"
|
||||
}
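# Illustrative example (the script falls back to ./agent.zip and ./templates.tar.gz
# when an argument is omitted or the given path does not exist):
#   ./install-storage-server.sh /path/to/agent.zip /path/to/templates.tar.gz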
|
||||
|
||||
AGENT_FILE=$(choose_correct_filename "./agent.zip" $1)
|
||||
exit_if_error $? "Please download agent.zip to your Storage Server."
|
||||
|
||||
TEMPLATES_FILE=$(choose_correct_filename "./templates.tar.gz" $2)
|
||||
exit_if_error $? "Please download templates.tar.gz to your Storage Server."
|
||||
|
||||
VMOPS_DIR="/usr/local/vmops"
|
||||
AGENT_DIR="/usr/local/vmops/agent"
|
||||
CONF_DIR="/etc/vmops"
|
||||
TEMPLATES_DIR="/root/template"
|
||||
|
||||
# Make all the necessary directories if they don't already exist
|
||||
|
||||
echo "Creating VMOps directories..."
|
||||
for dir in $VMOPS_DIR $CONF_DIR $TEMPLATES_DIR
|
||||
do
|
||||
mkdir -p $dir
|
||||
done
|
||||
|
||||
# Unzip agent.zip to $AGENT_DIR
|
||||
|
||||
echo "Uncompressing and installing VMOps Storage Agent..."
|
||||
unzip -o $AGENT_FILE -d $AGENT_DIR >> /dev/null
|
||||
|
||||
# Remove agent/conf/agent.properties, since we should use the file in the real configuration directory
|
||||
|
||||
rm $AGENT_DIR/conf/agent.properties
|
||||
|
||||
# Backup any existing VMOps configuration files, if there aren't any backups already
|
||||
|
||||
if [ ! -d $CONF_DIR/BACKUP ]
|
||||
then
|
||||
echo "Backing up existing configuration files..."
|
||||
mkdir -p $CONF_DIR/BACKUP
|
||||
cp $CONF_DIR/*.properties $CONF_DIR/BACKUP >> /dev/null
|
||||
fi
|
||||
|
||||
# Copy all the files in storagehdpatch to their proper places
|
||||
|
||||
echo "Installing system files..."
|
||||
(cd $AGENT_DIR/storagehdpatch; tar cf - .) | (cd /; tar xf -)
|
||||
exit_if_error $? "There was a problem with installing system files. Please contact VMOps Support."
|
||||
|
||||
# Make vsetup executable
|
||||
chmod +x /usr/sbin/vsetup
|
||||
|
||||
# Make vmops executable
|
||||
chmod +x /lib/svc/method/vmops
|
||||
|
||||
# Uncompress the templates and copy them to the templates directory
|
||||
|
||||
echo "Uncompressing templates..."
|
||||
tar -xzf $TEMPLATES_FILE -C $TEMPLATES_DIR >> /dev/null
|
||||
exit_if_error $? "There was a problem with uncompressing templates. Please contact VMOps Support."
|
||||
|
||||
# Install the storage-server package, if it is not already installed
|
||||
echo "Installing OpenSolaris storage server package..."
|
||||
install_opensolaris_package "storage-server"
|
||||
exit_if_error $? "There was a problem with installing the storage server package. Please contact VMOps Support."
|
||||
|
||||
echo "Installing COMSTAR..."
|
||||
install_opensolaris_package "SUNWiscsit"
|
||||
exit_if_error $? "Unable to install COMSTAR iscsi target. Please contact VMOps Support."
|
||||
|
||||
# Install the SUNWinstall-test package, if it is not already installed
|
||||
|
||||
echo "Installing OpenSolaris test tools package..."
|
||||
install_opensolaris_package "SUNWinstall-test"
|
||||
exit_if_error $? "There was a problem with installing the test tools package. Please contact VMOps Support."
|
||||
|
||||
# Print a success message
|
||||
printf "\nSuccessfully installed the VMOps Storage Server.\n"
|
||||
printf "Please complete the following steps to configure your networking settings and storage pools:\n\n"
|
||||
printf "1. Specify networking settings in /etc/vmops/network.properties\n"
|
||||
printf "2. Run \"vsetup networking\" and then specify disk settings in /etc/vmops/disks.properties\n"
|
||||
printf "3. Run \"vsetup zpool\" and reboot the machine when prompted.\n\n"
|
||||
|
||||
|
||||
|
|
@ -0,0 +1,139 @@
|
|||
#!/bin/bash
|
||||
|
||||
# install.sh -- installs MySQL, Java, Tomcat, and the VMOps server
|
||||
|
||||
#set -x
|
||||
set -e
|
||||
|
||||
EX_NOHOSTNAME=15
|
||||
EX_SELINUX=16
|
||||
|
||||
function usage() {
|
||||
printf "Usage: %s [path to server-setup.xml]\n" $(basename $0) >&2
|
||||
exit 64
|
||||
}
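# Illustrative example (server-setup.xml is the data-center description file that
# ships alongside these scripts):
#   ./install.sh server-setup.xml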
|
||||
|
||||
function checkhostname() {
|
||||
if hostname | grep -qF . ; then true ; else
|
||||
echo "You need to have a fully-qualified host name for the setup to work." > /dev/stderr
|
||||
echo "Please use your operating system's network setup tools to set one." > /dev/stderr
|
||||
exit $EX_NOHOSTNAME
|
||||
fi
|
||||
}
|
||||
|
||||
function checkselinux() {
|
||||
#### before checking arguments, make sure SELINUX is "permissive" in /etc/selinux/config
|
||||
if /usr/sbin/getenforce | grep -qi enforcing ; then borked=1 ; fi
|
||||
if grep -i SELINUX=enforcing /etc/selinux/config ; then borked=1 ; fi
|
||||
if [ "$borked" == "1" ] ; then
|
||||
echo "SELINUX is set to enforcing, please set it to permissive in /etc/selinux/config" > /dev/stderr
|
||||
echo "then reboot the machine, after which you can run the install script again." > /dev/stderr
|
||||
exit $EX_SELINUX
|
||||
fi
|
||||
}
|
||||
|
||||
checkhostname
|
||||
checkselinux
|
||||
|
||||
if [ "$1" == "" ]; then
|
||||
usage
|
||||
fi
|
||||
|
||||
if [ ! -f $1 ]; then
|
||||
echo "Error: Unable to find $1" > /dev/stderr
|
||||
exit 2
|
||||
fi
|
||||
|
||||
#### check that all files exist
|
||||
if [ ! -f apache-tomcat-6.0.18.tar.gz ]; then
|
||||
printf "Error: Unable to find apache-tomcat-6.0.18.tar.gz\n" > /dev/stderr
|
||||
exit 3
|
||||
fi
|
||||
|
||||
if [ ! -f MySQL-client-5.1.30-0.glibc23.x86_64.rpm ]; then
|
||||
printf "Error: Unable to find MySQL-client-5.1.30-0.glibc23.x86_64.rpm\n" > /dev/stderr
|
||||
exit 4
|
||||
fi
|
||||
|
||||
if [ ! -f MySQL-server-5.1.30-0.glibc23.x86_64.rpm ]; then
|
||||
printf "Error: Unable to find MySQL-server-5.1.30-0.glibc23.x86_64.rpm\n" > /dev/stderr
|
||||
exit 5
|
||||
fi
|
||||
|
||||
if [ ! -f jdk-6u13-linux-amd64.rpm.bin ]; then
|
||||
printf "Error: Unable to find jdk-6u13-linux-amd64.rpm.bin\n" > /dev/stderr
|
||||
exit 6
|
||||
fi
|
||||
|
||||
#if [ ! -f osol.tar.bz2 ]; then
|
||||
# printf "Error: Unable to find osol.tar.bz2\n"
|
||||
# exit 7
|
||||
#fi
|
||||
|
||||
if [ ! -f apache-tomcat-6.0.18.tar.gz ]; then
|
||||
printf "Error: Unable to find apache-tomcat-6.0.18.tar.gz\n" > /dev/stderr
|
||||
exit 8
|
||||
fi
|
||||
|
||||
if [ ! -f vmops-*.zip ]; then
|
||||
printf "Error: Unable to find vmops install file\n" > /dev/stderr
|
||||
exit 9
|
||||
fi
|
||||
|
||||
if [ ! -f catalina ] ; then
|
||||
printf "Error: Unable to find catalina initscript\n" > /dev/stderr
|
||||
exit 10
|
||||
fi
|
||||
|
||||
if [ ! -f usageserver ] ; then
|
||||
printf "Error: Unable to find usageserver initscript\n" > /dev/stderr
|
||||
exit 11
|
||||
fi
|
||||
|
||||
###### install Apache Tomcat
|
||||
# if [ ! -d /usr/local/tomcat ] ; then
|
||||
echo "installing Apache..."
|
||||
mkdir -p /usr/local/tomcat
|
||||
tar xfz apache-tomcat-6.0.18.tar.gz -C /usr/local/tomcat
|
||||
ln -s /usr/local/tomcat/apache-tomcat-6.0.18 /usr/local/tomcat/current
|
||||
# fi
|
||||
# if [ ! -f /etc/profile.d/catalinahome.sh ] ; then
|
||||
# echo "export CATALINA_HOME=/usr/local/tomcat/current" >> /etc/profile.d/catalinahome.sh
|
||||
# fi
|
||||
source /etc/profile.d/catalinahome.sh
|
||||
# if [ ! -f /etc/init.d/catalina ] ; then
|
||||
cp -f catalina /etc/init.d
|
||||
/sbin/chkconfig catalina on
|
||||
# fi
|
||||
|
||||
####### set up usage server as a service
|
||||
if [ ! -f /etc/init.d/usageserver ] ; then
|
||||
cp -f usageserver /etc/init.d
|
||||
/sbin/chkconfig usageserver on
|
||||
fi
|
||||
|
||||
##### set up mysql
|
||||
if rpm -q MySQL-server MySQL-client > /dev/null 2>&1 ; then true ; else
|
||||
echo "installing MySQL..."
|
||||
yum localinstall --nogpgcheck -y MySQL-*.rpm
|
||||
fi
|
||||
|
||||
#### install JDK
|
||||
echo "installing JDK..."
|
||||
sh jdk-6u13-linux-amd64.rpm.bin
|
||||
rm -rf /usr/bin/java
|
||||
ln -s /usr/java/default/bin/java /usr/bin/java
|
||||
|
||||
#### setting up OSOL image
|
||||
#mkdir -p $CATALINA_HOME/webapps/images
|
||||
#echo "copying Open Solaris image, this may take a few moments..."
|
||||
#cp osol.tar.bz2 $CATALINA_HOME/webapps/images
|
||||
|
||||
#### deploying database
|
||||
unzip -o vmops-*.zip
|
||||
cd vmops-*
|
||||
sh deploy-server.sh -d "$CATALINA_HOME"
|
||||
cd db
|
||||
sh deploy-db.sh "../../$1" templates.sql
|
||||
|
||||
exit 0
|
||||
|
|
@ -0,0 +1,38 @@
|
|||
#
|
||||
# Copyright 2005 Sun Microsystems, Inc. All rights reserved.
|
||||
# Use is subject to license terms.
|
||||
#
|
||||
# CDDL HEADER START
|
||||
#
|
||||
# The contents of this file are subject to the terms of the
|
||||
# Common Development and Distribution License, Version 1.0 only
|
||||
# (the "License"). You may not use this file except in compliance
|
||||
# with the License.
|
||||
#
|
||||
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
|
||||
# or http://www.opensolaris.org/os/licensing.
|
||||
# See the License for the specific language governing permissions
|
||||
# and limitations under the License.
|
||||
#
|
||||
# When distributing Covered Code, include this CDDL HEADER in each
|
||||
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
|
||||
# If applicable, add the following below this CDDL HEADER, with the
|
||||
# fields enclosed by brackets "[]" replaced with your own identifying
|
||||
# information: Portions Copyright [yyyy] [name of copyright owner]
|
||||
#
|
||||
# CDDL HEADER END
|
||||
#
|
||||
#ident "%Z%%M% %I% %E% SMI"
|
||||
#
|
||||
# This file is /etc/default/init. /etc/TIMEZONE is a symlink to this file.
|
||||
# This file looks like a shell script, but it is not. To maintain
|
||||
# compatibility with old versions of /etc/TIMEZONE, some shell constructs
|
||||
# (i.e., export commands) are allowed in this file, but are ignored.
|
||||
#
|
||||
# Lines of this file should be of the form VAR=value, where VAR is one of
|
||||
# TZ, LANG, CMASK, or any of the LC_* environment variables. value may
|
||||
# be enclosed in double quotes (") or single quotes (').
|
||||
#
|
||||
TZ=GMT
|
||||
CMASK=022
|
||||
LANG=en_US.UTF-8
|
||||
|
|
@ -0,0 +1,6 @@
|
|||
driftfile /var/lib/ntp/ntp.drift
|
||||
|
||||
server 0.pool.ntp.org
|
||||
server 1.pool.ntp.org
|
||||
server 2.pool.ntp.org
|
||||
server 3.pool.ntp.org
|
||||
|
|
@ -0,0 +1,70 @@
|
|||
#
|
||||
# CDDL HEADER START
|
||||
#
|
||||
# The contents of this file are subject to the terms of the
|
||||
# Common Development and Distribution License (the "License").
|
||||
# You may not use this file except in compliance with the License.
|
||||
#
|
||||
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
|
||||
# or http://www.opensolaris.org/os/licensing.
|
||||
# See the License for the specific language governing permissions
|
||||
# and limitations under the License.
|
||||
#
|
||||
# When distributing Covered Code, include this CDDL HEADER in each
|
||||
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
|
||||
# If applicable, add the following below this CDDL HEADER, with the
|
||||
# fields enclosed by brackets "[]" replaced with your own identifying
|
||||
# information: Portions Copyright [yyyy] [name of copyright owner]
|
||||
#
|
||||
# CDDL HEADER END
|
||||
#
|
||||
# Copyright 2007 Sun Microsystems, Inc. All rights reserved.
|
||||
# Use is subject to license terms.
|
||||
#
|
||||
#ident "%Z%%M% %I% %E% SMI"
|
||||
|
||||
#
|
||||
# /etc/nsswitch.dns:
|
||||
#
|
||||
# An example file that could be copied over to /etc/nsswitch.conf; it uses
|
||||
# DNS for hosts lookups, otherwise it does not use any other naming service.
|
||||
#
|
||||
# "hosts:" and "services:" in this file are used only if the
|
||||
# /etc/netconfig file has a "-" for nametoaddr_libs of "inet" transports.
|
||||
|
||||
# DNS service expects that an instance of svc:/network/dns/client be
|
||||
# enabled and online.
|
||||
|
||||
passwd: files
|
||||
group: files
|
||||
|
||||
# You must also set up the /etc/resolv.conf file for DNS name
|
||||
# server lookup. See resolv.conf(4). For lookup via mdns
|
||||
# svc:/network/dns/multicast:default must also be enabled. See mdnsd(1M)
|
||||
hosts: files dns
|
||||
|
||||
# Note that IPv4 addresses are searched for in all of the ipnodes databases
|
||||
# before searching the hosts databases.
|
||||
ipnodes: files dns
|
||||
|
||||
networks: files
|
||||
protocols: files
|
||||
rpc: files
|
||||
ethers: files
|
||||
netmasks: files
|
||||
bootparams: files
|
||||
publickey: files
|
||||
# At present there isn't a 'files' backend for netgroup; the system will
|
||||
# figure it out pretty quickly, and won't use netgroups at all.
|
||||
netgroup: files
|
||||
automount: files
|
||||
aliases: files
|
||||
services: files
|
||||
printers: user files
|
||||
|
||||
auth_attr: files
|
||||
prof_attr: files
|
||||
project: files
|
||||
|
||||
tnrhtp: files
|
||||
tnrhdb: files
|
||||
|
|
@ -0,0 +1,154 @@
|
|||
#
|
||||
# Copyright 2008 Sun Microsystems, Inc. All rights reserved.
|
||||
# Use is subject to license terms.
|
||||
#
|
||||
# Configuration file for sshd(1m)
|
||||
|
||||
# Protocol versions supported
|
||||
#
|
||||
# The sshd shipped in this release of Solaris has support for major versions
|
||||
# 1 and 2. It is recommended due to security weaknesses in the v1 protocol
|
||||
# that sites run only v2 if possible. Support for v1 is provided to help sites
|
||||
# with existing ssh v1 clients/servers to transition.
|
||||
# Support for v1 may not be available in a future release of Solaris.
|
||||
#
|
||||
# To enable support for v1 an RSA1 key must be created with ssh-keygen(1).
|
||||
# RSA and DSA keys for protocol v2 are created by /etc/init.d/sshd if they
|
||||
# do not already exist, RSA1 keys for protocol v1 are not automatically created.
|
||||
|
||||
# Uncomment ONLY ONE of the following Protocol statements.
|
||||
|
||||
# Only v2 (recommended)
|
||||
Protocol 2
|
||||
|
||||
# Both v1 and v2 (not recommended)
|
||||
#Protocol 2,1
|
||||
|
||||
# Only v1 (not recommended)
|
||||
#Protocol 1
|
||||
|
||||
# Listen port (the IANA registered port number for ssh is 22)
|
||||
Port 22
|
||||
|
||||
# The default listen address is all interfaces, this may need to be changed
|
||||
# if you wish to restrict the interfaces sshd listens on for a multi homed host.
|
||||
# Multiple ListenAddress entries are allowed.
|
||||
|
||||
# IPv4 only
|
||||
#ListenAddress 0.0.0.0
|
||||
# IPv4 & IPv6
|
||||
ListenAddress ::
|
||||
|
||||
# Port forwarding
|
||||
AllowTcpForwarding no
|
||||
|
||||
# If port forwarding is enabled, specify if the server can bind to INADDR_ANY.
|
||||
# This allows the local port forwarding to work when connections are received
|
||||
# from any remote host.
|
||||
GatewayPorts no
|
||||
|
||||
# X11 tunneling options
|
||||
X11Forwarding yes
|
||||
X11DisplayOffset 10
|
||||
X11UseLocalhost yes
|
||||
|
||||
# The maximum number of concurrent unauthenticated connections to sshd.
|
||||
# start:rate:full see sshd(1) for more information.
|
||||
# The default is 10 unauthenticated clients.
|
||||
#MaxStartups 10:30:60
|
||||
|
||||
# Banner to be printed before authentication starts.
|
||||
#Banner /etc/issue
|
||||
|
||||
# Should sshd print the /etc/motd file and check for mail.
|
||||
# On Solaris it is assumed that the login shell will do these (eg /etc/profile).
|
||||
PrintMotd no
|
||||
|
||||
# KeepAlive specifies whether keep alive messages are sent to the client.
|
||||
# See sshd(1) for detailed description of what this means.
|
||||
# Note that the client may also be sending keep alive messages to the server.
|
||||
KeepAlive yes
|
||||
|
||||
# Syslog facility and level
|
||||
SyslogFacility auth
|
||||
LogLevel info
|
||||
|
||||
#
|
||||
# Authentication configuration
|
||||
#
|
||||
|
||||
# Host private key files
|
||||
# Must be on a local disk and readable only by the root user (root:sys 600).
|
||||
HostKey /etc/ssh/ssh_host_rsa_key
|
||||
HostKey /etc/ssh/ssh_host_dsa_key
|
||||
|
||||
# Length of the server key
|
||||
# Default 768, Minimum 512
|
||||
ServerKeyBits 768
|
||||
|
||||
# sshd regenerates the key every KeyRegenerationInterval seconds.
|
||||
# The key is never stored anywhere except the memory of sshd.
|
||||
# The default is 1 hour (3600 seconds).
|
||||
KeyRegenerationInterval 3600
|
||||
|
||||
# Ensure secure permissions on users .ssh directory.
|
||||
StrictModes yes
|
||||
|
||||
# Length of time in seconds before a client that hasn't completed
|
||||
# authentication is disconnected.
|
||||
# Default is 600 seconds. 0 means no time limit.
|
||||
LoginGraceTime 600
|
||||
|
||||
# Maximum number of retries for authentication
|
||||
# Default is 6. Default (if unset) for MaxAuthTriesLog is MaxAuthTries / 2
|
||||
MaxAuthTries 6
|
||||
MaxAuthTriesLog 3
|
||||
|
||||
# Are logins to accounts with empty passwords allowed.
|
||||
# If PermitEmptyPasswords is no, pass PAM_DISALLOW_NULL_AUTHTOK
|
||||
# to pam_authenticate(3PAM).
|
||||
PermitEmptyPasswords no
|
||||
|
||||
# To disable tunneled clear text passwords, change PasswordAuthentication to no.
|
||||
PasswordAuthentication yes
|
||||
|
||||
# Use PAM via keyboard interactive method for authentication.
|
||||
# Depending on the setup of pam.conf(4) this may allow tunneled clear text
|
||||
# passwords even when PasswordAuthentication is set to no. This is dependent
|
||||
# on what the individual modules request and is out of the control of sshd
|
||||
# or the protocol.
|
||||
PAMAuthenticationViaKBDInt yes
|
||||
|
||||
# Are root logins permitted using sshd.
|
||||
# Note that sshd uses pam_authenticate(3PAM) so the root (or any other) user
|
||||
# maybe denied access by a PAM module regardless of this setting.
|
||||
# Valid options are yes, without-password, no.
|
||||
PermitRootLogin yes
|
||||
|
||||
# sftp subsystem
|
||||
Subsystem sftp /usr/lib/ssh/sftp-server
|
||||
|
||||
|
||||
# SSH protocol v1 specific options
|
||||
#
|
||||
# The following options only apply to the v1 protocol and provide
|
||||
# some form of backwards compatibility with the very weak security
|
||||
# of /usr/bin/rsh. Their use is not recommended and the functionality
|
||||
# will be removed when support for v1 protocol is removed.
|
||||
|
||||
# Should sshd use .rhosts and .shosts for password less authentication.
|
||||
IgnoreRhosts yes
|
||||
RhostsAuthentication no
|
||||
|
||||
# Rhosts RSA Authentication
|
||||
# For this to work you will also need host keys in /etc/ssh/ssh_known_hosts.
|
||||
# If the user on the client side is not root then this won't work on
|
||||
# Solaris since /usr/bin/ssh is not installed setuid.
|
||||
RhostsRSAAuthentication no
|
||||
|
||||
# Uncomment if you don't trust ~/.ssh/known_hosts for RhostsRSAAuthentication.
|
||||
#IgnoreUserKnownHosts yes
|
||||
|
||||
# Is pure RSA authentication allowed.
|
||||
# Default is yes
|
||||
RSAAuthentication yes
|
||||
|
|
@ -0,0 +1,101 @@
|
|||
*ident "%Z%%M% %I% %E% SMI" /* SVR4 1.5 */
|
||||
*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
* The contents of this file are subject to the terms of the
|
||||
* Common Development and Distribution License, Version 1.0 only
|
||||
* (the "License"). You may not use this file except in compliance
|
||||
* with the License.
|
||||
*
|
||||
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
|
||||
* or http://www.opensolaris.org/os/licensing.
|
||||
* See the License for the specific language governing permissions
|
||||
* and limitations under the License.
|
||||
*
|
||||
* When distributing Covered Code, include this CDDL HEADER in each
|
||||
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
|
||||
* If applicable, add the following below this CDDL HEADER, with the
|
||||
* fields enclosed by brackets "[]" replaced with your own identifying
|
||||
* information: Portions Copyright [yyyy] [name of copyright owner]
|
||||
*
|
||||
* CDDL HEADER END
|
||||
*
|
||||
*
|
||||
* SYSTEM SPECIFICATION FILE
|
||||
*
|
||||
|
||||
* moddir:
|
||||
*
|
||||
* Set the search path for modules. This has a format similar to the
|
||||
* csh path variable. If the module isn't found in the first directory
|
||||
* it tries the second and so on. The default is /kernel /usr/kernel
|
||||
*
|
||||
* Example:
|
||||
* moddir: /kernel /usr/kernel /other/modules
|
||||
|
||||
|
||||
|
||||
* root device and root filesystem configuration:
|
||||
*
|
||||
* The following may be used to override the defaults provided by
|
||||
* the boot program:
|
||||
*
|
||||
* rootfs: Set the filesystem type of the root.
|
||||
*
|
||||
* rootdev: Set the root device. This should be a fully
|
||||
* expanded physical pathname. The default is the
|
||||
* physical pathname of the device where the boot
|
||||
* program resides. The physical pathname is
|
||||
* highly platform and configuration dependent.
|
||||
*
|
||||
* Example:
|
||||
* rootfs:ufs
|
||||
* rootdev:/sbus@1,f8000000/esp@0,800000/sd@3,0:a
|
||||
*
|
||||
* (Swap device configuration should be specified in /etc/vfstab.)
|
||||
|
||||
|
||||
|
||||
* exclude:
|
||||
*
|
||||
* Modules appearing in the moddir path which are NOT to be loaded,
|
||||
* even if referenced. Note that `exclude' accepts either a module name,
|
||||
* or a filename which includes the directory.
|
||||
*
|
||||
* Examples:
|
||||
* exclude: win
|
||||
* exclude: sys/shmsys
|
||||
|
||||
|
||||
|
||||
* forceload:
|
||||
*
|
||||
* Cause these modules to be loaded at boot time, (just before mounting
|
||||
* the root filesystem) rather than at first reference. Note that
|
||||
* forceload expects a filename which includes the directory. Also
|
||||
* note that loading a module does not necessarily imply that it will
|
||||
* be installed.
|
||||
*
|
||||
* Example:
|
||||
* forceload: drv/foo
|
||||
|
||||
|
||||
|
||||
* set:
|
||||
*
|
||||
* Set an integer variable in the kernel or a module to a new value.
|
||||
* This facility should be used with caution. See system(4).
|
||||
*
|
||||
* Examples:
|
||||
*
|
||||
* To set variables in 'unix':
|
||||
*
|
||||
* set nautopush=32
|
||||
* set maxusers=40
|
||||
*
|
||||
* To set a variable named 'debug' in the module named 'test_module'
|
||||
*
|
||||
* set test_module:debug = 0x13
|
||||
|
||||
* set zfs:zfs_arc_max=0x4002000
|
||||
set zfs:zfs_vdev_cache_size=0
|
||||
|
|
@ -0,0 +1,7 @@
|
|||
# Specify disks in this file
|
||||
# D: Data
|
||||
# C: Cache
|
||||
# L: Intent Log
|
||||
# S: Spare
|
||||
# U: Unused
|
||||
|
||||
|
|
@ -0,0 +1,35 @@
|
|||
# Host Settings
|
||||
hostname=
|
||||
domain=
|
||||
dns1=
|
||||
dns2=
|
||||
|
||||
# Private/Storage Network Settings (required)
|
||||
storage.ip=
|
||||
storage.netmask=
|
||||
storage.gateway=
|
||||
|
||||
# Second Storage Network Settings (optional)
|
||||
storage.ip.2=
|
||||
storage.netmask.2=
|
||||
storage.gateway.2=
|
||||
|
||||
# Datacenter Settings
|
||||
pod=
|
||||
zone=
|
||||
host=
|
||||
port=
|
||||
|
||||
# Storage Appliance Settings (optional)
|
||||
# Specify if you would like to use this Storage Server with an external storage appliance
|
||||
iscsi.iqn=
|
||||
iscsi.ip=
|
||||
iscsi.port=
|
||||
|
||||
# VMOps IQN (optional)
|
||||
# Specify if you would like to manually change the IQN of the Storage Server's iSCSI target
|
||||
vmops.iqn=
|
||||
|
||||
# MTU (optional)
|
||||
mtu=
|
||||
|
||||
|
|
@ -0,0 +1,106 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# vmops Script to start and stop the VMOps Agent.
|
||||
#
|
||||
# Author: Chiradeep Vittal <chiradeep@vmops.com>
|
||||
# chkconfig: 2345 99 01
|
||||
# description: Start up the VMOps agent
|
||||
|
||||
# Source function library.
|
||||
if [ -f /etc/init.d/functions ]
|
||||
then
|
||||
. /etc/init.d/functions
|
||||
fi
|
||||
|
||||
_success() {
|
||||
if [ -f /etc/init.d/functions ]
|
||||
then
|
||||
success
|
||||
else
|
||||
echo "Success"
|
||||
fi
|
||||
}
|
||||
|
||||
_failure() {
|
||||
if [ -f /etc/init.d/functions ]
|
||||
then
|
||||
failure
|
||||
else
|
||||
echo "Failed"
|
||||
fi
|
||||
}
|
||||
RETVAL=$?
|
||||
VMOPS_HOME="/usr/local/vmops"
|
||||
|
||||
mkdir -p /var/log/vmops
|
||||
|
||||
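# get_pids: list the PIDs of running agent.sh processes whose working directory
# (per pwdx) is under $VMOPS_HOME, so unrelated agent.sh instances are ignored.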
get_pids() {
|
||||
local i
|
||||
for i in $(ps -ef | grep agent.sh | grep -v grep | awk '{print $2}');
|
||||
do
|
||||
echo $(pwdx $i) | grep "$VMOPS_HOME" | grep agent | awk -F: '{print $1}';
|
||||
done
|
||||
}
|
||||
|
||||
start() {
|
||||
local pid=$(get_pids)
|
||||
echo -n "Starting VMOps agent: "
|
||||
if [ -f $VMOPS_HOME/agent/agent.sh ];
|
||||
then
|
||||
if [ "$pid" == "" ]
|
||||
then
|
||||
(cd $VMOPS_HOME/agent; nohup ./agent.sh > /var/log/vmops/vmops.out 2>&1 & )
|
||||
pid=$(get_pids)
|
||||
echo $pid > /var/run/vmops.pid
|
||||
fi
|
||||
_success
|
||||
else
|
||||
_failure
|
||||
fi
|
||||
echo
|
||||
}
|
||||
|
||||
stop() {
|
||||
local pid
|
||||
echo -n "Stopping VMOps agent: "
|
||||
for pid in $(get_pids)
|
||||
do
|
||||
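# resolve the agent's process group (trimming whitespace from the ps output)
# and signal the whole group so child processes are stopped as well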
pgid=$(ps -o pgid -p $pid | tr '\n' ' ' | awk '{print $2}')
|
||||
pgid=${pgid## }
|
||||
pgid=${pgid%% }
|
||||
kill -- -$pgid
|
||||
done
|
||||
rm /var/run/vmops.pid
|
||||
_success
|
||||
echo
|
||||
}
|
||||
|
||||
status() {
|
||||
local pids=$(get_pids)
|
||||
if [ "$pids" == "" ]
|
||||
then
|
||||
echo "VMOps agent is not running"
|
||||
return 1
|
||||
fi
|
||||
echo "VMOps agent (pid $pids) is running"
|
||||
return 0
|
||||
}
|
||||
|
||||
|
||||
case "$1" in
|
||||
start) start
|
||||
;;
|
||||
stop) stop
|
||||
;;
|
||||
status) status
|
||||
;;
|
||||
restart) stop
|
||||
sleep 1.5
|
||||
start
|
||||
;;
|
||||
*) echo $"Usage: $0 {start|stop|status|restart}"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
exit $RETVAL
|
||||
|
|
@ -0,0 +1,44 @@
|
|||
#! /bin/bash
|
||||
|
||||
stage=$1
|
||||
option=$2
|
||||
|
||||
export VMOPS_HOME=/usr/local/vmops
|
||||
|
||||
usage() {
|
||||
echo "Usage: vsetup [networking|zpool]"
|
||||
echo " networking: probe NICs, configure networking, and detect disks"
|
||||
echo " zpool: create ZFS storage pool"
|
||||
}
|
||||
|
||||
if [ "$stage" != "networking" ] && [ "$stage" != "zpool" ] && [ "$stage" != "detectdisks" ]
|
||||
then
|
||||
usage
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "$option" != "" ] && [ "$option" != "-listonly" ]
|
||||
then
|
||||
usage
|
||||
exit 1
|
||||
fi
|
||||
|
||||
$VMOPS_HOME/agent/scripts/installer/run_installer.sh storage $stage $option
|
||||
|
||||
if [ $? -eq 0 ]
|
||||
then
|
||||
if [ "$stage" == "networking" ]
|
||||
then
|
||||
echo "Please edit /etc/vmops/disks.properties and then run \"vsetup zpool\"."
|
||||
else
|
||||
if [ "$stage" == "zpool" ]
|
||||
then
|
||||
echo "Press enter to reboot the computer..."
|
||||
read
|
||||
reboot
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
|
||||
|
||||
|
|
@ -0,0 +1,43 @@
|
|||
<?xml version="1.0"?>
|
||||
<!DOCTYPE service_bundle SYSTEM "/usr/share/lib/xml/dtd/service_bundle.dtd.1">
|
||||
|
||||
<service_bundle type='manifest' name='cloud'>
|
||||
|
||||
<service
|
||||
name='application/cloud'
|
||||
type='service'
|
||||
version='0.1.0'>
|
||||
|
||||
<!-- This is the cloud storage agent
|
||||
-->
|
||||
|
||||
<create_default_instance enabled='false' />
|
||||
|
||||
<single_instance />
|
||||
|
||||
<dependency
|
||||
name='iscsi_target'
|
||||
grouping='require_all'
|
||||
restart_on='error'
|
||||
type='service'>
|
||||
<service_fmri value='svc:/network/iscsi/target:default' />
|
||||
</dependency>
|
||||
|
||||
<exec_method
|
||||
type='method'
|
||||
name='start'
|
||||
exec='/lib/svc/method/cloud start'
|
||||
timeout_seconds='60'>
|
||||
</exec_method>
|
||||
|
||||
<exec_method
|
||||
type='method'
|
||||
name='stop'
|
||||
exec='/lib/svc/method/cloud stop'
|
||||
timeout_seconds='60'>
|
||||
</exec_method>
|
||||
|
||||
|
||||
</service>
|
||||
|
||||
</service_bundle>
|
||||
|
|
@ -0,0 +1,6 @@
|
|||
consoleproxy.tcpListenPort=0
|
||||
consoleproxy.httpListenPort=80
|
||||
consoleproxy.httpCmdListenPort=8001
|
||||
consoleproxy.jarDir=./applet/
|
||||
consoleproxy.viewerLinger=180
|
||||
consoleproxy.reconnectMaxRetry=5
|
||||
|
|
@ -0,0 +1,532 @@
|
|||
<?xml version="1.0" encoding="ISO-8859-1"?>
|
||||
<data>
|
||||
<version>2.0</version>
|
||||
<zones>
|
||||
<zone>
|
||||
<id>1</id>
|
||||
<name>AH</name>
|
||||
<dns1>72.52.126.11</dns1>
|
||||
<dns2>72.52.126.12</dns2>
|
||||
<internalDns1>192.168.10.253</internalDns1>
|
||||
<internalDns2>192.168.10.254</internalDns2>
|
||||
<vnet>100-199</vnet>
|
||||
<guestNetworkCidr>10.1.1.0/24</guestNetworkCidr>
|
||||
</zone>
|
||||
<zone>
|
||||
<id>2</id>
|
||||
<name>KM</name>
|
||||
<dns1>72.52.126.11</dns1>
|
||||
<dns2>72.52.126.12</dns2>
|
||||
<internalDns1>192.168.10.253</internalDns1>
|
||||
<internalDns2>192.168.10.254</internalDns2>
|
||||
<vnet>200-299</vnet>
|
||||
<guestNetworkCidr>10.1.1.0/24</guestNetworkCidr>
|
||||
</zone>
|
||||
<zone>
|
||||
<id>3</id>
|
||||
<name>KY</name>
|
||||
<dns1>72.52.126.11</dns1>
|
||||
<dns2>72.52.126.12</dns2>
|
||||
<internalDns1>192.168.10.253</internalDns1>
|
||||
<internalDns2>192.168.10.254</internalDns2>
|
||||
<vnet>300-399</vnet>
|
||||
<guestNetworkCidr>10.1.1.0/24</guestNetworkCidr>
|
||||
</zone>
|
||||
<zone>
|
||||
<id>4</id>
|
||||
<name>WC</name>
|
||||
<dns1>72.52.126.11</dns1>
|
||||
<dns2>72.52.126.12</dns2>
|
||||
<internalDns1>192.168.10.253</internalDns1>
|
||||
<internalDns2>192.168.10.254</internalDns2>
|
||||
<vnet>400-499</vnet>
|
||||
<guestNetworkCidr>10.1.1.0/24</guestNetworkCidr>
|
||||
</zone>
|
||||
<zone>
|
||||
<id>5</id>
|
||||
<name>CV</name>
|
||||
<dns1>72.52.126.11</dns1>
|
||||
<dns2>72.52.126.12</dns2>
|
||||
<internalDns1>192.168.10.253</internalDns1>
|
||||
<internalDns2>192.168.10.254</internalDns2>
|
||||
<vnet>500-599</vnet>
|
||||
<guestNetworkCidr>10.1.1.0/24</guestNetworkCidr>
|
||||
</zone>
|
||||
<zone>
|
||||
<id>6</id>
|
||||
<name>KS</name>
|
||||
<dns1>72.52.126.11</dns1>
|
||||
<dns2>72.52.126.12</dns2>
|
||||
<internalDns1>192.168.10.253</internalDns1>
|
||||
<internalDns2>192.168.10.254</internalDns2>
|
||||
<vnet>600-699</vnet>
|
||||
<guestNetworkCidr>10.1.1.0/24</guestNetworkCidr>
|
||||
</zone>
|
||||
<zone>
|
||||
<id>7</id>
|
||||
<name>ES</name>
|
||||
<dns1>72.52.126.11</dns1>
|
||||
<dns2>72.52.126.12</dns2>
|
||||
<internalDns1>192.168.10.253</internalDns1>
|
||||
<internalDns2>192.168.10.254</internalDns2>
|
||||
<vnet>700-799</vnet>
|
||||
<guestNetworkCidr>10.1.1.0/24</guestNetworkCidr>
|
||||
</zone>
|
||||
<zone>
|
||||
<id>8</id>
|
||||
<name>RC</name>
|
||||
<dns1>72.52.126.11</dns1>
|
||||
<dns2>72.52.126.12</dns2>
|
||||
<internalDns1>192.168.10.253</internalDns1>
|
||||
<internalDns2>192.168.10.254</internalDns2>
|
||||
<vnet>800-899</vnet>
|
||||
<guestNetworkCidr>10.1.1.0/24</guestNetworkCidr>
|
||||
</zone>
|
||||
<zone>
|
||||
<id>9</id>
|
||||
<name>AX</name>
|
||||
<dns1>72.52.126.11</dns1>
|
||||
<dns2>72.52.126.12</dns2>
|
||||
<internalDns1>192.168.10.253</internalDns1>
|
||||
<internalDns2>192.168.10.254</internalDns2>
|
||||
<vnet>900-999</vnet>
|
||||
<guestNetworkCidr>10.1.1.0/24</guestNetworkCidr>
|
||||
</zone>
|
||||
<zone>
|
||||
<id>10</id>
|
||||
<name>JW</name>
|
||||
<dns1>72.52.126.11</dns1>
|
||||
<dns2>72.52.126.12</dns2>
|
||||
<internalDns1>192.168.10.253</internalDns1>
|
||||
<internalDns2>192.168.10.254</internalDns2>
|
||||
<vnet>900-999</vnet>
|
||||
<guestNetworkCidr>10.1.1.0/24</guestNetworkCidr>
|
||||
</zone>
|
||||
<zone>
|
||||
<id>11</id>
|
||||
<name>AJ</name>
|
||||
<dns1>72.52.126.11</dns1>
|
||||
<dns2>72.52.126.12</dns2>
|
||||
<internalDns1>192.168.10.253</internalDns1>
|
||||
<internalDns2>192.168.10.254</internalDns2>
|
||||
<vnet>1000-1099</vnet>
|
||||
<guestNetworkCidr>10.1.1.0/24</guestNetworkCidr>
|
||||
</zone>
|
||||
</zones>
|
||||
<!--
|
||||
<storagePools>
|
||||
<storagePool>
|
||||
<zoneId>5</zoneId>
|
||||
<name>sol10-2</name>
|
||||
<hostAddress>sol10-2</hostAddress>
|
||||
<hostPath>/tank/cloud-nfs/</hostPath>
|
||||
</storagePool>
|
||||
</storagePools>
|
||||
-->
|
||||
|
||||
<vlans>
|
||||
<vlan>
|
||||
<zoneId>1</zoneId>
|
||||
<vlanId>31</vlanId>
|
||||
<vlanType>VirtualNetwork</vlanType>
|
||||
<gateway>192.168.31.1</gateway>
|
||||
<netmask>255.255.255.0</netmask>
|
||||
<ipAddressRange>192.168.31.150-192.168.31.159</ipAddressRange>
|
||||
</vlan>
|
||||
<vlan>
|
||||
<zoneId>2</zoneId>
|
||||
<vlanId>32</vlanId>
|
||||
<vlanType>VirtualNetwork</vlanType>
|
||||
<gateway>192.168.32.1</gateway>
|
||||
<netmask>255.255.255.0</netmask>
|
||||
<ipAddressRange>192.168.32.150-192.168.32.159</ipAddressRange>
|
||||
</vlan>
|
||||
<vlan>
|
||||
<zoneId>3</zoneId>
|
||||
<vlanId>33</vlanId>
|
||||
<vlanType>VirtualNetwork</vlanType>
|
||||
<gateway>192.168.33.1</gateway>
|
||||
<netmask>255.255.255.0</netmask>
|
||||
<ipAddressRange>192.168.33.150-192.168.33.159</ipAddressRange>
|
||||
</vlan>
|
||||
<vlan>
|
||||
<zoneId>4</zoneId>
|
||||
<vlanId>34</vlanId>
|
||||
<vlanType>VirtualNetwork</vlanType>
|
||||
<gateway>192.168.34.1</gateway>
|
||||
<netmask>255.255.255.0</netmask>
|
||||
<ipAddressRange>192.168.34.150-192.168.34.159</ipAddressRange>
|
||||
</vlan>
|
||||
<vlan>
|
||||
<zoneId>5</zoneId>
|
||||
<vlanId>35</vlanId>
|
||||
<vlanType>VirtualNetwork</vlanType>
|
||||
<gateway>192.168.35.1</gateway>
|
||||
<netmask>255.255.255.0</netmask>
|
||||
<ipAddressRange>192.168.35.150-192.168.35.159</ipAddressRange>
|
||||
</vlan>
|
||||
<vlan>
|
||||
<zoneId>6</zoneId>
|
||||
<vlanId>36</vlanId>
|
||||
<vlanType>VirtualNetwork</vlanType>
|
||||
<gateway>192.168.36.1</gateway>
|
||||
<netmask>255.255.255.0</netmask>
|
||||
<ipAddressRange>192.168.36.150-192.168.36.159</ipAddressRange>
|
||||
</vlan>
|
||||
<vlan>
|
||||
<zoneId>7</zoneId>
|
||||
<vlanId>37</vlanId>
|
||||
<vlanType>VirtualNetwork</vlanType>
|
||||
<gateway>192.168.37.1</gateway>
|
||||
<netmask>255.255.255.0</netmask>
|
||||
<ipAddressRange>192.168.37.150-192.168.37.159</ipAddressRange>
|
||||
</vlan>
|
||||
<vlan>
|
||||
<zoneId>8</zoneId>
|
||||
<vlanId>38</vlanId>
|
||||
<vlanType>VirtualNetwork</vlanType>
|
||||
<gateway>192.168.38.1</gateway>
|
||||
<netmask>255.255.255.0</netmask>
|
||||
<ipAddressRange>192.168.38.150-192.168.38.159</ipAddressRange>
|
||||
</vlan>
|
||||
<vlan>
|
||||
<zoneId>9</zoneId>
|
||||
<vlanId>39</vlanId>
|
||||
<vlanType>VirtualNetwork</vlanType>
|
||||
<gateway>192.168.39.1</gateway>
|
||||
<netmask>255.255.255.0</netmask>
|
||||
<ipAddressRange>192.168.39.150-192.168.39.159</ipAddressRange>
|
||||
</vlan>
|
||||
<vlan>
|
||||
<zoneId>10</zoneId>
|
||||
<vlanId>40</vlanId>
|
||||
<vlanType>VirtualNetwork</vlanType>
|
||||
<gateway>192.168.40.1</gateway>
|
||||
<netmask>255.255.255.0</netmask>
|
||||
<ipAddressRange>192.168.40.150-192.168.40.159</ipAddressRange>
|
||||
</vlan>
|
||||
<vlan>
|
||||
<zoneId>11</zoneId>
|
||||
<vlanId>41</vlanId>
|
||||
<vlanType>VirtualNetwork</vlanType>
|
||||
<gateway>192.168.41.1</gateway>
|
||||
<netmask>255.255.255.0</netmask>
|
||||
<ipAddressRange>192.168.41.150-192.168.41.159</ipAddressRange>
|
||||
</vlan>
|
||||
</vlans>
|
||||
|
||||
<pods>
|
||||
<pod>
|
||||
<id>1</id>
|
||||
<name>AH</name>
|
||||
<zoneId>1</zoneId>
|
||||
<ipAddressRange>192.168.10.20-192.168.10.24</ipAddressRange>
|
||||
<cidr>192.168.10.0/24</cidr>
|
||||
</pod>
|
||||
<pod>
|
||||
<id>2</id>
|
||||
<name>KM</name>
|
||||
<zoneId>2</zoneId>
|
||||
<ipAddressRange>192.168.10.25-192.168.10.29</ipAddressRange>
|
||||
<cidr>192.168.10.0/24</cidr>
|
||||
</pod>
|
||||
<pod>
|
||||
<id>3</id>
|
||||
<name>KY</name>
|
||||
<zoneId>3</zoneId>
|
||||
<ipAddressRange>192.168.10.30-192.168.10.34</ipAddressRange>
|
||||
<cidr>192.168.10.0/24</cidr>
|
||||
</pod>
|
||||
<pod>
|
||||
<id>4</id>
|
||||
<name>WC</name>
|
||||
<zoneId>4</zoneId>
|
||||
<ipAddressRange>192.168.10.35-192.168.10.39</ipAddressRange>
|
||||
<cidr>192.168.10.0/24</cidr>
|
||||
</pod>
|
||||
<pod>
|
||||
<id>5</id>
|
||||
<name>CV</name>
|
||||
<zoneId>5</zoneId>
|
||||
<ipAddressRange>192.168.10.40-192.168.10.44</ipAddressRange>
|
||||
<cidr>192.168.10.0/24</cidr>
|
||||
</pod>
|
||||
<pod>
|
||||
<id>6</id>
|
||||
<name>KS</name>
|
||||
<zoneId>6</zoneId>
|
||||
<ipAddressRange>192.168.10.45-192.168.10.49</ipAddressRange>
|
||||
<cidr>192.168.10.0/24</cidr>
|
||||
</pod>
|
||||
<pod>
|
||||
<id>7</id>
|
||||
<name>ES</name>
|
||||
<zoneId>7</zoneId>
|
||||
<ipAddressRange>192.168.10.50-192.168.10.54</ipAddressRange>
|
||||
<cidr>192.168.10.0/24</cidr>
|
||||
</pod>
|
||||
<pod>
|
||||
<id>8</id>
|
||||
<name>RC</name>
|
||||
<zoneId>8</zoneId>
|
||||
<ipAddressRange>192.168.10.55-192.168.10.59</ipAddressRange>
|
||||
<cidr>192.168.10.0/24</cidr>
|
||||
</pod>
|
||||
<pod>
|
||||
<id>9</id>
|
||||
<name>AX</name>
|
||||
<zoneId>9</zoneId>
|
||||
<ipAddressRange>192.168.10.62-192.168.10.64</ipAddressRange>
|
||||
<cidr>192.168.10.0/24</cidr>
|
||||
</pod>
|
||||
<pod>
|
||||
<id>10</id>
|
||||
<name>JW</name>
|
||||
<zoneId>10</zoneId>
|
||||
<ipAddressRange>192.168.10.65-192.168.10.69</ipAddressRange>
|
||||
<cidr>192.168.10.0/24</cidr>
|
||||
</pod>
|
||||
<pod>
|
||||
<id>11</id>
|
||||
<name>AJ</name>
|
||||
<zoneId>11</zoneId>
|
||||
<ipAddressRange>192.168.10.70-192.168.10.74</ipAddressRange>
|
||||
<cidr>192.168.10.0/24</cidr>
|
||||
</pod>
|
||||
</pods>
|
||||
|
||||
<!--
|
||||
* cpu is the number of CPUs for the offering
|
||||
* ramSize is total memory in MB
|
||||
* speed is the CPU speed for each core in MHZ
|
||||
* diskSpace is the storage space in MB
|
||||
* price is the price of the offering per hour
|
||||
-->
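<!--
    Illustrative only: a hedged sketch of an offering entry that uses every field documented
    above (cpu, ramSize, speed, diskSpace, price). The real entries below omit diskSpace and
    price, so treat those two elements here as assumptions about what the loader accepts.
    <serviceOffering>
        <id>99</id>
        <name>Example Instance</name>
        <displayText>Example Instance [1GHZ CPU, 2GB MEM, 32GB Disk] - $0.15 per hour</displayText>
        <cpu>1</cpu>
        <ramSize>2048</ramSize>
        <speed>1000</speed>
        <diskSpace>32000</diskSpace>
        <price>0.15</price>
    </serviceOffering>
-->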
|
||||
<serviceOfferings>
|
||||
<serviceOffering>
|
||||
<id>1</id>
|
||||
<name>Small Instance</name>
|
||||
<displayText>Small Instance [500MHZ CPU, 512MB MEM, 16GB Disk] - $0.10 per hour</displayText>
|
||||
<cpu>1</cpu>
|
||||
<ramSize>512</ramSize>
|
||||
<speed>500</speed>
|
||||
<mirrored>false</mirrored>
|
||||
</serviceOffering>
|
||||
<serviceOffering>
|
||||
<id>2</id>
|
||||
<name>Medium Instance</name>
|
||||
<displayText>Medium Instance [500MHZ CPU, 1GB MEM, 32GB Disk] - $0.20 per hour</displayText>
|
||||
<cpu>1</cpu>
|
||||
<ramSize>1024</ramSize>
|
||||
<speed>512</speed>
|
||||
</serviceOffering>
|
||||
<serviceOffering>
|
||||
<id>3</id>
|
||||
<name>Large Instance</name>
|
||||
<displayText>Large Instance [2GHZ CPU, 4GB MEM, 64GB Disk] - $0.30 per hour</displayText>
|
||||
<cpu>2</cpu>
|
||||
<ramSize>4096</ramSize>
|
||||
<speed>2000</speed>
|
||||
</serviceOffering>
|
||||
</serviceOfferings>
|
||||
|
||||
<diskOfferings>
|
||||
<diskOffering>
|
||||
<id>1</id>
|
||||
<domainId>1</domainId>
|
||||
<name>Small Disk</name>
|
||||
<displayText>Small Disk [16GB Disk]</displayText>
|
||||
<diskSpace>16000</diskSpace>
|
||||
</diskOffering>
|
||||
<diskOffering>
|
||||
<id>2</id>
|
||||
<domainId>1</domainId>
|
||||
<name>Medium Disk</name>
|
||||
<displayText>Medium Disk [32GB Disk]</displayText>
|
||||
<diskSpace>32000</diskSpace>
|
||||
</diskOffering>
|
||||
<diskOffering>
|
||||
<id>3</id>
|
||||
<domainId>1</domainId>
|
||||
<name>Large Disk</name>
|
||||
<displayText>Large Disk [64GB Disk]</displayText>
|
||||
<diskSpace>64000</diskSpace>
|
||||
</diskOffering>
|
||||
</diskOfferings>
|
||||
|
||||
<!--
|
||||
* firstname/lastname are optional parameters
|
||||
* id, username, password are required parameters
|
||||
-->
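<!--
    Illustrative only: a minimal user entry using just the fields documented above as
    required (id, username, password); firstname, lastname and email are optional and are
    omitted here. The id and credentials are placeholders, not real values.
    <user>
        <id>3</id>
        <username>exampleuser</username>
        <password>examplepassword</password>
    </user>
-->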
|
||||
<users>
|
||||
<user>
|
||||
<id>2</id>
|
||||
<username>admin</username>
|
||||
<password>password</password>
|
||||
<firstname>Admin</firstname>
|
||||
<lastname>User</lastname>
|
||||
<email>admin@mailprovider.com</email>
|
||||
</user>
|
||||
</users>
|
||||
|
||||
<configurationEntries>
|
||||
<configuration>
|
||||
<name>default.zone</name>
|
||||
<value>AH</value>
|
||||
</configuration>
|
||||
<configuration>
|
||||
<name>domain.suffix</name>
|
||||
<value>cloud-test.cloud.com</value>
|
||||
</configuration>
|
||||
<configuration>
|
||||
<name>instance.name</name>
|
||||
<value>AH</value>
|
||||
</configuration>
|
||||
<configuration>
|
||||
<name>consoleproxy.ram.size</name>
|
||||
<value>256</value>
|
||||
</configuration>
|
||||
<configuration>
|
||||
<name>host.stats.interval</name>
|
||||
<value>3600000</value>
|
||||
</configuration>
|
||||
<configuration>
|
||||
<name>storage.stats.interval</name>
|
||||
<value>120000</value>
|
||||
</configuration>
|
||||
<configuration>
|
||||
<name>volume.stats.interval</name>
|
||||
<value>-1</value>
|
||||
</configuration>
|
||||
<configuration>
|
||||
<name>ping.interval</name>
|
||||
<value>60</value>
|
||||
</configuration>
|
||||
<configuration>
|
||||
<name>alert.wait</name>
|
||||
<value>1800</value>
|
||||
</configuration>
|
||||
<configuration>
|
||||
<name>expunge.interval</name>
|
||||
<value>86400</value>
|
||||
</configuration>
|
||||
<configuration>
|
||||
<name>usage.aggregation.timezone</name>
|
||||
<value>GMT</value>
|
||||
</configuration>
|
||||
<!-- RSA Keys -->
|
||||
<configuration>
|
||||
<name>ssh.privatekey</name>
|
||||
<value>-----BEGIN RSA PRIVATE KEY-----\nMIIEoQIBAAKCAQEAnNUMVgQS87EzAQN9ufGgH3T1kOpqcvTmUrp8RVZyeA5qwptS\nrZxONRbhLK709pZFBJLmeFqiqciWoA/srVIFk+rPmBlVsMw8BK53hTGoax7iSe8s\nLFCAATm6vp0HnZzYqNfrzR2by36ET5aQD/VAyA55u+uUgAlxQuhKff2xjyahEHs+\nUiRlReiAgItygm9g3co3+8fJDOuRse+s0TOip1D0jPdo2AJFscyxrG9hWqQH86R/\nZlLJ7DqsiaAcUmn52u6Nsmd3BkRmGVx/D35Mq6upJqrk/QDfug9LF66yiIP/BEIn\n08N/wQ6m/O37WUtqqyl3rRKqs5TJ9ZnhsqeO9QIBIwKCAQA6QIDsv69EkkYk8qsK\njPJU06uq2rnS7T+bEhDmjdK+4MiRbOQx2vh6HnDktgM3BJ1K13oss/NGYHJ190lH\nsMA+QUXKx5TbRItSMixkrAta/Ne1D7FSScklBtBVbYZ8XtQhdMVML5GjWuCv2NZs\nU8eaw4xNHPyklcr7mBurI7b6p13VK5BNUWR/VNuigT4U89YzRcoEZ/sTlR+4ACYr\nxbUJJGBA03+NhdSAe2vodlMh5lGflD0JmHMFqqg9BcAtVb73JsOsxFQArbXwRd/q\nNckdoAvgJfhTOvXF5GMPLI0lGb6skJkS229F4GaBB2Iz4A9O0aHZob8I8zsWUbiu\npvBrAoGBAMjUDfF2x13NjH1cFHietO5O1oM0nZaAxKodxoAUvHVMUd5DIY50tqYw\n7ecKi2Cw43ONpdj0nP9Nc2NV3NDRqLopwkKUsTtq9AKQ2cIuw3+uS5vm0VZBzmTP\nuF04Qo4bXh/jFRA62u9bXsmIFtaehKxE1Gp6zi393GcbWP4HX/3dAoGBAMfq0KD3\ngeU1PHi9uI3Ss89nXzJsiGcwC5Iunu1aTzJCYhMlJkfmRcXYMAqSfg0nGWnfvlDh\nuOO26CHKjG182mTwYXdgQzIPpBc8suvgUWDBTrIzJI+zuyBLtPbd9DJEVrZkRVQX\nXrOV3Y5oOWsba4F+b20jaaHFAiY7s6OtrX/5AoGBAMMXI3zZyPwJgSlSIoPNX03m\nL3gke9QID4CvNduB26UlkVuRq5GzNRZ4rJdMEl3tqcC1fImdKswfWiX7o06ChqY3\nMb0FePfkPX7V2tnkSOJuzRsavLoxTCdqsxi6T0g318c0XZq81K4A/P5Jr8ksRl40\nPA+qfyVdAf3Cy3ptkHLzAoGASkFGLSi7N+CSzcLPhSJgCzUGGgsOF7LCeB/x4yGL\nIUvbSPCKj7vuB6gR2AqGlyvHnFprQpz7h8eYDI0PlmGS8kqn2+HtEpgYYGcAoMEI\nSIJQbhL+84vmaxTOL87IanEnhZL1LdzLZ0ZK+mE55fQ936P9gE77WVfNmSweJtob\n3xMCgYAl0aLeGf4oUZbI56eEaCbu8U7dEe6MF54VbozyiXqbp455QnUpuBrRn5uf\nc079dNcqTNDuk1+hYX9qNn1aXsvWeuofBXqWoFXu/c4yoWxJAPhEVhzZ9xrXI76I\nBKiPCyKrOa7bSLvs6SQPpuf5AQ8+NJrOxkEB9hbMuaAr2N5rCw==\n-----END RSA PRIVATE KEY-----
|
||||
</value>
|
||||
<category>Hidden</category>
|
||||
</configuration>
|
||||
<configuration>
|
||||
<name>ssh.publickey</name>
|
||||
<value>
|
||||
ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAnNUMVgQS87EzAQN9ufGgH3T1kOpqcvTmUrp8RVZyeA5qwptSrZxONRbhLK709pZFBJLmeFqiqciWoA/srVIFk+rPmBlVsMw8BK53hTGoax7iSe8sLFCAATm6vp0HnZzYqNfrzR2by36ET5aQD/VAyA55u+uUgAlxQuhKff2xjyahEHs+UiRlReiAgItygm9g3co3+8fJDOuRse+s0TOip1D0jPdo2AJFscyxrG9hWqQH86R/ZlLJ7DqsiaAcUmn52u6Nsmd3BkRmGVx/D35Mq6upJqrk/QDfug9LF66yiIP/BEIn08N/wQ6m/O37WUtqqyl3rRKqs5TJ9ZnhsqeO9Q== root@test2.lab.vmops.com
|
||||
</value>
|
||||
<category>Hidden</category>
|
||||
</configuration>
|
||||
<!-- the following are for configuring alerts and need to be changed to proper configuration values -->
|
||||
<!--
|
||||
<configuration>
|
||||
<name>alert.smtp.host</name>
|
||||
<value>smtp.host.com</value>
|
||||
</configuration>
|
||||
<configuration>
|
||||
<name>alert.smtp.port</name>
|
||||
<value>25</value>
|
||||
</configuration>
|
||||
<configuration>
|
||||
<name>alert.smtp.useAuth</name>
|
||||
<value>false</value>
|
||||
</configuration>
|
||||
<configuration>
|
||||
<name>alert.smtp.username</name>
|
||||
<value>some.user@example.com</value>
|
||||
</configuration>
|
||||
<configuration>
|
||||
<name>alert.smtp.password</name>
|
||||
<value>password</value>
|
||||
</configuration>
|
||||
<configuration>
|
||||
<name>alert.email.sender</name>
|
||||
<value>some.user@example.com</value>
|
||||
</configuration>
|
||||
<configuration>
|
||||
<name>alert.email.addresses</name>
|
||||
<value>some.admin@example.com</value>
|
||||
</configuration>
|
||||
<configuration>
|
||||
<name>alert.smtp.debug</name>
|
||||
<value>false</value>
|
||||
</configuration>
|
||||
-->
|
||||
<configuration>
|
||||
<name>memory.capacity.threshold</name>
|
||||
<value>0.85</value>
|
||||
</configuration>
|
||||
<configuration>
|
||||
<name>cpu.capacity.threshold</name>
|
||||
<value>0.85</value>
|
||||
</configuration>
|
||||
<configuration>
|
||||
<name>storage.capacity.threshold</name>
|
||||
<value>0.85</value>
|
||||
</configuration>
|
||||
<configuration>
|
||||
<name>storage.allocated.capacity.threshold</name>
|
||||
<value>0.85</value>
|
||||
</configuration>
|
||||
<configuration>
|
||||
<name>capacity.check.period</name>
|
||||
<value>3600000</value>
|
||||
</configuration>
|
||||
<configuration>
|
||||
<name>wait</name>
|
||||
<value>240</value>
|
||||
</configuration>
|
||||
<configuration>
|
||||
<name>network.throttling.rate</name>
|
||||
<value>200</value>
|
||||
</configuration>
|
||||
<configuration>
|
||||
<name>multicast.throttling.rate</name>
|
||||
<value>10</value>
|
||||
</configuration>
|
||||
</configurationEntries>
|
||||
|
||||
<!--
|
||||
It is possible to specify a single IP address. For example, to add 192.168.1.1
|
||||
as the only address, specify as follows.
|
||||
<publicIpAddresses>
|
||||
<zoneId>1</zoneId>
|
||||
<ipAddressRange>192.168.1.1</ipAddressRange>
|
||||
</publicIpAddresses>
|
||||
|
||||
For each ip address range, create a new object. For example, to add the range 192.168.2.1 to 192.168.2.255
|
||||
copy the following object tag into the privateIpRange
|
||||
<privateIpAddresses>
|
||||
<zoneId>1</zoneId>
|
||||
<podId>1</podId>
|
||||
<ipAddressRange>192.168.2.1-192.168.2.255</ipAddressRange>
|
||||
</privateIpAddresses>
|
||||
-->
|
||||
|
||||
<!--
|
||||
It is possible to specify a single IP address. For example, to add 65.37.141.29
|
||||
as the only address, specify as follows.
|
||||
<publicIpAddresses>
|
||||
<zoneId>1</zoneId>
|
||||
<ipAddressRange>65.37.141.29</ipAddressRange>
|
||||
</publicIpAddresses>
|
||||
|
||||
For each ip address range, create a new object. For example, to add the range 65.37.141.29 to 65.37.141.39
|
||||
copy the following object tag into the publicIpRange
|
||||
<publicIpAddresses>
|
||||
<zoneId>1</zoneId>
|
||||
<ipAddressRange>65.37.141.29-65.37.141.39</ipAddressRange>
|
||||
</publicIpAddresses>
|
||||
-->
|
||||
</data>
|
||||
|
|
@ -0,0 +1,14 @@
|
|||
INSERT INTO `vmops`.`vm_template` (id, unique_name, name, public, path, created, type, hvm, bits, created_by, url, checksum, ready, display_text, enable_password)
|
||||
VALUES (1, 'routing', 'DomR Template', 0, 'tank/volumes/demo/template/private/u000000/os/routing', now(), 'ext3', 0, 64, 1, 'http://vmopsserver.lab.vmops.com/images/routing/vmi-root-fc8-x86_64-domR.img.bz2', 'd00927f863a23b98cc6df6e377c9d0c6', 0, 'DomR Template', 0);
|
||||
INSERT INTO `vmops`.`vm_template` (id, unique_name, name, public, path, created, type, hvm, bits, created_by, url, checksum, ready, display_text, enable_password)
|
||||
VALUES (3, 'centos53-x86_64', 'Centos 5.3(x86_64) no GUI', 1, 'tank/volumes/demo/template/public/os/centos53-x86_64', now(), 'ext3', 0, 64, 1, 'http://vmopsserver.lab.vmops.com/images/centos52-x86_64/vmi-root-centos.5-2.64.pv.img.gz', 'd4ca80825d936db00eedf26620f13d69', 0, 'Centos 5.3(x86_64) no GUI', 0);
|
||||
#INSERT INTO `vmops`.`vm_template` (id, unique_name, name, public, path, created, type, hvm, bits, created_by, url, checksum, ready, display_text, enable_password)
|
||||
# VALUES (4, 'centos52-x86_64-gui', 'Centos 5.2(x86_64) GUI', 1, 'tank/volumes/demo/template/public/os/centos52-x86_64-gui', now(), 'ext3', 0, 64, 1, 'http://vmopsserver.lab.vmops.com/images/centos52-x86_64/vmi-root-centos.5-2.64.pv.img.gz', 'd4ca80825d936db00eedf26620f13d69', 0, 'Centos 5.2(x86_64) GUI', 0);
|
||||
INSERT INTO `vmops`.`vm_template` (id, unique_name, name, public, path, created, type, hvm, bits, created_by, url, checksum, ready, display_text, enable_password)
|
||||
VALUES (5, 'winxpsp3', 'Windows XP SP3 (32-bit)', 1, 'tank/volumes/demo/template/public/os/winxpsp3', now(), 'ntfs', 1, 32, 1, 'http://vmopsserver.lab.vmops.com/images/fedora10-x86_64/vmi-root-fedora10.64.img.gz', 'c76d42703f14108b15acc9983307c759', 0, 'Windows XP SP3 (32-bit)', 0);
|
||||
INSERT INTO `vmops`.`vm_template` (id, unique_name, name, public, path, created, type, hvm, bits, created_by, url, checksum, ready, display_text, enable_password)
|
||||
VALUES (7, 'win2003sp2', 'Windows 2003 SP2 (32-bit)', 1, 'tank/volumes/demo/template/public/os/win2003sp2', now(), 'ntfs', 1, 32, 1, 'http://vmopsserver.lab.vmops.com/images/win2003sp2/vmi-root-win2003sp2.img.gz', '4d2cc51898d05c0f7a2852c15bcdc77b', 0, 'Windows 2003 SP2 (32-bit)', 0);
|
||||
INSERT INTO `vmops`.`vm_template` (id, unique_name, name, public, path, created, type, hvm, bits, created_by, url, checksum, ready, display_text, enable_password)
|
||||
VALUES (8, 'win2003sp2-x64', 'Windows 2003 SP2 (64-bit)', 1, 'tank/volumes/demo/template/public/os/win2003sp2-x64', now(), 'ntfs', 1, 64, 1, 'http://vmopsserver.lab.vmops.com/images/win2003sp2-x86_64/vmi-root-win2003sp2-x64.img.gz', '35d4de1c38eb4fb9d81a31c1d989c482', 0, 'Windows 2003 SP2 (64-bit)', 0);
|
||||
INSERT INTO `vmops`.`vm_template` (id, unique_name, name, public, path, created, type, hvm, bits, created_by, url, checksum, ready, display_text, enable_password)
|
||||
VALUES (9, 'fedora12-GUI-x86_64', 'Fedora 12 Desktop(64-bit)', 1, 'tank/volumes/demo/template/public/os/fedora12-GUI-x86_64', now(), 'ext3', 1, 64, 1, 'http://vmopsserver.lab.vmops.com/images/fedora12-GUI-x86_64/vmi-root-fedora12-GUI-x86_64.qcow2.gz', '', 0, 'Fedora 12 Desktop (with httpd,java and mysql)', 0);
|
||||
|
|
@ -0,0 +1 @@
|
|||
agent.minimal.version=@agent.min.version@
|
||||
Binary file not shown.
|
|
@ -0,0 +1,185 @@
|
|||
<?xml version="1.0"?>
|
||||
<!--
|
||||
components.xml is the configuration file for the VM Ops
|
||||
insertion servers. Someone can quickly pull together an
|
||||
insertion server by selecting the correct adapters to use.
|
||||
|
||||
Here are some places to look for information.
|
||||
- To find out the general functionality that each Manager
|
||||
or Adapter provide, look at the javadoc for the interface
|
||||
that it implements. The interface is usually the
|
||||
"key" attribute in the declaration.
|
||||
- To find specific implementation of each Manager or
|
||||
Adapter, look at the javadoc for the actual class. The
|
||||
class can be found in the <class> element.
|
||||
- To find out the configuration parameters for each Manager
|
||||
or Adapter, look at the javadoc for the actual implementation
|
||||
class. It should be documented in the description of the
|
||||
class.
|
||||
- To know more about the components.xml in general, look for
|
||||
the javadoc for ComponentLocator.java.
|
||||
|
||||
If you find that a Manager or Adapter is not properly
|
||||
documented, please contact the author.
|
||||
-->
|
||||
<components.xml>
|
||||
<management-server class="com.cloud.server.ManagementServerImpl">
|
||||
<dao name="domain router" class="com.cloud.vm.dao.DomainRouterDaoImpl"/>
|
||||
<dao name="host" class="com.cloud.host.dao.HostDaoImpl"/>
|
||||
<dao name="VM Instance" class="com.cloud.vm.dao.VMInstanceDaoImpl"/>
|
||||
<dao name="User VM" class="com.cloud.vm.dao.UserVmDaoImpl"/>
|
||||
<dao name="ServiceOffering" class="com.cloud.service.dao.ServiceOfferingDaoImpl">
|
||||
<param name="cache.size">50</param>
|
||||
<param name="cache.time.to.live">-1</param>
|
||||
</dao>
|
||||
<dao name="DiskOffering" class="com.cloud.storage.dao.DiskOfferingDaoImpl"/>
|
||||
<dao name="VMDisk" class="com.cloud.storage.dao.VmDiskDaoImpl"/>
|
||||
<dao name="host zone" class="com.cloud.dc.dao.DataCenterDaoImpl">
|
||||
<param name="cache.size">50</param>
|
||||
<param name="cache.time.to.live">-1</param>
|
||||
</dao>
|
||||
<dao name="host pod" class="com.cloud.dc.dao.HostPodDaoImpl">
|
||||
<param name="cache.size">50</param>
|
||||
<param name="cache.time.to.live">-1</param>
|
||||
</dao>
|
||||
<dao name="vlan" class="com.cloud.dc.dao.VlanDaoImpl">
|
||||
</dao>
|
||||
<dao name="Volume" class="com.cloud.storage.dao.VolumeDaoImpl"/>
|
||||
<dao name="Events" class="com.cloud.event.dao.EventDaoImpl"/>
|
||||
<dao name="VM Template" class="com.cloud.storage.dao.VMTemplateDaoImpl">
|
||||
<param name="cache.size">50</param>
|
||||
<param name="cache.time.to.live">-1</param>
|
||||
<param name="routing.uniquename">routing</param>
|
||||
</dao>
|
||||
<dao name="User" class="com.cloud.user.dao.UserDaoImpl">
|
||||
<param name="cache.size">5000</param>
|
||||
<param name="cache.time.to.live">300</param>
|
||||
</dao>
|
||||
<dao name="UserStats" class="com.cloud.user.dao.UserStatisticsDaoImpl"/>
|
||||
<dao name="Disk Template" class="com.cloud.storage.dao.DiskTemplateDaoImpl">
|
||||
<param name="cache.size">50</param>
|
||||
<param name="cache.time.to.live">-1</param>
|
||||
</dao>
|
||||
<dao name="Firewall Rules" class="com.cloud.network.dao.FirewallRulesDaoImpl"/>
|
||||
<dao name="Security Group" class="com.cloud.network.dao.SecurityGroupDaoImpl"/>
|
||||
<dao name="Load Balancer" class="com.cloud.network.dao.LoadBalancerDaoImpl"/>
|
||||
<dao name="Network Rule Config" class="com.cloud.network.dao.NetworkRuleConfigDaoImpl"/>
|
||||
<dao name="Security Group Mapping" class="com.cloud.network.dao.SecurityGroupVMMapDaoImpl"/>
|
||||
<dao name="Load Balancer Mapping" class="com.cloud.network.dao.LoadBalancerVMMapDaoImpl"/>
|
||||
<dao name="IP Addresses" class="com.cloud.network.dao.IPAddressDaoImpl"/>
|
||||
<dao name="Datacenter IP Addresses" class="com.cloud.dc.dao.DataCenterIpAddressDaoImpl"/>
|
||||
<dao name="Pricing" class="com.cloud.pricing.dao.PricingDaoImpl"/>
|
||||
<dao name="Alert" class="com.cloud.alert.dao.AlertDaoImpl"/>
|
||||
<dao name="Capacity" class="com.cloud.capacity.dao.CapacityDaoImpl"/>
|
||||
<dao name="Domain" class="com.cloud.domain.dao.DomainDaoImpl"/>
|
||||
<dao name="Account" class="com.cloud.user.dao.AccountDaoImpl"/>
|
||||
<dao name="Limit" class="com.cloud.configuration.dao.LimitDaoImpl"/>
|
||||
<dao name="UserAccount" class="com.cloud.user.dao.UserAccountDaoImpl"/>
|
||||
<dao name="VM Template Host" class="com.cloud.storage.dao.VMTemplateHostDaoImpl"/>
|
||||
<dao name="VM Template Pool" class="com.cloud.storage.dao.VMTemplatePoolDaoImpl"/>
|
||||
<dao name="Launch Permission" class="com.cloud.storage.dao.LaunchPermissionDaoImpl"/>
|
||||
<dao name="Configuration" class="com.cloud.configuration.dao.ConfigurationDaoImpl"/>
|
||||
<dao name="HostDetails" class="com.cloud.host.dao.DetailsDaoImpl"/>
|
||||
<dao name="HA" class="com.cloud.ha.dao.HighAvailabilityDaoImpl"/>
|
||||
<dao name="Console Proxy" class="com.cloud.vm.dao.ConsoleProxyDaoImpl"/>
|
||||
<dao name="Upgrade" class="com.cloud.maint.dao.AgentUpgradeDaoImpl"/>
|
||||
<dao name="Management Server Host" class="com.cloud.cluster.dao.ManagementServerHostDaoImpl"/>
|
||||
<dao name="Snapshot" class="com.cloud.storage.dao.SnapshotDaoImpl"/>
|
||||
<dao name="SnapshotPolicyDao" class="com.cloud.storage.dao.SnapshotPolicyDaoImpl"/>
|
||||
<dao name="SnapshotPolicyRefDao" class="com.cloud.storage.dao.SnapshotPolicyRefDaoImpl"/>
|
||||
<dao name="VolumeSnapshotPolicyDao" class="com.cloud.storage.dao.VolumeSnapshotPolicyDaoImpl"/>
|
||||
<dao name="SnapshotScheduleDao" class="com.cloud.storage.dao.SnapshotScheduleDaoImpl"/>
|
||||
<dao name="AsyncJobDao" class="com.cloud.async.dao.AsyncJobDaoImpl"/>
|
||||
<dao name="SyncQueueDao" class="com.cloud.async.dao.SyncQueueDaoImpl"/>
|
||||
<dao name="SyncQueueItemDao" class="com.cloud.async.dao.SyncQueueItemDaoImpl"/>
|
||||
<dao name="NetfsStoragePoolDao" class="com.cloud.storage.dao.NetfsStoragePoolDaoImpl"/>
|
||||
<dao name="StoragePoolDao" class="com.cloud.storage.dao.StoragePoolDaoImpl"/>
|
||||
<dao name="StoragePoolHostDao" class="com.cloud.storage.dao.StoragePoolHostDaoImpl"/>
|
||||
|
||||
<adapters key="com.cloud.agent.manager.allocator.HostAllocator">
|
||||
<adapter name="FirstFitRouting" class="com.cloud.agent.manager.allocator.impl.FirstFitRoutingAllocator"/>
|
||||
<adapter name="FirstFit" class="com.cloud.agent.manager.allocator.impl.FirstFitAllocator"/>
|
||||
</adapters>
|
||||
<adapters key="com.cloud.agent.manager.allocator.StorageAllocator">
|
||||
<adapter name="Storage" class="com.cloud.agent.manager.allocator.impl.FirstFitStorageAllocator">
|
||||
<param name="storage.overprovisioning.factor">2</param>
|
||||
</adapter>
|
||||
<adapter name="Storage" class="com.cloud.agent.manager.allocator.impl.FirstFitStoragePoolAllocator">
|
||||
<param name="storage.overprovisioning.factor">2</param>
|
||||
</adapter>
|
||||
</adapters>
|
||||
<adapters key="com.cloud.agent.manager.allocator.PodAllocator">
|
||||
<adapter name="User First" class="com.cloud.agent.manager.allocator.impl.UserConcentratedAllocator"/>
|
||||
</adapters>
|
||||
<!--
|
||||
<adapters key="com.cloud.consoleproxy.ConsoleProxyAllocator">
|
||||
<adapter name="Balance" class="com.cloud.consoleproxy.ConsoleProxyBalanceAllocator"/>
|
||||
</adapters>
|
||||
-->
|
||||
<adapters key="com.cloud.server.auth.UserAuthenticator">
|
||||
<adapter name="MD5" class="com.cloud.server.auth.MD5UserAuthenticator"/>
|
||||
</adapters>
|
||||
<adapters key="com.cloud.ha.Investigator">
|
||||
<adapter name="SimpleInvestigator" class="com.cloud.ha.CheckOnAgentInvestigator"/>
|
||||
<adapter name="PingInvestigator" class="com.cloud.ha.InvestigatorImpl"/>
|
||||
</adapters>
|
||||
<adapters key="com.cloud.ha.FenceBuilder">
|
||||
<adapter name="StorageFenceBuilder" class="com.cloud.ha.StorageFence"/>
|
||||
</adapters>
|
||||
<manager name="account manager" class="com.cloud.user.AccountManagerImpl">
|
||||
</manager>
|
||||
<manager name="agent manager" class="com.cloud.agent.manager.AgentManagerImpl">
|
||||
</manager>
|
||||
<manager name="configuration manager" class="com.cloud.configuration.ConfigurationManagerImpl">
|
||||
<param name="premium">false</param>
|
||||
</manager>
|
||||
<manager name="network manager" class="com.cloud.network.NetworkManagerImpl">
|
||||
</manager>
|
||||
<manager name="download manager" class="com.cloud.storage.download.DownloadMonitorImpl">
|
||||
</manager>
|
||||
<manager name="console proxy manager" class="com.cloud.consoleproxy.AgentBasedConsoleProxyManager">
|
||||
</manager>
|
||||
<manager name="vm manager" class="com.cloud.vm.UserVmManagerImpl"/>
|
||||
<manager name="upgrade manager" class="com.cloud.maint.UpgradeManagerImpl">
|
||||
</manager>
|
||||
<manager name="StorageManager" class="com.cloud.storage.StorageManagerImpl">
|
||||
</manager>
|
||||
<manager name="SnapshotManager" class="com.cloud.storage.snapshot.SnapshotManagerImpl">
|
||||
</manager>
|
||||
<manager name="SnapshotScheduler" class="com.cloud.storage.snapshot.SnapshotSchedulerImpl">
|
||||
</manager>
|
||||
<manager name="SyncQueueManager" class="com.cloud.async.SyncQueueManagerImpl">
|
||||
</manager>
|
||||
<manager name="AsyncJobManager" class="com.cloud.async.AsyncJobManagerImpl">
|
||||
</manager>
|
||||
<manager name="AsyncJobExecutorContext" class="com.cloud.async.AsyncJobExecutorContextImpl">
|
||||
</manager>
|
||||
<manager name="HA Manager" class="com.cloud.ha.HighAvailabilityManagerImpl">
|
||||
</manager>
|
||||
<manager name="Alert Manager" class="com.cloud.alert.AlertManagerImpl">
|
||||
</manager>
|
||||
<!--
|
||||
<adapters key="com.cloud.alert.AlertAdapter">
|
||||
<adapter name="ConsoleProxyAlert" class="com.cloud.alert.ConsoleProxyAlertAdapter"/>
|
||||
</adapters>
|
||||
-->
|
||||
</management-server>
|
||||
|
||||
<configuration-server class="com.cloud.server.ConfigurationServerImpl">
|
||||
<dao name="Configuration configuration server" class="com.cloud.configuration.dao.ConfigurationDaoImpl"/>
|
||||
<manager name="configuration manager configuration server" class="com.cloud.configuration.ConfigurationManagerImpl">
|
||||
<param name="premium">false</param>
|
||||
</manager>
|
||||
<dao name="User configuration server" class="com.cloud.user.dao.UserDaoImpl">
|
||||
</dao>
|
||||
<dao name="IP Addresses configuration server" class="com.cloud.network.dao.IPAddressDaoImpl"/>
|
||||
<dao name="Datacenter IP Addresses configuration server" class="com.cloud.dc.dao.DataCenterIpAddressDaoImpl"/>
|
||||
<dao name="host zone configuration server" class="com.cloud.dc.dao.DataCenterDaoImpl">
|
||||
</dao>
|
||||
<dao name="host pod configuration server" class="com.cloud.dc.dao.HostPodDaoImpl">
|
||||
</dao>
|
||||
<dao name="vlan configuration server" class="com.cloud.dc.dao.VlanDaoImpl">
|
||||
</dao>
|
||||
</configuration-server>
|
||||
|
||||
</components.xml>
|
||||
|
|
@ -0,0 +1,527 @@
|
|||
<ehcache xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:noNamespaceSchemaLocation="ehcache.xsd">
|
||||
|
||||
<!--
|
||||
CacheManager Configuration
|
||||
==========================
|
||||
An ehcache.xml corresponds to a single CacheManager.
|
||||
|
||||
See instructions below or the ehcache schema (ehcache.xsd) on how to configure.
|
||||
|
||||
System property tokens can be specified in this file which are replaced when the configuration is loaded.
|
||||
For example multicastGroupPort=${multicastGroupPort} can be replaced with the System property either
|
||||
from an environment variable or a system property specified with a command line switch such as
|
||||
-DmulticastGroupPort=4446.
|
||||
|
||||
DiskStore configuration
|
||||
=======================
|
||||
|
||||
The diskStore element is optional. To turn off disk store path creation, comment out the diskStore
|
||||
element below.
|
||||
|
||||
Configure it if you have overflowToDisk or diskPersistent enabled for any cache.
|
||||
|
||||
If it is not configured, and a cache is created which requires a disk store, a warning will be
|
||||
issued and java.io.tmpdir will automatically be used.
|
||||
|
||||
diskStore has only one attribute - "path". It is the path to the directory where
|
||||
.data and .index files will be created.
|
||||
|
||||
If the path is one of the following Java system properties, it is replaced by its value in the
|
||||
running VM. For backward compatibility these should be specified without being enclosed in the ${token}
|
||||
replacement syntax.
|
||||
|
||||
The following properties are translated:
|
||||
* user.home - User's home directory
|
||||
* user.dir - User's current working directory
|
||||
* java.io.tmpdir - Default temp file path
|
||||
* ehcache.disk.store.dir - A system property you would normally specify on the command line
|
||||
e.g. java -Dehcache.disk.store.dir=/u01/myapp/diskdir ...
|
||||
|
||||
Subdirectories can be specified below the property e.g. java.io.tmpdir/one
|
||||
-->
|
||||
<!-- diskStore path="java.io.tmpdir"/ -->
|
||||
|
||||
<!--
|
||||
CacheManagerEventListener
|
||||
=========================
|
||||
Specifies a CacheManagerEventListenerFactory, to be used to create a CacheManagerEventListener,
|
||||
which is notified when Caches are added or removed from the CacheManager.
|
||||
|
||||
The attributes of CacheManagerEventListenerFactory are:
|
||||
* class - a fully qualified factory class name
|
||||
* properties - comma separated properties having meaning only to the factory.
|
||||
|
||||
Sets the fully qualified class name to be registered as the CacheManager event listener.
|
||||
|
||||
The events include:
|
||||
* adding a Cache
|
||||
* removing a Cache
|
||||
|
||||
Callbacks to listener methods are synchronous and unsynchronized. It is the responsibility
|
||||
of the implementer to safely handle the potential performance and thread safety issues
|
||||
depending on what their listener is doing.
|
||||
|
||||
If no class is specified, no listener is created. There is no default.
|
||||
-->
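<!--
    Illustrative only: a hedged example of wiring in a listener factory, in the same style
    as the com.example.* factories shown later in this file. The class name is hypothetical;
    substitute your own CacheManagerEventListenerFactory implementation.
    <cacheManagerEventListenerFactory
        class="com.example.ExampleCacheManagerEventListenerFactory"
        properties="logLevel=FINE"/>
-->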
|
||||
<cacheManagerEventListenerFactory class="" properties=""/>
|
||||
|
||||
|
||||
<!--
|
||||
CacheManagerPeerProvider
|
||||
========================
|
||||
(Enable for distributed operation)
|
||||
|
||||
Specifies a CacheManagerPeerProviderFactory which will be used to create a
|
||||
CacheManagerPeerProvider, which discovers other CacheManagers in the cluster.
|
||||
|
||||
The attributes of cacheManagerPeerProviderFactory are:
|
||||
* class - a fully qualified factory class name
|
||||
* properties - comma separated properties having meaning only to the factory.
|
||||
|
||||
Ehcache comes with a built-in RMI-based distribution system with two means of discovery of
|
||||
CacheManager peers participating in the cluster:
|
||||
* automatic, using a multicast group. This one automatically discovers peers and detects
|
||||
changes such as peers entering and leaving the group
|
||||
* manual, using manual rmiURL configuration. A hardcoded list of peers is provided at
|
||||
configuration time.
|
||||
|
||||
Configuring Automatic Discovery:
|
||||
Automatic discovery is configured as per the following example:
|
||||
<cacheManagerPeerProviderFactory
|
||||
class="net.sf.ehcache.distribution.RMICacheManagerPeerProviderFactory"
|
||||
properties="peerDiscovery=automatic, multicastGroupAddress=230.0.0.1,
|
||||
multicastGroupPort=4446, timeToLive=32"/>
|
||||
|
||||
Valid properties are:
|
||||
* peerDiscovery (mandatory) - specify "automatic"
|
||||
* multicastGroupAddress (mandatory) - specify a valid multicast group address
|
||||
* multicastGroupPort (mandatory) - specify a dedicated port for the multicast heartbeat
|
||||
traffic
|
||||
* timeToLive - specify a value between 0 and 255 which determines how far the packets will
|
||||
propagate.
|
||||
|
||||
By convention, the restrictions are:
|
||||
0 - the same host
|
||||
1 - the same subnet
|
||||
32 - the same site
|
||||
64 - the same region
|
||||
128 - the same continent
|
||||
255 - unrestricted
|
||||
|
||||
Configuring Manual Discovery:
|
||||
Manual discovery is configured as per the following example:
|
||||
<cacheManagerPeerProviderFactory class=
|
||||
"net.sf.ehcache.distribution.RMICacheManagerPeerProviderFactory"
|
||||
properties="peerDiscovery=manual,
|
||||
rmiUrls=//server1:40000/sampleCache1|//server2:40000/sampleCache1
|
||||
| //server1:40000/sampleCache2|//server2:40000/sampleCache2"
|
||||
propertySeparator="," />
|
||||
|
||||
Valid properties are:
|
||||
* peerDiscovery (mandatory) - specify "manual"
|
||||
* rmiUrls (mandatory) - specify a pipe separated list of rmiUrls, in the form
|
||||
//hostname:port
|
||||
|
||||
The hostname is the hostname of the remote CacheManager peer. The port is the listening
|
||||
port of the RMICacheManagerPeerListener of the remote CacheManager peer.
|
||||
|
||||
Configuring JGroups replication:
|
||||
<cacheManagerPeerProviderFactory class="net.sf.ehcache.distribution.jgroups.JGroupsCacheManagerPeerProviderFactory"
|
||||
properties="connect=UDP(mcast_addr=231.12.21.132;mcast_port=45566;ip_ttl=32;
|
||||
mcast_send_buf_size=150000;mcast_recv_buf_size=80000):
|
||||
PING(timeout=2000;num_initial_members=6):
|
||||
MERGE2(min_interval=5000;max_interval=10000):
|
||||
FD_SOCK:VERIFY_SUSPECT(timeout=1500):
|
||||
pbcast.NAKACK(gc_lag=10;retransmit_timeout=3000):
|
||||
UNICAST(timeout=5000):
|
||||
pbcast.STABLE(desired_avg_gossip=20000):
|
||||
FRAG:
|
||||
pbcast.GMS(join_timeout=5000;join_retry_timeout=2000;shun=false;print_local_addr=false)"
|
||||
propertySeparator="::"
|
||||
/>
|
||||
The only property necessary is the connect String used by JGroups to configure itself. Refer to the JGroups documentation for an explanation
|
||||
of all the protocols. The example above uses UDP multicast. If the connect property is not specified the default JGroups connection will be
|
||||
used.
|
||||
|
||||
-->
|
||||
<cacheManagerPeerProviderFactory
|
||||
class="net.sf.ehcache.distribution.RMICacheManagerPeerProviderFactory"
|
||||
properties="peerDiscovery=automatic,
|
||||
multicastGroupAddress=230.0.0.1,
|
||||
multicastGroupPort=4446, timeToLive=1"
|
||||
propertySeparator=","
|
||||
/>
|
||||
|
||||
|
||||
<!--
|
||||
CacheManagerPeerListener
|
||||
========================
|
||||
(Enable for distributed operation)
|
||||
|
||||
Specifies a CacheManagerPeerListenerFactory which will be used to create a
|
||||
CacheManagerPeerListener, which
|
||||
listens for messages from cache replicators participating in the cluster.
|
||||
|
||||
The attributes of cacheManagerPeerListenerFactory are:
|
||||
class - a fully qualified factory class name
|
||||
properties - comma separated properties having meaning only to the factory.
|
||||
|
||||
Ehcache comes with a built-in RMI-based distribution system. The listener component is
|
||||
RMICacheManagerPeerListener which is configured using
|
||||
RMICacheManagerPeerListenerFactory. It is configured as per the following example:
|
||||
|
||||
<cacheManagerPeerListenerFactory
|
||||
class="net.sf.ehcache.distribution.RMICacheManagerPeerListenerFactory"
|
||||
properties="hostName=fully_qualified_hostname_or_ip,
|
||||
port=40001,
|
||||
socketTimeoutMillis=120000"
|
||||
propertySeparator="," />
|
||||
|
||||
All properties are optional. They are:
|
||||
* hostName - the hostName of the host the listener is running on. Specify
|
||||
where the host is multihomed and you want to control the interface over which cluster
|
||||
messages are received. Defaults to the host name of the default interface if not
|
||||
specified.
|
||||
* port - the port the RMI Registry listener listens on. This defaults to a free port if not specified.
|
||||
* remoteObjectPort - the port number on which the remote objects bound in the registry receive calls.
|
||||
This defaults to a free port if not specified.
|
||||
* socketTimeoutMillis - the number of ms client sockets will stay open when sending
|
||||
messages to the listener. This should be long enough for the slowest message.
|
||||
If not specified it defaults to 120000ms.
|
||||
|
||||
-->
|
||||
<cacheManagerPeerListenerFactory
|
||||
class="net.sf.ehcache.distribution.RMICacheManagerPeerListenerFactory"/>
|
||||
|
||||
|
||||
<!--
|
||||
Cache configuration
|
||||
===================
|
||||
|
||||
The following attributes are required.
|
||||
|
||||
name:
|
||||
Sets the name of the cache. This is used to identify the cache. It must be unique.
|
||||
|
||||
maxElementsInMemory:
|
||||
Sets the maximum number of objects that will be created in memory
|
||||
|
||||
maxElementsOnDisk:
|
||||
Sets the maximum number of objects that will be maintained in the DiskStore
|
||||
The default value is zero, meaning unlimited.
|
||||
|
||||
eternal:
|
||||
Sets whether elements are eternal. If eternal, timeouts are ignored and the
|
||||
element is never expired.
|
||||
|
||||
overflowToDisk:
|
||||
Sets whether elements can overflow to disk when the memory store
|
||||
has reached the maxInMemory limit.
|
||||
|
||||
The following attributes and elements are optional.
|
||||
|
||||
timeToIdleSeconds:
|
||||
Sets the time to idle for an element before it expires.
|
||||
i.e. The maximum amount of time between accesses before an element expires
|
||||
Is only used if the element is not eternal.
|
||||
Optional attribute. A value of 0 means that an Element can idle for infinity.
|
||||
The default value is 0.
|
||||
|
||||
timeToLiveSeconds:
|
||||
Sets the time to live for an element before it expires.
|
||||
i.e. The maximum time between creation time and when an element expires.
|
||||
Is only used if the element is not eternal.
|
||||
Optional attribute. A value of 0 means that an Element can live for infinity.
|
||||
The default value is 0.
|
||||
|
||||
diskPersistent:
|
||||
Whether the disk store persists between restarts of the Virtual Machine.
|
||||
The default value is false.
|
||||
|
||||
diskExpiryThreadIntervalSeconds:
|
||||
The number of seconds between runs of the disk expiry thread. The default value
|
||||
is 120 seconds.
|
||||
|
||||
diskSpoolBufferSizeMB:
|
||||
This is the size to allocate the DiskStore for a spool buffer. Writes are made
|
||||
to this area and then asynchronously written to disk. The default size is 30MB.
|
||||
Each spool buffer is used only by its cache. If you get OutOfMemory errors consider
|
||||
lowering this value. To improve DiskStore performance consider increasing it. Trace level
|
||||
logging in the DiskStore will show if put back ups are occurring.
|
||||
|
||||
memoryStoreEvictionPolicy:
|
||||
The policy to be enforced upon reaching the maxElementsInMemory limit. The default
|
||||
policy is Least Recently Used (specified as LRU). Other policies available -
|
||||
First In First Out (specified as FIFO) and Less Frequently Used
|
||||
(specified as LFU)
|
||||
|
||||
Cache elements can also contain sub elements which take the same format of a factory class
|
||||
and properties. Defined sub-elements are:
|
||||
|
||||
* cacheEventListenerFactory - Enables registration of listeners for cache events, such as
|
||||
put, remove, update, and expire.
|
||||
|
||||
* bootstrapCacheLoaderFactory - Specifies a BootstrapCacheLoader, which is called by a
|
||||
cache on initialisation to prepopulate itself.
|
||||
|
||||
* cacheExtensionFactory - Specifies a CacheExtension, a generic mechanism to tie a class
|
||||
which holds a reference to a cache to the cache lifecycle.
|
||||
|
||||
* cacheExceptionHandlerFactory - Specifies a CacheExceptionHandler, which is called when
|
||||
cache exceptions occur.
|
||||
|
||||
* cacheLoaderFactory - Specifies a CacheLoader, which can be used both asynchronously and
|
||||
synchronously to load objects into a cache.
|
||||
|
||||
RMI Cache Replication
|
||||
|
||||
Each cache that will be distributed needs to set a cache event listener which replicates
|
||||
messages to the other CacheManager peers. For the built-in RMI implementation this is done
|
||||
by adding a cacheEventListenerFactory element of type RMICacheReplicatorFactory to each
|
||||
distributed cache's configuration as per the following example:
|
||||
|
||||
<cacheEventListenerFactory class="net.sf.ehcache.distribution.RMICacheReplicatorFactory"
|
||||
properties="replicateAsynchronously=true,
|
||||
replicatePuts=true,
|
||||
replicateUpdates=true,
|
||||
replicateUpdatesViaCopy=true,
|
||||
replicateRemovals=true
|
||||
asynchronousReplicationIntervalMillis=<number of milliseconds>"
|
||||
propertySeparator="," />
|
||||
|
||||
The RMICacheReplicatorFactory recognises the following properties:
|
||||
|
||||
* replicatePuts=true|false - whether new elements placed in a cache are
|
||||
replicated to others. Defaults to true.
|
||||
|
||||
* replicateUpdates=true|false - whether new elements which override an
|
||||
element already existing with the same key are replicated. Defaults to true.
|
||||
|
||||
* replicateRemovals=true - whether element removals are replicated. Defaults to true.
|
||||
|
||||
* replicateAsynchronously=true | false - whether replications are
|
||||
asynchronous (true) or synchronous (false). Defaults to true.
|
||||
|
||||
* replicateUpdatesViaCopy=true | false - whether the new elements are
|
||||
copied to other caches (true), or whether a remove message is sent. Defaults to true.
|
||||
|
||||
* asynchronousReplicationIntervalMillis=<number of milliseconds> - The asynchronous
|
||||
replicator runs at a set interval of milliseconds. The default is 1000. The minimum
|
||||
is 10. This property is only applicable if replicateAsynchronously=true
|
||||
|
||||
|
||||
For JGroups replication this is done with:
|
||||
<cacheEventListenerFactory class="net.sf.ehcache.distribution.jgroups.JGroupsCacheReplicatorFactory"
|
||||
properties="replicateAsynchronously=true, replicatePuts=true,
|
||||
replicateUpdates=true, replicateUpdatesViaCopy=false,
|
||||
replicateRemovals=true,asynchronousReplicationIntervalMillis=1000"/>
|
||||
This listener supports the same properties as the RMICacheReplicatorFactory.
|
||||
|
||||
Cluster Bootstrapping
|
||||
|
||||
The RMIBootstrapCacheLoader bootstraps caches in clusters where RMICacheReplicators are
|
||||
used. It is configured as per the following example:
|
||||
|
||||
<bootstrapCacheLoaderFactory
|
||||
class="net.sf.ehcache.distribution.RMIBootstrapCacheLoaderFactory"
|
||||
properties="bootstrapAsynchronously=true, maximumChunkSizeBytes=5000000"
|
||||
propertySeparator="," />
|
||||
|
||||
The RMIBootstrapCacheLoaderFactory recognises the following optional properties:
|
||||
|
||||
* bootstrapAsynchronously=true|false - whether the bootstrap happens in the background
|
||||
after the cache has started. If false, bootstrapping must complete before the cache is
|
||||
made available. The default value is true.
|
||||
|
||||
* maximumChunkSizeBytes=<integer> - Caches can potentially be very large, larger than the
|
||||
memory limits of the VM. This property allows the bootstrapper to fetch elements in
|
||||
chunks. The default chunk size is 5000000 (5MB).
|
||||
|
||||
|
||||
Cache Exception Handling
|
||||
|
||||
By default, most cache operations will propagate a runtime CacheException on failure. An
|
||||
interceptor, using a dynamic proxy, may be configured so that a CacheExceptionHandler can
|
||||
be configured to intercept Exceptions. Errors are not intercepted.
|
||||
|
||||
It is configured as per the following example:
|
||||
|
||||
<cacheExceptionHandlerFactory class="com.example.ExampleExceptionHandlerFactory"
|
||||
properties="logLevel=FINE"/>
|
||||
|
||||
Caches with ExceptionHandling configured are not of type Cache, but are of type Ehcache only,
|
||||
and are not available using CacheManager.getCache(), but using CacheManager.getEhcache().
|
||||
|
||||
|
||||
Cache Loader
|
||||
|
||||
A default CacheLoader may be set which loads objects into the cache through asynchronous and
|
||||
synchronous methods on Cache. This is different to the bootstrap cache loader, which is used
|
||||
only in distributed caching.
|
||||
|
||||
It is configured as per the following example:
|
||||
|
||||
<cacheLoaderFactory class="com.example.ExampleCacheLoaderFactory"
|
||||
properties="type=int,startCounter=10"/>
|
||||
|
||||
Cache Extension
|
||||
|
||||
CacheExtensions are a general purpose mechanism to allow generic extensions to a Cache.
|
||||
CacheExtensions are tied into the Cache lifecycle.
|
||||
|
||||
CacheExtensions are created using the CacheExtensionFactory which has a
|
||||
<code>createCacheExtension()</code> method which takes as parameters a
|
||||
Cache and properties. It can thus call back into any public method on Cache, including, of
|
||||
course, the load methods.
|
||||
|
||||
Extensions are added as per the following example:
|
||||
|
||||
<cacheExtensionFactory class="com.example.FileWatchingCacheRefresherExtensionFactory"
|
||||
properties="refreshIntervalMillis=18000, loaderTimeout=3000,
|
||||
flushPeriod=whatever, someOtherProperty=someValue ..."/>
|
||||
|
||||
-->
|
||||
|
||||
|
||||
<!--
|
||||
Mandatory Default Cache configuration. These settings will be applied to caches
|
||||
created programmatically using CacheManager.add(String cacheName).
|
||||
|
||||
The defaultCache has an implicit name "default" which is a reserved cache name.
|
||||
-->
|
||||
<defaultCache
|
||||
maxElementsInMemory="10000"
|
||||
eternal="false"
|
||||
timeToIdleSeconds="120"
|
||||
timeToLiveSeconds="120"
|
||||
overflowToDisk="false"
|
||||
diskSpoolBufferSizeMB="30"
|
||||
maxElementsOnDisk="10000000"
|
||||
diskPersistent="false"
|
||||
diskExpiryThreadIntervalSeconds="120"
|
||||
memoryStoreEvictionPolicy="LRU"
|
||||
/>
|
||||
|
||||
<!--
|
||||
Sample caches. Following are some example caches. Remove these before use.
|
||||
-->
|
||||
|
||||
<!--
|
||||
Sample cache named sampleCache1
|
||||
This cache contains a maximum in memory of 10000 elements, and will expire
|
||||
an element if it is idle for more than 5 minutes and lives for more than
|
||||
10 minutes.
|
||||
|
||||
If there are more than 10000 elements it will overflow to the
|
||||
disk cache, which in this configuration will go to wherever java.io.tmpdir is
|
||||
defined on your system. On a standard Linux system this will be /tmp.
|
||||
-->
|
||||
<!--
|
||||
<cache name="sampleCache1"
|
||||
maxElementsInMemory="10000"
|
||||
maxElementsOnDisk="1000"
|
||||
eternal="false"
|
||||
overflowToDisk="true"
|
||||
diskSpoolBufferSizeMB="20"
|
||||
timeToIdleSeconds="300"
|
||||
timeToLiveSeconds="600"
|
||||
memoryStoreEvictionPolicy="LFU"
|
||||
/>
|
||||
-->
|
||||
|
||||
|
||||
<!--
|
||||
Sample cache named sampleCache2
|
||||
This cache has a maximum of 1000 elements in memory. There is no overflow to disk, so 1000
|
||||
is also the maximum cache size. Note that when a cache is eternal, timeToLive and
|
||||
timeToIdle are not used and do not need to be specified.
|
||||
-->
|
||||
<!--
|
||||
<cache name="sampleCache2"
|
||||
maxElementsInMemory="1000"
|
||||
eternal="true"
|
||||
overflowToDisk="false"
|
||||
memoryStoreEvictionPolicy="FIFO"
|
||||
/>
|
||||
-->
|
||||
|
||||
|
||||
<!--
|
||||
Sample cache named sampleCache3. This cache overflows to disk. The disk store is
|
||||
persistent between cache and VM restarts. The disk expiry thread interval is set to 10
|
||||
minutes, overriding the default of 2 minutes.
|
||||
-->
|
||||
<!--
|
||||
<cache name="sampleCache3"
|
||||
maxElementsInMemory="500"
|
||||
eternal="false"
|
||||
overflowToDisk="true"
|
||||
timeToIdleSeconds="300"
|
||||
timeToLiveSeconds="600"
|
||||
diskPersistent="true"
|
||||
diskExpiryThreadIntervalSeconds="1"
|
||||
memoryStoreEvictionPolicy="LFU"
|
||||
/>
|
||||
-->
|
||||
|
||||
|
||||
<!--
|
||||
Sample distributed cache named sampleDistributedCache1.
|
||||
This cache replicates using defaults.
|
||||
It also bootstraps from the cluster, using default properties.
|
||||
-->
|
||||
<!--
|
||||
<cache name="sampleDistributedCache1"
|
||||
maxElementsInMemory="10"
|
||||
eternal="false"
|
||||
timeToIdleSeconds="100"
|
||||
timeToLiveSeconds="100"
|
||||
overflowToDisk="false">
|
||||
<cacheEventListenerFactory
|
||||
class="net.sf.ehcache.distribution.RMICacheReplicatorFactory"/>
|
||||
<bootstrapCacheLoaderFactory
|
||||
class="net.sf.ehcache.distribution.RMIBootstrapCacheLoaderFactory"/>
|
||||
</cache>
|
||||
-->
|
||||
|
||||
|
||||
<!--
|
||||
Sample distributed cache named sampleDistributedCache2.
|
||||
This cache replicates using specific properties.
|
||||
It only replicates updates and does so synchronously via copy
|
||||
-->
|
||||
<!--
|
||||
<cache name="sampleDistributedCache2"
|
||||
maxElementsInMemory="10"
|
||||
eternal="false"
|
||||
timeToIdleSeconds="100"
|
||||
timeToLiveSeconds="100"
|
||||
overflowToDisk="false">
|
||||
<cacheEventListenerFactory
|
||||
class="net.sf.ehcache.distribution.RMICacheReplicatorFactory"
|
||||
properties="replicateAsynchronously=false, replicatePuts=false,
|
||||
replicateUpdates=true, replicateUpdatesViaCopy=true,
|
||||
replicateRemovals=false"/>
|
||||
</cache>
|
||||
-->
|
||||
|
||||
<!--
|
||||
Sample distributed cache named sampleDistributedCache3.
|
||||
This cache replicates using defaults except that the asynchronous replication
|
||||
interval is set to 200ms.
|
||||
-->
|
||||
<!--
|
||||
<cache name="sampleDistributedCache3"
|
||||
maxElementsInMemory="10"
|
||||
eternal="false"
|
||||
timeToIdleSeconds="100"
|
||||
timeToLiveSeconds="100"
|
||||
overflowToDisk="false">
|
||||
<cacheEventListenerFactory
|
||||
class="net.sf.ehcache.distribution.RMICacheReplicatorFactory"
|
||||
properties="asynchronousReplicationIntervalMillis=200"/>
|
||||
</cache>
|
||||
-->
|
||||
|
||||
</ehcache>
|
||||
|
|
@ -0,0 +1,90 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!DOCTYPE log4j:configuration SYSTEM "log4j.dtd">
|
||||
|
||||
<log4j:configuration xmlns:log4j="http://jakarta.apache.org/log4j/" debug="false">
|
||||
|
||||
<!-- ================================= -->
|
||||
<!-- Preserve messages in a local file -->
|
||||
<!-- ================================= -->
|
||||
|
||||
<!-- A time/date based rolling appender -->
|
||||
<appender name="FILE" class="org.apache.log4j.rolling.RollingFileAppender">
|
||||
<param name="Append" value="true"/>
|
||||
<param name="Threshold" value="DEBUG"/>
|
||||
<rollingPolicy class="org.apache.log4j.rolling.TimeBasedRollingPolicy">
|
||||
<param name="FileNamePattern" value="/var/log/cloud/cloud.log.%d{yyyy-MM-dd}{GMT}.gz"/>
|
||||
<param name="ActiveFileName" value="/var/log/cloud/cloud.log"/>
|
||||
</rollingPolicy>
|
||||
|
||||
<layout class="org.apache.log4j.EnhancedPatternLayout">
|
||||
<param name="ConversionPattern" value="%d{ISO8601}{GMT} %-5p [%c{3}] (%t:%x) %m%n"/>
|
||||
</layout>
|
||||
</appender>
|
||||
|
||||
<appender name="APISERVER" class="org.apache.log4j.rolling.RollingFileAppender">
|
||||
<param name="Append" value="true"/>
|
||||
<param name="Threshold" value="DEBUG"/>
|
||||
<rollingPolicy class="org.apache.log4j.rolling.TimeBasedRollingPolicy">
|
||||
<param name="FileNamePattern" value="/var/log/cloud/api-server.log.%d{yyyy-MM-dd}{GMT}.gz"/>
|
||||
<param name="ActiveFileName" value="/var/log/cloud/api-server.log"/>
|
||||
</rollingPolicy>
|
||||
|
||||
<layout class="org.apache.log4j.EnhancedPatternLayout">
|
||||
<param name="ConversionPattern" value="%d{ISO8601}{GMT} %m%n"/>
|
||||
</layout>
|
||||
</appender>
|
||||
|
||||
<!-- ============================== -->
|
||||
<!-- Append messages to the console -->
|
||||
<!-- ============================== -->
|
||||
|
||||
<appender name="CONSOLE" class="org.apache.log4j.ConsoleAppender">
|
||||
<param name="Target" value="System.out"/>
|
||||
<param name="Threshold" value="INFO"/>
|
||||
|
||||
<layout class="org.apache.log4j.EnhancedPatternLayout">
|
||||
<param name="ConversionPattern" value="%d{ABSOLUTE}{GMT} %5p %c{1}:%L - %m%n"/>
|
||||
</layout>
|
||||
</appender>
|
||||
|
||||
<!-- ================ -->
|
||||
<!-- Limit categories -->
|
||||
<!-- ================ -->
|
||||
|
||||
<category name="com.cloud">
|
||||
<priority value="DEBUG"/>
|
||||
</category>
|
||||
|
||||
<!-- Limit the org.apache category to INFO as its DEBUG is verbose -->
|
||||
<category name="org.apache">
|
||||
<priority value="INFO"/>
|
||||
</category>
|
||||
|
||||
<category name="org">
|
||||
<priority value="INFO"/>
|
||||
</category>
|
||||
|
||||
<category name="net">
|
||||
<priority value="INFO"/>
|
||||
</category>
|
||||
|
||||
<category name="apiserver.com.cloud">
|
||||
<priority value="DEBUG"/>
|
||||
</category>
|
||||
|
||||
<logger name="apiserver.com.cloud" additivity="false">
|
||||
<level value="DEBUG"/>
|
||||
<appender-ref ref="APISERVER"/>
|
||||
</logger>
|
||||
|
||||
<!-- ======================= -->
|
||||
<!-- Setup the Root category -->
|
||||
<!-- ======================= -->
|
||||
|
||||
<root>
|
||||
<level value="INFO"/>
|
||||
<appender-ref ref="CONSOLE"/>
|
||||
<appender-ref ref="FILE"/>
|
||||
</root>
|
||||
|
||||
</log4j:configuration>
|
||||
|
|
@ -0,0 +1,90 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!DOCTYPE log4j:configuration SYSTEM "log4j.dtd">
|
||||
|
||||
<log4j:configuration xmlns:log4j="http://jakarta.apache.org/log4j/" debug="false">
|
||||
|
||||
<!-- ================================= -->
|
||||
<!-- Preserve messages in a local file -->
|
||||
<!-- ================================= -->
|
||||
|
||||
<!-- A time/date based rolling appender -->
|
||||
<appender name="FILE" class="org.apache.log4j.rolling.RollingFileAppender">
|
||||
<param name="Append" value="true"/>
|
||||
<param name="Threshold" value="DEBUG"/>
|
||||
<rollingPolicy class="org.apache.log4j.rolling.TimeBasedRollingPolicy">
|
||||
<param name="FileNamePattern" value="@logdir@/cloud.log.%d{yyyy-MM-dd}{GMT}.gz"/>
|
||||
<param name="ActiveFileName" value="@logdir@/cloud.log"/>
|
||||
</rollingPolicy>
|
||||
|
||||
<layout class="org.apache.log4j.EnhancedPatternLayout">
|
||||
<param name="ConversionPattern" value="%d{ISO8601}{GMT} %-5p [%c{3}] (%t:%x) %m%n"/>
|
||||
</layout>
|
||||
</appender>
|
||||
|
||||
<appender name="APISERVER" class="org.apache.log4j.rolling.RollingFileAppender">
|
||||
<param name="Append" value="true"/>
|
||||
<param name="Threshold" value="DEBUG"/>
|
||||
<rollingPolicy class="org.apache.log4j.rolling.TimeBasedRollingPolicy">
|
||||
<param name="FileNamePattern" value="@logdir@/api-server.log.%d{yyyy-MM-dd}{GMT}.gz"/>
|
||||
<param name="ActiveFileName" value="@logdir@/api-server.log"/>
|
||||
</rollingPolicy>
|
||||
|
||||
<layout class="org.apache.log4j.EnhancedPatternLayout">
|
||||
<param name="ConversionPattern" value="%d{ISO8601}{GMT} %m%n"/>
|
||||
</layout>
|
||||
</appender>
|
||||
|
||||
<!-- ============================== -->
|
||||
<!-- Append messages to the console -->
|
||||
<!-- ============================== -->
|
||||
|
||||
<appender name="CONSOLE" class="org.apache.log4j.ConsoleAppender">
|
||||
<param name="Target" value="System.out"/>
|
||||
<param name="Threshold" value="INFO"/>
|
||||
|
||||
<layout class="org.apache.log4j.EnhancedPatternLayout">
|
||||
<param name="ConversionPattern" value="%d{ABSOLUTE}{GMT} %5p %c{1}:%L - %m%n"/>
|
||||
</layout>
|
||||
</appender>
|
||||
|
||||
<!-- ================ -->
|
||||
<!-- Limit categories -->
|
||||
<!-- ================ -->
|
||||
|
||||
<category name="com.cloud">
|
||||
<priority value="DEBUG"/>
|
||||
</category>
|
||||
|
||||
<!-- Limit the org.apache category to INFO as its DEBUG is verbose -->
|
||||
<category name="org.apache">
|
||||
<priority value="INFO"/>
|
||||
</category>
|
||||
|
||||
<category name="org">
|
||||
<priority value="INFO"/>
|
||||
</category>
|
||||
|
||||
<category name="net">
|
||||
<priority value="INFO"/>
|
||||
</category>
|
||||
|
||||
<category name="apiserver.com.cloud">
|
||||
<priority value="DEBUG"/>
|
||||
</category>
|
||||
|
||||
<logger name="apiserver.com.cloud" additivity="false">
|
||||
<level value="DEBUG"/>
|
||||
<appender-ref ref="APISERVER"/>
|
||||
</logger>
|
||||
|
||||
<!-- ======================= -->
|
||||
<!-- Setup the Root category -->
|
||||
<!-- ======================= -->
|
||||
|
||||
<root>
|
||||
<level value="INFO"/>
|
||||
<appender-ref ref="CONSOLE"/>
|
||||
<appender-ref ref="FILE"/>
|
||||
</root>
|
||||
|
||||
</log4j:configuration>
|
||||
|
|
@ -0,0 +1,147 @@
|
|||
<?xml version='1.0' encoding='utf-8'?>
|
||||
<!--
|
||||
Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
contributor license agreements. See the NOTICE file distributed with
|
||||
this work for additional information regarding copyright ownership.
|
||||
The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
(the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
-->
|
||||
<!-- Note: A "Server" is not itself a "Container", so you may not
|
||||
define subcomponents such as "Valves" at this level.
|
||||
Documentation at /docs/config/server.html
|
||||
-->
|
||||
<Server port="8005" shutdown="SHUTDOWN">
|
||||
|
||||
<!--APR library loader. Documentation at /docs/apr.html -->
|
||||
<Listener className="org.apache.catalina.core.AprLifecycleListener" SSLEngine="on" />
|
||||
<!-- Initialize Jasper before webapps are loaded. Documentation at /docs/jasper-howto.html -->
|
||||
<Listener className="org.apache.catalina.core.JasperListener" />
|
||||
<!-- JMX Support for the Tomcat server. Documentation at /docs/non-existent.html -->
|
||||
<Listener className="org.apache.catalina.mbeans.ServerLifecycleListener" />
|
||||
<Listener className="org.apache.catalina.mbeans.GlobalResourcesLifecycleListener" />
|
||||
|
||||
<!-- Global JNDI resources
|
||||
Documentation at /docs/jndi-resources-howto.html
|
||||
-->
|
||||
<GlobalNamingResources>
|
||||
<!-- Editable user database that can also be used by
|
||||
UserDatabaseRealm to authenticate users
|
||||
-->
|
||||
<Resource name="UserDatabase" auth="Container"
|
||||
type="org.apache.catalina.UserDatabase"
|
||||
description="User database that can be updated and saved"
|
||||
factory="org.apache.catalina.users.MemoryUserDatabaseFactory"
|
||||
pathname="conf/tomcat-users.xml" />
|
||||
</GlobalNamingResources>
|
||||
|
||||
<!-- A "Service" is a collection of one or more "Connectors" that share
|
||||
a single "Container" Note: A "Service" is not itself a "Container",
|
||||
so you may not define subcomponents such as "Valves" at this level.
|
||||
Documentation at /docs/config/service.html
|
||||
-->
|
||||
<Service name="Catalina">
|
||||
|
||||
<!--The connectors can use a shared executor, you can define one or more named thread pools-->
|
||||
<Executor name="tomcatThreadPool" namePrefix="catalina-exec-"
|
||||
maxThreads="150" minSpareThreads="25"/>
|
||||
|
||||
|
||||
<!-- A "Connector" represents an endpoint by which requests are received
|
||||
and responses are returned. Documentation at :
|
||||
Java HTTP Connector: /docs/config/http.html (blocking & non-blocking)
|
||||
Java AJP Connector: /docs/config/ajp.html
|
||||
APR (HTTP/AJP) Connector: /docs/apr.html
|
||||
Define a non-SSL HTTP/1.1 Connector on port 8080
|
||||
-->
|
||||
<!--
|
||||
<Connector port="8080" protocol="HTTP/1.1"
|
||||
connectionTimeout="20000"
|
||||
redirectPort="8443" />
|
||||
-->
|
||||
<!-- A "Connector" using the shared thread pool-->
|
||||
<Connector executor="tomcatThreadPool"
|
||||
port="8080" protocol="org.apache.coyote.http11.Http11NioProtocol"
|
||||
connectionTimeout="20000" disableUploadTimeout="true"
|
||||
acceptCount="150" enableLookups="false" maxThreads="150"
|
||||
maxHttpHeaderSize="8192" redirectPort="8443" />
|
||||
<!-- Define a SSL HTTP/1.1 Connector on port 8443
|
||||
This connector uses the JSSE configuration, when using APR, the
|
||||
connector should be using the OpenSSL style configuration
|
||||
described in the APR documentation -->
|
||||
<!--
|
||||
<Connector port="8443" protocol="HTTP/1.1" SSLEnabled="true"
|
||||
maxThreads="150" scheme="https" secure="true"
|
||||
clientAuth="false" sslProtocol="TLS"
|
||||
keystoreType="PKCS12"
|
||||
keystoreFile="conf\cloud-localhost.pk12"
|
||||
keystorePass="password"
|
||||
/>
|
||||
-->
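    <!-- Illustrative sketch, for reference only: when the APR connector is in
         use, the equivalent SSL connector is configured in the OpenSSL style,
         pointing at certificate and key files rather than a Java keystore.
         The certificate and key file names below are assumptions.

         <Connector port="8443" protocol="HTTP/1.1" SSLEnabled="true"
                    maxThreads="150" scheme="https" secure="true"
                    SSLCertificateFile="conf/cloud-localhost.crt"
                    SSLCertificateKeyFile="conf/cloud-localhost.key"
                    SSLProtocol="TLSv1"/>
    -->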

    <!-- Define an AJP 1.3 Connector on port 8009 -->
    <Connector port="8009" protocol="AJP/1.3" redirectPort="8443" />


    <!-- An Engine represents the entry point (within Catalina) that processes
         every request. The Engine implementation for Tomcat stand alone
         analyzes the HTTP headers included with the request, and passes them
         on to the appropriate Host (virtual host).
         Documentation at /docs/config/engine.html -->

    <!-- You should set jvmRoute to support load-balancing via AJP ie :
    <Engine name="Catalina" defaultHost="localhost" jvmRoute="jvm1">
    -->
    <Engine name="Catalina" defaultHost="localhost">

      <!--For clustering, please take a look at documentation at:
          /docs/cluster-howto.html (simple how to)
          /docs/config/cluster.html (reference documentation) -->
      <!--
      <Cluster className="org.apache.catalina.ha.tcp.SimpleTcpCluster"/>
      -->

      <!-- The request dumper valve dumps useful debugging information about
           the request and response data received and sent by Tomcat.
           Documentation at: /docs/config/valve.html -->
      <!--
      <Valve className="org.apache.catalina.valves.RequestDumperValve"/>
      -->

      <!-- This Realm uses the UserDatabase configured in the global JNDI
           resources under the key "UserDatabase". Any edits
           that are performed against this UserDatabase are immediately
           available for use by the Realm. -->
      <Realm className="org.apache.catalina.realm.UserDatabaseRealm"
             resourceName="UserDatabase"/>
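      <!-- Illustrative sketch, for reference only: this Realm reads its users
           and roles from the UserDatabase resource declared earlier, which
           points at conf/tomcat-users.xml. A minimal entry in that file
           typically looks like the following; the user, password and role
           names here are assumptions.

           <tomcat-users>
              <role rolename="manager"/>
              <user username="admin" password="changeme" roles="manager"/>
           </tomcat-users>
      -->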

      <!-- Define the default virtual host
           Note: XML Schema validation will not work with Xerces 2.2.
      -->
      <Host name="localhost" appBase="webapps"
            unpackWARs="true" autoDeploy="true"
            xmlValidation="false" xmlNamespaceAware="false">

        <!-- SingleSignOn valve, share authentication between web applications
             Documentation at: /docs/config/valve.html -->
        <!--
        <Valve className="org.apache.catalina.authenticator.SingleSignOn" />
        -->

        <!-- Access log processes all example.
             Documentation at: /docs/config/valve.html -->
        <Valve className="org.apache.catalina.valves.FastCommonAccessLogValve" directory="logs"
               prefix="access_log." suffix=".txt" pattern="common" resolveHosts="false"/>

      </Host>
    </Engine>
  </Service>
</Server>