merging usage

This commit is contained in:
David Nalley 2011-08-31 13:36:41 -04:00
parent c56432bc0a
commit ee81905814
24 changed files with 3835 additions and 0 deletions

12
usage/.classpath Normal file
View File

@ -0,0 +1,12 @@
<?xml version="1.0" encoding="UTF-8"?>
<classpath>
<classpathentry kind="src" path="src"/>
<classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER"/>
<classpathentry combineaccessrules="false" exported="true" kind="src" path="/core"/>
<classpathentry combineaccessrules="false" exported="true" kind="src" path="/utils"/>
<classpathentry combineaccessrules="false" kind="src" path="/server"/>
<classpathentry combineaccessrules="false" exported="true" kind="src" path="/premium"/>
<classpathentry kind="src" path="/api"/>
<classpathentry combineaccessrules="false" kind="src" path="/deps"/>
<classpathentry kind="output" path="bin"/>
</classpath>

17
usage/.project Normal file
View File

@ -0,0 +1,17 @@
<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
<name>usage</name>
<comment></comment>
<projects>
</projects>
<buildSpec>
<buildCommand>
<name>org.eclipse.jdt.core.javabuilder</name>
<arguments>
</arguments>
</buildCommand>
</buildSpec>
<natures>
<nature>org.eclipse.jdt.core.javanature</nature>
</natures>
</projectDescription>

View File

@ -0,0 +1,68 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE log4j:configuration SYSTEM "log4j.dtd">
<log4j:configuration xmlns:log4j="http://jakarta.apache.org/log4j/" debug="false">
<!-- ============================== -->
<!-- Append messages to the console -->
<!-- ============================== -->
<appender name="CONSOLE" class="org.apache.log4j.ConsoleAppender">
<param name="Target" value="System.out"/>
<param name="Threshold" value="INFO"/>
<layout class="org.apache.log4j.EnhancedPatternLayout">
<param name="ConversionPattern" value="%d{ABSOLUTE}{GMT} %5p %c{1}:%L - %m%n"/>
</layout>
</appender>
<!-- ================================ -->
<!-- Append messages to the usage log -->
<!-- ================================ -->
<!-- A time/date based rolling appender -->
<appender name="USAGE" class="org.apache.log4j.rolling.RollingFileAppender">
<param name="Append" value="true"/>
<param name="Threshold" value="DEBUG"/>
<rollingPolicy class="org.apache.log4j.rolling.TimeBasedRollingPolicy">
<param name="FileNamePattern" value="@USAGELOG@.%d{yyyy-MM-dd}{GMT}.gz"/>
<param name="ActiveFileName" value="@USAGELOG@"/>
</rollingPolicy>
<layout class="org.apache.log4j.EnhancedPatternLayout">
<param name="ConversionPattern" value="%d{ISO8601}{GMT} %-5p [%c{3}] (%t:%x) %m%n"/>
</layout>
</appender>
<!-- ================ -->
<!-- Limit categories -->
<!-- ================ -->
<category name="com.cloud">
<priority value="DEBUG"/>
</category>
<!-- Limit the org.apache category to INFO as its DEBUG is verbose -->
<category name="org.apache">
<priority value="INFO"/>
</category>
<category name="org">
<priority value="INFO"/>
</category>
<category name="net">
<priority value="INFO"/>
</category>
<!-- ======================= -->
<!-- Setup the Root category -->
<!-- ======================= -->
<root>
<level value="INFO"/>
<appender-ref ref="CONSOLE"/>
<appender-ref ref="USAGE"/>
</root>
</log4j:configuration>

View File

@ -0,0 +1,57 @@
<?xml version="1.0"?>
<!--
usage-components.xml is the configuration file for the VM Ops
usage servers.
Here are some places to look for information.
- To find out the general functionality that each Manager
or Adapter provide, look at the javadoc for the interface
that it implements. The interface is usually the
"key" attribute in the declaration.
- To find specific implementation of each Manager or
Adapter, look at the javadoc for the actual class. The
class can be found in the <class> element.
- To find out the configuration parameters for each Manager
or Adapter, look at the javadoc for the actual implementation
class. It should be documented in the description of the
class.
- To know more about the components.xml in general, look for
the javadoc for ComponentLocator.java.
If you find that a Manager or Adapter is not properly
documented, please contact the author.
-->
<components.xml>
<usage-server>
<dao name="VM Instance" class="com.cloud.vm.dao.VMInstanceDaoImpl"/>
<dao name="User VM" class="com.cloud.vm.dao.UserVmDaoImpl"/>
<dao name="ServiceOffering" class="com.cloud.service.dao.ServiceOfferingDaoImpl">
<param name="cache.size">50</param>
<param name="cache.time.to.live">-1</param>
</dao>
<dao name="Events" class="com.cloud.event.dao.EventDaoImpl"/>
<dao name="UserStats" class="com.cloud.user.dao.UserStatisticsDaoImpl"/>
<dao name="IP Addresses" class="com.cloud.network.dao.IPAddressDaoImpl"/>
<dao name="Usage" class="com.cloud.usage.dao.UsageDaoImpl"/>
<dao name="Domain" class="com.cloud.domain.dao.DomainDaoImpl"/>
<dao name="Account" class="com.cloud.user.dao.AccountDaoImpl"/>
<dao name="UserAccount" class="com.cloud.user.dao.UserAccountDaoImpl"/>
<dao name="Usage VmInstance" class="com.cloud.usage.dao.UsageVMInstanceDaoImpl"/>
<dao name="Usage Network" class="com.cloud.usage.dao.UsageNetworkDaoImpl"/>
<dao name="Usage IPAddress" class="com.cloud.usage.dao.UsageIPAddressDaoImpl"/>
<dao name="Usage Volume" class="com.cloud.usage.dao.UsageVolumeDaoImpl"/>
<dao name="Usage Storage" class="com.cloud.usage.dao.UsageStorageDaoImpl"/>
<dao name="Usage Load Balancer Policy" class="com.cloud.usage.dao.UsageLoadBalancerPolicyDaoImpl"/>
<dao name="Usage Port Forwarding Rule" class="com.cloud.usage.dao.UsagePortForwardingRuleDaoImpl"/>
<dao name="Usage Network Offering" class="com.cloud.usage.dao.UsageNetworkOfferingDaoImpl"/>
<dao name="Usage Job" class="com.cloud.usage.dao.UsageJobDaoImpl"/>
<dao name="Configuration" class="com.cloud.configuration.dao.ConfigurationDaoImpl"/>
<dao name="Alert" class="com.cloud.alert.dao.AlertDaoImpl"/>
<dao name="Usage Event" class="com.cloud.event.dao.UsageEventDaoImpl"/>
<manager name="usage manager" class="com.cloud.usage.UsageManagerImpl">
<param name="period">DAILY</param> <!-- DAILY, WEEKLY, MONTHLY; how often it creates usage records -->
</manager>
<manager name="Alert Manager" class="com.cloud.usage.UsageAlertManagerImpl">
</manager>
</usage-server>
</components.xml>

View File

@ -0,0 +1,82 @@
#!/bin/bash
# chkconfig: 35 99 10
# description: CloudStack Usage Monitor
# WARNING: if this script is changed, then all other initscripts MUST BE changed to match it as well
#
# SysV (Red Hat style) init script for the CloudStack usage server.
# The @TOKEN@ placeholders are substituted with real paths at build/packaging time.

# Red Hat init helpers: daemon, killproc, status, failure, ...
. /etc/rc.d/init.d/functions

whatami=cloud-usage

# set environment variables
SHORTNAME="$whatami"
PIDFILE=@PIDDIR@/"$whatami".pid
LOCKFILE=@LOCKDIR@/"$SHORTNAME"
LOGFILE=@USAGELOG@
PROGNAME="CloudStack Usage Monitor"
USER=@MSUSER@

# Allow the sysconfig file to override OPTIONS (cleared first so stale
# environment values cannot leak into the daemon's command line).
unset OPTIONS
[ -r @SYSCONFDIR@/sysconfig/"$SHORTNAME" ] && source @SYSCONFDIR@/sysconfig/"$SHORTNAME"

# The daemonizer detaches, writes the pid file and redirects output; it runs
# the usage-runner wrapper, which in turn execs the Java usage server.
DAEMONIZE=@BINDIR@/@PACKAGE@-daemonize
PROG=@LIBEXECDIR@/usage-runner

# Start the usage server via the daemonizer.  Refuses to start when the host
# name does not resolve, since the server requires a resolvable FQDN.
start() {
    echo -n $"Starting $PROGNAME: "
    if hostname --fqdn >/dev/null 2>&1 ; then
        daemon --check=$SHORTNAME --pidfile=${PIDFILE} "$DAEMONIZE" \
            -n "$SHORTNAME" -p "$PIDFILE" -l "$LOGFILE" -u "$USER" "$PROG" $OPTIONS
        RETVAL=$?
        echo
    else
        failure
        echo
        echo The host name does not resolve properly to an IP address. Cannot start "$PROGNAME". > /dev/stderr
        RETVAL=9
    fi
    [ $RETVAL = 0 ] && touch ${LOCKFILE}
    return $RETVAL
}

# Stop the usage server and remove the lock and pid files on success.
stop() {
    echo -n $"Stopping $PROGNAME: "
    killproc -p ${PIDFILE} $SHORTNAME # -d 10 $SHORTNAME
    RETVAL=$?
    echo
    [ $RETVAL = 0 ] && rm -f ${LOCKFILE} ${PIDFILE}
}

# See how we were called.
case "$1" in
    start)
        start
        ;;
    stop)
        stop
        ;;
    status)
        status -p ${PIDFILE} $SHORTNAME
        RETVAL=$?
        ;;
    restart)
        stop
        sleep 3
        start
        ;;
    condrestart)
        # Restart only if the service is currently running.
        if status -p ${PIDFILE} $SHORTNAME >&/dev/null; then
            stop
            sleep 3
            start
        fi
        ;;
    *)
        echo $"Usage: $whatami {start|stop|restart|condrestart|status|help}"
        RETVAL=3
esac
exit $RETVAL

View File

@ -0,0 +1,82 @@
#!/bin/bash
# chkconfig: 35 99 10
# description: CloudStack Usage Monitor
# WARNING: if this script is changed, then all other initscripts MUST BE changed to match it as well
#
# SysV (Red Hat style) init script for the CloudStack usage server.
# The @TOKEN@ placeholders are substituted with real paths at build/packaging time.

# Red Hat init helpers: daemon, killproc, status, failure, ...
. /etc/rc.d/init.d/functions

whatami=cloud-usage

# set environment variables
SHORTNAME="$whatami"
PIDFILE=@PIDDIR@/"$whatami".pid
LOCKFILE=@LOCKDIR@/"$SHORTNAME"
LOGFILE=@USAGELOG@
PROGNAME="CloudStack Usage Monitor"
USER=@MSUSER@

# Allow the sysconfig file to override OPTIONS (cleared first so stale
# environment values cannot leak into the daemon's command line).
unset OPTIONS
[ -r @SYSCONFDIR@/sysconfig/"$SHORTNAME" ] && source @SYSCONFDIR@/sysconfig/"$SHORTNAME"

# The daemonizer detaches, writes the pid file and redirects output; it runs
# the usage-runner wrapper, which in turn execs the Java usage server.
DAEMONIZE=@BINDIR@/@PACKAGE@-daemonize
PROG=@LIBEXECDIR@/usage-runner

# Start the usage server via the daemonizer.  Refuses to start when the host
# name does not resolve, since the server requires a resolvable FQDN.
start() {
    echo -n $"Starting $PROGNAME: "
    if hostname --fqdn >/dev/null 2>&1 ; then
        daemon --check=$SHORTNAME --pidfile=${PIDFILE} "$DAEMONIZE" \
            -n "$SHORTNAME" -p "$PIDFILE" -l "$LOGFILE" -u "$USER" "$PROG" $OPTIONS
        RETVAL=$?
        echo
    else
        failure
        echo
        echo The host name does not resolve properly to an IP address. Cannot start "$PROGNAME". > /dev/stderr
        RETVAL=9
    fi
    [ $RETVAL = 0 ] && touch ${LOCKFILE}
    return $RETVAL
}

# Stop the usage server and remove the lock and pid files on success.
stop() {
    echo -n $"Stopping $PROGNAME: "
    killproc -p ${PIDFILE} $SHORTNAME # -d 10 $SHORTNAME
    RETVAL=$?
    echo
    [ $RETVAL = 0 ] && rm -f ${LOCKFILE} ${PIDFILE}
}

# See how we were called.
case "$1" in
    start)
        start
        ;;
    stop)
        stop
        ;;
    status)
        status -p ${PIDFILE} $SHORTNAME
        RETVAL=$?
        ;;
    restart)
        stop
        sleep 3
        start
        ;;
    condrestart)
        # Restart only if the service is currently running.
        if status -p ${PIDFILE} $SHORTNAME >&/dev/null; then
            stop
            sleep 3
            start
        fi
        ;;
    *)
        echo $"Usage: $whatami {start|stop|restart|condrestart|status|help}"
        RETVAL=3
esac
exit $RETVAL

View File

@ -0,0 +1,82 @@
#!/bin/bash
# chkconfig: 35 99 10
# description: CloudStack Usage Monitor
# WARNING: if this script is changed, then all other initscripts MUST BE changed to match it as well
#
# SysV (Red Hat style) init script for the CloudStack usage server.
# The @TOKEN@ placeholders are substituted with real paths at build/packaging time.

# Red Hat init helpers: daemon, killproc, status, failure, ...
. /etc/rc.d/init.d/functions

whatami=cloud-usage

# set environment variables
SHORTNAME="$whatami"
PIDFILE=@PIDDIR@/"$whatami".pid
LOCKFILE=@LOCKDIR@/"$SHORTNAME"
LOGFILE=@USAGELOG@
PROGNAME="CloudStack Usage Monitor"
USER=@MSUSER@

# Allow the sysconfig file to override OPTIONS (cleared first so stale
# environment values cannot leak into the daemon's command line).
unset OPTIONS
[ -r @SYSCONFDIR@/sysconfig/"$SHORTNAME" ] && source @SYSCONFDIR@/sysconfig/"$SHORTNAME"

# The daemonizer detaches, writes the pid file and redirects output; it runs
# the usage-runner wrapper, which in turn execs the Java usage server.
DAEMONIZE=@BINDIR@/@PACKAGE@-daemonize
PROG=@LIBEXECDIR@/usage-runner

# Start the usage server via the daemonizer.  Refuses to start when the host
# name does not resolve, since the server requires a resolvable FQDN.
start() {
    echo -n $"Starting $PROGNAME: "
    if hostname --fqdn >/dev/null 2>&1 ; then
        daemon --check=$SHORTNAME --pidfile=${PIDFILE} "$DAEMONIZE" \
            -n "$SHORTNAME" -p "$PIDFILE" -l "$LOGFILE" -u "$USER" "$PROG" $OPTIONS
        RETVAL=$?
        echo
    else
        failure
        echo
        echo The host name does not resolve properly to an IP address. Cannot start "$PROGNAME". > /dev/stderr
        RETVAL=9
    fi
    [ $RETVAL = 0 ] && touch ${LOCKFILE}
    return $RETVAL
}

# Stop the usage server and remove the lock and pid files on success.
stop() {
    echo -n $"Stopping $PROGNAME: "
    killproc -p ${PIDFILE} $SHORTNAME # -d 10 $SHORTNAME
    RETVAL=$?
    echo
    [ $RETVAL = 0 ] && rm -f ${LOCKFILE} ${PIDFILE}
}

# See how we were called.
case "$1" in
    start)
        start
        ;;
    stop)
        stop
        ;;
    status)
        status -p ${PIDFILE} $SHORTNAME
        RETVAL=$?
        ;;
    restart)
        stop
        sleep 3
        start
        ;;
    condrestart)
        # Restart only if the service is currently running.
        if status -p ${PIDFILE} $SHORTNAME >&/dev/null; then
            stop
            sleep 3
            start
        fi
        ;;
    *)
        echo $"Usage: $whatami {start|stop|restart|condrestart|status|help}"
        RETVAL=3
esac
exit $RETVAL

View File

@ -0,0 +1,96 @@
#!/bin/bash
# chkconfig: 35 99 10
# description: CloudStack Usage Monitor
# WARNING: if this script is changed, then all other initscripts MUST BE changed to match it as well
#
# LSB (Debian style) init script for the CloudStack usage server.
# The @TOKEN@ placeholders are substituted with real paths at build/packaging time.

. /lib/lsb/init-functions
. /etc/default/rcS

whatami=cloud-usage

# set environment variables
SHORTNAME="$whatami"
PIDFILE=@PIDDIR@/"$whatami".pid
LOCKFILE=@LOCKDIR@/"$SHORTNAME"
LOGFILE=@USAGELOG@
PROGNAME="CloudStack Usage Monitor"
USER=@MSUSER@

# Allow /etc/default to override OPTIONS (cleared first so stale environment
# values cannot leak into the daemon's command line).
unset OPTIONS
[ -r @SYSCONFDIR@/default/"$SHORTNAME" ] && source @SYSCONFDIR@/default/"$SHORTNAME"

# The daemonizer detaches, writes the pid file and redirects output; it runs
# the usage-runner wrapper, which in turn execs the Java usage server.
DAEMONIZE=@BINDIR@/@PACKAGE@-daemonize
PROG=@LIBEXECDIR@/usage-runner

# Default exit status.  Previously RETVAL was never set on the start/stop/
# restart paths, so the final `exit $RETVAL` saw an unset variable.
RETVAL=0

# Start the usage server, verifying it stays alive for at least a second.
start() {
    log_daemon_msg $"Starting $PROGNAME" "$SHORTNAME"
    if [ -s "$PIDFILE" ] && kill -0 $(cat "$PIDFILE") >/dev/null 2>&1; then
        log_progress_msg "apparently already running"
        log_end_msg 0
        exit 0
    fi
    if hostname --fqdn >/dev/null 2>&1 ; then
        true
    else
        log_failure_msg "The host name does not resolve properly to an IP address. Cannot start $PROGNAME"
        log_end_msg 1
        exit 1
    fi
    # BUGFIX: capture the daemon's exit status *before* testing it.  The
    # previous form `if start-stop-daemon ...; RETVAL=$?; then` tested the
    # (always successful) assignment, so the failure branch was unreachable.
    start-stop-daemon --start --quiet \
        --pidfile "$PIDFILE" \
        --exec "$DAEMONIZE" -- -n "$SHORTNAME" -p "$PIDFILE" -l "$LOGFILE" -u "$USER" "$PROG" $OPTIONS
    RETVAL=$?
    if [ $RETVAL -eq 0 ]; then
        rc=0
        sleep 1
        # The daemonizer may exit 0 while the child dies immediately; verify
        # the recorded pid is still alive.
        if ! kill -0 $(cat "$PIDFILE") >/dev/null 2>&1; then
            log_failure_msg "$PROG failed to start"
            rc=1
        fi
    else
        rc=1
    fi
    if [ $rc -eq 0 ]; then
        log_end_msg 0
    else
        log_end_msg 1
        rm -f "$PIDFILE"
    fi
    RETVAL=$rc
}

# Stop the usage server and remove its pid file.
stop() {
    echo -n $"Stopping $PROGNAME" "$SHORTNAME"
    start-stop-daemon --stop --quiet --oknodo --pidfile "$PIDFILE"
    RETVAL=$?
    log_end_msg $RETVAL
    rm -f "$PIDFILE"
}

# See how we were called.
case "$1" in
    start)
        start
        ;;
    stop)
        stop
        ;;
    status)
        status_of_proc -p "$PIDFILE" "$PROG" "$SHORTNAME"
        RETVAL=$?
        ;;
    restart)
        stop
        sleep 3
        start
        ;;
    *)
        echo $"Usage: $whatami {start|stop|restart|status|help}"
        RETVAL=3
esac
exit $RETVAL

21
usage/libexec/usage-runner.in Executable file
View File

@ -0,0 +1,21 @@
#!/bin/bash
# Wrapper that assembles the usage server's classpath and execs the JVM.
# The @TOKEN@ placeholders are substituted with real values at build/packaging time.

SYSTEMJARS="@SYSTEMJARS@"
# Prefer the distro's build-classpath helper; fall back to the baked-in
# system classpath when the helper is missing or fails.
SCP=$(build-classpath $SYSTEMJARS) ; if [ $? != 0 ] ; then SCP="@SYSTEMCLASSPATH@" ; fi
DCP="@DEPSCLASSPATH@"
ACP="@USAGECLASSPATH@"
export CLASSPATH=$SCP:$DCP:$ACP:@USAGESYSCONFDIR@

# Premium jars are prepended so they take precedence over the base classpath.
for jarfile in "@PREMIUMJAVADIR@"/* ; do
    if [ ! -e "$jarfile" ] ; then continue ; fi
    CLASSPATH=$jarfile:$CLASSPATH
done
# Plugin jars are prepended as well (added last, so highest precedence).
for plugin in "@PLUGINJAVADIR@"/* ; do
    if [ ! -e "$plugin" ] ; then continue ; fi
    CLASSPATH=$plugin:$CLASSPATH
done
export CLASSPATH

set -e
echo Current directory is "$PWD"
echo CLASSPATH to run the usage server: "$CLASSPATH"
# Replace this shell with the JVM; heap dumps on OOM land in the usage log dir.
# Extra arguments passed to this script become JVM options ("$@" precedes the main class).
exec java -cp "$CLASSPATH" -Dpid=$$ -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=@USAGELOGDIR@ "$@" com.cloud.usage.UsageServer

33
usage/scripts/usageserver.sh Executable file
View File

@ -0,0 +1,33 @@
#!/usr/bin/env bash
#
# Copyright (C) 2011 Cloud.com, Inc. All rights reserved.
#
# Development launcher for the usage server: builds a classpath out of the
# jars deployed under CATALINA_HOME and starts the JVM in the foreground.

PATHSEP=':'
if [[ $OSTYPE == "cygwin" ]]; then
    # Windows/Cygwin JVMs want ';' separators and mixed-mode paths.
    PATHSEP=';'
    export CATALINA_HOME=`cygpath -m $CATALINA_HOME`
fi

# Start with the current directory, then add each deployed cloud jar.
LIBDIR=$CATALINA_HOME/webapps/client/WEB-INF/lib
CP=./
for name in vmops-server.jar vmops-server-extras.jar vmops-utils.jar vmops-core.jar vmops-usage.jar; do
    CP=${CP}$PATHSEP$LIBDIR/$name
done

# Tomcat's conf directory (logging / db properties) and every Tomcat lib jar.
CP=${CP}$PATHSEP$CATALINA_HOME/conf
for file in $CATALINA_HOME/lib/*.jar; do
    CP=${CP}$PATHSEP$file
done
#echo CP is $CP

# Uncomment the second line to wait for a remote debugger on the given port.
DEBUG_OPTS=
#DEBUG_OPTS=-Xrunjdwp:transport=dt_socket,address=$1,server=y,suspend=n

java -cp $CP $DEBUG_OPTS -Dcatalina.home=${CATALINA_HOME} -Dpid=$$ com.vmops.usage.UsageServer $*

View File

@ -0,0 +1,260 @@
/**
* * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved
*
*
*/
package com.cloud.usage;
import java.io.UnsupportedEncodingException;
import java.util.Date;
import java.util.Map;
import java.util.Properties;
import javax.ejb.Local;
import javax.mail.Authenticator;
import javax.mail.MessagingException;
import javax.mail.PasswordAuthentication;
import javax.mail.Session;
import javax.mail.URLName;
import javax.mail.Message.RecipientType;
import javax.mail.internet.InternetAddress;
import javax.naming.ConfigurationException;
import org.apache.log4j.Logger;
import com.cloud.alert.AlertManager;
import com.cloud.alert.AlertVO;
import com.cloud.alert.dao.AlertDao;
import com.cloud.configuration.dao.ConfigurationDao;
import com.cloud.utils.NumbersUtil;
import com.cloud.utils.component.ComponentLocator;
import com.sun.mail.smtp.SMTPMessage;
import com.sun.mail.smtp.SMTPSSLTransport;
import com.sun.mail.smtp.SMTPTransport;
/**
 * {@link AlertManager} implementation used by the standalone usage server.
 * Alerts are deduplicated against the alert table and, when SMTP is
 * configured, delivered by email.  Capacity recalculation is a no-op here
 * (that belongs to the management server).
 */
@Local(value={AlertManager.class})
public class UsageAlertManagerImpl implements AlertManager {
    private static final Logger s_logger = Logger.getLogger(UsageAlertManagerImpl.class.getName());

    // Component name assigned by the locator in configure().
    private String _name = null;
    // Email helper; constructed in configure() (may hold a null SMTP session
    // when no alert.smtp.host is configured, in which case nothing is mailed).
    private EmailAlert _emailAlert;
    private AlertDao _alertDao;

    /**
     * Reads the SMTP/alert settings from the management-server configuration
     * section and wires up the email helper and the alert DAO.
     *
     * @return false when a required DAO cannot be located
     */
    @Override
    public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
        _name = name;
        ComponentLocator locator = ComponentLocator.getCurrentLocator();
        ConfigurationDao configDao = locator.getDao(ConfigurationDao.class);
        if (configDao == null) {
            s_logger.error("Unable to get the configuration dao.");
            return false;
        }
        Map<String, String> configs = configDao.getConfiguration("management-server", params);
        // set up the email system for alerts
        String emailAddressList = configs.get("alert.email.addresses");
        String[] emailAddresses = null;
        if (emailAddressList != null) {
            // comma-separated list of recipients
            emailAddresses = emailAddressList.split(",");
        }
        String smtpHost = configs.get("alert.smtp.host");
        int smtpPort = NumbersUtil.parseInt(configs.get("alert.smtp.port"), 25); // 25 = default SMTP port
        String useAuthStr = configs.get("alert.smtp.useAuth");
        boolean useAuth = ((useAuthStr == null) ? false : Boolean.parseBoolean(useAuthStr));
        String smtpUsername = configs.get("alert.smtp.username");
        String smtpPassword = configs.get("alert.smtp.password");
        String emailSender = configs.get("alert.email.sender");
        String smtpDebugStr = configs.get("alert.smtp.debug");
        boolean smtpDebug = false;
        if (smtpDebugStr != null) {
            smtpDebug = Boolean.parseBoolean(smtpDebugStr);
        }
        _emailAlert = new EmailAlert(emailAddresses, smtpHost, smtpPort, useAuth, smtpUsername, smtpPassword, emailSender, smtpDebug);
        _alertDao = locator.getDao(AlertDao.class);
        if (_alertDao == null) {
            s_logger.error("Unable to get the alert dao.");
            return false;
        }
        return true;
    }

    @Override
    public String getName() {
        return _name;
    }

    // No background work: start/stop are trivial lifecycle hooks.
    @Override
    public boolean start() {
        return true;
    }

    @Override
    public boolean stop() {
        return true;
    }

    /**
     * Marks the most recent alert of the given type/zone/pod as resolved.
     * Failures are logged and swallowed (alerting must never break the caller).
     */
    @Override
    public void clearAlert(short alertType, long dataCenterId, long podId) {
        try {
            if (_emailAlert != null) {
                _emailAlert.clearAlert(alertType, dataCenterId, podId);
            }
        } catch (Exception ex) {
            s_logger.error("Problem clearing email alert", ex);
        }
    }

    /**
     * Records an alert and emails it (deduplicated for most alert types).
     * Failures are logged and swallowed.
     */
    @Override
    public void sendAlert(short alertType, long dataCenterId, Long podId, String subject, String body) {
        // TODO: queue up these messages and send them as one set of issues once a certain number of issues is reached?  If that's the case,
        //       shouldn't we have a type/severity as part of the API so that severe errors get sent right away?
        try {
            if (_emailAlert != null) {
                _emailAlert.sendAlert(alertType, dataCenterId, podId, subject, body);
            }
        } catch (Exception ex) {
            s_logger.error("Problem sending email alert", ex);
        }
    }

    /**
     * Inner helper that owns the SMTP session and the alert-table bookkeeping.
     */
    class EmailAlert {
        private Session _smtpSession;
        private InternetAddress[] _recipientList;
        private final String _smtpHost;
        private int _smtpPort = -1;
        private boolean _smtpUseAuth = false;
        private final String _smtpUsername;
        private final String _smtpPassword;
        private final String _emailSender;

        /**
         * Builds the recipient list and, when a host is configured, a JavaMail
         * session (authenticated when both username and password are present).
         */
        public EmailAlert(String[] recipientList, String smtpHost, int smtpPort, boolean smtpUseAuth, final String smtpUsername, final String smtpPassword, String emailSender, boolean smtpDebug) {
            if (recipientList != null) {
                _recipientList = new InternetAddress[recipientList.length];
                for (int i = 0; i < recipientList.length; i++) {
                    try {
                        _recipientList[i] = new InternetAddress(recipientList[i], recipientList[i]);
                    } catch (Exception ex) {
                        // A bad address leaves a null slot; the rest still get mail.
                        s_logger.error("Exception creating address for: " + recipientList[i], ex);
                    }
                }
            }
            _smtpHost = smtpHost;
            _smtpPort = smtpPort;
            _smtpUseAuth = smtpUseAuth;
            _smtpUsername = smtpUsername;
            _smtpPassword = smtpPassword;
            _emailSender = emailSender;
            if (_smtpHost != null) {
                Properties smtpProps = new Properties();
                // Both smtp and smtps variants are set so either transport works.
                smtpProps.put("mail.smtp.host", smtpHost);
                smtpProps.put("mail.smtp.port", smtpPort);
                smtpProps.put("mail.smtp.auth", ""+smtpUseAuth);
                if (smtpUsername != null) {
                    smtpProps.put("mail.smtp.user", smtpUsername);
                }
                smtpProps.put("mail.smtps.host", smtpHost);
                smtpProps.put("mail.smtps.port", smtpPort);
                smtpProps.put("mail.smtps.auth", ""+smtpUseAuth);
                if (smtpUsername != null) {
                    smtpProps.put("mail.smtps.user", smtpUsername);
                }
                if ((smtpUsername != null) && (smtpPassword != null)) {
                    _smtpSession = Session.getInstance(smtpProps, new Authenticator() {
                        @Override
                        protected PasswordAuthentication getPasswordAuthentication() {
                            return new PasswordAuthentication(smtpUsername, smtpPassword);
                        }
                    });
                } else {
                    _smtpSession = Session.getInstance(smtpProps);
                }
                _smtpSession.setDebug(smtpDebug);
            } else {
                // No SMTP host configured: alerts are recorded but not mailed.
                _smtpSession = null;
            }
        }

        // TODO: make sure this handles SSL transport (useAuth is true) and regular
        /**
         * Persists the alert (deduplicating for all but the listed per-resource
         * alert types) and mails it when an SMTP session exists.  If an alert of
         * the same type/zone/pod was already sent, the email is skipped.
         */
        public void sendAlert(short alertType, long dataCenterId, Long podId, String subject, String content) throws MessagingException, UnsupportedEncodingException {
            AlertVO alert = null;
            // These per-resource alert types are never deduplicated; all others
            // check the last recorded alert first.
            if ((alertType != AlertManager.ALERT_TYPE_HOST) &&
                (alertType != AlertManager.ALERT_TYPE_USERVM) &&
                (alertType != AlertManager.ALERT_TYPE_DOMAIN_ROUTER) &&
                (alertType != AlertManager.ALERT_TYPE_CONSOLE_PROXY) &&
                (alertType != AlertManager.ALERT_TYPE_STORAGE_MISC) &&
                (alertType != AlertManager.ALERT_TYPE_MANAGMENT_NODE)) {
                alert = _alertDao.getLastAlert(alertType, dataCenterId, podId);
            }
            if (alert == null) {
                // set up a new alert
                AlertVO newAlert = new AlertVO();
                newAlert.setType(alertType);
                newAlert.setSubject(subject);
                newAlert.setPodId(podId);
                newAlert.setDataCenterId(dataCenterId);
                newAlert.setSentCount(1); // initialize sent count to 1 since we are now sending an alert
                newAlert.setLastSent(new Date());
                _alertDao.persist(newAlert);
            } else {
                if (s_logger.isDebugEnabled()) {
                    s_logger.debug("Have already sent: " + alert.getSentCount() + " emails for alert type '" + alertType + "' -- skipping send email");
                }
                return;
            }
            if (_smtpSession != null) {
                // NOTE(review): if alert.email.addresses is unset while an SMTP
                // host is configured, _recipientList is null and the loop below
                // throws NPE -- confirm configuration always pairs the two.
                SMTPMessage msg = new SMTPMessage(_smtpSession);
                msg.setSender(new InternetAddress(_emailSender, _emailSender));
                msg.setFrom(new InternetAddress(_emailSender, _emailSender));
                for (InternetAddress address : _recipientList) {
                    msg.addRecipient(RecipientType.TO, address);
                }
                msg.setSubject(subject);
                msg.setSentDate(new Date());
                msg.setContent(content, "text/plain");
                msg.saveChanges();
                SMTPTransport smtpTrans = null;
                if (_smtpUseAuth) {
                    smtpTrans = new SMTPSSLTransport(_smtpSession, new URLName("smtp", _smtpHost, _smtpPort, null, _smtpUsername, _smtpPassword));
                } else {
                    smtpTrans = new SMTPTransport(_smtpSession, new URLName("smtp", _smtpHost, _smtpPort, null, _smtpUsername, _smtpPassword));
                }
                smtpTrans.connect();
                smtpTrans.sendMessage(msg, msg.getAllRecipients());
                smtpTrans.close();
            }
        }

        /**
         * Stamps the latest matching alert (if any) as resolved now.
         * alertType -1 is a no-op sentinel.
         */
        public void clearAlert(short alertType, long dataCenterId, Long podId) {
            if (alertType != -1) {
                AlertVO alert = _alertDao.getLastAlert(alertType, dataCenterId, podId);
                if (alert != null) {
                    AlertVO updatedAlert = _alertDao.createForUpdate();
                    updatedAlert.setResolved(new Date());
                    _alertDao.update(alert.getId(), updatedAlert);
                }
            }
        }
    }

    /** Not applicable in the usage server; intentionally a no-op. */
    @Override
    public void recalculateCapacity() {
        // TODO Auto-generated method stub
    }
}

View File

@ -0,0 +1,15 @@
/**
* * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved
*
*
*/
package com.cloud.usage;
import com.cloud.usage.UsageJobVO;
import com.cloud.utils.component.Manager;
/**
 * Manager contract for the usage server's record-generation pipeline.
 */
public interface UsageManager extends Manager {
    /** Requests that a usage parsing pass be scheduled. */
    public void scheduleParse();

    /**
     * Processes raw usage events for the given job over the window
     * [startDateMillis, endDateMillis] (epoch milliseconds).
     */
    public void parse(UsageJobVO job, long startDateMillis, long endDateMillis);
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,217 @@
/**
* * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved
*
*
*/
package com.cloud.usage;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import com.cloud.utils.db.Transaction;
/**
 * Standalone sanity checker for the cloud_usage database.  Runs a series of
 * consistency queries (usage records surviving past resource removal,
 * duplicate helper-table rows, orphaned running-VM entries) and returns the
 * accumulated error report.  A checkpoint file remembers the highest
 * cloud_usage id already verified so repeat runs only scan new records.
 *
 * Fixes vs. the original: JDBC statements/result sets, the DB connection and
 * the checkpoint reader/writer are now closed; the error message typo
 * ("records is" -> "records in") is corrected.
 */
public class UsageSanityChecker {

    /** Accumulated error report; one line per failed check. */
    private StringBuffer errors;
    /** Extra SQL predicate restricting checks to records after the last checkpoint. */
    private String lastCheckId = "";
    /** File persisting the highest cloud_usage id already checked. */
    private final String lastCheckFile = "/usr/local/libexec/sanity-check-last-id";

    /** Closes a result set and statement, swallowing close-time errors (best effort). */
    private static void closeQuietly(ResultSet rs, PreparedStatement pstmt) {
        if (rs != null) {
            try { rs.close(); } catch (SQLException e) { /* best effort */ }
        }
        if (pstmt != null) {
            try { pstmt.close(); } catch (SQLException e) { /* best effort */ }
        }
    }

    /** Appends one formatted error line to the report. */
    private void addError(String message) {
        errors.append(message);
        errors.append("\n");
    }

    /**
     * Runs a single COUNT(*) query; when the count is positive, records
     * "Error: Found <count> <errorSuffix>" and returns false.
     */
    private boolean checkSingleCount(Connection conn, String sql, String errorSuffix) throws SQLException {
        PreparedStatement pstmt = null;
        ResultSet rs = null;
        try {
            pstmt = conn.prepareStatement(sql);
            rs = pstmt.executeQuery();
            if (rs.next() && (rs.getInt(1) > 0)) {
                addError("Error: Found " + rs.getInt(1) + " " + errorSuffix);
                return false;
            }
            return true;
        } finally {
            closeQuietly(rs, pstmt);
        }
    }

    /**
     * Checks for usage records whose raw_usage exceeds the configured
     * aggregation window (types 4 and 5 are exempt).
     */
    private boolean checkMaxUsage(Connection conn) throws SQLException {
        int aggregationRange = 1440; // default: minutes in one day
        PreparedStatement pstmt = null;
        ResultSet rs = null;
        try {
            pstmt = conn.prepareStatement("SELECT value FROM `cloud`.`configuration` where name = 'usage.stats.job.aggregation.range'");
            rs = pstmt.executeQuery();
            if (rs.next()) {
                aggregationRange = rs.getInt(1);
            } else {
                System.out.println("Failed to retrieve aggregation range. Using default : " + aggregationRange);
            }
        } finally {
            closeQuietly(rs, pstmt);
        }
        int aggregationHours = aggregationRange / 60;
        // Check for usage records with raw_usage > aggregationHours
        return checkSingleCount(conn,
                "SELECT count(*) FROM `cloud_usage`.`cloud_usage` cu where usage_type not in (4,5) and raw_usage > " + aggregationHours + lastCheckId,
                "usage records with raw_usage > " + aggregationHours);
    }

    /** VM usage consistency: post-destroy records, duplicate and orphaned helper rows. */
    private boolean checkVmUsage(Connection conn) throws SQLException {
        boolean success = true;
        // Vm usage records created after the vm is destroyed
        success &= checkSingleCount(conn,
                "select count(*) from cloud_usage.cloud_usage cu inner join cloud.vm_instance vm where vm.type = 'User' " +
                "and cu.usage_type in (1 , 2) and cu.usage_id = vm.id and cu.start_date > vm.removed" + lastCheckId,
                "Vm usage records which are created after Vm is destroyed");
        // Vms with multiple open "running" rows in the helper table
        success &= checkSingleCount(conn,
                "select sum(cnt) from (select count(*) as cnt from cloud_usage.usage_vm_instance where usage_type =1 " +
                "and end_date is null group by vm_instance_id having count(vm_instance_id) > 1) c ;",
                "duplicate running Vm entries in vm usage helper table");
        // Vms with multiple open "allocated" rows in the helper table
        success &= checkSingleCount(conn,
                "select sum(cnt) from (select count(*) as cnt from cloud_usage.usage_vm_instance where usage_type =2 " +
                "and end_date is null group by vm_instance_id having count(vm_instance_id) > 1) c ;",
                "duplicate allocated Vm entries in vm usage helper table");
        // Running rows without a matching allocated row
        success &= checkSingleCount(conn,
                "select count(vm_instance_id) from cloud_usage.usage_vm_instance o where o.end_date is null and o.usage_type=1 and not exists " +
                "(select 1 from cloud_usage.usage_vm_instance i where i.vm_instance_id=o.vm_instance_id and usage_type=2 and i.end_date is null)",
                "running Vm entries without corresponding allocated entries in vm usage helper table");
        return success;
    }

    /** Volume usage consistency: post-removal records and duplicate helper rows. */
    private boolean checkVolumeUsage(Connection conn) throws SQLException {
        boolean success = true;
        // Volume usage records created after the volume is removed
        success &= checkSingleCount(conn,
                "select count(*) from cloud_usage.cloud_usage cu inner join cloud.volumes v " +
                "where cu.usage_type = 6 and cu.usage_id = v.id and cu.start_date > v.removed" + lastCheckId,
                "volume usage records which are created after volume is removed");
        // Duplicate open rows in the volume helper table
        success &= checkSingleCount(conn,
                "select sum(cnt) from (select count(*) as cnt from cloud_usage.usage_volume " +
                "where deleted is null group by id having count(id) > 1) c;",
                "duplicate records in volume usage helper table");
        return success;
    }

    /** Template/ISO usage records created after the template/ISO is removed. */
    private boolean checkTemplateISOUsage(Connection conn) throws SQLException {
        return checkSingleCount(conn,
                "select count(*) from cloud_usage.cloud_usage cu inner join cloud.template_zone_ref tzr " +
                "where cu.usage_id = tzr.template_id and cu.zone_id = tzr.zone_id and cu.usage_type in (7,8) and cu.start_date > tzr.removed" + lastCheckId,
                "template/ISO usage records which are created after it is removed");
    }

    /** Snapshot usage records created after the snapshot is removed. */
    private boolean checkSnapshotUsage(Connection conn) throws SQLException {
        return checkSingleCount(conn,
                "select count(*) from cloud_usage.cloud_usage cu inner join cloud.snapshots s " +
                "where cu.usage_id = s.id and cu.usage_type = 9 and cu.start_date > s.removed" + lastCheckId,
                "snapshot usage records which are created after snapshot is removed");
    }

    /** Loads the last checked id from the checkpoint file into lastCheckId (best effort). */
    private void readLastCheckId() {
        BufferedReader reader = null;
        try {
            reader = new BufferedReader(new FileReader(lastCheckFile));
            String lastIdText = reader.readLine();
            if (lastIdText != null) {
                int lastId = Integer.parseInt(lastIdText);
                if (lastId > 0) {
                    lastCheckId = " and cu.id > " + lastId;
                }
            }
        } catch (Exception e) {
            // No checkpoint yet (first run) or unreadable file: check everything.
        } finally {
            if (reader != null) {
                try { reader.close(); } catch (IOException e) { /* best effort */ }
            }
        }
    }

    /** Persists the highest checked id for the next run (best effort). */
    private void writeLastCheckId(int maxId) {
        BufferedWriter out = null;
        try {
            out = new BufferedWriter(new FileWriter(lastCheckFile));
            out.write("" + maxId);
        } catch (IOException e) {
            // Checkpoint could not be persisted; the next run simply re-checks.
        } finally {
            if (out != null) {
                try { out.close(); } catch (IOException e) { /* best effort */ }
            }
        }
    }

    /**
     * Runs every sanity check against records between the last checkpoint and
     * the current maximum id, then advances the checkpoint.
     *
     * @return the accumulated error report; empty when everything is consistent
     */
    public String runSanityCheck() throws SQLException {
        readLastCheckId();
        Connection conn = Transaction.getStandaloneConnection();
        int maxId = 0;
        try {
            PreparedStatement pstmt = null;
            ResultSet rs = null;
            try {
                // Pin an upper bound so records inserted mid-run are checked next time.
                pstmt = conn.prepareStatement("select max(id) from cloud_usage.cloud_usage");
                rs = pstmt.executeQuery();
                if (rs.next() && (rs.getInt(1) > 0)) {
                    maxId = rs.getInt(1);
                    lastCheckId += " and cu.id <= " + maxId;
                }
            } finally {
                closeQuietly(rs, pstmt);
            }
            errors = new StringBuffer();
            checkMaxUsage(conn);
            checkVmUsage(conn);
            checkVolumeUsage(conn);
            checkTemplateISOUsage(conn);
            checkSnapshotUsage(conn);
        } finally {
            try { conn.close(); } catch (SQLException e) { /* best effort */ }
        }
        writeLastCheckId(maxId);
        return errors.toString();
    }

    /** Command-line entry point: prints the error report (if any) to stdout. */
    public static void main(String args[]) {
        UsageSanityChecker usc = new UsageSanityChecker();
        try {
            String sanityErrors = usc.runSanityCheck();
            if (sanityErrors.length() > 0) {
                System.out.println(sanityErrors);
            }
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }
}

View File

@ -0,0 +1,30 @@
/**
* * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved
*
*
*/
package com.cloud.usage;
import org.apache.log4j.Logger;
import com.cloud.utils.component.ComponentLocator;
/**
 * Entry point for the standalone usage server process.
 */
public class UsageServer {
    private static final Logger s_logger = Logger.getLogger(UsageServer.class.getName());

    /** Component-locator key naming the usage server configuration. */
    public static final String Name = "usage-server";

    /**
     * Boots the component locator from usage-components.xml and reports
     * readiness once the usage manager has been resolved.
     *
     * @param args command-line arguments (unused)
     */
    public static void main(String[] args) {
        // TODO: do we need to communicate with mgmt server?
        final ComponentLocator locator =
                ComponentLocator.getLocator(UsageServer.Name, "usage-components.xml", "log4j-cloud_usage");
        final UsageManager usageManager = locator.getManager(UsageManager.class);
        if (usageManager == null) {
            // Nothing to run; the locator could not resolve the manager.
            return;
        }
        if (s_logger.isInfoEnabled()) {
            s_logger.info("UsageServer ready...");
        }
    }
}

View File

@ -0,0 +1,165 @@
/**
* * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved
*
*
*/
package com.cloud.usage.parser;
import java.text.DecimalFormat;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.log4j.Logger;
import com.cloud.usage.UsageIPAddressVO;
import com.cloud.usage.UsageServer;
import com.cloud.usage.UsageTypes;
import com.cloud.usage.UsageVO;
import com.cloud.usage.dao.UsageDao;
import com.cloud.usage.dao.UsageIPAddressDao;
import com.cloud.user.AccountVO;
import com.cloud.utils.Pair;
import com.cloud.utils.component.ComponentLocator;
/**
 * Aggregates allocated-public-IP usage for an account over a billing window
 * and persists one usage record per IP address with non-zero allocated time.
 */
public class IPAddressUsageParser {
    public static final Logger s_logger = Logger.getLogger(IPAddressUsageParser.class.getName());

    private static ComponentLocator _locator = ComponentLocator.getLocator(UsageServer.Name, "usage-components.xml", "log4j-cloud_usage");
    private static UsageDao m_usageDao = _locator.getDao(UsageDao.class);
    private static UsageIPAddressDao m_usageIPAddressDao = _locator.getDao(UsageIPAddressDao.class);

    /**
     * Parses IP address usage for the account between startDate and endDate.
     *
     * @param account   account whose IP usage is aggregated
     * @param startDate beginning of the aggregation range
     * @param endDate   end of the range; clipped to "now" when null or in the future
     * @return always true (parsing is best-effort; no failure path returns false)
     */
    public static boolean parse(AccountVO account, Date startDate, Date endDate) {
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Parsing IP Address usage for account: " + account.getId());
        }
        // Never bill into the future.
        if ((endDate == null) || endDate.after(new Date())) {
            endDate = new Date();
        }

        // - query usage_ip_address table with the following criteria:
        //     - look for an entry for accountId with start date in the given range
        //     - look for an entry for accountId with end date in the given range
        //     - look for an entry for accountId with end date null (currently running vm or owned IP)
        //     - look for an entry for accountId with start date before given range *and* end date after given range
        List<UsageIPAddressVO> usageIPAddress = m_usageIPAddressDao.getUsageRecords(account.getId(), account.getDomainId(), startDate, endDate);

        if (usageIPAddress.isEmpty()) {
            s_logger.debug("No IP Address usage for this period");
            return true;
        }

        // key -> (ipId, total allocated milliseconds within the range)
        Map<String, Pair<Long, Long>> usageMap = new HashMap<String, Pair<Long, Long>>();
        // key -> descriptive info needed to build the usage record
        Map<String, IpInfo> ipMap = new HashMap<String, IpInfo>();

        // Loop through all the usage IPs, accumulating allocated time per IP.
        for (UsageIPAddressVO usageIp : usageIPAddress) {
            long ipId = usageIp.getId();
            String key = "" + ipId;

            // store the info in the IP map
            ipMap.put(key, new IpInfo(usageIp.getZoneId(), ipId, usageIp.getAddress(), usageIp.isSourceNat()));

            Date ipAssignDate = usageIp.getAssigned();
            Date ipReleaseDeleteDate = usageIp.getReleased();

            // Clip the release date to the end of the range if the IP is still owned.
            if ((ipReleaseDeleteDate == null) || ipReleaseDeleteDate.after(endDate)) {
                ipReleaseDeleteDate = endDate;
            }
            // Clip the start date to the beginning of our aggregation range if the
            // IP has been owned since before the range.
            if (ipAssignDate.before(startDate)) {
                ipAssignDate = startDate;
            }

            // +1 makes this an inclusive check for milliseconds
            // (i.e. use n - m + 1 to find total number of millis to charge).
            long currentDuration = (ipReleaseDeleteDate.getTime() - ipAssignDate.getTime()) + 1;

            updateIpUsageData(usageMap, key, usageIp.getId(), currentDuration);
        }

        // Iterate entries directly instead of keySet() + get() per key.
        for (Map.Entry<String, Pair<Long, Long>> entry : usageMap.entrySet()) {
            Pair<Long, Long> ipTimeInfo = entry.getValue();
            long useTime = ipTimeInfo.second().longValue();

            // Only create a usage record if we have a running time greater than zero.
            if (useTime > 0L) {
                IpInfo info = ipMap.get(entry.getKey());
                createUsageRecord(info.getZoneId(), useTime, startDate, endDate, account, info.getIpId(), info.getIPAddress(), info.isSourceNat());
            }
        }

        return true;
    }

    /**
     * Adds {@code duration} milliseconds to the running total for the given
     * key, creating the entry on first sight.
     */
    private static void updateIpUsageData(Map<String, Pair<Long, Long>> usageDataMap, String key, long ipId, long duration) {
        Pair<Long, Long> ipUsageInfo = usageDataMap.get(key);
        if (ipUsageInfo == null) {
            // Long.valueOf over the deprecated Long(long) constructor.
            ipUsageInfo = new Pair<Long, Long>(Long.valueOf(ipId), Long.valueOf(duration));
        } else {
            Long runningTime = ipUsageInfo.second();
            runningTime = Long.valueOf(runningTime.longValue() + duration);
            ipUsageInfo = new Pair<Long, Long>(ipUsageInfo.first(), runningTime);
        }
        usageDataMap.put(key, ipUsageInfo);
    }

    /**
     * Converts an allocated time in milliseconds to hours and persists one
     * IP address usage record.
     */
    private static void createUsageRecord(long zoneId, long runningTime, Date startDate, Date endDate, AccountVO account, long IpId, String IPAddress, boolean isSourceNat) {
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Total usage time " + runningTime + "ms");
        }

        // Milliseconds to hours; hours are the smallest billing increment.
        float usage = runningTime / 1000f / 60f / 60f;

        DecimalFormat dFormat = new DecimalFormat("#.######");
        String usageDisplay = dFormat.format(usage);

        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Creating IP usage record with id: " + IpId + ", usage: " + usageDisplay + ", startDate: " + startDate + ", endDate: " + endDate + ", for account: " + account.getId());
        }

        String usageDesc = "IPAddress: " + IPAddress;

        // Create the usage record.
        // NOTE(review): this parser uses account.getAccountId() while the other
        // parsers use account.getId() — confirm which is intended.
        UsageVO usageRecord = new UsageVO(zoneId, account.getAccountId(), account.getDomainId(), usageDesc, usageDisplay + " Hrs",
                UsageTypes.IP_ADDRESS, Double.valueOf(usage), null, null, null, null, IpId, startDate, endDate, (isSourceNat ? "SourceNat" : ""));
        m_usageDao.persist(usageRecord);
    }

    /** Immutable per-IP details captured while scanning usage rows. */
    private static class IpInfo {
        private final long zoneId;
        private final long ipId;
        private final String ipAddress;
        private final boolean sourceNat;

        public IpInfo(long zoneId, long ipId, String ipAddress, boolean sourceNat) {
            this.zoneId = zoneId;
            this.ipId = ipId;
            this.ipAddress = ipAddress;
            this.sourceNat = sourceNat;
        }

        public long getZoneId() {
            return zoneId;
        }

        public long getIpId() {
            return ipId;
        }

        public String getIPAddress() {
            return ipAddress;
        }

        public boolean isSourceNat() {
            return sourceNat;
        }
    }
}

View File

@ -0,0 +1,148 @@
/**
* * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved
*
*/
package com.cloud.usage.parser;
import java.text.DecimalFormat;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.log4j.Logger;
import com.cloud.usage.UsageLoadBalancerPolicyVO;
import com.cloud.usage.UsageServer;
import com.cloud.usage.UsageTypes;
import com.cloud.usage.UsageVO;
import com.cloud.usage.dao.UsageDao;
import com.cloud.usage.dao.UsageLoadBalancerPolicyDao;
import com.cloud.user.AccountVO;
import com.cloud.utils.Pair;
import com.cloud.utils.component.ComponentLocator;
/**
 * Aggregates load balancer policy usage for an account over a billing window
 * and persists one usage record per policy with non-zero lifetime.
 */
public class LoadBalancerUsageParser {
    public static final Logger s_logger = Logger.getLogger(LoadBalancerUsageParser.class.getName());

    private static ComponentLocator _locator = ComponentLocator.getLocator(UsageServer.Name, "usage-components.xml", "log4j-cloud_usage");
    private static UsageDao m_usageDao = _locator.getDao(UsageDao.class);
    private static UsageLoadBalancerPolicyDao m_usageLoadBalancerPolicyDao = _locator.getDao(UsageLoadBalancerPolicyDao.class);

    /**
     * Parses load balancer policy usage for the account between startDate and endDate.
     *
     * @param account   account whose LB policy usage is aggregated
     * @param startDate beginning of the aggregation range
     * @param endDate   end of the range; clipped to "now" when null or in the future
     * @return always true (parsing is best-effort; no failure path returns false)
     */
    public static boolean parse(AccountVO account, Date startDate, Date endDate) {
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Parsing all LoadBalancerPolicy usage events for account: " + account.getId());
        }
        // Never bill into the future.
        if ((endDate == null) || endDate.after(new Date())) {
            endDate = new Date();
        }

        // - query the load balancer policy usage table with the following criteria:
        //     - look for an entry for accountId with start date in the given range
        //     - look for an entry for accountId with end date in the given range
        //     - look for an entry for accountId with end date null (still existing policy)
        //     - look for an entry for accountId with start date before given range *and* end date after given range
        List<UsageLoadBalancerPolicyVO> usageLBs = m_usageLoadBalancerPolicyDao.getUsageRecords(account.getId(), account.getDomainId(), startDate, endDate, false, 0);

        if (usageLBs.isEmpty()) {
            s_logger.debug("No load balancer usage events for this period");
            return true;
        }

        // key -> (lbId, total existence milliseconds within the range)
        Map<String, Pair<Long, Long>> usageMap = new HashMap<String, Pair<Long, Long>>();
        // key -> descriptive info needed to build the usage record
        Map<String, LBInfo> lbMap = new HashMap<String, LBInfo>();

        // Loop through all the load balancer policies, accumulating time per policy.
        for (UsageLoadBalancerPolicyVO usageLB : usageLBs) {
            long lbId = usageLB.getId();
            String key = "" + lbId;

            lbMap.put(key, new LBInfo(lbId, usageLB.getZoneId()));

            Date lbCreateDate = usageLB.getCreated();
            Date lbDeleteDate = usageLB.getDeleted();

            // Clip the delete date to the end of the range if the policy still exists.
            if ((lbDeleteDate == null) || lbDeleteDate.after(endDate)) {
                lbDeleteDate = endDate;
            }
            // Clip the start date to the beginning of our aggregation range if the
            // policy has existed since before the range.
            if (lbCreateDate.before(startDate)) {
                lbCreateDate = startDate;
            }

            // +1 makes this an inclusive check for milliseconds
            // (i.e. use n - m + 1 to find total number of millis to charge).
            long currentDuration = (lbDeleteDate.getTime() - lbCreateDate.getTime()) + 1;

            updateLBUsageData(usageMap, key, usageLB.getId(), currentDuration);
        }

        // Iterate entries directly instead of keySet() + get() per key.
        for (Map.Entry<String, Pair<Long, Long>> entry : usageMap.entrySet()) {
            Pair<Long, Long> lbTimeInfo = entry.getValue();
            long useTime = lbTimeInfo.second().longValue();

            // Only create a usage record if we have a running time greater than zero.
            if (useTime > 0L) {
                LBInfo info = lbMap.get(entry.getKey());
                createUsageRecord(UsageTypes.LOAD_BALANCER_POLICY, useTime, startDate, endDate, account, info.getId(), info.getZoneId());
            }
        }

        return true;
    }

    /**
     * Adds {@code duration} milliseconds to the running total for the given
     * key, creating the entry on first sight.
     */
    private static void updateLBUsageData(Map<String, Pair<Long, Long>> usageDataMap, String key, long lbId, long duration) {
        Pair<Long, Long> lbUsageInfo = usageDataMap.get(key);
        if (lbUsageInfo == null) {
            // Long.valueOf over the deprecated Long(long) constructor.
            lbUsageInfo = new Pair<Long, Long>(Long.valueOf(lbId), Long.valueOf(duration));
        } else {
            Long runningTime = lbUsageInfo.second();
            runningTime = Long.valueOf(runningTime.longValue() + duration);
            lbUsageInfo = new Pair<Long, Long>(lbUsageInfo.first(), runningTime);
        }
        usageDataMap.put(key, lbUsageInfo);
    }

    /**
     * Converts an existence time in milliseconds to hours and persists one
     * load balancer policy usage record.
     */
    private static void createUsageRecord(int type, long runningTime, Date startDate, Date endDate, AccountVO account, long lbId, long zoneId) {
        // Our smallest increment is hourly for now.
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Total running time " + runningTime + "ms");
        }

        float usage = runningTime / 1000f / 60f / 60f;

        DecimalFormat dFormat = new DecimalFormat("#.######");
        String usageDisplay = dFormat.format(usage);

        if (s_logger.isDebugEnabled()) {
            // Fixed copy-paste log message: this is an LB policy record, not a volume record.
            s_logger.debug("Creating usage record for load balancer: " + lbId + ", usage: " + usageDisplay + ", startDate: " + startDate + ", endDate: " + endDate + ", for account: " + account.getId());
        }

        // Create the usage record.
        String usageDesc = "Load Balancing Policy: " + lbId + " usage time";
        UsageVO usageRecord = new UsageVO(zoneId, account.getId(), account.getDomainId(), usageDesc, usageDisplay + " Hrs", type,
                Double.valueOf(usage), null, null, null, null, lbId, null, startDate, endDate);
        m_usageDao.persist(usageRecord);
    }

    /** Immutable per-policy details captured while scanning usage rows. */
    private static class LBInfo {
        private final long id;
        private final long zoneId;

        public LBInfo(long id, long zoneId) {
            this.id = id;
            this.zoneId = zoneId;
        }

        public long getZoneId() {
            return zoneId;
        }

        public long getId() {
            return id;
        }
    }
}

View File

@ -0,0 +1,160 @@
/**
* * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved
*
*/
package com.cloud.usage.parser;
import java.text.DecimalFormat;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.log4j.Logger;
import com.cloud.usage.UsageNetworkOfferingVO;
import com.cloud.usage.UsageServer;
import com.cloud.usage.UsageTypes;
import com.cloud.usage.UsageVO;
import com.cloud.usage.dao.UsageDao;
import com.cloud.usage.dao.UsageNetworkOfferingDao;
import com.cloud.user.AccountVO;
import com.cloud.utils.Pair;
import com.cloud.utils.component.ComponentLocator;
/**
 * Aggregates network offering usage (per VM NIC) for an account over a
 * billing window and persists one usage record per VM/offering pair.
 */
public class NetworkOfferingUsageParser {
    public static final Logger s_logger = Logger.getLogger(NetworkOfferingUsageParser.class.getName());

    private static ComponentLocator _locator = ComponentLocator.getLocator(UsageServer.Name, "usage-components.xml", "log4j-cloud_usage");
    private static UsageDao m_usageDao = _locator.getDao(UsageDao.class);
    private static UsageNetworkOfferingDao m_usageNetworkOfferingDao = _locator.getDao(UsageNetworkOfferingDao.class);

    /**
     * Parses network offering usage for the account between startDate and endDate.
     *
     * @param account   account whose network offering usage is aggregated
     * @param startDate beginning of the aggregation range
     * @param endDate   end of the range; clipped to "now" when null or in the future
     * @return always true (parsing is best-effort; no failure path returns false)
     */
    public static boolean parse(AccountVO account, Date startDate, Date endDate) {
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Parsing all NetworkOffering usage events for account: " + account.getId());
        }
        // Never bill into the future.
        if ((endDate == null) || endDate.after(new Date())) {
            endDate = new Date();
        }

        // - query the network offering usage table with the following criteria:
        //     - look for an entry for accountId with start date in the given range
        //     - look for an entry for accountId with end date in the given range
        //     - look for an entry for accountId with end date null (still assigned offering)
        //     - look for an entry for accountId with start date before given range *and* end date after given range
        List<UsageNetworkOfferingVO> usageNOs = m_usageNetworkOfferingDao.getUsageRecords(account.getId(), account.getDomainId(), startDate, endDate, false, 0);

        if (usageNOs.isEmpty()) {
            s_logger.debug("No NetworkOffering usage events for this period");
            return true;
        }

        // key -> (vmId, total assignment milliseconds within the range)
        Map<String, Pair<Long, Long>> usageMap = new HashMap<String, Pair<Long, Long>>();
        // key -> descriptive info needed to build the usage record
        Map<String, NOInfo> noMap = new HashMap<String, NOInfo>();

        // Loop through all the network offerings, accumulating time per VM/offering pair.
        for (UsageNetworkOfferingVO usageNO : usageNOs) {
            long vmId = usageNO.getVmInstanceId();
            long noId = usageNO.getNetworkOfferingId();
            // Composite key: one bucket per (vm, offering) combination.
            String key = "" + vmId + "NO" + noId;

            noMap.put(key, new NOInfo(vmId, usageNO.getZoneId(), noId, usageNO.isDefault()));

            Date noCreateDate = usageNO.getCreated();
            Date noDeleteDate = usageNO.getDeleted();

            // Clip the delete date to the end of the range if the offering is still assigned.
            if ((noDeleteDate == null) || noDeleteDate.after(endDate)) {
                noDeleteDate = endDate;
            }
            // Clip the start date to the beginning of our aggregation range if the
            // assignment predates the range.
            if (noCreateDate.before(startDate)) {
                noCreateDate = startDate;
            }

            // +1 makes this an inclusive check for milliseconds
            // (i.e. use n - m + 1 to find total number of millis to charge).
            long currentDuration = (noDeleteDate.getTime() - noCreateDate.getTime()) + 1;

            updateNOUsageData(usageMap, key, usageNO.getVmInstanceId(), currentDuration);
        }

        // Iterate entries directly instead of keySet() + get() per key.
        for (Map.Entry<String, Pair<Long, Long>> entry : usageMap.entrySet()) {
            Pair<Long, Long> noTimeInfo = entry.getValue();
            long useTime = noTimeInfo.second().longValue();

            // Only create a usage record if we have a running time greater than zero.
            if (useTime > 0L) {
                NOInfo info = noMap.get(entry.getKey());
                createUsageRecord(UsageTypes.NETWORK_OFFERING, useTime, startDate, endDate, account, info.getVmId(), info.getNOId(), info.getZoneId(), info.isDefault());
            }
        }

        return true;
    }

    /**
     * Adds {@code duration} milliseconds to the running total for the given
     * key, creating the entry on first sight.
     */
    private static void updateNOUsageData(Map<String, Pair<Long, Long>> usageDataMap, String key, long vmId, long duration) {
        Pair<Long, Long> noUsageInfo = usageDataMap.get(key);
        if (noUsageInfo == null) {
            // Long.valueOf over the deprecated Long(long) constructor.
            noUsageInfo = new Pair<Long, Long>(Long.valueOf(vmId), Long.valueOf(duration));
        } else {
            Long runningTime = noUsageInfo.second();
            runningTime = Long.valueOf(runningTime.longValue() + duration);
            noUsageInfo = new Pair<Long, Long>(noUsageInfo.first(), runningTime);
        }
        usageDataMap.put(key, noUsageInfo);
    }

    /**
     * Converts an assignment time in milliseconds to hours and persists one
     * network offering usage record.
     */
    private static void createUsageRecord(int type, long runningTime, Date startDate, Date endDate, AccountVO account, long vmId, long noId, long zoneId, boolean isDefault) {
        // Our smallest increment is hourly for now.
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Total running time " + runningTime + "ms");
        }

        float usage = runningTime / 1000f / 60f / 60f;

        DecimalFormat dFormat = new DecimalFormat("#.######");
        String usageDisplay = dFormat.format(usage);

        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Creating network offering:" + noId + " usage record for Vm : " + vmId + ", usage: " + usageDisplay + ", startDate: " + startDate + ", endDate: " + endDate + ", for account: " + account.getId());
        }

        // Create the usage record.
        String usageDesc = "Network offering:" + noId + " for Vm : " + vmId + " usage time";
        // 1 when this NIC is the VM's default NIC, else 0 (stored in the record).
        long defaultNic = (isDefault) ? 1 : 0;
        UsageVO usageRecord = new UsageVO(zoneId, account.getId(), account.getDomainId(), usageDesc, usageDisplay + " Hrs", type,
                Double.valueOf(usage), vmId, null, noId, null, defaultNic, null, startDate, endDate);
        m_usageDao.persist(usageRecord);
    }

    /** Immutable per-(vm, offering) details captured while scanning usage rows. */
    private static class NOInfo {
        private final long vmId;
        private final long zoneId;
        private final long noId;
        private final boolean isDefault;

        public NOInfo(long vmId, long zoneId, long noId, boolean isDefault) {
            this.vmId = vmId;
            this.zoneId = zoneId;
            this.noId = noId;
            this.isDefault = isDefault;
        }

        public long getZoneId() {
            return zoneId;
        }

        public long getVmId() {
            return vmId;
        }

        public long getNOId() {
            return noId;
        }

        public boolean isDefault() {
            return isDefault;
        }
    }
}

View File

@ -0,0 +1,154 @@
/**
* * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved
*
*/
package com.cloud.usage.parser;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.log4j.Logger;
import com.cloud.usage.UsageNetworkVO;
import com.cloud.usage.UsageServer;
import com.cloud.usage.UsageTypes;
import com.cloud.usage.UsageVO;
import com.cloud.usage.dao.UsageDao;
import com.cloud.usage.dao.UsageNetworkDao;
import com.cloud.user.AccountVO;
import com.cloud.utils.Pair;
import com.cloud.utils.component.ComponentLocator;
import com.cloud.utils.db.SearchCriteria;
/**
 * Aggregates network byte counters (sent/received) for an account over a
 * billing window, grouped per zone (and per host where recorded), and
 * persists one sent-record and one received-record per group.
 */
public class NetworkUsageParser {
    public static final Logger s_logger = Logger.getLogger(NetworkUsageParser.class.getName());

    private static ComponentLocator _locator = ComponentLocator.getLocator(UsageServer.Name, "usage-components.xml", "log4j-cloud_usage");
    private static UsageDao m_usageDao = _locator.getDao(UsageDao.class);
    private static UsageNetworkDao m_usageNetworkDao = _locator.getDao(UsageNetworkDao.class);

    /**
     * Parses network usage for the account between startDate and endDate.
     *
     * @param account   account whose network usage is aggregated
     * @param startDate beginning of the aggregation range
     * @param endDate   end of the range; clipped to "now" when null or in the future
     * @return always true (parsing is best-effort; no failure path returns false)
     */
    public static boolean parse(AccountVO account, Date startDate, Date endDate) {
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Parsing all Network usage events for account: " + account.getId());
        }
        // Never bill into the future.
        if ((endDate == null) || endDate.after(new Date())) {
            endDate = new Date();
        }

        // - query usage_network table for all entries for userId with
        //   event_date in the given range
        SearchCriteria<UsageNetworkVO> sc = m_usageNetworkDao.createSearchCriteria();
        sc.addAnd("accountId", SearchCriteria.Op.EQ, account.getId());
        sc.addAnd("eventTimeMillis", SearchCriteria.Op.BETWEEN, startDate.getTime(), endDate.getTime());
        List<UsageNetworkVO> usageNetworkVOs = m_usageNetworkDao.search(sc, null);

        // key ("<zoneId>" or "<zoneId>-Host<hostId>") -> accumulated byte totals
        Map<String, NetworkInfo> networkUsageByZone = new HashMap<String, NetworkInfo>();

        // Calculate the total bytes since last parsing, grouped per zone/host.
        for (UsageNetworkVO usageNetwork : usageNetworkVOs) {
            long zoneId = usageNetwork.getZoneId();
            String key = "" + zoneId;
            // A host id of 0 means "not host-specific"; otherwise split per host.
            if (usageNetwork.getHostId() != 0) {
                key += "-Host" + usageNetwork.getHostId();
            }

            NetworkInfo networkInfo = networkUsageByZone.get(key);
            long bytesSent = usageNetwork.getBytesSent();
            long bytesReceived = usageNetwork.getBytesReceived();
            if (networkInfo != null) {
                bytesSent += networkInfo.getBytesSent();
                bytesReceived += networkInfo.getBytesRcvd();
            }
            networkUsageByZone.put(key, new NetworkInfo(zoneId, usageNetwork.getHostId(), usageNetwork.getHostType(), usageNetwork.getNetworkId(), bytesSent, bytesReceived));
        }

        // Iterate entries directly instead of keySet() + get() per key.
        for (Map.Entry<String, NetworkInfo> entry : networkUsageByZone.entrySet()) {
            NetworkInfo networkInfo = entry.getValue();
            long totalBytesSent = networkInfo.getBytesSent();
            long totalBytesReceived = networkInfo.getBytesRcvd();

            if ((totalBytesSent > 0L) || (totalBytesReceived > 0L)) {
                if (s_logger.isDebugEnabled()) {
                    s_logger.debug("Creating usage record, total bytes sent:" + totalBytesSent + ", total bytes received: " + totalBytesReceived + " for account: "
                            + account.getId() + " in availability zone " + networkInfo.getZoneId() + ", start: " + startDate + ", end: " + endDate);
                }

                Long hostId = null;

                // Create the usage record for bytes sent.
                String usageDesc = "network bytes sent";
                if (networkInfo.getHostId() != 0) {
                    hostId = networkInfo.getHostId();
                    usageDesc += " for Host: " + networkInfo.getHostId();
                }
                UsageVO usageRecord = new UsageVO(networkInfo.getZoneId(), account.getId(), account.getDomainId(), usageDesc, totalBytesSent + " bytes sent",
                        UsageTypes.NETWORK_BYTES_SENT, Double.valueOf(totalBytesSent), hostId, networkInfo.getHostType(), networkInfo.getNetworkId(), startDate, endDate);
                m_usageDao.persist(usageRecord);

                // Create the usage record for bytes received.
                usageDesc = "network bytes received";
                if (networkInfo.getHostId() != 0) {
                    usageDesc += " for Host: " + networkInfo.getHostId();
                }
                usageRecord = new UsageVO(networkInfo.getZoneId(), account.getId(), account.getDomainId(), usageDesc, totalBytesReceived + " bytes received",
                        UsageTypes.NETWORK_BYTES_RECEIVED, Double.valueOf(totalBytesReceived), hostId, networkInfo.getHostType(), networkInfo.getNetworkId(), startDate, endDate);
                m_usageDao.persist(usageRecord);
            } else {
                // Don't charge anything if there were zero bytes processed.
                if (s_logger.isDebugEnabled()) {
                    s_logger.debug("No usage record (0 bytes used) generated for account: " + account.getId());
                }
            }
        }

        return true;
    }

    /** Immutable per-zone/host byte totals accumulated while scanning usage rows. */
    private static class NetworkInfo {
        private final long zoneId;
        private final long hostId;
        private final String hostType;
        private final Long networkId;
        private final long bytesSent;
        private final long bytesRcvd;

        public NetworkInfo(long zoneId, long hostId, String hostType, Long networkId, long bytesSent, long bytesRcvd) {
            this.zoneId = zoneId;
            this.hostId = hostId;
            this.hostType = hostType;
            this.networkId = networkId;
            this.bytesSent = bytesSent;
            this.bytesRcvd = bytesRcvd;
        }

        public long getZoneId() {
            return zoneId;
        }

        public long getHostId() {
            return hostId;
        }

        public Long getNetworkId() {
            return networkId;
        }

        public long getBytesSent() {
            return bytesSent;
        }

        public long getBytesRcvd() {
            return bytesRcvd;
        }

        public String getHostType() {
            return hostType;
        }
    }
}

View File

@ -0,0 +1,148 @@
/**
* * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved
*
*/
package com.cloud.usage.parser;
import java.text.DecimalFormat;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.log4j.Logger;
import com.cloud.usage.UsagePortForwardingRuleVO;
import com.cloud.usage.UsageServer;
import com.cloud.usage.UsageTypes;
import com.cloud.usage.UsageVO;
import com.cloud.usage.dao.UsageDao;
import com.cloud.usage.dao.UsagePortForwardingRuleDao;
import com.cloud.user.AccountVO;
import com.cloud.utils.Pair;
import com.cloud.utils.component.ComponentLocator;
/**
 * Aggregates port forwarding rule usage for an account over a billing window
 * and persists one usage record per rule with non-zero lifetime.
 */
public class PortForwardingUsageParser {
    public static final Logger s_logger = Logger.getLogger(PortForwardingUsageParser.class.getName());

    private static ComponentLocator _locator = ComponentLocator.getLocator(UsageServer.Name, "usage-components.xml", "log4j-cloud_usage");
    private static UsageDao m_usageDao = _locator.getDao(UsageDao.class);
    private static UsagePortForwardingRuleDao m_usagePFRuleDao = _locator.getDao(UsagePortForwardingRuleDao.class);

    /**
     * Parses port forwarding rule usage for the account between startDate and endDate.
     *
     * @param account   account whose PF rule usage is aggregated
     * @param startDate beginning of the aggregation range
     * @param endDate   end of the range; clipped to "now" when null or in the future
     * @return always true (parsing is best-effort; no failure path returns false)
     */
    public static boolean parse(AccountVO account, Date startDate, Date endDate) {
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Parsing all PortForwardingRule usage events for account: " + account.getId());
        }
        // Never bill into the future.
        if ((endDate == null) || endDate.after(new Date())) {
            endDate = new Date();
        }

        // - query the port forwarding rule usage table with the following criteria:
        //     - look for an entry for accountId with start date in the given range
        //     - look for an entry for accountId with end date in the given range
        //     - look for an entry for accountId with end date null (still existing rule)
        //     - look for an entry for accountId with start date before given range *and* end date after given range
        List<UsagePortForwardingRuleVO> usagePFs = m_usagePFRuleDao.getUsageRecords(account.getId(), account.getDomainId(), startDate, endDate, false, 0);

        if (usagePFs.isEmpty()) {
            s_logger.debug("No port forwarding usage events for this period");
            return true;
        }

        // key -> (pfId, total existence milliseconds within the range)
        Map<String, Pair<Long, Long>> usageMap = new HashMap<String, Pair<Long, Long>>();
        // key -> descriptive info needed to build the usage record
        Map<String, PFInfo> pfMap = new HashMap<String, PFInfo>();

        // Loop through all the port forwarding rules, accumulating time per rule.
        for (UsagePortForwardingRuleVO usagePF : usagePFs) {
            long pfId = usagePF.getId();
            String key = "" + pfId;

            pfMap.put(key, new PFInfo(pfId, usagePF.getZoneId()));

            Date pfCreateDate = usagePF.getCreated();
            Date pfDeleteDate = usagePF.getDeleted();

            // Clip the delete date to the end of the range if the rule still exists.
            if ((pfDeleteDate == null) || pfDeleteDate.after(endDate)) {
                pfDeleteDate = endDate;
            }
            // Clip the start date to the beginning of our aggregation range if the
            // rule has existed since before the range.
            if (pfCreateDate.before(startDate)) {
                pfCreateDate = startDate;
            }

            // +1 makes this an inclusive check for milliseconds
            // (i.e. use n - m + 1 to find total number of millis to charge).
            long currentDuration = (pfDeleteDate.getTime() - pfCreateDate.getTime()) + 1;

            updatePFUsageData(usageMap, key, usagePF.getId(), currentDuration);
        }

        // Iterate entries directly instead of keySet() + get() per key.
        for (Map.Entry<String, Pair<Long, Long>> entry : usageMap.entrySet()) {
            Pair<Long, Long> pfTimeInfo = entry.getValue();
            long useTime = pfTimeInfo.second().longValue();

            // Only create a usage record if we have a running time greater than zero.
            if (useTime > 0L) {
                PFInfo info = pfMap.get(entry.getKey());
                createUsageRecord(UsageTypes.PORT_FORWARDING_RULE, useTime, startDate, endDate, account, info.getId(), info.getZoneId());
            }
        }

        return true;
    }

    /**
     * Adds {@code duration} milliseconds to the running total for the given
     * key, creating the entry on first sight.
     */
    private static void updatePFUsageData(Map<String, Pair<Long, Long>> usageDataMap, String key, long pfId, long duration) {
        Pair<Long, Long> pfUsageInfo = usageDataMap.get(key);
        if (pfUsageInfo == null) {
            // Long.valueOf over the deprecated Long(long) constructor.
            pfUsageInfo = new Pair<Long, Long>(Long.valueOf(pfId), Long.valueOf(duration));
        } else {
            Long runningTime = pfUsageInfo.second();
            runningTime = Long.valueOf(runningTime.longValue() + duration);
            pfUsageInfo = new Pair<Long, Long>(pfUsageInfo.first(), runningTime);
        }
        usageDataMap.put(key, pfUsageInfo);
    }

    /**
     * Converts an existence time in milliseconds to hours and persists one
     * port forwarding rule usage record.
     */
    private static void createUsageRecord(int type, long runningTime, Date startDate, Date endDate, AccountVO account, long pfId, long zoneId) {
        // Our smallest increment is hourly for now.
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Total running time " + runningTime + "ms");
        }

        float usage = runningTime / 1000f / 60f / 60f;

        DecimalFormat dFormat = new DecimalFormat("#.######");
        String usageDisplay = dFormat.format(usage);

        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Creating usage record for port forwarding rule: " + pfId + ", usage: " + usageDisplay + ", startDate: " + startDate + ", endDate: " + endDate + ", for account: " + account.getId());
        }

        // Create the usage record.
        String usageDesc = "Port Forwarding Rule: " + pfId + " usage time";
        UsageVO usageRecord = new UsageVO(zoneId, account.getId(), account.getDomainId(), usageDesc, usageDisplay + " Hrs", type,
                Double.valueOf(usage), null, null, null, null, pfId, null, startDate, endDate);
        m_usageDao.persist(usageRecord);
    }

    /** Immutable per-rule details captured while scanning usage rows. */
    private static class PFInfo {
        private final long id;
        private final long zoneId;

        public PFInfo(long id, long zoneId) {
            this.id = id;
            this.zoneId = zoneId;
        }

        public long getZoneId() {
            return zoneId;
        }

        public long getId() {
            return id;
        }
    }
}

View File

@ -0,0 +1,194 @@
/**
* * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved
*
*/
package com.cloud.usage.parser;
import java.text.DecimalFormat;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.log4j.Logger;
import com.cloud.usage.StorageTypes;
import com.cloud.usage.UsageServer;
import com.cloud.usage.UsageStorageVO;
import com.cloud.usage.UsageTypes;
import com.cloud.usage.UsageVO;
import com.cloud.usage.dao.UsageDao;
import com.cloud.usage.dao.UsageStorageDao;
import com.cloud.user.AccountVO;
import com.cloud.utils.Pair;
import com.cloud.utils.component.ComponentLocator;
/**
 * Aggregates secondary-storage usage (templates, ISOs, snapshots) for an
 * account over a billing window and persists one usage record per storage
 * object with non-zero lifetime.
 */
public class StorageUsageParser {
    public static final Logger s_logger = Logger.getLogger(StorageUsageParser.class.getName());

    private static ComponentLocator _locator = ComponentLocator.getLocator(UsageServer.Name, "usage-components.xml", "log4j-cloud_usage");
    private static UsageDao m_usageDao = _locator.getDao(UsageDao.class);
    private static UsageStorageDao m_usageStorageDao = _locator.getDao(UsageStorageDao.class);

    /**
     * Parses storage usage for the account between startDate and endDate.
     *
     * @param account   account whose storage usage is aggregated
     * @param startDate beginning of the aggregation range
     * @param endDate   end of the range; clipped to "now" when null or in the future
     * @return always true (parsing is best-effort; no failure path returns false)
     */
    public static boolean parse(AccountVO account, Date startDate, Date endDate) {
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Parsing all Storage usage events for account: " + account.getId());
        }
        // Never bill into the future.
        if ((endDate == null) || endDate.after(new Date())) {
            endDate = new Date();
        }

        // - query the storage usage table with the following criteria:
        //     - look for an entry for accountId with start date in the given range
        //     - look for an entry for accountId with end date in the given range
        //     - look for an entry for accountId with end date null (still existing object)
        //     - look for an entry for accountId with start date before given range *and* end date after given range
        List<UsageStorageVO> usageUsageStorages = m_usageStorageDao.getUsageRecords(account.getId(), account.getDomainId(), startDate, endDate, false, 0);

        if (usageUsageStorages.isEmpty()) {
            s_logger.debug("No Storage usage events for this period");
            return true;
        }

        // key -> (storageId, total existence milliseconds within the range)
        Map<String, Pair<Long, Long>> usageMap = new HashMap<String, Pair<Long, Long>>();
        // key -> descriptive info needed to build the usage record
        Map<String, StorageInfo> storageMap = new HashMap<String, StorageInfo>();

        // Loop through all the storage objects, accumulating time per object.
        for (UsageStorageVO usageStorage : usageUsageStorages) {
            long storageId = usageStorage.getId();
            int storageType = usageStorage.getStorageType();
            long size = usageStorage.getSize();
            long zoneId = usageStorage.getZoneId();
            Long sourceId = usageStorage.getSourceId();
            // Composite key: one bucket per (id, zone, type) combination.
            String key = "" + storageId + "Z" + zoneId + "T" + storageType;

            // store the info in the storage map
            storageMap.put(key, new StorageInfo(zoneId, storageId, storageType, sourceId, size));

            Date storageCreateDate = usageStorage.getCreated();
            Date storageDeleteDate = usageStorage.getDeleted();

            // Clip the delete date to the end of the range if the object still exists.
            if ((storageDeleteDate == null) || storageDeleteDate.after(endDate)) {
                storageDeleteDate = endDate;
            }
            // Clip the start date to the beginning of our aggregation range if the
            // object has existed since before the range.
            if (storageCreateDate.before(startDate)) {
                storageCreateDate = startDate;
            }

            // +1 makes this an inclusive check for milliseconds
            // (i.e. use n - m + 1 to find total number of millis to charge).
            long currentDuration = (storageDeleteDate.getTime() - storageCreateDate.getTime()) + 1;

            updateStorageUsageData(usageMap, key, usageStorage.getId(), currentDuration);
        }

        // Iterate entries directly instead of keySet() + get() per key.
        for (Map.Entry<String, Pair<Long, Long>> entry : usageMap.entrySet()) {
            Pair<Long, Long> storageTimeInfo = entry.getValue();
            long useTime = storageTimeInfo.second().longValue();

            // Only create a usage record if we have a running time greater than zero.
            if (useTime > 0L) {
                StorageInfo info = storageMap.get(entry.getKey());
                createUsageRecord(info.getZoneId(), info.getStorageType(), useTime, startDate, endDate, account, info.getStorageId(), info.getSourceId(), info.getSize());
            }
        }

        return true;
    }

    /**
     * Adds {@code duration} milliseconds to the running total for the given
     * key, creating the entry on first sight.
     */
    private static void updateStorageUsageData(Map<String, Pair<Long, Long>> usageDataMap, String key, long storageId, long duration) {
        Pair<Long, Long> volUsageInfo = usageDataMap.get(key);
        if (volUsageInfo == null) {
            // Long.valueOf over the deprecated Long(long) constructor.
            volUsageInfo = new Pair<Long, Long>(Long.valueOf(storageId), Long.valueOf(duration));
        } else {
            Long runningTime = volUsageInfo.second();
            runningTime = Long.valueOf(runningTime.longValue() + duration);
            volUsageInfo = new Pair<Long, Long>(volUsageInfo.first(), runningTime);
        }
        usageDataMap.put(key, volUsageInfo);
    }

    /**
     * Converts an existence time in milliseconds to hours and persists one
     * storage usage record, mapping the storage type to its usage type.
     */
    private static void createUsageRecord(long zoneId, int type, long runningTime, Date startDate, Date endDate, AccountVO account, long storageId, Long sourceId, long size) {
        // Our smallest increment is hourly for now.
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Total running time " + runningTime + "ms");
        }

        float usage = runningTime / 1000f / 60f / 60f;

        DecimalFormat dFormat = new DecimalFormat("#.######");
        String usageDisplay = dFormat.format(usage);

        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Creating Storage usage record for type: " + type + " with id: " + storageId + ", usage: " + usageDisplay + ", startDate: " + startDate + ", endDate: " + endDate + ", for account: " + account.getId());
        }

        String usageDesc = "";
        Long tmplSourceId = null;

        // NOTE(review): an unknown storage type leaves usage_type at 0 and an
        // empty description; confirm whether such rows should be skipped.
        int usage_type = 0;
        switch (type) {
            case StorageTypes.TEMPLATE:
                usage_type = UsageTypes.TEMPLATE;
                usageDesc += "Template ";
                // Only templates carry a source id through to the record.
                tmplSourceId = sourceId;
                break;
            case StorageTypes.ISO:
                usage_type = UsageTypes.ISO;
                usageDesc += "ISO ";
                break;
            case StorageTypes.SNAPSHOT:
                usage_type = UsageTypes.SNAPSHOT;
                usageDesc += "Snapshot ";
                break;
        }

        // Create the usage record.
        usageDesc += "Id:" + storageId + " Size:" + size;
        UsageVO usageRecord = new UsageVO(zoneId, account.getId(), account.getDomainId(), usageDesc, usageDisplay + " Hrs", usage_type,
                Double.valueOf(usage), null, null, null, tmplSourceId, storageId, size, startDate, endDate);
        m_usageDao.persist(usageRecord);
    }

    /** Immutable per-object details captured while scanning usage rows. */
    private static class StorageInfo {
        private final long zoneId;
        private final long storageId;
        private final int storageType;
        private final Long sourceId;
        private final long size;

        public StorageInfo(long zoneId, long storageId, int storageType, Long sourceId, long size) {
            this.zoneId = zoneId;
            this.storageId = storageId;
            this.storageType = storageType;
            this.sourceId = sourceId;
            this.size = size;
        }

        public long getZoneId() {
            return zoneId;
        }

        public long getStorageId() {
            return storageId;
        }

        public int getStorageType() {
            return storageType;
        }

        /**
         * Returns the nullable source id. BUG FIX: the original declared this
         * as {@code long}, which auto-unboxes and throws NullPointerException
         * whenever sourceId is null (e.g. for ISO/snapshot rows).
         */
        public Long getSourceId() {
            return sourceId;
        }

        public long getSize() {
            return size;
        }
    }
}

View File

@ -0,0 +1,24 @@
/**
* * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved
*
*/
package com.cloud.usage.parser;
import java.util.Date;
import org.apache.log4j.Logger;
/**
 * Base class for the usage parsers. Implements {@link Runnable} so a parser
 * can be handed straight to a scheduler; {@code run()} delegates to
 * {@code parse(null)} and logs any failure instead of letting it propagate
 * to the executing thread.
 */
public abstract class UsageParser implements Runnable {
    public static final Logger s_logger = Logger.getLogger(UsageParser.class.getName());

    @Override
    public void run() {
        try {
            parse(null);
        } catch (Exception ex) {
            // swallow deliberately: a parse failure must not kill the scheduler thread
            s_logger.warn("Error while parsing usage events", ex);
        }
    }

    /**
     * Parses usage events up to the given end date.
     *
     * @param endDate upper bound for events to parse; may be null
     */
    public abstract void parse(Date endDate);
}

View File

@ -0,0 +1,189 @@
/**
* * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved
*
*/
package com.cloud.usage.parser;
import java.text.DecimalFormat;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.log4j.Logger;
import com.cloud.usage.UsageServer;
import com.cloud.usage.UsageTypes;
import com.cloud.usage.UsageVMInstanceVO;
import com.cloud.usage.UsageVO;
import com.cloud.usage.dao.UsageDao;
import com.cloud.usage.dao.UsageVMInstanceDao;
import com.cloud.user.AccountVO;
import com.cloud.utils.Pair;
import com.cloud.utils.component.ComponentLocator;
/**
 * Aggregates per-account VM instance usage events into hourly-denominated
 * usage records, one per vmId-serviceOfferingId-usageType combination, for
 * both RUNNING_VM and ALLOCATED_VM time.
 */
public class VMInstanceUsageParser {
    public static final Logger s_logger = Logger.getLogger(VMInstanceUsageParser.class.getName());

    private static ComponentLocator _locator = ComponentLocator.getLocator(UsageServer.Name, "usage-components.xml", "log4j-cloud_usage");
    private static UsageDao m_usageDao = _locator.getDao(UsageDao.class);
    private static UsageVMInstanceDao m_usageInstanceDao = _locator.getDao(UsageVMInstanceDao.class);

    /**
     * Parses all VM instance usage events for the account that overlap
     * [startDate, endDate] and persists one usage record per accumulated
     * (vm, service offering, usage type) key.
     *
     * @param account   account whose usage is being aggregated
     * @param startDate start of the aggregation range
     * @param endDate   end of the aggregation range; null or a future date is clipped to "now"
     * @return always true
     */
    public static boolean parse(AccountVO account, Date startDate, Date endDate) {
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Parsing all VMInstance usage events for account: " + account.getId());
        }

        if ((endDate == null) || endDate.after(new Date())) {
            endDate = new Date();
        }

        // - query usage_vm_instance table with the following criteria:
        //     - look for an entry for accountId with start date in the given range
        //     - look for an entry for accountId with end date in the given range
        //     - look for an entry for accountId with end date null (currently running vm or owned IP)
        //     - look for an entry for accountId with start date before given range *and* end date after given range
        List<UsageVMInstanceVO> usageInstances = m_usageInstanceDao.getUsageRecords(account.getId(), startDate, endDate);
        //ToDo: Add domainID for getting usage records

        // Key is "vmId-soId-usageType"; value pairs the vm name with the
        // accumulated duration in milliseconds.
        Map<String, Pair<String, Long>> usageVMUptimeMap = new HashMap<String, Pair<String, Long>>();
        Map<String, Pair<String, Long>> allocatedVMMap = new HashMap<String, Pair<String, Long>>();
        Map<String, VMInfo> vmServiceOfferingMap = new HashMap<String, VMInfo>();

        // loop through all the usage instances, accumulating running/allocated time per key
        for (UsageVMInstanceVO usageInstance : usageInstances) {
            long vmId = usageInstance.getVmInstanceId();
            long soId = usageInstance.getSerivceOfferingId();
            long zoneId = usageInstance.getZoneId();
            long tId = usageInstance.getTemplateId();
            int usageType = usageInstance.getUsageType();
            String key = vmId + "-" + soId + "-" + usageType;

            // store the info in the service offering map
            vmServiceOfferingMap.put(key, new VMInfo(vmId, zoneId, soId, tId, usageInstance.getHypervisorType()));

            Date vmStartDate = usageInstance.getStartDate();
            Date vmEndDate = usageInstance.getEndDate();
            if ((vmEndDate == null) || vmEndDate.after(endDate)) {
                vmEndDate = endDate;
            }

            // clip the start date to the beginning of our aggregation range if the vm has been running for a while
            if (vmStartDate.before(startDate)) {
                vmStartDate = startDate;
            }

            // make sure this is an inclusive check for milliseconds (i.e. use n - m + 1 to find total number of millis to charge)
            long currentDuration = (vmEndDate.getTime() - vmStartDate.getTime()) + 1;

            switch (usageType) {
            case UsageTypes.ALLOCATED_VM:
                updateVmUsageData(allocatedVMMap, key, usageInstance.getVmName(), currentDuration);
                break;
            case UsageTypes.RUNNING_VM:
                updateVmUsageData(usageVMUptimeMap, key, usageInstance.getVmName(), currentDuration);
                break;
            }
        }

        // iterate entries directly rather than keySet()+get() to avoid a second lookup per key
        for (Map.Entry<String, Pair<String, Long>> uptimeEntry : usageVMUptimeMap.entrySet()) {
            Pair<String, Long> vmUptimeInfo = uptimeEntry.getValue();
            long runningTime = vmUptimeInfo.second().longValue();

            // Only create a usage record if we have a runningTime of bigger than zero.
            if (runningTime > 0L) {
                VMInfo info = vmServiceOfferingMap.get(uptimeEntry.getKey());
                createUsageRecord(UsageTypes.RUNNING_VM, runningTime, startDate, endDate, account, info.getVirtualMachineId(), vmUptimeInfo.first(), info.getZoneId(),
                        info.getServiceOfferingId(), info.getTemplateId(), info.getHypervisorType());
            }
        }

        for (Map.Entry<String, Pair<String, Long>> allocEntry : allocatedVMMap.entrySet()) {
            Pair<String, Long> vmAllocInfo = allocEntry.getValue();
            long allocatedTime = vmAllocInfo.second().longValue();

            // Only create a usage record if we have an allocatedTime of bigger than zero.
            if (allocatedTime > 0L) {
                VMInfo info = vmServiceOfferingMap.get(allocEntry.getKey());
                createUsageRecord(UsageTypes.ALLOCATED_VM, allocatedTime, startDate, endDate, account, info.getVirtualMachineId(), vmAllocInfo.first(), info.getZoneId(),
                        info.getServiceOfferingId(), info.getTemplateId(), info.getHypervisorType());
            }
        }

        return true;
    }

    /**
     * Adds duration (ms) to the accumulated time for key, creating the
     * entry (carrying vmName) on first sight.
     */
    private static void updateVmUsageData(Map<String, Pair<String, Long>> usageDataMap, String key, String vmName, long duration) {
        Pair<String, Long> vmUsageInfo = usageDataMap.get(key);
        if (vmUsageInfo == null) {
            // Long.valueOf instead of the deprecated Long(long) constructor
            vmUsageInfo = new Pair<String, Long>(vmName, Long.valueOf(duration));
        } else {
            Long runningTime = Long.valueOf(vmUsageInfo.second().longValue() + duration);
            vmUsageInfo = new Pair<String, Long>(vmUsageInfo.first(), runningTime);
        }
        usageDataMap.put(key, vmUsageInfo);
    }

    /**
     * Creates and persists a single RUNNING_VM or ALLOCATED_VM usage record.
     *
     * @param type              UsageTypes.RUNNING_VM or UsageTypes.ALLOCATED_VM
     * @param runningTime       accumulated time in milliseconds
     * @param startDate         start of the aggregation range
     * @param endDate           end of the aggregation range
     * @param account           owning account
     * @param vmId              id of the vm
     * @param vmName            display name of the vm (goes into the description)
     * @param zoneId            zone of the vm
     * @param serviceOfferingId service offering the time was accrued under
     * @param templateId        template the vm was created from
     * @param hypervisorType    hypervisor the vm ran on
     */
    private static void createUsageRecord(int type, long runningTime, Date startDate, Date endDate, AccountVO account, long vmId, String vmName, long zoneId, long serviceOfferingId, long templateId, String hypervisorType) {
        // Our smallest increment is hourly for now
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Total running time " + runningTime + "ms");
        }

        // convert milliseconds to hours
        float usage = runningTime / 1000f / 60f / 60f;

        DecimalFormat dFormat = new DecimalFormat("#.######");
        String usageDisplay = dFormat.format(usage);

        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Creating VM usage record for vm: " + vmName + ", type: " + type + ", usage: " + usageDisplay + ", startDate: " + startDate + ", endDate: " + endDate + ", for account: " + account.getId());
        }

        // Create the usage record
        String usageDesc = vmName;
        if (type == UsageTypes.ALLOCATED_VM) {
            usageDesc += " allocated";
        } else {
            usageDesc += " running time";
        }
        usageDesc += " (ServiceOffering: " + serviceOfferingId + ") (Template: " + templateId + ")";

        UsageVO usageRecord = new UsageVO(Long.valueOf(zoneId), account.getId(), account.getDomainId(), usageDesc, usageDisplay + " Hrs", type,
                Double.valueOf(usage), Long.valueOf(vmId), vmName, Long.valueOf(serviceOfferingId), Long.valueOf(templateId), Long.valueOf(vmId), startDate, endDate, hypervisorType);
        m_usageDao.persist(usageRecord);
    }

    /** Immutable snapshot of the identifying attributes of a metered VM. */
    private static class VMInfo {
        private final long virtualMachineId;
        private final long zoneId;
        private final long serviceOfferingId;
        private final long templateId;
        private final String hypervisorType;

        public VMInfo(long vmId, long zId, long soId, long tId, String hypervisorType) {
            virtualMachineId = vmId;
            zoneId = zId;
            serviceOfferingId = soId;
            templateId = tId;
            this.hypervisorType = hypervisorType;
        }

        public long getZoneId() {
            return zoneId;
        }

        public long getVirtualMachineId() {
            return virtualMachineId;
        }

        public long getServiceOfferingId() {
            return serviceOfferingId;
        }

        public long getTemplateId() {
            return templateId;
        }

        private String getHypervisorType() {
            return hypervisorType;
        }
    }
}

View File

@ -0,0 +1,172 @@
/**
* * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved
*
*/
package com.cloud.usage.parser;
import java.text.DecimalFormat;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.log4j.Logger;
import com.cloud.usage.UsageServer;
import com.cloud.usage.UsageTypes;
import com.cloud.usage.UsageVO;
import com.cloud.usage.UsageVolumeVO;
import com.cloud.usage.dao.UsageDao;
import com.cloud.usage.dao.UsageVolumeDao;
import com.cloud.user.AccountVO;
import com.cloud.utils.Pair;
import com.cloud.utils.component.ComponentLocator;
/**
 * Aggregates per-account volume usage events into hourly-denominated usage
 * records, one per volume id.
 */
public class VolumeUsageParser {
    public static final Logger s_logger = Logger.getLogger(VolumeUsageParser.class.getName());

    private static ComponentLocator _locator = ComponentLocator.getLocator(UsageServer.Name, "usage-components.xml", "log4j-cloud_usage");
    private static UsageDao m_usageDao = _locator.getDao(UsageDao.class);
    private static UsageVolumeDao m_usageVolumeDao = _locator.getDao(UsageVolumeDao.class);

    /**
     * Parses all volume usage events for the account that overlap
     * [startDate, endDate] and persists one VOLUME usage record per volume
     * with non-zero accumulated time.
     *
     * @param account   account whose usage is being aggregated
     * @param startDate start of the aggregation range
     * @param endDate   end of the aggregation range; null or a future date is clipped to "now"
     * @return always true
     */
    public static boolean parse(AccountVO account, Date startDate, Date endDate) {
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Parsing all Volume usage events for account: " + account.getId());
        }

        if ((endDate == null) || endDate.after(new Date())) {
            endDate = new Date();
        }

        // - query usage_volume table with the following criteria:
        //     - look for an entry for accountId with start date in the given range
        //     - look for an entry for accountId with end date in the given range
        //     - look for an entry for accountId with end date null (currently running vm or owned IP)
        //     - look for an entry for accountId with start date before given range *and* end date after given range
        List<UsageVolumeVO> usageUsageVols = m_usageVolumeDao.getUsageRecords(account.getId(), account.getDomainId(), startDate, endDate, false, 0);

        if (usageUsageVols.isEmpty()) {
            s_logger.debug("No volume usage events for this period");
            return true;
        }

        // Key is the volume id as a string; value pairs the volume id with the
        // accumulated duration in milliseconds.
        Map<String, Pair<Long, Long>> usageMap = new HashMap<String, Pair<Long, Long>>();
        Map<String, VolInfo> diskOfferingMap = new HashMap<String, VolInfo>();

        // loop through all the usage volumes, accumulating usage time per volume
        for (UsageVolumeVO usageVol : usageUsageVols) {
            long volId = usageVol.getId();
            Long doId = usageVol.getDiskOfferingId();
            long zoneId = usageVol.getZoneId();
            Long templateId = usageVol.getTemplateId();
            long size = usageVol.getSize();
            String key = "" + volId;

            diskOfferingMap.put(key, new VolInfo(volId, zoneId, doId, templateId, size));

            Date volCreateDate = usageVol.getCreated();
            Date volDeleteDate = usageVol.getDeleted();
            if ((volDeleteDate == null) || volDeleteDate.after(endDate)) {
                volDeleteDate = endDate;
            }

            // clip the start date to the beginning of our aggregation range if the vm has been running for a while
            if (volCreateDate.before(startDate)) {
                volCreateDate = startDate;
            }

            // make sure this is an inclusive check for milliseconds (i.e. use n - m + 1 to find total number of millis to charge)
            long currentDuration = (volDeleteDate.getTime() - volCreateDate.getTime()) + 1;

            updateVolUsageData(usageMap, key, usageVol.getId(), currentDuration);
        }

        // iterate entries directly rather than keySet()+get() to avoid a second lookup per key
        for (Map.Entry<String, Pair<Long, Long>> usageEntry : usageMap.entrySet()) {
            Pair<Long, Long> voltimeInfo = usageEntry.getValue();
            long useTime = voltimeInfo.second().longValue();

            // Only create a usage record if we have a runningTime of bigger than zero.
            if (useTime > 0L) {
                VolInfo info = diskOfferingMap.get(usageEntry.getKey());
                createUsageRecord(UsageTypes.VOLUME, useTime, startDate, endDate, account, info.getVolumeId(), info.getZoneId(), info.getDiskOfferingId(), info.getTemplateId(), info.getSize());
            }
        }

        return true;
    }

    /**
     * Adds duration (ms) to the accumulated time for key, creating the
     * entry (carrying volId) on first sight.
     */
    private static void updateVolUsageData(Map<String, Pair<Long, Long>> usageDataMap, String key, long volId, long duration) {
        Pair<Long, Long> volUsageInfo = usageDataMap.get(key);
        if (volUsageInfo == null) {
            // Long.valueOf instead of the deprecated Long(long) constructor
            volUsageInfo = new Pair<Long, Long>(Long.valueOf(volId), Long.valueOf(duration));
        } else {
            Long runningTime = Long.valueOf(volUsageInfo.second().longValue() + duration);
            volUsageInfo = new Pair<Long, Long>(volUsageInfo.first(), runningTime);
        }
        usageDataMap.put(key, volUsageInfo);
    }

    /**
     * Creates and persists a single VOLUME usage record.
     *
     * @param type        usage type (UsageTypes.VOLUME)
     * @param runningTime accumulated time in milliseconds
     * @param startDate   start of the aggregation range
     * @param endDate     end of the aggregation range
     * @param account     owning account
     * @param volId       id of the volume
     * @param zoneId      zone of the volume
     * @param doId        disk offering id; may be null
     * @param templateId  template id when the volume was created from one; may be null
     * @param size        volume size in bytes
     */
    private static void createUsageRecord(int type, long runningTime, Date startDate, Date endDate, AccountVO account, long volId, long zoneId, Long doId, Long templateId, long size) {
        // Our smallest increment is hourly for now
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Total running time " + runningTime + "ms");
        }

        // convert milliseconds to hours
        float usage = runningTime / 1000f / 60f / 60f;

        DecimalFormat dFormat = new DecimalFormat("#.######");
        String usageDisplay = dFormat.format(usage);

        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Creating Volume usage record for vol: " + volId + ", usage: " + usageDisplay + ", startDate: " + startDate + ", endDate: " + endDate + ", for account: " + account.getId());
        }

        // Create the usage record
        String usageDesc = "Volume Id: "+volId+" usage time";
        if (templateId != null) {
            usageDesc += " (Template: " +templateId+ ")";
        } else if (doId != null) {
            usageDesc += " (DiskOffering: " +doId+ ")";
        }

        UsageVO usageRecord = new UsageVO(zoneId, account.getId(), account.getDomainId(), usageDesc, usageDisplay + " Hrs", type,
                Double.valueOf(usage), null, null, doId, templateId, volId, size, startDate, endDate);
        m_usageDao.persist(usageRecord);
    }

    /** Immutable snapshot of the identifying attributes of a metered volume. */
    private static class VolInfo {
        private final long volId;
        private final long zoneId;
        private final Long diskOfferingId;  // may be null
        private final Long templateId;      // may be null
        private final long size;            // bytes

        public VolInfo(long volId, long zoneId, Long diskOfferingId, Long templateId, long size) {
            this.volId = volId;
            this.zoneId = zoneId;
            this.diskOfferingId = diskOfferingId;
            this.templateId = templateId;
            this.size = size;
        }

        public long getZoneId() {
            return zoneId;
        }

        public long getVolumeId() {
            return volId;
        }

        public Long getDiskOfferingId() {
            return diskOfferingId;
        }

        public Long getTemplateId() {
            return templateId;
        }

        public long getSize() {
            return size;
        }
    }
}