fixing line endings in server

This commit is contained in:
David Nalley 2012-04-07 20:13:10 -04:00
parent 316fa631df
commit 59436be4ee
249 changed files with 23153 additions and 23153 deletions

View File

@ -1,28 +1,28 @@
<?xml version="1.0"?>
<!-- Component wiring for the 2.1 -> 2.2 database migration utility.
     Each <dao> element registers a DAO implementation class under a
     human-readable lookup name for use by Db21to22MigrationUtil. -->
<components.xml>
<migration class="com.cloud.migration.Db21to22MigrationUtil">
<dao name="host pod dao" class="com.cloud.dc.dao.HostPodDaoImpl" />
<dao name="Datacenter dao" class="com.cloud.dc.dao.DataCenterDaoImpl" />
<dao name="Configuration dao" class="com.cloud.configuration.dao.ConfigurationDaoImpl" />
<dao name="Cluster dao" class="com.cloud.dc.dao.ClusterDaoImpl" />
<dao name="Host dao" class="com.cloud.host.dao.HostDaoImpl" />
<!-- NOTE(review): StoragePoolDaoImpl is registered twice in this file
     (here as "StoragePool dao" and again below as "Storage pool dao").
     Confirm whether both names are actually looked up before removing either. -->
<dao name="StoragePool dao" class="com.cloud.storage.dao.StoragePoolDaoImpl" />
<dao name="Domain dao" class="com.cloud.domain.dao.DomainDaoImpl" />
<dao name="DiskOffering20 dao" class="com.cloud.migration.DiskOffering20DaoImpl" />
<dao name="DiskOffering21 dao" class="com.cloud.migration.DiskOffering21DaoImpl" />
<dao name="ServiceOffering20 dao" class="com.cloud.migration.ServiceOffering20DaoImpl" />
<dao name="ServiceOffering21 dao" class="com.cloud.migration.ServiceOffering21DaoImpl" />
<dao name="Console Proxy dao" class="com.cloud.vm.dao.ConsoleProxyDaoImpl" />
<dao name="Secondary Storage VM dao" class="com.cloud.vm.dao.SecondaryStorageVmDaoImpl" />
<dao name="VM Instance dao" class="com.cloud.vm.dao.VMInstanceDaoImpl" />
<dao name="Volume dao" class="com.cloud.storage.dao.VolumeDaoImpl" />
<dao name="User VM dao" class="com.cloud.vm.dao.UserVmDaoImpl" />
<dao name="domain router dao" class="com.cloud.vm.dao.DomainRouterDaoImpl" />
<!-- Duplicate registration of StoragePoolDaoImpl — see note above. -->
<dao name="Storage pool dao" class="com.cloud.storage.dao.StoragePoolDaoImpl" />
<dao name="Account dao" class="com.cloud.user.dao.AccountDaoImpl" />
<!-- NOTE(review): "Resouce" is a typo for "Resource"; left unchanged because the
     name attribute may serve as a lookup key — confirm callers before renaming. -->
<dao name="Resouce count dao" class="com.cloud.configuration.dao.ResourceCountDaoImpl" />
<dao name="Instance group dao" class="com.cloud.vm.dao.InstanceGroupDaoImpl" />
<dao name="Instance group vm map dao" class="com.cloud.vm.dao.InstanceGroupVMMapDaoImpl" />
</migration>
</components.xml>
<?xml version="1.0"?>
<!-- Component wiring for the 2.1 -> 2.2 database migration utility.
     Each <dao> element registers a DAO implementation class under a
     human-readable lookup name for use by Db21to22MigrationUtil. -->
<components.xml>
<migration class="com.cloud.migration.Db21to22MigrationUtil">
<dao name="host pod dao" class="com.cloud.dc.dao.HostPodDaoImpl" />
<dao name="Datacenter dao" class="com.cloud.dc.dao.DataCenterDaoImpl" />
<dao name="Configuration dao" class="com.cloud.configuration.dao.ConfigurationDaoImpl" />
<dao name="Cluster dao" class="com.cloud.dc.dao.ClusterDaoImpl" />
<dao name="Host dao" class="com.cloud.host.dao.HostDaoImpl" />
<!-- NOTE(review): StoragePoolDaoImpl is registered twice in this file
     (here as "StoragePool dao" and again below as "Storage pool dao").
     Confirm whether both names are actually looked up before removing either. -->
<dao name="StoragePool dao" class="com.cloud.storage.dao.StoragePoolDaoImpl" />
<dao name="Domain dao" class="com.cloud.domain.dao.DomainDaoImpl" />
<dao name="DiskOffering20 dao" class="com.cloud.migration.DiskOffering20DaoImpl" />
<dao name="DiskOffering21 dao" class="com.cloud.migration.DiskOffering21DaoImpl" />
<dao name="ServiceOffering20 dao" class="com.cloud.migration.ServiceOffering20DaoImpl" />
<dao name="ServiceOffering21 dao" class="com.cloud.migration.ServiceOffering21DaoImpl" />
<dao name="Console Proxy dao" class="com.cloud.vm.dao.ConsoleProxyDaoImpl" />
<dao name="Secondary Storage VM dao" class="com.cloud.vm.dao.SecondaryStorageVmDaoImpl" />
<dao name="VM Instance dao" class="com.cloud.vm.dao.VMInstanceDaoImpl" />
<dao name="Volume dao" class="com.cloud.storage.dao.VolumeDaoImpl" />
<dao name="User VM dao" class="com.cloud.vm.dao.UserVmDaoImpl" />
<dao name="domain router dao" class="com.cloud.vm.dao.DomainRouterDaoImpl" />
<!-- Duplicate registration of StoragePoolDaoImpl — see note above. -->
<dao name="Storage pool dao" class="com.cloud.storage.dao.StoragePoolDaoImpl" />
<dao name="Account dao" class="com.cloud.user.dao.AccountDaoImpl" />
<!-- NOTE(review): "Resouce" is a typo for "Resource"; left unchanged because the
     name attribute may serve as a lookup key — confirm callers before renaming. -->
<dao name="Resouce count dao" class="com.cloud.configuration.dao.ResourceCountDaoImpl" />
<dao name="Instance group dao" class="com.cloud.vm.dao.InstanceGroupDaoImpl" />
<dao name="Instance group vm map dao" class="com.cloud.vm.dao.InstanceGroupVMMapDaoImpl" />
</migration>
</components.xml>

View File

@ -10,8 +10,8 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.agent.manager;
package com.cloud.agent.manager;
import java.nio.channels.ClosedChannelException;
import org.apache.log4j.Logger;
@ -22,57 +22,57 @@ import com.cloud.agent.transport.Request;
import com.cloud.exception.AgentUnavailableException;
import com.cloud.host.Status;
import com.cloud.utils.nio.Link;
/**
* ConnectedAgentAttache implements an direct connection to this management server.
*/
public class ConnectedAgentAttache extends AgentAttache {
private static final Logger s_logger = Logger.getLogger(ConnectedAgentAttache.class);
protected Link _link;
public ConnectedAgentAttache(AgentManagerImpl agentMgr, final long id, final Link link, boolean maintenance) {
super(agentMgr, id, maintenance);
_link = link;
}
@Override
public synchronized void send(Request req) throws AgentUnavailableException {
try {
_link.send(req.toBytes());
} catch (ClosedChannelException e) {
throw new AgentUnavailableException("Channel is closed", _id);
}
}
@Override
public synchronized boolean isClosed() {
return _link == null;
}
@Override
public void disconnect(final Status state) {
synchronized (this) {
s_logger.debug("Processing Disconnect.");
if (_link != null) {
_link.close();
_link.terminated();
}
_link = null;
}
cancelAllCommands(state, true);
_requests.clear();
}
@Override
public boolean equals(Object obj) {
try {
ConnectedAgentAttache that = (ConnectedAgentAttache) obj;
return super.equals(obj) && this._link == that._link && this._link != null;
} catch (ClassCastException e) {
/**
* ConnectedAgentAttache implements an direct connection to this management server.
*/
public class ConnectedAgentAttache extends AgentAttache {
private static final Logger s_logger = Logger.getLogger(ConnectedAgentAttache.class);
protected Link _link;
public ConnectedAgentAttache(AgentManagerImpl agentMgr, final long id, final Link link, boolean maintenance) {
super(agentMgr, id, maintenance);
_link = link;
}
@Override
public synchronized void send(Request req) throws AgentUnavailableException {
try {
_link.send(req.toBytes());
} catch (ClosedChannelException e) {
throw new AgentUnavailableException("Channel is closed", _id);
}
}
@Override
public synchronized boolean isClosed() {
return _link == null;
}
@Override
public void disconnect(final Status state) {
synchronized (this) {
s_logger.debug("Processing Disconnect.");
if (_link != null) {
_link.close();
_link.terminated();
}
_link = null;
}
cancelAllCommands(state, true);
_requests.clear();
}
@Override
public boolean equals(Object obj) {
try {
ConnectedAgentAttache that = (ConnectedAgentAttache) obj;
return super.equals(obj) && this._link == that._link && this._link != null;
} catch (ClassCastException e) {
assert false : "Who's sending an " + obj.getClass().getSimpleName() + " to " + this.getClass().getSimpleName() + ".equals()? ";
return false;
}
return false;
}
}
@Override
@ -94,4 +94,4 @@ public class ConnectedAgentAttache extends AgentAttache {
public void updatePassword(Command newPassword) {
throw new IllegalStateException("Should not have come here ");
}
}
}

View File

@ -10,8 +10,8 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.agent.manager.allocator.impl;
package com.cloud.agent.manager.allocator.impl;
import java.util.ArrayList;
import java.util.List;
import javax.ejb.Local;
@ -23,22 +23,22 @@ import com.cloud.host.Host;
import com.cloud.host.Host.Type;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachineProfile;
@Local(value={HostAllocator.class})
public class FirstFitRoutingAllocator extends FirstFitAllocator {
@Override
@Local(value={HostAllocator.class})
public class FirstFitRoutingAllocator extends FirstFitAllocator {
@Override
public List<Host> allocateTo(VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, Type type,
ExcludeList avoid, int returnUpTo) {
try {
NDC.push("FirstFitRoutingAllocator");
if (type != Host.Type.Routing) {
// FirstFitRoutingAllocator is to find space on routing capable hosts only
return new ArrayList<Host>();
}
//all hosts should be of type routing anyway.
return super.allocateTo(vmProfile, plan, type, avoid, returnUpTo);
} finally {
NDC.pop();
}
}
}
try {
NDC.push("FirstFitRoutingAllocator");
if (type != Host.Type.Routing) {
// FirstFitRoutingAllocator is to find space on routing capable hosts only
return new ArrayList<Host>();
}
//all hosts should be of type routing anyway.
return super.allocateTo(vmProfile, plan, type, avoid, returnUpTo);
} finally {
NDC.pop();
}
}
}

View File

@ -10,8 +10,8 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.alert;
package com.cloud.alert;
import java.io.UnsupportedEncodingException;
import java.text.DecimalFormat;
import java.util.ArrayList;
@ -71,112 +71,112 @@ import com.cloud.utils.db.SearchCriteria;
import com.sun.mail.smtp.SMTPMessage;
import com.sun.mail.smtp.SMTPSSLTransport;
import com.sun.mail.smtp.SMTPTransport;
@Local(value={AlertManager.class})
public class AlertManagerImpl implements AlertManager {
private static final Logger s_logger = Logger.getLogger(AlertManagerImpl.class.getName());
private static final long INITIAL_CAPACITY_CHECK_DELAY = 30L * 1000L; // thirty seconds expressed in milliseconds
private static final DecimalFormat _dfPct = new DecimalFormat("###.##");
private static final DecimalFormat _dfWhole = new DecimalFormat("########");
private String _name = null;
private EmailAlert _emailAlert;
@Inject private AlertDao _alertDao;
@Local(value={AlertManager.class})
public class AlertManagerImpl implements AlertManager {
private static final Logger s_logger = Logger.getLogger(AlertManagerImpl.class.getName());
private static final long INITIAL_CAPACITY_CHECK_DELAY = 30L * 1000L; // thirty seconds expressed in milliseconds
private static final DecimalFormat _dfPct = new DecimalFormat("###.##");
private static final DecimalFormat _dfWhole = new DecimalFormat("########");
private String _name = null;
private EmailAlert _emailAlert;
@Inject private AlertDao _alertDao;
@Inject private HostDao _hostDao;
@Inject protected StorageManager _storageMgr;
@Inject protected CapacityManager _capacityMgr;
@Inject private CapacityDao _capacityDao;
@Inject private DataCenterDao _dcDao;
@Inject protected CapacityManager _capacityMgr;
@Inject private CapacityDao _capacityDao;
@Inject private DataCenterDao _dcDao;
@Inject private HostPodDao _podDao;
@Inject private ClusterDao _clusterDao;
@Inject private VolumeDao _volumeDao;
@Inject private IPAddressDao _publicIPAddressDao;
@Inject private DataCenterIpAddressDao _privateIPAddressDao;
@Inject private ClusterDao _clusterDao;
@Inject private VolumeDao _volumeDao;
@Inject private IPAddressDao _publicIPAddressDao;
@Inject private DataCenterIpAddressDao _privateIPAddressDao;
@Inject private StoragePoolDao _storagePoolDao;
@Inject private ConfigurationDao _configDao;
@Inject private ResourceManager _resourceMgr;
private Timer _timer = null;
private float _cpuOverProvisioningFactor = 1;
private long _capacityCheckPeriod = 60L * 60L * 1000L; // one hour by default
private double _memoryCapacityThreshold = 0.75;
private double _cpuCapacityThreshold = 0.75;
private double _storageCapacityThreshold = 0.75;
private double _storageAllocCapacityThreshold = 0.75;
private double _publicIPCapacityThreshold = 0.75;
@Inject private ResourceManager _resourceMgr;
private Timer _timer = null;
private float _cpuOverProvisioningFactor = 1;
private long _capacityCheckPeriod = 60L * 60L * 1000L; // one hour by default
private double _memoryCapacityThreshold = 0.75;
private double _cpuCapacityThreshold = 0.75;
private double _storageCapacityThreshold = 0.75;
private double _storageAllocCapacityThreshold = 0.75;
private double _publicIPCapacityThreshold = 0.75;
private double _privateIPCapacityThreshold = 0.75;
private double _secondaryStorageCapacityThreshold = 0.75;
private double _vlanCapacityThreshold = 0.75;
private double _directNetworkPublicIpCapacityThreshold = 0.75;
private double _localStorageCapacityThreshold = 0.75;
Map<Short,Double> _capacityTypeThresholdMap = new HashMap<Short, Double>();
@Override
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
_name = name;
ComponentLocator locator = ComponentLocator.getCurrentLocator();
ConfigurationDao configDao = locator.getDao(ConfigurationDao.class);
if (configDao == null) {
s_logger.error("Unable to get the configuration dao.");
return false;
}
Map<String, String> configs = configDao.getConfiguration("management-server", params);
// set up the email system for alerts
String emailAddressList = configs.get("alert.email.addresses");
String[] emailAddresses = null;
if (emailAddressList != null) {
emailAddresses = emailAddressList.split(",");
}
String smtpHost = configs.get("alert.smtp.host");
int smtpPort = NumbersUtil.parseInt(configs.get("alert.smtp.port"), 25);
String useAuthStr = configs.get("alert.smtp.useAuth");
boolean useAuth = ((useAuthStr == null) ? false : Boolean.parseBoolean(useAuthStr));
String smtpUsername = configs.get("alert.smtp.username");
String smtpPassword = configs.get("alert.smtp.password");
String emailSender = configs.get("alert.email.sender");
String smtpDebugStr = configs.get("alert.smtp.debug");
boolean smtpDebug = false;
if (smtpDebugStr != null) {
smtpDebug = Boolean.parseBoolean(smtpDebugStr);
}
_emailAlert = new EmailAlert(emailAddresses, smtpHost, smtpPort, useAuth, smtpUsername, smtpPassword, emailSender, smtpDebug);
Map<Short,Double> _capacityTypeThresholdMap = new HashMap<Short, Double>();
@Override
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
_name = name;
ComponentLocator locator = ComponentLocator.getCurrentLocator();
ConfigurationDao configDao = locator.getDao(ConfigurationDao.class);
if (configDao == null) {
s_logger.error("Unable to get the configuration dao.");
return false;
}
Map<String, String> configs = configDao.getConfiguration("management-server", params);
// set up the email system for alerts
String emailAddressList = configs.get("alert.email.addresses");
String[] emailAddresses = null;
if (emailAddressList != null) {
emailAddresses = emailAddressList.split(",");
}
String smtpHost = configs.get("alert.smtp.host");
int smtpPort = NumbersUtil.parseInt(configs.get("alert.smtp.port"), 25);
String useAuthStr = configs.get("alert.smtp.useAuth");
boolean useAuth = ((useAuthStr == null) ? false : Boolean.parseBoolean(useAuthStr));
String smtpUsername = configs.get("alert.smtp.username");
String smtpPassword = configs.get("alert.smtp.password");
String emailSender = configs.get("alert.email.sender");
String smtpDebugStr = configs.get("alert.smtp.debug");
boolean smtpDebug = false;
if (smtpDebugStr != null) {
smtpDebug = Boolean.parseBoolean(smtpDebugStr);
}
_emailAlert = new EmailAlert(emailAddresses, smtpHost, smtpPort, useAuth, smtpUsername, smtpPassword, emailSender, smtpDebug);
String storageCapacityThreshold = _configDao.getValue(Config.StorageCapacityThreshold.key());
String cpuCapacityThreshold = _configDao.getValue(Config.CPUCapacityThreshold.key());
String memoryCapacityThreshold = _configDao.getValue(Config.MemoryCapacityThreshold.key());
String storageAllocCapacityThreshold = _configDao.getValue(Config.StorageAllocatedCapacityThreshold.key());
String publicIPCapacityThreshold = _configDao.getValue(Config.PublicIpCapacityThreshold.key());
String storageCapacityThreshold = _configDao.getValue(Config.StorageCapacityThreshold.key());
String cpuCapacityThreshold = _configDao.getValue(Config.CPUCapacityThreshold.key());
String memoryCapacityThreshold = _configDao.getValue(Config.MemoryCapacityThreshold.key());
String storageAllocCapacityThreshold = _configDao.getValue(Config.StorageAllocatedCapacityThreshold.key());
String publicIPCapacityThreshold = _configDao.getValue(Config.PublicIpCapacityThreshold.key());
String privateIPCapacityThreshold = _configDao.getValue(Config.PrivateIpCapacityThreshold.key());
String secondaryStorageCapacityThreshold = _configDao.getValue(Config.SecondaryStorageCapacityThreshold.key());
String vlanCapacityThreshold = _configDao.getValue(Config.VlanCapacityThreshold.key());
String directNetworkPublicIpCapacityThreshold = _configDao.getValue(Config.DirectNetworkPublicIpCapacityThreshold.key());
String localStorageCapacityThreshold = _configDao.getValue(Config.LocalStorageCapacityThreshold.key());
if (storageCapacityThreshold != null) {
_storageCapacityThreshold = Double.parseDouble(storageCapacityThreshold);
}
if (storageAllocCapacityThreshold != null) {
_storageAllocCapacityThreshold = Double.parseDouble(storageAllocCapacityThreshold);
}
if (cpuCapacityThreshold != null) {
_cpuCapacityThreshold = Double.parseDouble(cpuCapacityThreshold);
}
if (memoryCapacityThreshold != null) {
_memoryCapacityThreshold = Double.parseDouble(memoryCapacityThreshold);
}
if (publicIPCapacityThreshold != null) {
_publicIPCapacityThreshold = Double.parseDouble(publicIPCapacityThreshold);
}
if (privateIPCapacityThreshold != null) {
_privateIPCapacityThreshold = Double.parseDouble(privateIPCapacityThreshold);
String localStorageCapacityThreshold = _configDao.getValue(Config.LocalStorageCapacityThreshold.key());
if (storageCapacityThreshold != null) {
_storageCapacityThreshold = Double.parseDouble(storageCapacityThreshold);
}
if (storageAllocCapacityThreshold != null) {
_storageAllocCapacityThreshold = Double.parseDouble(storageAllocCapacityThreshold);
}
if (cpuCapacityThreshold != null) {
_cpuCapacityThreshold = Double.parseDouble(cpuCapacityThreshold);
}
if (memoryCapacityThreshold != null) {
_memoryCapacityThreshold = Double.parseDouble(memoryCapacityThreshold);
}
if (publicIPCapacityThreshold != null) {
_publicIPCapacityThreshold = Double.parseDouble(publicIPCapacityThreshold);
}
if (privateIPCapacityThreshold != null) {
_privateIPCapacityThreshold = Double.parseDouble(privateIPCapacityThreshold);
}
if (secondaryStorageCapacityThreshold != null) {
_secondaryStorageCapacityThreshold = Double.parseDouble(secondaryStorageCapacityThreshold);
@ -192,7 +192,7 @@ public class AlertManagerImpl implements AlertManager {
}
_capacityTypeThresholdMap.put(Capacity.CAPACITY_TYPE_STORAGE, _storageCapacityThreshold);
_capacityTypeThresholdMap.put(Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED, _storageAllocCapacityThreshold);
_capacityTypeThresholdMap.put(Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED, _storageAllocCapacityThreshold);
_capacityTypeThresholdMap.put(Capacity.CAPACITY_TYPE_CPU, _cpuCapacityThreshold);
_capacityTypeThresholdMap.put(Capacity.CAPACITY_TYPE_MEMORY, _memoryCapacityThreshold);
_capacityTypeThresholdMap.put(Capacity.CAPACITY_TYPE_VIRTUAL_NETWORK_PUBLIC_IP, _publicIPCapacityThreshold);
@ -202,75 +202,75 @@ public class AlertManagerImpl implements AlertManager {
_capacityTypeThresholdMap.put(Capacity.CAPACITY_TYPE_DIRECT_ATTACHED_PUBLIC_IP, _directNetworkPublicIpCapacityThreshold);
_capacityTypeThresholdMap.put(Capacity.CAPACITY_TYPE_LOCAL_STORAGE, _localStorageCapacityThreshold);
String capacityCheckPeriodStr = configs.get("capacity.check.period");
if (capacityCheckPeriodStr != null) {
String capacityCheckPeriodStr = configs.get("capacity.check.period");
if (capacityCheckPeriodStr != null) {
_capacityCheckPeriod = Long.parseLong(capacityCheckPeriodStr);
if(_capacityCheckPeriod <= 0)
_capacityCheckPeriod = Long.parseLong(Config.CapacityCheckPeriod.getDefaultValue());
}
String cpuOverProvisioningFactorStr = configs.get("cpu.overprovisioning.factor");
if (cpuOverProvisioningFactorStr != null) {
_cpuOverProvisioningFactor = NumbersUtil.parseFloat(cpuOverProvisioningFactorStr,1);
if(_cpuOverProvisioningFactor < 1){
_cpuOverProvisioningFactor = 1;
}
}
_timer = new Timer("CapacityChecker");
return true;
}
@Override
public String getName() {
return _name;
}
@Override
public boolean start() {
_timer.schedule(new CapacityChecker(), INITIAL_CAPACITY_CHECK_DELAY, _capacityCheckPeriod);
return true;
}
@Override
public boolean stop() {
_timer.cancel();
return true;
}
@Override
public void clearAlert(short alertType, long dataCenterId, long podId) {
try {
if (_emailAlert != null) {
_emailAlert.clearAlert(alertType, dataCenterId, podId);
}
} catch (Exception ex) {
s_logger.error("Problem clearing email alert", ex);
}
}
@Override
public void sendAlert(short alertType, long dataCenterId, Long podId, String subject, String body) {
// TODO: queue up these messages and send them as one set of issues once a certain number of issues is reached? If that's the case,
// shouldn't we have a type/severity as part of the API so that severe errors get sent right away?
try {
if (_emailAlert != null) {
_emailAlert.sendAlert(alertType, dataCenterId, podId, null, subject, body);
}
} catch (Exception ex) {
s_logger.error("Problem sending email alert", ex);
}
}
@Override @DB
public void recalculateCapacity() {
// FIXME: the right way to do this is to register a listener (see RouterStatsListener, VMSyncListener)
// for the vm sync state. The listener model has connects/disconnects to keep things in sync much better
// than this model right now, so when a VM is started, we update the amount allocated, and when a VM
// is stopped we updated the amount allocated, and when VM sync reports a changed state, we update
// the amount allocated. Hopefully it's limited to 3 entry points and will keep the amount allocated
String cpuOverProvisioningFactorStr = configs.get("cpu.overprovisioning.factor");
if (cpuOverProvisioningFactorStr != null) {
_cpuOverProvisioningFactor = NumbersUtil.parseFloat(cpuOverProvisioningFactorStr,1);
if(_cpuOverProvisioningFactor < 1){
_cpuOverProvisioningFactor = 1;
}
}
_timer = new Timer("CapacityChecker");
return true;
}
@Override
public String getName() {
return _name;
}
@Override
public boolean start() {
_timer.schedule(new CapacityChecker(), INITIAL_CAPACITY_CHECK_DELAY, _capacityCheckPeriod);
return true;
}
@Override
public boolean stop() {
_timer.cancel();
return true;
}
@Override
public void clearAlert(short alertType, long dataCenterId, long podId) {
try {
if (_emailAlert != null) {
_emailAlert.clearAlert(alertType, dataCenterId, podId);
}
} catch (Exception ex) {
s_logger.error("Problem clearing email alert", ex);
}
}
@Override
public void sendAlert(short alertType, long dataCenterId, Long podId, String subject, String body) {
// TODO: queue up these messages and send them as one set of issues once a certain number of issues is reached? If that's the case,
// shouldn't we have a type/severity as part of the API so that severe errors get sent right away?
try {
if (_emailAlert != null) {
_emailAlert.sendAlert(alertType, dataCenterId, podId, null, subject, body);
}
} catch (Exception ex) {
s_logger.error("Problem sending email alert", ex);
}
}
@Override @DB
public void recalculateCapacity() {
// FIXME: the right way to do this is to register a listener (see RouterStatsListener, VMSyncListener)
// for the vm sync state. The listener model has connects/disconnects to keep things in sync much better
// than this model right now, so when a VM is started, we update the amount allocated, and when a VM
// is stopped we updated the amount allocated, and when VM sync reports a changed state, we update
// the amount allocated. Hopefully it's limited to 3 entry points and will keep the amount allocated
// per host accurate.
try {
@ -279,8 +279,8 @@ public class AlertManagerImpl implements AlertManager {
s_logger.debug("recalculating system capacity");
s_logger.debug("Executing cpu/ram capacity update");
}
// Calculate CPU and RAM capacities
// Calculate CPU and RAM capacities
// get all hosts...even if they are not in 'UP' state
List<HostVO> hosts = _resourceMgr.listAllNotInMaintenanceHostsInOneZone(Host.Type.Routing, null);
for (HostVO host : hosts) {
@ -291,9 +291,9 @@ public class AlertManagerImpl implements AlertManager {
s_logger.debug("Done executing cpu/ram capacity update");
s_logger.debug("Executing storage capacity update");
}
// Calculate storage pool capacity
List<StoragePoolVO> storagePools = _storagePoolDao.listAll();
for (StoragePoolVO pool : storagePools) {
// Calculate storage pool capacity
List<StoragePoolVO> storagePools = _storagePoolDao.listAll();
for (StoragePoolVO pool : storagePools) {
long disk = _capacityMgr.getAllocatedPoolCapacity(pool, null);
if (pool.isShared()){
_storageMgr.createCapacityEntry(pool, Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED, disk);
@ -305,18 +305,18 @@ public class AlertManagerImpl implements AlertManager {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Done executing storage capacity update");
s_logger.debug("Executing capacity updates for public ip and Vlans");
}
List<DataCenterVO> datacenters = _dcDao.listAll();
for (DataCenterVO datacenter : datacenters) {
long dcId = datacenter.getId();
}
List<DataCenterVO> datacenters = _dcDao.listAll();
for (DataCenterVO datacenter : datacenters) {
long dcId = datacenter.getId();
//NOTE
//What happens if we have multiple vlans? Dashboard currently shows stats
//with no filter based on a vlan
//ideal way would be to remove out the vlan param, and filter only on dcId
//implementing the same
// Calculate new Public IP capacity for Virtual Network
if (datacenter.getNetworkType() == NetworkType.Advanced){
createOrUpdateIpCapacity(dcId, null, CapacityVO.CAPACITY_TYPE_VIRTUAL_NETWORK_PUBLIC_IP);
@ -328,7 +328,7 @@ public class AlertManagerImpl implements AlertManager {
if (datacenter.getNetworkType() == NetworkType.Advanced){
//Calculate VLAN's capacity
createOrUpdateVlanCapacity(dcId);
}
}
}
if (s_logger.isDebugEnabled()) {
@ -336,23 +336,23 @@ public class AlertManagerImpl implements AlertManager {
s_logger.debug("Executing capacity updates for private ip");
}
// Calculate new Private IP capacity
List<HostPodVO> pods = _podDao.listAll();
for (HostPodVO pod : pods) {
long podId = pod.getId();
long dcId = pod.getDataCenterId();
// Calculate new Private IP capacity
List<HostPodVO> pods = _podDao.listAll();
for (HostPodVO pod : pods) {
long podId = pod.getId();
long dcId = pod.getDataCenterId();
createOrUpdateIpCapacity(dcId, podId, CapacityVO.CAPACITY_TYPE_PRIVATE_IP);
createOrUpdateIpCapacity(dcId, podId, CapacityVO.CAPACITY_TYPE_PRIVATE_IP);
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("Done executing capacity updates for private ip");
s_logger.debug("Done recalculating system capacity");
}
}
} catch (Throwable t) {
s_logger.error("Caught exception in recalculating capacity", t);
}
}
}
private void createOrUpdateVlanCapacity(long dcId) {
@ -416,19 +416,19 @@ public class AlertManagerImpl implements AlertManager {
_capacityDao.update(capacity.getId(), capacity);
}
}
class CapacityChecker extends TimerTask {
@Override
public void run() {
}
class CapacityChecker extends TimerTask {
@Override
public void run() {
try {
s_logger.debug("Running Capacity Checker ... ");
checkForAlerts();
s_logger.debug("Done running Capacity Checker ... ");
} catch (Throwable t) {
s_logger.error("Exception in CapacityChecker", t);
}
}
s_logger.debug("Done running Capacity Checker ... ");
} catch (Throwable t) {
s_logger.error("Exception in CapacityChecker", t);
}
}
}
@ -659,142 +659,142 @@ public class AlertManagerImpl implements AlertManager {
clusterCapacityTypes.add(Capacity.CAPACITY_TYPE_LOCAL_STORAGE);
return clusterCapacityTypes;
}
class EmailAlert {
private Session _smtpSession;
private InternetAddress[] _recipientList;
private final String _smtpHost;
private int _smtpPort = -1;
private boolean _smtpUseAuth = false;
private final String _smtpUsername;
private final String _smtpPassword;
private final String _emailSender;
public EmailAlert(String[] recipientList, String smtpHost, int smtpPort, boolean smtpUseAuth, final String smtpUsername, final String smtpPassword, String emailSender, boolean smtpDebug) {
if (recipientList != null) {
_recipientList = new InternetAddress[recipientList.length];
for (int i = 0; i < recipientList.length; i++) {
try {
_recipientList[i] = new InternetAddress(recipientList[i], recipientList[i]);
} catch (Exception ex) {
s_logger.error("Exception creating address for: " + recipientList[i], ex);
}
}
}
_smtpHost = smtpHost;
_smtpPort = smtpPort;
_smtpUseAuth = smtpUseAuth;
_smtpUsername = smtpUsername;
_smtpPassword = smtpPassword;
_emailSender = emailSender;
if (_smtpHost != null) {
Properties smtpProps = new Properties();
smtpProps.put("mail.smtp.host", smtpHost);
smtpProps.put("mail.smtp.port", smtpPort);
smtpProps.put("mail.smtp.auth", ""+smtpUseAuth);
if (smtpUsername != null) {
smtpProps.put("mail.smtp.user", smtpUsername);
}
smtpProps.put("mail.smtps.host", smtpHost);
smtpProps.put("mail.smtps.port", smtpPort);
smtpProps.put("mail.smtps.auth", ""+smtpUseAuth);
if (smtpUsername != null) {
smtpProps.put("mail.smtps.user", smtpUsername);
}
if ((smtpUsername != null) && (smtpPassword != null)) {
_smtpSession = Session.getInstance(smtpProps, new Authenticator() {
@Override
protected PasswordAuthentication getPasswordAuthentication() {
return new PasswordAuthentication(smtpUsername, smtpPassword);
}
});
} else {
_smtpSession = Session.getInstance(smtpProps);
}
_smtpSession.setDebug(smtpDebug);
} else {
_smtpSession = null;
}
}
// TODO: make sure this handles SSL transport (useAuth is true) and regular
public void sendAlert(short alertType, long dataCenterId, Long podId, Long clusterId, String subject, String content) throws MessagingException, UnsupportedEncodingException {
AlertVO alert = null;
if ((alertType != AlertManager.ALERT_TYPE_HOST) &&
(alertType != AlertManager.ALERT_TYPE_USERVM) &&
(alertType != AlertManager.ALERT_TYPE_DOMAIN_ROUTER) &&
}
class EmailAlert {
private Session _smtpSession;
private InternetAddress[] _recipientList;
private final String _smtpHost;
private int _smtpPort = -1;
private boolean _smtpUseAuth = false;
private final String _smtpUsername;
private final String _smtpPassword;
private final String _emailSender;
public EmailAlert(String[] recipientList, String smtpHost, int smtpPort, boolean smtpUseAuth, final String smtpUsername, final String smtpPassword, String emailSender, boolean smtpDebug) {
if (recipientList != null) {
_recipientList = new InternetAddress[recipientList.length];
for (int i = 0; i < recipientList.length; i++) {
try {
_recipientList[i] = new InternetAddress(recipientList[i], recipientList[i]);
} catch (Exception ex) {
s_logger.error("Exception creating address for: " + recipientList[i], ex);
}
}
}
_smtpHost = smtpHost;
_smtpPort = smtpPort;
_smtpUseAuth = smtpUseAuth;
_smtpUsername = smtpUsername;
_smtpPassword = smtpPassword;
_emailSender = emailSender;
if (_smtpHost != null) {
Properties smtpProps = new Properties();
smtpProps.put("mail.smtp.host", smtpHost);
smtpProps.put("mail.smtp.port", smtpPort);
smtpProps.put("mail.smtp.auth", ""+smtpUseAuth);
if (smtpUsername != null) {
smtpProps.put("mail.smtp.user", smtpUsername);
}
smtpProps.put("mail.smtps.host", smtpHost);
smtpProps.put("mail.smtps.port", smtpPort);
smtpProps.put("mail.smtps.auth", ""+smtpUseAuth);
if (smtpUsername != null) {
smtpProps.put("mail.smtps.user", smtpUsername);
}
if ((smtpUsername != null) && (smtpPassword != null)) {
_smtpSession = Session.getInstance(smtpProps, new Authenticator() {
@Override
protected PasswordAuthentication getPasswordAuthentication() {
return new PasswordAuthentication(smtpUsername, smtpPassword);
}
});
} else {
_smtpSession = Session.getInstance(smtpProps);
}
_smtpSession.setDebug(smtpDebug);
} else {
_smtpSession = null;
}
}
// TODO: make sure this handles SSL transport (useAuth is true) and regular
public void sendAlert(short alertType, long dataCenterId, Long podId, Long clusterId, String subject, String content) throws MessagingException, UnsupportedEncodingException {
AlertVO alert = null;
if ((alertType != AlertManager.ALERT_TYPE_HOST) &&
(alertType != AlertManager.ALERT_TYPE_USERVM) &&
(alertType != AlertManager.ALERT_TYPE_DOMAIN_ROUTER) &&
(alertType != AlertManager.ALERT_TYPE_CONSOLE_PROXY) &&
(alertType != AlertManager.ALERT_TYPE_SSVM) &&
(alertType != AlertManager.ALERT_TYPE_STORAGE_MISC) &&
(alertType != AlertManager.ALERT_TYPE_MANAGMENT_NODE)) {
alert = _alertDao.getLastAlert(alertType, dataCenterId, podId, clusterId);
}
if (alert == null) {
// set up a new alert
AlertVO newAlert = new AlertVO();
newAlert.setType(alertType);
(alertType != AlertManager.ALERT_TYPE_SSVM) &&
(alertType != AlertManager.ALERT_TYPE_STORAGE_MISC) &&
(alertType != AlertManager.ALERT_TYPE_MANAGMENT_NODE)) {
alert = _alertDao.getLastAlert(alertType, dataCenterId, podId, clusterId);
}
if (alert == null) {
// set up a new alert
AlertVO newAlert = new AlertVO();
newAlert.setType(alertType);
newAlert.setSubject(subject);
newAlert.setClusterId(clusterId);
newAlert.setPodId(podId);
newAlert.setDataCenterId(dataCenterId);
newAlert.setSentCount(1); // initialize sent count to 1 since we are now sending an alert
newAlert.setLastSent(new Date());
_alertDao.persist(newAlert);
} else {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Have already sent: " + alert.getSentCount() + " emails for alert type '" + alertType + "' -- skipping send email");
}
return;
}
if (_smtpSession != null) {
SMTPMessage msg = new SMTPMessage(_smtpSession);
msg.setSender(new InternetAddress(_emailSender, _emailSender));
msg.setFrom(new InternetAddress(_emailSender, _emailSender));
for (InternetAddress address : _recipientList) {
msg.addRecipient(RecipientType.TO, address);
}
msg.setSubject(subject);
msg.setSentDate(new Date());
msg.setContent(content, "text/plain");
msg.saveChanges();
SMTPTransport smtpTrans = null;
if (_smtpUseAuth) {
smtpTrans = new SMTPSSLTransport(_smtpSession, new URLName("smtp", _smtpHost, _smtpPort, null, _smtpUsername, _smtpPassword));
} else {
smtpTrans = new SMTPTransport(_smtpSession, new URLName("smtp", _smtpHost, _smtpPort, null, _smtpUsername, _smtpPassword));
}
smtpTrans.connect();
smtpTrans.sendMessage(msg, msg.getAllRecipients());
smtpTrans.close();
}
}
public void clearAlert(short alertType, long dataCenterId, Long podId) {
if (alertType != -1) {
AlertVO alert = _alertDao.getLastAlert(alertType, dataCenterId, podId, null);
if (alert != null) {
AlertVO updatedAlert = _alertDao.createForUpdate();
updatedAlert.setResolved(new Date());
_alertDao.update(alert.getId(), updatedAlert);
}
}
}
}
/** Formats a fractional value (e.g. 0.25) as a percentage string via _dfPct. */
private static String formatPercent(double percentage) {
    final double pct = percentage * 100;
    return _dfPct.format(pct);
}
/** Converts a byte count to megabytes and formats it via _dfWhole. */
private static String formatBytesToMegabytes(double bytes) {
    final double megabytes = bytes / (1024.0 * 1024.0);
    return _dfWhole.format(megabytes);
}
}
newAlert.setClusterId(clusterId);
newAlert.setPodId(podId);
newAlert.setDataCenterId(dataCenterId);
newAlert.setSentCount(1); // initialize sent count to 1 since we are now sending an alert
newAlert.setLastSent(new Date());
_alertDao.persist(newAlert);
} else {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Have already sent: " + alert.getSentCount() + " emails for alert type '" + alertType + "' -- skipping send email");
}
return;
}
if (_smtpSession != null) {
SMTPMessage msg = new SMTPMessage(_smtpSession);
msg.setSender(new InternetAddress(_emailSender, _emailSender));
msg.setFrom(new InternetAddress(_emailSender, _emailSender));
for (InternetAddress address : _recipientList) {
msg.addRecipient(RecipientType.TO, address);
}
msg.setSubject(subject);
msg.setSentDate(new Date());
msg.setContent(content, "text/plain");
msg.saveChanges();
SMTPTransport smtpTrans = null;
if (_smtpUseAuth) {
smtpTrans = new SMTPSSLTransport(_smtpSession, new URLName("smtp", _smtpHost, _smtpPort, null, _smtpUsername, _smtpPassword));
} else {
smtpTrans = new SMTPTransport(_smtpSession, new URLName("smtp", _smtpHost, _smtpPort, null, _smtpUsername, _smtpPassword));
}
smtpTrans.connect();
smtpTrans.sendMessage(msg, msg.getAllRecipients());
smtpTrans.close();
}
}
public void clearAlert(short alertType, long dataCenterId, Long podId) {
if (alertType != -1) {
AlertVO alert = _alertDao.getLastAlert(alertType, dataCenterId, podId, null);
if (alert != null) {
AlertVO updatedAlert = _alertDao.createForUpdate();
updatedAlert.setResolved(new Date());
_alertDao.update(alert.getId(), updatedAlert);
}
}
}
}
private static String formatPercent(double percentage) {
return _dfPct.format(percentage*100);
}
private static String formatBytesToMegabytes(double bytes) {
double megaBytes = (bytes / (1024 * 1024));
return _dfWhole.format(megaBytes);
}
}

View File

@ -10,217 +10,217 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.alert;
import java.util.Map;
import javax.ejb.Local;
import javax.naming.ConfigurationException;
import org.apache.log4j.Logger;
import com.cloud.alert.AlertAdapter;
import com.cloud.alert.AlertManager;
import com.cloud.consoleproxy.ConsoleProxyAlertEventArgs;
import com.cloud.consoleproxy.ConsoleProxyManager;
import com.cloud.dc.DataCenterVO;
import com.cloud.dc.dao.DataCenterDao;
import com.cloud.utils.component.ComponentLocator;
import com.cloud.utils.events.SubscriptionMgr;
import com.cloud.vm.ConsoleProxyVO;
import com.cloud.vm.dao.ConsoleProxyDao;
@Local(value=AlertAdapter.class)
public class ConsoleProxyAlertAdapter implements AlertAdapter {
    private static final Logger s_logger = Logger.getLogger(ConsoleProxyAlertAdapter.class);

    private AlertManager _alertMgr;
    private String _name;
    private DataCenterDao _dcDao;
    private ConsoleProxyDao _consoleProxyDao;

    /**
     * Subscription callback (registered in configure()) that translates
     * console proxy lifecycle events into alerts via the AlertManager.
     * PROXY_CREATED is only logged; the other event types send an alert.
     * NOTE(review): the long descriptive text is passed as the 4th argument
     * and the short summary as the 5th — verify against the parameter order
     * of AlertManager.sendAlert (subject vs. content).
     */
    public void onProxyAlert(Object sender, ConsoleProxyAlertEventArgs args) {
        if(s_logger.isDebugEnabled())
            s_logger.debug("received console proxy alert");

        DataCenterVO dc = _dcDao.findById(args.getZoneId());
        ConsoleProxyVO proxy = args.getProxy();
        if(proxy == null)
            proxy = _consoleProxyDao.findById(args.getProxyId());

        switch(args.getType()) {
        case ConsoleProxyAlertEventArgs.PROXY_CREATED :
            if(s_logger.isDebugEnabled())
                s_logger.debug("New console proxy created, zone: " + dc.getName() + ", proxy: " +
                    proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress() + ", private IP: " +
                    proxy.getPrivateIpAddress());
            break;
        case ConsoleProxyAlertEventArgs.PROXY_UP :
            if(s_logger.isDebugEnabled())
                s_logger.debug("Console proxy is up, zone: " + dc.getName() + ", proxy: " +
                    proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress() + ", private IP: " +
                    proxy.getPrivateIpAddress());
            _alertMgr.sendAlert(
                AlertManager.ALERT_TYPE_CONSOLE_PROXY,
                args.getZoneId(),
                proxy.getPodIdToDeployIn(),
                "Console proxy up in zone: " + dc.getName() + ", proxy: " + proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress()
                + ", private IP: " + (proxy.getPrivateIpAddress() == null ? "N/A" : proxy.getPrivateIpAddress()),
                "Console proxy up (zone " + dc.getName() + ")"
            );
            break;
        case ConsoleProxyAlertEventArgs.PROXY_DOWN :
            if(s_logger.isDebugEnabled())
                s_logger.debug("Console proxy is down, zone: " + dc.getName() + ", proxy: " +
                    proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress() + ", private IP: " +
                    (proxy.getPrivateIpAddress() == null ? "N/A" : proxy.getPrivateIpAddress()));
            _alertMgr.sendAlert(
                AlertManager.ALERT_TYPE_CONSOLE_PROXY,
                args.getZoneId(),
                proxy.getPodIdToDeployIn(),
                "Console proxy down in zone: " + dc.getName() + ", proxy: " + proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress()
                + ", private IP: " + (proxy.getPrivateIpAddress() == null ? "N/A" : proxy.getPrivateIpAddress()),
                "Console proxy down (zone " + dc.getName() + ")"
            );
            break;
        case ConsoleProxyAlertEventArgs.PROXY_REBOOTED :
            if(s_logger.isDebugEnabled())
                s_logger.debug("Console proxy is rebooted, zone: " + dc.getName() + ", proxy: " +
                    proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress() + ", private IP: " +
                    (proxy.getPrivateIpAddress() == null ? "N/A" : proxy.getPrivateIpAddress()));
            _alertMgr.sendAlert(
                AlertManager.ALERT_TYPE_CONSOLE_PROXY,
                args.getZoneId(),
                proxy.getPodIdToDeployIn(),
                "Console proxy rebooted in zone: " + dc.getName() + ", proxy: " + proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress()
                + ", private IP: " + (proxy.getPrivateIpAddress() == null ? "N/A" : proxy.getPrivateIpAddress()),
                "Console proxy rebooted (zone " + dc.getName() + ")"
            );
            break;
        case ConsoleProxyAlertEventArgs.PROXY_CREATE_FAILURE :
            if(s_logger.isDebugEnabled())
                s_logger.debug("Console proxy creation failure, zone: " + dc.getName() + ", proxy: " +
                    proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress() + ", private IP: " +
                    (proxy.getPrivateIpAddress() == null ? "N/A" : proxy.getPrivateIpAddress()));
            _alertMgr.sendAlert(
                AlertManager.ALERT_TYPE_CONSOLE_PROXY,
                args.getZoneId(),
                proxy.getPodIdToDeployIn(),
                "Console proxy creation failure. zone: " + dc.getName() + ", proxy: " + proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress()
                + ", private IP: " + (proxy.getPrivateIpAddress() == null ? "N/A" : proxy.getPrivateIpAddress())
                + ", error details: " + args.getMessage(),
                "Console proxy creation failure (zone " + dc.getName() + ")"
            );
            break;
        case ConsoleProxyAlertEventArgs.PROXY_START_FAILURE :
            if(s_logger.isDebugEnabled())
                s_logger.debug("Console proxy startup failure, zone: " + dc.getName() + ", proxy: " +
                    proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress() + ", private IP: " +
                    (proxy.getPrivateIpAddress() == null ? "N/A" : proxy.getPrivateIpAddress()));
            _alertMgr.sendAlert(
                AlertManager.ALERT_TYPE_CONSOLE_PROXY,
                args.getZoneId(),
                proxy.getPodIdToDeployIn(),
                "Console proxy startup failure. zone: " + dc.getName() + ", proxy: " + proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress()
                + ", private IP: " + (proxy.getPrivateIpAddress() == null ? "N/A" : proxy.getPrivateIpAddress())
                + ", error details: " + args.getMessage(),
                "Console proxy startup failure (zone " + dc.getName() + ")"
            );
            break;
        case ConsoleProxyAlertEventArgs.PROXY_FIREWALL_ALERT :
            if(s_logger.isDebugEnabled())
                s_logger.debug("Console proxy firewall alert, zone: " + dc.getName() + ", proxy: " +
                    proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress() + ", private IP: " +
                    (proxy.getPrivateIpAddress() == null ? "N/A" : proxy.getPrivateIpAddress()));
            _alertMgr.sendAlert(
                AlertManager.ALERT_TYPE_CONSOLE_PROXY,
                args.getZoneId(),
                proxy.getPodIdToDeployIn(),
                "Failed to open console proxy firewall port. zone: " + dc.getName() + ", proxy: " + proxy.getHostName()
                + ", public IP: " + proxy.getPublicIpAddress()
                + ", private IP: " + (proxy.getPrivateIpAddress() == null ? "N/A" : proxy.getPrivateIpAddress()),
                "Console proxy alert (zone " + dc.getName() + ")"
            );
            break;
        case ConsoleProxyAlertEventArgs.PROXY_STORAGE_ALERT :
            if(s_logger.isDebugEnabled())
                s_logger.debug("Console proxy storage alert, zone: " + dc.getName() + ", proxy: " +
                    proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress() + ", private IP: " +
                    proxy.getPrivateIpAddress() + ", message: " + args.getMessage());
            _alertMgr.sendAlert(
                AlertManager.ALERT_TYPE_STORAGE_MISC,
                args.getZoneId(),
                proxy.getPodIdToDeployIn(),
                "Console proxy storage issue. zone: " + dc.getName() + ", message: " + args.getMessage(),
                "Console proxy alert (zone " + dc.getName() + ")"
            );
            break;
        }
    }

    /**
     * Resolves the DAOs and AlertManager from the ComponentLocator and
     * subscribes onProxyAlert to console proxy events.
     *
     * @throws ConfigurationException if a dependency cannot be resolved or the
     *         event subscription fails.
     */
    @Override
    public boolean configure(String name, Map<String, Object> params)
        throws ConfigurationException {

        if (s_logger.isInfoEnabled())
            s_logger.info("Start configuring console proxy alert manager : " + name);

        // FIX: _name was never assigned, so getName() always returned null.
        _name = name;

        ComponentLocator locator = ComponentLocator.getCurrentLocator();
        _dcDao = locator.getDao(DataCenterDao.class);
        if (_dcDao == null) {
            throw new ConfigurationException("Unable to get " + DataCenterDao.class.getName());
        }
        _consoleProxyDao = locator.getDao(ConsoleProxyDao.class);
        if (_consoleProxyDao == null) {
            throw new ConfigurationException("Unable to get " + ConsoleProxyDao.class.getName());
        }
        _alertMgr = locator.getManager(AlertManager.class);
        if (_alertMgr == null) {
            throw new ConfigurationException("Unable to get " + AlertManager.class.getName());
        }
        try {
            SubscriptionMgr.getInstance().subscribe(ConsoleProxyManager.ALERT_SUBJECT, this, "onProxyAlert");
        } catch (SecurityException e) {
            throw new ConfigurationException("Unable to register console proxy event subscription, exception: " + e);
        } catch (NoSuchMethodException e) {
            throw new ConfigurationException("Unable to register console proxy event subscription, exception: " + e);
        }

        return true;
    }

    @Override
    public String getName() {
        return _name;
    }

    @Override
    public boolean start() {
        return true;
    }

    @Override
    public boolean stop() {
        return true;
    }
}
package com.cloud.alert;
import java.util.Map;
import javax.ejb.Local;
import javax.naming.ConfigurationException;
import org.apache.log4j.Logger;
import com.cloud.alert.AlertAdapter;
import com.cloud.alert.AlertManager;
import com.cloud.consoleproxy.ConsoleProxyAlertEventArgs;
import com.cloud.consoleproxy.ConsoleProxyManager;
import com.cloud.dc.DataCenterVO;
import com.cloud.dc.dao.DataCenterDao;
import com.cloud.utils.component.ComponentLocator;
import com.cloud.utils.events.SubscriptionMgr;
import com.cloud.vm.ConsoleProxyVO;
import com.cloud.vm.dao.ConsoleProxyDao;
@Local(value=AlertAdapter.class)
public class ConsoleProxyAlertAdapter implements AlertAdapter {
private static final Logger s_logger = Logger.getLogger(ConsoleProxyAlertAdapter.class);
private AlertManager _alertMgr;
private String _name;
private DataCenterDao _dcDao;
private ConsoleProxyDao _consoleProxyDao;
public void onProxyAlert(Object sender, ConsoleProxyAlertEventArgs args) {
if(s_logger.isDebugEnabled())
s_logger.debug("received console proxy alert");
DataCenterVO dc = _dcDao.findById(args.getZoneId());
ConsoleProxyVO proxy = args.getProxy();
if(proxy == null)
proxy = _consoleProxyDao.findById(args.getProxyId());
switch(args.getType()) {
case ConsoleProxyAlertEventArgs.PROXY_CREATED :
if(s_logger.isDebugEnabled())
s_logger.debug("New console proxy created, zone: " + dc.getName() + ", proxy: " +
proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress() + ", private IP: " +
proxy.getPrivateIpAddress());
break;
case ConsoleProxyAlertEventArgs.PROXY_UP :
if(s_logger.isDebugEnabled())
s_logger.debug("Console proxy is up, zone: " + dc.getName() + ", proxy: " +
proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress() + ", private IP: " +
proxy.getPrivateIpAddress());
_alertMgr.sendAlert(
AlertManager.ALERT_TYPE_CONSOLE_PROXY,
args.getZoneId(),
proxy.getPodIdToDeployIn(),
"Console proxy up in zone: " + dc.getName() + ", proxy: " + proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress()
+ ", private IP: " + (proxy.getPrivateIpAddress() == null ? "N/A" : proxy.getPrivateIpAddress()),
"Console proxy up (zone " + dc.getName() + ")"
);
break;
case ConsoleProxyAlertEventArgs.PROXY_DOWN :
if(s_logger.isDebugEnabled())
s_logger.debug("Console proxy is down, zone: " + dc.getName() + ", proxy: " +
proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress() + ", private IP: " +
(proxy.getPrivateIpAddress() == null ? "N/A" : proxy.getPrivateIpAddress()));
_alertMgr.sendAlert(
AlertManager.ALERT_TYPE_CONSOLE_PROXY,
args.getZoneId(),
proxy.getPodIdToDeployIn(),
"Console proxy down in zone: " + dc.getName() + ", proxy: " + proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress()
+ ", private IP: " + (proxy.getPrivateIpAddress() == null ? "N/A" : proxy.getPrivateIpAddress()),
"Console proxy down (zone " + dc.getName() + ")"
);
break;
case ConsoleProxyAlertEventArgs.PROXY_REBOOTED :
if(s_logger.isDebugEnabled())
s_logger.debug("Console proxy is rebooted, zone: " + dc.getName() + ", proxy: " +
proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress() + ", private IP: " +
(proxy.getPrivateIpAddress() == null ? "N/A" : proxy.getPrivateIpAddress()));
_alertMgr.sendAlert(
AlertManager.ALERT_TYPE_CONSOLE_PROXY,
args.getZoneId(),
proxy.getPodIdToDeployIn(),
"Console proxy rebooted in zone: " + dc.getName() + ", proxy: " + proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress()
+ ", private IP: " + (proxy.getPrivateIpAddress() == null ? "N/A" : proxy.getPrivateIpAddress()),
"Console proxy rebooted (zone " + dc.getName() + ")"
);
break;
case ConsoleProxyAlertEventArgs.PROXY_CREATE_FAILURE :
if(s_logger.isDebugEnabled())
s_logger.debug("Console proxy creation failure, zone: " + dc.getName() + ", proxy: " +
proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress() + ", private IP: " +
(proxy.getPrivateIpAddress() == null ? "N/A" : proxy.getPrivateIpAddress()));
_alertMgr.sendAlert(
AlertManager.ALERT_TYPE_CONSOLE_PROXY,
args.getZoneId(),
proxy.getPodIdToDeployIn(),
"Console proxy creation failure. zone: " + dc.getName() + ", proxy: " + proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress()
+ ", private IP: " + (proxy.getPrivateIpAddress() == null ? "N/A" : proxy.getPrivateIpAddress())
+ ", error details: " + args.getMessage(),
"Console proxy creation failure (zone " + dc.getName() + ")"
);
break;
case ConsoleProxyAlertEventArgs.PROXY_START_FAILURE :
if(s_logger.isDebugEnabled())
s_logger.debug("Console proxy startup failure, zone: " + dc.getName() + ", proxy: " +
proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress() + ", private IP: " +
(proxy.getPrivateIpAddress() == null ? "N/A" : proxy.getPrivateIpAddress()));
_alertMgr.sendAlert(
AlertManager.ALERT_TYPE_CONSOLE_PROXY,
args.getZoneId(),
proxy.getPodIdToDeployIn(),
"Console proxy startup failure. zone: " + dc.getName() + ", proxy: " + proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress()
+ ", private IP: " + (proxy.getPrivateIpAddress() == null ? "N/A" : proxy.getPrivateIpAddress())
+ ", error details: " + args.getMessage(),
"Console proxy startup failure (zone " + dc.getName() + ")"
);
break;
case ConsoleProxyAlertEventArgs.PROXY_FIREWALL_ALERT :
if(s_logger.isDebugEnabled())
s_logger.debug("Console proxy firewall alert, zone: " + dc.getName() + ", proxy: " +
proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress() + ", private IP: " +
(proxy.getPrivateIpAddress() == null ? "N/A" : proxy.getPrivateIpAddress()));
_alertMgr.sendAlert(
AlertManager.ALERT_TYPE_CONSOLE_PROXY,
args.getZoneId(),
proxy.getPodIdToDeployIn(),
"Failed to open console proxy firewall port. zone: " + dc.getName() + ", proxy: " + proxy.getHostName()
+ ", public IP: " + proxy.getPublicIpAddress()
+ ", private IP: " + (proxy.getPrivateIpAddress() == null ? "N/A" : proxy.getPrivateIpAddress()),
"Console proxy alert (zone " + dc.getName() + ")"
);
break;
case ConsoleProxyAlertEventArgs.PROXY_STORAGE_ALERT :
if(s_logger.isDebugEnabled())
s_logger.debug("Console proxy storage alert, zone: " + dc.getName() + ", proxy: " +
proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress() + ", private IP: " +
proxy.getPrivateIpAddress() + ", message: " + args.getMessage());
_alertMgr.sendAlert(
AlertManager.ALERT_TYPE_STORAGE_MISC,
args.getZoneId(),
proxy.getPodIdToDeployIn(),
"Console proxy storage issue. zone: " + dc.getName() + ", message: " + args.getMessage(),
"Console proxy alert (zone " + dc.getName() + ")"
);
break;
}
}
@Override
public boolean configure(String name, Map<String, Object> params)
throws ConfigurationException {
if (s_logger.isInfoEnabled())
s_logger.info("Start configuring console proxy alert manager : " + name);
ComponentLocator locator = ComponentLocator.getCurrentLocator();
_dcDao = locator.getDao(DataCenterDao.class);
if (_dcDao == null) {
throw new ConfigurationException("Unable to get " + DataCenterDao.class.getName());
}
_consoleProxyDao = locator.getDao(ConsoleProxyDao.class);
if (_consoleProxyDao == null) {
throw new ConfigurationException("Unable to get " + ConsoleProxyDao.class.getName());
}
_alertMgr = locator.getManager(AlertManager.class);
if (_alertMgr == null) {
throw new ConfigurationException("Unable to get " + AlertManager.class.getName());
}
try {
SubscriptionMgr.getInstance().subscribe(ConsoleProxyManager.ALERT_SUBJECT, this, "onProxyAlert");
} catch (SecurityException e) {
throw new ConfigurationException("Unable to register console proxy event subscription, exception: " + e);
} catch (NoSuchMethodException e) {
throw new ConfigurationException("Unable to register console proxy event subscription, exception: " + e);
}
return true;
}
@Override
public String getName() {
return _name;
}
@Override
public boolean start() {
return true;
}
@Override
public boolean stop() {
return true;
}
}

View File

@ -10,200 +10,200 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.alert;
import java.util.Map;
import javax.ejb.Local;
import javax.naming.ConfigurationException;
import org.apache.log4j.Logger;
import com.cloud.alert.AlertAdapter;
import com.cloud.alert.AlertManager;
import com.cloud.consoleproxy.ConsoleProxyManager;
import com.cloud.dc.DataCenterVO;
import com.cloud.dc.dao.DataCenterDao;
import com.cloud.storage.secondary.SecStorageVmAlertEventArgs;
import com.cloud.storage.secondary.SecondaryStorageVmManager;
import com.cloud.utils.component.Inject;
import com.cloud.utils.events.SubscriptionMgr;
import com.cloud.vm.SecondaryStorageVmVO;
import com.cloud.vm.dao.SecondaryStorageVmDao;
@Local(value=AlertAdapter.class)
public class SecondaryStorageVmAlertAdapter implements AlertAdapter {
private static final Logger s_logger = Logger.getLogger(SecondaryStorageVmAlertAdapter.class);
private String _name;
@Inject private AlertManager _alertMgr;
@Inject private DataCenterDao _dcDao;
@Inject private SecondaryStorageVmDao _ssvmDao;
public void onSSVMAlert(Object sender, SecStorageVmAlertEventArgs args) {
if(s_logger.isDebugEnabled())
s_logger.debug("received secondary storage vm alert");
DataCenterVO dc = _dcDao.findById(args.getZoneId());
SecondaryStorageVmVO secStorageVm = args.getSecStorageVm();
if(secStorageVm == null)
secStorageVm = _ssvmDao.findById(args.getSecStorageVmId());
switch(args.getType()) {
case SecStorageVmAlertEventArgs.SSVM_CREATED :
if(s_logger.isDebugEnabled())
s_logger.debug("New secondary storage vm created, zone: " + dc.getName() + ", secStorageVm: " +
secStorageVm.getHostName() + ", public IP: " + secStorageVm.getPublicIpAddress() + ", private IP: " +
secStorageVm.getPrivateIpAddress());
break;
case SecStorageVmAlertEventArgs.SSVM_UP :
if(s_logger.isDebugEnabled())
s_logger.debug("Secondary Storage Vm is up, zone: " + dc.getName() + ", secStorageVm: " +
secStorageVm.getHostName() + ", public IP: " + secStorageVm.getPublicIpAddress() + ", private IP: " +
secStorageVm.getPrivateIpAddress());
_alertMgr.sendAlert(
AlertManager.ALERT_TYPE_SSVM,
args.getZoneId(),
secStorageVm.getPodIdToDeployIn(),
"Secondary Storage Vm up in zone: " + dc.getName() + ", secStorageVm: " + secStorageVm.getHostName() + ", public IP: " + secStorageVm.getPublicIpAddress()
+ ", private IP: " + (secStorageVm.getPrivateIpAddress() == null ? "N/A" : secStorageVm.getPrivateIpAddress()),
"Secondary Storage Vm up (zone " + dc.getName() + ")"
);
break;
case SecStorageVmAlertEventArgs.SSVM_DOWN :
if(s_logger.isDebugEnabled())
s_logger.debug("Secondary Storage Vm is down, zone: " + dc.getName() + ", secStorageVm: " +
secStorageVm.getHostName() + ", public IP: " + secStorageVm.getPublicIpAddress() + ", private IP: " +
(secStorageVm.getPrivateIpAddress() == null ? "N/A" : secStorageVm.getPrivateIpAddress()));
_alertMgr.sendAlert(
AlertManager.ALERT_TYPE_SSVM,
args.getZoneId(),
secStorageVm.getPodIdToDeployIn(),
"Secondary Storage Vm down in zone: " + dc.getName() + ", secStorageVm: " + secStorageVm.getHostName() + ", public IP: " + secStorageVm.getPublicIpAddress()
+ ", private IP: " + (secStorageVm.getPrivateIpAddress() == null ? "N/A" : secStorageVm.getPrivateIpAddress()),
"Secondary Storage Vm down (zone " + dc.getName() + ")"
);
break;
case SecStorageVmAlertEventArgs.SSVM_REBOOTED :
if(s_logger.isDebugEnabled())
s_logger.debug("Secondary Storage Vm is rebooted, zone: " + dc.getName() + ", secStorageVm: " +
secStorageVm.getHostName() + ", public IP: " + secStorageVm.getPublicIpAddress() + ", private IP: " +
(secStorageVm.getPrivateIpAddress() == null ? "N/A" : secStorageVm.getPrivateIpAddress()));
_alertMgr.sendAlert(
AlertManager.ALERT_TYPE_SSVM,
args.getZoneId(),
secStorageVm.getPodIdToDeployIn(),
"Secondary Storage Vm rebooted in zone: " + dc.getName() + ", secStorageVm: " + secStorageVm.getHostName() + ", public IP: " + secStorageVm.getPublicIpAddress()
+ ", private IP: " + (secStorageVm.getPrivateIpAddress() == null ? "N/A" : secStorageVm.getPrivateIpAddress()),
"Secondary Storage Vm rebooted (zone " + dc.getName() + ")"
);
break;
case SecStorageVmAlertEventArgs.SSVM_CREATE_FAILURE :
if(s_logger.isDebugEnabled())
s_logger.debug("Secondary Storage Vm creation failure, zone: " + dc.getName() + ", secStorageVm: " +
secStorageVm.getHostName() + ", public IP: " + secStorageVm.getPublicIpAddress() + ", private IP: " +
(secStorageVm.getPrivateIpAddress() == null ? "N/A" : secStorageVm.getPrivateIpAddress()));
_alertMgr.sendAlert(
AlertManager.ALERT_TYPE_SSVM,
args.getZoneId(),
secStorageVm.getPodIdToDeployIn(),
"Secondary Storage Vm creation failure. zone: " + dc.getName() + ", secStorageVm: " + secStorageVm.getHostName() + ", public IP: " + secStorageVm.getPublicIpAddress()
+ ", private IP: " + (secStorageVm.getPrivateIpAddress() == null ? "N/A" : secStorageVm.getPrivateIpAddress())
+ ", error details: " + args.getMessage(),
"Secondary Storage Vm creation failure (zone " + dc.getName() + ")"
);
break;
case SecStorageVmAlertEventArgs.SSVM_START_FAILURE :
if(s_logger.isDebugEnabled())
s_logger.debug("Secondary Storage Vm startup failure, zone: " + dc.getName() + ", secStorageVm: " +
secStorageVm.getHostName() + ", public IP: " + secStorageVm.getPublicIpAddress() + ", private IP: " +
(secStorageVm.getPrivateIpAddress() == null ? "N/A" : secStorageVm.getPrivateIpAddress()));
_alertMgr.sendAlert(
AlertManager.ALERT_TYPE_SSVM,
args.getZoneId(),
secStorageVm.getPodIdToDeployIn(),
"Secondary Storage Vm startup failure. zone: " + dc.getName() + ", secStorageVm: " + secStorageVm.getHostName() + ", public IP: " + secStorageVm.getPublicIpAddress()
+ ", private IP: " + (secStorageVm.getPrivateIpAddress() == null ? "N/A" : secStorageVm.getPrivateIpAddress())
+ ", error details: " + args.getMessage(),
"Secondary Storage Vm startup failure (zone " + dc.getName() + ")"
);
break;
case SecStorageVmAlertEventArgs.SSVM_FIREWALL_ALERT :
if(s_logger.isDebugEnabled())
s_logger.debug("Secondary Storage Vm firewall alert, zone: " + dc.getName() + ", secStorageVm: " +
secStorageVm.getHostName() + ", public IP: " + secStorageVm.getPublicIpAddress() + ", private IP: " +
(secStorageVm.getPrivateIpAddress() == null ? "N/A" : secStorageVm.getPrivateIpAddress()));
_alertMgr.sendAlert(
AlertManager.ALERT_TYPE_SSVM,
args.getZoneId(),
secStorageVm.getPodIdToDeployIn(),
"Failed to open secondary storage vm firewall port. zone: " + dc.getName() + ", secStorageVm: " + secStorageVm.getHostName()
+ ", public IP: " + secStorageVm.getPublicIpAddress()
+ ", private IP: " + (secStorageVm.getPrivateIpAddress() == null ? "N/A" : secStorageVm.getPrivateIpAddress()),
"Secondary Storage Vm alert (zone " + dc.getName() + ")"
);
break;
case SecStorageVmAlertEventArgs.SSVM_STORAGE_ALERT :
if(s_logger.isDebugEnabled())
s_logger.debug("Secondary Storage Vm storage alert, zone: " + dc.getName() + ", secStorageVm: " +
secStorageVm.getHostName() + ", public IP: " + secStorageVm.getPublicIpAddress() + ", private IP: " +
secStorageVm.getPrivateIpAddress() + ", message: " + args.getMessage());
_alertMgr.sendAlert(
AlertManager.ALERT_TYPE_STORAGE_MISC,
args.getZoneId(),
secStorageVm.getPodIdToDeployIn(),
"Secondary Storage Vm storage issue. zone: " + dc.getName() + ", message: " + args.getMessage(),
"Secondary Storage Vm alert (zone " + dc.getName() + ")"
);
break;
}
}
@Override
public boolean configure(String name, Map<String, Object> params)
throws ConfigurationException {
if (s_logger.isInfoEnabled())
s_logger.info("Start configuring secondary storage vm alert manager : " + name);
try {
SubscriptionMgr.getInstance().subscribe(SecondaryStorageVmManager.ALERT_SUBJECT, this, "onSSVMAlert");
} catch (SecurityException e) {
throw new ConfigurationException("Unable to register secondary storage vm event subscription, exception: " + e);
} catch (NoSuchMethodException e) {
throw new ConfigurationException("Unable to register secondary storage vm event subscription, exception: " + e);
}
return true;
}
@Override
public String getName() {
return _name;
}
@Override
public boolean start() {
return true;
}
@Override
public boolean stop() {
return true;
}
}
package com.cloud.alert;
import java.util.Map;
import javax.ejb.Local;
import javax.naming.ConfigurationException;
import org.apache.log4j.Logger;
import com.cloud.alert.AlertAdapter;
import com.cloud.alert.AlertManager;
import com.cloud.consoleproxy.ConsoleProxyManager;
import com.cloud.dc.DataCenterVO;
import com.cloud.dc.dao.DataCenterDao;
import com.cloud.storage.secondary.SecStorageVmAlertEventArgs;
import com.cloud.storage.secondary.SecondaryStorageVmManager;
import com.cloud.utils.component.Inject;
import com.cloud.utils.events.SubscriptionMgr;
import com.cloud.vm.SecondaryStorageVmVO;
import com.cloud.vm.dao.SecondaryStorageVmDao;
/**
 * Alert adapter that listens for secondary storage VM (SSVM) lifecycle events
 * published under SecondaryStorageVmManager.ALERT_SUBJECT and forwards the
 * operationally relevant ones (up/down/reboot/failures) to the AlertManager.
 * Purely informational events (e.g. SSVM_CREATED) are only logged at debug level.
 */
@Local(value=AlertAdapter.class)
public class SecondaryStorageVmAlertAdapter implements AlertAdapter {
private static final Logger s_logger = Logger.getLogger(SecondaryStorageVmAlertAdapter.class);
// Component name; NOTE(review): never assigned in this class, so getName() may return null — confirm.
private String _name;
@Inject private AlertManager _alertMgr;
@Inject private DataCenterDao _dcDao;
@Inject private SecondaryStorageVmDao _ssvmDao;
/**
 * Subscription callback, registered by name ("onSSVMAlert") in configure().
 * Resolves the zone and SSVM referenced by the event, then dispatches on the
 * event type to log and/or raise an alert.
 *
 * @param sender event publisher (unused)
 * @param args   event payload carrying zone id, SSVM (or its id), type and message
 */
public void onSSVMAlert(Object sender, SecStorageVmAlertEventArgs args) {
if(s_logger.isDebugEnabled())
s_logger.debug("received secondary storage vm alert");
// NOTE(review): dc is dereferenced below without a null check; assumes the
// zone id in the event always resolves — confirm findById cannot return null here.
DataCenterVO dc = _dcDao.findById(args.getZoneId());
// The event may carry the SSVM object directly; otherwise look it up by id.
SecondaryStorageVmVO secStorageVm = args.getSecStorageVm();
if(secStorageVm == null)
secStorageVm = _ssvmDao.findById(args.getSecStorageVmId());
switch(args.getType()) {
// Creation is informational only: no alert is sent.
case SecStorageVmAlertEventArgs.SSVM_CREATED :
if(s_logger.isDebugEnabled())
s_logger.debug("New secondary storage vm created, zone: " + dc.getName() + ", secStorageVm: " +
secStorageVm.getHostName() + ", public IP: " + secStorageVm.getPublicIpAddress() + ", private IP: " +
secStorageVm.getPrivateIpAddress());
break;
case SecStorageVmAlertEventArgs.SSVM_UP :
if(s_logger.isDebugEnabled())
s_logger.debug("Secondary Storage Vm is up, zone: " + dc.getName() + ", secStorageVm: " +
secStorageVm.getHostName() + ", public IP: " + secStorageVm.getPublicIpAddress() + ", private IP: " +
secStorageVm.getPrivateIpAddress());
_alertMgr.sendAlert(
AlertManager.ALERT_TYPE_SSVM,
args.getZoneId(),
secStorageVm.getPodIdToDeployIn(),
"Secondary Storage Vm up in zone: " + dc.getName() + ", secStorageVm: " + secStorageVm.getHostName() + ", public IP: " + secStorageVm.getPublicIpAddress()
+ ", private IP: " + (secStorageVm.getPrivateIpAddress() == null ? "N/A" : secStorageVm.getPrivateIpAddress()),
"Secondary Storage Vm up (zone " + dc.getName() + ")"
);
break;
case SecStorageVmAlertEventArgs.SSVM_DOWN :
if(s_logger.isDebugEnabled())
s_logger.debug("Secondary Storage Vm is down, zone: " + dc.getName() + ", secStorageVm: " +
secStorageVm.getHostName() + ", public IP: " + secStorageVm.getPublicIpAddress() + ", private IP: " +
(secStorageVm.getPrivateIpAddress() == null ? "N/A" : secStorageVm.getPrivateIpAddress()));
_alertMgr.sendAlert(
AlertManager.ALERT_TYPE_SSVM,
args.getZoneId(),
secStorageVm.getPodIdToDeployIn(),
"Secondary Storage Vm down in zone: " + dc.getName() + ", secStorageVm: " + secStorageVm.getHostName() + ", public IP: " + secStorageVm.getPublicIpAddress()
+ ", private IP: " + (secStorageVm.getPrivateIpAddress() == null ? "N/A" : secStorageVm.getPrivateIpAddress()),
"Secondary Storage Vm down (zone " + dc.getName() + ")"
);
break;
case SecStorageVmAlertEventArgs.SSVM_REBOOTED :
if(s_logger.isDebugEnabled())
s_logger.debug("Secondary Storage Vm is rebooted, zone: " + dc.getName() + ", secStorageVm: " +
secStorageVm.getHostName() + ", public IP: " + secStorageVm.getPublicIpAddress() + ", private IP: " +
(secStorageVm.getPrivateIpAddress() == null ? "N/A" : secStorageVm.getPrivateIpAddress()));
_alertMgr.sendAlert(
AlertManager.ALERT_TYPE_SSVM,
args.getZoneId(),
secStorageVm.getPodIdToDeployIn(),
"Secondary Storage Vm rebooted in zone: " + dc.getName() + ", secStorageVm: " + secStorageVm.getHostName() + ", public IP: " + secStorageVm.getPublicIpAddress()
+ ", private IP: " + (secStorageVm.getPrivateIpAddress() == null ? "N/A" : secStorageVm.getPrivateIpAddress()),
"Secondary Storage Vm rebooted (zone " + dc.getName() + ")"
);
break;
// Failure cases additionally include args.getMessage() in the alert body.
case SecStorageVmAlertEventArgs.SSVM_CREATE_FAILURE :
if(s_logger.isDebugEnabled())
s_logger.debug("Secondary Storage Vm creation failure, zone: " + dc.getName() + ", secStorageVm: " +
secStorageVm.getHostName() + ", public IP: " + secStorageVm.getPublicIpAddress() + ", private IP: " +
(secStorageVm.getPrivateIpAddress() == null ? "N/A" : secStorageVm.getPrivateIpAddress()));
_alertMgr.sendAlert(
AlertManager.ALERT_TYPE_SSVM,
args.getZoneId(),
secStorageVm.getPodIdToDeployIn(),
"Secondary Storage Vm creation failure. zone: " + dc.getName() + ", secStorageVm: " + secStorageVm.getHostName() + ", public IP: " + secStorageVm.getPublicIpAddress()
+ ", private IP: " + (secStorageVm.getPrivateIpAddress() == null ? "N/A" : secStorageVm.getPrivateIpAddress())
+ ", error details: " + args.getMessage(),
"Secondary Storage Vm creation failure (zone " + dc.getName() + ")"
);
break;
case SecStorageVmAlertEventArgs.SSVM_START_FAILURE :
if(s_logger.isDebugEnabled())
s_logger.debug("Secondary Storage Vm startup failure, zone: " + dc.getName() + ", secStorageVm: " +
secStorageVm.getHostName() + ", public IP: " + secStorageVm.getPublicIpAddress() + ", private IP: " +
(secStorageVm.getPrivateIpAddress() == null ? "N/A" : secStorageVm.getPrivateIpAddress()));
_alertMgr.sendAlert(
AlertManager.ALERT_TYPE_SSVM,
args.getZoneId(),
secStorageVm.getPodIdToDeployIn(),
"Secondary Storage Vm startup failure. zone: " + dc.getName() + ", secStorageVm: " + secStorageVm.getHostName() + ", public IP: " + secStorageVm.getPublicIpAddress()
+ ", private IP: " + (secStorageVm.getPrivateIpAddress() == null ? "N/A" : secStorageVm.getPrivateIpAddress())
+ ", error details: " + args.getMessage(),
"Secondary Storage Vm startup failure (zone " + dc.getName() + ")"
);
break;
case SecStorageVmAlertEventArgs.SSVM_FIREWALL_ALERT :
if(s_logger.isDebugEnabled())
s_logger.debug("Secondary Storage Vm firewall alert, zone: " + dc.getName() + ", secStorageVm: " +
secStorageVm.getHostName() + ", public IP: " + secStorageVm.getPublicIpAddress() + ", private IP: " +
(secStorageVm.getPrivateIpAddress() == null ? "N/A" : secStorageVm.getPrivateIpAddress()));
_alertMgr.sendAlert(
AlertManager.ALERT_TYPE_SSVM,
args.getZoneId(),
secStorageVm.getPodIdToDeployIn(),
"Failed to open secondary storage vm firewall port. zone: " + dc.getName() + ", secStorageVm: " + secStorageVm.getHostName()
+ ", public IP: " + secStorageVm.getPublicIpAddress()
+ ", private IP: " + (secStorageVm.getPrivateIpAddress() == null ? "N/A" : secStorageVm.getPrivateIpAddress()),
"Secondary Storage Vm alert (zone " + dc.getName() + ")"
);
break;
// Storage alerts are filed under the storage-misc alert type rather than SSVM.
case SecStorageVmAlertEventArgs.SSVM_STORAGE_ALERT :
if(s_logger.isDebugEnabled())
s_logger.debug("Secondary Storage Vm storage alert, zone: " + dc.getName() + ", secStorageVm: " +
secStorageVm.getHostName() + ", public IP: " + secStorageVm.getPublicIpAddress() + ", private IP: " +
secStorageVm.getPrivateIpAddress() + ", message: " + args.getMessage());
_alertMgr.sendAlert(
AlertManager.ALERT_TYPE_STORAGE_MISC,
args.getZoneId(),
secStorageVm.getPodIdToDeployIn(),
"Secondary Storage Vm storage issue. zone: " + dc.getName() + ", message: " + args.getMessage(),
"Secondary Storage Vm alert (zone " + dc.getName() + ")"
);
break;
}
}
/**
 * Subscribes this adapter (reflectively, by method name "onSSVMAlert") to SSVM
 * alert events. Returns true on success.
 *
 * @throws ConfigurationException if the reflective subscription fails
 */
@Override
public boolean configure(String name, Map<String, Object> params)
throws ConfigurationException {
if (s_logger.isInfoEnabled())
s_logger.info("Start configuring secondary storage vm alert manager : " + name);
try {
SubscriptionMgr.getInstance().subscribe(SecondaryStorageVmManager.ALERT_SUBJECT, this, "onSSVMAlert");
} catch (SecurityException e) {
throw new ConfigurationException("Unable to register secondary storage vm event subscription, exception: " + e);
} catch (NoSuchMethodException e) {
throw new ConfigurationException("Unable to register secondary storage vm event subscription, exception: " + e);
}
return true;
}
/** Returns the component name (see NOTE on the _name field above). */
@Override
public String getName() {
return _name;
}
/** Lifecycle hook; no background work to start. */
@Override
public boolean start() {
return true;
}
/** Lifecycle hook; nothing to tear down. */
@Override
public boolean stop() {
return true;
}
}

View File

@ -10,13 +10,13 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.alert.dao;
package com.cloud.alert.dao;
import com.cloud.alert.AlertVO;
import com.cloud.utils.db.GenericDao;
public interface AlertDao extends GenericDao<AlertVO, Long> {
public interface AlertDao extends GenericDao<AlertVO, Long> {
AlertVO getLastAlert(short type, long dataCenterId, Long podId, Long clusterId);
// This is for backward compatibility
AlertVO getLastAlert(short type, long dataCenterId, Long podId);
}
AlertVO getLastAlert(short type, long dataCenterId, Long podId);
}

View File

@ -10,8 +10,8 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.alert.dao;
package com.cloud.alert.dao;
import java.util.List;
import javax.ejb.Local;
@ -20,28 +20,28 @@ import com.cloud.alert.AlertVO;
import com.cloud.utils.db.Filter;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.SearchCriteria;
@Local(value = { AlertDao.class })
public class AlertDaoImpl extends GenericDaoBase<AlertVO, Long> implements AlertDao {
@Override
public AlertVO getLastAlert(short type, long dataCenterId, Long podId, Long clusterId) {
Filter searchFilter = new Filter(AlertVO.class, "createdDate", Boolean.FALSE, Long.valueOf(0), Long.valueOf(1));
SearchCriteria<AlertVO> sc = createSearchCriteria();
sc.addAnd("type", SearchCriteria.Op.EQ, Short.valueOf(type));
sc.addAnd("dataCenterId", SearchCriteria.Op.EQ, Long.valueOf(dataCenterId));
if (podId != null) {
sc.addAnd("podId", SearchCriteria.Op.EQ, podId);
@Local(value = { AlertDao.class })
public class AlertDaoImpl extends GenericDaoBase<AlertVO, Long> implements AlertDao {
@Override
public AlertVO getLastAlert(short type, long dataCenterId, Long podId, Long clusterId) {
Filter searchFilter = new Filter(AlertVO.class, "createdDate", Boolean.FALSE, Long.valueOf(0), Long.valueOf(1));
SearchCriteria<AlertVO> sc = createSearchCriteria();
sc.addAnd("type", SearchCriteria.Op.EQ, Short.valueOf(type));
sc.addAnd("dataCenterId", SearchCriteria.Op.EQ, Long.valueOf(dataCenterId));
if (podId != null) {
sc.addAnd("podId", SearchCriteria.Op.EQ, podId);
}
if (clusterId != null) {
sc.addAnd("clusterId", SearchCriteria.Op.EQ, clusterId);
}
List<AlertVO> alerts = listBy(sc, searchFilter);
if ((alerts != null) && !alerts.isEmpty()) {
return alerts.get(0);
}
return null;
}
List<AlertVO> alerts = listBy(sc, searchFilter);
if ((alerts != null) && !alerts.isEmpty()) {
return alerts.get(0);
}
return null;
}
@Override
@ -60,5 +60,5 @@ public class AlertDaoImpl extends GenericDaoBase<AlertVO, Long> implements Alert
return alerts.get(0);
}
return null;
}
}
}
}

View File

@ -10,23 +10,23 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.api;
package com.cloud.api;
import com.google.gson.GsonBuilder;
import com.cloud.utils.IdentityProxy;
import java.util.Map;
public class ApiGsonHelper {
private static final GsonBuilder s_gBuilder;
static {
s_gBuilder = new GsonBuilder().setDateFormat("yyyy-MM-dd'T'HH:mm:ssZ");
s_gBuilder.setVersion(1.3);
public class ApiGsonHelper {
private static final GsonBuilder s_gBuilder;
static {
s_gBuilder = new GsonBuilder().setDateFormat("yyyy-MM-dd'T'HH:mm:ssZ");
s_gBuilder.setVersion(1.3);
s_gBuilder.registerTypeAdapter(ResponseObject.class, new ResponseObjectTypeAdapter());
s_gBuilder.registerTypeAdapter(IdentityProxy.class, new IdentityTypeAdapter());
s_gBuilder.registerTypeAdapter(Map.class, new StringMapTypeAdapter());
}
public static GsonBuilder getBuilder() {
return s_gBuilder;
}
}
}
public static GsonBuilder getBuilder() {
return s_gBuilder;
}
}

File diff suppressed because it is too large Load Diff

View File

@ -10,68 +10,68 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.api;
import org.apache.log4j.Logger;
import com.google.gson.Gson;
public class ApiSerializerHelper {
public static final Logger s_logger = Logger.getLogger(ApiSerializerHelper.class.getName());
public static String token = "/";
public static String toSerializedStringOld(Object result) {
if (result != null) {
Class<?> clz = result.getClass();
Gson gson = ApiGsonHelper.getBuilder().create();
if (result instanceof ResponseObject) {
return clz.getName() + token + ((ResponseObject) result).getObjectName() + token + gson.toJson(result);
} else {
return clz.getName() + token + gson.toJson(result);
}
}
return null;
}
public static Object fromSerializedString(String result) {
try {
if (result != null && !result.isEmpty()) {
String[] serializedParts = result.split(token);
if (serializedParts.length < 2) {
return null;
}
String clzName = serializedParts[0];
String nameField = null;
String content = null;
if (serializedParts.length == 2) {
content = serializedParts[1];
} else {
nameField = serializedParts[1];
int index = result.indexOf(token + nameField + token);
content = result.substring(index + nameField.length() + 2);
}
Class<?> clz;
try {
clz = Class.forName(clzName);
} catch (ClassNotFoundException e) {
return null;
}
Gson gson = ApiGsonHelper.getBuilder().create();
Object obj = gson.fromJson(content, clz);
if (nameField != null) {
((ResponseObject) obj).setObjectName(nameField);
}
return obj;
}
return null;
} catch (RuntimeException e) {
s_logger.error("Caught runtime exception when doing GSON deserialization on: " + result);
throw e;
}
}
}
package com.cloud.api;
import org.apache.log4j.Logger;
import com.google.gson.Gson;
/**
 * Helper for converting API command results to/from the legacy string format
 * "className/objectName/json" (objectName present only for ResponseObject
 * results; plain results use "className/json").
 */
public class ApiSerializerHelper {
    public static final Logger s_logger = Logger.getLogger(ApiSerializerHelper.class.getName());
    public static String token = "/";

    /**
     * Serializes {@code result} to the legacy token-delimited form; returns
     * null for a null input.
     */
    public static String toSerializedStringOld(Object result) {
        if (result == null) {
            return null;
        }
        String className = result.getClass().getName();
        Gson json = ApiGsonHelper.getBuilder().create();
        if (result instanceof ResponseObject) {
            return className + token + ((ResponseObject) result).getObjectName() + token + json.toJson(result);
        }
        return className + token + json.toJson(result);
    }

    /**
     * Reverses toSerializedStringOld(): resolves the class by name, deserializes
     * the JSON payload, and restores the object name for ResponseObjects.
     * Returns null for null/empty input, malformed strings, or unknown classes.
     */
    public static Object fromSerializedString(String result) {
        try {
            if (result == null || result.isEmpty()) {
                return null;
            }
            String[] parts = result.split(token);
            if (parts.length < 2) {
                return null;
            }
            String className = parts[0];
            String objectName = null;
            String payload;
            if (parts.length == 2) {
                // "className/json" form: no object name header.
                payload = parts[1];
            } else {
                // "className/objectName/json" form: the JSON itself may contain
                // the token, so recover the payload from the original string.
                objectName = parts[1];
                int headerEnd = result.indexOf(token + objectName + token);
                payload = result.substring(headerEnd + objectName.length() + 2);
            }
            Class<?> clz;
            try {
                clz = Class.forName(className);
            } catch (ClassNotFoundException e) {
                return null;
            }
            Object obj = ApiGsonHelper.getBuilder().create().fromJson(payload, clz);
            if (objectName != null) {
                ((ResponseObject) obj).setObjectName(objectName);
            }
            return obj;
        } catch (RuntimeException e) {
            s_logger.error("Caught runtime exception when doing GSON deserialization on: " + result);
            throw e;
        }
    }
}

View File

@ -10,68 +10,68 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.api;
import java.lang.reflect.Type;
import java.util.ArrayList;
import com.cloud.uuididentity.dao.IdentityDao;
import com.cloud.uuididentity.dao.IdentityDaoImpl;
import com.google.gson.Gson;
import com.google.gson.JsonDeserializationContext;
import com.google.gson.JsonDeserializer;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import com.google.gson.JsonParseException;
import com.google.gson.JsonPrimitive;
import com.google.gson.JsonSerializationContext;
import com.google.gson.JsonSerializer;
import com.cloud.utils.IdentityProxy;
public class IdentityTypeAdapter implements JsonSerializer<IdentityProxy>, JsonDeserializer<IdentityProxy> {
@Override
public JsonElement serialize(IdentityProxy src, Type srcType, JsonSerializationContext context) {
if(SerializationContext.current().getUuidTranslation()) {
assert(src != null);
if(src.getValue() == null)
return context.serialize(null);
IdentityDao identityDao = new IdentityDaoImpl();
if(src.getTableName() != null) {
String uuid = identityDao.getIdentityUuid(src.getTableName(), String.valueOf(src.getValue()));
if(uuid == null)
return context.serialize(null);
// Exceptions set the _idFieldName in the IdentityProxy structure. So if this field is not
// null, prepare a structure of uuid and idFieldName and return the json representation of that.
String idName = src.getidFieldName();
if (idName != null) {
// Prepare a structure.
JsonObject jsonObj = new JsonObject();
jsonObj.add("uuid", new JsonPrimitive(uuid));
jsonObj.add("uuidProperty", new JsonPrimitive(idName));
return jsonObj;
}
return new JsonPrimitive(uuid);
} else {
return new JsonPrimitive(String.valueOf(src.getValue()));
}
} else {
return new Gson().toJsonTree(src);
}
}
@Override
public IdentityProxy deserialize(JsonElement src, Type srcType,
JsonDeserializationContext context) throws JsonParseException {
IdentityProxy obj = new IdentityProxy();
JsonObject json = src.getAsJsonObject();
obj.setTableName(json.get("_tableName").getAsString());
if(json.get("_value") != null)
obj.setValue(json.get("_value").getAsLong());
return obj;
}
}
package com.cloud.api;
import java.lang.reflect.Type;
import java.util.ArrayList;
import com.cloud.uuididentity.dao.IdentityDao;
import com.cloud.uuididentity.dao.IdentityDaoImpl;
import com.google.gson.Gson;
import com.google.gson.JsonDeserializationContext;
import com.google.gson.JsonDeserializer;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import com.google.gson.JsonParseException;
import com.google.gson.JsonPrimitive;
import com.google.gson.JsonSerializationContext;
import com.google.gson.JsonSerializer;
import com.cloud.utils.IdentityProxy;
/**
 * Gson adapter for IdentityProxy fields. On serialization, when UUID
 * translation is enabled for the current thread (see SerializationContext),
 * internal database ids are replaced by their external UUIDs; otherwise the
 * proxy is serialized as-is. Deserialization rebuilds an IdentityProxy from
 * its "_tableName"/"_value" members.
 */
public class IdentityTypeAdapter implements JsonSerializer<IdentityProxy>, JsonDeserializer<IdentityProxy> {
    @Override
    public JsonElement serialize(IdentityProxy src, Type srcType, JsonSerializationContext context) {
        if (SerializationContext.current().getUuidTranslation()) {
            assert (src != null);
            if (src.getValue() == null)
                return context.serialize(null);

            IdentityDao identityDao = new IdentityDaoImpl();
            if (src.getTableName() != null) {
                String uuid = identityDao.getIdentityUuid(src.getTableName(), String.valueOf(src.getValue()));
                if (uuid == null)
                    return context.serialize(null);

                // Exceptions set the _idFieldName in the IdentityProxy structure. So if this field is not
                // null, prepare a structure of uuid and idFieldName and return the json representation of that.
                String idName = src.getidFieldName();
                if (idName != null) {
                    // Prepare a structure.
                    JsonObject jsonObj = new JsonObject();
                    jsonObj.add("uuid", new JsonPrimitive(uuid));
                    jsonObj.add("uuidProperty", new JsonPrimitive(idName));
                    return jsonObj;
                }
                return new JsonPrimitive(uuid);
            } else {
                return new JsonPrimitive(String.valueOf(src.getValue()));
            }
        } else {
            return new Gson().toJsonTree(src);
        }
    }

    @Override
    public IdentityProxy deserialize(JsonElement src, Type srcType,
            JsonDeserializationContext context) throws JsonParseException {
        IdentityProxy obj = new IdentityProxy();
        JsonObject json = src.getAsJsonObject();
        // Guard both members: JsonObject.get() returns null for a missing key and
        // a JsonNull instance for an explicit JSON null; the previous code NPE'd
        // on a missing "_tableName" and threw on a JSON-null "_value".
        JsonElement tableName = json.get("_tableName");
        if (tableName != null && !tableName.isJsonNull())
            obj.setTableName(tableName.getAsString());
        JsonElement value = json.get("_value");
        if (value != null && !value.isJsonNull())
            obj.setValue(value.getAsLong());
        return obj;
    }
}

View File

@ -10,30 +10,30 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.api;
public class SerializationContext {
private static ThreadLocal<SerializationContext> s_currentContext = new ThreadLocal<SerializationContext>();
private boolean _doUuidTranslation = false;
public SerializationContext() {
}
public static SerializationContext current() {
SerializationContext context = s_currentContext.get();
if(context == null) {
context = new SerializationContext();
s_currentContext.set(context);
}
return context;
}
public boolean getUuidTranslation() {
return _doUuidTranslation;
}
public void setUuidTranslation(boolean value) {
_doUuidTranslation = value;
}
}
package com.cloud.api;
/**
 * Per-thread serialization settings, currently just the flag controlling
 * whether internal ids are translated to UUIDs during GSON serialization.
 * Each thread lazily receives its own instance via current().
 */
public class SerializationContext {
    private static ThreadLocal<SerializationContext> s_currentContext = new ThreadLocal<SerializationContext>();

    private boolean _doUuidTranslation = false;

    public SerializationContext() {
    }

    /** Returns this thread's context, creating and caching one on first use. */
    public static SerializationContext current() {
        SerializationContext ctx = s_currentContext.get();
        if (ctx != null) {
            return ctx;
        }
        ctx = new SerializationContext();
        s_currentContext.set(ctx);
        return ctx;
    }

    /** Whether id-to-UUID translation is enabled for this thread. */
    public boolean getUuidTranslation() {
        return _doUuidTranslation;
    }

    /** Enables or disables id-to-UUID translation for this thread. */
    public void setUuidTranslation(boolean value) {
        _doUuidTranslation = value;
    }
}

View File

@ -10,81 +10,81 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.api.commands;
import java.util.Date;
import org.apache.log4j.Logger;
import com.cloud.api.ApiConstants;
import com.cloud.api.BaseCmd;
package com.cloud.api.commands;
import java.util.Date;
import org.apache.log4j.Logger;
import com.cloud.api.ApiConstants;
import com.cloud.api.BaseCmd;
import com.cloud.api.IdentityMapper;
import com.cloud.api.Implementation;
import com.cloud.api.Parameter;
import com.cloud.api.ServerApiException;
import com.cloud.api.response.SuccessResponse;
import com.cloud.server.ManagementServerExt;
import com.cloud.user.Account;
@Implementation(description="Generates usage records. This will generate records only if there any records to be generated, i.e if the scheduled usage job was not run or failed", responseObject=SuccessResponse.class)
public class GenerateUsageRecordsCmd extends BaseCmd {
public static final Logger s_logger = Logger.getLogger(GenerateUsageRecordsCmd.class.getName());
private static final String s_name = "generateusagerecordsresponse";
/////////////////////////////////////////////////////
//////////////// API parameters /////////////////////
/////////////////////////////////////////////////////
import com.cloud.api.Implementation;
import com.cloud.api.Parameter;
import com.cloud.api.ServerApiException;
import com.cloud.api.response.SuccessResponse;
import com.cloud.server.ManagementServerExt;
import com.cloud.user.Account;
@Implementation(description="Generates usage records. This will generate records only if there any records to be generated, i.e if the scheduled usage job was not run or failed", responseObject=SuccessResponse.class)
public class GenerateUsageRecordsCmd extends BaseCmd {
public static final Logger s_logger = Logger.getLogger(GenerateUsageRecordsCmd.class.getName());
private static final String s_name = "generateusagerecordsresponse";
/////////////////////////////////////////////////////
//////////////// API parameters /////////////////////
/////////////////////////////////////////////////////
@IdentityMapper(entityTableName="domain")
@Parameter(name=ApiConstants.DOMAIN_ID, type=CommandType.LONG, description="List events for the specified domain.")
private Long domainId;
@Parameter(name=ApiConstants.END_DATE, type=CommandType.DATE, required=true, description="End date range for usage record query. Use yyyy-MM-dd as the date format, e.g. startDate=2009-06-03.")
private Date endDate;
@Parameter(name=ApiConstants.START_DATE, type=CommandType.DATE, required=true, description="Start date range for usage record query. Use yyyy-MM-dd as the date format, e.g. startDate=2009-06-01.")
private Date startDate;
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
public Long getDomainId() {
return domainId;
}
public Date getEndDate() {
return endDate;
}
public Date getStartDate() {
return startDate;
}
/////////////////////////////////////////////////////
/////////////// API Implementation///////////////////
/////////////////////////////////////////////////////
@Override
public String getCommandName() {
return s_name;
}
@Override
public long getEntityOwnerId() {
return Account.ACCOUNT_ID_SYSTEM;
}
/**
 * Triggers on-demand usage record generation via the extended management
 * server and reports success, or raises a server API error on failure.
 */
@Override
public void execute(){
// Usage generation lives on the extended management server interface.
ManagementServerExt _mgrExt = (ManagementServerExt)_mgr;
boolean result = _mgrExt.generateUsageRecords(this);
if (result) {
SuccessResponse response = new SuccessResponse(getCommandName());
this.setResponseObject(response);
} else {
throw new ServerApiException(BaseCmd.INTERNAL_ERROR, "Failed to generate usage records");
}
}
@Parameter(name=ApiConstants.DOMAIN_ID, type=CommandType.LONG, description="List events for the specified domain.")
private Long domainId;
@Parameter(name=ApiConstants.END_DATE, type=CommandType.DATE, required=true, description="End date range for usage record query. Use yyyy-MM-dd as the date format, e.g. startDate=2009-06-03.")
private Date endDate;
@Parameter(name=ApiConstants.START_DATE, type=CommandType.DATE, required=true, description="Start date range for usage record query. Use yyyy-MM-dd as the date format, e.g. startDate=2009-06-01.")
private Date startDate;
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
public Long getDomainId() {
return domainId;
}
public Date getEndDate() {
return endDate;
}
public Date getStartDate() {
return startDate;
}
/////////////////////////////////////////////////////
/////////////// API Implementation///////////////////
/////////////////////////////////////////////////////
@Override
public String getCommandName() {
return s_name;
}
@Override
public long getEntityOwnerId() {
return Account.ACCOUNT_ID_SYSTEM;
}
@Override
public void execute(){
ManagementServerExt _mgrExt = (ManagementServerExt)_mgr;
boolean result = _mgrExt.generateUsageRecords(this);
if (result) {
SuccessResponse response = new SuccessResponse(getCommandName());
this.setResponseObject(response);
} else {
throw new ServerApiException(BaseCmd.INTERNAL_ERROR, "Failed to generate usage records");
}
}
}

View File

@ -10,8 +10,8 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.api.commands;
package com.cloud.api.commands;
import java.text.DecimalFormat;
import java.util.ArrayList;
import java.util.Calendar;
@ -36,32 +36,32 @@ import com.cloud.usage.UsageVO;
import com.cloud.user.Account;
import com.cloud.uuididentity.dao.IdentityDao;
import com.cloud.uuididentity.dao.IdentityDaoImpl;
@Implementation(description="Lists usage records for accounts", responseObject=UsageRecordResponse.class)
public class GetUsageRecordsCmd extends BaseListCmd {
public static final Logger s_logger = Logger.getLogger(GetUsageRecordsCmd.class.getName());
@Implementation(description="Lists usage records for accounts", responseObject=UsageRecordResponse.class)
public class GetUsageRecordsCmd extends BaseListCmd {
public static final Logger s_logger = Logger.getLogger(GetUsageRecordsCmd.class.getName());
private static final String s_name = "listusagerecordsresponse";
/////////////////////////////////////////////////////
//////////////// API parameters /////////////////////
/////////////////////////////////////////////////////
@Parameter(name=ApiConstants.ACCOUNT, type=CommandType.STRING, description="List usage records for the specified user.")
private String accountName;
/////////////////////////////////////////////////////
//////////////// API parameters /////////////////////
/////////////////////////////////////////////////////
@Parameter(name=ApiConstants.ACCOUNT, type=CommandType.STRING, description="List usage records for the specified user.")
private String accountName;
@IdentityMapper(entityTableName="domain")
@Parameter(name=ApiConstants.DOMAIN_ID, type=CommandType.LONG, description="List usage records for the specified domain.")
private Long domainId;
@Parameter(name=ApiConstants.END_DATE, type=CommandType.DATE, required=true, description="End date range for usage record query. Use yyyy-MM-dd as the date format, e.g. startDate=2009-06-03.")
private Date endDate;
@Parameter(name=ApiConstants.START_DATE, type=CommandType.DATE, required=true, description="Start date range for usage record query. Use yyyy-MM-dd as the date format, e.g. startDate=2009-06-01.")
private Date startDate;
@Parameter(name=ApiConstants.DOMAIN_ID, type=CommandType.LONG, description="List usage records for the specified domain.")
private Long domainId;
@Parameter(name=ApiConstants.END_DATE, type=CommandType.DATE, required=true, description="End date range for usage record query. Use yyyy-MM-dd as the date format, e.g. startDate=2009-06-03.")
private Date endDate;
@Parameter(name=ApiConstants.START_DATE, type=CommandType.DATE, required=true, description="Start date range for usage record query. Use yyyy-MM-dd as the date format, e.g. startDate=2009-06-01.")
private Date startDate;
@IdentityMapper(entityTableName="account")
@Parameter(name=ApiConstants.ACCOUNT_ID, type=CommandType.LONG, description="List usage records for the specified account")
@Parameter(name=ApiConstants.ACCOUNT_ID, type=CommandType.LONG, description="List usage records for the specified account")
private Long accountId;
@IdentityMapper(entityTableName="projects")
@ -69,30 +69,30 @@ public class GetUsageRecordsCmd extends BaseListCmd {
private Long projectId;
@Parameter(name=ApiConstants.TYPE, type=CommandType.LONG, description="List usage records for the specified usage type")
private Long usageType;
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
public String getAccountName() {
return accountName;
}
public Long getDomainId() {
return domainId;
}
public Date getEndDate() {
return endDate;
}
private Long usageType;
public Date getStartDate() {
return startDate;
}
public Long getAccountId() {
return accountId;
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
public String getAccountName() {
return accountName;
}
public Long getDomainId() {
return domainId;
}
public Date getEndDate() {
return endDate;
}
public Date getStartDate() {
return startDate;
}
public Long getAccountId() {
return accountId;
}
public Long getUsageType() {
@ -102,125 +102,125 @@ public class GetUsageRecordsCmd extends BaseListCmd {
public Long getProjectId() {
return projectId;
}
/////////////////////////////////////////////////////
/////////////// Misc parameters ///////////////////
/////////////////////////////////////////////////////
/////////////////////////////////////////////////////
/////////////// Misc parameters ///////////////////
/////////////////////////////////////////////////////
private TimeZone usageTimezone;
public TimeZone getUsageTimezone() {
return usageTimezone;
}
public void setUsageTimezone(TimeZone tz) {
this.usageTimezone = tz;
}
/////////////////////////////////////////////////////
/////////////// API Implementation///////////////////
/////////////////////////////////////////////////////
@Override
public String getCommandName() {
return s_name;
}
/**
 * Formats {@code inputDate} in the usage timezone as
 * yyyy-MM-dd'T'HH:mm:ss followed by a +HH:mm / -HH:mm offset suffix
 * (the literal quotes around T are intentional and preserved from the
 * original format). The offset includes DST savings when the timezone is
 * in daylight time at {@code inputDate}.
 *
 * @param inputDate date to format; null yields null
 * @return formatted date string, or null for null input
 */
public String getDateStringInternal(Date inputDate) {
    if (inputDate == null) return null;
    TimeZone tz = getUsageTimezone();
    Calendar cal = Calendar.getInstance(tz);
    cal.setTime(inputDate);

    // StringBuilder instead of StringBuffer: single-threaded use, no need
    // for synchronized appends.
    StringBuilder sb = new StringBuilder();
    sb.append(cal.get(Calendar.YEAR)).append('-');
    sb.append(pad2(cal.get(Calendar.MONTH) + 1)).append('-');
    sb.append(pad2(cal.get(Calendar.DAY_OF_MONTH)));
    sb.append("'T'");
    sb.append(pad2(cal.get(Calendar.HOUR_OF_DAY))).append(':');
    sb.append(pad2(cal.get(Calendar.MINUTE))).append(':');
    sb.append(pad2(cal.get(Calendar.SECOND)));

    // Raw zone offset in milliseconds, plus DST savings when applicable.
    double offset = cal.get(Calendar.ZONE_OFFSET);
    if (tz.inDaylightTime(inputDate)) {
        offset += (1.0 * tz.getDSTSavings()); // add the timezone's DST value (typically 1 hour expressed in milliseconds)
    }
    offset = offset / (1000d * 60d * 60d); // milliseconds -> fractional hours
    int hourOffset = (int) offset;
    double decimalVal = Math.abs(offset) - Math.abs(hourOffset);
    int minuteOffset = (int) (decimalVal * 60);
    if (hourOffset < 0) {
        sb.append('-').append(pad2(Math.abs(hourOffset)));
    } else {
        sb.append('+').append(pad2(hourOffset));
    }
    sb.append(':').append(pad2(minuteOffset));
    return sb.toString();
}

// Zero-pads a non-negative value to two digits (e.g. 7 -> "07", 12 -> "12").
private static String pad2(int v) {
    return v < 10 ? "0" + v : String.valueOf(v);
}
@Override
public void execute(){
ManagementServerExt _mgrExt = (ManagementServerExt)_mgr;
public TimeZone getUsageTimezone() {
return usageTimezone;
}
public void setUsageTimezone(TimeZone tz) {
this.usageTimezone = tz;
}
/////////////////////////////////////////////////////
/////////////// API Implementation///////////////////
/////////////////////////////////////////////////////
@Override
public String getCommandName() {
return s_name;
}
public String getDateStringInternal(Date inputDate) {
if (inputDate == null) return null;
TimeZone tz = getUsageTimezone();
Calendar cal = Calendar.getInstance(tz);
cal.setTime(inputDate);
StringBuffer sb = new StringBuffer();
sb.append(cal.get(Calendar.YEAR)+"-");
int month = cal.get(Calendar.MONTH) + 1;
if (month < 10) {
sb.append("0" + month + "-");
} else {
sb.append(month+"-");
}
int day = cal.get(Calendar.DAY_OF_MONTH);
if (day < 10) {
sb.append("0" + day);
} else {
sb.append(""+day);
}
sb.append("'T'");
int hour = cal.get(Calendar.HOUR_OF_DAY);
if (hour < 10) {
sb.append("0" + hour + ":");
} else {
sb.append(hour+":");
}
int minute = cal.get(Calendar.MINUTE);
if (minute < 10) {
sb.append("0" + minute + ":");
} else {
sb.append(minute+":");
}
int seconds = cal.get(Calendar.SECOND);
if (seconds < 10) {
sb.append("0" + seconds);
} else {
sb.append(""+seconds);
}
double offset = cal.get(Calendar.ZONE_OFFSET);
if (tz.inDaylightTime(inputDate)) {
offset += (1.0*tz.getDSTSavings()); // add the timezone's DST value (typically 1 hour expressed in milliseconds)
}
offset = offset / (1000d*60d*60d);
int hourOffset = (int)offset;
double decimalVal = Math.abs(offset) - Math.abs(hourOffset);
int minuteOffset = (int)(decimalVal * 60);
if (hourOffset < 0) {
if (hourOffset > -10) {
sb.append("-0"+Math.abs(hourOffset));
} else {
sb.append("-"+Math.abs(hourOffset));
}
} else {
if (hourOffset < 10) {
sb.append("+0" + hourOffset);
} else {
sb.append("+" + hourOffset);
}
}
sb.append(":");
if (minuteOffset == 0) {
sb.append("00");
} else if (minuteOffset < 10) {
sb.append("0" + minuteOffset);
} else {
sb.append("" + minuteOffset);
}
return sb.toString();
}
@Override
public void execute(){
ManagementServerExt _mgrExt = (ManagementServerExt)_mgr;
List<UsageVO> usageRecords = _mgrExt.getUsageRecords(this);
IdentityDao identityDao = new IdentityDaoImpl();
ListResponse<UsageRecordResponse> response = new ListResponse<UsageRecordResponse>();
List<UsageRecordResponse> usageResponses = new ArrayList<UsageRecordResponse>();
for (Object usageRecordGeneric : usageRecords) {
UsageRecordResponse usageRecResponse = new UsageRecordResponse();
if (usageRecordGeneric instanceof UsageVO) {
UsageVO usageRecord = (UsageVO)usageRecordGeneric;
IdentityDao identityDao = new IdentityDaoImpl();
ListResponse<UsageRecordResponse> response = new ListResponse<UsageRecordResponse>();
List<UsageRecordResponse> usageResponses = new ArrayList<UsageRecordResponse>();
for (Object usageRecordGeneric : usageRecords) {
UsageRecordResponse usageRecResponse = new UsageRecordResponse();
if (usageRecordGeneric instanceof UsageVO) {
UsageVO usageRecord = (UsageVO)usageRecordGeneric;
Account account = ApiDBUtils.findAccountByIdIncludingRemoved(usageRecord.getAccountId());
if (account.getType() == Account.ACCOUNT_TYPE_PROJECT) {
@ -234,12 +234,12 @@ public class GetUsageRecordsCmd extends BaseListCmd {
}
usageRecResponse.setDomainId(usageRecord.getDomainId());
usageRecResponse.setZoneId(usageRecord.getZoneId());
usageRecResponse.setDescription(usageRecord.getDescription());
usageRecResponse.setUsage(usageRecord.getUsageDisplay());
usageRecResponse.setUsageType(usageRecord.getUsageType());
usageRecResponse.setVirtualMachineId(usageRecord.getVmInstanceId());
usageRecResponse.setZoneId(usageRecord.getZoneId());
usageRecResponse.setDescription(usageRecord.getDescription());
usageRecResponse.setUsage(usageRecord.getUsageDisplay());
usageRecResponse.setUsageType(usageRecord.getUsageType());
usageRecResponse.setVirtualMachineId(usageRecord.getVmInstanceId());
usageRecResponse.setVmName(usageRecord.getVmName());
usageRecResponse.setTemplateId(usageRecord.getTemplateId());
@ -317,25 +317,25 @@ public class GetUsageRecordsCmd extends BaseListCmd {
usageRecResponse.setUsageId(identityDao.getIdentityUuid("security_group", usageRecord.getUsageId().toString()));
}
if (usageRecord.getRawUsage() != null) {
DecimalFormat decimalFormat = new DecimalFormat("###########.######");
usageRecResponse.setRawUsage(decimalFormat.format(usageRecord.getRawUsage()));
}
if (usageRecord.getStartDate() != null) {
usageRecResponse.setStartDate(getDateStringInternal(usageRecord.getStartDate()));
}
if (usageRecord.getEndDate() != null) {
usageRecResponse.setEndDate(getDateStringInternal(usageRecord.getEndDate()));
}
}
usageRecResponse.setObjectName("usagerecord");
usageResponses.add(usageRecResponse);
}
response.setResponses(usageResponses);
response.setResponseName(getCommandName());
this.setResponseObject(response);
}
}
if (usageRecord.getRawUsage() != null) {
DecimalFormat decimalFormat = new DecimalFormat("###########.######");
usageRecResponse.setRawUsage(decimalFormat.format(usageRecord.getRawUsage()));
}
if (usageRecord.getStartDate() != null) {
usageRecResponse.setStartDate(getDateStringInternal(usageRecord.getStartDate()));
}
if (usageRecord.getEndDate() != null) {
usageRecResponse.setEndDate(getDateStringInternal(usageRecord.getEndDate()));
}
}
usageRecResponse.setObjectName("usagerecord");
usageResponses.add(usageRecResponse);
}
response.setResponses(usageResponses);
response.setResponseName(getCommandName());
this.setResponseObject(response);
}
}

View File

@ -10,58 +10,58 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.api.response;
package com.cloud.api.response;
import com.cloud.network.security.SecurityRule.SecurityRuleType;
import com.cloud.serializer.Param;
public class SecurityGroupRuleResultObject {
@Param(name="id")
private Long id;
@Param(name="startport")
private int startPort;
@Param(name="endport")
private int endPort;
@Param(name="protocol")
private String protocol;
@Param(name="securitygroup")
private String allowedSecurityGroup = null;
@Param(name="account")
private String allowedSecGroupAcct = null;
@Param(name="cidr")
import com.cloud.serializer.Param;
public class SecurityGroupRuleResultObject {
@Param(name="id")
private Long id;
@Param(name="startport")
private int startPort;
@Param(name="endport")
private int endPort;
@Param(name="protocol")
private String protocol;
@Param(name="securitygroup")
private String allowedSecurityGroup = null;
@Param(name="account")
private String allowedSecGroupAcct = null;
@Param(name="cidr")
private String allowedSourceIpCidr = null;
private SecurityRuleType type;
public SecurityGroupRuleResultObject() { }
public SecurityGroupRuleResultObject(Long id, int startPort, int endPort, String protocol, String allowedSecurityGroup, String allowedSecGroupAcct, String allowedSourceIpCidr) {
this.id = id;
this.startPort = startPort;
this.endPort = endPort;
this.protocol = protocol;
this.allowedSecurityGroup = allowedSecurityGroup;
this.allowedSecGroupAcct = allowedSecGroupAcct;
this.allowedSourceIpCidr = allowedSourceIpCidr;
}
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public int getStartPort() {
return startPort;
}
private SecurityRuleType type;
public SecurityGroupRuleResultObject() { }
public SecurityGroupRuleResultObject(Long id, int startPort, int endPort, String protocol, String allowedSecurityGroup, String allowedSecGroupAcct, String allowedSourceIpCidr) {
this.id = id;
this.startPort = startPort;
this.endPort = endPort;
this.protocol = protocol;
this.allowedSecurityGroup = allowedSecurityGroup;
this.allowedSecGroupAcct = allowedSecGroupAcct;
this.allowedSourceIpCidr = allowedSourceIpCidr;
}
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public int getStartPort() {
return startPort;
}
public void setRuleType(SecurityRuleType type) {
this.type = type;
@ -71,48 +71,48 @@ public class SecurityGroupRuleResultObject {
public SecurityRuleType getRuleType() {
return type;
}
public void setStartPort(int startPort) {
this.startPort = startPort;
}
public int getEndPort() {
return endPort;
}
public void setEndPort(int endPort) {
this.endPort = endPort;
}
public String getProtocol() {
return protocol;
}
public void setProtocol(String protocol) {
this.protocol = protocol;
}
public String getAllowedSecurityGroup() {
return allowedSecurityGroup;
}
public void setAllowedSecurityGroup(String allowedSecurityGroup) {
this.allowedSecurityGroup = allowedSecurityGroup;
}
public String getAllowedSecGroupAcct() {
return allowedSecGroupAcct;
}
public void setAllowedSecGroupAcct(String allowedSecGroupAcct) {
this.allowedSecGroupAcct = allowedSecGroupAcct;
}
public String getAllowedSourceIpCidr() {
return allowedSourceIpCidr;
}
public void setAllowedSourceIpCidr(String allowedSourceIpCidr) {
this.allowedSourceIpCidr = allowedSourceIpCidr;
}
}
public void setStartPort(int startPort) {
this.startPort = startPort;
}
public int getEndPort() {
return endPort;
}
public void setEndPort(int endPort) {
this.endPort = endPort;
}
public String getProtocol() {
return protocol;
}
public void setProtocol(String protocol) {
this.protocol = protocol;
}
public String getAllowedSecurityGroup() {
return allowedSecurityGroup;
}
public void setAllowedSecurityGroup(String allowedSecurityGroup) {
this.allowedSecurityGroup = allowedSecurityGroup;
}
public String getAllowedSecGroupAcct() {
return allowedSecGroupAcct;
}
public void setAllowedSecGroupAcct(String allowedSecGroupAcct) {
this.allowedSecGroupAcct = allowedSecGroupAcct;
}
public String getAllowedSourceIpCidr() {
return allowedSourceIpCidr;
}
public void setAllowedSourceIpCidr(String allowedSourceIpCidr) {
this.allowedSourceIpCidr = allowedSourceIpCidr;
}
}

View File

@ -10,26 +10,26 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.async;
package com.cloud.async;
public interface AsyncJobExecutor {
public AsyncJobManager getAsyncJobMgr();
public void setAsyncJobMgr(AsyncJobManager asyncMgr);
public SyncQueueItemVO getSyncSource();
public void setSyncSource(SyncQueueItemVO syncSource);
public AsyncJobVO getJob();
public void setJob(AsyncJobVO job);
public void setFromPreviousSession(boolean value);
public boolean isFromPreviousSession();
/**
*
* otherwise return false and once the executor finally has completed with the sync source,
* it needs to call AsyncJobManager.releaseSyncSource
*
* if executor does not have a sync source, always return true
*/
public boolean execute();
}
public interface AsyncJobExecutor {
public AsyncJobManager getAsyncJobMgr();
public void setAsyncJobMgr(AsyncJobManager asyncMgr);
public SyncQueueItemVO getSyncSource();
public void setSyncSource(SyncQueueItemVO syncSource);
public AsyncJobVO getJob();
public void setJob(AsyncJobVO job);
public void setFromPreviousSession(boolean value);
public boolean isFromPreviousSession();
/**
*
* otherwise return false and once the executor finally has completed with the sync source,
* it needs to call AsyncJobManager.releaseSyncSource
*
* if executor does not have a sync source, always return true
*/
public boolean execute();
}

View File

@ -10,8 +10,8 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.async;
package com.cloud.async;
import com.cloud.agent.AgentManager;
import com.cloud.async.dao.AsyncJobDao;
import com.cloud.event.dao.EventDao;
@ -29,22 +29,22 @@ import com.cloud.vm.UserVmManager;
import com.cloud.vm.VirtualMachineManager;
import com.cloud.vm.dao.DomainRouterDao;
import com.cloud.vm.dao.UserVmDao;
public interface AsyncJobExecutorContext extends Manager {
public ManagementServer getManagementServer();
public AgentManager getAgentMgr();
public NetworkManager getNetworkMgr();
public UserVmManager getVmMgr();
public SnapshotManager getSnapshotMgr();
public interface AsyncJobExecutorContext extends Manager {
public ManagementServer getManagementServer();
public AgentManager getAgentMgr();
public NetworkManager getNetworkMgr();
public UserVmManager getVmMgr();
public SnapshotManager getSnapshotMgr();
public AccountManager getAccountMgr();
public StorageManager getStorageMgr();
public EventDao getEventDao();
public UserVmDao getVmDao();
public AccountDao getAccountDao();
public VolumeDao getVolumeDao();
public DomainRouterDao getRouterDao();
public StorageManager getStorageMgr();
public EventDao getEventDao();
public UserVmDao getVmDao();
public AccountDao getAccountDao();
public VolumeDao getVolumeDao();
public DomainRouterDao getRouterDao();
public IPAddressDao getIpAddressDao();
public AsyncJobDao getJobDao();
public UserDao getUserDao();
public VirtualMachineManager getItMgr();
}
}

View File

@ -10,8 +10,8 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.async;
package com.cloud.async;
import java.util.Map;
import javax.ejb.Local;
@ -34,97 +34,97 @@ import com.cloud.vm.UserVmManager;
import com.cloud.vm.VirtualMachineManager;
import com.cloud.vm.dao.DomainRouterDao;
import com.cloud.vm.dao.UserVmDao;
@Local(value={AsyncJobExecutorContext.class})
public class AsyncJobExecutorContextImpl implements AsyncJobExecutorContext {
private String _name;
private AgentManager _agentMgr;
private NetworkManager _networkMgr;
private UserVmManager _vmMgr;
private SnapshotManager _snapMgr;
@Local(value={AsyncJobExecutorContext.class})
public class AsyncJobExecutorContextImpl implements AsyncJobExecutorContext {
private String _name;
private AgentManager _agentMgr;
private NetworkManager _networkMgr;
private UserVmManager _vmMgr;
private SnapshotManager _snapMgr;
private AccountManager _accountMgr;
private StorageManager _storageMgr;
private EventDao _eventDao;
private UserVmDao _vmDao;
private AccountDao _accountDao;
private VolumeDao _volumeDao;
private DomainRouterDao _routerDao;
private StorageManager _storageMgr;
private EventDao _eventDao;
private UserVmDao _vmDao;
private AccountDao _accountDao;
private VolumeDao _volumeDao;
private DomainRouterDao _routerDao;
private IPAddressDao _ipAddressDao;
private AsyncJobDao _jobDao;
private UserDao _userDao;
private VirtualMachineManager _itMgr;
private ManagementServer _managementServer;
@Override
public ManagementServer getManagementServer() {
return _managementServer;
}
@Override
public AgentManager getAgentMgr() {
return _agentMgr;
}
@Override
public NetworkManager getNetworkMgr() {
return _networkMgr;
}
@Override
public UserVmManager getVmMgr() {
return _vmMgr;
private VirtualMachineManager _itMgr;
private ManagementServer _managementServer;
@Override
public ManagementServer getManagementServer() {
return _managementServer;
}
@Override
public AgentManager getAgentMgr() {
return _agentMgr;
}
@Override
public NetworkManager getNetworkMgr() {
return _networkMgr;
}
@Override
public UserVmManager getVmMgr() {
return _vmMgr;
}
@Override
public StorageManager getStorageMgr() {
return _storageMgr;
}
/**server/src/com/cloud/async/AsyncJobExecutorContext.java
* @return the _snapMgr
*/
@Override
public SnapshotManager getSnapshotMgr() {
return _snapMgr;
}
@Override
public AccountManager getAccountMgr() {
return _accountMgr;
}
@Override
public EventDao getEventDao() {
return _eventDao;
}
@Override
public UserVmDao getVmDao() {
return _vmDao;
}
@Override
public AccountDao getAccountDao() {
return _accountDao;
}
@Override
public VolumeDao getVolumeDao() {
return _volumeDao;
}
@Override
public DomainRouterDao getRouterDao() {
return _routerDao;
}
@Override
public IPAddressDao getIpAddressDao() {
return _ipAddressDao;
}
/**server/src/com/cloud/async/AsyncJobExecutorContext.java
* @return the _snapMgr
*/
@Override
public SnapshotManager getSnapshotMgr() {
return _snapMgr;
}
@Override
public AccountManager getAccountMgr() {
return _accountMgr;
}
@Override
public EventDao getEventDao() {
return _eventDao;
}
@Override
public UserVmDao getVmDao() {
return _vmDao;
}
@Override
public AccountDao getAccountDao() {
return _accountDao;
}
@Override
public VolumeDao getVolumeDao() {
return _volumeDao;
}
@Override
public DomainRouterDao getRouterDao() {
return _routerDao;
}
@Override
public IPAddressDao getIpAddressDao() {
return _ipAddressDao;
}
@Override
public AsyncJobDao getJobDao() {
return _jobDao;
@ -139,75 +139,75 @@ public class AsyncJobExecutorContextImpl implements AsyncJobExecutorContext {
public VirtualMachineManager getItMgr() {
return _itMgr;
}
@Override
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
_name = name;
ComponentLocator locator = ComponentLocator.getCurrentLocator();
_managementServer = (ManagementServer)ComponentLocator.getComponent("management-server");
if (_managementServer == null) {
throw new ConfigurationException("unable to get " + ManagementServer.class.getName());
}
_agentMgr = locator.getManager(AgentManager.class);
if (_agentMgr == null) {
throw new ConfigurationException("unable to get " + AgentManager.class.getName());
}
_networkMgr = locator.getManager(NetworkManager.class);
if (_networkMgr == null) {
throw new ConfigurationException("unable to get " + NetworkManager.class.getName());
}
_vmMgr = locator.getManager(UserVmManager.class);
if (_vmMgr == null) {
throw new ConfigurationException("unable to get " + UserVmManager.class.getName());
}
_snapMgr = locator.getManager(SnapshotManager.class);
if (_snapMgr == null) {
throw new ConfigurationException("unable to get " + SnapshotManager.class.getName());
}
_accountMgr = locator.getManager(AccountManager.class);
if (_accountMgr == null) {
throw new ConfigurationException("unable to get " + AccountManager.class.getName());
@Override
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
_name = name;
ComponentLocator locator = ComponentLocator.getCurrentLocator();
_managementServer = (ManagementServer)ComponentLocator.getComponent("management-server");
if (_managementServer == null) {
throw new ConfigurationException("unable to get " + ManagementServer.class.getName());
}
_agentMgr = locator.getManager(AgentManager.class);
if (_agentMgr == null) {
throw new ConfigurationException("unable to get " + AgentManager.class.getName());
}
_networkMgr = locator.getManager(NetworkManager.class);
if (_networkMgr == null) {
throw new ConfigurationException("unable to get " + NetworkManager.class.getName());
}
_vmMgr = locator.getManager(UserVmManager.class);
if (_vmMgr == null) {
throw new ConfigurationException("unable to get " + UserVmManager.class.getName());
}
_snapMgr = locator.getManager(SnapshotManager.class);
if (_snapMgr == null) {
throw new ConfigurationException("unable to get " + SnapshotManager.class.getName());
}
_accountMgr = locator.getManager(AccountManager.class);
if (_accountMgr == null) {
throw new ConfigurationException("unable to get " + AccountManager.class.getName());
}
_storageMgr = locator.getManager(StorageManager.class);
if (_storageMgr == null) {
throw new ConfigurationException("unable to get " + StorageManager.class.getName());
}
_eventDao = locator.getDao(EventDao.class);
if (_eventDao == null) {
throw new ConfigurationException("unable to get " + EventDao.class.getName());
}
_vmDao = locator.getDao(UserVmDao.class);
if (_vmDao == null) {
throw new ConfigurationException("unable to get " + UserVmDao.class.getName());
}
_accountDao = locator.getDao(AccountDao.class);
if (_accountDao == null) {
throw new ConfigurationException("unable to get " + AccountDao.class.getName());
}
_volumeDao = locator.getDao(VolumeDao.class);
if (_volumeDao == null) {
throw new ConfigurationException("unable to get " + VolumeDao.class.getName());
}
_routerDao = locator.getDao(DomainRouterDao.class);
if (_routerDao == null) {
throw new ConfigurationException("unable to get " + DomainRouterDao.class.getName());
}
_ipAddressDao = locator.getDao(IPAddressDao.class);
if (_ipAddressDao == null) {
throw new ConfigurationException("unable to get " + IPAddressDao.class.getName());
}
_eventDao = locator.getDao(EventDao.class);
if (_eventDao == null) {
throw new ConfigurationException("unable to get " + EventDao.class.getName());
}
_vmDao = locator.getDao(UserVmDao.class);
if (_vmDao == null) {
throw new ConfigurationException("unable to get " + UserVmDao.class.getName());
}
_accountDao = locator.getDao(AccountDao.class);
if (_accountDao == null) {
throw new ConfigurationException("unable to get " + AccountDao.class.getName());
}
_volumeDao = locator.getDao(VolumeDao.class);
if (_volumeDao == null) {
throw new ConfigurationException("unable to get " + VolumeDao.class.getName());
}
_routerDao = locator.getDao(DomainRouterDao.class);
if (_routerDao == null) {
throw new ConfigurationException("unable to get " + DomainRouterDao.class.getName());
}
_ipAddressDao = locator.getDao(IPAddressDao.class);
if (_ipAddressDao == null) {
throw new ConfigurationException("unable to get " + IPAddressDao.class.getName());
}
_jobDao = locator.getDao(AsyncJobDao.class);
@ -223,22 +223,22 @@ public class AsyncJobExecutorContextImpl implements AsyncJobExecutorContext {
_itMgr = locator.getManager(VirtualMachineManager.class);
if (_itMgr == null) {
throw new ConfigurationException("unable to get " + VirtualMachineManager.class.getName());
}
return true;
}
@Override
public boolean start() {
return true;
}
@Override
public boolean stop() {
return true;
}
@Override
public String getName() {
return _name;
}
}
}
return true;
}
@Override
public boolean start() {
return true;
}
@Override
public boolean stop() {
return true;
}
@Override
public String getName() {
return _name;
}
}

View File

@ -10,29 +10,29 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.async;
package com.cloud.async;
import java.util.List;
import com.cloud.api.commands.QueryAsyncJobResultCmd;
import com.cloud.utils.component.Manager;
public interface AsyncJobManager extends Manager {
public AsyncJobExecutorContext getExecutorContext();
public AsyncJobVO getAsyncJob(long jobId);
public AsyncJobExecutorContext getExecutorContext();
public AsyncJobVO getAsyncJob(long jobId);
public AsyncJobVO findInstancePendingAsyncJob(String instanceType, long instanceId);
public List<? extends AsyncJob> findInstancePendingAsyncJobs(AsyncJob.Type instanceType, Long accountId);
public long submitAsyncJob(AsyncJobVO job);
public long submitAsyncJob(AsyncJobVO job, boolean scheduleJobExecutionInContext);
public List<? extends AsyncJob> findInstancePendingAsyncJobs(AsyncJob.Type instanceType, Long accountId);
public long submitAsyncJob(AsyncJobVO job);
public long submitAsyncJob(AsyncJobVO job, boolean scheduleJobExecutionInContext);
public AsyncJobResult queryAsyncJobResult(long jobId);
public void completeAsyncJob(long jobId, int jobStatus, int resultCode, Object resultObject);
public void updateAsyncJobStatus(long jobId, int processStatus, Object resultObject);
public void updateAsyncJobAttachment(long jobId, String instanceType, Long instanceId);
public void completeAsyncJob(long jobId, int jobStatus, int resultCode, Object resultObject);
public void updateAsyncJobStatus(long jobId, int processStatus, Object resultObject);
public void updateAsyncJobAttachment(long jobId, String instanceType, Long instanceId);
public void releaseSyncSource(AsyncJobExecutor executor);
public void syncAsyncJobExecution(AsyncJob job, String syncObjType, long syncObjId);
@ -42,5 +42,5 @@ public interface AsyncJobManager extends Manager {
* @param cmd the command that specifies the job id
* @return an async-call result object
*/
public AsyncJob queryAsyncJobResult(QueryAsyncJobResultCmd cmd);
}
public AsyncJob queryAsyncJobResult(QueryAsyncJobResultCmd cmd);
}

View File

@ -10,8 +10,8 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.async;
package com.cloud.async;
import java.io.File;
import java.io.FileInputStream;
import java.lang.reflect.Type;
@ -68,182 +68,182 @@ import com.cloud.utils.mgmt.JmxUtil;
import com.cloud.utils.net.MacAddress;
import com.google.gson.Gson;
import com.google.gson.reflect.TypeToken;
@Local(value={AsyncJobManager.class})
public class AsyncJobManagerImpl implements AsyncJobManager, ClusterManagerListener {
public static final Logger s_logger = Logger.getLogger(AsyncJobManagerImpl.class.getName());
@Local(value={AsyncJobManager.class})
public class AsyncJobManagerImpl implements AsyncJobManager, ClusterManagerListener {
public static final Logger s_logger = Logger.getLogger(AsyncJobManagerImpl.class.getName());
private static final int ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_COOPERATION = 3; // 3 seconds
private static final int MAX_ONETIME_SCHEDULE_SIZE = 50;
private static final int MAX_ONETIME_SCHEDULE_SIZE = 50;
private static final int HEARTBEAT_INTERVAL = 2000;
private static final int GC_INTERVAL = 10000; // 10 seconds
private String _name;
private AsyncJobExecutorContext _context;
private SyncQueueManager _queueMgr;
private String _name;
private AsyncJobExecutorContext _context;
private SyncQueueManager _queueMgr;
private ClusterManager _clusterMgr;
private AccountManager _accountMgr;
private AccountManager _accountMgr;
private AccountDao _accountDao;
private AsyncJobDao _jobDao;
private long _jobExpireSeconds = 86400; // 1 day
private long _jobCancelThresholdSeconds = 3600; // 1 hour
private ApiDispatcher _dispatcher;
private final ScheduledExecutorService _heartbeatScheduler =
Executors.newScheduledThreadPool(1, new NamedThreadFactory("AsyncJobMgr-Heartbeat"));
private ExecutorService _executor;
@Override
public AsyncJobExecutorContext getExecutorContext() {
return _context;
}
@Override
public AsyncJobVO getAsyncJob(long jobId) {
return _jobDao.findById(jobId);
}
@Override
public AsyncJobVO findInstancePendingAsyncJob(String instanceType, long instanceId) {
return _jobDao.findInstancePendingAsyncJob(instanceType, instanceId);
private ApiDispatcher _dispatcher;
private final ScheduledExecutorService _heartbeatScheduler =
Executors.newScheduledThreadPool(1, new NamedThreadFactory("AsyncJobMgr-Heartbeat"));
private ExecutorService _executor;
@Override
public AsyncJobExecutorContext getExecutorContext() {
return _context;
}
@Override
public AsyncJobVO getAsyncJob(long jobId) {
return _jobDao.findById(jobId);
}
@Override
public AsyncJobVO findInstancePendingAsyncJob(String instanceType, long instanceId) {
return _jobDao.findInstancePendingAsyncJob(instanceType, instanceId);
}
@Override
public List<AsyncJobVO> findInstancePendingAsyncJobs(AsyncJob.Type instanceType, Long accountId) {
return _jobDao.findInstancePendingAsyncJobs(instanceType, accountId);
}
@Override
public long submitAsyncJob(AsyncJobVO job) {
return submitAsyncJob(job, false);
}
}
@Override
public long submitAsyncJob(AsyncJobVO job) {
return submitAsyncJob(job, false);
}
@Override @DB
@Override @DB
public long submitAsyncJob(AsyncJobVO job, boolean scheduleJobExecutionInContext) {
Transaction txt = Transaction.currentTxn();
try {
txt.start();
Transaction txt = Transaction.currentTxn();
try {
txt.start();
job.setInitMsid(getMsid());
_jobDao.persist(job);
txt.commit();
_jobDao.persist(job);
txt.commit();
// no sync source originally
job.setSyncSource(null);
job.setSyncSource(null);
scheduleExecution(job, scheduleJobExecutionInContext);
if(s_logger.isDebugEnabled()) {
s_logger.debug("submit async job-" + job.getId() + ", details: " + job.toString());
}
return job.getId();
return job.getId();
} catch(Exception e) {
txt.rollback();
String errMsg = "Unable to schedule async job for command " + job.getCmd() + ", unexpected exception.";
s_logger.warn(errMsg, e);
throw new CloudRuntimeException(errMsg);
}
}
@Override @DB
public void completeAsyncJob(long jobId, int jobStatus, int resultCode, Object resultObject) {
}
}
@Override @DB
public void completeAsyncJob(long jobId, int jobStatus, int resultCode, Object resultObject) {
if(s_logger.isDebugEnabled()) {
s_logger.debug("Complete async job-" + jobId + ", jobStatus: " + jobStatus +
s_logger.debug("Complete async job-" + jobId + ", jobStatus: " + jobStatus +
", resultCode: " + resultCode + ", result: " + resultObject);
}
Transaction txt = Transaction.currentTxn();
try {
txt.start();
AsyncJobVO job = _jobDao.findById(jobId);
if(job == null) {
}
Transaction txt = Transaction.currentTxn();
try {
txt.start();
AsyncJobVO job = _jobDao.findById(jobId);
if(job == null) {
if(s_logger.isDebugEnabled()) {
s_logger.debug("job-" + jobId + " no longer exists, we just log completion info here. " + jobStatus +
s_logger.debug("job-" + jobId + " no longer exists, we just log completion info here. " + jobStatus +
", resultCode: " + resultCode + ", result: " + resultObject);
}
txt.rollback();
return;
}
}
txt.rollback();
return;
}
job.setCompleteMsid(getMsid());
job.setStatus(jobStatus);
job.setResultCode(resultCode);
job.setCompleteMsid(getMsid());
job.setStatus(jobStatus);
job.setResultCode(resultCode);
// reset attached object
job.setInstanceType(null);
job.setInstanceId(null);
// reset attached object
job.setInstanceType(null);
job.setInstanceId(null);
if (resultObject != null) {
job.setResult(ApiSerializerHelper.toSerializedStringOld(resultObject));
}
}
job.setLastUpdated(DateUtil.currentGMTTime());
_jobDao.update(jobId, job);
txt.commit();
} catch(Exception e) {
s_logger.error("Unexpected exception while completing async job-" + jobId, e);
txt.rollback();
}
}
job.setLastUpdated(DateUtil.currentGMTTime());
_jobDao.update(jobId, job);
txt.commit();
} catch(Exception e) {
s_logger.error("Unexpected exception while completing async job-" + jobId, e);
txt.rollback();
}
}
@Override @DB
public void updateAsyncJobStatus(long jobId, int processStatus, Object resultObject) {
@Override @DB
public void updateAsyncJobStatus(long jobId, int processStatus, Object resultObject) {
if(s_logger.isDebugEnabled()) {
s_logger.debug("Update async-job progress, job-" + jobId + ", processStatus: " + processStatus +
s_logger.debug("Update async-job progress, job-" + jobId + ", processStatus: " + processStatus +
", result: " + resultObject);
}
Transaction txt = Transaction.currentTxn();
try {
txt.start();
AsyncJobVO job = _jobDao.findById(jobId);
if(job == null) {
}
Transaction txt = Transaction.currentTxn();
try {
txt.start();
AsyncJobVO job = _jobDao.findById(jobId);
if(job == null) {
if(s_logger.isDebugEnabled()) {
s_logger.debug("job-" + jobId + " no longer exists, we just log progress info here. progress status: " + processStatus);
}
txt.rollback();
return;
}
job.setProcessStatus(processStatus);
}
txt.rollback();
return;
}
job.setProcessStatus(processStatus);
if(resultObject != null) {
job.setResult(ApiSerializerHelper.toSerializedStringOld(resultObject));
}
job.setLastUpdated(DateUtil.currentGMTTime());
_jobDao.update(jobId, job);
txt.commit();
} catch(Exception e) {
s_logger.error("Unexpected exception while updating async job-" + jobId + " status: ", e);
txt.rollback();
}
}
}
job.setLastUpdated(DateUtil.currentGMTTime());
_jobDao.update(jobId, job);
txt.commit();
} catch(Exception e) {
s_logger.error("Unexpected exception while updating async job-" + jobId + " status: ", e);
txt.rollback();
}
}
@Override @DB
public void updateAsyncJobAttachment(long jobId, String instanceType, Long instanceId) {
@Override @DB
public void updateAsyncJobAttachment(long jobId, String instanceType, Long instanceId) {
if(s_logger.isDebugEnabled()) {
s_logger.debug("Update async-job attachment, job-" + jobId + ", instanceType: " + instanceType +
s_logger.debug("Update async-job attachment, job-" + jobId + ", instanceType: " + instanceType +
", instanceId: " + instanceId);
}
Transaction txt = Transaction.currentTxn();
try {
txt.start();
}
Transaction txt = Transaction.currentTxn();
try {
txt.start();
AsyncJobVO job = _jobDao.createForUpdate();
//job.setInstanceType(instanceType);
job.setInstanceId(instanceId);
job.setLastUpdated(DateUtil.currentGMTTime());
_jobDao.update(jobId, job);
AsyncJobVO job = _jobDao.createForUpdate();
//job.setInstanceType(instanceType);
job.setInstanceId(instanceId);
job.setLastUpdated(DateUtil.currentGMTTime());
_jobDao.update(jobId, job);
txt.commit();
} catch(Exception e) {
s_logger.error("Unexpected exception while updating async job-" + jobId + " attachment: ", e);
txt.rollback();
}
}
txt.commit();
} catch(Exception e) {
s_logger.error("Unexpected exception while updating async job-" + jobId + " attachment: ", e);
txt.rollback();
}
}
@Override
public void syncAsyncJobExecution(AsyncJob job, String syncObjType, long syncObjId) {
@Override
public void syncAsyncJobExecution(AsyncJob job, String syncObjType, long syncObjId) {
// This method is re-entrant. If an API developer wants to synchronized on an object, e.g. the router,
// when executing business logic, they will call this method (actually a method in BaseAsyncCmd that calls this).
// This method will get called every time their business logic executes. The first time it exectues for a job
@ -309,60 +309,60 @@ public class AsyncJobManagerImpl implements AsyncJobManager, ClusterManagerListe
return _jobDao.findById(cmd.getId());
}
@Override @DB
public AsyncJobResult queryAsyncJobResult(long jobId) {
@Override @DB
public AsyncJobResult queryAsyncJobResult(long jobId) {
if(s_logger.isTraceEnabled()) {
s_logger.trace("Query async-job status, job-" + jobId);
}
Transaction txt = Transaction.currentTxn();
AsyncJobResult jobResult = new AsyncJobResult(jobId);
try {
txt.start();
AsyncJobVO job = _jobDao.findById(jobId);
if(job != null) {
jobResult.setCmdOriginator(job.getCmdOriginator());
jobResult.setJobStatus(job.getStatus());
jobResult.setProcessStatus(job.getProcessStatus());
jobResult.setResult(job.getResult());
}
Transaction txt = Transaction.currentTxn();
AsyncJobResult jobResult = new AsyncJobResult(jobId);
try {
txt.start();
AsyncJobVO job = _jobDao.findById(jobId);
if(job != null) {
jobResult.setCmdOriginator(job.getCmdOriginator());
jobResult.setJobStatus(job.getStatus());
jobResult.setProcessStatus(job.getProcessStatus());
jobResult.setResult(job.getResult());
jobResult.setResultCode(job.getResultCode());
jobResult.setUuid(job.getUuid());
if(job.getStatus() == AsyncJobResult.STATUS_SUCCEEDED ||
job.getStatus() == AsyncJobResult.STATUS_FAILED) {
jobResult.setUuid(job.getUuid());
if(job.getStatus() == AsyncJobResult.STATUS_SUCCEEDED ||
job.getStatus() == AsyncJobResult.STATUS_FAILED) {
if(s_logger.isDebugEnabled()) {
s_logger.debug("Async job-" + jobId + " completed");
}
} else {
job.setLastPolled(DateUtil.currentGMTTime());
_jobDao.update(jobId, job);
}
} else {
}
} else {
job.setLastPolled(DateUtil.currentGMTTime());
_jobDao.update(jobId, job);
}
} else {
if(s_logger.isDebugEnabled()) {
s_logger.debug("Async job-" + jobId + " does not exist, invalid job id?");
}
jobResult.setJobStatus(AsyncJobResult.STATUS_FAILED);
jobResult.setResult("job-" + jobId + " does not exist");
}
txt.commit();
} catch(Exception e) {
s_logger.error("Unexpected exception while querying async job-" + jobId + " status: ", e);
jobResult.setJobStatus(AsyncJobResult.STATUS_FAILED);
jobResult.setResult("Exception: " + e.toString());
txt.rollback();
}
}
jobResult.setJobStatus(AsyncJobResult.STATUS_FAILED);
jobResult.setResult("job-" + jobId + " does not exist");
}
txt.commit();
} catch(Exception e) {
s_logger.error("Unexpected exception while querying async job-" + jobId + " status: ", e);
jobResult.setJobStatus(AsyncJobResult.STATUS_FAILED);
jobResult.setResult("Exception: " + e.toString());
txt.rollback();
}
if(s_logger.isTraceEnabled()) {
s_logger.trace("Job status: " + jobResult.toString());
}
return jobResult;
}
return jobResult;
}
private void scheduleExecution(final AsyncJobVO job) {
scheduleExecution(job, false);
}
@ -500,7 +500,7 @@ public class AsyncJobManagerImpl implements AsyncJobManager, ClusterManagerListe
}
};
}
private void executeQueueItem(SyncQueueItemVO item, boolean fromPreviousSession) {
AsyncJobVO job = _jobDao.findById(item.getContentId());
if (job != null) {
@ -530,61 +530,61 @@ public class AsyncJobManagerImpl implements AsyncJobManager, ClusterManagerListe
}
}
@Override
public void releaseSyncSource(AsyncJobExecutor executor) {
if(executor.getSyncSource() != null) {
@Override
public void releaseSyncSource(AsyncJobExecutor executor) {
if(executor.getSyncSource() != null) {
if(s_logger.isDebugEnabled()) {
s_logger.debug("Release sync source for job-" + executor.getJob().getId() + " sync source: "
+ executor.getSyncSource().getContentType() + "-"
s_logger.debug("Release sync source for job-" + executor.getJob().getId() + " sync source: "
+ executor.getSyncSource().getContentType() + "-"
+ executor.getSyncSource().getContentId());
}
_queueMgr.purgeItem(executor.getSyncSource().getId());
checkQueue(executor.getSyncSource().getQueueId());
}
}
private void checkQueue(long queueId) {
while(true) {
try {
SyncQueueItemVO item = _queueMgr.dequeueFromOne(queueId, getMsid());
if(item != null) {
}
_queueMgr.purgeItem(executor.getSyncSource().getId());
checkQueue(executor.getSyncSource().getQueueId());
}
}
private void checkQueue(long queueId) {
while(true) {
try {
SyncQueueItemVO item = _queueMgr.dequeueFromOne(queueId, getMsid());
if(item != null) {
if(s_logger.isDebugEnabled()) {
s_logger.debug("Executing sync queue item: " + item.toString());
}
executeQueueItem(item, false);
} else {
break;
}
} catch(Throwable e) {
s_logger.error("Unexpected exception when kicking sync queue-" + queueId, e);
break;
}
}
}
private Runnable getHeartbeatTask() {
return new Runnable() {
}
executeQueueItem(item, false);
} else {
break;
}
} catch(Throwable e) {
s_logger.error("Unexpected exception when kicking sync queue-" + queueId, e);
break;
}
}
}
private Runnable getHeartbeatTask() {
return new Runnable() {
@Override
public void run() {
public void run() {
try {
List<SyncQueueItemVO> l = _queueMgr.dequeueFromAny(getMsid(), MAX_ONETIME_SCHEDULE_SIZE);
if(l != null && l.size() > 0) {
for(SyncQueueItemVO item: l) {
List<SyncQueueItemVO> l = _queueMgr.dequeueFromAny(getMsid(), MAX_ONETIME_SCHEDULE_SIZE);
if(l != null && l.size() > 0) {
for(SyncQueueItemVO item: l) {
if(s_logger.isDebugEnabled()) {
s_logger.debug("Execute sync-queue item: " + item.toString());
}
executeQueueItem(item, false);
}
}
} catch(Throwable e) {
s_logger.error("Unexpected exception when trying to execute queue item, ", e);
}
}
} catch(Throwable e) {
s_logger.error("Unexpected exception when trying to execute queue item, ", e);
} finally {
StackMaid.current().exitCleanup();
}
}
};
}
}
};
}
@DB
@ -643,14 +643,14 @@ public class AsyncJobManagerImpl implements AsyncJobManager, ClusterManagerListe
}
};
}
private long getMsid() {
private long getMsid() {
if(_clusterMgr != null) {
return _clusterMgr.getManagementNodeId();
}
return MacAddress.getMacAddress().toLong();
}
}
return MacAddress.getMacAddress().toLong();
}
private void cleanupPendingJobs(List<SyncQueueItemVO> l) {
if(l != null && l.size() > 0) {
@ -670,13 +670,13 @@ public class AsyncJobManagerImpl implements AsyncJobManager, ClusterManagerListe
_queueMgr.purgeItem(item.getId());
}
}
}
@Override
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
_name = name;
}
ComponentLocator locator = ComponentLocator.getCurrentLocator();
@Override
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
_name = name;
ComponentLocator locator = ComponentLocator.getCurrentLocator();
ConfigurationDao configDao = locator.getDao(ConfigurationDao.class);
if (configDao == null) {
@ -694,25 +694,25 @@ public class AsyncJobManagerImpl implements AsyncJobManager, ClusterManagerListe
_accountDao = locator.getDao(AccountDao.class);
if (_accountDao == null) {
throw new ConfigurationException("Unable to get " + AccountDao.class.getName());
}
_jobDao = locator.getDao(AsyncJobDao.class);
if (_jobDao == null) {
throw new ConfigurationException("Unable to get "
+ AsyncJobDao.class.getName());
}
_context = locator.getManager(AsyncJobExecutorContext.class);
if (_context == null) {
throw new ConfigurationException("Unable to get "
+ AsyncJobExecutorContext.class.getName());
}
_queueMgr = locator.getManager(SyncQueueManager.class);
if(_queueMgr == null) {
throw new ConfigurationException("Unable to get "
+ SyncQueueManager.class.getName());
}
}
_jobDao = locator.getDao(AsyncJobDao.class);
if (_jobDao == null) {
throw new ConfigurationException("Unable to get "
+ AsyncJobDao.class.getName());
}
_context = locator.getManager(AsyncJobExecutorContext.class);
if (_context == null) {
throw new ConfigurationException("Unable to get "
+ AsyncJobExecutorContext.class.getName());
}
_queueMgr = locator.getManager(SyncQueueManager.class);
if(_queueMgr == null) {
throw new ConfigurationException("Unable to get "
+ SyncQueueManager.class.getName());
}
_clusterMgr = locator.getManager(ClusterManager.class);
_accountMgr = locator.getManager(AccountManager.class);
@ -735,7 +735,7 @@ public class AsyncJobManagerImpl implements AsyncJobManager, ClusterManagerListe
throw new ConfigurationException("Unable to load db.properties to configure AsyncJobManagerImpl");
}
return true;
return true;
}
@Override
@ -765,10 +765,10 @@ public class AsyncJobManagerImpl implements AsyncJobManager, ClusterManagerListe
@Override
public void onManagementNodeIsolated() {
}
@Override
@Override
public boolean start() {
try {
try {
List<SyncQueueItemVO> l = _queueMgr.getActiveQueueItems(getMsid(), false);
cleanupPendingJobs(l);
_queueMgr.resetQueueProcess(getMsid());
@ -776,13 +776,13 @@ public class AsyncJobManagerImpl implements AsyncJobManager, ClusterManagerListe
} catch(Throwable e) {
s_logger.error("Unexpected exception " + e.getMessage(), e);
}
_heartbeatScheduler.scheduleAtFixedRate(getHeartbeatTask(), HEARTBEAT_INTERVAL,
_heartbeatScheduler.scheduleAtFixedRate(getHeartbeatTask(), HEARTBEAT_INTERVAL,
HEARTBEAT_INTERVAL, TimeUnit.MILLISECONDS);
_heartbeatScheduler.scheduleAtFixedRate(getGCTask(), GC_INTERVAL,
GC_INTERVAL, TimeUnit.MILLISECONDS);
return true;
return true;
}
private static ExceptionResponse getResetResultResponse(String errorMessage) {
@ -794,17 +794,17 @@ public class AsyncJobManagerImpl implements AsyncJobManager, ClusterManagerListe
private static String getSerializedErrorMessage(String errorMessage) {
return ApiSerializerHelper.toSerializedStringOld(getResetResultResponse(errorMessage));
}
@Override
public boolean stop() {
_heartbeatScheduler.shutdown();
_executor.shutdown();
return true;
}
@Override
public String getName() {
return _name;
}
}
}
@Override
public boolean stop() {
_heartbeatScheduler.shutdown();
_executor.shutdown();
return true;
}
@Override
public String getName() {
return _name;
}
}

View File

@ -10,45 +10,45 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.async;
package com.cloud.async;
import com.cloud.api.ApiSerializerHelper;
public class AsyncJobResult {
public static final int STATUS_IN_PROGRESS = 0;
public static final int STATUS_SUCCEEDED = 1;
public static final int STATUS_FAILED = 2;
private String cmdOriginator;
private long jobId;
private int jobStatus;
private int processStatus;
private int resultCode;
public class AsyncJobResult {
public static final int STATUS_IN_PROGRESS = 0;
public static final int STATUS_SUCCEEDED = 1;
public static final int STATUS_FAILED = 2;
private String cmdOriginator;
private long jobId;
private int jobStatus;
private int processStatus;
private int resultCode;
private String result;
private String uuid;
public AsyncJobResult(long jobId) {
this.jobId = jobId;
jobStatus = STATUS_IN_PROGRESS;
processStatus = 0;
resultCode = 0;
result = "";
}
public String getCmdOriginator() {
return cmdOriginator;
}
public void setCmdOriginator(String cmdOriginator) {
this.cmdOriginator = cmdOriginator;
}
public long getJobId() {
return jobId;
}
public void setJobId(long jobId) {
this.jobId = jobId;
private String uuid;
public AsyncJobResult(long jobId) {
this.jobId = jobId;
jobStatus = STATUS_IN_PROGRESS;
processStatus = 0;
resultCode = 0;
result = "";
}
public String getCmdOriginator() {
return cmdOriginator;
}
public void setCmdOriginator(String cmdOriginator) {
this.cmdOriginator = cmdOriginator;
}
public long getJobId() {
return jobId;
}
public void setJobId(long jobId) {
this.jobId = jobId;
}
public String getUuid() {
@ -57,57 +57,57 @@ public class AsyncJobResult {
public void setUuid(String uuid) {
this.uuid = uuid;
}
public int getJobStatus() {
return jobStatus;
}
public void setJobStatus(int jobStatus) {
this.jobStatus = jobStatus;
}
public int getProcessStatus() {
return processStatus;
}
public void setProcessStatus(int processStatus) {
this.processStatus = processStatus;
}
public int getResultCode() {
return resultCode;
}
public void setResultCode(int resultCode) {
this.resultCode = resultCode;
}
public String getResult() {
return result;
}
public void setResult(String result) {
this.result = result;
}
public Object getResultObject() {
return ApiSerializerHelper.fromSerializedString(result);
}
public void setResultObject(Object result) {
this.result = ApiSerializerHelper.toSerializedStringOld(result);
}
}
public int getJobStatus() {
return jobStatus;
}
public void setJobStatus(int jobStatus) {
this.jobStatus = jobStatus;
}
public int getProcessStatus() {
return processStatus;
}
public void setProcessStatus(int processStatus) {
this.processStatus = processStatus;
}
public int getResultCode() {
return resultCode;
}
public void setResultCode(int resultCode) {
this.resultCode = resultCode;
}
public String getResult() {
return result;
}
public void setResult(String result) {
this.result = result;
}
public Object getResultObject() {
return ApiSerializerHelper.fromSerializedString(result);
}
public void setResultObject(Object result) {
this.result = ApiSerializerHelper.toSerializedStringOld(result);
}
@Override
public String toString() {
StringBuffer sb = new StringBuffer();
sb.append("AsyncJobResult {jobId:").append(getJobId());
sb.append(", jobStatus: ").append(getJobStatus());
sb.append(", processStatus: ").append(getProcessStatus());
sb.append(", resultCode: ").append(getResultCode());
sb.append(", result: ").append(result);
sb.append("}");
return sb.toString();
}
}
public String toString() {
StringBuffer sb = new StringBuffer();
sb.append("AsyncJobResult {jobId:").append(getJobId());
sb.append(", jobStatus: ").append(getJobStatus());
sb.append(", processStatus: ").append(getProcessStatus());
sb.append(", resultCode: ").append(getResultCode());
sb.append(", result: ").append(result);
sb.append("}");
return sb.toString();
}
}

View File

@ -10,56 +10,56 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.async;
package com.cloud.async;
public abstract class BaseAsyncJobExecutor implements AsyncJobExecutor {
private SyncQueueItemVO _syncSource;
private AsyncJobVO _job;
private boolean _fromPreviousSession;
private AsyncJobManager _asyncJobMgr;
private static ThreadLocal<AsyncJobExecutor> s_currentExector = new ThreadLocal<AsyncJobExecutor>();
public AsyncJobManager getAsyncJobMgr() {
return _asyncJobMgr;
}
public void setAsyncJobMgr(AsyncJobManager asyncMgr) {
_asyncJobMgr = asyncMgr;
}
public SyncQueueItemVO getSyncSource() {
return _syncSource;
}
public void setSyncSource(SyncQueueItemVO syncSource) {
_syncSource = syncSource;
}
public AsyncJobVO getJob() {
return _job;
}
public void setJob(AsyncJobVO job) {
_job = job;
}
public void setFromPreviousSession(boolean value) {
_fromPreviousSession = value;
}
public boolean isFromPreviousSession() {
return _fromPreviousSession;
}
public abstract boolean execute();
public static AsyncJobExecutor getCurrentExecutor() {
return s_currentExector.get();
}
public static void setCurrentExecutor(AsyncJobExecutor currentExecutor) {
s_currentExector.set(currentExecutor);
}
}
public abstract class BaseAsyncJobExecutor implements AsyncJobExecutor {
private SyncQueueItemVO _syncSource;
private AsyncJobVO _job;
private boolean _fromPreviousSession;
private AsyncJobManager _asyncJobMgr;
private static ThreadLocal<AsyncJobExecutor> s_currentExector = new ThreadLocal<AsyncJobExecutor>();
public AsyncJobManager getAsyncJobMgr() {
return _asyncJobMgr;
}
public void setAsyncJobMgr(AsyncJobManager asyncMgr) {
_asyncJobMgr = asyncMgr;
}
public SyncQueueItemVO getSyncSource() {
return _syncSource;
}
public void setSyncSource(SyncQueueItemVO syncSource) {
_syncSource = syncSource;
}
public AsyncJobVO getJob() {
return _job;
}
public void setJob(AsyncJobVO job) {
_job = job;
}
public void setFromPreviousSession(boolean value) {
_fromPreviousSession = value;
}
public boolean isFromPreviousSession() {
return _fromPreviousSession;
}
public abstract boolean execute();
public static AsyncJobExecutor getCurrentExecutor() {
return s_currentExector.get();
}
public static void setCurrentExecutor(AsyncJobExecutor currentExecutor) {
s_currentExector.set(currentExecutor);
}
}

View File

@ -10,20 +10,20 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.async;
package com.cloud.async;
import java.util.List;
import com.cloud.utils.component.Manager;
public interface SyncQueueManager extends Manager {
public SyncQueueVO queue(String syncObjType, long syncObjId, String itemType, long itemId);
public SyncQueueItemVO dequeueFromOne(long queueId, Long msid);
public List<SyncQueueItemVO> dequeueFromAny(Long msid, int maxItems);
public void purgeItem(long queueItemId);
public interface SyncQueueManager extends Manager {
public SyncQueueVO queue(String syncObjType, long syncObjId, String itemType, long itemId);
public SyncQueueItemVO dequeueFromOne(long queueId, Long msid);
public List<SyncQueueItemVO> dequeueFromAny(Long msid, int maxItems);
public void purgeItem(long queueItemId);
public void returnItem(long queueItemId);
public List<SyncQueueItemVO> getActiveQueueItems(Long msid, boolean exclusive);
public List<SyncQueueItemVO> getBlockedQueueItems(long thresholdMs, boolean exclusive);
public void resetQueueProcess(long msid);
}
}

View File

@ -10,8 +10,8 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.async;
package com.cloud.async;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
@ -29,166 +29,166 @@ import com.cloud.utils.component.ComponentLocator;
import com.cloud.utils.db.DB;
import com.cloud.utils.db.Transaction;
import com.cloud.utils.exception.CloudRuntimeException;
@Local(value={SyncQueueManager.class})
public class SyncQueueManagerImpl implements SyncQueueManager {
public static final Logger s_logger = Logger.getLogger(SyncQueueManagerImpl.class.getName());
private String _name;
private SyncQueueDao _syncQueueDao;
private SyncQueueItemDao _syncQueueItemDao;
@Override
@DB
public SyncQueueVO queue(String syncObjType, long syncObjId, String itemType, long itemId) {
Transaction txn = Transaction.currentTxn();
try {
txn.start();
_syncQueueDao.ensureQueue(syncObjType, syncObjId);
SyncQueueVO queueVO = _syncQueueDao.find(syncObjType, syncObjId);
if(queueVO == null)
throw new CloudRuntimeException("Unable to queue item into DB, DB is full?");
Date dt = DateUtil.currentGMTTime();
SyncQueueItemVO item = new SyncQueueItemVO();
item.setQueueId(queueVO.getId());
item.setContentType(itemType);
item.setContentId(itemId);
item.setCreated(dt);
_syncQueueItemDao.persist(item);
txn.commit();
return queueVO;
} catch(Exception e) {
s_logger.error("Unexpected exception: ", e);
txn.rollback();
}
return null;
}
@Override
@DB
public SyncQueueItemVO dequeueFromOne(long queueId, Long msid) {
Transaction txt = Transaction.currentTxn();
try {
txt.start();
SyncQueueVO queueVO = _syncQueueDao.lockRow(queueId, true);
if(queueVO == null) {
s_logger.error("Sync queue(id: " + queueId + ") does not exist");
txt.commit();
return null;
}
if(queueVO.getLastProcessTime() == null) {
SyncQueueItemVO itemVO = _syncQueueItemDao.getNextQueueItem(queueVO.getId());
if(itemVO != null) {
Long processNumber = queueVO.getLastProcessNumber();
if(processNumber == null)
processNumber = new Long(1);
else
processNumber = processNumber + 1;
Date dt = DateUtil.currentGMTTime();
queueVO.setLastProcessMsid(msid);
queueVO.setLastProcessNumber(processNumber);
queueVO.setLastProcessTime(dt);
queueVO.setLastUpdated(dt);
_syncQueueDao.update(queueVO.getId(), queueVO);
itemVO.setLastProcessMsid(msid);
itemVO.setLastProcessNumber(processNumber);
_syncQueueItemDao.update(itemVO.getId(), itemVO);
txt.commit();
return itemVO;
} else {
if(s_logger.isDebugEnabled())
s_logger.debug("Sync queue (" + queueId + ") is currently empty");
}
} else {
if(s_logger.isDebugEnabled())
s_logger.debug("There is a pending process in sync queue(id: " + queueId + ")");
}
txt.commit();
} catch(Exception e) {
s_logger.error("Unexpected exception: ", e);
txt.rollback();
}
return null;
}
@Override
@DB
public List<SyncQueueItemVO> dequeueFromAny(Long msid, int maxItems) {
List<SyncQueueItemVO> resultList = new ArrayList<SyncQueueItemVO>();
Transaction txt = Transaction.currentTxn();
try {
txt.start();
List<SyncQueueItemVO> l = _syncQueueItemDao.getNextQueueItems(maxItems);
if(l != null && l.size() > 0) {
for(SyncQueueItemVO item : l) {
SyncQueueVO queueVO = _syncQueueDao.lockRow(item.getQueueId(), true);
SyncQueueItemVO itemVO = _syncQueueItemDao.lockRow(item.getId(), true);
if(queueVO.getLastProcessTime() == null && itemVO.getLastProcessNumber() == null) {
Long processNumber = queueVO.getLastProcessNumber();
if(processNumber == null)
processNumber = new Long(1);
else
processNumber = processNumber + 1;
Date dt = DateUtil.currentGMTTime();
queueVO.setLastProcessMsid(msid);
queueVO.setLastProcessNumber(processNumber);
queueVO.setLastProcessTime(dt);
queueVO.setLastUpdated(dt);
_syncQueueDao.update(queueVO.getId(), queueVO);
itemVO.setLastProcessMsid(msid);
itemVO.setLastProcessNumber(processNumber);
_syncQueueItemDao.update(item.getId(), itemVO);
resultList.add(item);
}
}
}
txt.commit();
return resultList;
} catch(Exception e) {
s_logger.error("Unexpected exception: ", e);
txt.rollback();
}
return null;
}
@Override
@DB
public void purgeItem(long queueItemId) {
Transaction txt = Transaction.currentTxn();
try {
txt.start();
SyncQueueItemVO itemVO = _syncQueueItemDao.findById(queueItemId);
if(itemVO != null) {
SyncQueueVO queueVO = _syncQueueDao.lockRow(itemVO.getQueueId(), true);
_syncQueueItemDao.expunge(itemVO.getId());
queueVO.setLastProcessTime(null);
queueVO.setLastUpdated(DateUtil.currentGMTTime());
_syncQueueDao.update(queueVO.getId(), queueVO);
}
txt.commit();
} catch(Exception e) {
s_logger.error("Unexpected exception: ", e);
txt.rollback();
}
@Local(value={SyncQueueManager.class})
public class SyncQueueManagerImpl implements SyncQueueManager {
public static final Logger s_logger = Logger.getLogger(SyncQueueManagerImpl.class.getName());
private String _name;
private SyncQueueDao _syncQueueDao;
private SyncQueueItemDao _syncQueueItemDao;
@Override
@DB
public SyncQueueVO queue(String syncObjType, long syncObjId, String itemType, long itemId) {
Transaction txn = Transaction.currentTxn();
try {
txn.start();
_syncQueueDao.ensureQueue(syncObjType, syncObjId);
SyncQueueVO queueVO = _syncQueueDao.find(syncObjType, syncObjId);
if(queueVO == null)
throw new CloudRuntimeException("Unable to queue item into DB, DB is full?");
Date dt = DateUtil.currentGMTTime();
SyncQueueItemVO item = new SyncQueueItemVO();
item.setQueueId(queueVO.getId());
item.setContentType(itemType);
item.setContentId(itemId);
item.setCreated(dt);
_syncQueueItemDao.persist(item);
txn.commit();
return queueVO;
} catch(Exception e) {
s_logger.error("Unexpected exception: ", e);
txn.rollback();
}
return null;
}
@Override
@DB
public SyncQueueItemVO dequeueFromOne(long queueId, Long msid) {
Transaction txt = Transaction.currentTxn();
try {
txt.start();
SyncQueueVO queueVO = _syncQueueDao.lockRow(queueId, true);
if(queueVO == null) {
s_logger.error("Sync queue(id: " + queueId + ") does not exist");
txt.commit();
return null;
}
if(queueVO.getLastProcessTime() == null) {
SyncQueueItemVO itemVO = _syncQueueItemDao.getNextQueueItem(queueVO.getId());
if(itemVO != null) {
Long processNumber = queueVO.getLastProcessNumber();
if(processNumber == null)
processNumber = new Long(1);
else
processNumber = processNumber + 1;
Date dt = DateUtil.currentGMTTime();
queueVO.setLastProcessMsid(msid);
queueVO.setLastProcessNumber(processNumber);
queueVO.setLastProcessTime(dt);
queueVO.setLastUpdated(dt);
_syncQueueDao.update(queueVO.getId(), queueVO);
itemVO.setLastProcessMsid(msid);
itemVO.setLastProcessNumber(processNumber);
_syncQueueItemDao.update(itemVO.getId(), itemVO);
txt.commit();
return itemVO;
} else {
if(s_logger.isDebugEnabled())
s_logger.debug("Sync queue (" + queueId + ") is currently empty");
}
} else {
if(s_logger.isDebugEnabled())
s_logger.debug("There is a pending process in sync queue(id: " + queueId + ")");
}
txt.commit();
} catch(Exception e) {
s_logger.error("Unexpected exception: ", e);
txt.rollback();
}
return null;
}
@Override
@DB
public List<SyncQueueItemVO> dequeueFromAny(Long msid, int maxItems) {
List<SyncQueueItemVO> resultList = new ArrayList<SyncQueueItemVO>();
Transaction txt = Transaction.currentTxn();
try {
txt.start();
List<SyncQueueItemVO> l = _syncQueueItemDao.getNextQueueItems(maxItems);
if(l != null && l.size() > 0) {
for(SyncQueueItemVO item : l) {
SyncQueueVO queueVO = _syncQueueDao.lockRow(item.getQueueId(), true);
SyncQueueItemVO itemVO = _syncQueueItemDao.lockRow(item.getId(), true);
if(queueVO.getLastProcessTime() == null && itemVO.getLastProcessNumber() == null) {
Long processNumber = queueVO.getLastProcessNumber();
if(processNumber == null)
processNumber = new Long(1);
else
processNumber = processNumber + 1;
Date dt = DateUtil.currentGMTTime();
queueVO.setLastProcessMsid(msid);
queueVO.setLastProcessNumber(processNumber);
queueVO.setLastProcessTime(dt);
queueVO.setLastUpdated(dt);
_syncQueueDao.update(queueVO.getId(), queueVO);
itemVO.setLastProcessMsid(msid);
itemVO.setLastProcessNumber(processNumber);
_syncQueueItemDao.update(item.getId(), itemVO);
resultList.add(item);
}
}
}
txt.commit();
return resultList;
} catch(Exception e) {
s_logger.error("Unexpected exception: ", e);
txt.rollback();
}
return null;
}
@Override
@DB
public void purgeItem(long queueItemId) {
Transaction txt = Transaction.currentTxn();
try {
txt.start();
SyncQueueItemVO itemVO = _syncQueueItemDao.findById(queueItemId);
if(itemVO != null) {
SyncQueueVO queueVO = _syncQueueDao.lockRow(itemVO.getQueueId(), true);
_syncQueueItemDao.expunge(itemVO.getId());
queueVO.setLastProcessTime(null);
queueVO.setLastUpdated(DateUtil.currentGMTTime());
_syncQueueDao.update(queueVO.getId(), queueVO);
}
txt.commit();
} catch(Exception e) {
s_logger.error("Unexpected exception: ", e);
txt.rollback();
}
}
@Override
@ -215,11 +215,11 @@ public class SyncQueueManagerImpl implements SyncQueueManager {
s_logger.error("Unexpected exception: ", e);
txt.rollback();
}
}
@Override
public List<SyncQueueItemVO> getActiveQueueItems(Long msid, boolean exclusive) {
return _syncQueueItemDao.getActiveQueueItems(msid, exclusive);
}
@Override
public List<SyncQueueItemVO> getActiveQueueItems(Long msid, boolean exclusive) {
return _syncQueueItemDao.getActiveQueueItems(msid, exclusive);
}
@Override
@ -232,39 +232,39 @@ public class SyncQueueManagerImpl implements SyncQueueManager {
_syncQueueDao.resetQueueProcessing(msid);
}
@Override
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
_name = name;
ComponentLocator locator = ComponentLocator.getCurrentLocator();
_syncQueueDao = locator.getDao(SyncQueueDao.class);
if (_syncQueueDao == null) {
throw new ConfigurationException("Unable to get "
+ SyncQueueDao.class.getName());
}
_syncQueueItemDao = locator.getDao(SyncQueueItemDao.class);
if (_syncQueueItemDao == null) {
throw new ConfigurationException("Unable to get "
+ SyncQueueDao.class.getName());
}
return true;
}
@Override
public boolean start() {
return true;
}
@Override
public boolean stop() {
return true;
}
@Override
public String getName() {
return _name;
}
}
@Override
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
_name = name;
ComponentLocator locator = ComponentLocator.getCurrentLocator();
_syncQueueDao = locator.getDao(SyncQueueDao.class);
if (_syncQueueDao == null) {
throw new ConfigurationException("Unable to get "
+ SyncQueueDao.class.getName());
}
_syncQueueItemDao = locator.getDao(SyncQueueItemDao.class);
if (_syncQueueItemDao == null) {
throw new ConfigurationException("Unable to get "
+ SyncQueueDao.class.getName());
}
return true;
}
@Override
public boolean start() {
return true;
}
@Override
public boolean stop() {
return true;
}
@Override
public String getName() {
return _name;
}
}

View File

@ -10,18 +10,18 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.async.dao;
package com.cloud.async.dao;
import java.util.Date;
import java.util.List;
import com.cloud.async.AsyncJob;
import com.cloud.async.AsyncJobVO;
import com.cloud.utils.db.GenericDao;
public interface AsyncJobDao extends GenericDao<AsyncJobVO, Long> {
public interface AsyncJobDao extends GenericDao<AsyncJobVO, Long> {
AsyncJobVO findInstancePendingAsyncJob(String instanceType, long instanceId);
List<AsyncJobVO> findInstancePendingAsyncJobs(AsyncJob.Type instanceType, Long accountId);
List<AsyncJobVO> getExpiredJobs(Date cutTime, int limit);
void resetJobProcess(long msid, int jobResultCode, String jobResultMessage);
}
}

View File

@ -10,8 +10,8 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.async.dao;
package com.cloud.async.dao;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.Date;
@ -30,23 +30,23 @@ import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.Transaction;
@Local(value = { AsyncJobDao.class })
public class AsyncJobDaoImpl extends GenericDaoBase<AsyncJobVO, Long> implements AsyncJobDao {
private static final Logger s_logger = Logger.getLogger(AsyncJobDaoImpl.class.getName());
@Local(value = { AsyncJobDao.class })
public class AsyncJobDaoImpl extends GenericDaoBase<AsyncJobVO, Long> implements AsyncJobDao {
private static final Logger s_logger = Logger.getLogger(AsyncJobDaoImpl.class.getName());
private final SearchBuilder<AsyncJobVO> pendingAsyncJobSearch;
private final SearchBuilder<AsyncJobVO> pendingAsyncJobsSearch;
private final SearchBuilder<AsyncJobVO> pendingAsyncJobsSearch;
private final SearchBuilder<AsyncJobVO> expiringAsyncJobSearch;
public AsyncJobDaoImpl() {
pendingAsyncJobSearch = createSearchBuilder();
pendingAsyncJobSearch.and("instanceType", pendingAsyncJobSearch.entity().getInstanceType(),
SearchCriteria.Op.EQ);
pendingAsyncJobSearch.and("instanceId", pendingAsyncJobSearch.entity().getInstanceId(),
SearchCriteria.Op.EQ);
pendingAsyncJobSearch.and("status", pendingAsyncJobSearch.entity().getStatus(),
SearchCriteria.Op.EQ);
public AsyncJobDaoImpl() {
pendingAsyncJobSearch = createSearchBuilder();
pendingAsyncJobSearch.and("instanceType", pendingAsyncJobSearch.entity().getInstanceType(),
SearchCriteria.Op.EQ);
pendingAsyncJobSearch.and("instanceId", pendingAsyncJobSearch.entity().getInstanceId(),
SearchCriteria.Op.EQ);
pendingAsyncJobSearch.and("status", pendingAsyncJobSearch.entity().getStatus(),
SearchCriteria.Op.EQ);
pendingAsyncJobSearch.done();
pendingAsyncJobsSearch = createSearchBuilder();
@ -62,23 +62,23 @@ public class AsyncJobDaoImpl extends GenericDaoBase<AsyncJobVO, Long> implements
expiringAsyncJobSearch.and("created", expiringAsyncJobSearch.entity().getCreated(),
SearchCriteria.Op.LTEQ);
expiringAsyncJobSearch.done();
}
public AsyncJobVO findInstancePendingAsyncJob(String instanceType, long instanceId) {
SearchCriteria<AsyncJobVO> sc = pendingAsyncJobSearch.create();
sc.setParameters("instanceType", instanceType);
sc.setParameters("instanceId", instanceId);
sc.setParameters("status", AsyncJobResult.STATUS_IN_PROGRESS);
List<AsyncJobVO> l = listIncludingRemovedBy(sc);
if(l != null && l.size() > 0) {
if(l.size() > 1) {
s_logger.warn("Instance " + instanceType + "-" + instanceId + " has multiple pending async-job");
}
return l.get(0);
}
return null;
}
public AsyncJobVO findInstancePendingAsyncJob(String instanceType, long instanceId) {
SearchCriteria<AsyncJobVO> sc = pendingAsyncJobSearch.create();
sc.setParameters("instanceType", instanceType);
sc.setParameters("instanceId", instanceId);
sc.setParameters("status", AsyncJobResult.STATUS_IN_PROGRESS);
List<AsyncJobVO> l = listIncludingRemovedBy(sc);
if(l != null && l.size() > 0) {
if(l.size() > 1) {
s_logger.warn("Instance " + instanceType + "-" + instanceId + " has multiple pending async-job");
}
return l.get(0);
}
return null;
}
public List<AsyncJobVO> findInstancePendingAsyncJobs(AsyncJob.Type instanceType, Long accountId) {
@ -118,4 +118,4 @@ public class AsyncJobDaoImpl extends GenericDaoBase<AsyncJobVO, Long> implements
s_logger.warn("Unable to reset job status for management server " + msid, e);
}
}
}
}

View File

@ -10,13 +10,13 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.async.dao;
package com.cloud.async.dao;
import com.cloud.async.SyncQueueVO;
import com.cloud.utils.db.GenericDao;
public interface SyncQueueDao extends GenericDao<SyncQueueVO, Long>{
public void ensureQueue(String syncObjType, long syncObjId);
public interface SyncQueueDao extends GenericDao<SyncQueueVO, Long>{
public void ensureQueue(String syncObjType, long syncObjId);
public SyncQueueVO find(String syncObjType, long syncObjId);
public void resetQueueProcessing(long msid);
}
}

View File

@ -10,8 +10,8 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.async.dao;
package com.cloud.async.dao;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.Date;
@ -28,40 +28,40 @@ import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.Transaction;
@Local(value = { SyncQueueDao.class })
public class SyncQueueDaoImpl extends GenericDaoBase<SyncQueueVO, Long> implements SyncQueueDao {
@Local(value = { SyncQueueDao.class })
public class SyncQueueDaoImpl extends GenericDaoBase<SyncQueueVO, Long> implements SyncQueueDao {
private static final Logger s_logger = Logger.getLogger(SyncQueueDaoImpl.class.getName());
SearchBuilder<SyncQueueVO> TypeIdSearch = createSearchBuilder();
@Override
public void ensureQueue(String syncObjType, long syncObjId) {
Date dt = DateUtil.currentGMTTime();
String sql = "INSERT IGNORE INTO sync_queue(sync_objtype, sync_objid, created, last_updated) values(?, ?, ?, ?)";
Transaction txn = Transaction.currentTxn();
PreparedStatement pstmt = null;
try {
pstmt = txn.prepareAutoCloseStatement(sql);
pstmt.setString(1, syncObjType);
pstmt.setLong(2, syncObjId);
pstmt.setString(3, DateUtil.getDateDisplayString(TimeZone.getTimeZone("GMT"), dt));
pstmt.setString(4, DateUtil.getDateDisplayString(TimeZone.getTimeZone("GMT"), dt));
pstmt.execute();
} catch (SQLException e) {
s_logger.warn("Unable to create sync queue " + syncObjType + "-" + syncObjId + ":" + e.getMessage(), e);
} catch (Throwable e) {
s_logger.warn("Unable to create sync queue " + syncObjType + "-" + syncObjId + ":" + e.getMessage(), e);
}
}
@Override
public SyncQueueVO find(String syncObjType, long syncObjId) {
SearchCriteria<SyncQueueVO> sc = TypeIdSearch.create();
sc.setParameters("syncObjType", syncObjType);
sc.setParameters("syncObjId", syncObjId);
return findOneBy(sc);
@Override
public void ensureQueue(String syncObjType, long syncObjId) {
Date dt = DateUtil.currentGMTTime();
String sql = "INSERT IGNORE INTO sync_queue(sync_objtype, sync_objid, created, last_updated) values(?, ?, ?, ?)";
Transaction txn = Transaction.currentTxn();
PreparedStatement pstmt = null;
try {
pstmt = txn.prepareAutoCloseStatement(sql);
pstmt.setString(1, syncObjType);
pstmt.setLong(2, syncObjId);
pstmt.setString(3, DateUtil.getDateDisplayString(TimeZone.getTimeZone("GMT"), dt));
pstmt.setString(4, DateUtil.getDateDisplayString(TimeZone.getTimeZone("GMT"), dt));
pstmt.execute();
} catch (SQLException e) {
s_logger.warn("Unable to create sync queue " + syncObjType + "-" + syncObjId + ":" + e.getMessage(), e);
} catch (Throwable e) {
s_logger.warn("Unable to create sync queue " + syncObjType + "-" + syncObjId + ":" + e.getMessage(), e);
}
}
@Override
public SyncQueueVO find(String syncObjType, long syncObjId) {
SearchCriteria<SyncQueueVO> sc = TypeIdSearch.create();
sc.setParameters("syncObjType", syncObjType);
sc.setParameters("syncObjId", syncObjId);
return findOneBy(sc);
}
@Override @DB
@ -87,5 +87,5 @@ public class SyncQueueDaoImpl extends GenericDaoBase<SyncQueueVO, Long> implemen
TypeIdSearch.and("syncObjType", TypeIdSearch.entity().getSyncObjType(), SearchCriteria.Op.EQ);
TypeIdSearch.and("syncObjId", TypeIdSearch.entity().getSyncObjId(), SearchCriteria.Op.EQ);
TypeIdSearch.done();
}
}
}
}

View File

@ -10,16 +10,16 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.async.dao;
package com.cloud.async.dao;
import java.util.List;
import com.cloud.async.SyncQueueItemVO;
import com.cloud.utils.db.GenericDao;
public interface SyncQueueItemDao extends GenericDao<SyncQueueItemVO, Long> {
public SyncQueueItemVO getNextQueueItem(long queueId);
public List<SyncQueueItemVO> getNextQueueItems(int maxItems);
public interface SyncQueueItemDao extends GenericDao<SyncQueueItemVO, Long> {
public SyncQueueItemVO getNextQueueItem(long queueId);
public List<SyncQueueItemVO> getNextQueueItems(int maxItems);
public List<SyncQueueItemVO> getActiveQueueItems(Long msid, boolean exclusive);
public List<SyncQueueItemVO> getBlockedQueueItems(long thresholdMs, boolean exclusive);
}
public List<SyncQueueItemVO> getBlockedQueueItems(long thresholdMs, boolean exclusive);
}

View File

@ -10,8 +10,8 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.async.dao;
package com.cloud.async.dao;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
@ -33,81 +33,81 @@ import com.cloud.utils.db.JoinBuilder;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.Transaction;
@Local(value = { SyncQueueItemDao.class })
@Local(value = { SyncQueueItemDao.class })
public class SyncQueueItemDaoImpl extends GenericDaoBase<SyncQueueItemVO, Long> implements SyncQueueItemDao {
private static final Logger s_logger = Logger.getLogger(SyncQueueItemDaoImpl.class);
private final SyncQueueDao _syncQueueDao = new SyncQueueDaoImpl();
@Override
public SyncQueueItemVO getNextQueueItem(long queueId) {
SearchBuilder<SyncQueueItemVO> sb = createSearchBuilder();
sb.and("queueId", sb.entity().getQueueId(), SearchCriteria.Op.EQ);
sb.and("lastProcessNumber", sb.entity().getLastProcessNumber(), SearchCriteria.Op.NULL);
sb.done();
SearchCriteria<SyncQueueItemVO> sc = sb.create();
sc.setParameters("queueId", queueId);
Filter filter = new Filter(SyncQueueItemVO.class, "created", true, 0L, 1L);
List<SyncQueueItemVO> l = listBy(sc, filter);
if(l != null && l.size() > 0)
return l.get(0);
return null;
}
@Override
public List<SyncQueueItemVO> getNextQueueItems(int maxItems) {
List<SyncQueueItemVO> l = new ArrayList<SyncQueueItemVO>();
String sql = "SELECT i.id, i.queue_id, i.content_type, i.content_id, i.created " +
" FROM sync_queue AS q JOIN sync_queue_item AS i ON q.id = i.queue_id " +
" WHERE q.queue_proc_time IS NULL AND i.queue_proc_number IS NULL " +
" GROUP BY q.id " +
" ORDER BY i.id " +
" LIMIT 0, ?";
Transaction txn = Transaction.currentTxn();
PreparedStatement pstmt = null;
try {
pstmt = txn.prepareAutoCloseStatement(sql);
pstmt.setInt(1, maxItems);
ResultSet rs = pstmt.executeQuery();
while(rs.next()) {
SyncQueueItemVO item = new SyncQueueItemVO();
item.setId(rs.getLong(1));
item.setQueueId(rs.getLong(2));
item.setContentType(rs.getString(3));
item.setContentId(rs.getLong(4));
item.setCreated(DateUtil.parseDateString(TimeZone.getTimeZone("GMT"), rs.getString(5)));
l.add(item);
}
} catch (SQLException e) {
s_logger.error("Unexpected sql excetpion, ", e);
} catch (Throwable e) {
s_logger.error("Unexpected excetpion, ", e);
}
return l;
}
@Override
public List<SyncQueueItemVO> getActiveQueueItems(Long msid, boolean exclusive) {
SearchBuilder<SyncQueueItemVO> sb = createSearchBuilder();
sb.and("lastProcessMsid", sb.entity().getLastProcessMsid(),
SearchCriteria.Op.EQ);
sb.done();
SearchCriteria<SyncQueueItemVO> sc = sb.create();
sc.setParameters("lastProcessMsid", msid);
private final SyncQueueDao _syncQueueDao = new SyncQueueDaoImpl();
@Override
public SyncQueueItemVO getNextQueueItem(long queueId) {
SearchBuilder<SyncQueueItemVO> sb = createSearchBuilder();
sb.and("queueId", sb.entity().getQueueId(), SearchCriteria.Op.EQ);
sb.and("lastProcessNumber", sb.entity().getLastProcessNumber(), SearchCriteria.Op.NULL);
sb.done();
SearchCriteria<SyncQueueItemVO> sc = sb.create();
sc.setParameters("queueId", queueId);
Filter filter = new Filter(SyncQueueItemVO.class, "created", true, 0L, 1L);
List<SyncQueueItemVO> l = listBy(sc, filter);
if(l != null && l.size() > 0)
return l.get(0);
return null;
}
@Override
public List<SyncQueueItemVO> getNextQueueItems(int maxItems) {
List<SyncQueueItemVO> l = new ArrayList<SyncQueueItemVO>();
String sql = "SELECT i.id, i.queue_id, i.content_type, i.content_id, i.created " +
" FROM sync_queue AS q JOIN sync_queue_item AS i ON q.id = i.queue_id " +
" WHERE q.queue_proc_time IS NULL AND i.queue_proc_number IS NULL " +
" GROUP BY q.id " +
" ORDER BY i.id " +
" LIMIT 0, ?";
Transaction txn = Transaction.currentTxn();
PreparedStatement pstmt = null;
try {
pstmt = txn.prepareAutoCloseStatement(sql);
pstmt.setInt(1, maxItems);
ResultSet rs = pstmt.executeQuery();
while(rs.next()) {
SyncQueueItemVO item = new SyncQueueItemVO();
item.setId(rs.getLong(1));
item.setQueueId(rs.getLong(2));
item.setContentType(rs.getString(3));
item.setContentId(rs.getLong(4));
item.setCreated(DateUtil.parseDateString(TimeZone.getTimeZone("GMT"), rs.getString(5)));
l.add(item);
}
} catch (SQLException e) {
s_logger.error("Unexpected sql excetpion, ", e);
} catch (Throwable e) {
s_logger.error("Unexpected excetpion, ", e);
}
return l;
}
@Override
public List<SyncQueueItemVO> getActiveQueueItems(Long msid, boolean exclusive) {
SearchBuilder<SyncQueueItemVO> sb = createSearchBuilder();
sb.and("lastProcessMsid", sb.entity().getLastProcessMsid(),
SearchCriteria.Op.EQ);
sb.done();
SearchCriteria<SyncQueueItemVO> sc = sb.create();
sc.setParameters("lastProcessMsid", msid);
Filter filter = new Filter(SyncQueueItemVO.class, "created", true, null, null);
if(exclusive)
return lockRows(sc, filter, true);
return listBy(sc, filter);
return lockRows(sc, filter, true);
return listBy(sc, filter);
}
@Override
@ -134,4 +134,4 @@ public class SyncQueueItemDaoImpl extends GenericDaoBase<SyncQueueItemVO, Long>
return lockRows(sc, null, true);
return listBy(sc, null);
}
}
}

View File

@ -10,170 +10,170 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.async.executor;
import java.util.Date;
import com.cloud.async.AsyncInstanceCreateStatus;
import com.cloud.serializer.Param;
import com.cloud.storage.Volume.Type;
import com.cloud.storage.upload.UploadState;
public class ExtractJobResultObject {
public ExtractJobResultObject(Long accountId, String typeName, String currState, int uploadPercent, Long uploadId){
this.accountId = accountId;
this.name = typeName;
this.state = currState;
this.id = uploadId;
this.uploadPercent = uploadPercent;
}
public ExtractJobResultObject(Long accountId, String typeName, String currState, Long uploadId, String url){
this.accountId = accountId;
this.name = typeName;
this.state = currState;
this.id = uploadId;
this.url = url;
}
public ExtractJobResultObject(){
}
@Param(name="id")
private long id;
@Param(name="name")
private String name;
@Param(name="uploadPercentage")
private int uploadPercent;
@Param(name="uploadStatus")
private String uploadStatus;
@Param(name="accountid")
long accountId;
@Param(name="result_string")
String result_string;
@Param(name="created")
private Date createdDate;
@Param(name="state")
private String state;
@Param(name="storagetype")
String storageType;
@Param(name="storage")
private String storage;
@Param(name="zoneid")
private Long zoneId;
@Param(name="zonename")
private String zoneName;
@Param(name="url")
private String url;
public String getUrl() {
return url;
}
public void setUrl(String url) {
this.url = url;
}
public int getUploadPercent() {
return uploadPercent;
}
public void setUploadPercent(int i) {
this.uploadPercent = i;
}
public String getUploadStatus() {
return uploadStatus;
}
public void setUploadStatus(String uploadStatus) {
this.uploadStatus = uploadStatus;
}
public String getResult_string() {
return result_string;
}
public void setResult_string(String resultString) {
result_string = resultString;
}
public Long getZoneId() {
return zoneId;
}
public void setZoneId(Long zoneId) {
this.zoneId = zoneId;
}
public String getZoneName() {
return zoneName;
}
public void setZoneName(String zoneName) {
this.zoneName = zoneName;
}
public String getStorage() {
return storage;
}
public void setStorage(String storage) {
this.storage = storage;
}
public void setId(long id) {
this.id = id;
}
public long getId() {
return id;
}
public void setName(String name) {
this.name = name;
}
public String getName() {
return name;
}
public void setCreatedDate(Date createdDate) {
this.createdDate = createdDate;
}
public Date getCreatedDate() {
return createdDate;
}
public void setState(String status) {
this.state = status;
}
public String getState() {
return state;
}
public void setStorageType (String storageType) {
this.storageType = storageType;
}
public String getStorageType() {
return storageType;
}
}
package com.cloud.async.executor;
import java.util.Date;
import com.cloud.async.AsyncInstanceCreateStatus;
import com.cloud.serializer.Param;
import com.cloud.storage.Volume.Type;
import com.cloud.storage.upload.UploadState;
public class ExtractJobResultObject {
public ExtractJobResultObject(Long accountId, String typeName, String currState, int uploadPercent, Long uploadId){
this.accountId = accountId;
this.name = typeName;
this.state = currState;
this.id = uploadId;
this.uploadPercent = uploadPercent;
}
public ExtractJobResultObject(Long accountId, String typeName, String currState, Long uploadId, String url){
this.accountId = accountId;
this.name = typeName;
this.state = currState;
this.id = uploadId;
this.url = url;
}
public ExtractJobResultObject(){
}
@Param(name="id")
private long id;
@Param(name="name")
private String name;
@Param(name="uploadPercentage")
private int uploadPercent;
@Param(name="uploadStatus")
private String uploadStatus;
@Param(name="accountid")
long accountId;
@Param(name="result_string")
String result_string;
@Param(name="created")
private Date createdDate;
@Param(name="state")
private String state;
@Param(name="storagetype")
String storageType;
@Param(name="storage")
private String storage;
@Param(name="zoneid")
private Long zoneId;
@Param(name="zonename")
private String zoneName;
@Param(name="url")
private String url;
public String getUrl() {
return url;
}
public void setUrl(String url) {
this.url = url;
}
public int getUploadPercent() {
return uploadPercent;
}
public void setUploadPercent(int i) {
this.uploadPercent = i;
}
public String getUploadStatus() {
return uploadStatus;
}
public void setUploadStatus(String uploadStatus) {
this.uploadStatus = uploadStatus;
}
public String getResult_string() {
return result_string;
}
public void setResult_string(String resultString) {
result_string = resultString;
}
public Long getZoneId() {
return zoneId;
}
public void setZoneId(Long zoneId) {
this.zoneId = zoneId;
}
public String getZoneName() {
return zoneName;
}
public void setZoneName(String zoneName) {
this.zoneName = zoneName;
}
public String getStorage() {
return storage;
}
public void setStorage(String storage) {
this.storage = storage;
}
public void setId(long id) {
this.id = id;
}
public long getId() {
return id;
}
public void setName(String name) {
this.name = name;
}
public String getName() {
return name;
}
public void setCreatedDate(Date createdDate) {
this.createdDate = createdDate;
}
public Date getCreatedDate() {
return createdDate;
}
public void setState(String status) {
this.state = status;
}
public String getState() {
return state;
}
public void setStorageType (String storageType) {
this.storageType = storageType;
}
public String getStorageType() {
return storageType;
}
}

View File

@ -10,8 +10,8 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.capacity.dao;
package com.cloud.capacity.dao;
import java.util.List;
import java.util.Map;
@ -19,7 +19,7 @@ import com.cloud.capacity.CapacityVO;
import com.cloud.capacity.dao.CapacityDaoImpl.SummedCapacity;
import com.cloud.utils.Pair;
import com.cloud.utils.db.GenericDao;
public interface CapacityDao extends GenericDao<CapacityVO, Long> {
CapacityVO findByHostIdType(Long hostId, short capacityType);
List<Long> listClustersInZoneOrPodByHostCapacities(long id, int requiredCpu, long requiredRam, short capacityTypeForOrdering, boolean isZone, float cpuOverprovisioningFactor);
@ -36,5 +36,5 @@ public interface CapacityDao extends GenericDao<CapacityVO, Long> {
Long podId, Long clusterId, String resourceState);
List<SummedCapacity> listCapacitiesGroupedByLevelAndType(Integer capacityType, Long zoneId, Long podId, Long clusterId, int level, Long limit);
void updateCapacityState(Long dcId, Long podId, Long clusterId,
Long hostId, String capacityState);
}
Long hostId, String capacityState);
}

View File

@ -10,8 +10,8 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.capacity.dao;
package com.cloud.capacity.dao;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
@ -47,12 +47,12 @@ import com.cloud.utils.db.SearchCriteria.Func;
import com.cloud.utils.db.SearchCriteria.Op;
import com.cloud.utils.db.Transaction;
import com.cloud.utils.exception.CloudRuntimeException;
@Local(value = { CapacityDao.class })
@Local(value = { CapacityDao.class })
public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements CapacityDao {
private static final Logger s_logger = Logger.getLogger(CapacityDaoImpl.class);
private static final String ADD_ALLOCATED_SQL = "UPDATE `cloud`.`op_host_capacity` SET used_capacity = used_capacity + ? WHERE host_id = ? AND capacity_type = ?";
private static final Logger s_logger = Logger.getLogger(CapacityDaoImpl.class);
private static final String ADD_ALLOCATED_SQL = "UPDATE `cloud`.`op_host_capacity` SET used_capacity = used_capacity + ? WHERE host_id = ? AND capacity_type = ?";
private static final String SUBTRACT_ALLOCATED_SQL = "UPDATE `cloud`.`op_host_capacity` SET used_capacity = used_capacity - ? WHERE host_id = ? AND capacity_type = ?";
private static final String LIST_CLUSTERSINZONE_BY_HOST_CAPACITIES_PART1 = "SELECT DISTINCT capacity.cluster_id FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`cluster` cluster on (cluster.id = capacity.cluster_id AND cluster.removed is NULL) WHERE ";
@ -306,29 +306,29 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
return results;
}
public void updateAllocated(Long hostId, long allocatedAmount, short capacityType, boolean add) {
Transaction txn = Transaction.currentTxn();
PreparedStatement pstmt = null;
try {
txn.start();
String sql = null;
if (add) {
sql = ADD_ALLOCATED_SQL;
} else {
sql = SUBTRACT_ALLOCATED_SQL;
}
pstmt = txn.prepareAutoCloseStatement(sql);
pstmt.setLong(1, allocatedAmount);
pstmt.setLong(2, hostId);
pstmt.setShort(3, capacityType);
pstmt.executeUpdate(); // TODO: Make sure exactly 1 row was updated?
txn.commit();
} catch (Exception e) {
txn.rollback();
s_logger.warn("Exception updating capacity for host: " + hostId, e);
}
}
public void updateAllocated(Long hostId, long allocatedAmount, short capacityType, boolean add) {
Transaction txn = Transaction.currentTxn();
PreparedStatement pstmt = null;
try {
txn.start();
String sql = null;
if (add) {
sql = ADD_ALLOCATED_SQL;
} else {
sql = SUBTRACT_ALLOCATED_SQL;
}
pstmt = txn.prepareAutoCloseStatement(sql);
pstmt.setLong(1, allocatedAmount);
pstmt.setLong(2, hostId);
pstmt.setShort(3, capacityType);
pstmt.executeUpdate(); // TODO: Make sure exactly 1 row was updated?
txn.commit();
} catch (Exception e) {
txn.rollback();
s_logger.warn("Exception updating capacity for host: " + hostId, e);
}
}
@Override
@ -717,5 +717,5 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
} catch (Exception e) {
s_logger.warn("Error updating CapacityVO", e);
}
}
}
}
}

View File

@ -10,39 +10,39 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.cluster;
/**
* TaskManager helps business logic deal with clustering failover.
* Say you're writing code that introduces an inconsistent state over
* of your operation? Who will come back to cleanup this state? TaskManager
* with different content during your process. If the server dies, TaskManager
* running elsewhere. If there are no clustered servers, then TaskManager will
* cleanup when the dead server resumes.
*
*/
public interface CheckPointManager {
/**
* responsible for cleaning up.
*
* @param context context information to be stored.
* @return Check point id.
*/
long pushCheckPoint(CleanupMaid context);
/**
* update the task with new context
* @param taskId
* @param updatedContext new updated context.
*/
void updateCheckPointState(long taskId, CleanupMaid updatedContext);
/**
* removes the task as it is completed.
*
* @param taskId
*/
void popCheckPoint(long taskId);
}
package com.cloud.cluster;
/**
* TaskManager helps business logic deal with clustering failover.
* Say you're writing code that introduces an inconsistent state over
* of your operation? Who will come back to cleanup this state? TaskManager
* with different content during your process. If the server dies, TaskManager
* running elsewhere. If there are no clustered servers, then TaskManager will
* cleanup when the dead server resumes.
*
*/
public interface CheckPointManager {
/**
* responsible for cleaning up.
*
* @param context context information to be stored.
* @return Check point id.
*/
long pushCheckPoint(CleanupMaid context);
/**
* update the task with new context
* @param taskId
* @param updatedContext new updated context.
*/
void updateCheckPointState(long taskId, CleanupMaid updatedContext);
/**
* removes the task as it is completed.
*
* @param taskId
*/
void popCheckPoint(long taskId);
}

View File

@ -10,8 +10,8 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.cluster;
package com.cloud.cluster;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
@ -37,206 +37,206 @@ import com.cloud.utils.component.Manager;
import com.cloud.utils.concurrency.NamedThreadFactory;
import com.cloud.utils.db.DB;
import com.cloud.utils.db.GlobalLock;
@Local(value=CheckPointManager.class)
public class CheckPointManagerImpl implements CheckPointManager, Manager, ClusterManagerListener {
private static final Logger s_logger = Logger.getLogger(CheckPointManagerImpl.class);
private static final int ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_COOPERATION = 3; // 3 seconds
private int _cleanupRetryInterval;
private String _name;
@Inject
private StackMaidDao _maidDao;
@Inject
private ClusterManager _clusterMgr;
long _msId;
private final ScheduledExecutorService _cleanupScheduler = Executors.newScheduledThreadPool(1, new NamedThreadFactory("Task-Cleanup"));
protected CheckPointManagerImpl() {
}
@Override
public boolean configure(String name, Map<String, Object> xmlParams) throws ConfigurationException {
_name = name;
if (s_logger.isInfoEnabled()) {
s_logger.info("Start configuring StackMaidManager : " + name);
}
StackMaid.init(ManagementServerNode.getManagementServerId());
_msId = ManagementServerNode.getManagementServerId();
_clusterMgr.registerListener(this);
ComponentLocator locator = ComponentLocator.getCurrentLocator();
ConfigurationDao configDao = locator.getDao(ConfigurationDao.class);
Map<String, String> params = configDao.getConfiguration(xmlParams);
_cleanupRetryInterval = NumbersUtil.parseInt(params.get(Config.TaskCleanupRetryInterval.key()), 600);
_maidDao.takeover(_msId, _msId);
return true;
}
private void cleanupLeftovers(List<CheckPointVO> l) {
for (CheckPointVO maid : l) {
if (StackMaid.doCleanup(maid)) {
_maidDao.expunge(maid.getId());
}
}
}
@Override
public void onManagementNodeIsolated() {
}
@DB
private Runnable getGCTask() {
return new Runnable() {
@Override
public void run() {
GlobalLock scanLock = GlobalLock.getInternLock("StackMaidManagerGC");
try {
if (scanLock.lock(ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_COOPERATION)) {
try {
reallyRun();
} finally {
scanLock.unlock();
}
}
} finally {
scanLock.releaseRef();
}
}
public void reallyRun() {
try {
Date cutTime = new Date(DateUtil.currentGMTTime().getTime() - 7200000);
List<CheckPointVO> l = _maidDao.listLeftoversByCutTime(cutTime);
cleanupLeftovers(l);
} catch (Throwable e) {
s_logger.error("Unexpected exception when trying to execute queue item, ", e);
}
}
};
}
@Override
public boolean start() {
_cleanupScheduler.schedule(new CleanupTask(), _cleanupRetryInterval > 0 ? _cleanupRetryInterval : 600, TimeUnit.SECONDS);
return true;
}
@Override
public boolean stop() {
return true;
}
@Override
public String getName() {
return _name;
}
@Override
public void onManagementNodeJoined(List<ManagementServerHostVO> nodeList, long selfNodeId) {
// Nothing to do
}
@Override
public void onManagementNodeLeft(List<ManagementServerHostVO> nodeList, long selfNodeId) {
for (ManagementServerHostVO node : nodeList) {
if (_maidDao.takeover(node.getMsid(), selfNodeId)) {
s_logger.info("Taking over from " + node.getMsid());
_cleanupScheduler.execute(new CleanupTask());
}
}
}
@Local(value=CheckPointManager.class)
public class CheckPointManagerImpl implements CheckPointManager, Manager, ClusterManagerListener {
private static final Logger s_logger = Logger.getLogger(CheckPointManagerImpl.class);
private static final int ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_COOPERATION = 3; // 3 seconds
private int _cleanupRetryInterval;
private String _name;
@Inject
private StackMaidDao _maidDao;
@Inject
private ClusterManager _clusterMgr;
long _msId;
private final ScheduledExecutorService _cleanupScheduler = Executors.newScheduledThreadPool(1, new NamedThreadFactory("Task-Cleanup"));
protected CheckPointManagerImpl() {
}
@Override
@DB
public long pushCheckPoint(CleanupMaid context) {
long seq = _maidDao.pushCleanupDelegate(_msId, 0, context.getClass().getName(), context);
return seq;
}
@Override
public boolean configure(String name, Map<String, Object> xmlParams) throws ConfigurationException {
_name = name;
if (s_logger.isInfoEnabled()) {
s_logger.info("Start configuring StackMaidManager : " + name);
}
StackMaid.init(ManagementServerNode.getManagementServerId());
_msId = ManagementServerNode.getManagementServerId();
_clusterMgr.registerListener(this);
ComponentLocator locator = ComponentLocator.getCurrentLocator();
ConfigurationDao configDao = locator.getDao(ConfigurationDao.class);
Map<String, String> params = configDao.getConfiguration(xmlParams);
_cleanupRetryInterval = NumbersUtil.parseInt(params.get(Config.TaskCleanupRetryInterval.key()), 600);
_maidDao.takeover(_msId, _msId);
return true;
}
private void cleanupLeftovers(List<CheckPointVO> l) {
for (CheckPointVO maid : l) {
if (StackMaid.doCleanup(maid)) {
_maidDao.expunge(maid.getId());
}
}
}
@Override
public void onManagementNodeIsolated() {
}
@DB
public void updateCheckPointState(long taskId, CleanupMaid updatedContext) {
CheckPointVO task = _maidDao.createForUpdate();
task.setDelegate(updatedContext.getClass().getName());
task.setContext(SerializerHelper.toSerializedStringOld(updatedContext));
_maidDao.update(taskId, task);
}
@Override
private Runnable getGCTask() {
return new Runnable() {
@Override
public void run() {
GlobalLock scanLock = GlobalLock.getInternLock("StackMaidManagerGC");
try {
if (scanLock.lock(ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_COOPERATION)) {
try {
reallyRun();
} finally {
scanLock.unlock();
}
}
} finally {
scanLock.releaseRef();
}
}
public void reallyRun() {
try {
Date cutTime = new Date(DateUtil.currentGMTTime().getTime() - 7200000);
List<CheckPointVO> l = _maidDao.listLeftoversByCutTime(cutTime);
cleanupLeftovers(l);
} catch (Throwable e) {
s_logger.error("Unexpected exception when trying to execute queue item, ", e);
}
}
};
}
@Override
public boolean start() {
_cleanupScheduler.schedule(new CleanupTask(), _cleanupRetryInterval > 0 ? _cleanupRetryInterval : 600, TimeUnit.SECONDS);
return true;
}
@Override
public boolean stop() {
return true;
}
@Override
public String getName() {
return _name;
}
@Override
public void onManagementNodeJoined(List<ManagementServerHostVO> nodeList, long selfNodeId) {
// Nothing to do
}
@Override
public void onManagementNodeLeft(List<ManagementServerHostVO> nodeList, long selfNodeId) {
for (ManagementServerHostVO node : nodeList) {
if (_maidDao.takeover(node.getMsid(), selfNodeId)) {
s_logger.info("Taking over from " + node.getMsid());
_cleanupScheduler.execute(new CleanupTask());
}
}
}
@Override
@DB
public void popCheckPoint(long taskId) {
_maidDao.remove(taskId);
}
protected boolean cleanup(CheckPointVO task) {
s_logger.info("Cleaning up " + task);
CleanupMaid delegate = (CleanupMaid)SerializerHelper.fromSerializedString(task.getContext());
assert delegate.getClass().getName().equals(task.getDelegate()) : "Deserializer says " + delegate.getClass().getName() + " but it's suppose to be " + task.getDelegate();
int result = delegate.cleanup(this);
if (result <= 0) {
if (result == 0) {
s_logger.info("Successfully cleaned up " + task.getId());
} else {
s_logger.warn("Unsuccessful in cleaning up " + task + ". Procedure to cleanup manaully: " + delegate.getCleanupProcedure());
}
popCheckPoint(task.getId());
return true;
} else {
s_logger.error("Unable to cleanup " + task.getId());
return false;
}
}
class CleanupTask implements Runnable {
private Date _curDate;
public long pushCheckPoint(CleanupMaid context) {
long seq = _maidDao.pushCleanupDelegate(_msId, 0, context.getClass().getName(), context);
return seq;
}
@Override
@DB
public void updateCheckPointState(long taskId, CleanupMaid updatedContext) {
CheckPointVO task = _maidDao.createForUpdate();
task.setDelegate(updatedContext.getClass().getName());
task.setContext(SerializerHelper.toSerializedStringOld(updatedContext));
_maidDao.update(taskId, task);
}
@Override
@DB
public void popCheckPoint(long taskId) {
_maidDao.remove(taskId);
}
protected boolean cleanup(CheckPointVO task) {
s_logger.info("Cleaning up " + task);
CleanupMaid delegate = (CleanupMaid)SerializerHelper.fromSerializedString(task.getContext());
assert delegate.getClass().getName().equals(task.getDelegate()) : "Deserializer says " + delegate.getClass().getName() + " but it's suppose to be " + task.getDelegate();
int result = delegate.cleanup(this);
if (result <= 0) {
if (result == 0) {
s_logger.info("Successfully cleaned up " + task.getId());
} else {
s_logger.warn("Unsuccessful in cleaning up " + task + ". Procedure to cleanup manaully: " + delegate.getCleanupProcedure());
}
popCheckPoint(task.getId());
return true;
} else {
s_logger.error("Unable to cleanup " + task.getId());
return false;
}
}
class CleanupTask implements Runnable {
private Date _curDate;
public CleanupTask() {
_curDate = new Date();
}
@Override
public void run() {
_curDate = new Date();
}
@Override
public void run() {
try {
List<CheckPointVO> tasks = _maidDao.listLeftoversByCutTime(_curDate, _msId);
tasks.addAll(_maidDao.listCleanupTasks(_msId));
List<CheckPointVO> retries = new ArrayList<CheckPointVO>();
for (CheckPointVO task : tasks) {
try {
if (!cleanup(task)) {
retries.add(task);
}
} catch (Exception e) {
s_logger.warn("Unable to clean up " + task, e);
}
}
if (retries.size() > 0) {
if (_cleanupRetryInterval > 0) {
_cleanupScheduler.schedule(this, _cleanupRetryInterval, TimeUnit.SECONDS);
} else {
for (CheckPointVO task : retries) {
s_logger.warn("Cleanup procedure for " + task + ": " + ((CleanupMaid)SerializerHelper.fromSerializedString(task.getContext())).getCleanupProcedure());
}
}
}
} catch (Exception e) {
s_logger.error("Unable to cleanup all of the tasks for " + _msId, e);
}
}
}
}
List<CheckPointVO> tasks = _maidDao.listLeftoversByCutTime(_curDate, _msId);
tasks.addAll(_maidDao.listCleanupTasks(_msId));
List<CheckPointVO> retries = new ArrayList<CheckPointVO>();
for (CheckPointVO task : tasks) {
try {
if (!cleanup(task)) {
retries.add(task);
}
} catch (Exception e) {
s_logger.warn("Unable to clean up " + task, e);
}
}
if (retries.size() > 0) {
if (_cleanupRetryInterval > 0) {
_cleanupScheduler.schedule(this, _cleanupRetryInterval, TimeUnit.SECONDS);
} else {
for (CheckPointVO task : retries) {
s_logger.warn("Cleanup procedure for " + task + ": " + ((CleanupMaid)SerializerHelper.fromSerializedString(task.getContext())).getCleanupProcedure());
}
}
}
} catch (Exception e) {
s_logger.error("Unable to cleanup all of the tasks for " + _msId, e);
}
}
}
}

View File

@ -10,107 +10,107 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.cluster;
import java.util.Date;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import javax.persistence.Id;
import javax.persistence.Table;
import com.cloud.utils.db.GenericDao;
@Entity
@Table(name="stack_maid")
public class CheckPointVO {
@Id
@GeneratedValue(strategy=GenerationType.IDENTITY)
@Column(name="id")
private long id;
@Column(name="msid")
private long msid;
@Column(name="thread_id")
private long threadId;
@Column(name="seq")
private long seq;
@Column(name="cleanup_delegate", length=128)
private String delegate;
@Column(name="cleanup_context", length=65535)
private String context;
@Column(name=GenericDao.CREATED_COLUMN)
private Date created;
public CheckPointVO() {
}
public CheckPointVO(long seq) {
this.seq = seq;
}
public long getId() {
return id;
}
public long getMsid() {
return msid;
}
public void setMsid(long msid) {
this.msid = msid;
}
public long getThreadId() {
return threadId;
}
public void setThreadId(long threadId) {
this.threadId = threadId;
}
public long getSeq() {
return seq;
}
public void setSeq(long seq) {
this.seq = seq;
}
public String getDelegate() {
return delegate;
}
public void setDelegate(String delegate) {
this.delegate = delegate;
}
public String getContext() {
return context;
}
public void setContext(String context) {
this.context = context;
}
public Date getCreated() {
return this.created;
}
public void setCreated(Date created) {
this.created = created;
}
@Override
public String toString() {
return new StringBuilder("Task[").append(id).append("-").append(context).append("-").append(delegate).append("]").toString();
}
}
package com.cloud.cluster;
import java.util.Date;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import javax.persistence.Id;
import javax.persistence.Table;
import com.cloud.utils.db.GenericDao;
@Entity
@Table(name="stack_maid")
public class CheckPointVO {
@Id
@GeneratedValue(strategy=GenerationType.IDENTITY)
@Column(name="id")
private long id;
@Column(name="msid")
private long msid;
@Column(name="thread_id")
private long threadId;
@Column(name="seq")
private long seq;
@Column(name="cleanup_delegate", length=128)
private String delegate;
@Column(name="cleanup_context", length=65535)
private String context;
@Column(name=GenericDao.CREATED_COLUMN)
private Date created;
public CheckPointVO() {
}
public CheckPointVO(long seq) {
this.seq = seq;
}
public long getId() {
return id;
}
public long getMsid() {
return msid;
}
public void setMsid(long msid) {
this.msid = msid;
}
public long getThreadId() {
return threadId;
}
public void setThreadId(long threadId) {
this.threadId = threadId;
}
public long getSeq() {
return seq;
}
public void setSeq(long seq) {
this.seq = seq;
}
public String getDelegate() {
return delegate;
}
public void setDelegate(String delegate) {
this.delegate = delegate;
}
public String getContext() {
return context;
}
public void setContext(String context) {
this.context = context;
}
public Date getCreated() {
return this.created;
}
public void setCreated(Date created) {
this.created = created;
}
@Override
public String toString() {
return new StringBuilder("Task[").append(id).append("-").append(context).append("-").append(delegate).append("]").toString();
}
}

View File

@ -10,8 +10,8 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.cluster;
package com.cloud.cluster;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.Command;
import com.cloud.exception.AgentUnavailableException;
@ -19,45 +19,45 @@ import com.cloud.exception.OperationTimedoutException;
import com.cloud.host.Status.Event;
import com.cloud.resource.ResourceState;
import com.cloud.utils.component.Manager;
public interface ClusterManager extends Manager {
public static final int DEFAULT_HEARTBEAT_INTERVAL = 1500;
public static final int DEFAULT_HEARTBEAT_THRESHOLD = 150000;
public static final String ALERT_SUBJECT = "cluster-alert";
public interface ClusterManager extends Manager {
public static final int DEFAULT_HEARTBEAT_INTERVAL = 1500;
public static final int DEFAULT_HEARTBEAT_THRESHOLD = 150000;
public static final String ALERT_SUBJECT = "cluster-alert";
public void OnReceiveClusterServicePdu(ClusterServicePdu pdu);
public void executeAsync(String strPeer, long agentId, Command [] cmds, boolean stopOnError);
public Answer[] execute(String strPeer, long agentId, Command [] cmds, boolean stopOnError);
public Answer[] execute(String strPeer, long agentId, Command [] cmds, boolean stopOnError);
public Answer[] sendToAgent(Long hostId, Command [] cmds, boolean stopOnError) throws AgentUnavailableException, OperationTimedoutException;
public boolean executeAgentUserRequest(long agentId, Event event) throws AgentUnavailableException;
public boolean executeAgentUserRequest(long agentId, Event event) throws AgentUnavailableException;
public Boolean propagateAgentEvent(long agentId, Event event) throws AgentUnavailableException;
public Boolean propagateResourceEvent(long agentId, ResourceState.Event event) throws AgentUnavailableException;
public boolean executeResourceUserRequest(long hostId, ResourceState.Event event) throws AgentUnavailableException;
public boolean executeResourceUserRequest(long hostId, ResourceState.Event event) throws AgentUnavailableException;
public int getHeartbeatThreshold();
public long getManagementNodeId(); // msid of current management server node
public boolean isManagementNodeAlive(long msid);
public boolean pingManagementNode(long msid);
public long getCurrentRunId();
public String getSelfPeerName();
public String getSelfNodeIP();
public String getPeerName(long agentHostId);
public void registerListener(ClusterManagerListener listener);
public void unregisterListener(ClusterManagerListener listener);
public ManagementServerHostVO getPeer(String peerName);
/**
* Broadcast the command to all of the management server nodes.
* @param agentId agent id this broadcast is regarding
* @param cmds commands to broadcast
*/
public String getSelfPeerName();
public String getSelfNodeIP();
public String getPeerName(long agentHostId);
public void registerListener(ClusterManagerListener listener);
public void unregisterListener(ClusterManagerListener listener);
public ManagementServerHostVO getPeer(String peerName);
/**
* Broadcast the command to all of the management server nodes.
* @param agentId agent id this broadcast is regarding
* @param cmds commands to broadcast
*/
public void broadcast(long agentId, Command[] cmds);
boolean rebalanceAgent(long agentId, Event event, long currentOwnerId, long futureOwnerId) throws AgentUnavailableException, OperationTimedoutException;
boolean isAgentRebalanceEnabled();
}
boolean isAgentRebalanceEnabled();
}

View File

@ -10,12 +10,12 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.cluster;
package com.cloud.cluster;
import java.util.List;
public interface ClusterManagerListener {
void onManagementNodeJoined(List<ManagementServerHostVO> nodeList, long selfNodeId);
public interface ClusterManagerListener {
void onManagementNodeJoined(List<ManagementServerHostVO> nodeList, long selfNodeId);
void onManagementNodeLeft(List<ManagementServerHostVO> nodeList, long selfNodeId);
void onManagementNodeIsolated();
}
void onManagementNodeIsolated();
}

View File

@ -10,30 +10,30 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.cluster;
import java.util.List;
package com.cloud.cluster;
import java.util.List;
import com.cloud.utils.events.EventArgs;
public class ClusterNodeJoinEventArgs extends EventArgs {
private static final long serialVersionUID = 6284545402661799476L;
private List<ManagementServerHostVO> joinedNodes;
private Long self;
public ClusterNodeJoinEventArgs(Long self, List<ManagementServerHostVO> joinedNodes) {
super(ClusterManager.ALERT_SUBJECT);
this.self = self;
this.joinedNodes = joinedNodes;
}
public List<ManagementServerHostVO> getJoinedNodes() {
return joinedNodes;
}
public Long getSelf() {
return self;
}
}
public class ClusterNodeJoinEventArgs extends EventArgs {
private static final long serialVersionUID = 6284545402661799476L;
private List<ManagementServerHostVO> joinedNodes;
private Long self;
public ClusterNodeJoinEventArgs(Long self, List<ManagementServerHostVO> joinedNodes) {
super(ClusterManager.ALERT_SUBJECT);
this.self = self;
this.joinedNodes = joinedNodes;
}
public List<ManagementServerHostVO> getJoinedNodes() {
return joinedNodes;
}
public Long getSelf() {
return self;
}
}

View File

@ -10,31 +10,31 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.cluster;
import java.util.List;
package com.cloud.cluster;
import java.util.List;
import com.cloud.utils.events.EventArgs;
public class ClusterNodeLeftEventArgs extends EventArgs {
private static final long serialVersionUID = 7236743316223611935L;
private List<ManagementServerHostVO> leftNodes;
private Long self;
public ClusterNodeLeftEventArgs(Long self, List<ManagementServerHostVO> leftNodes) {
super(ClusterManager.ALERT_SUBJECT);
this.self = self;
this.leftNodes = leftNodes;
}
public List<ManagementServerHostVO> getLeftNodes() {
return leftNodes;
}
public Long getSelf() {
return self;
}
}
public class ClusterNodeLeftEventArgs extends EventArgs {
private static final long serialVersionUID = 7236743316223611935L;
private List<ManagementServerHostVO> leftNodes;
private Long self;
public ClusterNodeLeftEventArgs(Long self, List<ManagementServerHostVO> leftNodes) {
super(ClusterManager.ALERT_SUBJECT);
this.self = self;
this.leftNodes = leftNodes;
}
public List<ManagementServerHostVO> getLeftNodes() {
return leftNodes;
}
public Long getSelf() {
return self;
}
}

View File

@ -10,12 +10,12 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.cluster;
package com.cloud.cluster;
import java.rmi.Remote;
import java.rmi.RemoteException;
public interface ClusterService extends Remote {
String execute(ClusterServicePdu pdu) throws RemoteException;
boolean ping(String callingPeer) throws RemoteException;
}
}

View File

@ -10,15 +10,15 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.cluster;
import java.rmi.RemoteException;
import com.cloud.cluster.ClusterService;
import com.cloud.utils.component.Adapter;
public interface ClusterServiceAdapter extends Adapter {
public ClusterService getPeerService(String strPeer) throws RemoteException;
public String getServiceEndpointName(String strPeer);
public int getServicePort();
}
package com.cloud.cluster;
import java.rmi.RemoteException;
import com.cloud.cluster.ClusterService;
import com.cloud.utils.component.Adapter;
public interface ClusterServiceAdapter extends Adapter {
public ClusterService getPeerService(String strPeer) throws RemoteException;
public String getServiceEndpointName(String strPeer);
public int getServicePort();
}

View File

@ -10,49 +10,49 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.cluster;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.rmi.RemoteException;
import java.util.Map;
import java.util.Properties;
import javax.ejb.Local;
import javax.naming.ConfigurationException;
import org.apache.log4j.Logger;
import com.cloud.cluster.dao.ManagementServerHostDao;
package com.cloud.cluster;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.rmi.RemoteException;
import java.util.Map;
import java.util.Properties;
import javax.ejb.Local;
import javax.naming.ConfigurationException;
import org.apache.log4j.Logger;
import com.cloud.cluster.dao.ManagementServerHostDao;
import com.cloud.configuration.Config;
import com.cloud.configuration.dao.ConfigurationDao;
import com.cloud.utils.NumbersUtil;
import com.cloud.utils.PropertiesUtil;
import com.cloud.utils.component.ComponentLocator;
@Local(value={ClusterServiceAdapter.class})
public class ClusterServiceServletAdapter implements ClusterServiceAdapter {
private static final Logger s_logger = Logger.getLogger(ClusterServiceServletAdapter.class);
import com.cloud.utils.NumbersUtil;
import com.cloud.utils.PropertiesUtil;
import com.cloud.utils.component.ComponentLocator;
@Local(value={ClusterServiceAdapter.class})
public class ClusterServiceServletAdapter implements ClusterServiceAdapter {
private static final Logger s_logger = Logger.getLogger(ClusterServiceServletAdapter.class);
private static final int DEFAULT_SERVICE_PORT = 9090;
private static final int DEFAULT_REQUEST_TIMEOUT = 300; // 300 seconds
private ClusterManager _manager;
private static final int DEFAULT_REQUEST_TIMEOUT = 300; // 300 seconds
private ClusterManager _manager;
private ManagementServerHostDao _mshostDao;
private ConfigurationDao _configDao;
private ClusterServiceServletContainer _servletContainer;
private String _name;
private ClusterServiceServletContainer _servletContainer;
private String _name;
private int _clusterServicePort = DEFAULT_SERVICE_PORT;
private int _clusterRequestTimeoutSeconds = DEFAULT_REQUEST_TIMEOUT;
@Override
@Override
public ClusterService getPeerService(String strPeer) throws RemoteException {
try {
init();
@ -60,68 +60,68 @@ public class ClusterServiceServletAdapter implements ClusterServiceAdapter {
s_logger.error("Unable to init ClusterServiceServletAdapter");
throw new RemoteException("Unable to init ClusterServiceServletAdapter");
}
String serviceUrl = getServiceEndpointName(strPeer);
if(serviceUrl == null)
return null;
return new ClusterServiceServletImpl(serviceUrl, _clusterRequestTimeoutSeconds);
}
@Override
public String getServiceEndpointName(String strPeer) {
String serviceUrl = getServiceEndpointName(strPeer);
if(serviceUrl == null)
return null;
return new ClusterServiceServletImpl(serviceUrl, _clusterRequestTimeoutSeconds);
}
@Override
public String getServiceEndpointName(String strPeer) {
try {
init();
} catch (ConfigurationException e) {
s_logger.error("Unable to init ClusterServiceServletAdapter");
return null;
}
long msid = Long.parseLong(strPeer);
ManagementServerHostVO mshost = _mshostDao.findByMsid(msid);
if(mshost == null)
return null;
return composeEndpointName(mshost.getServiceIP(), mshost.getServicePort());
}
@Override
public int getServicePort() {
return _clusterServicePort;
}
private String composeEndpointName(String nodeIP, int port) {
StringBuffer sb = new StringBuffer();
sb.append("http://").append(nodeIP).append(":").append(port).append("/clusterservice");
return sb.toString();
}
@Override
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
_name = name;
init();
return true;
long msid = Long.parseLong(strPeer);
ManagementServerHostVO mshost = _mshostDao.findByMsid(msid);
if(mshost == null)
return null;
return composeEndpointName(mshost.getServiceIP(), mshost.getServicePort());
}
@Override
public String getName() {
return _name;
}
@Override
public boolean start() {
_servletContainer = new ClusterServiceServletContainer();
_servletContainer.start(new ClusterServiceServletHttpHandler(_manager), _clusterServicePort);
return true;
}
@Override
public boolean stop() {
if(_servletContainer != null)
_servletContainer.stop();
return true;
@Override
public int getServicePort() {
return _clusterServicePort;
}
private String composeEndpointName(String nodeIP, int port) {
StringBuffer sb = new StringBuffer();
sb.append("http://").append(nodeIP).append(":").append(port).append("/clusterservice");
return sb.toString();
}
@Override
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
_name = name;
init();
return true;
}
@Override
public String getName() {
return _name;
}
@Override
public boolean start() {
_servletContainer = new ClusterServiceServletContainer();
_servletContainer.start(new ClusterServiceServletHttpHandler(_manager), _clusterServicePort);
return true;
}
@Override
public boolean stop() {
if(_servletContainer != null)
_servletContainer.stop();
return true;
}
private void init() throws ConfigurationException {
@ -163,4 +163,4 @@ public class ClusterServiceServletAdapter implements ClusterServiceAdapter {
if(s_logger.isInfoEnabled())
s_logger.info("Cluster servlet port : " + _clusterServicePort);
}
}
}

View File

@ -10,98 +10,98 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.cluster;
import java.io.IOException;
import java.net.ServerSocket;
import java.net.Socket;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import org.apache.http.ConnectionClosedException;
import org.apache.http.HttpException;
import org.apache.http.impl.DefaultConnectionReuseStrategy;
import org.apache.http.impl.DefaultHttpResponseFactory;
import org.apache.http.impl.DefaultHttpServerConnection;
import org.apache.http.params.BasicHttpParams;
import org.apache.http.params.CoreConnectionPNames;
import org.apache.http.params.CoreProtocolPNames;
import org.apache.http.params.HttpParams;
import org.apache.http.protocol.BasicHttpContext;
import org.apache.http.protocol.BasicHttpProcessor;
import org.apache.http.protocol.HttpContext;
import org.apache.http.protocol.HttpRequestHandler;
import org.apache.http.protocol.HttpRequestHandlerRegistry;
import org.apache.http.protocol.HttpService;
import org.apache.http.protocol.ResponseConnControl;
import org.apache.http.protocol.ResponseContent;
import org.apache.http.protocol.ResponseDate;
import org.apache.http.protocol.ResponseServer;
import org.apache.log4j.Logger;
import com.cloud.utils.concurrency.NamedThreadFactory;
public class ClusterServiceServletContainer {
private static final Logger s_logger = Logger.getLogger(ClusterServiceServletContainer.class);
package com.cloud.cluster;
import java.io.IOException;
import java.net.ServerSocket;
import java.net.Socket;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import org.apache.http.ConnectionClosedException;
import org.apache.http.HttpException;
import org.apache.http.impl.DefaultConnectionReuseStrategy;
import org.apache.http.impl.DefaultHttpResponseFactory;
import org.apache.http.impl.DefaultHttpServerConnection;
import org.apache.http.params.BasicHttpParams;
import org.apache.http.params.CoreConnectionPNames;
import org.apache.http.params.CoreProtocolPNames;
import org.apache.http.params.HttpParams;
import org.apache.http.protocol.BasicHttpContext;
import org.apache.http.protocol.BasicHttpProcessor;
import org.apache.http.protocol.HttpContext;
import org.apache.http.protocol.HttpRequestHandler;
import org.apache.http.protocol.HttpRequestHandlerRegistry;
import org.apache.http.protocol.HttpService;
import org.apache.http.protocol.ResponseConnControl;
import org.apache.http.protocol.ResponseContent;
import org.apache.http.protocol.ResponseDate;
import org.apache.http.protocol.ResponseServer;
import org.apache.log4j.Logger;
import com.cloud.utils.concurrency.NamedThreadFactory;
public class ClusterServiceServletContainer {
private static final Logger s_logger = Logger.getLogger(ClusterServiceServletContainer.class);
private ListenerThread listenerThread;
public ClusterServiceServletContainer() {
}
public boolean start(HttpRequestHandler requestHandler, int port) {
listenerThread = new ListenerThread(requestHandler, port);
listenerThread.start();
return true;
}
public ClusterServiceServletContainer() {
}
public boolean start(HttpRequestHandler requestHandler, int port) {
listenerThread = new ListenerThread(requestHandler, port);
listenerThread.start();
return true;
}
public void stop() {
if(listenerThread != null) {
listenerThread.stopRunning();
}
}
static class ListenerThread extends Thread {
private HttpService _httpService = null;
private volatile ServerSocket _serverSocket = null;
private HttpParams _params = null;
private ExecutorService _executor;
public ListenerThread(HttpRequestHandler requestHandler, int port) {
_executor = Executors.newCachedThreadPool(new NamedThreadFactory("Cluster-Listener"));
try {
_serverSocket = new ServerSocket(port);
} catch (IOException ioex) {
s_logger.error("error initializing cluster service servlet container", ioex);
return;
}
_params = new BasicHttpParams();
_params
.setIntParameter(CoreConnectionPNames.SO_TIMEOUT, 5000)
.setIntParameter(CoreConnectionPNames.SOCKET_BUFFER_SIZE, 8 * 1024)
.setBooleanParameter(CoreConnectionPNames.STALE_CONNECTION_CHECK, false)
.setBooleanParameter(CoreConnectionPNames.TCP_NODELAY, true)
.setParameter(CoreProtocolPNames.ORIGIN_SERVER, "HttpComponents/1.1");
// Set up the HTTP protocol processor
BasicHttpProcessor httpproc = new BasicHttpProcessor();
httpproc.addInterceptor(new ResponseDate());
httpproc.addInterceptor(new ResponseServer());
httpproc.addInterceptor(new ResponseContent());
httpproc.addInterceptor(new ResponseConnControl());
// Set up request handlers
HttpRequestHandlerRegistry reqistry = new HttpRequestHandlerRegistry();
reqistry.register("/clusterservice", requestHandler);
// Set up the HTTP service
_httpService = new HttpService(httpproc, new DefaultConnectionReuseStrategy(), new DefaultHttpResponseFactory());
_httpService.setParams(_params);
_httpService.setHandlerResolver(reqistry);
listenerThread.stopRunning();
}
}
static class ListenerThread extends Thread {
private HttpService _httpService = null;
private volatile ServerSocket _serverSocket = null;
private HttpParams _params = null;
private ExecutorService _executor;
public ListenerThread(HttpRequestHandler requestHandler, int port) {
_executor = Executors.newCachedThreadPool(new NamedThreadFactory("Cluster-Listener"));
try {
_serverSocket = new ServerSocket(port);
} catch (IOException ioex) {
s_logger.error("error initializing cluster service servlet container", ioex);
return;
}
_params = new BasicHttpParams();
_params
.setIntParameter(CoreConnectionPNames.SO_TIMEOUT, 5000)
.setIntParameter(CoreConnectionPNames.SOCKET_BUFFER_SIZE, 8 * 1024)
.setBooleanParameter(CoreConnectionPNames.STALE_CONNECTION_CHECK, false)
.setBooleanParameter(CoreConnectionPNames.TCP_NODELAY, true)
.setParameter(CoreProtocolPNames.ORIGIN_SERVER, "HttpComponents/1.1");
// Set up the HTTP protocol processor
BasicHttpProcessor httpproc = new BasicHttpProcessor();
httpproc.addInterceptor(new ResponseDate());
httpproc.addInterceptor(new ResponseServer());
httpproc.addInterceptor(new ResponseContent());
httpproc.addInterceptor(new ResponseConnControl());
// Set up request handlers
HttpRequestHandlerRegistry reqistry = new HttpRequestHandlerRegistry();
reqistry.register("/clusterservice", requestHandler);
// Set up the HTTP service
_httpService = new HttpService(httpproc, new DefaultConnectionReuseStrategy(), new DefaultHttpResponseFactory());
_httpService.setParams(_params);
_httpService.setHandlerResolver(reqistry);
}
public void stopRunning() {
@ -112,50 +112,50 @@ public class ClusterServiceServletContainer {
}
_serverSocket = null;
}
}
public void run() {
if(s_logger.isInfoEnabled())
s_logger.info("Cluster service servlet container listening on port " + _serverSocket.getLocalPort());
while (_serverSocket != null) {
try {
// Set up HTTP connection
Socket socket = _serverSocket.accept();
final DefaultHttpServerConnection conn = new DefaultHttpServerConnection();
conn.bind(socket, _params);
_executor.execute(new Runnable() {
public void run() {
HttpContext context = new BasicHttpContext(null);
}
public void run() {
if(s_logger.isInfoEnabled())
s_logger.info("Cluster service servlet container listening on port " + _serverSocket.getLocalPort());
while (_serverSocket != null) {
try {
// Set up HTTP connection
Socket socket = _serverSocket.accept();
final DefaultHttpServerConnection conn = new DefaultHttpServerConnection();
conn.bind(socket, _params);
_executor.execute(new Runnable() {
public void run() {
HttpContext context = new BasicHttpContext(null);
try {
while(!Thread.interrupted() && conn.isOpen()) {
if(s_logger.isTraceEnabled())
s_logger.trace("dispatching cluster request from " + conn.getRemoteAddress().toString());
_httpService.handleRequest(conn, context);
if(s_logger.isTraceEnabled())
while(!Thread.interrupted() && conn.isOpen()) {
if(s_logger.isTraceEnabled())
s_logger.trace("dispatching cluster request from " + conn.getRemoteAddress().toString());
_httpService.handleRequest(conn, context);
if(s_logger.isTraceEnabled())
s_logger.trace("Cluster request from " + conn.getRemoteAddress().toString() + " is processed");
}
}
} catch (ConnectionClosedException ex) {
// client close and read time out exceptions are expected
// when KEEP-AVLIE is enabled
s_logger.trace("Client closed connection", ex);
} catch (IOException ex) {
s_logger.trace("I/O error", ex);
} catch (HttpException ex) {
s_logger.error("Unrecoverable HTTP protocol violation", ex);
} finally {
try {
// when KEEP-AVLIE is enabled
s_logger.trace("Client closed connection", ex);
} catch (IOException ex) {
s_logger.trace("I/O error", ex);
} catch (HttpException ex) {
s_logger.error("Unrecoverable HTTP protocol violation", ex);
} finally {
try {
conn.shutdown();
} catch (IOException ignore) {
s_logger.error("unexpected exception", ignore);
}
}
}
}
}
});
});
} catch (Throwable e) {
s_logger.error("Unexpected exception ", e);
@ -163,13 +163,13 @@ public class ClusterServiceServletContainer {
try {
Thread.sleep(1000);
} catch (InterruptedException e1) {
}
}
}
_executor.shutdown();
if(s_logger.isInfoEnabled())
s_logger.info("Cluster service servlet container shutdown");
}
}
}
}
}
}
_executor.shutdown();
if(s_logger.isInfoEnabled())
s_logger.info("Cluster service servlet container shutdown");
}
}
}

View File

@ -10,7 +10,7 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.cluster;
package com.cloud.cluster;
import java.io.ByteArrayInputStream;
import java.io.IOException;
@ -27,129 +27,129 @@ import org.apache.http.protocol.HttpRequestHandler;
import org.apache.http.util.EntityUtils;
import org.apache.log4j.Logger;
public class ClusterServiceServletHttpHandler implements HttpRequestHandler {
private static final Logger s_logger = Logger.getLogger(ClusterServiceServletHttpHandler.class);
private final ClusterManager manager;
public ClusterServiceServletHttpHandler(ClusterManager manager) {
this.manager = manager;
}
@Override
public void handle(HttpRequest request, HttpResponse response, HttpContext context)
throws HttpException, IOException {
try {
public class ClusterServiceServletHttpHandler implements HttpRequestHandler {
private static final Logger s_logger = Logger.getLogger(ClusterServiceServletHttpHandler.class);
private final ClusterManager manager;
public ClusterServiceServletHttpHandler(ClusterManager manager) {
this.manager = manager;
}
@Override
public void handle(HttpRequest request, HttpResponse response, HttpContext context)
throws HttpException, IOException {
try {
if(s_logger.isTraceEnabled()) {
s_logger.trace("Start Handling cluster HTTP request");
}
parseRequest(request);
handleRequest(request, response);
}
parseRequest(request);
handleRequest(request, response);
if(s_logger.isTraceEnabled()) {
s_logger.trace("Handle cluster HTTP request done");
}
} catch(Throwable e) {
}
} catch(Throwable e) {
if(s_logger.isDebugEnabled()) {
s_logger.debug("Exception " + e.toString());
}
}
try {
try {
writeResponse(response, HttpStatus.SC_INTERNAL_SERVER_ERROR, null);
} catch(Throwable e2) {
if(s_logger.isDebugEnabled()) {
s_logger.debug("Exception " + e2.toString());
}
}
}
}
@SuppressWarnings("deprecation")
private void parseRequest(HttpRequest request) throws IOException {
if(request instanceof HttpEntityEnclosingRequest) {
HttpEntityEnclosingRequest entityRequest = (HttpEntityEnclosingRequest)request;
String body = EntityUtils.toString(entityRequest.getEntity());
if(body != null) {
String[] paramArray = body.split("&");
if(paramArray != null) {
for (String paramEntry : paramArray) {
String[] paramValue = paramEntry.split("=");
}
}
}
@SuppressWarnings("deprecation")
private void parseRequest(HttpRequest request) throws IOException {
if(request instanceof HttpEntityEnclosingRequest) {
HttpEntityEnclosingRequest entityRequest = (HttpEntityEnclosingRequest)request;
String body = EntityUtils.toString(entityRequest.getEntity());
if(body != null) {
String[] paramArray = body.split("&");
if(paramArray != null) {
for (String paramEntry : paramArray) {
String[] paramValue = paramEntry.split("=");
if (paramValue.length != 2) {
continue;
}
String name = URLDecoder.decode(paramValue[0]);
String value = URLDecoder.decode(paramValue[1]);
}
String name = URLDecoder.decode(paramValue[0]);
String value = URLDecoder.decode(paramValue[1]);
if(s_logger.isTraceEnabled()) {
s_logger.trace("Parsed request parameter " + name + "=" + value);
}
request.getParams().setParameter(name, value);
}
}
}
}
}
private void writeResponse(HttpResponse response, int statusCode, String content) {
}
request.getParams().setParameter(name, value);
}
}
}
}
}
private void writeResponse(HttpResponse response, int statusCode, String content) {
if(content == null) {
content = "";
}
response.setStatusCode(statusCode);
BasicHttpEntity body = new BasicHttpEntity();
body.setContentType("text/html; charset=UTF-8");
byte[] bodyData = content.getBytes();
body.setContent(new ByteArrayInputStream(bodyData));
body.setContentLength(bodyData.length);
response.setEntity(body);
}
protected void handleRequest(HttpRequest req, HttpResponse response) {
String method = (String)req.getParams().getParameter("method");
int nMethod = RemoteMethodConstants.METHOD_UNKNOWN;
String responseContent = null;
try {
}
response.setStatusCode(statusCode);
BasicHttpEntity body = new BasicHttpEntity();
body.setContentType("text/html; charset=UTF-8");
byte[] bodyData = content.getBytes();
body.setContent(new ByteArrayInputStream(bodyData));
body.setContentLength(bodyData.length);
response.setEntity(body);
}
protected void handleRequest(HttpRequest req, HttpResponse response) {
String method = (String)req.getParams().getParameter("method");
int nMethod = RemoteMethodConstants.METHOD_UNKNOWN;
String responseContent = null;
try {
if(method != null) {
nMethod = Integer.parseInt(method);
}
switch(nMethod) {
}
switch(nMethod) {
case RemoteMethodConstants.METHOD_DELIVER_PDU :
responseContent = handleDeliverPduMethodCall(req);
break;
case RemoteMethodConstants.METHOD_PING :
responseContent = handlePingMethodCall(req);
break;
case RemoteMethodConstants.METHOD_UNKNOWN :
default :
assert(false);
s_logger.error("unrecognized method " + nMethod);
break;
}
} catch(Throwable e) {
s_logger.error("Unexpected exception when processing cluster service request : ", e);
}
case RemoteMethodConstants.METHOD_PING :
responseContent = handlePingMethodCall(req);
break;
case RemoteMethodConstants.METHOD_UNKNOWN :
default :
assert(false);
s_logger.error("unrecognized method " + nMethod);
break;
}
} catch(Throwable e) {
s_logger.error("Unexpected exception when processing cluster service request : ", e);
}
if(responseContent != null) {
if(s_logger.isTraceEnabled())
s_logger.trace("Write reponse with HTTP OK " + responseContent);
writeResponse(response, HttpStatus.SC_OK, responseContent);
writeResponse(response, HttpStatus.SC_OK, responseContent);
} else {
if(s_logger.isTraceEnabled())
s_logger.trace("Write reponse with HTTP Bad request");
writeResponse(response, HttpStatus.SC_BAD_REQUEST, null);
}
}
writeResponse(response, HttpStatus.SC_BAD_REQUEST, null);
}
}
private String handleDeliverPduMethodCall(HttpRequest req) {
@ -175,14 +175,14 @@ public class ClusterServiceServletHttpHandler implements HttpRequestHandler {
manager.OnReceiveClusterServicePdu(pdu);
return "true";
}
private String handlePingMethodCall(HttpRequest req) {
String callingPeer = (String)req.getParams().getParameter("callingPeer");
private String handlePingMethodCall(HttpRequest req) {
String callingPeer = (String)req.getParams().getParameter("callingPeer");
if(s_logger.isDebugEnabled()) {
s_logger.debug("Handle ping request from " + callingPeer);
}
return "true";
}
}
}
return "true";
}
}

View File

@ -10,8 +10,8 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.cluster;
package com.cloud.cluster;
import java.io.IOException;
import java.rmi.RemoteException;
@ -23,26 +23,26 @@ import org.apache.commons.httpclient.methods.PostMethod;
import org.apache.commons.httpclient.params.HttpClientParams;
import org.apache.log4j.Logger;
public class ClusterServiceServletImpl implements ClusterService {
public class ClusterServiceServletImpl implements ClusterService {
private static final long serialVersionUID = 4574025200012566153L;
private static final Logger s_logger = Logger.getLogger(ClusterServiceServletImpl.class);
private static final Logger s_logger = Logger.getLogger(ClusterServiceServletImpl.class);
private String _serviceUrl;
private int _requestTimeoutSeconds;
protected static HttpClient s_client = null;
public ClusterServiceServletImpl() {
}
public ClusterServiceServletImpl() {
}
public ClusterServiceServletImpl(String serviceUrl, int requestTimeoutSeconds) {
s_logger.info("Setup cluster service servlet. service url: " + serviceUrl + ", request timeout: " + requestTimeoutSeconds + " seconds");
this._serviceUrl = serviceUrl;
this._requestTimeoutSeconds = requestTimeoutSeconds;
this._requestTimeoutSeconds = requestTimeoutSeconds;
}
@Override
@Override
public String execute(ClusterServicePdu pdu) throws RemoteException {
HttpClient client = getHttpClient();
@ -66,54 +66,54 @@ public class ClusterServiceServletImpl implements ClusterService {
}
@Override
public boolean ping(String callingPeer) throws RemoteException {
public boolean ping(String callingPeer) throws RemoteException {
if(s_logger.isDebugEnabled()) {
s_logger.debug("Ping at " + _serviceUrl);
}
}
HttpClient client = getHttpClient();
PostMethod method = new PostMethod(_serviceUrl);
method.addParameter("method", Integer.toString(RemoteMethodConstants.METHOD_PING));
HttpClient client = getHttpClient();
PostMethod method = new PostMethod(_serviceUrl);
method.addParameter("method", Integer.toString(RemoteMethodConstants.METHOD_PING));
method.addParameter("callingPeer", callingPeer);
try {
try {
String returnVal = executePostMethod(client, method);
if("true".equalsIgnoreCase(returnVal)) {
return true;
}
}
return false;
} finally {
method.releaseConnection();
}
}
private String executePostMethod(HttpClient client, PostMethod method) {
int response = 0;
String result = null;
try {
long startTick = System.currentTimeMillis();
response = client.executeMethod(method);
if(response == HttpStatus.SC_OK) {
result = method.getResponseBodyAsString();
if(s_logger.isDebugEnabled()) {
s_logger.debug("POST " + _serviceUrl + " response :" + result + ", responding time: "
+ (System.currentTimeMillis() - startTick) + " ms");
}
} else {
s_logger.error("Invalid response code : " + response + ", from : "
+ _serviceUrl + ", method : " + method.getParameter("method")
+ " responding time: " + (System.currentTimeMillis() - startTick));
}
} catch (HttpException e) {
s_logger.error("HttpException from : " + _serviceUrl + ", method : " + method.getParameter("method"));
} catch (IOException e) {
s_logger.error("IOException from : " + _serviceUrl + ", method : " + method.getParameter("method"));
} catch(Throwable e) {
s_logger.error("Exception from : " + _serviceUrl + ", method : " + method.getParameter("method") + ", exception :", e);
}
}
}
return result;
private String executePostMethod(HttpClient client, PostMethod method) {
int response = 0;
String result = null;
try {
long startTick = System.currentTimeMillis();
response = client.executeMethod(method);
if(response == HttpStatus.SC_OK) {
result = method.getResponseBodyAsString();
if(s_logger.isDebugEnabled()) {
s_logger.debug("POST " + _serviceUrl + " response :" + result + ", responding time: "
+ (System.currentTimeMillis() - startTick) + " ms");
}
} else {
s_logger.error("Invalid response code : " + response + ", from : "
+ _serviceUrl + ", method : " + method.getParameter("method")
+ " responding time: " + (System.currentTimeMillis() - startTick));
}
} catch (HttpException e) {
s_logger.error("HttpException from : " + _serviceUrl + ", method : " + method.getParameter("method"));
} catch (IOException e) {
s_logger.error("IOException from : " + _serviceUrl + ", method : " + method.getParameter("method"));
} catch(Throwable e) {
s_logger.error("Exception from : " + _serviceUrl + ", method : " + method.getParameter("method") + ", exception :", e);
}
return result;
}
private HttpClient getHttpClient() {
@ -133,16 +133,16 @@ public class ClusterServiceServletImpl implements ClusterService {
}
return s_client;
}
// for test purpose only
public static void main(String[] args) {
// for test purpose only
public static void main(String[] args) {
/*
ClusterServiceServletImpl service = new ClusterServiceServletImpl("http://localhost:9090/clusterservice", 300);
try {
String result = service.execute("test", 1, "{ p1:v1, p2:v2 }", true);
System.out.println(result);
} catch (RemoteException e) {
ClusterServiceServletImpl service = new ClusterServiceServletImpl("http://localhost:9090/clusterservice", 300);
try {
String result = service.execute("test", 1, "{ p1:v1, p2:v2 }", true);
System.out.println(result);
} catch (RemoteException e) {
}
*/
}
}
}
}

View File

@ -10,8 +10,8 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.cluster;
package com.cloud.cluster;
import java.util.Map;
import javax.ejb.Local;
@ -28,15 +28,15 @@ import com.cloud.host.Status.Event;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.net.MacAddress;
@Local(value={ClusterManager.class})
public class DummyClusterManagerImpl implements ClusterManager {
private static final Logger s_logger = Logger.getLogger(DummyClusterManagerImpl.class);
@Local(value={ClusterManager.class})
public class DummyClusterManagerImpl implements ClusterManager {
private static final Logger s_logger = Logger.getLogger(DummyClusterManagerImpl.class);
protected long _id = MacAddress.getMacAddress().toLong();
protected long _runId = System.currentTimeMillis();
private String _name;
private final String _clusterNodeIP = "127.0.0.1";
private String _name;
private final String _clusterNodeIP = "127.0.0.1";
@Override
public void OnReceiveClusterServicePdu(ClusterServicePdu pdu) {
@ -47,63 +47,63 @@ public class DummyClusterManagerImpl implements ClusterManager {
public void executeAsync(String strPeer, long agentId, Command [] cmds, boolean stopOnError) {
throw new CloudRuntimeException("Unsupported feature");
}
@Override
public Answer[] execute(String strPeer, long agentId, Command [] cmds, boolean stopOnError) {
throw new CloudRuntimeException("Unsupported feature");
public Answer[] execute(String strPeer, long agentId, Command [] cmds, boolean stopOnError) {
throw new CloudRuntimeException("Unsupported feature");
}
@Override
public Answer[] sendToAgent(Long hostId, Command [] cmds, boolean stopOnError)
throws AgentUnavailableException, OperationTimedoutException {
throw new CloudRuntimeException("Unsupported feature");
}
public Answer[] sendToAgent(Long hostId, Command [] cmds, boolean stopOnError)
throws AgentUnavailableException, OperationTimedoutException {
throw new CloudRuntimeException("Unsupported feature");
}
/*
/*
@Override
public long sendToAgent(Long hostId, Command[] cmds, boolean stopOnError, Listener listener) throws AgentUnavailableException {
throw new CloudRuntimeException("Unsupported feature");
}
*/
public long sendToAgent(Long hostId, Command[] cmds, boolean stopOnError, Listener listener) throws AgentUnavailableException {
throw new CloudRuntimeException("Unsupported feature");
}
*/
@Override
public boolean executeAgentUserRequest(long agentId, Event event) throws AgentUnavailableException {
throw new CloudRuntimeException("Unsupported feature");
}
public boolean executeAgentUserRequest(long agentId, Event event) throws AgentUnavailableException {
throw new CloudRuntimeException("Unsupported feature");
}
@Override
public Boolean propagateAgentEvent(long agentId, Event event) throws AgentUnavailableException {
throw new CloudRuntimeException("Unsupported feature");
}
public Boolean propagateAgentEvent(long agentId, Event event) throws AgentUnavailableException {
throw new CloudRuntimeException("Unsupported feature");
}
@Override
public int getHeartbeatThreshold() {
return ClusterManager.DEFAULT_HEARTBEAT_INTERVAL;
}
public int getHeartbeatThreshold() {
return ClusterManager.DEFAULT_HEARTBEAT_INTERVAL;
}
@Override
public long getManagementNodeId() {
return _id;
public long getManagementNodeId() {
return _id;
}
@Override
public long getCurrentRunId() {
return _runId;
}
@Override
public ManagementServerHostVO getPeer(String str) {
return null;
}
@Override
public String getSelfPeerName() {
return Long.toString(_id);
}
public ManagementServerHostVO getPeer(String str) {
return null;
}
@Override
public String getSelfNodeIP() {
return _clusterNodeIP;
}
public String getSelfPeerName() {
return Long.toString(_id);
}
@Override
public String getSelfNodeIP() {
return _clusterNodeIP;
}
@Override
public boolean isManagementNodeAlive(long msid) {
@ -114,46 +114,46 @@ public class DummyClusterManagerImpl implements ClusterManager {
public boolean pingManagementNode(long msid) {
return false;
}
@Override
public String getPeerName(long agentHostId) {
throw new CloudRuntimeException("Unsupported feature");
}
public String getPeerName(long agentHostId) {
throw new CloudRuntimeException("Unsupported feature");
}
@Override
public void registerListener(ClusterManagerListener listener) {
}
public void registerListener(ClusterManagerListener listener) {
}
@Override
public void unregisterListener(ClusterManagerListener listener) {
}
@Override
public boolean configure(String name, Map<String, Object> params)
throws ConfigurationException {
return true;
}
@Override
public void broadcast(long hostId, Command[] cmds) {
}
@Override
public String getName() {
return _name;
}
@Override
public boolean start() {
if(s_logger.isInfoEnabled())
s_logger.info("Starting cluster manager, msid : " + _id);
return true;
}
@Override
public boolean stop() {
return true;
public void unregisterListener(ClusterManagerListener listener) {
}
@Override
public boolean configure(String name, Map<String, Object> params)
throws ConfigurationException {
return true;
}
@Override
public void broadcast(long hostId, Command[] cmds) {
}
@Override
public String getName() {
return _name;
}
@Override
public boolean start() {
if(s_logger.isInfoEnabled())
s_logger.info("Starting cluster manager, msid : " + _id);
return true;
}
@Override
public boolean stop() {
return true;
}
@Override
@ -176,5 +176,5 @@ public class DummyClusterManagerImpl implements ClusterManager {
public boolean executeResourceUserRequest(long hostId, com.cloud.resource.ResourceState.Event event) throws AgentUnavailableException {
// TODO Auto-generated method stub
return false;
}
}
}
}

View File

@ -10,8 +10,8 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.cluster;
package com.cloud.cluster;
import java.util.Date;
import javax.persistence.Column;
@ -26,65 +26,65 @@ import javax.persistence.Temporal;
import javax.persistence.TemporalType;
import com.cloud.utils.db.GenericDao;
@Entity
@Table(name="mshost")
public class ManagementServerHostVO implements ManagementServerHost{
@Id
@GeneratedValue(strategy=GenerationType.IDENTITY)
@Column(name="id")
private long id;
@Column(name="msid", updatable=true, nullable=false)
@Entity
@Table(name="mshost")
public class ManagementServerHostVO implements ManagementServerHost{
@Id
@GeneratedValue(strategy=GenerationType.IDENTITY)
@Column(name="id")
private long id;
@Column(name="msid", updatable=true, nullable=false)
private long msid;
@Column(name="runid", updatable=true, nullable=false)
private long runid;
@Column(name="name", updatable=true, nullable=true)
private long runid;
@Column(name="name", updatable=true, nullable=true)
private String name;
@Column(name="state", updatable = true, nullable=false)
@Enumerated(value=EnumType.STRING)
private ManagementServerHost.State state;
@Column(name="version", updatable=true, nullable=true)
private String version;
@Column(name="service_ip", updatable=true, nullable=false)
private String serviceIP;
@Column(name="service_port", updatable=true, nullable=false)
private int servicePort;
@Temporal(TemporalType.TIMESTAMP)
@Column(name="last_update", updatable=true, nullable=true)
private Date lastUpdateTime;
@Column(name=GenericDao.REMOVED_COLUMN)
private Date removed;
@Column(name="alert_count", updatable=true, nullable=false)
private int alertCount;
public ManagementServerHostVO() {
}
public ManagementServerHostVO(long msid, long runid, String serviceIP, int servicePort, Date updateTime) {
private ManagementServerHost.State state;
@Column(name="version", updatable=true, nullable=true)
private String version;
@Column(name="service_ip", updatable=true, nullable=false)
private String serviceIP;
@Column(name="service_port", updatable=true, nullable=false)
private int servicePort;
@Temporal(TemporalType.TIMESTAMP)
@Column(name="last_update", updatable=true, nullable=true)
private Date lastUpdateTime;
@Column(name=GenericDao.REMOVED_COLUMN)
private Date removed;
@Column(name="alert_count", updatable=true, nullable=false)
private int alertCount;
public ManagementServerHostVO() {
}
public ManagementServerHostVO(long msid, long runid, String serviceIP, int servicePort, Date updateTime) {
this.msid = msid;
this.runid = runid;
this.serviceIP = serviceIP;
this.servicePort = servicePort;
this.lastUpdateTime = updateTime;
}
public long getId() {
return id;
}
public void setId(long id) {
this.id = id;
this.runid = runid;
this.serviceIP = serviceIP;
this.servicePort = servicePort;
this.lastUpdateTime = updateTime;
}
public long getId() {
return id;
}
public void setId(long id) {
this.id = id;
}
public long getRunid() {
@ -93,23 +93,23 @@ public class ManagementServerHostVO implements ManagementServerHost{
public void setRunid(long runid) {
this.runid = runid;
}
}
@Override
public long getMsid() {
return msid;
}
public void setMsid(long msid) {
this.msid = msid;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
@Override
public long getMsid() {
return msid;
}
public void setMsid(long msid) {
this.msid = msid;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
@Override
@ -119,59 +119,59 @@ public class ManagementServerHostVO implements ManagementServerHost{
public void setState(ManagementServerHost.State state) {
this.state = state;
}
}
@Override
public String getVersion() {
return version;
}
public void setVersion(String version) {
this.version = version;
}
public String getServiceIP() {
return serviceIP;
}
public void setServiceIP(String serviceIP) {
this.serviceIP = serviceIP;
}
public int getServicePort() {
return servicePort;
}
public void setServicePort(int servicePort) {
this.servicePort = servicePort;
}
public Date getLastUpdateTime() {
return lastUpdateTime;
}
public void setLastUpdateTime(Date lastUpdateTime) {
this.lastUpdateTime = lastUpdateTime;
}
public Date getRemoved() {
return removed;
}
public void setRemoved(Date removedTime) {
removed = removedTime;
}
public int getAlertCount() {
return alertCount;
}
public void setAlertCount(int count) {
alertCount = count;
@Override
public String getVersion() {
return version;
}
public void setVersion(String version) {
this.version = version;
}
public String getServiceIP() {
return serviceIP;
}
public void setServiceIP(String serviceIP) {
this.serviceIP = serviceIP;
}
public int getServicePort() {
return servicePort;
}
public void setServicePort(int servicePort) {
this.servicePort = servicePort;
}
public Date getLastUpdateTime() {
return lastUpdateTime;
}
public void setLastUpdateTime(Date lastUpdateTime) {
this.lastUpdateTime = lastUpdateTime;
}
public Date getRemoved() {
return removed;
}
public void setRemoved(Date removedTime) {
removed = removedTime;
}
public int getAlertCount() {
return alertCount;
}
public void setAlertCount(int count) {
alertCount = count;
}
@Override
public String toString() {
return new StringBuilder("ManagementServer[").append("-").append(id).append("-").append(msid).append("-").append(state).append("]").toString();
}
}
}
}

View File

@ -10,10 +10,10 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.cluster;
public interface RemoteMethodConstants {
package com.cloud.cluster;
public interface RemoteMethodConstants {
public static final int METHOD_UNKNOWN = 0;
public static final int METHOD_PING = 4;
public static final int METHOD_PING = 4;
public static final int METHOD_DELIVER_PDU = 5;
}
}

View File

@ -10,140 +10,140 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.cluster;
import java.util.HashMap;
import java.util.Map;
import org.apache.log4j.Logger;
import com.cloud.cluster.dao.StackMaidDao;
import com.cloud.cluster.dao.StackMaidDaoImpl;
import com.cloud.serializer.SerializerHelper;
import com.cloud.utils.CleanupDelegate;
import com.cloud.utils.db.Transaction;
public class StackMaid {
protected final static Logger s_logger = Logger.getLogger(StackMaid.class);
private static ThreadLocal<StackMaid> threadMaid = new ThreadLocal<StackMaid>();
private static long msid_setby_manager = 0;
private StackMaidDao maidDao = new StackMaidDaoImpl();
private int currentSeq = 0;
private Map<String, Object> context = new HashMap<String, Object>();
public static void init(long msid) {
msid_setby_manager = msid;
}
public static StackMaid current() {
StackMaid maid = threadMaid.get();
if(maid == null) {
maid = new StackMaid();
threadMaid.set(maid);
}
return maid;
}
public void registerContext(String key, Object contextObject) {
assert(!context.containsKey(key)) : "Context key has already been registered";
context.put(key, contextObject);
}
public Object getContext(String key) {
return context.get(key);
}
public void expungeMaidItem(long maidId) {
// this is a bit ugly, but when it is not loaded by component locator, this is just a workable way for now
Transaction txn = Transaction.open(Transaction.CLOUD_DB);
try {
maidDao.expunge(maidId);
} finally {
txn.close();
}
}
public int push(String delegateClzName, Object context) {
assert(msid_setby_manager != 0) : "Fatal, make sure StackMaidManager is loaded";
if(msid_setby_manager == 0)
s_logger.error("Fatal, make sure StackMaidManager is loaded");
return push(msid_setby_manager, delegateClzName, context);
}
public int push(long currentMsid, String delegateClzName, Object context) {
int savePoint = currentSeq;
maidDao.pushCleanupDelegate(currentMsid, currentSeq++, delegateClzName, context);
return savePoint;
}
public void pop(int savePoint) {
assert(msid_setby_manager != 0) : "Fatal, make sure StackMaidManager is loaded";
if(msid_setby_manager == 0)
s_logger.error("Fatal, make sure StackMaidManager is loaded");
pop(msid_setby_manager, savePoint);
}
public void pop() {
if(currentSeq > 0)
pop(currentSeq -1);
}
/**
* must be called within thread context
* @param currentMsid
*/
public void pop(long currentMsid, int savePoint) {
while(currentSeq > savePoint) {
maidDao.popCleanupDelegate(currentMsid);
currentSeq--;
}
}
public void exitCleanup() {
exitCleanup(msid_setby_manager);
}
public void exitCleanup(long currentMsid) {
if(currentSeq > 0) {
CheckPointVO maid = null;
while((maid = maidDao.popCleanupDelegate(currentMsid)) != null) {
doCleanup(maid);
}
currentSeq = 0;
}
context.clear();
}
public static boolean doCleanup(CheckPointVO maid) {
if(maid.getDelegate() != null) {
try {
Class<?> clz = Class.forName(maid.getDelegate());
Object delegate = clz.newInstance();
if(delegate instanceof CleanupDelegate) {
return ((CleanupDelegate)delegate).cleanup(SerializerHelper.fromSerializedString(maid.getContext()), maid);
} else {
assert(false);
}
} catch (final ClassNotFoundException e) {
s_logger.error("Unable to load StackMaid delegate class: " + maid.getDelegate(), e);
} catch (final SecurityException e) {
s_logger.error("Security excetion when loading resource: " + maid.getDelegate());
} catch (final IllegalArgumentException e) {
s_logger.error("Illegal argument excetion when loading resource: " + maid.getDelegate());
} catch (final InstantiationException e) {
s_logger.error("Instantiation excetion when loading resource: " + maid.getDelegate());
} catch (final IllegalAccessException e) {
s_logger.error("Illegal access exception when loading resource: " + maid.getDelegate());
}
return false;
}
return true;
}
}
package com.cloud.cluster;
import java.util.HashMap;
import java.util.Map;
import org.apache.log4j.Logger;
import com.cloud.cluster.dao.StackMaidDao;
import com.cloud.cluster.dao.StackMaidDaoImpl;
import com.cloud.serializer.SerializerHelper;
import com.cloud.utils.CleanupDelegate;
import com.cloud.utils.db.Transaction;
public class StackMaid {
protected final static Logger s_logger = Logger.getLogger(StackMaid.class);
private static ThreadLocal<StackMaid> threadMaid = new ThreadLocal<StackMaid>();
private static long msid_setby_manager = 0;
private StackMaidDao maidDao = new StackMaidDaoImpl();
private int currentSeq = 0;
private Map<String, Object> context = new HashMap<String, Object>();
public static void init(long msid) {
msid_setby_manager = msid;
}
public static StackMaid current() {
StackMaid maid = threadMaid.get();
if(maid == null) {
maid = new StackMaid();
threadMaid.set(maid);
}
return maid;
}
public void registerContext(String key, Object contextObject) {
assert(!context.containsKey(key)) : "Context key has already been registered";
context.put(key, contextObject);
}
public Object getContext(String key) {
return context.get(key);
}
public void expungeMaidItem(long maidId) {
// this is a bit ugly, but when it is not loaded by component locator, this is just a workable way for now
Transaction txn = Transaction.open(Transaction.CLOUD_DB);
try {
maidDao.expunge(maidId);
} finally {
txn.close();
}
}
public int push(String delegateClzName, Object context) {
assert(msid_setby_manager != 0) : "Fatal, make sure StackMaidManager is loaded";
if(msid_setby_manager == 0)
s_logger.error("Fatal, make sure StackMaidManager is loaded");
return push(msid_setby_manager, delegateClzName, context);
}
public int push(long currentMsid, String delegateClzName, Object context) {
int savePoint = currentSeq;
maidDao.pushCleanupDelegate(currentMsid, currentSeq++, delegateClzName, context);
return savePoint;
}
public void pop(int savePoint) {
assert(msid_setby_manager != 0) : "Fatal, make sure StackMaidManager is loaded";
if(msid_setby_manager == 0)
s_logger.error("Fatal, make sure StackMaidManager is loaded");
pop(msid_setby_manager, savePoint);
}
public void pop() {
if(currentSeq > 0)
pop(currentSeq -1);
}
/**
* must be called within thread context
* @param currentMsid
*/
public void pop(long currentMsid, int savePoint) {
while(currentSeq > savePoint) {
maidDao.popCleanupDelegate(currentMsid);
currentSeq--;
}
}
public void exitCleanup() {
exitCleanup(msid_setby_manager);
}
public void exitCleanup(long currentMsid) {
if(currentSeq > 0) {
CheckPointVO maid = null;
while((maid = maidDao.popCleanupDelegate(currentMsid)) != null) {
doCleanup(maid);
}
currentSeq = 0;
}
context.clear();
}
public static boolean doCleanup(CheckPointVO maid) {
if(maid.getDelegate() != null) {
try {
Class<?> clz = Class.forName(maid.getDelegate());
Object delegate = clz.newInstance();
if(delegate instanceof CleanupDelegate) {
return ((CleanupDelegate)delegate).cleanup(SerializerHelper.fromSerializedString(maid.getContext()), maid);
} else {
assert(false);
}
} catch (final ClassNotFoundException e) {
s_logger.error("Unable to load StackMaid delegate class: " + maid.getDelegate(), e);
} catch (final SecurityException e) {
s_logger.error("Security excetion when loading resource: " + maid.getDelegate());
} catch (final IllegalArgumentException e) {
s_logger.error("Illegal argument excetion when loading resource: " + maid.getDelegate());
} catch (final InstantiationException e) {
s_logger.error("Instantiation excetion when loading resource: " + maid.getDelegate());
} catch (final IllegalAccessException e) {
s_logger.error("Illegal access exception when loading resource: " + maid.getDelegate());
}
return false;
}
return true;
}
}

View File

@ -10,8 +10,8 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.cluster.dao;
package com.cloud.cluster.dao;
import java.util.Date;
import java.util.List;
@ -20,17 +20,17 @@ import com.cloud.cluster.ManagementServerHost.State;
import com.cloud.cluster.ManagementServerHostVO;
import com.cloud.utils.db.Filter;
import com.cloud.utils.db.GenericDao;
public interface ManagementServerHostDao extends GenericDao<ManagementServerHostVO, Long> {
public interface ManagementServerHostDao extends GenericDao<ManagementServerHostVO, Long> {
@Override
boolean remove(Long id);
ManagementServerHostVO findByMsid(long msid);
ManagementServerHostVO findByMsid(long msid);
int increaseAlertCount(long id);
void update(long id, long runid, String name, String version, String serviceIP, int servicePort, Date lastUpdate);
void update(long id, long runid, Date lastUpdate);
List<ManagementServerHostVO> getActiveList(Date cutTime);
void update(long id, long runid, String name, String version, String serviceIP, int servicePort, Date lastUpdate);
void update(long id, long runid, Date lastUpdate);
List<ManagementServerHostVO> getActiveList(Date cutTime);
List<ManagementServerHostVO> getInactiveList(Date cutTime);
void invalidateRunSession(long id, long runid);
@ -41,4 +41,4 @@ public interface ManagementServerHostDao extends GenericDao<ManagementServerHost
public List<Long> listOrphanMsids();
ManagementServerHostVO findOneInUpState(Filter filter);
}
}

View File

@ -10,8 +10,8 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.cluster.dao;
package com.cloud.cluster.dao;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
@ -36,12 +36,12 @@ import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.Transaction;
import com.cloud.utils.exception.CloudRuntimeException;
@Local(value={ManagementServerHostDao.class})
public class ManagementServerHostDaoImpl extends GenericDaoBase<ManagementServerHostVO, Long> implements ManagementServerHostDao {
private static final Logger s_logger = Logger.getLogger(ManagementServerHostDaoImpl.class);
private final SearchBuilder<ManagementServerHostVO> MsIdSearch;
@Local(value={ManagementServerHostDao.class})
public class ManagementServerHostDaoImpl extends GenericDaoBase<ManagementServerHostVO, Long> implements ManagementServerHostDao {
private static final Logger s_logger = Logger.getLogger(ManagementServerHostDaoImpl.class);
private final SearchBuilder<ManagementServerHostVO> MsIdSearch;
private final SearchBuilder<ManagementServerHostVO> ActiveSearch;
private final SearchBuilder<ManagementServerHostVO> InactiveSearch;
private final SearchBuilder<ManagementServerHostVO> StateSearch;
@ -76,27 +76,27 @@ public class ManagementServerHostDaoImpl extends GenericDaoBase<ManagementServer
@Override
@DB
public void update(long id, long runid, String name, String version, String serviceIP, int servicePort, Date lastUpdate) {
Transaction txn = Transaction.currentTxn();
PreparedStatement pstmt = null;
try {
txn.start();
pstmt = txn.prepareAutoCloseStatement("update mshost set name=?, version=?, service_ip=?, service_port=?, last_update=?, removed=null, alert_count=0, runid=?, state=? where id=?");
pstmt.setString(1, name);
pstmt.setString(2, version);
pstmt.setString(3, serviceIP);
pstmt.setInt(4, servicePort);
pstmt.setString(5, DateUtil.getDateDisplayString(TimeZone.getTimeZone("GMT"), lastUpdate));
public void update(long id, long runid, String name, String version, String serviceIP, int servicePort, Date lastUpdate) {
Transaction txn = Transaction.currentTxn();
PreparedStatement pstmt = null;
try {
txn.start();
pstmt = txn.prepareAutoCloseStatement("update mshost set name=?, version=?, service_ip=?, service_port=?, last_update=?, removed=null, alert_count=0, runid=?, state=? where id=?");
pstmt.setString(1, name);
pstmt.setString(2, version);
pstmt.setString(3, serviceIP);
pstmt.setInt(4, servicePort);
pstmt.setString(5, DateUtil.getDateDisplayString(TimeZone.getTimeZone("GMT"), lastUpdate));
pstmt.setLong(6, runid);
pstmt.setString(7, ManagementServerHost.State.Up.toString());
pstmt.setLong(8, id);
pstmt.executeUpdate();
txn.commit();
} catch(Exception e) {
s_logger.warn("Unexpected exception, ", e);
}
pstmt.setLong(8, id);
pstmt.executeUpdate();
txn.commit();
} catch(Exception e) {
s_logger.warn("Unexpected exception, ", e);
}
}
@Override
@ -122,34 +122,34 @@ public class ManagementServerHostDaoImpl extends GenericDaoBase<ManagementServer
@Override
@DB
public void update(long id, long runid, Date lastUpdate) {
Transaction txn = Transaction.currentTxn();
PreparedStatement pstmt = null;
try {
txn.start();
pstmt = txn.prepareAutoCloseStatement("update mshost set last_update=?, removed=null, alert_count=0 where id=? and runid=?");
pstmt.setString(1, DateUtil.getDateDisplayString(TimeZone.getTimeZone("GMT"), lastUpdate));
pstmt.setLong(2, id);
public void update(long id, long runid, Date lastUpdate) {
Transaction txn = Transaction.currentTxn();
PreparedStatement pstmt = null;
try {
txn.start();
pstmt = txn.prepareAutoCloseStatement("update mshost set last_update=?, removed=null, alert_count=0 where id=? and runid=?");
pstmt.setString(1, DateUtil.getDateDisplayString(TimeZone.getTimeZone("GMT"), lastUpdate));
pstmt.setLong(2, id);
pstmt.setLong(3, runid);
int count = pstmt.executeUpdate();
int count = pstmt.executeUpdate();
txn.commit();
if(count < 1) {
throw new CloudRuntimeException("Invalid cluster session detected", new ClusterInvalidSessionException("runid " + runid + " is no longer valid"));
}
} catch(Exception e) {
s_logger.warn("Unexpected exception, ", e);
}
} catch(Exception e) {
s_logger.warn("Unexpected exception, ", e);
}
}
@Override
public List<ManagementServerHostVO> getActiveList(Date cutTime) {
SearchCriteria<ManagementServerHostVO> sc = ActiveSearch.create();
sc.setParameters("lastUpdateTime", cutTime);
return listIncludingRemovedBy(sc);
SearchCriteria<ManagementServerHostVO> sc = ActiveSearch.create();
sc.setParameters("lastUpdateTime", cutTime);
return listIncludingRemovedBy(sc);
}
@Override
@ -161,30 +161,30 @@ public class ManagementServerHostDaoImpl extends GenericDaoBase<ManagementServer
}
@Override
@DB
public int increaseAlertCount(long id) {
Transaction txn = Transaction.currentTxn();
@DB
public int increaseAlertCount(long id) {
Transaction txn = Transaction.currentTxn();
PreparedStatement pstmt = null;
int changedRows = 0;
try {
txn.start();
pstmt = txn.prepareAutoCloseStatement("update mshost set alert_count=alert_count+1 where id=? and alert_count=0");
pstmt.setLong(1, id);
changedRows = pstmt.executeUpdate();
txn.commit();
} catch(Exception e) {
s_logger.warn("Unexpected exception, ", e);
txn.rollback();
int changedRows = 0;
try {
txn.start();
pstmt = txn.prepareAutoCloseStatement("update mshost set alert_count=alert_count+1 where id=? and alert_count=0");
pstmt.setLong(1, id);
changedRows = pstmt.executeUpdate();
txn.commit();
} catch(Exception e) {
s_logger.warn("Unexpected exception, ", e);
txn.rollback();
}
return changedRows;
}
protected ManagementServerHostDaoImpl() {
MsIdSearch = createSearchBuilder();
MsIdSearch.and("msid", MsIdSearch.entity().getMsid(), SearchCriteria.Op.EQ);
return changedRows;
}
protected ManagementServerHostDaoImpl() {
MsIdSearch = createSearchBuilder();
MsIdSearch.and("msid", MsIdSearch.entity().getMsid(), SearchCriteria.Op.EQ);
MsIdSearch.done();
ActiveSearch = createSearchBuilder();
@ -252,7 +252,7 @@ public class ManagementServerHostDaoImpl extends GenericDaoBase<ManagementServer
}
return orphanList;
}
}
@Override
public ManagementServerHostVO findOneInUpState(Filter filter) {
@ -267,4 +267,4 @@ public class ManagementServerHostDaoImpl extends GenericDaoBase<ManagementServer
return null;
}
}
}

View File

@ -10,31 +10,31 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.cluster.dao;
import java.util.Date;
import java.util.List;
import com.cloud.cluster.CheckPointVO;
import com.cloud.utils.db.GenericDao;
public interface StackMaidDao extends GenericDao<CheckPointVO, Long> {
public long pushCleanupDelegate(long msid, int seq, String delegateClzName, Object context);
public CheckPointVO popCleanupDelegate(long msid);
public void clearStack(long msid);
public List<CheckPointVO> listLeftoversByMsid(long msid);
public List<CheckPointVO> listLeftoversByCutTime(Date cutTime);
/**
* Take over the task items of another management server and clean them up.
*
* @param takeOverMsid management server id to take over.
* @param selfId the management server id of this node.
* @return list of tasks to take over.
*/
boolean takeover(long takeOverMsid, long selfId);
package com.cloud.cluster.dao;
import java.util.Date;
import java.util.List;
import com.cloud.cluster.CheckPointVO;
import com.cloud.utils.db.GenericDao;
public interface StackMaidDao extends GenericDao<CheckPointVO, Long> {
public long pushCleanupDelegate(long msid, int seq, String delegateClzName, Object context);
public CheckPointVO popCleanupDelegate(long msid);
public void clearStack(long msid);
public List<CheckPointVO> listLeftoversByMsid(long msid);
public List<CheckPointVO> listLeftoversByCutTime(Date cutTime);
/**
* Take over the task items of another management server and clean them up.
*
* @param takeOverMsid management server id to take over.
* @param selfId the management server id of this node.
* @return list of tasks to take over.
*/
boolean takeover(long takeOverMsid, long selfId);
List<CheckPointVO> listCleanupTasks(long selfId);
List<CheckPointVO> listLeftoversByCutTime(Date cutTime, long msid);
}
List<CheckPointVO> listLeftoversByCutTime(Date cutTime, long msid);
}

View File

@ -10,164 +10,164 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.cluster.dao;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.TimeZone;
import javax.ejb.Local;
import org.apache.log4j.Logger;
import com.cloud.cluster.CheckPointVO;
import com.cloud.serializer.SerializerHelper;
import com.cloud.utils.DateUtil;
import com.cloud.utils.db.DB;
import com.cloud.utils.db.Filter;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.SearchCriteria.Op;
import com.cloud.utils.db.Transaction;
@Local(value = { StackMaidDao.class }) @DB(txn=false)
public class StackMaidDaoImpl extends GenericDaoBase<CheckPointVO, Long> implements StackMaidDao {
private static final Logger s_logger = Logger.getLogger(StackMaidDaoImpl.class);
private SearchBuilder<CheckPointVO> popSearch;
private SearchBuilder<CheckPointVO> clearSearch;
private final SearchBuilder<CheckPointVO> AllFieldsSearch;
public StackMaidDaoImpl() {
popSearch = createSearchBuilder();
popSearch.and("msid", popSearch.entity().getMsid(), SearchCriteria.Op.EQ);
popSearch.and("threadId", popSearch.entity().getThreadId(), SearchCriteria.Op.EQ);
clearSearch = createSearchBuilder();
clearSearch.and("msid", clearSearch.entity().getMsid(), SearchCriteria.Op.EQ);
AllFieldsSearch = createSearchBuilder();
AllFieldsSearch.and("msid", AllFieldsSearch.entity().getMsid(), Op.EQ);
AllFieldsSearch.and("thread", AllFieldsSearch.entity().getThreadId(), Op.EQ);
AllFieldsSearch.done();
}
@Override
public boolean takeover(long takeOverMsid, long selfId) {
CheckPointVO task = createForUpdate();
task.setMsid(selfId);
task.setThreadId(0);
SearchCriteria<CheckPointVO> sc = AllFieldsSearch.create();
sc.setParameters("msid", takeOverMsid);
return update(task, sc) > 0;
}
@Override
public List<CheckPointVO> listCleanupTasks(long msId) {
SearchCriteria<CheckPointVO> sc = AllFieldsSearch.create();
sc.setParameters("msid", msId);
sc.setParameters("thread", 0);
return this.search(sc, null);
}
@Override
public long pushCleanupDelegate(long msid, int seq, String delegateClzName, Object context) {
CheckPointVO delegateItem = new CheckPointVO();
delegateItem.setMsid(msid);
delegateItem.setThreadId(Thread.currentThread().getId());
delegateItem.setSeq(seq);
delegateItem.setDelegate(delegateClzName);
delegateItem.setContext(SerializerHelper.toSerializedStringOld(context));
delegateItem.setCreated(DateUtil.currentGMTTime());
super.persist(delegateItem);
return delegateItem.getId();
}
@Override
public CheckPointVO popCleanupDelegate(long msid) {
SearchCriteria<CheckPointVO> sc = popSearch.create();
sc.setParameters("msid", msid);
sc.setParameters("threadId", Thread.currentThread().getId());
Filter filter = new Filter(CheckPointVO.class, "seq", false, 0L, (long)1);
List<CheckPointVO> l = listIncludingRemovedBy(sc, filter);
if(l != null && l.size() > 0) {
expunge(l.get(0).getId());
return l.get(0);
}
return null;
}
@Override
public void clearStack(long msid) {
SearchCriteria<CheckPointVO> sc = clearSearch.create();
sc.setParameters("msid", msid);
expunge(sc);
}
@Override
@DB
public List<CheckPointVO> listLeftoversByMsid(long msid) {
List<CheckPointVO> l = new ArrayList<CheckPointVO>();
String sql = "select * from stack_maid where msid=? order by msid asc, thread_id asc, seq desc";
Transaction txn = Transaction.currentTxn();
PreparedStatement pstmt = null;
try {
pstmt = txn.prepareAutoCloseStatement(sql);
pstmt.setLong(1, msid);
ResultSet rs = pstmt.executeQuery();
while(rs.next()) {
l.add(toEntityBean(rs, false));
}
} catch (SQLException e) {
s_logger.error("unexcpected exception " + e.getMessage(), e);
} catch (Throwable e) {
s_logger.error("unexcpected exception " + e.getMessage(), e);
} finally {
txn.close();
}
return l;
}
@Override
@DB
public List<CheckPointVO> listLeftoversByCutTime(Date cutTime) {
List<CheckPointVO> l = new ArrayList<CheckPointVO>();
String sql = "select * from stack_maid where created < ? order by msid asc, thread_id asc, seq desc";
Transaction txn = Transaction.open(Transaction.CLOUD_DB);
PreparedStatement pstmt = null;
try {
pstmt = txn.prepareAutoCloseStatement(sql);
String gmtCutTime = DateUtil.getDateDisplayString(TimeZone.getTimeZone("GMT"), cutTime);
pstmt.setString(1, gmtCutTime);
ResultSet rs = pstmt.executeQuery();
while(rs.next()) {
l.add(toEntityBean(rs, false));
}
} catch (SQLException e) {
s_logger.error("unexcpected exception " + e.getMessage(), e);
} catch (Throwable e) {
s_logger.error("unexcpected exception " + e.getMessage(), e);
} finally {
txn.close();
}
return l;
package com.cloud.cluster.dao;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.TimeZone;
import javax.ejb.Local;
import org.apache.log4j.Logger;
import com.cloud.cluster.CheckPointVO;
import com.cloud.serializer.SerializerHelper;
import com.cloud.utils.DateUtil;
import com.cloud.utils.db.DB;
import com.cloud.utils.db.Filter;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.SearchCriteria.Op;
import com.cloud.utils.db.Transaction;
@Local(value = { StackMaidDao.class }) @DB(txn=false)
public class StackMaidDaoImpl extends GenericDaoBase<CheckPointVO, Long> implements StackMaidDao {
private static final Logger s_logger = Logger.getLogger(StackMaidDaoImpl.class);
private SearchBuilder<CheckPointVO> popSearch;
private SearchBuilder<CheckPointVO> clearSearch;
private final SearchBuilder<CheckPointVO> AllFieldsSearch;
public StackMaidDaoImpl() {
popSearch = createSearchBuilder();
popSearch.and("msid", popSearch.entity().getMsid(), SearchCriteria.Op.EQ);
popSearch.and("threadId", popSearch.entity().getThreadId(), SearchCriteria.Op.EQ);
clearSearch = createSearchBuilder();
clearSearch.and("msid", clearSearch.entity().getMsid(), SearchCriteria.Op.EQ);
AllFieldsSearch = createSearchBuilder();
AllFieldsSearch.and("msid", AllFieldsSearch.entity().getMsid(), Op.EQ);
AllFieldsSearch.and("thread", AllFieldsSearch.entity().getThreadId(), Op.EQ);
AllFieldsSearch.done();
}
@Override
public boolean takeover(long takeOverMsid, long selfId) {
CheckPointVO task = createForUpdate();
task.setMsid(selfId);
task.setThreadId(0);
SearchCriteria<CheckPointVO> sc = AllFieldsSearch.create();
sc.setParameters("msid", takeOverMsid);
return update(task, sc) > 0;
}
@Override
public List<CheckPointVO> listCleanupTasks(long msId) {
SearchCriteria<CheckPointVO> sc = AllFieldsSearch.create();
sc.setParameters("msid", msId);
sc.setParameters("thread", 0);
return this.search(sc, null);
}
@Override
public long pushCleanupDelegate(long msid, int seq, String delegateClzName, Object context) {
CheckPointVO delegateItem = new CheckPointVO();
delegateItem.setMsid(msid);
delegateItem.setThreadId(Thread.currentThread().getId());
delegateItem.setSeq(seq);
delegateItem.setDelegate(delegateClzName);
delegateItem.setContext(SerializerHelper.toSerializedStringOld(context));
delegateItem.setCreated(DateUtil.currentGMTTime());
super.persist(delegateItem);
return delegateItem.getId();
}
@Override
public CheckPointVO popCleanupDelegate(long msid) {
SearchCriteria<CheckPointVO> sc = popSearch.create();
sc.setParameters("msid", msid);
sc.setParameters("threadId", Thread.currentThread().getId());
Filter filter = new Filter(CheckPointVO.class, "seq", false, 0L, (long)1);
List<CheckPointVO> l = listIncludingRemovedBy(sc, filter);
if(l != null && l.size() > 0) {
expunge(l.get(0).getId());
return l.get(0);
}
return null;
}
@Override
public void clearStack(long msid) {
SearchCriteria<CheckPointVO> sc = clearSearch.create();
sc.setParameters("msid", msid);
expunge(sc);
}
@Override
@DB
public List<CheckPointVO> listLeftoversByMsid(long msid) {
List<CheckPointVO> l = new ArrayList<CheckPointVO>();
String sql = "select * from stack_maid where msid=? order by msid asc, thread_id asc, seq desc";
Transaction txn = Transaction.currentTxn();
PreparedStatement pstmt = null;
try {
pstmt = txn.prepareAutoCloseStatement(sql);
pstmt.setLong(1, msid);
ResultSet rs = pstmt.executeQuery();
while(rs.next()) {
l.add(toEntityBean(rs, false));
}
} catch (SQLException e) {
s_logger.error("unexcpected exception " + e.getMessage(), e);
} catch (Throwable e) {
s_logger.error("unexcpected exception " + e.getMessage(), e);
} finally {
txn.close();
}
return l;
}
@Override
@DB
public List<CheckPointVO> listLeftoversByCutTime(Date cutTime) {
List<CheckPointVO> l = new ArrayList<CheckPointVO>();
String sql = "select * from stack_maid where created < ? order by msid asc, thread_id asc, seq desc";
Transaction txn = Transaction.open(Transaction.CLOUD_DB);
PreparedStatement pstmt = null;
try {
pstmt = txn.prepareAutoCloseStatement(sql);
String gmtCutTime = DateUtil.getDateDisplayString(TimeZone.getTimeZone("GMT"), cutTime);
pstmt.setString(1, gmtCutTime);
ResultSet rs = pstmt.executeQuery();
while(rs.next()) {
l.add(toEntityBean(rs, false));
}
} catch (SQLException e) {
s_logger.error("unexcpected exception " + e.getMessage(), e);
} catch (Throwable e) {
s_logger.error("unexcpected exception " + e.getMessage(), e);
} finally {
txn.close();
}
return l;
}
@Override
@ -197,6 +197,6 @@ public class StackMaidDaoImpl extends GenericDaoBase<CheckPointVO, Long> impleme
txn.close();
}
return l;
}
}
}
}

View File

@ -10,53 +10,53 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.configuration.dao;
package com.cloud.configuration.dao;
import java.util.Map;
import com.cloud.configuration.ConfigurationVO;
import com.cloud.utils.db.GenericDao;
public interface ConfigurationDao extends GenericDao<ConfigurationVO, String> {
/**
*
* 1. params passed in.
* 2. configuration for the instance.
* 3. configuration for the DEFAULT instance.
*
* @param params parameters from the components.xml which will override the database values.
* @return a consolidated look at the configuration parameters.
*/
public interface ConfigurationDao extends GenericDao<ConfigurationVO, String> {
/**
*
* 1. params passed in.
* 2. configuration for the instance.
* 3. configuration for the DEFAULT instance.
*
* @param params parameters from the components.xml which will override the database values.
* @return a consolidated look at the configuration parameters.
*/
public Map<String, String> getConfiguration(String instance, Map<String, ? extends Object> params);
public Map<String, String> getConfiguration(Map<String, ? extends Object> params);
public Map<String, String> getConfiguration();
/**
* Updates a configuration value
* @param value the new value
* @return true if success, false if failure
*/
public boolean update(String name, String value);
/**
* Gets the value for the specified configuration name
* @return value
*/
public Map<String, String> getConfiguration();
/**
* Updates a configuration value
* @param value the new value
* @return true if success, false if failure
*/
public boolean update(String name, String value);
/**
* Gets the value for the specified configuration name
* @return value
*/
public String getValue(String name);
public String getValueAndInitIfNotExist(String name, String category, String initValue);
/**
* returns whether or not this is a premium configuration
* @return true if premium configuration, false otherwise
*/
*/
boolean isPremium();
ConfigurationVO findByName(String name);
boolean update(String name, String category, String value);
}
}

View File

@ -10,8 +10,8 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.configuration.dao;
package com.cloud.configuration.dao;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.util.HashMap;
@ -31,65 +31,65 @@ import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.Transaction;
import com.cloud.utils.exception.CloudRuntimeException;
@Local(value={ConfigurationDao.class})
public class ConfigurationDaoImpl extends GenericDaoBase<ConfigurationVO, String> implements ConfigurationDao {
@Local(value={ConfigurationDao.class})
public class ConfigurationDaoImpl extends GenericDaoBase<ConfigurationVO, String> implements ConfigurationDao {
private static final Logger s_logger = Logger.getLogger(ConfigurationDaoImpl.class);
private Map<String, String> _configs = null;
private boolean _premium;
final SearchBuilder<ConfigurationVO> InstanceSearch;
final SearchBuilder<ConfigurationVO> NameSearch;
public static final String UPDATE_CONFIGURATION_SQL = "UPDATE configuration SET value = ? WHERE name = ?";
private boolean _premium;
final SearchBuilder<ConfigurationVO> InstanceSearch;
final SearchBuilder<ConfigurationVO> NameSearch;
public static final String UPDATE_CONFIGURATION_SQL = "UPDATE configuration SET value = ? WHERE name = ?";
public ConfigurationDaoImpl () {
InstanceSearch = createSearchBuilder();
InstanceSearch.and("instance", InstanceSearch.entity().getInstance(), SearchCriteria.Op.EQ);
NameSearch = createSearchBuilder();
NameSearch.and("name", NameSearch.entity().getName(), SearchCriteria.Op.EQ);
}
}
@Override
public boolean isPremium() {
return _premium;
}
@Override
public Map<String, String> getConfiguration(String instance, Map<String, ? extends Object> params) {
if (_configs == null) {
_configs = new HashMap<String, String>();
SearchCriteria<ConfigurationVO> sc = InstanceSearch.create();
sc.setParameters("instance", "DEFAULT");
List<ConfigurationVO> configurations = listIncludingRemovedBy(sc);
for (ConfigurationVO config : configurations) {
if (config.getValue() != null)
_configs.put(config.getName(), config.getValue());
}
@Override
public Map<String, String> getConfiguration(String instance, Map<String, ? extends Object> params) {
if (_configs == null) {
_configs = new HashMap<String, String>();
SearchCriteria<ConfigurationVO> sc = InstanceSearch.create();
sc.setParameters("instance", "DEFAULT");
List<ConfigurationVO> configurations = listIncludingRemovedBy(sc);
for (ConfigurationVO config : configurations) {
if (config.getValue() != null)
_configs.put(config.getName(), config.getValue());
}
if(!"DEFAULT".equals(instance)){
//Default instance params are already added, need not add again
sc = InstanceSearch.create();
sc.setParameters("instance", instance);
configurations = listIncludingRemovedBy(sc);
for (ConfigurationVO config : configurations) {
if (config.getValue() != null)
_configs.put(config.getName(), config.getValue());
//Default instance params are already added, need not add again
sc = InstanceSearch.create();
sc.setParameters("instance", instance);
configurations = listIncludingRemovedBy(sc);
for (ConfigurationVO config : configurations) {
if (config.getValue() != null)
_configs.put(config.getName(), config.getValue());
}
}
}
mergeConfigs(_configs, params);
return _configs;
}
}
}
mergeConfigs(_configs, params);
return _configs;
}
@Override
public Map<String, String> getConfiguration(Map<String, ? extends Object> params) {
return getConfiguration("DEFAULT", params);
@ -100,37 +100,37 @@ public class ConfigurationDaoImpl extends GenericDaoBase<ConfigurationVO, String
return getConfiguration("DEFAULT", new HashMap<String, Object>());
}
protected void mergeConfigs(Map<String, String> dbParams, Map<String, ? extends Object> xmlParams) {
for (Map.Entry<String, ? extends Object> param : xmlParams.entrySet()) {
dbParams.put(param.getKey(), (String)param.getValue());
}
}
protected void mergeConfigs(Map<String, String> dbParams, Map<String, ? extends Object> xmlParams) {
for (Map.Entry<String, ? extends Object> param : xmlParams.entrySet()) {
dbParams.put(param.getKey(), (String)param.getValue());
}
}
@Override
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
super.configure(name, params);
@Override
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
super.configure(name, params);
Object premium = params.get("premium");
_premium = (premium != null) && ((String) premium).equals("true");
return true;
}
return true;
}
//Use update method with category instead
@Override @Deprecated
public boolean update(String name, String value) {
Transaction txn = Transaction.currentTxn();
try {
PreparedStatement stmt = txn.prepareStatement(UPDATE_CONFIGURATION_SQL);
stmt.setString(1, value);
stmt.setString(2, name);
stmt.executeUpdate();
return true;
} catch (Exception e) {
s_logger.warn("Unable to update Configuration Value", e);
}
return false;
}
@Override @Deprecated
public boolean update(String name, String value) {
Transaction txn = Transaction.currentTxn();
try {
PreparedStatement stmt = txn.prepareStatement(UPDATE_CONFIGURATION_SQL);
stmt.setString(1, value);
stmt.setString(2, name);
stmt.executeUpdate();
return true;
} catch (Exception e) {
s_logger.warn("Unable to update Configuration Value", e);
}
return false;
}
@Override
public boolean update(String name, String category, String value) {
@ -147,11 +147,11 @@ public class ConfigurationDaoImpl extends GenericDaoBase<ConfigurationVO, String
}
return false;
}
@Override
public String getValue(String name) {
@Override
public String getValue(String name) {
ConfigurationVO config = findByName(name);
return (config == null) ? null : config.getValue();
return (config == null) ? null : config.getValue();
}
@Override
@ -208,4 +208,4 @@ public class ConfigurationDaoImpl extends GenericDaoBase<ConfigurationVO, String
return findOneIncludingRemovedBy(sc);
}
}
}

View File

@ -341,4 +341,4 @@ public class AgentBasedConsoleProxyManager implements ConsoleProxyManager, Virtu
@Override
public void finalizeExpunge(ConsoleProxyVO proxy) {
}
}
}

View File

@ -10,58 +10,58 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.consoleproxy;
package com.cloud.consoleproxy;
import com.cloud.utils.events.EventArgs;
import com.cloud.vm.ConsoleProxyVO;
public class ConsoleProxyAlertEventArgs extends EventArgs {
private static final long serialVersionUID = 23773987551479885L;
public static final int PROXY_CREATED = 1;
public static final int PROXY_UP = 2;
public static final int PROXY_DOWN = 3;
public static final int PROXY_CREATE_FAILURE = 4;
public static final int PROXY_START_FAILURE = 5;
public static final int PROXY_FIREWALL_ALERT = 6;
public static final int PROXY_STORAGE_ALERT = 7;
public static final int PROXY_REBOOTED = 8;
private int type;
private long zoneId;
private long proxyId;
private ConsoleProxyVO proxy;
private String message;
public ConsoleProxyAlertEventArgs(int type, long zoneId,
long proxyId, ConsoleProxyVO proxy, String message) {
super(ConsoleProxyManager.ALERT_SUBJECT);
this.type = type;
this.zoneId = zoneId;
this.proxyId = proxyId;
this.proxy = proxy;
this.message = message;
}
public int getType() {
return type;
}
public long getZoneId() {
return zoneId;
}
public long getProxyId() {
return proxyId;
}
public ConsoleProxyVO getProxy() {
return proxy;
}
public String getMessage() {
return message;
}
}
public class ConsoleProxyAlertEventArgs extends EventArgs {
private static final long serialVersionUID = 23773987551479885L;
public static final int PROXY_CREATED = 1;
public static final int PROXY_UP = 2;
public static final int PROXY_DOWN = 3;
public static final int PROXY_CREATE_FAILURE = 4;
public static final int PROXY_START_FAILURE = 5;
public static final int PROXY_FIREWALL_ALERT = 6;
public static final int PROXY_STORAGE_ALERT = 7;
public static final int PROXY_REBOOTED = 8;
private int type;
private long zoneId;
private long proxyId;
private ConsoleProxyVO proxy;
private String message;
public ConsoleProxyAlertEventArgs(int type, long zoneId,
long proxyId, ConsoleProxyVO proxy, String message) {
super(ConsoleProxyManager.ALERT_SUBJECT);
this.type = type;
this.zoneId = zoneId;
this.proxyId = proxyId;
this.proxy = proxy;
this.message = message;
}
public int getType() {
return type;
}
public long getZoneId() {
return zoneId;
}
public long getProxyId() {
return proxyId;
}
public ConsoleProxyVO getProxy() {
return proxy;
}
public String getMessage() {
return message;
}
}

View File

@ -10,8 +10,8 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.consoleproxy;
package com.cloud.consoleproxy;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
@ -27,20 +27,20 @@ import com.cloud.utils.component.ComponentLocator;
import com.cloud.vm.ConsoleProxyVO;
import edu.emory.mathcs.backport.java.util.Collections;
@Local(value={ConsoleProxyAllocator.class})
public class ConsoleProxyBalanceAllocator implements ConsoleProxyAllocator {
private String _name;
private final Random _rand = new Random(System.currentTimeMillis());
@Override
public ConsoleProxyVO allocProxy(List<ConsoleProxyVO> candidates, final Map<Long, Integer> loadInfo, long dataCenterId) {
if(candidates != null) {
List<ConsoleProxyVO> allocationList = new ArrayList<ConsoleProxyVO>();
for(ConsoleProxyVO proxy : candidates) {
allocationList.add(proxy);
@Local(value={ConsoleProxyAllocator.class})
public class ConsoleProxyBalanceAllocator implements ConsoleProxyAllocator {
private String _name;
private final Random _rand = new Random(System.currentTimeMillis());
@Override
public ConsoleProxyVO allocProxy(List<ConsoleProxyVO> candidates, final Map<Long, Integer> loadInfo, long dataCenterId) {
if(candidates != null) {
List<ConsoleProxyVO> allocationList = new ArrayList<ConsoleProxyVO>();
for(ConsoleProxyVO proxy : candidates) {
allocationList.add(proxy);
}
Collections.sort(candidates, new Comparator<ConsoleProxyVO> () {
@ -63,40 +63,40 @@ public class ConsoleProxyBalanceAllocator implements ConsoleProxyAllocator {
return 1;
}
}
});
if(allocationList.size() > 0)
return allocationList.get(0);
}
return null;
}
@Override
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
_name = name;
ComponentLocator locator = ComponentLocator.getCurrentLocator();
ConfigurationDao configDao = locator.getDao(ConfigurationDao.class);
if (configDao == null) {
throw new ConfigurationException("Unable to get the configuration dao.");
}
Map<String, String> configs = configDao.getConfiguration();
return true;
}
@Override
public String getName() {
return _name;
}
@Override
public boolean start() {
return true;
}
@Override
public boolean stop() {
return true;
}
}
});
if(allocationList.size() > 0)
return allocationList.get(0);
}
return null;
}
@Override
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
_name = name;
ComponentLocator locator = ComponentLocator.getCurrentLocator();
ConfigurationDao configDao = locator.getDao(ConfigurationDao.class);
if (configDao == null) {
throw new ConfigurationException("Unable to get the configuration dao.");
}
Map<String, String> configs = configDao.getConfiguration();
return true;
}
@Override
public String getName() {
return _name;
}
@Override
public boolean start() {
return true;
}
@Override
public boolean stop() {
return true;
}
}

View File

@ -10,8 +10,8 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.dc;
package com.cloud.dc;
import java.util.Date;
import java.util.Map;
import java.util.UUID;
@ -32,37 +32,37 @@ import com.cloud.network.Network.Provider;
import com.cloud.org.Grouping;
import com.cloud.utils.NumbersUtil;
import com.cloud.utils.db.GenericDao;
@Entity
@Table(name="data_center")
public class DataCenterVO implements DataCenter, Identity {
@Id
@GeneratedValue(strategy=GenerationType.IDENTITY)
@Column(name="id")
private long id;
@Column(name="name")
private String name = null;
@Column(name="description")
private String description = null;
@Column(name="dns1")
private String dns1 = null;
@Column(name="dns2")
private String dns2 = null;
@Column(name="internal_dns1")
private String internalDns1 = null;
@Column(name="internal_dns2")
private String internalDns2 = null;
@Column(name="router_mac_address", updatable = false, nullable=false)
private String routerMacAddress = "02:00:00:00:00:01";
@Entity
@Table(name="data_center")
public class DataCenterVO implements DataCenter, Identity {
@Id
@GeneratedValue(strategy=GenerationType.IDENTITY)
@Column(name="id")
private long id;
@Column(name="name")
private String name = null;
@Column(name="description")
private String description = null;
@Column(name="dns1")
private String dns1 = null;
@Column(name="dns2")
private String dns2 = null;
@Column(name="internal_dns1")
private String internalDns1 = null;
@Column(name="internal_dns2")
private String internalDns2 = null;
@Column(name="router_mac_address", updatable = false, nullable=false)
private String routerMacAddress = "02:00:00:00:00:01";
@Column(name="guest_network_cidr")
private String guestNetworkCidr = null;
@ -169,13 +169,13 @@ public class DataCenterVO implements DataCenter, Identity {
}
public DataCenterVO(long id, String name, String description, String dns1, String dns2, String dns3, String dns4,String guestCidr, String domain, Long domainId, NetworkType zoneType, String zoneToken, String domainSuffix) {
this(name, description, dns1, dns2, dns3, dns4, guestCidr, domain, domainId, zoneType, zoneToken, domainSuffix, false);
this(name, description, dns1, dns2, dns3, dns4, guestCidr, domain, domainId, zoneType, zoneToken, domainSuffix, false);
this.id = id;
this.allocationState = Grouping.AllocationState.Enabled;
this.uuid = UUID.randomUUID().toString();
}
public DataCenterVO(String name, String description, String dns1, String dns2, String dns3, String dns4, String guestCidr, String domain, Long domainId, NetworkType zoneType, String zoneToken, String domainSuffix, boolean securityGroupEnabled) {
this.uuid = UUID.randomUUID().toString();
}
public DataCenterVO(String name, String description, String dns1, String dns2, String dns3, String dns4, String guestCidr, String domain, Long domainId, NetworkType zoneType, String zoneToken, String domainSuffix, boolean securityGroupEnabled) {
this.name = name;
this.description = description;
this.dns1 = dns1;
@ -246,72 +246,72 @@ public class DataCenterVO implements DataCenter, Identity {
public void setDomainId(Long domainId) {
this.domainId = domainId;
}
}
@Override
public String getDescription() {
return description;
}
public String getRouterMacAddress() {
return routerMacAddress;
}
public String getDescription() {
return description;
}
public String getRouterMacAddress() {
return routerMacAddress;
}
@Override
public String getDns1() {
return dns1;
}
public String getDns1() {
return dns1;
}
@Override
public String getDns2() {
return dns2;
}
public String getDns2() {
return dns2;
}
@Override
public String getInternalDns1() {
return internalDns1;
}
public String getInternalDns1() {
return internalDns1;
}
@Override
public String getInternalDns2() {
return internalDns2;
}
protected DataCenterVO() {
}
public String getInternalDns2() {
return internalDns2;
}
protected DataCenterVO() {
}
@Override
public long getId() {
return id;
}
public long getId() {
return id;
}
@Override
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public void setDns1(String dns1) {
this.dns1 = dns1;
}
public void setDns2(String dns2) {
this.dns2 = dns2;
}
public void setInternalDns1(String dns3) {
this.internalDns1 = dns3;
}
public void setInternalDns2(String dns4) {
this.internalDns2 = dns4;
}
public void setRouterMacAddress(String routerMacAddress) {
this.routerMacAddress = routerMacAddress;
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public void setDns1(String dns1) {
this.dns1 = dns1;
}
public void setDns2(String dns2) {
this.dns2 = dns2;
}
public void setInternalDns1(String dns3) {
this.internalDns1 = dns3;
}
public void setInternalDns2(String dns4) {
this.internalDns2 = dns4;
}
public void setRouterMacAddress(String routerMacAddress) {
this.routerMacAddress = routerMacAddress;
}
@Override
@ -406,4 +406,4 @@ public class DataCenterVO implements DataCenter, Identity {
public void setUuid(String uuid) {
this.uuid = uuid;
}
}
}

View File

@ -10,8 +10,8 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.dc;
package com.cloud.dc;
import java.util.Date;
import java.util.UUID;
@ -28,101 +28,101 @@ import com.cloud.api.Identity;
import com.cloud.org.Grouping;
import com.cloud.utils.NumbersUtil;
import com.cloud.utils.db.GenericDao;
@Entity
@Table(name = "host_pod_ref")
public class HostPodVO implements Pod, Identity {
@Id
@GeneratedValue(strategy = GenerationType.IDENTITY)
long id;
@Column(name = "name")
private String name = null;
@Column(name = "data_center_id")
@Entity
@Table(name = "host_pod_ref")
public class HostPodVO implements Pod, Identity {
@Id
@GeneratedValue(strategy = GenerationType.IDENTITY)
long id;
@Column(name = "name")
private String name = null;
@Column(name = "data_center_id")
private long dataCenterId;
@Column(name = "gateway")
private String gateway;
@Column(name = "cidr_address")
private String cidrAddress;
@Column(name = "cidr_size")
private int cidrSize;
@Column(name = "description")
private String gateway;
@Column(name = "cidr_address")
private String cidrAddress;
@Column(name = "cidr_size")
private int cidrSize;
@Column(name = "description")
private String description;
@Column(name="allocation_state")
@Enumerated(value=EnumType.STRING)
AllocationState allocationState;
AllocationState allocationState;
@Column(name = "external_dhcp")
private Boolean externalDhcp;
@Column(name=GenericDao.REMOVED_COLUMN)
private Date removed;
@Column(name = "uuid")
private String uuid;
public HostPodVO(String name, long dcId, String gateway, String cidrAddress, int cidrSize, String description) {
this.name = name;
public HostPodVO(String name, long dcId, String gateway, String cidrAddress, int cidrSize, String description) {
this.name = name;
this.dataCenterId = dcId;
this.gateway = gateway;
this.cidrAddress = cidrAddress;
this.cidrSize = cidrSize;
this.gateway = gateway;
this.cidrAddress = cidrAddress;
this.cidrSize = cidrSize;
this.description = description;
this.allocationState = Grouping.AllocationState.Enabled;
this.externalDhcp = false;
this.uuid = UUID.randomUUID().toString();
}
/*
* public HostPodVO(String name, long dcId) { this(null, name, dcId); }
*/
protected HostPodVO() {
this.uuid = UUID.randomUUID().toString();
}
}
/*
* public HostPodVO(String name, long dcId) { this(null, name, dcId); }
*/
protected HostPodVO() {
this.uuid = UUID.randomUUID().toString();
}
@Override
public long getId() {
return id;
}
public long getDataCenterId() {
return dataCenterId;
}
public void setDataCenterId(long dataCenterId) {
this.dataCenterId = dataCenterId;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public long getId() {
return id;
}
public long getDataCenterId() {
return dataCenterId;
}
public void setDataCenterId(long dataCenterId) {
this.dataCenterId = dataCenterId;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
@Override
public String getCidrAddress() {
return cidrAddress;
}
public void setCidrAddress(String cidrAddress) {
this.cidrAddress = cidrAddress;
}
public String getCidrAddress() {
return cidrAddress;
}
public void setCidrAddress(String cidrAddress) {
this.cidrAddress = cidrAddress;
}
@Override
public int getCidrSize() {
return cidrSize;
}
public void setCidrSize(int cidrSize) {
this.cidrSize = cidrSize;
public int getCidrSize() {
return cidrSize;
}
public void setCidrSize(int cidrSize) {
this.cidrSize = cidrSize;
}
@Override
@ -132,14 +132,14 @@ public class HostPodVO implements Pod, Identity {
public void setGateway(String gateway) {
this.gateway = gateway;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
public AllocationState getAllocationState() {
@ -188,5 +188,5 @@ public class HostPodVO implements Pod, Identity {
public void setUuid(String uuid) {
this.uuid = uuid;
}
}
}
}

View File

@ -10,105 +10,105 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.dc;
import java.util.Date;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import javax.persistence.Id;
import javax.persistence.PrimaryKeyJoinColumn;
import javax.persistence.SecondaryTable;
import javax.persistence.SecondaryTables;
import javax.persistence.Table;
import javax.persistence.Temporal;
import javax.persistence.TemporalType;
@Entity
@Table(name="op_dc_storage_network_ip_address")
@SecondaryTables({@SecondaryTable(name = "dc_storage_network_ip_range", pkJoinColumns = { @PrimaryKeyJoinColumn(name = "range_id", referencedColumnName = "id")})})
public class StorageNetworkIpAddressVO {
@Id
@GeneratedValue(strategy = GenerationType.IDENTITY)
@Column(name = "id")
long id;
@Column(name = "range_id")
long rangeId;
@Column(name = "ip_address", updatable = false, nullable = false)
String ipAddress;
@Column(name = "taken")
@Temporal(value = TemporalType.TIMESTAMP)
private Date takenAt;
@Column(name = "netmask", table = "dc_storage_network_ip_range", insertable = false, updatable = false)
private String netmask;
@Column(name = "mac_address")
long mac;
@Column(name = "vlan", table = "dc_storage_network_ip_range", insertable = false, updatable = false)
Integer vlan;
@Column(name = "gateway", table = "dc_storage_network_ip_range", insertable = false, updatable = false)
String gateway;
protected StorageNetworkIpAddressVO() {
}
public Long getId() {
return id;
}
public void setTakenAt(Date takenDate) {
this.takenAt = takenDate;
}
public String getIpAddress() {
return ipAddress;
}
public void setIpAddress(String ip) {
this.ipAddress = ip;
}
public Date getTakenAt() {
return takenAt;
}
public long getRangeId() {
return rangeId;
}
public void setRangeId(long id) {
this.rangeId = id;
}
public long getMac() {
return mac;
}
public void setMac(long mac) {
this.mac = mac;
}
public String getNetmask() {
return netmask;
}
public void setNetmask(String netmask) {
this.netmask = netmask;
}
public Integer getVlan() {
return vlan;
}
public String getGateway() {
return gateway;
}
}
package com.cloud.dc;
import java.util.Date;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import javax.persistence.Id;
import javax.persistence.PrimaryKeyJoinColumn;
import javax.persistence.SecondaryTable;
import javax.persistence.SecondaryTables;
import javax.persistence.Table;
import javax.persistence.Temporal;
import javax.persistence.TemporalType;
@Entity
@Table(name="op_dc_storage_network_ip_address")
@SecondaryTables({@SecondaryTable(name = "dc_storage_network_ip_range", pkJoinColumns = { @PrimaryKeyJoinColumn(name = "range_id", referencedColumnName = "id")})})
public class StorageNetworkIpAddressVO {
@Id
@GeneratedValue(strategy = GenerationType.IDENTITY)
@Column(name = "id")
long id;
@Column(name = "range_id")
long rangeId;
@Column(name = "ip_address", updatable = false, nullable = false)
String ipAddress;
@Column(name = "taken")
@Temporal(value = TemporalType.TIMESTAMP)
private Date takenAt;
@Column(name = "netmask", table = "dc_storage_network_ip_range", insertable = false, updatable = false)
private String netmask;
@Column(name = "mac_address")
long mac;
@Column(name = "vlan", table = "dc_storage_network_ip_range", insertable = false, updatable = false)
Integer vlan;
@Column(name = "gateway", table = "dc_storage_network_ip_range", insertable = false, updatable = false)
String gateway;
protected StorageNetworkIpAddressVO() {
}
public Long getId() {
return id;
}
public void setTakenAt(Date takenDate) {
this.takenAt = takenDate;
}
public String getIpAddress() {
return ipAddress;
}
public void setIpAddress(String ip) {
this.ipAddress = ip;
}
public Date getTakenAt() {
return takenAt;
}
public long getRangeId() {
return rangeId;
}
public void setRangeId(long id) {
this.rangeId = id;
}
public long getMac() {
return mac;
}
public void setMac(long mac) {
this.mac = mac;
}
public String getNetmask() {
return netmask;
}
public void setNetmask(String netmask) {
this.netmask = netmask;
}
public Integer getVlan() {
return vlan;
}
public String getGateway() {
return gateway;
}
}

View File

@ -10,169 +10,169 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.dc;
import java.util.UUID;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import javax.persistence.Id;
import javax.persistence.PrimaryKeyJoinColumn;
import javax.persistence.SecondaryTable;
import javax.persistence.SecondaryTables;
import javax.persistence.Table;
@Entity
@Table(name="dc_storage_network_ip_range")
@SecondaryTables({@SecondaryTable(name="networks", pkJoinColumns={@PrimaryKeyJoinColumn(name="network_id", referencedColumnName="id")}),
@SecondaryTable(name="host_pod_ref", pkJoinColumns={@PrimaryKeyJoinColumn(name="pod_id", referencedColumnName="id")}),
@SecondaryTable(name="data_center", pkJoinColumns={@PrimaryKeyJoinColumn(name="data_center_id", referencedColumnName="id")})
})
public class StorageNetworkIpRangeVO implements StorageNetworkIpRange {
@Id
@GeneratedValue(strategy = GenerationType.IDENTITY)
@Column(name = "id")
private long id;
@Column(name = "uuid")
String uuid;
@Column(name = "vlan")
private Integer vlan;
@Column(name = "data_center_id")
private long dataCenterId;
@Column(name = "pod_id")
private long podId;
@Column(name = "start_ip")
private String startIp;
@Column(name = "end_ip")
private String endIp;
@Column(name = "gateway")
package com.cloud.dc;
import java.util.UUID;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import javax.persistence.Id;
import javax.persistence.PrimaryKeyJoinColumn;
import javax.persistence.SecondaryTable;
import javax.persistence.SecondaryTables;
import javax.persistence.Table;
@Entity
@Table(name="dc_storage_network_ip_range")
@SecondaryTables({@SecondaryTable(name="networks", pkJoinColumns={@PrimaryKeyJoinColumn(name="network_id", referencedColumnName="id")}),
@SecondaryTable(name="host_pod_ref", pkJoinColumns={@PrimaryKeyJoinColumn(name="pod_id", referencedColumnName="id")}),
@SecondaryTable(name="data_center", pkJoinColumns={@PrimaryKeyJoinColumn(name="data_center_id", referencedColumnName="id")})
})
public class StorageNetworkIpRangeVO implements StorageNetworkIpRange {
@Id
@GeneratedValue(strategy = GenerationType.IDENTITY)
@Column(name = "id")
private long id;
@Column(name = "uuid")
String uuid;
@Column(name = "vlan")
private Integer vlan;
@Column(name = "data_center_id")
private long dataCenterId;
@Column(name = "pod_id")
private long podId;
@Column(name = "start_ip")
private String startIp;
@Column(name = "end_ip")
private String endIp;
@Column(name = "gateway")
private String gateway;
@Column(name = "network_id")
private long networkId;
@Column(name="netmask")
@Column(name = "network_id")
private long networkId;
@Column(name="netmask")
private String netmask;
@Column(name = "uuid", table = "networks", insertable = false, updatable = false)
String networkUuid;
@Column(name = "uuid", table = "host_pod_ref", insertable = false, updatable = false)
String podUuid;
@Column(name = "uuid", table = "data_center", insertable = false, updatable = false)
String zoneUuid;
public StorageNetworkIpRangeVO(long dcId, long podId, long networkId, String startIp, String endIp, Integer vlan, String netmask, String gateway) {
this();
this.dataCenterId = dcId;
this.podId = podId;
this.networkId = networkId;
this.startIp = startIp;
this.endIp = endIp;
this.vlan = vlan;
@Column(name = "uuid", table = "networks", insertable = false, updatable = false)
String networkUuid;
@Column(name = "uuid", table = "host_pod_ref", insertable = false, updatable = false)
String podUuid;
@Column(name = "uuid", table = "data_center", insertable = false, updatable = false)
String zoneUuid;
public StorageNetworkIpRangeVO(long dcId, long podId, long networkId, String startIp, String endIp, Integer vlan, String netmask, String gateway) {
this();
this.dataCenterId = dcId;
this.podId = podId;
this.networkId = networkId;
this.startIp = startIp;
this.endIp = endIp;
this.vlan = vlan;
this.netmask = netmask;
this.gateway = gateway;
}
protected StorageNetworkIpRangeVO() {
this.uuid = UUID.randomUUID().toString();
}
public long getId() {
return id;
}
public long getDataCenterId() {
return dataCenterId;
}
public void setDataCenterId(long dcId) {
this.dataCenterId = dcId;
}
public long getPodId() {
return podId;
}
public void setPodId(long podId) {
this.podId = podId;
}
public long getNetworkId() {
return networkId;
}
public void setNetworkId(long nwId) {
this.networkId = nwId;
}
public Integer getVlan() {
return vlan;
}
public void setVlan(int vlan) {
this.vlan = vlan;
}
public void setStartIp(String start) {
this.startIp = start;
}
public String getStartIp() {
return startIp;
}
public void setEndIp(String end) {
this.endIp = end;
}
public String getEndIp() {
return endIp;
}
public String getNetmask() {
return netmask;
this.gateway = gateway;
}
public String getGateway() {
return this.gateway;
protected StorageNetworkIpRangeVO() {
this.uuid = UUID.randomUUID().toString();
}
public void setGateway(String gateway) {
this.gateway = gateway;
}
public void setNetmask(String netmask) {
this.netmask = netmask;
}
@Override
public String getUuid() {
return uuid;
}
@Override
public String getPodUuid() {
return podUuid;
}
@Override
public String getNetworkUuid() {
return networkUuid;
}
@Override
public String getZoneUuid() {
return zoneUuid;
}
}
public long getId() {
return id;
}
public long getDataCenterId() {
return dataCenterId;
}
public void setDataCenterId(long dcId) {
this.dataCenterId = dcId;
}
public long getPodId() {
return podId;
}
public void setPodId(long podId) {
this.podId = podId;
}
public long getNetworkId() {
return networkId;
}
public void setNetworkId(long nwId) {
this.networkId = nwId;
}
public Integer getVlan() {
return vlan;
}
public void setVlan(int vlan) {
this.vlan = vlan;
}
public void setStartIp(String start) {
this.startIp = start;
}
public String getStartIp() {
return startIp;
}
public void setEndIp(String end) {
this.endIp = end;
}
public String getEndIp() {
return endIp;
}
public String getNetmask() {
return netmask;
}
public String getGateway() {
return this.gateway;
}
public void setGateway(String gateway) {
this.gateway = gateway;
}
public void setNetmask(String netmask) {
this.netmask = netmask;
}
@Override
public String getUuid() {
return uuid;
}
@Override
public String getPodUuid() {
return podUuid;
}
@Override
public String getNetworkUuid() {
return networkUuid;
}
@Override
public String getZoneUuid() {
return zoneUuid;
}
}

View File

@ -10,21 +10,21 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.dc.dao;
package com.cloud.dc.dao;
import java.util.HashMap;
import java.util.List;
import com.cloud.dc.HostPodVO;
import com.cloud.utils.db.GenericDao;
public interface HostPodDao extends GenericDao<HostPodVO, Long> {
public List<HostPodVO> listByDataCenterId(long id);
public HostPodVO findByName(String name, long dcId);
public interface HostPodDao extends GenericDao<HostPodVO, Long> {
public List<HostPodVO> listByDataCenterId(long id);
public HostPodVO findByName(String name, long dcId);
public HashMap<Long, List<Object>> getCurrentPodCidrSubnets(long zoneId, long podIdToSkip);
public List<Long> listDisabledPods(long zoneId);
}
}

View File

@ -10,8 +10,8 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.dc.dao;
package com.cloud.dc.dao;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
@ -31,71 +31,71 @@ import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.SearchCriteria.Op;
import com.cloud.utils.db.Transaction;
@Local(value={HostPodDao.class})
public class HostPodDaoImpl extends GenericDaoBase<HostPodVO, Long> implements HostPodDao {
private static final Logger s_logger = Logger.getLogger(HostPodDaoImpl.class);
protected SearchBuilder<HostPodVO> DataCenterAndNameSearch;
@Local(value={HostPodDao.class})
public class HostPodDaoImpl extends GenericDaoBase<HostPodVO, Long> implements HostPodDao {
private static final Logger s_logger = Logger.getLogger(HostPodDaoImpl.class);
protected SearchBuilder<HostPodVO> DataCenterAndNameSearch;
protected SearchBuilder<HostPodVO> DataCenterIdSearch;
protected HostPodDaoImpl() {
DataCenterAndNameSearch = createSearchBuilder();
DataCenterAndNameSearch.and("dc", DataCenterAndNameSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ);
DataCenterAndNameSearch.and("name", DataCenterAndNameSearch.entity().getName(), SearchCriteria.Op.EQ);
DataCenterAndNameSearch.done();
DataCenterIdSearch = createSearchBuilder();
DataCenterIdSearch.and("dcId", DataCenterIdSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ);
protected HostPodDaoImpl() {
DataCenterAndNameSearch = createSearchBuilder();
DataCenterAndNameSearch.and("dc", DataCenterAndNameSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ);
DataCenterAndNameSearch.and("name", DataCenterAndNameSearch.entity().getName(), SearchCriteria.Op.EQ);
DataCenterAndNameSearch.done();
DataCenterIdSearch = createSearchBuilder();
DataCenterIdSearch.and("dcId", DataCenterIdSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ);
DataCenterIdSearch.done();
}
}
@Override
public List<HostPodVO> listByDataCenterId(long id) {
SearchCriteria<HostPodVO> sc = DataCenterIdSearch.create();
sc.setParameters("dcId", id);
return listBy(sc);
}
public List<HostPodVO> listByDataCenterId(long id) {
SearchCriteria<HostPodVO> sc = DataCenterIdSearch.create();
sc.setParameters("dcId", id);
return listBy(sc);
}
@Override
public HostPodVO findByName(String name, long dcId) {
SearchCriteria<HostPodVO> sc = DataCenterAndNameSearch.create();
sc.setParameters("dc", dcId);
sc.setParameters("name", name);
return findOneBy(sc);
}
@Override
public HashMap<Long, List<Object>> getCurrentPodCidrSubnets(long zoneId, long podIdToSkip) {
HashMap<Long, List<Object>> currentPodCidrSubnets = new HashMap<Long, List<Object>>();
String selectSql = "SELECT id, cidr_address, cidr_size FROM host_pod_ref WHERE data_center_id=" + zoneId +" and removed IS NULL";
Transaction txn = Transaction.currentTxn();
try {
PreparedStatement stmt = txn.prepareAutoCloseStatement(selectSql);
ResultSet rs = stmt.executeQuery();
while (rs.next()) {
Long podId = rs.getLong("id");
public HostPodVO findByName(String name, long dcId) {
SearchCriteria<HostPodVO> sc = DataCenterAndNameSearch.create();
sc.setParameters("dc", dcId);
sc.setParameters("name", name);
return findOneBy(sc);
}
@Override
public HashMap<Long, List<Object>> getCurrentPodCidrSubnets(long zoneId, long podIdToSkip) {
HashMap<Long, List<Object>> currentPodCidrSubnets = new HashMap<Long, List<Object>>();
String selectSql = "SELECT id, cidr_address, cidr_size FROM host_pod_ref WHERE data_center_id=" + zoneId +" and removed IS NULL";
Transaction txn = Transaction.currentTxn();
try {
PreparedStatement stmt = txn.prepareAutoCloseStatement(selectSql);
ResultSet rs = stmt.executeQuery();
while (rs.next()) {
Long podId = rs.getLong("id");
if (podId.longValue() == podIdToSkip) {
continue;
}
String cidrAddress = rs.getString("cidr_address");
long cidrSize = rs.getLong("cidr_size");
List<Object> cidrPair = new ArrayList<Object>();
cidrPair.add(0, cidrAddress);
cidrPair.add(1, new Long(cidrSize));
currentPodCidrSubnets.put(podId, cidrPair);
}
} catch (SQLException ex) {
s_logger.warn("DB exception " + ex.getMessage(), ex);
return null;
}
return currentPodCidrSubnets;
}
}
String cidrAddress = rs.getString("cidr_address");
long cidrSize = rs.getLong("cidr_size");
List<Object> cidrPair = new ArrayList<Object>();
cidrPair.add(0, cidrAddress);
cidrPair.add(1, new Long(cidrSize));
currentPodCidrSubnets.put(podId, cidrPair);
}
} catch (SQLException ex) {
s_logger.warn("DB exception " + ex.getMessage(), ex);
return null;
}
return currentPodCidrSubnets;
}
@Override
public boolean remove(Long id) {
Transaction txn = Transaction.currentTxn();
@ -125,4 +125,4 @@ public class HostPodDaoImpl extends GenericDaoBase<HostPodVO, Long> implements H
return customSearch(sc, null);
}
}
}

View File

@ -10,19 +10,19 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.dc.dao;
import java.util.List;
import com.cloud.dc.StorageNetworkIpAddressVO;
import com.cloud.utils.db.GenericDao;
public interface StorageNetworkIpAddressDao extends GenericDao<StorageNetworkIpAddressVO, Long> {
long countInUseIpByRangeId(long rangeId);
List<String> listInUseIpByRangeId(long rangeId);
StorageNetworkIpAddressVO takeIpAddress(long rangeId);
void releaseIpAddress(String ip);
}
package com.cloud.dc.dao;
import java.util.List;
import com.cloud.dc.StorageNetworkIpAddressVO;
import com.cloud.utils.db.GenericDao;
public interface StorageNetworkIpAddressDao extends GenericDao<StorageNetworkIpAddressVO, Long> {
long countInUseIpByRangeId(long rangeId);
List<String> listInUseIpByRangeId(long rangeId);
StorageNetworkIpAddressVO takeIpAddress(long rangeId);
void releaseIpAddress(String ip);
}

View File

@ -10,98 +10,98 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.dc.dao;
import java.util.Date;
import java.util.List;
import java.util.Map;
import javax.ejb.Local;
import javax.naming.ConfigurationException;
import com.cloud.dc.DataCenterIpAddressVO;
import com.cloud.dc.StorageNetworkIpAddressVO;
import com.cloud.utils.db.DB;
import com.cloud.utils.db.Filter;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.GenericSearchBuilder;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.SearchCriteria2;
import com.cloud.utils.db.Transaction;
import com.cloud.utils.db.SearchCriteria.Func;
import com.cloud.utils.db.SearchCriteria.Op;
@Local(value={StorageNetworkIpAddressDao.class})
@DB(txn=false)
public class StorageNetworkIpAddressDaoImpl extends GenericDaoBase<StorageNetworkIpAddressVO, Long> implements StorageNetworkIpAddressDao {
protected final GenericSearchBuilder<StorageNetworkIpAddressVO, Long> countInUserIp;
protected final GenericSearchBuilder<StorageNetworkIpAddressVO, String> listInUseIp;
protected final SearchBuilder<StorageNetworkIpAddressVO> untakenIp;
protected final SearchBuilder<StorageNetworkIpAddressVO> ipSearch;
protected StorageNetworkIpAddressDaoImpl() {
countInUserIp = createSearchBuilder(Long.class);
countInUserIp.select(null, Func.COUNT, null);
countInUserIp.and("rangeId", countInUserIp.entity().getRangeId(), Op.EQ);
countInUserIp.and("taken", countInUserIp.entity().getTakenAt(), Op.NNULL);
countInUserIp.done();
listInUseIp = createSearchBuilder(String.class);
listInUseIp.selectField(listInUseIp.entity().getIpAddress());
listInUseIp.and("rangeId", listInUseIp.entity().getRangeId(), Op.EQ);
listInUseIp.and("taken", listInUseIp.entity().getTakenAt(), Op.NNULL);
listInUseIp.done();
untakenIp = createSearchBuilder();
untakenIp.and("rangeId", untakenIp.entity().getRangeId(), Op.EQ);
untakenIp.and("taken", untakenIp.entity().getTakenAt(), Op.NULL);
untakenIp.done();
ipSearch = createSearchBuilder();
ipSearch.and("ipAddress", ipSearch.entity().getIpAddress(), Op.EQ);
ipSearch.done();
}
@Override
public long countInUseIpByRangeId(long rangeId) {
SearchCriteria<Long> sc = countInUserIp.create();
sc.setParameters("rangeId", rangeId);
return customSearch(sc, null).get(0);
}
@Override
public List<String> listInUseIpByRangeId(long rangeId) {
SearchCriteria<String> sc = listInUseIp.create();
sc.setParameters("rangeId", rangeId);
return customSearch(sc, null);
}
@Override
@DB
public StorageNetworkIpAddressVO takeIpAddress(long rangeId) {
SearchCriteria<StorageNetworkIpAddressVO> sc = untakenIp.create();
sc.setParameters("rangeId", rangeId);
Transaction txn = Transaction.currentTxn();
txn.start();
StorageNetworkIpAddressVO ip = lockOneRandomRow(sc, true);
if (ip == null) {
txn.rollback();
return null;
}
ip.setTakenAt(new Date());
update(ip.getId(), ip);
txn.commit();
return ip;
}
@Override
public void releaseIpAddress(String ip) {
SearchCriteria<StorageNetworkIpAddressVO> sc = ipSearch.create();
sc.setParameters("ipAddress", ip);
StorageNetworkIpAddressVO vo = createForUpdate();
vo.setTakenAt(null);
update(vo, sc);
}
}
package com.cloud.dc.dao;
import java.util.Date;
import java.util.List;
import java.util.Map;
import javax.ejb.Local;
import javax.naming.ConfigurationException;
import com.cloud.dc.DataCenterIpAddressVO;
import com.cloud.dc.StorageNetworkIpAddressVO;
import com.cloud.utils.db.DB;
import com.cloud.utils.db.Filter;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.GenericSearchBuilder;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.SearchCriteria2;
import com.cloud.utils.db.Transaction;
import com.cloud.utils.db.SearchCriteria.Func;
import com.cloud.utils.db.SearchCriteria.Op;
@Local(value={StorageNetworkIpAddressDao.class})
@DB(txn=false)
public class StorageNetworkIpAddressDaoImpl extends GenericDaoBase<StorageNetworkIpAddressVO, Long> implements StorageNetworkIpAddressDao {
protected final GenericSearchBuilder<StorageNetworkIpAddressVO, Long> countInUserIp;
protected final GenericSearchBuilder<StorageNetworkIpAddressVO, String> listInUseIp;
protected final SearchBuilder<StorageNetworkIpAddressVO> untakenIp;
protected final SearchBuilder<StorageNetworkIpAddressVO> ipSearch;
protected StorageNetworkIpAddressDaoImpl() {
countInUserIp = createSearchBuilder(Long.class);
countInUserIp.select(null, Func.COUNT, null);
countInUserIp.and("rangeId", countInUserIp.entity().getRangeId(), Op.EQ);
countInUserIp.and("taken", countInUserIp.entity().getTakenAt(), Op.NNULL);
countInUserIp.done();
listInUseIp = createSearchBuilder(String.class);
listInUseIp.selectField(listInUseIp.entity().getIpAddress());
listInUseIp.and("rangeId", listInUseIp.entity().getRangeId(), Op.EQ);
listInUseIp.and("taken", listInUseIp.entity().getTakenAt(), Op.NNULL);
listInUseIp.done();
untakenIp = createSearchBuilder();
untakenIp.and("rangeId", untakenIp.entity().getRangeId(), Op.EQ);
untakenIp.and("taken", untakenIp.entity().getTakenAt(), Op.NULL);
untakenIp.done();
ipSearch = createSearchBuilder();
ipSearch.and("ipAddress", ipSearch.entity().getIpAddress(), Op.EQ);
ipSearch.done();
}
@Override
public long countInUseIpByRangeId(long rangeId) {
SearchCriteria<Long> sc = countInUserIp.create();
sc.setParameters("rangeId", rangeId);
return customSearch(sc, null).get(0);
}
@Override
public List<String> listInUseIpByRangeId(long rangeId) {
SearchCriteria<String> sc = listInUseIp.create();
sc.setParameters("rangeId", rangeId);
return customSearch(sc, null);
}
@Override
@DB
public StorageNetworkIpAddressVO takeIpAddress(long rangeId) {
SearchCriteria<StorageNetworkIpAddressVO> sc = untakenIp.create();
sc.setParameters("rangeId", rangeId);
Transaction txn = Transaction.currentTxn();
txn.start();
StorageNetworkIpAddressVO ip = lockOneRandomRow(sc, true);
if (ip == null) {
txn.rollback();
return null;
}
ip.setTakenAt(new Date());
update(ip.getId(), ip);
txn.commit();
return ip;
}
@Override
public void releaseIpAddress(String ip) {
SearchCriteria<StorageNetworkIpAddressVO> sc = ipSearch.create();
sc.setParameters("ipAddress", ip);
StorageNetworkIpAddressVO vo = createForUpdate();
vo.setTakenAt(null);
update(vo, sc);
}
}

View File

@ -10,19 +10,19 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.dc.dao;
import java.util.List;
import com.cloud.dc.StorageNetworkIpRangeVO;
import com.cloud.utils.db.GenericDao;
public interface StorageNetworkIpRangeDao extends GenericDao<StorageNetworkIpRangeVO, Long> {
List<StorageNetworkIpRangeVO> listByRangeId(long rangeId);
List<StorageNetworkIpRangeVO> listByPodId(long podId);
List<StorageNetworkIpRangeVO> listByDataCenterId(long dcId);
long countRanges();
}
package com.cloud.dc.dao;
import java.util.List;
import com.cloud.dc.StorageNetworkIpRangeVO;
import com.cloud.utils.db.GenericDao;
public interface StorageNetworkIpRangeDao extends GenericDao<StorageNetworkIpRangeVO, Long> {
List<StorageNetworkIpRangeVO> listByRangeId(long rangeId);
List<StorageNetworkIpRangeVO> listByPodId(long podId);
List<StorageNetworkIpRangeVO> listByDataCenterId(long dcId);
long countRanges();
}

View File

@ -10,62 +10,62 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.dc.dao;
import java.util.List;
import java.util.Map;
import javax.ejb.Local;
import javax.naming.ConfigurationException;
import com.cloud.dc.StorageNetworkIpRangeVO;
import com.cloud.utils.db.DB;
import com.cloud.utils.db.Filter;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.GenericSearchBuilder;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.SearchCriteria2;
import com.cloud.utils.db.SearchCriteriaService;
import com.cloud.utils.db.SearchCriteria.Func;
import com.cloud.utils.db.SearchCriteria.Op;
/**
 * Database access for storage-network IP ranges, built on the generic DAO
 * framework. All list methods are single-column equality filters assembled
 * with SearchCriteria2; countRanges uses a prebuilt COUNT search.
 */
@Local(value={StorageNetworkIpRangeDao.class})
@DB(txn=false)
public class StorageNetworkIpRangeDaoImpl extends GenericDaoBase<StorageNetworkIpRangeVO, Long> implements StorageNetworkIpRangeDao {
// Prebuilt COUNT search over the whole table; executed by countRanges().
protected final GenericSearchBuilder<StorageNetworkIpRangeVO, Long> countRanges;
protected StorageNetworkIpRangeDaoImpl() {
countRanges = createSearchBuilder(Long.class);
// COUNT with no column/selector restriction, i.e. count all rows.
countRanges.select(null, Func.COUNT, null);
countRanges.done();
}
@Override
public List<StorageNetworkIpRangeVO> listByPodId(long podId) {
// Equality filter on the pod id column.
SearchCriteriaService<StorageNetworkIpRangeVO, StorageNetworkIpRangeVO> sc = SearchCriteria2.create(StorageNetworkIpRangeVO.class);
sc.addAnd(sc.getEntity().getPodId(), Op.EQ, podId);
return sc.list();
}
@Override
public List<StorageNetworkIpRangeVO> listByRangeId(long rangeId) {
// NOTE(review): filters on getId() (the primary key), so at most one row is
// expected despite the List return type — findById would be equivalent.
SearchCriteriaService<StorageNetworkIpRangeVO, StorageNetworkIpRangeVO> sc = SearchCriteria2.create(StorageNetworkIpRangeVO.class);
sc.addAnd(sc.getEntity().getId(), Op.EQ, rangeId);
return sc.list();
}
@Override
public List<StorageNetworkIpRangeVO> listByDataCenterId(long dcId) {
// Equality filter on the data center (zone) id column.
SearchCriteriaService<StorageNetworkIpRangeVO, StorageNetworkIpRangeVO> sc = SearchCriteria2.create(StorageNetworkIpRangeVO.class);
sc.addAnd(sc.getEntity().getDataCenterId(), Op.EQ, dcId);
return sc.list();
}
@Override
public long countRanges() {
SearchCriteria<Long> sc = countRanges.create();
// customSearch returns a single-element list holding the COUNT value.
return customSearch(sc, null).get(0);
}
}
package com.cloud.dc.dao;
import java.util.List;
import java.util.Map;
import javax.ejb.Local;
import javax.naming.ConfigurationException;
import com.cloud.dc.StorageNetworkIpRangeVO;
import com.cloud.utils.db.DB;
import com.cloud.utils.db.Filter;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.GenericSearchBuilder;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.SearchCriteria2;
import com.cloud.utils.db.SearchCriteriaService;
import com.cloud.utils.db.SearchCriteria.Func;
import com.cloud.utils.db.SearchCriteria.Op;
/**
 * Database access for storage-network IP ranges, built on the generic DAO
 * framework. All list methods are single-column equality filters assembled
 * with SearchCriteria2; countRanges uses a prebuilt COUNT search.
 */
@Local(value={StorageNetworkIpRangeDao.class})
@DB(txn=false)
public class StorageNetworkIpRangeDaoImpl extends GenericDaoBase<StorageNetworkIpRangeVO, Long> implements StorageNetworkIpRangeDao {
// Prebuilt COUNT search over the whole table; executed by countRanges().
protected final GenericSearchBuilder<StorageNetworkIpRangeVO, Long> countRanges;
protected StorageNetworkIpRangeDaoImpl() {
countRanges = createSearchBuilder(Long.class);
// COUNT with no column/selector restriction, i.e. count all rows.
countRanges.select(null, Func.COUNT, null);
countRanges.done();
}
@Override
public List<StorageNetworkIpRangeVO> listByPodId(long podId) {
// Equality filter on the pod id column.
SearchCriteriaService<StorageNetworkIpRangeVO, StorageNetworkIpRangeVO> sc = SearchCriteria2.create(StorageNetworkIpRangeVO.class);
sc.addAnd(sc.getEntity().getPodId(), Op.EQ, podId);
return sc.list();
}
@Override
public List<StorageNetworkIpRangeVO> listByRangeId(long rangeId) {
// NOTE(review): filters on getId() (the primary key), so at most one row is
// expected despite the List return type — findById would be equivalent.
SearchCriteriaService<StorageNetworkIpRangeVO, StorageNetworkIpRangeVO> sc = SearchCriteria2.create(StorageNetworkIpRangeVO.class);
sc.addAnd(sc.getEntity().getId(), Op.EQ, rangeId);
return sc.list();
}
@Override
public List<StorageNetworkIpRangeVO> listByDataCenterId(long dcId) {
// Equality filter on the data center (zone) id column.
SearchCriteriaService<StorageNetworkIpRangeVO, StorageNetworkIpRangeVO> sc = SearchCriteria2.create(StorageNetworkIpRangeVO.class);
sc.addAnd(sc.getEntity().getDataCenterId(), Op.EQ, dcId);
return sc.list();
}
@Override
public long countRanges() {
SearchCriteria<Long> sc = countRanges.create();
// customSearch returns a single-element list holding the COUNT value.
return customSearch(sc, null).get(0);
}
}

View File

@ -10,142 +10,142 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.deploy;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import javax.ejb.Local;
import org.apache.log4j.Logger;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.utils.Pair;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachineProfile;
/**
 * Deployment planner that concentrates an account's VMs: pods that already
 * host VMs for the deploying account are preferred, then capacity ordering
 * is preserved within and after those pods. Extends FirstFitPlanner and only
 * overrides the reordering hooks plus canHandle.
 */
@Local(value=DeploymentPlanner.class)
public class UserConcentratedPodPlanner extends FirstFitPlanner implements DeploymentPlanner {
private static final Logger s_logger = Logger.getLogger(UserConcentratedPodPlanner.class);
/**
 * This method should reorder the given list of Cluster Ids by applying any necessary heuristic
 * for this planner
 * For UserConcentratedPodPlanner we need to order the clusters in a zone across pods, by considering those pods first which have more number of VMs for this account
 * This reordering is not done incase the clusters within single pod are passed when the allocation is applied at pod-level.
 * @return List<Long> ordered list of Cluster Ids
 */
@Override
protected List<Long> reorderClusters(long id, boolean isZone, Pair<List<Long>, Map<Long, Double>> clusterCapacityInfo, VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan){
List<Long> clusterIdsByCapacity = clusterCapacityInfo.first();
// No owner or pod-scoped allocation: keep the pure capacity ordering.
if(vmProfile.getOwner() == null || !isZone){
return clusterIdsByCapacity;
}
return applyUserConcentrationPodHeuristicToClusters(id, clusterIdsByCapacity, vmProfile.getOwner().getAccountId());
}
// Reorders clusters so those in pods already hosting this account's VMs come first.
// Falls back to the capacity ordering when the account has no VMs in the zone.
private List<Long> applyUserConcentrationPodHeuristicToClusters(long zoneId, List<Long> prioritizedClusterIds, long accountId){
//user has VMs in certain pods. - prioritize those pods first
//UserConcentratedPod strategy
List<Long> clusterList = new ArrayList<Long>();
List<Long> podIds = listPodsByUserConcentration(zoneId, accountId);
if(!podIds.isEmpty()){
clusterList = reorderClustersByPods(prioritizedClusterIds, podIds);
}else{
clusterList = prioritizedClusterIds;
}
return clusterList;
}
// Stable reorder: for each preferred pod (in pod-preference order) emit that
// pod's clusters in their existing capacity order, then append the leftovers.
// NOTE(review): mutates the caller-supplied clusterIds list via removeAll.
private List<Long> reorderClustersByPods(List<Long> clusterIds, List<Long> podIds) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Reordering cluster list as per pods ordered by user concentration");
}
Map<Long, List<Long>> podClusterMap = _clusterDao.getPodClusterIdMap(clusterIds);
if (s_logger.isTraceEnabled()) {
s_logger.trace("Pod To cluster Map is: "+podClusterMap );
}
List<Long> reorderedClusters = new ArrayList<Long>();
for (Long pod : podIds){
if(podClusterMap.containsKey(pod)){
List<Long> clustersOfThisPod = podClusterMap.get(pod);
if(clustersOfThisPod != null){
// Walk the capacity-ordered list so relative order inside a pod is kept.
for(Long clusterId : clusterIds){
if(clustersOfThisPod.contains(clusterId)){
reorderedClusters.add(clusterId);
}
}
// Remove the already-emitted clusters so later pods / the tail skip them.
clusterIds.removeAll(clustersOfThisPod);
}
}
}
// Clusters in pods without this account's VMs keep their capacity order at the end.
reorderedClusters.addAll(clusterIds);
if (s_logger.isTraceEnabled()) {
s_logger.trace("Reordered cluster list: " + reorderedClusters);
}
return reorderedClusters;
}
// Pods in the zone that already host VMs of this account, as ordered by the DAO.
protected List<Long> listPodsByUserConcentration(long zoneId, long accountId){
if (s_logger.isDebugEnabled()) {
s_logger.debug("Applying UserConcentratedPod heuristic for account: "+ accountId);
}
List<Long> prioritizedPods = _vmDao.listPodIdsHavingVmsforAccount(zoneId, accountId);
if (s_logger.isTraceEnabled()) {
s_logger.trace("List of pods to be considered, after applying UserConcentratedPod heuristic: "+ prioritizedPods);
}
return prioritizedPods;
}
/**
 * This method should reorder the given list of Pod Ids by applying any necessary heuristic
 * for this planner
 * For UserConcentratedPodPlanner we need to order the pods by considering those pods first which have more number of VMs for this account
 * @return List<Long> ordered list of Pod Ids
 */
@Override
protected List<Long> reorderPods(Pair<List<Long>, Map<Long, Double>> podCapacityInfo, VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan){
List<Long> podIdsByCapacity = podCapacityInfo.first();
if(vmProfile.getOwner() == null){
return podIdsByCapacity;
}
long accountId = vmProfile.getOwner().getAccountId();
//user has VMs in certain pods. - prioritize those pods first
//UserConcentratedPod strategy
List<Long> podIds = listPodsByUserConcentration(plan.getDataCenterId(), accountId);
if(!podIds.isEmpty()){
//remove pods that dont have capacity for this vm
podIds.retainAll(podIdsByCapacity);
// Preferred pods first, then the remaining capacity-ordered pods.
podIdsByCapacity.removeAll(podIds);
podIds.addAll(podIdsByCapacity);
return podIds;
}else{
return podIdsByCapacity;
}
}
// Handles non-bare-metal VMs when the configured allocation algorithm is one
// of the userconcentratedpod variants.
@Override
public boolean canHandle(VirtualMachineProfile<? extends VirtualMachine> vm, DeploymentPlan plan, ExcludeList avoid) {
if(vm.getHypervisorType() != HypervisorType.BareMetal){
//check the allocation strategy
if (_allocationAlgorithm != null && (_allocationAlgorithm.equals(AllocationAlgorithm.userconcentratedpod_random.toString()) || _allocationAlgorithm.equals(AllocationAlgorithm.userconcentratedpod_firstfit.toString()))){
return true;
}
}
return false;
}
}
package com.cloud.deploy;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import javax.ejb.Local;
import org.apache.log4j.Logger;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.utils.Pair;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachineProfile;
/**
 * Deployment planner that concentrates an account's VMs: pods that already
 * host VMs for the deploying account are preferred, then capacity ordering
 * is preserved within and after those pods. Extends FirstFitPlanner and only
 * overrides the reordering hooks plus canHandle.
 */
@Local(value=DeploymentPlanner.class)
public class UserConcentratedPodPlanner extends FirstFitPlanner implements DeploymentPlanner {
private static final Logger s_logger = Logger.getLogger(UserConcentratedPodPlanner.class);
/**
 * This method should reorder the given list of Cluster Ids by applying any necessary heuristic
 * for this planner
 * For UserConcentratedPodPlanner we need to order the clusters in a zone across pods, by considering those pods first which have more number of VMs for this account
 * This reordering is not done incase the clusters within single pod are passed when the allocation is applied at pod-level.
 * @return List<Long> ordered list of Cluster Ids
 */
@Override
protected List<Long> reorderClusters(long id, boolean isZone, Pair<List<Long>, Map<Long, Double>> clusterCapacityInfo, VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan){
List<Long> clusterIdsByCapacity = clusterCapacityInfo.first();
// No owner or pod-scoped allocation: keep the pure capacity ordering.
if(vmProfile.getOwner() == null || !isZone){
return clusterIdsByCapacity;
}
return applyUserConcentrationPodHeuristicToClusters(id, clusterIdsByCapacity, vmProfile.getOwner().getAccountId());
}
// Reorders clusters so those in pods already hosting this account's VMs come first.
// Falls back to the capacity ordering when the account has no VMs in the zone.
private List<Long> applyUserConcentrationPodHeuristicToClusters(long zoneId, List<Long> prioritizedClusterIds, long accountId){
//user has VMs in certain pods. - prioritize those pods first
//UserConcentratedPod strategy
List<Long> clusterList = new ArrayList<Long>();
List<Long> podIds = listPodsByUserConcentration(zoneId, accountId);
if(!podIds.isEmpty()){
clusterList = reorderClustersByPods(prioritizedClusterIds, podIds);
}else{
clusterList = prioritizedClusterIds;
}
return clusterList;
}
// Stable reorder: for each preferred pod (in pod-preference order) emit that
// pod's clusters in their existing capacity order, then append the leftovers.
// NOTE(review): mutates the caller-supplied clusterIds list via removeAll.
private List<Long> reorderClustersByPods(List<Long> clusterIds, List<Long> podIds) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Reordering cluster list as per pods ordered by user concentration");
}
Map<Long, List<Long>> podClusterMap = _clusterDao.getPodClusterIdMap(clusterIds);
if (s_logger.isTraceEnabled()) {
s_logger.trace("Pod To cluster Map is: "+podClusterMap );
}
List<Long> reorderedClusters = new ArrayList<Long>();
for (Long pod : podIds){
if(podClusterMap.containsKey(pod)){
List<Long> clustersOfThisPod = podClusterMap.get(pod);
if(clustersOfThisPod != null){
// Walk the capacity-ordered list so relative order inside a pod is kept.
for(Long clusterId : clusterIds){
if(clustersOfThisPod.contains(clusterId)){
reorderedClusters.add(clusterId);
}
}
// Remove the already-emitted clusters so later pods / the tail skip them.
clusterIds.removeAll(clustersOfThisPod);
}
}
}
// Clusters in pods without this account's VMs keep their capacity order at the end.
reorderedClusters.addAll(clusterIds);
if (s_logger.isTraceEnabled()) {
s_logger.trace("Reordered cluster list: " + reorderedClusters);
}
return reorderedClusters;
}
// Pods in the zone that already host VMs of this account, as ordered by the DAO.
protected List<Long> listPodsByUserConcentration(long zoneId, long accountId){
if (s_logger.isDebugEnabled()) {
s_logger.debug("Applying UserConcentratedPod heuristic for account: "+ accountId);
}
List<Long> prioritizedPods = _vmDao.listPodIdsHavingVmsforAccount(zoneId, accountId);
if (s_logger.isTraceEnabled()) {
s_logger.trace("List of pods to be considered, after applying UserConcentratedPod heuristic: "+ prioritizedPods);
}
return prioritizedPods;
}
/**
 * This method should reorder the given list of Pod Ids by applying any necessary heuristic
 * for this planner
 * For UserConcentratedPodPlanner we need to order the pods by considering those pods first which have more number of VMs for this account
 * @return List<Long> ordered list of Pod Ids
 */
@Override
protected List<Long> reorderPods(Pair<List<Long>, Map<Long, Double>> podCapacityInfo, VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan){
List<Long> podIdsByCapacity = podCapacityInfo.first();
if(vmProfile.getOwner() == null){
return podIdsByCapacity;
}
long accountId = vmProfile.getOwner().getAccountId();
//user has VMs in certain pods. - prioritize those pods first
//UserConcentratedPod strategy
List<Long> podIds = listPodsByUserConcentration(plan.getDataCenterId(), accountId);
if(!podIds.isEmpty()){
//remove pods that dont have capacity for this vm
podIds.retainAll(podIdsByCapacity);
// Preferred pods first, then the remaining capacity-ordered pods.
podIdsByCapacity.removeAll(podIds);
podIds.addAll(podIdsByCapacity);
return podIds;
}else{
return podIdsByCapacity;
}
}
// Handles non-bare-metal VMs when the configured allocation algorithm is one
// of the userconcentratedpod variants.
@Override
public boolean canHandle(VirtualMachineProfile<? extends VirtualMachine> vm, DeploymentPlan plan, ExcludeList avoid) {
if(vm.getHypervisorType() != HypervisorType.BareMetal){
//check the allocation strategy
if (_allocationAlgorithm != null && (_allocationAlgorithm.equals(AllocationAlgorithm.userconcentratedpod_random.toString()) || _allocationAlgorithm.equals(AllocationAlgorithm.userconcentratedpod_firstfit.toString()))){
return true;
}
}
return false;
}
}

View File

@ -10,206 +10,206 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.deploy;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;
import javax.ejb.Local;
import javax.naming.ConfigurationException;
import org.apache.log4j.Logger;
import com.cloud.configuration.Config;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.utils.NumbersUtil;
import com.cloud.utils.Pair;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachineProfile;
/**
 * Deployment planner that disperses an account's VMs: pods/clusters with
 * fewer of the account's VMs are preferred, blended with capacity ordering
 * via the configurable weight _userDispersionWeight (1.0 = pure dispersion,
 * 0.0 = pure capacity). Extends FirstFitPlanner.
 */
@Local(value=DeploymentPlanner.class)
public class UserDispersingPlanner extends FirstFitPlanner implements DeploymentPlanner {
private static final Logger s_logger = Logger.getLogger(UserDispersingPlanner.class);
/**
 * This method should reorder the given list of Cluster Ids by applying any necessary heuristic
 * for this planner
 * For UserDispersingPlanner we need to order the clusters by considering the number of VMs for this account
 * @return List<Long> ordered list of Cluster Ids
 */
@Override
protected List<Long> reorderClusters(long id, boolean isZone, Pair<List<Long>, Map<Long, Double>> clusterCapacityInfo, VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan){
List<Long> clusterIdsByCapacity = clusterCapacityInfo.first();
if(vmProfile.getOwner() == null){
return clusterIdsByCapacity;
}
long accountId = vmProfile.getOwner().getAccountId();
Pair<List<Long>, Map<Long, Double>> clusterIdsVmCountInfo = listClustersByUserDispersion(id, isZone, accountId);
//now we have 2 cluster lists - one ordered by capacity and the other by number of VMs for this account
//need to apply weights to these to find the correct ordering to follow
if(_userDispersionWeight == 1.0f){
// Pure dispersion: take the VM-count ordering, restricted to clusters with capacity.
List<Long> clusterIds = clusterIdsVmCountInfo.first();
clusterIds.retainAll(clusterIdsByCapacity);
return clusterIds;
}else{
//apply weights to the two lists
return orderByApplyingWeights(clusterCapacityInfo, clusterIdsVmCountInfo, accountId);
}
}
/**
 * This method should reorder the given list of Pod Ids by applying any necessary heuristic
 * for this planner
 * For UserDispersingPlanner we need to order the pods by considering the number of VMs for this account
 * @return List<Long> ordered list of Pod Ids
 */
@Override
protected List<Long> reorderPods(Pair<List<Long>, Map<Long, Double>> podCapacityInfo, VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan){
List<Long> podIdsByCapacity = podCapacityInfo.first();
if(vmProfile.getOwner() == null){
return podIdsByCapacity;
}
long accountId = vmProfile.getOwner().getAccountId();
Pair<List<Long>, Map<Long, Double>> podIdsVmCountInfo = listPodsByUserDispersion(plan.getDataCenterId(), accountId);
//now we have 2 pod lists - one ordered by capacity and the other by number of VMs for this account
//need to apply weights to these to find the correct ordering to follow
if(_userDispersionWeight == 1.0f){
// Pure dispersion: take the VM-count ordering, restricted to pods with capacity.
List<Long> podIds = podIdsVmCountInfo.first();
podIds.retainAll(podIdsByCapacity);
return podIds;
}else{
//apply weights to the two lists
return orderByApplyingWeights(podCapacityInfo, podIdsVmCountInfo, accountId);
}
}
// Cluster ids (zone- or pod-scoped by isZone) with this account's VM counts,
// as ordered by the DAO (ascending by VM count per the trace message).
protected Pair<List<Long>, Map<Long, Double>> listClustersByUserDispersion(long id, boolean isZone, long accountId){
if (s_logger.isDebugEnabled()) {
s_logger.debug("Applying Userdispersion heuristic to clusters for account: "+ accountId);
}
Pair<List<Long>, Map<Long, Double>> clusterIdsVmCountInfo;
if(isZone){
clusterIdsVmCountInfo = _vmInstanceDao.listClusterIdsInZoneByVmCount(id, accountId);
}else{
clusterIdsVmCountInfo = _vmInstanceDao.listClusterIdsInPodByVmCount(id, accountId);
}
if (s_logger.isTraceEnabled()) {
s_logger.trace("List of clusters in ascending order of number of VMs: "+ clusterIdsVmCountInfo.first());
}
return clusterIdsVmCountInfo;
}
// Pod ids in the zone with this account's VM counts, from the DAO.
protected Pair<List<Long>, Map<Long, Double>> listPodsByUserDispersion(long dataCenterId, long accountId) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Applying Userdispersion heuristic to pods for account: "+ accountId);
}
Pair<List<Long>, Map<Long, Double>> podIdsVmCountInfo = _vmInstanceDao.listPodIdsInZoneByVmCount(dataCenterId, accountId);
if (s_logger.isTraceEnabled()) {
s_logger.trace("List of pods in ascending order of number of VMs: "+ podIdsVmCountInfo.first());
}
return podIdsVmCountInfo;
}
// Blends the two orderings: total weight = capacity * (1 - w) + normalized
// VM count * w, then sorts ids ascending by total weight (TreeMap), keeping
// only ids present in the capacity list.
private List<Long> orderByApplyingWeights(Pair<List<Long>, Map<Long, Double>> capacityInfo, Pair<List<Long>, Map<Long, Double>> vmCountInfo, long accountId){
List<Long> capacityOrderedIds = capacityInfo.first();
List<Long> vmCountOrderedIds = vmCountInfo.first();
Map<Long, Double> capacityMap = capacityInfo.second();
Map<Long, Double> vmCountMap = vmCountInfo.second();
if (s_logger.isTraceEnabled()) {
s_logger.trace("Capacity Id list: "+ capacityOrderedIds + " , capacityMap:"+capacityMap);
}
if (s_logger.isTraceEnabled()) {
s_logger.trace("Vm Count Id list: "+ vmCountOrderedIds + " , vmCountMap:"+vmCountMap);
}
List<Long> idsReorderedByWeights = new ArrayList<Long>();
float capacityWeight = (1.0f -_userDispersionWeight);
if (s_logger.isDebugEnabled()) {
s_logger.debug("Applying userDispersionWeight: "+ _userDispersionWeight);
}
//normalize the vmCountMap
LinkedHashMap<Long, Double> normalisedVmCountIdMap= new LinkedHashMap<Long, Double>();
Long totalVmsOfAccount = _vmInstanceDao.countRunningByAccount(accountId);
if (s_logger.isDebugEnabled()) {
s_logger.debug("Total VMs for account: "+ totalVmsOfAccount);
}
// NOTE(review): if totalVmsOfAccount is 0 this division yields NaN/Infinity
// weights (or NPE if the DAO returns null) — confirm the DAO guarantees a
// positive count on this code path.
for(Long id : vmCountOrderedIds){
Double normalisedCount = vmCountMap.get(id) / totalVmsOfAccount;
normalisedVmCountIdMap.put(id, normalisedCount);
}
//consider only those ids that are in capacity map.
SortedMap<Double, List<Long>> sortedMap= new TreeMap<Double, List<Long>>();
for(Long id : capacityOrderedIds){
Double weightedCapacityValue = capacityMap.get(id) * capacityWeight;
// NOTE(review): assumes every capacity id also appears in the VM-count map;
// a missing entry would NPE on unboxing — verify the DAO contracts.
Double weightedVmCountValue = normalisedVmCountIdMap.get(id) * _userDispersionWeight;
Double totalWeight = weightedCapacityValue + weightedVmCountValue;
// Ids with identical total weight are grouped and keep capacity order.
if(sortedMap.containsKey(totalWeight)){
List<Long> idList = sortedMap.get(totalWeight);
idList.add(id);
sortedMap.put(totalWeight, idList);
}else{
List<Long> idList = new ArrayList<Long>();
idList.add(id);
sortedMap.put(totalWeight, idList);
}
}
for(List<Long> idList : sortedMap.values()){
idsReorderedByWeights.addAll(idList);
}
if (s_logger.isTraceEnabled()) {
s_logger.trace("Reordered Id list: "+ idsReorderedByWeights);
}
return idsReorderedByWeights;
}
// Handles non-bare-metal VMs when the allocation algorithm is "userdispersing".
@Override
public boolean canHandle(VirtualMachineProfile<? extends VirtualMachine> vm, DeploymentPlan plan, ExcludeList avoid) {
if(vm.getHypervisorType() != HypervisorType.BareMetal){
//check the allocation strategy
if (_allocationAlgorithm != null && _allocationAlgorithm.equals(AllocationAlgorithm.userdispersing.toString())) {
return true;
}
}
return false;
}
// Dispersion weight in [0..1]; loaded from config in configure(), default 1.0.
float _userDispersionWeight;
@Override
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
super.configure(name, params);
// Read the global VmUserDispersionWeight setting; fall back to 1.0 (pure dispersion).
String weight = _configDao.getValue(Config.VmUserDispersionWeight.key());
_userDispersionWeight = NumbersUtil.parseFloat(weight, 1.0f);
return true;
}
}
package com.cloud.deploy;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;
import javax.ejb.Local;
import javax.naming.ConfigurationException;
import org.apache.log4j.Logger;
import com.cloud.configuration.Config;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.utils.NumbersUtil;
import com.cloud.utils.Pair;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachineProfile;
/**
 * Deployment planner that disperses an account's VMs: pods/clusters with
 * fewer of the account's VMs are preferred, blended with capacity ordering
 * via the configurable weight _userDispersionWeight (1.0 = pure dispersion,
 * 0.0 = pure capacity). Extends FirstFitPlanner.
 */
@Local(value=DeploymentPlanner.class)
public class UserDispersingPlanner extends FirstFitPlanner implements DeploymentPlanner {
private static final Logger s_logger = Logger.getLogger(UserDispersingPlanner.class);
/**
 * This method should reorder the given list of Cluster Ids by applying any necessary heuristic
 * for this planner
 * For UserDispersingPlanner we need to order the clusters by considering the number of VMs for this account
 * @return List<Long> ordered list of Cluster Ids
 */
@Override
protected List<Long> reorderClusters(long id, boolean isZone, Pair<List<Long>, Map<Long, Double>> clusterCapacityInfo, VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan){
List<Long> clusterIdsByCapacity = clusterCapacityInfo.first();
if(vmProfile.getOwner() == null){
return clusterIdsByCapacity;
}
long accountId = vmProfile.getOwner().getAccountId();
Pair<List<Long>, Map<Long, Double>> clusterIdsVmCountInfo = listClustersByUserDispersion(id, isZone, accountId);
//now we have 2 cluster lists - one ordered by capacity and the other by number of VMs for this account
//need to apply weights to these to find the correct ordering to follow
if(_userDispersionWeight == 1.0f){
// Pure dispersion: take the VM-count ordering, restricted to clusters with capacity.
List<Long> clusterIds = clusterIdsVmCountInfo.first();
clusterIds.retainAll(clusterIdsByCapacity);
return clusterIds;
}else{
//apply weights to the two lists
return orderByApplyingWeights(clusterCapacityInfo, clusterIdsVmCountInfo, accountId);
}
}
/**
 * This method should reorder the given list of Pod Ids by applying any necessary heuristic
 * for this planner
 * For UserDispersingPlanner we need to order the pods by considering the number of VMs for this account
 * @return List<Long> ordered list of Pod Ids
 */
@Override
protected List<Long> reorderPods(Pair<List<Long>, Map<Long, Double>> podCapacityInfo, VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan){
List<Long> podIdsByCapacity = podCapacityInfo.first();
if(vmProfile.getOwner() == null){
return podIdsByCapacity;
}
long accountId = vmProfile.getOwner().getAccountId();
Pair<List<Long>, Map<Long, Double>> podIdsVmCountInfo = listPodsByUserDispersion(plan.getDataCenterId(), accountId);
//now we have 2 pod lists - one ordered by capacity and the other by number of VMs for this account
//need to apply weights to these to find the correct ordering to follow
if(_userDispersionWeight == 1.0f){
// Pure dispersion: take the VM-count ordering, restricted to pods with capacity.
List<Long> podIds = podIdsVmCountInfo.first();
podIds.retainAll(podIdsByCapacity);
return podIds;
}else{
//apply weights to the two lists
return orderByApplyingWeights(podCapacityInfo, podIdsVmCountInfo, accountId);
}
}
// Cluster ids (zone- or pod-scoped by isZone) with this account's VM counts,
// as ordered by the DAO (ascending by VM count per the trace message).
protected Pair<List<Long>, Map<Long, Double>> listClustersByUserDispersion(long id, boolean isZone, long accountId){
if (s_logger.isDebugEnabled()) {
s_logger.debug("Applying Userdispersion heuristic to clusters for account: "+ accountId);
}
Pair<List<Long>, Map<Long, Double>> clusterIdsVmCountInfo;
if(isZone){
clusterIdsVmCountInfo = _vmInstanceDao.listClusterIdsInZoneByVmCount(id, accountId);
}else{
clusterIdsVmCountInfo = _vmInstanceDao.listClusterIdsInPodByVmCount(id, accountId);
}
if (s_logger.isTraceEnabled()) {
s_logger.trace("List of clusters in ascending order of number of VMs: "+ clusterIdsVmCountInfo.first());
}
return clusterIdsVmCountInfo;
}
// Pod ids in the zone with this account's VM counts, from the DAO.
protected Pair<List<Long>, Map<Long, Double>> listPodsByUserDispersion(long dataCenterId, long accountId) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Applying Userdispersion heuristic to pods for account: "+ accountId);
}
Pair<List<Long>, Map<Long, Double>> podIdsVmCountInfo = _vmInstanceDao.listPodIdsInZoneByVmCount(dataCenterId, accountId);
if (s_logger.isTraceEnabled()) {
s_logger.trace("List of pods in ascending order of number of VMs: "+ podIdsVmCountInfo.first());
}
return podIdsVmCountInfo;
}
// Blends the two orderings: total weight = capacity * (1 - w) + normalized
// VM count * w, then sorts ids ascending by total weight (TreeMap), keeping
// only ids present in the capacity list.
private List<Long> orderByApplyingWeights(Pair<List<Long>, Map<Long, Double>> capacityInfo, Pair<List<Long>, Map<Long, Double>> vmCountInfo, long accountId){
List<Long> capacityOrderedIds = capacityInfo.first();
List<Long> vmCountOrderedIds = vmCountInfo.first();
Map<Long, Double> capacityMap = capacityInfo.second();
Map<Long, Double> vmCountMap = vmCountInfo.second();
if (s_logger.isTraceEnabled()) {
s_logger.trace("Capacity Id list: "+ capacityOrderedIds + " , capacityMap:"+capacityMap);
}
if (s_logger.isTraceEnabled()) {
s_logger.trace("Vm Count Id list: "+ vmCountOrderedIds + " , vmCountMap:"+vmCountMap);
}
List<Long> idsReorderedByWeights = new ArrayList<Long>();
float capacityWeight = (1.0f -_userDispersionWeight);
if (s_logger.isDebugEnabled()) {
s_logger.debug("Applying userDispersionWeight: "+ _userDispersionWeight);
}
//normalize the vmCountMap
LinkedHashMap<Long, Double> normalisedVmCountIdMap= new LinkedHashMap<Long, Double>();
Long totalVmsOfAccount = _vmInstanceDao.countRunningByAccount(accountId);
if (s_logger.isDebugEnabled()) {
s_logger.debug("Total VMs for account: "+ totalVmsOfAccount);
}
// NOTE(review): if totalVmsOfAccount is 0 this division yields NaN/Infinity
// weights (or NPE if the DAO returns null) — confirm the DAO guarantees a
// positive count on this code path.
for(Long id : vmCountOrderedIds){
Double normalisedCount = vmCountMap.get(id) / totalVmsOfAccount;
normalisedVmCountIdMap.put(id, normalisedCount);
}
//consider only those ids that are in capacity map.
SortedMap<Double, List<Long>> sortedMap= new TreeMap<Double, List<Long>>();
for(Long id : capacityOrderedIds){
Double weightedCapacityValue = capacityMap.get(id) * capacityWeight;
// NOTE(review): assumes every capacity id also appears in the VM-count map;
// a missing entry would NPE on unboxing — verify the DAO contracts.
Double weightedVmCountValue = normalisedVmCountIdMap.get(id) * _userDispersionWeight;
Double totalWeight = weightedCapacityValue + weightedVmCountValue;
// Ids with identical total weight are grouped and keep capacity order.
if(sortedMap.containsKey(totalWeight)){
List<Long> idList = sortedMap.get(totalWeight);
idList.add(id);
sortedMap.put(totalWeight, idList);
}else{
List<Long> idList = new ArrayList<Long>();
idList.add(id);
sortedMap.put(totalWeight, idList);
}
}
for(List<Long> idList : sortedMap.values()){
idsReorderedByWeights.addAll(idList);
}
if (s_logger.isTraceEnabled()) {
s_logger.trace("Reordered Id list: "+ idsReorderedByWeights);
}
return idsReorderedByWeights;
}
// Handles non-bare-metal VMs when the allocation algorithm is "userdispersing".
@Override
public boolean canHandle(VirtualMachineProfile<? extends VirtualMachine> vm, DeploymentPlan plan, ExcludeList avoid) {
if(vm.getHypervisorType() != HypervisorType.BareMetal){
//check the allocation strategy
if (_allocationAlgorithm != null && _allocationAlgorithm.equals(AllocationAlgorithm.userdispersing.toString())) {
return true;
}
}
return false;
}
// Dispersion weight in [0..1]; loaded from config in configure(), default 1.0.
float _userDispersionWeight;
@Override
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
super.configure(name, params);
// Read the global VmUserDispersionWeight setting; fall back to 1.0 (pure dispersion).
String weight = _configDao.getValue(Config.VmUserDispersionWeight.key());
_userDispersionWeight = NumbersUtil.parseFloat(weight, 1.0f);
return true;
}
}

View File

@ -10,8 +10,8 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.domain;
package com.cloud.domain;
import java.util.Date;
import java.util.UUID;
@ -26,42 +26,42 @@ import org.apache.log4j.Logger;
import com.cloud.api.Identity;
import com.cloud.utils.db.GenericDao;
@Entity
@Table(name="domain")
@Entity
@Table(name="domain")
public class DomainVO implements Domain, Identity {
public static final Logger s_logger = Logger.getLogger(DomainVO.class.getName());
@Id
@GeneratedValue(strategy=GenerationType.IDENTITY)
@Column(name="id")
private long id;
@Column(name="parent")
private Long parent = null;
@Column(name="name")
private String name = null;
@Column(name="owner")
private long accountId;
@Column(name="path")
private String path = null;
@Column(name="level")
private int level;
@Column(name=GenericDao.REMOVED_COLUMN)
private Date removed;
@Column(name="child_count")
private int childCount = 0;
@Column(name="next_child_seq")
@Id
@GeneratedValue(strategy=GenerationType.IDENTITY)
@Column(name="id")
private long id;
@Column(name="parent")
private Long parent = null;
@Column(name="name")
private String name = null;
@Column(name="owner")
private long accountId;
@Column(name="path")
private String path = null;
@Column(name="level")
private int level;
@Column(name=GenericDao.REMOVED_COLUMN)
private Date removed;
@Column(name="child_count")
private int childCount = 0;
@Column(name="next_child_seq")
private long nextChildSeq = 1L;
@Column(name="state")
@Column(name="state")
private Domain.State state;
@Column(name="network_domain")
@ -69,103 +69,103 @@ public class DomainVO implements Domain, Identity {
@Column(name="uuid")
private String uuid;
public DomainVO() {}
public DomainVO(long id, String name, long owner, Long parentId, String networkDomain) {
this(name, owner, parentId, networkDomain);
this.id = id;
this.uuid = UUID.randomUUID().toString();
}
public DomainVO(String name, long owner, Long parentId, String networkDomain) {
this.parent = parentId;
this.name = name;
this.accountId = owner;
this.path ="";
}
public DomainVO(String name, long owner, Long parentId, String networkDomain) {
this.parent = parentId;
this.name = name;
this.accountId = owner;
this.path ="";
this.level = 0;
this.state = Domain.State.Active;
this.networkDomain = networkDomain;
this.uuid = UUID.randomUUID().toString();
}
}
@Override
public long getId() {
return id;
}
public long getId() {
return id;
}
@Override
public Long getParent() {
return parent;
}
public Long getParent() {
return parent;
}
@Override
public void setParent(Long parent) {
if(parent == null) {
this.parent = DomainVO.ROOT_DOMAIN;
} else {
if(parent.longValue() <= DomainVO.ROOT_DOMAIN)
this.parent = DomainVO.ROOT_DOMAIN;
else
this.parent = parent;
}
}
public void setParent(Long parent) {
if(parent == null) {
this.parent = DomainVO.ROOT_DOMAIN;
} else {
if(parent.longValue() <= DomainVO.ROOT_DOMAIN)
this.parent = DomainVO.ROOT_DOMAIN;
else
this.parent = parent;
}
}
@Override
public String getName() {
return name;
}
public String getName() {
return name;
}
@Override
public void setName(String name) {
this.name = name;
}
public void setName(String name) {
this.name = name;
}
@Override
public long getAccountId() {
return accountId;
}
public long getAccountId() {
return accountId;
}
@Override
public Date getRemoved() {
return removed;
}
public Date getRemoved() {
return removed;
}
@Override
public String getPath() {
return path;
}
public String getPath() {
return path;
}
@Override
public void setPath(String path) {
this.path = path;
}
public void setPath(String path) {
this.path = path;
}
@Override
public int getLevel() {
return level;
}
public void setLevel(int level) {
this.level = level;
}
public int getLevel() {
return level;
}
public void setLevel(int level) {
this.level = level;
}
@Override
public int getChildCount() {
return childCount;
}
public void setChildCount(int count) {
childCount = count;
}
public int getChildCount() {
return childCount;
}
public void setChildCount(int count) {
childCount = count;
}
@Override
public long getNextChildSeq() {
return nextChildSeq;
}
public void setNextChildSeq(long seq) {
nextChildSeq = seq;
public long getNextChildSeq() {
return nextChildSeq;
}
public void setNextChildSeq(long seq) {
nextChildSeq = seq;
}
@Override
@ -199,6 +199,6 @@ public class DomainVO implements Domain, Identity {
public void setUuid(String uuid) {
this.uuid = uuid;
}
}
}
}

View File

@ -10,15 +10,15 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.domain.dao;
package com.cloud.domain.dao;
import java.util.List;
import java.util.Set;
import com.cloud.domain.DomainVO;
import com.cloud.utils.db.GenericDao;
public interface DomainDao extends GenericDao<DomainVO, Long> {
public interface DomainDao extends GenericDao<DomainVO, Long> {
public DomainVO create(DomainVO domain);
public DomainVO findDomainByPath(String domainPath);
public boolean isChildDomain(Long parentId, Long childId);
@ -26,5 +26,5 @@ public interface DomainDao extends GenericDao<DomainVO, Long> {
List<DomainVO> findImmediateChildrenForParent(Long parentId);
List<DomainVO> findAllChildren(String path, Long parentId);
List<DomainVO> findInactiveDomains();
Set<Long> getDomainParentIds(long domainId);
}
Set<Long> getDomainParentIds(long domainId);
}

View File

@ -10,8 +10,8 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.domain.dao;
package com.cloud.domain.dao;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
@ -31,30 +31,30 @@ import com.cloud.utils.db.GlobalLock;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.Transaction;
@Local(value={DomainDao.class})
public class DomainDaoImpl extends GenericDaoBase<DomainVO, Long> implements DomainDao {
@Local(value={DomainDao.class})
public class DomainDaoImpl extends GenericDaoBase<DomainVO, Long> implements DomainDao {
private static final Logger s_logger = Logger.getLogger(DomainDaoImpl.class);
protected SearchBuilder<DomainVO> DomainNameLikeSearch;
protected SearchBuilder<DomainVO> ParentDomainNameLikeSearch;
protected SearchBuilder<DomainVO> DomainNameLikeSearch;
protected SearchBuilder<DomainVO> ParentDomainNameLikeSearch;
protected SearchBuilder<DomainVO> DomainPairSearch;
protected SearchBuilder<DomainVO> ImmediateChildDomainSearch;
protected SearchBuilder<DomainVO> ImmediateChildDomainSearch;
protected SearchBuilder<DomainVO> FindAllChildrenSearch;
protected SearchBuilder<DomainVO> AllFieldsSearch;
public DomainDaoImpl () {
DomainNameLikeSearch = createSearchBuilder();
DomainNameLikeSearch.and("name", DomainNameLikeSearch.entity().getName(), SearchCriteria.Op.LIKE);
DomainNameLikeSearch.done();
ParentDomainNameLikeSearch = createSearchBuilder();
ParentDomainNameLikeSearch.and("name", ParentDomainNameLikeSearch.entity().getName(), SearchCriteria.Op.LIKE);
ParentDomainNameLikeSearch.and("parent", ParentDomainNameLikeSearch.entity().getName(), SearchCriteria.Op.EQ);
ParentDomainNameLikeSearch.done();
DomainPairSearch = createSearchBuilder();
DomainPairSearch.and("id", DomainPairSearch.entity().getId(), SearchCriteria.Op.IN);
public DomainDaoImpl () {
DomainNameLikeSearch = createSearchBuilder();
DomainNameLikeSearch.and("name", DomainNameLikeSearch.entity().getName(), SearchCriteria.Op.LIKE);
DomainNameLikeSearch.done();
ParentDomainNameLikeSearch = createSearchBuilder();
ParentDomainNameLikeSearch.and("name", ParentDomainNameLikeSearch.entity().getName(), SearchCriteria.Op.LIKE);
ParentDomainNameLikeSearch.and("parent", ParentDomainNameLikeSearch.entity().getName(), SearchCriteria.Op.EQ);
ParentDomainNameLikeSearch.done();
DomainPairSearch = createSearchBuilder();
DomainPairSearch.and("id", DomainPairSearch.entity().getId(), SearchCriteria.Op.IN);
DomainPairSearch.done();
ImmediateChildDomainSearch = createSearchBuilder();
@ -74,15 +74,15 @@ public class DomainDaoImpl extends GenericDaoBase<DomainVO, Long> implements Dom
AllFieldsSearch.and("parent", AllFieldsSearch.entity().getParent(), SearchCriteria.Op.EQ);
AllFieldsSearch.done();
}
}
private static String allocPath(DomainVO parentDomain, String name) {
String parentPath = parentDomain.getPath();
return parentPath + name + "/";
}
@Override
return parentPath + name + "/";
}
@Override
public synchronized DomainVO create(DomainVO domain) {
// make sure domain name is valid
String domainName = domain.getName();
@ -94,19 +94,19 @@ public class DomainDaoImpl extends GenericDaoBase<DomainVO, Long> implements Dom
throw new IllegalArgumentException("Domain name is null. Please specify a valid domain name.");
}
long parent = DomainVO.ROOT_DOMAIN;
if(domain.getParent() != null && domain.getParent().longValue() >= DomainVO.ROOT_DOMAIN) {
parent = domain.getParent().longValue();
}
DomainVO parentDomain = findById(parent);
if(parentDomain == null) {
s_logger.error("Unable to load parent domain: " + parent);
return null;
}
Transaction txn = Transaction.currentTxn();
try {
long parent = DomainVO.ROOT_DOMAIN;
if(domain.getParent() != null && domain.getParent().longValue() >= DomainVO.ROOT_DOMAIN) {
parent = domain.getParent().longValue();
}
DomainVO parentDomain = findById(parent);
if(parentDomain == null) {
s_logger.error("Unable to load parent domain: " + parent);
return null;
}
Transaction txn = Transaction.currentTxn();
try {
txn.start();
parentDomain = this.lockRow(parent, true);
@ -114,49 +114,49 @@ public class DomainDaoImpl extends GenericDaoBase<DomainVO, Long> implements Dom
s_logger.error("Unable to lock parent domain: " + parent);
return null;
}
domain.setPath(allocPath(parentDomain, domain.getName()));
domain.setLevel(parentDomain.getLevel() + 1);
parentDomain.setNextChildSeq(parentDomain.getNextChildSeq() + 1); // FIXME: remove sequence number?
parentDomain.setChildCount(parentDomain.getChildCount() + 1);
persist(domain);
update(parentDomain.getId(), parentDomain);
txn.commit();
return domain;
} catch(Exception e) {
s_logger.error("Unable to create domain due to " + e.getMessage(), e);
txn.rollback();
return null;
domain.setPath(allocPath(parentDomain, domain.getName()));
domain.setLevel(parentDomain.getLevel() + 1);
parentDomain.setNextChildSeq(parentDomain.getNextChildSeq() + 1); // FIXME: remove sequence number?
parentDomain.setChildCount(parentDomain.getChildCount() + 1);
persist(domain);
update(parentDomain.getId(), parentDomain);
txn.commit();
return domain;
} catch(Exception e) {
s_logger.error("Unable to create domain due to " + e.getMessage(), e);
txn.rollback();
return null;
}
}
@Override
@DB
public boolean remove(Long id) {
// check for any active users / domains assigned to the given domain id and don't remove the domain if there are any
if (id != null && id.longValue() == DomainVO.ROOT_DOMAIN) {
s_logger.error("Can not remove domain " + id + " as it is ROOT domain");
return false;
}
DomainVO domain = findById(id);
if(domain == null) {
s_logger.error("Unable to remove domain as domain " + id + " no longer exists");
return false;
}
if(domain.getParent() == null) {
s_logger.error("Invalid domain " + id + ", orphan?");
return false;
}
String sql = "SELECT * from account where domain_id = " + id + " and removed is null";
String sql1 = "SELECT * from domain where parent = " + id + " and removed is null";
}
boolean success = false;
Transaction txn = Transaction.currentTxn();
@Override
@DB
public boolean remove(Long id) {
// check for any active users / domains assigned to the given domain id and don't remove the domain if there are any
if (id != null && id.longValue() == DomainVO.ROOT_DOMAIN) {
s_logger.error("Can not remove domain " + id + " as it is ROOT domain");
return false;
}
DomainVO domain = findById(id);
if(domain == null) {
s_logger.error("Unable to remove domain as domain " + id + " no longer exists");
return false;
}
if(domain.getParent() == null) {
s_logger.error("Invalid domain " + id + ", orphan?");
return false;
}
String sql = "SELECT * from account where domain_id = " + id + " and removed is null";
String sql1 = "SELECT * from domain where parent = " + id + " and removed is null";
boolean success = false;
Transaction txn = Transaction.currentTxn();
try {
txn.start();
DomainVO parentDomain = super.lockRow(domain.getParent(), true);
@ -165,29 +165,29 @@ public class DomainDaoImpl extends GenericDaoBase<DomainVO, Long> implements Dom
return false;
}
PreparedStatement stmt = txn.prepareAutoCloseStatement(sql);
ResultSet rs = stmt.executeQuery();
if (rs.next()) {
return false;
}
stmt = txn.prepareAutoCloseStatement(sql1);
rs = stmt.executeQuery();
if (rs.next()) {
return false;
}
parentDomain.setChildCount(parentDomain.getChildCount() - 1);
update(parentDomain.getId(), parentDomain);
success = super.remove(id);
txn.commit();
PreparedStatement stmt = txn.prepareAutoCloseStatement(sql);
ResultSet rs = stmt.executeQuery();
if (rs.next()) {
return false;
}
stmt = txn.prepareAutoCloseStatement(sql1);
rs = stmt.executeQuery();
if (rs.next()) {
return false;
}
parentDomain.setChildCount(parentDomain.getChildCount() - 1);
update(parentDomain.getId(), parentDomain);
success = super.remove(id);
txn.commit();
} catch (SQLException ex) {
success = false;
s_logger.error("error removing domain: " + id, ex);
txn.rollback();
}
return success;
}
success = false;
s_logger.error("error removing domain: " + id, ex);
txn.rollback();
}
return success;
}
@Override
public DomainVO findDomainByPath(String domainPath) {
SearchCriteria<DomainVO> sc = createSearchCriteria();
@ -217,33 +217,33 @@ public class DomainDaoImpl extends GenericDaoBase<DomainVO, Long> implements Dom
return listBy(sc);
}
@Override
public boolean isChildDomain(Long parentId, Long childId) {
if ((parentId == null) || (childId == null)) {
return false;
}
if (parentId.equals(childId)) {
return true;
}
boolean result = false;
SearchCriteria<DomainVO> sc = DomainPairSearch.create();
sc.setParameters("id", parentId, childId);
List<DomainVO> domainPair = listBy(sc);
if ((domainPair != null) && (domainPair.size() == 2)) {
DomainVO d1 = domainPair.get(0);
DomainVO d2 = domainPair.get(1);
if (d1.getId() == parentId) {
result = d2.getPath().startsWith(d1.getPath());
} else {
result = d1.getPath().startsWith(d2.getPath());
}
}
return result;
@Override
public boolean isChildDomain(Long parentId, Long childId) {
if ((parentId == null) || (childId == null)) {
return false;
}
if (parentId.equals(childId)) {
return true;
}
boolean result = false;
SearchCriteria<DomainVO> sc = DomainPairSearch.create();
sc.setParameters("id", parentId, childId);
List<DomainVO> domainPair = listBy(sc);
if ((domainPair != null) && (domainPair.size() == 2)) {
DomainVO d1 = domainPair.get(0);
DomainVO d2 = domainPair.get(1);
if (d1.getId() == parentId) {
result = d2.getPath().startsWith(d1.getPath());
} else {
result = d1.getPath().startsWith(d2.getPath());
}
}
return result;
}
@Override
@ -265,5 +265,5 @@ public class DomainDaoImpl extends GenericDaoBase<DomainVO, Long> implements Dom
}
return parentDomains;
}
}
}
}

View File

@ -10,8 +10,8 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.ha;
package com.cloud.ha;
import java.util.ArrayList;
import java.util.List;
@ -36,39 +36,39 @@ import com.cloud.utils.component.Inject;
import com.cloud.utils.db.SearchCriteria2;
import com.cloud.utils.db.SearchCriteria.Op;
import com.cloud.utils.db.SearchCriteriaService;
public abstract class AbstractInvestigatorImpl implements Investigator {
private static final Logger s_logger = Logger.getLogger(AbstractInvestigatorImpl.class);
private String _name = null;
@Inject private HostDao _hostDao = null;
public abstract class AbstractInvestigatorImpl implements Investigator {
private static final Logger s_logger = Logger.getLogger(AbstractInvestigatorImpl.class);
private String _name = null;
@Inject private HostDao _hostDao = null;
@Inject private AgentManager _agentMgr = null;
@Inject private ResourceManager _resourceMgr = null;
@Override
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
_name = name;
return true;
}
@Override
public String getName() {
return _name;
}
@Override
public boolean start() {
return true;
}
@Override
public boolean stop() {
return true;
@Override
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
_name = name;
return true;
}
// Host.status is up and Host.type is routing
@Override
public String getName() {
return _name;
}
@Override
public boolean start() {
return true;
}
@Override
public boolean stop() {
return true;
}
// Host.status is up and Host.type is routing
protected List<Long> findHostByPod(long podId, Long excludeHostId) {
SearchCriteriaService<HostVO, HostVO> sc = SearchCriteria2.create(HostVO.class);
sc.addAnd(sc.getEntity().getType(), Op.EQ, Type.Routing);
@ -86,34 +86,34 @@ public abstract class AbstractInvestigatorImpl implements Investigator {
}
return hostIds;
}
protected Status testIpAddress(Long hostId, String testHostIp) {
try {
Answer pingTestAnswer = _agentMgr.send(hostId, new PingTestCommand(testHostIp));
if(pingTestAnswer == null) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("host (" + testHostIp + ") returns null answer");
}
return null;
}
if (pingTestAnswer.getResult()) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("host (" + testHostIp + ") has been successfully pinged, returning that host is up");
}
// computing host is available, but could not reach agent, return false
return Status.Up;
} else {
}
protected Status testIpAddress(Long hostId, String testHostIp) {
try {
Answer pingTestAnswer = _agentMgr.send(hostId, new PingTestCommand(testHostIp));
if(pingTestAnswer == null) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("host (" + testHostIp + ") returns null answer");
}
return null;
}
if (pingTestAnswer.getResult()) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("host (" + testHostIp + ") has been successfully pinged, returning that host is up");
}
// computing host is available, but could not reach agent, return false
return Status.Up;
} else {
if (s_logger.isDebugEnabled()) {
s_logger.debug("host (" + testHostIp + ") cannot be pinged, returning null ('I don't know')");
}
return null;
}
} catch (AgentUnavailableException e) {
return null;
} catch (OperationTimedoutException e) {
return null;
}
}
}
return null;
}
} catch (AgentUnavailableException e) {
return null;
} catch (OperationTimedoutException e) {
return null;
}
}
}

View File

@ -10,8 +10,8 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.ha;
package com.cloud.ha;
import java.util.List;
import java.util.Map;
@ -29,17 +29,17 @@ import com.cloud.utils.component.Inject;
import com.cloud.vm.Nic;
import com.cloud.vm.VMInstanceVO;
import com.cloud.vm.VirtualMachine;
@Local(value={Investigator.class})
public class ManagementIPSystemVMInvestigator extends AbstractInvestigatorImpl {
private static final Logger s_logger = Logger.getLogger(ManagementIPSystemVMInvestigator.class);
private String _name = null;
@Inject private HostDao _hostDao = null;
@Local(value={Investigator.class})
public class ManagementIPSystemVMInvestigator extends AbstractInvestigatorImpl {
private static final Logger s_logger = Logger.getLogger(ManagementIPSystemVMInvestigator.class);
private String _name = null;
@Inject private HostDao _hostDao = null;
@Inject private NetworkManager _networkMgr = null;
@Override
@Override
public Boolean isVmAlive(VMInstanceVO vm, HostVO host) {
if (!VirtualMachine.Type.isSystemVM(vm.getType())) {
s_logger.debug("Not a System Vm, unable to determine state of " + vm + " returning null");
@ -78,10 +78,10 @@ public class ManagementIPSystemVMInvestigator extends AbstractInvestigatorImpl {
Status vmState = testIpAddress(otherHost, nic.getIp4Address());
if (vmState == null) {
// can't get information from that host, try the next one
continue;
}
if (vmState == Status.Up) {
if (s_logger.isDebugEnabled()) {
continue;
}
if (vmState == Status.Up) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("successfully pinged vm's private IP (" + vm.getPrivateIpAddress() + "), returning that the VM is up");
}
return Boolean.TRUE;
@ -93,43 +93,43 @@ public class ManagementIPSystemVMInvestigator extends AbstractInvestigatorImpl {
if (s_logger.isDebugEnabled()) {
s_logger.debug("successfully pinged vm's host IP (" + vmHost.getPrivateIpAddress() + "), but could not ping VM, returning that the VM is down");
}
return Boolean.FALSE;
}
}
return Boolean.FALSE;
}
}
}
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("unable to determine state of " + vm + " returning null");
}
return null;
}
@Override
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("unable to determine state of " + vm + " returning null");
}
return null;
}
@Override
public Status isAgentAlive(HostVO agent) {
return null;
}
@Override
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
_name = name;
return true;
}
@Override
public String getName() {
return _name;
}
@Override
public boolean start() {
return true;
}
@Override
public boolean stop() {
return true;
}
}
}
@Override
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
_name = name;
return true;
}
@Override
public String getName() {
return _name;
}
@Override
public boolean start() {
return true;
}
@Override
public boolean stop() {
return true;
}
}

View File

@ -10,8 +10,8 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.ha;
package com.cloud.ha;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
@ -37,18 +37,18 @@ import com.cloud.vm.UserVmVO;
import com.cloud.vm.VMInstanceVO;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.dao.UserVmDao;
@Local(value={Investigator.class})
public class UserVmDomRInvestigator extends AbstractInvestigatorImpl {
private static final Logger s_logger = Logger.getLogger(UserVmDomRInvestigator.class);
private String _name = null;
@Inject private final UserVmDao _userVmDao = null;
@Local(value={Investigator.class})
public class UserVmDomRInvestigator extends AbstractInvestigatorImpl {
private static final Logger s_logger = Logger.getLogger(UserVmDomRInvestigator.class);
private String _name = null;
@Inject private final UserVmDao _userVmDao = null;
@Inject private final AgentManager _agentMgr = null;
@Inject private final NetworkManager _networkMgr = null;
@Inject private final VirtualNetworkApplianceManager _vnaMgr = null;
@Override
@Inject private final VirtualNetworkApplianceManager _vnaMgr = null;
@Override
public Boolean isVmAlive(VMInstanceVO vm, HostVO host) {
if (vm.getType() != VirtualMachine.Type.User) {
if (s_logger.isDebugEnabled()) {
@ -57,9 +57,9 @@ public class UserVmDomRInvestigator extends AbstractInvestigatorImpl {
return null;
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("testing if " + vm + " is alive");
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("testing if " + vm + " is alive");
}
// to verify that the VM is alive, we ask the domR (router) to ping the VM (private IP)
UserVmVO userVm = _userVmDao.findById(vm.getId());
@ -93,76 +93,76 @@ public class UserVmDomRInvestigator extends AbstractInvestigatorImpl {
return result;
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("Returning null since we're unable to determine state of " + vm);
}
return null;
}
@Override
public Status isAgentAlive(HostVO agent) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("checking if agent (" + agent.getId() + ") is alive");
}
if (agent.getPodId() == null) {
return null;
}
List<Long> otherHosts = findHostByPod(agent.getPodId(), agent.getId());
for (Long hostId : otherHosts) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("sending ping from (" + hostId + ") to agent's host ip address (" + agent.getPrivateIpAddress() + ")");
}
Status hostState = testIpAddress(hostId, agent.getPrivateIpAddress());
if (hostState == null) {
continue;
}
if (hostState == Status.Up) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("ping from (" + hostId + ") to agent's host ip address (" + agent.getPrivateIpAddress() + ") successful, returning that agent is disconnected");
}
return Status.Disconnected; // the computing host ip is ping-able, but the computing agent is down, report that the agent is disconnected
} else if (hostState == Status.Down) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("returning host state: " + hostState);
}
return hostState;
}
}
// could not reach agent, could not reach agent's host, unclear what the problem is but it'll require more investigation...
if (s_logger.isDebugEnabled()) {
s_logger.debug("could not reach agent, could not reach agent's host, returning that we don't have enough information");
}
return null;
}
@Override
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
_name = name;
return true;
}
@Override
public String getName() {
return _name;
}
@Override
public boolean start() {
return true;
}
@Override
public boolean stop() {
return true;
if (s_logger.isDebugEnabled()) {
s_logger.debug("Returning null since we're unable to determine state of " + vm);
}
return null;
}
private Boolean testUserVM(VMInstanceVO vm, Nic nic, VirtualRouter router) {
@Override
public Status isAgentAlive(HostVO agent) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("checking if agent (" + agent.getId() + ") is alive");
}
if (agent.getPodId() == null) {
return null;
}
List<Long> otherHosts = findHostByPod(agent.getPodId(), agent.getId());
for (Long hostId : otherHosts) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("sending ping from (" + hostId + ") to agent's host ip address (" + agent.getPrivateIpAddress() + ")");
}
Status hostState = testIpAddress(hostId, agent.getPrivateIpAddress());
if (hostState == null) {
continue;
}
if (hostState == Status.Up) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("ping from (" + hostId + ") to agent's host ip address (" + agent.getPrivateIpAddress() + ") successful, returning that agent is disconnected");
}
return Status.Disconnected; // the computing host ip is ping-able, but the computing agent is down, report that the agent is disconnected
} else if (hostState == Status.Down) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("returning host state: " + hostState);
}
return hostState;
}
}
// could not reach agent, could not reach agent's host, unclear what the problem is but it'll require more investigation...
if (s_logger.isDebugEnabled()) {
s_logger.debug("could not reach agent, could not reach agent's host, returning that we don't have enough information");
}
return null;
}
@Override
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
_name = name;
return true;
}
@Override
public String getName() {
return _name;
}
@Override
public boolean start() {
return true;
}
@Override
public boolean stop() {
return true;
}
private Boolean testUserVM(VMInstanceVO vm, Nic nic, VirtualRouter router) {
String privateIp = nic.getIp4Address();
String routerPrivateIp = router.getPrivateIpAddress();
@ -194,5 +194,5 @@ public class UserVmDomRInvestigator extends AbstractInvestigatorImpl {
}
return null;
}
}
}
}

View File

@ -10,8 +10,8 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.host.dao;
package com.cloud.host.dao;
import java.util.Date;
import java.util.List;
@ -25,11 +25,11 @@ import com.cloud.info.RunningHostCountInfo;
import com.cloud.resource.ResourceState;
import com.cloud.utils.db.GenericDao;
import com.cloud.utils.fsm.StateDao;
/**
* Data Access Object for server
*
*/
/**
* Data Access Object for server
*
*/
public interface HostDao extends GenericDao<HostVO, Long>, StateDao<Status, Status.Event, Host> {
long countBy(long clusterId, ResourceState... states);
@ -38,17 +38,17 @@ public interface HostDao extends GenericDao<HostVO, Long>, StateDao<Status, Stat
* as disconnected.
*
* @param msId management server id.
*/
void markHostsAsDisconnected(long msId, long lastPing);
List<HostVO> findLostHosts(long timeout);
*/
void markHostsAsDisconnected(long msId, long lastPing);
List<HostVO> findLostHosts(long timeout);
List<HostVO> findAndUpdateDirectAgentToLoad(long lastPingSecondsAfter, Long limit, long managementServerId);
List<RunningHostCountInfo> getRunningHostCounts(Date cutTime);
long getNextSequence(long hostId);
List<RunningHostCountInfo> getRunningHostCounts(Date cutTime);
long getNextSequence(long hostId);
void loadDetails(HostVO host);
void saveDetails(HostVO host);
@ -65,5 +65,5 @@ public interface HostDao extends GenericDao<HostVO, Long>, StateDao<Status, Stat
HostVO findByGuid(String guid);
HostVO findByTypeNameAndZoneId(long zoneId, String name, Host.Type type);
}
HostVO findByTypeNameAndZoneId(long zoneId, String name, Host.Type type);
}

View File

@ -323,14 +323,14 @@ public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao
return findOneBy(sc);
}
@Override @DB
@Override @DB
public List<HostVO> findAndUpdateDirectAgentToLoad(long lastPingSecondsAfter, Long limit, long managementServerId) {
Transaction txn = Transaction.currentTxn();
txn.start();
txn.start();
SearchCriteria<HostVO> sc = UnmanagedDirectConnectSearch.create();
sc.setParameters("lastPinged", lastPingSecondsAfter);
//sc.setParameters("resourceStates", ResourceState.ErrorInMaintenance, ResourceState.Maintenance, ResourceState.PrepareForMaintenance, ResourceState.Disabled);
sc.setJoinParameters("ClusterManagedSearch", "managed", Managed.ManagedState.Managed);
sc.setJoinParameters("ClusterManagedSearch", "managed", Managed.ManagedState.Managed);
List<HostVO> hosts = lockRows(sc, new Filter(HostVO.class, "clusterId", true, 0L, limit), true);
for (HostVO host : hosts) {
@ -340,7 +340,7 @@ public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao
txn.commit();
return hosts;
return hosts;
}
@Override @DB
@ -362,12 +362,12 @@ public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao
return hosts;
}
@Override
public void markHostsAsDisconnected(long msId, long lastPing) {
SearchCriteria<HostVO> sc = MsStatusSearch.create();
@Override
public void markHostsAsDisconnected(long msId, long lastPing) {
SearchCriteria<HostVO> sc = MsStatusSearch.create();
sc.setParameters("ms", msId);
HostVO host = createForUpdate();
host.setLastPinged(lastPing);
host.setDisconnectedOn(new Date());

View File

@ -10,17 +10,17 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.host.dao;
import java.util.List;
import com.cloud.host.HostTagVO;
import com.cloud.utils.db.GenericDao;
public interface HostTagsDao extends GenericDao<HostTagVO, Long> {
void persist(long hostId, List<String> hostTags);
List<String> gethostTags(long hostId);
}
package com.cloud.host.dao;
import java.util.List;
import com.cloud.host.HostTagVO;
import com.cloud.utils.db.GenericDao;
public interface HostTagsDao extends GenericDao<HostTagVO, Long> {
void persist(long hostId, List<String> hostTags);
List<String> gethostTags(long hostId);
}

View File

@ -10,20 +10,20 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.hypervisor.dao;
import java.util.List;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.hypervisor.HypervisorCapabilitiesVO;
import com.cloud.utils.db.GenericDao;
public interface HypervisorCapabilitiesDao extends GenericDao<HypervisorCapabilitiesVO, Long> {
List<HypervisorCapabilitiesVO> listAllByHypervisorType(HypervisorType hypervisorType);
HypervisorCapabilitiesVO findByHypervisorTypeAndVersion(HypervisorType hypervisorType, String hypervisorVersion);
Long getMaxGuestsLimit(HypervisorType hypervisorType, String hypervisorVersion);
}
package com.cloud.hypervisor.dao;
import java.util.List;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.hypervisor.HypervisorCapabilitiesVO;
import com.cloud.utils.db.GenericDao;
public interface HypervisorCapabilitiesDao extends GenericDao<HypervisorCapabilitiesVO, Long> {
List<HypervisorCapabilitiesVO> listAllByHypervisorType(HypervisorType hypervisorType);
HypervisorCapabilitiesVO findByHypervisorTypeAndVersion(HypervisorType hypervisorType, String hypervisorVersion);
Long getMaxGuestsLimit(HypervisorType hypervisorType, String hypervisorVersion);
}

View File

@ -10,94 +10,94 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.hypervisor.dao;
import java.util.List;
import javax.ejb.Local;
import org.apache.log4j.Logger;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.hypervisor.HypervisorCapabilitiesVO;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.GenericSearchBuilder;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
@Local(value=HypervisorCapabilitiesDao.class)
public class HypervisorCapabilitiesDaoImpl extends GenericDaoBase<HypervisorCapabilitiesVO, Long> implements HypervisorCapabilitiesDao {
private static final Logger s_logger = Logger.getLogger(HypervisorCapabilitiesDaoImpl.class);
protected final SearchBuilder<HypervisorCapabilitiesVO> HypervisorTypeSearch;
protected final SearchBuilder<HypervisorCapabilitiesVO> HypervisorTypeAndVersionSearch;
protected final GenericSearchBuilder<HypervisorCapabilitiesVO, Long> MaxGuestLimitByHypervisorSearch;
private static final String DEFAULT_VERSION = "default";
protected HypervisorCapabilitiesDaoImpl() {
HypervisorTypeSearch = createSearchBuilder();
HypervisorTypeSearch.and("hypervisorType", HypervisorTypeSearch.entity().getHypervisorType(), SearchCriteria.Op.EQ);
HypervisorTypeSearch.done();
HypervisorTypeAndVersionSearch = createSearchBuilder();
HypervisorTypeAndVersionSearch.and("hypervisorType", HypervisorTypeAndVersionSearch.entity().getHypervisorType(), SearchCriteria.Op.EQ);
HypervisorTypeAndVersionSearch.and("hypervisorVersion", HypervisorTypeAndVersionSearch.entity().getHypervisorVersion(), SearchCriteria.Op.EQ);
HypervisorTypeAndVersionSearch.done();
MaxGuestLimitByHypervisorSearch = createSearchBuilder(Long.class);
MaxGuestLimitByHypervisorSearch.selectField(MaxGuestLimitByHypervisorSearch.entity().getMaxGuestsLimit());
MaxGuestLimitByHypervisorSearch.and("hypervisorType", MaxGuestLimitByHypervisorSearch.entity().getHypervisorType(), SearchCriteria.Op.EQ);
MaxGuestLimitByHypervisorSearch.and("hypervisorVersion", MaxGuestLimitByHypervisorSearch.entity().getHypervisorVersion(), SearchCriteria.Op.EQ);
MaxGuestLimitByHypervisorSearch.done();
}
@Override
public List<HypervisorCapabilitiesVO> listAllByHypervisorType(HypervisorType hypervisorType){
SearchCriteria<HypervisorCapabilitiesVO> sc = HypervisorTypeSearch.create();
sc.setParameters("hypervisorType", hypervisorType);
return search(sc, null);
}
@Override
public HypervisorCapabilitiesVO findByHypervisorTypeAndVersion(HypervisorType hypervisorType, String hypervisorVersion){
SearchCriteria<HypervisorCapabilitiesVO> sc = HypervisorTypeAndVersionSearch.create();
sc.setParameters("hypervisorType", hypervisorType);
sc.setParameters("hypervisorVersion", hypervisorVersion);
return findOneBy(sc);
}
@Override
public Long getMaxGuestsLimit(HypervisorType hypervisorType, String hypervisorVersion){
Long defaultLimit = new Long(50);
Long result = null;
boolean useDefault = false;
if(hypervisorVersion != null){
SearchCriteria<Long> sc = MaxGuestLimitByHypervisorSearch.create();
sc.setParameters("hypervisorType", hypervisorType);
sc.setParameters("hypervisorVersion", hypervisorVersion);
List<Long> limitList = customSearch(sc, null);
if(!limitList.isEmpty()){
result = limitList.get(0);
}else{
useDefault = true;
}
}else{
useDefault = true;
}
if(useDefault){
SearchCriteria<Long> sc = MaxGuestLimitByHypervisorSearch.create();
sc.setParameters("hypervisorType", hypervisorType);
sc.setParameters("hypervisorVersion", DEFAULT_VERSION);
List<Long> limitList = customSearch(sc, null);
if(!limitList.isEmpty()){
result = limitList.get(0);
}
}
if(result == null){
return defaultLimit;
}
return result;
}
package com.cloud.hypervisor.dao;
import java.util.List;
import javax.ejb.Local;
import org.apache.log4j.Logger;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.hypervisor.HypervisorCapabilitiesVO;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.GenericSearchBuilder;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
@Local(value=HypervisorCapabilitiesDao.class)
public class HypervisorCapabilitiesDaoImpl extends GenericDaoBase<HypervisorCapabilitiesVO, Long> implements HypervisorCapabilitiesDao {

    private static final Logger s_logger = Logger.getLogger(HypervisorCapabilitiesDaoImpl.class);

    protected final SearchBuilder<HypervisorCapabilitiesVO> HypervisorTypeSearch;
    protected final SearchBuilder<HypervisorCapabilitiesVO> HypervisorTypeAndVersionSearch;
    protected final GenericSearchBuilder<HypervisorCapabilitiesVO, Long> MaxGuestLimitByHypervisorSearch;

    /** Version string stored on capability rows that apply to any version of a hypervisor. */
    private static final String DEFAULT_VERSION = "default";

    /**
     * Hard-coded fallback guest limit used when neither a version-specific row
     * nor a "default"-version row yields a value. (Replaces the deprecated
     * {@code new Long(50)} boxing of the original code.)
     */
    private static final Long FALLBACK_MAX_GUESTS_LIMIT = 50L;

    protected HypervisorCapabilitiesDaoImpl() {
        // Matches capability rows by hypervisor type only.
        HypervisorTypeSearch = createSearchBuilder();
        HypervisorTypeSearch.and("hypervisorType", HypervisorTypeSearch.entity().getHypervisorType(), SearchCriteria.Op.EQ);
        HypervisorTypeSearch.done();

        // Matches capability rows by (hypervisor type, version) pair.
        HypervisorTypeAndVersionSearch = createSearchBuilder();
        HypervisorTypeAndVersionSearch.and("hypervisorType", HypervisorTypeAndVersionSearch.entity().getHypervisorType(), SearchCriteria.Op.EQ);
        HypervisorTypeAndVersionSearch.and("hypervisorVersion", HypervisorTypeAndVersionSearch.entity().getHypervisorVersion(), SearchCriteria.Op.EQ);
        HypervisorTypeAndVersionSearch.done();

        // Projects only the max-guests-limit column for a (type, version) pair.
        MaxGuestLimitByHypervisorSearch = createSearchBuilder(Long.class);
        MaxGuestLimitByHypervisorSearch.selectField(MaxGuestLimitByHypervisorSearch.entity().getMaxGuestsLimit());
        MaxGuestLimitByHypervisorSearch.and("hypervisorType", MaxGuestLimitByHypervisorSearch.entity().getHypervisorType(), SearchCriteria.Op.EQ);
        MaxGuestLimitByHypervisorSearch.and("hypervisorVersion", MaxGuestLimitByHypervisorSearch.entity().getHypervisorVersion(), SearchCriteria.Op.EQ);
        MaxGuestLimitByHypervisorSearch.done();
    }

    /**
     * Lists every capability row recorded for the given hypervisor type,
     * regardless of version.
     *
     * @param hypervisorType hypervisor to look up
     * @return matching rows (possibly empty)
     */
    @Override
    public List<HypervisorCapabilitiesVO> listAllByHypervisorType(HypervisorType hypervisorType) {
        SearchCriteria<HypervisorCapabilitiesVO> sc = HypervisorTypeSearch.create();
        sc.setParameters("hypervisorType", hypervisorType);
        return search(sc, null);
    }

    /**
     * Finds the single capability row for an exact (type, version) pair.
     *
     * @return the row, or {@code null} if none exists
     */
    @Override
    public HypervisorCapabilitiesVO findByHypervisorTypeAndVersion(HypervisorType hypervisorType, String hypervisorVersion) {
        SearchCriteria<HypervisorCapabilitiesVO> sc = HypervisorTypeAndVersionSearch.create();
        sc.setParameters("hypervisorType", hypervisorType);
        sc.setParameters("hypervisorVersion", hypervisorVersion);
        return findOneBy(sc);
    }

    /**
     * Returns the maximum number of guests for the given hypervisor.
     * Resolution order: exact (type, version) row when a version is supplied;
     * otherwise (or when no such row exists) the (type, "default") row;
     * finally a hard-coded limit of 50.
     *
     * <p>Note: a row whose stored limit is NULL is treated as "no limit found"
     * and resolves straight to the hard-coded fallback, matching the original
     * behavior (the default-version row is NOT consulted in that case).
     *
     * @return the resolved limit; never {@code null}
     */
    @Override
    public Long getMaxGuestsLimit(HypervisorType hypervisorType, String hypervisorVersion) {
        Long result = null;
        boolean useDefault = (hypervisorVersion == null);
        if (hypervisorVersion != null) {
            List<Long> limits = queryMaxGuestsLimit(hypervisorType, hypervisorVersion);
            if (!limits.isEmpty()) {
                result = limits.get(0);
            } else {
                useDefault = true;
            }
        }
        if (useDefault) {
            List<Long> limits = queryMaxGuestsLimit(hypervisorType, DEFAULT_VERSION);
            if (!limits.isEmpty()) {
                result = limits.get(0);
            }
        }
        return (result == null) ? FALLBACK_MAX_GUESTS_LIMIT : result;
    }

    /** Runs the max-guests projection query for one (type, version) pair. */
    private List<Long> queryMaxGuestsLimit(HypervisorType hypervisorType, String hypervisorVersion) {
        SearchCriteria<Long> sc = MaxGuestLimitByHypervisorSearch.create();
        sc.setParameters("hypervisorType", hypervisorType);
        sc.setParameters("hypervisorVersion", hypervisorVersion);
        return customSearch(sc, null);
    }
}

View File

@ -10,8 +10,8 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.hypervisor.guru;
package com.cloud.hypervisor.guru;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
@ -66,34 +66,34 @@ import com.cloud.vm.SecondaryStorageVmVO;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachineProfile;
import com.cloud.vm.VmDetailConstants;
@Local(value=HypervisorGuru.class)
public class VMwareGuru extends HypervisorGuruBase implements HypervisorGuru {
private static final Logger s_logger = Logger.getLogger(VMwareGuru.class);
@Local(value=HypervisorGuru.class)
public class VMwareGuru extends HypervisorGuruBase implements HypervisorGuru {
private static final Logger s_logger = Logger.getLogger(VMwareGuru.class);
@Inject NetworkDao _networkDao;
@Inject GuestOSDao _guestOsDao;
@Inject HostDao _hostDao;
@Inject HostDetailsDao _hostDetailsDao;
@Inject CommandExecLogDao _cmdExecLogDao;
@Inject ClusterManager _clusterMgr;
@Inject VmwareManager _vmwareMgr;
@Inject GuestOSDao _guestOsDao;
@Inject HostDao _hostDao;
@Inject HostDetailsDao _hostDetailsDao;
@Inject CommandExecLogDao _cmdExecLogDao;
@Inject ClusterManager _clusterMgr;
@Inject VmwareManager _vmwareMgr;
@Inject SecondaryStorageVmManager _secStorageMgr;
@Inject CheckPointManager _checkPointMgr;
@Inject NetworkManager _networkMgr;
protected VMwareGuru() {
super();
}
@Override
public HypervisorType getHypervisorType() {
return HypervisorType.VMware;
}
@Override
@Inject NetworkManager _networkMgr;
protected VMwareGuru() {
super();
}
@Override
public HypervisorType getHypervisorType() {
return HypervisorType.VMware;
}
@Override
public <T extends VirtualMachine> VirtualMachineTO implement(VirtualMachineProfile<T> vm) {
VirtualMachineTO to = toVirtualMachineTO(vm);
VirtualMachineTO to = toVirtualMachineTO(vm);
to.setBootloader(BootloaderType.HVM);
Map<String, String> details = to.getDetails();
@ -126,7 +126,7 @@ public class VMwareGuru extends HypervisorGuruBase implements HypervisorGuru {
details.put(VmDetailConstants.NIC_ADAPTER, VirtualEthernetCardType.E1000.toString());
}
}
}
}
to.setDetails(details);
if(vm.getVirtualMachine() instanceof DomainRouterVO) {
@ -200,12 +200,12 @@ public class VMwareGuru extends HypervisorGuruBase implements HypervisorGuru {
String bootArgs = to.getBootArgs();
to.setBootArgs(bootArgs + " nic_macs=" + sbMacSequence.toString());
}
// Determine the VM's OS description
GuestOSVO guestOS = _guestOsDao.findById(vm.getVirtualMachine().getGuestOSId());
to.setOs(guestOS.getDisplayName());
return to;
}
// Determine the VM's OS description
GuestOSVO guestOS = _guestOsDao.findById(vm.getVirtualMachine().getGuestOSId());
to.setOs(guestOS.getDisplayName());
return to;
}
private NicTO[] sortNicsByDeviceId(NicTO[] nics) {
@ -229,39 +229,39 @@ public class VMwareGuru extends HypervisorGuruBase implements HypervisorGuru {
return listForSort.toArray(new NicTO[0]);
}
@Override @DB
public long getCommandHostDelegation(long hostId, Command cmd) {
boolean needDelegation = false;
if(cmd instanceof PrimaryStorageDownloadCommand ||
cmd instanceof BackupSnapshotCommand ||
cmd instanceof CreatePrivateTemplateFromVolumeCommand ||
cmd instanceof CreatePrivateTemplateFromSnapshotCommand ||
cmd instanceof CopyVolumeCommand ||
cmd instanceof CreateVolumeFromSnapshotCommand) {
needDelegation = true;
}
if(needDelegation) {
HostVO host = _hostDao.findById(hostId);
assert(host != null);
assert(host.getHypervisorType() == HypervisorType.VMware);
long dcId = host.getDataCenterId();
Pair<HostVO, SecondaryStorageVmVO> cmdTarget = _secStorageMgr.assignSecStorageVm(dcId, cmd);
if(cmdTarget != null) {
// TODO, we need to make sure agent is actually connected too
cmd.setContextParam("hypervisor", HypervisorType.VMware.toString());
Map<String, String> hostDetails = _hostDetailsDao.findDetails(hostId);
cmd.setContextParam("guid", resolveNameInGuid(hostDetails.get("guid")));
cmd.setContextParam("username", hostDetails.get("username"));
cmd.setContextParam("password", hostDetails.get("password"));
cmd.setContextParam("serviceconsole", _vmwareMgr.getServiceConsolePortGroupName());
@Override @DB
public long getCommandHostDelegation(long hostId, Command cmd) {
boolean needDelegation = false;
if(cmd instanceof PrimaryStorageDownloadCommand ||
cmd instanceof BackupSnapshotCommand ||
cmd instanceof CreatePrivateTemplateFromVolumeCommand ||
cmd instanceof CreatePrivateTemplateFromSnapshotCommand ||
cmd instanceof CopyVolumeCommand ||
cmd instanceof CreateVolumeFromSnapshotCommand) {
needDelegation = true;
}
if(needDelegation) {
HostVO host = _hostDao.findById(hostId);
assert(host != null);
assert(host.getHypervisorType() == HypervisorType.VMware);
long dcId = host.getDataCenterId();
Pair<HostVO, SecondaryStorageVmVO> cmdTarget = _secStorageMgr.assignSecStorageVm(dcId, cmd);
if(cmdTarget != null) {
// TODO, we need to make sure agent is actually connected too
cmd.setContextParam("hypervisor", HypervisorType.VMware.toString());
Map<String, String> hostDetails = _hostDetailsDao.findDetails(hostId);
cmd.setContextParam("guid", resolveNameInGuid(hostDetails.get("guid")));
cmd.setContextParam("username", hostDetails.get("username"));
cmd.setContextParam("password", hostDetails.get("password"));
cmd.setContextParam("serviceconsole", _vmwareMgr.getServiceConsolePortGroupName());
cmd.setContextParam("manageportgroup", _vmwareMgr.getManagementPortGroupName());
CommandExecLogVO execLog = new CommandExecLogVO(cmdTarget.first().getId(), cmdTarget.second().getId(), cmd.getClass().getSimpleName(), 1);
_cmdExecLogDao.persist(execLog);
CommandExecLogVO execLog = new CommandExecLogVO(cmdTarget.first().getId(), cmdTarget.second().getId(), cmd.getClass().getSimpleName(), 1);
_cmdExecLogDao.persist(execLog);
cmd.setContextParam("execid", String.valueOf(execLog.getId()));
if(cmd instanceof BackupSnapshotCommand ||
@ -281,31 +281,31 @@ public class VMwareGuru extends HypervisorGuruBase implements HypervisorGuru {
cmd.setContextParam("worker2", workerName2);
cmd.setContextParam("checkpoint2", String.valueOf(checkPointId2));
}
return cmdTarget.first().getId();
}
}
return hostId;
return cmdTarget.first().getId();
}
}
return hostId;
}
public boolean trackVmHostChange() {
return true;
}
private static String resolveNameInGuid(String guid) {
String tokens[] = guid.split("@");
assert(tokens.length == 2);
String vCenterIp = NetUtils.resolveToIp(tokens[1]);
if(vCenterIp == null) {
s_logger.error("Fatal : unable to resolve vCenter address " + tokens[1] + ", please check your DNS configuration");
return guid;
}
if(vCenterIp.equals(tokens[1]))
return guid;
return tokens[0] + "@" + vCenterIp;
}
}
/**
 * Rewrites a host GUID of the form {@code "<prefix>@<vCenterHost>"} so that the
 * part after '@' is the resolved IP address of the vCenter host instead of its
 * DNS name. Returns the GUID unchanged when resolution fails or when the host
 * part is already an IP (resolves to itself).
 *
 * @param guid GUID expected to contain exactly one '@' separator (asserted)
 * @return GUID with the host part replaced by its IP, or the original GUID
 */
private static String resolveNameInGuid(String guid) {
String tokens[] = guid.split("@");
assert(tokens.length == 2);
// Resolve the vCenter host name to an IP via the project's NetUtils helper.
String vCenterIp = NetUtils.resolveToIp(tokens[1]);
if(vCenterIp == null) {
// DNS failure: log loudly but fall back to the unresolved GUID rather than failing.
s_logger.error("Fatal : unable to resolve vCenter address " + tokens[1] + ", please check your DNS configuration");
return guid;
}
// Host part was already an IP (or resolves to the same string): nothing to rewrite.
if(vCenterIp.equals(tokens[1]))
return guid;
return tokens[0] + "@" + vCenterIp;
}
}

View File

@ -817,9 +817,9 @@ public class VmwareManagerImpl implements VmwareManager, VmwareStorageMount, Lis
}
}
}
protected final int DEFAULT_DOMR_SSHPORT = 3922;
protected boolean shutdownRouterVM(DomainRouterVO router) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Try to shutdown router VM " + router.getInstanceName() + " directly.");

View File

@ -10,87 +10,87 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.hypervisor.vmware;
import java.net.URI;
import java.net.URLDecoder;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import javax.ejb.Local;
import javax.naming.ConfigurationException;
import org.apache.log4j.Logger;
package com.cloud.hypervisor.vmware;
import java.net.URI;
import java.net.URLDecoder;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import javax.ejb.Local;
import javax.naming.ConfigurationException;
import org.apache.log4j.Logger;
import com.cloud.agent.api.StartupCommand;
import com.cloud.agent.api.StartupRoutingCommand;
import com.cloud.alert.AlertManager;
import com.cloud.configuration.dao.ConfigurationDao;
import com.cloud.dc.ClusterDetailsDao;
import com.cloud.dc.ClusterVO;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.alert.AlertManager;
import com.cloud.configuration.dao.ConfigurationDao;
import com.cloud.dc.ClusterDetailsDao;
import com.cloud.dc.ClusterVO;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.exception.DiscoveredWithErrorException;
import com.cloud.exception.DiscoveryException;
import com.cloud.host.HostVO;
import com.cloud.exception.DiscoveryException;
import com.cloud.host.HostVO;
import com.cloud.host.dao.HostDao;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.hypervisor.vmware.manager.VmwareManager;
import com.cloud.hypervisor.vmware.mo.ClusterMO;
import com.cloud.hypervisor.vmware.mo.HostMO;
import com.cloud.hypervisor.vmware.resource.VmwareContextFactory;
import com.cloud.hypervisor.vmware.resource.VmwareResource;
import com.cloud.hypervisor.vmware.util.VmwareContext;
import com.cloud.resource.Discoverer;
import com.cloud.resource.DiscovererBase;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.hypervisor.vmware.manager.VmwareManager;
import com.cloud.hypervisor.vmware.mo.ClusterMO;
import com.cloud.hypervisor.vmware.mo.HostMO;
import com.cloud.hypervisor.vmware.resource.VmwareContextFactory;
import com.cloud.hypervisor.vmware.resource.VmwareResource;
import com.cloud.hypervisor.vmware.util.VmwareContext;
import com.cloud.resource.Discoverer;
import com.cloud.resource.DiscovererBase;
import com.cloud.resource.ResourceManager;
import com.cloud.resource.ResourceStateAdapter;
import com.cloud.resource.ServerResource;
import com.cloud.resource.ServerResource;
import com.cloud.resource.UnableDeleteHostException;
import com.cloud.resource.ResourceStateAdapter.DeleteHostAnswer;
import com.cloud.storage.Storage.ImageFormat;
import com.cloud.storage.Storage.TemplateType;
import com.cloud.storage.VMTemplateVO;
import com.cloud.storage.dao.VMTemplateDao;
import com.cloud.user.Account;
import com.cloud.utils.UriUtils;
import com.cloud.utils.component.ComponentLocator;
import com.cloud.utils.component.Inject;
import com.vmware.vim25.ClusterDasConfigInfo;
import com.vmware.vim25.ManagedObjectReference;
@Local(value=Discoverer.class)
public class VmwareServerDiscoverer extends DiscovererBase implements Discoverer, ResourceStateAdapter {
private static final Logger s_logger = Logger.getLogger(VmwareServerDiscoverer.class);
@Inject ClusterDao _clusterDao;
@Inject VmwareManager _vmwareMgr;
@Inject AlertManager _alertMgr;
@Inject VMTemplateDao _tmpltDao;
import com.cloud.storage.Storage.ImageFormat;
import com.cloud.storage.Storage.TemplateType;
import com.cloud.storage.VMTemplateVO;
import com.cloud.storage.dao.VMTemplateDao;
import com.cloud.user.Account;
import com.cloud.utils.UriUtils;
import com.cloud.utils.component.ComponentLocator;
import com.cloud.utils.component.Inject;
import com.vmware.vim25.ClusterDasConfigInfo;
import com.vmware.vim25.ManagedObjectReference;
@Local(value=Discoverer.class)
public class VmwareServerDiscoverer extends DiscovererBase implements Discoverer, ResourceStateAdapter {
private static final Logger s_logger = Logger.getLogger(VmwareServerDiscoverer.class);
@Inject ClusterDao _clusterDao;
@Inject VmwareManager _vmwareMgr;
@Inject AlertManager _alertMgr;
@Inject VMTemplateDao _tmpltDao;
@Inject ClusterDetailsDao _clusterDetailsDao;
@Inject HostDao _hostDao;
@Inject HostDao _hostDao;
@Inject ResourceManager _resourceMgr;
@Override
public Map<? extends ServerResource, Map<String, String>> find(long dcId, Long podId, Long clusterId, URI url,
String username, String password, List<String> hostTags) throws DiscoveryException {
if(s_logger.isInfoEnabled())
s_logger.info("Discover host. dc: " + dcId + ", pod: " + podId + ", cluster: " + clusterId + ", uri host: " + url.getHost());
if(podId == null) {
if(s_logger.isInfoEnabled())
s_logger.info("No pod is assigned, assuming that it is not for vmware and skip it to next discoverer");
return null;
}
ClusterVO cluster = _clusterDao.findById(clusterId);
if(cluster == null || cluster.getHypervisorType() != HypervisorType.VMware) {
if(s_logger.isInfoEnabled())
s_logger.info("invalid cluster id or cluster is not for VMware hypervisors");
return null;
@Override
public Map<? extends ServerResource, Map<String, String>> find(long dcId, Long podId, Long clusterId, URI url,
String username, String password, List<String> hostTags) throws DiscoveryException {
if(s_logger.isInfoEnabled())
s_logger.info("Discover host. dc: " + dcId + ", pod: " + podId + ", cluster: " + clusterId + ", uri host: " + url.getHost());
if(podId == null) {
if(s_logger.isInfoEnabled())
s_logger.info("No pod is assigned, assuming that it is not for vmware and skip it to next discoverer");
return null;
}
ClusterVO cluster = _clusterDao.findById(clusterId);
if(cluster == null || cluster.getHypervisorType() != HypervisorType.VMware) {
if(s_logger.isInfoEnabled())
s_logger.info("invalid cluster id or cluster is not for VMware hypervisors");
return null;
}
List<HostVO> hosts = _resourceMgr.listAllHostsInCluster(clusterId);
@ -99,169 +99,169 @@ public class VmwareServerDiscoverer extends DiscovererBase implements Discoverer
s_logger.error(msg);
throw new DiscoveredWithErrorException(msg);
}
VmwareContext context = null;
try {
context = VmwareContextFactory.create(url.getHost(), username, password);
List<ManagedObjectReference> morHosts = _vmwareMgr.addHostToPodCluster(context, dcId, podId, clusterId,
URLDecoder.decode(url.getPath()));
if(morHosts == null) {
s_logger.error("Unable to find host or cluster based on url: " + URLDecoder.decode(url.getPath()));
return null;
}
ManagedObjectReference morCluster = null;
Map<String, String> clusterDetails = _clusterDetailsDao.findDetails(clusterId);
if(clusterDetails.get("url") != null) {
URI uriFromCluster = new URI(UriUtils.encodeURIComponent(clusterDetails.get("url")));
morCluster = context.getHostMorByPath(URLDecoder.decode(uriFromCluster.getPath()));
if(morCluster == null || !morCluster.getType().equalsIgnoreCase("ClusterComputeResource")) {
s_logger.warn("Cluster url does not point to a valid vSphere cluster, url: " + clusterDetails.get("url"));
return null;
} else {
ClusterMO clusterMo = new ClusterMO(context, morCluster);
ClusterDasConfigInfo dasConfig = clusterMo.getDasConfig();
if(dasConfig != null && dasConfig.getEnabled() != null && dasConfig.getEnabled().booleanValue()) {
clusterDetails.put("NativeHA", "true");
_clusterDetailsDao.persist(clusterId, clusterDetails);
}
}
}
if(!validateDiscoveredHosts(context, morCluster, morHosts)) {
if(morCluster == null)
s_logger.warn("The discovered host is not standalone host, can not be added to a standalone cluster");
else
s_logger.warn("The discovered host does not belong to the cluster");
return null;
}
Map<VmwareResource, Map<String, String>> resources = new HashMap<VmwareResource, Map<String, String>>();
for(ManagedObjectReference morHost : morHosts) {
Map<String, String> details = new HashMap<String, String>();
Map<String, Object> params = new HashMap<String, Object>();
HostMO hostMo = new HostMO(context, morHost);
details.put("url", hostMo.getHostName());
details.put("username", username);
details.put("password", password);
String guid = morHost.getType() + ":" + morHost.get_value() + "@"+ url.getHost();
details.put("guid", guid);
params.put("url", hostMo.getHostName());
params.put("username", username);
params.put("password", password);
params.put("zone", Long.toString(dcId));
params.put("pod", Long.toString(podId));
params.put("cluster", Long.toString(clusterId));
params.put("guid", guid);
VmwareResource resource = new VmwareResource();
try {
resource.configure("VMware", params);
} catch (ConfigurationException e) {
_alertMgr.sendAlert(AlertManager.ALERT_TYPE_HOST, dcId, podId, "Unable to add " + url.getHost(), "Error is " + e.getMessage());
s_logger.warn("Unable to instantiate " + url.getHost(), e);
}
resource.start();
resources.put(resource, details);
}
// place a place holder guid derived from cluster ID
cluster.setGuid(UUID.nameUUIDFromBytes(String.valueOf(clusterId).getBytes()).toString());
_clusterDao.update(clusterId, cluster);
return resources;
VmwareContext context = null;
try {
context = VmwareContextFactory.create(url.getHost(), username, password);
List<ManagedObjectReference> morHosts = _vmwareMgr.addHostToPodCluster(context, dcId, podId, clusterId,
URLDecoder.decode(url.getPath()));
if(morHosts == null) {
s_logger.error("Unable to find host or cluster based on url: " + URLDecoder.decode(url.getPath()));
return null;
}
ManagedObjectReference morCluster = null;
Map<String, String> clusterDetails = _clusterDetailsDao.findDetails(clusterId);
if(clusterDetails.get("url") != null) {
URI uriFromCluster = new URI(UriUtils.encodeURIComponent(clusterDetails.get("url")));
morCluster = context.getHostMorByPath(URLDecoder.decode(uriFromCluster.getPath()));
if(morCluster == null || !morCluster.getType().equalsIgnoreCase("ClusterComputeResource")) {
s_logger.warn("Cluster url does not point to a valid vSphere cluster, url: " + clusterDetails.get("url"));
return null;
} else {
ClusterMO clusterMo = new ClusterMO(context, morCluster);
ClusterDasConfigInfo dasConfig = clusterMo.getDasConfig();
if(dasConfig != null && dasConfig.getEnabled() != null && dasConfig.getEnabled().booleanValue()) {
clusterDetails.put("NativeHA", "true");
_clusterDetailsDao.persist(clusterId, clusterDetails);
}
}
}
if(!validateDiscoveredHosts(context, morCluster, morHosts)) {
if(morCluster == null)
s_logger.warn("The discovered host is not standalone host, can not be added to a standalone cluster");
else
s_logger.warn("The discovered host does not belong to the cluster");
return null;
}
Map<VmwareResource, Map<String, String>> resources = new HashMap<VmwareResource, Map<String, String>>();
for(ManagedObjectReference morHost : morHosts) {
Map<String, String> details = new HashMap<String, String>();
Map<String, Object> params = new HashMap<String, Object>();
HostMO hostMo = new HostMO(context, morHost);
details.put("url", hostMo.getHostName());
details.put("username", username);
details.put("password", password);
String guid = morHost.getType() + ":" + morHost.get_value() + "@"+ url.getHost();
details.put("guid", guid);
params.put("url", hostMo.getHostName());
params.put("username", username);
params.put("password", password);
params.put("zone", Long.toString(dcId));
params.put("pod", Long.toString(podId));
params.put("cluster", Long.toString(clusterId));
params.put("guid", guid);
VmwareResource resource = new VmwareResource();
try {
resource.configure("VMware", params);
} catch (ConfigurationException e) {
_alertMgr.sendAlert(AlertManager.ALERT_TYPE_HOST, dcId, podId, "Unable to add " + url.getHost(), "Error is " + e.getMessage());
s_logger.warn("Unable to instantiate " + url.getHost(), e);
}
resource.start();
resources.put(resource, details);
}
// place a place holder guid derived from cluster ID
cluster.setGuid(UUID.nameUUIDFromBytes(String.valueOf(clusterId).getBytes()).toString());
_clusterDao.update(clusterId, cluster);
return resources;
} catch (DiscoveredWithErrorException e) {
throw e;
} catch (Exception e) {
s_logger.warn("Unable to connect to Vmware vSphere server. service address: " + url.getHost());
return null;
} finally {
if(context != null)
context.close();
}
}
private boolean validateDiscoveredHosts(VmwareContext context, ManagedObjectReference morCluster, List<ManagedObjectReference> morHosts) throws Exception {
if(morCluster == null) {
for(ManagedObjectReference morHost : morHosts) {
ManagedObjectReference morParent = (ManagedObjectReference)context.getServiceUtil().getDynamicProperty(morHost, "parent");
if(morParent.getType().equalsIgnoreCase("ClusterComputeResource"))
return false;
}
} else {
for(ManagedObjectReference morHost : morHosts) {
ManagedObjectReference morParent = (ManagedObjectReference)context.getServiceUtil().getDynamicProperty(morHost, "parent");
if(!morParent.getType().equalsIgnoreCase("ClusterComputeResource"))
return false;
if(!morParent.get_value().equals(morCluster.get_value()))
return false;
}
}
return true;
}
@Override
public void postDiscovery(List<HostVO> hosts, long msId) {
// do nothing
}
@Override
public boolean matchHypervisor(String hypervisor) {
if(hypervisor == null)
return true;
return Hypervisor.HypervisorType.VMware.toString().equalsIgnoreCase(hypervisor);
}
@Override
public Hypervisor.HypervisorType getHypervisorType() {
return Hypervisor.HypervisorType.VMware;
}
@Override
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
if(s_logger.isInfoEnabled())
s_logger.info("Configure VmwareServerDiscoverer, discover name: " + name);
super.configure(name, params);
ComponentLocator locator = ComponentLocator.getCurrentLocator();
ConfigurationDao configDao = locator.getDao(ConfigurationDao.class);
if (configDao == null) {
throw new ConfigurationException("Unable to get the configuration dao.");
}
createVmwareToolsIso();
if(s_logger.isInfoEnabled()) {
} catch (Exception e) {
s_logger.warn("Unable to connect to Vmware vSphere server. service address: " + url.getHost());
return null;
} finally {
if(context != null)
context.close();
}
}
/**
 * Validates that discovered hosts are consistent with the target cluster.
 * When no vSphere cluster is specified ({@code morCluster == null}), every
 * discovered host must be standalone (its parent must NOT be a
 * ClusterComputeResource). When a cluster IS specified, every host's parent
 * must be a ClusterComputeResource AND be that exact cluster.
 *
 * @param context    live vCenter connection used to read each host's parent
 * @param morCluster target cluster, or null for standalone-host discovery
 * @param morHosts   hosts returned by discovery
 * @return true if all hosts satisfy the rule above
 * @throws Exception propagated from the vSphere property lookup
 */
private boolean validateDiscoveredHosts(VmwareContext context, ManagedObjectReference morCluster, List<ManagedObjectReference> morHosts) throws Exception {
if(morCluster == null) {
// Standalone mode: reject any host that actually lives inside a vSphere cluster.
for(ManagedObjectReference morHost : morHosts) {
ManagedObjectReference morParent = (ManagedObjectReference)context.getServiceUtil().getDynamicProperty(morHost, "parent");
if(morParent.getType().equalsIgnoreCase("ClusterComputeResource"))
return false;
}
} else {
// Cluster mode: every host must belong to exactly the requested cluster.
for(ManagedObjectReference morHost : morHosts) {
ManagedObjectReference morParent = (ManagedObjectReference)context.getServiceUtil().getDynamicProperty(morHost, "parent");
if(!morParent.getType().equalsIgnoreCase("ClusterComputeResource"))
return false;
if(!morParent.get_value().equals(morCluster.get_value()))
return false;
}
}
return true;
}
/**
 * No post-discovery processing is required for VMware hosts; all per-host
 * resource setup is performed during {@code find()}.
 */
@Override
public void postDiscovery(List<HostVO> hosts, long msId) {
// intentionally a no-op
}
/**
 * Returns true if this discoverer handles the named hypervisor.
 * A null name matches (treated as "any hypervisor"); otherwise the name is
 * compared case-insensitively against "VMware".
 */
@Override
public boolean matchHypervisor(String hypervisor) {
if(hypervisor == null)
return true;
return Hypervisor.HypervisorType.VMware.toString().equalsIgnoreCase(hypervisor);
}
/** This discoverer always reports the VMware hypervisor type. */
@Override
public Hypervisor.HypervisorType getHypervisorType() {
return Hypervisor.HypervisorType.VMware;
}
/**
 * Configures the discoverer: runs the base-class configuration, verifies the
 * configuration DAO is obtainable, registers the vmware-tools ISO template,
 * and registers this instance as a resource-state adapter.
 *
 * @param name   component name assigned by the container
 * @param params component configuration parameters
 * @return always true on success
 * @throws ConfigurationException if the configuration DAO cannot be located
 */
@Override
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
if(s_logger.isInfoEnabled())
s_logger.info("Configure VmwareServerDiscoverer, discover name: " + name);
super.configure(name, params);
ComponentLocator locator = ComponentLocator.getCurrentLocator();
// NOTE(review): configDao is only null-checked and never used afterwards here —
// the lookup appears to serve purely as an environment sanity check.
ConfigurationDao configDao = locator.getDao(ConfigurationDao.class);
if (configDao == null) {
throw new ConfigurationException("Unable to get the configuration dao.");
}
// Ensure the per-host vmware-tools ISO template row exists before hosts are added.
createVmwareToolsIso();
if(s_logger.isInfoEnabled()) {
s_logger.info("VmwareServerDiscoverer has been successfully configured");
}
_resourceMgr.registerResourceStateAdapter(this.getClass().getSimpleName(), this);
return true;
}
private void createVmwareToolsIso() {
String isoName = "vmware-tools.iso";
VMTemplateVO tmplt = _tmpltDao.findByTemplateName(isoName);
Long id;
if (tmplt == null) {
id = _tmpltDao.getNextInSequence(Long.class, "id");
VMTemplateVO template = new VMTemplateVO(id, isoName, isoName, ImageFormat.ISO, true, true,
TemplateType.PERHOST, null, null, true, 64,
Account.ACCOUNT_ID_SYSTEM, null, "VMware Tools Installer ISO", false, 1, false, HypervisorType.VMware);
_tmpltDao.persist(template);
} else {
id = tmplt.getId();
tmplt.setTemplateType(TemplateType.PERHOST);
tmplt.setUrl(null);
_tmpltDao.update(id, tmplt);
}
_resourceMgr.registerResourceStateAdapter(this.getClass().getSimpleName(), this);
return true;
}
/**
 * Ensures a "vmware-tools.iso" template row exists and is marked PERHOST.
 * If no such template is found, a new system-owned ISO template is inserted;
 * otherwise the existing row is updated to PERHOST with its URL cleared
 * (the ISO ships with each host, so no download URL applies).
 */
private void createVmwareToolsIso() {
String isoName = "vmware-tools.iso";
VMTemplateVO tmplt = _tmpltDao.findByTemplateName(isoName);
Long id;
if (tmplt == null) {
// First run: allocate an id and insert the system-owned PERHOST ISO template.
id = _tmpltDao.getNextInSequence(Long.class, "id");
VMTemplateVO template = new VMTemplateVO(id, isoName, isoName, ImageFormat.ISO, true, true,
TemplateType.PERHOST, null, null, true, 64,
Account.ACCOUNT_ID_SYSTEM, null, "VMware Tools Installer ISO", false, 1, false, HypervisorType.VMware);
_tmpltDao.persist(template);
} else {
// Template already registered: normalize it to PERHOST and drop any stale URL.
id = tmplt.getId();
tmplt.setTemplateType(TemplateType.PERHOST);
tmplt.setUrl(null);
_tmpltDao.update(id, tmplt);
}
}
@Override
@ -300,6 +300,6 @@ public class VmwareServerDiscoverer extends DiscovererBase implements Discoverer
public boolean stop() {
_resourceMgr.unregisterResourceStateAdapter(this.getClass().getSimpleName());
return super.stop();
}
}
}
}

View File

@ -10,9 +10,9 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.migration;
import com.cloud.utils.db.GenericDao;
public interface DiskOffering20Dao extends GenericDao<DiskOffering20VO, Long> {
}
package com.cloud.migration;
import com.cloud.utils.db.GenericDao;
/**
 * DAO for the CloudStack 2.0-era {@code disk_offering} table layout
 * ({@link DiskOffering20VO}). Pure marker interface: all CRUD operations are
 * inherited from {@link GenericDao}; used by the 2.1-to-2.2 migration utility.
 */
public interface DiskOffering20Dao extends GenericDao<DiskOffering20VO, Long> {
}

View File

@ -10,12 +10,12 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.migration;
import javax.ejb.Local;
import com.cloud.utils.db.GenericDaoBase;
@Local(value={DiskOffering20Dao.class})
public class DiskOffering20DaoImpl extends GenericDaoBase<DiskOffering20VO, Long> implements DiskOffering20Dao {
}
package com.cloud.migration;
import javax.ejb.Local;
import com.cloud.utils.db.GenericDaoBase;
/**
 * Default {@link DiskOffering20Dao} implementation. The body is intentionally
 * empty: every operation is inherited from {@link GenericDaoBase}.
 */
@Local(value={DiskOffering20Dao.class})
public class DiskOffering20DaoImpl extends GenericDaoBase<DiskOffering20VO, Long> implements DiskOffering20Dao {
}

View File

@ -10,99 +10,99 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.migration;
import java.util.Date;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import javax.persistence.Id;
import javax.persistence.Table;
import com.cloud.utils.db.GenericDao;
@Entity
@Table(name="disk_offering")
public class DiskOffering20VO {
@Id
@GeneratedValue(strategy=GenerationType.IDENTITY)
@Column(name="id")
Long id;
@Column(name="domain_id")
long domainId;
@Column(name="name")
private String name = null;
@Column(name="display_text")
private String displayText = null;
@Column(name="disk_size")
long diskSize;
@Column(name="mirrored")
boolean mirrored;
@Column(name=GenericDao.REMOVED_COLUMN)
private Date removed;
public DiskOffering20VO() {
}
public DiskOffering20VO(long domainId, String name, String displayText, long diskSize, boolean mirrored) {
this.domainId = domainId;
this.name = name;
this.displayText = displayText;
this.diskSize = diskSize;
this.mirrored = mirrored;
}
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public long getDomainId() {
return domainId;
}
public void setDomainId(long domainId) {
this.domainId = domainId;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getDisplayText() {
return displayText;
}
public void setDisplayText(String displayText) {
this.displayText = displayText;
}
public long getDiskSize() {
return diskSize;
}
public void setDiskSize(long diskSize) {
this.diskSize = diskSize;
}
public boolean getMirrored() {
return mirrored;
}
public void setMirrored(boolean mirrored) {
this.mirrored = mirrored;
}
public Date getRemoved() {
return removed;
}
}
package com.cloud.migration;
import java.util.Date;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import javax.persistence.Id;
import javax.persistence.Table;
import com.cloud.utils.db.GenericDao;
/**
 * Entity mapped onto the CloudStack 2.0-era {@code disk_offering} table,
 * kept solely so the data-migration code can read legacy rows.
 */
@Entity
@Table(name="disk_offering")
public class DiskOffering20VO {

    /** Primary key, generated by the database. */
    @Id
    @GeneratedValue(strategy=GenerationType.IDENTITY)
    @Column(name="id")
    Long id;

    @Column(name="domain_id")
    long domainId;

    @Column(name="name")
    private String name;

    @Column(name="display_text")
    private String displayText;

    @Column(name="disk_size")
    long diskSize;

    @Column(name="mirrored")
    boolean mirrored;

    /** Removal timestamp (column name supplied by GenericDao); null while the row is live. */
    @Column(name=GenericDao.REMOVED_COLUMN)
    private Date removed;

    /** No-arg constructor required by the persistence framework. */
    public DiskOffering20VO() {
    }

    /** Builds a fully-populated offering (id and removed stay unset). */
    public DiskOffering20VO(long domainId, String name, String displayText, long diskSize, boolean mirrored) {
        this.mirrored = mirrored;
        this.diskSize = diskSize;
        this.displayText = displayText;
        this.name = name;
        this.domainId = domainId;
    }

    public Long getId() {
        return id;
    }

    public void setId(Long id) {
        this.id = id;
    }

    public long getDomainId() {
        return domainId;
    }

    public void setDomainId(long domainId) {
        this.domainId = domainId;
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public String getDisplayText() {
        return displayText;
    }

    public void setDisplayText(String displayText) {
        this.displayText = displayText;
    }

    public long getDiskSize() {
        return diskSize;
    }

    public void setDiskSize(long diskSize) {
        this.diskSize = diskSize;
    }

    public boolean getMirrored() {
        return mirrored;
    }

    public void setMirrored(boolean mirrored) {
        this.mirrored = mirrored;
    }

    /** Read-only: the removed timestamp is managed by the DAO layer. */
    public Date getRemoved() {
        return removed;
    }
}

View File

@ -10,10 +10,10 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.migration;
import com.cloud.utils.db.GenericDao;
public interface DiskOffering21Dao extends GenericDao<DiskOffering21VO, Long> {
}
package com.cloud.migration;
import com.cloud.utils.db.GenericDao;
public interface DiskOffering21Dao extends GenericDao<DiskOffering21VO, Long> {
}

View File

@ -10,12 +10,12 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.migration;
import javax.ejb.Local;
import com.cloud.utils.db.GenericDaoBase;
@Local(value={DiskOffering21Dao.class})
public class DiskOffering21DaoImpl extends GenericDaoBase<DiskOffering21VO, Long> implements DiskOffering21Dao {
}
package com.cloud.migration;
import javax.ejb.Local;
import com.cloud.utils.db.GenericDaoBase;
@Local(value={DiskOffering21Dao.class})
public class DiskOffering21DaoImpl extends GenericDaoBase<DiskOffering21VO, Long> implements DiskOffering21Dao {
}

View File

@ -10,116 +10,116 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.migration;
import java.util.Date;
import java.util.List;
import javax.persistence.Column;
import javax.persistence.DiscriminatorColumn;
import javax.persistence.DiscriminatorType;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import javax.persistence.Id;
import javax.persistence.Inheritance;
import javax.persistence.InheritanceType;
import javax.persistence.Table;
import javax.persistence.Transient;
import com.cloud.utils.db.GenericDao;
@Entity
@Table(name="disk_offering_21")
@Inheritance(strategy=InheritanceType.JOINED)
@DiscriminatorColumn(name="type", discriminatorType=DiscriminatorType.STRING, length=32)
public class DiskOffering21VO {
public enum Type {
Disk,
Service
};
@Id
@GeneratedValue(strategy=GenerationType.AUTO)
@Column(name="id")
long id;
@Column(name="domain_id")
Long domainId;
@Column(name="unique_name")
private String uniqueName;
@Column(name="name")
private String name = null;
@Column(name="display_text")
private String displayText = null;
@Column(name="disk_size")
long diskSize;
@Column(name="mirrored")
boolean mirrored;
@Column(name="tags")
String tags;
@Column(name="type")
Type type;
@Column(name=GenericDao.REMOVED_COLUMN)
private Date removed;
@Column(name=GenericDao.CREATED_COLUMN)
private Date created;
@Column(name="recreatable")
private boolean recreatable;
@Column(name="use_local_storage")
private boolean useLocalStorage;
package com.cloud.migration;
import java.util.Date;
import java.util.List;
import javax.persistence.Column;
import javax.persistence.DiscriminatorColumn;
import javax.persistence.DiscriminatorType;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import javax.persistence.Id;
import javax.persistence.Inheritance;
import javax.persistence.InheritanceType;
import javax.persistence.Table;
import javax.persistence.Transient;
import com.cloud.utils.db.GenericDao;
@Entity
@Table(name="disk_offering_21")
@Inheritance(strategy=InheritanceType.JOINED)
@DiscriminatorColumn(name="type", discriminatorType=DiscriminatorType.STRING, length=32)
public class DiskOffering21VO {
public enum Type {
Disk,
Service
};
@Id
@GeneratedValue(strategy=GenerationType.AUTO)
@Column(name="id")
long id;
@Column(name="domain_id")
Long domainId;
@Column(name="unique_name")
private String uniqueName;
@Column(name="name")
private String name = null;
@Column(name="display_text")
private String displayText = null;
@Column(name="disk_size")
long diskSize;
@Column(name="mirrored")
boolean mirrored;
@Column(name="tags")
String tags;
@Column(name="type")
Type type;
@Column(name=GenericDao.REMOVED_COLUMN)
private Date removed;
@Column(name=GenericDao.CREATED_COLUMN)
private Date created;
@Column(name="recreatable")
private boolean recreatable;
@Column(name="use_local_storage")
private boolean useLocalStorage;
@Column(name="system_use")
protected boolean systemUse;
public DiskOffering21VO() {
}
public DiskOffering21VO(long domainId, String name, String displayText, long diskSize, boolean mirrored, String tags) {
this.domainId = domainId;
this.name = name;
this.displayText = displayText;
this.diskSize = diskSize;
this.mirrored = mirrored;
this.tags = tags;
this.recreatable = false;
this.type = Type.Disk;
this.useLocalStorage = false;
}
public DiskOffering21VO(String name, String displayText, boolean mirrored, String tags, boolean recreatable, boolean useLocalStorage) {
this.domainId = null;
this.type = Type.Service;
this.name = name;
this.displayText = displayText;
this.mirrored = mirrored;
this.tags = tags;
this.recreatable = recreatable;
this.useLocalStorage = useLocalStorage;
}
public long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public String getUniqueName() {
return uniqueName;
public DiskOffering21VO() {
}
public DiskOffering21VO(long domainId, String name, String displayText, long diskSize, boolean mirrored, String tags) {
this.domainId = domainId;
this.name = name;
this.displayText = displayText;
this.diskSize = diskSize;
this.mirrored = mirrored;
this.tags = tags;
this.recreatable = false;
this.type = Type.Disk;
this.useLocalStorage = false;
}
public DiskOffering21VO(String name, String displayText, boolean mirrored, String tags, boolean recreatable, boolean useLocalStorage) {
this.domainId = null;
this.type = Type.Service;
this.name = name;
this.displayText = displayText;
this.mirrored = mirrored;
this.tags = tags;
this.recreatable = recreatable;
this.useLocalStorage = useLocalStorage;
}
public long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public String getUniqueName() {
return uniqueName;
}
public boolean getSystemUse() {
@ -128,125 +128,125 @@ public class DiskOffering21VO {
public void setSystemUse(boolean systemUse) {
this.systemUse = systemUse;
}
public boolean getUseLocalStorage() {
return useLocalStorage;
}
public void setUserLocalStorage(boolean useLocalStorage) {
this.useLocalStorage = useLocalStorage;
}
public Long getDomainId() {
return domainId;
}
public Type getType() {
return type;
}
public void setType(Type type) {
this.type = type;
}
public boolean isRecreatable() {
return recreatable;
}
public void setDomainId(Long domainId) {
this.domainId = domainId;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getDisplayText() {
return displayText;
}
public void setDisplayText(String displayText) {
this.displayText = displayText;
}
public long getDiskSizeInBytes() {
return diskSize * 1024 * 1024;
}
public void setDiskSize(long diskSize) {
this.diskSize = diskSize;
}
public boolean isMirrored() {
return mirrored;
}
public void setMirrored(boolean mirrored) {
this.mirrored = mirrored;
}
public Date getRemoved() {
return removed;
}
public Date getCreated() {
return created;
}
protected void setTags(String tags) {
this.tags = tags;
}
public String getTags() {
return tags;
}
public void setUniqueName(String name) {
this.uniqueName = name;
}
@Transient
public String[] getTagsArray() {
String tags = getTags();
if (tags == null || tags.isEmpty()) {
return new String[0];
}
return tags.split(",");
}
@Transient
public boolean containsTag(String... tags) {
if (this.tags == null) {
return false;
}
for (String tag : tags) {
if (!this.tags.matches(tag)) {
return false;
}
}
return true;
}
@Transient
public void setTagsArray(List<String> newTags) {
if (newTags.isEmpty()) {
setTags(null);
return;
}
StringBuilder buf = new StringBuilder();
for (String tag : newTags) {
buf.append(tag).append(",");
}
buf.delete(buf.length() - 1, buf.length());
setTags(buf.toString());
}
}
}
public boolean getUseLocalStorage() {
return useLocalStorage;
}
public void setUserLocalStorage(boolean useLocalStorage) {
this.useLocalStorage = useLocalStorage;
}
public Long getDomainId() {
return domainId;
}
public Type getType() {
return type;
}
public void setType(Type type) {
this.type = type;
}
public boolean isRecreatable() {
return recreatable;
}
public void setDomainId(Long domainId) {
this.domainId = domainId;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getDisplayText() {
return displayText;
}
public void setDisplayText(String displayText) {
this.displayText = displayText;
}
public long getDiskSizeInBytes() {
return diskSize * 1024 * 1024;
}
public void setDiskSize(long diskSize) {
this.diskSize = diskSize;
}
public boolean isMirrored() {
return mirrored;
}
public void setMirrored(boolean mirrored) {
this.mirrored = mirrored;
}
public Date getRemoved() {
return removed;
}
public Date getCreated() {
return created;
}
protected void setTags(String tags) {
this.tags = tags;
}
public String getTags() {
return tags;
}
public void setUniqueName(String name) {
this.uniqueName = name;
}
@Transient
public String[] getTagsArray() {
String tags = getTags();
if (tags == null || tags.isEmpty()) {
return new String[0];
}
return tags.split(",");
}
@Transient
public boolean containsTag(String... tags) {
if (this.tags == null) {
return false;
}
for (String tag : tags) {
if (!this.tags.matches(tag)) {
return false;
}
}
return true;
}
@Transient
public void setTagsArray(List<String> newTags) {
if (newTags.isEmpty()) {
setTags(null);
return;
}
StringBuilder buf = new StringBuilder();
for (String tag : newTags) {
buf.append(tag).append(",");
}
buf.delete(buf.length() - 1, buf.length());
setTags(buf.toString());
}
}

View File

@ -10,9 +10,9 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.migration;
import com.cloud.utils.db.GenericDao;
public interface ServiceOffering20Dao extends GenericDao<ServiceOffering20VO, Long> {
}
package com.cloud.migration;
import com.cloud.utils.db.GenericDao;
public interface ServiceOffering20Dao extends GenericDao<ServiceOffering20VO, Long> {
}

View File

@ -10,12 +10,12 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.migration;
import javax.ejb.Local;
import com.cloud.utils.db.GenericDaoBase;
@Local(value={ServiceOffering20Dao.class})
public class ServiceOffering20DaoImpl extends GenericDaoBase<ServiceOffering20VO, Long> implements ServiceOffering20Dao {
}
package com.cloud.migration;
import javax.ejb.Local;
import com.cloud.utils.db.GenericDaoBase;
@Local(value={ServiceOffering20Dao.class})
public class ServiceOffering20DaoImpl extends GenericDaoBase<ServiceOffering20VO, Long> implements ServiceOffering20Dao {
}

View File

@ -10,8 +10,8 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.migration;
package com.cloud.migration;
import java.util.Date;
import javax.persistence.Column;
@ -26,169 +26,169 @@ import javax.persistence.Table;
import com.cloud.dc.Vlan;
import com.cloud.dc.Vlan.VlanType;
import com.cloud.utils.db.GenericDao;
@Entity
@Table(name="service_offering")
public class ServiceOffering20VO {
@Id
@GeneratedValue(strategy=GenerationType.IDENTITY)
@Column(name="id")
private Long id = null;
@Column(name="name")
private String name = null;
@Column(name="cpu")
private int cpu;
@Column(name="speed")
private int speed;
@Column(name="ram_size")
private int ramSize;
@Column(name="nw_rate")
private int rateMbps;
@Column(name="mc_rate")
private int multicastRateMbps;
@Column(name="mirrored")
private boolean mirroredVolumes;
@Column(name="ha_enabled")
private boolean offerHA;
@Column(name="display_text")
private String displayText = null;
@Column(name="guest_ip_type")
@Enumerated(EnumType.STRING)
private Vlan.VlanType guestIpType = Vlan.VlanType.VirtualNetwork;
@Column(name="use_local_storage")
private boolean useLocalStorage;
@Column(name=GenericDao.CREATED_COLUMN)
private Date created;
@Column(name=GenericDao.REMOVED_COLUMN)
private Date removed;
protected ServiceOffering20VO() {
}
public ServiceOffering20VO(Long id, String name, int cpu, int ramSize, int speed, int rateMbps, int multicastRateMbps, boolean offerHA, String displayText, boolean localStorageRequired) {
this(id, name, cpu, ramSize, speed, rateMbps, multicastRateMbps, offerHA, displayText, Vlan.VlanType.VirtualNetwork, localStorageRequired);
}
public ServiceOffering20VO(Long id, String name, int cpu, int ramSize, int speed, int rateMbps, int multicastRateMbps, boolean offerHA, String displayText, VlanType guestIpType, boolean useLocalStorage) {
this.id = id;
this.name = name;
this.cpu = cpu;
this.ramSize = ramSize;
this.speed = speed;
this.rateMbps = rateMbps;
this.multicastRateMbps = multicastRateMbps;
this.offerHA = offerHA;
this.displayText = displayText;
this.guestIpType = guestIpType;
this.useLocalStorage = useLocalStorage;
}
public boolean getOfferHA() {
return offerHA;
}
public void setOfferHA(boolean offerHA) {
this.offerHA = offerHA;
}
public Long getId() {
return id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public int getCpu() {
return cpu;
}
public void setCpu(int cpu) {
this.cpu = cpu;
}
public void setSpeed(int speed) {
this.speed = speed;
}
public void setRamSize(int ramSize) {
this.ramSize = ramSize;
}
public int getSpeed() {
return speed;
}
public int getRamSize() {
return ramSize;
}
public Date getCreated() {
return created;
}
public Date getRemoved() {
return removed;
}
public void setMirroredVolumes(boolean mirroredVolumes) {
this.mirroredVolumes = mirroredVolumes;
}
public boolean isMirroredVolumes() {
return mirroredVolumes;
}
public String getDisplayText() {
return displayText;
}
public void setDisplayText(String displayText) {
this.displayText = displayText;
}
public void setRateMbps(int rateMbps) {
this.rateMbps = rateMbps;
}
public int getRateMbps() {
return rateMbps;
}
public void setMulticastRateMbps(int multicastRateMbps) {
this.multicastRateMbps = multicastRateMbps;
}
public int getMulticastRateMbps() {
return multicastRateMbps;
}
public void setGuestIpType(Vlan.VlanType guestIpType) {
this.guestIpType = guestIpType;
}
public Vlan.VlanType getGuestIpType() {
return guestIpType;
}
public boolean getUseLocalStorage() {
return useLocalStorage;
}
}
@Entity
@Table(name="service_offering")
public class ServiceOffering20VO {
@Id
@GeneratedValue(strategy=GenerationType.IDENTITY)
@Column(name="id")
private Long id = null;
@Column(name="name")
private String name = null;
@Column(name="cpu")
private int cpu;
@Column(name="speed")
private int speed;
@Column(name="ram_size")
private int ramSize;
@Column(name="nw_rate")
private int rateMbps;
@Column(name="mc_rate")
private int multicastRateMbps;
@Column(name="mirrored")
private boolean mirroredVolumes;
@Column(name="ha_enabled")
private boolean offerHA;
@Column(name="display_text")
private String displayText = null;
@Column(name="guest_ip_type")
@Enumerated(EnumType.STRING)
private Vlan.VlanType guestIpType = Vlan.VlanType.VirtualNetwork;
@Column(name="use_local_storage")
private boolean useLocalStorage;
@Column(name=GenericDao.CREATED_COLUMN)
private Date created;
@Column(name=GenericDao.REMOVED_COLUMN)
private Date removed;
protected ServiceOffering20VO() {
}
public ServiceOffering20VO(Long id, String name, int cpu, int ramSize, int speed, int rateMbps, int multicastRateMbps, boolean offerHA, String displayText, boolean localStorageRequired) {
this(id, name, cpu, ramSize, speed, rateMbps, multicastRateMbps, offerHA, displayText, Vlan.VlanType.VirtualNetwork, localStorageRequired);
}
public ServiceOffering20VO(Long id, String name, int cpu, int ramSize, int speed, int rateMbps, int multicastRateMbps, boolean offerHA, String displayText, VlanType guestIpType, boolean useLocalStorage) {
this.id = id;
this.name = name;
this.cpu = cpu;
this.ramSize = ramSize;
this.speed = speed;
this.rateMbps = rateMbps;
this.multicastRateMbps = multicastRateMbps;
this.offerHA = offerHA;
this.displayText = displayText;
this.guestIpType = guestIpType;
this.useLocalStorage = useLocalStorage;
}
public boolean getOfferHA() {
return offerHA;
}
public void setOfferHA(boolean offerHA) {
this.offerHA = offerHA;
}
public Long getId() {
return id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public int getCpu() {
return cpu;
}
public void setCpu(int cpu) {
this.cpu = cpu;
}
public void setSpeed(int speed) {
this.speed = speed;
}
public void setRamSize(int ramSize) {
this.ramSize = ramSize;
}
public int getSpeed() {
return speed;
}
public int getRamSize() {
return ramSize;
}
public Date getCreated() {
return created;
}
public Date getRemoved() {
return removed;
}
public void setMirroredVolumes(boolean mirroredVolumes) {
this.mirroredVolumes = mirroredVolumes;
}
public boolean isMirroredVolumes() {
return mirroredVolumes;
}
public String getDisplayText() {
return displayText;
}
public void setDisplayText(String displayText) {
this.displayText = displayText;
}
public void setRateMbps(int rateMbps) {
this.rateMbps = rateMbps;
}
public int getRateMbps() {
return rateMbps;
}
public void setMulticastRateMbps(int multicastRateMbps) {
this.multicastRateMbps = multicastRateMbps;
}
public int getMulticastRateMbps() {
return multicastRateMbps;
}
public void setGuestIpType(Vlan.VlanType guestIpType) {
this.guestIpType = guestIpType;
}
public Vlan.VlanType getGuestIpType() {
return guestIpType;
}
public boolean getUseLocalStorage() {
return useLocalStorage;
}
}

View File

@ -10,9 +10,9 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.migration;
import com.cloud.utils.db.GenericDao;
public interface ServiceOffering21Dao extends GenericDao<ServiceOffering21VO, Long> {
}
package com.cloud.migration;
import com.cloud.utils.db.GenericDao;
public interface ServiceOffering21Dao extends GenericDao<ServiceOffering21VO, Long> {
}

View File

@ -10,12 +10,12 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.migration;
import javax.ejb.Local;
import com.cloud.utils.db.GenericDaoBase;
@Local(value={ServiceOffering21Dao.class})
public class ServiceOffering21DaoImpl extends GenericDaoBase<ServiceOffering21VO, Long> implements ServiceOffering21Dao {
}
package com.cloud.migration;
import javax.ejb.Local;
import com.cloud.utils.db.GenericDaoBase;
@Local(value={ServiceOffering21Dao.class})
public class ServiceOffering21DaoImpl extends GenericDaoBase<ServiceOffering21VO, Long> implements ServiceOffering21Dao {
}

View File

@ -10,8 +10,8 @@
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.migration;
package com.cloud.migration;
import javax.persistence.Column;
import javax.persistence.DiscriminatorValue;
import javax.persistence.Entity;
@ -20,133 +20,133 @@ import javax.persistence.Table;
import javax.persistence.Transient;
import com.cloud.offering.ServiceOffering;
@Entity
@Table(name="service_offering_21")
@DiscriminatorValue(value="Service")
@PrimaryKeyJoinColumn(name="id")
public class ServiceOffering21VO extends DiskOffering21VO implements ServiceOffering {
@Column(name="cpu")
private int cpu;
@Column(name="speed")
private int speed;
@Column(name="ram_size")
private int ramSize;
@Column(name="nw_rate")
private Integer rateMbps;
@Column(name="mc_rate")
private Integer multicastRateMbps;
@Column(name="ha_enabled")
private boolean offerHA;
@Column(name="host_tag")
@Entity
@Table(name="service_offering_21")
@DiscriminatorValue(value="Service")
@PrimaryKeyJoinColumn(name="id")
public class ServiceOffering21VO extends DiskOffering21VO implements ServiceOffering {
@Column(name="cpu")
private int cpu;
@Column(name="speed")
private int speed;
@Column(name="ram_size")
private int ramSize;
@Column(name="nw_rate")
private Integer rateMbps;
@Column(name="mc_rate")
private Integer multicastRateMbps;
@Column(name="ha_enabled")
private boolean offerHA;
@Column(name="host_tag")
private String hostTag;
protected ServiceOffering21VO() {
super();
}
public ServiceOffering21VO(String name, int cpu, int ramSize, int speed, Integer rateMbps, Integer multicastRateMbps, boolean offerHA, String displayText, boolean useLocalStorage, boolean recreatable, String tags) {
super(name, displayText, false, tags, recreatable, useLocalStorage);
this.cpu = cpu;
this.ramSize = ramSize;
this.speed = speed;
this.rateMbps = rateMbps;
this.multicastRateMbps = multicastRateMbps;
this.offerHA = offerHA;
}
public ServiceOffering21VO(String name, int cpu, int ramSize, int speed, Integer rateMbps, Integer multicastRateMbps, boolean offerHA, String displayText, boolean useLocalStorage, boolean recreatable, String tags, String hostTag) {
this(name, cpu, ramSize, speed, rateMbps, multicastRateMbps, offerHA, displayText, useLocalStorage, recreatable, tags);
this.hostTag = hostTag;
protected ServiceOffering21VO() {
super();
}
@Override
public boolean getOfferHA() {
return offerHA;
}
public ServiceOffering21VO(String name, int cpu, int ramSize, int speed, Integer rateMbps, Integer multicastRateMbps, boolean offerHA, String displayText, boolean useLocalStorage, boolean recreatable, String tags) {
super(name, displayText, false, tags, recreatable, useLocalStorage);
this.cpu = cpu;
this.ramSize = ramSize;
this.speed = speed;
this.rateMbps = rateMbps;
this.multicastRateMbps = multicastRateMbps;
this.offerHA = offerHA;
}
public ServiceOffering21VO(String name, int cpu, int ramSize, int speed, Integer rateMbps, Integer multicastRateMbps, boolean offerHA, String displayText, boolean useLocalStorage, boolean recreatable, String tags, String hostTag) {
this(name, cpu, ramSize, speed, rateMbps, multicastRateMbps, offerHA, displayText, useLocalStorage, recreatable, tags);
this.hostTag = hostTag;
}
@Override
public boolean getOfferHA() {
return offerHA;
}
@Override
public boolean getLimitCpuUse() {
return false;
}
public void setOfferHA(boolean offerHA) {
this.offerHA = offerHA;
}
@Override
@Transient
public String[] getTagsArray() {
String tags = getTags();
if (tags == null || tags.length() == 0) {
return new String[0];
}
return tags.split(",");
}
@Override
public int getCpu() {
return cpu;
}
public void setCpu(int cpu) {
this.cpu = cpu;
}
public void setSpeed(int speed) {
this.speed = speed;
}
public void setRamSize(int ramSize) {
this.ramSize = ramSize;
}
@Override
public int getSpeed() {
return speed;
}
@Override
public int getRamSize() {
return ramSize;
}
public void setRateMbps(Integer rateMbps) {
this.rateMbps = rateMbps;
}
@Override
public Integer getRateMbps() {
return rateMbps;
}
public void setMulticastRateMbps(Integer multicastRateMbps) {
this.multicastRateMbps = multicastRateMbps;
}
@Override
public Integer getMulticastRateMbps() {
return multicastRateMbps;
}
public String gethypervisorType() {
return null;
}
public void setHostTag(String hostTag) {
this.hostTag = hostTag;
}
public String getHostTag() {
return hostTag;
public void setOfferHA(boolean offerHA) {
this.offerHA = offerHA;
}
@Override
@Transient
public String[] getTagsArray() {
String tags = getTags();
if (tags == null || tags.length() == 0) {
return new String[0];
}
return tags.split(",");
}
@Override
public int getCpu() {
return cpu;
}
public void setCpu(int cpu) {
this.cpu = cpu;
}
public void setSpeed(int speed) {
this.speed = speed;
}
public void setRamSize(int ramSize) {
this.ramSize = ramSize;
}
@Override
public int getSpeed() {
return speed;
}
@Override
public int getRamSize() {
return ramSize;
}
public void setRateMbps(Integer rateMbps) {
this.rateMbps = rateMbps;
}
@Override
public Integer getRateMbps() {
return rateMbps;
}
public void setMulticastRateMbps(Integer multicastRateMbps) {
this.multicastRateMbps = multicastRateMbps;
}
@Override
public Integer getMulticastRateMbps() {
return multicastRateMbps;
}
public String gethypervisorType() {
return null;
}
public void setHostTag(String hostTag) {
this.hostTag = hostTag;
}
public String getHostTag() {
return hostTag;
}
@Override
@ -157,5 +157,5 @@ public class ServiceOffering21VO extends DiskOffering21VO implements ServiceOffe
@Override
public String getSystemVmType() {
return null;
}
}
}
}

Some files were not shown because too many files have changed in this diff Show More