mirror of https://github.com/apache/cloudstack.git
CloudByte storage plugin for the master branch
This commit is contained in:
parent
c56df036e8
commit
f3c7c607bd
|
|
@ -35,6 +35,11 @@
|
|||
<artifactId>cloud-plugin-storage-volume-solidfire</artifactId>
|
||||
<version>${project.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.cloudstack</groupId>
|
||||
<artifactId>cloud-plugin-storage-volume-cloudbyte</artifactId>
|
||||
<version>${project.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.cloudstack</groupId>
|
||||
<artifactId>cloud-server</artifactId>
|
||||
|
|
|
|||
|
|
@ -78,6 +78,7 @@
|
|||
<module>storage/image/sample</module>
|
||||
<module>storage/volume/nexenta</module>
|
||||
<module>storage/volume/solidfire</module>
|
||||
<module>storage/volume/cloudbyte</module>
|
||||
<module>storage/volume/default</module>
|
||||
<module>storage/volume/sample</module>
|
||||
<module>alert-handlers/snmp-alerts</module>
|
||||
|
|
|
|||
|
|
@ -0,0 +1,71 @@
|
|||
<!-- Licensed to the Apache Software Foundation (ASF) under one or more contributor
|
||||
license agreements. See the NOTICE file distributed with this work for additional
|
||||
information regarding copyright ownership. The ASF licenses this file to
|
||||
you under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
this file except in compliance with the License. You may obtain a copy of
|
||||
the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required
|
||||
by applicable law or agreed to in writing, software distributed under the
|
||||
License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
|
||||
OF ANY KIND, either express or implied. See the License for the specific
|
||||
language governing permissions and limitations under the License. -->
|
||||
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>
  <!-- Maven module for the CloudByte Elastistor primary-storage plugin. -->
  <artifactId>cloud-plugin-storage-volume-cloudbyte</artifactId>
  <name>Apache CloudStack Plugin - Storage Volume CloudByte Provider</name>
  <parent>
    <groupId>org.apache.cloudstack</groupId>
    <artifactId>cloudstack-plugins</artifactId>
    <version>4.4.0-SNAPSHOT</version>
    <relativePath>../../../pom.xml</relativePath>
  </parent>
  <dependencies>
    <!-- The plugin delegates volume create/delete to the default driver. -->
    <dependency>
      <groupId>org.apache.cloudstack</groupId>
      <artifactId>cloud-plugin-storage-volume-default</artifactId>
      <version>${project.version}</version>
    </dependency>
    <dependency>
      <groupId>org.apache.cloudstack</groupId>
      <artifactId>cloud-engine-storage-volume</artifactId>
      <version>${project.version}</version>
    </dependency>
    <!-- provided: the management server supplies the JDBC driver at runtime. -->
    <dependency>
      <groupId>mysql</groupId>
      <artifactId>mysql-connector-java</artifactId>
      <version>${cs.mysql.version}</version>
      <scope>provided</scope>
    </dependency>
    <!-- gson/jersey are used by ElastistorUtil for the REST API to Elastistor. -->
    <dependency>
      <groupId>com.google.code.gson</groupId>
      <artifactId>gson</artifactId>
      <version>${cs.gson.version}</version>
    </dependency>
    <dependency>
      <groupId>com.sun.jersey</groupId>
      <artifactId>jersey-bundle</artifactId>
      <!-- NOTE(review): hardcoded version; consider a ${cs.jersey.version} property for consistency. -->
      <version>1.17.1</version>
    </dependency>
  </dependencies>
  <build>
    <defaultGoal>install</defaultGoal>
    <sourceDirectory>src</sourceDirectory>
    <testSourceDirectory>test</testSourceDirectory>
    <plugins>
      <plugin>
        <artifactId>maven-surefire-plugin</artifactId>
        <!-- Unit tests are skipped; tests only run in the integration-test phase below. -->
        <configuration>
          <skipTests>true</skipTests>
        </configuration>
        <executions>
          <execution>
            <phase>integration-test</phase>
            <goals>
              <goal>test</goal>
            </goals>
          </execution>
        </executions>
      </plugin>
    </plugins>
  </build>
</project>
|
||||
|
|
@ -0,0 +1,18 @@
|
|||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
# Module descriptor for CloudStack's Spring module discovery:
# registers this plugin as "storage-volume-cloudbyte" under the "storage" parent module.
name=storage-volume-cloudbyte
parent=storage
|
||||
|
|
@ -0,0 +1,40 @@
|
|||
<!--
|
||||
Licensed to the Apache Software Foundation (ASF) under one
|
||||
or more contributor license agreements. See the NOTICE file
|
||||
distributed with this work for additional information
|
||||
regarding copyright ownership. The ASF licenses this file
|
||||
to you under the Apache License, Version 2.0 (the
|
||||
"License"); you may not use this file except in compliance
|
||||
with the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing,
|
||||
software distributed under the License is distributed on an
|
||||
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations
|
||||
under the License.
|
||||
-->
|
||||
<beans xmlns="http://www.springframework.org/schema/beans"
       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
       xmlns:context="http://www.springframework.org/schema/context"
       xmlns:aop="http://www.springframework.org/schema/aop"
       xsi:schemaLocation="http://www.springframework.org/schema/beans
                      http://www.springframework.org/schema/beans/spring-beans-3.0.xsd
                      http://www.springframework.org/schema/aop http://www.springframework.org/schema/aop/spring-aop-3.0.xsd
                      http://www.springframework.org/schema/context
                      http://www.springframework.org/schema/context/spring-context-3.0.xsd"
                      >

    <!-- Registers the Elastistor primary data store provider.
         These properties are the Elastistor connection defaults; they can be
         overridden per-pool via the "details" map passed to
         ElastistorPrimaryDataStoreLifeCycle.initialize(). -->
    <!-- SECURITY NOTE(review): a live-looking management IP, API key, account id
         and pool id are committed here in source control. These credentials
         should be rotated and the values externalized (e.g. to db.properties or
         encrypted global settings) rather than shipped in the Spring context. -->
    <bean id="elastistorDataStoreProvider"
          class="org.apache.cloudstack.storage.datastore.provider.ElastistorPrimaryDataStoreProvider">
        <property name="esmanagementip" value="10.10.171.180"/>
        <property name="esapikey" value="PubSInZaCji8hrRfOsCxgbug2I2k_sRJ0i2a9qmAzZIiCTcFPmZelzx6uNK9TYgqkdohCmq1L2J9eYmUe9YO6A"/>
        <property name="esaccountid" value="9e9f67d5-e06f-4d63-a0b8-e7255cba84b8"/>
        <property name="espoolid" value="d2d15d11-0f06-3426-a097-3e6e8b36f85c"/>
        <property name="esdefaultgateway" value="10.10.1.1"/>
        <property name="essubnet" value="8"/>
        <property name="estntinterface" value="em0"/>
    </bean>
</beans>
|
||||
|
|
@ -0,0 +1,88 @@
|
|||
package org.apache.cloudstack.storage.datastore.driver;
|
||||
|
||||
|
||||
import javax.inject.Inject;
|
||||
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
|
||||
import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
|
||||
import org.apache.cloudstack.storage.command.CommandResult;
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
import com.cloud.agent.api.to.DataStoreTO;
|
||||
import com.cloud.agent.api.to.DataTO;
|
||||
import com.cloud.storage.dao.DiskOfferingDao;
|
||||
import com.cloud.user.AccountManager;
|
||||
|
||||
/**
|
||||
* The implementation class for <code>PrimaryDataStoreDriver</code>. This
|
||||
* directs the public interface methods to use CloudByte's Elastistor based
|
||||
* volumes.
|
||||
*
|
||||
* @author amit.das@cloudbyte.com
|
||||
* @author punith.s@cloudbyte.com
|
||||
*
|
||||
*/
|
||||
public class ElastistorPrimaryDataStoreDriver extends CloudStackPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver{
|
||||
|
||||
private static final Logger s_logger = Logger.getLogger(ElastistorPrimaryDataStoreDriver.class);
|
||||
|
||||
@Inject
|
||||
AccountManager _accountMgr;
|
||||
@Inject
|
||||
DiskOfferingDao _diskOfferingDao;
|
||||
@Override
|
||||
public DataTO getTO(DataObject data) {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public DataStoreTO getStoreTO(DataStore store) {
|
||||
return null;
|
||||
}
|
||||
|
||||
public void createAsync(DataStore dataStore, DataObject dataObject, AsyncCompletionCallback<CreateCmdResult> callback) {
|
||||
super.createAsync(dataStore, dataObject, callback);
|
||||
}
|
||||
|
||||
public void deleteAsync(DataStore dataStore, DataObject dataObject, AsyncCompletionCallback<CommandResult> callback) {
|
||||
super.deleteAsync(dataStore, dataObject, callback);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void copyAsync(DataObject srcdata, DataObject destData, AsyncCompletionCallback<CopyCommandResult> callback) {
|
||||
throw new UnsupportedOperationException();
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean canCopy(DataObject srcData, DataObject destData) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void resize(DataObject data, AsyncCompletionCallback<CreateCmdResult> callback) {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
public ChapInfo getChapInfo(VolumeInfo volumeInfo) {
|
||||
return super.getChapInfo(volumeInfo);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void takeSnapshot(SnapshotInfo snapshot, AsyncCompletionCallback<CreateCmdResult> callback) {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void revertSnapshot(SnapshotInfo snapshot, AsyncCompletionCallback<CommandResult> callback) {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,479 @@
|
|||
package org.apache.cloudstack.storage.datastore.lifecycle;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.StringTokenizer;
|
||||
|
||||
import javax.inject.Inject;
|
||||
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
|
||||
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
|
||||
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
|
||||
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
|
||||
import org.apache.cloudstack.storage.datastore.util.ElastistorUtil;
|
||||
import org.apache.cloudstack.storage.datastore.util.ElastistorUtil.CreateTsmCmdResponse;
|
||||
import org.apache.cloudstack.storage.datastore.util.ElastistorUtil.CreateVolumeCmdResponse;
|
||||
import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper;
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
import com.cloud.agent.AgentManager;
|
||||
import com.cloud.agent.api.Answer;
|
||||
import com.cloud.agent.api.CreateStoragePoolCommand;
|
||||
import com.cloud.agent.api.DeleteStoragePoolCommand;
|
||||
import com.cloud.agent.api.StoragePoolInfo;
|
||||
import com.cloud.dc.dao.DataCenterDao;
|
||||
import com.cloud.host.Host;
|
||||
import com.cloud.host.HostVO;
|
||||
import com.cloud.host.dao.HostDao;
|
||||
import com.cloud.hypervisor.Hypervisor.HypervisorType;
|
||||
import com.cloud.resource.ResourceManager;
|
||||
import com.cloud.storage.Storage.StoragePoolType;
|
||||
import com.cloud.storage.StorageManager;
|
||||
import com.cloud.storage.StoragePool;
|
||||
import com.cloud.storage.StoragePoolAutomation;
|
||||
import com.cloud.storage.StoragePoolHostVO;
|
||||
import com.cloud.storage.dao.StoragePoolHostDao;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
|
||||
public class ElastistorPrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeCycle {
|
||||
private static final Logger s_logger = Logger.getLogger(ElastistorPrimaryDataStoreLifeCycle.class);
|
||||
|
||||
@Inject
|
||||
HostDao _hostDao;
|
||||
@Inject
|
||||
StoragePoolHostDao _storagePoolHostDao;
|
||||
@Inject
|
||||
protected ResourceManager _resourceMgr;
|
||||
@Inject
|
||||
PrimaryDataStoreDao primaryDataStoreDao;
|
||||
@Inject
|
||||
AgentManager agentMgr;
|
||||
@Inject
|
||||
StorageManager storageMgr;
|
||||
@Inject
|
||||
PrimaryDataStoreHelper dataStoreHelper;
|
||||
@Inject
|
||||
PrimaryDataStoreDao _storagePoolDao;
|
||||
@Inject
|
||||
PrimaryDataStoreHelper _dataStoreHelper;
|
||||
@Inject
|
||||
StoragePoolAutomation _storagePoolAutomation;
|
||||
@Inject
|
||||
StoragePoolDetailsDao _storagePoolDetailsDao;
|
||||
@Inject
|
||||
DataCenterDao _zoneDao;
|
||||
|
||||
@Override
|
||||
public DataStore initialize(Map<String, Object> dsInfos) {
|
||||
|
||||
String url = (String) dsInfos.get("url");
|
||||
Long zoneId = (Long) dsInfos.get("zoneId");
|
||||
Long podId = (Long) dsInfos.get("podId");
|
||||
Long clusterId = (Long) dsInfos.get("clusterId");
|
||||
String storagePoolName = (String) dsInfos.get("name");
|
||||
String providerName = (String) dsInfos.get("providerName");
|
||||
Long capacityBytes = (Long) dsInfos.get("capacityBytes");
|
||||
Long capacityIops = (Long) dsInfos.get("capacityIops");
|
||||
String tags = (String) dsInfos.get("tags");
|
||||
Map<String, String> details = (Map<String, String>) dsInfos.get("details");
|
||||
String storageIp = getStorageIp(url);
|
||||
int storagePort = getDefaultStoragePort(url);
|
||||
StoragePoolType storagetype = getStorageType(url);
|
||||
String accesspath = getAccessPath(url);
|
||||
String protocoltype = getProtocolType(url);
|
||||
String[] mp = accesspath.split("/");
|
||||
String mountpoint = mp[1];
|
||||
String uuid = null ;
|
||||
|
||||
/**
|
||||
* if the elastistor params which are required for plugin configuration
|
||||
* are not injected through spring-storage-volume-cloudbyte-context.xml, it can be set from details map.
|
||||
*/
|
||||
if(details.get("esaccountid") != null)
|
||||
ElastistorUtil.setElastistorAccountId(details.get("esaccountid"));
|
||||
if(details.get("esapikey") != null)
|
||||
ElastistorUtil.setElastistorApiKey(details.get("esapikey"));
|
||||
if(details.get("esdefaultgateway") != null)
|
||||
ElastistorUtil.setElastistorGateway(details.get("esdefaultgateway"));
|
||||
if(details.get("estntinterface") != null)
|
||||
ElastistorUtil.setElastistorInterface(details.get("estntinterface"));
|
||||
if(details.get("esmanagementip") != null)
|
||||
ElastistorUtil.setElastistorManagementIp(details.get("esmanagementip"));
|
||||
if(details.get("espoolid") != null)
|
||||
ElastistorUtil.setElastistorPoolId(details.get("espoolid"));
|
||||
if(details.get("essubnet") != null)
|
||||
ElastistorUtil.setElastistorSubnet(details.get("essubnet"));
|
||||
|
||||
if (capacityBytes == null || capacityBytes <= 0) {
|
||||
throw new IllegalArgumentException("'capacityBytes' must be present and greater than 0.");
|
||||
}
|
||||
|
||||
if (capacityIops == null || capacityIops <= 0) {
|
||||
throw new IllegalArgumentException("'capacityIops' must be present and greater than 0.");
|
||||
}
|
||||
|
||||
// elastistor does not allow same name and ip pools.
|
||||
List<StoragePoolVO> storagePoolVO = _storagePoolDao.listAll();
|
||||
for(StoragePoolVO poolVO : storagePoolVO){
|
||||
if (storagePoolName.equals(poolVO.getName())) {
|
||||
throw new IllegalArgumentException("storage pool with that name already exists in elastistor,please specify a unique name .");
|
||||
}
|
||||
if (storageIp.equals(poolVO.getHostAddress())) {
|
||||
throw new IllegalArgumentException("storage pool with that ip already exists in elastistor,please specify a unique ip .");
|
||||
}
|
||||
}
|
||||
|
||||
PrimaryDataStoreParameters parameters = new PrimaryDataStoreParameters();
|
||||
|
||||
// creates the volume in elastistor
|
||||
parameters = createElastistorVolume(parameters, storagePoolName, storageIp, capacityBytes, capacityIops, protocoltype, mountpoint);
|
||||
|
||||
parameters.setHost(storageIp);
|
||||
parameters.setPort(storagePort);
|
||||
if(protocoltype.contentEquals("nfs")){
|
||||
parameters.setPath(accesspath);
|
||||
}
|
||||
parameters.setType(storagetype);
|
||||
parameters.setZoneId(zoneId);
|
||||
parameters.setPodId(podId);
|
||||
parameters.setName(storagePoolName);
|
||||
parameters.setProviderName(providerName);
|
||||
parameters.setManaged(false);
|
||||
parameters.setCapacityBytes(capacityBytes);
|
||||
parameters.setUsedBytes(0);
|
||||
parameters.setCapacityIops(capacityIops);
|
||||
parameters.setHypervisorType(HypervisorType.Any);
|
||||
parameters.setTags(tags);
|
||||
parameters.setDetails(details);
|
||||
parameters.setClusterId(clusterId);
|
||||
|
||||
return _dataStoreHelper.createPrimaryDataStore(parameters);
|
||||
}
|
||||
|
||||
private PrimaryDataStoreParameters createElastistorVolume(PrimaryDataStoreParameters parameters, String storagePoolName, String storageIp, Long capacityBytes, Long capacityIops, String protocoltype, String mountpoint){
|
||||
|
||||
s_logger.info("creation of elastistor volume started");
|
||||
try {
|
||||
|
||||
CreateTsmCmdResponse tsmCmdResponse = ElastistorUtil.createElastistorTsm(storagePoolName, storageIp, capacityBytes, capacityIops);
|
||||
|
||||
String uuid = tsmCmdResponse.getTsm().getUuid();
|
||||
parameters.setUuid(uuid);
|
||||
|
||||
CreateVolumeCmdResponse volumeCmdResponse = ElastistorUtil.createElastistorVolume(storagePoolName, tsmCmdResponse, capacityBytes, capacityIops, protocoltype ,mountpoint);
|
||||
|
||||
if(protocoltype.contentEquals("iscsi")){
|
||||
String accesspath = "/"+volumeCmdResponse.getFileSystem().getIqn()+"/0";
|
||||
parameters.setPath(accesspath);
|
||||
}
|
||||
s_logger.info("creation of elastistor volume complete");
|
||||
|
||||
return parameters;
|
||||
} catch (Throwable e) {
|
||||
throw new CloudRuntimeException("Failed to create volume in elastistor" + e.toString());
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
private String getAccessPath(String url) {
|
||||
StringTokenizer st = new StringTokenizer(url ,"/");
|
||||
int count = 0;
|
||||
while (st.hasMoreElements()) {
|
||||
if(count == 2)
|
||||
{ String s = "/" ;
|
||||
return s.concat(st.nextElement().toString());
|
||||
}
|
||||
st.nextElement();
|
||||
count++;
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
|
||||
private StoragePoolType getStorageType(String url) {
|
||||
|
||||
StringTokenizer st = new StringTokenizer(url ,":");
|
||||
|
||||
while (st.hasMoreElements())
|
||||
{
|
||||
String accessprotocol = st.nextElement().toString();
|
||||
|
||||
if(accessprotocol.contentEquals("nfs"))
|
||||
{
|
||||
return StoragePoolType.NetworkFilesystem;
|
||||
}
|
||||
else if(accessprotocol.contentEquals("iscsi"))
|
||||
{
|
||||
return StoragePoolType.IscsiLUN;
|
||||
}
|
||||
|
||||
else
|
||||
|
||||
break;
|
||||
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
private String getProtocolType(String url) {
|
||||
StringTokenizer st = new StringTokenizer(url ,":");
|
||||
|
||||
while (st.hasMoreElements())
|
||||
{
|
||||
String accessprotocol = st.nextElement().toString();
|
||||
|
||||
if(accessprotocol.contentEquals("nfs")){
|
||||
return "nfs";
|
||||
}else if(accessprotocol.contentEquals("iscsi")){
|
||||
return "iscsi";
|
||||
} else
|
||||
break;
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
// this method parses the url and gets the default storage port based on access protocol
|
||||
private int getDefaultStoragePort(String url) {
|
||||
|
||||
StringTokenizer st = new StringTokenizer(url ,":");
|
||||
|
||||
while (st.hasMoreElements())
|
||||
{
|
||||
|
||||
String accessprotocol = st.nextElement().toString();
|
||||
|
||||
if(accessprotocol.contentEquals("nfs")){
|
||||
return 2049;
|
||||
}
|
||||
else if(accessprotocol.contentEquals("iscsi")){
|
||||
return 3260;
|
||||
}
|
||||
else
|
||||
break;
|
||||
|
||||
}
|
||||
return -1;
|
||||
|
||||
}
|
||||
|
||||
// parses the url and returns the storage volume ip
|
||||
private String getStorageIp(String url) {
|
||||
|
||||
StringTokenizer st = new StringTokenizer(url ,"/");
|
||||
int count = 0;
|
||||
|
||||
while (st.hasMoreElements()) {
|
||||
if(count == 1)
|
||||
return st.nextElement().toString();
|
||||
|
||||
st.nextElement();
|
||||
count++;
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean attachCluster(DataStore store, ClusterScope scope) {
|
||||
|
||||
dataStoreHelper.attachCluster(store);
|
||||
|
||||
PrimaryDataStoreInfo primarystore = (PrimaryDataStoreInfo) store;
|
||||
// Check if there is host up in this cluster
|
||||
List<HostVO> allHosts = _resourceMgr.listAllUpAndEnabledHosts(Host.Type.Routing, primarystore.getClusterId(),
|
||||
primarystore.getPodId(), primarystore.getDataCenterId());
|
||||
if (allHosts.isEmpty()) {
|
||||
primaryDataStoreDao.expunge(primarystore.getId());
|
||||
throw new CloudRuntimeException("No host up to associate a storage pool with in cluster "
|
||||
+ primarystore.getClusterId());
|
||||
}
|
||||
|
||||
|
||||
boolean success = false;
|
||||
for (HostVO h : allHosts) {
|
||||
success = createStoragePool(h.getId(), primarystore);
|
||||
if (success) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
s_logger.debug("In createPool Adding the pool to each of the hosts");
|
||||
List<HostVO> poolHosts = new ArrayList<HostVO>();
|
||||
for (HostVO h : allHosts) {
|
||||
try {
|
||||
storageMgr.connectHostToSharedPool(h.getId(), primarystore.getId());
|
||||
poolHosts.add(h);
|
||||
} catch (Exception e) {
|
||||
s_logger.warn("Unable to establish a connection between " + h + " and " + primarystore, e);
|
||||
}
|
||||
|
||||
if (poolHosts.isEmpty()) {
|
||||
s_logger.warn("No host can access storage pool " + primarystore + " on cluster "
|
||||
+ primarystore.getClusterId());
|
||||
primaryDataStoreDao.expunge(primarystore.getId());
|
||||
throw new CloudRuntimeException("Failed to access storage pool");
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
private boolean createStoragePool(long hostId, StoragePool pool) {
|
||||
s_logger.debug("creating pool " + pool.getName() + " on host " + hostId);
|
||||
if (pool.getPoolType() != StoragePoolType.NetworkFilesystem && pool.getPoolType() != StoragePoolType.Filesystem
|
||||
&& pool.getPoolType() != StoragePoolType.IscsiLUN && pool.getPoolType() != StoragePoolType.Iscsi
|
||||
&& pool.getPoolType() != StoragePoolType.VMFS && pool.getPoolType() != StoragePoolType.SharedMountPoint
|
||||
&& pool.getPoolType() != StoragePoolType.PreSetup && pool.getPoolType() != StoragePoolType.OCFS2
|
||||
&& pool.getPoolType() != StoragePoolType.RBD && pool.getPoolType() != StoragePoolType.CLVM) {
|
||||
s_logger.warn(" Doesn't support storage pool type " + pool.getPoolType());
|
||||
return false;
|
||||
}
|
||||
CreateStoragePoolCommand cmd = new CreateStoragePoolCommand(true, pool);
|
||||
final Answer answer = agentMgr.easySend(hostId, cmd);
|
||||
if (answer != null && answer.getResult()) {
|
||||
return true;
|
||||
} else {
|
||||
primaryDataStoreDao.expunge(pool.getId());
|
||||
String msg = "";
|
||||
if (answer != null) {
|
||||
msg = "Can not create storage pool through host " + hostId + " due to " + answer.getDetails();
|
||||
s_logger.warn(msg);
|
||||
} else {
|
||||
msg = "Can not create storage pool through host " + hostId + " due to CreateStoragePoolCommand returns null";
|
||||
s_logger.warn(msg);
|
||||
}
|
||||
throw new CloudRuntimeException(msg);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo existingInfo) {
|
||||
_dataStoreHelper.attachHost(store, scope, existingInfo);
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType hypervisorType) {
|
||||
List<HostVO> hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(hypervisorType, scope.getScopeId());
|
||||
s_logger.debug("In createPool. Attaching the pool to each of the hosts.");
|
||||
List<HostVO> poolHosts = new ArrayList<HostVO>();
|
||||
for (HostVO host : hosts) {
|
||||
try {
|
||||
storageMgr.connectHostToSharedPool(host.getId(), dataStore.getId());
|
||||
poolHosts.add(host);
|
||||
} catch (Exception e) {
|
||||
s_logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e);
|
||||
}
|
||||
}
|
||||
if (poolHosts.isEmpty()) {
|
||||
s_logger.warn("No host can access storage pool " + dataStore + " in this zone.");
|
||||
primaryDataStoreDao.expunge(dataStore.getId());
|
||||
throw new CloudRuntimeException("Failed to create storage pool as it is not accessible to hosts.");
|
||||
}
|
||||
dataStoreHelper.attachZone(dataStore, hypervisorType);
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean maintain(DataStore store) {
|
||||
_storagePoolAutomation.maintain(store);
|
||||
_dataStoreHelper.maintain(store);
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean cancelMaintain(DataStore store) {
|
||||
_dataStoreHelper.cancelMaintain(store);
|
||||
_storagePoolAutomation.cancelMaintain(store);
|
||||
return true;
|
||||
}
|
||||
|
||||
@SuppressWarnings("finally")
|
||||
@Override
|
||||
public boolean deleteDataStore(DataStore store) {
|
||||
List<StoragePoolHostVO> hostPoolRecords = _storagePoolHostDao.listByPoolId(store.getId());
|
||||
StoragePool pool = (StoragePool) store;
|
||||
|
||||
// find the hypervisor where the storage is attached to.
|
||||
HypervisorType hType = null;
|
||||
if(hostPoolRecords.size() > 0 ){
|
||||
hType = getHypervisorType(hostPoolRecords.get(0).getHostId());
|
||||
}
|
||||
|
||||
// Remove the SR associated with the Xenserver
|
||||
for (StoragePoolHostVO host : hostPoolRecords) {
|
||||
DeleteStoragePoolCommand deleteCmd = new DeleteStoragePoolCommand(pool);
|
||||
final Answer answer = agentMgr.easySend(host.getHostId(), deleteCmd);
|
||||
|
||||
if (answer != null && answer.getResult()) {
|
||||
// if host is KVM hypervisor then send deleteStoragepoolcmd to all the kvm hosts.
|
||||
if (HypervisorType.KVM != hType) {
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
if (answer != null) {
|
||||
s_logger.error("Failed to delete storage pool: " + answer.getResult());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//delete the Elastistor volume at backend
|
||||
deleteElastistorVolume(pool);
|
||||
|
||||
return _dataStoreHelper.deletePrimaryDataStore(store);
|
||||
}
|
||||
|
||||
private void deleteElastistorVolume(StoragePool pool){
|
||||
|
||||
String poolip = pool.getHostAddress();
|
||||
String esip = null;
|
||||
String apikey = null;
|
||||
|
||||
// check if apikey and managentip is empty, if so getting it from stragepooldetails
|
||||
if(ElastistorUtil.s_esIPVAL == "" && ElastistorUtil.s_esAPIKEYVAL == ""){
|
||||
Map<String, String> detailsMap = _storagePoolDetailsDao.listDetailsKeyPairs(pool.getId());
|
||||
ElastistorUtil.setElastistorManagementIp(detailsMap.get("esmanagementip"));
|
||||
esip=ElastistorUtil.s_esIPVAL;
|
||||
ElastistorUtil.setElastistorApiKey(detailsMap.get("esapikey"));
|
||||
apikey = ElastistorUtil.s_esAPIKEYVAL;
|
||||
}else{
|
||||
esip = ElastistorUtil.s_esIPVAL;
|
||||
apikey = ElastistorUtil.s_esAPIKEYVAL;
|
||||
}
|
||||
|
||||
boolean status;
|
||||
try {
|
||||
status = ElastistorUtil.deleteElastistorVolume(poolip,esip,apikey);
|
||||
} catch (Throwable e) {
|
||||
throw new CloudRuntimeException("Failed to delete primary storage on elastistor" + e);
|
||||
}
|
||||
|
||||
if(status == true){
|
||||
s_logger.info("deletion of elastistor primary storage complete");
|
||||
}else{
|
||||
s_logger.error("deletion of elastistor volume failed");
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
private HypervisorType getHypervisorType(long hostId) {
|
||||
HostVO host = _hostDao.findById(hostId);
|
||||
if (host != null)
|
||||
return host.getHypervisorType();
|
||||
return HypervisorType.None;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean migrateToObjectStore(DataStore store) {
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,69 @@
|
|||
package org.apache.cloudstack.storage.datastore.provider;
|
||||
|
||||
import javax.inject.Inject;
|
||||
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
|
||||
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
import com.cloud.agent.AgentManager;
|
||||
import com.cloud.agent.api.Answer;
|
||||
import com.cloud.agent.api.ModifyStoragePoolAnswer;
|
||||
import com.cloud.agent.api.ModifyStoragePoolCommand;
|
||||
import com.cloud.alert.AlertManager;
|
||||
import com.cloud.storage.DataStoreRole;
|
||||
import com.cloud.storage.StoragePool;
|
||||
import com.cloud.storage.StoragePoolHostVO;
|
||||
import com.cloud.storage.dao.StoragePoolHostDao;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
|
||||
public class ElastistorHostListener implements HypervisorHostListener {
|
||||
private static final Logger s_logger = Logger.getLogger(DefaultHostListener.class);
|
||||
@Inject
|
||||
AgentManager agentMgr;
|
||||
@Inject
|
||||
DataStoreManager dataStoreMgr;
|
||||
@Inject
|
||||
AlertManager alertMgr;
|
||||
@Inject
|
||||
StoragePoolHostDao storagePoolHostDao;
|
||||
@Inject
|
||||
PrimaryDataStoreDao primaryStoreDao;
|
||||
|
||||
@Override
|
||||
public boolean hostConnect(long hostId, long poolId) {
|
||||
StoragePool pool = (StoragePool) this.dataStoreMgr.getDataStore(poolId, DataStoreRole.Primary);
|
||||
ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, pool);
|
||||
final Answer answer = agentMgr.easySend(hostId, cmd);
|
||||
|
||||
if (answer == null) {
|
||||
throw new CloudRuntimeException("Unable to get an answer to the modify storage pool command" + pool.getId());
|
||||
}
|
||||
|
||||
if (!answer.getResult()) {
|
||||
String msg = "Unable to attach storage pool" + poolId + " to the host" + hostId;
|
||||
|
||||
alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST,pool.getDataCenterId(), pool.getPodId(), msg, msg);
|
||||
|
||||
throw new CloudRuntimeException("Unable establish connection from storage head to storage pool " + pool.getId() + " due to " + answer.getDetails() + pool.getId());
|
||||
}
|
||||
|
||||
assert (answer instanceof ModifyStoragePoolAnswer) : "Well, now why won't you actually return the ModifyStoragePoolAnswer when it's ModifyStoragePoolCommand? Pool=" + pool.getId() + "Host=" + hostId;
|
||||
|
||||
s_logger.info("Connection established between " + pool + " host + " + hostId);
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean hostDisconnected(long hostId, long poolId) {
|
||||
StoragePoolHostVO storagePoolHost = storagePoolHostDao.findByPoolHost(
|
||||
poolId, hostId);
|
||||
|
||||
if (storagePoolHost != null) {
|
||||
storagePoolHostDao.deleteStoragePoolHostDetails(hostId, poolId);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,165 @@
|
|||
package org.apache.cloudstack.storage.datastore.provider;
|
||||
|
||||
import java.util.HashSet;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
import javax.inject.Inject;
|
||||
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider;
|
||||
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
|
||||
import org.apache.cloudstack.storage.datastore.driver.ElastistorPrimaryDataStoreDriver;
|
||||
import org.apache.cloudstack.storage.datastore.lifecycle.ElastistorPrimaryDataStoreLifeCycle;
|
||||
import org.apache.cloudstack.storage.datastore.util.ElastistorUtil;
|
||||
import org.apache.log4j.Logger;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import com.cloud.agent.AgentManager;
|
||||
import com.cloud.alert.AlertManager;
|
||||
import com.cloud.storage.dao.StoragePoolHostDao;
|
||||
import com.cloud.utils.component.ComponentContext;
|
||||
|
||||
/**
|
||||
* This is the starting point of the elastistor storage plugin. This bean will
|
||||
* be detected by Spring container & initialized. This will be one of the
|
||||
* providers available via {@link DataStoreProviderManagerImpl} object.
|
||||
*
|
||||
* @author amit.das@cloudbyte.com
|
||||
* @author punith.s@cloudbyte.com
|
||||
*/
|
||||
@Component
|
||||
public class ElastistorPrimaryDataStoreProvider implements PrimaryDataStoreProvider {
|
||||
|
||||
private static final Logger s_logger = Logger.getLogger(DefaultHostListener.class);
|
||||
|
||||
//these classes will be injected by spring
|
||||
private ElastistorPrimaryDataStoreLifeCycle lifecycle;
|
||||
private PrimaryDataStoreDriver driver;
|
||||
private HypervisorHostListener listener;
|
||||
|
||||
// these params will be initialized with respective values given in spring-storage-volume-cloudbyte-context.xml bean for the elastistor porpose only.
|
||||
private String esmanagementip;
|
||||
private String esapikey;
|
||||
private String esaccountid;
|
||||
private String espoolid;
|
||||
private String esdefaultgateway;
|
||||
private String essubnet;
|
||||
private String estntinterface;
|
||||
|
||||
@Inject
|
||||
AgentManager agentMgr;
|
||||
@Inject
|
||||
DataStoreManager dataStoreMgr;
|
||||
@Inject
|
||||
AlertManager alertMgr;
|
||||
@Inject
|
||||
StoragePoolHostDao storagePoolHostDao;
|
||||
@Inject
|
||||
PrimaryDataStoreDao primaryStoreDao;
|
||||
|
||||
|
||||
@Override
|
||||
public String getName() {
|
||||
return ElastistorUtil.ES_PROVIDER_NAME;
|
||||
}
|
||||
|
||||
@Override
|
||||
public DataStoreLifeCycle getDataStoreLifeCycle() {
|
||||
return lifecycle;
|
||||
}
|
||||
|
||||
@Override
|
||||
public PrimaryDataStoreDriver getDataStoreDriver() {
|
||||
return driver;
|
||||
}
|
||||
|
||||
@Override
|
||||
public HypervisorHostListener getHostListener() {
|
||||
return listener;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean configure(Map<String, Object> params) {
|
||||
|
||||
lifecycle = ComponentContext.inject(ElastistorPrimaryDataStoreLifeCycle.class);
|
||||
driver = ComponentContext.inject(ElastistorPrimaryDataStoreDriver.class);
|
||||
listener = ComponentContext.inject(ElastistorHostListener.class);
|
||||
|
||||
ElastistorUtil.setElastistorAccountId(esaccountid);
|
||||
ElastistorUtil.setElastistorApiKey(esapikey);
|
||||
ElastistorUtil.setElastistorManagementIp(esmanagementip);
|
||||
ElastistorUtil.setElastistorPoolId(espoolid);
|
||||
ElastistorUtil.setElastistorGateway(esdefaultgateway);
|
||||
ElastistorUtil.setElastistorInterface(estntinterface);
|
||||
ElastistorUtil.setElastistorSubnet(essubnet);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Set<DataStoreProviderType> getTypes() {
|
||||
Set<DataStoreProviderType> types = new HashSet<DataStoreProviderType>();
|
||||
|
||||
types.add(DataStoreProviderType.PRIMARY);
|
||||
|
||||
return types;
|
||||
}
|
||||
public String getEspoolid() {
|
||||
return espoolid;
|
||||
}
|
||||
|
||||
public void setEspoolid(String espoolid) {
|
||||
this.espoolid = espoolid;
|
||||
}
|
||||
|
||||
public String getEsmanagementip() {
|
||||
return esmanagementip;
|
||||
}
|
||||
|
||||
public void setEsmanagementip(String esmanagementip) {
|
||||
this.esmanagementip = esmanagementip;
|
||||
}
|
||||
|
||||
public String getEsaccountid() {
|
||||
return esaccountid;
|
||||
}
|
||||
|
||||
public void setEsaccountid(String esaccountid) {
|
||||
this.esaccountid = esaccountid;
|
||||
}
|
||||
|
||||
public String getEsapikey() {
|
||||
return esapikey;
|
||||
}
|
||||
|
||||
public void setEsapikey(String esapikey) {
|
||||
this.esapikey = esapikey;
|
||||
}
|
||||
|
||||
public String getesdefaultgateway() {
|
||||
return esdefaultgateway;
|
||||
}
|
||||
|
||||
public void setesdefaultgateway(String esdefaultgateway) {
|
||||
this.esdefaultgateway = esdefaultgateway;
|
||||
}
|
||||
public String getEssubnet() {
|
||||
return essubnet;
|
||||
}
|
||||
|
||||
public void setEssubnet(String essubnet) {
|
||||
this.essubnet = essubnet;
|
||||
}
|
||||
|
||||
public String getEstntinterface(){
|
||||
return estntinterface;
|
||||
}
|
||||
|
||||
public void setEstntinterface(String estntinterface){
|
||||
this.estntinterface = estntinterface;
|
||||
}
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
Loading…
Reference in New Issue