mirror of https://github.com/apache/cloudstack.git
testClient is now tracked under tools/marvin
This commit is contained in:
parent
ffec10d6ab
commit
4587299d02
|
|
@ -1,17 +0,0 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<projectDescription>
|
||||
<name>testClient</name>
|
||||
<comment></comment>
|
||||
<projects>
|
||||
</projects>
|
||||
<buildSpec>
|
||||
<buildCommand>
|
||||
<name>org.python.pydev.PyDevBuilder</name>
|
||||
<arguments>
|
||||
</arguments>
|
||||
</buildCommand>
|
||||
</buildSpec>
|
||||
<natures>
|
||||
<nature>org.python.pydev.pythonNature</nature>
|
||||
</natures>
|
||||
</projectDescription>
|
||||
|
|
@ -1,10 +0,0 @@
|
|||
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
|
||||
<?eclipse-pydev version="1.0"?>
|
||||
|
||||
<pydev_project>
|
||||
<pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property>
|
||||
<pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python 2.7</pydev_property>
|
||||
<pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH">
|
||||
<path>/testClient/</path>
|
||||
</pydev_pathproperty>
|
||||
</pydev_project>
|
||||
|
|
@ -1,24 +0,0 @@
|
|||
CloudStack Test Client
|
||||
|
||||
0. Generate API XML spec file
|
||||
ant build-apidocs, the output xml file is dist/commands.xml
|
||||
|
||||
1. Generate CloudStack API Python code from the API XML spec file generated by step 0
|
||||
python codegenerator.py -o where-to-put-the-cloudstack-api -s where-the-spec-file
|
||||
|
||||
1a. If you will be running XML based tests, you will need to run them through the
|
||||
translator script. To do that execute translator.py -h for command line help.
|
||||
Example:
|
||||
python translator.py -i example.xml
|
||||
|
||||
this will create an example.xml.py script in the current directory. Run
|
||||
that to run the test.
|
||||
|
||||
|
||||
2. Facilities it provides:
|
||||
1. very handy cloudstack API python wrapper
|
||||
2. support async job executing in parallel
|
||||
3. remote ssh login/execute command
|
||||
4. mysql query
|
||||
|
||||
3. sample code is under unitTest
|
||||
|
|
@ -1,74 +0,0 @@
|
|||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
try:
|
||||
import unittest2 as unittest
|
||||
except ImportError:
|
||||
import unittest
|
||||
|
||||
from functools import partial
|
||||
import os
|
||||
import sys
|
||||
import logging
|
||||
|
||||
def testCaseLogger(message, logger=None):
|
||||
if logger is not None:
|
||||
logger.debug(message)
|
||||
|
||||
class TestCaseExecuteEngine(object):
    """Discovers unittest test cases under a folder, wires the CloudStack test
    client and per-class loggers into each case, then runs the suite."""

    def __init__(self, testclient, testCaseFolder, testcaseLogFile=None, testResultLogFile=None):
        """
        testclient        -- shared CloudStack test client injected into every case
        testCaseFolder    -- directory searched (recursively) for test modules
        testcaseLogFile   -- optional path for the DEBUG-level test-case log
        testResultLogFile -- optional path for the runner's result stream;
                             defaults to sys.stdout when omitted
        """
        self.testclient = testclient
        self.testCaseFolder = testCaseFolder
        self.logformat = logging.Formatter("%(asctime)s - %(levelname)s - %(name)s - %(message)s")

        if testcaseLogFile is not None:
            self.logfile = testcaseLogFile
            self.logger = logging.getLogger("TestCaseExecuteEngine")
            fh = logging.FileHandler(self.logfile)
            fh.setFormatter(self.logformat)
            self.logger.addHandler(fh)
            self.logger.setLevel(logging.DEBUG)
        # NOTE(review): this branch touches self.logger, which only exists when
        # testcaseLogFile was also supplied -- confirm callers always pass both.
        if testResultLogFile is not None:
            ch = logging.StreamHandler()
            ch.setLevel(logging.ERROR)
            ch.setFormatter(self.logformat)
            self.logger.addHandler(ch)
            fp = open(testResultLogFile, "w")
            self.testResultLogFile = fp
        else:
            self.testResultLogFile = sys.stdout

    def injectTestCase(self, testSuites):
        """Recursively walk a test suite, attaching the test client and a
        per-class DEBUG logger to every leaf test case."""
        for test in testSuites:
            if isinstance(test, unittest.BaseTestSuite):
                self.injectTestCase(test)
            else:
                #logger bears the name of the test class
                testcaselogger = logging.getLogger("testclient.testcase.%s"%test.__class__.__name__)
                # NOTE(review): self.logfile is only set when a testcaseLogFile
                # was passed to __init__ -- confirm before running without one.
                fh = logging.FileHandler(self.logfile)
                fh.setFormatter(self.logformat)
                testcaselogger.addHandler(fh)
                testcaselogger.setLevel(logging.DEBUG)

                #inject testclient and logger into each unittest
                setattr(test, "testClient", self.testclient)
                setattr(test, "debug", partial(testCaseLogger, logger=testcaselogger))
                setattr(test.__class__, "clstestclient", self.testclient)
                # Cases decorated with account info get a dedicated API client.
                if hasattr(test, "UserName"):
                    self.testclient.createNewApiClient(test.UserName, test.DomainName, test.AcctType)

    def run(self):
        """Discover tests under testCaseFolder, inject dependencies, and run
        them with a text runner writing to the configured result stream."""
        loader = unittest.loader.TestLoader()
        suite = loader.discover(self.testCaseFolder)
        self.injectTestCase(suite)

        unittest.TextTestRunner(stream=self.testResultLogFile, verbosity=2).run(suite)
|
||||
|
|
@ -1,12 +0,0 @@
|
|||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
|
|
@ -1,230 +0,0 @@
|
|||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
import threading
|
||||
import cloudstackException
|
||||
import time
|
||||
import Queue
|
||||
import copy
|
||||
import sys
|
||||
import jsonHelper
|
||||
import datetime
|
||||
|
||||
class job(object):
    """A queued unit of work: an API command plus the sequence id it was given."""

    def __init__(self):
        # Both fields are filled in by the submitter (see asyncJobMgr.submitCmds).
        self.id = self.cmd = None
|
||||
class jobStatus(object):
    """Outcome record for one executed (or polled) command."""

    def __init__(self):
        self.result = None       # response object or error text
        self.status = None       # True on success, False on failure, None if unknown
        self.startTime = None    # wall-clock start (sync commands / DB backfill)
        self.endTime = None      # wall-clock end
        self.duration = None     # seconds between startTime and endTime
        self.jobId = None        # server-side async job id, when applicable
        self.responsecls = None  # response class used to deserialize async results

    def __str__(self):
        # items() instead of the Python-2-only iteritems(): identical output on
        # Python 2 and keeps this class importable on Python 3.
        return '{%s}' % str(', '.join('%s : %s' % (k, repr(v)) for (k, v) in self.__dict__.items()))
|
||||
class workThread(threading.Thread):
    """Worker thread: drains an input queue of jobs / poll requests, executes
    each against its own copy of the API connection, and pushes the resulting
    jobStatus onto an output queue."""

    def __init__(self, in_queue, outqueue, apiClient, db=None, lock=None):
        threading.Thread.__init__(self)
        self.inqueue = in_queue
        self.output = outqueue
        # Each worker gets its own connection copy so threads do not share sockets.
        self.connection = apiClient.connection.__copy__()
        # NOTE(review): the db parameter is accepted but discarded -- confirm intent.
        self.db = None
        self.lock = lock

    def queryAsynJob(self, job):
        """Poll the server for the outcome of a previously submitted async job,
        storing it on job.result; returns the (mutated) job."""
        if job.jobId is None:
            return job

        try:
            # Serialize API access across workers; the connection layer is not
            # assumed to be thread-safe.
            self.lock.acquire()
            result = self.connection.pollAsyncJob(job.jobId, job.responsecls).jobresult
        except cloudstackException.cloudstackAPIException, e:
            result = str(e)
        finally:
            self.lock.release()

        job.result = result
        return job

    def executeCmd(self, job):
        """Execute job.cmd synchronously, or kick off an async job, returning a
        jobStatus describing the outcome."""
        cmd = job.cmd

        jobstatus = jobStatus()
        jobId = None
        try:
            self.lock.acquire()

            if cmd.isAsync == "false":
                jobstatus.startTime = datetime.datetime.now()

                result = self.connection.make_request(cmd)
                jobstatus.result = result
                jobstatus.endTime = datetime.datetime.now()
                # Wall-clock duration in whole seconds.
                jobstatus.duration = time.mktime(jobstatus.endTime.timetuple()) - time.mktime(jobstatus.startTime.timetuple())
            else:
                result = self.connection.make_request(cmd, None, True)
                if result is None:
                    jobstatus.status = False
                else:
                    jobId = result.jobid
                    jobstatus.jobId = jobId
                    try:
                        # Derive the response class (FooCmd -> FooResponse) so the
                        # poller can deserialize the eventual result.
                        responseName = cmd.__class__.__name__.replace("Cmd", "Response")
                        jobstatus.responsecls = jsonHelper.getclassFromName(cmd, responseName)
                    except:
                        pass
                    jobstatus.status = True
        except cloudstackException.cloudstackAPIException, e:
            jobstatus.result = str(e)
            jobstatus.status = False
        except:
            # Catch-all: record raw exception info rather than killing the thread.
            jobstatus.status = False
            jobstatus.result = sys.exc_info()
        finally:
            self.lock.release()

        return jobstatus

    def run(self):
        """Consume the input queue until empty, emitting one jobStatus per item."""
        while self.inqueue.qsize() > 0:
            job = self.inqueue.get()
            # A jobStatus item means "poll this async job"; anything else is a
            # fresh command to execute.
            if isinstance(job, jobStatus):
                jobstatus = self.queryAsynJob(job)
            else:
                jobstatus = self.executeCmd(job)

            self.output.put(jobstatus)
            self.inqueue.task_done()

        '''release the resource'''
        self.connection.close()
|
||||
|
||||
class jobThread(threading.Thread):
    """Worker thread that drains a queue of job objects, running each in turn."""

    def __init__(self, inqueue, interval):
        """inqueue: queue of objects with run() and apiClient attributes;
        interval: seconds to pause after each job."""
        super(jobThread, self).__init__()
        self.inqueue = inqueue
        self.interval = interval

    def run(self):
        """Run queued jobs until the queue is empty, pausing between jobs."""
        while not self.inqueue.empty():
            queued = self.inqueue.get()
            try:
                queued.run()
                # Best-effort cleanup: release the job's API connection.
                queued.apiClient.connection.close()
            except:
                # Deliberately best-effort; a failing job must not kill the worker.
                pass

            self.inqueue.task_done()
            time.sleep(self.interval)
|
||||
|
||||
class outputDict(object):
    """Shared result holder: a plain dict plus a condition variable to guard it."""

    def __init__(self):
        # The dict holds results; consumers coordinate access via the condition.
        self.dict = {}
        self.lock = threading.Condition()
|
||||
|
||||
class asyncJobMgr(object):
    """Fans CloudStack API commands / job objects out to worker threads and
    collects their results."""

    def __init__(self, apiClient, db):
        """
        apiClient -- configured API client; each worker copies its connection
        db        -- DB connection used to backfill async-job timestamps (may be None)
        """
        self.inqueue = Queue.Queue()
        self.output = outputDict()
        self.outqueue = Queue.Queue()
        self.apiClient = apiClient
        self.db = db

    def submitCmds(self, cmds):
        """Queue one job per command.

        Returns the list of job ids assigned, or False when a previous batch
        is still pending in the input queue.
        """
        if not self.inqueue.empty():
            return False
        nextId = 0
        ids = []
        for cmd in cmds:
            asyncjob = job()
            asyncjob.id = nextId
            asyncjob.cmd = cmd
            self.inqueue.put(asyncjob)
            # Fix: record the id actually assigned to this job. The original
            # incremented before appending, so every returned id was off by one
            # relative to the job.id it was meant to identify.
            ids.append(nextId)
            nextId += 1
        return ids

    def updateTimeStamp(self, jobstatus):
        """Backfill status/startTime/endTime/duration on *jobstatus* from the
        async_job table, when both a job id and a DB connection exist."""
        jobId = jobstatus.jobId
        if jobId is not None and self.db is not None:
            result = self.db.execute("select job_status, created, last_updated from async_job where id=%s"%jobId)
            if result is not None and len(result) > 0:
                # job_status == 1 means the server reports success.
                if result[0][0] == 1:
                    jobstatus.status = True
                else:
                    jobstatus.status = False
                jobstatus.startTime = result[0][1]
                jobstatus.endTime = result[0][2]
                delta = jobstatus.endTime - jobstatus.startTime
                jobstatus.duration = delta.total_seconds()

    def waitForComplete(self, workers=10):
        """Wait for the submitted batch to finish, poll any async jobs with a
        second wave of workers, and return the list of jobStatus results."""
        self.inqueue.join()
        lock = threading.Lock()
        resultQueue = Queue.Queue()
        '''intermediate result is stored in self.outqueue'''
        for i in range(workers):
            worker = workThread(self.outqueue, resultQueue, self.apiClient, self.db, lock)
            worker.start()

        self.outqueue.join()

        asyncJobResult = []
        while resultQueue.qsize() > 0:
            jobstatus = resultQueue.get()
            self.updateTimeStamp(jobstatus)
            asyncJobResult.append(jobstatus)

        return asyncJobResult

    def submitCmdsAndWait(self, cmds, workers=10):
        '''put commands into a queue at first, then start workers numbers threads to execute this commands'''
        self.submitCmds(cmds)
        lock = threading.Lock()
        for i in range(workers):
            worker = workThread(self.inqueue, self.outqueue, self.apiClient, self.db, lock)
            worker.start()

        return self.waitForComplete(workers)

    def submitJobExecuteNtimes(self, job, ntimes=1, nums_threads=1, interval=1):
        '''submit one job and execute the same job ntimes, with nums_threads of threads'''
        inqueue1 = Queue.Queue()
        lock = threading.Condition()
        for i in range(ntimes):
            # Shallow-copy the job (and the client) so each run is independent.
            newjob = copy.copy(job)
            setattr(newjob, "apiClient", copy.copy(self.apiClient))
            setattr(newjob, "lock", lock)
            inqueue1.put(newjob)

        for i in range(nums_threads):
            work = jobThread(inqueue1, interval)
            work.start()
        inqueue1.join()

    def submitJobs(self, jobs, nums_threads=1, interval=1):
        '''submit n jobs, execute them with nums_threads of threads'''
        inqueue1 = Queue.Queue()
        lock = threading.Condition()

        for job in jobs:
            setattr(job, "apiClient", copy.copy(self.apiClient))
            setattr(job, "lock", lock)
            inqueue1.put(job)

        for i in range(nums_threads):
            work = jobThread(inqueue1, interval)
            work.start()
        inqueue1.join()
|
||||
|
|
@ -1,60 +0,0 @@
|
|||
#!/bin/sh
# Copyright 2012 Citrix Systems, Inc. Licensed under the
# Apache License, Version 2.0 (the "License"); you may not use this
# file except in compliance with the License. Citrix Systems, Inc.
# reserves all rights not expressly granted by the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Automatically generated by addcopyright.py at 04/03/2012

# Stand up a throwaway copy of the system Tomcat under $HOME/automated, run the
# automated test suite against it via ant, then tear the copy down again.

# wec
export CATALINA_HOME=${HOME}/automated
# macos tomcat
#ORIG_TOMCAT=/usr/local/tomcat
# linux/jenkins host tomcat
ORIG_TOMCAT=/usr/share/tomcat6

# Copy one Tomcat subdirectory ($1) into the scratch instance ($2).
# A missing source argument is treated as success; otherwise cp's status is returned.
# NOTE(review): $1 and $2 are expanded unquoted -- paths containing spaces would break.
mkdir_copy_files() {
    if [ -z "$1" ]; then
        return 0
    fi

    echo "Copying $1 files to $2..."

    mkdir -p $2
    cp -R $1/* $2
    return $?
}

# A system Tomcat to clone must exist.
if [ ! -d ${ORIG_TOMCAT} ]; then
    echo "Tomcat must be installed on this system"
    exit 1
fi

# Refuse to clobber a scratch instance left over from a previous run.
if [ -d ${CATALINA_HOME} ]; then
    echo "Existing test Tomcat exists!!!"
    exit 1
fi

# now let's copy over the required files...
mkdir_copy_files ${ORIG_TOMCAT}/conf ${CATALINA_HOME}/conf
mkdir_copy_files ${ORIG_TOMCAT}/bin ${CATALINA_HOME}/bin
mkdir_copy_files ${ORIG_TOMCAT}/lib ${CATALINA_HOME}/lib
mkdir_copy_files ${ORIG_TOMCAT}/logs ${CATALINA_HOME}/logs
mkdir_copy_files ${ORIG_TOMCAT}/temp ${CATALINA_HOME}/temp
mkdir_copy_files ${ORIG_TOMCAT}/webapps ${CATALINA_HOME}/webapps
mkdir_copy_files ${ORIG_TOMCAT}/work ${CATALINA_HOME}/work

ant clean-all

ant automated-test-run

# clean up our temp tomcat!
rm -rf ${CATALINA_HOME}

# Exit with the status of the last command above.
exit $?
|
||||
File diff suppressed because it is too large
Load Diff
|
|
@ -1,174 +0,0 @@
|
|||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
import urllib2
|
||||
import urllib
|
||||
import base64
|
||||
import hmac
|
||||
import hashlib
|
||||
import json
|
||||
import xml.dom.minidom
|
||||
import types
|
||||
import time
|
||||
import inspect
|
||||
import cloudstackException
|
||||
from cloudstackAPI import *
|
||||
import jsonHelper
|
||||
|
||||
class cloudConnection(object):
    """HTTP connection to a CloudStack management server: builds, signs, sends
    API requests and polls async jobs to completion."""

    def __init__(self, mgtSvr, port=8096, apiKey = None, securityKey = None, asyncTimeout=3600, logging=None):
        """
        mgtSvr       -- management-server host
        port         -- 8096 selects the unauthenticated integration port; any
                        other port enables request signing
        apiKey       -- API key used to sign authenticated requests
        securityKey  -- secret key used for the HMAC-SHA1 signature
        asyncTimeout -- seconds to wait for an async job before giving up
        logging      -- logger used for request/response tracing
        """
        self.apiKey = apiKey
        self.securityKey = securityKey
        self.mgtSvr = mgtSvr
        self.port = port
        self.logging = logging
        if port == 8096:
            self.auth = False
        else:
            self.auth = True

        self.retries = 5
        self.asyncTimeout = asyncTimeout

    def close(self):
        """Best-effort close of the last opened HTTP connection."""
        try:
            self.connection.close()
        except:
            pass

    def __copy__(self):
        """Return a fresh connection with identical settings (used so worker
        threads do not share one socket)."""
        return cloudConnection(self.mgtSvr, self.port, self.apiKey, self.securityKey, self.asyncTimeout, self.logging)

    def make_request_with_auth(self, command, requests={}):
        """Send *command* with the given parameters as a signed GET request and
        return the raw response body.

        NOTE(review): the mutable default {} is shared across calls, and this
        method also mutates the caller's dict -- confirm callers always pass a
        fresh dict.
        """
        requests["command"] = command
        requests["apiKey"] = self.apiKey
        requests["response"] = "json"
        # Parameters are sorted case-insensitively: the server computes the
        # signature over the sorted, lower-cased query string.
        request = zip(requests.keys(), requests.values())
        request.sort(key=lambda x: str.lower(x[0]))

        requestUrl = "&".join(["=".join([r[0], urllib.quote_plus(str(r[1]))]) for r in request])
        hashStr = "&".join(["=".join([str.lower(r[0]), str.lower(urllib.quote_plus(str(r[1]))).replace("+", "%20")]) for r in request])

        # HMAC-SHA1 over the normalized query string, base64- and URL-encoded.
        sig = urllib.quote_plus(base64.encodestring(hmac.new(self.securityKey, hashStr, hashlib.sha1).digest()).strip())
        requestUrl += "&signature=%s"%sig

        try:
            self.connection = urllib2.urlopen("http://%s:%d/client/api?%s"%(self.mgtSvr, self.port, requestUrl))
            self.logging.debug("sending GET request: %s"%requestUrl)
            response = self.connection.read()
            self.logging.info("got response: %s"%response)
        except IOError, e:
            # Connection-level failure: log and fall through (returns None).
            if hasattr(e, 'reason'):
                self.logging.critical("failed to reach %s because of %s"%(self.mgtSvr, e.reason))
            elif hasattr(e, 'code'):
                self.logging.critical("server returned %d error code"%e.code)
        except HTTPException, h:
            # NOTE(review): HTTPException does not appear among this module's
            # imports -- this handler would raise NameError if reached; confirm.
            self.logging.debug("encountered http Exception %s"%h.args)
            if self.retries > 0:
                self.retries = self.retries - 1
                self.make_request_with_auth(command, requests)
            else:
                self.retries = 5
                raise h
        else:
            return response

    def make_request_without_auth(self, command, requests={}):
        """Send *command* unsigned over the integration port and return the raw
        response body. Same mutable-default caveat as make_request_with_auth."""
        requests["command"] = command
        requests["response"] = "json"
        requests = zip(requests.keys(), requests.values())
        requestUrl = "&".join(["=".join([request[0], urllib.quote_plus(str(request[1]))]) for request in requests])

        self.connection = urllib2.urlopen("http://%s:%d/client/api?%s"%(self.mgtSvr, self.port, requestUrl))
        self.logging.debug("sending GET request without auth: %s"%requestUrl)
        response = self.connection.read()
        self.logging.info("got response: %s"%response)
        return response

    def pollAsyncJob(self, jobId, response):
        """Poll queryAsyncJobResult every 5s until the job succeeds (jobstatus
        1), fails (jobstatus 2 -> raises), or asyncTimeout elapses (raises)."""
        cmd = queryAsyncJobResult.queryAsyncJobResultCmd()
        cmd.jobid = jobId
        timeout = self.asyncTimeout

        while timeout > 0:
            asyncResonse = self.make_request(cmd, response, True)

            if asyncResonse.jobstatus == 2:
                raise cloudstackException.cloudstackAPIException("asyncquery", asyncResonse.jobresult)
            elif asyncResonse.jobstatus == 1:
                return asyncResonse

            time.sleep(5)
            self.logging.debug("job: %s still processing, will timeout in %ds"%(jobId, timeout))
            timeout = timeout - 5

        raise cloudstackException.cloudstackAPIException("asyncquery", "Async job timeout %s"%jobId)

    def make_request(self, cmd, response = None, raw=False):
        """Serialize *cmd* into request parameters, dispatch it, and return the
        deserialized result; async commands are polled to completion unless
        raw=True."""
        commandName = cmd.__class__.__name__.replace("Cmd", "")
        isAsync = "false"
        requests = {}
        required = []
        # Walk the command object's attributes: meta fields steer dispatch,
        # everything else becomes a request parameter.
        for attribute in dir(cmd):
            if attribute != "__doc__" and attribute != "__init__" and attribute != "__module__":
                if attribute == "isAsync":
                    isAsync = getattr(cmd, attribute)
                elif attribute == "required":
                    required = getattr(cmd, attribute)
                else:
                    requests[attribute] = getattr(cmd, attribute)

        for requiredPara in required:
            if requests[requiredPara] is None:
                raise cloudstackException.cloudstackAPIException(commandName, "%s is required"%requiredPara)
        '''remove none value'''
        for param, value in requests.items():
            if value is None:
                requests.pop(param)
            elif isinstance(value, list):
                if len(value) == 0:
                    requests.pop(param)
                else:
                    if not isinstance(value[0], dict):
                        requests[param] = ",".join(value)
                    else:
                        # Map-style parameters are flattened to name[i].key=value.
                        requests.pop(param)
                        i = 0
                        for v in value:
                            for key, val in v.iteritems():
                                requests["%s[%d].%s"%(param,i,key)] = val
                            i = i + 1

        if self.logging is not None:
            self.logging.info("sending command: %s %s"%(commandName, str(requests)))
        result = None
        if self.auth:
            result = self.make_request_with_auth(commandName, requests)
        else:
            result = self.make_request_without_auth(commandName, requests)

        if result is None:
            return None

        result = jsonHelper.getResultObj(result, response)
        if raw or isAsync == "false":
            return result
        else:
            asynJobId = result.jobid
            result = self.pollAsyncJob(asynJobId, response)
            return result.jobresult
|
||||
|
||||
if __name__ == '__main__':
    # Ad-hoc manual smoke test with a canned deployVirtualMachineResponse payload.
    xml = '<?xml version="1.0" encoding="ISO-8859-1"?><deployVirtualMachineResponse><virtualmachine><id>407</id><name>i-1-407-RS3</name><displayname>i-1-407-RS3</displayname><account>system</account><domainid>1</domainid><domain>ROOT</domain><created>2011-07-30T14:45:19-0700</created><state>Running</state><haenable>false</haenable><zoneid>1</zoneid><zonename>CA1</zonename><hostid>3</hostid><hostname>kvm-50-205</hostname><templateid>4</templateid><templatename>CentOS 5.5(64-bit) no GUI (KVM)</templatename><templatedisplaytext>CentOS 5.5(64-bit) no GUI (KVM)</templatedisplaytext><passwordenabled>false</passwordenabled><serviceofferingid>1</serviceofferingid><serviceofferingname>Small Instance</serviceofferingname><cpunumber>1</cpunumber><cpuspeed>500</cpuspeed><memory>512</memory><guestosid>112</guestosid><rootdeviceid>0</rootdeviceid><rootdevicetype>NetworkFilesystem</rootdevicetype><nic><id>380</id><networkid>203</networkid><netmask>255.255.255.0</netmask><gateway>65.19.181.1</gateway><ipaddress>65.19.181.110</ipaddress><isolationuri>vlan://65</isolationuri><broadcasturi>vlan://65</broadcasturi><traffictype>Guest</traffictype><type>Direct</type><isdefault>true</isdefault><macaddress>06:52:da:00:00:08</macaddress></nic><hypervisor>KVM</hypervisor></virtualmachine></deployVirtualMachineResponse>'
    conn = cloudConnection(None)

    # NOTE(review): cloudConnection defines no paraseReturnXML method in this
    # file -- this smoke test looks stale and would fail; confirm before use.
    print conn.paraseReturnXML(xml, deployVirtualMachine.deployVirtualMachineResponse())
|
||||
|
|
@ -1,36 +0,0 @@
|
|||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
|
||||
class cloudstackAPIException(Exception):
    """Raised when executing a CloudStack API command fails."""

    def __init__(self, cmd = "", result = ""):
        # Pre-render the message; __str__ simply hands it back.
        self.errorMsg = "Execute cmd: {0} failed, due to: {1}".format(cmd, result)

    def __str__(self):
        return self.errorMsg
|
||||
|
||||
class InvalidParameterException(Exception):
    """Raised when a caller supplies an invalid parameter value."""

    def __init__(self, msg=''):
        # The stored message is the whole payload.
        self.errorMsg = msg

    def __str__(self):
        return self.errorMsg
|
||||
|
||||
class dbException(Exception):
    """Raised on database connection or query failures."""

    def __init__(self, msg=''):
        # The stored message is the whole payload.
        self.errorMsg = msg

    def __str__(self):
        return self.errorMsg
|
||||
|
||||
class internalError(Exception):
    """Raised for unexpected internal failures of the test client itself."""

    def __init__(self, msg=''):
        # The stored message is the whole payload.
        self.errorMsg = msg

    def __str__(self):
        return self.errorMsg
|
||||
|
|
@ -1,53 +0,0 @@
|
|||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
from cloudstackAPI import *
|
||||
try:
|
||||
import unittest2 as unittest
|
||||
except ImportError:
|
||||
import unittest
|
||||
import cloudstackTestClient
|
||||
|
||||
#class UserName(object):
|
||||
# def __init__(self, account, domain, type=0):
|
||||
# self.account = account
|
||||
# self.domain = domain
|
||||
# self.accounttype = type
|
||||
# def __call__(self, cls):
|
||||
# class Wrapped(cls):
|
||||
# cls.UserName = self.account
|
||||
# cls.DomainName = self.domain
|
||||
# cls.AcctType = self.accounttype
|
||||
# return Wrapped
|
||||
|
||||
def UserName(Name, DomainName, AcctType):
    """Class decorator: stamp account credentials onto the decorated class the
    first time (and every time) an instance is constructed."""
    def decorate(cls):
        original_init = cls.__init__

        def __init__(self, *args, **kws):
            # Record the credentials on the class, then run the original initializer.
            cls.UserName = Name
            cls.DomainName = DomainName
            cls.AcctType = AcctType
            original_init(self, *args, **kws)

        cls.__init__ = __init__
        return cls

    return decorate
|
||||
|
||||
class cloudstackTestCase(unittest.case.TestCase):
    """Base class for CloudStack tests: a unittest TestCase carrying a
    CloudStack test client."""

    # Class-level client, populated externally by the execute engine
    # (setattr(test.__class__, "clstestclient", ...) during injection).
    clstestclient = None

    def __init__(self, args):
        unittest.case.TestCase.__init__(self, args)
        # Per-instance default client; typically replaced by the execute engine
        # with the shared, fully configured client.
        self.testClient = cloudstackTestClient.cloudstackTestClient()

    @classmethod
    def getClsTestClient(cls):
        """Return the class-level test client injected by the runner."""
        return cls.clstestclient
|
||||
|
|
@ -1,153 +0,0 @@
|
|||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
import cloudstackConnection
|
||||
import asyncJobMgr
|
||||
import dbConnection
|
||||
from cloudstackAPI import *
|
||||
import random
|
||||
import string
|
||||
import hashlib
|
||||
|
||||
class cloudstackTestClient(object):
|
||||
def __init__(self, mgtSvr=None, port=8096, apiKey = None, securityKey = None, asyncTimeout=3600, defaultWorkerThreads=10, logging=None):
|
||||
self.connection = cloudstackConnection.cloudConnection(mgtSvr, port, apiKey, securityKey, asyncTimeout, logging)
|
||||
self.apiClient = cloudstackAPIClient.CloudStackAPIClient(self.connection)
|
||||
self.dbConnection = None
|
||||
self.asyncJobMgr = None
|
||||
self.ssh = None
|
||||
self.defaultWorkerThreads = defaultWorkerThreads
|
||||
|
||||
|
||||
def dbConfigure(self, host="localhost", port=3306, user='cloud', passwd='cloud', db='cloud'):
|
||||
self.dbConnection = dbConnection.dbConnection(host, port, user, passwd, db)
|
||||
|
||||
def isAdminContext(self):
|
||||
"""
|
||||
A user is a regular user if he fails to listDomains;
|
||||
if he is a domain-admin, he can list only domains that are non-ROOT;
|
||||
if he is an admin, he can list the ROOT domain successfully
|
||||
"""
|
||||
try:
|
||||
listdom = listDomains.listDomainsCmd()
|
||||
listdom.name = 'ROOT'
|
||||
listdomres = self.apiClient.listDomains(listdom)
|
||||
rootdom = listdomres[0].name
|
||||
if rootdom == 'ROOT':
|
||||
return 1 #admin
|
||||
else:
|
||||
return 2 #domain-admin
|
||||
except:
|
||||
return 0 #user
|
||||
|
||||
def random_gen(self, size=6, chars=string.ascii_uppercase + string.digits):
|
||||
"""Generate Random Strings of variable length"""
|
||||
return ''.join(random.choice(chars) for x in range(size))
|
||||
|
||||
def createNewApiClient(self, UserName, DomainName, acctType=0):
|
||||
if not self.isAdminContext():
|
||||
return self.apiClient
|
||||
|
||||
listDomain = listDomains.listDomainsCmd()
|
||||
listDomain.listall = True
|
||||
listDomain.name = DomainName
|
||||
try:
|
||||
domains = self.apiClient.listDomains(listDomain)
|
||||
domId = domains[0].id
|
||||
except:
|
||||
cdomain = createDomain.createDomainCmd()
|
||||
cdomain.name = DomainName
|
||||
domain = self.apiClient.createDomain(cdomain)
|
||||
domId = domain.id
|
||||
|
||||
mdf = hashlib.md5()
|
||||
mdf.update("password")
|
||||
mdf_pass = mdf.hexdigest()
|
||||
|
||||
cmd = listAccounts.listAccountsCmd()
|
||||
cmd.name = UserName
|
||||
cmd.domainid = domId
|
||||
try:
|
||||
accounts = self.apiClient.listAccounts(cmd)
|
||||
acctId = accounts[0].id
|
||||
except:
|
||||
createAcctCmd = createAccount.createAccountCmd()
|
||||
createAcctCmd.accounttype = acctType
|
||||
createAcctCmd.domainid = domId
|
||||
createAcctCmd.email = "test-" + self.random_gen() + "@citrix.com"
|
||||
createAcctCmd.firstname = UserName
|
||||
createAcctCmd.lastname = UserName
|
||||
createAcctCmd.password = mdf_pass
|
||||
createAcctCmd.username = UserName
|
||||
acct = self.apiClient.createAccount(createAcctCmd)
|
||||
acctId = acct.id
|
||||
|
||||
listuser = listUsers.listUsersCmd()
|
||||
listuser.username = UserName
|
||||
|
||||
listuserRes = self.apiClient.listUsers(listuser)
|
||||
userId = listuserRes[0].id
|
||||
apiKey = listuserRes[0].apikey
|
||||
securityKey = listuserRes[0].secretkey
|
||||
|
||||
if apiKey is None:
|
||||
registerUser = registerUserKeys.registerUserKeysCmd()
|
||||
registerUser.id = userId
|
||||
registerUserRes = self.apiClient.registerUserKeys(registerUser)
|
||||
apiKey = registerUserRes.apikey
|
||||
securityKey = registerUserRes.secretkey
|
||||
|
||||
nConnection = cloudstackConnection.cloudConnection(self.connection.mgtSvr, self.connection.port, apiKey, securityKey, self.connection.asyncTimeout, self.connection.logging)
|
||||
self.connection.close()
|
||||
self.connection = nConnection
|
||||
self.apiClient = cloudstackAPIClient.CloudStackAPIClient(self.connection)
|
||||
|
||||
def close(self):
|
||||
if self.connection is not None:
|
||||
self.connection.close()
|
||||
if self.dbConnection is not None:
|
||||
self.dbConnection.close()
|
||||
|
||||
def getDbConnection(self):
|
||||
return self.dbConnection
|
||||
|
||||
def executeSql(self, sql=None):
|
||||
if sql is None or self.dbConnection is None:
|
||||
return None
|
||||
|
||||
return self.dbConnection.execute()
|
||||
|
||||
def executeSqlFromFile(self, sqlFile=None):
|
||||
if sqlFile is None or self.dbConnection is None:
|
||||
return None
|
||||
return self.dbConnection.executeSqlFromFile(sqlFile)
|
||||
|
||||
def getApiClient(self):
|
||||
return self.apiClient
|
||||
|
||||
    '''FixME, httplib has issue if more than one thread submitted'''
    def submitCmdsAndWait(self, cmds, workers=1):
        # Lazily create the shared async job manager on first use, then block
        # until all commands in *cmds* have been executed by *workers*
        # worker threads; returns the manager's result.
        if self.asyncJobMgr is None:
            self.asyncJobMgr = asyncJobMgr.asyncJobMgr(self.apiClient, self.dbConnection)
        return self.asyncJobMgr.submitCmdsAndWait(cmds, workers)
|
||||
|
||||
    '''submit one job and execute the same job ntimes, with nums_threads of threads'''
    def submitJob(self, job, ntimes=1, nums_threads=10, interval=1):
        # Lazily create the shared async job manager on first use.
        # Fire-and-forget: no result is returned to the caller.
        if self.asyncJobMgr is None:
            self.asyncJobMgr = asyncJobMgr.asyncJobMgr(self.apiClient, self.dbConnection)
        self.asyncJobMgr.submitJobExecuteNtimes(job, ntimes, nums_threads, interval)
|
||||
|
||||
    '''submit n jobs, execute them with nums_threads of threads'''
    def submitJobs(self, jobs, nums_threads=10, interval=1):
        # Lazily create the shared async job manager on first use.
        # Fire-and-forget: no result is returned to the caller.
        if self.asyncJobMgr is None:
            self.asyncJobMgr = asyncJobMgr.asyncJobMgr(self.apiClient, self.dbConnection)
        self.asyncJobMgr.submitJobs(jobs, nums_threads, interval)
|
||||
|
|
@ -1,289 +0,0 @@
|
|||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
import xml.dom.minidom
|
||||
from optparse import OptionParser
|
||||
import os
|
||||
import sys
|
||||
class cmdParameterProperty(object):
    """One request/response parameter parsed from the API spec XML."""
    def __init__(self):
        # identity / documentation
        self.name = None
        self.desc = ""
        # contract
        self.required = False
        self.type = "planObject"
        # nested properties; populated for list-style response arguments
        self.subProperties = []
|
||||
|
||||
class cloudStackCmd:
    """In-memory model of one API command parsed from the spec XML."""
    def __init__(self):
        self.name = ""        # command name, e.g. "listDomains"
        self.desc = ""        # human-readable description from the spec
        # "true"/"false" string flag taken from <isAsync>.
        # NOTE(review): 'async' became a reserved word in Python 3.7, so this
        # attribute keeps the module Python-2-only.
        self.async = "false"
        self.request = []     # cmdParameterProperty entries (request args)
        self.response = []    # cmdParameterProperty entries (response args)
|
||||
|
||||
class codeGenerator:
    """Emits the cloudstackAPI Python package from a parsed API spec XML:
    one module per command (Cmd + Response classes), an aggregate
    CloudStackAPIClient, __init__.py and the base classes."""

    # Indentation unit used in the generated source.
    # NOTE(review): shown as a single space here; the diff rendering may have
    # collapsed whitespace — confirm against the original file.
    space = " "

    # Class-level accumulator of every generated command name; shared across
    # instances and consumed by finalize().
    cmdsName = []

    def __init__(self, outputFolder, apiSpecFile):
        self.cmd = None            # cloudStackCmd currently being emitted
        self.code = ""             # source-text accumulator for one module
        self.required = []         # names of required request parameters
        self.subclass = []         # source for nested response classes
        self.outputFolder = outputFolder
        self.apiSpecFile = apiSpecFile

    def addAttribute(self, attr, pro):
        # Append "attr = value" (with an optional doc comment) to self.code.
        # NOTE(review): reads pro.value, which cmdParameterProperty does not
        # define; this helper appears unused by generate()/finalize().
        value = pro.value
        if pro.required:
            self.required.append(attr)
        desc = pro.desc
        if desc is not None:
            self.code += self.space
            self.code += "''' " + pro.desc + " '''"
            self.code += "\n"

        self.code += self.space
        self.code += attr + " = " + str(value)
        self.code += "\n"

    def generateSubClass(self, name, properties):
        '''generate code for sub list'''
        # Builds "class <name>" whose __init__ mirrors *properties*;
        # recurses for nested list properties.  Result is queued on
        # self.subclass and flushed by generate().
        subclass = 'class %s:\n'%name
        subclass += self.space + "def __init__(self):\n"
        for pro in properties:
            if pro.desc is not None:
                subclass += self.space + self.space + '""""%s"""\n'%pro.desc
            if len (pro.subProperties) > 0:
                subclass += self.space + self.space + 'self.%s = []\n'%pro.name
                self.generateSubClass(pro.name, pro.subProperties)
            else:
                subclass += self.space + self.space + 'self.%s = None\n'%pro.name

        self.subclass.append(subclass)

    def generate(self, cmd):
        # Emit <outputFolder>/cloudstackAPI/<cmd>.py containing <cmd>Cmd
        # (request side) and <cmd>Response (response side), then reset the
        # per-command accumulators.
        self.cmd = cmd
        self.cmdsName.append(self.cmd.name)
        self.code += "\n"
        self.code += '"""%s"""\n'%self.cmd.desc
        self.code += 'from baseCmd import *\n'
        self.code += 'from baseResponse import *\n'
        self.code += "class %sCmd (baseCmd):\n"%self.cmd.name
        self.code += self.space + "def __init__(self):\n"

        self.code += self.space + self.space + 'self.isAsync = "%s"\n' %self.cmd.async

        for req in self.cmd.request:
            if req.desc is not None:
                self.code += self.space + self.space + '"""%s"""\n'%req.desc
            if req.required == "true":
                self.code += self.space + self.space + '"""Required"""\n'

            # lists and maps default to [], everything else to None
            value = "None"
            if req.type == "list" or req.type == "map":
                value = "[]"

            self.code += self.space + self.space + 'self.%s = %s\n'%(req.name,value)
            if req.required == "true":
                self.required.append(req.name)

        # emit self.required = ["a","b",...] listing the mandatory params
        self.code += self.space + self.space + "self.required = ["
        for require in self.required:
            self.code += '"' + require + '",'
        self.code += "]\n"
        self.required = []

        """generate response code"""
        subItems = {}  # NOTE(review): never used
        self.code += "\n"
        self.code += 'class %sResponse (baseResponse):\n'%self.cmd.name
        self.code += self.space + "def __init__(self):\n"
        if len(self.cmd.response) == 0:
            self.code += self.space + self.space + "pass"
        else:
            for res in self.cmd.response:
                if res.desc is not None:
                    self.code += self.space + self.space + '"""%s"""\n'%res.desc

                if len(res.subProperties) > 0:
                    self.code += self.space + self.space + 'self.%s = []\n'%res.name
                    self.generateSubClass(res.name, res.subProperties)
                else:
                    self.code += self.space + self.space + 'self.%s = None\n'%res.name
            self.code += '\n'

        # flush any nested response classes queued by generateSubClass()
        for subclass in self.subclass:
            self.code += subclass + "\n"

        fp = open(self.outputFolder + "/cloudstackAPI/%s.py"%self.cmd.name, "w")
        fp.write(self.code)
        fp.close()
        self.code = ""
        self.subclass = []

    def finalize(self):
        '''generate an api call'''
        # Writes cloudstackAPIClient.py (one method per generated command),
        # __init__.py (__all__), baseCmd.py and baseResponse.py.
        header = '"""Test Client for CloudStack API"""\n'
        imports = "import copy\n"
        initCmdsList = '__all__ = ['
        body = ''
        body += "class CloudStackAPIClient:\n"
        body += self.space + 'def __init__(self, connection):\n'
        body += self.space + self.space + 'self.connection = connection\n'
        body += "\n"

        body += self.space + 'def __copy__(self):\n'
        body += self.space + self.space + 'return CloudStackAPIClient(copy.copy(self.connection))\n'
        body += "\n"

        for cmdName in self.cmdsName:
            # each command becomes a client method that builds the matching
            # Response object and dispatches over the connection
            body += self.space + 'def %s(self,command):\n'%cmdName
            body += self.space + self.space + 'response = %sResponse()\n'%cmdName
            body += self.space + self.space + 'response = self.connection.make_request(command, response)\n'
            body += self.space + self.space + 'return response\n'
            body += '\n'

            imports += 'from %s import %sResponse\n'%(cmdName, cmdName)
            initCmdsList += '"%s",'%cmdName

        fp = open(self.outputFolder + '/cloudstackAPI/cloudstackAPIClient.py', 'w')
        for item in [header, imports, body]:
            fp.write(item)
        fp.close()

        '''generate __init__.py'''
        initCmdsList += '"cloudstackAPIClient"]'
        fp = open(self.outputFolder + '/cloudstackAPI/__init__.py', 'w')
        fp.write(initCmdsList)
        fp.close()

        fp = open(self.outputFolder + '/cloudstackAPI/baseCmd.py', 'w')
        basecmd = '"""Base Command"""\n'
        basecmd += 'class baseCmd:\n'
        basecmd += self.space + 'pass\n'
        fp.write(basecmd)
        fp.close()

        fp = open(self.outputFolder + '/cloudstackAPI/baseResponse.py', 'w')
        basecmd = '"""Base class for response"""\n'
        basecmd += 'class baseResponse:\n'
        basecmd += self.space + 'pass\n'
        fp.write(basecmd)
        fp.close()

    def constructResponse(self, response):
        # Turn one <arg> element into a cmdParameterProperty; a name ending
        # in "(*)" marks a list whose element schema is in <arguments>.
        paramProperty = cmdParameterProperty()
        paramProperty.name = getText(response.getElementsByTagName('name'))
        paramProperty.desc = getText(response.getElementsByTagName('description'))
        if paramProperty.name.find('(*)') != -1:
            '''This is a list'''
            paramProperty.name = paramProperty.name.split('(*)')[0]
            for subresponse in response.getElementsByTagName('arguments')[0].getElementsByTagName('arg'):
                subProperty = self.constructResponse(subresponse)
                paramProperty.subProperties.append(subProperty)
        return paramProperty

    def loadCmdFromXML(self):
        # Parse the spec file and return a list of cloudStackCmd objects.
        dom = xml.dom.minidom.parse(self.apiSpecFile)
        cmds = []
        for cmd in dom.getElementsByTagName("command"):
            csCmd = cloudStackCmd()
            csCmd.name = getText(cmd.getElementsByTagName('name'))
            assert csCmd.name

            desc = getText(cmd.getElementsByTagName('description'))
            if desc:
                csCmd.desc = desc

            async = getText(cmd.getElementsByTagName('isAsync'))
            if async:
                csCmd.async = async

            for param in cmd.getElementsByTagName("request")[0].getElementsByTagName("arg"):
                paramProperty = cmdParameterProperty()

                paramProperty.name = getText(param.getElementsByTagName('name'))
                assert paramProperty.name

                required = param.getElementsByTagName('required')
                if required:
                    paramProperty.required = getText(required)

                requestDescription = param.getElementsByTagName('description')
                if requestDescription:
                    paramProperty.desc = getText(requestDescription)

                type = param.getElementsByTagName("type")
                if type:
                    paramProperty.type = getText(type)

                csCmd.request.append(paramProperty)

            responseEle = cmd.getElementsByTagName("response")[0]
            for response in responseEle.getElementsByTagName("arg"):
                # only direct children: nested <arg>s are handled recursively
                # by constructResponse()
                if response.parentNode != responseEle:
                    continue

                paramProperty = self.constructResponse(response)
                csCmd.response.append(paramProperty)

            cmds.append(csCmd)
        return cmds

    def generateCode(self):
        # Top-level driver: parse the spec, emit every command module, then
        # write the aggregate client/package files.
        cmds = self.loadCmdFromXML()
        for cmd in cmds:
            self.generate(cmd)
        self.finalize()
|
||||
|
||||
def getText(elements):
    """Return the stripped text content of the first element in *elements*."""
    first = elements[0]
    return first.childNodes[0].nodeValue.strip()
|
||||
|
||||
if __name__ == "__main__":
    # CLI entry point: read the API spec XML (-s) and generate the
    # cloudstackAPI package under the output folder (-o).
    parser = OptionParser()

    parser.add_option("-o", "--output", dest="output", help="the root path where code genereted, default is .")
    parser.add_option("-s", "--specfile", dest="spec", help="the path and name of the api spec xml file, default is /etc/cloud/cli/commands.xml")

    (options, args) = parser.parse_args()

    apiSpecFile = "/etc/cloud/cli/commands.xml"
    if options.spec is not None:
        apiSpecFile = options.spec

    if not os.path.exists(apiSpecFile):
        print "the spec file %s does not exists"%apiSpecFile
        print parser.print_help()
        exit(1)

    folder = "."
    if options.output is not None:
        folder = options.output
    # create the target package directory if missing
    apiModule=folder + "/cloudstackAPI"
    if not os.path.exists(apiModule):
        try:
            os.mkdir(apiModule)
        except:
            print "Failed to create folder %s, due to %s"%(apiModule,sys.exc_info())
            print parser.print_help()
            exit(2)

    cg = codeGenerator(folder, apiSpecFile)
    cg.generateCode()
|
||||
|
||||
|
|
@ -1,396 +0,0 @@
|
|||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
import json
|
||||
import os
|
||||
from optparse import OptionParser
|
||||
import jsonHelper
|
||||
|
||||
class managementServer():
    """Management-server endpoint plus optional API credentials."""
    def __init__(self):
        self.mgtSvrIp = None
        self.port = 8096  # default integration API port
        self.apiKey = self.securityKey = None
|
||||
|
||||
class dbServer():
    """MySQL connection settings for the cloud database."""
    def __init__(self):
        self.dbSvr = None      # DB host; must be filled in by the caller
        self.port = 3306
        # default CloudStack credentials / schema
        self.user = "cloud"
        self.passwd = "cloud"
        self.db = "cloud"
|
||||
|
||||
class configuration():
    """A single global-configuration name/value pair."""
    def __init__(self):
        self.name = self.value = None
|
||||
|
||||
class logger():
    """Maps a logger name ('TestCase' or 'TestClient') to its log file."""
    def __init__(self):
        self.name = self.file = None
|
||||
|
||||
class cloudstackConfiguration():
    """Top-level deployment description consumed by deployDataCenter."""
    def __init__(self):
        # list-valued sections of the config
        self.zones = []
        self.mgtSvr = []
        self.globalConfig = []
        self.logger = []
        # exactly one database server
        self.dbSvr = None
|
||||
|
||||
class zone():
    """One datacenter zone in the generated configuration."""
    def __init__(self):
        self.name = None
        # 'Basic' or 'Advanced'
        self.networktype = None
        # resolvers
        self.dns1 = None
        self.dns2 = None
        self.internaldns1 = None
        self.internaldns2 = None
        self.securitygroupenabled = None
        # guest VLAN range -- advanced zones only
        self.vlan = None
        # default public network (advanced mode)
        self.ipranges = []
        self.networks = []
        self.pods = []
        self.secondaryStorages = []
        # a default virtual-router provider is always enabled
        vr = provider()
        vr.name = 'VirtualRouter'
        self.providers = [vr]
|
||||
|
||||
class provider():
    """A network service provider entry (e.g. the default VirtualRouter)."""
    def __init__(self):
        self.name = self.state = self.zoneid = None
        self.broadcastdomainrange = 'ZONE'
        self.servicelist = []
|
||||
|
||||
class pod():
    """A pod: an L2 boundary holding clusters and, in basic mode, guest IP ranges."""
    def __init__(self):
        self.name = None
        self.zoneid = None
        # pod management network
        self.gateway = None
        self.netmask = None
        self.startip = None
        self.endip = None
        self.clusters = []
        # used in basic network mode only
        self.guestIpRanges = []
|
||||
|
||||
class cluster():
    """A hypervisor cluster: placement, access info, hosts and primary storage."""
    def __init__(self):
        # identity / placement
        self.clustername = None
        self.clustertype = None
        self.hypervisor = None
        self.zoneid = None
        self.podid = None
        # access
        self.url = None
        self.username = None
        self.password = None
        # contents
        self.hosts = []
        self.primaryStorages = []
|
||||
|
||||
class host():
    """One hypervisor host inside a cluster."""
    def __init__(self):
        # placement
        self.zoneid = None
        self.podid = None
        self.clusterid = None
        self.clustername = None
        # access
        self.hypervisor = None
        self.url = None
        self.username = None
        self.password = None
        # capacity / identity
        self.cpunumber = None
        self.cpuspeed = None
        self.memory = None
        self.hostmac = None
        self.hosttags = None
|
||||
|
||||
class network():
    """A guest network definition with its IP ranges."""
    def __init__(self):
        self.name = None
        self.displaytext = None
        self.zoneid = None
        self.acltype = None
        self.domainid = None
        self.networkdomain = None
        self.networkofferingid = None
        self.ipranges = []
|
||||
|
||||
class iprange():
    """An IP range, optionally VLAN-tagged and optionally account-scoped."""
    def __init__(self):
        self.gateway = None
        self.netmask = None
        self.startip = None
        self.endip = None
        # VLAN id; None means untagged
        self.vlan = None
        # for account-specific ranges
        self.account = None
        self.domain = None
|
||||
|
||||
class primaryStorage():
    """A primary storage pool (name plus nfs:// style URL)."""
    def __init__(self):
        self.name = self.url = None
|
||||
|
||||
class secondaryStorage():
    """A secondary storage location (nfs:// style URL)."""
    def __init__(self):
        self.url = None
|
||||
|
||||
'''sample code to generate setup configuration file'''
|
||||
def describe_setup_in_basic_mode():
    """Return a sample basic-network cloudstackConfiguration.

    One zone with 2 pods x 2 clusters x 2 simulator hosts, 2 primary
    storage pools per cluster, 5 secondary storages, plus management
    server, database, global settings and loggers.
    """
    zs = cloudstackConfiguration()

    for l in range(1):
        z = zone()
        z.dns1 = "8.8.8.8"
        z.dns2 = "4.4.4.4"
        z.internaldns1 = "192.168.110.254"
        z.internaldns2 = "192.168.110.253"
        z.name = "test"+str(l)
        z.networktype = 'Basic'

        # create 2 pods (comment previously claimed 10)
        for i in range(2):
            p = pod()
            p.name = "test" +str(l) + str(i)
            p.gateway = "192.168.%d.1"%i
            p.netmask = "255.255.255.0"
            p.startip = "192.168.%d.150"%i
            p.endip = "192.168.%d.220"%i

            # add two pod guest ip ranges
            for j in range(2):
                ip = iprange()
                ip.gateway = p.gateway
                ip.netmask = p.netmask
                ip.startip = "192.168.%d.%d"%(i,j*20)
                ip.endip = "192.168.%d.%d"%(i,j*20+10)

                p.guestIpRanges.append(ip)

            # add 2 clusters (comment previously claimed 10)
            for j in range(2):
                c = cluster()
                c.clustername = "test"+str(l)+str(i) + str(j)
                c.clustertype = "CloudManaged"
                c.hypervisor = "Simulator"

                # add 2 simulator hosts (comment previously claimed 10)
                for k in range(2):
                    h = host()
                    h.username = "root"
                    h.password = "password"
                    memory = 8*1024*1024*1024
                    localstorage=1*1024*1024*1024*1024
                    #h.url = "http://Sim/%d%d%d%d/cpucore=1&cpuspeed=8000&memory=%d&localstorage=%d"%(l,i,j,k,memory,localstorage)
                    h.url = "http://Sim/%d%d%d%d"%(l,i,j,k)
                    c.hosts.append(h)

                # add 2 primary storages
                for m in range(2):
                    primary = primaryStorage()
                    size=1*1024*1024*1024*1024
                    primary.name = "primary"+str(l) + str(i) + str(j) + str(m)
                    #primary.url = "nfs://localhost/path%s/size=%d"%(str(l) + str(i) + str(j) + str(m), size)
                    primary.url = "nfs://localhost/path%s"%(str(l) + str(i) + str(j) + str(m))
                    c.primaryStorages.append(primary)

                p.clusters.append(c)

            z.pods.append(p)

        # add 5 secondary storages (comment previously said "two")
        for i in range(5):
            secondary = secondaryStorage()
            secondary.url = "nfs://localhost/path"+str(l) + str(i)
            z.secondaryStorages.append(secondary)

        zs.zones.append(z)

    # Add one mgt server
    mgt = managementServer()
    mgt.mgtSvrIp = "localhost"
    zs.mgtSvr.append(mgt)

    # Add a database
    db = dbServer()
    db.dbSvr = "localhost"

    zs.dbSvr = db

    # add global configuration
    global_settings = {'expunge.delay': '60',
                       'expunge.interval': '60',
                       'expunge.workers': '3',
                      }
    # .items() instead of the Python-2-only .iteritems() (works on 2 and 3)
    for k, v in global_settings.items():
        cfg = configuration()
        cfg.name = k
        cfg.value = v
        zs.globalConfig.append(cfg)

    # add loggers
    testClientLogger = logger()
    testClientLogger.name = "TestClient"
    testClientLogger.file = "/tmp/testclient.log"

    testCaseLogger = logger()
    testCaseLogger.name = "TestCase"
    testCaseLogger.file = "/tmp/testcase.log"

    zs.logger.append(testClientLogger)
    zs.logger.append(testCaseLogger)

    return zs
|
||||
|
||||
'''sample code to generate setup configuration file'''
|
||||
def describe_setup_in_advanced_mode():
    """Return a sample advanced-network cloudstackConfiguration.

    Like the basic-mode sample but with a guest CIDR, a zone VLAN range
    and a default public IP range instead of per-pod guest ranges.
    """
    zs = cloudstackConfiguration()

    for l in range(1):
        z = zone()
        z.dns1 = "8.8.8.8"
        z.dns2 = "4.4.4.4"
        z.internaldns1 = "192.168.110.254"
        z.internaldns2 = "192.168.110.253"
        z.name = "test"+str(l)
        z.networktype = 'Advanced'
        z.guestcidraddress = "10.1.1.0/24"
        z.vlan = "100-2000"

        # create 2 pods (comment previously claimed 10)
        for i in range(2):
            p = pod()
            p.name = "test" +str(l) + str(i)
            p.gateway = "192.168.%d.1"%i
            p.netmask = "255.255.255.0"
            p.startip = "192.168.%d.200"%i
            p.endip = "192.168.%d.220"%i

            # add 2 clusters (comment previously claimed 10)
            for j in range(2):
                c = cluster()
                c.clustername = "test"+str(l)+str(i) + str(j)
                c.clustertype = "CloudManaged"
                c.hypervisor = "Simulator"

                # add 2 simulator hosts (comment previously claimed 10)
                for k in range(2):
                    h = host()
                    h.username = "root"
                    h.password = "password"
                    memory = 8*1024*1024*1024
                    localstorage=1*1024*1024*1024*1024
                    #h.url = "http://Sim/%d%d%d%d/cpucore=1&cpuspeed=8000&memory=%d&localstorage=%d"%(l,i,j,k,memory,localstorage)
                    h.url = "http://Sim/%d%d%d%d"%(l,i,j,k)
                    c.hosts.append(h)

                # add 2 primary storages
                for m in range(2):
                    primary = primaryStorage()
                    size=1*1024*1024*1024*1024
                    primary.name = "primary"+str(l) + str(i) + str(j) + str(m)
                    #primary.url = "nfs://localhost/path%s/size=%d"%(str(l) + str(i) + str(j) + str(m), size)
                    primary.url = "nfs://localhost/path%s"%(str(l) + str(i) + str(j) + str(m))
                    c.primaryStorages.append(primary)

                p.clusters.append(c)

            z.pods.append(p)

        # add 5 secondary storages (comment previously said "two")
        for i in range(5):
            secondary = secondaryStorage()
            secondary.url = "nfs://localhost/path"+str(l) + str(i)
            z.secondaryStorages.append(secondary)

        # add default public network
        ips = iprange()
        ips.vlan = "26"
        ips.startip = "172.16.26.2"
        ips.endip = "172.16.26.100"
        ips.gateway = "172.16.26.1"
        ips.netmask = "255.255.255.0"
        z.ipranges.append(ips)

        zs.zones.append(z)

    # Add one mgt server
    mgt = managementServer()
    mgt.mgtSvrIp = "localhost"
    zs.mgtSvr.append(mgt)

    # Add a database
    db = dbServer()
    db.dbSvr = "localhost"

    zs.dbSvr = db

    # add global configuration
    global_settings = {'expunge.delay': '60',
                       'expunge.interval': '60',
                       'expunge.workers': '3',
                      }
    # .items() instead of the Python-2-only .iteritems() (works on 2 and 3)
    for k, v in global_settings.items():
        cfg = configuration()
        cfg.name = k
        cfg.value = v
        zs.globalConfig.append(cfg)

    # add loggers
    testClientLogger = logger()
    testClientLogger.name = "TestClient"
    testClientLogger.file = "/tmp/testclient.log"

    testCaseLogger = logger()
    testCaseLogger.name = "TestCase"
    testCaseLogger.file = "/tmp/testcase.log"

    zs.logger.append(testClientLogger)
    zs.logger.append(testCaseLogger)

    return zs
|
||||
|
||||
def generate_setup_config(config, file=None):
    """Serialize *config* via jsonHelper.

    Returns the JSON string when *file* is None; otherwise writes
    pretty-printed JSON to *file* and returns None.
    """
    dumped = jsonHelper.jsonDump.dump(config)
    if file is None:
        return json.dumps(dumped)
    fp = open(file, 'w')
    json.dump(dumped, fp, indent=4)
    fp.close()
|
||||
|
||||
|
||||
def get_setup_config(file):
    """Load a configuration previously written by generate_setup_config().

    Returns the jsonHelper-wrapped config, or None if *file* does not exist.
    """
    if not os.path.exists(file):
        return None
    # context manager closes the handle (it was previously leaked); the
    # dead "config = cloudstackConfiguration()" placeholder is removed —
    # it was immediately overwritten by json.load().
    with open(file, 'r') as fp:
        config = json.load(fp)
    return jsonHelper.jsonLoader(config)
|
||||
|
||||
if __name__ == "__main__":
    # CLI entry point: build the sample basic-mode datacenter description
    # and write it as JSON (default ./datacenterCfg) for deployDataCenter.py.
    parser = OptionParser()

    parser.add_option("-o", "--output", action="store", default="./datacenterCfg", dest="output", help="the path where the json config file generated, by default is ./datacenterCfg")

    (options, args) = parser.parse_args()
    config = describe_setup_in_basic_mode()
    generate_setup_config(config, options.output)
|
||||
|
|
@ -1,92 +0,0 @@
|
|||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
import pymysql
|
||||
import cloudstackException
|
||||
import sys
|
||||
import os
|
||||
import traceback
|
||||
class dbConnection(object):
    """Thin wrapper around a pymysql connection to the cloud database."""

    def __init__(self, host="localhost", port=3306, user='cloud', passwd='cloud', db='cloud'):
        self.host = host
        self.port = port
        self.user = user
        self.passwd = passwd
        self.database = db

        try:
            self.db = pymysql.Connect(host=host, port=port, user=user, passwd=passwd, db=db)
        except:
            # surface the connect failure but wrap it in the project exception
            traceback.print_exc()
            raise cloudstackException.InvalidParameterException(sys.exc_info())

    def __copy__(self):
        # a copy opens its own independent connection
        return dbConnection(self.host, self.port, self.user, self.passwd, self.database)

    def close(self):
        # best-effort: ignore errors closing an already-dead link
        try:
            self.db.close()
        except:
            pass

    def execute(self, sql=None):
        """Run *sql* and return all rows as a list (None when sql is None).

        Raises cloudstackException.dbException on MySQL errors and
        cloudstackException.internalError on anything else.
        """
        if sql is None:
            return None

        resultRow = []
        cursor = None
        try:
            # commit to restart the transaction, else we don't get fresh data
            self.db.commit()
            cursor = self.db.cursor()
            cursor.execute(sql)

            result = cursor.fetchall()
            if result is not None:
                for r in result:
                    resultRow.append(r)
            return resultRow
        # "except X as e" replaces the Python-2-only "except X, e" form;
        # valid from Python 2.6 onward.
        except pymysql.MySQLError as e:
            raise cloudstackException.dbException("db Exception:%s"%e)
        except:
            raise cloudstackException.internalError(sys.exc_info())
        finally:
            if cursor is not None:
                cursor.close()

    def executeSqlFromFile(self, fileName=None):
        """Read *fileName* and execute its contents as SQL."""
        if fileName is None:
            # fixed garbled message ("file can't not none")
            raise cloudstackException.InvalidParameterException("fileName can not be None")

        if not os.path.exists(fileName):
            raise cloudstackException.InvalidParameterException("%s not exists"%fileName)

        # close the file handle explicitly (it was previously leaked)
        fp = open(fileName, "r")
        try:
            sqls = fp.read()
        finally:
            fp.close()
        return self.execute(sqls)
|
||||
|
||||
if __name__ == "__main__":
    # Ad-hoc manual test: requires a reachable local MySQL with the
    # CloudStack schema; not meant for automated runs.
    db = dbConnection()
    '''
    try:

        result = db.executeSqlFromFile("/tmp/server-setup.sql")
        if result is not None:
            for r in result:
                print r[0], r[1]
    except cloudstackException.dbException, e:
        print e
    '''
    print db.execute("update vm_template set name='fjkd' where id=200")
    for i in range(10):
        result = db.execute("select job_status, created, last_updated from async_job where id=%d"%i)
        print result
|
||||
|
||||
|
|
@ -1,44 +0,0 @@
|
|||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
import deployDataCenter
|
||||
import TestCaseExecuteEngine
|
||||
from optparse import OptionParser
|
||||
import os
|
||||
if __name__ == "__main__":
    # CLI entry point: deploy (or just load, with -l) a datacenter from the
    # json config, then run every test case found under -d.
    parser = OptionParser()

    parser.add_option("-c", "--config", action="store", default="./datacenterCfg", dest="config", help="the path where the json config file generated, by default is ./datacenterCfg")
    parser.add_option("-d", "--directory", dest="testCaseFolder", help="the test case directory")
    parser.add_option("-r", "--result", dest="result", help="test result log file")
    parser.add_option("-t", dest="testcaselog", help="test case log file")
    parser.add_option("-l", "--load", dest="load", action="store_true", help="only load config, do not deploy, it will only run testcase")
    (options, args) = parser.parse_args()
    if options.testCaseFolder is None:
        # the test directory is mandatory
        parser.print_usage()
        exit(1)

    testResultLogFile = None
    if options.result is not None:
        testResultLogFile = options.result

    testCaseLogFile = None
    if options.testcaselog is not None:
        testCaseLogFile = options.testcaselog
    deploy = deployDataCenter.deployDataCenters(options.config)
    if options.load:
        # reuse an already-deployed datacenter: only parse the config
        deploy.loadCfg()
    else:
        deploy.deploy()

    testcaseEngine = TestCaseExecuteEngine.TestCaseExecuteEngine(deploy.testClient, options.testCaseFolder, testCaseLogFile, testResultLogFile)
    testcaseEngine.run()
|
||||
|
|
@ -1,395 +0,0 @@
|
|||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
"""Deploy datacenters according to a json configuration file"""
|
||||
import configGenerator
|
||||
import cloudstackException
|
||||
import cloudstackTestClient
|
||||
import sys
|
||||
import logging
|
||||
from cloudstackAPI import *
|
||||
from optparse import OptionParser
|
||||
|
||||
class deployDataCenters():
|
||||
|
||||
    def __init__(self, cfgFile):
        # path to the json datacenter description consumed by loadCfg/deploy
        self.configFile = cfgFile
|
||||
|
||||
def addHosts(self, hosts, zoneId, podId, clusterId, hypervisor):
|
||||
if hosts is None:
|
||||
return
|
||||
for host in hosts:
|
||||
hostcmd = addHost.addHostCmd()
|
||||
hostcmd.clusterid = clusterId
|
||||
hostcmd.cpunumber = host.cpunumer
|
||||
hostcmd.cpuspeed = host.cpuspeed
|
||||
hostcmd.hostmac = host.hostmac
|
||||
hostcmd.hosttags = host.hosttags
|
||||
hostcmd.hypervisor = host.hypervisor
|
||||
hostcmd.memory = host.memory
|
||||
hostcmd.password = host.password
|
||||
hostcmd.podid = podId
|
||||
hostcmd.url = host.url
|
||||
hostcmd.username = host.username
|
||||
hostcmd.zoneid = zoneId
|
||||
hostcmd.hypervisor = hypervisor
|
||||
self.apiClient.addHost(hostcmd)
|
||||
|
||||
def createClusters(self, clusters, zoneId, podId):
|
||||
if clusters is None:
|
||||
return
|
||||
|
||||
for cluster in clusters:
|
||||
clustercmd = addCluster.addClusterCmd()
|
||||
clustercmd.clustername = cluster.clustername
|
||||
clustercmd.clustertype = cluster.clustertype
|
||||
clustercmd.hypervisor = cluster.hypervisor
|
||||
clustercmd.password = cluster.password
|
||||
clustercmd.podid = podId
|
||||
clustercmd.url = cluster.url
|
||||
clustercmd.username = cluster.username
|
||||
clustercmd.zoneid = zoneId
|
||||
clusterresponse = self.apiClient.addCluster(clustercmd)
|
||||
clusterId = clusterresponse[0].id
|
||||
|
||||
self.addHosts(cluster.hosts, zoneId, podId, clusterId,\
|
||||
cluster.hypervisor)
|
||||
self.createPrimaryStorages(cluster.primaryStorages, zoneId, podId,\
|
||||
clusterId)
|
||||
|
||||
def createPrimaryStorages(self, primaryStorages, zoneId, podId, clusterId):
|
||||
if primaryStorages is None:
|
||||
return
|
||||
for primary in primaryStorages:
|
||||
primarycmd = createStoragePool.createStoragePoolCmd()
|
||||
primarycmd.details = primary.details
|
||||
primarycmd.name = primary.name
|
||||
primarycmd.podid = podId
|
||||
primarycmd.tags = primary.tags
|
||||
primarycmd.url = primary.url
|
||||
primarycmd.zoneid = zoneId
|
||||
primarycmd.clusterid = clusterId
|
||||
self.apiClient.createStoragePool(primarycmd)
|
||||
|
||||
def createpods(self, pods, zone, zoneId):
|
||||
if pods is None:
|
||||
return
|
||||
for pod in pods:
|
||||
createpod = createPod.createPodCmd()
|
||||
createpod.name = pod.name
|
||||
createpod.gateway = pod.gateway
|
||||
createpod.netmask = pod.netmask
|
||||
createpod.startip = pod.startip
|
||||
createpod.endip = pod.endip
|
||||
createpod.zoneid = zoneId
|
||||
createpodResponse = self.apiClient.createPod(createpod)
|
||||
podId = createpodResponse.id
|
||||
|
||||
if pod.guestIpRanges is not None:
|
||||
self.createVlanIpRanges("Basic", pod.guestIpRanges, zoneId,\
|
||||
podId)
|
||||
|
||||
self.createClusters(pod.clusters, zoneId, podId)
|
||||
|
||||
def createVlanIpRanges(self, mode, ipranges, zoneId, podId=None,\
|
||||
networkId=None):
|
||||
if ipranges is None:
|
||||
return
|
||||
for iprange in ipranges:
|
||||
vlanipcmd = createVlanIpRange.createVlanIpRangeCmd()
|
||||
vlanipcmd.account = iprange.account
|
||||
vlanipcmd.domainid = iprange.domainid
|
||||
vlanipcmd.endip = iprange.endip
|
||||
vlanipcmd.gateway = iprange.gateway
|
||||
vlanipcmd.netmask = iprange.netmask
|
||||
vlanipcmd.networkid = networkId
|
||||
vlanipcmd.podid = podId
|
||||
vlanipcmd.startip = iprange.startip
|
||||
vlanipcmd.zoneid = zoneId
|
||||
vlanipcmd.vlan = iprange.vlan
|
||||
if mode == "Basic":
|
||||
vlanipcmd.forvirtualnetwork = "false"
|
||||
else:
|
||||
vlanipcmd.forvirtualnetwork = "true"
|
||||
|
||||
self.apiClient.createVlanIpRange(vlanipcmd)
|
||||
|
||||
def createSecondaryStorages(self, secondaryStorages, zoneId):
|
||||
if secondaryStorages is None:
|
||||
return
|
||||
for secondary in secondaryStorages:
|
||||
secondarycmd = addSecondaryStorage.addSecondaryStorageCmd()
|
||||
secondarycmd.url = secondary.url
|
||||
secondarycmd.zoneid = zoneId
|
||||
self.apiClient.addSecondaryStorage(secondarycmd)
|
||||
|
||||
def createnetworks(self, networks, zoneId, mode):
|
||||
if networks is None:
|
||||
return
|
||||
for network in networks:
|
||||
networkcmd = createNetwork.createNetworkCmd()
|
||||
networkcmd.displaytext = network.displaytext
|
||||
networkcmd.name = network.name
|
||||
networkcmd.networkofferingid = network.networkofferingid
|
||||
networkcmd.zoneid = zoneId
|
||||
|
||||
ipranges = network.ipranges
|
||||
if ipranges:
|
||||
iprange = ipranges.pop()
|
||||
networkcmd.startip = iprange.startip
|
||||
networkcmd.endip = iprange.endip
|
||||
networkcmd.gateway = iprange.gateway
|
||||
networkcmd.netmask = iprange.netmask
|
||||
|
||||
networkcmdresponse = self.apiClient.createNetwork(networkcmd)
|
||||
networkId = networkcmdresponse.id
|
||||
|
||||
self.createVlanIpRanges(mode, ipranges, zoneId, networkId)
|
||||
|
||||
def createPhysicalNetwork(self, name, zoneid, vlan=None):
|
||||
phynet = createPhysicalNetwork.createPhysicalNetworkCmd()
|
||||
phynet.zoneid = zoneid
|
||||
phynet.name = name
|
||||
if vlan:
|
||||
phynet.vlan = vlan
|
||||
return self.apiClient.createPhysicalNetwork(phynet)
|
||||
|
||||
def updatePhysicalNetwork(self, networkid, state="Enabled", vlan=None):
|
||||
upnet = updatePhysicalNetwork.updatePhysicalNetworkCmd()
|
||||
upnet.id = networkid
|
||||
upnet.state = state
|
||||
if vlan:
|
||||
upnet.vlan = vlan
|
||||
return self.apiClient.updatePhysicalNetwork(upnet)
|
||||
|
||||
    def configureProviders(self, phynetwrk, zone):
        """Enable the network service providers on a freshly created
        physical network.

        Always configures and enables the VirtualRouter element; then
        enables every provider listed in zone.providers.  For Basic zones
        with security groups, a SecurityGroupProvider entry is appended to
        zone.providers first.
        """
        # Locate the (initially disabled) VirtualRouter provider on this
        # physical network.
        pnetprov = listNetworkServiceProviders.listNetworkServiceProvidersCmd()
        pnetprov.physicalnetworkid = phynetwrk.id
        pnetprov.state = "Disabled"
        pnetprov.name = "VirtualRouter"
        pnetprovres = self.apiClient.listNetworkServiceProviders(pnetprov)

        vrprov = listVirtualRouterElements.listVirtualRouterElementsCmd()
        vrprov.nspid = pnetprovres[0].id
        vrprovresponse = self.apiClient.listVirtualRouterElements(vrprov)
        vrprovid = vrprovresponse[0].id

        # Enable the virtual router element itself.
        vrconfig = \
            configureVirtualRouterElement.configureVirtualRouterElementCmd()
        vrconfig.enabled = "true"
        vrconfig.id = vrprovid
        vrconfigresponse = \
            self.apiClient.configureVirtualRouterElement(vrconfig)

        if zone.networktype == "Basic" and zone.securitygroupenabled:
            # Basic+SG zones additionally need the security group provider.
            sgprovider = configGenerator.provider()
            sgprovider.name = "SecurityGroupProvider"
            zone.providers.append(sgprovider)

        # Flip every configured provider from Disabled to Enabled.
        for prov in zone.providers:
            pnetprov = \
                listNetworkServiceProviders.listNetworkServiceProvidersCmd()
            pnetprov.physicalnetworkid = phynetwrk.id
            pnetprov.name = prov.name
            pnetprov.state = "Disabled"
            pnetprovs = self.apiClient.listNetworkServiceProviders(pnetprov)

            upnetprov = \
                updateNetworkServiceProvider.updateNetworkServiceProviderCmd()
            upnetprov.id = pnetprovs[0].id
            upnetprov.state = "Enabled"
            upnetprovresponse = \
                self.apiClient.updateNetworkServiceProvider(upnetprov)
|
||||
|
||||
def addTrafficTypes(self, physical_network_id, traffictypes=None, \
|
||||
network_labels=None):
|
||||
[self.addTrafficType(physical_network_id, traffictype) for \
|
||||
traffictype in traffictypes]
|
||||
|
||||
def addTrafficType(self, physical_network_id, traffictype, \
|
||||
network_label=None):
|
||||
traffic_type = addTrafficType.addTrafficTypeCmd()
|
||||
traffic_type.physicalnetworkid = physical_network_id
|
||||
traffic_type.traffictype = traffictype
|
||||
return self.apiClient.addTrafficType(traffic_type)
|
||||
|
||||
def enableZone(self, zoneid, allocation_state="Enabled"):
|
||||
zoneCmd = updateZone.updateZoneCmd()
|
||||
zoneCmd.id = zoneid
|
||||
zoneCmd.allocationstate = allocation_state
|
||||
return self.apiClient.updateZone(zoneCmd)
|
||||
|
||||
    def createZones(self, zones):
        """Create each zone with its physical network, providers, guest
        networks, pods, vlan ip ranges and secondary storage, then enable it.
        """
        for zone in zones:
            createzone = createZone.createZoneCmd()
            createzone.dns1 = zone.dns1
            createzone.dns2 = zone.dns2
            createzone.internaldns1 = zone.internaldns1
            createzone.internaldns2 = zone.internaldns2
            createzone.name = zone.name
            createzone.securitygroupenabled = zone.securitygroupenabled
            createzone.networktype = zone.networktype
            createzone.guestcidraddress = zone.guestcidraddress

            zoneresponse = self.apiClient.createZone(createzone)
            zoneId = zoneresponse.id

            # One physical network per zone carrying guest, public and
            # management traffic.
            phynetwrk = self.createPhysicalNetwork(zone.name + "-pnet", \
                                                   zoneId)

            self.addTrafficTypes(phynetwrk.id, ["Guest", "Public", \
                                                "Management"])

            self.configureProviders(phynetwrk, zone)
            self.updatePhysicalNetwork(phynetwrk.id, "Enabled", vlan=zone.vlan)

            if zone.networktype == "Basic":
                # Basic zones need a shared guest network created up front,
                # using the built-in SG-enabled shared offering.
                listnetworkoffering = \
                    listNetworkOfferings.listNetworkOfferingsCmd()

                listnetworkoffering.name = \
                    "DefaultSharedNetworkOfferingWithSGService"

                listnetworkofferingresponse = \
                    self.apiClient.listNetworkOfferings(listnetworkoffering)

                guestntwrk = configGenerator.network()
                guestntwrk.displaytext = "guestNetworkForBasicZone"
                guestntwrk.name = "guestNetworkForBasicZone"
                guestntwrk.zoneid = zoneId
                guestntwrk.networkofferingid = \
                    listnetworkofferingresponse[0].id
                self.createnetworks([guestntwrk], zoneId, zone.networktype)

            self.createpods(zone.pods, zone, zoneId)

            if zone.networktype == "Advanced":
                # Advanced zones get public vlan ip ranges at zone scope.
                self.createVlanIpRanges(zone.networktype, zone.ipranges, \
                                        zoneId)

            self.createSecondaryStorages(zone.secondaryStorages, zoneId)
            self.enableZone(zoneId, "Enabled")
        return
|
||||
|
||||
    def registerApiKey(self):
        """Ensure the admin user has API keys, generating them if missing.

        Also caches port/apiKey/securityKey back onto the first mgtSvr entry
        of the loaded config.  Returns (apiKey, securityKey).
        """
        listuser = listUsers.listUsersCmd()
        listuser.account = "admin"
        listuserRes = self.testClient.getApiClient().listUsers(listuser)
        userId = listuserRes[0].id
        apiKey = listuserRes[0].apikey
        securityKey = listuserRes[0].secretkey
        if apiKey is None:
            # No keys yet -- ask the management server to generate a pair.
            registerUser = registerUserKeys.registerUserKeysCmd()
            registerUser.id = userId
            registerUserRes = \
                self.testClient.getApiClient().registerUserKeys(registerUser)

            apiKey = registerUserRes.apikey
            securityKey = registerUserRes.secretkey

        # Remember the authenticated endpoint for later client creation.
        # NOTE(review): port 8080 is hard-coded -- confirm this matches the
        # management server's API port in all deployments.
        self.config.mgtSvr[0].port = 8080
        self.config.mgtSvr[0].apiKey = apiKey
        self.config.mgtSvr[0].securityKey = securityKey
        return apiKey, securityKey
|
||||
|
||||
    def loadCfg(self):
        """Load the json config and build an authenticated test client.

        Populates self.config, self.testClient, self.apiClient, the logger
        attributes and the test-case/test-result log file paths.  Raises
        InvalidParameterException when the config cannot be parsed.
        """
        try:
            self.config = configGenerator.get_setup_config(self.configFile)
        except:
            raise cloudstackException.InvalidParameterException( \
                "Failed to load config %s" %sys.exc_info())

        mgt = self.config.mgtSvr[0]

        # Route the three logger roles from the config to their files.
        loggers = self.config.logger
        testClientLogFile = None
        self.testCaseLogFile = None
        self.testResultLogFile = None
        if loggers is not None and len(loggers) > 0:
            for log in loggers:
                if log.name == "TestClient":
                    testClientLogFile = log.file
                elif log.name == "TestCase":
                    self.testCaseLogFile = log.file
                elif log.name == "TestResult":
                    self.testResultLogFile = log.file

        testClientLogger = None
        if testClientLogFile is not None:
            testClientLogger = logging.getLogger("testclient.testengine.run")
            fh = logging.FileHandler(testClientLogFile)
            fh.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(name)s - %(message)s"))
            testClientLogger.addHandler(fh)
            testClientLogger.setLevel(logging.INFO)
        self.testClientLogger = testClientLogger

        self.testClient = \
            cloudstackTestClient.cloudstackTestClient(mgt.mgtSvrIp, mgt.port, \
                                                      mgt.apiKey, \
                                                      mgt.securityKey, \
                                                      logging=self.testClientLogger)
        if mgt.apiKey is None:
            # First contact was unauthenticated; generate keys, then rebuild
            # the client against the authenticated API port.
            # NOTE(review): port 8080 is hard-coded -- confirm it matches the
            # management server setup.
            apiKey, securityKey = self.registerApiKey()
            self.testClient.close()
            self.testClient = \
                cloudstackTestClient.cloudstackTestClient(mgt.mgtSvrIp, 8080, \
                                                          apiKey, securityKey, \
                                                          logging=self.testClientLogger)

        """config database"""
        dbSvr = self.config.dbSvr
        self.testClient.dbConfigure(dbSvr.dbSvr, dbSvr.port, dbSvr.user, \
                                    dbSvr.passwd, dbSvr.db)
        self.apiClient = self.testClient.getApiClient()
|
||||
|
||||
def updateConfiguration(self, globalCfg):
|
||||
if globalCfg is None:
|
||||
return None
|
||||
|
||||
for config in globalCfg:
|
||||
updateCfg = updateConfiguration.updateConfigurationCmd()
|
||||
updateCfg.name = config.name
|
||||
updateCfg.value = config.value
|
||||
self.apiClient.updateConfiguration(updateCfg)
|
||||
|
||||
def deploy(self):
|
||||
self.loadCfg()
|
||||
self.createZones(self.config.zones)
|
||||
self.updateConfiguration(self.config.globalConfig)
|
||||
|
||||
|
||||
if __name__ == "__main__":

    parser = OptionParser()

    # BUGFIX: the long option was misspelled "--intput".  "--input" is now
    # the documented spelling; the old misspelling is kept as a hidden alias
    # so existing invocations keep working.
    parser.add_option("-i", "--input", "--intput", action="store",
                      default="./datacenterCfg", dest="input",
                      help="the path where the json config file generated, \
by default is ./datacenterCfg")

    (options, args) = parser.parse_args()

    # Deploy the datacenter described by the given config file.
    deploy = deployDataCenters(options.input)
    deploy.deploy()
|
||||
|
||||
"""
|
||||
create = createStoragePool.createStoragePoolCmd()
|
||||
create.clusterid = 1
|
||||
create.podid = 2
|
||||
create.name = "fdffdf"
|
||||
create.url = "nfs://jfkdjf/fdkjfkd"
|
||||
create.zoneid = 2
|
||||
|
||||
deploy = deployDataCenters("./datacenterCfg")
|
||||
deploy.loadCfg()
|
||||
deploy.apiClient.createStoragePool(create)
|
||||
"""
|
||||
|
|
@ -1,192 +0,0 @@
|
|||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
import cloudstackException
|
||||
import json
|
||||
import inspect
|
||||
from cloudstackAPI import *
|
||||
import pdb
|
||||
|
||||
class jsonLoader:
    '''Recursively turn decoded json dicts into attribute-style objects.'''

    def __init__(self, obj):
        for key, value in obj.items():
            if isinstance(value, dict):
                # Nested objects become nested jsonLoaders.
                setattr(self, key, jsonLoader(value))
            elif isinstance(value, (list, tuple)) and len(value) > 0 \
                    and isinstance(value[0], dict):
                # Lists of objects become lists of jsonLoaders.
                setattr(self, key, [jsonLoader(item) for item in value])
            else:
                # Scalars and plain lists are stored as-is.
                setattr(self, key, value)

    def __getattr__(self, val):
        # Missing attributes resolve to None instead of raising, so callers
        # can probe optional response fields safely.
        return self.__dict__.get(val)

    def __repr__(self):
        pairs = ', '.join('%s : %s' % (k, repr(v))
                          for (k, v) in self.__dict__.iteritems())
        return '{%s}' % str(pairs)

    def __str__(self):
        return self.__repr__()
|
||||
|
||||
|
||||
class jsonDump:
    """Serialize arbitrary Python objects into json-friendly primitives.

    Note: written for Python 2 (`long` and `basestring` builtins).
    """
    @staticmethod
    def __serialize(obj):
        """Recursively walk object's hierarchy."""
        if isinstance(obj, (bool, int, long, float, basestring)):
            return obj
        elif isinstance(obj, dict):
            obj = obj.copy()
            newobj = {}
            for key in obj:
                if obj[key] is not None:
                    # Drop empty lists as well as Nones from the output.
                    if (isinstance(obj[key], list) and len(obj[key]) == 0):
                        continue
                    newobj[key] = jsonDump.__serialize(obj[key])

            return newobj
        elif isinstance(obj, list):
            return [jsonDump.__serialize(item) for item in obj]
        elif isinstance(obj, tuple):
            return tuple(jsonDump.__serialize([item for item in obj]))
        elif hasattr(obj, '__dict__'):
            # Arbitrary objects serialize via their attribute dict.
            return jsonDump.__serialize(obj.__dict__)
        else:
            return repr(obj) # Don't know how to handle, convert to string

    @staticmethod
    def dump(obj):
        """Public entry point: return the serialized form of `obj`."""
        return jsonDump.__serialize(obj)
|
||||
|
||||
def getclassFromName(cmd, name):
    """Instantiate class `name` from the module that defines `cmd`'s class."""
    owning_module = inspect.getmodule(cmd)
    cls = getattr(owning_module, name)
    return cls()
|
||||
|
||||
def finalizeResultObj(result, responseName, responsecls):
    """Post-process a decoded API result so callers get the payload object.

    Unwraps one level of nesting when the jsonLoader attribute names do not
    match the expected response class.  `responsecls` may be None, in which
    case it is inferred from `responseName` where possible.
    (Python 2 only: relies on dict.iteritems()/keys() indexing.)
    """
    if responsecls is None and responseName.endswith("response") and responseName != "queryasyncjobresultresponse":
        '''infer the response class from the name'''
        moduleName = responseName.replace("response", "")
        try:
            # NOTE(review): getclassFromName expects an object whose defining
            # module is found via inspect.getmodule(); passing the module
            # *name* string here likely always fails and is swallowed by the
            # bare except -- confirm.
            responsecls = getclassFromName(moduleName, responseName)
        except:
            pass

    if responseName is not None and responseName == "queryasyncjobresultresponse" and responsecls is not None and result.jobresult is not None:
        # Async job replies nest the real payload under jobresult.
        result.jobresult = finalizeResultObj(result.jobresult, None, responsecls)
        return result
    elif responsecls is not None:
        # If any attribute of result matches the response class, the object
        # is already the right shape.
        for k,v in result.__dict__.iteritems():
            if k in responsecls.__dict__:
                return result

        # Otherwise peek one level down: the payload may be wrapped in a
        # single-attribute envelope (e.g. {"zone": {...}}).
        attr = result.__dict__.keys()[0]

        value = getattr(result, attr)
        if not isinstance(value, jsonLoader):
            return result

        findObj = False
        for k,v in value.__dict__.iteritems():
            if k in responsecls.__dict__:
                findObj = True
                break
        if findObj:
            return value
        else:
            return result
    else:
        return result
|
||||
|
||||
|
||||
|
||||
def getResultObj(returnObj, responsecls=None):
    """Decode a raw CloudStack API json reply into result objects.

    Returns None for empty replies, raises cloudstackAPIException when the
    server reported an error, unwraps list responses carrying a "count"
    attribute, and otherwise delegates to finalizeResultObj.
    (Python 2 only: relies on dict.keys() indexing and iterkeys().)
    """
    returnObj = json.loads(returnObj)

    if len(returnObj) == 0:
        return None
    responseName = returnObj.keys()[0]

    response = returnObj[responseName]
    if len(response) == 0:
        return None

    result = jsonLoader(response)
    if result.errorcode is not None:
        # Server-side failure: surface it as a cloudstackAPIException.
        errMsg = "errorCode: %s, errorText:%s"%(result.errorcode, result.errortext)
        raise cloudstackException.cloudstackAPIException(responseName.replace("response", ""), errMsg)

    if result.count is not None:
        # List replies look like {"count": N, "<type>": [...]}; return the
        # first non-count attribute.  Implicitly returns None when only
        # "count" is present.
        for key in result.__dict__.iterkeys():
            if key == "count":
                continue
            else:
                return getattr(result, key)
    else:
        return finalizeResultObj(result, responseName, responsecls)
|
||||
|
||||
if __name__ == "__main__":
    # Ad-hoc smoke tests exercising getResultObj against captured API replies
    # (Python 2 script: uses print statements and the old except syntax).

    # count-style list reply -> returns the provider list.
    result = '{ "listnetworkserviceprovidersresponse" : { "count":1 ,"networkserviceprovider" : [ {"name":"VirtualRouter","physicalnetworkid":"ad2948fc-1054-46c7-b1c7-61d990b86710","destinationphysicalnetworkid":"0","state":"Disabled","id":"d827cae4-4998-4037-95a2-55b92b6318b1","servicelist":["Vpn","Dhcp","Dns","Gateway","Firewall","Lb","SourceNat","StaticNat","PortForwarding","UserData"]} ] } }'
    nsp = getResultObj(result)
    print nsp[0].id

    result = '{ "listzonesresponse" : { "count":1 ,"zone" : [ {"id":1,"name":"test0","dns1":"8.8.8.8","dns2":"4.4.4.4","internaldns1":"192.168.110.254","internaldns2":"192.168.110.253","networktype":"Basic","securitygroupsenabled":true,"allocationstate":"Enabled","zonetoken":"5e818a11-6b00-3429-9a07-e27511d3169a","dhcpprovider":"DhcpServer"} ] } }'
    zones = getResultObj(result)
    print zones[0].id

    # Async job reply with a nested jobresult payload.
    res = authorizeSecurityGroupIngress.authorizeSecurityGroupIngressResponse()
    result = '{ "queryasyncjobresultresponse" : {"jobid":10,"jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"securitygroup":{"id":1,"name":"default","description":"Default Security Group","account":"admin","domainid":1,"domain":"ROOT","ingressrule":[{"ruleid":1,"protocol":"tcp","startport":22,"endport":22,"securitygroupname":"default","account":"a"},{"ruleid":2,"protocol":"tcp","startport":22,"endport":22,"securitygroupname":"default","account":"b"}]}}} }'
    asynJob = getResultObj(result, res)
    print asynJob.jobid, repr(asynJob.jobresult)
    print asynJob.jobresult.ingressrule[0].account

    # Error reply -> must raise cloudstackAPIException.
    result = '{ "queryasyncjobresultresponse" : {"errorcode" : 431, "errortext" : "Unable to execute API command queryasyncjobresultresponse due to missing parameter jobid"} }'
    try:
        asynJob = getResultObj(result)
    except cloudstackException.cloudstackAPIException, e:
        print e

    # Empty replies -> None.
    result = '{ "queryasyncjobresultresponse" : {} }'
    asynJob = getResultObj(result)
    print asynJob

    result = '{}'
    asynJob = getResultObj(result)
    print asynJob

    # Single-object envelope unwrapping via finalizeResultObj.
    result = '{ "createzoneresponse" : { "zone" : {"id":1,"name":"test0","dns1":"8.8.8.8","dns2":"4.4.4.4","internaldns1":"192.168.110.254","internaldns2":"192.168.110.253","networktype":"Basic","securitygroupsenabled":true,"allocationstate":"Enabled","zonetoken":"3442f287-e932-3111-960b-514d1f9c4610","dhcpprovider":"DhcpServer"} } }'
    res = createZone.createZoneResponse()
    zone = getResultObj(result, res)
    print zone.id

    result = '{ "attachvolumeresponse" : {"jobid":24} }'
    res = attachVolume.attachVolumeResponse()
    res = getResultObj(result, res)
    print res

    result = '{ "listtemplatesresponse" : { } }'
    print getResultObj(result, listTemplates.listTemplatesResponse())

    # Failed async job carrying an error object inside jobresult.
    result = '{ "queryasyncjobresultresponse" : {"jobid":34,"jobstatus":2,"jobprocstatus":0,"jobresultcode":530,"jobresulttype":"object","jobresult":{"errorcode":431,"errortext":"Please provide either a volume id, or a tuple(device id, instance id)"}} }'
    print getResultObj(result, listTemplates.listTemplatesResponse())
    # NOTE(review): this assignment is immediately overwritten below and is
    # therefore dead data.
    result = '{ "queryasyncjobresultresponse" : {"jobid":41,"jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"virtualmachine":{"id":37,"name":"i-2-37-TEST","displayname":"i-2-37-TEST","account":"admin","domainid":1,"domain":"ROOT","created":"2011-08-25T11:13:42-0700","state":"Running","haenable":false,"zoneid":1,"zonename":"test0","hostid":5,"hostname":"SimulatedAgent.1e629060-f547-40dd-b792-57cdc4b7d611","templateid":10,"templatename":"CentOS 5.3(64-bit) no GUI (Simulator)","templatedisplaytext":"CentOS 5.3(64-bit) no GUI (Simulator)","passwordenabled":false,"serviceofferingid":7,"serviceofferingname":"Small Instance","cpunumber":1,"cpuspeed":500,"memory":512,"guestosid":11,"rootdeviceid":0,"rootdevicetype":"NetworkFilesystem","securitygroup":[{"id":1,"name":"default","description":"Default Security Group"}],"nic":[{"id":43,"networkid":204,"netmask":"255.255.255.0","gateway":"192.168.1.1","ipaddress":"192.168.1.27","isolationuri":"ec2://untagged","broadcasturi":"vlan://untagged","traffictype":"Guest","type":"Direct","isdefault":true,"macaddress":"06:56:b8:00:00:53"}],"hypervisor":"Simulator"}}} }'

    result='{ "queryasyncjobresultresponse" : {"accountid":"30910093-22e4-4d3c-a464-8b36b60c8001","userid":"cb0aeca3-42ee-47c4-838a-2cd9053441f2","cmd":"com.cloud.api.commands.DeployVMCmd","jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"virtualmachine":{"id":"d2e4d724-e089-4e59-be8e-647674059016","name":"i-2-14-TEST","displayname":"i-2-14-TEST","account":"admin","domainid":"8cfafe79-81eb-445e-8608-c5b7c31fc3a5","domain":"ROOT","created":"2012-01-15T18:30:11+0530","state":"Running","haenable":false,"zoneid":"30a397e2-1c85-40c0-8463-70278952b046","zonename":"Sandbox-simulator","hostid":"cc0105aa-a2a9-427a-8ad7-4d835483b8a9","hostname":"SimulatedAgent.9fee20cc-95ca-48b1-8268-5513d6e83a1b","templateid":"d92570fa-bf40-44db-9dff-45cc7042604d","templatename":"CentOS 5.3(64-bit) no GUI (Simulator)","templatedisplaytext":"CentOS 5.3(64-bit) no GUI (Simulator)","passwordenabled":false,"serviceofferingid":"3734d632-797b-4f1d-ac62-33f9cf70d005","serviceofferingname":"Sample SO","cpunumber":1,"cpuspeed":100,"memory":128,"guestosid":"1e36f523-23e5-4e90-869b-a1b5e9ba674d","rootdeviceid":0,"rootdevicetype":"NetworkFilesystem","nic":[{"id":"4d3ab903-f511-4dab-8a6d-c2a3b51de7e0","networkid":"faeb7f24-a4b9-447d-bec6-c4956c4ab0f6","netmask":"255.255.240.0","gateway":"10.6.240.1","ipaddress":"10.6.253.89","isolationuri":"vlan://211","broadcasturi":"vlan://211","traffictype":"Guest","type":"Isolated","isdefault":true,"macaddress":"02:00:04:74:00:09"}],"hypervisor":"Simulator"}},"created":"2012-01-15T18:30:11+0530","jobid":"f4a13f28-fcd6-4d7f-b9cd-ba7eb5a5701f"} }'
    vm = getResultObj(result, deployVirtualMachine.deployVirtualMachineResponse())
    print vm.jobresult.id

    # Deriving a response class name from a command class name.
    cmd = deployVirtualMachine.deployVirtualMachineCmd()
    responsename = cmd.__class__.__name__.replace("Cmd", "Response")
    response = getclassFromName(cmd, responsename)
    print response.id
|
||||
|
|
@ -1,143 +0,0 @@
|
|||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
'''
|
||||
PyMySQL: A pure-Python drop-in replacement for MySQLdb.
|
||||
|
||||
Copyright (c) 2010 PyMySQL contributors
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
|
||||
'''
|
||||
|
||||
# PyMySQL package version (DB-API driver bundled with the test client).
VERSION = (0, 4, None)

# Sibling modules of this package (Python 2 implicit relative imports).
from constants import FIELD_TYPE
from converters import escape_dict, escape_sequence, escape_string
from err import Warning, Error, InterfaceError, DataError, \
     DatabaseError, OperationalError, IntegrityError, InternalError, \
     NotSupportedError, ProgrammingError, MySQLError
from times import Date, Time, Timestamp, \
    DateFromTicks, TimeFromTicks, TimestampFromTicks

import sys

# Pre-2.4 compatibility: fall back to the deprecated `sets` module when the
# builtin frozenset/set types are unavailable.
try:
    frozenset
except NameError:
    from sets import ImmutableSet as frozenset
    try:
        from sets import BaseSet as set
    except ImportError:
        from sets import Set as set

# DB-API 2.0 (PEP 249) module-level attributes.
threadsafety = 1
apilevel = "2.0"
paramstyle = "format"
|
||||
|
||||
class DBAPISet(frozenset):
    """A frozenset of type codes with MySQLdb's loose comparison semantics.

    Comparing against another set compares as sets; comparing against a
    single type code tests membership (so a FIELD_TYPE code "equals" the
    DB-API type object that contains it).
    """

    def __ne__(self, other):
        if isinstance(other, set):
            # BUGFIX: the original called super(...).__ne__(self, other),
            # passing self twice to the already-bound method, which raised
            # TypeError for any set operand.
            return super(DBAPISet, self).__ne__(other)
        else:
            return other not in self

    def __eq__(self, other):
        if isinstance(other, frozenset):
            return frozenset.__eq__(self, other)
        else:
            return other in self

    def __hash__(self):
        return frozenset.__hash__(self)
|
||||
|
||||
|
||||
# DB-API 2.0 type objects: each groups the MySQL field-type codes that map to
# one conceptual column class (compared against cursor.description entries).
STRING = DBAPISet([FIELD_TYPE.ENUM, FIELD_TYPE.STRING,
                   FIELD_TYPE.VAR_STRING])
BINARY = DBAPISet([FIELD_TYPE.BLOB, FIELD_TYPE.LONG_BLOB,
                   FIELD_TYPE.MEDIUM_BLOB, FIELD_TYPE.TINY_BLOB])
NUMBER = DBAPISet([FIELD_TYPE.DECIMAL, FIELD_TYPE.DOUBLE, FIELD_TYPE.FLOAT,
                   FIELD_TYPE.INT24, FIELD_TYPE.LONG, FIELD_TYPE.LONGLONG,
                   FIELD_TYPE.TINY, FIELD_TYPE.YEAR])
DATE = DBAPISet([FIELD_TYPE.DATE, FIELD_TYPE.NEWDATE])
TIME = DBAPISet([FIELD_TYPE.TIME])
TIMESTAMP = DBAPISet([FIELD_TYPE.TIMESTAMP, FIELD_TYPE.DATETIME])
DATETIME = TIMESTAMP
# No rowid concept in MySQL; kept empty for DB-API completeness.
ROWID = DBAPISet()
|
||||
|
||||
def Binary(x):
    """Coerce *x* to this driver's binary type (plain str in PyMySQL 0.x)."""
    return str(x)
|
||||
|
||||
def Connect(*args, **kwargs):
    """Open a database connection.

    All arguments are forwarded to connections.Connection.__init__();
    see that method for the full parameter list.
    """
    from connections import Connection as _Connection
    return _Connection(*args, **kwargs)
|
||||
|
||||
def get_client_info():  # for MySQLdb compatibility
    """Render the module VERSION triple as a dotted string."""
    template = '%s.%s.%s'
    return template % VERSION
|
||||
|
||||
# MySQLdb-compatible aliases: both lowercase connect() and Connection() map
# to the Connect factory.
connect = Connection = Connect

# we include a doctored version_info here for MySQLdb compatibility
version_info = (1,2,2,"final",0)

# SQL literal for NULL, exposed for MySQLdb compatibility.
NULL = "NULL"

__version__ = get_client_info()
|
||||
|
||||
def thread_safe():
    """Mirror MySQLdb.thread_safe(): this module reports threadsafety 1."""
    return True  # match MySQLdb.thread_safe()
|
||||
|
||||
def install_as_MySQLdb():
    """
    After this function is called, any application that imports MySQLdb or
    _mysql will unwittingly actually use pymysql.
    """
    # Alias this package under the MySQLdb/_mysql module names; requires
    # that "pymysql" has already been imported (it must be in sys.modules).
    sys.modules["MySQLdb"] = sys.modules["_mysql"] = sys.modules["pymysql"]
|
||||
|
||||
# Public DB-API surface re-exported by `from pymysql import *`.
__all__ = [
    'BINARY', 'Binary', 'Connect', 'Connection', 'DATE', 'Date',
    'Time', 'Timestamp', 'DateFromTicks', 'TimeFromTicks', 'TimestampFromTicks',
    'DataError', 'DatabaseError', 'Error', 'FIELD_TYPE', 'IntegrityError',
    'InterfaceError', 'InternalError', 'MySQLError', 'NULL', 'NUMBER',
    'NotSupportedError', 'DBAPISet', 'OperationalError', 'ProgrammingError',
    'ROWID', 'STRING', 'TIME', 'TIMESTAMP', 'Warning', 'apilevel', 'connect',
    'connections', 'constants', 'converters', 'cursors',
    'escape_dict', 'escape_sequence', 'escape_string', 'get_client_info',
    'paramstyle', 'threadsafety', 'version_info',

    "install_as_MySQLdb",

    "NULL","__version__",
    ]
|
||||
|
|
@ -1,186 +0,0 @@
|
|||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
# Bytes-per-character for multi-byte character sets, keyed by charset id.
# NOTE(review): presumably MySQL charset ids (e.g. 33 = utf8 at 3 bytes) --
# confirm against the id column of information_schema.collations.
MBLENGTH = {
        8:1,
        33:3,
        88:2,
        91:2
        }
|
||||
|
||||
class Charset:
    """One (id, charset, collation) row, as reported by MySQL."""

    def __init__(self, id, name, collation, is_default):
        self.id = id
        self.name = name
        self.collation = collation
        # MySQL marks a charset's default collation with the string 'Yes'.
        self.is_default = (is_default == 'Yes')
|
||||
|
||||
class Charsets:
    """Registry of Charset objects, indexed by numeric charset id."""

    def __init__(self):
        self._by_id = {}

    def add(self, c):
        """Register a charset under its id (last add wins for a given id)."""
        self._by_id[c.id] = c

    def by_id(self, id):
        """Look up a charset by numeric id; raises KeyError if unknown."""
        return self._by_id[id]

    def by_name(self, name):
        """Return the default charset with the given name, or None."""
        for charset in self._by_id.values():
            if charset.name == name and charset.is_default:
                return charset
|
||||
|
||||
# Module-level singleton holding every charset/collation the driver knows.
_charsets = Charsets()

# (id, charset name, collation name, is_default) -- one row per collation,
# originally generated from the server with:
#
#   mysql -N -s -e "select id, character_set_name, collation_name, is_default
#                   from information_schema.collations order by id;"
#
# Kept as a data table instead of one _charsets.add(Charset(...)) call per
# row; the registry contents are identical.
_CHARSET_ROWS = (
    (1, 'big5', 'big5_chinese_ci', 'Yes'), (2, 'latin2', 'latin2_czech_cs', ''),
    (3, 'dec8', 'dec8_swedish_ci', 'Yes'), (4, 'cp850', 'cp850_general_ci', 'Yes'),
    (5, 'latin1', 'latin1_german1_ci', ''), (6, 'hp8', 'hp8_english_ci', 'Yes'),
    (7, 'koi8r', 'koi8r_general_ci', 'Yes'), (8, 'latin1', 'latin1_swedish_ci', 'Yes'),
    (9, 'latin2', 'latin2_general_ci', 'Yes'), (10, 'swe7', 'swe7_swedish_ci', 'Yes'),
    (11, 'ascii', 'ascii_general_ci', 'Yes'), (12, 'ujis', 'ujis_japanese_ci', 'Yes'),
    (13, 'sjis', 'sjis_japanese_ci', 'Yes'), (14, 'cp1251', 'cp1251_bulgarian_ci', ''),
    (15, 'latin1', 'latin1_danish_ci', ''), (16, 'hebrew', 'hebrew_general_ci', 'Yes'),
    (18, 'tis620', 'tis620_thai_ci', 'Yes'), (19, 'euckr', 'euckr_korean_ci', 'Yes'),
    (20, 'latin7', 'latin7_estonian_cs', ''), (21, 'latin2', 'latin2_hungarian_ci', ''),
    (22, 'koi8u', 'koi8u_general_ci', 'Yes'), (23, 'cp1251', 'cp1251_ukrainian_ci', ''),
    (24, 'gb2312', 'gb2312_chinese_ci', 'Yes'), (25, 'greek', 'greek_general_ci', 'Yes'),
    (26, 'cp1250', 'cp1250_general_ci', 'Yes'), (27, 'latin2', 'latin2_croatian_ci', ''),
    (28, 'gbk', 'gbk_chinese_ci', 'Yes'), (29, 'cp1257', 'cp1257_lithuanian_ci', ''),
    (30, 'latin5', 'latin5_turkish_ci', 'Yes'), (31, 'latin1', 'latin1_german2_ci', ''),
    (32, 'armscii8', 'armscii8_general_ci', 'Yes'), (33, 'utf8', 'utf8_general_ci', 'Yes'),
    (34, 'cp1250', 'cp1250_czech_cs', ''), (35, 'ucs2', 'ucs2_general_ci', 'Yes'),
    (36, 'cp866', 'cp866_general_ci', 'Yes'), (37, 'keybcs2', 'keybcs2_general_ci', 'Yes'),
    (38, 'macce', 'macce_general_ci', 'Yes'), (39, 'macroman', 'macroman_general_ci', 'Yes'),
    (40, 'cp852', 'cp852_general_ci', 'Yes'), (41, 'latin7', 'latin7_general_ci', 'Yes'),
    (42, 'latin7', 'latin7_general_cs', ''), (43, 'macce', 'macce_bin', ''),
    (44, 'cp1250', 'cp1250_croatian_ci', ''), (47, 'latin1', 'latin1_bin', ''),
    (48, 'latin1', 'latin1_general_ci', ''), (49, 'latin1', 'latin1_general_cs', ''),
    (50, 'cp1251', 'cp1251_bin', ''), (51, 'cp1251', 'cp1251_general_ci', 'Yes'),
    (52, 'cp1251', 'cp1251_general_cs', ''), (53, 'macroman', 'macroman_bin', ''),
    (57, 'cp1256', 'cp1256_general_ci', 'Yes'), (58, 'cp1257', 'cp1257_bin', ''),
    (59, 'cp1257', 'cp1257_general_ci', 'Yes'), (63, 'binary', 'binary', 'Yes'),
    (64, 'armscii8', 'armscii8_bin', ''), (65, 'ascii', 'ascii_bin', ''),
    (66, 'cp1250', 'cp1250_bin', ''), (67, 'cp1256', 'cp1256_bin', ''),
    (68, 'cp866', 'cp866_bin', ''), (69, 'dec8', 'dec8_bin', ''),
    (70, 'greek', 'greek_bin', ''), (71, 'hebrew', 'hebrew_bin', ''),
    (72, 'hp8', 'hp8_bin', ''), (73, 'keybcs2', 'keybcs2_bin', ''),
    (74, 'koi8r', 'koi8r_bin', ''), (75, 'koi8u', 'koi8u_bin', ''),
    (77, 'latin2', 'latin2_bin', ''), (78, 'latin5', 'latin5_bin', ''),
    (79, 'latin7', 'latin7_bin', ''), (80, 'cp850', 'cp850_bin', ''),
    (81, 'cp852', 'cp852_bin', ''), (82, 'swe7', 'swe7_bin', ''),
    (83, 'utf8', 'utf8_bin', ''), (84, 'big5', 'big5_bin', ''),
    (85, 'euckr', 'euckr_bin', ''), (86, 'gb2312', 'gb2312_bin', ''),
    (87, 'gbk', 'gbk_bin', ''), (88, 'sjis', 'sjis_bin', ''),
    (89, 'tis620', 'tis620_bin', ''), (90, 'ucs2', 'ucs2_bin', ''),
    (91, 'ujis', 'ujis_bin', ''), (92, 'geostd8', 'geostd8_general_ci', 'Yes'),
    (93, 'geostd8', 'geostd8_bin', ''), (94, 'latin1', 'latin1_spanish_ci', ''),
    (95, 'cp932', 'cp932_japanese_ci', 'Yes'), (96, 'cp932', 'cp932_bin', ''),
    (97, 'eucjpms', 'eucjpms_japanese_ci', 'Yes'), (98, 'eucjpms', 'eucjpms_bin', ''),
    (99, 'cp1250', 'cp1250_polish_ci', ''),
)

# ids 128-146 (ucs2) and 192-210 (utf8) are the language-specific Unicode
# collations; they follow a strict "<charset>_<language>_ci" naming pattern
# and none of them is the default collation of its charset, so they are
# generated rather than listed.
_UNICODE_COLLATIONS = (
    'unicode', 'icelandic', 'latvian', 'romanian', 'slovenian', 'polish',
    'estonian', 'spanish', 'swedish', 'turkish', 'czech', 'danish',
    'lithuanian', 'slovak', 'spanish2', 'roman', 'persian', 'esperanto',
    'hungarian')

for _row in _CHARSET_ROWS:
    _charsets.add(Charset(*_row))
for _base, _cs in ((128, 'ucs2'), (192, 'utf8')):
    for _offset, _lang in enumerate(_UNICODE_COLLATIONS):
        _charsets.add(Charset(_base + _offset, _cs,
                              '%s_%s_ci' % (_cs, _lang), ''))
||||
|
||||
def charset_by_name(name):
    """Return the default Charset entry for charset *name* (None if unknown)."""
    entry = _charsets.by_name(name)
    return entry
|
||||
|
||||
def charset_by_id(id):
    """Return the Charset entry with numeric collation id *id* (KeyError if unknown)."""
    entry = _charsets.by_id(id)
    return entry
|
||||
|
||||
|
|
@ -1,940 +0,0 @@
|
|||
# Python implementation of the MySQL client-server protocol
|
||||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
# http://forge.mysql.com/wiki/MySQL_Internals_ClientServer_Protocol
|
||||
|
||||
try:
|
||||
import hashlib
|
||||
sha_new = lambda *args, **kwargs: hashlib.new("sha1", *args, **kwargs)
|
||||
except ImportError:
|
||||
import sha
|
||||
sha_new = sha.new
|
||||
|
||||
import socket
|
||||
try:
|
||||
import ssl
|
||||
SSL_ENABLED = True
|
||||
except ImportError:
|
||||
SSL_ENABLED = False
|
||||
|
||||
import struct
|
||||
import sys
|
||||
import os
|
||||
import ConfigParser
|
||||
|
||||
try:
|
||||
import cStringIO as StringIO
|
||||
except ImportError:
|
||||
import StringIO
|
||||
|
||||
from charset import MBLENGTH, charset_by_name, charset_by_id
|
||||
from cursors import Cursor
|
||||
from constants import FIELD_TYPE, FLAG
|
||||
from constants import SERVER_STATUS
|
||||
from constants.CLIENT import *
|
||||
from constants.COMMAND import *
|
||||
from util import join_bytes, byte2int, int2byte
|
||||
from converters import escape_item, encoders, decoders
|
||||
from err import raise_mysql_exception, Warning, Error, \
|
||||
InterfaceError, DataError, DatabaseError, OperationalError, \
|
||||
IntegrityError, InternalError, NotSupportedError, ProgrammingError
|
||||
|
||||
# Module-wide debug switch: when True the protocol code dumps raw packets
# and handshake details to stdout.
DEBUG = False

# First-byte markers of a "Length Coded Binary" value in the MySQL
# client/server protocol.  251 doubles as the NULL marker for column values.
NULL_COLUMN = 251
UNSIGNED_CHAR_COLUMN = 251
UNSIGNED_SHORT_COLUMN = 252
UNSIGNED_INT24_COLUMN = 253
UNSIGNED_INT64_COLUMN = 254
# Number of payload bytes that follow each of the markers above.
UNSIGNED_CHAR_LENGTH = 1
UNSIGNED_SHORT_LENGTH = 2
UNSIGNED_INT24_LENGTH = 3
UNSIGNED_INT64_LENGTH = 8

# Charset assumed when the caller does not request one (see Connection.__init__).
DEFAULT_CHARSET = 'latin1'
|
||||
|
||||
|
||||
def dump_packet(data):
    """Debug helper: hex/ASCII dump of raw packet *data*, plus the call stack.

    Prints the packet length, the five most recent caller frames (so the
    dump can be traced to its origin), then a 16-bytes-per-row hex + ASCII
    dump.  Only invoked when the module-level DEBUG flag is True.
    """

    def is_ascii(data):
        # Render bytes in the rough printable range 65..122 as themselves,
        # everything else as '.'.
        if byte2int(data) >= 65 and byte2int(data) <= 122: #data.isalnum():
            return data
        return '.'
    print "packet length %d" % len(data)
    print "method call[1]: %s" % sys._getframe(1).f_code.co_name
    print "method call[2]: %s" % sys._getframe(2).f_code.co_name
    print "method call[3]: %s" % sys._getframe(3).f_code.co_name
    print "method call[4]: %s" % sys._getframe(4).f_code.co_name
    print "method call[5]: %s" % sys._getframe(5).f_code.co_name
    print "-" * 88
    # Chop the payload into 16-byte rows for the classic hexdump layout.
    dump_data = [data[i:i+16] for i in xrange(len(data)) if i%16 == 0]
    for d in dump_data:
        print ' '.join(map(lambda x:"%02X" % byte2int(x), d)) + \
              ' ' * (16 - len(d)) + ' ' * 2 + \
              ' '.join(map(lambda x:"%s" % is_ascii(x), d))
    print "-" * 88
    print ""
|
||||
|
||||
def _scramble(password, message):
    """MySQL 4.1+ password scramble: SHA1(seed + SHA1(SHA1(pw))) XOR SHA1(pw).

    *message* is the random seed from the server's handshake packet.
    Returns a single NUL byte for an empty/absent password.
    """
    if password == None or len(password) == 0:
        return int2byte(0)
    if DEBUG: print 'password=' + password
    # stage2 (SHA1 applied twice) is the value the server stores; mixing it
    # with the per-connection seed proves knowledge of stage1 without ever
    # sending it over the wire.
    stage1 = sha_new(password).digest()
    stage2 = sha_new(stage1).digest()
    s = sha_new()
    s.update(message)
    s.update(stage2)
    result = s.digest()
    return _my_crypt(result, stage1)
|
||||
|
||||
def _my_crypt(message1, message2):
|
||||
length = len(message1)
|
||||
result = struct.pack('B', length)
|
||||
for i in xrange(length):
|
||||
x = (struct.unpack('B', message1[i:i+1])[0] ^ \
|
||||
struct.unpack('B', message2[i:i+1])[0])
|
||||
result += struct.pack('B', x)
|
||||
return result
|
||||
|
||||
# old_passwords support ported from libmysql/password.c
# Length of the scramble seed consumed by the pre-4.1 ("323") auth scheme.
SCRAMBLE_LENGTH_323 = 8
|
||||
|
||||
class RandStruct_323(object):
    """Pseudo-random generator ported from libmysql's randominit()/my_rnd().

    Used by _scramble_323() for the pre-4.1 password scramble; it must
    reproduce the C client's sequence exactly.  The ``L`` long-literal
    suffixes of the original were dropped: they are Python-2-only syntax
    and unnecessary, since every value stays below 2**30.
    """

    def __init__(self, seed1, seed2):
        # 2**30 - 1, the modulus used by the original C implementation.
        self.max_value = 0x3FFFFFFF
        self.seed1 = seed1 % self.max_value
        self.seed2 = seed2 % self.max_value

    def my_rnd(self):
        """Advance the generator and return a float in [0, 1)."""
        self.seed1 = (self.seed1 * 3 + self.seed2) % self.max_value
        self.seed2 = (self.seed1 + self.seed2 + 33) % self.max_value
        return float(self.seed1) / float(self.max_value)
|
||||
|
||||
def _scramble_323(password, message):
    """Scramble *password* with server seed *message* for pre-4.1 auth.

    Port of libmysql's scramble_323(): seeds the old my_rnd() PRNG from
    hashes of the password and the truncated seed, emits one obfuscated
    byte per seed byte, then XORs every byte with a final "extra" byte.
    """
    hash_pass = _hash_password_323(password)
    hash_message = _hash_password_323(message[:SCRAMBLE_LENGTH_323])
    # Each 8-byte hash unpacks into two big-endian 32-bit words.
    hash_pass_n = struct.unpack(">LL", hash_pass)
    hash_message_n = struct.unpack(">LL", hash_message)

    rand_st = RandStruct_323(hash_pass_n[0] ^ hash_message_n[0],
                             hash_pass_n[1] ^ hash_message_n[1])
    outbuf = StringIO.StringIO()
    # One output byte per seed byte, offset by 64 to stay printable.
    for _ in xrange(min(SCRAMBLE_LENGTH_323, len(message))):
        outbuf.write(int2byte(int(rand_st.my_rnd() * 31) + 64))
    extra = int2byte(int(rand_st.my_rnd() * 31))
    out = outbuf.getvalue()
    outbuf = StringIO.StringIO()
    # Second pass: XOR every output byte with the extra byte.
    for c in out:
        outbuf.write(int2byte(byte2int(c) ^ byte2int(extra)))
    return outbuf.getvalue()
|
||||
|
||||
def _hash_password_323(password):
    """Hash *password* with MySQL's ancient hash_password() algorithm.

    Returns the two resulting 31-bit words packed big-endian into 8 bytes
    (consumed by _scramble_323()).  Spaces and tabs in the password are
    skipped, exactly as libmysql does.  The ``L`` long-literal suffixes of
    the original were dropped: they are Python-2-only syntax and the
    running values are masked to 32 bits anyway.
    """
    nr = 1345345333
    add = 7
    nr2 = 0x12345671

    for c in [byte2int(x) for x in password if x not in (' ', '\t')]:
        # Grouping is deliberate: '+' binds tighter than '&', so the whole
        # sum (not just the shift term) is masked to 32 bits.
        nr ^= (((nr & 63) + add) * c) + (nr << 8) & 0xFFFFFFFF
        nr2 = (nr2 + ((nr2 << 8) ^ nr)) & 0xFFFFFFFF
        add = (add + c) & 0xFFFFFFFF

    r1 = nr & ((1 << 31) - 1)  # kill sign bits
    r2 = nr2 & ((1 << 31) - 1)

    # pack
    return struct.pack(">LL", r1, r2)
|
||||
|
||||
def pack_int24(n):
    """Pack the low 24 bits of *n* as a 3-byte little-endian string."""
    return struct.pack('BBB',
                       n & 0xFF,
                       (n >> 8) & 0xFF,
                       (n >> 16) & 0xFF)
|
||||
|
||||
def unpack_uint16(n):
    """Decode the first two bytes of *n* as a little-endian unsigned short."""
    (value,) = struct.unpack('<H', n[0:2])
    return value
|
||||
|
||||
|
||||
# TODO: stop using bit-shifting in these functions...
|
||||
# TODO: rename to "uint" to make it clear they're unsigned...
|
||||
def unpack_int24(n):
    """Decode the first 3 bytes of *n* as a little-endian unsigned 24-bit int.

    Pads to 4 bytes and lets struct do the assembly instead of three
    separate unpack-and-shift steps; slicing (rather than n[i] indexing)
    also works for Python 3 bytes, where indexing yields an int.
    """
    return struct.unpack('<I', n[:3] + b'\x00')[0]
|
||||
|
||||
def unpack_int32(n):
    """Decode the first 4 bytes of *n* as a little-endian unsigned 32-bit int.

    Single struct.unpack on a slice instead of four unpack-and-shift steps;
    slicing also works for Python 3 bytes, where n[i] indexing yields an int.
    """
    return struct.unpack('<I', n[:4])[0]
|
||||
|
||||
def unpack_int64(n):
    """Decode the first 8 bytes of *n* as a little-endian unsigned 64-bit int.

    Single struct.unpack on a slice instead of eight unpack-and-shift steps;
    slicing also works for Python 3 bytes, where n[i] indexing yields an int.
    """
    return struct.unpack('<Q', n[:8])[0]
|
||||
|
||||
def defaulterrorhandler(connection, cursor, errorclass, errorvalue):
    """Default Connection.errorhandler: record the error, then raise it.

    The (errorclass, errorvalue) pair is appended to cursor.messages when a
    cursor is given, otherwise to connection.messages.  Error classes that
    are not already db-api Error subclasses are wrapped in Error before
    being raised.
    """
    err = errorclass, errorvalue
    if DEBUG:
        # Re-raise the exception currently being handled, unmodified.
        raise

    if cursor:
        cursor.messages.append(err)
    else:
        connection.messages.append(err)
    del cursor
    del connection

    if not issubclass(errorclass, Error):
        raise Error(errorclass, errorvalue)
    else:
        raise errorclass, errorvalue
|
||||
|
||||
|
||||
class MysqlPacket(object):
    """Representation of a MySQL response packet.  Reads in the packet
    from the network socket, removes packet header and provides an interface
    for reading/parsing the packet results."""

    def __init__(self, connection):
        # The Connection supplies the buffered socket file (rfile) we read from.
        self.connection = connection
        self.__position = 0
        # The whole packet is read off the wire eagerly at construction time.
        self.__recv_packet()

    def __recv_packet(self):
        """Parse the packet header and read entire packet payload into buffer."""
        # Header layout: 3-byte little-endian payload length + 1-byte
        # sequence number.
        packet_header = self.connection.rfile.read(4)
        if len(packet_header) < 4:
            raise OperationalError(2013, "Lost connection to MySQL server during query")

        if DEBUG: dump_packet(packet_header)
        packet_length_bin = packet_header[:3]
        self.__packet_number = byte2int(packet_header[3])
        # TODO: check packet_num is correct (+1 from last packet)

        bin_length = packet_length_bin + int2byte(0)  # pad little-endian number
        bytes_to_read = struct.unpack('<I', bin_length)[0]
        recv_data = self.connection.rfile.read(bytes_to_read)
        if len(recv_data) < bytes_to_read:
            raise OperationalError(2013, "Lost connection to MySQL server during query")
        if DEBUG: dump_packet(recv_data)
        self.__data = recv_data

    def packet_number(self): return self.__packet_number

    def get_all_data(self): return self.__data

    def read(self, size):
        """Read the first 'size' bytes in packet and advance cursor past them."""
        result = self.peek(size)
        self.advance(size)
        return result

    def read_all(self):
        """Read all remaining data in the packet.

        (Subsequent read() or peek() will return errors.)
        """
        result = self.__data[self.__position:]
        self.__position = None  # ensure no subsequent read() or peek()
        return result

    def advance(self, length):
        """Advance the cursor in data buffer 'length' bytes."""
        new_position = self.__position + length
        if new_position < 0 or new_position > len(self.__data):
            raise Exception('Invalid advance amount (%s) for cursor. '
                            'Position=%s' % (length, new_position))
        self.__position = new_position

    def rewind(self, position=0):
        """Set the position of the data buffer cursor to 'position'."""
        if position < 0 or position > len(self.__data):
            raise Exception("Invalid position to rewind cursor to: %s." % position)
        self.__position = position

    def peek(self, size):
        """Look at the first 'size' bytes in packet without moving cursor."""
        result = self.__data[self.__position:(self.__position+size)]
        if len(result) != size:
            error = ('Result length not requested length:\n'
                     'Expected=%s. Actual=%s. Position: %s. Data Length: %s'
                     % (size, len(result), self.__position, len(self.__data)))
            if DEBUG:
                print error
                self.dump()
            raise AssertionError(error)
        return result

    def get_bytes(self, position, length=1):
        """Get 'length' bytes starting at 'position'.

        Position is start of payload (first four packet header bytes are not
        included) starting at index '0'.

        No error checking is done.  If requesting outside end of buffer
        an empty string (or string shorter than 'length') may be returned!
        """
        return self.__data[position:(position+length)]

    def read_length_coded_binary(self):
        """Read a 'Length Coded Binary' number from the data buffer.

        Length coded numbers can be anywhere from 1 to 9 bytes depending
        on the value of the first byte.
        """
        # The first byte either IS the value (< 251) or a marker telling
        # how many bytes follow (see the *_COLUMN constants above).
        c = byte2int(self.read(1))
        if c == NULL_COLUMN:
            return None
        if c < UNSIGNED_CHAR_COLUMN:
            return c
        elif c == UNSIGNED_SHORT_COLUMN:
            return unpack_uint16(self.read(UNSIGNED_SHORT_LENGTH))
        elif c == UNSIGNED_INT24_COLUMN:
            return unpack_int24(self.read(UNSIGNED_INT24_LENGTH))
        elif c == UNSIGNED_INT64_COLUMN:
            # TODO: what was 'longlong'?  confirm it wasn't used?
            return unpack_int64(self.read(UNSIGNED_INT64_LENGTH))

    def read_length_coded_string(self):
        """Read a 'Length Coded String' from the data buffer.

        A 'Length Coded String' consists first of a length coded
        (unsigned, positive) integer represented in 1-9 bytes followed by
        that many bytes of binary data.  (For example "cat" would be "3cat".)
        """
        length = self.read_length_coded_binary()
        if length is None:
            # NULL value, distinct from an empty string.
            return None
        return self.read(length)

    # Packet-type predicates: the first payload byte identifies the packet.
    def is_ok_packet(self):
        return byte2int(self.get_bytes(0)) == 0

    def is_eof_packet(self):
        return byte2int(self.get_bytes(0)) == 254  # 'fe'

    def is_resultset_packet(self):
        field_count = byte2int(self.get_bytes(0))
        return field_count >= 1 and field_count <= 250

    def is_error_packet(self):
        return byte2int(self.get_bytes(0)) == 255

    def check_error(self):
        # Raise the mapped db-api exception if this is an error packet;
        # otherwise do nothing.
        if self.is_error_packet():
            self.rewind()
            self.advance(1)  # field_count == error (we already know that)
            errno = unpack_uint16(self.read(2))
            if DEBUG: print "errno = %d" % errno
            raise_mysql_exception(self.__data)

    def dump(self):
        # Hex-dump the raw payload (debugging aid).
        dump_packet(self.__data)
|
||||
|
||||
|
||||
class FieldDescriptorPacket(MysqlPacket):
    """A MysqlPacket that represents a specific column's metadata in the result.

    Parsing is automatically done and the results are exported via public
    attributes on the class such as: db, table_name, name, length, type_code.
    """

    def __init__(self, *args):
        MysqlPacket.__init__(self, *args)
        self.__parse_field_descriptor()

    def __parse_field_descriptor(self):
        """Parse the 'Field Descriptor' (Metadata) packet.

        This is compatible with MySQL 4.1+ (not compatible with MySQL 4.0).
        """
        # Field order below is the wire order of the 4.1 column-definition
        # packet; it must not be rearranged.
        self.catalog = self.read_length_coded_string()
        self.db = self.read_length_coded_string()
        self.table_name = self.read_length_coded_string()
        self.org_table = self.read_length_coded_string()
        # The column name is decoded with the connection charset; the other
        # string fields are kept as raw bytes.
        self.name = self.read_length_coded_string().decode(self.connection.charset)
        self.org_name = self.read_length_coded_string()
        self.advance(1)  # non-null filler
        self.charsetnr = struct.unpack('<H', self.read(2))[0]
        self.length = struct.unpack('<I', self.read(4))[0]
        self.type_code = byte2int(self.read(1))
        self.flags = struct.unpack('<H', self.read(2))[0]
        self.scale = byte2int(self.read(1))  # "decimals"
        self.advance(2)  # filler (always 0x00)

        # 'default' is a length coded binary and is still in the buffer?
        # not used for normal result sets...

    def description(self):
        """Provides a 7-item tuple compatible with the Python PEP249 DB Spec."""
        desc = []
        desc.append(self.name)
        desc.append(self.type_code)
        desc.append(None)  # TODO: display_length; should this be self.length?
        desc.append(self.get_column_length())  # 'internal_size'
        desc.append(self.get_column_length())  # 'precision'  # TODO: why!?!?
        desc.append(self.scale)

        # 'null_ok' -- can this be True/False rather than 1/0?
        # if so just do: desc.append(bool(self.flags % 2 == 0))
        if self.flags % 2 == 0:
            desc.append(1)
        else:
            desc.append(0)
        return tuple(desc)

    def get_column_length(self):
        # VAR_STRING lengths come over the wire in bytes; divide by the
        # charset's maximum bytes-per-character (MBLENGTH) to report a
        # character count instead.
        if self.type_code == FIELD_TYPE.VAR_STRING:
            mblen = MBLENGTH.get(self.charsetnr, 1)
            return self.length // mblen
        return self.length

    def __str__(self):
        return ('%s %s.%s.%s, type=%s'
                % (self.__class__, self.db, self.table_name, self.name,
                   self.type_code))
|
||||
|
||||
|
||||
class Connection(object):
|
||||
"""
|
||||
Representation of a socket with a mysql server.
|
||||
|
||||
The proper way to get an instance of this class is to call
|
||||
connect()."""
|
||||
errorhandler = defaulterrorhandler
|
||||
|
||||
def __init__(self, host="localhost", user=None, passwd="",
|
||||
db=None, port=3306, unix_socket=None,
|
||||
charset='', sql_mode=None,
|
||||
read_default_file=None, conv=decoders, use_unicode=None,
|
||||
client_flag=0, cursorclass=Cursor, init_command=None,
|
||||
connect_timeout=None, ssl=None, read_default_group=None,
|
||||
compress=None, named_pipe=None):
|
||||
"""
|
||||
Establish a connection to the MySQL database. Accepts several
|
||||
arguments:
|
||||
|
||||
host: Host where the database server is located
|
||||
user: Username to log in as
|
||||
passwd: Password to use.
|
||||
db: Database to use, None to not use a particular one.
|
||||
port: MySQL port to use, default is usually OK.
|
||||
unix_socket: Optionally, you can use a unix socket rather than TCP/IP.
|
||||
charset: Charset you want to use.
|
||||
sql_mode: Default SQL_MODE to use.
|
||||
read_default_file: Specifies my.cnf file to read these parameters from under the [client] section.
|
||||
conv: Decoders dictionary to use instead of the default one. This is used to provide custom marshalling of types. See converters.
|
||||
use_unicode: Whether or not to default to unicode strings. This option defaults to true for Py3k.
|
||||
client_flag: Custom flags to send to MySQL. Find potential values in constants.CLIENT.
|
||||
cursorclass: Custom cursor class to use.
|
||||
init_command: Initial SQL statement to run when connection is established.
|
||||
connect_timeout: Timeout before throwing an exception when connecting.
|
||||
ssl: A dict of arguments similar to mysql_ssl_set()'s parameters. For now the capath and cipher arguments are not supported.
|
||||
read_default_group: Group to read from in the configuration file.
|
||||
compress; Not supported
|
||||
named_pipe: Not supported
|
||||
"""
|
||||
|
||||
if use_unicode is None and sys.version_info[0] > 2:
|
||||
use_unicode = True
|
||||
|
||||
if compress or named_pipe:
|
||||
raise NotImplementedError, "compress and named_pipe arguments are not supported"
|
||||
|
||||
if ssl and (ssl.has_key('capath') or ssl.has_key('cipher')):
|
||||
raise NotImplementedError, 'ssl options capath and cipher are not supported'
|
||||
|
||||
self.ssl = False
|
||||
if ssl:
|
||||
if not SSL_ENABLED:
|
||||
raise NotImplementedError, "ssl module not found"
|
||||
self.ssl = True
|
||||
client_flag |= SSL
|
||||
for k in ('key', 'cert', 'ca'):
|
||||
v = None
|
||||
if ssl.has_key(k):
|
||||
v = ssl[k]
|
||||
setattr(self, k, v)
|
||||
|
||||
if read_default_group and not read_default_file:
|
||||
if sys.platform.startswith("win"):
|
||||
read_default_file = "c:\\my.ini"
|
||||
else:
|
||||
read_default_file = "/etc/my.cnf"
|
||||
|
||||
if read_default_file:
|
||||
if not read_default_group:
|
||||
read_default_group = "client"
|
||||
|
||||
cfg = ConfigParser.RawConfigParser()
|
||||
cfg.read(os.path.expanduser(read_default_file))
|
||||
|
||||
def _config(key, default):
|
||||
try:
|
||||
return cfg.get(read_default_group,key)
|
||||
except:
|
||||
return default
|
||||
|
||||
user = _config("user",user)
|
||||
passwd = _config("password",passwd)
|
||||
host = _config("host", host)
|
||||
db = _config("db",db)
|
||||
unix_socket = _config("socket",unix_socket)
|
||||
port = _config("port", port)
|
||||
charset = _config("default-character-set", charset)
|
||||
|
||||
self.host = host
|
||||
self.port = port
|
||||
self.user = user
|
||||
self.password = passwd
|
||||
self.db = db
|
||||
self.unix_socket = unix_socket
|
||||
if charset:
|
||||
self.charset = charset
|
||||
self.use_unicode = True
|
||||
else:
|
||||
self.charset = DEFAULT_CHARSET
|
||||
self.use_unicode = False
|
||||
|
||||
if use_unicode is not None:
|
||||
self.use_unicode = use_unicode
|
||||
|
||||
client_flag |= CAPABILITIES
|
||||
client_flag |= MULTI_STATEMENTS
|
||||
if self.db:
|
||||
client_flag |= CONNECT_WITH_DB
|
||||
self.client_flag = client_flag
|
||||
|
||||
self.cursorclass = cursorclass
|
||||
self.connect_timeout = connect_timeout
|
||||
|
||||
self._connect()
|
||||
|
||||
self.messages = []
|
||||
self.set_charset(charset)
|
||||
self.encoders = encoders
|
||||
self.decoders = conv
|
||||
|
||||
self._result = None
|
||||
self._affected_rows = 0
|
||||
self.host_info = "Not connected"
|
||||
|
||||
self.autocommit(False)
|
||||
|
||||
if sql_mode is not None:
|
||||
c = self.cursor()
|
||||
c.execute("SET sql_mode=%s", (sql_mode,))
|
||||
|
||||
self.commit()
|
||||
|
||||
if init_command is not None:
|
||||
c = self.cursor()
|
||||
c.execute(init_command)
|
||||
|
||||
self.commit()
|
||||
|
||||
|
||||
def close(self):
|
||||
''' Send the quit message and close the socket '''
|
||||
if self.socket is None:
|
||||
raise Error("Already closed")
|
||||
send_data = struct.pack('<i',1) + int2byte(COM_QUIT)
|
||||
self.wfile.write(send_data)
|
||||
self.wfile.close()
|
||||
self.rfile.close()
|
||||
self.socket.close()
|
||||
self.socket = None
|
||||
self.rfile = None
|
||||
self.wfile = None
|
||||
|
||||
def autocommit(self, value):
|
||||
''' Set whether or not to commit after every execute() '''
|
||||
try:
|
||||
self._execute_command(COM_QUERY, "SET AUTOCOMMIT = %s" % \
|
||||
self.escape(value))
|
||||
self.read_packet()
|
||||
except:
|
||||
exc,value,tb = sys.exc_info()
|
||||
self.errorhandler(None, exc, value)
|
||||
|
||||
def commit(self):
|
||||
''' Commit changes to stable storage '''
|
||||
try:
|
||||
self._execute_command(COM_QUERY, "COMMIT")
|
||||
self.read_packet()
|
||||
except:
|
||||
exc,value,tb = sys.exc_info()
|
||||
self.errorhandler(None, exc, value)
|
||||
|
||||
def rollback(self):
|
||||
''' Roll back the current transaction '''
|
||||
try:
|
||||
self._execute_command(COM_QUERY, "ROLLBACK")
|
||||
self.read_packet()
|
||||
except:
|
||||
exc,value,tb = sys.exc_info()
|
||||
self.errorhandler(None, exc, value)
|
||||
|
||||
def escape(self, obj):
|
||||
''' Escape whatever value you pass to it '''
|
||||
return escape_item(obj, self.charset)
|
||||
|
||||
def literal(self, obj):
|
||||
''' Alias for escape() '''
|
||||
return escape_item(obj, self.charset)
|
||||
|
||||
def cursor(self, cursor=None):
|
||||
''' Create a new cursor to execute queries with '''
|
||||
if cursor:
|
||||
return cursor(self)
|
||||
return self.cursorclass(self)
|
||||
|
||||
def __enter__(self):
|
||||
''' Context manager that returns a Cursor '''
|
||||
return self.cursor()
|
||||
|
||||
def __exit__(self, exc, value, traceback):
|
||||
''' On successful exit, commit. On exception, rollback. '''
|
||||
if exc:
|
||||
self.rollback()
|
||||
else:
|
||||
self.commit()
|
||||
|
||||
# The following methods are INTERNAL USE ONLY (called from Cursor)
|
||||
def query(self, sql):
|
||||
if DEBUG:
|
||||
print "sending query: %s" % sql
|
||||
self._execute_command(COM_QUERY, sql)
|
||||
self._affected_rows = self._read_query_result()
|
||||
return self._affected_rows
|
||||
|
||||
def next_result(self):
|
||||
self._affected_rows = self._read_query_result()
|
||||
return self._affected_rows
|
||||
|
||||
def affected_rows(self):
|
||||
return self._affected_rows
|
||||
|
||||
def kill(self, thread_id):
|
||||
arg = struct.pack('<I', thread_id)
|
||||
try:
|
||||
self._execute_command(COM_PROCESS_KILL, arg)
|
||||
except:
|
||||
exc,value,tb = sys.exc_info()
|
||||
self.errorhandler(None, exc, value)
|
||||
return
|
||||
pkt = self.read_packet()
|
||||
return pkt.is_ok_packet()
|
||||
|
||||
def ping(self, reconnect=True):
    '''Check if the server is alive.

    reconnect -- when True, transparently reconnect once and retry
    (with reconnect disabled) before reporting failure.
    Returns True if the server replied with an OK packet.
    '''
    try:
        self._execute_command(COM_PING, "")
    except:
        if not reconnect:
            exc_type, exc_value, _ = sys.exc_info()
            self.errorhandler(None, exc_type, exc_value)
            return
        # One reconnect attempt, then a single non-reconnecting retry.
        self._connect()
        return self.ping(False)

    return self.read_packet().is_ok_packet()
|
||||
|
||||
def set_charset(self, charset):
    '''Switch the connection character set via SET NAMES.

    A falsy *charset* is a no-op.  self.charset is only updated after
    the server acknowledged the change.
    '''
    try:
        if charset:
            query = "SET NAMES %s" % self.escape(charset)
            self._execute_command(COM_QUERY, query)
            self.read_packet()
            self.charset = charset
    except:
        exc_type, exc_value, _ = sys.exc_info()
        self.errorhandler(None, exc_type, exc_value)
|
||||
|
||||
def _connect(self):
|
||||
try:
|
||||
if self.unix_socket and (self.host == 'localhost' or self.host == '127.0.0.1'):
|
||||
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
|
||||
t = sock.gettimeout()
|
||||
sock.settimeout(self.connect_timeout)
|
||||
sock.connect(self.unix_socket)
|
||||
sock.settimeout(t)
|
||||
self.host_info = "Localhost via UNIX socket"
|
||||
if DEBUG: print 'connected using unix_socket'
|
||||
else:
|
||||
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
||||
t = sock.gettimeout()
|
||||
sock.settimeout(self.connect_timeout)
|
||||
sock.connect((self.host, self.port))
|
||||
sock.settimeout(t)
|
||||
self.host_info = "socket %s:%d" % (self.host, self.port)
|
||||
if DEBUG: print 'connected using socket'
|
||||
self.socket = sock
|
||||
self.rfile = self.socket.makefile("rb")
|
||||
self.wfile = self.socket.makefile("wb")
|
||||
self._get_server_information()
|
||||
self._request_authentication()
|
||||
except socket.error, e:
|
||||
raise OperationalError(2003, "Can't connect to MySQL server on %r (%s)" % (self.host, e.args[0]))
|
||||
|
||||
def read_packet(self, packet_type=MysqlPacket):
    """Read one entire "mysql packet" from the network and return it.

    packet_type -- packet class to instantiate (default MysqlPacket);
    check_error() raises if the server sent an error packet.
    """
    pkt = packet_type(self)
    pkt.check_error()
    return pkt
|
||||
|
||||
def _read_query_result(self):
    ''' Read a complete result set; return its affected-row count. '''
    pending = MySQLResult(self)
    pending.read()
    # Publish the result only after read() succeeded, preserving the
    # previous result on failure (matches original statement order).
    self._result = pending
    return pending.affected_rows
|
||||
|
||||
def insert_id(self):
    ''' Last AUTO_INCREMENT id from the previous query, or 0. '''
    return self._result.insert_id if self._result else 0
|
||||
|
||||
def _send_command(self, command, sql):
    '''Frame and transmit one command packet.

    Wire layout: a 4-byte little-endian int of len(payload)+1 (low 3
    bytes are the length, high byte the sequence id 0), the command
    byte, then the byte-encoded payload.
    '''
    if not self.socket:
        self.errorhandler(None, InterfaceError, "(0, '')")

    if isinstance(sql, unicode):
        sql = sql.encode(self.charset)

    # could probably be more efficient, at least it's correct
    packet = struct.pack('<i', len(sql) + 1) + int2byte(command) + sql
    self.wfile.write(packet)
    self.wfile.flush()
    if DEBUG:
        dump_packet(packet)
|
||||
|
||||
def _execute_command(self, command, sql):
    # Thin indirection over _send_command; kept as the seam callers
    # use so command execution can grow pre/post steps in one place.
    self._send_command(command, sql)
|
||||
|
||||
def _request_authentication(self):
    # Kick off the client side of the handshake (see
    # _send_authentication for the actual packet construction).
    self._send_authentication()
|
||||
|
||||
def _send_authentication(self):
|
||||
self.client_flag |= CAPABILITIES
|
||||
if self.server_version.startswith('5'):
|
||||
self.client_flag |= MULTI_RESULTS
|
||||
|
||||
if self.user is None:
|
||||
raise ValueError, "Did not specify a username"
|
||||
|
||||
charset_id = charset_by_name(self.charset).id
|
||||
self.user = self.user.encode(self.charset)
|
||||
|
||||
data_init = struct.pack('<i', self.client_flag) + struct.pack("<I", 1) + \
|
||||
int2byte(charset_id) + int2byte(0)*23
|
||||
|
||||
next_packet = 1
|
||||
|
||||
if self.ssl:
|
||||
data = pack_int24(len(data_init)) + int2byte(next_packet) + data_init
|
||||
next_packet += 1
|
||||
|
||||
if DEBUG: dump_packet(data)
|
||||
|
||||
self.wfile.write(data)
|
||||
self.wfile.flush()
|
||||
self.socket = ssl.wrap_self.socketet(self.socket, keyfile=self.key,
|
||||
certfile=self.cert,
|
||||
ssl_version=ssl.PROTOCOL_TLSv1,
|
||||
cert_reqs=ssl.CERT_REQUIRED,
|
||||
ca_certs=self.ca)
|
||||
self.rfile = self.socket.makefile("rb")
|
||||
self.wfile = self.socket.makefile("wb")
|
||||
|
||||
data = data_init + self.user+int2byte(0) + _scramble(self.password.encode(self.charset), self.salt)
|
||||
|
||||
if self.db:
|
||||
self.db = self.db.encode(self.charset)
|
||||
data += self.db + int2byte(0)
|
||||
|
||||
data = pack_int24(len(data)) + int2byte(next_packet) + data
|
||||
next_packet += 2
|
||||
|
||||
if DEBUG: dump_packet(data)
|
||||
|
||||
self.wfile.write(data)
|
||||
self.wfile.flush()
|
||||
|
||||
auth_packet = MysqlPacket(self)
|
||||
auth_packet.check_error()
|
||||
if DEBUG: auth_packet.dump()
|
||||
|
||||
# if old_passwords is enabled the packet will be 1 byte long and
|
||||
# have the octet 254
|
||||
|
||||
if auth_packet.is_eof_packet():
|
||||
# send legacy handshake
|
||||
#raise NotImplementedError, "old_passwords are not supported. Check to see if mysqld was started with --old-passwords, if old-passwords=1 in a my.cnf file, or if there are some short hashes in your mysql.user table."
|
||||
# TODO: is this the correct charset?
|
||||
data = _scramble_323(self.password.encode(self.charset), self.salt.encode(self.charset)) + int2byte(0)
|
||||
data = pack_int24(len(data)) + int2byte(next_packet) + data
|
||||
|
||||
self.wfile.write(data)
|
||||
self.wfile.flush()
|
||||
auth_packet = MysqlPacket(self)
|
||||
auth_packet.check_error()
|
||||
if DEBUG: auth_packet.dump()
|
||||
|
||||
|
||||
# _mysql support
|
||||
def thread_id(self):
    # _mysql compatibility: server-assigned id of this connection
    # (first element of the tuple unpacked from the handshake).
    return self.server_thread_id[0]
|
||||
|
||||
def character_set_name(self):
    # _mysql compatibility: the connection's configured charset name.
    return self.charset
|
||||
|
||||
def get_host_info(self):
    # _mysql compatibility: human-readable transport description
    # (set by _connect()).
    return self.host_info
|
||||
|
||||
def get_proto_info(self):
    # _mysql compatibility: protocol version from the server handshake.
    return self.protocol_version
|
||||
|
||||
def _get_server_information(self):
    # Parse the server's initial handshake packet: protocol version,
    # server version string, thread id, scramble salt, capability
    # flags and server charset.  The byte offsets below follow the
    # MySQL Initial Handshake Packet layout; the arithmetic is
    # order-sensitive, so do not reorder these statements.
    i = 0
    packet = MysqlPacket(self)
    data = packet.get_all_data()

    if DEBUG: dump_packet(data)
    #packet_len = byte2int(data[i:i+1])
    #i += 4
    self.protocol_version = byte2int(data[i:i+1])

    i += 1
    # Server version is a NUL-terminated string starting at offset 1.
    server_end = data.find(int2byte(0), i)
    # TODO: is this the correct charset? should it be default_charset?
    self.server_version = data[i:server_end].decode(self.charset)

    i = server_end + 1
    # NOTE(review): '<h' unpacks a *signed* 16-bit value although the
    # protocol's thread id is a 4-byte unsigned int -- confirm against
    # upstream pymysql before relying on thread_id().
    self.server_thread_id = struct.unpack('<h', data[i:i+2])

    i += 4
    # First 8 bytes of the authentication scramble ("salt").
    self.salt = data[i:i+8]

    i += 9
    if len(data) >= i + 1:
        i += 1

    self.server_capabilities = struct.unpack('<h', data[i:i+2])[0]

    i += 1
    self.server_language = byte2int(data[i:i+1])
    self.server_charset = charset_by_id(self.server_language).name

    i += 16
    # Servers with secure auth append 12 more scramble bytes.
    if len(data) >= i+12-1:
        rest_salt = data[i:i+12]
        self.salt += rest_salt
|
||||
|
||||
def get_server_info(self):
    # Version string announced by the server during the handshake.
    return self.server_version
|
||||
|
||||
# PEP 249 (DB-API 2.0) optional extension: expose the module-level
# exception classes as attributes of the Connection class too, so
# callers holding only a connection can catch them.
Warning = Warning
Error = Error
InterfaceError = InterfaceError
DatabaseError = DatabaseError
DataError = DataError
OperationalError = OperationalError
IntegrityError = IntegrityError
InternalError = InternalError
ProgrammingError = ProgrammingError
NotSupportedError = NotSupportedError
|
||||
|
||||
# TODO: move OK and EOF packet parsing/logic into a proper subclass
|
||||
# of MysqlPacket like has been done with FieldDescriptorPacket.
|
||||
class MySQLResult(object):
    # Reads and holds one server response: either an OK packet
    # (affected rows / insert id / status) or a full result set
    # (column descriptions plus row data).

    def __init__(self, connection):
        from weakref import proxy
        # Weak proxy avoids a reference cycle Connection <-> result.
        self.connection = proxy(connection)
        self.affected_rows = None
        self.insert_id = None
        self.server_status = 0
        self.warning_count = 0
        self.message = None
        self.field_count = 0
        self.description = None
        self.rows = None
        self.has_next = None

    def read(self):
        # Entry point: consume the first packet and dispatch on type.
        self.first_packet = self.connection.read_packet()

        # TODO: use classes for different packet types?
        if self.first_packet.is_ok_packet():
            self._read_ok_packet()
        else:
            self._read_result_packet()

    def _read_ok_packet(self):
        # OK packet layout: 0x00 marker, affected rows (length-coded),
        # insert id (length-coded), status (u16), warnings (u16), then
        # a human-readable message.
        self.first_packet.advance(1)  # field_count (always '0')
        self.affected_rows = self.first_packet.read_length_coded_binary()
        self.insert_id = self.first_packet.read_length_coded_binary()
        self.server_status = struct.unpack('<H', self.first_packet.read(2))[0]
        self.warning_count = struct.unpack('<H', self.first_packet.read(2))[0]
        self.message = self.first_packet.read_all()

    def _read_result_packet(self):
        # Result-set header carries the column count; then come the
        # column descriptors and the row data.
        self.field_count = byte2int(self.first_packet.read(1))
        self._get_descriptions()
        self._read_rowdata_packet()

    # TODO: implement this as an iteratable so that it is more
    # memory efficient and lower-latency to client...
    def _read_rowdata_packet(self):
        """Read a rowdata packet for each data row in the result set."""
        rows = []
        while True:
            packet = self.connection.read_packet()
            if packet.is_eof_packet():
                # NOTE(review): warning_count is stored here as the raw
                # 2-byte string, not unpacked like in _read_ok_packet --
                # confirm whether callers expect an int.
                self.warning_count = packet.read(2)
                server_status = struct.unpack('<h', packet.read(2))[0]
                self.has_next = (server_status
                                 & SERVER_STATUS.SERVER_MORE_RESULTS_EXISTS)
                break

            row = []
            for field in self.fields:
                # NOTE(review): 'converter' is only rebound when the
                # field's type_code has a registered decoder; otherwise
                # the previous column's converter (or an unbound name on
                # the first column) is reused -- looks like a latent
                # bug, verify against upstream pymysql.
                if field.type_code in self.connection.decoders:
                    converter = self.connection.decoders[field.type_code]

                if DEBUG: print "DEBUG: field=%s, converter=%s" % (field, converter)
                data = packet.read_length_coded_string()
                converted = None
                if data != None:
                    converted = converter(self.connection, field, data)

                row.append(converted)

            rows.append(tuple(row))

        self.affected_rows = len(rows)
        self.rows = tuple(rows)
        if DEBUG: self.rows  # NOTE(review): no-op; probably meant to print

    def _get_descriptions(self):
        """Read a column descriptor packet for each column in the result."""
        self.fields = []
        description = []
        for i in xrange(self.field_count):
            field = self.connection.read_packet(FieldDescriptorPacket)
            self.fields.append(field)
            description.append(field.description())

        # Column descriptors are terminated by an EOF packet.
        eof_packet = self.connection.read_packet()
        assert eof_packet.is_eof_packet(), 'Protocol error, expecting EOF'
        self.description = tuple(description)
|
||||
|
|
@ -1,32 +0,0 @@
|
|||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
|
||||
# MySQL client capability flags (one bit each), as exchanged in the
# connection handshake.
LONG_PASSWORD = 1
FOUND_ROWS = 1 << 1
LONG_FLAG = 1 << 2
CONNECT_WITH_DB = 1 << 3
NO_SCHEMA = 1 << 4
COMPRESS = 1 << 5
ODBC = 1 << 6
LOCAL_FILES = 1 << 7
IGNORE_SPACE = 1 << 8
PROTOCOL_41 = 1 << 9
INTERACTIVE = 1 << 10
SSL = 1 << 11
IGNORE_SIGPIPE = 1 << 12
TRANSACTIONS = 1 << 13
SECURE_CONNECTION = 1 << 15
MULTI_STATEMENTS = 1 << 16
MULTI_RESULTS = 1 << 17

# Default capability set this client requests at connect time.
CAPABILITIES = LONG_PASSWORD|LONG_FLAG|TRANSACTIONS| \
               PROTOCOL_41|SECURE_CONNECTION
|
||||
|
|
@ -1,35 +0,0 @@
|
|||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
|
||||
# MySQL client/server protocol command bytes (first byte of a command
# packet's payload).
COM_SLEEP = 0x00
COM_QUIT = 0x01
COM_INIT_DB = 0x02
COM_QUERY = 0x03
COM_FIELD_LIST = 0x04
COM_CREATE_DB = 0x05
COM_DROP_DB = 0x06
COM_REFRESH = 0x07
COM_SHUTDOWN = 0x08
COM_STATISTICS = 0x09
COM_PROCESS_INFO = 0x0a
COM_CONNECT = 0x0b
COM_PROCESS_KILL = 0x0c
COM_DEBUG = 0x0d
COM_PING = 0x0e
COM_TIME = 0x0f
COM_DELAYED_INSERT = 0x10
COM_CHANGE_USER = 0x11
COM_BINLOG_DUMP = 0x12
COM_TABLE_DUMP = 0x13
COM_CONNECT_OUT = 0x14
COM_REGISTER_SLAVE = 0x15
|
||||
|
|
@ -1,484 +0,0 @@
|
|||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
|
||||
ERROR_FIRST = 1000
|
||||
HASHCHK = 1000
|
||||
NISAMCHK = 1001
|
||||
NO = 1002
|
||||
YES = 1003
|
||||
CANT_CREATE_FILE = 1004
|
||||
CANT_CREATE_TABLE = 1005
|
||||
CANT_CREATE_DB = 1006
|
||||
DB_CREATE_EXISTS = 1007
|
||||
DB_DROP_EXISTS = 1008
|
||||
DB_DROP_DELETE = 1009
|
||||
DB_DROP_RMDIR = 1010
|
||||
CANT_DELETE_FILE = 1011
|
||||
CANT_FIND_SYSTEM_REC = 1012
|
||||
CANT_GET_STAT = 1013
|
||||
CANT_GET_WD = 1014
|
||||
CANT_LOCK = 1015
|
||||
CANT_OPEN_FILE = 1016
|
||||
FILE_NOT_FOUND = 1017
|
||||
CANT_READ_DIR = 1018
|
||||
CANT_SET_WD = 1019
|
||||
CHECKREAD = 1020
|
||||
DISK_FULL = 1021
|
||||
DUP_KEY = 1022
|
||||
ERROR_ON_CLOSE = 1023
|
||||
ERROR_ON_READ = 1024
|
||||
ERROR_ON_RENAME = 1025
|
||||
ERROR_ON_WRITE = 1026
|
||||
FILE_USED = 1027
|
||||
FILSORT_ABORT = 1028
|
||||
FORM_NOT_FOUND = 1029
|
||||
GET_ERRNO = 1030
|
||||
ILLEGAL_HA = 1031
|
||||
KEY_NOT_FOUND = 1032
|
||||
NOT_FORM_FILE = 1033
|
||||
NOT_KEYFILE = 1034
|
||||
OLD_KEYFILE = 1035
|
||||
OPEN_AS_READONLY = 1036
|
||||
OUTOFMEMORY = 1037
|
||||
OUT_OF_SORTMEMORY = 1038
|
||||
UNEXPECTED_EOF = 1039
|
||||
CON_COUNT_ERROR = 1040
|
||||
OUT_OF_RESOURCES = 1041
|
||||
BAD_HOST_ERROR = 1042
|
||||
HANDSHAKE_ERROR = 1043
|
||||
DBACCESS_DENIED_ERROR = 1044
|
||||
ACCESS_DENIED_ERROR = 1045
|
||||
NO_DB_ERROR = 1046
|
||||
UNKNOWN_COM_ERROR = 1047
|
||||
BAD_NULL_ERROR = 1048
|
||||
BAD_DB_ERROR = 1049
|
||||
TABLE_EXISTS_ERROR = 1050
|
||||
BAD_TABLE_ERROR = 1051
|
||||
NON_UNIQ_ERROR = 1052
|
||||
SERVER_SHUTDOWN = 1053
|
||||
BAD_FIELD_ERROR = 1054
|
||||
WRONG_FIELD_WITH_GROUP = 1055
|
||||
WRONG_GROUP_FIELD = 1056
|
||||
WRONG_SUM_SELECT = 1057
|
||||
WRONG_VALUE_COUNT = 1058
|
||||
TOO_LONG_IDENT = 1059
|
||||
DUP_FIELDNAME = 1060
|
||||
DUP_KEYNAME = 1061
|
||||
DUP_ENTRY = 1062
|
||||
WRONG_FIELD_SPEC = 1063
|
||||
PARSE_ERROR = 1064
|
||||
EMPTY_QUERY = 1065
|
||||
NONUNIQ_TABLE = 1066
|
||||
INVALID_DEFAULT = 1067
|
||||
MULTIPLE_PRI_KEY = 1068
|
||||
TOO_MANY_KEYS = 1069
|
||||
TOO_MANY_KEY_PARTS = 1070
|
||||
TOO_LONG_KEY = 1071
|
||||
KEY_COLUMN_DOES_NOT_EXITS = 1072
|
||||
BLOB_USED_AS_KEY = 1073
|
||||
TOO_BIG_FIELDLENGTH = 1074
|
||||
WRONG_AUTO_KEY = 1075
|
||||
READY = 1076
|
||||
NORMAL_SHUTDOWN = 1077
|
||||
GOT_SIGNAL = 1078
|
||||
SHUTDOWN_COMPLETE = 1079
|
||||
FORCING_CLOSE = 1080
|
||||
IPSOCK_ERROR = 1081
|
||||
NO_SUCH_INDEX = 1082
|
||||
WRONG_FIELD_TERMINATORS = 1083
|
||||
BLOBS_AND_NO_TERMINATED = 1084
|
||||
TEXTFILE_NOT_READABLE = 1085
|
||||
FILE_EXISTS_ERROR = 1086
|
||||
LOAD_INFO = 1087
|
||||
ALTER_INFO = 1088
|
||||
WRONG_SUB_KEY = 1089
|
||||
CANT_REMOVE_ALL_FIELDS = 1090
|
||||
CANT_DROP_FIELD_OR_KEY = 1091
|
||||
INSERT_INFO = 1092
|
||||
UPDATE_TABLE_USED = 1093
|
||||
NO_SUCH_THREAD = 1094
|
||||
KILL_DENIED_ERROR = 1095
|
||||
NO_TABLES_USED = 1096
|
||||
TOO_BIG_SET = 1097
|
||||
NO_UNIQUE_LOGFILE = 1098
|
||||
TABLE_NOT_LOCKED_FOR_WRITE = 1099
|
||||
TABLE_NOT_LOCKED = 1100
|
||||
BLOB_CANT_HAVE_DEFAULT = 1101
|
||||
WRONG_DB_NAME = 1102
|
||||
WRONG_TABLE_NAME = 1103
|
||||
TOO_BIG_SELECT = 1104
|
||||
UNKNOWN_ERROR = 1105
|
||||
UNKNOWN_PROCEDURE = 1106
|
||||
WRONG_PARAMCOUNT_TO_PROCEDURE = 1107
|
||||
WRONG_PARAMETERS_TO_PROCEDURE = 1108
|
||||
UNKNOWN_TABLE = 1109
|
||||
FIELD_SPECIFIED_TWICE = 1110
|
||||
INVALID_GROUP_FUNC_USE = 1111
|
||||
UNSUPPORTED_EXTENSION = 1112
|
||||
TABLE_MUST_HAVE_COLUMNS = 1113
|
||||
RECORD_FILE_FULL = 1114
|
||||
UNKNOWN_CHARACTER_SET = 1115
|
||||
TOO_MANY_TABLES = 1116
|
||||
TOO_MANY_FIELDS = 1117
|
||||
TOO_BIG_ROWSIZE = 1118
|
||||
STACK_OVERRUN = 1119
|
||||
WRONG_OUTER_JOIN = 1120
|
||||
NULL_COLUMN_IN_INDEX = 1121
|
||||
CANT_FIND_UDF = 1122
|
||||
CANT_INITIALIZE_UDF = 1123
|
||||
UDF_NO_PATHS = 1124
|
||||
UDF_EXISTS = 1125
|
||||
CANT_OPEN_LIBRARY = 1126
|
||||
CANT_FIND_DL_ENTRY = 1127
|
||||
FUNCTION_NOT_DEFINED = 1128
|
||||
HOST_IS_BLOCKED = 1129
|
||||
HOST_NOT_PRIVILEGED = 1130
|
||||
PASSWORD_ANONYMOUS_USER = 1131
|
||||
PASSWORD_NOT_ALLOWED = 1132
|
||||
PASSWORD_NO_MATCH = 1133
|
||||
UPDATE_INFO = 1134
|
||||
CANT_CREATE_THREAD = 1135
|
||||
WRONG_VALUE_COUNT_ON_ROW = 1136
|
||||
CANT_REOPEN_TABLE = 1137
|
||||
INVALID_USE_OF_NULL = 1138
|
||||
REGEXP_ERROR = 1139
|
||||
MIX_OF_GROUP_FUNC_AND_FIELDS = 1140
|
||||
NONEXISTING_GRANT = 1141
|
||||
TABLEACCESS_DENIED_ERROR = 1142
|
||||
COLUMNACCESS_DENIED_ERROR = 1143
|
||||
ILLEGAL_GRANT_FOR_TABLE = 1144
|
||||
GRANT_WRONG_HOST_OR_USER = 1145
|
||||
NO_SUCH_TABLE = 1146
|
||||
NONEXISTING_TABLE_GRANT = 1147
|
||||
NOT_ALLOWED_COMMAND = 1148
|
||||
SYNTAX_ERROR = 1149
|
||||
DELAYED_CANT_CHANGE_LOCK = 1150
|
||||
TOO_MANY_DELAYED_THREADS = 1151
|
||||
ABORTING_CONNECTION = 1152
|
||||
NET_PACKET_TOO_LARGE = 1153
|
||||
NET_READ_ERROR_FROM_PIPE = 1154
|
||||
NET_FCNTL_ERROR = 1155
|
||||
NET_PACKETS_OUT_OF_ORDER = 1156
|
||||
NET_UNCOMPRESS_ERROR = 1157
|
||||
NET_READ_ERROR = 1158
|
||||
NET_READ_INTERRUPTED = 1159
|
||||
NET_ERROR_ON_WRITE = 1160
|
||||
NET_WRITE_INTERRUPTED = 1161
|
||||
TOO_LONG_STRING = 1162
|
||||
TABLE_CANT_HANDLE_BLOB = 1163
|
||||
TABLE_CANT_HANDLE_AUTO_INCREMENT = 1164
|
||||
DELAYED_INSERT_TABLE_LOCKED = 1165
|
||||
WRONG_COLUMN_NAME = 1166
|
||||
WRONG_KEY_COLUMN = 1167
|
||||
WRONG_MRG_TABLE = 1168
|
||||
DUP_UNIQUE = 1169
|
||||
BLOB_KEY_WITHOUT_LENGTH = 1170
|
||||
PRIMARY_CANT_HAVE_NULL = 1171
|
||||
TOO_MANY_ROWS = 1172
|
||||
REQUIRES_PRIMARY_KEY = 1173
|
||||
NO_RAID_COMPILED = 1174
|
||||
UPDATE_WITHOUT_KEY_IN_SAFE_MODE = 1175
|
||||
KEY_DOES_NOT_EXITS = 1176
|
||||
CHECK_NO_SUCH_TABLE = 1177
|
||||
CHECK_NOT_IMPLEMENTED = 1178
|
||||
CANT_DO_THIS_DURING_AN_TRANSACTION = 1179
|
||||
ERROR_DURING_COMMIT = 1180
|
||||
ERROR_DURING_ROLLBACK = 1181
|
||||
ERROR_DURING_FLUSH_LOGS = 1182
|
||||
ERROR_DURING_CHECKPOINT = 1183
|
||||
NEW_ABORTING_CONNECTION = 1184
|
||||
DUMP_NOT_IMPLEMENTED = 1185
|
||||
FLUSH_MASTER_BINLOG_CLOSED = 1186
|
||||
INDEX_REBUILD = 1187
|
||||
MASTER = 1188
|
||||
MASTER_NET_READ = 1189
|
||||
MASTER_NET_WRITE = 1190
|
||||
FT_MATCHING_KEY_NOT_FOUND = 1191
|
||||
LOCK_OR_ACTIVE_TRANSACTION = 1192
|
||||
UNKNOWN_SYSTEM_VARIABLE = 1193
|
||||
CRASHED_ON_USAGE = 1194
|
||||
CRASHED_ON_REPAIR = 1195
|
||||
WARNING_NOT_COMPLETE_ROLLBACK = 1196
|
||||
TRANS_CACHE_FULL = 1197
|
||||
SLAVE_MUST_STOP = 1198
|
||||
SLAVE_NOT_RUNNING = 1199
|
||||
BAD_SLAVE = 1200
|
||||
MASTER_INFO = 1201
|
||||
SLAVE_THREAD = 1202
|
||||
TOO_MANY_USER_CONNECTIONS = 1203
|
||||
SET_CONSTANTS_ONLY = 1204
|
||||
LOCK_WAIT_TIMEOUT = 1205
|
||||
LOCK_TABLE_FULL = 1206
|
||||
READ_ONLY_TRANSACTION = 1207
|
||||
DROP_DB_WITH_READ_LOCK = 1208
|
||||
CREATE_DB_WITH_READ_LOCK = 1209
|
||||
WRONG_ARGUMENTS = 1210
|
||||
NO_PERMISSION_TO_CREATE_USER = 1211
|
||||
UNION_TABLES_IN_DIFFERENT_DIR = 1212
|
||||
LOCK_DEADLOCK = 1213
|
||||
TABLE_CANT_HANDLE_FT = 1214
|
||||
CANNOT_ADD_FOREIGN = 1215
|
||||
NO_REFERENCED_ROW = 1216
|
||||
ROW_IS_REFERENCED = 1217
|
||||
CONNECT_TO_MASTER = 1218
|
||||
QUERY_ON_MASTER = 1219
|
||||
ERROR_WHEN_EXECUTING_COMMAND = 1220
|
||||
WRONG_USAGE = 1221
|
||||
WRONG_NUMBER_OF_COLUMNS_IN_SELECT = 1222
|
||||
CANT_UPDATE_WITH_READLOCK = 1223
|
||||
MIXING_NOT_ALLOWED = 1224
|
||||
DUP_ARGUMENT = 1225
|
||||
USER_LIMIT_REACHED = 1226
|
||||
SPECIFIC_ACCESS_DENIED_ERROR = 1227
|
||||
LOCAL_VARIABLE = 1228
|
||||
GLOBAL_VARIABLE = 1229
|
||||
NO_DEFAULT = 1230
|
||||
WRONG_VALUE_FOR_VAR = 1231
|
||||
WRONG_TYPE_FOR_VAR = 1232
|
||||
VAR_CANT_BE_READ = 1233
|
||||
CANT_USE_OPTION_HERE = 1234
|
||||
NOT_SUPPORTED_YET = 1235
|
||||
MASTER_FATAL_ERROR_READING_BINLOG = 1236
|
||||
SLAVE_IGNORED_TABLE = 1237
|
||||
INCORRECT_GLOBAL_LOCAL_VAR = 1238
|
||||
WRONG_FK_DEF = 1239
|
||||
KEY_REF_DO_NOT_MATCH_TABLE_REF = 1240
|
||||
OPERAND_COLUMNS = 1241
|
||||
SUBQUERY_NO_1_ROW = 1242
|
||||
UNKNOWN_STMT_HANDLER = 1243
|
||||
CORRUPT_HELP_DB = 1244
|
||||
CYCLIC_REFERENCE = 1245
|
||||
AUTO_CONVERT = 1246
|
||||
ILLEGAL_REFERENCE = 1247
|
||||
DERIVED_MUST_HAVE_ALIAS = 1248
|
||||
SELECT_REDUCED = 1249
|
||||
TABLENAME_NOT_ALLOWED_HERE = 1250
|
||||
NOT_SUPPORTED_AUTH_MODE = 1251
|
||||
SPATIAL_CANT_HAVE_NULL = 1252
|
||||
COLLATION_CHARSET_MISMATCH = 1253
|
||||
SLAVE_WAS_RUNNING = 1254
|
||||
SLAVE_WAS_NOT_RUNNING = 1255
|
||||
TOO_BIG_FOR_UNCOMPRESS = 1256
|
||||
ZLIB_Z_MEM_ERROR = 1257
|
||||
ZLIB_Z_BUF_ERROR = 1258
|
||||
ZLIB_Z_DATA_ERROR = 1259
|
||||
CUT_VALUE_GROUP_CONCAT = 1260
|
||||
WARN_TOO_FEW_RECORDS = 1261
|
||||
WARN_TOO_MANY_RECORDS = 1262
|
||||
WARN_NULL_TO_NOTNULL = 1263
|
||||
WARN_DATA_OUT_OF_RANGE = 1264
|
||||
WARN_DATA_TRUNCATED = 1265
|
||||
WARN_USING_OTHER_HANDLER = 1266
|
||||
CANT_AGGREGATE_2COLLATIONS = 1267
|
||||
DROP_USER = 1268
|
||||
REVOKE_GRANTS = 1269
|
||||
CANT_AGGREGATE_3COLLATIONS = 1270
|
||||
CANT_AGGREGATE_NCOLLATIONS = 1271
|
||||
VARIABLE_IS_NOT_STRUCT = 1272
|
||||
UNKNOWN_COLLATION = 1273
|
||||
SLAVE_IGNORED_SSL_PARAMS = 1274
|
||||
SERVER_IS_IN_SECURE_AUTH_MODE = 1275
|
||||
WARN_FIELD_RESOLVED = 1276
|
||||
BAD_SLAVE_UNTIL_COND = 1277
|
||||
MISSING_SKIP_SLAVE = 1278
|
||||
UNTIL_COND_IGNORED = 1279
|
||||
WRONG_NAME_FOR_INDEX = 1280
|
||||
WRONG_NAME_FOR_CATALOG = 1281
|
||||
WARN_QC_RESIZE = 1282
|
||||
BAD_FT_COLUMN = 1283
|
||||
UNKNOWN_KEY_CACHE = 1284
|
||||
WARN_HOSTNAME_WONT_WORK = 1285
|
||||
UNKNOWN_STORAGE_ENGINE = 1286
|
||||
WARN_DEPRECATED_SYNTAX = 1287
|
||||
NON_UPDATABLE_TABLE = 1288
|
||||
FEATURE_DISABLED = 1289
|
||||
OPTION_PREVENTS_STATEMENT = 1290
|
||||
DUPLICATED_VALUE_IN_TYPE = 1291
|
||||
TRUNCATED_WRONG_VALUE = 1292
|
||||
TOO_MUCH_AUTO_TIMESTAMP_COLS = 1293
|
||||
INVALID_ON_UPDATE = 1294
|
||||
UNSUPPORTED_PS = 1295
|
||||
GET_ERRMSG = 1296
|
||||
GET_TEMPORARY_ERRMSG = 1297
|
||||
UNKNOWN_TIME_ZONE = 1298
|
||||
WARN_INVALID_TIMESTAMP = 1299
|
||||
INVALID_CHARACTER_STRING = 1300
|
||||
WARN_ALLOWED_PACKET_OVERFLOWED = 1301
|
||||
CONFLICTING_DECLARATIONS = 1302
|
||||
SP_NO_RECURSIVE_CREATE = 1303
|
||||
SP_ALREADY_EXISTS = 1304
|
||||
SP_DOES_NOT_EXIST = 1305
|
||||
SP_DROP_FAILED = 1306
|
||||
SP_STORE_FAILED = 1307
|
||||
SP_LILABEL_MISMATCH = 1308
|
||||
SP_LABEL_REDEFINE = 1309
|
||||
SP_LABEL_MISMATCH = 1310
|
||||
SP_UNINIT_VAR = 1311
|
||||
SP_BADSELECT = 1312
|
||||
SP_BADRETURN = 1313
|
||||
SP_BADSTATEMENT = 1314
|
||||
UPDATE_LOG_DEPRECATED_IGNORED = 1315
|
||||
UPDATE_LOG_DEPRECATED_TRANSLATED = 1316
|
||||
QUERY_INTERRUPTED = 1317
|
||||
SP_WRONG_NO_OF_ARGS = 1318
|
||||
SP_COND_MISMATCH = 1319
|
||||
SP_NORETURN = 1320
|
||||
SP_NORETURNEND = 1321
|
||||
SP_BAD_CURSOR_QUERY = 1322
|
||||
SP_BAD_CURSOR_SELECT = 1323
|
||||
SP_CURSOR_MISMATCH = 1324
|
||||
SP_CURSOR_ALREADY_OPEN = 1325
|
||||
SP_CURSOR_NOT_OPEN = 1326
|
||||
SP_UNDECLARED_VAR = 1327
|
||||
SP_WRONG_NO_OF_FETCH_ARGS = 1328
|
||||
SP_FETCH_NO_DATA = 1329
|
||||
SP_DUP_PARAM = 1330
|
||||
SP_DUP_VAR = 1331
|
||||
SP_DUP_COND = 1332
|
||||
SP_DUP_CURS = 1333
|
||||
SP_CANT_ALTER = 1334
|
||||
SP_SUBSELECT_NYI = 1335
|
||||
STMT_NOT_ALLOWED_IN_SF_OR_TRG = 1336
|
||||
SP_VARCOND_AFTER_CURSHNDLR = 1337
|
||||
SP_CURSOR_AFTER_HANDLER = 1338
|
||||
SP_CASE_NOT_FOUND = 1339
|
||||
FPARSER_TOO_BIG_FILE = 1340
|
||||
FPARSER_BAD_HEADER = 1341
|
||||
FPARSER_EOF_IN_COMMENT = 1342
|
||||
FPARSER_ERROR_IN_PARAMETER = 1343
|
||||
FPARSER_EOF_IN_UNKNOWN_PARAMETER = 1344
|
||||
VIEW_NO_EXPLAIN = 1345
|
||||
FRM_UNKNOWN_TYPE = 1346
|
||||
WRONG_OBJECT = 1347
|
||||
NONUPDATEABLE_COLUMN = 1348
|
||||
VIEW_SELECT_DERIVED = 1349
|
||||
VIEW_SELECT_CLAUSE = 1350
|
||||
VIEW_SELECT_VARIABLE = 1351
|
||||
VIEW_SELECT_TMPTABLE = 1352
|
||||
VIEW_WRONG_LIST = 1353
|
||||
WARN_VIEW_MERGE = 1354
|
||||
WARN_VIEW_WITHOUT_KEY = 1355
|
||||
VIEW_INVALID = 1356
|
||||
SP_NO_DROP_SP = 1357
|
||||
SP_GOTO_IN_HNDLR = 1358
|
||||
TRG_ALREADY_EXISTS = 1359
|
||||
TRG_DOES_NOT_EXIST = 1360
|
||||
TRG_ON_VIEW_OR_TEMP_TABLE = 1361
|
||||
TRG_CANT_CHANGE_ROW = 1362
|
||||
TRG_NO_SUCH_ROW_IN_TRG = 1363
|
||||
NO_DEFAULT_FOR_FIELD = 1364
|
||||
DIVISION_BY_ZERO = 1365
|
||||
TRUNCATED_WRONG_VALUE_FOR_FIELD = 1366
|
||||
ILLEGAL_VALUE_FOR_TYPE = 1367
|
||||
VIEW_NONUPD_CHECK = 1368
|
||||
VIEW_CHECK_FAILED = 1369
|
||||
PROCACCESS_DENIED_ERROR = 1370
|
||||
RELAY_LOG_FAIL = 1371
|
||||
PASSWD_LENGTH = 1372
|
||||
UNKNOWN_TARGET_BINLOG = 1373
|
||||
IO_ERR_LOG_INDEX_READ = 1374
|
||||
BINLOG_PURGE_PROHIBITED = 1375
|
||||
FSEEK_FAIL = 1376
|
||||
BINLOG_PURGE_FATAL_ERR = 1377
|
||||
LOG_IN_USE = 1378
|
||||
LOG_PURGE_UNKNOWN_ERR = 1379
|
||||
RELAY_LOG_INIT = 1380
|
||||
NO_BINARY_LOGGING = 1381
|
||||
RESERVED_SYNTAX = 1382
|
||||
WSAS_FAILED = 1383
|
||||
DIFF_GROUPS_PROC = 1384
|
||||
NO_GROUP_FOR_PROC = 1385
|
||||
ORDER_WITH_PROC = 1386
|
||||
LOGGING_PROHIBIT_CHANGING_OF = 1387
|
||||
NO_FILE_MAPPING = 1388
|
||||
WRONG_MAGIC = 1389
|
||||
PS_MANY_PARAM = 1390
|
||||
KEY_PART_0 = 1391
|
||||
VIEW_CHECKSUM = 1392
|
||||
VIEW_MULTIUPDATE = 1393
|
||||
VIEW_NO_INSERT_FIELD_LIST = 1394
|
||||
VIEW_DELETE_MERGE_VIEW = 1395
|
||||
CANNOT_USER = 1396
|
||||
XAER_NOTA = 1397
|
||||
XAER_INVAL = 1398
|
||||
XAER_RMFAIL = 1399
|
||||
XAER_OUTSIDE = 1400
|
||||
XAER_RMERR = 1401
|
||||
XA_RBROLLBACK = 1402
|
||||
NONEXISTING_PROC_GRANT = 1403
|
||||
PROC_AUTO_GRANT_FAIL = 1404
|
||||
PROC_AUTO_REVOKE_FAIL = 1405
|
||||
DATA_TOO_LONG = 1406
|
||||
SP_BAD_SQLSTATE = 1407
|
||||
STARTUP = 1408
|
||||
LOAD_FROM_FIXED_SIZE_ROWS_TO_VAR = 1409
|
||||
CANT_CREATE_USER_WITH_GRANT = 1410
|
||||
WRONG_VALUE_FOR_TYPE = 1411
|
||||
TABLE_DEF_CHANGED = 1412
|
||||
SP_DUP_HANDLER = 1413
|
||||
SP_NOT_VAR_ARG = 1414
|
||||
SP_NO_RETSET = 1415
|
||||
CANT_CREATE_GEOMETRY_OBJECT = 1416
|
||||
FAILED_ROUTINE_BREAK_BINLOG = 1417
|
||||
BINLOG_UNSAFE_ROUTINE = 1418
|
||||
BINLOG_CREATE_ROUTINE_NEED_SUPER = 1419
|
||||
EXEC_STMT_WITH_OPEN_CURSOR = 1420
|
||||
STMT_HAS_NO_OPEN_CURSOR = 1421
|
||||
COMMIT_NOT_ALLOWED_IN_SF_OR_TRG = 1422
|
||||
NO_DEFAULT_FOR_VIEW_FIELD = 1423
|
||||
SP_NO_RECURSION = 1424
|
||||
TOO_BIG_SCALE = 1425
|
||||
TOO_BIG_PRECISION = 1426
|
||||
M_BIGGER_THAN_D = 1427
|
||||
WRONG_LOCK_OF_SYSTEM_TABLE = 1428
|
||||
CONNECT_TO_FOREIGN_DATA_SOURCE = 1429
|
||||
QUERY_ON_FOREIGN_DATA_SOURCE = 1430
|
||||
FOREIGN_DATA_SOURCE_DOESNT_EXIST = 1431
|
||||
FOREIGN_DATA_STRING_INVALID_CANT_CREATE = 1432
|
||||
FOREIGN_DATA_STRING_INVALID = 1433
|
||||
CANT_CREATE_FEDERATED_TABLE = 1434
|
||||
TRG_IN_WRONG_SCHEMA = 1435
|
||||
STACK_OVERRUN_NEED_MORE = 1436
|
||||
TOO_LONG_BODY = 1437
|
||||
WARN_CANT_DROP_DEFAULT_KEYCACHE = 1438
|
||||
TOO_BIG_DISPLAYWIDTH = 1439
|
||||
XAER_DUPID = 1440
|
||||
DATETIME_FUNCTION_OVERFLOW = 1441
|
||||
CANT_UPDATE_USED_TABLE_IN_SF_OR_TRG = 1442
|
||||
VIEW_PREVENT_UPDATE = 1443
|
||||
PS_NO_RECURSION = 1444
|
||||
SP_CANT_SET_AUTOCOMMIT = 1445
|
||||
MALFORMED_DEFINER = 1446
|
||||
VIEW_FRM_NO_USER = 1447
|
||||
VIEW_OTHER_USER = 1448
|
||||
NO_SUCH_USER = 1449
|
||||
FORBID_SCHEMA_CHANGE = 1450
|
||||
ROW_IS_REFERENCED_2 = 1451
|
||||
NO_REFERENCED_ROW_2 = 1452
|
||||
SP_BAD_VAR_SHADOW = 1453
|
||||
TRG_NO_DEFINER = 1454
|
||||
OLD_FILE_FORMAT = 1455
|
||||
SP_RECURSION_LIMIT = 1456
|
||||
SP_PROC_TABLE_CORRUPT = 1457
|
||||
SP_WRONG_NAME = 1458
|
||||
TABLE_NEEDS_UPGRADE = 1459
|
||||
SP_NO_AGGREGATE = 1460
|
||||
MAX_PREPARED_STMT_COUNT_REACHED = 1461
|
||||
VIEW_RECURSIVE = 1462
|
||||
NON_GROUPING_FIELD_USED = 1463
|
||||
TABLE_CANT_HANDLE_SPKEYS = 1464
|
||||
NO_TRIGGERS_ON_SYSTEM_SCHEMA = 1465
|
||||
USERNAME = 1466
|
||||
HOSTNAME = 1467
|
||||
WRONG_STRING_LENGTH = 1468
|
||||
ERROR_LAST = 1468
|
||||
|
|
@ -1,44 +0,0 @@
|
|||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
|
||||
|
||||
# MySQL column type codes as sent in column-definition packets
# (MYSQL_TYPE_* in the C client).
DECIMAL = 0
TINY = 1
SHORT = 2
LONG = 3
FLOAT = 4
DOUBLE = 5
NULL = 6
TIMESTAMP = 7
LONGLONG = 8
INT24 = 9
DATE = 10
TIME = 11
DATETIME = 12
YEAR = 13
NEWDATE = 14
VARCHAR = 15
BIT = 16
NEWDECIMAL = 246
ENUM = 247
SET = 248
TINY_BLOB = 249
MEDIUM_BLOB = 250
LONG_BLOB = 251
BLOB = 252
VAR_STRING = 253
STRING = 254
GEOMETRY = 255

# Aliases kept for MySQLdb compatibility.
CHAR = TINY
INTERVAL = ENUM
|
||||
|
|
@ -1,27 +0,0 @@
|
|||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
# Column definition flags (bitmask) from the column-definition packet.
NOT_NULL = 1
PRI_KEY = 2
UNIQUE_KEY = 4
MULTIPLE_KEY = 8
BLOB = 16
UNSIGNED = 32
ZEROFILL = 64
BINARY = 128
ENUM = 256
AUTO_INCREMENT = 512
TIMESTAMP = 1024
SET = 2048
PART_KEY = 16384
GROUP = 32767
UNIQUE = 65536
|
||||
|
|
@ -1,24 +0,0 @@
|
|||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
|
||||
SERVER_STATUS_IN_TRANS = 1
|
||||
SERVER_STATUS_AUTOCOMMIT = 2
|
||||
SERVER_MORE_RESULTS_EXISTS = 8
|
||||
SERVER_QUERY_NO_GOOD_INDEX_USED = 16
|
||||
SERVER_QUERY_NO_INDEX_USED = 32
|
||||
SERVER_STATUS_CURSOR_EXISTS = 64
|
||||
SERVER_STATUS_LAST_ROW_SENT = 128
|
||||
SERVER_STATUS_DB_DROPPED = 256
|
||||
SERVER_STATUS_NO_BACKSLASH_ESCAPES = 512
|
||||
SERVER_STATUS_METADATA_CHANGED = 1024
|
||||
|
||||
|
|
@ -1,12 +0,0 @@
|
|||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
|
|
@ -1,360 +0,0 @@
|
|||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
import re
|
||||
import datetime
|
||||
import time
|
||||
import sys
|
||||
|
||||
from constants import FIELD_TYPE, FLAG
|
||||
from charset import charset_by_id
|
||||
|
||||
PYTHON3 = sys.version_info[0] > 2
|
||||
|
||||
try:
|
||||
set
|
||||
except NameError:
|
||||
try:
|
||||
from sets import BaseSet as set
|
||||
except ImportError:
|
||||
from sets import Set as set
|
||||
|
||||
ESCAPE_REGEX = re.compile(r"[\0\n\r\032\'\"\\]")
|
||||
ESCAPE_MAP = {'\0': '\\0', '\n': '\\n', '\r': '\\r', '\032': '\\Z',
|
||||
'\'': '\\\'', '"': '\\"', '\\': '\\\\'}
|
||||
|
||||
def escape_item(val, charset):
|
||||
if type(val) in [tuple, list, set]:
|
||||
return escape_sequence(val, charset)
|
||||
if type(val) is dict:
|
||||
return escape_dict(val, charset)
|
||||
if PYTHON3 and hasattr(val, "decode") and not isinstance(val, unicode):
|
||||
# deal with py3k bytes
|
||||
val = val.decode(charset)
|
||||
encoder = encoders[type(val)]
|
||||
val = encoder(val)
|
||||
if type(val) is str:
|
||||
return val
|
||||
val = val.encode(charset)
|
||||
return val
|
||||
|
||||
def escape_dict(val, charset):
|
||||
n = {}
|
||||
for k, v in val.items():
|
||||
quoted = escape_item(v, charset)
|
||||
n[k] = quoted
|
||||
return n
|
||||
|
||||
def escape_sequence(val, charset):
|
||||
n = []
|
||||
for item in val:
|
||||
quoted = escape_item(item, charset)
|
||||
n.append(quoted)
|
||||
return "(" + ",".join(n) + ")"
|
||||
|
||||
def escape_set(val, charset):
|
||||
val = map(lambda x: escape_item(x, charset), val)
|
||||
return ','.join(val)
|
||||
|
||||
def escape_bool(value):
|
||||
return str(int(value))
|
||||
|
||||
def escape_object(value):
|
||||
return str(value)
|
||||
|
||||
escape_int = escape_long = escape_object
|
||||
|
||||
def escape_float(value):
|
||||
return ('%.15g' % value)
|
||||
|
||||
def escape_string(value):
|
||||
return ("'%s'" % ESCAPE_REGEX.sub(
|
||||
lambda match: ESCAPE_MAP.get(match.group(0)), value))
|
||||
|
||||
def escape_unicode(value):
|
||||
return escape_string(value)
|
||||
|
||||
def escape_None(value):
|
||||
return 'NULL'
|
||||
|
||||
def escape_timedelta(obj):
|
||||
seconds = int(obj.seconds) % 60
|
||||
minutes = int(obj.seconds // 60) % 60
|
||||
hours = int(obj.seconds // 3600) % 24 + int(obj.days) * 24
|
||||
return escape_string('%02d:%02d:%02d' % (hours, minutes, seconds))
|
||||
|
||||
def escape_time(obj):
|
||||
s = "%02d:%02d:%02d" % (int(obj.hour), int(obj.minute),
|
||||
int(obj.second))
|
||||
if obj.microsecond:
|
||||
s += ".%f" % obj.microsecond
|
||||
|
||||
return escape_string(s)
|
||||
|
||||
def escape_datetime(obj):
|
||||
return escape_string(obj.strftime("%Y-%m-%d %H:%M:%S"))
|
||||
|
||||
def escape_date(obj):
|
||||
return escape_string(obj.strftime("%Y-%m-%d"))
|
||||
|
||||
def escape_struct_time(obj):
|
||||
return escape_datetime(datetime.datetime(*obj[:6]))
|
||||
|
||||
def convert_datetime(connection, field, obj):
|
||||
"""Returns a DATETIME or TIMESTAMP column value as a datetime object:
|
||||
|
||||
>>> datetime_or_None('2007-02-25 23:06:20')
|
||||
datetime.datetime(2007, 2, 25, 23, 6, 20)
|
||||
>>> datetime_or_None('2007-02-25T23:06:20')
|
||||
datetime.datetime(2007, 2, 25, 23, 6, 20)
|
||||
|
||||
Illegal values are returned as None:
|
||||
|
||||
>>> datetime_or_None('2007-02-31T23:06:20') is None
|
||||
True
|
||||
>>> datetime_or_None('0000-00-00 00:00:00') is None
|
||||
True
|
||||
|
||||
"""
|
||||
if not isinstance(obj, unicode):
|
||||
obj = obj.decode(connection.charset)
|
||||
if ' ' in obj:
|
||||
sep = ' '
|
||||
elif 'T' in obj:
|
||||
sep = 'T'
|
||||
else:
|
||||
return convert_date(connection, field, obj)
|
||||
|
||||
try:
|
||||
ymd, hms = obj.split(sep, 1)
|
||||
return datetime.datetime(*[ int(x) for x in ymd.split('-')+hms.split(':') ])
|
||||
except ValueError:
|
||||
return convert_date(connection, field, obj)
|
||||
|
||||
def convert_timedelta(connection, field, obj):
|
||||
"""Returns a TIME column as a timedelta object:
|
||||
|
||||
>>> timedelta_or_None('25:06:17')
|
||||
datetime.timedelta(1, 3977)
|
||||
>>> timedelta_or_None('-25:06:17')
|
||||
datetime.timedelta(-2, 83177)
|
||||
|
||||
Illegal values are returned as None:
|
||||
|
||||
>>> timedelta_or_None('random crap') is None
|
||||
True
|
||||
|
||||
Note that MySQL always returns TIME columns as (+|-)HH:MM:SS, but
|
||||
can accept values as (+|-)DD HH:MM:SS. The latter format will not
|
||||
be parsed correctly by this function.
|
||||
"""
|
||||
from math import modf
|
||||
try:
|
||||
if not isinstance(obj, unicode):
|
||||
obj = obj.decode(connection.charset)
|
||||
hours, minutes, seconds = tuple([int(x) for x in obj.split(':')])
|
||||
tdelta = datetime.timedelta(
|
||||
hours = int(hours),
|
||||
minutes = int(minutes),
|
||||
seconds = int(seconds),
|
||||
microseconds = int(modf(float(seconds))[0]*1000000),
|
||||
)
|
||||
return tdelta
|
||||
except ValueError:
|
||||
return None
|
||||
|
||||
def convert_time(connection, field, obj):
|
||||
"""Returns a TIME column as a time object:
|
||||
|
||||
>>> time_or_None('15:06:17')
|
||||
datetime.time(15, 6, 17)
|
||||
|
||||
Illegal values are returned as None:
|
||||
|
||||
>>> time_or_None('-25:06:17') is None
|
||||
True
|
||||
>>> time_or_None('random crap') is None
|
||||
True
|
||||
|
||||
Note that MySQL always returns TIME columns as (+|-)HH:MM:SS, but
|
||||
can accept values as (+|-)DD HH:MM:SS. The latter format will not
|
||||
be parsed correctly by this function.
|
||||
|
||||
Also note that MySQL's TIME column corresponds more closely to
|
||||
Python's timedelta and not time. However if you want TIME columns
|
||||
to be treated as time-of-day and not a time offset, then you can
|
||||
use set this function as the converter for FIELD_TYPE.TIME.
|
||||
"""
|
||||
from math import modf
|
||||
try:
|
||||
hour, minute, second = obj.split(':')
|
||||
return datetime.time(hour=int(hour), minute=int(minute),
|
||||
second=int(second),
|
||||
microsecond=int(modf(float(second))[0]*1000000))
|
||||
except ValueError:
|
||||
return None
|
||||
|
||||
def convert_date(connection, field, obj):
|
||||
"""Returns a DATE column as a date object:
|
||||
|
||||
>>> date_or_None('2007-02-26')
|
||||
datetime.date(2007, 2, 26)
|
||||
|
||||
Illegal values are returned as None:
|
||||
|
||||
>>> date_or_None('2007-02-31') is None
|
||||
True
|
||||
>>> date_or_None('0000-00-00') is None
|
||||
True
|
||||
|
||||
"""
|
||||
try:
|
||||
if not isinstance(obj, unicode):
|
||||
obj = obj.decode(connection.charset)
|
||||
return datetime.date(*[ int(x) for x in obj.split('-', 2) ])
|
||||
except ValueError:
|
||||
return None
|
||||
|
||||
def convert_mysql_timestamp(connection, field, timestamp):
|
||||
"""Convert a MySQL TIMESTAMP to a Timestamp object.
|
||||
|
||||
MySQL >= 4.1 returns TIMESTAMP in the same format as DATETIME:
|
||||
|
||||
>>> mysql_timestamp_converter('2007-02-25 22:32:17')
|
||||
datetime.datetime(2007, 2, 25, 22, 32, 17)
|
||||
|
||||
MySQL < 4.1 uses a big string of numbers:
|
||||
|
||||
>>> mysql_timestamp_converter('20070225223217')
|
||||
datetime.datetime(2007, 2, 25, 22, 32, 17)
|
||||
|
||||
Illegal values are returned as None:
|
||||
|
||||
>>> mysql_timestamp_converter('2007-02-31 22:32:17') is None
|
||||
True
|
||||
>>> mysql_timestamp_converter('00000000000000') is None
|
||||
True
|
||||
|
||||
"""
|
||||
if not isinstance(timestamp, unicode):
|
||||
timestamp = timestamp.decode(connection.charset)
|
||||
|
||||
if timestamp[4] == '-':
|
||||
return convert_datetime(connection, field, timestamp)
|
||||
timestamp += "0"*(14-len(timestamp)) # padding
|
||||
year, month, day, hour, minute, second = \
|
||||
int(timestamp[:4]), int(timestamp[4:6]), int(timestamp[6:8]), \
|
||||
int(timestamp[8:10]), int(timestamp[10:12]), int(timestamp[12:14])
|
||||
try:
|
||||
return datetime.datetime(year, month, day, hour, minute, second)
|
||||
except ValueError:
|
||||
return None
|
||||
|
||||
def convert_set(s):
|
||||
return set(s.split(","))
|
||||
|
||||
def convert_bit(connection, field, b):
|
||||
#b = "\x00" * (8 - len(b)) + b # pad w/ zeroes
|
||||
#return struct.unpack(">Q", b)[0]
|
||||
#
|
||||
# the snippet above is right, but MySQLdb doesn't process bits,
|
||||
# so we shouldn't either
|
||||
return b
|
||||
|
||||
def convert_characters(connection, field, data):
|
||||
field_charset = charset_by_id(field.charsetnr).name
|
||||
if field.flags & FLAG.SET:
|
||||
return convert_set(data.decode(field_charset))
|
||||
if field.flags & FLAG.BINARY:
|
||||
return data
|
||||
|
||||
if connection.use_unicode:
|
||||
data = data.decode(field_charset)
|
||||
elif connection.charset != field_charset:
|
||||
data = data.decode(field_charset)
|
||||
data = data.encode(connection.charset)
|
||||
return data
|
||||
|
||||
def convert_int(connection, field, data):
|
||||
return int(data)
|
||||
|
||||
def convert_long(connection, field, data):
|
||||
return long(data)
|
||||
|
||||
def convert_float(connection, field, data):
|
||||
return float(data)
|
||||
|
||||
encoders = {
|
||||
bool: escape_bool,
|
||||
int: escape_int,
|
||||
long: escape_long,
|
||||
float: escape_float,
|
||||
str: escape_string,
|
||||
unicode: escape_unicode,
|
||||
tuple: escape_sequence,
|
||||
list:escape_sequence,
|
||||
set:escape_sequence,
|
||||
dict:escape_dict,
|
||||
type(None):escape_None,
|
||||
datetime.date: escape_date,
|
||||
datetime.datetime : escape_datetime,
|
||||
datetime.timedelta : escape_timedelta,
|
||||
datetime.time : escape_time,
|
||||
time.struct_time : escape_struct_time,
|
||||
}
|
||||
|
||||
decoders = {
|
||||
FIELD_TYPE.BIT: convert_bit,
|
||||
FIELD_TYPE.TINY: convert_int,
|
||||
FIELD_TYPE.SHORT: convert_int,
|
||||
FIELD_TYPE.LONG: convert_long,
|
||||
FIELD_TYPE.FLOAT: convert_float,
|
||||
FIELD_TYPE.DOUBLE: convert_float,
|
||||
FIELD_TYPE.DECIMAL: convert_float,
|
||||
FIELD_TYPE.NEWDECIMAL: convert_float,
|
||||
FIELD_TYPE.LONGLONG: convert_long,
|
||||
FIELD_TYPE.INT24: convert_int,
|
||||
FIELD_TYPE.YEAR: convert_int,
|
||||
FIELD_TYPE.TIMESTAMP: convert_mysql_timestamp,
|
||||
FIELD_TYPE.DATETIME: convert_datetime,
|
||||
FIELD_TYPE.TIME: convert_timedelta,
|
||||
FIELD_TYPE.DATE: convert_date,
|
||||
FIELD_TYPE.SET: convert_set,
|
||||
FIELD_TYPE.BLOB: convert_characters,
|
||||
FIELD_TYPE.TINY_BLOB: convert_characters,
|
||||
FIELD_TYPE.MEDIUM_BLOB: convert_characters,
|
||||
FIELD_TYPE.LONG_BLOB: convert_characters,
|
||||
FIELD_TYPE.STRING: convert_characters,
|
||||
FIELD_TYPE.VAR_STRING: convert_characters,
|
||||
FIELD_TYPE.VARCHAR: convert_characters,
|
||||
#FIELD_TYPE.BLOB: str,
|
||||
#FIELD_TYPE.STRING: str,
|
||||
#FIELD_TYPE.VAR_STRING: str,
|
||||
#FIELD_TYPE.VARCHAR: str
|
||||
}
|
||||
conversions = decoders # for MySQLdb compatibility
|
||||
|
||||
try:
|
||||
# python version > 2.3
|
||||
from decimal import Decimal
|
||||
def convert_decimal(connection, field, data):
|
||||
data = data.decode(connection.charset)
|
||||
return Decimal(data)
|
||||
decoders[FIELD_TYPE.DECIMAL] = convert_decimal
|
||||
decoders[FIELD_TYPE.NEWDECIMAL] = convert_decimal
|
||||
|
||||
def escape_decimal(obj):
|
||||
return unicode(obj)
|
||||
encoders[Decimal] = escape_decimal
|
||||
|
||||
except ImportError:
|
||||
pass
|
||||
|
|
@ -1,309 +0,0 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
import struct
|
||||
import re
|
||||
|
||||
try:
|
||||
import cStringIO as StringIO
|
||||
except ImportError:
|
||||
import StringIO
|
||||
|
||||
from err import Warning, Error, InterfaceError, DataError, \
|
||||
DatabaseError, OperationalError, IntegrityError, InternalError, \
|
||||
NotSupportedError, ProgrammingError
|
||||
|
||||
insert_values = re.compile(r'\svalues\s*(\(.+\))', re.IGNORECASE)
|
||||
|
||||
class Cursor(object):
|
||||
'''
|
||||
This is the object you use to interact with the database.
|
||||
'''
|
||||
def __init__(self, connection):
|
||||
'''
|
||||
Do not create an instance of a Cursor yourself. Call
|
||||
connections.Connection.cursor().
|
||||
'''
|
||||
from weakref import proxy
|
||||
self.connection = proxy(connection)
|
||||
self.description = None
|
||||
self.rownumber = 0
|
||||
self.rowcount = -1
|
||||
self.arraysize = 1
|
||||
self._executed = None
|
||||
self.messages = []
|
||||
self.errorhandler = connection.errorhandler
|
||||
self._has_next = None
|
||||
self._rows = ()
|
||||
|
||||
def __del__(self):
|
||||
'''
|
||||
When this gets GC'd close it.
|
||||
'''
|
||||
self.close()
|
||||
|
||||
def close(self):
|
||||
'''
|
||||
Closing a cursor just exhausts all remaining data.
|
||||
'''
|
||||
if not self.connection:
|
||||
return
|
||||
try:
|
||||
while self.nextset():
|
||||
pass
|
||||
except:
|
||||
pass
|
||||
|
||||
self.connection = None
|
||||
|
||||
def _get_db(self):
|
||||
if not self.connection:
|
||||
self.errorhandler(self, ProgrammingError, "cursor closed")
|
||||
return self.connection
|
||||
|
||||
def _check_executed(self):
|
||||
if not self._executed:
|
||||
self.errorhandler(self, ProgrammingError, "execute() first")
|
||||
|
||||
def setinputsizes(self, *args):
|
||||
"""Does nothing, required by DB API."""
|
||||
|
||||
def setoutputsizes(self, *args):
|
||||
"""Does nothing, required by DB API."""
|
||||
|
||||
def nextset(self):
|
||||
''' Get the next query set '''
|
||||
if self._executed:
|
||||
self.fetchall()
|
||||
del self.messages[:]
|
||||
|
||||
if not self._has_next:
|
||||
return None
|
||||
connection = self._get_db()
|
||||
connection.next_result()
|
||||
self._do_get_result()
|
||||
return True
|
||||
|
||||
def execute(self, query, args=None):
|
||||
''' Execute a query '''
|
||||
from sys import exc_info
|
||||
|
||||
conn = self._get_db()
|
||||
charset = conn.charset
|
||||
del self.messages[:]
|
||||
|
||||
# TODO: make sure that conn.escape is correct
|
||||
|
||||
if args is not None:
|
||||
if isinstance(args, tuple) or isinstance(args, list):
|
||||
escaped_args = tuple(conn.escape(arg) for arg in args)
|
||||
elif isinstance(args, dict):
|
||||
escaped_args = dict((key, conn.escape(val)) for (key, val) in args.items())
|
||||
else:
|
||||
#If it's not a dictionary let's try escaping it anyways.
|
||||
#Worst case it will throw a Value error
|
||||
escaped_args = conn.escape(args)
|
||||
|
||||
query = query % escaped_args
|
||||
|
||||
if isinstance(query, unicode):
|
||||
query = query.encode(charset)
|
||||
|
||||
result = 0
|
||||
try:
|
||||
result = self._query(query)
|
||||
except:
|
||||
exc, value, tb = exc_info()
|
||||
del tb
|
||||
self.messages.append((exc,value))
|
||||
self.errorhandler(self, exc, value)
|
||||
|
||||
self._executed = query
|
||||
return result
|
||||
|
||||
def executemany(self, query, args):
|
||||
''' Run several data against one query '''
|
||||
del self.messages[:]
|
||||
#conn = self._get_db()
|
||||
if not args:
|
||||
return
|
||||
#charset = conn.charset
|
||||
#if isinstance(query, unicode):
|
||||
# query = query.encode(charset)
|
||||
|
||||
self.rowcount = sum([ self.execute(query, arg) for arg in args ])
|
||||
return self.rowcount
|
||||
|
||||
|
||||
def callproc(self, procname, args=()):
|
||||
"""Execute stored procedure procname with args
|
||||
|
||||
procname -- string, name of procedure to execute on server
|
||||
|
||||
args -- Sequence of parameters to use with procedure
|
||||
|
||||
Returns the original args.
|
||||
|
||||
Compatibility warning: PEP-249 specifies that any modified
|
||||
parameters must be returned. This is currently impossible
|
||||
as they are only available by storing them in a server
|
||||
variable and then retrieved by a query. Since stored
|
||||
procedures return zero or more result sets, there is no
|
||||
reliable way to get at OUT or INOUT parameters via callproc.
|
||||
The server variables are named @_procname_n, where procname
|
||||
is the parameter above and n is the position of the parameter
|
||||
(from zero). Once all result sets generated by the procedure
|
||||
have been fetched, you can issue a SELECT @_procname_0, ...
|
||||
query using .execute() to get any OUT or INOUT values.
|
||||
|
||||
Compatibility warning: The act of calling a stored procedure
|
||||
itself creates an empty result set. This appears after any
|
||||
result sets generated by the procedure. This is non-standard
|
||||
behavior with respect to the DB-API. Be sure to use nextset()
|
||||
to advance through all result sets; otherwise you may get
|
||||
disconnected.
|
||||
"""
|
||||
conn = self._get_db()
|
||||
for index, arg in enumerate(args):
|
||||
q = "SET @_%s_%d=%s" % (procname, index, conn.escape(arg))
|
||||
if isinstance(q, unicode):
|
||||
q = q.encode(conn.charset)
|
||||
self._query(q)
|
||||
self.nextset()
|
||||
|
||||
q = "CALL %s(%s)" % (procname,
|
||||
','.join(['@_%s_%d' % (procname, i)
|
||||
for i in range(len(args))]))
|
||||
if isinstance(q, unicode):
|
||||
q = q.encode(conn.charset)
|
||||
self._query(q)
|
||||
self._executed = q
|
||||
|
||||
return args
|
||||
|
||||
def fetchone(self):
|
||||
''' Fetch the next row '''
|
||||
self._check_executed()
|
||||
if self._rows is None or self.rownumber >= len(self._rows):
|
||||
return None
|
||||
result = self._rows[self.rownumber]
|
||||
self.rownumber += 1
|
||||
return result
|
||||
|
||||
def fetchmany(self, size=None):
|
||||
''' Fetch several rows '''
|
||||
self._check_executed()
|
||||
end = self.rownumber + (size or self.arraysize)
|
||||
result = self._rows[self.rownumber:end]
|
||||
if self._rows is None:
|
||||
return None
|
||||
self.rownumber = min(end, len(self._rows))
|
||||
return result
|
||||
|
||||
def fetchall(self):
|
||||
''' Fetch all the rows '''
|
||||
self._check_executed()
|
||||
if self._rows is None:
|
||||
return None
|
||||
if self.rownumber:
|
||||
result = self._rows[self.rownumber:]
|
||||
else:
|
||||
result = self._rows
|
||||
self.rownumber = len(self._rows)
|
||||
return result
|
||||
|
||||
def scroll(self, value, mode='relative'):
|
||||
self._check_executed()
|
||||
if mode == 'relative':
|
||||
r = self.rownumber + value
|
||||
elif mode == 'absolute':
|
||||
r = value
|
||||
else:
|
||||
self.errorhandler(self, ProgrammingError,
|
||||
"unknown scroll mode %s" % mode)
|
||||
|
||||
if r < 0 or r >= len(self._rows):
|
||||
self.errorhandler(self, IndexError, "out of range")
|
||||
self.rownumber = r
|
||||
|
||||
def _query(self, q):
|
||||
conn = self._get_db()
|
||||
self._last_executed = q
|
||||
conn.query(q)
|
||||
self._do_get_result()
|
||||
return self.rowcount
|
||||
|
||||
def _do_get_result(self):
|
||||
conn = self._get_db()
|
||||
self.rowcount = conn._result.affected_rows
|
||||
|
||||
self.rownumber = 0
|
||||
self.description = conn._result.description
|
||||
self.lastrowid = conn._result.insert_id
|
||||
self._rows = conn._result.rows
|
||||
self._has_next = conn._result.has_next
|
||||
|
||||
def __iter__(self):
|
||||
return iter(self.fetchone, None)
|
||||
|
||||
Warning = Warning
|
||||
Error = Error
|
||||
InterfaceError = InterfaceError
|
||||
DatabaseError = DatabaseError
|
||||
DataError = DataError
|
||||
OperationalError = OperationalError
|
||||
IntegrityError = IntegrityError
|
||||
InternalError = InternalError
|
||||
ProgrammingError = ProgrammingError
|
||||
NotSupportedError = NotSupportedError
|
||||
|
||||
class DictCursor(Cursor):
|
||||
"""A cursor which returns results as a dictionary"""
|
||||
|
||||
def execute(self, query, args=None):
|
||||
result = super(DictCursor, self).execute(query, args)
|
||||
if self.description:
|
||||
self._fields = [ field[0] for field in self.description ]
|
||||
return result
|
||||
|
||||
def fetchone(self):
|
||||
''' Fetch the next row '''
|
||||
self._check_executed()
|
||||
if self._rows is None or self.rownumber >= len(self._rows):
|
||||
return None
|
||||
result = dict(zip(self._fields, self._rows[self.rownumber]))
|
||||
self.rownumber += 1
|
||||
return result
|
||||
|
||||
def fetchmany(self, size=None):
|
||||
''' Fetch several rows '''
|
||||
self._check_executed()
|
||||
if self._rows is None:
|
||||
return None
|
||||
end = self.rownumber + (size or self.arraysize)
|
||||
result = [ dict(zip(self._fields, r)) for r in self._rows[self.rownumber:end] ]
|
||||
self.rownumber = min(end, len(self._rows))
|
||||
return tuple(result)
|
||||
|
||||
def fetchall(self):
|
||||
''' Fetch all the rows '''
|
||||
self._check_executed()
|
||||
if self._rows is None:
|
||||
return None
|
||||
if self.rownumber:
|
||||
result = [ dict(zip(self._fields, r)) for r in self._rows[self.rownumber:] ]
|
||||
else:
|
||||
result = [ dict(zip(self._fields, r)) for r in self._rows ]
|
||||
self.rownumber = len(self._rows)
|
||||
return tuple(result)
|
||||
|
||||
|
|
@ -1,159 +0,0 @@
|
|||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
import struct
|
||||
|
||||
|
||||
try:
|
||||
StandardError, Warning
|
||||
except ImportError:
|
||||
try:
|
||||
from exceptions import StandardError, Warning
|
||||
except ImportError:
|
||||
import sys
|
||||
e = sys.modules['exceptions']
|
||||
StandardError = e.StandardError
|
||||
Warning = e.Warning
|
||||
|
||||
from constants import ER
|
||||
import sys
|
||||
|
||||
class MySQLError(StandardError):
|
||||
|
||||
"""Exception related to operation with MySQL."""
|
||||
|
||||
|
||||
class Warning(Warning, MySQLError):
|
||||
|
||||
"""Exception raised for important warnings like data truncations
|
||||
while inserting, etc."""
|
||||
|
||||
class Error(MySQLError):
|
||||
|
||||
"""Exception that is the base class of all other error exceptions
|
||||
(not Warning)."""
|
||||
|
||||
|
||||
class InterfaceError(Error):
|
||||
|
||||
"""Exception raised for errors that are related to the database
|
||||
interface rather than the database itself."""
|
||||
|
||||
|
||||
class DatabaseError(Error):
|
||||
|
||||
"""Exception raised for errors that are related to the
|
||||
database."""
|
||||
|
||||
|
||||
class DataError(DatabaseError):
|
||||
|
||||
"""Exception raised for errors that are due to problems with the
|
||||
processed data like division by zero, numeric value out of range,
|
||||
etc."""
|
||||
|
||||
|
||||
class OperationalError(DatabaseError):
|
||||
|
||||
"""Exception raised for errors that are related to the database's
|
||||
operation and not necessarily under the control of the programmer,
|
||||
e.g. an unexpected disconnect occurs, the data source name is not
|
||||
found, a transaction could not be processed, a memory allocation
|
||||
error occurred during processing, etc."""
|
||||
|
||||
|
||||
class IntegrityError(DatabaseError):
|
||||
|
||||
"""Exception raised when the relational integrity of the database
|
||||
is affected, e.g. a foreign key check fails, duplicate key,
|
||||
etc."""
|
||||
|
||||
|
||||
class InternalError(DatabaseError):
|
||||
|
||||
"""Exception raised when the database encounters an internal
|
||||
error, e.g. the cursor is not valid anymore, the transaction is
|
||||
out of sync, etc."""
|
||||
|
||||
|
||||
class ProgrammingError(DatabaseError):
|
||||
|
||||
"""Exception raised for programming errors, e.g. table not found
|
||||
or already exists, syntax error in the SQL statement, wrong number
|
||||
of parameters specified, etc."""
|
||||
|
||||
|
||||
class NotSupportedError(DatabaseError):
|
||||
|
||||
"""Exception raised in case a method or database API was used
|
||||
which is not supported by the database, e.g. requesting a
|
||||
.rollback() on a connection that does not support transaction or
|
||||
has transactions turned off."""
|
||||
|
||||
|
||||
error_map = {}
|
||||
|
||||
def _map_error(exc, *errors):
|
||||
for error in errors:
|
||||
error_map[error] = exc
|
||||
|
||||
_map_error(ProgrammingError, ER.DB_CREATE_EXISTS, ER.SYNTAX_ERROR,
|
||||
ER.PARSE_ERROR, ER.NO_SUCH_TABLE, ER.WRONG_DB_NAME,
|
||||
ER.WRONG_TABLE_NAME, ER.FIELD_SPECIFIED_TWICE,
|
||||
ER.INVALID_GROUP_FUNC_USE, ER.UNSUPPORTED_EXTENSION,
|
||||
ER.TABLE_MUST_HAVE_COLUMNS, ER.CANT_DO_THIS_DURING_AN_TRANSACTION)
|
||||
_map_error(DataError, ER.WARN_DATA_TRUNCATED, ER.WARN_NULL_TO_NOTNULL,
|
||||
ER.WARN_DATA_OUT_OF_RANGE, ER.NO_DEFAULT, ER.PRIMARY_CANT_HAVE_NULL,
|
||||
ER.DATA_TOO_LONG, ER.DATETIME_FUNCTION_OVERFLOW)
|
||||
_map_error(IntegrityError, ER.DUP_ENTRY, ER.NO_REFERENCED_ROW,
|
||||
ER.NO_REFERENCED_ROW_2, ER.ROW_IS_REFERENCED, ER.ROW_IS_REFERENCED_2,
|
||||
ER.CANNOT_ADD_FOREIGN)
|
||||
_map_error(NotSupportedError, ER.WARNING_NOT_COMPLETE_ROLLBACK,
|
||||
ER.NOT_SUPPORTED_YET, ER.FEATURE_DISABLED, ER.UNKNOWN_STORAGE_ENGINE)
|
||||
_map_error(OperationalError, ER.DBACCESS_DENIED_ERROR, ER.ACCESS_DENIED_ERROR,
|
||||
ER.TABLEACCESS_DENIED_ERROR, ER.COLUMNACCESS_DENIED_ERROR)
|
||||
|
||||
del _map_error, ER
|
||||
|
||||
|
||||
def _get_error_info(data):
|
||||
errno = struct.unpack('<h', data[1:3])[0]
|
||||
if sys.version_info[0] == 3:
|
||||
is_41 = data[3] == ord("#")
|
||||
else:
|
||||
is_41 = data[3] == "#"
|
||||
if is_41:
|
||||
# version 4.1
|
||||
sqlstate = data[4:9].decode("utf8")
|
||||
errorvalue = data[9:].decode("utf8")
|
||||
return (errno, sqlstate, errorvalue)
|
||||
else:
|
||||
# version 4.0
|
||||
return (errno, None, data[3:].decode("utf8"))
|
||||
|
||||
def _check_mysql_exception(errinfo):
|
||||
errno, sqlstate, errorvalue = errinfo
|
||||
errorclass = error_map.get(errno, None)
|
||||
if errorclass:
|
||||
raise errorclass, (errno,errorvalue)
|
||||
|
||||
# couldn't find the right error number
|
||||
raise InternalError, (errno, errorvalue)
|
||||
|
||||
def raise_mysql_exception(data):
|
||||
errinfo = _get_error_info(data)
|
||||
_check_mysql_exception(errinfo)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
|
@ -1,25 +0,0 @@
|
|||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
from pymysql.tests.test_issues import *
|
||||
from pymysql.tests.test_example import *
|
||||
from pymysql.tests.test_basic import *
|
||||
from pymysql.tests.test_DictCursor import *
|
||||
|
||||
import sys
|
||||
if sys.version_info[0] == 2:
|
||||
# MySQLdb tests were designed for Python 3
|
||||
from pymysql.tests.thirdparty import *
|
||||
|
||||
if __name__ == "__main__":
|
||||
import unittest
|
||||
unittest.main()
|
||||
|
|
@ -1,32 +0,0 @@
|
|||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
import pymysql
|
||||
import unittest
|
||||
|
||||
class PyMySQLTestCase(unittest.TestCase):
    """Base case for pymysql tests.

    Opens one connection per entry in ``databases`` in setUp and closes
    them all again in tearDown; subclasses use ``self.connections``.
    """

    # Edit this list to suit your local MySQL test environment.
    databases = [
        {"host":"localhost","user":"root",
         "passwd":"","db":"test_pymysql", "use_unicode": True},
        {"host":"localhost","user":"root","passwd":"","db":"test_pymysql2"}]

    def setUp(self):
        # One live connection per configured database, in the same order.
        self.connections = [pymysql.connect(**cfg) for cfg in self.databases]

    def tearDown(self):
        # Release every connection opened by setUp.
        for conn in self.connections:
            conn.close()
|
||||
|
||||
|
|
@ -1,68 +0,0 @@
|
|||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
from pymysql.tests import base
|
||||
import pymysql.cursors
|
||||
|
||||
import datetime
|
||||
|
||||
class TestDictCursor(base.PyMySQLTestCase):
    """Exercise pymysql.cursors.DictCursor against a live server."""

    def test_DictCursor(self):
        # All assertions compare against the structure as it would come
        # out of MySQLdb.
        conn = self.connections[0]
        c = conn.cursor(pymysql.cursors.DictCursor)
        # Create a table and some data to query.
        c.execute("""CREATE TABLE dictcursor (name char(20), age int , DOB datetime)""")
        data = (("bob", 21, "1990-02-06 23:04:56"),
                ("jim", 56, "1955-05-09 13:12:45"),
                ("fred", 100, "1911-09-12 01:01:01"))
        # FIX: the datetime components were originally written with leading
        # zeros (02, 04, 05) -- octal literals in Python 2 and a SyntaxError
        # in Python 3.  Spelled as plain decimal ints here; values identical.
        bob = {'name': 'bob', 'age': 21,
               'DOB': datetime.datetime(1990, 2, 6, 23, 4, 56)}
        jim = {'name': 'jim', 'age': 56,
               'DOB': datetime.datetime(1955, 5, 9, 13, 12, 45)}
        fred = {'name': 'fred', 'age': 100,
                'DOB': datetime.datetime(1911, 9, 12, 1, 1, 1)}
        try:
            c.executemany("insert into dictcursor values (%s,%s,%s)", data)
            # An UPDATE should return no rows.
            c.execute("update dictcursor set age=20 where name='bob'")
            bob['age'] = 20
            # Pull back the single-row dict for bob and check it.
            c.execute("SELECT * from dictcursor where name='bob'")
            r = c.fetchone()
            self.assertEqual(bob, r, "fetchone via DictCursor failed")
            # Same again, but via fetchall => tuple of dicts.
            c.execute("SELECT * from dictcursor where name='bob'")
            r = c.fetchall()
            self.assertEqual((bob,), r, "fetch a 1 row result via fetchall failed via DictCursor")
            # Same test again but iterate over the cursor.
            c.execute("SELECT * from dictcursor where name='bob'")
            for r in c:
                self.assertEqual(bob, r, "fetch a 1 row result via iteration failed via DictCursor")
            # Get all 3 rows via fetchall.
            c.execute("SELECT * from dictcursor")
            r = c.fetchall()
            self.assertEqual((bob, jim, fred), r, "fetchall failed via DictCursor")
            # Same test again but with a list comprehension.
            c.execute("SELECT * from dictcursor")
            r = [x for x in c]
            self.assertEqual([bob, jim, fred], r, "list comprehension failed via DictCursor")
            # Get 2 of the rows via fetchmany.
            c.execute("SELECT * from dictcursor")
            r = c.fetchmany(2)
            self.assertEqual((bob, jim), r, "fetchmany failed via DictCursor")
        finally:
            c.execute("drop table dictcursor")
|
||||
|
||||
__all__ = ["TestDictCursor"]
|
||||
|
||||
if __name__ == "__main__":
|
||||
import unittest
|
||||
unittest.main()
|
||||
|
|
@ -1,205 +0,0 @@
|
|||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
from pymysql.tests import base
|
||||
from pymysql import util
|
||||
|
||||
import time
|
||||
import datetime
|
||||
|
||||
class TestConversion(base.PyMySQLTestCase):
    """Round-trip tests for pymysql's Python <-> MySQL type conversion."""

    def test_datatypes(self):
        """ test every data type """
        conn = self.connections[0]
        c = conn.cursor()
        # One column per supported type; `st` receives a time.struct_time.
        c.execute("create table test_datatypes (b bit, i int, l bigint, f real, s varchar(32), u varchar(32), bb blob, d date, dt datetime, ts timestamp, td time, t time, st datetime)")
        try:
            # insert values
            v = (True, -3, 123456789012, 5.7, "hello'\" world", u"Espa\xc3\xb1ol", "binary\x00data".encode(conn.charset), datetime.date(1988,2,2), datetime.datetime.now(), datetime.timedelta(5,6), datetime.time(16,32), time.localtime())
            c.execute("insert into test_datatypes (b,i,l,f,s,u,bb,d,dt,td,t,st) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)", v)
            c.execute("select b,i,l,f,s,u,bb,d,dt,td,t,st from test_datatypes")
            r = c.fetchone()
            # The BIT column comes back as a single byte: \x01 for True.
            self.assertEqual(util.int2byte(1), r[0])
            # int/bigint/real/varchar/blob columns round-trip unchanged.
            self.assertEqual(v[1:8], r[1:8])
            # mysql throws away microseconds so we need to check datetimes
            # specially. additionally times are turned into timedeltas.
            self.assertEqual(datetime.datetime(*v[8].timetuple()[:6]), r[8])
            self.assertEqual(v[9], r[9]) # just timedeltas
            # TIME columns come back as timedeltas measured from midnight.
            self.assertEqual(datetime.timedelta(0, 60 * (v[10].hour * 60 + v[10].minute)), r[10])
            # struct_time inserted into a datetime column comes back as datetime.
            self.assertEqual(datetime.datetime(*v[-1][:6]), r[-1])

            c.execute("delete from test_datatypes")

            # check nulls
            c.execute("insert into test_datatypes (b,i,l,f,s,u,bb,d,dt,td,t,st) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)", [None] * 12)
            c.execute("select b,i,l,f,s,u,bb,d,dt,td,t,st from test_datatypes")
            r = c.fetchone()
            self.assertEqual(tuple([None] * 12), r)

            c.execute("delete from test_datatypes")

            # check sequence type
            # A tuple bound to `in %s` must expand into an IN (...) list.
            c.execute("insert into test_datatypes (i, l) values (2,4), (6,8), (10,12)")
            c.execute("select l from test_datatypes where i in %s order by i", ((2,6),))
            r = c.fetchall()
            self.assertEqual(((4,),(8,)), r)
        finally:
            c.execute("drop table test_datatypes")

    def test_dict(self):
        """ test dict escaping """
        conn = self.connections[0]
        c = conn.cursor()
        c.execute("create table test_dict (a integer, b integer, c integer)")
        try:
            # Named-parameter (pyformat) substitution from a dict.
            c.execute("insert into test_dict (a,b,c) values (%(a)s, %(b)s, %(c)s)", {"a":1,"b":2,"c":3})
            c.execute("select a,b,c from test_dict")
            self.assertEqual((1,2,3), c.fetchone())
        finally:
            c.execute("drop table test_dict")

    def test_string(self):
        """Round-trip a plain string through a TEXT column."""
        conn = self.connections[0]
        c = conn.cursor()
        c.execute("create table test_dict (a text)")
        test_value = "I am a test string"
        try:
            # NOTE: a bare (non-tuple) parameter is passed here; pymysql
            # accepts this for a single value.
            c.execute("insert into test_dict (a) values (%s)", test_value)
            c.execute("select a from test_dict")
            self.assertEqual((test_value,), c.fetchone())
        finally:
            c.execute("drop table test_dict")

    def test_integer(self):
        """Round-trip a plain integer through an INTEGER column."""
        conn = self.connections[0]
        c = conn.cursor()
        c.execute("create table test_dict (a integer)")
        test_value = 12345
        try:
            c.execute("insert into test_dict (a) values (%s)", test_value)
            c.execute("select a from test_dict")
            self.assertEqual((test_value,), c.fetchone())
        finally:
            c.execute("drop table test_dict")


    def test_big_blob(self):
        """ test tons of data """
        conn = self.connections[0]
        c = conn.cursor()
        c.execute("create table test_big_blob (b blob)")
        try:
            # 7 KiB payload; the BLOB comes back as bytes in the
            # connection's charset.
            data = "pymysql" * 1024
            c.execute("insert into test_big_blob (b) values (%s)", (data,))
            c.execute("select b from test_big_blob")
            self.assertEqual(data.encode(conn.charset), c.fetchone()[0])
        finally:
            c.execute("drop table test_big_blob")
|
||||
|
||||
class TestCursor(base.PyMySQLTestCase):
    """Cursor behaviour tests: fetch with no result set, aggregate
    functions, and single-element tuple expansion in `in %s` clauses."""

    # A former `test_description` was kept here as a large commented-out
    # block: it compared `cursor.description` for `select * from mysql.user`
    # against MySQLdb's exact rows.  pymysql does not reproduce them; it
    # substitutes None for the erroneous field, which is compatible with
    # the DB-API 2.0 spec and has not broken any other unit tests, so the
    # dead code has been removed.

    def test_fetch_no_result(self):
        """ test a fetchone() with no rows """
        conn = self.connections[0]
        c = conn.cursor()
        c.execute("create table test_nr (b varchar(32))")
        try:
            data = "pymysql"
            c.execute("insert into test_nr (b) values (%s)", (data,))
            # An INSERT produces no result set, so fetchone() is None.
            self.assertEqual(None, c.fetchone())
        finally:
            c.execute("drop table test_nr")

    def test_aggregates(self):
        """ test aggregate functions """
        conn = self.connections[0]
        c = conn.cursor()
        try:
            c.execute('create table test_aggregates (i integer)')
            # FIX: originally `xrange` (Python 2 only, NameError on Py3);
            # `range` iterates identically here.
            for i in range(0, 10):
                c.execute('insert into test_aggregates (i) values (%s)', (i,))
            c.execute('select sum(i) from test_aggregates')
            r, = c.fetchone()
            self.assertEqual(sum(range(0, 10)), r)
        finally:
            c.execute('drop table test_aggregates')

    def test_single_tuple(self):
        """ test a single tuple """
        conn = self.connections[0]
        c = conn.cursor()
        try:
            c.execute("create table mystuff (id integer primary key)")
            c.execute("insert into mystuff (id) values (1)")
            c.execute("insert into mystuff (id) values (2)")
            # A one-element tuple bound to `in %s` must still expand to
            # a valid IN (...) list.
            c.execute("select id from mystuff where id in %s", ((1,),))
            self.assertEqual([(1,)], list(c.fetchall()))
        finally:
            c.execute("drop table mystuff")
|
||||
|
||||
__all__ = ["TestConversion","TestCursor"]
|
||||
|
||||
if __name__ == "__main__":
|
||||
import unittest
|
||||
unittest.main()
|
||||
|
|
@ -1,44 +0,0 @@
|
|||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
import pymysql
|
||||
from pymysql.tests import base
|
||||
|
||||
class TestExample(base.PyMySQLTestCase):
    """Smoke test mirroring the README example: connect directly and
    verify the connecting user appears in mysql.user."""

    def test_example(self):
        conn = pymysql.connect(host='127.0.0.1', port=3306, user='root', passwd='', db='mysql')

        cur = conn.cursor()

        cur.execute("SELECT Host,User FROM user")

        # The user we connected as must show up in at least one row.
        found = False

        for row in cur.fetchall():
            found = found or conn.user in row

        self.assertTrue(found)

        cur.close()
        conn.close()
|
||||
|
||||
__all__ = ["TestExample"]
|
||||
|
||||
if __name__ == "__main__":
|
||||
import unittest
|
||||
unittest.main()
|
||||
|
|
@ -1,280 +0,0 @@
|
|||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
import pymysql
|
||||
from pymysql.tests import base
|
||||
|
||||
import sys
|
||||
|
||||
try:
|
||||
import imp
|
||||
reload = imp.reload
|
||||
except AttributeError:
|
||||
pass
|
||||
|
||||
import datetime
|
||||
|
||||
class TestOldIssues(base.PyMySQLTestCase):
    """Regression tests for historic pymysql tracker issues (3-17).

    NOTE: this is Python 2 source (it uses the ``1L`` long literal below).
    """

    def test_issue_3(self):
        """ undefined methods datetime_or_None, date_or_None """
        conn = self.connections[0]
        c = conn.cursor()
        c.execute("create table issue3 (d date, t time, dt datetime, ts timestamp)")
        try:
            # NULL date/time/datetime round-trip as None; the timestamp
            # column is auto-populated by MySQL, so it comes back as a
            # datetime instead.
            c.execute("insert into issue3 (d, t, dt, ts) values (%s,%s,%s,%s)", (None, None, None, None))
            c.execute("select d from issue3")
            self.assertEqual(None, c.fetchone()[0])
            c.execute("select t from issue3")
            self.assertEqual(None, c.fetchone()[0])
            c.execute("select dt from issue3")
            self.assertEqual(None, c.fetchone()[0])
            c.execute("select ts from issue3")
            self.assertTrue(isinstance(c.fetchone()[0], datetime.datetime))
        finally:
            c.execute("drop table issue3")

    def test_issue_4(self):
        """ can't retrieve TIMESTAMP fields """
        conn = self.connections[0]
        c = conn.cursor()
        c.execute("create table issue4 (ts timestamp)")
        try:
            c.execute("insert into issue4 (ts) values (now())")
            c.execute("select ts from issue4")
            self.assertTrue(isinstance(c.fetchone()[0], datetime.datetime))
        finally:
            c.execute("drop table issue4")

    def test_issue_5(self):
        """ query on information_schema.tables fails """
        # Passes if the query simply executes without raising.
        con = self.connections[0]
        cur = con.cursor()
        cur.execute("select * from information_schema.tables")

    def test_issue_6(self):
        """ exception: TypeError: ord() expected a character, but string of length 0 found """
        # Uses a dedicated connection to the `mysql` schema rather than
        # the shared fixtures.
        conn = pymysql.connect(host="localhost",user="root",passwd="",db="mysql")
        c = conn.cursor()
        c.execute("select * from user")
        conn.close()

    def test_issue_8(self):
        """ Primary Key and Index error when selecting data """
        conn = self.connections[0]
        c = conn.cursor()
        c.execute("""CREATE TABLE `test` (`station` int(10) NOT NULL DEFAULT '0', `dh`
datetime NOT NULL DEFAULT '0000-00-00 00:00:00', `echeance` int(1) NOT NULL
DEFAULT '0', `me` double DEFAULT NULL, `mo` double DEFAULT NULL, PRIMARY
KEY (`station`,`dh`,`echeance`)) ENGINE=MyISAM DEFAULT CHARSET=latin1;""")
        try:
            # SELECT on the empty table must report 0 rows, both before
            # and after an extra index is added.
            self.assertEqual(0, c.execute("SELECT * FROM test"))
            c.execute("ALTER TABLE `test` ADD INDEX `idx_station` (`station`)")
            self.assertEqual(0, c.execute("SELECT * FROM test"))
        finally:
            c.execute("drop table test")

    def test_issue_9(self):
        """ sets DeprecationWarning in Python 2.6 """
        # Re-importing the package must not raise a DeprecationWarning.
        try:
            reload(pymysql)
        except DeprecationWarning:
            self.fail()

    def test_issue_10(self):
        """ Allocate a variable to return when the exception handler is permissive """
        conn = self.connections[0]
        # With a no-op errorhandler installed, the duplicate CREATE below
        # must be swallowed rather than raised.
        conn.errorhandler = lambda cursor, errorclass, errorvalue: None
        cur = conn.cursor()
        cur.execute( "create table t( n int )" )
        cur.execute( "create table t( n int )" )

    def test_issue_13(self):
        """ can't handle large result fields """
        conn = self.connections[0]
        cur = conn.cursor()
        try:
            cur.execute("create table issue13 (t text)")
            # ticket says 18k
            size = 18*1024
            cur.execute("insert into issue13 (t) values (%s)", ("x" * size,))
            cur.execute("select t from issue13")
            # use assert_ so that obscenely huge error messages don't print
            r = cur.fetchone()[0]
            self.assert_("x" * size == r)
        finally:
            cur.execute("drop table issue13")

    def test_issue_14(self):
        """ typo in converters.py """
        # Both int and (Python 2) long must escape to the same literal.
        self.assertEqual('1', pymysql.converters.escape_item(1, "utf8"))
        self.assertEqual('1', pymysql.converters.escape_item(1L, "utf8"))

        self.assertEqual('1', pymysql.converters.escape_object(1))
        self.assertEqual('1', pymysql.converters.escape_object(1L))

    def test_issue_15(self):
        """ query should be expanded before perform character encoding """
        conn = self.connections[0]
        c = conn.cursor()
        c.execute("create table issue15 (t varchar(32))")
        try:
            # Non-ASCII parameter must survive the parameter-expansion /
            # encoding round trip intact.
            c.execute("insert into issue15 (t) values (%s)", (u'\xe4\xf6\xfc',))
            c.execute("select t from issue15")
            self.assertEqual(u'\xe4\xf6\xfc', c.fetchone()[0])
        finally:
            c.execute("drop table issue15")

    def test_issue_16(self):
        """ Patch for string and tuple escaping """
        conn = self.connections[0]
        c = conn.cursor()
        c.execute("create table issue16 (name varchar(32) primary key, email varchar(32))")
        try:
            c.execute("insert into issue16 (name, email) values ('pete', 'floydophone')")
            c.execute("select email from issue16 where name=%s", ("pete",))
            self.assertEqual("floydophone", c.fetchone()[0])
        finally:
            c.execute("drop table issue16")

    def test_issue_17(self):
        """ could not connect mysql use passwod """
        conn = self.connections[0]
        host = self.databases[0]["host"]
        db = self.databases[0]["db"]
        c = conn.cursor()
        # grant access to a table to a user with a password
        try:
            c.execute("create table issue17 (x varchar(32) primary key)")
            c.execute("insert into issue17 (x) values ('hello, world!')")
            c.execute("grant all privileges on %s.issue17 to 'issue17user'@'%%' identified by '1234'" % db)
            conn.commit()

            # A second connection as the freshly granted user must be able
            # to read the row back.
            conn2 = pymysql.connect(host=host, user="issue17user", passwd="1234", db=db)
            c2 = conn2.cursor()
            c2.execute("select x from issue17")
            self.assertEqual("hello, world!", c2.fetchone()[0])
        finally:
            c.execute("drop table issue17")
|
||||
|
||||
def _uni(s, e):
|
||||
# hack for py3
|
||||
if sys.version_info[0] > 2:
|
||||
return unicode(bytes(s, sys.getdefaultencoding()), e)
|
||||
else:
|
||||
return unicode(s, e)
|
||||
|
||||
class TestNewIssues(base.PyMySQLTestCase):
    """Regression tests for newer tracker issues (33-54).

    NOTE: this is Python 2 source (``except Exc, e`` syntax below).
    """

    def test_issue_34(self):
        # Connecting to a port nothing listens on must raise
        # OperationalError 2003 ("Can't connect to MySQL server").
        try:
            pymysql.connect(host="localhost", port=1237, user="root")
            self.fail()
        except pymysql.OperationalError, e:
            self.assertEqual(2003, e.args[0])
        except:
            self.fail()

    def test_issue_33(self):
        # Non-ASCII table name and cell data must work over a utf8
        # connection; _uni decodes the byte-escaped literals.
        conn = pymysql.connect(host="localhost", user="root", db=self.databases[0]["db"], charset="utf8")
        c = conn.cursor()
        try:
            c.execute(_uni("create table hei\xc3\x9fe (name varchar(32))", "utf8"))
            c.execute(_uni("insert into hei\xc3\x9fe (name) values ('Pi\xc3\xb1ata')", "utf8"))
            c.execute(_uni("select name from hei\xc3\x9fe", "utf8"))
            self.assertEqual(_uni("Pi\xc3\xb1ata","utf8"), c.fetchone()[0])
        finally:
            c.execute(_uni("drop table hei\xc3\x9fe", "utf8"))

    # Will fail without manual intervention:
    #def test_issue_35(self):
    #
    #    conn = self.connections[0]
    #    c = conn.cursor()
    #    print "sudo killall -9 mysqld within the next 10 seconds"
    #    try:
    #        c.execute("select sleep(10)")
    #        self.fail()
    #    except pymysql.OperationalError, e:
    #        self.assertEqual(2013, e.args[0])

    def test_issue_36(self):
        conn = self.connections[0]
        c = conn.cursor()
        # kill connections[0]: find our own "show processlist" entry and
        # ask the server to terminate that connection id.
        original_count = c.execute("show processlist")
        kill_id = None
        for id,user,host,db,command,time,state,info in c.fetchall():
            if info == "show processlist":
                kill_id = id
                break
        # now nuke the connection
        conn.kill(kill_id)
        # make sure this connection has broken
        try:
            c.execute("show tables")
            self.fail()
        except:
            pass
        # check the process list from the other connection
        self.assertEqual(original_count - 1, self.connections[1].cursor().execute("show processlist"))
        # Drop the dead connection so tearDown does not try to close it.
        del self.connections[0]

    def test_issue_37(self):
        conn = self.connections[0]
        c = conn.cursor()
        # Selecting an unset user variable yields one row containing NULL;
        # a SET statement produces no result rows.
        self.assertEqual(1, c.execute("SELECT @foo"))
        self.assertEqual((None,), c.fetchone())
        self.assertEqual(0, c.execute("SET @foo = 'bar'"))
        c.execute("set @foo = 'bar'")

    def test_issue_38(self):
        # A ~1 MB value must fit in a mediumblob column.
        conn = self.connections[0]
        c = conn.cursor()
        datum = "a" * 1024 * 1023 # reduced size for most default mysql installs

        try:
            c.execute("create table issue38 (id integer, data mediumblob)")
            c.execute("insert into issue38 values (1, %s)", (datum,))
        finally:
            c.execute("drop table issue38")

    def disabled_test_issue_54(self):
        # Disabled: builds an enormous WHERE clause to stress
        # query-packet handling.
        conn = self.connections[0]
        c = conn.cursor()
        big_sql = "select * from issue54 where "
        big_sql += " and ".join("%d=%d" % (i,i) for i in xrange(0, 100000))

        try:
            c.execute("create table issue54 (id integer primary key)")
            c.execute("insert into issue54 (id) values (7)")
            c.execute(big_sql)
            self.assertEquals(7, c.fetchone()[0])
        finally:
            c.execute("drop table issue54")
|
||||
|
||||
class TestGitHubIssues(base.PyMySQLTestCase):
    """Regression tests for issues reported on GitHub."""

    def test_issue_66(self):
        """insert_id() must track the last AUTO_INCREMENT value (GitHub #66)."""
        conn = self.connections[0]
        c = conn.cursor()
        # FIX: assertEquals is a deprecated alias (removed in Python 3.12);
        # assertEqual is the canonical spelling and behaves identically.
        # A fresh connection has produced no auto-increment id yet.
        self.assertEqual(0, conn.insert_id())
        try:
            c.execute("create table issue66 (id integer primary key auto_increment, x integer)")
            c.execute("insert into issue66 (x) values (1)")
            c.execute("insert into issue66 (x) values (1)")
            # Two inserts -> the last generated id is 2.
            self.assertEqual(2, conn.insert_id())
        finally:
            c.execute("drop table issue66")
|
||||
|
||||
__all__ = ["TestOldIssues", "TestNewIssues", "TestGitHubIssues"]
|
||||
|
||||
if __name__ == "__main__":
|
||||
import unittest
|
||||
unittest.main()
|
||||
|
|
@ -1,17 +0,0 @@
|
|||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
from test_MySQLdb import *
|
||||
|
||||
if __name__ == "__main__":
|
||||
import unittest
|
||||
unittest.main()
|
||||
|
|
@ -1,19 +0,0 @@
|
|||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
from test_MySQLdb_capabilities import test_MySQLdb as test_capabilities
|
||||
from test_MySQLdb_nonstandard import *
|
||||
from test_MySQLdb_dbapi20 import test_MySQLdb as test_dbapi2
|
||||
|
||||
if __name__ == "__main__":
|
||||
import unittest
|
||||
unittest.main()
|
||||
|
|
@ -1,304 +0,0 @@
|
|||
#!/usr/bin/env python -O
|
||||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
""" Script to test database capabilities and the DB-API interface
|
||||
for functionality and memory leaks.
|
||||
|
||||
Adapted from a script by M-A Lemburg.
|
||||
|
||||
"""
|
||||
from time import time
|
||||
import array
|
||||
import unittest
|
||||
|
||||
|
||||
class DatabaseTest(unittest.TestCase):
|
||||
|
||||
db_module = None
|
||||
connect_args = ()
|
||||
connect_kwargs = dict(use_unicode=True, charset="utf8")
|
||||
create_table_extra = "ENGINE=INNODB CHARACTER SET UTF8"
|
||||
rows = 10
|
||||
debug = False
|
||||
|
||||
def setUp(self):
|
||||
import gc
|
||||
db = self.db_module.connect(*self.connect_args, **self.connect_kwargs)
|
||||
self.connection = db
|
||||
self.cursor = db.cursor()
|
||||
self.BLOBText = ''.join([chr(i) for i in range(256)] * 100);
|
||||
self.BLOBUText = u''.join([unichr(i) for i in range(16834)])
|
||||
self.BLOBBinary = self.db_module.Binary(''.join([chr(i) for i in range(256)] * 16))
|
||||
|
||||
leak_test = True
|
||||
|
||||
def tearDown(self):
|
||||
if self.leak_test:
|
||||
import gc
|
||||
del self.cursor
|
||||
orphans = gc.collect()
|
||||
self.assertFalse(orphans, "%d orphaned objects found after deleting cursor" % orphans)
|
||||
|
||||
del self.connection
|
||||
orphans = gc.collect()
|
||||
self.assertFalse(orphans, "%d orphaned objects found after deleting connection" % orphans)
|
||||
|
||||
def table_exists(self, name):
|
||||
try:
|
||||
self.cursor.execute('select * from %s where 1=0' % name)
|
||||
except:
|
||||
return False
|
||||
else:
|
||||
return True
|
||||
|
||||
def quote_identifier(self, ident):
|
||||
return '"%s"' % ident
|
||||
|
||||
def new_table_name(self):
|
||||
i = id(self.cursor)
|
||||
while True:
|
||||
name = self.quote_identifier('tb%08x' % i)
|
||||
if not self.table_exists(name):
|
||||
return name
|
||||
i = i + 1
|
||||
|
||||
def create_table(self, columndefs):
|
||||
|
||||
""" Create a table using a list of column definitions given in
|
||||
columndefs.
|
||||
|
||||
generator must be a function taking arguments (row_number,
|
||||
col_number) returning a suitable data object for insertion
|
||||
into the table.
|
||||
|
||||
"""
|
||||
self.table = self.new_table_name()
|
||||
self.cursor.execute('CREATE TABLE %s (%s) %s' %
|
||||
(self.table,
|
||||
',\n'.join(columndefs),
|
||||
self.create_table_extra))
|
||||
|
||||
def check_data_integrity(self, columndefs, generator):
|
||||
# insert
|
||||
self.create_table(columndefs)
|
||||
insert_statement = ('INSERT INTO %s VALUES (%s)' %
|
||||
(self.table,
|
||||
','.join(['%s'] * len(columndefs))))
|
||||
data = [ [ generator(i,j) for j in range(len(columndefs)) ]
|
||||
for i in range(self.rows) ]
|
||||
if self.debug:
|
||||
print data
|
||||
self.cursor.executemany(insert_statement, data)
|
||||
self.connection.commit()
|
||||
# verify
|
||||
self.cursor.execute('select * from %s' % self.table)
|
||||
l = self.cursor.fetchall()
|
||||
if self.debug:
|
||||
print l
|
||||
self.assertEquals(len(l), self.rows)
|
||||
try:
|
||||
for i in range(self.rows):
|
||||
for j in range(len(columndefs)):
|
||||
self.assertEquals(l[i][j], generator(i,j))
|
||||
finally:
|
||||
if not self.debug:
|
||||
self.cursor.execute('drop table %s' % (self.table))
|
||||
|
||||
def test_transactions(self):
|
||||
columndefs = ( 'col1 INT', 'col2 VARCHAR(255)')
|
||||
def generator(row, col):
|
||||
if col == 0: return row
|
||||
else: return ('%i' % (row%10))*255
|
||||
self.create_table(columndefs)
|
||||
insert_statement = ('INSERT INTO %s VALUES (%s)' %
|
||||
(self.table,
|
||||
','.join(['%s'] * len(columndefs))))
|
||||
data = [ [ generator(i,j) for j in range(len(columndefs)) ]
|
||||
for i in range(self.rows) ]
|
||||
self.cursor.executemany(insert_statement, data)
|
||||
# verify
|
||||
self.connection.commit()
|
||||
self.cursor.execute('select * from %s' % self.table)
|
||||
l = self.cursor.fetchall()
|
||||
self.assertEquals(len(l), self.rows)
|
||||
for i in range(self.rows):
|
||||
for j in range(len(columndefs)):
|
||||
self.assertEquals(l[i][j], generator(i,j))
|
||||
delete_statement = 'delete from %s where col1=%%s' % self.table
|
||||
self.cursor.execute(delete_statement, (0,))
|
||||
self.cursor.execute('select col1 from %s where col1=%s' % \
|
||||
(self.table, 0))
|
||||
l = self.cursor.fetchall()
|
||||
self.assertFalse(l, "DELETE didn't work")
|
||||
self.connection.rollback()
|
||||
self.cursor.execute('select col1 from %s where col1=%s' % \
|
||||
(self.table, 0))
|
||||
l = self.cursor.fetchall()
|
||||
self.assertTrue(len(l) == 1, "ROLLBACK didn't work")
|
||||
self.cursor.execute('drop table %s' % (self.table))
|
||||
|
||||
def test_truncation(self):
|
||||
columndefs = ( 'col1 INT', 'col2 VARCHAR(255)')
|
||||
def generator(row, col):
|
||||
if col == 0: return row
|
||||
else: return ('%i' % (row%10))*((255-self.rows/2)+row)
|
||||
self.create_table(columndefs)
|
||||
insert_statement = ('INSERT INTO %s VALUES (%s)' %
|
||||
(self.table,
|
||||
','.join(['%s'] * len(columndefs))))
|
||||
|
||||
try:
|
||||
self.cursor.execute(insert_statement, (0, '0'*256))
|
||||
except Warning:
|
||||
if self.debug: print self.cursor.messages
|
||||
except self.connection.DataError:
|
||||
pass
|
||||
else:
|
||||
self.fail("Over-long column did not generate warnings/exception with single insert")
|
||||
|
||||
self.connection.rollback()
|
||||
|
||||
try:
|
||||
for i in range(self.rows):
|
||||
data = []
|
||||
for j in range(len(columndefs)):
|
||||
data.append(generator(i,j))
|
||||
self.cursor.execute(insert_statement,tuple(data))
|
||||
except Warning:
|
||||
if self.debug: print self.cursor.messages
|
||||
except self.connection.DataError:
|
||||
pass
|
||||
else:
|
||||
self.fail("Over-long columns did not generate warnings/exception with execute()")
|
||||
|
||||
self.connection.rollback()
|
||||
|
||||
try:
|
||||
data = [ [ generator(i,j) for j in range(len(columndefs)) ]
|
||||
for i in range(self.rows) ]
|
||||
self.cursor.executemany(insert_statement, data)
|
||||
except Warning:
|
||||
if self.debug: print self.cursor.messages
|
||||
except self.connection.DataError:
|
||||
pass
|
||||
else:
|
||||
self.fail("Over-long columns did not generate warnings/exception with executemany()")
|
||||
|
||||
self.connection.rollback()
|
||||
self.cursor.execute('drop table %s' % (self.table))
|
||||
|
||||
def test_CHAR(self):
|
||||
# Character data
|
||||
def generator(row,col):
|
||||
return ('%i' % ((row+col) % 10)) * 255
|
||||
self.check_data_integrity(
|
||||
('col1 char(255)','col2 char(255)'),
|
||||
generator)
|
||||
|
||||
def test_INT(self):
|
||||
# Number data
|
||||
def generator(row,col):
|
||||
return row*row
|
||||
self.check_data_integrity(
|
||||
('col1 INT',),
|
||||
generator)
|
||||
|
||||
def test_DECIMAL(self):
|
||||
# DECIMAL
|
||||
def generator(row,col):
|
||||
from decimal import Decimal
|
||||
return Decimal("%d.%02d" % (row, col))
|
||||
self.check_data_integrity(
|
||||
('col1 DECIMAL(5,2)',),
|
||||
generator)
|
||||
|
||||
def test_DATE(self):
|
||||
ticks = time()
|
||||
def generator(row,col):
|
||||
return self.db_module.DateFromTicks(ticks+row*86400-col*1313)
|
||||
self.check_data_integrity(
|
||||
('col1 DATE',),
|
||||
generator)
|
||||
|
||||
def test_TIME(self):
|
||||
ticks = time()
|
||||
def generator(row,col):
|
||||
return self.db_module.TimeFromTicks(ticks+row*86400-col*1313)
|
||||
self.check_data_integrity(
|
||||
('col1 TIME',),
|
||||
generator)
|
||||
|
||||
def test_DATETIME(self):
|
||||
ticks = time()
|
||||
def generator(row,col):
|
||||
return self.db_module.TimestampFromTicks(ticks+row*86400-col*1313)
|
||||
self.check_data_integrity(
|
||||
('col1 DATETIME',),
|
||||
generator)
|
||||
|
||||
def test_TIMESTAMP(self):
|
||||
ticks = time()
|
||||
def generator(row,col):
|
||||
return self.db_module.TimestampFromTicks(ticks+row*86400-col*1313)
|
||||
self.check_data_integrity(
|
||||
('col1 TIMESTAMP',),
|
||||
generator)
|
||||
|
||||
def test_fractional_TIMESTAMP(self):
|
||||
ticks = time()
|
||||
def generator(row,col):
|
||||
return self.db_module.TimestampFromTicks(ticks+row*86400-col*1313+row*0.7*col/3.0)
|
||||
self.check_data_integrity(
|
||||
('col1 TIMESTAMP',),
|
||||
generator)
|
||||
|
||||
def test_LONG(self):
|
||||
def generator(row,col):
|
||||
if col == 0:
|
||||
return row
|
||||
else:
|
||||
return self.BLOBUText # 'BLOB Text ' * 1024
|
||||
self.check_data_integrity(
|
||||
('col1 INT', 'col2 LONG'),
|
||||
generator)
|
||||
|
||||
def test_TEXT(self):
|
||||
def generator(row,col):
|
||||
if col == 0:
|
||||
return row
|
||||
else:
|
||||
return self.BLOBUText[:5192] # 'BLOB Text ' * 1024
|
||||
self.check_data_integrity(
|
||||
('col1 INT', 'col2 TEXT'),
|
||||
generator)
|
||||
|
||||
def test_LONG_BYTE(self):
|
||||
def generator(row,col):
|
||||
if col == 0:
|
||||
return row
|
||||
else:
|
||||
return self.BLOBBinary # 'BLOB\000Binary ' * 1024
|
||||
self.check_data_integrity(
|
||||
('col1 INT','col2 LONG BYTE'),
|
||||
generator)
|
||||
|
||||
def test_BLOB(self):
|
||||
def generator(row,col):
|
||||
if col == 0:
|
||||
return row
|
||||
else:
|
||||
return self.BLOBBinary # 'BLOB\000Binary ' * 1024
|
||||
self.check_data_integrity(
|
||||
('col1 INT','col2 BLOB'),
|
||||
generator)
|
||||
|
||||
|
|
@ -1,856 +0,0 @@
|
|||
#!/usr/bin/env python
|
||||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
''' Python DB API 2.0 driver compliance unit test suite.
|
||||
|
||||
This software is Public Domain and may be used without restrictions.
|
||||
|
||||
"Now we have booze and barflies entering the discussion, plus rumours of
|
||||
DBAs on drugs... and I won't tell you what flashes through my mind each
|
||||
time I read the subject line with 'Anal Compliance' in it. All around
|
||||
this is turning out to be a thoroughly unwholesome unit test."
|
||||
|
||||
-- Ian Bicking
|
||||
'''
|
||||
|
||||
__rcs_id__ = '$Id$'
|
||||
__version__ = '$Revision$'[11:-2]
|
||||
__author__ = 'Stuart Bishop <zen@shangri-la.dropbear.id.au>'
|
||||
|
||||
import unittest
|
||||
import time
|
||||
|
||||
# $Log$
|
||||
# Revision 1.1.2.1 2006/02/25 03:44:32 adustman
|
||||
# Generic DB-API unit test module
|
||||
# Revision 1.10 2003/10/09 03:14:14 zenzen
|
||||
# Add test for DB API 2.0 optional extension, where database exceptions
|
||||
# are exposed as attributes on the Connection object.
|
||||
# Revision 1.9 2003/08/13 01:16:36 zenzen
|
||||
# Minor tweak from Stefan Fleiter
|
||||
# Revision 1.8 2003/04/10 00:13:25 zenzen
|
||||
# Changes, as per suggestions by M.-A. Lemburg
|
||||
# Revision 1.7 2003/02/26 23:33:37 zenzen
|
||||
# Break out DDL into helper functions, as per request by David Rushby
|
||||
# Revision 1.6 2003/02/21 03:04:33 zenzen
|
||||
# Stuff from Henrik Ekelund:
|
||||
# added test_None
|
||||
# added test_nextset & hooks
|
||||
# Revision 1.5 2003/02/17 22:08:43 zenzen
|
||||
# defaults to 1 & generic cursor.callproc test added
|
||||
# Revision 1.4 2003/02/15 00:16:33 zenzen
|
||||
# Changes, as per suggestions and bug reports by M.-A. Lemburg,
|
||||
# Matthew T. Kromer, Federico Di Gregorio and Daniel Dittmar
|
||||
# - Class renamed
|
||||
# - Now a subclass of TestCase, to avoid requiring the driver stub
|
||||
# to use multiple inheritance
|
||||
# - Reversed the polarity of buggy test in test_description
|
||||
# - Test exception heirarchy correctly
|
||||
# - self.populate is now self._populate(), so if a driver stub
|
||||
# overrides self.ddl1 this change propogates
|
||||
# - VARCHAR columns now have a width, which will hopefully make the
|
||||
# DDL even more portible (this will be reversed if it causes more problems)
|
||||
# - cursor.rowcount being checked after various execute and fetchXXX methods
|
||||
# - Check for fetchall and fetchmany returning empty lists after results
|
||||
# are exhausted (already checking for empty lists if select retrieved
|
||||
# nothing
|
||||
# - Fix bugs in test_setoutputsize_basic and test_setinputsizes
|
||||
#
|
||||
|
||||
class DatabaseAPI20Test(unittest.TestCase):
|
||||
''' Test a database self.driver for DB API 2.0 compatibility.
|
||||
This implementation tests Gadfly, but the TestCase
|
||||
is structured so that other self.drivers can subclass this
|
||||
test case to ensure compiliance with the DB-API. It is
|
||||
expected that this TestCase may be expanded in the future
|
||||
if ambiguities or edge conditions are discovered.
|
||||
|
||||
The 'Optional Extensions' are not yet being tested.
|
||||
|
||||
self.drivers should subclass this test, overriding setUp, tearDown,
|
||||
self.driver, connect_args and connect_kw_args. Class specification
|
||||
should be as follows:
|
||||
|
||||
import dbapi20
|
||||
class mytest(dbapi20.DatabaseAPI20Test):
|
||||
[...]
|
||||
|
||||
Don't 'import DatabaseAPI20Test from dbapi20', or you will
|
||||
confuse the unit tester - just 'import dbapi20'.
|
||||
'''
|
||||
|
||||
# The self.driver module. This should be the module where the 'connect'
|
||||
# method is to be found
|
||||
driver = None
|
||||
connect_args = () # List of arguments to pass to connect
|
||||
connect_kw_args = {} # Keyword arguments for connect
|
||||
table_prefix = 'dbapi20test_' # If you need to specify a prefix for tables
|
||||
|
||||
ddl1 = 'create table %sbooze (name varchar(20))' % table_prefix
|
||||
ddl2 = 'create table %sbarflys (name varchar(20))' % table_prefix
|
||||
xddl1 = 'drop table %sbooze' % table_prefix
|
||||
xddl2 = 'drop table %sbarflys' % table_prefix
|
||||
|
||||
lowerfunc = 'lower' # Name of stored procedure to convert string->lowercase
|
||||
|
||||
# Some drivers may need to override these helpers, for example adding
|
||||
# a 'commit' after the execute.
|
||||
def executeDDL1(self,cursor):
|
||||
cursor.execute(self.ddl1)
|
||||
|
||||
def executeDDL2(self,cursor):
|
||||
cursor.execute(self.ddl2)
|
||||
|
||||
def setUp(self):
|
||||
''' self.drivers should override this method to perform required setup
|
||||
if any is necessary, such as creating the database.
|
||||
'''
|
||||
pass
|
||||
|
||||
def tearDown(self):
|
||||
''' self.drivers should override this method to perform required cleanup
|
||||
if any is necessary, such as deleting the test database.
|
||||
The default drops the tables that may be created.
|
||||
'''
|
||||
con = self._connect()
|
||||
try:
|
||||
cur = con.cursor()
|
||||
for ddl in (self.xddl1,self.xddl2):
|
||||
try:
|
||||
cur.execute(ddl)
|
||||
con.commit()
|
||||
except self.driver.Error:
|
||||
# Assume table didn't exist. Other tests will check if
|
||||
# execute is busted.
|
||||
pass
|
||||
finally:
|
||||
con.close()
|
||||
|
||||
def _connect(self):
|
||||
try:
|
||||
return self.driver.connect(
|
||||
*self.connect_args,**self.connect_kw_args
|
||||
)
|
||||
except AttributeError:
|
||||
self.fail("No connect method found in self.driver module")
|
||||
|
||||
def test_connect(self):
|
||||
con = self._connect()
|
||||
con.close()
|
||||
|
||||
def test_apilevel(self):
|
||||
try:
|
||||
# Must exist
|
||||
apilevel = self.driver.apilevel
|
||||
# Must equal 2.0
|
||||
self.assertEqual(apilevel,'2.0')
|
||||
except AttributeError:
|
||||
self.fail("Driver doesn't define apilevel")
|
||||
|
||||
def test_threadsafety(self):
|
||||
try:
|
||||
# Must exist
|
||||
threadsafety = self.driver.threadsafety
|
||||
# Must be a valid value
|
||||
self.assertTrue(threadsafety in (0,1,2,3))
|
||||
except AttributeError:
|
||||
self.fail("Driver doesn't define threadsafety")
|
||||
|
||||
def test_paramstyle(self):
|
||||
try:
|
||||
# Must exist
|
||||
paramstyle = self.driver.paramstyle
|
||||
# Must be a valid value
|
||||
self.assertTrue(paramstyle in (
|
||||
'qmark','numeric','named','format','pyformat'
|
||||
))
|
||||
except AttributeError:
|
||||
self.fail("Driver doesn't define paramstyle")
|
||||
|
||||
def test_Exceptions(self):
|
||||
# Make sure required exceptions exist, and are in the
|
||||
# defined heirarchy.
|
||||
self.assertTrue(issubclass(self.driver.Warning,StandardError))
|
||||
self.assertTrue(issubclass(self.driver.Error,StandardError))
|
||||
self.assertTrue(
|
||||
issubclass(self.driver.InterfaceError,self.driver.Error)
|
||||
)
|
||||
self.assertTrue(
|
||||
issubclass(self.driver.DatabaseError,self.driver.Error)
|
||||
)
|
||||
self.assertTrue(
|
||||
issubclass(self.driver.OperationalError,self.driver.Error)
|
||||
)
|
||||
self.assertTrue(
|
||||
issubclass(self.driver.IntegrityError,self.driver.Error)
|
||||
)
|
||||
self.assertTrue(
|
||||
issubclass(self.driver.InternalError,self.driver.Error)
|
||||
)
|
||||
self.assertTrue(
|
||||
issubclass(self.driver.ProgrammingError,self.driver.Error)
|
||||
)
|
||||
self.assertTrue(
|
||||
issubclass(self.driver.NotSupportedError,self.driver.Error)
|
||||
)
|
||||
|
||||
def test_ExceptionsAsConnectionAttributes(self):
|
||||
# OPTIONAL EXTENSION
|
||||
# Test for the optional DB API 2.0 extension, where the exceptions
|
||||
# are exposed as attributes on the Connection object
|
||||
# I figure this optional extension will be implemented by any
|
||||
# driver author who is using this test suite, so it is enabled
|
||||
# by default.
|
||||
con = self._connect()
|
||||
drv = self.driver
|
||||
self.assertTrue(con.Warning is drv.Warning)
|
||||
self.assertTrue(con.Error is drv.Error)
|
||||
self.assertTrue(con.InterfaceError is drv.InterfaceError)
|
||||
self.assertTrue(con.DatabaseError is drv.DatabaseError)
|
||||
self.assertTrue(con.OperationalError is drv.OperationalError)
|
||||
self.assertTrue(con.IntegrityError is drv.IntegrityError)
|
||||
self.assertTrue(con.InternalError is drv.InternalError)
|
||||
self.assertTrue(con.ProgrammingError is drv.ProgrammingError)
|
||||
self.assertTrue(con.NotSupportedError is drv.NotSupportedError)
|
||||
|
||||
|
||||
def test_commit(self):
|
||||
con = self._connect()
|
||||
try:
|
||||
# Commit must work, even if it doesn't do anything
|
||||
con.commit()
|
||||
finally:
|
||||
con.close()
|
||||
|
||||
def test_rollback(self):
|
||||
con = self._connect()
|
||||
# If rollback is defined, it should either work or throw
|
||||
# the documented exception
|
||||
if hasattr(con,'rollback'):
|
||||
try:
|
||||
con.rollback()
|
||||
except self.driver.NotSupportedError:
|
||||
pass
|
||||
|
||||
def test_cursor(self):
|
||||
con = self._connect()
|
||||
try:
|
||||
cur = con.cursor()
|
||||
finally:
|
||||
con.close()
|
||||
|
||||
def test_cursor_isolation(self):
|
||||
con = self._connect()
|
||||
try:
|
||||
# Make sure cursors created from the same connection have
|
||||
# the documented transaction isolation level
|
||||
cur1 = con.cursor()
|
||||
cur2 = con.cursor()
|
||||
self.executeDDL1(cur1)
|
||||
cur1.execute("insert into %sbooze values ('Victoria Bitter')" % (
|
||||
self.table_prefix
|
||||
))
|
||||
cur2.execute("select name from %sbooze" % self.table_prefix)
|
||||
booze = cur2.fetchall()
|
||||
self.assertEqual(len(booze),1)
|
||||
self.assertEqual(len(booze[0]),1)
|
||||
self.assertEqual(booze[0][0],'Victoria Bitter')
|
||||
finally:
|
||||
con.close()
|
||||
|
||||
def test_description(self):
|
||||
con = self._connect()
|
||||
try:
|
||||
cur = con.cursor()
|
||||
self.executeDDL1(cur)
|
||||
self.assertEqual(cur.description,None,
|
||||
'cursor.description should be none after executing a '
|
||||
'statement that can return no rows (such as DDL)'
|
||||
)
|
||||
cur.execute('select name from %sbooze' % self.table_prefix)
|
||||
self.assertEqual(len(cur.description),1,
|
||||
'cursor.description describes too many columns'
|
||||
)
|
||||
self.assertEqual(len(cur.description[0]),7,
|
||||
'cursor.description[x] tuples must have 7 elements'
|
||||
)
|
||||
self.assertEqual(cur.description[0][0].lower(),'name',
|
||||
'cursor.description[x][0] must return column name'
|
||||
)
|
||||
self.assertEqual(cur.description[0][1],self.driver.STRING,
|
||||
'cursor.description[x][1] must return column type. Got %r'
|
||||
% cur.description[0][1]
|
||||
)
|
||||
|
||||
# Make sure self.description gets reset
|
||||
self.executeDDL2(cur)
|
||||
self.assertEqual(cur.description,None,
|
||||
'cursor.description not being set to None when executing '
|
||||
'no-result statements (eg. DDL)'
|
||||
)
|
||||
finally:
|
||||
con.close()
|
||||
|
||||
def test_rowcount(self):
|
||||
con = self._connect()
|
||||
try:
|
||||
cur = con.cursor()
|
||||
self.executeDDL1(cur)
|
||||
self.assertEqual(cur.rowcount,-1,
|
||||
'cursor.rowcount should be -1 after executing no-result '
|
||||
'statements'
|
||||
)
|
||||
cur.execute("insert into %sbooze values ('Victoria Bitter')" % (
|
||||
self.table_prefix
|
||||
))
|
||||
self.assertTrue(cur.rowcount in (-1,1),
|
||||
'cursor.rowcount should == number or rows inserted, or '
|
||||
'set to -1 after executing an insert statement'
|
||||
)
|
||||
cur.execute("select name from %sbooze" % self.table_prefix)
|
||||
self.assertTrue(cur.rowcount in (-1,1),
|
||||
'cursor.rowcount should == number of rows returned, or '
|
||||
'set to -1 after executing a select statement'
|
||||
)
|
||||
self.executeDDL2(cur)
|
||||
self.assertEqual(cur.rowcount,-1,
|
||||
'cursor.rowcount not being reset to -1 after executing '
|
||||
'no-result statements'
|
||||
)
|
||||
finally:
|
||||
con.close()
|
||||
|
||||
lower_func = 'lower'
|
||||
def test_callproc(self):
|
||||
con = self._connect()
|
||||
try:
|
||||
cur = con.cursor()
|
||||
if self.lower_func and hasattr(cur,'callproc'):
|
||||
r = cur.callproc(self.lower_func,('FOO',))
|
||||
self.assertEqual(len(r),1)
|
||||
self.assertEqual(r[0],'FOO')
|
||||
r = cur.fetchall()
|
||||
self.assertEqual(len(r),1,'callproc produced no result set')
|
||||
self.assertEqual(len(r[0]),1,
|
||||
'callproc produced invalid result set'
|
||||
)
|
||||
self.assertEqual(r[0][0],'foo',
|
||||
'callproc produced invalid results'
|
||||
)
|
||||
finally:
|
||||
con.close()
|
||||
|
||||
def test_close(self):
|
||||
con = self._connect()
|
||||
try:
|
||||
cur = con.cursor()
|
||||
finally:
|
||||
con.close()
|
||||
|
||||
# cursor.execute should raise an Error if called after connection
|
||||
# closed
|
||||
self.assertRaises(self.driver.Error,self.executeDDL1,cur)
|
||||
|
||||
# connection.commit should raise an Error if called after connection'
|
||||
# closed.'
|
||||
self.assertRaises(self.driver.Error,con.commit)
|
||||
|
||||
# connection.close should raise an Error if called more than once
|
||||
self.assertRaises(self.driver.Error,con.close)
|
||||
|
||||
def test_execute(self):
|
||||
con = self._connect()
|
||||
try:
|
||||
cur = con.cursor()
|
||||
self._paraminsert(cur)
|
||||
finally:
|
||||
con.close()
|
||||
|
||||
def _paraminsert(self,cur):
|
||||
self.executeDDL1(cur)
|
||||
cur.execute("insert into %sbooze values ('Victoria Bitter')" % (
|
||||
self.table_prefix
|
||||
))
|
||||
self.assertTrue(cur.rowcount in (-1,1))
|
||||
|
||||
if self.driver.paramstyle == 'qmark':
|
||||
cur.execute(
|
||||
'insert into %sbooze values (?)' % self.table_prefix,
|
||||
("Cooper's",)
|
||||
)
|
||||
elif self.driver.paramstyle == 'numeric':
|
||||
cur.execute(
|
||||
'insert into %sbooze values (:1)' % self.table_prefix,
|
||||
("Cooper's",)
|
||||
)
|
||||
elif self.driver.paramstyle == 'named':
|
||||
cur.execute(
|
||||
'insert into %sbooze values (:beer)' % self.table_prefix,
|
||||
{'beer':"Cooper's"}
|
||||
)
|
||||
elif self.driver.paramstyle == 'format':
|
||||
cur.execute(
|
||||
'insert into %sbooze values (%%s)' % self.table_prefix,
|
||||
("Cooper's",)
|
||||
)
|
||||
elif self.driver.paramstyle == 'pyformat':
|
||||
cur.execute(
|
||||
'insert into %sbooze values (%%(beer)s)' % self.table_prefix,
|
||||
{'beer':"Cooper's"}
|
||||
)
|
||||
else:
|
||||
self.fail('Invalid paramstyle')
|
||||
self.assertTrue(cur.rowcount in (-1,1))
|
||||
|
||||
cur.execute('select name from %sbooze' % self.table_prefix)
|
||||
res = cur.fetchall()
|
||||
self.assertEqual(len(res),2,'cursor.fetchall returned too few rows')
|
||||
beers = [res[0][0],res[1][0]]
|
||||
beers.sort()
|
||||
self.assertEqual(beers[0],"Cooper's",
|
||||
'cursor.fetchall retrieved incorrect data, or data inserted '
|
||||
'incorrectly'
|
||||
)
|
||||
self.assertEqual(beers[1],"Victoria Bitter",
|
||||
'cursor.fetchall retrieved incorrect data, or data inserted '
|
||||
'incorrectly'
|
||||
)
|
||||
|
||||
def test_executemany(self):
|
||||
con = self._connect()
|
||||
try:
|
||||
cur = con.cursor()
|
||||
self.executeDDL1(cur)
|
||||
largs = [ ("Cooper's",) , ("Boag's",) ]
|
||||
margs = [ {'beer': "Cooper's"}, {'beer': "Boag's"} ]
|
||||
if self.driver.paramstyle == 'qmark':
|
||||
cur.executemany(
|
||||
'insert into %sbooze values (?)' % self.table_prefix,
|
||||
largs
|
||||
)
|
||||
elif self.driver.paramstyle == 'numeric':
|
||||
cur.executemany(
|
||||
'insert into %sbooze values (:1)' % self.table_prefix,
|
||||
largs
|
||||
)
|
||||
elif self.driver.paramstyle == 'named':
|
||||
cur.executemany(
|
||||
'insert into %sbooze values (:beer)' % self.table_prefix,
|
||||
margs
|
||||
)
|
||||
elif self.driver.paramstyle == 'format':
|
||||
cur.executemany(
|
||||
'insert into %sbooze values (%%s)' % self.table_prefix,
|
||||
largs
|
||||
)
|
||||
elif self.driver.paramstyle == 'pyformat':
|
||||
cur.executemany(
|
||||
'insert into %sbooze values (%%(beer)s)' % (
|
||||
self.table_prefix
|
||||
),
|
||||
margs
|
||||
)
|
||||
else:
|
||||
self.fail('Unknown paramstyle')
|
||||
self.assertTrue(cur.rowcount in (-1,2),
|
||||
'insert using cursor.executemany set cursor.rowcount to '
|
||||
'incorrect value %r' % cur.rowcount
|
||||
)
|
||||
cur.execute('select name from %sbooze' % self.table_prefix)
|
||||
res = cur.fetchall()
|
||||
self.assertEqual(len(res),2,
|
||||
'cursor.fetchall retrieved incorrect number of rows'
|
||||
)
|
||||
beers = [res[0][0],res[1][0]]
|
||||
beers.sort()
|
||||
self.assertEqual(beers[0],"Boag's",'incorrect data retrieved')
|
||||
self.assertEqual(beers[1],"Cooper's",'incorrect data retrieved')
|
||||
finally:
|
||||
con.close()
|
||||
|
||||
def test_fetchone(self):
|
||||
con = self._connect()
|
||||
try:
|
||||
cur = con.cursor()
|
||||
|
||||
# cursor.fetchone should raise an Error if called before
|
||||
# executing a select-type query
|
||||
self.assertRaises(self.driver.Error,cur.fetchone)
|
||||
|
||||
# cursor.fetchone should raise an Error if called after
|
||||
# executing a query that cannnot return rows
|
||||
self.executeDDL1(cur)
|
||||
self.assertRaises(self.driver.Error,cur.fetchone)
|
||||
|
||||
cur.execute('select name from %sbooze' % self.table_prefix)
|
||||
self.assertEqual(cur.fetchone(),None,
|
||||
'cursor.fetchone should return None if a query retrieves '
|
||||
'no rows'
|
||||
)
|
||||
self.assertTrue(cur.rowcount in (-1,0))
|
||||
|
||||
# cursor.fetchone should raise an Error if called after
|
||||
# executing a query that cannnot return rows
|
||||
cur.execute("insert into %sbooze values ('Victoria Bitter')" % (
|
||||
self.table_prefix
|
||||
))
|
||||
self.assertRaises(self.driver.Error,cur.fetchone)
|
||||
|
||||
cur.execute('select name from %sbooze' % self.table_prefix)
|
||||
r = cur.fetchone()
|
||||
self.assertEqual(len(r),1,
|
||||
'cursor.fetchone should have retrieved a single row'
|
||||
)
|
||||
self.assertEqual(r[0],'Victoria Bitter',
|
||||
'cursor.fetchone retrieved incorrect data'
|
||||
)
|
||||
self.assertEqual(cur.fetchone(),None,
|
||||
'cursor.fetchone should return None if no more rows available'
|
||||
)
|
||||
self.assertTrue(cur.rowcount in (-1,1))
|
||||
finally:
|
||||
con.close()
|
||||
|
||||
samples = [
|
||||
'Carlton Cold',
|
||||
'Carlton Draft',
|
||||
'Mountain Goat',
|
||||
'Redback',
|
||||
'Victoria Bitter',
|
||||
'XXXX'
|
||||
]
|
||||
|
||||
def _populate(self):
|
||||
''' Return a list of sql commands to setup the DB for the fetch
|
||||
tests.
|
||||
'''
|
||||
populate = [
|
||||
"insert into %sbooze values ('%s')" % (self.table_prefix,s)
|
||||
for s in self.samples
|
||||
]
|
||||
return populate
|
||||
|
||||
def test_fetchmany(self):
|
||||
con = self._connect()
|
||||
try:
|
||||
cur = con.cursor()
|
||||
|
||||
# cursor.fetchmany should raise an Error if called without
|
||||
#issuing a query
|
||||
self.assertRaises(self.driver.Error,cur.fetchmany,4)
|
||||
|
||||
self.executeDDL1(cur)
|
||||
for sql in self._populate():
|
||||
cur.execute(sql)
|
||||
|
||||
cur.execute('select name from %sbooze' % self.table_prefix)
|
||||
r = cur.fetchmany()
|
||||
self.assertEqual(len(r),1,
|
||||
'cursor.fetchmany retrieved incorrect number of rows, '
|
||||
'default of arraysize is one.'
|
||||
)
|
||||
cur.arraysize=10
|
||||
r = cur.fetchmany(3) # Should get 3 rows
|
||||
self.assertEqual(len(r),3,
|
||||
'cursor.fetchmany retrieved incorrect number of rows'
|
||||
)
|
||||
r = cur.fetchmany(4) # Should get 2 more
|
||||
self.assertEqual(len(r),2,
|
||||
'cursor.fetchmany retrieved incorrect number of rows'
|
||||
)
|
||||
r = cur.fetchmany(4) # Should be an empty sequence
|
||||
self.assertEqual(len(r),0,
|
||||
'cursor.fetchmany should return an empty sequence after '
|
||||
'results are exhausted'
|
||||
)
|
||||
self.assertTrue(cur.rowcount in (-1,6))
|
||||
|
||||
# Same as above, using cursor.arraysize
|
||||
cur.arraysize=4
|
||||
cur.execute('select name from %sbooze' % self.table_prefix)
|
||||
r = cur.fetchmany() # Should get 4 rows
|
||||
self.assertEqual(len(r),4,
|
||||
'cursor.arraysize not being honoured by fetchmany'
|
||||
)
|
||||
r = cur.fetchmany() # Should get 2 more
|
||||
self.assertEqual(len(r),2)
|
||||
r = cur.fetchmany() # Should be an empty sequence
|
||||
self.assertEqual(len(r),0)
|
||||
self.assertTrue(cur.rowcount in (-1,6))
|
||||
|
||||
cur.arraysize=6
|
||||
cur.execute('select name from %sbooze' % self.table_prefix)
|
||||
rows = cur.fetchmany() # Should get all rows
|
||||
self.assertTrue(cur.rowcount in (-1,6))
|
||||
self.assertEqual(len(rows),6)
|
||||
self.assertEqual(len(rows),6)
|
||||
rows = [r[0] for r in rows]
|
||||
rows.sort()
|
||||
|
||||
# Make sure we get the right data back out
|
||||
for i in range(0,6):
|
||||
self.assertEqual(rows[i],self.samples[i],
|
||||
'incorrect data retrieved by cursor.fetchmany'
|
||||
)
|
||||
|
||||
rows = cur.fetchmany() # Should return an empty list
|
||||
self.assertEqual(len(rows),0,
|
||||
'cursor.fetchmany should return an empty sequence if '
|
||||
'called after the whole result set has been fetched'
|
||||
)
|
||||
self.assertTrue(cur.rowcount in (-1,6))
|
||||
|
||||
self.executeDDL2(cur)
|
||||
cur.execute('select name from %sbarflys' % self.table_prefix)
|
||||
r = cur.fetchmany() # Should get empty sequence
|
||||
self.assertEqual(len(r),0,
|
||||
'cursor.fetchmany should return an empty sequence if '
|
||||
'query retrieved no rows'
|
||||
)
|
||||
self.assertTrue(cur.rowcount in (-1,0))
|
||||
|
||||
finally:
|
||||
con.close()
|
||||
|
||||
def test_fetchall(self):
|
||||
con = self._connect()
|
||||
try:
|
||||
cur = con.cursor()
|
||||
# cursor.fetchall should raise an Error if called
|
||||
# without executing a query that may return rows (such
|
||||
# as a select)
|
||||
self.assertRaises(self.driver.Error, cur.fetchall)
|
||||
|
||||
self.executeDDL1(cur)
|
||||
for sql in self._populate():
|
||||
cur.execute(sql)
|
||||
|
||||
# cursor.fetchall should raise an Error if called
|
||||
# after executing a a statement that cannot return rows
|
||||
self.assertRaises(self.driver.Error,cur.fetchall)
|
||||
|
||||
cur.execute('select name from %sbooze' % self.table_prefix)
|
||||
rows = cur.fetchall()
|
||||
self.assertTrue(cur.rowcount in (-1,len(self.samples)))
|
||||
self.assertEqual(len(rows),len(self.samples),
|
||||
'cursor.fetchall did not retrieve all rows'
|
||||
)
|
||||
rows = [r[0] for r in rows]
|
||||
rows.sort()
|
||||
for i in range(0,len(self.samples)):
|
||||
self.assertEqual(rows[i],self.samples[i],
|
||||
'cursor.fetchall retrieved incorrect rows'
|
||||
)
|
||||
rows = cur.fetchall()
|
||||
self.assertEqual(
|
||||
len(rows),0,
|
||||
'cursor.fetchall should return an empty list if called '
|
||||
'after the whole result set has been fetched'
|
||||
)
|
||||
self.assertTrue(cur.rowcount in (-1,len(self.samples)))
|
||||
|
||||
self.executeDDL2(cur)
|
||||
cur.execute('select name from %sbarflys' % self.table_prefix)
|
||||
rows = cur.fetchall()
|
||||
self.assertTrue(cur.rowcount in (-1,0))
|
||||
self.assertEqual(len(rows),0,
|
||||
'cursor.fetchall should return an empty list if '
|
||||
'a select query returns no rows'
|
||||
)
|
||||
|
||||
finally:
|
||||
con.close()
|
||||
|
||||
def test_mixedfetch(self):
|
||||
con = self._connect()
|
||||
try:
|
||||
cur = con.cursor()
|
||||
self.executeDDL1(cur)
|
||||
for sql in self._populate():
|
||||
cur.execute(sql)
|
||||
|
||||
cur.execute('select name from %sbooze' % self.table_prefix)
|
||||
rows1 = cur.fetchone()
|
||||
rows23 = cur.fetchmany(2)
|
||||
rows4 = cur.fetchone()
|
||||
rows56 = cur.fetchall()
|
||||
self.assertTrue(cur.rowcount in (-1,6))
|
||||
self.assertEqual(len(rows23),2,
|
||||
'fetchmany returned incorrect number of rows'
|
||||
)
|
||||
self.assertEqual(len(rows56),2,
|
||||
'fetchall returned incorrect number of rows'
|
||||
)
|
||||
|
||||
rows = [rows1[0]]
|
||||
rows.extend([rows23[0][0],rows23[1][0]])
|
||||
rows.append(rows4[0])
|
||||
rows.extend([rows56[0][0],rows56[1][0]])
|
||||
rows.sort()
|
||||
for i in range(0,len(self.samples)):
|
||||
self.assertEqual(rows[i],self.samples[i],
|
||||
'incorrect data retrieved or inserted'
|
||||
)
|
||||
finally:
|
||||
con.close()
|
||||
|
||||
def help_nextset_setUp(self,cur):
|
||||
''' Should create a procedure called deleteme
|
||||
that returns two result sets, first the
|
||||
number of rows in booze then "name from booze"
|
||||
'''
|
||||
raise NotImplementedError,'Helper not implemented'
|
||||
#sql="""
|
||||
# create procedure deleteme as
|
||||
# begin
|
||||
# select count(*) from booze
|
||||
# select name from booze
|
||||
# end
|
||||
#"""
|
||||
#cur.execute(sql)
|
||||
|
||||
def help_nextset_tearDown(self,cur):
|
||||
'If cleaning up is needed after nextSetTest'
|
||||
raise NotImplementedError,'Helper not implemented'
|
||||
#cur.execute("drop procedure deleteme")
|
||||
|
||||
def test_nextset(self):
    """Call a two-result-set procedure and walk the sets with nextset().

    Skips silently when the cursor has no `nextset` attribute (optional
    DB-API extension).
    """
    con = self._connect()
    try:
        cur = con.cursor()
        if not hasattr(cur,'nextset'):
            return

        try:
            self.executeDDL1(cur)
            # Fixed: removed dead `sql=self._populate()` assignment that
            # was immediately clobbered by the loop variable below.
            for sql in self._populate():
                cur.execute(sql)

            self.help_nextset_setUp(cur)

            cur.callproc('deleteme')
            numberofrows=cur.fetchone()
            assert numberofrows[0]== len(self.samples)
            assert cur.nextset()
            names=cur.fetchall()
            assert len(names) == len(self.samples)
            s=cur.nextset()
            # Fixed: identity comparison for None (`is`, not `==`).
            assert s is None,'No more return sets, should return None'
        finally:
            self.help_nextset_tearDown(cur)

    finally:
        con.close()
|
||||
|
||||
def test_nextset(self):
|
||||
raise NotImplementedError,'Drivers need to override this test'
|
||||
|
||||
def test_arraysize(self):
    """cursor.arraysize must exist; fetchmany tests exercise its use."""
    con = self._connect()
    try:
        self.assertTrue(
            hasattr(con.cursor(), 'arraysize'),
            'cursor.arraysize must be defined'
        )
    finally:
        con.close()
|
||||
|
||||
def test_setinputsizes(self):
    """setinputsizes must accept a size tuple and leave the cursor usable."""
    con = self._connect()
    try:
        cur = con.cursor()
        cur.setinputsizes((25,))
        # Cursor must still work after the hint.
        self._paraminsert(cur)
    finally:
        con.close()
|
||||
|
||||
def test_setoutputsize_basic(self):
    """setoutputsize must not blow up, with or without a column index."""
    con = self._connect()
    try:
        cur = con.cursor()
        cur.setoutputsize(1000)
        cur.setoutputsize(2000, 0)
        # Cursor must still work after the hints.
        self._paraminsert(cur)
    finally:
        con.close()
|
||||
|
||||
def test_setoutputsize(self):
|
||||
# Real test for setoutputsize is driver dependant
|
||||
raise NotImplementedError,'Driver need to override this test'
|
||||
|
||||
def test_None(self):
    """SQL NULL must round-trip back to Python None."""
    con = self._connect()
    try:
        cur = con.cursor()
        self.executeDDL1(cur)
        cur.execute('insert into %sbooze values (NULL)' % self.table_prefix)
        cur.execute('select name from %sbooze' % self.table_prefix)
        fetched = cur.fetchall()
        # Exactly one row, one column, holding None.
        self.assertEqual(len(fetched), 1)
        self.assertEqual(len(fetched[0]), 1)
        self.assertEqual(fetched[0][0], None, 'NULL value not returned as None')
    finally:
        con.close()
|
||||
|
||||
def test_Date(self):
    """Date and DateFromTicks constructors must both be callable."""
    d1 = self.driver.Date(2002, 12, 25)
    ticks = time.mktime((2002, 12, 25, 0, 0, 0, 0, 0, 0))
    d2 = self.driver.DateFromTicks(ticks)
    # Can we assume this? API doesn't specify, but it seems implied
    # self.assertEqual(str(d1),str(d2))
|
||||
|
||||
def test_Time(self):
    """Time and TimeFromTicks constructors must both be callable."""
    t1 = self.driver.Time(13, 45, 30)
    ticks = time.mktime((2001, 1, 1, 13, 45, 30, 0, 0, 0))
    t2 = self.driver.TimeFromTicks(ticks)
    # Can we assume this? API doesn't specify, but it seems implied
    # self.assertEqual(str(t1),str(t2))
|
||||
|
||||
def test_Timestamp(self):
    """Timestamp and TimestampFromTicks constructors must both be callable."""
    t1 = self.driver.Timestamp(2002, 12, 25, 13, 45, 30)
    ticks = time.mktime((2002, 12, 25, 13, 45, 30, 0, 0, 0))
    t2 = self.driver.TimestampFromTicks(ticks)
    # Can we assume this? API doesn't specify, but it seems implied
    # self.assertEqual(str(t1),str(t2))
|
||||
|
||||
def test_Binary(self):
    """Binary must accept both non-empty and empty byte strings."""
    for payload in ('Something', ''):
        b = self.driver.Binary(payload)
|
||||
|
||||
def test_STRING(self):
    """PEP 249: the driver module must expose a STRING type object."""
    self.assertTrue(
        hasattr(self.driver, 'STRING'), 'module.STRING must be defined')
|
||||
|
||||
def test_BINARY(self):
    """PEP 249: the driver module must expose a BINARY type object."""
    self.assertTrue(
        hasattr(self.driver, 'BINARY'), 'module.BINARY must be defined.')
|
||||
|
||||
def test_NUMBER(self):
    """PEP 249: the driver module must expose a NUMBER type object."""
    self.assertTrue(
        hasattr(self.driver, 'NUMBER'), 'module.NUMBER must be defined.')
|
||||
|
||||
def test_DATETIME(self):
    """PEP 249: the driver module must expose a DATETIME type object."""
    self.assertTrue(
        hasattr(self.driver, 'DATETIME'), 'module.DATETIME must be defined.')
|
||||
|
||||
def test_ROWID(self):
    """PEP 249: the driver module must expose a ROWID type object."""
    self.assertTrue(
        hasattr(self.driver, 'ROWID'), 'module.ROWID must be defined.')
|
||||
|
||||
|
|
@ -1,127 +0,0 @@
|
|||
#!/usr/bin/env python
|
||||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
import capabilities
|
||||
import unittest
|
||||
import pymysql
|
||||
from pymysql.tests import base
|
||||
import warnings
|
||||
|
||||
# Promote every warning to an exception so the capability tests fail
# loudly on any warning the driver emits.
warnings.filterwarnings('error')
|
||||
|
||||
class test_MySQLdb(capabilities.DatabaseTest):
    """Run the generic database-capability checks against pymysql.

    Connection parameters come from the shared test-database config plus
    the user's ~/.my.cnf; ANSI/strict SQL modes are forced so behavior is
    predictable across server configurations.
    """

    db_module = pymysql
    connect_args = ()
    connect_kwargs = base.PyMySQLTestCase.databases[0].copy()
    connect_kwargs.update(dict(read_default_file='~/.my.cnf',
                        use_unicode=True,
                        charset='utf8', sql_mode="ANSI,STRICT_TRANS_TABLES,TRADITIONAL"))

    create_table_extra = "ENGINE=INNODB CHARACTER SET UTF8"
    leak_test = False

    def quote_identifier(self, ident):
        # MySQL quotes identifiers with backticks, not double quotes.
        return "`%s`" % ident

    def test_TIME(self):
        from datetime import timedelta
        def generator(row,col):
            return timedelta(0, row*8000)
        self.check_data_integrity(
                 ('col1 TIME',),
                 generator)

    def test_TINYINT(self):
        # Signed 8-bit integer data: fold (row*row) % 256 into [-128, 127].
        def generator(row,col):
            v = (row*row) % 256
            if v > 127:
                v = v-256
            return v
        self.check_data_integrity(
                 ('col1 TINYINT',),
                 generator)

    def test_stored_procedures(self):
        db = self.connection
        c = self.cursor
        try:
            self.create_table(('pos INT', 'tree CHAR(20)'))
            c.executemany("INSERT INTO %s (pos,tree) VALUES (%%s,%%s)" % self.table,
                          list(enumerate('ash birch cedar larch pine'.split())))
            db.commit()

            c.execute("""
            CREATE PROCEDURE test_sp(IN t VARCHAR(255))
            BEGIN
                SELECT pos FROM %s WHERE tree = t;
            END
            """ % self.table)
            db.commit()

            c.callproc('test_sp', ('larch',))
            rows = c.fetchall()
            # Fixed: deprecated assertEquals -> assertEqual.
            self.assertEqual(len(rows), 1)
            self.assertEqual(rows[0][0], 3)
            c.nextset()
        finally:
            c.execute("DROP PROCEDURE IF EXISTS test_sp")
            c.execute('drop table %s' % (self.table))

    def test_small_CHAR(self):
        # Single-character data; i == 62 exercises the empty string and
        # i == 63 exercises NULL round-trips.
        def generator(row,col):
            i = ((row+1)*(col+1)+62)%256
            if i == 62: return ''
            if i == 63: return None
            return chr(i)
        self.check_data_integrity(
                 ('col1 char(1)','col2 char(1)'),
                 generator)

    def test_bug_2671682(self):
        from pymysql.constants import ER
        try:
            # Fixed: removed stray trailing semicolon; Py2/3-compatible
            # `except ... as` binding.
            self.cursor.execute("describe some_non_existent_table")
        except self.connection.ProgrammingError as msg:
            self.assertTrue(msg.args[0] == ER.NO_SUCH_TABLE)

    def test_insert_values(self):
        from pymysql.cursors import insert_values
        query = """INSERT FOO (a, b, c) VALUES (a, b, c)"""
        matched = insert_values.search(query)
        self.assertTrue(matched)
        values = matched.group(1)
        self.assertTrue(values == "(a, b, c)")

    def test_ping(self):
        self.connection.ping()

    def test_literal_int(self):
        self.assertTrue("2" == self.connection.literal(2))

    def test_literal_float(self):
        self.assertTrue("3.1415" == self.connection.literal(3.1415))

    def test_literal_string(self):
        self.assertTrue("'foo'" == self.connection.literal("foo"))
|
||||
|
||||
|
||||
if __name__ == '__main__':
    if test_MySQLdb.leak_test:
        # Leak hunting: turn on GC debug output so uncollectable objects
        # created by the tests are reported at exit.
        import gc
        gc.enable()
        gc.set_debug(gc.DEBUG_LEAK)
    unittest.main()
|
||||
|
||||
|
|
@ -1,217 +0,0 @@
|
|||
#!/usr/bin/env python
|
||||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
import dbapi20
|
||||
import unittest
|
||||
import pymysql
|
||||
from pymysql.tests import base
|
||||
|
||||
class test_MySQLdb(dbapi20.DatabaseAPI20Test):
    """PEP 249 compliance suite bound to pymysql.

    Several generic dbapi20 tests are overridden below because MySQL
    always returns a result set (possibly empty) where the generic suite
    expects an error, and because setoutputsize/nextset semantics are
    driver-specific.
    """

    driver = pymysql
    connect_args = ()
    connect_kw_args = base.PyMySQLTestCase.databases[0].copy()
    connect_kw_args.update(dict(read_default_file='~/.my.cnf',
                                charset='utf8',
                                sql_mode="ANSI,STRICT_TRANS_TABLES,TRADITIONAL"))

    def test_setoutputsize(self): pass
    def test_setoutputsize_basic(self): pass
    def test_nextset(self): pass

    """The tests on fetchone and fetchall and rowcount bogusly
    test for an exception if the statement cannot return a
    result set. MySQL always returns a result set; it's just that
    some things return empty result sets."""

    def test_fetchall(self):
        con = self._connect()
        try:
            cur = con.cursor()
            # cursor.fetchall should raise an Error if called
            # without executing a query that may return rows (such
            # as a select)
            self.assertRaises(self.driver.Error, cur.fetchall)

            self.executeDDL1(cur)
            for sql in self._populate():
                cur.execute(sql)

            # cursor.fetchall should raise an Error if called
            # after executing a a statement that cannot return rows
##            self.assertRaises(self.driver.Error,cur.fetchall)

            cur.execute('select name from %sbooze' % self.table_prefix)
            rows = cur.fetchall()
            self.assertTrue(cur.rowcount in (-1,len(self.samples)))
            self.assertEqual(len(rows),len(self.samples),
                'cursor.fetchall did not retrieve all rows'
                )
            rows = [r[0] for r in rows]
            rows.sort()
            for i in range(0,len(self.samples)):
                self.assertEqual(rows[i],self.samples[i],
                'cursor.fetchall retrieved incorrect rows'
                )
            rows = cur.fetchall()
            self.assertEqual(
                len(rows),0,
                'cursor.fetchall should return an empty list if called '
                'after the whole result set has been fetched'
                )
            self.assertTrue(cur.rowcount in (-1,len(self.samples)))

            self.executeDDL2(cur)
            cur.execute('select name from %sbarflys' % self.table_prefix)
            rows = cur.fetchall()
            self.assertTrue(cur.rowcount in (-1,0))
            self.assertEqual(len(rows),0,
                'cursor.fetchall should return an empty list if '
                'a select query returns no rows'
                )

        finally:
            con.close()

    def test_fetchone(self):
        con = self._connect()
        try:
            cur = con.cursor()

            # cursor.fetchone should raise an Error if called before
            # executing a select-type query
            self.assertRaises(self.driver.Error,cur.fetchone)

            # cursor.fetchone should raise an Error if called after
            # executing a query that cannnot return rows
            self.executeDDL1(cur)
##            self.assertRaises(self.driver.Error,cur.fetchone)

            cur.execute('select name from %sbooze' % self.table_prefix)
            self.assertEqual(cur.fetchone(),None,
                'cursor.fetchone should return None if a query retrieves '
                'no rows'
                )
            self.assertTrue(cur.rowcount in (-1,0))

            # cursor.fetchone should raise an Error if called after
            # executing a query that cannnot return rows
            cur.execute("insert into %sbooze values ('Victoria Bitter')" % (
                self.table_prefix
                ))
##            self.assertRaises(self.driver.Error,cur.fetchone)

            cur.execute('select name from %sbooze' % self.table_prefix)
            r = cur.fetchone()
            self.assertEqual(len(r),1,
                'cursor.fetchone should have retrieved a single row'
                )
            self.assertEqual(r[0],'Victoria Bitter',
                'cursor.fetchone retrieved incorrect data'
                )
##            self.assertEqual(cur.fetchone(),None,
##                'cursor.fetchone should return None if no more rows available'
##                )
            self.assertTrue(cur.rowcount in (-1,1))
        finally:
            con.close()

    # Same complaint as for fetchall and fetchone
    def test_rowcount(self):
        con = self._connect()
        try:
            cur = con.cursor()
            self.executeDDL1(cur)
##            self.assertEqual(cur.rowcount,-1,
##                'cursor.rowcount should be -1 after executing no-result '
##                'statements'
##                )
            cur.execute("insert into %sbooze values ('Victoria Bitter')" % (
                self.table_prefix
                ))
##            self.assertTrue(cur.rowcount in (-1,1),
##                'cursor.rowcount should == number or rows inserted, or '
##                'set to -1 after executing an insert statement'
##                )
            cur.execute("select name from %sbooze" % self.table_prefix)
            self.assertTrue(cur.rowcount in (-1,1),
                'cursor.rowcount should == number of rows returned, or '
                'set to -1 after executing a select statement'
                )
            self.executeDDL2(cur)
##            self.assertEqual(cur.rowcount,-1,
##                'cursor.rowcount not being reset to -1 after executing '
##                'no-result statements'
##                )
        finally:
            con.close()

    def test_callproc(self):
        pass # performed in test_MySQL_capabilities

    def help_nextset_setUp(self,cur):
        ''' Should create a procedure called deleteme
            that returns two result sets, first the
            number of rows in booze then "name from booze"
        '''
        sql="""
           create procedure deleteme()
           begin
               select count(*) from %(tp)sbooze;
               select name from %(tp)sbooze;
           end
        """ % dict(tp=self.table_prefix)
        cur.execute(sql)

    def help_nextset_tearDown(self,cur):
        'If cleaning up is needed after nextSetTest'
        cur.execute("drop procedure deleteme")

    def test_nextset(self):
        from warnings import warn
        con = self._connect()
        try:
            cur = con.cursor()
            if not hasattr(cur,'nextset'):
                return

            try:
                self.executeDDL1(cur)
                for sql in self._populate():
                    cur.execute(sql)

                self.help_nextset_setUp(cur)

                cur.callproc('deleteme')
                numberofrows=cur.fetchone()
                assert numberofrows[0]== len(self.samples)
                assert cur.nextset()
                names=cur.fetchall()
                assert len(names) == len(self.samples)
                s=cur.nextset()
                if s:
                    empty = cur.fetchall()
                    # Fixed: deprecated assertEquals -> assertEqual; also
                    # dropped the dead `sql=self._populate()` assignment.
                    self.assertEqual(len(empty), 0,
                                     "non-empty result set after other result sets")
                    #warn("Incompatibility: MySQL returns an empty result set for the CALL itself",
                    #     Warning)
                #assert s == None,'No more return sets, should return None'
            finally:
                self.help_nextset_tearDown(cur)

        finally:
            con.close()
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Run the PEP 249 compliance suite directly.
    unittest.main()
|
||||
|
|
@ -1,102 +0,0 @@
|
|||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
import unittest
|
||||
|
||||
import pymysql
|
||||
_mysql = pymysql
|
||||
from pymysql.constants import FIELD_TYPE
|
||||
from pymysql.tests import base
|
||||
|
||||
|
||||
class TestDBAPISet(unittest.TestCase):
    """DBAPISet objects must compare by set membership (PEP 249 type objects)."""

    def test_set_equality(self):
        self.assertEqual(pymysql.STRING, pymysql.STRING)

    def test_set_inequality(self):
        self.assertNotEqual(pymysql.STRING, pymysql.NUMBER)

    def test_set_equality_membership(self):
        # A raw field-type code compares equal to the set containing it.
        self.assertEqual(FIELD_TYPE.VAR_STRING, pymysql.STRING)

    def test_set_inequality_membership(self):
        self.assertNotEqual(FIELD_TYPE.DATE, pymysql.STRING)
|
||||
|
||||
|
||||
class CoreModule(unittest.TestCase):
    """Core _mysql module features."""

    def test_NULL(self):
        """Should have a NULL constant."""
        self.assertEqual(_mysql.NULL, 'NULL')

    def test_version(self):
        """Version information sanity."""
        self.assertIsInstance(_mysql.__version__, str)
        self.assertIsInstance(_mysql.version_info, tuple)
        self.assertEqual(len(_mysql.version_info), 5)

    def test_client_info(self):
        self.assertIsInstance(_mysql.get_client_info(), str)

    def test_thread_safe(self):
        self.assertIsInstance(_mysql.thread_safe(), int)
|
||||
|
||||
|
||||
class CoreAPI(unittest.TestCase):
    """Test _mysql interaction internals against a live connection."""

    def setUp(self):
        kwargs = base.PyMySQLTestCase.databases[0].copy()
        kwargs["read_default_file"] = "~/.my.cnf"
        self.conn = _mysql.connect(**kwargs)

    def tearDown(self):
        self.conn.close()

    def test_thread_id(self):
        tid = self.conn.thread_id()
        self.assertTrue(isinstance(tid, int),
                        "thread_id didn't return an int.")

        self.assertRaises(TypeError, self.conn.thread_id, ('evil',),
                          "thread_id shouldn't accept arguments.")

    def test_affected_rows(self):
        # Fixed: deprecated assertEquals -> assertEqual.
        self.assertEqual(self.conn.affected_rows(), 0,
                         "Should return 0 before we do anything.")

    #def test_debug(self):
        ## FIXME Only actually tests if you lack SUPER
        #self.assertRaises(pymysql.OperationalError,
            #self.conn.dump_debug_info)

    def test_charset_name(self):
        self.assertTrue(isinstance(self.conn.character_set_name(), str),
                        "Should return a string.")

    def test_host_info(self):
        self.assertTrue(isinstance(self.conn.get_host_info(), str),
                        "Should return a string.")

    def test_proto_info(self):
        self.assertTrue(isinstance(self.conn.get_proto_info(), int),
                        "Should return an int.")

    def test_server_info(self):
        self.assertTrue(isinstance(self.conn.get_server_info(), basestring),
                        "Should return an str.")
|
||||
|
||||
if __name__ == "__main__":
    # Run the core-module tests directly.
    unittest.main()
|
||||
|
|
@ -1,28 +0,0 @@
|
|||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
from time import localtime
|
||||
from datetime import date, datetime, time, timedelta
|
||||
|
||||
# PEP 249 type constructors: expose the datetime classes under the
# DB-API names, plus the *FromTicks converters (local time).
Date = date
Time = time
TimeDelta = timedelta
Timestamp = datetime

def DateFromTicks(ticks):
    """Build a Date from a POSIX timestamp, interpreted in local time."""
    year, month, day = localtime(ticks)[:3]
    return date(year, month, day)

def TimeFromTicks(ticks):
    """Build a Time from a POSIX timestamp, interpreted in local time."""
    hour, minute, second = localtime(ticks)[3:6]
    return time(hour, minute, second)

def TimestampFromTicks(ticks):
    """Build a Timestamp from a POSIX timestamp, interpreted in local time."""
    return datetime(*localtime(ticks)[:6])
|
||||
|
|
@ -1,31 +0,0 @@
|
|||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
import struct
|
||||
|
||||
def byte2int(b):
    """Return the integer value of *b*.

    Accepts either an int (already a byte value) or a single-byte
    string, which is unpacked as an unsigned 8-bit integer.
    """
    if isinstance(b, int):
        return b
    return struct.unpack("!B", b)[0]
|
||||
|
||||
def int2byte(i):
    """Pack the integer *i* (0-255) into a single network-order byte."""
    packer = struct.Struct("!B")
    return packer.pack(i)
|
||||
|
||||
def join_bytes(bs):
    """Concatenate a sequence of byte strings.

    An empty sequence yields '' (the Python 2 empty string, matching the
    module's original convention); otherwise the pieces are summed onto
    the first element, preserving its type.
    """
    if not bs:
        return ""
    joined = bs[0]
    for piece in bs[1:]:
        joined = joined + piece
    return joined
|
||||
|
|
@ -1,48 +0,0 @@
|
|||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
import paramiko
|
||||
import cloudstackException
|
||||
class remoteSSHClient(object):
|
||||
def __init__(self, host, port, user, passwd):
|
||||
self.host = host
|
||||
self.port = port
|
||||
self.user = user
|
||||
self.passwd = passwd
|
||||
self.ssh = paramiko.SSHClient()
|
||||
self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
|
||||
try:
|
||||
self.ssh.connect(str(host),int(port), user, passwd)
|
||||
except paramiko.SSHException, sshex:
|
||||
raise cloudstackException.InvalidParameterException(repr(sshex))
|
||||
|
||||
def execute(self, command):
|
||||
stdin, stdout, stderr = self.ssh.exec_command(command)
|
||||
output = stdout.readlines()
|
||||
errors = stderr.readlines()
|
||||
results = []
|
||||
if output is not None and len(output) == 0:
|
||||
if errors is not None and len(errors) > 0:
|
||||
for error in errors:
|
||||
results.append(error.rstrip())
|
||||
|
||||
else:
|
||||
for strOut in output:
|
||||
results.append(strOut.rstrip())
|
||||
|
||||
return results
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Ad-hoc smoke test: needs a reachable SSH host; not part of the suite.
    ssh = remoteSSHClient("192.168.137.2", 22, "root", "password")
    print ssh.execute("ls -l")
    print ssh.execute("rm x")
|
||||
|
|
@ -1,12 +0,0 @@
|
|||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
|
|
@ -1,85 +0,0 @@
|
|||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
'''
|
||||
Created on Oct 18, 2011
|
||||
|
||||
@author: frank
|
||||
'''
|
||||
from optparse import OptionParser
|
||||
from configGenerator import *
|
||||
|
||||
if __name__ == '__main__':
    # Build a minimal basic-zone simulator configuration (one zone, one
    # pod, one cluster) and serialize it to JSON for the test client.
    parser = OptionParser()
    parser.add_option('-o', '--output', action='store', default='./setup.conf', dest='output', help='the path where the json config file generated')
    parser.add_option('-m', '--mshost', dest='mshost', help='hostname/ip of management server', action='store')

    (opts, args) = parser.parse_args()
    # -m/--mshost is required; OptionParser has no built-in required flag.
    mandatories = ['mshost']
    for m in mandatories:
        if not opts.__dict__[m]:
            parser.error("mandatory option - " + m + " missing")

    zs = cloudstackConfiguration()

    #Define Zone
    z = zone()
    z.dns1 = "8.8.8.8"
    z.dns2 = "4.4.4.4"
    z.internaldns1 = "192.168.110.254"
    z.internaldns2 = "192.168.110.253"
    z.name = "testZone"
    z.networktype = 'Basic'

    #Define SecondaryStorage
    ss = secondaryStorage()
    ss.url = "nfs://172.16.15.32/export/share/secondary"
    z.secondaryStorages.append(ss)

    # One pod with a management-network IP range.
    p = pod()
    p.name = "POD-1"
    p.gateway = "10.223.64.1"
    p.netmask = "255.255.254.0"
    p.startip = "10.223.64.50"
    p.endip = "10.223.64.60"

    # Guest IP range shares the pod's gateway/netmask (basic zone).
    ip = iprange()
    ip.vlan = "untagged"
    ip.gateway = p.gateway
    ip.netmask = p.netmask
    ip.startip = "10.223.64.70"
    ip.endip = "10.223.64.220"
    p.guestIpRanges.append(ip)

    # Single CloudStack-managed simulator cluster.
    c = cluster()
    c.clustername = "CLUSTER-1"
    c.clustertype = "CloudManaged"
    c.hypervisor = "Simulator"
    p.clusters.append(c)

    z.pods.append(p)
    zs.zones.append(z)

    '''Add one mgt server'''
    mgt = managementServer()
    mgt.mgtSvrIp = opts.mshost
    zs.mgtSvr.append(mgt)

    '''Add a database'''
    db = dbServer()
    db.dbSvr = opts.mshost
    db.user = "root"
    db.passwd = ""
    zs.dbSvr = db

    generate_setup_config(zs, opts.output)
|
||||
|
||||
|
|
@ -1,106 +0,0 @@
|
|||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
'''
|
||||
Created on Oct 18, 2011
|
||||
|
||||
@author: frank
|
||||
'''
|
||||
from cloudstackTestCase import *
|
||||
from cloudstackAPI import *
|
||||
import uuid
|
||||
import threading
|
||||
import random
|
||||
import time
|
||||
|
||||
class Task(threading.Thread):
    """A thread that invokes one callable with a single argument."""

    def __init__(self, func, param=None):
        super(Task, self).__init__()
        # Callable plus the single argument it will receive.
        self.func = func
        self.param = param

    def run(self):
        self.func(self.param)

    def doTask(self):
        # Kick off the thread; run() is then executed asynchronously.
        self.start()
|
||||
|
||||
class TestDeploy100Hosts(cloudstackTestCase):
|
||||
hosts = []
|
||||
def deployHost(self, url):
|
||||
apiClient = self.testClient.getApiClient()
|
||||
addHostCmd = addHost.addHostCmd()
|
||||
addHostCmd.hypervisor = "simulator"
|
||||
addHostCmd.clusterid = 1
|
||||
addHostCmd.zoneid = 1
|
||||
addHostCmd.podid = 1
|
||||
addHostCmd.url = "http://sim/%s"%url
|
||||
addHostCmd.username = "placeholder"
|
||||
addHostCmd.password = "placeholder"
|
||||
addHostResponce = apiClient.addHost(addHostCmd)
|
||||
return addHostResponce[0].id
|
||||
|
||||
def randomCancelMaintenance(self):
|
||||
def run(param):
|
||||
while(1):
|
||||
try:
|
||||
interval = random.randint(1, 2)
|
||||
time.sleep(interval)
|
||||
if len(self.hosts) == 0:
|
||||
continue
|
||||
|
||||
index = random.randint(0, len(self.hosts)-1)
|
||||
hostId = self.hosts[index]
|
||||
apiClient = self.testClient.getApiClient()
|
||||
cMaintainCmd = cancelHostMaintenance.cancelHostMaintenanceCmd()
|
||||
cMaintainCmd.id = hostId
|
||||
response = apiClient.cancelHostMaintenance(cMaintainCmd)
|
||||
id = response.id
|
||||
print "Host %s cancelled maintenance mode" % id
|
||||
except Exception, e:
|
||||
print e
|
||||
|
||||
t = Task(run)
|
||||
t.doTask()
|
||||
|
||||
def randomEnterMaintenance(self):
|
||||
def run(param):
|
||||
while(1):
|
||||
try:
|
||||
interval = random.randint(1, 2)
|
||||
time.sleep(interval)
|
||||
if len(self.hosts) == 0:
|
||||
continue
|
||||
index = random.randint(0, len(self.hosts)-1)
|
||||
hostId = self.hosts[index]
|
||||
apiClient = self.testClient.getApiClient()
|
||||
maintainCmd = prepareHostForMaintenance.prepareHostForMaintenanceCmd()
|
||||
maintainCmd.id = hostId
|
||||
response = apiClient.prepareHostForMaintenance(maintainCmd)
|
||||
id = response.id
|
||||
print "Host %s entered maintenance mode" % id
|
||||
except Exception, e:
|
||||
print e
|
||||
|
||||
t = Task(run)
|
||||
t.doTask()
|
||||
|
||||
|
||||
def test_deploy100Hosts(self):
|
||||
#for i in range(200):
|
||||
#self.hosts.append(self.deployHost(i))
|
||||
for i in range(200):
|
||||
self.hosts.append(i)
|
||||
self.randomEnterMaintenance()
|
||||
self.randomCancelMaintenance()
|
||||
while(1): time.sleep(10000)
|
||||
|
||||
|
|
@ -1,131 +0,0 @@
|
|||
#!/usr/bin/env python
|
||||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
|
||||
'''
|
||||
# Experimental state of scripts
|
||||
# * Need to be reviewed
|
||||
# * Only a sandbox
|
||||
'''
|
||||
|
||||
from ConfigParser import SafeConfigParser
|
||||
from optparse import OptionParser
|
||||
from configGenerator import *
|
||||
import random
|
||||
|
||||
|
||||
def getGlobalSettings(config):
|
||||
for k, v in dict(config.items('globals')).iteritems():
|
||||
cfg = configuration()
|
||||
cfg.name = k
|
||||
cfg.value = v
|
||||
yield cfg
|
||||
|
||||
|
||||
def describeResources(config):
|
||||
zs = cloudstackConfiguration()
|
||||
|
||||
z = zone()
|
||||
z.dns1 = config.get('environment', 'dns')
|
||||
z.internaldns1 = config.get('environment', 'dns')
|
||||
z.name = 'Sandbox-%s'%(config.get('environment', 'hypervisor'))
|
||||
z.networktype = 'Advanced'
|
||||
z.guestcidraddress = '10.1.1.0/24'
|
||||
|
||||
prov = provider()
|
||||
prov.vlan = config.get('cloudstack','guest.vlan')
|
||||
z.providers.append(prov)
|
||||
|
||||
p = pod()
|
||||
p.name = 'POD0'
|
||||
p.gateway = config.get('cloudstack', 'private.gateway')
|
||||
p.startip = config.get('cloudstack', 'private.pod.startip')
|
||||
p.endip = config.get('cloudstack', 'private.pod.endip')
|
||||
p.netmask = '255.255.255.0'
|
||||
|
||||
v = iprange()
|
||||
v.gateway = config.get('cloudstack', 'public.gateway')
|
||||
v.startip = config.get('cloudstack', 'public.vlan.startip')
|
||||
v.endip = config.get('cloudstack', 'public.vlan.endip')
|
||||
v.netmask = '255.255.255.0'
|
||||
v.vlan = config.get('cloudstack', 'public.vlan')
|
||||
z.ipranges.append(v)
|
||||
|
||||
c = cluster()
|
||||
c.clustername = 'C0'
|
||||
c.hypervisor = config.get('environment', 'hypervisor')
|
||||
c.clustertype = 'CloudManaged'
|
||||
|
||||
h = host()
|
||||
h.username = 'root'
|
||||
h.password = 'password'
|
||||
h.url = 'http://%s'%(config.get('cloudstack', 'host'))
|
||||
c.hosts.append(h)
|
||||
|
||||
ps = primaryStorage()
|
||||
ps.name = 'PS0'
|
||||
ps.url = config.get('cloudstack', 'pool')
|
||||
c.primaryStorages.append(ps)
|
||||
|
||||
p.clusters.append(c)
|
||||
z.pods.append(p)
|
||||
|
||||
secondary = secondaryStorage()
|
||||
secondary.url = config.get('cloudstack', 'secondary')
|
||||
z.secondaryStorages.append(secondary)
|
||||
|
||||
'''Add zone'''
|
||||
zs.zones.append(z)
|
||||
|
||||
'''Add mgt server'''
|
||||
mgt = managementServer()
|
||||
mgt.mgtSvrIp = config.get('environment', 'mshost')
|
||||
zs.mgtSvr.append(mgt)
|
||||
|
||||
'''Add a database'''
|
||||
db = dbServer()
|
||||
db.dbSvr = config.get('environment', 'database')
|
||||
zs.dbSvr = db
|
||||
|
||||
'''Add some configuration'''
|
||||
[zs.globalConfig.append(cfg) for cfg in getGlobalSettings(config)]
|
||||
|
||||
''''add loggers'''
|
||||
testClientLogger = logger()
|
||||
testClientLogger.name = 'TestClient'
|
||||
testClientLogger.file = '/var/log/testclient.log'
|
||||
|
||||
testCaseLogger = logger()
|
||||
testCaseLogger.name = 'TestCase'
|
||||
testCaseLogger.file = '/var/log/testcase.log'
|
||||
|
||||
zs.logger.append(testClientLogger)
|
||||
zs.logger.append(testCaseLogger)
|
||||
return zs
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
parser = OptionParser()
|
||||
parser.add_option('-i', '--input', action='store', default='setup.properties', \
|
||||
dest='input', help='file containing environment setup information')
|
||||
parser.add_option('-o', '--output', action='store', default='./sandbox.cfg', \
|
||||
dest='output', help='path where environment json will be generated')
|
||||
|
||||
|
||||
(opts, args) = parser.parse_args()
|
||||
|
||||
cfg_parser = SafeConfigParser()
|
||||
cfg_parser.read(opts.input)
|
||||
|
||||
cfg = describeResources(cfg_parser)
|
||||
generate_setup_config(cfg, opts.output)
|
||||
|
|
@ -1,37 +0,0 @@
|
|||
[globals]
|
||||
expunge.delay=60
|
||||
expunge.interval=60
|
||||
storage.cleanup.interval=300
|
||||
account.cleanup.interval=600
|
||||
expunge.workers=3
|
||||
workers=10
|
||||
use.user.concentrated.pod.allocation=false
|
||||
vm.allocation.algorithm=random
|
||||
vm.op.wait.interval=5
|
||||
guest.domain.suffix=sandbox.kvm
|
||||
instance.name=QA
|
||||
direct.agent.load.size=1000
|
||||
default.page.size=10000
|
||||
check.pod.cidrs=true
|
||||
secstorage.allowed.internal.sites=10.147.28.0/24
|
||||
[environment]
|
||||
dns=10.147.28.6
|
||||
mshost=10.147.29.111
|
||||
database=10.147.29.111
|
||||
hypervisor=kvm
|
||||
[cloudstack]
|
||||
zone.vlan=675-679
|
||||
#pod configuration
|
||||
private.gateway=10.147.29.1
|
||||
private.pod.startip=10.147.29.150
|
||||
private.pod.endip=10.147.29.159
|
||||
#public vlan range
|
||||
public.gateway=10.147.31.1
|
||||
public.vlan=31
|
||||
public.vlan.startip=10.147.31.150
|
||||
public.vlan.endip=10.147.31.159
|
||||
#hosts
|
||||
host=10.147.29.58
|
||||
#pools
|
||||
pool=nfs://10.147.28.6:/export/home/prasanna/kamakura
|
||||
secondary=nfs://10.147.28.6:/export/home/prasanna/sstor
|
||||
|
|
@ -1,36 +0,0 @@
|
|||
[globals]
|
||||
expunge.delay=60
|
||||
expunge.interval=60
|
||||
storage.cleanup.interval=300
|
||||
account.cleanup.interval=600
|
||||
expunge.workers=3
|
||||
workers=10
|
||||
use.user.concentrated.pod.allocation=false
|
||||
vm.allocation.algorithm=random
|
||||
vm.op.wait.interval=5
|
||||
guest.domain.suffix=sandbox.kvm
|
||||
instance.name=QA
|
||||
direct.agent.load.size=1000
|
||||
default.page.size=10000
|
||||
check.pod.cidrs=true
|
||||
secstorage.allowed.internal.sites=10.147.28.0/24
|
||||
[environment]
|
||||
dns=10.147.28.6
|
||||
mshost=10.147.29.111
|
||||
database=10.147.29.111
|
||||
[cloudstack]
|
||||
zone.vlan=675-679
|
||||
#pod configuration
|
||||
private.gateway=10.147.29.1
|
||||
private.pod.startip=10.147.29.150
|
||||
private.pod.endip=10.147.29.159
|
||||
#public vlan range
|
||||
public.gateway=10.147.31.1
|
||||
public.vlan=31
|
||||
public.vlan.startip=10.147.31.150
|
||||
public.vlan.endip=10.147.31.159
|
||||
#hosts
|
||||
host=10.147.29.58
|
||||
#pools
|
||||
pool=nfs://10.147.28.6:/export/home/prasanna/kamakura
|
||||
secondary=nfs://10.147.28.6:/export/home/prasanna/sstor
|
||||
|
|
@ -1,138 +0,0 @@
|
|||
#!/usr/bin/env python
|
||||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
try:
|
||||
import unittest2 as unittest
|
||||
except ImportError:
|
||||
import unittest
|
||||
|
||||
import random
|
||||
import hashlib
|
||||
from cloudstackTestCase import *
|
||||
import remoteSSHClient
|
||||
|
||||
class SampleScenarios(cloudstackTestCase):
|
||||
'''
|
||||
'''
|
||||
def setUp(self):
|
||||
pass
|
||||
|
||||
|
||||
def tearDown(self):
|
||||
pass
|
||||
|
||||
|
||||
def test_1_createAccounts(self, numberOfAccounts=2):
|
||||
'''
|
||||
Create a bunch of user accounts
|
||||
'''
|
||||
mdf = hashlib.md5()
|
||||
mdf.update('password')
|
||||
mdf_pass = mdf.hexdigest()
|
||||
api = self.testClient.getApiClient()
|
||||
for i in range(1, numberOfAccounts + 1):
|
||||
acct = createAccount.createAccountCmd()
|
||||
acct.accounttype = 0
|
||||
acct.firstname = 'user' + str(i)
|
||||
acct.lastname = 'user' + str(i)
|
||||
acct.password = mdf_pass
|
||||
acct.username = 'user' + str(i)
|
||||
acct.email = 'user@example.com'
|
||||
acct.account = 'user' + str(i)
|
||||
acct.domainid = 1
|
||||
acctResponse = api.createAccount(acct)
|
||||
self.debug("successfully created account: %s, user: %s, id: %s"%(acctResponse.account, acctResponse.username, acctResponse.id))
|
||||
|
||||
|
||||
def test_2_createServiceOffering(self):
|
||||
apiClient = self.testClient.getApiClient()
|
||||
createSOcmd=createServiceOffering.createServiceOfferingCmd()
|
||||
createSOcmd.name='Sample SO'
|
||||
createSOcmd.displaytext='Sample SO'
|
||||
createSOcmd.storagetype='shared'
|
||||
createSOcmd.cpunumber=1
|
||||
createSOcmd.cpuspeed=100
|
||||
createSOcmd.memory=128
|
||||
createSOcmd.offerha='false'
|
||||
createSOresponse = apiClient.createServiceOffering(createSOcmd)
|
||||
return createSOresponse.id
|
||||
|
||||
def deployCmd(self, account, service):
|
||||
deployVmCmd = deployVirtualMachine.deployVirtualMachineCmd()
|
||||
deployVmCmd.zoneid = 1
|
||||
deployVmCmd.account=account
|
||||
deployVmCmd.domainid=1
|
||||
deployVmCmd.templateid=2
|
||||
deployVmCmd.serviceofferingid=service
|
||||
return deployVmCmd
|
||||
|
||||
def listVmsInAccountCmd(self, acct):
|
||||
api = self.testClient.getApiClient()
|
||||
listVmCmd = listVirtualMachines.listVirtualMachinesCmd()
|
||||
listVmCmd.account = acct
|
||||
listVmCmd.zoneid = 1
|
||||
listVmCmd.domainid = 1
|
||||
listVmResponse = api.listVirtualMachines(listVmCmd)
|
||||
return listVmResponse
|
||||
|
||||
|
||||
def destroyVmCmd(self, key):
|
||||
api = self.testClient.getApiClient()
|
||||
destroyVmCmd = destroyVirtualMachine.destroyVirtualMachineCmd()
|
||||
destroyVmCmd.id = key
|
||||
api.destroyVirtualMachine(destroyVmCmd)
|
||||
|
||||
|
||||
def test_3_stressDeploy(self):
|
||||
'''
|
||||
Deploy 5 Vms in each account
|
||||
'''
|
||||
service_id = self.test_2_createServiceOffering()
|
||||
api = self.testClient.getApiClient()
|
||||
for acct in range(1, 5):
|
||||
[api.deployVirtualMachine(self.deployCmd('user'+str(acct), service_id)) for x in range(0,5)]
|
||||
|
||||
@unittest.skip("skipping destroys")
|
||||
def test_4_stressDestroy(self):
|
||||
'''
|
||||
Cleanup all Vms in every account
|
||||
'''
|
||||
api = self.testClient.getApiClient()
|
||||
for acct in range(1, 6):
|
||||
for vm in self.listVmsInAccountCmd('user'+str(acct)):
|
||||
if vm is not None:
|
||||
self.destroyVmCmd(vm.id)
|
||||
|
||||
@unittest.skip("skipping destroys")
|
||||
def test_5_combineStress(self):
|
||||
for i in range(0, 5):
|
||||
self.test_3_stressDeploy()
|
||||
self.test_4_stressDestroy()
|
||||
|
||||
def deployN(self,nargs=300,batchsize=0):
|
||||
'''
|
||||
Deploy Nargs number of VMs concurrently in batches of size {batchsize}.
|
||||
When batchsize is 0 all Vms are deployed in one batch
|
||||
VMs will be deployed in 5:2:6 ratio
|
||||
'''
|
||||
cmds = []
|
||||
|
||||
if batchsize == 0:
|
||||
self.testClient.submitCmdsAndWait(cmds)
|
||||
else:
|
||||
while len(z) > 0:
|
||||
try:
|
||||
newbatch = [cmds.pop() for b in range(batchsize)] #pop batchsize items
|
||||
self.testClient.submitCmdsAndWait(newbatch)
|
||||
except IndexError:
|
||||
break
|
||||
|
|
@ -1,36 +0,0 @@
|
|||
[globals]
|
||||
expunge.delay=60
|
||||
expunge.interval=60
|
||||
storage.cleanup.interval=300
|
||||
account.cleanup.interval=600
|
||||
expunge.workers=3
|
||||
workers=10
|
||||
vm.allocation.algorithm=userdispersing
|
||||
vm.op.wait.interval=5
|
||||
guest.domain.suffix=sandbox.xen
|
||||
instance.name=QA
|
||||
direct.agent.load.size=1000
|
||||
default.page.size=10000
|
||||
check.pod.cidrs=true
|
||||
secstorage.allowed.internal.sites=10.147.28.0/24
|
||||
[environment]
|
||||
dns=10.147.28.6
|
||||
mshost=10.147.29.110
|
||||
database=localhost
|
||||
hypervisor=XenServer
|
||||
[cloudstack]
|
||||
guest.vlan=670-674
|
||||
#pod configuration
|
||||
private.gateway=10.147.29.1
|
||||
private.pod.startip=10.147.29.140
|
||||
private.pod.endip=10.147.29.149
|
||||
#public vlan range
|
||||
public.gateway=10.147.31.1
|
||||
public.vlan=31
|
||||
public.vlan.startip=10.147.31.140
|
||||
public.vlan.endip=10.147.31.149
|
||||
#hosts
|
||||
host=10.147.29.56
|
||||
#pools
|
||||
pool=nfs://10.147.28.6:/export/home/prasanna/taxila
|
||||
secondary=nfs://10.147.28.6:/export/home/prasanna/secondary
|
||||
|
|
@ -1,129 +0,0 @@
|
|||
#!/usr/bin/env python
|
||||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
'''
|
||||
# Experimental state of scripts
|
||||
# * Need to be reviewed
|
||||
# * Only a sandbox
|
||||
'''
|
||||
|
||||
from ConfigParser import SafeConfigParser
|
||||
from optparse import OptionParser
|
||||
from configGenerator import *
|
||||
import random
|
||||
|
||||
|
||||
def getGlobalSettings(config):
|
||||
for k, v in dict(config.items('globals')).iteritems():
|
||||
cfg = configuration()
|
||||
cfg.name = k
|
||||
cfg.value = v
|
||||
yield cfg
|
||||
|
||||
|
||||
def describeResources(config):
|
||||
zs = cloudstackConfiguration()
|
||||
|
||||
z = zone()
|
||||
z.dns1 = config.get('environment', 'dns')
|
||||
z.internaldns1 = config.get('environment', 'dns')
|
||||
z.name = 'Sandbox-%s'%(config.get('environment', 'hypervisor'))
|
||||
z.networktype = 'Basic'
|
||||
z.domain = 'cloud.basic'
|
||||
|
||||
sgprov = provider() #SecurityGroup Provider
|
||||
sgprov.name = 'SecurityGroupProvider'
|
||||
z.providers.append(sgprov)
|
||||
|
||||
p = pod()
|
||||
p.name = 'POD0'
|
||||
p.gateway = config.get('cloudstack', 'private.gateway')
|
||||
p.startip = config.get('cloudstack', 'private.pod.startip')
|
||||
p.endip = config.get('cloudstack', 'private.pod.endip')
|
||||
p.netmask = '255.255.255.0'
|
||||
|
||||
v = iprange()
|
||||
v.gateway = config.get('cloudstack', 'guest.gateway')
|
||||
v.startip = config.get('cloudstack', 'guest.startip')
|
||||
v.endip = config.get('cloudstack', 'guest.endip')
|
||||
v.netmask = '255.255.255.0'
|
||||
p.guestIpRanges.append(v)
|
||||
|
||||
c = cluster()
|
||||
c.clustername = 'C0'
|
||||
c.hypervisor = config.get('environment', 'hypervisor')
|
||||
c.clustertype = 'CloudManaged'
|
||||
|
||||
h = host()
|
||||
h.username = 'root'
|
||||
h.password = 'password'
|
||||
h.url = 'http://%s'%(config.get('cloudstack', 'host'))
|
||||
c.hosts.append(h)
|
||||
|
||||
ps = primaryStorage()
|
||||
ps.name = 'PS0'
|
||||
ps.url = config.get('cloudstack', 'pool')
|
||||
c.primaryStorages.append(ps)
|
||||
|
||||
p.clusters.append(c)
|
||||
z.pods.append(p)
|
||||
|
||||
secondary = secondaryStorage()
|
||||
secondary.url = config.get('cloudstack', 'secondary')
|
||||
z.secondaryStorages.append(secondary)
|
||||
|
||||
'''Add zone'''
|
||||
zs.zones.append(z)
|
||||
|
||||
'''Add mgt server'''
|
||||
mgt = managementServer()
|
||||
mgt.mgtSvrIp = config.get('environment', 'mshost')
|
||||
zs.mgtSvr.append(mgt)
|
||||
|
||||
'''Add a database'''
|
||||
db = dbServer()
|
||||
db.dbSvr = config.get('environment', 'database')
|
||||
zs.dbSvr = db
|
||||
|
||||
'''Add some configuration'''
|
||||
[zs.globalConfig.append(cfg) for cfg in getGlobalSettings(config)]
|
||||
|
||||
''''add loggers'''
|
||||
testClientLogger = logger()
|
||||
testClientLogger.name = 'TestClient'
|
||||
testClientLogger.file = '/var/log/testclient.log'
|
||||
|
||||
testCaseLogger = logger()
|
||||
testCaseLogger.name = 'TestCase'
|
||||
testCaseLogger.file = '/var/log/testcase.log'
|
||||
|
||||
zs.logger.append(testClientLogger)
|
||||
zs.logger.append(testCaseLogger)
|
||||
return zs
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
parser = OptionParser()
|
||||
parser.add_option('-i', '--input', action='store', default='setup.properties', \
|
||||
dest='input', help='file containing environment setup information')
|
||||
parser.add_option('-o', '--output', action='store', default='./sandbox.cfg', \
|
||||
dest='output', help='path where environment json will be generated')
|
||||
|
||||
|
||||
(opts, args) = parser.parse_args()
|
||||
|
||||
cfg_parser = SafeConfigParser()
|
||||
cfg_parser.read(opts.input)
|
||||
|
||||
cfg = describeResources(cfg_parser)
|
||||
generate_setup_config(cfg, opts.output)
|
||||
|
|
@ -1,34 +0,0 @@
|
|||
[globals]
|
||||
expunge.delay=60
|
||||
expunge.interval=60
|
||||
storage.cleanup.interval=300
|
||||
account.cleanup.interval=600
|
||||
expunge.workers=3
|
||||
workers=10
|
||||
vm.allocation.algorithm=userdispersing
|
||||
vm.op.wait.interval=5
|
||||
guest.domain.suffix=sandbox.xen
|
||||
instance.name=QA
|
||||
direct.agent.load.size=1000
|
||||
default.page.size=10000
|
||||
check.pod.cidrs=true
|
||||
secstorage.allowed.internal.sites=10.147.28.0/24
|
||||
[environment]
|
||||
dns=10.147.28.6
|
||||
mshost=10.147.29.110
|
||||
database=10.147.29.110
|
||||
hypervisor=XenServer
|
||||
[cloudstack]
|
||||
#pod configuration
|
||||
private.gateway=10.147.29.1
|
||||
private.pod.startip=10.147.29.150
|
||||
private.pod.endip=10.147.29.153
|
||||
#public/guest ip range
|
||||
guest.gateway=10.147.29.1
|
||||
guest.startip=10.147.29.154
|
||||
guest.endip=10.147.29.159
|
||||
#hosts
|
||||
host=10.147.29.56
|
||||
#pools
|
||||
pool=nfs://10.147.28.6:/export/home/prasanna/taxila
|
||||
secondary=nfs://10.147.28.6:/export/home/prasanna/secondary
|
||||
|
|
@ -1,138 +0,0 @@
|
|||
{
|
||||
"zones": [
|
||||
{
|
||||
"name": "Sandbox-simulator",
|
||||
"guestcidraddress": "10.1.1.0/24",
|
||||
"providers": [
|
||||
{
|
||||
"broadcastdomainrange": "ZONE",
|
||||
"name": "VirtualRouter"
|
||||
}
|
||||
],
|
||||
"dns1": "10.147.28.6",
|
||||
"vlan": "100-500",
|
||||
"ipranges": [
|
||||
{
|
||||
"startip": "172.2.1.2",
|
||||
"endip": "172.2.1.252",
|
||||
"netmask": "255.255.255.0",
|
||||
"vlan": "30",
|
||||
"gateway": "172.2.1.1"
|
||||
}
|
||||
],
|
||||
"networktype": "Advanced",
|
||||
"pods": [
|
||||
{
|
||||
"endip": "172.1.2.252",
|
||||
"name": "POD0",
|
||||
"startip": "172.1.2.2",
|
||||
"netmask": "255.255.255.0",
|
||||
"clusters": [
|
||||
{
|
||||
"clustername": "C0",
|
||||
"hypervisor": "simulator",
|
||||
"hosts": [
|
||||
{
|
||||
"username": "root",
|
||||
"url": "http://sim",
|
||||
"password": "password"
|
||||
}
|
||||
],
|
||||
"clustertype": "CloudManaged",
|
||||
"primaryStorages": [
|
||||
{
|
||||
"url": "nfs://172.2.2.6:/export/home/primary",
|
||||
"name": "PS0"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"gateway": "172.1.2.1"
|
||||
}
|
||||
],
|
||||
"internaldns1": "10.147.28.6",
|
||||
"secondaryStorages": [
|
||||
{
|
||||
"url": "nfs://172.2.2.6:/export/home/secondary"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"dbSvr": {
|
||||
"dbSvr": "localhost",
|
||||
"passwd": "cloud",
|
||||
"db": "cloud",
|
||||
"port": 3306,
|
||||
"user": "cloud"
|
||||
},
|
||||
"logger": [
|
||||
{
|
||||
"name": "TestClient",
|
||||
"file": "/var/log/testclient.log"
|
||||
},
|
||||
{
|
||||
"name": "TestCase",
|
||||
"file": "/var/log/testcase.log"
|
||||
}
|
||||
],
|
||||
"globalConfig": [
|
||||
{
|
||||
"name": "storage.cleanup.interval",
|
||||
"value": "300"
|
||||
},
|
||||
{
|
||||
"name": "vm.op.wait.interval",
|
||||
"value": "5"
|
||||
},
|
||||
{
|
||||
"name": "default.page.size",
|
||||
"value": "10000"
|
||||
},
|
||||
{
|
||||
"name": "instance.name",
|
||||
"value": "QA"
|
||||
},
|
||||
{
|
||||
"name": "workers",
|
||||
"value": "10"
|
||||
},
|
||||
{
|
||||
"name": "direct.agent.load.size",
|
||||
"value": "1000"
|
||||
},
|
||||
{
|
||||
"name": "account.cleanup.interval",
|
||||
"value": "600"
|
||||
},
|
||||
{
|
||||
"name": "guest.domain.suffix",
|
||||
"value": "sandbox.simulator"
|
||||
},
|
||||
{
|
||||
"name": "expunge.delay",
|
||||
"value": "60"
|
||||
},
|
||||
{
|
||||
"name": "vm.allocation.algorithm",
|
||||
"value": "userdispersing"
|
||||
},
|
||||
{
|
||||
"name": "expunge.interval",
|
||||
"value": "60"
|
||||
},
|
||||
{
|
||||
"name": "expunge.workers",
|
||||
"value": "3"
|
||||
},
|
||||
{
|
||||
"name": "check.pod.cidrs",
|
||||
"value": "true"
|
||||
}
|
||||
],
|
||||
"mgtSvr": [
|
||||
{
|
||||
"mgtSvrIp": "localhost",
|
||||
"port": 8096
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
@ -1,128 +0,0 @@
|
|||
#!/usr/bin/env python
|
||||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
|
||||
'''
|
||||
# Experimental state of scripts
|
||||
# * Need to be reviewed
|
||||
# * Only a sandbox
|
||||
'''
|
||||
|
||||
from ConfigParser import SafeConfigParser
|
||||
from optparse import OptionParser
|
||||
from configGenerator import *
|
||||
import random
|
||||
|
||||
|
||||
def getGlobalSettings(config):
|
||||
for k, v in dict(config.items('globals')).iteritems():
|
||||
cfg = configuration()
|
||||
cfg.name = k
|
||||
cfg.value = v
|
||||
yield cfg
|
||||
|
||||
|
||||
def describeResources(config):
|
||||
zs = cloudstackConfiguration()
|
||||
|
||||
z = zone()
|
||||
z.dns1 = config.get('environment', 'dns')
|
||||
z.internaldns1 = config.get('environment', 'dns')
|
||||
z.name = 'Sandbox-%s'%(config.get('environment', 'hypervisor'))
|
||||
z.networktype = 'Advanced'
|
||||
z.guestcidraddress = '10.1.1.0/24'
|
||||
z.vlan = config.get('cloudstack','guest.vlan')
|
||||
|
||||
p = pod()
|
||||
p.name = 'POD0'
|
||||
p.gateway = config.get('cloudstack', 'private.gateway')
|
||||
p.startip = config.get('cloudstack', 'private.pod.startip')
|
||||
p.endip = config.get('cloudstack', 'private.pod.endip')
|
||||
p.netmask = '255.255.255.0'
|
||||
|
||||
v = iprange()
|
||||
v.gateway = config.get('cloudstack', 'public.gateway')
|
||||
v.startip = config.get('cloudstack', 'public.vlan.startip')
|
||||
v.endip = config.get('cloudstack', 'public.vlan.endip')
|
||||
v.netmask = '255.255.255.0'
|
||||
v.vlan = config.get('cloudstack', 'public.vlan')
|
||||
z.ipranges.append(v)
|
||||
|
||||
c = cluster()
|
||||
c.clustername = 'C0'
|
||||
c.hypervisor = config.get('environment', 'hypervisor')
|
||||
c.clustertype = 'CloudManaged'
|
||||
|
||||
h = host()
|
||||
h.username = 'root'
|
||||
h.password = 'password'
|
||||
h.url = 'http://%s'%(config.get('cloudstack', 'host'))
|
||||
c.hosts.append(h)
|
||||
|
||||
ps = primaryStorage()
|
||||
ps.name = 'PS0'
|
||||
ps.url = config.get('cloudstack', 'pool')
|
||||
c.primaryStorages.append(ps)
|
||||
|
||||
p.clusters.append(c)
|
||||
z.pods.append(p)
|
||||
|
||||
secondary = secondaryStorage()
|
||||
secondary.url = config.get('cloudstack', 'secondary')
|
||||
z.secondaryStorages.append(secondary)
|
||||
|
||||
'''Add zone'''
|
||||
zs.zones.append(z)
|
||||
|
||||
'''Add mgt server'''
|
||||
mgt = managementServer()
|
||||
mgt.mgtSvrIp = config.get('environment', 'mshost')
|
||||
zs.mgtSvr.append(mgt)
|
||||
|
||||
'''Add a database'''
|
||||
db = dbServer()
|
||||
db.dbSvr = config.get('environment', 'database')
|
||||
zs.dbSvr = db
|
||||
|
||||
'''Add some configuration'''
|
||||
[zs.globalConfig.append(cfg) for cfg in getGlobalSettings(config)]
|
||||
|
||||
''''add loggers'''
|
||||
testClientLogger = logger()
|
||||
testClientLogger.name = 'TestClient'
|
||||
testClientLogger.file = '/var/log/testclient.log'
|
||||
|
||||
testCaseLogger = logger()
|
||||
testCaseLogger.name = 'TestCase'
|
||||
testCaseLogger.file = '/var/log/testcase.log'
|
||||
|
||||
zs.logger.append(testClientLogger)
|
||||
zs.logger.append(testCaseLogger)
|
||||
return zs
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
parser = OptionParser()
|
||||
parser.add_option('-i', '--input', action='store', default='setup.properties', \
|
||||
dest='input', help='file containing environment setup information')
|
||||
parser.add_option('-o', '--output', action='store', default='./sandbox.cfg', \
|
||||
dest='output', help='path where environment json will be generated')
|
||||
|
||||
|
||||
(opts, args) = parser.parse_args()
|
||||
|
||||
cfg_parser = SafeConfigParser()
|
||||
cfg_parser.read(opts.input)
|
||||
|
||||
cfg = describeResources(cfg_parser)
|
||||
generate_setup_config(cfg, opts.output)
|
||||
|
|
@ -1,35 +0,0 @@
|
|||
[globals]
|
||||
expunge.delay=60
|
||||
expunge.interval=60
|
||||
storage.cleanup.interval=300
|
||||
account.cleanup.interval=600
|
||||
expunge.workers=3
|
||||
workers=10
|
||||
vm.allocation.algorithm=userdispersing
|
||||
vm.op.wait.interval=5
|
||||
guest.domain.suffix=sandbox.simulator
|
||||
instance.name=QA
|
||||
direct.agent.load.size=1000
|
||||
default.page.size=10000
|
||||
check.pod.cidrs=true
|
||||
[environment]
|
||||
dns=10.147.28.6
|
||||
mshost=localhost
|
||||
database=localhost
|
||||
hypervisor=simulator
|
||||
[cloudstack]
|
||||
guest.vlan=100-500
|
||||
#pod configuration
|
||||
private.gateway=172.1.2.1
|
||||
private.pod.startip=172.1.2.2
|
||||
private.pod.endip=172.1.2.252
|
||||
#public vlan range
|
||||
public.gateway=172.2.1.1
|
||||
public.vlan=30
|
||||
public.vlan.startip=172.2.1.2
|
||||
public.vlan.endip=172.2.1.252
|
||||
#hosts
|
||||
host=sim
|
||||
#pools
|
||||
pool=nfs://172.2.2.6:/export/home/primary
|
||||
secondary=nfs://172.2.2.6:/export/home/secondary
|
||||
|
|
@ -1,131 +0,0 @@
|
|||
#!/usr/bin/env python
|
||||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
import random
|
||||
import hashlib
|
||||
from cloudstackTestCase import *
|
||||
|
||||
class Provision(cloudstackTestCase):
|
||||
'''
|
||||
'''
|
||||
def setUp(self):
|
||||
pass
|
||||
|
||||
|
||||
def tearDown(self):
|
||||
pass
|
||||
|
||||
|
||||
def test_1_createAccounts(self, numberOfAccounts=5):
|
||||
'''
|
||||
Create a bunch of user accounts
|
||||
'''
|
||||
mdf = hashlib.md5()
|
||||
mdf.update('password')
|
||||
mdf_pass = mdf.hexdigest()
|
||||
api = self.testClient.getApiClient()
|
||||
for i in range(1, numberOfAccounts + 1):
|
||||
acct = createAccount.createAccountCmd()
|
||||
acct.accounttype = 0
|
||||
acct.firstname = 'user' + str(i)
|
||||
acct.lastname = 'user' + str(i)
|
||||
acct.password = mdf_pass
|
||||
acct.username = 'user' + str(i)
|
||||
acct.email = 'user@example.com'
|
||||
acct.account = 'user' + str(i)
|
||||
acct.domainid = 1
|
||||
acctResponse = api.createAccount(acct)
|
||||
self.debug("successfully created account: %s, user: %s, id: %s"%(acctResponse.account, acctResponse.username, acctResponse.id))
|
||||
|
||||
|
||||
def test_2_createServiceOffering(self):
|
||||
apiClient = self.testClient.getApiClient()
|
||||
createSOcmd=createServiceOffering.createServiceOfferingCmd()
|
||||
createSOcmd.name='Sample SO'
|
||||
createSOcmd.displaytext='Sample SO'
|
||||
createSOcmd.storagetype='shared'
|
||||
createSOcmd.cpunumber=1
|
||||
createSOcmd.cpuspeed=100
|
||||
createSOcmd.memory=128
|
||||
createSOcmd.offerha='false'
|
||||
createSOresponse = apiClient.createServiceOffering(createSOcmd)
|
||||
return createSOresponse.id
|
||||
|
||||
def deployCmd(self, account, service):
|
||||
deployVmCmd = deployVirtualMachine.deployVirtualMachineCmd()
|
||||
deployVmCmd.zoneid = 1
|
||||
deployVmCmd.hypervisor='Simulator'
|
||||
deployVmCmd.account=account
|
||||
deployVmCmd.domainid=1
|
||||
deployVmCmd.templateid=10
|
||||
deployVmCmd.serviceofferingid=service
|
||||
return deployVmCmd
|
||||
|
||||
def listVmsInAccountCmd(self, acct):
|
||||
api = self.testClient.getApiClient()
|
||||
listVmCmd = listVirtualMachines.listVirtualMachinesCmd()
|
||||
listVmCmd.account = acct
|
||||
listVmCmd.zoneid = 1
|
||||
listVmCmd.domainid = 1
|
||||
listVmResponse = api.listVirtualMachines(listVmCmd)
|
||||
return listVmResponse
|
||||
|
||||
|
||||
def destroyVmCmd(self, key):
|
||||
api = self.testClient.getApiClient()
|
||||
destroyVmCmd = destroyVirtualMachine.destroyVirtualMachineCmd()
|
||||
destroyVmCmd.id = key
|
||||
api.destroyVirtualMachine(destroyVmCmd)
|
||||
|
||||
|
||||
def test_3_stressDeploy(self):
|
||||
'''
|
||||
Deploy 20 Vms in each account
|
||||
'''
|
||||
service_id = self.test_2_createServiceOffering()
|
||||
api = self.testClient.getApiClient()
|
||||
for acct in range(1, 5):
|
||||
[api.deployVirtualMachine(self.deployCmd('user'+str(acct), service_id)) for x in range(0,20)]
|
||||
|
||||
def test_4_stressDestroy(self):
|
||||
'''
|
||||
Cleanup all Vms in every account
|
||||
'''
|
||||
api = self.testClient.getApiClient()
|
||||
for acct in range(1, 6):
|
||||
for vm in self.listVmsInAccountCmd('user'+str(acct)):
|
||||
if vm is not None:
|
||||
self.destroyVmCmd(vm.id)
|
||||
|
||||
def test_5_combineStress(self):
|
||||
for i in range(0, 5):
|
||||
self.test_3_stressDeploy()
|
||||
self.test_4_stressDestroy()
|
||||
|
||||
def deployN(self,nargs=300,batchsize=0):
|
||||
'''
|
||||
Deploy Nargs number of VMs concurrently in batches of size {batchsize}.
|
||||
When batchsize is 0 all Vms are deployed in one batch
|
||||
VMs will be deployed in 5:2:6 ratio
|
||||
'''
|
||||
cmds = []
|
||||
|
||||
if batchsize == 0:
|
||||
self.testClient.submitCmdsAndWait(cmds)
|
||||
else:
|
||||
while len(z) > 0:
|
||||
try:
|
||||
newbatch = [cmds.pop() for b in range(batchsize)] #pop batchsize items
|
||||
self.testClient.submitCmdsAndWait(newbatch)
|
||||
except IndexError:
|
||||
break
|
||||
|
|
@ -1,41 +0,0 @@
|
|||
Build Verification Testing (BVT) Cases
|
||||
--------------------------------------
|
||||
These test cases are the core functionality tests that ensure the application is stable and can be tested thoroughly.
|
||||
These BVT cases definitions are located at : https://docs.google.com/a/cloud.com/spreadsheet/ccc?key=0Ak8acbfxQG8ndEppOGZSLV9mUF9idjVkTkZkajhTZkE&invite=CPij0K0L
|
||||
|
||||
|
||||
Guidelines
|
||||
----------
|
||||
BVT test cases are being developed using Python's unittests2. Following are certain guidelines being followed
|
||||
1. Tests exercised for the same resource should ideally be present under a single suite or file.
|
||||
|
||||
2. Time-consuming operations that create new cloud resources like server creation, volume creation etc
|
||||
should not necessarily be exercised per unit test. The resources can be shared by creating them at
|
||||
the class-level using setUpClass and shared across all instances during a single run.
|
||||
|
||||
3. Certain tests pertaining to NAT, Firewall and Load Balancing warrant fresh resources per test. Hence a call should be
|
||||
taken by the stakeholders regarding sharing resources.
|
||||
|
||||
4. Ensure that the tearDown/tearDownClass functions clean up all the resources created during the test run.
|
||||
|
||||
For more information about unittests: http://docs.python.org/library/unittest.html
|
||||
|
||||
|
||||
BVT Tests
|
||||
----------
|
||||
The following files contain these BVT cases:
|
||||
|
||||
1. test_vm_life_cycle.py - VM Life Cycle tests
|
||||
2. test_volumes.py - Volumes related tests
|
||||
3. test_snapshots.py - Snapshots related tests
|
||||
4. test_disk_offerings.py - Disk Offerings related tests
|
||||
5. test_service_offerings.py - Service Offerings related tests
|
||||
6. test_hosts.py - Hosts and Clusters related tests
|
||||
7. test_iso.py - ISO related tests
|
||||
8. test_network.py - Network related tests
|
||||
9. test_primary_storage.py - Primary storage related tests
|
||||
10. test_secondary_storage.py - Secondary storage related tests
|
||||
11. test_ssvm.py - SSVM & CPVM related tests
|
||||
12. test_templates.py - Templates related tests
|
||||
13. test_routers.py - Router related tests
|
||||
|
||||
|
|
@ -1,13 +0,0 @@
|
|||
# -*- encoding: utf-8 -*-
|
||||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
|
|
@ -1,212 +0,0 @@
|
|||
# -*- encoding: utf-8 -*-
|
||||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
""" BVT tests for Disk offerings"""
|
||||
|
||||
#Import Local Modules
|
||||
from cloudstackTestCase import *
|
||||
from cloudstackAPI import *
|
||||
from testcase.libs.utils import *
|
||||
from testcase.libs.base import *
|
||||
from testcase.libs.common import *
|
||||
|
||||
class Services:
|
||||
"""Test Disk offerings Services
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self.services = {
|
||||
"off": {
|
||||
"name": "Disk offering",
|
||||
"displaytext": "Disk offering",
|
||||
"disksize": 1 # in GB
|
||||
},
|
||||
}
|
||||
|
||||
class TestCreateDiskOffering(cloudstackTestCase):
|
||||
|
||||
def setUp(self):
|
||||
self.services = Services().services
|
||||
self.apiclient = self.testClient.getApiClient()
|
||||
self.dbclient = self.testClient.getDbConnection()
|
||||
self.cleanup = []
|
||||
return
|
||||
|
||||
def tearDown(self):
|
||||
try:
|
||||
self.dbclient.close()
|
||||
#Clean up, terminate the created templates
|
||||
cleanup_resources(self.apiclient, self.cleanup)
|
||||
|
||||
except Exception as e:
|
||||
raise Exception("Warning: Exception during cleanup : %s" % e)
|
||||
return
|
||||
|
||||
def test_01_create_disk_offering(self):
|
||||
"""Test to create disk offering"""
|
||||
|
||||
# Validate the following:
|
||||
# 1. createDiskOfferings should return valid info for new offering
|
||||
# 2. The Cloud Database contains the valid information
|
||||
|
||||
disk_offering = DiskOffering.create(
|
||||
self.apiclient,
|
||||
self.services["off"]
|
||||
)
|
||||
self.cleanup.append(disk_offering)
|
||||
|
||||
self.debug("Created Disk offering with ID: %s" % disk_offering.id)
|
||||
|
||||
list_disk_response = list_disk_offering(
|
||||
self.apiclient,
|
||||
id=disk_offering.id
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_disk_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
self.assertNotEqual(
|
||||
len(list_disk_response),
|
||||
0,
|
||||
"Check Disk offering is created"
|
||||
)
|
||||
disk_response = list_disk_response[0]
|
||||
|
||||
self.assertEqual(
|
||||
disk_response.displaytext,
|
||||
self.services["off"]["displaytext"],
|
||||
"Check server id in createServiceOffering"
|
||||
)
|
||||
self.assertEqual(
|
||||
disk_response.name,
|
||||
self.services["off"]["name"],
|
||||
"Check name in createServiceOffering"
|
||||
)
|
||||
return
|
||||
|
||||
|
||||
class TestDiskOfferings(cloudstackTestCase):
|
||||
|
||||
def setUp(self):
|
||||
self.apiclient = self.testClient.getApiClient()
|
||||
self.dbclient = self.testClient.getDbConnection()
|
||||
self.cleanup = []
|
||||
|
||||
def tearDown(self):
|
||||
|
||||
try:
|
||||
self.dbclient.close()
|
||||
#Clean up, terminate the created templates
|
||||
cleanup_resources(self.apiclient, self.cleanup)
|
||||
|
||||
except Exception as e:
|
||||
raise Exception("Warning: Exception during cleanup : %s" % e)
|
||||
return
|
||||
|
||||
@classmethod
|
||||
def setUpClass(cls):
|
||||
cls.services = Services().services
|
||||
cls.api_client = super(TestDiskOfferings, cls).getClsTestClient().getApiClient()
|
||||
cls.disk_offering_1 = DiskOffering.create(
|
||||
cls.api_client,
|
||||
cls.services["off"]
|
||||
)
|
||||
cls.disk_offering_2 = DiskOffering.create(
|
||||
cls.api_client,
|
||||
cls.services["off"]
|
||||
)
|
||||
cls._cleanup = [cls.disk_offering_1]
|
||||
return
|
||||
|
||||
@classmethod
|
||||
def tearDownClass(cls):
|
||||
try:
|
||||
cls.api_client = super(TestDiskOfferings, cls).getClsTestClient().getApiClient()
|
||||
cleanup_resources(cls.api_client, cls._cleanup)
|
||||
except Exception as e:
|
||||
raise Exception("Warning: Exception during cleanup : %s" % e)
|
||||
return
|
||||
|
||||
def test_02_edit_disk_offering(self):
|
||||
"""Test to update existing disk offering"""
|
||||
|
||||
# Validate the following:
|
||||
# 1. updateDiskOffering should return
|
||||
# a valid information for newly created offering
|
||||
|
||||
#Generate new name & displaytext from random data
|
||||
random_displaytext = random_gen()
|
||||
random_name = random_gen()
|
||||
|
||||
self.debug("Updating Disk offering with ID: %s" %
|
||||
self.disk_offering_1.id)
|
||||
|
||||
cmd = updateDiskOffering.updateDiskOfferingCmd()
|
||||
cmd.id = self.disk_offering_1.id
|
||||
cmd.displaytext = random_displaytext
|
||||
cmd.name = random_name
|
||||
|
||||
self.apiclient.updateDiskOffering(cmd)
|
||||
|
||||
list_disk_response = list_disk_offering(
|
||||
self.apiclient,
|
||||
id=self.disk_offering_1.id
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_disk_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
self.assertNotEqual(
|
||||
len(list_disk_response),
|
||||
0,
|
||||
"Check disk offering is updated"
|
||||
)
|
||||
|
||||
disk_response = list_disk_response[0]
|
||||
|
||||
self.assertEqual(
|
||||
disk_response.displaytext,
|
||||
random_displaytext,
|
||||
"Check service displaytext in updateServiceOffering"
|
||||
)
|
||||
self.assertEqual(
|
||||
disk_response.name,
|
||||
random_name,
|
||||
"Check service name in updateServiceOffering"
|
||||
)
|
||||
return
|
||||
|
||||
def test_03_delete_disk_offering(self):
|
||||
"""Test to delete disk offering"""
|
||||
|
||||
# Validate the following:
|
||||
# 1. deleteDiskOffering should return
|
||||
# a valid information for newly created offering
|
||||
|
||||
self.disk_offering_2.delete(self.apiclient)
|
||||
|
||||
self.debug("Deleted Disk offering with ID: %s" %
|
||||
self.disk_offering_2.id)
|
||||
list_disk_response = list_disk_offering(
|
||||
self.apiclient,
|
||||
id=self.disk_offering_2.id
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
list_disk_response,
|
||||
None,
|
||||
"Check if disk offering exists in listDiskOfferings"
|
||||
)
|
||||
return
|
||||
|
|
@ -1,223 +0,0 @@
|
|||
# -*- encoding: utf-8 -*-
|
||||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
""" BVT tests for Hosts and Clusters
|
||||
"""
|
||||
#Import Local Modules
|
||||
from cloudstackTestCase import *
|
||||
from cloudstackAPI import *
|
||||
from testcase.libs.utils import *
|
||||
from testcase.libs.base import *
|
||||
from testcase.libs.common import *
|
||||
|
||||
#Import System modules
|
||||
import time
|
||||
|
||||
class Services:
|
||||
"""Test Hosts & Clusters Services
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self.services = {
|
||||
"clusters": {
|
||||
0: {
|
||||
"clustername": "Xen Cluster",
|
||||
"clustertype": "CloudManaged",
|
||||
# CloudManaged or ExternalManaged"
|
||||
"hypervisor": "XenServer",
|
||||
# Hypervisor type
|
||||
},
|
||||
1: {
|
||||
"clustername": "KVM Cluster",
|
||||
"clustertype": "CloudManaged",
|
||||
# CloudManaged or ExternalManaged"
|
||||
"hypervisor": "KVM",
|
||||
# Hypervisor type
|
||||
},
|
||||
2: {
|
||||
"hypervisor": 'VMware',
|
||||
# Hypervisor type
|
||||
"clustertype": 'ExternalManaged',
|
||||
# CloudManaged or ExternalManaged"
|
||||
"username": 'administrator',
|
||||
"password": 'fr3sca',
|
||||
"url": 'http://192.168.100.17/CloudStack-Clogeny-Pune/Pune-1',
|
||||
# Format:http://vCenter Host/Datacenter/Cluster
|
||||
"clustername": 'VMWare Cluster',
|
||||
},
|
||||
},
|
||||
"hosts": {
|
||||
"xenserver": {
|
||||
# Must be name of corresponding Hypervisor type
|
||||
# in cluster in small letters
|
||||
"hypervisor": 'XenServer',
|
||||
# Hypervisor type
|
||||
"clustertype": 'CloudManaged',
|
||||
# CloudManaged or ExternalManaged"
|
||||
"url": 'http://192.168.100.211',
|
||||
"username": "root",
|
||||
"password": "fr3sca",
|
||||
},
|
||||
"kvm": {
|
||||
"hypervisor": 'KVM',
|
||||
# Hypervisor type
|
||||
"clustertype": 'CloudManaged',
|
||||
# CloudManaged or ExternalManaged"
|
||||
"url": 'http://192.168.100.212',
|
||||
"username": "root",
|
||||
"password": "fr3sca",
|
||||
},
|
||||
"vmware": {
|
||||
"hypervisor": 'VMware',
|
||||
# Hypervisor type
|
||||
"clustertype": 'ExternalManaged',
|
||||
# CloudManaged or ExternalManaged"
|
||||
"url": 'http://192.168.100.203',
|
||||
"username": "administrator",
|
||||
"password": "fr3sca",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
class TestHosts(cloudstackTestCase):
|
||||
|
||||
def setUp(self):
|
||||
|
||||
self.apiclient = self.testClient.getApiClient()
|
||||
self.dbclient = self.testClient.getDbConnection()
|
||||
self.services = Services().services
|
||||
self.zone = get_zone(self.apiclient, self.services)
|
||||
self.pod = get_pod(self.apiclient, self.zone.id, self.services)
|
||||
self.cleanup = []
|
||||
|
||||
return
|
||||
|
||||
def tearDown(self):
|
||||
try:
|
||||
self.dbclient.close()
|
||||
#Clean up, terminate the created templates
|
||||
cleanup_resources(self.apiclient, self.cleanup)
|
||||
|
||||
except Exception as e:
|
||||
raise Exception("Warning: Exception during cleanup : %s" % e)
|
||||
return
|
||||
|
||||
def test_01_clusters(self):
|
||||
"""Test Add clusters & hosts - XEN, KVM, VWARE
|
||||
"""
|
||||
|
||||
# Validate the following:
|
||||
# 1. Verify hypervisortype returned by API is Xen/KVM/VWare
|
||||
# 2. Verify that the cluster is in 'Enabled' allocation state
|
||||
# 3. Verify that the host is added successfully and in Up state
|
||||
# with listHosts API response
|
||||
|
||||
#Create clusters with Hypervisor type XEN/KVM/VWare
|
||||
for k, v in self.services["clusters"].items():
|
||||
cluster = Cluster.create(
|
||||
self.apiclient,
|
||||
v,
|
||||
zoneid=self.zone.id,
|
||||
podid=self.pod.id
|
||||
)
|
||||
self.debug(
|
||||
"Created Cluster for hypervisor type %s & ID: %s" %(
|
||||
v["hypervisor"],
|
||||
cluster.id
|
||||
))
|
||||
self.assertEqual(
|
||||
cluster.hypervisortype,
|
||||
v["hypervisor"],
|
||||
"Check hypervisor type is " + v["hypervisor"] + " or not"
|
||||
)
|
||||
self.assertEqual(
|
||||
cluster.allocationstate,
|
||||
'Enabled',
|
||||
"Check whether allocation state of cluster is enabled"
|
||||
)
|
||||
|
||||
#If host is externally managed host is already added with cluster
|
||||
response = list_hosts(
|
||||
self.apiclient,
|
||||
clusterid=cluster.id
|
||||
)
|
||||
|
||||
if not response:
|
||||
hypervisor_type = str(cluster.hypervisortype.lower())
|
||||
host = Host.create(
|
||||
self.apiclient,
|
||||
cluster,
|
||||
self.services["hosts"][hypervisor_type],
|
||||
zoneid=self.zone.id,
|
||||
podid=self.pod.id
|
||||
)
|
||||
self.debug(
|
||||
"Created host (ID: %s) in cluster ID %s" %(
|
||||
host.id,
|
||||
cluster.id
|
||||
))
|
||||
|
||||
#Cleanup Host & Cluster
|
||||
self.cleanup.append(host)
|
||||
self.cleanup.append(cluster)
|
||||
|
||||
list_hosts_response = list_hosts(
|
||||
self.apiclient,
|
||||
clusterid=cluster.id
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_hosts_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
self.assertNotEqual(
|
||||
len(list_hosts_response),
|
||||
0,
|
||||
"Check list Hosts response"
|
||||
)
|
||||
|
||||
host_response = list_hosts_response[0]
|
||||
#Check if host is Up and running
|
||||
self.assertEqual(
|
||||
host_response.state,
|
||||
'Up',
|
||||
"Check if state of host is Up or not"
|
||||
)
|
||||
#Verify List Cluster Response has newly added cluster
|
||||
list_cluster_response = list_clusters(
|
||||
self.apiclient,
|
||||
id=cluster.id
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_cluster_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
self.assertNotEqual(
|
||||
len(list_cluster_response),
|
||||
0,
|
||||
"Check list Hosts response"
|
||||
)
|
||||
|
||||
cluster_response = list_cluster_response[0]
|
||||
self.assertEqual(
|
||||
cluster_response.id,
|
||||
cluster.id,
|
||||
"Check cluster ID with list clusters response"
|
||||
)
|
||||
self.assertEqual(
|
||||
cluster_response.hypervisortype,
|
||||
cluster.hypervisortype,
|
||||
"Check hypervisor type with is " + v["hypervisor"] + " or not"
|
||||
)
|
||||
return
|
||||
|
|
@ -1,499 +0,0 @@
|
|||
# -*- encoding: utf-8 -*-
|
||||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
""" BVT tests for Templates ISO
|
||||
"""
|
||||
#Import Local Modules
|
||||
from cloudstackTestCase import *
|
||||
from cloudstackAPI import *
|
||||
from testcase.libs.utils import *
|
||||
from testcase.libs.base import *
|
||||
from testcase.libs.common import *
|
||||
import urllib
|
||||
from random import random
|
||||
#Import System modules
|
||||
import time
|
||||
|
||||
|
||||
class Services:
|
||||
"""Test ISO Services
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self.services = {
|
||||
"account": {
|
||||
"email": "test@test.com",
|
||||
"firstname": "Test",
|
||||
"lastname": "User",
|
||||
"username": "test",
|
||||
# Random characters are appended in create account to
|
||||
# ensure unique username generated each time
|
||||
"password": "fr3sca",
|
||||
},
|
||||
"iso_1":
|
||||
{
|
||||
"displaytext": "Test ISO 1",
|
||||
"name": "ISO 1",
|
||||
"url": "http://iso.linuxquestions.org/download/504/1819/http/gd4.tuwien.ac.at/dsl-4.4.10.iso",
|
||||
# Source URL where ISO is located
|
||||
"isextractable": True,
|
||||
"isfeatured": True,
|
||||
"ispublic": True,
|
||||
"ostypeid": '5776c0d2-f331-42db-ba3a-29f1f8319bc9',
|
||||
},
|
||||
"iso_2":
|
||||
{
|
||||
"displaytext": "Test ISO 2",
|
||||
"name": "ISO 2",
|
||||
"url": "http://iso.linuxquestions.org/download/504/1819/http/gd4.tuwien.ac.at/dsl-4.4.10.iso",
|
||||
# Source URL where ISO is located
|
||||
"isextractable": True,
|
||||
"isfeatured": True,
|
||||
"ispublic": True,
|
||||
"ostypeid": '5776c0d2-f331-42db-ba3a-29f1f8319bc9',
|
||||
"mode": 'HTTP_DOWNLOAD',
|
||||
# Used in Extract template, value must be HTTP_DOWNLOAD
|
||||
},
|
||||
"destzoneid": 5,
|
||||
# Copy ISO from one zone to another (Destination Zone)
|
||||
"isfeatured": True,
|
||||
"ispublic": True,
|
||||
"isextractable": True,
|
||||
"bootable": True, # For edit template
|
||||
"passwordenabled": True,
|
||||
"sleep": 60,
|
||||
"timeout": 10,
|
||||
"ostypeid": '5776c0d2-f331-42db-ba3a-29f1f8319bc9',
|
||||
# CentOS 5.3 (64 bit)
|
||||
"mode": 'advanced'
|
||||
# Networking mode: Basic or Advanced
|
||||
}
|
||||
|
||||
|
||||
class TestCreateIso(cloudstackTestCase):
|
||||
|
||||
def setUp(self):
|
||||
self.services = Services().services
|
||||
self.apiclient = self.testClient.getApiClient()
|
||||
self.dbclient = self.testClient.getDbConnection()
|
||||
# Get Zone, Domain and templates
|
||||
self.domain = get_domain(self.apiclient, self.services)
|
||||
self.zone = get_zone(self.apiclient, self.services)
|
||||
self.services["domainid"] = self.domain.id
|
||||
self.services["iso_2"]["zoneid"] = self.zone.id
|
||||
|
||||
self.account = Account.create(
|
||||
self.apiclient,
|
||||
self.services["account"],
|
||||
domainid=self.domain.id
|
||||
)
|
||||
|
||||
self.cleanup = [self.account]
|
||||
return
|
||||
|
||||
def tearDown(self):
|
||||
try:
|
||||
|
||||
self.dbclient.close()
|
||||
#Clean up, terminate the created ISOs
|
||||
cleanup_resources(self.apiclient, self.cleanup)
|
||||
|
||||
except Exception as e:
|
||||
raise Exception("Warning: Exception during cleanup : %s" % e)
|
||||
|
||||
return
|
||||
|
||||
def test_01_create_iso(self):
|
||||
"""Test create public & private ISO
|
||||
"""
|
||||
|
||||
# Validate the following:
|
||||
# 1. database (vm_template table) should be
|
||||
# updated with newly created ISO
|
||||
# 2. UI should show the newly added ISO
|
||||
# 3. listIsos API should show the newly added ISO
|
||||
|
||||
iso = Iso.create(
|
||||
self.apiclient,
|
||||
self.services["iso_2"],
|
||||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
self.debug("ISO created with ID: %s" % iso.id)
|
||||
|
||||
try:
|
||||
iso.download(self.apiclient)
|
||||
except Exception as e:
|
||||
self.fail("Exception while downloading ISO %s: %s"\
|
||||
% (iso.id, e))
|
||||
|
||||
list_iso_response = list_isos(
|
||||
self.apiclient,
|
||||
id=iso.id
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_iso_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
|
||||
self.assertNotEqual(
|
||||
len(list_iso_response),
|
||||
0,
|
||||
"Check template available in List ISOs"
|
||||
)
|
||||
iso_response = list_iso_response[0]
|
||||
|
||||
self.assertEqual(
|
||||
iso_response.displaytext,
|
||||
self.services["iso_2"]["displaytext"],
|
||||
"Check display text of newly created ISO"
|
||||
)
|
||||
self.assertEqual(
|
||||
iso_response.name,
|
||||
self.services["iso_2"]["name"],
|
||||
"Check name of newly created ISO"
|
||||
)
|
||||
self.assertEqual(
|
||||
iso_response.zoneid,
|
||||
self.services["iso_2"]["zoneid"],
|
||||
"Check zone ID of newly created ISO"
|
||||
)
|
||||
return
|
||||
|
||||
|
||||
class TestISO(cloudstackTestCase):
|
||||
|
||||
@classmethod
|
||||
def setUpClass(cls):
|
||||
cls.services = Services().services
|
||||
cls.api_client = super(TestISO, cls).getClsTestClient().getApiClient()
|
||||
|
||||
# Get Zone, Domain and templates
|
||||
cls.domain = get_domain(cls.api_client, cls.services)
|
||||
cls.zone = get_zone(cls.api_client, cls.services)
|
||||
|
||||
cls.services["domainid"] = cls.domain.id
|
||||
cls.services["iso_1"]["zoneid"] = cls.zone.id
|
||||
cls.services["iso_2"]["zoneid"] = cls.zone.id
|
||||
cls.services["sourcezoneid"] = cls.zone.id
|
||||
|
||||
#Create an account, ISOs etc.
|
||||
cls.account = Account.create(
|
||||
cls.api_client,
|
||||
cls.services["account"],
|
||||
domainid=cls.domain.id
|
||||
)
|
||||
cls.services["account"] = cls.account.account.name
|
||||
cls.iso_1 = Iso.create(
|
||||
cls.api_client,
|
||||
cls.services["iso_1"],
|
||||
account=cls.account.account.name,
|
||||
domainid=cls.account.account.domainid
|
||||
)
|
||||
try:
|
||||
cls.iso_1.download(cls.api_client)
|
||||
except Exception as e:
|
||||
raise Exception("Exception while downloading ISO %s: %s"\
|
||||
% (cls.iso_1.id, e))
|
||||
|
||||
cls.iso_2 = Iso.create(
|
||||
cls.api_client,
|
||||
cls.services["iso_2"],
|
||||
account=cls.account.account.name,
|
||||
domainid=cls.account.account.domainid
|
||||
)
|
||||
try:
|
||||
cls.iso_2.download(cls.api_client)
|
||||
except Exception as e:
|
||||
raise Exception("Exception while downloading ISO %s: %s"\
|
||||
% (cls.iso_2.id, e))
|
||||
|
||||
cls._cleanup = [cls.account]
|
||||
return
|
||||
|
||||
@classmethod
|
||||
def tearDownClass(cls):
|
||||
try:
|
||||
cls.api_client = super(TestISO, cls).getClsTestClient().getApiClient()
|
||||
#Clean up, terminate the created templates
|
||||
cleanup_resources(cls.api_client, cls._cleanup)
|
||||
|
||||
except Exception as e:
|
||||
raise Exception("Warning: Exception during cleanup : %s" % e)
|
||||
|
||||
return
|
||||
|
||||
def setUp(self):
|
||||
self.apiclient = self.testClient.getApiClient()
|
||||
self.dbclient = self.testClient.getDbConnection()
|
||||
self.cleanup = []
|
||||
|
||||
def tearDown(self):
|
||||
try:
|
||||
self.dbclient.close()
|
||||
#Clean up, terminate the created ISOs, VMs
|
||||
cleanup_resources(self.apiclient, self.cleanup)
|
||||
|
||||
except Exception as e:
|
||||
raise Exception("Warning: Exception during cleanup : %s" % e)
|
||||
|
||||
return
|
||||
|
||||
def test_02_edit_iso(self):
|
||||
"""Test Edit ISO
|
||||
"""
|
||||
|
||||
# Validate the following:
|
||||
# 1. UI should show the edited values for ISO
|
||||
# 2. database (vm_template table) should have updated values
|
||||
|
||||
#Generate random values for updating ISO name and Display text
|
||||
new_displayText = random_gen()
|
||||
new_name = random_gen()
|
||||
|
||||
self.debug("Updating ISO permissions for ISO: %s" % self.iso_1.id)
|
||||
|
||||
cmd = updateIso.updateIsoCmd()
|
||||
#Assign new values to attributes
|
||||
cmd.id = self.iso_1.id
|
||||
cmd.displaytext = new_displayText
|
||||
cmd.name = new_name
|
||||
cmd.bootable = self.services["bootable"]
|
||||
cmd.passwordenabled = self.services["passwordenabled"]
|
||||
|
||||
self.apiclient.updateIso(cmd)
|
||||
|
||||
#Check whether attributes are updated in ISO using listIsos
|
||||
list_iso_response = list_isos(
|
||||
self.apiclient,
|
||||
id=self.iso_1.id
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_iso_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
self.assertNotEqual(
|
||||
len(list_iso_response),
|
||||
0,
|
||||
"Check template available in List ISOs"
|
||||
)
|
||||
|
||||
iso_response = list_iso_response[0]
|
||||
self.assertEqual(
|
||||
iso_response.displaytext,
|
||||
new_displayText,
|
||||
"Check display text of updated ISO"
|
||||
)
|
||||
self.assertEqual(
|
||||
iso_response.name,
|
||||
new_name,
|
||||
"Check name of updated ISO"
|
||||
)
|
||||
self.assertEqual(
|
||||
iso_response.bootable,
|
||||
self.services["bootable"],
|
||||
"Check if image is bootable of updated ISO"
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
iso_response.ostypeid,
|
||||
self.services["ostypeid"],
|
||||
"Check OSTypeID of updated ISO"
|
||||
)
|
||||
return
|
||||
|
||||
def test_03_delete_iso(self):
|
||||
"""Test delete ISO
|
||||
"""
|
||||
|
||||
# Validate the following:
|
||||
# 1. UI should not show the deleted ISP
|
||||
# 2. database (vm_template table) should not contain deleted ISO
|
||||
|
||||
self.debug("Deleting ISO with ID: %s" % self.iso_1.id)
|
||||
self.iso_1.delete(self.apiclient)
|
||||
|
||||
# Sleep to ensure that ISO state is reflected in other calls
|
||||
time.sleep(self.services["sleep"])
|
||||
|
||||
#ListIsos to verify deleted ISO is properly deleted
|
||||
list_iso_response = list_isos(
|
||||
self.apiclient,
|
||||
id=self.iso_1.id
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
list_iso_response,
|
||||
None,
|
||||
"Check if ISO exists in ListIsos"
|
||||
)
|
||||
return
|
||||
|
||||
def test_04_extract_Iso(self):
|
||||
"Test for extract ISO"
|
||||
|
||||
# Validate the following
|
||||
# 1. Admin should able extract and download the ISO
|
||||
# 2. ListIsos should display all the public templates
|
||||
# for all kind of users
|
||||
# 3 .ListIsos should not display the system templates
|
||||
|
||||
self.debug("Extracting ISO with ID: %s" % self.iso_2.id)
|
||||
|
||||
cmd = extractIso.extractIsoCmd()
|
||||
cmd.id = self.iso_2.id
|
||||
cmd.mode = self.services["iso_2"]["mode"]
|
||||
cmd.zoneid = self.services["iso_2"]["zoneid"]
|
||||
list_extract_response = self.apiclient.extractIso(cmd)
|
||||
|
||||
try:
|
||||
#Format URL to ASCII to retrieve response code
|
||||
formatted_url = urllib.unquote_plus(list_extract_response.url)
|
||||
url_response = urllib.urlopen(formatted_url)
|
||||
response_code = url_response.getcode()
|
||||
except Exception:
|
||||
self.fail(
|
||||
"Extract ISO Failed with invalid URL %s (ISO id: %s)" \
|
||||
% (formatted_url, self.iso_2.id)
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
list_extract_response.id,
|
||||
self.iso_2.id,
|
||||
"Check ID of the downloaded ISO"
|
||||
)
|
||||
self.assertEqual(
|
||||
list_extract_response.extractMode,
|
||||
self.services["iso_2"]["mode"],
|
||||
"Check mode of extraction"
|
||||
)
|
||||
self.assertEqual(
|
||||
list_extract_response.zoneid,
|
||||
self.services["iso_2"]["zoneid"],
|
||||
"Check zone ID of extraction"
|
||||
)
|
||||
self.assertEqual(
|
||||
response_code,
|
||||
200,
|
||||
"Check for a valid response of download URL"
|
||||
)
|
||||
return
|
||||
|
||||
def test_05_iso_permissions(self):
|
||||
"""Update & Test for ISO permissions"""
|
||||
|
||||
# validate the following
|
||||
# 1. listIsos returns valid permissions set for ISO
|
||||
# 2. permission changes should be reflected in vm_template
|
||||
# table in database
|
||||
|
||||
self.debug("Updating permissions for ISO: %s" % self.iso_2.id)
|
||||
|
||||
cmd = updateIsoPermissions.updateIsoPermissionsCmd()
|
||||
cmd.id = self.iso_2.id
|
||||
#Update ISO permissions
|
||||
cmd.isfeatured = self.services["isfeatured"]
|
||||
cmd.ispublic = self.services["ispublic"]
|
||||
cmd.isextractable = self.services["isextractable"]
|
||||
self.apiclient.updateIsoPermissions(cmd)
|
||||
|
||||
#Verify ListIsos have updated permissions for the ISO for normal user
|
||||
list_iso_response = list_isos(
|
||||
self.apiclient,
|
||||
id=self.iso_2.id,
|
||||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_iso_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
|
||||
iso_response = list_iso_response[0]
|
||||
|
||||
self.assertEqual(
|
||||
iso_response.id,
|
||||
self.iso_2.id,
|
||||
"Check ISO ID"
|
||||
)
|
||||
self.assertEqual(
|
||||
iso_response.ispublic,
|
||||
self.services["ispublic"],
|
||||
"Check ispublic permission of ISO"
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
iso_response.isfeatured,
|
||||
self.services["isfeatured"],
|
||||
"Check isfeatured permission of ISO"
|
||||
)
|
||||
return
|
||||
|
||||
def test_06_copy_iso(self):
|
||||
"""Test for copy ISO from one zone to another"""
|
||||
|
||||
#Validate the following
|
||||
#1. copy ISO should be successful and secondary storage
|
||||
# should contain new copied ISO.
|
||||
|
||||
self.debug("Copy ISO from %s to %s" % (
|
||||
self.zone.id,
|
||||
self.services["destzoneid"]
|
||||
))
|
||||
|
||||
cmd = copyIso.copyIsoCmd()
|
||||
cmd.id = self.iso_2.id
|
||||
cmd.destzoneid = self.services["destzoneid"]
|
||||
cmd.sourcezoneid = self.zone.id
|
||||
self.apiclient.copyIso(cmd)
|
||||
|
||||
#Verify ISO is copied to another zone using ListIsos
|
||||
list_iso_response = list_isos(
|
||||
self.apiclient,
|
||||
id=self.iso_2.id,
|
||||
zoneid=self.services["destzoneid"]
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_iso_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
|
||||
self.assertNotEqual(
|
||||
len(list_iso_response),
|
||||
0,
|
||||
"Check template extracted in List ISO"
|
||||
)
|
||||
iso_response = list_iso_response[0]
|
||||
|
||||
self.assertEqual(
|
||||
iso_response.id,
|
||||
self.iso_2.id,
|
||||
"Check ID of the downloaded ISO"
|
||||
)
|
||||
self.assertEqual(
|
||||
iso_response.zoneid,
|
||||
self.services["destzoneid"],
|
||||
"Check zone ID of the copied ISO"
|
||||
)
|
||||
|
||||
self.debug("Cleanup copied ISO: %s" % iso_response.id)
|
||||
# Cleanup- Delete the copied ISO
|
||||
cmd = deleteIso.deleteIsoCmd()
|
||||
cmd.id = iso_response.id
|
||||
cmd.zoneid = self.services["destzoneid"]
|
||||
self.apiclient.deleteIso(cmd)
|
||||
return
|
||||
File diff suppressed because it is too large
Load Diff
|
|
@ -1,237 +0,0 @@
|
|||
# -*- encoding: utf-8 -*-
|
||||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
""" BVT tests for Primary Storage
|
||||
"""
|
||||
#Import Local Modules
|
||||
from cloudstackTestCase import *
|
||||
from cloudstackAPI import *
|
||||
from testcase.libs.utils import *
|
||||
from testcase.libs.base import *
|
||||
from testcase.libs.common import *
|
||||
|
||||
#Import System modules
|
||||
import time
|
||||
|
||||
class Services:
|
||||
"""Test Primary storage Services
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self.services = {
|
||||
"nfs": {
|
||||
0: {
|
||||
"url": "nfs://192.168.100.131/testprimary",
|
||||
# Format: File_System_Type/Location/Path
|
||||
"name": "Primary XEN",
|
||||
"hypervisor": 'XEN',
|
||||
},
|
||||
1: {
|
||||
"url": "nfs://192.168.100.131/Primary",
|
||||
"name": "Primary KVM",
|
||||
"hypervisor": 'KVM',
|
||||
},
|
||||
2: {
|
||||
"url": "nfs://192.168.100.131/Primary",
|
||||
"name": "Primary VMWare",
|
||||
"hypervisor": 'VMWare',
|
||||
},
|
||||
},
|
||||
"iscsi": {
|
||||
0: {
|
||||
"url": "iscsi://192.168.100.21/iqn.2012-01.localdomain.clo-cstack-cos6:iser/1",
|
||||
# Format : iscsi://IP Address/IQN number/LUN#
|
||||
"name": "Primary iSCSI",
|
||||
"hypervisor": 'XEN',
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
class TestPrimaryStorageServices(cloudstackTestCase):
|
||||
|
||||
def setUp(self):
|
||||
|
||||
self.apiclient = self.testClient.getApiClient()
|
||||
self.services = Services().services
|
||||
self.cleanup = []
|
||||
# Get Zone and pod
|
||||
self.zone = get_zone(self.apiclient, self.services)
|
||||
self.pod = get_pod(self.apiclient, self.zone.id)
|
||||
|
||||
return
|
||||
|
||||
def tearDown(self):
|
||||
try:
|
||||
#Clean up, terminate the created templates
|
||||
cleanup_resources(self.apiclient, self.cleanup)
|
||||
|
||||
except Exception as e:
|
||||
raise Exception("Warning: Exception during cleanup : %s" % e)
|
||||
return
|
||||
|
||||
def test_01_primary_storage(self):
|
||||
"""Test primary storage pools - XEN, KVM, VMWare
|
||||
"""
|
||||
|
||||
# Validate the following:
|
||||
# 1. verify hypervisortype returned by api is Xen/KVM/VMWare
|
||||
# 2. verify that the cluster is in 'Enabled' allocation state
|
||||
# 3. verify that the host is added successfully and
|
||||
# in Up state with listHosts api response
|
||||
|
||||
#Create NFS storage pools with on XEN/KVM/VMWare clusters
|
||||
for k, v in self.services["nfs"].items():
|
||||
|
||||
clusters = list_clusters(
|
||||
self.apiclient,
|
||||
zoneid=self.zone.id,
|
||||
hypervisortype=v["hypervisor"]
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(clusters, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
cluster = clusters[0]
|
||||
#Host should be present before adding primary storage
|
||||
list_hosts_response = list_hosts(
|
||||
self.apiclient,
|
||||
clusterid=cluster.id
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_hosts_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
|
||||
self.assertNotEqual(
|
||||
len(list_hosts_response),
|
||||
0,
|
||||
"Check list Hosts for hypervisor: " + v["hypervisor"]
|
||||
)
|
||||
|
||||
storage = StoragePool.create(self.apiclient,
|
||||
v,
|
||||
clusterid=cluster.id,
|
||||
zoneid=self.zone.id,
|
||||
podid=self.pod.id
|
||||
)
|
||||
self.cleanup.append(storage)
|
||||
|
||||
self.debug("Created storage pool in cluster: %s" % cluster.id)
|
||||
|
||||
self.assertEqual(
|
||||
storage.state,
|
||||
'Up',
|
||||
"Check primary storage state for hypervisor: " + v["hypervisor"]
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
storage.type,
|
||||
'NetworkFilesystem',
|
||||
"Check storage pool type for hypervisor : " + v["hypervisor"]
|
||||
)
|
||||
|
||||
#Verify List Storage pool Response has newly added storage pool
|
||||
storage_pools_response = list_storage_pools(
|
||||
self.apiclient,
|
||||
id=storage.id,
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(storage_pools_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
self.assertNotEqual(
|
||||
len(storage_pools_response),
|
||||
0,
|
||||
"Check list Hosts response"
|
||||
)
|
||||
|
||||
storage_response = storage_pools_response[0]
|
||||
self.assertEqual(
|
||||
storage_response.id,
|
||||
storage.id,
|
||||
"Check storage pool ID for hypervisor: " + v["hypervisor"]
|
||||
)
|
||||
self.assertEqual(
|
||||
storage.type,
|
||||
storage_response.type,
|
||||
"Check storage pool type for hypervisor: " + v["hypervisor"]
|
||||
)
|
||||
# Call cleanup for reusing primary storage
|
||||
cleanup_resources(self.apiclient, self.cleanup)
|
||||
self.cleanup = []
|
||||
|
||||
# Create iSCSI storage pools with on XEN/KVM clusters
|
||||
for k, v in self.services["iscsi"].items():
|
||||
clusters = list_clusters(
|
||||
self.apiclient,
|
||||
zoneid=self.zone.id,
|
||||
hypervisortype=v["hypervisor"]
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(clusters, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
cluster = clusters[0]
|
||||
|
||||
storage = StoragePool.create(self.apiclient,
|
||||
v,
|
||||
clusterid=cluster.id,
|
||||
zoneid=self.zone.id,
|
||||
podid=self.pod.id
|
||||
)
|
||||
self.cleanup.append(storage)
|
||||
|
||||
self.debug("Created iSCSI storage pool in cluster: %s" % cluster.id)
|
||||
|
||||
self.assertEqual(
|
||||
storage.state,
|
||||
'Up',
|
||||
"Check primary storage state for hypervisor: " + v["hypervisor"]
|
||||
)
|
||||
|
||||
#Verify List Storage pool Response has newly added storage pool
|
||||
storage_pools_response = list_storage_pools(
|
||||
self.apiclient,
|
||||
id=storage.id,
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(storage_pools_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
self.assertNotEqual(
|
||||
len(storage_pools_response),
|
||||
0,
|
||||
"Check Hosts response for hypervisor: " + v["hypervisor"]
|
||||
)
|
||||
|
||||
storage_response = storage_pools_response[0]
|
||||
self.assertEqual(
|
||||
storage_response.id,
|
||||
storage.id,
|
||||
"Check storage pool ID for hypervisor: " + v["hypervisor"]
|
||||
)
|
||||
self.assertEqual(
|
||||
storage.type,
|
||||
storage_response.type,
|
||||
"Check storage pool type hypervisor: " + v["hypervisor"]
|
||||
)
|
||||
|
||||
# Call cleanup for reusing primary storage
|
||||
cleanup_resources(self.apiclient, self.cleanup)
|
||||
self.cleanup = []
|
||||
return
|
||||
|
|
@ -1,819 +0,0 @@
|
|||
# -*- encoding: utf-8 -*-
|
||||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
""" BVT tests for routers
|
||||
"""
|
||||
#Import Local Modules
|
||||
from cloudstackTestCase import *
|
||||
from cloudstackAPI import *
|
||||
import remoteSSHClient
|
||||
from testcase.libs.utils import *
|
||||
from testcase.libs.base import *
|
||||
from testcase.libs.common import *
|
||||
|
||||
#Import System modules
|
||||
import time
|
||||
|
||||
|
||||
class Services:
|
||||
"""Test router Services
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self.services = {
|
||||
"service_offering": {
|
||||
"name": "Tiny Instance",
|
||||
"displaytext": "Tiny Instance",
|
||||
"cpunumber": 1,
|
||||
"cpuspeed": 100, # in MHz
|
||||
"memory": 64, # In MBs
|
||||
},
|
||||
"virtual_machine":
|
||||
{
|
||||
"displayname": "Test VM",
|
||||
"username": "root",
|
||||
"password": "fr3sca",
|
||||
"ssh_port": 22,
|
||||
"hypervisor": 'XenServer',
|
||||
"privateport": 22,
|
||||
"publicport": 22,
|
||||
"protocol": 'TCP',
|
||||
},
|
||||
"account": {
|
||||
"email": "test@test.com",
|
||||
"firstname": "Test",
|
||||
"lastname": "User",
|
||||
"username": "testuser",
|
||||
"password": "fr3sca",
|
||||
},
|
||||
"ostypeid":'5776c0d2-f331-42db-ba3a-29f1f8319bc9',
|
||||
"sleep": 60,
|
||||
"timeout": 10,
|
||||
"mode": 'advanced', #Networking mode: Basic, Advanced
|
||||
}
|
||||
|
||||
|
||||
class TestRouterServices(cloudstackTestCase):
|
||||
|
||||
@classmethod
|
||||
def setUpClass(cls):
|
||||
|
||||
cls.api_client = super(
|
||||
TestRouterServices,
|
||||
cls
|
||||
).getClsTestClient().getApiClient()
|
||||
cls.services = Services().services
|
||||
# Get Zone, Domain and templates
|
||||
cls.domain = get_domain(cls.api_client, cls.services)
|
||||
cls.zone = get_zone(cls.api_client, cls.services)
|
||||
template = get_template(
|
||||
cls.api_client,
|
||||
cls.zone.id,
|
||||
cls.services["ostypeid"]
|
||||
)
|
||||
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
|
||||
|
||||
#Create an account, network, VM and IP addresses
|
||||
cls.account = Account.create(
|
||||
cls.api_client,
|
||||
cls.services["account"],
|
||||
domainid=cls.domain.id
|
||||
)
|
||||
cls.service_offering = ServiceOffering.create(
|
||||
cls.api_client,
|
||||
cls.services["service_offering"]
|
||||
)
|
||||
cls.vm_1 = VirtualMachine.create(
|
||||
cls.api_client,
|
||||
cls.services["virtual_machine"],
|
||||
templateid=template.id,
|
||||
accountid=cls.account.account.name,
|
||||
domainid=cls.account.account.domainid,
|
||||
serviceofferingid=cls.service_offering.id
|
||||
)
|
||||
cls.cleanup = [
|
||||
cls.vm_1,
|
||||
cls.account,
|
||||
cls.service_offering
|
||||
]
|
||||
return
|
||||
|
||||
@classmethod
|
||||
def tearDownClass(cls):
|
||||
try:
|
||||
cls.api_client = super(
|
||||
TestRouterServices,
|
||||
cls
|
||||
).getClsTestClient().getApiClient()
|
||||
#Clean up, terminate the created templates
|
||||
cleanup_resources(cls.api_client, cls.cleanup)
|
||||
|
||||
except Exception as e:
|
||||
raise Exception("Warning: Exception during cleanup : %s" % e)
|
||||
return
|
||||
|
||||
def setUp(self):
|
||||
self.apiclient = self.testClient.getApiClient()
|
||||
return
|
||||
|
||||
def test_01_router_internal_basic(self):
|
||||
"""Test router internal basic zone
|
||||
"""
|
||||
# Validate the following
|
||||
# 1. Router only does dhcp
|
||||
# 2. Verify that ports 67 (DHCP) and 53 (DNS) are open on UDP
|
||||
# by checking status of dnsmasq process
|
||||
|
||||
# Find router associated with user account
|
||||
list_router_response = list_routers(
|
||||
self.apiclient,
|
||||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_router_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
router = list_router_response[0]
|
||||
|
||||
hosts = list_hosts(
|
||||
self.apiclient,
|
||||
zoneid=router.zoneid,
|
||||
type='Routing',
|
||||
state='Up',
|
||||
virtualmachineid=self.vm_1.id
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(hosts, list),
|
||||
True,
|
||||
"Check list host returns a valid list"
|
||||
)
|
||||
host = hosts[0]
|
||||
|
||||
self.debug("Router ID: %s, state: %s" % (router.id, router.state))
|
||||
|
||||
self.assertEqual(
|
||||
router.state,
|
||||
'Running',
|
||||
"Check list router response for router state"
|
||||
)
|
||||
|
||||
result = get_process_status(
|
||||
host.ipaddress,
|
||||
self.services['virtual_machine']["publicport"],
|
||||
self.vm_1.username,
|
||||
self.vm_1.password,
|
||||
router.linklocalip,
|
||||
"service dnsmasq status"
|
||||
)
|
||||
res = str(result)
|
||||
self.debug("Dnsmasq process status: %s" % res)
|
||||
|
||||
self.assertEqual(
|
||||
res.count("running"),
|
||||
1,
|
||||
"Check dnsmasq service is running or not"
|
||||
)
|
||||
return
|
||||
|
||||
def test_02_router_internal_adv(self):
|
||||
"""Test router internal advanced zone
|
||||
"""
|
||||
# Validate the following
|
||||
# 1. Router does dhcp, dns, gateway, LB, PF, FW
|
||||
# 2. verify that dhcp, dns ports are open on UDP
|
||||
# 3. dnsmasq, haproxy processes should be running
|
||||
|
||||
# Find router associated with user account
|
||||
list_router_response = list_routers(
|
||||
self.apiclient,
|
||||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_router_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
|
||||
router = list_router_response[0]
|
||||
|
||||
hosts = list_hosts(
|
||||
self.apiclient,
|
||||
zoneid=router.zoneid,
|
||||
type='Routing',
|
||||
state='Up',
|
||||
virtualmachineid=self.vm_1.id
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(hosts, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
host = hosts[0]
|
||||
|
||||
self.debug("Router ID: %s, state: %s" % (router.id, router.state))
|
||||
self.assertEqual(
|
||||
router.state,
|
||||
'Running',
|
||||
"Check list router response for router state"
|
||||
)
|
||||
|
||||
result = get_process_status(
|
||||
host.ipaddress,
|
||||
self.services['virtual_machine']["publicport"],
|
||||
self.vm_1.username,
|
||||
self.vm_1.password,
|
||||
router.linklocalip,
|
||||
"service dnsmasq status"
|
||||
)
|
||||
res = str(result)
|
||||
self.debug("Dnsmasq process status: %s" % res)
|
||||
|
||||
self.assertEqual(
|
||||
res.count("running"),
|
||||
1,
|
||||
"Check dnsmasq service is running or not"
|
||||
)
|
||||
|
||||
result = get_process_status(
|
||||
host.ipaddress,
|
||||
self.services['virtual_machine']["publicport"],
|
||||
self.vm_1.username,
|
||||
self.vm_1.password,
|
||||
router.linklocalip,
|
||||
"service haproxy status"
|
||||
)
|
||||
res = str(result)
|
||||
self.assertEqual(
|
||||
res.count("running"),
|
||||
1,
|
||||
"Check haproxy service is running or not"
|
||||
)
|
||||
self.debug("Haproxy process status: %s" % res)
|
||||
return
|
||||
|
||||
def test_03_restart_network_cleanup(self):
|
||||
"""Test restart network
|
||||
"""
|
||||
|
||||
# Validate the following
|
||||
# 1. When cleanup = true, router is destroyed and a new one created
|
||||
# 2. New router will have new publicIp and linkLocalIp and
|
||||
# all it's services should resume
|
||||
|
||||
# Find router associated with user account
|
||||
list_router_response = list_routers(
|
||||
self.apiclient,
|
||||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_router_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
router = list_router_response[0]
|
||||
|
||||
#Store old values before restart
|
||||
old_linklocalip = router.linklocalip
|
||||
|
||||
timeout = 10
|
||||
# Network should be in Implemented or Setup stage before restart
|
||||
while True:
|
||||
networks = list_networks(
|
||||
self.apiclient,
|
||||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(networks, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
network = networks[0]
|
||||
if network.state in ["Implemented", "Setup"]:
|
||||
break
|
||||
elif timeout == 0:
|
||||
break
|
||||
else:
|
||||
time.sleep(self.services["sleep"])
|
||||
timeout = timeout - 1
|
||||
|
||||
self.debug(
|
||||
"Restarting network with ID: %s, Network state: %s" % (
|
||||
network.id,
|
||||
network.state
|
||||
))
|
||||
cmd = restartNetwork.restartNetworkCmd()
|
||||
cmd.id = network.id
|
||||
cmd.cleanup = True
|
||||
self.apiclient.restartNetwork(cmd)
|
||||
|
||||
# Get router details after restart
|
||||
list_router_response = list_routers(
|
||||
self.apiclient,
|
||||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_router_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
router = list_router_response[0]
|
||||
|
||||
self.assertNotEqual(
|
||||
router.linklocalip,
|
||||
old_linklocalip,
|
||||
"Check link-local IP after restart"
|
||||
)
|
||||
return
|
||||
|
||||
def test_04_restart_network_wo_cleanup(self):
|
||||
"""Test restart network without cleanup
|
||||
"""
|
||||
|
||||
# Validate the following
|
||||
# 1. When cleanup = false, router is restarted and
|
||||
# all services inside the router are restarted
|
||||
# 2. check 'uptime' to see if the actual restart happened
|
||||
|
||||
timeout = 10
|
||||
# Network should be in Implemented or Setup stage before restart
|
||||
while True:
|
||||
networks = list_networks(
|
||||
self.apiclient,
|
||||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(networks, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
network = networks[0]
|
||||
if network.state in ["Implemented", "Setup"]:
|
||||
break
|
||||
elif timeout == 0:
|
||||
break
|
||||
else:
|
||||
time.sleep(self.services["sleep"])
|
||||
timeout = timeout - 1
|
||||
|
||||
self.debug(
|
||||
"Restarting network with ID: %s, Network state: %s" % (
|
||||
network.id,
|
||||
network.state
|
||||
))
|
||||
cmd = restartNetwork.restartNetworkCmd()
|
||||
cmd.id = network.id
|
||||
cmd.cleanup = False
|
||||
self.apiclient.restartNetwork(cmd)
|
||||
|
||||
# Get router details after restart
|
||||
list_router_response = list_routers(
|
||||
self.apiclient,
|
||||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_router_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
router = list_router_response[0]
|
||||
|
||||
hosts = list_hosts(
|
||||
self.apiclient,
|
||||
zoneid=router.zoneid,
|
||||
type='Routing',
|
||||
state='Up',
|
||||
virtualmachineid=self.vm_1.id
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(hosts, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
host = hosts[0]
|
||||
|
||||
res = get_process_status(
|
||||
host.ipaddress,
|
||||
self.services['virtual_machine']["publicport"],
|
||||
self.vm_1.username,
|
||||
self.vm_1.password,
|
||||
router.linklocalip,
|
||||
"uptime"
|
||||
)
|
||||
|
||||
# res = 12:37:14 up 1 min, 0 users, load average: 0.61, 0.22, 0.08
|
||||
# Split result to check the uptime
|
||||
result = res[0].split()
|
||||
self.debug("Router Uptime: %s" % result)
|
||||
self.assertEqual(
|
||||
str(result[1]),
|
||||
'up',
|
||||
"Check router is running or not"
|
||||
)
|
||||
if str(result[3]) == "min,":
|
||||
self.assertEqual(
|
||||
(int(result[2]) < 3),
|
||||
True,
|
||||
"Check uptime is less than 3 mins or not"
|
||||
)
|
||||
else:
|
||||
self.assertEqual(
|
||||
str(result[3]),
|
||||
'sec,',
|
||||
"Check uptime is in seconds"
|
||||
)
|
||||
return
|
||||
|
||||
def test_05_router_basic(self):
|
||||
"""Test router basic setup
|
||||
"""
|
||||
|
||||
# Validate the following:
|
||||
# 1. verify that listRouters returned a 'Running' router
|
||||
# 2. router will have dns same as that seen in listZones
|
||||
# 3. router will have a guestIP and a linkLocalIp"
|
||||
|
||||
list_router_response = list_routers(
|
||||
self.apiclient,
|
||||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_router_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
self.assertNotEqual(
|
||||
len(list_router_response),
|
||||
0,
|
||||
"Check list router response"
|
||||
)
|
||||
for router in list_router_response:
|
||||
self.assertEqual(
|
||||
router.state,
|
||||
'Running',
|
||||
"Check list router response for router state"
|
||||
)
|
||||
|
||||
zones = list_zones(
|
||||
self.apiclient,
|
||||
id=router.zoneid
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(zones, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
zone = zones[0]
|
||||
|
||||
self.assertEqual(
|
||||
router.dns1,
|
||||
zone.dns1,
|
||||
"Compare DNS1 of router and zone"
|
||||
)
|
||||
self.assertEqual(
|
||||
router.dns2,
|
||||
zone.dns2,
|
||||
"Compare DNS2 of router and zone"
|
||||
)
|
||||
self.assertEqual(
|
||||
hasattr(router, 'guestipaddress'),
|
||||
True,
|
||||
"Check whether router has guest IP field"
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
hasattr(router, 'linklocalip'),
|
||||
True,
|
||||
"Check whether router has link local IP field"
|
||||
)
|
||||
return
|
||||
|
||||
def test_06_router_advanced(self):
|
||||
"""Test router advanced setup
|
||||
"""
|
||||
|
||||
# Validate the following
|
||||
# 1. verify that listRouters returned a 'Running' router
|
||||
# 2. router will have dns and gateway as in listZones, listVlanIpRanges
|
||||
# 3. router will have guest,public and linklocal IPs
|
||||
|
||||
list_router_response = list_routers(
|
||||
self.apiclient,
|
||||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_router_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
self.assertNotEqual(
|
||||
len(list_router_response),
|
||||
0,
|
||||
"Check list router response"
|
||||
)
|
||||
for router in list_router_response:
|
||||
self.assertEqual(
|
||||
router.state,
|
||||
'Running',
|
||||
"Check list router response for router state"
|
||||
)
|
||||
|
||||
zones = list_zones(
|
||||
self.apiclient,
|
||||
id=router.zoneid
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(zones, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
zone = zones[0]
|
||||
|
||||
self.assertEqual(
|
||||
router.dns1,
|
||||
zone.dns1,
|
||||
"Compare DNS1 of router and zone"
|
||||
)
|
||||
self.assertEqual(
|
||||
router.dns2,
|
||||
zone.dns2,
|
||||
"Compare DNS2 of router and zone"
|
||||
)
|
||||
self.assertEqual(
|
||||
hasattr(router, 'guestipaddress'),
|
||||
True,
|
||||
"Check whether router has guest IP field"
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
hasattr(router, 'linklocalip'),
|
||||
True,
|
||||
"Check whether router has link local IP field"
|
||||
)
|
||||
|
||||
#Fetch corresponding ip ranges information from listVlanIpRanges
|
||||
ipranges_response = list_vlan_ipranges(
|
||||
self.apiclient,
|
||||
zoneid=router.zoneid
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(ipranges_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
iprange = ipranges_response[0]
|
||||
self.assertEqual(
|
||||
router.gateway,
|
||||
iprange.gateway,
|
||||
"Check gateway with that of corresponding IP range"
|
||||
)
|
||||
return
|
||||
|
||||
def test_07_stop_router(self):
|
||||
"""Test stop router
|
||||
"""
|
||||
|
||||
# Validate the following
|
||||
# 1. listRouter should report the router for the account as stopped
|
||||
|
||||
list_router_response = list_routers(
|
||||
self.apiclient,
|
||||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_router_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
router = list_router_response[0]
|
||||
|
||||
self.debug("Stopping the router with ID: %s" % router.id)
|
||||
#Stop the router
|
||||
cmd = stopRouter.stopRouterCmd()
|
||||
cmd.id = router.id
|
||||
self.apiclient.stopRouter(cmd)
|
||||
|
||||
#List routers to check state of router
|
||||
router_response = list_routers(
|
||||
self.apiclient,
|
||||
id=router.id
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(router_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
#List router should have router in stopped state
|
||||
self.assertEqual(
|
||||
router_response[0].state,
|
||||
'Stopped',
|
||||
"Check list router response for router state"
|
||||
)
|
||||
return
|
||||
|
||||
def test_08_start_router(self):
|
||||
"""Test start router
|
||||
"""
|
||||
|
||||
# Validate the following
|
||||
# 1. listRouter should report the router for the account as stopped
|
||||
|
||||
list_router_response = list_routers(
|
||||
self.apiclient,
|
||||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_router_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
router = list_router_response[0]
|
||||
|
||||
self.debug("Starting the router with ID: %s" % router.id)
|
||||
|
||||
#Start the router
|
||||
cmd = startRouter.startRouterCmd()
|
||||
cmd.id = router.id
|
||||
self.apiclient.startRouter(cmd)
|
||||
|
||||
#List routers to check state of router
|
||||
router_response = list_routers(
|
||||
self.apiclient,
|
||||
id=router.id
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(router_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
#List router should have router in running state
|
||||
self.assertEqual(
|
||||
router_response[0].state,
|
||||
'Running',
|
||||
"Check list router response for router state"
|
||||
)
|
||||
return
|
||||
|
||||
def test_09_reboot_router(self):
|
||||
"""Test reboot router
|
||||
"""
|
||||
|
||||
# Validate the following
|
||||
# 1. listRouter should report the router for the account as stopped
|
||||
|
||||
list_router_response = list_routers(
|
||||
self.apiclient,
|
||||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_router_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
router = list_router_response[0]
|
||||
|
||||
public_ip = router.publicip
|
||||
|
||||
self.debug("Rebooting the router with ID: %s" % router.id)
|
||||
|
||||
#Reboot the router
|
||||
cmd = rebootRouter.rebootRouterCmd()
|
||||
cmd.id = router.id
|
||||
self.apiclient.rebootRouter(cmd)
|
||||
|
||||
#List routers to check state of router
|
||||
router_response = list_routers(
|
||||
self.apiclient,
|
||||
id=router.id
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(router_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
#List router should have router in running state and same public IP
|
||||
self.assertEqual(
|
||||
router_response[0].state,
|
||||
'Running',
|
||||
"Check list router response for router state"
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
router_response[0].publicip,
|
||||
public_ip,
|
||||
"Check list router response for router public IP"
|
||||
)
|
||||
return
|
||||
|
||||
def test_10_network_gc(self):
|
||||
"""Test network GC
|
||||
"""
|
||||
|
||||
# Validate the following
|
||||
# 1. stop All User VMs in the account
|
||||
# 2. wait for network.gc.interval time"
|
||||
# 3. After network.gc.interval, router should be stopped
|
||||
# 4. ListRouters should return the router in Stopped state
|
||||
|
||||
list_vms = list_virtual_machines(
|
||||
self.apiclient,
|
||||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_vms, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
self.assertNotEqual(
|
||||
len(list_vms),
|
||||
0,
|
||||
"Check length of list VM response"
|
||||
)
|
||||
|
||||
for vm in list_vms:
|
||||
self.debug("Stopping the VM with ID: %s" % vm.id)
|
||||
# Stop all virtual machines associated with that account
|
||||
cmd = stopVirtualMachine.stopVirtualMachineCmd()
|
||||
cmd.id = vm.id
|
||||
self.apiclient.stopVirtualMachine(cmd)
|
||||
|
||||
# Get network.gc.interval config value
|
||||
config = list_configurations(
|
||||
self.apiclient,
|
||||
name='network.gc.interval'
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(config, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
gcinterval = config[0]
|
||||
|
||||
# Get network.gc.wait config value
|
||||
config = list_configurations(
|
||||
self.apiclient,
|
||||
name='network.gc.wait'
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(config, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
gcwait = config[0]
|
||||
|
||||
total_wait = int(gcinterval.value) + int(gcwait.value)
|
||||
# Wait for wait_time * 2 time to cleanup all the resources
|
||||
time.sleep(total_wait * 2)
|
||||
|
||||
timeout = self.services["timeout"]
|
||||
while True:
|
||||
#Check status of network router
|
||||
list_router_response = list_routers(
|
||||
self.apiclient,
|
||||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
if isinstance(list_router_response, list):
|
||||
break
|
||||
elif timeout == 0:
|
||||
raise Exception("List router call failed!")
|
||||
time.sleep(5)
|
||||
timeout = timeout -1
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(list_router_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
router = list_router_response[0]
|
||||
|
||||
self.debug("Router state after network.gc.interval: %s" % router.state)
|
||||
self.assertEqual(
|
||||
router.state,
|
||||
'Stopped',
|
||||
"Check state of the router after stopping all VMs associated"
|
||||
)
|
||||
return
|
||||
|
|
@ -1,379 +0,0 @@
|
|||
# -*- encoding: utf-8 -*-
|
||||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
""" BVT tests for Secondary Storage
|
||||
"""
|
||||
#Import Local Modules
|
||||
from cloudstackTestCase import *
|
||||
from cloudstackAPI import *
|
||||
from testcase.libs.utils import *
|
||||
from testcase.libs.base import *
|
||||
from testcase.libs.common import *
|
||||
|
||||
#Import System modules
|
||||
import time
|
||||
|
||||
class Services:
    """Configuration holder for the secondary-storage BVT tests.

    Exposes one dict, ``self.services``, with the NFS secondary-storage
    URL, the hypervisor/template-filter combinations to exercise, and
    the polling ``sleep``/``timeout`` knobs.
    """

    def __init__(self):
        # Format of "url": File_System_Type/Location/Path
        self.services = {
            "storage": {
                "url": "nfs://192.168.100.131/SecStorage"
            },
            "hypervisors": {
                0: {
                    "hypervisor": "XenServer",
                    "templatefilter": "self",
                },
                1: {
                    "hypervisor": "KVM",
                    "templatefilter": "self",
                },
                2: {
                    "hypervisor": "VMWare",
                    "templatefilter": "self",
                },
            },
            "sleep": 60,    # seconds between polls
            "timeout": 5,   # number of polling attempts
        }
class TestSecStorageServices(cloudstackTestCase):
    """BVT checks for secondary storage: add storage to a zone, verify the
    system VMs come up, and verify BUILTIN templates download and become
    Ready for every configured hypervisor."""

    @classmethod
    def setUpClass(cls):
        # One shared API client and cleanup list for the whole class
        cls.api_client = super(TestSecStorageServices,
                               cls).getClsTestClient().getApiClient()
        cls.services = Services().services
        cls._cleanup = []
        return

    @classmethod
    def tearDownClass(cls):
        try:
            # Cleanup resources registered during the tests
            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    def setUp(self):
        self.apiclient = self.testClient.getApiClient()
        self.cleanup = []
        self.services = Services().services
        # Get Zone, domain and pod for the environment under test
        self.domain = get_domain(self.apiclient, self.services)
        self.zone = get_zone(self.apiclient, self.services)
        self.pod = get_pod(self.apiclient, self.zone.id)
        return

    def tearDown(self):
        try:
            # Clean up anything this test registered in self.cleanup
            cleanup_resources(self.apiclient, self.cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    def test_01_add_sec_storage(self):
        """Test secondary storage
        """
        # Validate the following:
        # 1. secondary storage should be added to the zone.
        # 2. Verify with listHosts and type secondarystorage

        cmd = addSecondaryStorage.addSecondaryStorageCmd()
        cmd.zoneid = self.zone.id
        cmd.url = self.services["storage"]["url"]
        sec_storage = self.apiclient.addSecondaryStorage(cmd)

        self.debug("Added secondary storage to zone: %s" % self.zone.id)
        # Registered on the class-level list so tearDownClass removes it
        self._cleanup.append(sec_storage)

        self.assertEqual(
                         sec_storage.zoneid,
                         self.zone.id,
                         "Check zoneid where sec storage is added"
                         )

        list_hosts_response = list_hosts(
                                         self.apiclient,
                                         type='SecondaryStorage',
                                         id=sec_storage.id
                                         )
        self.assertEqual(
                         isinstance(list_hosts_response, list),
                         True,
                         "Check list response returns a valid list"
                         )
        self.assertNotEqual(
                            len(list_hosts_response),
                            0,
                            "Check list Hosts response"
                            )

        host_response = list_hosts_response[0]
        # The listed host must be exactly the storage we just added
        self.assertEqual(
                         host_response.id,
                         sec_storage.id,
                         "Check ID of secondary storage"
                         )
        self.assertEqual(
                         sec_storage.type,
                         host_response.type,
                         "Check type of host from list hosts response"
                         )
        return

    def test_02_sys_vm_start(self):
        """Test system VM start
        """
        # 1. verify listHosts has all 'routing' hosts in UP state
        # 2. verify listStoragePools shows all primary storage pools
        #    in UP state
        # 3. verify that secondary storage was added successfully

        list_hosts_response = list_hosts(
                                         self.apiclient,
                                         type='Routing',
                                         zoneid=self.zone.id,
                                         podid=self.pod.id
                                         )
        self.assertEqual(
                         isinstance(list_hosts_response, list),
                         True,
                         "Check list response returns a valid list"
                         )
        # ListHosts has all 'routing' hosts in UP state
        self.assertNotEqual(
                            len(list_hosts_response),
                            0,
                            "Check list host response"
                            )
        for host in list_hosts_response:
            self.assertEqual(
                             host.state,
                             'Up',
                             "Check state of routing hosts is Up or not"
                             )

        # ListStoragePools shows all primary storage pools in UP state
        list_storage_response = list_storage_pools(
                                                   self.apiclient,
                                                   zoneid=self.zone.id,
                                                   podid=self.pod.id
                                                   )
        self.assertEqual(
                         isinstance(list_storage_response, list),
                         True,
                         "Check list response returns a valid list"
                         )
        self.assertNotEqual(
                            len(list_storage_response),
                            0,
                            "Check list storage pools response"
                            )

        # BUGFIX: the original iterated list_hosts_response here, so the
        # state of the storage pools was never actually verified.
        for primary_storage in list_storage_response:
            self.assertEqual(
                             primary_storage.state,
                             'Up',
                             "Check state of primary storage pools is Up or not"
                             )

        # Secondary storage is added successfully
        timeout = self.services["timeout"]
        while True:
            list_hosts_response = list_hosts(
                                             self.apiclient,
                                             type='SecondaryStorage',
                                             zoneid=self.zone.id,
                                             )
            # BUGFIX: the original's "elif timeout == 0 or isinstance(...)"
            # was unreachable while the response was not a list, so the
            # loop could spin forever.  Break on success OR exhausted
            # timeout and let the assertion below report the failure.
            if isinstance(list_hosts_response, list) or timeout <= 0:
                break
            # Sleep to ensure Secondary storage is Up
            time.sleep(int(self.services["sleep"]))
            timeout = timeout - 1

        self.assertEqual(
                         isinstance(list_hosts_response, list),
                         True,
                         "Check list response returns a valid list"
                         )
        self.assertNotEqual(
                            len(list_hosts_response),
                            0,
                            "Check list Hosts response"
                            )

        host_response = list_hosts_response[0]
        # Check if host is Up and running
        self.assertEqual(
                         host_response.state,
                         'Up',
                         "Check state of secondary storage"
                         )
        self.debug("Checking SSVM status in zone: %s" % self.zone.id)

        timeout = self.services["timeout"]
        while True:
            list_ssvm_response = list_ssvms(
                                            self.apiclient,
                                            systemvmtype='secondarystoragevm',
                                            zoneid=self.zone.id,
                                            podid=self.pod.id
                                            )
            # Same bounded-wait fix as above
            if isinstance(list_ssvm_response, list) or timeout <= 0:
                break
            # Sleep to ensure SSVMs are Up and Running
            time.sleep(int(self.services["sleep"]))
            timeout = timeout - 1

        self.assertEqual(
                         isinstance(list_ssvm_response, list),
                         True,
                         "Check list response returns a valid list"
                         )
        # Verify SSVM response
        self.assertNotEqual(
                            len(list_ssvm_response),
                            0,
                            "Check list System VMs response"
                            )

        for ssvm in list_ssvm_response:
            self.assertEqual(
                             ssvm.state,
                             'Running',
                             "Check whether state of SSVM is running"
                             )
        return

    def test_03_sys_template_ready(self):
        """Test system templates are ready
        """
        # Validate the following
        # If SSVM is in UP state and running
        # 1. wait for listTemplates to show all builtin templates
        #    downloaded for all added hypervisors and in "Ready" state

        for k, v in self.services["hypervisors"].items():

            self.debug("Downloading BUILTIN templates in zone: %s" %
                                                            self.zone.id)

            list_template_response = list_templates(
                                    self.apiclient,
                                    hypervisor=v["hypervisor"],
                                    zoneid=self.zone.id,
                                    templatefilter=v["templatefilter"],
                                    listall=True,
                                    account='system',
                                    domainid=self.domain.id
                                    )

            # Pick the BUILTIN template (last one wins, as before)
            templateid = None
            for template in list_template_response:
                if template.templatetype == "BUILTIN":
                    templateid = template.id

            # Wait for the template download to start
            time.sleep(self.services["sleep"])

            while templateid is not None:

                timeout = self.services["timeout"]
                while True:
                    template_response = list_templates(
                                    self.apiclient,
                                    id=templateid,
                                    zoneid=self.zone.id,
                                    templatefilter=v["templatefilter"],
                                    listall=True,
                                    account='system',
                                    domainid=self.domain.id
                                    )

                    if isinstance(template_response, list):
                        template = template_response[0]
                        break

                    elif timeout == 0:
                        raise Exception("List template API call failed.")

                    time.sleep(1)
                    timeout = timeout - 1

                # If template is ready,
                # template.status = Download Complete
                # Downloading - x% Downloaded
                # Error - Any other string
                if template.status == 'Download Complete':
                    break
                elif 'Downloaded' not in template.status.split():
                    # BUGFIX: was a bare "raise Exception" with no message
                    raise Exception(
                        "Template download failed, status: %s" %
                        template.status)
                elif 'Downloaded' in template.status.split():
                    time.sleep(self.services["sleep"])

            # Ensure the template has reached the ready state
            time.sleep(self.services["sleep"])

            timeout = self.services["timeout"]
            while True:
                template_response = list_templates(
                                    self.apiclient,
                                    id=templateid,
                                    zoneid=self.zone.id,
                                    templatefilter=v["templatefilter"],
                                    listall=True,
                                    account='system',
                                    domainid=self.domain.id
                                    )

                if isinstance(template_response, list):
                    template = template_response[0]
                    break

                elif timeout == 0:
                    raise Exception("List template API call failed.")

                time.sleep(1)
                timeout = timeout - 1

            self.assertEqual(
                             isinstance(template_response, list),
                             True,
                             "Check list response returns a valid list"
                             )
            template = template_response[0]

            self.assertEqual(
                             template.isready,
                             True,
                             "Check whether state of template is ready or not"
                             )
        return
|
|
@ -1,237 +0,0 @@
|
|||
# -*- encoding: utf-8 -*-
|
||||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
""" BVT tests for Service offerings"""
|
||||
|
||||
#Import Local Modules
|
||||
from cloudstackTestCase import *
|
||||
from cloudstackAPI import *
|
||||
from testcase.libs.utils import *
|
||||
from testcase.libs.base import *
|
||||
from testcase.libs.common import *
|
||||
|
||||
|
||||
class Services:
    """Configuration holder for the service-offering BVT tests.

    ``self.services["off"]`` describes the compute offering used by the
    create/update/delete tests.
    """

    def __init__(self):
        offering = {
            "name": "Service Offering",
            "displaytext": "Service Offering",
            "cpunumber": 1,
            "cpuspeed": 100,   # MHz
            "memory": 64,      # in MBs
        }
        self.services = {"off": offering}
class TestCreateServiceOffering(cloudstackTestCase):
    """BVT test: create a compute service offering and verify its
    attributes through listServiceOfferings."""

    def setUp(self):
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        self.cleanup = []
        self.services = Services().services

    def tearDown(self):
        try:
            self.dbclient.close()
            # Clean up, terminate the created offerings
            cleanup_resources(self.apiclient, self.cleanup)

        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)

        return

    def test_01_create_service_offering(self):
        """Test to create service offering"""

        # Validate the following:
        # 1. createServiceOfferings should return a valid information
        #    for newly created offering
        # 2. The Cloud Database contains the valid information

        service_offering = ServiceOffering.create(
                                                  self.apiclient,
                                                  self.services["off"]
                                                  )
        self.cleanup.append(service_offering)

        self.debug("Created service offering with ID: %s" % service_offering.id)

        list_service_response = list_service_offering(
                                                      self.apiclient,
                                                      id=service_offering.id
                                                      )
        self.assertEqual(
                         isinstance(list_service_response, list),
                         True,
                         "Check list response returns a valid list"
                         )
        self.assertNotEqual(
                            len(list_service_response),
                            0,
                            "Check Service offering is created"
                            )
        # FIX: the original assigned this local and then kept re-indexing
        # list_service_response[0]; use the local consistently instead.
        service_response = list_service_response[0]

        self.assertEqual(
                         service_response.cpunumber,
                         self.services["off"]["cpunumber"],
                         # BUGFIX: message previously said "server id" but
                         # this assertion checks cpunumber
                         "Check cpunumber in createServiceOffering"
                         )
        self.assertEqual(
                         service_response.cpuspeed,
                         self.services["off"]["cpuspeed"],
                         "Check cpuspeed in createServiceOffering"
                         )
        self.assertEqual(
                         service_response.displaytext,
                         self.services["off"]["displaytext"],
                         "Check server displaytext in createServiceOfferings"
                         )
        self.assertEqual(
                         service_response.memory,
                         self.services["off"]["memory"],
                         "Check memory in createServiceOffering"
                         )
        self.assertEqual(
                         service_response.name,
                         self.services["off"]["name"],
                         "Check name in createServiceOffering"
                         )
        return
|
||||
class TestServiceOfferings(cloudstackTestCase):
    """BVT tests: update and delete service offerings created in
    setUpClass."""

    @classmethod
    def setUpClass(cls):
        cls.services = Services().services
        cls.api_client = super(TestServiceOfferings,
                               cls).getClsTestClient().getApiClient()
        # Two offerings: one is edited (test_02), one is deleted (test_03)
        cls.service_offering_1 = ServiceOffering.create(
                                                        cls.api_client,
                                                        cls.services["off"]
                                                        )
        cls.service_offering_2 = ServiceOffering.create(
                                                        cls.api_client,
                                                        cls.services["off"]
                                                        )
        # NOTE(review): service_offering_2 is intentionally not registered
        # here because test_03 deletes it; if test_03 never runs it leaks.
        # Confirm whether cleanup_resources tolerates double deletion.
        cls._cleanup = [cls.service_offering_1]
        return

    @classmethod
    def tearDownClass(cls):
        try:
            cls.api_client = super(TestServiceOfferings,
                                   cls).getClsTestClient().getApiClient()
            # Clean up, terminate the created offerings
            cleanup_resources(cls.api_client, cls._cleanup)

        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    def setUp(self):
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        self.cleanup = []

    def tearDown(self):
        try:
            self.dbclient.close()
            # Clean up anything this test registered
            cleanup_resources(self.apiclient, self.cleanup)

        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)

        return

    def test_02_edit_service_offering(self):
        """Test to update existing service offering"""

        # Validate the following:
        # 1. updateServiceOffering should return
        #    a valid information for newly created offering

        # Generate new name & displaytext from random data
        random_displaytext = random_gen()
        random_name = random_gen()

        self.debug("Updating service offering with ID: %s" %
                                    self.service_offering_1.id)

        cmd = updateServiceOffering.updateServiceOfferingCmd()
        cmd.id = self.service_offering_1.id
        cmd.displaytext = random_displaytext
        cmd.name = random_name
        self.apiclient.updateServiceOffering(cmd)

        listed = list_service_offering(
                                       self.apiclient,
                                       id=self.service_offering_1.id
                                       )
        self.assertEqual(
                         isinstance(listed, list),
                         True,
                         "Check list response returns a valid list"
                         )

        self.assertNotEqual(
                            len(listed),
                            0,
                            "Check Service offering is updated"
                            )

        updated = listed[0]
        self.assertEqual(
                         updated.displaytext,
                         random_displaytext,
                         "Check server displaytext in updateServiceOffering"
                         )
        self.assertEqual(
                         updated.name,
                         random_name,
                         "Check server name in updateServiceOffering"
                         )

        return

    def test_03_delete_service_offering(self):
        """Test to delete service offering"""

        # Validate the following:
        # 1. deleteServiceOffering should return
        #    a valid information for newly created offering

        self.debug("Deleting service offering with ID: %s" %
                                    self.service_offering_2.id)

        self.service_offering_2.delete(self.apiclient)

        listed = list_service_offering(
                                       self.apiclient,
                                       id=self.service_offering_2.id
                                       )

        # A deleted offering must no longer be listed
        self.assertEqual(
                         listed,
                         None,
                         "Check if service offering exists in listDiskOfferings"
                         )

        return
File diff suppressed because it is too large
Load Diff
|
|
@ -1,915 +0,0 @@
|
|||
# -*- encoding: utf-8 -*-
|
||||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
""" BVT tests for SSVM
|
||||
"""
|
||||
#Import Local Modules
|
||||
from cloudstackTestCase import *
|
||||
from cloudstackAPI import *
|
||||
import remoteSSHClient
|
||||
from testcase.libs.utils import *
|
||||
from testcase.libs.base import *
|
||||
from testcase.libs.common import *
|
||||
import telnetlib
|
||||
|
||||
#Import System modules
|
||||
import time
|
||||
|
||||
class Services:
    """Configuration holder for the SSVM BVT tests.

    ``self.services["host"]`` carries the SSH credentials used to reach
    hypervisor hosts; ``sleep``/``timeout`` tune the polling loops.
    """

    def __init__(self):
        ssh_host = {
            "username": 'root',    # Credentials for SSH
            "password": 'fr3sca',
            "publicport": 22,
        }
        self.services = {
            "host": ssh_host,
            "sleep": 60,
            "timeout": 10,
        }
class TestSSVMs(cloudstackTestCase):
|
||||
|
||||
def setUp(self):
|
||||
|
||||
self.apiclient = self.testClient.getApiClient()
|
||||
self.cleanup = []
|
||||
self.services = Services().services
|
||||
self.zone = get_zone(self.apiclient, self.services)
|
||||
return
|
||||
|
||||
def tearDown(self):
|
||||
try:
|
||||
#Clean up, terminate the created templates
|
||||
cleanup_resources(self.apiclient, self.cleanup)
|
||||
|
||||
except Exception as e:
|
||||
raise Exception("Warning: Exception during cleanup : %s" % e)
|
||||
return
|
||||
|
||||
def test_01_list_sec_storage_vm(self):
|
||||
"""Test List secondary storage VMs
|
||||
"""
|
||||
|
||||
# Validate the following:
|
||||
# 1. listSystemVM (systemvmtype=secondarystoragevm)
|
||||
# should return only ONE SSVM per zone
|
||||
# 2. The returned SSVM should be in Running state
|
||||
# 3. listSystemVM for secondarystoragevm should list publicip,
|
||||
# privateip and link-localip
|
||||
# 4. The gateway programmed on the ssvm by listSystemVm should be
|
||||
# the same as the gateway returned by listVlanIpRanges
|
||||
# 5. DNS entries must match those given for the zone
|
||||
|
||||
list_ssvm_response = list_ssvms(
|
||||
self.apiclient,
|
||||
systemvmtype='secondarystoragevm',
|
||||
state='Running',
|
||||
zoneid=self.zone.id
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_ssvm_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
#Verify SSVM response
|
||||
self.assertNotEqual(
|
||||
len(list_ssvm_response),
|
||||
0,
|
||||
"Check list System VMs response"
|
||||
)
|
||||
|
||||
list_zones_response = list_zones(self.apiclient)
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(list_zones_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
|
||||
self.debug("Number of zones: %s" % len(list_zones_response))
|
||||
self.debug("Number of SSVMs: %s" % len(list_ssvm_response))
|
||||
# Number of Sec storage VMs = No of Zones
|
||||
self.assertEqual(
|
||||
len(list_ssvm_response),
|
||||
len(list_zones_response),
|
||||
"Check number of SSVMs with number of zones"
|
||||
)
|
||||
#For each secondary storage VM check private IP,
|
||||
#public IP, link local IP and DNS
|
||||
for ssvm in list_ssvm_response:
|
||||
|
||||
self.debug("SSVM state: %s" % ssvm.state)
|
||||
self.assertEqual(
|
||||
ssvm.state,
|
||||
'Running',
|
||||
"Check whether state of SSVM is running"
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
hasattr(ssvm, 'privateip'),
|
||||
True,
|
||||
"Check whether SSVM has private IP field"
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
hasattr(ssvm, 'linklocalip'),
|
||||
True,
|
||||
"Check whether SSVM has link local IP field"
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
hasattr(ssvm, 'publicip'),
|
||||
True,
|
||||
"Check whether SSVM has public IP field"
|
||||
)
|
||||
|
||||
#Fetch corresponding ip ranges information from listVlanIpRanges
|
||||
ipranges_response = list_vlan_ipranges(
|
||||
self.apiclient,
|
||||
zoneid=ssvm.zoneid
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(ipranges_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
iprange = ipranges_response[0]
|
||||
|
||||
self.assertEqual(
|
||||
ssvm.gateway,
|
||||
iprange.gateway,
|
||||
"Check gateway with that of corresponding ip range"
|
||||
)
|
||||
|
||||
#Fetch corresponding zone information from listZones
|
||||
zone_response = list_zones(
|
||||
self.apiclient,
|
||||
id=ssvm.zoneid
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(zone_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
self.assertEqual(
|
||||
ssvm.dns1,
|
||||
zone_response[0].dns1,
|
||||
"Check DNS1 with that of corresponding zone"
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
ssvm.dns2,
|
||||
zone_response[0].dns2,
|
||||
"Check DNS2 with that of corresponding zone"
|
||||
)
|
||||
return
|
||||
|
||||
def test_02_list_cpvm_vm(self):
|
||||
"""Test List console proxy VMs
|
||||
"""
|
||||
|
||||
# Validate the following:
|
||||
# 1. listSystemVM (systemvmtype=consoleproxy) should return
|
||||
# at least ONE CPVM per zone
|
||||
# 2. The returned ConsoleProxyVM should be in Running state
|
||||
# 3. listSystemVM for console proxy should list publicip, privateip
|
||||
# and link-localip
|
||||
# 4. The gateway programmed on the console proxy should be the same
|
||||
# as the gateway returned by listZones
|
||||
# 5. DNS entries must match those given for the zone
|
||||
|
||||
list_cpvm_response = list_ssvms(
|
||||
self.apiclient,
|
||||
systemvmtype='consoleproxy',
|
||||
state='Running',
|
||||
zoneid=self.zone.id
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_cpvm_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
#Verify CPVM response
|
||||
self.assertNotEqual(
|
||||
len(list_cpvm_response),
|
||||
0,
|
||||
"Check list System VMs response"
|
||||
)
|
||||
list_zones_response = list_zones(self.apiclient)
|
||||
# Number of Console Proxy VMs = No of Zones
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(list_zones_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
|
||||
self.debug("Number of zones: %s" % len(list_zones_response))
|
||||
self.debug("Number of CPVMs: %s" % len(list_cpvm_response))
|
||||
|
||||
self.assertEqual(
|
||||
len(list_cpvm_response),
|
||||
len(list_zones_response),
|
||||
"Check number of CPVMs with number of zones"
|
||||
)
|
||||
#For each CPVM check private IP, public IP, link local IP and DNS
|
||||
for cpvm in list_cpvm_response:
|
||||
|
||||
self.debug("CPVM state: %s" % cpvm.state)
|
||||
self.assertEqual(
|
||||
cpvm.state,
|
||||
'Running',
|
||||
"Check whether state of CPVM is running"
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
hasattr(cpvm, 'privateip'),
|
||||
True,
|
||||
"Check whether CPVM has private IP field"
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
hasattr(cpvm, 'linklocalip'),
|
||||
True,
|
||||
"Check whether CPVM has link local IP field"
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
hasattr(cpvm, 'publicip'),
|
||||
True,
|
||||
"Check whether CPVM has public IP field"
|
||||
)
|
||||
#Fetch corresponding ip ranges information from listVlanIpRanges
|
||||
ipranges_response = list_vlan_ipranges(
|
||||
self.apiclient,
|
||||
zoneid=cpvm.zoneid
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(ipranges_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
iprange = ipranges_response[0]
|
||||
|
||||
self.assertEqual(
|
||||
cpvm.gateway,
|
||||
iprange.gateway,
|
||||
"Check gateway with that of corresponding ip range"
|
||||
)
|
||||
|
||||
#Fetch corresponding zone information from listZones
|
||||
zone_response = list_zones(
|
||||
self.apiclient,
|
||||
id=cpvm.zoneid
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
cpvm.dns1,
|
||||
zone_response[0].dns1,
|
||||
"Check DNS1 with that of corresponding zone"
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
cpvm.dns2,
|
||||
zone_response[0].dns2,
|
||||
"Check DNS2 with that of corresponding zone"
|
||||
)
|
||||
return
|
||||
|
||||
def test_03_ssvm_internals(self):
|
||||
"""Test SSVM Internals"""
|
||||
|
||||
# Validate the following
|
||||
# 1. The SSVM check script should not return any
|
||||
# WARN|ERROR|FAIL messages
|
||||
# 2. If you are unable to login to the SSVM with the signed key
|
||||
# then test is deemed a failure
|
||||
# 3. There should be only one ""cloud"" process running within the SSVM
|
||||
# 4. If no process is running/multiple process are running
|
||||
# then the test is a failure
|
||||
|
||||
list_ssvm_response = list_ssvms(
|
||||
self.apiclient,
|
||||
systemvmtype='secondarystoragevm',
|
||||
state='Running',
|
||||
zoneid=self.zone.id
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_ssvm_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
ssvm = list_ssvm_response[0]
|
||||
|
||||
hosts = list_hosts(
|
||||
self.apiclient,
|
||||
id=ssvm.hostid
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(hosts, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
host = hosts[0]
|
||||
|
||||
self.debug("Running SSVM check script")
|
||||
|
||||
result = get_process_status(
|
||||
host.ipaddress,
|
||||
self.services['host']["publicport"],
|
||||
self.services['host']["username"],
|
||||
self.services['host']["password"],
|
||||
ssvm.linklocalip,
|
||||
"/usr/local/cloud/systemvm/ssvm-check.sh |grep -e ERROR -e WARNING -e FAIL"
|
||||
)
|
||||
res = str(result)
|
||||
self.debug("SSVM script output: %s" % res)
|
||||
|
||||
self.assertEqual(
|
||||
res.count("ERROR"),
|
||||
1,
|
||||
"Check for Errors in tests"
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
res.count("WARNING"),
|
||||
1,
|
||||
"Check for warnings in tests"
|
||||
)
|
||||
|
||||
#Check status of cloud service
|
||||
result = get_process_status(
|
||||
host.ipaddress,
|
||||
self.services['host']["publicport"],
|
||||
self.services['host']["username"],
|
||||
self.services['host']["password"],
|
||||
ssvm.linklocalip,
|
||||
"service cloud status"
|
||||
)
|
||||
res = str(result)
|
||||
self.debug("Cloud Process status: %s" % res)
|
||||
# cloud.com service (type=secstorage) is running: process id: 2346
|
||||
self.assertEqual(
|
||||
res.count("is running"),
|
||||
1,
|
||||
"Check cloud service is running or not"
|
||||
)
|
||||
return
|
||||
|
||||
def test_04_cpvm_internals(self):
|
||||
"""Test CPVM Internals"""
|
||||
|
||||
# Validate the following
|
||||
# 1. test that telnet access on 8250 is available to
|
||||
# the management server for the CPVM
|
||||
# 2. No telnet access, test FAIL
|
||||
# 3. Service cloud status should report cloud agent status to be
|
||||
# running
|
||||
|
||||
list_cpvm_response = list_ssvms(
|
||||
self.apiclient,
|
||||
systemvmtype='consoleproxy',
|
||||
state='Running',
|
||||
zoneid=self.zone.id
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_cpvm_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
cpvm = list_cpvm_response[0]
|
||||
|
||||
hosts = list_hosts(
|
||||
self.apiclient,
|
||||
id=cpvm.hostid
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(hosts, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
host = hosts[0]
|
||||
|
||||
try:
|
||||
telnet = telnetlib.Telnet(
|
||||
str(self.apiclient.connection.mgtSvr),
|
||||
'8250'
|
||||
)
|
||||
self.debug("Telnet management server (IP: %s)" %
|
||||
self.apiclient.connection.mgtSvr)
|
||||
except Exception as e:
|
||||
self.fail(
|
||||
"Telnet Access failed for %s: %s" % \
|
||||
(self.apiclient.connection.mgtSvr, e)
|
||||
)
|
||||
|
||||
self.debug("Checking cloud process status")
|
||||
|
||||
result = get_process_status(
|
||||
host.ipaddress,
|
||||
self.services['host']["publicport"],
|
||||
self.services['host']["username"],
|
||||
self.services['host']["password"],
|
||||
cpvm.linklocalip,
|
||||
"service cloud status"
|
||||
)
|
||||
res = str(result)
|
||||
self.debug("Cloud Process status: %s" % res)
|
||||
self.assertEqual(
|
||||
res.count("is running"),
|
||||
1,
|
||||
"Check cloud service is running or not"
|
||||
)
|
||||
return
|
||||
|
||||
def test_05_stop_ssvm(self):
|
||||
"""Test stop SSVM
|
||||
"""
|
||||
|
||||
# Validate the following
|
||||
# 1. The SSVM should go to stop state
|
||||
# 2. After a brief delay of say one minute, the SSVM should be
|
||||
# restarted once again and return to Running state with previous two
|
||||
# test cases still passing
|
||||
# 3. If either of the two above steps fail the test is a failure
|
||||
|
||||
list_ssvm_response = list_ssvms(
|
||||
self.apiclient,
|
||||
systemvmtype='secondarystoragevm',
|
||||
state='Running',
|
||||
zoneid=self.zone.id
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_ssvm_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
ssvm = list_ssvm_response[0]
|
||||
|
||||
hosts = list_hosts(
|
||||
self.apiclient,
|
||||
id=ssvm.hostid
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(hosts, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
host = hosts[0]
|
||||
|
||||
self.debug("Stopping SSVM: %s" % ssvm.id)
|
||||
cmd = stopSystemVm.stopSystemVmCmd()
|
||||
cmd.id = ssvm.id
|
||||
self.apiclient.stopSystemVm(cmd)
|
||||
|
||||
# Sleep to ensure that VM is in proper state
|
||||
time.sleep(self.services["sleep"])
|
||||
|
||||
timeout = self.services["timeout"]
|
||||
while True:
|
||||
list_ssvm_response = list_ssvms(
|
||||
self.apiclient,
|
||||
id=ssvm.id
|
||||
)
|
||||
if isinstance(list_ssvm_response, list):
|
||||
if list_ssvm_response[0].state == 'Running':
|
||||
break
|
||||
elif timeout == 0:
|
||||
raise Exception("List SSVM call failed!")
|
||||
|
||||
time.sleep(self.services["sleep"])
|
||||
timeout = timeout - 1
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(list_ssvm_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
ssvm_response = list_ssvm_response[0]
|
||||
self.debug("SSVM state after debug: %s" % ssvm_response.state)
|
||||
self.assertEqual(
|
||||
ssvm_response.state,
|
||||
'Running',
|
||||
"Check whether SSVM is running or not"
|
||||
)
|
||||
# Call above tests to ensure SSVM is properly running
|
||||
self.test_01_list_sec_storage_vm()
|
||||
self.test_03_ssvm_internals()
|
||||
return
|
||||
|
||||
def test_06_stop_cpvm(self):
    """Test stop CPVM.

    Validates the following:
    1. The CPVM should go to Stopped state after stopSystemVm.
    2. After a brief delay the CPVM should be restarted automatically
       and return to Running state, with the earlier CPVM test cases
       still passing.
    3. If either of the two above steps fails, the test is a failure.
    """
    list_cpvm_response = list_ssvms(
        self.apiclient,
        systemvmtype='consoleproxy',
        state='Running',
        zoneid=self.zone.id
    )
    self.assertEqual(
        isinstance(list_cpvm_response, list),
        True,
        "Check list response returns a valid list"
    )
    cpvm = list_cpvm_response[0]

    hosts = list_hosts(
        self.apiclient,
        id=cpvm.hostid
    )
    self.assertEqual(
        isinstance(hosts, list),
        True,
        "Check list response returns a valid list"
    )
    host = hosts[0]

    self.debug("Stopping CPVM: %s" % cpvm.id)
    cmd = stopSystemVm.stopSystemVmCmd()
    cmd.id = cpvm.id
    self.apiclient.stopSystemVm(cmd)

    # Sleep to ensure that VM is in proper state
    time.sleep(self.services["sleep"])

    # Poll until the CPVM has been brought back up by the management
    # server, or the retry budget is exhausted
    timeout = self.services["timeout"]
    while True:
        list_cpvm_response = list_ssvms(
            self.apiclient,
            id=cpvm.id
        )
        if isinstance(list_cpvm_response, list):
            if list_cpvm_response[0].state == 'Running':
                break
        elif timeout == 0:
            raise Exception("List CPVM call failed!")

        time.sleep(self.services["sleep"])
        timeout = timeout - 1

    # FIX: guard against a non-list response before indexing, matching
    # the equivalent SSVM test; previously this indexed unchecked
    self.assertEqual(
        isinstance(list_cpvm_response, list),
        True,
        "Check list response returns a valid list"
    )
    cpvm_response = list_cpvm_response[0]

    self.debug("CPVM state after debug: %s" % cpvm_response.state)

    self.assertEqual(
        cpvm_response.state,
        'Running',
        "Check whether CPVM is running or not"
    )
    # Call above tests to ensure CPVM is properly running
    self.test_02_list_cpvm_vm()
    self.test_04_cpvm_internals()
    return
|
||||
|
||||
def test_07_reboot_ssvm(self):
    """Test reboot SSVM.

    Validates the following:
    1. The SSVM should stop and return to Running state.
    2. The SSVM's public IP and private IP must remain the same
       before and after the reboot.
    3. The cloud process should still be running within the SSVM.
    """
    list_ssvm_response = list_ssvms(
        self.apiclient,
        systemvmtype='secondarystoragevm',
        state='Running',
        zoneid=self.zone.id
    )

    self.assertEqual(
        isinstance(list_ssvm_response, list),
        True,
        "Check list response returns a valid list"
    )

    ssvm_response = list_ssvm_response[0]

    hosts = list_hosts(
        self.apiclient,
        id=ssvm_response.hostid
    )
    self.assertEqual(
        isinstance(hosts, list),
        True,
        "Check list response returns a valid list"
    )
    host = hosts[0]

    # Store the public & private IP values before reboot
    old_public_ip = ssvm_response.publicip
    old_private_ip = ssvm_response.privateip

    self.debug("Rebooting SSVM: %s" % ssvm_response.id)
    cmd = rebootSystemVm.rebootSystemVmCmd()
    cmd.id = ssvm_response.id
    self.apiclient.rebootSystemVm(cmd)

    # Sleep to ensure that VM is in proper state
    time.sleep(self.services["sleep"])

    # Poll until the SSVM is back in Running state
    timeout = self.services["timeout"]
    while True:
        list_ssvm_response = list_ssvms(
            self.apiclient,
            id=ssvm_response.id
        )
        if isinstance(list_ssvm_response, list):
            if list_ssvm_response[0].state == 'Running':
                break
        elif timeout == 0:
            raise Exception("List SSVM call failed!")

        time.sleep(self.services["sleep"])
        timeout = timeout - 1

    ssvm_response = list_ssvm_response[0]
    self.debug("SSVM State: %s" % ssvm_response.state)
    self.assertEqual(
        'Running',
        str(ssvm_response.state),
        # FIX: message previously said "CPVM" in this SSVM test
        "Check whether SSVM is running or not"
    )

    self.assertEqual(
        ssvm_response.publicip,
        old_public_ip,
        "Check Public IP after reboot with that of before reboot"
    )

    self.assertEqual(
        ssvm_response.privateip,
        old_private_ip,
        "Check Private IP after reboot with that of before reboot"
    )
    # Call to verify cloud process is running
    self.test_03_ssvm_internals()
    return
|
||||
|
||||
def test_08_reboot_cpvm(self):
    """Reboot the console proxy VM and verify it returns to Running
    state with the same public/private IPs as before the reboot, then
    re-run the CPVM internals test to confirm the cloud service is up.
    """
    running_proxies = list_ssvms(
        self.apiclient,
        systemvmtype='consoleproxy',
        state='Running',
        zoneid=self.zone.id
    )
    self.assertEqual(
        isinstance(running_proxies, list),
        True,
        "Check list response returns a valid list"
    )
    cpvm_response = running_proxies[0]

    proxy_hosts = list_hosts(
        self.apiclient,
        id=cpvm_response.hostid
    )
    self.assertEqual(
        isinstance(proxy_hosts, list),
        True,
        "Check list response returns a valid list"
    )
    host = proxy_hosts[0]

    # Remember the addresses so they can be compared after the reboot
    public_ip_before = cpvm_response.publicip
    private_ip_before = cpvm_response.privateip

    self.debug("Rebooting CPVM: %s" % cpvm_response.id)

    cmd = rebootSystemVm.rebootSystemVmCmd()
    cmd.id = cpvm_response.id
    self.apiclient.rebootSystemVm(cmd)

    # Give the VM a moment to settle into a definite state
    time.sleep(self.services["sleep"])

    retries_left = self.services["timeout"]
    while True:
        list_cpvm_response = list_ssvms(
            self.apiclient,
            id=cpvm_response.id
        )
        if isinstance(list_cpvm_response, list):
            if list_cpvm_response[0].state == 'Running':
                break
        elif retries_left == 0:
            raise Exception("List CPVM call failed!")

        time.sleep(self.services["sleep"])
        retries_left = retries_left - 1

    cpvm_response = list_cpvm_response[0]

    self.debug("CPVM state: %s" % cpvm_response.state)
    self.assertEqual(
        'Running',
        str(cpvm_response.state),
        "Check whether CPVM is running or not"
    )

    self.assertEqual(
        cpvm_response.publicip,
        public_ip_before,
        "Check Public IP after reboot with that of before reboot"
    )

    self.assertEqual(
        cpvm_response.privateip,
        private_ip_before,
        "Check Private IP after reboot with that of before reboot"
    )
    # Call to verify cloud process is running
    self.test_04_cpvm_internals()
    return
|
||||
|
||||
def test_09_destroy_ssvm(self):
    """Destroy the SSVM and verify a fresh replacement is spun up.

    Checks that the replacement has a different name than the destroyed
    VM, carries public, private and link-local IP fields, and that the
    cloud process inside it is up (via the internals test).
    """
    ssvm_list = list_ssvms(
        self.apiclient,
        systemvmtype='secondarystoragevm',
        state='Running',
        zoneid=self.zone.id
    )
    self.assertEqual(
        isinstance(ssvm_list, list),
        True,
        "Check list response returns a valid list"
    )
    ssvm_response = ssvm_list[0]

    # Name of the VM about to be destroyed; the replacement must differ
    previous_name = ssvm_response.name

    self.debug("Destroying SSVM: %s" % ssvm_response.id)
    cmd = destroySystemVm.destroySystemVmCmd()
    cmd.id = ssvm_response.id
    self.apiclient.destroySystemVm(cmd)

    # Sleep to ensure that VM is in proper state
    time.sleep(self.services["sleep"])

    # Wait for the management server to spin up a Running replacement
    attempts_remaining = self.services["timeout"]
    while True:
        list_ssvm_response = list_ssvms(
            self.apiclient,
            zoneid=self.zone.id,
            systemvmtype='secondarystoragevm'
        )
        if isinstance(list_ssvm_response, list):
            if list_ssvm_response[0].state == 'Running':
                break
        elif attempts_remaining == 0:
            raise Exception("List SSVM call failed!")

        time.sleep(self.services["sleep"])
        attempts_remaining = attempts_remaining - 1

    ssvm_response = list_ssvm_response[0]

    # Verify Name, Public IP, Private IP and Link local IP
    # for newly created SSVM
    self.assertNotEqual(
        ssvm_response.name,
        previous_name,
        "Check SSVM new name with name of destroyed SSVM"
    )
    self.assertEqual(
        hasattr(ssvm_response, 'privateip'),
        True,
        "Check whether SSVM has private IP field"
    )

    self.assertEqual(
        hasattr(ssvm_response, 'linklocalip'),
        True,
        "Check whether SSVM has link local IP field"
    )

    self.assertEqual(
        hasattr(ssvm_response, 'publicip'),
        True,
        "Check whether SSVM has public IP field"
    )

    # Call to verify cloud process is running
    self.test_03_ssvm_internals()
    return
|
||||
|
||||
def test_10_destroy_cpvm(self):
    """Test destroy CPVM.

    Validates the following:
    1. The CPVM should be completely destroyed and a new one spun up.
    2. listSystemVMs shows a different name for the system VM than
       before the destroy.
    3. The new CPVM has public/private and link-local IP fields.
    4. The cloud process within the CPVM is up and running.
    """
    list_cpvm_response = list_ssvms(
        self.apiclient,
        systemvmtype='consoleproxy',
        # FIX: filter on Running state, consistent with the SSVM
        # destroy test; without it the listing could return a VM in a
        # transient state
        state='Running',
        zoneid=self.zone.id
    )
    self.assertEqual(
        isinstance(list_cpvm_response, list),
        True,
        "Check list response returns a valid list"
    )
    cpvm_response = list_cpvm_response[0]

    old_name = cpvm_response.name

    self.debug("Destroying CPVM: %s" % cpvm_response.id)
    cmd = destroySystemVm.destroySystemVmCmd()
    cmd.id = cpvm_response.id
    self.apiclient.destroySystemVm(cmd)

    # Sleep to ensure that VM is in proper state
    time.sleep(self.services["sleep"])

    # Poll until a replacement CPVM reaches Running state
    timeout = self.services["timeout"]
    while True:
        list_cpvm_response = list_ssvms(
            self.apiclient,
            systemvmtype='consoleproxy',
            zoneid=self.zone.id
        )
        if isinstance(list_cpvm_response, list):
            if list_cpvm_response[0].state == 'Running':
                break
        elif timeout == 0:
            raise Exception("List CPVM call failed!")

        time.sleep(self.services["sleep"])
        timeout = timeout - 1

    cpvm_response = list_cpvm_response[0]

    # Verify Name, Public IP, Private IP and Link local IP
    # for newly created CPVM
    self.assertNotEqual(
        cpvm_response.name,
        old_name,
        # FIX: message previously said "SSVM" in this CPVM test
        "Check CPVM new name with name of destroyed CPVM"
    )
    self.assertEqual(
        hasattr(cpvm_response, 'privateip'),
        True,
        "Check whether CPVM has private IP field"
    )

    self.assertEqual(
        hasattr(cpvm_response, 'linklocalip'),
        True,
        "Check whether CPVM has link local IP field"
    )

    self.assertEqual(
        hasattr(cpvm_response, 'publicip'),
        True,
        "Check whether CPVM has public IP field"
    )

    # Call to verify cloud process is running
    self.test_04_cpvm_internals()
    return
|
||||
|
|
@ -1,752 +0,0 @@
|
|||
# -*- encoding: utf-8 -*-
|
||||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
""" BVT tests for Templates ISO
|
||||
"""
|
||||
#Import Local Modules
|
||||
from cloudstackTestCase import *
|
||||
from cloudstackAPI import *
|
||||
from testcase.libs.utils import *
|
||||
from testcase.libs.base import *
|
||||
from testcase.libs.common import *
|
||||
import urllib
|
||||
from random import random
|
||||
#Import System modules
|
||||
import datetime
|
||||
|
||||
|
||||
class Services:
    """Container for the test data consumed by the template BVT tests.

    Holds a single ``services`` dict of account credentials, offerings,
    VM/volume/template definitions and global knobs (sleep/timeout,
    networking mode, copy-template zone IDs).
    """

    def __init__(self):
        # All values are consumed verbatim by the test cases below;
        # keys and literals must stay exactly as the API expects them.
        self.services = {
            "account": {
                "email": "test@test.com",
                "firstname": "Test",
                "lastname": "User",
                "username": "test",
                # Random characters are appended for unique
                # username
                "password": "fr3sca",
            },
            "service_offering": {
                "name": "Tiny Instance",
                "displaytext": "Tiny Instance",
                "cpunumber": 1,
                "cpuspeed": 100,  # in MHz
                "memory": 64,  # In MBs
            },
            "disk_offering": {
                "displaytext": "Small",
                "name": "Small",
                "disksize": 1
            },
            "virtual_machine": {
                "displayname": "testVM",
                "hypervisor": 'XenServer',
                "protocol": 'TCP',
                "ssh_port": 22,
                "username": "root",
                "password": "password",
                "privateport": 22,
                "publicport": 22,
            },
            "volume": {
                "diskname": "Test Volume",
            },
            "template_1": {
                "displaytext": "Cent OS Template",
                "name": "Cent OS Template",
                "ostypeid": '5776c0d2-f331-42db-ba3a-29f1f8319bc9',
            },
            "template_2": {
                "displaytext": "Public Template",
                "name": "Public template",
                "ostypeid": '5776c0d2-f331-42db-ba3a-29f1f8319bc9',
                "isfeatured": True,
                "ispublic": True,
                "isextractable": True,
                "mode": "HTTP_DOWNLOAD",
            },
            "templatefilter": 'self',
            "destzoneid": 5,
            # For Copy template (Destination zone)
            "isfeatured": True,
            "ispublic": True,
            "isextractable": False,
            "bootable": True,
            "passwordenabled": True,
            "ostypeid": '5776c0d2-f331-42db-ba3a-29f1f8319bc9',
            "mode": 'advanced',
            # Networking mode: Advanced, basic
            "sleep": 30,
            "timeout": 10,
        }
||||
|
||||
|
||||
class TestCreateTemplate(cloudstackTestCase):
    """BVT: create a private template from a stopped VM's ROOT volume
    and verify it via listTemplates.
    """

    def setUp(self):
        # Per-test clients and a fresh cleanup list for created templates
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        self.cleanup = []
        return

    def tearDown(self):
        try:
            self.dbclient.close()
            # Clean up, terminate the created templates
            cleanup_resources(self.apiclient, self.cleanup)

        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    @classmethod
    def setUpClass(cls):
        """Provision the shared fixtures: offerings, account, a VM that
        is then stopped, and the VM's ROOT volume (template source).
        """
        cls.services = Services().services
        cls.api_client = super(TestCreateTemplate, cls).getClsTestClient().getApiClient()

        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.api_client, cls.services)
        cls.zone = get_zone(cls.api_client, cls.services)
        cls.disk_offering = DiskOffering.create(
            cls.api_client,
            cls.services["disk_offering"]
        )
        template = get_template(
            cls.api_client,
            cls.zone.id,
            cls.services["ostypeid"]
        )
        cls.services["virtual_machine"]["zoneid"] = cls.zone.id
        cls.services["volume"]["diskoffering"] = cls.disk_offering.id
        cls.services["volume"]["zoneid"] = cls.zone.id
        cls.services["sourcezoneid"] = cls.zone.id

        cls.account = Account.create(
            cls.api_client,
            cls.services["account"],
            domainid=cls.domain.id
        )
        cls.services["account"] = cls.account.account.name

        cls.service_offering = ServiceOffering.create(
            cls.api_client,
            cls.services["service_offering"]
        )
        # create virtual machine
        cls.virtual_machine = VirtualMachine.create(
            cls.api_client,
            cls.services["virtual_machine"],
            templateid=template.id,
            accountid=cls.account.account.name,
            domainid=cls.account.account.domainid,
            serviceofferingid=cls.service_offering.id,
            mode=cls.services["mode"]
        )

        # Stop virtual machine
        cls.virtual_machine.stop(cls.api_client)

        # Poll listVM to ensure VM is stopped properly
        timeout = cls.services["timeout"]
        while True:
            time.sleep(cls.services["sleep"])

            # Ensure that VM is in stopped state
            list_vm_response = list_virtual_machines(
                cls.api_client,
                id=cls.virtual_machine.id
            )

            if isinstance(list_vm_response, list):

                vm = list_vm_response[0]
                if vm.state == 'Stopped':
                    break

            if timeout == 0:
                # FIX: use cls.virtual_machine.id — the local `vm` is
                # unbound if list_virtual_machines never returned a
                # list, which would raise UnboundLocalError here and
                # mask the real failure
                raise Exception(
                    "Failed to stop VM (ID: %s) in change service offering" %
                    cls.virtual_machine.id)

            timeout = timeout - 1

        list_volume = list_volumes(
            cls.api_client,
            virtualmachineid=cls.virtual_machine.id,
            type='ROOT',
            listall=True
        )

        cls.volume = list_volume[0]
        cls._cleanup = [
            cls.account,
            cls.service_offering,
            cls.disk_offering,
        ]
        return

    @classmethod
    def tearDownClass(cls):
        try:
            cls.api_client = super(TestCreateTemplate, cls).getClsTestClient().getApiClient()
            # Cleanup resources used
            cleanup_resources(cls.api_client, cls._cleanup)

        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)

        return

    def test_01_create_template(self):
        """Test create public & private template.

        Validates the following:
        1. The database (vm_template table) should be updated with the
           newly created template.
        2. The UI should show the newly added template.
        3. The ListTemplates API should show the newly added template.
        """
        # Create template from Virtual machine and Volume ID
        template = Template.create(
            self.apiclient,
            self.services["template_1"],
            self.volume.id,
            account=self.account.account.name,
            domainid=self.account.account.domainid
        )
        self.cleanup.append(template)

        self.debug("Created template with ID: %s" % template.id)

        list_template_response = list_templates(
            self.apiclient,
            templatefilter=\
                self.services["templatefilter"],
            id=template.id
        )

        self.assertEqual(
            isinstance(list_template_response, list),
            True,
            "Check list response returns a valid list"
        )
        # Verify template response to check whether template added successfully
        self.assertNotEqual(
            len(list_template_response),
            0,
            "Check template available in List Templates"
        )
        template_response = list_template_response[0]

        self.assertEqual(
            template_response.displaytext,
            self.services["template_1"]["displaytext"],
            "Check display text of newly created template"
        )
        name = template_response.name
        self.assertEqual(
            name.count(self.services["template_1"]["name"]),
            1,
            "Check name of newly created template"
        )
        self.assertEqual(
            template_response.ostypeid,
            self.services["template_1"]["ostypeid"],
            "Check osTypeID of newly created template"
        )
        return
|
||||
|
||||
|
||||
class TestTemplates(cloudstackTestCase):
|
||||
|
||||
@classmethod
def setUpClass(cls):
    """Provision shared fixtures for the template tests: an admin and
    a normal account, offerings, a stopped VM, its ROOT volume, and
    two templates (private + public) created from that volume.
    """
    cls.services = Services().services
    cls.api_client = super(TestTemplates, cls).getClsTestClient().getApiClient()

    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.api_client, cls.services)
    cls.zone = get_zone(cls.api_client, cls.services)
    cls.disk_offering = DiskOffering.create(
        cls.api_client,
        cls.services["disk_offering"]
    )
    template = get_template(
        cls.api_client,
        cls.zone.id,
        cls.services["ostypeid"]
    )
    cls.services["virtual_machine"]["zoneid"] = cls.zone.id
    cls.services["volume"]["diskoffering"] = cls.disk_offering.id
    cls.services["volume"]["zoneid"] = cls.zone.id
    cls.services["template_2"]["zoneid"] = cls.zone.id
    cls.services["sourcezoneid"] = cls.zone.id

    cls.account = Account.create(
        cls.api_client,
        cls.services["account"],
        admin=True,
        domainid=cls.domain.id
    )

    cls.user = Account.create(
        cls.api_client,
        cls.services["account"],
        domainid=cls.domain.id
    )

    cls.services["account"] = cls.account.account.name

    cls.service_offering = ServiceOffering.create(
        cls.api_client,
        cls.services["service_offering"]
    )
    # create virtual machine
    cls.virtual_machine = VirtualMachine.create(
        cls.api_client,
        cls.services["virtual_machine"],
        templateid=template.id,
        accountid=cls.account.account.name,
        domainid=cls.account.account.domainid,
        serviceofferingid=cls.service_offering.id,
        mode=cls.services["mode"]
    )
    # Stop virtual machine
    cls.virtual_machine.stop(cls.api_client)

    # Poll listVM to ensure VM is stopped properly
    timeout = cls.services["timeout"]
    while True:
        time.sleep(cls.services["sleep"])

        # Ensure that VM is in stopped state
        list_vm_response = list_virtual_machines(
            cls.api_client,
            id=cls.virtual_machine.id
        )

        if isinstance(list_vm_response, list):

            vm = list_vm_response[0]
            if vm.state == 'Stopped':
                break

        if timeout == 0:
            # FIX: use cls.virtual_machine.id — the local `vm` is
            # unbound if list_virtual_machines never returned a list,
            # which would raise UnboundLocalError here
            raise Exception(
                "Failed to stop VM (ID: %s) in change service offering" %
                cls.virtual_machine.id)

        timeout = timeout - 1

    list_volume = list_volumes(
        cls.api_client,
        virtualmachineid=cls.virtual_machine.id,
        type='ROOT',
        listall=True
    )
    try:
        cls.volume = list_volume[0]
    except Exception as e:
        # FIX: "foe" -> "for" typo in the error message
        raise Exception(
            "Exception: Unable to find root volume for VM: %s" %
            cls.virtual_machine.id)

    # Create templates for Edit, Delete & update permissions testcases
    cls.template_1 = Template.create(
        cls.api_client,
        cls.services["template_1"],
        cls.volume.id,
        account=cls.account.account.name,
        domainid=cls.account.account.domainid
    )
    cls.template_2 = Template.create(
        cls.api_client,
        cls.services["template_2"],
        cls.volume.id,
        account=cls.account.account.name,
        domainid=cls.account.account.domainid
    )
    cls._cleanup = [
        cls.service_offering,
        cls.disk_offering,
        cls.account,
        cls.user
    ]
||||
|
||||
@classmethod
def tearDownClass(cls):
    """Remove all class-level fixtures created in setUpClass."""
    try:
        # Re-acquire an API client in case the previous one went stale
        cls.api_client = super(TestTemplates, cls).getClsTestClient().getApiClient()
        # Cleanup created resources such as templates and VMs
        cleanup_resources(cls.api_client, cls._cleanup)
    except Exception as e:
        raise Exception("Warning: Exception during cleanup : %s" % e)
    return
|
||||
|
||||
def setUp(self):
    """Acquire per-test clients and reset the per-test cleanup list."""
    self.apiclient = self.testClient.getApiClient()
    self.dbclient = self.testClient.getDbConnection()
    self.cleanup = []
    return
|
||||
|
||||
def tearDown(self):
    """Close the DB connection and delete resources this test made."""
    try:
        self.dbclient.close()
        # Clean up, terminate the created templates
        cleanup_resources(self.apiclient, self.cleanup)
    except Exception as e:
        raise Exception("Warning: Exception during cleanup : %s" % e)
    return
|
||||
|
||||
def test_02_edit_template(self):
    """Rename template_1 with random name/display text, toggle the
    bootable and passwordenabled flags, and verify every updated field
    comes back correctly from listTemplates.
    """
    new_displayText = random_gen()
    new_name = random_gen()

    # Build and fire the update call with all edited attributes
    cmd = updateTemplate.updateTemplateCmd()
    cmd.id = self.template_1.id
    cmd.displaytext = new_displayText
    cmd.name = new_name
    cmd.bootable = self.services["bootable"]
    cmd.passwordenabled = self.services["passwordenabled"]

    self.apiclient.updateTemplate(cmd)

    self.debug("Edited template with new name: %s" % new_name)

    # Sleep to ensure update reflected across all the calls
    time.sleep(self.services["sleep"])

    remaining = self.services["timeout"]
    while True:
        # Verify template response for updated attributes
        list_template_response = list_templates(
            self.apiclient,
            templatefilter=\
                self.services["templatefilter"],
            id=self.template_1.id,
            account=self.account.account.name,
            domainid=self.account.account.domainid
        )
        if isinstance(list_template_response, list):
            break
        elif remaining == 0:
            raise Exception("List Template failed!")

        time.sleep(10)
        remaining = remaining - 1

    self.assertEqual(
        isinstance(list_template_response, list),
        True,
        "Check list response returns a valid list"
    )
    self.assertNotEqual(
        len(list_template_response),
        0,
        "Check template available in List Templates"
    )
    template_response = list_template_response[0]

    self.debug("New Name: %s" % new_displayText)
    self.debug("Name in Template response: %s"
               % template_response.displaytext)
    self.assertEqual(
        template_response.displaytext,
        new_displayText,
        "Check display text of updated template"
    )
    self.assertEqual(
        template_response.name,
        new_name,
        "Check name of updated template"
    )
    self.assertEqual(
        str(template_response.passwordenabled).lower(),
        str(self.services["passwordenabled"]).lower(),
        "Check passwordenabled field of updated template"
    )
    self.assertEqual(
        template_response.ostypeid,
        self.services["ostypeid"],
        "Check OSTypeID of updated template"
    )
    return
|
||||
|
||||
def test_03_delete_template(self):
    """Delete template_1 and verify listTemplates no longer returns it
    (the API returns None for an empty result set).
    """
    self.debug("Deleting Template ID: %s" % self.template_1.id)

    self.template_1.delete(self.apiclient)

    remaining_templates = list_templates(
        self.apiclient,
        templatefilter=\
            self.services["templatefilter"],
        id=self.template_1.id,
        account=self.account.account.name,
        domainid=self.account.account.domainid
    )
    # Verify template is deleted properly using ListTemplates
    self.assertEqual(
        remaining_templates,
        None,
        "Check if template exists in List Templates"
    )
    return
|
||||
|
||||
def test_04_extract_template(self):
    """Test for extract template.

    Validates the following:
    1. Admin should be able to extract and download the templates.
    2. ListTemplates should display all public templates for all
       kinds of users.
    3. ListTemplates should not display the system templates.
    """
    self.debug("Extracting template with ID: %s" % self.template_2.id)

    cmd = extractTemplate.extractTemplateCmd()
    cmd.id = self.template_2.id
    cmd.mode = self.services["template_2"]["mode"]
    cmd.zoneid = self.zone.id
    list_extract_response = self.apiclient.extractTemplate(cmd)

    # FIX: format the URL outside the try block — previously, if
    # unquote_plus itself raised, the except clause referenced an
    # unbound `formatted_url` and the fail() call crashed with
    # UnboundLocalError instead of reporting the real problem
    formatted_url = urllib.unquote_plus(list_extract_response.url)
    try:
        # Retrieve the download URL's HTTP response code
        url_response = urllib.urlopen(formatted_url)
        response_code = url_response.getcode()

    except Exception:
        self.fail(
            "Extract Template Failed with invalid URL %s (template id: %s)" \
            % (formatted_url, self.template_2.id)
        )
    self.assertEqual(
        list_extract_response.id,
        self.template_2.id,
        "Check ID of the extracted template"
    )
    self.assertEqual(
        list_extract_response.extractMode,
        self.services["template_2"]["mode"],
        "Check mode of extraction"
    )
    self.assertEqual(
        list_extract_response.zoneid,
        self.services["template_2"]["zoneid"],
        "Check zone ID of extraction"
    )
    self.assertEqual(
        response_code,
        200,
        "Check for a valid response download URL"
    )
    return
|
||||
|
||||
def test_05_template_permissions(self):
    """Update template_2's featured/public/extractable permissions and
    verify the changes via a 'featured' listTemplates call, checking
    that no SYSTEM templates leak into the listing.
    """
    self.debug("Updating Template permissions ID:%s" % self.template_2.id)

    # Push the new permission flags
    cmd = updateTemplatePermissions.updateTemplatePermissionsCmd()
    cmd.id = self.template_2.id
    cmd.isfeatured = self.services["isfeatured"]
    cmd.ispublic = self.services["ispublic"]
    cmd.isextractable = self.services["isextractable"]
    self.apiclient.updateTemplatePermissions(cmd)

    featured_templates = list_templates(
        self.apiclient,
        templatefilter='featured',
        id=self.template_2.id,
        account=self.account.account.name,
        domainid=self.account.account.domainid
    )
    self.assertEqual(
        isinstance(featured_templates, list),
        True,
        "Check list response returns a valid list"
    )
    # Verify template response for updated permissions for normal user
    template_response = featured_templates[0]

    self.assertEqual(
        template_response.id,
        self.template_2.id,
        "Check template ID"
    )
    self.assertEqual(
        template_response.ispublic,
        int(True),
        "Check ispublic permission of template"
    )

    self.assertNotEqual(
        template_response.templatetype,
        'SYSTEM',
        "ListTemplates should not list any system templates"
    )
    return
|
||||
|
||||
def test_06_copy_template(self):
|
||||
"""Test for copy template from one zone to another"""
|
||||
|
||||
# Validate the following
|
||||
# 1. copy template should be successful and
|
||||
# secondary storage should contain new copied template.
|
||||
|
||||
self.debug("Copy template from Zone: %s to %s" % (
|
||||
self.services["sourcezoneid"],
|
||||
self.services["destzoneid"]
|
||||
))
|
||||
cmd = copyTemplate.copyTemplateCmd()
|
||||
cmd.id = self.template_2.id
|
||||
cmd.destzoneid = self.services["destzoneid"]
|
||||
cmd.sourcezoneid = self.services["sourcezoneid"]
|
||||
self.apiclient.copyTemplate(cmd)
|
||||
|
||||
# Verify template is copied to another zone using ListTemplates
|
||||
list_template_response = list_templates(
|
||||
self.apiclient,
|
||||
templatefilter=\
|
||||
self.services["templatefilter"],
|
||||
id=self.template_2.id,
|
||||
zoneid=self.services["destzoneid"]
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_template_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
self.assertNotEqual(
|
||||
len(list_template_response),
|
||||
0,
|
||||
"Check template extracted in List Templates"
|
||||
)
|
||||
|
||||
template_response = list_template_response[0]
|
||||
self.assertEqual(
|
||||
template_response.id,
|
||||
self.template_2.id,
|
||||
"Check ID of the downloaded template"
|
||||
)
|
||||
self.assertEqual(
|
||||
template_response.zoneid,
|
||||
self.services["destzoneid"],
|
||||
"Check zone ID of the copied template"
|
||||
)
|
||||
|
||||
# Cleanup- Delete the copied template
|
||||
cmd = deleteTemplate.deleteTemplateCmd()
|
||||
cmd.id = template_response.id
|
||||
cmd.zoneid = self.services["destzoneid"]
|
||||
self.apiclient.deleteTemplate(cmd)
|
||||
return
|
||||
|
||||
def test_07_list_public_templates(self):
|
||||
"""Test only public templates are visible to normal user"""
|
||||
|
||||
# Validate the following
|
||||
# 1. ListTemplates should show only 'public' templates for normal user
|
||||
|
||||
list_template_response = list_templates(
|
||||
self.apiclient,
|
||||
templatefilter='featured',
|
||||
account=self.user.account.name,
|
||||
domainid=self.user.account.domainid
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_template_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
self.assertNotEqual(
|
||||
len(list_template_response),
|
||||
0,
|
||||
"Check template available in List Templates"
|
||||
)
|
||||
#Template response should list all 'public' templates
|
||||
for template in list_template_response:
|
||||
self.assertEqual(
|
||||
template.ispublic,
|
||||
True,
|
||||
"ListTemplates should list only public templates"
|
||||
)
|
||||
return
|
||||
|
||||
def test_08_list_system_templates(self):
|
||||
"""Test System templates are not visible to normal user"""
|
||||
|
||||
# Validate the following
|
||||
# 1. ListTemplates should not show 'SYSTEM' templates for normal user
|
||||
|
||||
list_template_response = list_templates(
|
||||
self.apiclient,
|
||||
templatefilter='featured',
|
||||
account=self.user.account.name,
|
||||
domainid=self.user.account.domainid
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_template_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
|
||||
self.assertNotEqual(
|
||||
len(list_template_response),
|
||||
0,
|
||||
"Check template available in List Templates"
|
||||
)
|
||||
|
||||
for template in list_template_response:
|
||||
self.assertNotEqual(
|
||||
template.templatetype,
|
||||
'SYSTEM',
|
||||
"ListTemplates should not list any system templates"
|
||||
)
|
||||
return
|
||||
|
|
@ -1,946 +0,0 @@
|
|||
# -*- encoding: utf-8 -*-
|
||||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
""" BVT tests for Virtual Machine Life Cycle
|
||||
"""
|
||||
#Import Local Modules
|
||||
from cloudstackTestCase import *
|
||||
from cloudstackAPI import *
|
||||
import remoteSSHClient
|
||||
from testcase.libs.utils import *
|
||||
from testcase.libs.base import *
|
||||
from testcase.libs.common import *
|
||||
#Import System modules
|
||||
import time
|
||||
|
||||
class Services:
|
||||
"""Test VM Life Cycle Services
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self.services = {
|
||||
"disk_offering":{
|
||||
"displaytext": "Small",
|
||||
"name": "Small",
|
||||
"disksize": 1
|
||||
},
|
||||
"account": {
|
||||
"email": "test@test.com",
|
||||
"firstname": "Test",
|
||||
"lastname": "User",
|
||||
"username": "test",
|
||||
# Random characters are appended in create account to
|
||||
# ensure unique username generated each time
|
||||
"password": "fr3sca",
|
||||
},
|
||||
"small":
|
||||
# Create a small virtual machine instance with disk offering
|
||||
{
|
||||
"displayname": "testserver",
|
||||
"username": "root", # VM creds for SSH
|
||||
"password": "password",
|
||||
"ssh_port": 22,
|
||||
"hypervisor": 'XenServer',
|
||||
"privateport": 22,
|
||||
"publicport": 22,
|
||||
"protocol": 'TCP',
|
||||
},
|
||||
"medium": # Create a medium virtual machine instance
|
||||
{
|
||||
"displayname": "testserver",
|
||||
"username": "root",
|
||||
"password": "password",
|
||||
"ssh_port": 22,
|
||||
"hypervisor": 'XenServer',
|
||||
"privateport": 22,
|
||||
"publicport": 22,
|
||||
"protocol": 'TCP',
|
||||
},
|
||||
"service_offerings":
|
||||
{
|
||||
"tiny":
|
||||
{
|
||||
"name": "Tiny Instance",
|
||||
"displaytext": "Tiny Instance",
|
||||
"cpunumber": 1,
|
||||
"cpuspeed": 100, # in MHz
|
||||
"memory": 64, # In MBs
|
||||
},
|
||||
"small":
|
||||
{
|
||||
# Small service offering ID to for change VM
|
||||
# service offering from medium to small
|
||||
"name": "Small Instance",
|
||||
"displaytext": "Small Instance",
|
||||
"cpunumber": 1,
|
||||
"cpuspeed": 500,
|
||||
"memory": 256
|
||||
},
|
||||
"medium":
|
||||
{
|
||||
# Medium service offering ID to for
|
||||
# change VM service offering from small to medium
|
||||
"name": "Medium Instance",
|
||||
"displaytext": "Medium Instance",
|
||||
"cpunumber": 1,
|
||||
"cpuspeed": 1000,
|
||||
"memory": 1024
|
||||
}
|
||||
},
|
||||
"iso": # ISO settings for Attach/Detach ISO tests
|
||||
{
|
||||
"displaytext": "Test ISO",
|
||||
"name": "testISO",
|
||||
"url": "http://iso.linuxquestions.org/download/504/1819/http/gd4.tuwien.ac.at/dsl-4.4.10.iso",
|
||||
# Source URL where ISO is located
|
||||
"ostypeid": '5776c0d2-f331-42db-ba3a-29f1f8319bc9',
|
||||
"mode": 'HTTP_DOWNLOAD', # Downloading existing ISO
|
||||
},
|
||||
"diskdevice": '/dev/xvdd',
|
||||
# Disk device where ISO is attached to instance
|
||||
"mount_dir": "/mnt/tmp",
|
||||
"sleep": 60,
|
||||
"timeout": 10,
|
||||
#Migrate VM to hostid
|
||||
"ostypeid": '5776c0d2-f331-42db-ba3a-29f1f8319bc9',
|
||||
# CentOS 5.3 (64-bit)
|
||||
"mode":'advanced',
|
||||
}
|
||||
|
||||
|
||||
class TestDeployVM(cloudstackTestCase):
|
||||
|
||||
def setUp(self):
|
||||
|
||||
self.apiclient = self.testClient.getApiClient()
|
||||
self.dbclient = self.testClient.getDbConnection()
|
||||
self.services = Services().services
|
||||
# Get Zone, Domain and templates
|
||||
domain = get_domain(self.apiclient, self.services)
|
||||
zone = get_zone(self.apiclient, self.services)
|
||||
|
||||
template = get_template(
|
||||
self.apiclient,
|
||||
zone.id,
|
||||
self.services["ostypeid"]
|
||||
)
|
||||
# Set Zones and disk offerings
|
||||
self.services["small"]["zoneid"] = zone.id
|
||||
self.services["small"]["template"] = template.id
|
||||
|
||||
self.services["medium"]["zoneid"] = zone.id
|
||||
self.services["medium"]["template"] = template.id
|
||||
self.services["iso"]["zoneid"] = zone.id
|
||||
|
||||
# Create Account, VMs, NAT Rules etc
|
||||
self.account = Account.create(
|
||||
self.apiclient,
|
||||
self.services["account"],
|
||||
domainid=domain.id
|
||||
)
|
||||
|
||||
self.service_offering = ServiceOffering.create(
|
||||
self.apiclient,
|
||||
self.services["service_offerings"]["tiny"]
|
||||
)
|
||||
# Cleanup
|
||||
self.cleanup = [
|
||||
self.service_offering,
|
||||
self.account
|
||||
]
|
||||
|
||||
def test_deploy_vm(self):
|
||||
"""Test Deploy Virtual Machine
|
||||
"""
|
||||
|
||||
# Validate the following:
|
||||
# 1. Virtual Machine is accessible via SSH
|
||||
# 2. listVirtualMachines returns accurate information
|
||||
# 3. The Cloud Database contains the valid information
|
||||
|
||||
self.virtual_machine = VirtualMachine.create(
|
||||
self.apiclient,
|
||||
self.services["small"],
|
||||
accountid=self.account.account.name,
|
||||
domainid=self.account.account.domainid,
|
||||
serviceofferingid=self.service_offering.id
|
||||
)
|
||||
|
||||
list_vm_response = list_virtual_machines(
|
||||
self.apiclient,
|
||||
id=self.virtual_machine.id
|
||||
)
|
||||
|
||||
self.debug(
|
||||
"Verify listVirtualMachines response for virtual machine: %s" \
|
||||
% self.virtual_machine.id
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(list_vm_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
|
||||
self.assertNotEqual(
|
||||
len(list_vm_response),
|
||||
0,
|
||||
"Check VM available in List Virtual Machines"
|
||||
)
|
||||
vm_response = list_vm_response[0]
|
||||
|
||||
self.assertEqual(
|
||||
|
||||
vm_response.id,
|
||||
self.virtual_machine.id,
|
||||
"Check virtual machine id in listVirtualMachines"
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
vm_response.displayname,
|
||||
self.virtual_machine.displayname,
|
||||
"Check virtual machine displayname in listVirtualMachines"
|
||||
)
|
||||
return
|
||||
|
||||
def tearDown(self):
|
||||
try:
|
||||
cleanup_resources(self.apiclient, self.cleanup)
|
||||
except Exception as e:
|
||||
self.debug("Warning! Exception in tearDown: %s" % e)
|
||||
|
||||
|
||||
class TestVMLifeCycle(cloudstackTestCase):
|
||||
|
||||
@classmethod
|
||||
def setUpClass(cls):
|
||||
cls.api_client = super(TestVMLifeCycle, cls).getClsTestClient().getApiClient()
|
||||
cls.services = Services().services
|
||||
|
||||
# Get Zone, Domain and templates
|
||||
domain = get_domain(cls.api_client, cls.services)
|
||||
zone = get_zone(cls.api_client, cls.services)
|
||||
template = get_template(
|
||||
cls.api_client,
|
||||
zone.id,
|
||||
cls.services["ostypeid"]
|
||||
)
|
||||
# Set Zones and disk offerings
|
||||
cls.services["small"]["zoneid"] = zone.id
|
||||
cls.services["small"]["template"] = template.id
|
||||
|
||||
cls.services["medium"]["zoneid"] = zone.id
|
||||
cls.services["medium"]["template"] = template.id
|
||||
cls.services["iso"]["zoneid"] = zone.id
|
||||
|
||||
# Create VMs, NAT Rules etc
|
||||
cls.account = Account.create(
|
||||
cls.api_client,
|
||||
cls.services["account"],
|
||||
domainid=domain.id
|
||||
)
|
||||
|
||||
cls.small_offering = ServiceOffering.create(
|
||||
cls.api_client,
|
||||
cls.services["service_offerings"]["small"]
|
||||
)
|
||||
|
||||
cls.medium_offering = ServiceOffering.create(
|
||||
cls.api_client,
|
||||
cls.services["service_offerings"]["medium"]
|
||||
)
|
||||
#create small and large virtual machines
|
||||
cls.small_virtual_machine = VirtualMachine.create(
|
||||
cls.api_client,
|
||||
cls.services["small"],
|
||||
accountid=cls.account.account.name,
|
||||
domainid=cls.account.account.domainid,
|
||||
serviceofferingid=cls.small_offering.id,
|
||||
mode=cls.services["mode"]
|
||||
)
|
||||
cls.medium_virtual_machine = VirtualMachine.create(
|
||||
cls.api_client,
|
||||
cls.services["medium"],
|
||||
accountid=cls.account.account.name,
|
||||
domainid=cls.account.account.domainid,
|
||||
serviceofferingid=cls.medium_offering.id,
|
||||
mode=cls.services["mode"]
|
||||
)
|
||||
cls.virtual_machine = VirtualMachine.create(
|
||||
cls.api_client,
|
||||
cls.services["small"],
|
||||
accountid=cls.account.account.name,
|
||||
domainid=cls.account.account.domainid,
|
||||
serviceofferingid=cls.small_offering.id,
|
||||
mode=cls.services["mode"]
|
||||
)
|
||||
cls._cleanup = [
|
||||
cls.small_offering,
|
||||
cls.medium_offering,
|
||||
cls.account
|
||||
]
|
||||
|
||||
@classmethod
|
||||
def tearDownClass(cls):
|
||||
cls.api_client = super(TestVMLifeCycle, cls).getClsTestClient().getApiClient()
|
||||
cleanup_resources(cls.api_client, cls._cleanup)
|
||||
return
|
||||
|
||||
def setUp(self):
|
||||
self.apiclient = self.testClient.getApiClient()
|
||||
self.dbclient = self.testClient.getDbConnection()
|
||||
self.cleanup = []
|
||||
|
||||
def tearDown(self):
|
||||
#Clean up, terminate the created ISOs
|
||||
cleanup_resources(self.apiclient, self.cleanup)
|
||||
return
|
||||
|
||||
def test_01_stop_vm(self):
|
||||
"""Test Stop Virtual Machine
|
||||
"""
|
||||
|
||||
# Validate the following
|
||||
# 1. Should Not be able to login to the VM.
|
||||
# 2. listVM command should return
|
||||
# this VM.State of this VM should be ""Stopped"".
|
||||
|
||||
self.debug("Stopping VM - ID: %s" % self.virtual_machine.id)
|
||||
self.small_virtual_machine.stop(self.apiclient)
|
||||
|
||||
list_vm_response = list_virtual_machines(
|
||||
self.apiclient,
|
||||
id=self.small_virtual_machine.id
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(list_vm_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
self.assertNotEqual(
|
||||
len(list_vm_response),
|
||||
0,
|
||||
"Check VM available in List Virtual Machines"
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
list_vm_response[0].state,
|
||||
"Stopped",
|
||||
"Check virtual machine is in stopped state"
|
||||
)
|
||||
return
|
||||
|
||||
def test_02_start_vm(self):
|
||||
"""Test Start Virtual Machine
|
||||
"""
|
||||
# Validate the following
|
||||
# 1. listVM command should return this VM.State
|
||||
# of this VM should be Running".
|
||||
|
||||
self.debug("Starting VM - ID: %s" % self.virtual_machine.id)
|
||||
self.small_virtual_machine.start(self.apiclient)
|
||||
|
||||
list_vm_response = list_virtual_machines(
|
||||
self.apiclient,
|
||||
id=self.small_virtual_machine.id
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_vm_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
|
||||
self.assertNotEqual(
|
||||
len(list_vm_response),
|
||||
0,
|
||||
"Check VM avaliable in List Virtual Machines"
|
||||
)
|
||||
|
||||
self.debug(
|
||||
"Verify listVirtualMachines response for virtual machine: %s" \
|
||||
% self.small_virtual_machine.id
|
||||
)
|
||||
self.assertEqual(
|
||||
list_vm_response[0].state,
|
||||
"Running",
|
||||
"Check virtual machine is in running state"
|
||||
)
|
||||
return
|
||||
|
||||
def test_03_reboot_vm(self):
|
||||
"""Test Reboot Virtual Machine
|
||||
"""
|
||||
|
||||
# Validate the following
|
||||
# 1. Should be able to login to the VM.
|
||||
# 2. listVM command should return the deployed VM.
|
||||
# State of this VM should be "Running"
|
||||
|
||||
self.debug("Rebooting VM - ID: %s" % self.virtual_machine.id)
|
||||
self.small_virtual_machine.reboot(self.apiclient)
|
||||
|
||||
list_vm_response = list_virtual_machines(
|
||||
self.apiclient,
|
||||
id=self.small_virtual_machine.id
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_vm_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
|
||||
self.assertNotEqual(
|
||||
len(list_vm_response),
|
||||
0,
|
||||
"Check VM avaliable in List Virtual Machines"
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
list_vm_response[0].state,
|
||||
"Running",
|
||||
"Check virtual machine is in running state"
|
||||
)
|
||||
return
|
||||
|
||||
def test_04_change_offering_small(self):
|
||||
"""Change Offering to a small capacity
|
||||
"""
|
||||
|
||||
# Validate the following
|
||||
# 1. Log in to the Vm .We should see that the CPU and memory Info of
|
||||
# this Vm matches the one specified for "Small" service offering.
|
||||
# 2. Using listVM command verify that this Vm
|
||||
# has Small service offering Id.
|
||||
|
||||
self.debug("Stopping VM - ID: %s" % self.medium_virtual_machine.id)
|
||||
|
||||
self.medium_virtual_machine.stop(self.apiclient)
|
||||
|
||||
# Poll listVM to ensure VM is stopped properly
|
||||
timeout = self.services["timeout"]
|
||||
|
||||
while True:
|
||||
time.sleep(self.services["sleep"])
|
||||
|
||||
# Ensure that VM is in stopped state
|
||||
list_vm_response = list_virtual_machines(
|
||||
self.apiclient,
|
||||
id=self.medium_virtual_machine.id
|
||||
)
|
||||
|
||||
if isinstance(list_vm_response, list):
|
||||
|
||||
vm = list_vm_response[0]
|
||||
if vm.state == 'Stopped':
|
||||
self.debug("VM state: %s" % vm.state)
|
||||
break
|
||||
|
||||
if timeout == 0:
|
||||
raise Exception(
|
||||
"Failed to stop VM (ID: %s) in change service offering" % vm.id)
|
||||
|
||||
timeout = timeout - 1
|
||||
|
||||
self.debug("Change Service offering VM - ID: %s" %
|
||||
self.medium_virtual_machine.id)
|
||||
|
||||
cmd = changeServiceForVirtualMachine.changeServiceForVirtualMachineCmd()
|
||||
cmd.id = self.medium_virtual_machine.id
|
||||
cmd.serviceofferingid = self.small_offering.id
|
||||
self.apiclient.changeServiceForVirtualMachine(cmd)
|
||||
|
||||
self.debug("Starting VM - ID: %s" % self.medium_virtual_machine.id)
|
||||
self.medium_virtual_machine.start(self.apiclient)
|
||||
|
||||
# Poll listVM to ensure VM is started properly
|
||||
timeout = self.services["timeout"]
|
||||
|
||||
while True:
|
||||
time.sleep(self.services["sleep"])
|
||||
|
||||
# Ensure that VM is in running state
|
||||
list_vm_response = list_virtual_machines(
|
||||
self.apiclient,
|
||||
id=self.medium_virtual_machine.id
|
||||
)
|
||||
|
||||
if isinstance(list_vm_response, list):
|
||||
|
||||
vm = list_vm_response[0]
|
||||
if vm.state == 'Running':
|
||||
self.debug("VM state: %s" % vm.state)
|
||||
break
|
||||
|
||||
if timeout == 0:
|
||||
raise Exception(
|
||||
"Failed to start VM (ID: %s) after changing service offering" % vm.id)
|
||||
|
||||
timeout = timeout - 1
|
||||
|
||||
try:
|
||||
ssh = self.medium_virtual_machine.get_ssh_client()
|
||||
except Exception as e:
|
||||
self.fail(
|
||||
"SSH Access failed for %s: %s" % \
|
||||
(self.medium_virtual_machine.ipaddress, e)
|
||||
)
|
||||
|
||||
cpuinfo = ssh.execute("cat /proc/cpuinfo")
|
||||
|
||||
cpu_cnt = len([i for i in cpuinfo if "processor" in i])
|
||||
#'cpu MHz\t\t: 2660.499'
|
||||
cpu_speed = [i for i in cpuinfo if "cpu MHz" in i ][0].split()[3]
|
||||
|
||||
meminfo = ssh.execute("cat /proc/meminfo")
|
||||
#MemTotal: 1017464 kB
|
||||
total_mem = [i for i in meminfo if "MemTotal" in i][0].split()[1]
|
||||
|
||||
self.debug(
|
||||
"CPU count: %s, CPU Speed: %s, Mem Info: %s" % (
|
||||
cpu_cnt,
|
||||
cpu_speed,
|
||||
total_mem
|
||||
))
|
||||
self.assertAlmostEqual(
|
||||
int(cpu_cnt),
|
||||
self.small_offering.cpunumber,
|
||||
"Check CPU Count for small offering"
|
||||
)
|
||||
|
||||
self.assertAlmostEqual(
|
||||
list_vm_response[0].cpuspeed,
|
||||
self.small_offering.cpuspeed,
|
||||
"Check CPU Speed for small offering"
|
||||
)
|
||||
self.assertAlmostEqual(
|
||||
int(total_mem) / 1024, # In MBs
|
||||
self.small_offering.memory,
|
||||
"Check Memory(kb) for small offering"
|
||||
)
|
||||
return
|
||||
|
||||
def test_05_change_offering_medium(self):
|
||||
"""Change Offering to a medium capacity
|
||||
"""
|
||||
# Validate the following
|
||||
# 1. Log in to the Vm .We should see that the CPU and memory Info of
|
||||
# this Vm matches the one specified for "Medium" service offering.
|
||||
# 2. Using listVM command verify that this Vm
|
||||
# has Medium service offering Id.
|
||||
|
||||
self.debug("Stopping VM - ID: %s" % self.small_virtual_machine.id)
|
||||
self.small_virtual_machine.stop(self.apiclient)
|
||||
|
||||
# Poll listVM to ensure VM is stopped properly
|
||||
timeout = self.services["timeout"]
|
||||
|
||||
while True:
|
||||
time.sleep(self.services["sleep"])
|
||||
|
||||
# Ensure that VM is in stopped state
|
||||
list_vm_response = list_virtual_machines(
|
||||
self.apiclient,
|
||||
id=self.small_virtual_machine.id
|
||||
)
|
||||
|
||||
if isinstance(list_vm_response, list):
|
||||
|
||||
vm = list_vm_response[0]
|
||||
if vm.state == 'Stopped':
|
||||
self.debug("VM state: %s" % vm.state)
|
||||
break
|
||||
|
||||
if timeout == 0:
|
||||
raise Exception(
|
||||
"Failed to stop VM (ID: %s) in change service offering" % vm.id)
|
||||
|
||||
timeout = timeout - 1
|
||||
|
||||
self.debug("Change service offering VM - ID: %s" %
|
||||
self.small_virtual_machine.id)
|
||||
|
||||
cmd = changeServiceForVirtualMachine.changeServiceForVirtualMachineCmd()
|
||||
cmd.id = self.small_virtual_machine.id
|
||||
cmd.serviceofferingid = self.medium_offering.id
|
||||
self.apiclient.changeServiceForVirtualMachine(cmd)
|
||||
|
||||
self.debug("Starting VM - ID: %s" % self.small_virtual_machine.id)
|
||||
self.small_virtual_machine.start(self.apiclient)
|
||||
|
||||
# Poll listVM to ensure VM is started properly
|
||||
timeout = self.services["timeout"]
|
||||
|
||||
while True:
|
||||
time.sleep(self.services["sleep"])
|
||||
|
||||
# Ensure that VM is in running state
|
||||
list_vm_response = list_virtual_machines(
|
||||
self.apiclient,
|
||||
id=self.small_virtual_machine.id
|
||||
)
|
||||
|
||||
if isinstance(list_vm_response, list):
|
||||
|
||||
vm = list_vm_response[0]
|
||||
if vm.state == 'Running':
|
||||
self.debug("VM state: %s" % vm.state)
|
||||
break
|
||||
|
||||
if timeout == 0:
|
||||
raise Exception(
|
||||
"Failed to start VM (ID: %s) after changing service offering" % vm.id)
|
||||
|
||||
timeout = timeout - 1
|
||||
|
||||
list_vm_response = list_virtual_machines(
|
||||
self.apiclient,
|
||||
id=self.small_virtual_machine.id
|
||||
)
|
||||
|
||||
try:
|
||||
ssh_client = self.small_virtual_machine.get_ssh_client()
|
||||
except Exception as e:
|
||||
self.fail(
|
||||
"SSH Access failed for %s: %s" % \
|
||||
(self.small_virtual_machine.ipaddress, e)
|
||||
)
|
||||
|
||||
cpuinfo = ssh_client.execute("cat /proc/cpuinfo")
|
||||
|
||||
cpu_cnt = len([i for i in cpuinfo if "processor" in i])
|
||||
#'cpu MHz\t\t: 2660.499'
|
||||
cpu_speed = [i for i in cpuinfo if "cpu MHz" in i][0].split()[3]
|
||||
|
||||
meminfo = ssh_client.execute("cat /proc/meminfo")
|
||||
#MemTotal: 1017464 kB
|
||||
total_mem = [i for i in meminfo if "MemTotal" in i][0].split()[1]
|
||||
|
||||
self.debug(
|
||||
"CPU count: %s, CPU Speed: %s, Mem Info: %s" % (
|
||||
cpu_cnt,
|
||||
cpu_speed,
|
||||
total_mem
|
||||
))
|
||||
self.assertAlmostEqual(
|
||||
int(cpu_cnt),
|
||||
self.medium_offering.cpunumber,
|
||||
"Check CPU Count for medium offering"
|
||||
)
|
||||
|
||||
self.assertAlmostEqual(
|
||||
list_vm_response[0].cpuspeed,
|
||||
self.medium_offering.cpuspeed,
|
||||
"Check CPU Speed for medium offering"
|
||||
)
|
||||
|
||||
self.assertAlmostEqual(
|
||||
int(total_mem) / 1024, # In MBs
|
||||
self.medium_offering.memory,
|
||||
"Check Memory(kb) for medium offering"
|
||||
)
|
||||
return
|
||||
|
||||
def test_06_destroy_vm(self):
|
||||
"""Test destroy Virtual Machine
|
||||
"""
|
||||
|
||||
# Validate the following
|
||||
# 1. Should not be able to login to the VM.
|
||||
# 2. listVM command should return this VM.State
|
||||
# of this VM should be "Destroyed".
|
||||
|
||||
self.debug("Destroy VM - ID: %s" % self.small_virtual_machine.id)
|
||||
self.small_virtual_machine.delete(self.apiclient)
|
||||
|
||||
list_vm_response = list_virtual_machines(
|
||||
self.apiclient,
|
||||
id=self.small_virtual_machine.id
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_vm_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
|
||||
self.assertNotEqual(
|
||||
len(list_vm_response),
|
||||
0,
|
||||
"Check VM avaliable in List Virtual Machines"
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
list_vm_response[0].state,
|
||||
"Destroyed",
|
||||
"Check virtual machine is in destroyed state"
|
||||
)
|
||||
return
|
||||
|
||||
def test_07_restore_vm(self):
|
||||
"""Test recover Virtual Machine
|
||||
"""
|
||||
|
||||
# Validate the following
|
||||
# 1. listVM command should return this VM.
|
||||
# State of this VM should be "Stopped".
|
||||
# 2. We should be able to Start this VM successfully.
|
||||
|
||||
self.debug("Recovering VM - ID: %s" % self.small_virtual_machine.id)
|
||||
|
||||
cmd = recoverVirtualMachine.recoverVirtualMachineCmd()
|
||||
cmd.id = self.small_virtual_machine.id
|
||||
self.apiclient.recoverVirtualMachine(cmd)
|
||||
|
||||
list_vm_response = list_virtual_machines(
|
||||
self.apiclient,
|
||||
id=self.small_virtual_machine.id
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_vm_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
|
||||
self.assertNotEqual(
|
||||
len(list_vm_response),
|
||||
0,
|
||||
"Check VM avaliable in List Virtual Machines"
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
list_vm_response[0].state,
|
||||
"Stopped",
|
||||
"Check virtual machine is in Stopped state"
|
||||
)
|
||||
|
||||
return
|
||||
|
||||
def test_08_migrate_vm(self):
|
||||
"""Test migrate VM
|
||||
"""
|
||||
# Validate the following
|
||||
# 1. Should be able to login to the VM.
|
||||
# 2. listVM command should return this VM.State of this VM
|
||||
# should be "Running" and the host should be the host
|
||||
# to which the VM was migrated to
|
||||
|
||||
hosts = Host.list(
|
||||
self.apiclient,
|
||||
zoneid=self.medium_virtual_machine.zoneid,
|
||||
type='Routing'
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(hosts, list),
|
||||
True,
|
||||
"Check the number of hosts in the zone"
|
||||
)
|
||||
self.assertEqual(
|
||||
len(hosts),
|
||||
2,
|
||||
"Atleast 2 hosts should be present in a zone for VM migration"
|
||||
)
|
||||
|
||||
# Find the host of VM and also the new host to migrate VM.
|
||||
if self.medium_virtual_machine.hostid == hosts[0].id:
|
||||
host = hosts[1]
|
||||
else:
|
||||
host = hosts[0]
|
||||
|
||||
self.debug("Migrating VM-ID: %s to Host: %s" % (
|
||||
self.medium_virtual_machine.id,
|
||||
host.id
|
||||
))
|
||||
|
||||
cmd = migrateVirtualMachine.migrateVirtualMachineCmd()
|
||||
cmd.hostid = host.id
|
||||
cmd.virtualmachineid = self.medium_virtual_machine.id
|
||||
self.apiclient.migrateVirtualMachine(cmd)
|
||||
|
||||
list_vm_response = list_virtual_machines(
|
||||
self.apiclient,
|
||||
id=self.medium_virtual_machine.id
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_vm_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
|
||||
self.assertNotEqual(
|
||||
list_vm_response,
|
||||
None,
|
||||
"Check virtual machine is listVirtualMachines"
|
||||
)
|
||||
|
||||
vm_response = list_vm_response[0]
|
||||
|
||||
self.assertEqual(
|
||||
vm_response.id,
|
||||
self.medium_virtual_machine.id,
|
||||
"Check virtual machine ID of migrated VM"
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
vm_response.hostid,
|
||||
host.id,
|
||||
"Check destination hostID of migrated VM"
|
||||
)
|
||||
return
|
||||
|
||||
def test_09_expunge_vm(self):
|
||||
"""Test destroy(expunge) Virtual Machine
|
||||
"""
|
||||
# Validate the following
|
||||
# 1. listVM command should NOT return this VM any more.
|
||||
|
||||
self.debug("Expunge VM-ID: %s" % self.small_virtual_machine.id)
|
||||
|
||||
cmd = destroyVirtualMachine.destroyVirtualMachineCmd()
|
||||
cmd.id = self.small_virtual_machine.id
|
||||
self.apiclient.destroyVirtualMachine(cmd)
|
||||
|
||||
config = list_configurations(
|
||||
self.apiclient,
|
||||
name='expunge.delay'
|
||||
)
|
||||
|
||||
response = config[0]
|
||||
# Wait for some time more than expunge.delay
|
||||
time.sleep(int(response.value) * 2)
|
||||
|
||||
list_vm_response = list_virtual_machines(
|
||||
self.apiclient,
|
||||
id=self.small_virtual_machine.id
|
||||
)
|
||||
self.assertEqual(
|
||||
list_vm_response,
|
||||
None,
|
||||
"Check Expunged virtual machine is listVirtualMachines"
|
||||
)
|
||||
return
|
||||
|
||||
def test_10_attachAndDetach_iso(self):
|
||||
"""Test for detach ISO to virtual machine"""
|
||||
|
||||
# Validate the following
|
||||
# 1. Create ISO
|
||||
# 2. Attach ISO to VM
|
||||
# 3. Log in to the VM.
|
||||
# 4. The device should be available for use
|
||||
# 5. Detach ISO
|
||||
# 6. Check the device is properly detached by logging into VM
|
||||
|
||||
iso = Iso.create(
|
||||
self.apiclient,
|
||||
self.services["iso"],
|
||||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
|
||||
self.debug("Successfully created ISO with ID: %s" % iso.id)
|
||||
try:
|
||||
iso.download(self.apiclient)
|
||||
except Exception as e:
|
||||
self.fail("Exception while downloading ISO %s: %s"\
|
||||
% (iso.id, e))
|
||||
|
||||
self.debug("Attach ISO with ID: %s to VM ID: %s" % (
|
||||
iso.id,
|
||||
self.virtual_machine.id
|
||||
))
|
||||
#Attach ISO to virtual machine
|
||||
cmd = attachIso.attachIsoCmd()
|
||||
cmd.id = iso.id
|
||||
cmd.virtualmachineid = self.virtual_machine.id
|
||||
self.apiclient.attachIso(cmd)
|
||||
|
||||
try:
|
||||
ssh_client = self.virtual_machine.get_ssh_client()
|
||||
|
||||
cmds = [
|
||||
"mkdir -p %s" % self.services["mount_dir"],
|
||||
"mount -rt iso9660 %s %s" \
|
||||
% (
|
||||
self.services["diskdevice"],
|
||||
self.services["mount_dir"]
|
||||
),
|
||||
]
|
||||
|
||||
for c in cmds:
|
||||
res = ssh_client.execute(c)
|
||||
|
||||
self.assertEqual(res, [], "Check mount is successful or not")
|
||||
|
||||
c = "fdisk -l|grep %s|head -1" % self.services["diskdevice"]
|
||||
res = ssh_client.execute(c)
|
||||
#Disk /dev/xvdd: 4393 MB, 4393723904 bytes
|
||||
|
||||
except Exception as e:
|
||||
self.fail("SSH failed for virtual machine: %s - %s" %
|
||||
(self.virtual_machine.ipaddress, e))
|
||||
|
||||
# Res may contain more than one strings depending on environment
|
||||
# Split strings to form new list which is used for assertion on ISO size
|
||||
result = []
|
||||
for i in res:
|
||||
for k in i.split():
|
||||
result.append(k)
|
||||
|
||||
# Get ISO size
|
||||
iso_response = list_isos(
|
||||
self.apiclient,
|
||||
id=iso.id
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(iso_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
iso_size = iso_response[0].size
|
||||
|
||||
self.assertEqual(
|
||||
str(iso_size) in result,
|
||||
True,
|
||||
"Check size of the attached ISO"
|
||||
)
|
||||
try:
|
||||
#Unmount ISO
|
||||
command = "umount %s" % self.services["mount_dir"]
|
||||
ssh_client.execute(command)
|
||||
|
||||
except Exception as e:
|
||||
self.fail("SSH failed for virtual machine: %s - %s" %
|
||||
(self.virtual_machine.ipaddress, e))
|
||||
|
||||
#Detach from VM
|
||||
cmd = detachIso.detachIsoCmd()
|
||||
cmd.virtualmachineid = self.virtual_machine.id
|
||||
self.apiclient.detachIso(cmd)
|
||||
|
||||
try:
|
||||
res = ssh_client.execute(c)
|
||||
|
||||
except Exception as e:
|
||||
self.fail("SSH failed for virtual machine: %s - %s" %
|
||||
(self.virtual_machine.ipaddress, e))
|
||||
|
||||
# Check if ISO is properly detached from VM (using fdisk)
|
||||
result = self.services["diskdevice"] in str(res)
|
||||
|
||||
self.assertEqual(
|
||||
result,
|
||||
False,
|
||||
"Check if ISO is detached from virtual machine"
|
||||
)
|
||||
return
|
||||
|
|
@ -1,516 +0,0 @@
|
|||
# -*- encoding: utf-8 -*-
|
||||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
""" BVT tests for Volumes
|
||||
"""
|
||||
#Import Local Modules
|
||||
from cloudstackTestCase import *
|
||||
from cloudstackAPI import *
|
||||
from testcase.libs.utils import *
|
||||
from testcase.libs.base import *
|
||||
from testcase.libs.common import *
|
||||
import remoteSSHClient
|
||||
#Import System modules
|
||||
import os
|
||||
import urllib
|
||||
import time
|
||||
import tempfile
|
||||
|
||||
|
||||
class Services:
    """Static test data consumed by the volume BVT suite.

    Only plain dictionaries are built here; no CloudStack API calls
    happen at construction time.
    """

    def __init__(self):
        # Account that owns every resource the tests create.
        account = {
            "email": "test@test.com",
            "firstname": "Test",
            "lastname": "User",
            # Random characters are appended by the framework so the
            # username is unique per run.
            "username": "test",
            "password": "fr3sca",
        }
        # Minimal compute offering to keep VM deployment cheap.
        service_offering = {
            "name": "Tiny Instance",
            "displaytext": "Tiny Instance",
            "cpunumber": 1,
            "cpuspeed": 100,    # in MHz
            "memory": 64,       # in MBs
        }
        disk_offering = {
            "displaytext": "Small",
            "name": "Small",
            "disksize": 1,      # in GBs
        }
        self.services = {
            "account": account,
            "service_offering": service_offering,
            "disk_offering": disk_offering,
            # One entry per fixed-size data-disk flavour exercised by
            # test_01_create_volume.
            "volume_offerings": {
                0: {"diskname": "TestDiskServ"},
            },
            "customdisksize": 1,            # GBs
            "username": "root",             # creds for SSH to the VM
            "password": "password",
            "ssh_port": 22,
            "diskname": "TestDiskServ",
            "hypervisor": 'XenServer',
            "privateport": 22,
            "publicport": 22,
            "protocol": 'TCP',
            "diskdevice": "/dev/xvdb",      # device the data disk appears as
            "ostypeid": '5776c0d2-f331-42db-ba3a-29f1f8319bc9',
            "mode": 'advanced',             # zone networking mode
            "sleep": 60,                    # seconds between state polls
            "timeout": 10,                  # max poll attempts
        }
|
||||
|
||||
|
||||
class TestCreateVolume(cloudstackTestCase):
    """BVT suite: create DATADISK volumes from every configured disk
    offering (fixed and custom sized), attach each to a running VM and
    verify inside the guest (via fdisk) that the promised size is
    actually available.
    """

    @classmethod
    def setUpClass(cls):
        """Create the shared fixtures: disk/service offerings, an account
        and one running VM that every test attaches volumes to."""
        cls.api_client = super(TestCreateVolume, cls).getClsTestClient().getApiClient()
        cls.services = Services().services

        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.api_client, cls.services)
        cls.zone = get_zone(cls.api_client, cls.services)
        cls.disk_offering = DiskOffering.create(
                                    cls.api_client,
                                    cls.services["disk_offering"]
                                    )
        # Same offering definition, but flagged custom so the disk size
        # is supplied at volume-creation time instead.
        cls.custom_disk_offering = DiskOffering.create(
                                    cls.api_client,
                                    cls.services["disk_offering"],
                                    custom=True
                                    )
        template = get_template(
                            cls.api_client,
                            cls.zone.id,
                            cls.services["ostypeid"]
                            )
        cls.services["domainid"] = cls.domain.id
        cls.services["zoneid"] = cls.zone.id
        cls.services["template"] = template.id
        cls.services["customdiskofferingid"] = cls.custom_disk_offering.id

        # Create VMs, NAT Rules etc
        cls.account = Account.create(
                            cls.api_client,
                            cls.services["account"],
                            domainid=cls.domain.id
                            )

        cls.services["account"] = cls.account.account.name
        cls.service_offering = ServiceOffering.create(
                                            cls.api_client,
                                            cls.services["service_offering"]
                                            )
        cls.virtual_machine = VirtualMachine.create(
                                    cls.api_client,
                                    cls.services,
                                    accountid=cls.account.account.name,
                                    domainid=cls.account.account.domainid,
                                    serviceofferingid=cls.service_offering.id,
                                    mode=cls.services["mode"]
                                )
        cls._cleanup = [
                        cls.service_offering,
                        cls.disk_offering,
                        cls.custom_disk_offering,
                        cls.account
                        ]

    def setUp(self):
        # Fresh API/DB handles per test; per-test resources are tracked
        # in self.cleanup and purged by tearDown.
        self.apiClient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        self.cleanup = []

    def test_01_create_volume(self):
        """Test Volume creation for all Disk Offerings (incl. custom)
        """

        # Validate the following
        # 1. Create volumes from the different sizes
        # 2. Verify the size of volume with actual size allocated

        self.volumes = []
        for k, v in self.services["volume_offerings"].items():
            volume = Volume.create(
                                   self.apiClient,
                                   v,
                                   zoneid=self.zone.id,
                                   account=self.account.account.name,
                                   domainid=self.account.account.domainid,
                                   diskofferingid=self.disk_offering.id
                                   )
            self.debug("Created a volume with ID: %s" % volume.id)
            self.volumes.append(volume)

        # One more volume from the custom offering (size supplied by
        # services["customdisksize"]).
        volume = Volume.create_custom_disk(
                                    self.apiClient,
                                    self.services,
                                    account=self.account.account.name,
                                    domainid=self.account.account.domainid,
                                    )
        self.debug("Created a volume with custom offering: %s" % volume.id)
        self.volumes.append(volume)

        #Attach a volume with different disk offerings
        #and check the memory allocated to each of them
        for volume in self.volumes:
            list_volume_response = list_volumes(
                                                self.apiClient,
                                                id=volume.id
                                                )
            self.assertEqual(
                            isinstance(list_volume_response, list),
                            True,
                            "Check list response returns a valid list"
                        )
            self.assertNotEqual(
                            list_volume_response,
                            None,
                            "Check if volume exists in ListVolumes"
                        )
            self.debug(
                "Attaching volume (ID: %s) to VM (ID: %s)" % (
                                                    volume.id,
                                                    self.virtual_machine.id
                                                    ))
            self.virtual_machine.attach_volume(
                                                self.apiClient,
                                                volume
                                                )
            try:
                # Reboot so the guest OS picks up the newly attached disk.
                ssh = self.virtual_machine.get_ssh_client()
                ssh.execute("reboot")

            except Exception as e:
                self.fail("SSH access failed for VM %s - %s" %
                                (self.virtual_machine.ipaddress, e))

            # Poll listVM to ensure VM is started properly
            timeout = self.services["timeout"]
            while True:
                time.sleep(self.services["sleep"])

                # Ensure that VM is in running state
                list_vm_response = list_virtual_machines(
                                                self.apiClient,
                                                id=self.virtual_machine.id
                                                )

                if isinstance(list_vm_response, list):

                    vm = list_vm_response[0]
                    if vm.state == 'Running':
                        self.debug("VM state: %s" % vm.state)
                        break

                if timeout == 0:
                    # BUGFIX: report the well-known VM id here. The
                    # original used the loop-local `vm`, which is unbound
                    # when listVirtualMachines never returned a list, so
                    # the timeout path raised NameError instead of the
                    # intended exception.
                    raise Exception(
                        "Failed to start VM (ID: %s) " %
                                                self.virtual_machine.id)

                timeout = timeout - 1

            try:
                ssh = self.virtual_machine.get_ssh_client(
                                                          reconnect=True
                                                          )
                c = "fdisk -l"
                res = ssh.execute(c)

            except Exception as e:
                self.fail("SSH access failed for VM: %s - %s" %
                                (self.virtual_machine.ipaddress, e))

            # Example fdisk output containing the byte size we look for:
            # Disk /dev/sda doesn't contain a valid partition table
            # Disk /dev/sda: 21.5 GB, 21474836480 bytes
            result = str(res)
            self.debug("fdisk result: %s" % result)

            self.assertEqual(
                            str(list_volume_response[0].size) in result,
                            True,
                            "Check if promised disk size actually available"
                            )
            self.virtual_machine.detach_volume(self.apiClient, volume)

    def tearDown(self):
        #Clean up, terminate the created volumes
        cleanup_resources(self.apiClient, self.cleanup)
        return

    @classmethod
    def tearDownClass(cls):
        """Destroy the class-level fixtures (offerings, account, VM)."""
        try:
            cls.api_client = super(TestCreateVolume, cls).getClsTestClient().getApiClient()
            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
|
||||
|
||||
|
||||
class TestVolumes(cloudstackTestCase):
    """BVT suite covering the attach / download / delete / detach
    lifecycle of a single DATADISK volume against one running VM.

    Tests are ordered (test_02 .. test_07) and share the class-level
    volume: it is attached first, exercised while attached, then
    detached and exercised again.
    """

    @classmethod
    def setUpClass(cls):
        """Create the shared fixtures: offerings, account, one VM and the
        data volume used by every test."""
        cls.api_client = super(TestVolumes, cls).getClsTestClient().getApiClient()
        cls.services = Services().services
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.api_client, cls.services)
        cls.zone = get_zone(cls.api_client, cls.services)
        cls.disk_offering = DiskOffering.create(
                                    cls.api_client,
                                    cls.services["disk_offering"]
                                    )
        template = get_template(
                            cls.api_client,
                            cls.zone.id,
                            cls.services["ostypeid"]
                            )
        cls.services["domainid"] = cls.domain.id
        cls.services["zoneid"] = cls.zone.id
        cls.services["template"] = template.id
        cls.services["diskofferingid"] = cls.disk_offering.id

        # Create VMs, VMs etc
        cls.account = Account.create(
                            cls.api_client,
                            cls.services["account"],
                            domainid=cls.domain.id
                            )

        cls.services["account"] = cls.account.account.name
        cls.service_offering = ServiceOffering.create(
                                            cls.api_client,
                                            cls.services["service_offering"]
                                            )
        cls.virtual_machine = VirtualMachine.create(
                                    cls.api_client,
                                    cls.services,
                                    accountid=cls.account.account.name,
                                    domainid=cls.account.account.domainid,
                                    serviceofferingid=cls.service_offering.id,
                                    mode=cls.services["mode"]
                                )

        cls.volume = Volume.create(
                                   cls.api_client,
                                   cls.services,
                                   account=cls.account.account.name,
                                   domainid=cls.account.account.domainid
                                   )
        cls._cleanup = [
                        cls.service_offering,
                        cls.disk_offering,
                        cls.account
                        ]

    @classmethod
    def tearDownClass(cls):
        """Destroy the class-level fixtures."""
        try:
            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)

    def setUp(self):
        # Fresh API/DB handles for every test.
        self.apiClient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()

    def test_02_attach_volume(self):
        """Attach a created Volume to a Running VM
        """
        # Validate the following
        # 1. shows list of volumes
        # 2. "Attach Disk" pop-up box will display with list of instances
        # 3. disk should be attached to instance successfully

        self.debug(
                "Attaching volume (ID: %s) to VM (ID: %s)" % (
                                                    self.volume.id,
                                                    self.virtual_machine.id
                                                    ))
        self.virtual_machine.attach_volume(self.apiClient, self.volume)

        list_volume_response = list_volumes(
                                            self.apiClient,
                                            id=self.volume.id
                                            )
        self.assertEqual(
                            isinstance(list_volume_response, list),
                            True,
                            "Check list response returns a valid list"
                        )
        self.assertNotEqual(
                            list_volume_response,
                            None,
                            "Check if volume exists in ListVolumes"
                        )
        volume = list_volume_response[0]
        # A non-null virtualmachineid is how listVolumes reports
        # "attached".
        self.assertNotEqual(
                            volume.virtualmachineid,
                            None,
                            "Check if volume state (attached) is reflected"
                        )
        try:
            #Format the attached volume to a known fs
            format_volume_to_ext3(self.virtual_machine.get_ssh_client())

        except Exception as e:

            self.fail("SSH failed for VM: %s - %s" %
                                (self.virtual_machine.ipaddress, e))
        return

    def test_03_download_attached_volume(self):
        """Download a Volume attached to a VM
        """
        # Validate the following
        # 1. download volume will fail with proper error message
        #    "Failed - Invalid state of the volume with ID:
        #    It should be either detached or the VM should be in stopped state

        self.debug("Extract attached Volume ID: %s" % self.volume.id)

        cmd = extractVolume.extractVolumeCmd()
        cmd.id = self.volume.id
        cmd.mode = "HTTP_DOWNLOAD"
        cmd.zoneid = self.services["zoneid"]
        # A proper exception should be raised;
        # downloading attach VM is not allowed
        with self.assertRaises(Exception):
            self.apiClient.extractVolume(cmd)

    def test_04_delete_attached_volume(self):
        """Delete a Volume attached to a VM
        """

        # Validate the following
        # 1. delete volume will fail with proper error message
        #    "Failed - Invalid state of the volume with ID:
        #    It should be either detached or the VM should be in stopped state

        self.debug("Trying to delete attached Volume ID: %s" %
                                                        self.volume.id)

        cmd = deleteVolume.deleteVolumeCmd()
        cmd.id = self.volume.id
        #Proper exception should be raised; deleting attach VM is not allowed
        # NOTE(review): the API wrapper appears to return None rather than
        # raise here (assertRaises was deliberately commented out upstream)
        # - confirm against the client behaviour.
        result = self.apiClient.deleteVolume(cmd)
        self.assertEqual(
                         result,
                         None,
                         "Check for delete download error while volume is attached"
                         )

    def test_05_detach_volume(self):
        """Detach a Volume attached to a VM
        """

        # Validate the following
        # Data disk should be detached from instance and detached data disk
        # details should be updated properly

        self.debug(
                "Detaching volume (ID: %s) from VM (ID: %s)" % (
                                                    self.volume.id,
                                                    self.virtual_machine.id
                                                    ))

        self.virtual_machine.detach_volume(self.apiClient, self.volume)
        #Sleep to ensure the current state will reflected in other calls
        time.sleep(self.services["sleep"])
        list_volume_response = list_volumes(
                                            self.apiClient,
                                            id=self.volume.id
                                            )
        self.assertEqual(
                            isinstance(list_volume_response, list),
                            True,
                            "Check list response returns a valid list"
                        )

        self.assertNotEqual(
                            list_volume_response,
                            None,
                            "Check if volume exists in ListVolumes"
                        )
        volume = list_volume_response[0]
        self.assertEqual(
                         volume.virtualmachineid,
                         None,
                         "Check if volume state (detached) is reflected"
                         )
        return

    def test_06_download_detached_volume(self):
        """Download a Volume not attached to a VM
        """
        # Validate the following
        # 1. able to download the volume when its not attached to instance

        self.debug("Extract detached Volume ID: %s" % self.volume.id)

        cmd = extractVolume.extractVolumeCmd()
        cmd.id = self.volume.id
        cmd.mode = "HTTP_DOWNLOAD"
        cmd.zoneid = self.services["zoneid"]
        extract_vol = self.apiClient.extractVolume(cmd)

        #Attempt to download the volume and save contents locally
        try:
            formatted_url = urllib.unquote_plus(extract_vol.url)
            response = urllib.urlopen(formatted_url)
            fd, path = tempfile.mkstemp()
            os.close(fd)
            # BUGFIX: use a context manager so the file handle is closed
            # even if the write raises (the original leaked it on error
            # and reused the name `fd` for both the raw descriptor and
            # the file object).
            with open(path, 'wb') as download_file:
                download_file.write(response.read())

        except Exception:
            self.fail(
                "Extract Volume Failed with invalid URL %s (vol id: %s)" \
                % (extract_vol.url, self.volume.id)
            )

    def test_07_delete_detached_volume(self):
        """Delete a Volume not attached to a VM
        """
        # Validate the following
        # 1. volume should be deleted successfully and listVolume should not
        #    contain the deleted volume details.
        # 2. "Delete Volume" menu item not shown under "Actions" menu.
        #    (UI should not allow to delete the volume when it is attached
        #    to instance by hiding the menu Item)

        self.debug("Delete Volume ID: %s" % self.volume.id)

        cmd = deleteVolume.deleteVolumeCmd()
        cmd.id = self.volume.id
        self.apiClient.deleteVolume(cmd)

        list_volume_response = list_volumes(
                                            self.apiClient,
                                            id=self.volume.id,
                                            type='DATADISK'
                                            )
        # A fully deleted volume no longer appears in listVolumes.
        self.assertEqual(
                        list_volume_response,
                        None,
                        "Check if volume exists in ListVolumes"
                    )
        return
|
||||
|
|
@ -1,41 +0,0 @@
|
|||
P1 Cases
|
||||
--------------------------------------
|
||||
These test cases are the core functionality tests that ensure the application is stable and can be tested thoroughly.
|
||||
These P1 cases definitions are located at : https://docs.google.com/a/clogeny.com/spreadsheet/ccc?key=0Aq5M2ldK6eyedDJBa0EzM0RPNmdVNVZOWnFnOVJJcHc&hl=en_US
|
||||
|
||||
|
||||
Guidelines
|
||||
----------
|
||||
P1 test cases are being developed using Python's unittest2. The following guidelines are being followed:
|
||||
1. Tests exercised for the same resource should ideally be present under a single suite or file.
|
||||
|
||||
2. Time-consuming operations that create new cloud resources like server creation, volume creation etc
|
||||
should not necessarily be exercised per unit test. The resources can be shared by creating them at
|
||||
the class-level using setUpClass and shared across all instances during a single run.
|
||||
|
||||
3. Certain tests pertaining to NAT, Firewall and Load Balancing warrant fresh resources per test. Hence a call should be
|
||||
taken by the stakeholders regarding sharing resources.
|
||||
|
||||
4. Ensure that the tearDown/tearDownClass functions clean up all the resources created during the test run.
|
||||
|
||||
For more information about unittests: http://docs.python.org/library/unittest.html
|
||||
|
||||
|
||||
P1 Tests
|
||||
----------
|
||||
The following files contain these P1 cases:
|
||||
|
||||
1. test_snapshots.py - Snapshots related tests
|
||||
2. test_routers.py - Router related tests
|
||||
3. test_usage.py - Usage related tests
|
||||
4. test_account.py - Account related tests
|
||||
5. test_resource_limits.py - Resource limits tests
|
||||
6. test_security_groups.py - Security groups related tests
|
||||
7. test_templates.py - templates related tests
|
||||
8. test_volumes.py - Volumes related tests
|
||||
9. test_blocker_bugs.py - Blocker bugs tests
|
||||
10. test_project_configs.py - Project global configuration related tests
|
||||
11. test_project_limits.py - Project resource limits related tests
|
||||
12. test_project_resources.py - Project resource creation related tests
|
||||
13. test_project_usage.py - Project usage related tests
|
||||
14. test_projects - Projects functionality tests
|
||||
|
|
@ -1,13 +0,0 @@
|
|||
# -*- encoding: utf-8 -*-
|
||||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
|
|
@ -1,968 +0,0 @@
|
|||
# -*- encoding: utf-8 -*-
|
||||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
|
||||
""" P1 tests for high availability
|
||||
"""
|
||||
#Import Local Modules
|
||||
from cloudstackTestCase import *
|
||||
from cloudstackAPI import *
|
||||
from testcase.libs.utils import *
|
||||
from testcase.libs.base import *
|
||||
from testcase.libs.common import *
|
||||
import remoteSSHClient
|
||||
import datetime
|
||||
|
||||
|
||||
class Services:
    """Static test data for the high-availability suite.

    Pure data container; nothing here talks to the management server.
    """

    def __init__(self):
        # Admin account that owns all resources created per test.
        account = {
            "email": "test@test.com",
            "firstname": "HA",
            "lastname": "HA",
            # Random characters are appended by the framework so the
            # username is unique per run.
            "username": "HA",
            "password": "password",
        }
        # Small compute offering; HA flag is added at creation time in
        # setUpClass (offerha=True).
        service_offering = {
            "name": "Tiny Instance",
            "displaytext": "Tiny Instance",
            "cpunumber": 1,
            "cpuspeed": 100,    # in MHz
            "memory": 64,       # in MBs
        }
        lbrule = {
            "name": "SSH",
            # Algorithm used for load balancing
            "alg": "roundrobin",
            "privateport": 22,
            "publicport": 2222,
        }
        natrule = {
            "privateport": 22,
            "publicport": 22,
            "protocol": "TCP",
        }
        fw_rule = {
            "startport": 1,
            "endport": 6000,
            # Any network (For creating FW rule)
            "cidr": '55.55.0.0/11',
        }
        virtual_machine = {
            "displayname": "VM",
            "username": "root",
            "password": "password",
            "ssh_port": 22,
            # Hypervisor type should be same as hypervisor type of
            # cluster
            "hypervisor": 'XenServer',
            "privateport": 22,
            "publicport": 22,
            "protocol": 'TCP',
        }
        self.services = {
            "account": account,
            "service_offering": service_offering,
            "lbrule": lbrule,
            "natrule": natrule,
            "fw_rule": fw_rule,
            "virtual_machine": virtual_machine,
            # Cent OS 5.3 (64 bit)
            "ostypeid": '9958b10f-9e5d-4ef1-908d-a047372d823b',
            "sleep": 60,        # seconds between state polls
            "timeout": 100,     # max poll attempts
            "mode": 'advanced',
        }
|
||||
|
||||
|
||||
class TestHighAvailability(cloudstackTestCase):
|
||||
|
||||
@classmethod
|
||||
def setUpClass(cls):
|
||||
|
||||
cls.api_client = super(
|
||||
TestHighAvailability,
|
||||
cls
|
||||
).getClsTestClient().getApiClient()
|
||||
cls.services = Services().services
|
||||
# Get Zone, Domain and templates
|
||||
cls.domain = get_domain(
|
||||
cls.api_client,
|
||||
cls.services
|
||||
)
|
||||
cls.zone = get_zone(
|
||||
cls.api_client,
|
||||
cls.services
|
||||
)
|
||||
cls.pod = get_pod(
|
||||
cls.api_client,
|
||||
zoneid=cls.zone.id,
|
||||
services=cls.services
|
||||
)
|
||||
cls.template = get_template(
|
||||
cls.api_client,
|
||||
cls.zone.id,
|
||||
cls.services["ostypeid"]
|
||||
)
|
||||
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
|
||||
cls.services["virtual_machine"]["template"] = cls.template.id
|
||||
|
||||
cls.service_offering = ServiceOffering.create(
|
||||
cls.api_client,
|
||||
cls.services["service_offering"],
|
||||
offerha=True
|
||||
)
|
||||
cls._cleanup = [
|
||||
cls.service_offering,
|
||||
]
|
||||
return
|
||||
|
||||
# @classmethod
|
||||
# def tearDownClass(cls):
|
||||
# try:
|
||||
# #Cleanup resources used
|
||||
# cleanup_resources(cls.api_client, cls._cleanup)
|
||||
# except Exception as e:
|
||||
# raise Exception("Warning: Exception during cleanup : %s" % e)
|
||||
# return
|
||||
|
||||
def setUp(self):
|
||||
self.apiclient = self.testClient.getApiClient()
|
||||
self.dbclient = self.testClient.getDbConnection()
|
||||
self.account = Account.create(
|
||||
self.apiclient,
|
||||
self.services["account"],
|
||||
admin=True,
|
||||
domainid=self.domain.id
|
||||
)
|
||||
self.cleanup = [self.account]
|
||||
return
|
||||
|
||||
# def tearDown(self):
|
||||
# try:
|
||||
# #Clean up, terminate the created accounts, domains etc
|
||||
# cleanup_resources(self.apiclient, self.cleanup)
|
||||
# self.testClient.close()
|
||||
# except Exception as e:
|
||||
# raise Exception("Warning: Exception during cleanup : %s" % e)
|
||||
# return
|
||||
|
||||
def test_01_host_maintenance_mode(self):
|
||||
"""Test host maintenance mode
|
||||
"""
|
||||
|
||||
# Validate the following
|
||||
# 1. Create Vms. Acquire IP. Create port forwarding & load balancing
|
||||
# rules for Vms.
|
||||
# 2. Host 1: put to maintenance mode. All Vms should failover to Host
|
||||
# 2 in cluster. Vms should be in running state. All port forwarding
|
||||
# rules and load balancing Rules should work.
|
||||
# 3. After failover to Host 2 succeeds, deploy Vms. Deploy Vms on host
|
||||
# 2 should succeed.
|
||||
# 4. Host 1: cancel maintenance mode.
|
||||
# 5. Host 2 : put to maintenance mode. All Vms should failover to
|
||||
# Host 1 in cluster.
|
||||
# 6. After failover to Host 1 succeeds, deploy VMs. Deploy Vms on
|
||||
# host 1 should succeed.
|
||||
|
||||
hosts = Host.list(
|
||||
self.apiclient,
|
||||
zoneid=self.zone.id,
|
||||
resourcestate='Enabled',
|
||||
type='Routing'
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(hosts, list),
|
||||
True,
|
||||
"List hosts should return valid host response"
|
||||
)
|
||||
self.assertEqual(
|
||||
len(hosts),
|
||||
2,
|
||||
"There must be two hosts present in a cluster"
|
||||
)
|
||||
self.debug("Checking HA with hosts: %s, %s" % (
|
||||
hosts[0].name,
|
||||
hosts[1].name
|
||||
))
|
||||
self.debug("Deploying VM in account: %s" % self.account.account.name)
|
||||
# Spawn an instance in that network
|
||||
virtual_machine = VirtualMachine.create(
|
||||
self.apiclient,
|
||||
self.services["virtual_machine"],
|
||||
accountid=self.account.account.name,
|
||||
domainid=self.account.account.domainid,
|
||||
serviceofferingid=self.service_offering.id
|
||||
)
|
||||
vms = VirtualMachine.list(
|
||||
self.apiclient,
|
||||
id=virtual_machine.id,
|
||||
listall=True
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(vms, list),
|
||||
True,
|
||||
"List VMs should return valid response for deployed VM"
|
||||
)
|
||||
self.assertNotEqual(
|
||||
len(vms),
|
||||
0,
|
||||
"List VMs should return valid response for deployed VM"
|
||||
)
|
||||
vm = vms[0]
|
||||
self.debug("Deployed VM on host: %s" % vm.hostid)
|
||||
self.assertEqual(
|
||||
vm.state,
|
||||
"Running",
|
||||
"Deployed VM should be in RUnning state"
|
||||
)
|
||||
networks = Network.list(
|
||||
self.apiclient,
|
||||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid,
|
||||
listall=True
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(networks, list),
|
||||
True,
|
||||
"List networks should return valid list for the account"
|
||||
)
|
||||
network = networks[0]
|
||||
|
||||
self.debug("Associating public IP for account: %s" %
|
||||
self.account.account.name)
|
||||
public_ip = PublicIPAddress.create(
|
||||
self.apiclient,
|
||||
accountid=self.account.account.name,
|
||||
zoneid=self.zone.id,
|
||||
domainid=self.account.account.domainid,
|
||||
networkid=network.id
|
||||
)
|
||||
|
||||
self.debug("Associated %s with network %s" % (
|
||||
public_ip.ipaddress.ipaddress,
|
||||
network.id
|
||||
))
|
||||
self.debug("Creating PF rule for IP address: %s" %
|
||||
public_ip.ipaddress.ipaddress)
|
||||
nat_rule= NATRule.create(
|
||||
self.apiclient,
|
||||
virtual_machine,
|
||||
self.services["natrule"],
|
||||
ipaddressid=public_ip.ipaddress.id
|
||||
)
|
||||
|
||||
self.debug("Creating LB rule on IP with NAT: %s" %
|
||||
public_ip.ipaddress.ipaddress)
|
||||
|
||||
# Create Load Balancer rule on IP already having NAT rule
|
||||
lb_rule = LoadBalancerRule.create(
|
||||
self.apiclient,
|
||||
self.services["lbrule"],
|
||||
ipaddressid=public_ip.ipaddress.id,
|
||||
accountid=self.account.account.name
|
||||
)
|
||||
self.debug("Created LB rule with ID: %s" % lb_rule.id)
|
||||
|
||||
# Should be able to SSH VM
|
||||
try:
|
||||
self.debug("SSH into VM: %s" % virtual_machine.id)
|
||||
ssh = virtual_machine.get_ssh_client(
|
||||
ipaddress=public_ip.ipaddress.ipaddress)
|
||||
except Exception as e:
|
||||
self.fail("SSH Access failed for %s: %s" % \
|
||||
(virtual_machine.ipaddress, e)
|
||||
)
|
||||
|
||||
first_host = vm.hostid
|
||||
self.debug("Enabling maintenance mode for host %s" % vm.hostid)
|
||||
cmd = prepareHostForMaintenance.prepareHostForMaintenanceCmd()
|
||||
cmd.id = first_host
|
||||
self.apiclient.prepareHostForMaintenance(cmd)
|
||||
|
||||
self.debug("Waiting for SSVMs to come up")
|
||||
wait_for_ssvms(
|
||||
self.apiclient,
|
||||
zoneid=self.zone.id,
|
||||
podid=self.pod.id,
|
||||
)
|
||||
|
||||
timeout = self.services["timeout"]
|
||||
# Poll and check state of VM while it migrates from one host to another
|
||||
while True:
|
||||
vms = VirtualMachine.list(
|
||||
self.apiclient,
|
||||
id=virtual_machine.id,
|
||||
listall=True
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(vms, list),
|
||||
True,
|
||||
"List VMs should return valid response for deployed VM"
|
||||
)
|
||||
self.assertNotEqual(
|
||||
len(vms),
|
||||
0,
|
||||
"List VMs should return valid response for deployed VM"
|
||||
)
|
||||
vm = vms[0]
|
||||
|
||||
self.debug("VM 1 state: %s" % vm.state)
|
||||
if vm.state in ["Stopping", "Stopped", "Running", "Starting"]:
|
||||
if vm.state == "Running":
|
||||
break
|
||||
else:
|
||||
time.sleep(self.services["sleep"])
|
||||
timeout = timeout - 1
|
||||
else:
|
||||
self.fail(
|
||||
"VM migration from one-host-to-other failed while enabling maintenance"
|
||||
)
|
||||
second_host = vm.hostid
|
||||
self.assertEqual(
|
||||
vm.state,
|
||||
"Running",
|
||||
"VM should be in Running state after enabling host maintenance"
|
||||
)
|
||||
# Should be able to SSH VM
|
||||
try:
|
||||
self.debug("SSH into VM: %s" % virtual_machine.id)
|
||||
ssh = virtual_machine.get_ssh_client(
|
||||
ipaddress=public_ip.ipaddress.ipaddress)
|
||||
except Exception as e:
|
||||
self.fail("SSH Access failed for %s: %s" % \
|
||||
(virtual_machine.ipaddress, e)
|
||||
)
|
||||
self.debug("Deploying VM in account: %s" % self.account.account.name)
|
||||
# Spawn an instance on other host
|
||||
virtual_machine_2 = VirtualMachine.create(
|
||||
self.apiclient,
|
||||
self.services["virtual_machine"],
|
||||
accountid=self.account.account.name,
|
||||
domainid=self.account.account.domainid,
|
||||
serviceofferingid=self.service_offering.id
|
||||
)
|
||||
vms = VirtualMachine.list(
|
||||
self.apiclient,
|
||||
id=virtual_machine_2.id,
|
||||
listall=True
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(vms, list),
|
||||
True,
|
||||
"List VMs should return valid response for deployed VM"
|
||||
)
|
||||
self.assertNotEqual(
|
||||
len(vms),
|
||||
0,
|
||||
"List VMs should return valid response for deployed VM"
|
||||
)
|
||||
vm = vms[0]
|
||||
self.debug("Deployed VM on host: %s" % vm.hostid)
|
||||
self.debug("VM 2 state: %s" % vm.state)
|
||||
self.assertEqual(
|
||||
vm.state,
|
||||
"Running",
|
||||
"Deployed VM should be in Running state"
|
||||
)
|
||||
|
||||
self.debug("Canceling host maintenance for ID: %s" % first_host)
|
||||
cmd = cancelHostMaintenance.cancelHostMaintenanceCmd()
|
||||
cmd.id = first_host
|
||||
self.apiclient.cancelHostMaintenance(cmd)
|
||||
self.debug("Maintenance mode canceled for host: %s" % first_host)
|
||||
|
||||
self.debug("Enabling maintenance mode for host %s" % second_host)
|
||||
cmd = prepareHostForMaintenance.prepareHostForMaintenanceCmd()
|
||||
cmd.id = second_host
|
||||
self.apiclient.prepareHostForMaintenance(cmd)
|
||||
self.debug("Maintenance mode enabled for host: %s" % second_host)
|
||||
|
||||
self.debug("Waiting for SSVMs to come up")
|
||||
wait_for_ssvms(
|
||||
self.apiclient,
|
||||
zoneid=self.zone.id,
|
||||
podid=self.pod.id,
|
||||
)
|
||||
|
||||
# Poll and check the status of VMs
|
||||
timeout = self.services["timeout"]
|
||||
while True:
|
||||
vms = VirtualMachine.list(
|
||||
self.apiclient,
|
||||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid,
|
||||
listall=True
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(vms, list),
|
||||
True,
|
||||
"List VMs should return valid response for deployed VM"
|
||||
)
|
||||
self.assertNotEqual(
|
||||
len(vms),
|
||||
0,
|
||||
"List VMs should return valid response for deployed VM"
|
||||
)
|
||||
vm = vms[0]
|
||||
self.debug(
|
||||
"VM state after enabling maintenance on first host: %s" %
|
||||
vm.state)
|
||||
if vm.state in ["Stopping", "Stopped", "Running", "Starting"]:
|
||||
if vm.state == "Running":
|
||||
break
|
||||
else:
|
||||
time.sleep(self.services["sleep"])
|
||||
timeout = timeout - 1
|
||||
else:
|
||||
self.fail(
|
||||
"VM migration from one-host-to-other failed while enabling maintenance"
|
||||
)
|
||||
|
||||
for vm in vms:
|
||||
self.debug(
|
||||
"VM states after enabling maintenance mode on host: %s - %s" %
|
||||
(first_host, vm.state))
|
||||
self.assertEqual(
|
||||
vm.state,
|
||||
"Running",
|
||||
"Deployed VM should be in Running state"
|
||||
)
|
||||
# Spawn an instance on other host
|
||||
virtual_machine_3 = VirtualMachine.create(
|
||||
self.apiclient,
|
||||
self.services["virtual_machine"],
|
||||
accountid=self.account.account.name,
|
||||
domainid=self.account.account.domainid,
|
||||
serviceofferingid=self.service_offering.id
|
||||
)
|
||||
vms = VirtualMachine.list(
|
||||
self.apiclient,
|
||||
id=virtual_machine_3.id,
|
||||
listall=True
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(vms, list),
|
||||
True,
|
||||
"List VMs should return valid response for deployed VM"
|
||||
)
|
||||
self.assertNotEqual(
|
||||
len(vms),
|
||||
0,
|
||||
"List VMs should return valid response for deployed VM"
|
||||
)
|
||||
vm = vms[0]
|
||||
|
||||
self.debug("Deployed VM on host: %s" % vm.hostid)
|
||||
self.debug("VM 3 state: %s" % vm.state)
|
||||
self.assertEqual(
|
||||
vm.state,
|
||||
"Running",
|
||||
"Deployed VM should be in Running state"
|
||||
)
|
||||
|
||||
# Should be able to SSH VM
|
||||
try:
|
||||
self.debug("SSH into VM: %s" % virtual_machine.id)
|
||||
ssh = virtual_machine.get_ssh_client(
|
||||
ipaddress=public_ip.ipaddress.ipaddress)
|
||||
except Exception as e:
|
||||
self.fail("SSH Access failed for %s: %s" % \
|
||||
(virtual_machine.ipaddress, e)
|
||||
)
|
||||
|
||||
self.debug("Canceling host maintenance for ID: %s" % second_host)
|
||||
cmd = cancelHostMaintenance.cancelHostMaintenanceCmd()
|
||||
cmd.id = second_host
|
||||
self.apiclient.cancelHostMaintenance(cmd)
|
||||
self.debug("Maintenance mode canceled for host: %s" % second_host)
|
||||
self.debug("Waiting for SSVMs to come up")
|
||||
wait_for_ssvms(
|
||||
self.apiclient,
|
||||
zoneid=self.zone.id,
|
||||
podid=self.pod.id,
|
||||
)
|
||||
return
|
||||
|
||||
def test_02_host_maintenance_mode_with_activities(self):
    """Test host maintenance mode while snapshot/template activity is ongoing.

    Validates the following:
    1. Create VMs. Acquire IP. Create port forwarding & load balancing
       rules for the VMs.
    2. While activities (snapshots, templates, volume downloads) are
       ongoing, put host 1 into maintenance mode.  All VMs should fail
       over to host 2 in the cluster and remain Running; PF and LB rules
       should keep working (verified by SSH through the public IP).
    3. After failover to host 2 succeeds, deploying VMs should succeed.
    4. Cancel maintenance mode on host 1.
    5. Repeat the activities and put host 2 into maintenance mode.  All
       VMs should fail over to host 1 in the cluster.
    6. After failover to host 1 succeeds, deploying VMs should succeed.

    Fixes over the original version:
    * the polling loops decremented ``timeout`` but never checked it, so
      a VM stuck in a transient state would poll forever;
    * "RUnning" typo in an assertion message;
    * the repeated list/assert, snapshot+template and maintenance
      sequences are factored into local helpers.
    """

    def list_vms_checked(**list_kwargs):
        # List VMs matching the filters and assert a non-empty list reply.
        vms = VirtualMachine.list(
            self.apiclient,
            listall=True,
            **list_kwargs
        )
        self.assertEqual(
            isinstance(vms, list),
            True,
            "List VMs should return valid response for deployed VM"
        )
        self.assertNotEqual(
            len(vms),
            0,
            "List VMs should return valid response for deployed VM"
        )
        return vms

    def deploy_vm():
        # Deploy one instance for the test account with the shared offering.
        return VirtualMachine.create(
            self.apiclient,
            self.services["virtual_machine"],
            accountid=self.account.account.name,
            domainid=self.account.account.domainid,
            serviceofferingid=self.service_offering.id
        )

    def check_ssh():
        # PF/LB rules must keep working across maintenance, so SSH to the
        # first VM through its public IP must succeed.
        try:
            self.debug("SSH into VM: %s" % virtual_machine.id)
            virtual_machine.get_ssh_client(
                ipaddress=public_ip.ipaddress.ipaddress)
        except Exception as e:
            self.fail("SSH Access failed for %s: %s" %
                      (virtual_machine.ipaddress, e))

    def wait_for_running(label, **list_kwargs):
        # Poll until the first VM selected by the filters reports Running.
        # Fails the test on an unexpected state, or when the VM does not
        # reach Running within self.services["timeout"] polls.
        # Returns the final VM listing.
        timeout = self.services["timeout"]
        while True:
            vms = list_vms_checked(**list_kwargs)
            vm = vms[0]
            self.debug("%s state: %s" % (label, vm.state))
            if vm.state not in ["Stopping", "Stopped", "Running", "Starting"]:
                self.fail(
                    "VM migration from one-host-to-other failed while enabling maintenance"
                )
            if vm.state == "Running":
                return vms
            if timeout <= 0:
                # BUGFIX: the original decremented timeout without ever
                # checking it, so this loop could spin forever.
                self.fail(
                    "VM did not reach Running state within timeout "
                    "while enabling maintenance"
                )
            time.sleep(self.services["sleep"])
            timeout = timeout - 1

    def snapshot_and_template(vm_instance):
        # Ongoing "activity": snapshot the VM's ROOT volume, then build a
        # template from the snapshot; both must succeed while host
        # maintenance work is in progress.
        volumes = list_volumes(
            self.apiclient,
            virtualmachineid=vm_instance.id,
            type='ROOT',
            listall=True
        )
        volume = volumes[0]
        self.debug(
            "Root volume of VM(%s): %s" % (
                vm_instance.name,
                volume.name
            ))
        # Create a snapshot from the ROOTDISK
        self.debug("Creating snapshot on ROOT volume: %s" % volume.name)
        snapshot = Snapshot.create(self.apiclient, volumes[0].id)
        self.debug("Snapshot created: ID - %s" % snapshot.id)

        snapshots = list_snapshots(
            self.apiclient,
            id=snapshot.id,
            listall=True
        )
        self.assertEqual(
            isinstance(snapshots, list),
            True,
            "Check list response returns a valid list"
        )
        self.assertNotEqual(
            snapshots,
            None,
            "Check if result exists in list snapshots call"
        )
        self.assertEqual(
            snapshots[0].id,
            snapshot.id,
            "Check snapshot id in list resources call"
        )
        # Generate template from the snapshot
        self.debug("Generating template from snapshot: %s" % snapshot.name)
        template = Template.create_from_snapshot(
            self.apiclient,
            snapshot,
            self.services["templates"]
        )
        self.cleanup.append(template)
        self.debug("Created template from snapshot: %s" % template.id)

        templates = list_templates(
            self.apiclient,
            templatefilter=self.services["templates"]["templatefilter"],
            id=template.id
        )
        self.assertEqual(
            isinstance(templates, list),
            True,
            "List template call should return the newly created template"
        )
        self.assertEqual(
            templates[0].isready,
            True,
            "The newly created template should be in ready state"
        )

    def enable_maintenance(hostid):
        cmd = prepareHostForMaintenance.prepareHostForMaintenanceCmd()
        cmd.id = hostid
        self.apiclient.prepareHostForMaintenance(cmd)

    def cancel_maintenance(hostid):
        self.debug("Canceling host maintenance for ID: %s" % hostid)
        cmd = cancelHostMaintenance.cancelHostMaintenanceCmd()
        cmd.id = hostid
        self.apiclient.cancelHostMaintenance(cmd)
        self.debug("Maintenance mode canceled for host: %s" % hostid)

    def wait_ssvms():
        # SSVMs restart when a host enters/leaves maintenance; wait for
        # them so later snapshot/template operations have somewhere to run.
        self.debug("Waiting for SSVMs to come up")
        wait_for_ssvms(
            self.apiclient,
            zoneid=self.zone.id,
            podid=self.pod.id,
        )

    hosts = Host.list(
        self.apiclient,
        zoneid=self.zone.id,
        resourcestate='Enabled',
        type='Routing'
    )
    self.assertEqual(
        isinstance(hosts, list),
        True,
        "List hosts should return valid host response"
    )
    # Failover only makes sense with exactly two usable hosts in the cluster.
    self.assertEqual(
        len(hosts),
        2,
        "There must be two hosts present in a cluster"
    )
    self.debug("Checking HA with hosts: %s, %s" % (
        hosts[0].name,
        hosts[1].name
    ))

    self.debug("Deploying VM in account: %s" % self.account.account.name)
    # Spawn an instance in that network
    virtual_machine = deploy_vm()
    vm = list_vms_checked(id=virtual_machine.id)[0]
    self.debug("Deployed VM on host: %s" % vm.hostid)
    self.assertEqual(
        vm.state,
        "Running",
        "Deployed VM should be in Running state"
    )

    networks = Network.list(
        self.apiclient,
        account=self.account.account.name,
        domainid=self.account.account.domainid,
        listall=True
    )
    self.assertEqual(
        isinstance(networks, list),
        True,
        "List networks should return valid list for the account"
    )
    network = networks[0]

    self.debug("Associating public IP for account: %s" %
               self.account.account.name)
    public_ip = PublicIPAddress.create(
        self.apiclient,
        accountid=self.account.account.name,
        zoneid=self.zone.id,
        domainid=self.account.account.domainid,
        networkid=network.id
    )
    self.debug("Associated %s with network %s" % (
        public_ip.ipaddress.ipaddress,
        network.id
    ))

    self.debug("Creating PF rule for IP address: %s" %
               public_ip.ipaddress.ipaddress)
    NATRule.create(
        self.apiclient,
        virtual_machine,
        self.services["natrule"],
        ipaddressid=public_ip.ipaddress.id
    )

    self.debug("Creating LB rule on IP with NAT: %s" %
               public_ip.ipaddress.ipaddress)
    # Create Load Balancer rule on IP already having NAT rule
    lb_rule = LoadBalancerRule.create(
        self.apiclient,
        self.services["lbrule"],
        ipaddressid=public_ip.ipaddress.id,
        accountid=self.account.account.name
    )
    self.debug("Created LB rule with ID: %s" % lb_rule.id)

    # Should be able to SSH VM
    check_ssh()

    # Ongoing activity on VM 1 before maintenance begins
    snapshot_and_template(virtual_machine)

    first_host = vm.hostid
    self.debug("Enabling maintenance mode for host %s" % first_host)
    enable_maintenance(first_host)
    wait_ssvms()

    # Poll and check state of VM while it migrates from one host to another
    vms = wait_for_running("VM 1", id=virtual_machine.id)
    vm = vms[0]
    second_host = vm.hostid
    self.assertEqual(
        vm.state,
        "Running",
        "VM should be in Running state after enabling host maintenance"
    )
    # Should be able to SSH VM
    check_ssh()

    self.debug("Deploying VM in account: %s" % self.account.account.name)
    # Spawn an instance on other host
    virtual_machine_2 = deploy_vm()
    vm = list_vms_checked(id=virtual_machine_2.id)[0]
    self.debug("Deployed VM on host: %s" % vm.hostid)
    self.debug("VM 2 state: %s" % vm.state)
    self.assertEqual(
        vm.state,
        "Running",
        "Deployed VM should be in Running state"
    )

    cancel_maintenance(first_host)

    # Ongoing activity on VM 2 before the second maintenance round
    snapshot_and_template(virtual_machine_2)

    self.debug("Enabling maintenance mode for host %s" % second_host)
    enable_maintenance(second_host)
    self.debug("Maintenance mode enabled for host: %s" % second_host)
    wait_ssvms()

    # Poll and check the status of VMs
    vms = wait_for_running(
        "VM",
        account=self.account.account.name,
        domainid=self.account.account.domainid
    )
    for vm in vms:
        self.debug(
            "VM states after enabling maintenance mode on host: %s - %s" %
            (first_host, vm.state))
        self.assertEqual(
            vm.state,
            "Running",
            "Deployed VM should be in Running state"
        )

    # Spawn an instance on other host
    virtual_machine_3 = deploy_vm()
    vm = list_vms_checked(id=virtual_machine_3.id)[0]
    self.debug("Deployed VM on host: %s" % vm.hostid)
    self.debug("VM 3 state: %s" % vm.state)
    self.assertEqual(
        vm.state,
        "Running",
        "Deployed VM should be in Running state"
    )

    # Should be able to SSH VM
    check_ssh()

    cancel_maintenance(second_host)
    wait_ssvms()
    return
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
|
|
@ -1,880 +0,0 @@
|
|||
# -*- encoding: utf-8 -*-
|
||||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
""" P1 tests for Resource limits
|
||||
"""
|
||||
#Import Local Modules
|
||||
from cloudstackTestCase import *
|
||||
from cloudstackAPI import *
|
||||
from testcase.libs.utils import *
|
||||
from testcase.libs.base import *
|
||||
from testcase.libs.common import *
|
||||
import datetime
|
||||
|
||||
class Services:
    """Static configuration consumed by the resource-limit tests.

    Instantiating the class and reading ``.services`` yields a plain dict
    describing the domain, project, accounts, offerings, server and
    template fixtures the test cases create through the CloudStack API.
    """

    def __init__(self):
        domain_cfg = {
            "name": "Domain",
        }
        project_cfg = {
            "name": "Project",
            "displaytext": "Test project",
        }
        # Random characters are appended to usernames for uniqueness.
        admin_account_cfg = {
            "email": "administrator@clogeny.com",
            "firstname": "Test",
            "lastname": "User",
            "username": "test",
            "password": "fr3sca",
        }
        user_account_cfg = {
            "email": "administrator@clogeny.com",
            "firstname": "User",
            "lastname": "User",
            "username": "User",
            "password": "fr3sca",
        }
        offering_cfg = {
            "name": "Tiny Instance",
            "displaytext": "Tiny Instance",
            "cpunumber": 1,
            "cpuspeed": 100,  # in MHz
            "memory": 64,  # In MBs
        }
        disk_offering_cfg = {
            "displaytext": "Tiny Disk Offering",
            "name": "Tiny Disk Offering",
            "disksize": 1,
        }
        server_cfg = {
            "displayname": "TestVM",
            "username": "root",
            "password": "password",
            "ssh_port": 22,
            "hypervisor": 'XenServer',
            "privateport": 22,
            "publicport": 22,
            "protocol": 'TCP',
        }
        # Cent OS 5.3 (64 bit)
        os_type = '471a4b5b-5523-448f-9608-7d6218995733'
        template_cfg = {
            "displaytext": "Cent OS Template",
            "name": "Cent OS Template",
            "ostypeid": os_type,
            "templatefilter": 'self',
        }
        self.services = {
            "domain": domain_cfg,
            "project": project_cfg,
            "account": admin_account_cfg,
            "user": user_account_cfg,
            "service_offering": offering_cfg,
            "disk_offering": disk_offering_cfg,
            "volume": {
                "diskname": "Test Volume",
            },
            "server": server_cfg,
            "template": template_cfg,
            "ostypeid": os_type,
            "sleep": 60,
            "timeout": 10,
            "mode": 'advanced',
        }
||||
|
||||
|
||||
class TestProjectLimits(cloudstackTestCase):
    """P1 tests: resource limits applied at project scope.

    ``test_01`` drives the limits as a domain admin; ``test_02`` (skipped
    upstream) would drive them as a plain project member.
    """

    @classmethod
    def setUpClass(cls):
        """One-time setup: zone lookup, test domain, admin + user accounts."""
        cls.api_client = super(
            TestProjectLimits,
            cls
        ).getClsTestClient().getApiClient()
        cls.services = Services().services
        # Get Zone
        cls.zone = get_zone(cls.api_client, cls.services)

        # Create domains, account etc.
        cls.domain = Domain.create(
            cls.api_client,
            cls.services["domain"]
        )
        cls.admin = Account.create(
            cls.api_client,
            cls.services["account"],
            admin=True,
            domainid=cls.domain.id
        )
        cls.user = Account.create(
            cls.api_client,
            cls.services["user"],
            domainid=cls.domain.id
        )
        # Accounts must be destroyed before the domain that contains them.
        cls._cleanup = [
            cls.admin,
            cls.user,
            cls.domain
        ]
        return

    @classmethod
    def tearDownClass(cls):
        """Destroy the shared domain/accounts created in setUpClass."""
        try:
            # Cleanup resources used
            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    def setUp(self):
        """Per-test setup: fresh API/DB clients and an empty cleanup list."""
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        self.cleanup = []
        return

    def tearDown(self):
        """Destroy per-test resources (projects etc.) queued in self.cleanup."""
        try:
            # Clean up, terminate the created accounts, domains etc
            cleanup_resources(self.apiclient, self.cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    def _assert_limits_list(self, resource_limits):
        """Common sanity checks on a listResourceLimits response."""
        self.assertEqual(
            isinstance(resource_limits, list),
            True,
            "List resource API should return a valid list"
        )
        self.assertNotEqual(
            len(resource_limits),
            0,
            "List resource API response should not be empty"
        )

    def _create_project_with_unit_limits(self):
        """Create a project as the domain admin and cap every resource at 1.

        Resource types: 0-Instance, 1-IP, 2-Volume, 3-Snapshot, 4-Template.
        Registers the project for cleanup, verifies it lists correctly,
        reduces each of its limits to 1 and verifies the reduction took.
        Returns the created project.  (This sequence was duplicated
        verbatim in test_01 and test_02 before being factored out here.)
        """
        project = Project.create(
            self.apiclient,
            self.services["project"],
            account=self.admin.account.name,
            domainid=self.admin.account.domainid
        )
        # Cleanup created project at end of test
        self.cleanup.append(project)
        self.debug("Created project with domain admin with ID: %s" %
                   project.id)

        list_projects_reponse = Project.list(
            self.apiclient,
            id=project.id,
            listall=True
        )
        self.assertEqual(
            isinstance(list_projects_reponse, list),
            True,
            "Check for a valid list projects response"
        )
        list_project = list_projects_reponse[0]
        self.assertNotEqual(
            len(list_projects_reponse),
            0,
            "Check list project response returns a valid project"
        )
        self.assertEqual(
            project.name,
            list_project.name,
            "Check project name from list response"
        )

        # Get the resource limits for ROOT domain
        resource_limits = list_resource_limits(self.apiclient)
        self._assert_limits_list(resource_limits)

        # Reduce every project resource limit to 1
        for resource in resource_limits:
            update_resource_limit(
                self.apiclient,
                resource.resourcetype,
                max=1,
                projectid=project.id
            )
            self.debug(
                "Updating resource (ID: %s) limit for project: %s" % (
                    resource,
                    project.id
                ))
        resource_limits = list_resource_limits(
            self.apiclient,
            projectid=project.id
        )
        self._assert_limits_list(resource_limits)
        for resource in resource_limits:
            self.assertEqual(
                resource.max,
                1,
                "Resource limit should be updated to 1"
            )
        return project

    def test_01_project_limits(self):
        """Test project limits.

        Validates the following:
        1. New projects inherit the default resource limits configured by
           the ROOT admin.
        2. Limits can be reduced by the project owner and apply to VMs,
           volumes, snapshots and IP addresses, independent of account
           resource limits.
        3. A project limit can never be raised above the domain limit.
        """
        project = self._create_project_with_unit_limits()

        # Get the resource limits for domain
        resource_limits = list_resource_limits(
            self.apiclient,
            domainid=self.domain.id
        )
        self._assert_limits_list(resource_limits)

        for resource in resource_limits:
            # Update domain resource limits to 2
            update_resource_limit(
                self.apiclient,
                resource.resourcetype,
                domainid=self.domain.id,
                max=2
            )
            self.debug(
                "Attempting to update project: %s resource limit to: %s" % (
                    project.id,
                    3
                ))
            # Raising the project limit to 3 (> domain max of 2) must be
            # rejected by the API.
            # BUGFIX: the original placed a self.debug() referencing the
            # undefined name `max_value` inside this assertRaises block;
            # the resulting NameError satisfied assertRaises and masked
            # the real check.
            with self.assertRaises(Exception):
                # Update project resource limits to 3
                update_resource_limit(
                    self.apiclient,
                    resource.resourcetype,
                    max=3,
                    projectid=project.id
                )
        return

    @unittest.skip("No provision for updating resource limits from account through API")
    def test_02_project_limits_normal_user(self):
        """Test project limits as a non-admin project member.

        Validates the following:
        1. Create a project and reduce its limits as a domain admin;
           verify the resource counts are updated.
        2. Attempting the same update as a project member who is not a
           domain admin should fail.
        """
        project = self._create_project_with_unit_limits()

        self.debug("Adding %s user to project: %s" % (
            self.user.account.name,
            project.name
        ))
        # Add user to the project
        project.addAccount(
            self.apiclient,
            self.user.account.name,
        )

        # Get the resource limits for domain
        resource_limits = list_resource_limits(
            self.apiclient,
            domainid=self.domain.id
        )
        self._assert_limits_list(resource_limits)

        for resource in resource_limits:
            # with self.assertRaises(Exception):  -- left disabled upstream
            self.debug(
                "Attempting to update resource limit by user: %s" % (
                    self.user.account.name
                ))
            # Update project resource limits to 3
            update_resource_limit(
                self.apiclient,
                resource.resourcetype,
                account=self.user.account.name,
                domainid=self.user.account.domainid,
                max=3,
                projectid=project.id
            )
        return
|
||||
|
||||
|
||||
class TestResourceLimitsProject(cloudstackTestCase):
|
||||
|
||||
@classmethod
def setUpClass(cls):
    """One-time setup: zone/template lookup plus the domain, account,
    project and offerings shared by every test in this class."""
    cls.api_client = super(TestResourceLimitsProject, cls).getClsTestClient().getApiClient()
    cls.services = Services().services
    # Get Zone, Domain and templates
    cls.zone = get_zone(cls.api_client, cls.services)

    cls.template = get_template(
        cls.api_client,
        cls.zone.id,
        cls.services["ostypeid"]
    )
    # VMs deployed from services["server"] must land in this zone.
    cls.services["server"]["zoneid"] = cls.zone.id

    # Create Domains, Account etc
    cls.domain = Domain.create(
        cls.api_client,
        cls.services["domain"]
    )

    cls.account = Account.create(
        cls.api_client,
        cls.services["account"],
        domainid=cls.domain.id
    )
    # Create project as a domain admin
    cls.project = Project.create(
        cls.api_client,
        cls.services["project"],
        account=cls.account.account.name,
        domainid=cls.account.account.domainid
    )
    # NOTE(review): this replaces the "account" services *dict* with the
    # account's name string — presumably downstream helpers expect the
    # name here; confirm before changing.
    cls.services["account"] = cls.account.account.name

    # Create Service offering and disk offerings etc
    cls.service_offering = ServiceOffering.create(
        cls.api_client,
        cls.services["service_offering"]
    )
    cls.disk_offering = DiskOffering.create(
        cls.api_client,
        cls.services["disk_offering"]
    )
    # Teardown order matters: project and offerings before the account,
    # and the account before the domain that contains it.
    cls._cleanup = [
        cls.project,
        cls.service_offering,
        cls.disk_offering,
        cls.account,
        cls.domain
    ]
    return
|
||||
|
||||
@classmethod
def tearDownClass(cls):
    """Destroy every class-level resource created in setUpClass."""
    try:
        # Cleanup resources used
        cleanup_resources(cls.api_client, cls._cleanup)
    except Exception as e:
        # Re-raise so cleanup failures are visible in the test report.
        raise Exception("Warning: Exception during cleanup : %s" % e)
    return
|
||||
|
||||
def setUp(self):
    """Per-test setup: fresh API/DB clients and an empty cleanup list."""
    self.apiclient = self.testClient.getApiClient()
    self.dbclient = self.testClient.getDbConnection()
    # Resources appended here are destroyed in tearDown.
    self.cleanup = []
    return
|
||||
|
||||
def tearDown(self):
    """Destroy the per-test resources queued in self.cleanup."""
    try:
        # Clean up, terminate the created instance, volumes and snapshots
        cleanup_resources(self.apiclient, self.cleanup)
    except Exception as e:
        # Re-raise so cleanup failures are visible in the test report.
        raise Exception("Warning: Exception during cleanup : %s" % e)
    return
|
||||
|
||||
def test_03_vm_per_project(self):
    """Test VM limit per project.

    Validates the following:
    1. Set max VM per project to 2.
    2. Deploy 2 VMs in the project; verify each reports Running.
    3. Try to deploy a 3rd VM; the deployment must raise.
    """

    self.debug(
        "Updating instance resource limits for project: %s" %
        self.project.id)
    # Resource type 0 == Instance: cap the project at 2 VMs.
    update_resource_limit(
        self.apiclient,
        0,  # Instance
        max=2,
        projectid=self.project.id
    )

    self.debug("Deploying VM for project: %s" % self.project.id)
    virtual_machine_1 = VirtualMachine.create(
        self.apiclient,
        self.services["server"],
        templateid=self.template.id,
        serviceofferingid=self.service_offering.id,
        projectid=self.project.id
    )
    self.cleanup.append(virtual_machine_1)
    # Verify VM state
    self.assertEqual(
        virtual_machine_1.state,
        'Running',
        "Check VM state is Running or not"
    )
    self.debug("Deploying VM for project: %s" % self.project.id)
    virtual_machine_2 = VirtualMachine.create(
        self.apiclient,
        self.services["server"],
        templateid=self.template.id,
        serviceofferingid=self.service_offering.id,
        projectid=self.project.id
    )
    self.cleanup.append(virtual_machine_2)
    # Verify VM state
    self.assertEqual(
        virtual_machine_2.state,
        'Running',
        "Check VM state is Running or not"
    )
    # Exception should be raised for the THIRD instance (the limit is 2;
    # the original comment incorrectly said "second instance").
    with self.assertRaises(Exception):
        VirtualMachine.create(
            self.apiclient,
            self.services["server"],
            templateid=self.template.id,
            serviceofferingid=self.service_offering.id,
            projectid=self.project.id
        )
    return
|
||||
|
||||
def test_04_publicip_per_project(self):
|
||||
"""Test Public IP limit per project
|
||||
"""
|
||||
|
||||
# Validate the following
|
||||
# 1. set max no of IPs per project to 2.
|
||||
# 2. Create an account in this domain
|
||||
# 3. Create 1 VM in this domain
|
||||
# 4. Acquire 1 IP in the domain. IP should be successfully acquired
|
||||
# 5. Try to acquire 3rd IP in this domain. It should give the user an
|
||||
# appropriate error and an alert should be generated.
|
||||
|
||||
self.debug(
|
||||
"Updating public IP resource limits for project: %s" %
|
||||
self.project.id)
|
||||
# Set usage_vm=1 for Account 1
|
||||
update_resource_limit(
|
||||
self.apiclient,
|
||||
1, # Public Ip
|
||||
max=2,
|
||||
projectid=self.project.id
|
||||
)
|
||||
|
||||
self.debug("Deploying VM for Project: %s" % self.project.id)
|
||||
virtual_machine_1 = VirtualMachine.create(
|
||||
self.apiclient,
|
||||
self.services["server"],
|
||||
templateid=self.template.id,
|
||||
serviceofferingid=self.service_offering.id,
|
||||
projectid=self.project.id
|
||||
)
|
||||
self.cleanup.append(virtual_machine_1)
|
||||
# Verify VM state
|
||||
self.assertEqual(
|
||||
virtual_machine_1.state,
|
||||
'Running',
|
||||
"Check VM state is Running or not"
|
||||
)
|
||||
networks = Network.list(
|
||||
self.apiclient,
|
||||
projectid=self.project.id,
|
||||
listall=True
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(networks, list),
|
||||
True,
|
||||
"Check list networks response returns a valid response"
|
||||
)
|
||||
self.assertNotEqual(
|
||||
len(networks),
|
||||
0,
|
||||
"Check list networks response returns a valid network"
|
||||
)
|
||||
network = networks[0]
|
||||
self.debug("Associating public IP for project: %s" %
|
||||
self.project.id)
|
||||
public_ip_1 = PublicIPAddress.create(
|
||||
self.apiclient,
|
||||
zoneid=virtual_machine_1.zoneid,
|
||||
services=self.services["server"],
|
||||
networkid=network.id,
|
||||
projectid=self.project.id
|
||||
)
|
||||
self.cleanup.append(public_ip_1)
|
||||
# Verify Public IP state
|
||||
self.assertEqual(
|
||||
public_ip_1.ipaddress.state in [
|
||||
'Allocated',
|
||||
'Allocating'
|
||||
],
|
||||
True,
|
||||
"Check Public IP state is allocated or not"
|
||||
)
|
||||
|
||||
# Exception should be raised for second Public IP
|
||||
with self.assertRaises(Exception):
|
||||
public_ip_2 = PublicIPAddress.create(
|
||||
self.apiclient,
|
||||
zoneid=virtual_machine_1.zoneid,
|
||||
services=self.services["server"],
|
||||
networkid=network.id,
|
||||
projectid=self.project.id
|
||||
)
|
||||
return
|
||||
|
||||
def test_05_snapshots_per_project(self):
|
||||
"""Test Snapshot limit per project
|
||||
"""
|
||||
|
||||
# Validate the following
|
||||
# 1. set max no of snapshots per project to 1.
|
||||
# 2. Create one snapshot in the project. Snapshot should be
|
||||
# successfully created
|
||||
# 5. Try to create another snapshot in this project. It should give
|
||||
# user an appropriate error and an alert should be generated.
|
||||
|
||||
self.debug(
|
||||
"Updating snapshot resource limits for project: %s" %
|
||||
self.project.id)
|
||||
# Set usage_vm=1 for Account 1
|
||||
update_resource_limit(
|
||||
self.apiclient,
|
||||
3, # Snapshot
|
||||
max=1,
|
||||
projectid=self.project.id
|
||||
)
|
||||
|
||||
self.debug("Deploying VM for account: %s" % self.account.account.name)
|
||||
virtual_machine_1 = VirtualMachine.create(
|
||||
self.apiclient,
|
||||
self.services["server"],
|
||||
templateid=self.template.id,
|
||||
serviceofferingid=self.service_offering.id,
|
||||
projectid=self.project.id
|
||||
)
|
||||
self.cleanup.append(virtual_machine_1)
|
||||
# Verify VM state
|
||||
self.assertEqual(
|
||||
virtual_machine_1.state,
|
||||
'Running',
|
||||
"Check VM state is Running or not"
|
||||
)
|
||||
|
||||
# Get the Root disk of VM
|
||||
volumes = list_volumes(
|
||||
self.apiclient,
|
||||
projectid=self.project.id,
|
||||
type='ROOT',
|
||||
listall=True
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(volumes, list),
|
||||
True,
|
||||
"Check for list volume response return valid data"
|
||||
)
|
||||
volume = volumes[0]
|
||||
|
||||
self.debug("Creating snapshot from volume: %s" % volumes[0].id)
|
||||
# Create a snapshot from the ROOTDISK
|
||||
snapshot_1 = Snapshot.create(self.apiclient,
|
||||
volumes[0].id,
|
||||
projectid=self.project.id
|
||||
)
|
||||
self.cleanup.append(snapshot_1)
|
||||
# Verify Snapshot state
|
||||
self.assertEqual(
|
||||
snapshot_1.state in [
|
||||
'BackedUp',
|
||||
'CreatedOnPrimary'
|
||||
],
|
||||
True,
|
||||
"Check Snapshot state is Running or not"
|
||||
)
|
||||
|
||||
# Exception should be raised for second snapshot
|
||||
with self.assertRaises(Exception):
|
||||
Snapshot.create(self.apiclient,
|
||||
volumes[0].id,
|
||||
projectid=self.project.id
|
||||
)
|
||||
return
|
||||
|
||||
def test_06_volumes_per_project(self):
|
||||
"""Test Volumes limit per project
|
||||
"""
|
||||
|
||||
# Validate the following
|
||||
# 1. set max no of volume per project to 1.
|
||||
# 2. Create 1 VM in this project
|
||||
# 4. Try to Create another VM in the project. It should give the user
|
||||
# an appropriate error that Volume limit is exhausted and an alert
|
||||
# should be generated.
|
||||
|
||||
self.debug(
|
||||
"Updating volume resource limits for project: %s" %
|
||||
self.project.id)
|
||||
# Set usage_vm=1 for Account 1
|
||||
update_resource_limit(
|
||||
self.apiclient,
|
||||
2, # Volume
|
||||
max=2,
|
||||
projectid=self.project.id
|
||||
)
|
||||
|
||||
self.debug("Deploying VM for project: %s" % self.project.id)
|
||||
virtual_machine_1 = VirtualMachine.create(
|
||||
self.apiclient,
|
||||
self.services["server"],
|
||||
templateid=self.template.id,
|
||||
serviceofferingid=self.service_offering.id,
|
||||
projectid=self.project.id
|
||||
)
|
||||
self.cleanup.append(virtual_machine_1)
|
||||
# Verify VM state
|
||||
self.assertEqual(
|
||||
virtual_machine_1.state,
|
||||
'Running',
|
||||
"Check VM state is Running or not"
|
||||
)
|
||||
|
||||
# Exception should be raised for second volume
|
||||
with self.assertRaises(Exception):
|
||||
Volume.create(
|
||||
self.apiclient,
|
||||
self.services["volume"],
|
||||
zoneid=self.zone.id,
|
||||
diskofferingid=self.disk_offering.id,
|
||||
projectid=self.project.id
|
||||
)
|
||||
return
|
||||
|
||||
def test_07_templates_per_project(self):
|
||||
"""Test Templates limit per project
|
||||
"""
|
||||
|
||||
# Validate the following
|
||||
# 1. set max no of templates per project to 1.
|
||||
# 2. Create a template in this project. Both template should be in
|
||||
# ready state
|
||||
# 3. Try create 2nd template in the project. It should give the user
|
||||
# appropriate error and an alert should be generated.
|
||||
|
||||
# Reset the volume limits
|
||||
update_resource_limit(
|
||||
self.apiclient,
|
||||
2, # Volume
|
||||
max=5,
|
||||
projectid=self.project.id
|
||||
)
|
||||
self.debug(
|
||||
"Updating template resource limits for domain: %s" %
|
||||
self.account.account.domainid)
|
||||
# Set usage_vm=1 for Account 1
|
||||
update_resource_limit(
|
||||
self.apiclient,
|
||||
4, # Template
|
||||
max=1,
|
||||
projectid=self.project.id
|
||||
)
|
||||
|
||||
self.debug("Deploying VM for account: %s" % self.account.account.name)
|
||||
virtual_machine_1 = VirtualMachine.create(
|
||||
self.apiclient,
|
||||
self.services["server"],
|
||||
templateid=self.template.id,
|
||||
serviceofferingid=self.service_offering.id,
|
||||
projectid=self.project.id
|
||||
)
|
||||
self.cleanup.append(virtual_machine_1)
|
||||
# Verify VM state
|
||||
self.assertEqual(
|
||||
virtual_machine_1.state,
|
||||
'Running',
|
||||
"Check VM state is Running or not"
|
||||
)
|
||||
virtual_machine_1.stop(self.apiclient)
|
||||
# Get the Root disk of VM
|
||||
volumes = list_volumes(
|
||||
self.apiclient,
|
||||
projectid=self.project.id,
|
||||
type='ROOT',
|
||||
listall=True
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(volumes, list),
|
||||
True,
|
||||
"Check for list volume response return valid data"
|
||||
)
|
||||
volume = volumes[0]
|
||||
|
||||
self.debug("Creating template from volume: %s" % volume.id)
|
||||
# Create a template from the ROOTDISK
|
||||
template_1 = Template.create(
|
||||
self.apiclient,
|
||||
self.services["template"],
|
||||
volumeid=volume.id,
|
||||
projectid=self.project.id
|
||||
)
|
||||
|
||||
self.cleanup.append(template_1)
|
||||
# Verify Template state
|
||||
self.assertEqual(
|
||||
template_1.isready,
|
||||
True,
|
||||
"Check Template is in ready state or not"
|
||||
)
|
||||
|
||||
# Exception should be raised for second template
|
||||
with self.assertRaises(Exception):
|
||||
Template.create(
|
||||
self.apiclient,
|
||||
self.services["template"],
|
||||
volumeid=volume.id,
|
||||
projectid=self.project.id
|
||||
)
|
||||
return
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
|
|
@ -1,611 +0,0 @@
|
|||
# -*- encoding: utf-8 -*-
|
||||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
""" P1 tests for Templates
|
||||
"""
|
||||
#Import Local Modules
|
||||
from cloudstackTestCase import *
|
||||
from cloudstackAPI import *
|
||||
from testcase.libs.utils import *
|
||||
from testcase.libs.base import *
|
||||
from testcase.libs.common import *
|
||||
import urllib
|
||||
from random import random
|
||||
#Import System modules
|
||||
import time
|
||||
|
||||
|
||||
class Services:
    """Test Templates Services

    Static test data consumed by the template test cases below.
    """

    def __init__(self):
        # Shared OS type UUID reused by the template configs
        ostypeid = '5776c0d2-f331-42db-ba3a-29f1f8319bc9'

        account = {
            "email": "test@test.com",
            "firstname": "Test",
            "lastname": "User",
            "username": "test",
            # Random characters are appended for unique username
            "password": "fr3sca",
        }
        service_offering = {
            "name": "Tiny Instance",
            "displaytext": "Tiny Instance",
            "cpunumber": 1,
            "cpuspeed": 100,    # in MHz
            "memory": 64,       # In MBs
        }
        disk_offering = {
            "displaytext": "Small",
            "name": "Small",
            "disksize": 1,
        }
        virtual_machine = {
            "displayname": "testVM",
            "hypervisor": 'XenServer',
            "protocol": 'TCP',
            "ssh_port": 22,
            "username": "root",
            "password": "password",
            "privateport": 22,
            "publicport": 22,
        }
        # Configs for different Template formats (e.g. raw image, zip etc),
        # keyed by an integer index
        templates = {
            0: {
                "displaytext": "Public Template",
                "name": "Public template",
                "ostypeid": ostypeid,
                "url": "http://download.cloud.com/releases/2.0.0/UbuntuServer-10-04-64bit.vhd.bz2",
                "hypervisor": 'XenServer',
                "format": 'VHD',
                "isfeatured": True,
                "ispublic": True,
                "isextractable": True,
            },
        }
        template = {
            "displaytext": "Cent OS Template",
            "name": "Cent OS Template",
            "ostypeid": ostypeid,
            "templatefilter": 'self',
        }

        self.services = {
            "account": account,
            "service_offering": service_offering,
            "disk_offering": disk_offering,
            "virtual_machine": virtual_machine,
            "volume": {
                "diskname": "Test Volume",
            },
            "templates": templates,
            "template": template,
            "templatefilter": 'self',
            "destzoneid": 2,        # For Copy template (Destination zone)
            "ostypeid": ostypeid,
            "sleep": 60,
            "timeout": 10,
            "mode": 'advanced',     # Networking mode: Advanced, basic
        }
|
||||
|
||||
|
||||
@unittest.skip("Open questions")
class TestCreateTemplate(cloudstackTestCase):
    """Register templates of various formats and deploy VMs from them."""

    def setUp(self):
        """Fetch fresh API/DB clients and reset the per-test cleanup list."""
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        self.cleanup = []
        return

    def tearDown(self):
        """Close the DB connection and destroy per-test resources."""
        try:
            self.dbclient.close()
            cleanup_resources(self.apiclient, self.cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    @classmethod
    def setUpClass(cls):
        """Create the shared account and service offering for all tests."""
        cls.services = Services().services
        cls.api_client = super(TestCreateTemplate, cls).getClsTestClient().getApiClient()

        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.api_client, cls.services)
        cls.zone = get_zone(cls.api_client, cls.services)
        cls.services["virtual_machine"]["zoneid"] = cls.zone.id

        cls.service_offering = ServiceOffering.create(
            cls.api_client,
            cls.services["service_offering"]
        )
        cls.account = Account.create(
            cls.api_client,
            cls.services["account"],
            domainid=cls.domain.id
        )
        # NOTE(review): this replaces the account *config dict* with the
        # account name string, mirroring the original behaviour.
        cls.services["account"] = cls.account.account.name

        cls._cleanup = [
            cls.account,
            cls.service_offering
        ]
        return

    @classmethod
    def tearDownClass(cls):
        """Destroy the shared account and service offering."""
        try:
            cls.api_client = super(TestCreateTemplate, cls).getClsTestClient().getApiClient()
            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    def test_01_create_template(self):
        """Test create public & private template
        """

        # For every configured template format: register it, wait for the
        # download to complete, confirm it shows up ready in listTemplates,
        # then deploy a VM from it and check the VM reaches Running.
        for fmt_key, tmpl_cfg in self.services["templates"].items():
            # Register new template
            registered = Template.register(
                self.apiclient,
                tmpl_cfg,
                zoneid=self.zone.id,
                account=self.account.account.name,
                domainid=self.account.account.domainid
            )
            self.debug(
                "Registered a template of format: %s with ID: %s" % (
                    tmpl_cfg["format"],
                    registered.id
                ))
            # Wait for template to download
            registered.download(self.apiclient)
            self.cleanup.append(registered)

            # Wait for template status to be changed across, then poll
            time.sleep(self.services["sleep"])
            retries_left = self.services["timeout"]
            while True:
                list_template_response = list_templates(
                    self.apiclient,
                    templatefilter=self.services["templatefilter"],
                    id=registered.id,
                    zoneid=self.zone.id,
                    account=self.account.account.name,
                    domainid=self.account.account.domainid
                )
                if isinstance(list_template_response, list):
                    break
                if retries_left == 0:
                    raise Exception("List template failed!")
                time.sleep(5)
                retries_left = retries_left - 1

            # Verify template response to check whether template added
            # successfully
            self.assertEqual(
                isinstance(list_template_response, list),
                True,
                "Check for list template response return valid data"
            )
            self.assertNotEqual(
                len(list_template_response),
                0,
                "Check template available in List Templates"
            )
            template_response = list_template_response[0]
            self.assertEqual(
                template_response.isready,
                True,
                "Check display text of newly created template"
            )

            # Deploy new virtual machine using template
            virtual_machine = VirtualMachine.create(
                self.apiclient,
                self.services["virtual_machine"],
                templateid=registered.id,
                accountid=self.account.account.name,
                domainid=self.account.account.domainid,
                serviceofferingid=self.service_offering.id,
                mode=self.services["mode"]
            )
            self.debug("creating an instance with template ID: %s" % registered.id)
            vm_response = list_virtual_machines(
                self.apiclient,
                id=virtual_machine.id,
                account=self.account.account.name,
                domainid=self.account.account.domainid
            )
            self.assertEqual(
                isinstance(vm_response, list),
                True,
                "Check for list VMs response after VM deployment"
            )
            # Verify VM response to check whether VM deployment was
            # successful
            self.assertNotEqual(
                len(vm_response),
                0,
                "Check VMs available in List VMs response"
            )
            vm = vm_response[0]
            self.assertEqual(
                vm.state,
                'Running',
                "Check the state of VM created from Template"
            )
        return
|
||||
|
||||
|
||||
class TestTemplates(cloudstackTestCase):
    """Template lifecycle tests: create from volume/snapshot, copy, delete."""

    @classmethod
    def setUpClass(cls):
        """Create an account, a stopped VM and a template from its root disk."""
        cls.services = Services().services
        cls.api_client = super(TestTemplates, cls).getClsTestClient().getApiClient()

        # Get Zone, templates etc
        cls.domain = get_domain(cls.api_client, cls.services)
        cls.zone = get_zone(cls.api_client, cls.services)

        builtin_template = get_template(
            cls.api_client,
            cls.zone.id,
            cls.services["ostypeid"]
        )
        cls.services["virtual_machine"]["zoneid"] = cls.zone.id
        cls.account = Account.create(
            cls.api_client,
            cls.services["account"],
            domainid=cls.domain.id
        )
        # NOTE(review): replaces the account config dict with the account
        # name string, mirroring the original behaviour.
        cls.services["account"] = cls.account.account.name
        cls.service_offering = ServiceOffering.create(
            cls.api_client,
            cls.services["service_offering"]
        )

        # create virtual machine
        cls.virtual_machine = VirtualMachine.create(
            cls.api_client,
            cls.services["virtual_machine"],
            templateid=builtin_template.id,
            accountid=cls.account.account.name,
            domainid=cls.account.account.domainid,
            serviceofferingid=cls.service_offering.id,
        )
        # Stop virtual machine (templates are built from stopped VMs)
        cls.virtual_machine.stop(cls.api_client)

        retries_left = cls.services["timeout"]
        # Wait before server has been successfully stopped
        time.sleep(cls.services["sleep"])

        while True:
            list_volume = list_volumes(
                cls.api_client,
                virtualmachineid=cls.virtual_machine.id,
                type='ROOT',
                listall=True
            )
            if isinstance(list_volume, list):
                break
            if retries_left == 0:
                raise Exception("List volumes failed.")
            time.sleep(5)
            retries_left = retries_left - 1

        cls.volume = list_volume[0]

        # Create template from the root volume
        cls.template = Template.create(
            cls.api_client,
            cls.services["template"],
            cls.volume.id
        )
        cls._cleanup = [
            cls.service_offering,
            cls.account,
        ]

    @classmethod
    def tearDownClass(cls):
        """Destroy the shared account and service offering."""
        try:
            cls.api_client = super(TestTemplates, cls).getClsTestClient().getApiClient()
            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    def setUp(self):
        """Fetch fresh API/DB clients and reset the per-test cleanup list."""
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        self.cleanup = []
        return

    def tearDown(self):
        """Destroy per-test resources (templates, VMs)."""
        try:
            cleanup_resources(self.apiclient, self.cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    def test_01_create_template_volume(self):
        """Test Create template from volume
        """

        # Validate the following:
        # 1. Deploy new VM using the template created from Volume
        # 2. VM should be in Up and Running state

        deployed_vm = VirtualMachine.create(
            self.apiclient,
            self.services["virtual_machine"],
            templateid=self.template.id,
            accountid=self.account.account.name,
            domainid=self.account.account.domainid,
            serviceofferingid=self.service_offering.id,
        )
        self.debug("creating an instance with template ID: %s" % self.template.id)
        self.cleanup.append(deployed_vm)

        vm_response = list_virtual_machines(
            self.apiclient,
            id=deployed_vm.id,
            account=self.account.account.name,
            domainid=self.account.account.domainid
        )
        # Verify VM response to check whether VM deployment was successful
        self.assertNotEqual(
            len(vm_response),
            0,
            "Check VMs available in List VMs response"
        )
        vm = vm_response[0]
        self.assertEqual(
            vm.state,
            'Running',
            "Check the state of VM created from Template"
        )
        return

    def test_02_copy_template(self):
        """Test for copy template from one zone to another"""

        # Validate the following
        # 1. copy template should be successful and
        #    secondary storage should contain new copied template.

        self.debug(
            "Copying template from zone: %s to %s" % (
                self.template.id,
                self.services["destzoneid"]
            ))
        cmd = copyTemplate.copyTemplateCmd()
        cmd.id = self.template.id
        cmd.destzoneid = self.services["destzoneid"]
        cmd.sourcezoneid = self.zone.id
        self.apiclient.copyTemplate(cmd)

        # Verify template is copied to another zone using ListTemplates
        list_template_response = list_templates(
            self.apiclient,
            templatefilter=self.services["templatefilter"],
            id=self.template.id,
            zoneid=self.services["destzoneid"]
        )
        self.assertEqual(
            isinstance(list_template_response, list),
            True,
            "Check for list template response return valid list"
        )
        self.assertNotEqual(
            len(list_template_response),
            0,
            "Check template extracted in List Templates"
        )

        template_response = list_template_response[0]
        self.assertEqual(
            template_response.id,
            self.template.id,
            "Check ID of the downloaded template"
        )
        self.assertEqual(
            template_response.zoneid,
            self.services["destzoneid"],
            "Check zone ID of the copied template"
        )

        # Cleanup- Delete the copied template
        cmd = deleteTemplate.deleteTemplateCmd()
        cmd.id = self.template.id
        cmd.zoneid = self.services["destzoneid"]
        self.apiclient.deleteTemplate(cmd)
        return

    def test_03_delete_template(self):
        """Test Delete template
        """

        # Validate the following:
        # 1. Verify the template is shown in list templates response
        # 2. Delete it and verify it no longer appears

        # Verify template response for updated attributes
        list_template_response = list_templates(
            self.apiclient,
            templatefilter=self.services["template"]["templatefilter"],
            id=self.template.id,
            zoneid=self.zone.id
        )
        self.assertEqual(
            isinstance(list_template_response, list),
            True,
            "Check for list template response return valid list"
        )
        self.assertNotEqual(
            len(list_template_response),
            0,
            "Check template available in List Templates"
        )
        template_response = list_template_response[0]
        self.assertEqual(
            template_response.id,
            self.template.id,
            "Check display text of updated template"
        )

        self.debug("Deleting template: %s" % self.template)
        # Delete the template
        self.template.delete(self.apiclient)
        self.debug("Delete template: %s successful" % self.template)

        list_template_response = list_templates(
            self.apiclient,
            templatefilter=self.services["template"]["templatefilter"],
            id=self.template.id,
            zoneid=self.zone.id
        )
        self.assertEqual(
            list_template_response,
            None,
            "Check template available in List Templates"
        )
        return

    def test_04_template_from_snapshot(self):
        """Create Template from snapshot
        """

        # Validate the following
        # 1. Snapshot the Root disk
        # 2. Create Template from snapshot
        # 3. Deploy Virtual machine using this template
        # 4. VM should be in running state

        volumes = list_volumes(
            self.apiclient,
            virtualmachineid=self.virtual_machine.id,
            type='ROOT',
            listall=True
        )
        root_volume = volumes[0]

        self.debug("Creating a snapshot from volume: %s" % root_volume.id)
        # Create a snapshot of volume
        snapshot = Snapshot.create(
            self.apiclient,
            root_volume.id,
            account=self.account.account.name,
            domainid=self.account.account.domainid
        )
        self.debug("Creating a template from snapshot: %s" % snapshot.id)
        # Generate template from the snapshot
        template = Template.create_from_snapshot(
            self.apiclient,
            snapshot,
            self.services["template"]
        )
        self.cleanup.append(template)

        # Verify created template
        templates = list_templates(
            self.apiclient,
            templatefilter=self.services["template"]["templatefilter"],
            id=template.id
        )
        self.assertNotEqual(
            templates,
            None,
            "Check if result exists in list item call"
        )
        self.assertEqual(
            templates[0].id,
            template.id,
            "Check new template id in list resources call"
        )

        self.debug("Deploying a VM from template: %s" % template.id)
        # Deploy new virtual machine using template
        deployed_vm = VirtualMachine.create(
            self.apiclient,
            self.services["virtual_machine"],
            templateid=template.id,
            accountid=self.account.account.name,
            domainid=self.account.account.domainid,
            serviceofferingid=self.service_offering.id,
        )
        self.cleanup.append(deployed_vm)

        vm_response = list_virtual_machines(
            self.apiclient,
            id=deployed_vm.id,
            account=self.account.account.name,
            domainid=self.account.account.domainid
        )
        self.assertEqual(
            isinstance(vm_response, list),
            True,
            "Check for list VM response return valid list"
        )
        # Verify VM response to check whether VM deployment was successful
        self.assertNotEqual(
            len(vm_response),
            0,
            "Check VMs available in List VMs response"
        )
        vm = vm_response[0]
        self.assertEqual(
            vm.state,
            'Running',
            "Check the state of VM created from Template"
        )
        return
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
|
|
@ -1,12 +0,0 @@
|
|||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
|
|
@ -1,13 +0,0 @@
|
|||
# -*- encoding: utf-8 -*-
|
||||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
File diff suppressed because it is too large
Load Diff
|
|
@ -1,471 +0,0 @@
|
|||
# -*- encoding: utf-8 -*-
|
||||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
"""Common functions
|
||||
"""
|
||||
|
||||
#Import Local Modules
|
||||
from cloudstackTestCase import *
|
||||
from cloudstackAPI import *
|
||||
import remoteSSHClient
|
||||
from utils import *
|
||||
from base import *
|
||||
|
||||
#Import System modules
|
||||
import time
|
||||
|
||||
def get_domain(apiclient, services=None):
    """Return a default domain.

    If ``services`` carries a "domainid" key, that specific domain is
    looked up; otherwise the first listed domain is returned.

    :param apiclient: CloudStack API client used for the lookup
    :param services: optional dict that may contain "domainid"
    :raises Exception: when no matching domain is found
    """
    cmd = listDomains.listDomainsCmd()
    if services and "domainid" in services:
        cmd.id = services["domainid"]

    domains = apiclient.listDomains(cmd)

    # Guard against an empty list as well as a non-list response; the
    # original indexed [0] unconditionally and could raise IndexError.
    if isinstance(domains, list) and domains:
        return domains[0]
    raise Exception("Failed to find specified domain.")
|
||||
|
||||
def get_zone(apiclient, services=None):
    """Return a default zone.

    If ``services`` carries a "zoneid" key, that specific zone is looked
    up; otherwise the first listed zone is returned.

    :param apiclient: CloudStack API client used for the lookup
    :param services: optional dict that may contain "zoneid"
    :raises Exception: when no matching zone is found
    """
    cmd = listZones.listZonesCmd()
    if services and "zoneid" in services:
        cmd.id = services["zoneid"]

    zones = apiclient.listZones(cmd)

    # Guard against an empty list as well as a non-list response; the
    # original indexed [0] unconditionally and could raise IndexError.
    if isinstance(zones, list) and zones:
        return zones[0]
    raise Exception("Failed to find specified zone.")
|
||||
|
||||
def get_pod(apiclient, zoneid, services=None):
    """Return a default pod for the specified zone.

    If ``services`` carries a "podid" key, that specific pod is looked
    up; otherwise the first pod of the zone is returned.

    :param apiclient: CloudStack API client used for the lookup
    :param zoneid: id of the zone whose pods are listed
    :param services: optional dict that may contain "podid"
    :raises Exception: when no matching pod is found
    """
    cmd = listPods.listPodsCmd()
    cmd.zoneid = zoneid

    if services and "podid" in services:
        cmd.id = services["podid"]

    pods = apiclient.listPods(cmd)

    # Guard against an empty list as well as a non-list response; the
    # original indexed [0] unconditionally and could raise IndexError.
    if isinstance(pods, list) and pods:
        return pods[0]
    raise Exception("Exception: Failed to find specified pod.")
|
||||
|
||||
def get_template(apiclient, zoneid, ostypeid=12, services=None):
    """Return a featured template in the zone matching the given OS type.

    :param apiclient: CloudStack API client used for the lookup
    :param zoneid: zone to search in
    :param ostypeid: OS type id the template must match (default 12,
        preserved from the original interface)
    :param services: optional dict that may contain "template" (a
        template id used to narrow the listing)
    :raises Exception: when no template with the OS type is found
    """
    cmd = listTemplates.listTemplatesCmd()
    cmd.templatefilter = 'featured'
    cmd.zoneid = zoneid

    if services and "template" in services:
        cmd.id = services["template"]

    list_templates = apiclient.listTemplates(cmd)

    # The API may return None when nothing matches; the original
    # iterated it unconditionally and could raise TypeError.
    if isinstance(list_templates, list):
        for template in list_templates:
            if template.ostypeid == ostypeid:
                return template

    # Removed the unreachable `return` that followed this raise.
    raise Exception("Exception: Failed to find template with OSTypeID: %s" %
                    ostypeid)
|
||||
|
||||
def download_systemplates_sec_storage(server, services):
    """Download the system VM templates onto secondary storage.

    SSHes into the management server, mounts the secondary-storage NFS
    export, runs the template install script, then unmounts the share.

    :param server: dict with "ipaddress", "port", "username", "password"
        of the management server
    :param services: dict with "mnt_dir", "sec_storage", "path",
        "command", "download_url" and "hypervisor" keys
    :raises Exception: when SSH access fails or the install script does
        not report success
    """
    try:
        # Login to management server
        ssh = remoteSSHClient.remoteSSHClient(
            server["ipaddress"],
            server["port"],
            server["username"],
            server["password"]
        )
    except Exception:
        # Fixed: the original error path read server["ipaddess"] (typo),
        # raising KeyError instead of the intended message; also fixed
        # the "failted" typo in the message text.
        raise Exception("SSH access failed for server with IP address: %s" %
                        server["ipaddress"])
    # Mount secondary storage on the management server, then install the
    # system VM template into it with the -F (force) flag.
    cmds = [
        "mkdir -p %s" % services["mnt_dir"],
        "mount -t nfs %s:/%s %s" % (
            services["sec_storage"],
            services["path"],
            services["mnt_dir"]
        ),
        "%s -m %s -u %s -h %s -F" % (
            services["command"],
            services["mnt_dir"],
            services["download_url"],
            services["hypervisor"]
        )
    ]
    for c in cmds:
        result = ssh.execute(c)

    # Only the output of the final (install) command is inspected.
    res = str(result)

    # Unmount the secondary storage
    ssh.execute("umount %s" % (services["mnt_dir"]))

    if res.count("Successfully installed system VM template") == 1:
        return
    raise Exception("Failed to download System Templates on Sec Storage")
|
||||
|
||||
def wait_for_ssvms(apiclient, zoneid, podid, interval=60):
    """Block until the SSVM and console proxy in the pod are Running.

    Polls every ``interval`` seconds, up to 40 attempts per system VM
    type.

    :param apiclient: CloudStack API client used for the polling
    :param zoneid: zone of the system VMs
    :param podid: pod of the system VMs
    :param interval: seconds to sleep between polls (default 60)
    :raises Exception: if either system VM fails to reach Running
    """
    time.sleep(interval)

    timeout = 40
    while True:
        list_ssvm_response = list_ssvms(
            apiclient,
            systemvmtype='secondarystoragevm',
            zoneid=zoneid,
            podid=podid
        )
        ssvm = list_ssvm_response[0]
        if ssvm.state == 'Running':
            break
        # Fixed: the original tested `timeout == 0` in an elif after the
        # "not Running" branch, so it was unreachable and the loop could
        # spin forever.  Check the budget before sleeping again.
        if timeout == 0:
            raise Exception("SSVM failed to come up")
        time.sleep(interval)
        timeout = timeout - 1

    timeout = 40
    while True:
        list_ssvm_response = list_ssvms(
            apiclient,
            systemvmtype='consoleproxy',
            zoneid=zoneid,
            podid=podid
        )
        cpvm = list_ssvm_response[0]
        if cpvm.state == 'Running':
            break
        # Same unreachable-timeout fix as for the SSVM loop above.
        if timeout == 0:
            raise Exception("CPVM failed to come up")
        time.sleep(interval)
        timeout = timeout - 1
    return
|
||||
|
||||
def download_builtin_templates(apiclient, zoneid, hypervisor, host, linklocalip, interval=60):
    """Block until the BUILTIN templates of a zone finish downloading.

    Opens the SSVM firewall (via a double SSH hop through ``host``),
    locates the BUILTIN template for the zone/hypervisor, and polls its
    download status until it completes.

    :param apiclient: CloudStack API client used for the polling
    :param zoneid: zone whose templates are checked
    :param hypervisor: hypervisor type of the templates
    :param host: dict with "ipaddress", "port", "username", "password"
        of the host used for the SSH hop
    :param linklocalip: link-local IP of the SSVM
    :param interval: seconds to sleep between polls (default 60)
    :raises Exception: when no BUILTIN template exists or the download
        ends in an error state
    """
    # Allow inbound traffic on the SSVM so the download can proceed.
    result = get_process_status(
        host["ipaddress"],
        host["port"],
        host["username"],
        host["password"],
        linklocalip,
        "iptables -P INPUT ACCEPT"
    )
    time.sleep(interval)
    # Find the BUILTIN templates for the given zone and hypervisor.
    list_template_response = list_templates(
        apiclient,
        hypervisor=hypervisor,
        zoneid=zoneid,
        templatefilter='self'
    )

    if not isinstance(list_template_response, list):
        raise Exception("Failed to download BUILTIN templates")

    # Pick the (last) BUILTIN template id from the listing.
    templateid = None
    for template in list_template_response:
        if template.templatetype == "BUILTIN":
            templateid = template.id

    # Fixed: the original carried templateid=None into the polling loop
    # when no BUILTIN template existed, polling the wrong result set
    # indefinitely.  Fail fast instead.
    if templateid is None:
        raise Exception("Failed to find BUILTIN template in zone: %s" %
                        zoneid)

    # Sleep to ensure the template has entered the downloading state
    # after secondary storage was added.
    time.sleep(interval)
    while True:
        template_response = list_templates(
            apiclient,
            id=templateid,
            zoneid=zoneid,
            templatefilter='self'
        )
        template = template_response[0]
        # template.status values:
        #   "Download Complete"     - ready
        #   "... Downloaded ..."    - still downloading (x% Downloaded)
        #   "Installing ..."        - post-download install step
        #   anything else           - error
        if template.status == 'Download Complete':
            break
        elif 'Downloaded' in template.status:
            time.sleep(interval)
        elif 'Installing' not in template.status:
            raise Exception("ErrorInDownload")

    return
|
||||
|
||||
def update_resource_limit(apiclient, resourcetype, account=None, domainid=None,
                          max=None, projectid=None):
    """Update the resource limit to ``max`` for an account/domain/project.

    :param apiclient: CloudStack API client used to issue the update
    :param resourcetype: numeric resource type to limit
    :param account: optional account name the limit applies to
    :param domainid: optional domain id the limit applies to
    :param max: new limit value; ``None`` leaves it unset
        (note: the name shadows the builtin but is kept for caller
        compatibility)
    :param projectid: optional project id the limit applies to
    """
    cmd = updateResourceLimit.updateResourceLimitCmd()
    cmd.resourcetype = resourcetype
    if account:
        cmd.account = account
    if domainid:
        cmd.domainid = domainid
    # Fixed: compare against None so a limit of 0 (disallow the
    # resource entirely) is applied; the old truthiness test dropped it.
    if max is not None:
        cmd.max = max
    if projectid:
        cmd.projectid = projectid
    apiclient.updateResourceLimit(cmd)
    return
|
||||
|
||||
def list_routers(apiclient, **kwargs):
    """List all routers matching criteria given as keyword filters."""
    cmd = listRouters.listRoutersCmd()
    # Plain loop instead of a side-effect list comprehension.
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    return apiclient.listRouters(cmd)
|
||||
|
||||
def list_zones(apiclient, **kwargs):
    """List all zones matching criteria given as keyword filters."""
    cmd = listZones.listZonesCmd()
    # Plain loop instead of a side-effect list comprehension.
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    return apiclient.listZones(cmd)
|
||||
|
||||
def list_networks(apiclient, **kwargs):
    """List all networks matching criteria given as keyword filters."""
    cmd = listNetworks.listNetworksCmd()
    # Plain loop instead of a side-effect list comprehension.
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    return apiclient.listNetworks(cmd)
|
||||
|
||||
def list_clusters(apiclient, **kwargs):
    """List all clusters matching criteria given as keyword filters."""
    cmd = listClusters.listClustersCmd()
    # Plain loop instead of a side-effect list comprehension.
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    return apiclient.listClusters(cmd)
|
||||
|
||||
def list_ssvms(apiclient, **kwargs):
    """List all system VMs matching criteria given as keyword filters."""
    cmd = listSystemVms.listSystemVmsCmd()
    # Plain loop instead of a side-effect list comprehension.
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    return apiclient.listSystemVms(cmd)
|
||||
|
||||
def list_storage_pools(apiclient, **kwargs):
    """List all storage pools matching criteria given as keyword filters."""
    cmd = listStoragePools.listStoragePoolsCmd()
    # Plain loop instead of a side-effect list comprehension.
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    return apiclient.listStoragePools(cmd)
|
||||
|
||||
def list_virtual_machines(apiclient, **kwargs):
    """List all VMs matching criteria given as keyword filters."""
    cmd = listVirtualMachines.listVirtualMachinesCmd()
    # Plain loop instead of a side-effect list comprehension.
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    return apiclient.listVirtualMachines(cmd)
|
||||
|
||||
def list_hosts(apiclient, **kwargs):
    """List all hosts matching criteria given as keyword filters."""
    cmd = listHosts.listHostsCmd()
    # Plain loop instead of a side-effect list comprehension.
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    return apiclient.listHosts(cmd)
|
||||
|
||||
def list_configurations(apiclient, **kwargs):
    """List configurations matching criteria given as keyword filters."""
    cmd = listConfigurations.listConfigurationsCmd()
    # Plain loop instead of a side-effect list comprehension.
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    return apiclient.listConfigurations(cmd)
|
||||
|
||||
def list_publicIP(apiclient, **kwargs):
    """List all public IPs matching criteria given as keyword filters."""
    cmd = listPublicIpAddresses.listPublicIpAddressesCmd()
    # Plain loop instead of a side-effect list comprehension.
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    return apiclient.listPublicIpAddresses(cmd)
|
||||
|
||||
def list_nat_rules(apiclient, **kwargs):
    """List all NAT (port-forwarding) rules matching keyword filters."""
    cmd = listPortForwardingRules.listPortForwardingRulesCmd()
    # Plain loop instead of a side-effect list comprehension.
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    return apiclient.listPortForwardingRules(cmd)
|
||||
|
||||
def list_lb_rules(apiclient, **kwargs):
    """List all load-balancing rules matching keyword filters."""
    cmd = listLoadBalancerRules.listLoadBalancerRulesCmd()
    # Plain loop instead of a side-effect list comprehension.
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    return apiclient.listLoadBalancerRules(cmd)
|
||||
|
||||
def list_lb_instances(apiclient, **kwargs):
    """List all load-balancer rule instances matching keyword filters."""
    cmd = listLoadBalancerRuleInstances.listLoadBalancerRuleInstancesCmd()
    # Plain loop instead of a side-effect list comprehension.
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    return apiclient.listLoadBalancerRuleInstances(cmd)
|
||||
|
||||
def list_firewall_rules(apiclient, **kwargs):
    """List all firewall rules matching keyword filters."""
    cmd = listFirewallRules.listFirewallRulesCmd()
    # Plain loop instead of a side-effect list comprehension.
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    return apiclient.listFirewallRules(cmd)
|
||||
|
||||
def list_volumes(apiclient, **kwargs):
    """List all volumes matching keyword filters."""
    cmd = listVolumes.listVolumesCmd()
    # Plain loop instead of a side-effect list comprehension.
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    return apiclient.listVolumes(cmd)
|
||||
|
||||
def list_isos(apiclient, **kwargs):
    """List all available ISO files matching keyword filters."""
    cmd = listIsos.listIsosCmd()
    # Plain loop instead of a side-effect list comprehension.
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    return apiclient.listIsos(cmd)
|
||||
|
||||
def list_snapshots(apiclient, **kwargs):
    """List all snapshots matching keyword filters."""
    cmd = listSnapshots.listSnapshotsCmd()
    # Plain loop instead of a side-effect list comprehension.
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    return apiclient.listSnapshots(cmd)
|
||||
|
||||
def list_templates(apiclient, **kwargs):
    """List all templates matching keyword filters."""
    cmd = listTemplates.listTemplatesCmd()
    # Plain loop instead of a side-effect list comprehension.
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    return apiclient.listTemplates(cmd)
|
||||
|
||||
def list_domains(apiclient, **kwargs):
    """List domains matching keyword filters."""
    cmd = listDomains.listDomainsCmd()
    # Plain loop instead of a side-effect list comprehension.
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    return apiclient.listDomains(cmd)
|
||||
|
||||
def list_accounts(apiclient, **kwargs):
    """List accounts (with detailed account information) matching
    keyword filters."""
    cmd = listAccounts.listAccountsCmd()
    # Plain loop instead of a side-effect list comprehension.
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    return apiclient.listAccounts(cmd)
|
||||
|
||||
def list_users(apiclient, **kwargs):
    """List users (with detailed account information) matching keyword
    filters."""
    cmd = listUsers.listUsersCmd()
    # Plain loop instead of a side-effect list comprehension.
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    return apiclient.listUsers(cmd)
|
||||
|
||||
def list_snapshot_policy(apiclient, **kwargs):
    """List snapshot policies matching keyword filters."""
    cmd = listSnapshotPolicies.listSnapshotPoliciesCmd()
    # Plain loop instead of a side-effect list comprehension.
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    return apiclient.listSnapshotPolicies(cmd)
|
||||
|
||||
def list_events(apiclient, **kwargs):
    """List events matching keyword filters."""
    cmd = listEvents.listEventsCmd()
    # Plain loop instead of a side-effect list comprehension.
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    return apiclient.listEvents(cmd)
|
||||
|
||||
def list_disk_offering(apiclient, **kwargs):
    """List all available disk offerings matching keyword filters."""
    cmd = listDiskOfferings.listDiskOfferingsCmd()
    # Plain loop instead of a side-effect list comprehension.
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    return apiclient.listDiskOfferings(cmd)
|
||||
|
||||
def list_service_offering(apiclient, **kwargs):
    """List all available service offerings matching keyword filters."""
    cmd = listServiceOfferings.listServiceOfferingsCmd()
    # Plain loop instead of a side-effect list comprehension.
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    return apiclient.listServiceOfferings(cmd)
|
||||
|
||||
def list_vlan_ipranges(apiclient, **kwargs):
    """List all VLAN IP ranges matching keyword filters."""
    cmd = listVlanIpRanges.listVlanIpRangesCmd()
    # Plain loop instead of a side-effect list comprehension.
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    return apiclient.listVlanIpRanges(cmd)
|
||||
|
||||
def list_usage_records(apiclient, **kwargs):
    """List usage records for accounts matching keyword filters."""
    cmd = listUsageRecords.listUsageRecordsCmd()
    # Plain loop instead of a side-effect list comprehension.
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    return apiclient.listUsageRecords(cmd)
|
||||
|
||||
def list_nw_service_prividers(apiclient, **kwargs):
    """List network service providers matching keyword filters.

    NOTE: the misspelled name ("prividers") is preserved because callers
    depend on it.
    """
    cmd = listNetworkServiceProviders.listNetworkServiceProvidersCmd()
    # Plain loop instead of a side-effect list comprehension.
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    return apiclient.listNetworkServiceProviders(cmd)
|
||||
|
||||
def list_virtual_router_elements(apiclient, **kwargs):
    """List virtual router elements matching keyword filters."""
    cmd = listVirtualRouterElements.listVirtualRouterElementsCmd()
    # Plain loop instead of a side-effect list comprehension.
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    return apiclient.listVirtualRouterElements(cmd)
|
||||
|
||||
def list_network_offerings(apiclient, **kwargs):
    """List network offerings matching keyword filters."""
    cmd = listNetworkOfferings.listNetworkOfferingsCmd()
    # Plain loop instead of a side-effect list comprehension.
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    return apiclient.listNetworkOfferings(cmd)
|
||||
|
||||
def list_resource_limits(apiclient, **kwargs):
    """List resource limits matching keyword filters."""
    cmd = listResourceLimits.listResourceLimitsCmd()
    # Plain loop instead of a side-effect list comprehension.
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    return apiclient.listResourceLimits(cmd)
|
||||
|
|
@ -1,175 +0,0 @@
|
|||
# -*- encoding: utf-8 -*-
|
||||
# Copyright 2012 Citrix Systems, Inc. Licensed under the
|
||||
# Apache License, Version 2.0 (the "License"); you may not use this
|
||||
# file except in compliance with the License. Citrix Systems, Inc.
|
||||
# reserves all rights not expressly granted by the License.
|
||||
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Automatically generated by addcopyright.py at 04/03/2012
|
||||
"""Utilities functions
|
||||
"""
|
||||
|
||||
import time
|
||||
import remoteSSHClient
|
||||
from cloudstackAPI import *
|
||||
import cloudstackConnection
|
||||
#from cloudstackConnection import cloudConnection
|
||||
import configGenerator
|
||||
import logging
|
||||
import string
|
||||
import random
|
||||
import imaplib
|
||||
import email
|
||||
import datetime
|
||||
|
||||
def restart_mgmt_server(server):
    """Restart the cloud-management service on the management server.

    :param server: dict with "ipaddress", "port", "username", "password"
        of the management server
    :raises Exception: when the init script does not report both a
        successful stop ("Server Stop - OK") and start ("Server Start - OK")
    """
    # Get an SSH session, waiting for sshd if necessary.  (The original
    # wrapped this in `except Exception as e: raise e`, which only
    # re-raised; the wrapper is dropped with identical behavior.)
    ssh = is_server_ssh_ready(
        server["ipaddress"],
        server["port"],
        server["username"],
        server["password"],
    )
    result = ssh.execute("/etc/init.d/cloud-management restart")
    res = str(result)
    # A clean restart prints "OK" exactly twice (stop + start).
    if res.count("OK") != 2:
        # Fixed: the original `raise ("ErrorInReboot!")` raised a plain
        # string, which is a TypeError in Python 3 and deprecated in 2.
        raise Exception("ErrorInReboot!")
    return
|
||||
|
||||
def fetch_latest_mail(services, from_mail):
    """Fetch the body of the most recent mail from ``from_mail``.

    Logs into the IMAP server described by ``services`` (keys: server,
    email, password, folder) and searches for mails sent since
    yesterday by the given sender.

    :return: first text block of the latest matching mail, or False
        when no matching mail exists
    """
    # Login to mail server to verify email
    mail = imaplib.IMAP4_SSL(services["server"])
    mail.login(
        services["email"],
        services["password"]
    )
    mail.list()
    mail.select(services["folder"])
    date = (datetime.date.today() - datetime.timedelta(1)).strftime("%d-%b-%Y")

    result, data = mail.uid(
        'search',
        None,
        '(SENTSINCE {date} HEADER FROM "{mail}")'.format(
            date=date,
            mail=from_mail
        )
    )
    # Fixed: an empty search result yields [''] — never the [] the
    # original compared against — so the no-mail case crashed on
    # split()[-1] instead of returning False.
    if not data or not data[0]:
        return False

    latest_email_uid = data[0].split()[-1]
    result, data = mail.uid('fetch', latest_email_uid, '(RFC822)')
    raw_email = data[0][1]
    email_message = email.message_from_string(raw_email)
    result = get_first_text_block(email_message)
    return result
|
||||
|
||||
def get_first_text_block(email_message_instance):
    """Return the first text payload of an email message.

    For a plain-text message the payload itself is returned; for a
    multipart message the payload of the first 'text' part.  Returns
    None when the message carries no text part.
    """
    main_type = email_message_instance.get_content_maintype()
    if main_type == 'text':
        return email_message_instance.get_payload()
    if main_type == 'multipart':
        for section in email_message_instance.get_payload():
            if section.get_content_maintype() == 'text':
                return section.get_payload()
    return None
|
||||
|
||||
def random_gen(size=6, chars=string.ascii_uppercase + string.digits):
    """Build a random string of ``size`` characters drawn from ``chars``."""
    picked = []
    for _ in range(size):
        picked.append(random.choice(chars))
    return ''.join(picked)
|
||||
|
||||
def cleanup_resources(api_client, resources):
    """Delete each of the given resources through the API client."""
    for resource in resources:
        resource.delete(api_client)
|
||||
|
||||
def is_server_ssh_ready(ipaddress, port, username, password, retries=50):
    """Return an SSH handle to the server, waiting until sshd answers.

    Retries every 30 seconds, up to ``retries`` additional attempts;
    the last failure is re-raised when the budget is exhausted.
    """
    attempts_left = retries
    while True:
        try:
            handle = remoteSSHClient.remoteSSHClient(
                ipaddress, port, username, password)
        except Exception as e:
            if attempts_left == 0:
                raise e
            attempts_left = attempts_left - 1
            time.sleep(30)
        else:
            return handle
|
||||
|
||||
|
||||
def format_volume_to_ext3(ssh_client, device="/dev/sda"):
    """Partition the attached device and format partition 1 as ext3.

    :param ssh_client: SSH session (must expose execute(cmd))
    :param device: block device to partition (default /dev/sda)
    """
    # fdisk answers: new partition, primary, #1, default start/end, write
    partition_cmd = "echo -e 'n\np\n1\n\n\nw' | fdisk %s" % device
    mkfs_cmd = "mkfs.ext3 %s1" % device
    for shell_cmd in (partition_cmd, mkfs_cmd):
        ssh_client.execute(shell_cmd)
|
||||
|
||||
def fetch_api_client(config_file='datacenterCfg'):
    """Build a CloudStack API client from the given setup config file.

    Reads the first management server entry from the config and wires
    it into a cloudConnection with a one-hour async job timeout.
    """
    config = configGenerator.get_setup_config(config_file)
    mgmt_server = config.mgtSvr[0]
    connection = cloudstackConnection.cloudConnection(
        mgmt_server.mgtSvrIp,
        mgmt_server.port,
        mgmt_server.apiKey,
        mgmt_server.securityKey,
        3600,  # async job timeout in seconds
        logging.getLogger("testClient")
    )
    return cloudstackAPIClient.CloudStackAPIClient(connection)
|
||||
|
||||
def get_process_status(hostip, port, username, password, linklocalip, process):
    """Run ``process`` on a system VM via a double SSH hop; return its output.

    First SSHes to the host, then from the host into the VM's
    link-local address on port 3922 with the cloud private key,
    retrying (up to 5 extra attempts, 5s apart) while host key
    verification fails.
    """
    # SSH to the host machine
    host_session = remoteSSHClient.remoteSSHClient(
        hostip,
        port,
        username,
        password
    )
    hop_cmd = ("ssh -i ~/.ssh/id_rsa.cloud -ostricthostkeychecking=no "
               "-oUserKnownHostsFile=/dev/null -p 3922 %s %s"
               % (linklocalip, process))

    # Double hop into the router, retrying until the login sticks.
    attempts_left = 5
    while True:
        res = host_session.execute(hop_cmd)
        if res[0] != "Host key verification failed.":
            break
        if attempts_left == 0:
            break
        time.sleep(5)
        attempts_left = attempts_left - 1
    return res
|
||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue