#!/usr/bin/env python

from RECT import *

import sys, os
import time, string, traceback, types
import xmls
import traceback
import re
#from clogger import *
from decimal import *
from rectparams import *
try:
    from unittest.runner import _WritelnDecorator # Python 2.7
except ImportError:
    from unittest import _WritelnDecorator

class TestResult(unittest.TestResult):
    """Accumulates the outcome of every test that is run.

    In addition to the stock unittest bookkeeping (failures, errors,
    testsRun) this records a (test, outcome) pair for every completed
    test in ``results`` and its wall-clock duration in ``timetest``.
    """

    def __init__(self):
        # Deliberately does not chain to unittest.TestResult.__init__;
        # every attribute this class relies on is (re)created here.
        self.failures = []       # (test, formatted traceback string) pairs
        self.errors = []         # (test, formatted traceback string) pairs
        self.results = []        # (test, outcome) for every completed test
        self.testsRun = 0
        self.shouldStop = False  # was 0; False matches what stop() sets
        self.starttime = 0       # start timestamp of the test in progress
        self.stoptime = 0        # stop timestamp of the last finished test
        self.timetest = []       # per-test durations, in completion order
        self.buffer = False

    def startTest(self, test):
        "Called when the given test is about to be run"
        self.starttime = time.time()
        self.testsRun = self.testsRun + 1

    def stopTest(self, test):
        "Called when the given test has been run"
        pass

    def addError(self, test, err):
        """Called when an unexpected error has occurred. 'err' is a tuple
        of values as returned by sys.exc_info().
        """
        self.stoptime = time.time()
        self.timetest.append(self.stoptime - self.starttime)
        self.errors.append((test, self._exc_info_to_string(err, test)))
        self.results.append((test, self._exc_info(err, test)))

    def addFailure(self, test, err):
        """Called when a test failure has occurred. 'err' is a tuple of
        values as returned by sys.exc_info()."""
        self.stoptime = time.time()
        self.timetest.append(self.stoptime - self.starttime)
        self.failures.append((test, self._exc_info_to_string(err, test)))
        self.results.append((test, self._exc_info(err, test)))

    def addSuccess(self, test):
        "Called when a test has completed successfully"
        self.stoptime = time.time()
        self.timetest.append(self.stoptime - self.starttime)
        self.results.append((test, "DONE!"))

    def wasSuccessful(self):
        "Tells whether or not this result was a success"
        # Chained comparison: true only when both lists are empty.
        return len(self.failures) == len(self.errors) == 0

    def stop(self):
        "Indicates that the tests should be aborted"
        self.shouldStop = True

    def _exc_info_to_string(self, err, test):
        """Converts a sys.exc_info()-style tuple of values into a string."""
        exctype, value, tb = err
        # Skip test runner traceback levels
        while tb and self._is_relevant_tb_level(tb):
            tb = tb.tb_next
        if exctype is test.failureException:
            # Skip assert*() traceback levels
            length = self._count_relevant_tb_levels(tb)
            return ''.join(traceback.format_exception(exctype, value, tb, length))
        return ''.join(traceback.format_exception(exctype, value, tb))

    def _exc_info(self, err, test):
        """Converts a sys.exc_info()-style tuple of values into a tuple."""
        exctype, value, tb = err

        # Skip test runner traceback levels
        while tb and self._is_relevant_tb_level(tb):
            tb = tb.tb_next

        return (exctype, value, tb)

    def _is_relevant_tb_level(self, tb):
        # 'in' replaces dict.has_key(), which only exists in Python 2.
        return '__unittest' in tb.tb_frame.f_globals

    def _count_relevant_tb_levels(self, tb):
        """Number of traceback frames up to the first test-runner frame."""
        length = 0
        while tb and not self._is_relevant_tb_level(tb):
            length += 1
            tb = tb.tb_next
        return length

    def __repr__(self):
        # _strclass is not defined in this file -- presumably provided by
        # a star import (RECT/rectparams); TODO confirm.
        return "<%s run=%i errors=%i failures=%i>" % \
               (_strclass(self.__class__), self.testsRun, len(self.errors),
                len(self.failures))

class _TextTestResult(TestResult):

    separator1 = '=' * 70
    separator2 = '-' * 70

    def __init__(self, stream, descriptions, verbosity):
        TestResult.__init__(self)
        self.stream = stream
        self.showAll = verbosity > 1
        self.dots = verbosity == 1
        self.descriptions = descriptions

    def getDescription(self, test):
        if self.descriptions:
            return test.shortDescription() or str(test)
        else:
            return str(test)

    def testTime(self, test):
        return self.timetest

    def startTest(self, test):
        TestResult.startTest(self, test)
        if self.showAll:
            self.stream.write(self.getDescription(test))
            self.stream.write(" ... ")

    def addSuccess(self, test):
        TestResult.addSuccess(self, test)
        if self.showAll:
            self.stream.writeln("ok")
        elif self.dots:
            self.stream.write('.')

    def addError(self, test, err):
        TestResult.addError(self, test, err)
        if self.showAll:
            self.stream.writeln("ERROR")
        elif self.dots:
            self.stream.write('E')

    def addFailure(self, test, err):
        TestResult.addFailure(self, test, err)
        if self.showAll:
            self.stream.writeln("FAIL")
        elif self.dots:
            self.stream.write('F')

    def printErrors(self):
        if self.dots or self.showAll:
            self.stream.writeln()
        self.printErrorList('ERROR', self.errors)
        self.printErrorList('FAIL', self.failures)

    def printErrorList(self, flavour, errors):
        for test, err in errors:
            self.stream.writeln(self.separator1)
            self.stream.writeln("%s: %s" % (flavour,self.getDescription(test)))
            self.stream.writeln(self.separator2)
            self.stream.writeln("%s" % err)

    def printResults(self):
        for test, err in self.results:
            print test, err


class TextTestRunner:
    """A test runner class that displays results in textual form.

    It prints out the names of tests as they are run, errors as they
    occur, and a summary of the results at the end of the test run.
    """

    def __init__(self, stream=sys.stderr, descriptions=1, verbosity=1):
        self.stream = _WritelnDecorator(stream)
        self.descriptions = descriptions
        self.verbosity = verbosity

    def _makeResult(self):
        return _TextTestResult(self.stream, self.descriptions, self.verbosity)

    def run(self, test):
        "Run the given test case or test suite."
        result = self._makeResult()
        started = time.time()
        test(result)
        elapsed = time.time() - started

        result.printErrors()
        self.stream.writeln(result.separator2)
        count = result.testsRun
        plural = "s" if count != 1 else ""
        self.stream.writeln("Ran %d test%s in %.3fs" % (count, plural, elapsed))
        self.stream.writeln()

        if result.wasSuccessful():
            self.stream.writeln("OK")
        else:
            # Build the failure summary, e.g. "FAILED (failures=1, errors=2)".
            parts = []
            if result.failures:
                parts.append("failures=%d" % len(result.failures))
            if result.errors:
                parts.append("errors=%d" % len(result.errors))
            self.stream.write("FAILED (")
            self.stream.write(", ".join(parts))
            self.stream.writeln(")")
        return result

def check_slaves(slave):
    """Return True when *slave* specifies both a host (``-h <value>``)
    and a port (``-p <value>``) option, False otherwise.

    A ``-t <timeout>`` option is tolerated but never required.
    """
    # Raw string suffices here (no escapes); the old ur"" prefix is
    # invalid syntax on Python 3.
    option_re = re.compile(
        r"(?P<host>-h [^ ]+)|(?P<port>-p [^ ]+)|(?P<timeout>-t [^ ]+)",
        re.S | re.U)
    found = set()
    for match in option_re.finditer(slave):
        for name, value in match.groupdict().items():
            # Collect each required option at most once, so a repeated
            # "-h" can no longer stand in for a missing "-p" (the old
            # counting logic accepted any two host/port matches).
            if value and name in ('host', 'port'):
                found.add(name)
    return found == set(['host', 'port'])

def get_tests(config):
    """Return a list of one-entry ``{test_name: test_definition}`` dicts
    extracted from *config*.

    A test definition is any dict found inside a list-valued config
    entry whose keys are exactly 'shares' and 'slaves'.
    """
    test_list = []
    for name in config.keys():
        # isinstance replaces the string comparison against the
        # __class__ name.
        if not isinstance(config[name], list):
            continue
        for entry in config[name]:
            if not hasattr(entry, "keys"):
                continue
            # The old check ("h.keys() is [...] or ['slaves','shares']")
            # was always true because the trailing non-empty list is
            # truthy; compare the actual key set instead.
            if set(entry.keys()) == set(['shares', 'slaves']):
                test_list.append({name: entry})
    return test_list

def main():
    """Entry point: load the YAML configuration, build and run the test
    suite, and write the collected results out as an XML report.

    Usage: ``script [cfgfile [cfgfile2]]``; with no arguments a local
    'config.yaml' is expected.
    """
    if len(sys.argv) > 3:
        raise RuntimeError('Too many command line arguments!!!!!!!!!!')

    if len(sys.argv) == 3:
        cfgfile = sys.argv[1]
        cfgfile2 = sys.argv[2]
        # initialize() is not defined in this file -- presumably provided
        # by a star import (rectparams/RECT); TODO confirm.
        config = initialize(cfgfile, cfgfile2)

    elif len(sys.argv) == 2:
        cfgfile = sys.argv[1]
        config = initialize(cfgfile)
    else:
        try:
            cfgfile = 'config.yaml'
            config = initialize(cfgfile)
        except:
            # NOTE(review): execution continues after this message, so a
            # missing config leads to a NameError on 'config' below.
            print "\'config.yaml\' was not found\n"
            traceback.print_exc()


    # Collect the names of the tests declared in the config.
    tests_load_from_config = get_tests(config)
    test_list = []
    for test_element in tests_load_from_config:
        test_list += test_element.keys()
    curdir = os.path.abspath(os.curdir)
    curdir = [curdir]
    if 'modules' not in config:
        # Locate test modules on disk: first in the configured 'path'
        # (falling back to the current directory), then in 'datadir'.
        choose_dir = config.get('path',curdir)
        sys.path = choose_dir + sys.path
        lib_list = import_libs(choose_dir)
        filtered_modules = (filter_modules(lib_list, test_list))
        if (filtered_modules == []):
            # 'datadir' is not defined in this file -- presumably a
            # star-import global; verify against rectparams/RECT.
            lib_list = import_libs(datadir)
            filtered_modules = (filter_modules(lib_list, test_list))
        suite = TestLoader(config).loadTestsFromNames(filtered_modules)
    else:
        suite = TestLoader(config).loadTests()
    xml_result = []
    # Run each sub-suite and flatten its outcomes into xml_result.
    for i in suite.__iter__():

        startTime = time.time()
        xmltimeStart = time.strftime("%H:%M:%S", time.localtime())
        s = TextTestRunner(verbosity=2).run(i)

        idtest = 1

        # The suite appears to be nested two levels deep:
        # i._tests -> test classes, y._tests -> test methods. TODO confirm
        # against TestLoader.
        for y in i._tests:

            count1 = 1
            count2 = 1
            for z in  y._tests:
                z_index = y._tests.index(z)

                testResultXml = {"OK" : {}}

                # testTime() returns the full list of per-test durations;
                # z_index picks this test's slot further down.
                testtime = s.testTime(z)

                # Map the recorded outcome of test 'z' (if any) onto the
                # XML result structure; errors and failures override "OK".
                for i_result in s.results:
                    if (z == i_result[0]):
                        for i_error in s.errors:
                            if (z == i_error[0]):
                                testResultXml = {"ERROR": {'exception' : {'code': str(i_result[1][0]), 'reason': str(i_result[1][1])}}}

                        for i_fail in s.failures:
                            if (z == i_fail[0]):
                                testResultXml = {"FAIL": {'exception' : {'code': str(i_result[1][0]), 'reason': str(i_result[1][1])}}}


                # Describe each slave (proxy) the test ran against,
                # including mount options and reported system information.
                test_slaves = []
                for name in z.slave_names:

                    test_slaves.append({'description': str(config['proxies'][name]), 'mountoptions': z.getOption(name, 'mountOptions', '').split(","), 'sysinfo': {z.getOption(name, 'system', 'Windows') : {'Cifstype':str(config['proxies'][name].sysinfo().cifstype), 'Sysname':str(config['proxies'][name].sysinfo().sysname), 'Nodename':str(config['proxies'][name].sysinfo().nodename), 'Release':str(config['proxies'][name].sysinfo().release),   'Version':str(config['proxies'][name].sysinfo().version), 'Machine':str(config['proxies'][name].sysinfo().machine)}}})



                # Describe each share; 'Windows' is the assumed system
                # when the config does not specify one.
                test_shares = []
                for name in z.share_names:
                    if ('system' in config['shares'][name]):
                        test_shares.append({'description':config['shares'][name]['start'], 'system':config['shares'][name]['system']})
                    else:
                        test_shares.append({'description':config['shares'][name]['start'], 'system':'Windows'})

                test = {'name':{'modulename':z.__module__, 'classname':z.tID, 'methodname':z._testMethodName}, 'description': {'shortdescr':z.shortDescription(), 'fulldescr':z._testMethodDoc}}

                # Round the duration to 3 significant digits (upwards)
                # for the report.
                xmltimeTest = Context(prec=3, rounding=ROUND_UP).create_decimal(str(testtime[z_index]))

                xml_result.append({'timendate': {'testtime':xmltimeTest, 'suitestarttime': xmltimeStart}, 'shares': test_shares, 'slaves': test_slaves, 'testdescr': test, 'result': testResultXml, 'id' : str(idtest) })

                idtest += 1

    # Serialize the accumulated results to XML and tell the user where
    # the report landed.
    o_Xml = xmls.CMakexml()
    result_path = o_Xml.WriteInfoInXML (capture_result = xml_result)
    print ""
    print "The directory with results in xml-format: %s" % result_path
    print "Use: xml-run compare -i %s" % result_path
    print ""

# Standard script entry guard: run main() only when executed directly.
if __name__ == '__main__':
    main()

