From: Lionel
Date: Tue, 18 Nov 2014 14:37:08 +0000 (+0100)
Subject: cleaning code of MySQL data store object
X-Git-Url: http://gitweb.enneade.fdn.org/?a=commitdiff_plain;h=e4ce220e85084651374a1f3912d1cf7a09e2769a;p=pyowl.git

cleaning code of MySQL data store object
---

diff --git a/cm160Server.py b/cm160Server.py
index dd3cc0e..42195e9 100755
--- a/cm160Server.py
+++ b/cm160Server.py
@@ -692,10 +692,7 @@ class QueueReader(Thread):
 # ************* MySQL data store *********************************
 class MySQLDataStore(object):
-    """Responsible for the storage and retrieval of CM160 data using an MySQL
-    database to hold the data"""
-
-    DEFAULT_DBFILE = "cm160_data.db"
+    """Responsible for the storage and retrieval of CM160 data using a MySQL database to hold the data"""
 
     def __init__ (self):
         self.__db = "owl"
@@ -761,64 +758,12 @@ class MySQLDataStore(object):
             self.info("stored record: %s" % cm160Data)
             self.__displayTime=time.time()+2
             #We used to commit every record this was very slow when
-            #downloading 30 days of data aon slow (E.G raspberypi)
+            #downloading 30 days of data on slow (E.G raspberypi)
             #platforms. Now we only comit data as it is displayed.
             self.__StoreConn.commit()
         return stored
 
-# TODO : what is it ?
-    def getCM160List(self, startTime, stopTime):
-        """Return a list of CM160 data objects containing the data stored in the database between the
-        start and stop times."""
-
-        # NB: As each socket connection is handled by a dedicated
-        # thread, and as sqlite objects can't be shared across
-        # threads, we must open a dedicated sqlite connection
-        # here - it can't be done (much) earlier and kept open
-        # e.g., in the enclosing Server context; and it's
-        # probably not a severe performance hit in this case
-        # anyway...
-        conn, cursor = self.__connect()
-
-        cursor.execute("SELECT * from cm160data WHERE \
-                        (ts >= ?) AND (ts <= ?) ORDER BY ts", \
-                       [startTime, stopTime])
-
-        # We have to retrieve all rows in order to reliably find out
-        # how many there are in total (which we need to be able to
-        # later give "percentage complete" messages). But if there
-        # are too many rows (i.e., if this takes too long) the EOWL
-        # client seems to get confused (closes the history window and
-        # starts wrongly showing the historical datapoints as part of
-        # the real time plot!). So we retrieve in blocks of at most
-        # 5000 records (which has just been very roughly determined
-        # by trial and error!) and send rather bogus "percentage"
-        # messages to the client as each block is retrieved, just to
-        # keep it awake and waiting...
-
-        rows = []
-        rowset = cursor.fetchmany(5000)
-        while (rowset):
-            rows += rowset
-            rowset = cursor.fetchmany()
-        conn.close()
-        # Could probably release the lock even before the
-        # fetchall() but delaying to after should be very safe...
-
-        cm160DataList=[]
-        rowCount = 0
-        for r in rows:
-            rowCount += 1 ;
-            (ts, current, tariff) = (r)
-            cm160Data = CM160Data(ts.minute, ts.hour, \
-                                  ts.day, ts.month, ts.year, \
-                                  tariff, current)
-            cm160DataList.append(cm160Data)
-            self.debug("Row: %s" % cm160Data.__str__())
-
-        return cm160DataList
-
     def info(self, text):
         """Display info text"""
 #        if self.__uo != None: