id
int64
0
25.6k
text
stringlengths
0
4.59k
25,100
listing the tree java program /tree java /demonstrates tree /to run this programc>java tree app import java io */for / import java lang integer/for parseint(///////////////////////////////////////////////////////////////class dataitem public double ddata/one data item //public dataitem(double dd/constructor ddata dd//public void displayitem(/display itemformat "/ system out print("/"+ddata)///end class dataitem ///////////////////////////////////////////////////////////////class node private static final int order private int numitemsprivate node parentprivate node childarray[new node[order]private dataitem itemarray[new dataitem[order- ]//connect child to this node public void connectchild(int childnumnode childchildarray[childnumchildif(child !nullchild parent this//disconnect child from this nodereturn it public node disconnectchild(int childnum
25,101
node tempnode childarray[childnum]childarray[childnumnullreturn tempnode/public node getchild(int childnumreturn childarray[childnum]/public node getparent(return parent/public boolean isleaf(return (childarray[ ]==nulltrue false/public int getnumitems(return numitems/public dataitem getitem(int index/get dataitem at index return itemarray[index]/public boolean isfull(return (numitems==order- true false/public int finditem(double key/return index of /item (within nodefor(int = <order- ++/if found/otherwiseif(itemarray[ =null/return - breakelse if(itemarray[jddata =keyreturn jreturn - /end finditem /public int insertitem(dataitem newitem
25,102
numitems++double newkey newitem ddata/will add new item /key of new item for(int =order- >= --/start on right/examine items if(itemarray[ =null/if item nullcontinue/go left one cell else /not null/get its key double itskey itemarray[jddataif(newkey itskey/if it' bigger itemarray[ + itemarray[ ]/shift it right else itemarray[ + newitem/insert new item return + /return index to /new item /end else (not null/end for /shifted all itemsitemarray[ newitem/insert new item return /end insertitem(/public dataitem removeitem(/remove largest item /assumes node not empty dataitem temp itemarray[numitems- ]/save item itemarray[numitems- null/disconnect it numitems--/one less item return temp/return item /public void displaynode(/format "//for(int = <numitemsj++itemarray[jdisplayitem()/"/ system out println("/")/final "///end class node ///////////////////////////////////////////////////////////////class tree private node root new node() /make root node
25,103
node curnode rootint childnumberwhile(trueif(childnumber=curnode finditem(key!- return childnumber/found it else ifcurnode isleaf(return - /can' find it else /search deeper curnode getnextchild(curnodekey)/end while //insert dataitem public void insert(double dvaluenode curnode rootdataitem tempitem new dataitem(dvalue)while(trueifcurnode isfull(split(curnode)curnode curnode getparent()/if node full/split it /back up /search once curnode getnextchild(curnodedvalue)/end if(node is fullleafelse ifcurnode isleaf(/if node is break/go insert /node is not fullnot leafso go to lower level else curnode getnextchild(curnodedvalue)/end while curnode insertitem(tempitem)dataitem /end insert(/insert new /public void split(node thisnode/split the node /assumes node is full dataitem itembitemc
25,104
int itemindexitemc thisnode removeitem()/remove items from itemb thisnode removeitem()/this node child thisnode disconnectchild( )/remove children child thisnode disconnectchild( )/from this node node newright new node()/make new node if(thisnode==root/if this is the rootroot new node()/make new root parent root/root is our parent root connectchild( thisnode)/connect to parent else /this node not the root parent thisnode getparent()/get parent /deal with parent itemindex parent insertitem(itemb)/item to parent int parent getnumitems()/total itemsfor(int = - >itemindexj--/move parent' /connections node temp parent disconnectchild( )/one child parent connectchild( + temp)/to the right /connect newright to parent parent connectchild(itemindex+ newright)/deal with newright newright insertitem(itemc)/item to newright newright connectchild( child )/connect to and newright connectchild( child )/on newright /end split(//gets appropriate child of node during search for value public node getnextchild(node thenodedouble thevalueint /assumes node is not emptynot fullnot leaf int numitems thenode getnumitems()for( = <numitemsj++/for each item in node /are we lessifthevalue thenode getitem(jddata return thenode getchild( )/return left child
25,105
return thenode getchild( )/we're greaterso /return right child /public void displaytree(recdisplaytree(root )/private void recdisplaytree(node thisnodeint levelint childnumbersystem out print("level="+level+child="+childnumber+")thisnode displaynode()/display this node /call ourselves for each child of this node int numitems thisnode getnumitems()for(int = <numitems+ ++node nextnode thisnode getchild( )if(nextnode !nullrecdisplaytree(nextnodelevel+ )else return/end recdisplaytree(//end class tree ///////////////////////////////////////////////////////////////class tree app public static void main(string[argsthrows ioexception double valuetree thetree new tree ()thetree insert( )thetree insert( )thetree insert( )thetree insert( )thetree insert( )while(true
25,106
puttext("enter first letter of ")puttext("showinsertor find")char choice getchar()switch(choicecase ' 'thetree displaytree()breakcase ' 'puttext("enter value to insert")value getint()thetree insert(value)breakcase ' 'puttext("enter value to find")value getint()int found thetree find(value)if(found !- system out println("found "+value)else system out println("could not find "+value)breakdefaultputtext("invalid entry\ ")/end switch /end while /end main(//public static void puttext(string ssystem out print( )system out flush()//public static string getstring(throws ioexception inputstreamreader isr new inputstreamreader(system in)bufferedreader br new bufferedreader(isr)string br readline()return //public static char getchar(throws ioexception string getstring()return charat( )
25,107
String s = getString(); return Integer.parseInt(s); } } // end class TreeApp

2-3-4 Trees and Red-Black Trees

At this point, 2-3-4 trees and red-black trees (described in an earlier chapter) probably seem like entirely different entities. However, it turns out that in a certain sense they are completely equivalent. One can be transformed into the other by the application of a few simple rules, and even the operations needed to keep them balanced are equivalent. Mathematicians would say they are isomorphic. You probably won't ever need to transform a 2-3-4 tree into a red-black tree, but the equivalence of these structures casts additional light on their operation and is useful in analyzing their efficiency. Historically, the 2-3-4 tree was developed first; later the red-black tree evolved from it.

Transformation from 2-3-4 to Red-Black

A 2-3-4 tree can be transformed into a red-black tree by applying the following rules:

- Transform any 2-node in the 2-3-4 tree into a black node in the red-black tree.
- Transform any 3-node into a child C (with two children of its own) and a parent P (with children C and one other child). It doesn't matter which item becomes the child and which the parent. C is colored red and P is colored black.
- Transform any 4-node into a parent P and two children C1 and C2, both with two children of their own. C1 and C2 are colored red and P is black.

The accompanying figure shows these transformations. The child nodes in these subtrees are colored red; all other nodes are colored black.
25,108
The accompanying figure shows a 2-3-4 tree and the corresponding red-black tree obtained by applying these transformations. Dotted lines surround the subtrees that were made from 3-nodes and 4-nodes. The red-black rules are automatically satisfied by the transformation. Check that this is so: two red nodes are never connected, and there is the same number of black nodes on every path from root to leaf (or null child).

[Figure: a 2-3-4 tree and its red-black equivalent]

You can say that a 3-node in a 2-3-4 tree is equivalent to a parent with a red child in a red-black tree, and a 4-node is equivalent to a parent with two red children. It follows that a black parent with a black child in a red-black tree does not represent a 3-node in a 2-3-4 tree; it simply represents a 2-node with another 2-node child. Similarly, a black parent with two black children does not represent a 4-node.

Operational Equivalence

Not only does the structure of a red-black tree correspond to a 2-3-4 tree, but the operations applied to these two kinds of trees are also equivalent. In a 2-3-4 tree the tree is kept balanced using node splits. In a red-black tree the two balancing methods are color flips and rotations.

4-Node Splits and Color Flips
25,109
each 4-node into two 2-nodes; in a red-black tree you perform color flips. How are these operations equivalent?

[Figure: 4-node split and color flip]

In part a) of the figure we show a 4-node in a 2-3-4 tree before it is split; part b) shows the situation after the split. The 2-node that was the parent of the 4-node becomes a 3-node. In part c) we show the red-black equivalent to the 2-3-4 tree in a). The dotted line surrounds the equivalent of the 4-node. A color flip results in the red-black tree of part d). Now the two former children are black and the middle node is red; thus that node and its parent form the equivalent of a 3-node, as shown by the dotted line. This is the same 3-node formed by the node split in part b). Thus we see that splitting a 4-node during the insertion process in a 2-3-4 tree is equivalent to performing color flips during the insertion process in a red-black tree.

3-Node Splits and Rotations

When a 3-node in a 2-3-4 tree is transformed into its red-black equivalent, two arrangements are possible, as we showed earlier. Either of the two data items can become the parent. Depending on which one is chosen, the child will be either a left child or a right child, and the slant of the line connecting parent and child will be either left or right. Both arrangements are valid; however, they may not contribute equally to balancing the tree. Let's look at the situation in a slightly larger context. Part a) of the next figure shows a 2-3-4 tree, and parts b) and c) show two equivalent red-black trees derived from it by applying the transformation rules. The difference between them is the choice of which of the two data items in the 3-node to make the parent: in b) one item is the parent; in c) it's the other.
25,110
Although these arrangements are equally valid, you can see that the tree in b) is not balanced, while the one in c) is. Given the red-black tree in b), we would want to rotate it to the right (and perform two color changes) to balance it. Amazingly, this rotation results in the exact same tree shown in c). Thus we see an equivalence between rotations in red-black trees and the choice of which node to make the parent when transforming 2-3-4 trees to red-black trees. Although we don't show it, a similar equivalence can be seen for the double rotation necessary for inside grandchildren.

Efficiency of 2-3-4 Trees

It's harder to analyze the efficiency of a 2-3-4 tree than a red-black tree, but the equivalence of red-black trees and 2-3-4 trees gives us a starting point.

Speed

As we saw earlier, in a red-black tree one node on each level must be visited during a search, whether to find an existing node or insert a new one. The number of levels in a red-black tree (a balanced binary tree) is about log2(N+1), so search times are proportional to this.

One node must be visited at each level in a 2-3-4 tree as well, but the 2-3-4 tree is shorter (has fewer levels) than a red-black tree with the same number of data items. Refer to the earlier figure, where the 2-3-4 tree has three levels and the red-black tree has five.

More specifically, in 2-3-4 trees there are up to 4 children per node. If every node were full, the height of the tree would be proportional to log4 N. Logarithms to the base 2 and to the base 4 differ by a constant factor of 2. Thus, the height of a 2-3-4 tree would be about half that of a red-black tree, provided that all the nodes were full. Because they aren't all full, the height of the 2-3-4 tree is somewhere between log2(N+1) and log2(N+1)/2. Thus, the reduced height of the 2-3-4 tree decreases search times slightly compared with red-black trees.

On the other hand, there are more items to examine in each node, which increases the search time. Because the data items in the node are examined using a linear search, this multiplies the search times by an amount proportional to M, the average number of items per node.
25,111
some nodes contain itemsome and some if we estimate that the average is search times will be proportional to *log this is small constant number that can be ignored in big notation thusfor trees the increased number of items per node tends to cancel out the decreased height of the tree the search times for tree and for balanced binary tree such as red-black tree are approximately equaland are both (lognstorage requirements each node in tree contains storage for three references to data items and four references to its children this space may be in the form of arraysas shown in tree javaor of individual variables not all this storage is used node with only one data item will waste two thirds of the space for data and half the space for children node with two data items will waste one third of the space for data and one quarter of the space for childrento put it another wayit will use / of the available space if we take two data items per node as the average utilizationabout / of the available storage is wasted one might imagine using linked lists instead of arrays to hold the child and data referencesbut the overhead of the linked list compared with an arrayfor only or itemswould probably not make this worthwhile approach because they're balancedred-black trees contain few nodes that have only one childso almost all the storage for child references is used alsoevery node contains the maximum number of data itemswhich is this makes red-black trees more efficient than trees in terms of memory usage in javawhich stores references to objects instead of the objects themselvesthis difference in storage between trees and red-black trees may not be importantand the programming is certainly simpler for trees howeverin languages that don' use references this waythe difference in storage efficiency between red-black trees and trees may be significant external storage trees are an example of multiway treeswhich have more than two children and more than one data item another kind 
of multiway treethe -treeis useful when data resides in external storage external storage typically refers to some kind of disk systemsuch as the hard disk found in most desktop computers or servers in this section we'll begin by describing various aspects of external file handling we'll talk about simple approach to organizing external datasequential ordering finally we'll discuss -trees and explain why they work so well with disk files we'll finish with another approach to external storageindexingwhich can be used alone or with -tree we'll also touch on other aspects of external storagesuch as searching techniques in the next we'll mention different approach to external storagehashing the details of external storage techniques are dependent on the operating systemlanguageand even the hardware used in particular installation as consequenceour discussion in this section will be considerably more general than for most topics in this book accessing external data
25,112
stored entirely in main memory (often called ramfor random access memoryhoweverin many situations the amount of data to be processed is too large to fit in main memory all at once in this case different kind of storage is necessary disk files generally have much larger capacity than main memorythis is made possible by their lower cost per byte of storage of coursedisk files have another advantagetheir permanence when you turn off your computer (or the power fails)the data in main memory is lost disk files can retain data indefinitely with the power off howeverit' mostly the size difference that we'll be involved with here the disadvantage of external storage is that it' much slower than main memory this speed difference means that different techniques must be used to handle it efficiently as an example of external storageimagine that you're writing database program to handle the data found in the phone book for medium-sized cityperhaps , entries each entry includes nameaddressphone numberand various other data used internally by the phone company let' say an entry is stored as record requiring bytes the result is file size of , which is , , bytes or megabytes we'll assume that on the target machine this is too large to fit in main memorybut small enough to fit on your disk drive thus you have large amount of data on your disk drive how do you structure it to provide the usual desirable characteristicsquick searchinsertionand deletionin investigating the answersyou must keep in mind two facts firstaccessing data on disk drive is much slower than accessing it in main memory secondyou must access many records at once let' explore these points very slow access computer' main memory works electronically any byte can be accessed just as fast as any other bytein fraction of microsecond ( millionth of secondthings are more complicated with disk drives data is arranged in circular tracks on spinning disksomething like the tracks on compact disc (cdor the grooves in an 
oldstyle phonograph record to access particular piece of data on disk drivethe read-write head must first be moved to the correct track this is done with stepping motor or similar deviceit' mechanical activity that requires several milliseconds (thousandths of secondonce the correct track is foundthe read-write head must wait for the data to rotate into position on the averagethis takes half revolution even if the disk is spinning at , revolutions per minuteabout more milliseconds pass before the data can be read once the read-write head is positionedthe actual reading (or writingprocess beginsthis might take few more milliseconds thusdisk access times of around milliseconds are common this is something like , times slower than main memory technological progress is reducing disk access times every yearbut main memory access times are being reduced fasterso the disparity between disk access and main memory access times will grow even larger in the future one block at time once it is correctly positioned and the reading (or writingprocess beginsa disk drive can transfer large amount of data to main memory fairly quickly for this reasonand to
25,113
pagesallocation unitsor some other namedepending on the system we'll call them blocks the disk drive always reads or writes minimum of one block of data at time block size variesdepending on the operating systemthe size of the disk driveand other factorsbut it is usually power of for our phone book examplelet' assume block size of , bytes ( thus our phone book database will require , , bytes divided by , bytes per blockwhich is , blocks your software is most efficient when it specifies read or write operation that' multiple of the block size if you ask to read bytesthe system will read one block , bytesand throw away all but or if you ask to read , bytesit will read two blocksor , bytesand throw away almost half of them by organizing your software so that it works with block of data at time you can optimize its performance assuming our phone book record size of bytesyou can store records in block ( , divided by )as shown in figure thus for maximum efficiency it' important to read records at time (or multiples of this numberfigure blocks and records notice that it' also useful to make your record size multiple of that way an integral number of them will always fit in block of course the sizes shown in our phone book example for recordsblocksand so on are only illustrativethey will vary widely depending on the number and size of records and other software and hardware constraints blocks containing hundreds of records are commonand records may be much larger or smaller than bytes once the read-write head is positioned as described earlierreading block is fairly fastrequiring only few milliseconds thus disk access to read or write block is not very dependent on the size of the block it follows that the larger the blockthe more efficiently you can read or write single record (assuming you use all the records in the blocksequential ordering one way to arrange the phone book data in the disk file would be to order all the records according to some keysay alphabetically by 
last name the record for joseph aardvark would come firstand so on this is shown in figure
25,114
searching to search sequentially ordered file for particular last name such as smithyou could use binary search you would start by reading block of records from the middle of the file the records in the block are all read at once into , -byte buffer in main memory if the keys of these records are too early in the alphabet (kellerfor example)you' go to the / point in the file (princeand read block thereif the keys were too lateyou' go to the / point (deleonby continually dividing the range in half you' eventually find the record you were looking for as we saw in binary search in main memory takes log comparisonswhich for , items is about if every comparison tooksay microsecondsthis would be microsecondsor about / , of secondless than an eye blink howeverwe're now dealing with data stored on disk because each disk access is so time consumingit' more important to focus on how many disk accesses are necessary than on how many individual records there are the time to read block of records will be very much larger than the time to search the records in the block once they're in memory disk accesses are much slower than memory accessesbut on the other hand we access block at timeand there are far fewer blocks than records in our example there are , blocks log of this number is about so in theory we'll need about disk accesses to find the record we want in practice this number is reduced somewhat because we read records at once in the beginning stages of binary search it doesn' help to have multiple records in memory because the next access will be in distant part of the file howeverwhen we get close to the desired recordthe next record we want may already be in memory because it' part of the same block of this may reduce the number of comparisons by two or so thus we'll need about disk accesses ( )which at milliseconds per access requires about millisecondsor / second this is much slower than in-memory accessbut still not too bad insertion unfortunately the picture is 
much worse if we want to insert (or deletean item from sequentially ordered file because the data is orderedboth operations require moving half the records on the averageand therefore about half the blocks moving each block requires two disk accessesone read and one write once the insertion point is foundthe block containing it is read into memory buffer the last record in the block is savedand the appropriate number of records are shifted up to make room for the new onewhich is inserted then the buffer contents are written back to the disk file next the second block is read into the buffer its last record is savedall the other records are shifted upand the last record from the previous block is inserted at the external storagebeginning of the buffer then the buffer contents are again written back to disk this process continues until all the blocks beyond the insertion point have been rewritten
25,115
themwhich at milliseconds per read and write requires more than five minutes to insert single entry this won' be satisfactory if you have thousands of new names to add to the phone book another problem with the sequential ordering is that it only works quickly for one key our file is arranged by last names but suppose you wanted to search for particular phone number you can' use binary searchbecause the data is ordered by name you would need to go through the entire fileblock by blockusing sequential access this would require reading an average of half the blockswhich would require about minutesvery poor performance for simple search it would be nice to have more efficient way to store disk data -trees how can the records of file be arranged to provide fast searchinsertionand deletion timeswe've seen that trees are good approach to organizing in-memory data will trees work with filesthey willbut different kind of tree must be used for external data than for in-memory data the appropriate tree is multiway tree somewhat like treebut with many more data items per nodeit' called -tree -trees were first conceived as appropriate structures for external storage by bayer and mccreight in one block per node why do we need so many items per nodewe've seen that disk access is most efficient when data is read or written one block at time in treethe entity containing data is node it makes sense then to store an entire block of data in each node of the tree this wayreading node accesses maximum amount of data in the shortest time how much data can be put in nodewhen we simply stored the -byte data records for our phone book examplewe could fit into , -byte block in treehoweverwe also need to store the links to other nodes (which means links to other blocksbecause node corresponds to blockin an in-memory treesuch as those we've discussed in previous these links are references (or pointersin languages like ++to nodes in other parts of memory for tree stored in disk filethe links 
are block numbers in file (from to , in our phone book examplefor block numbers we can use field of type inta -byte typewhich can point to more than billion possible blocksprobably enough for most files now we can no longer squeeze -byte records into blockbecause we need room for the links to child nodes we could reduce the number of records to to make room for the linksbut it' most efficient to have an even number of records per nodeso (after appropriate negotiation with managementwe reduce the record size to bytes there will be child links (one more than the number of data itemsso the links will require bytes ( this leaves room for -byte records with bytes left over ( , block in such treeand the corresponding node representationis shown in figure
25,116
within each node the data is ordered sequentially by keyas in tree in factthe structure of -tree is similar to that of treeexcept that there are more data items per node and more links to children the order of -tree is the number of children each node can potentially have in our example this is so the tree is an order btree searching search for record with specified key is carried out in much the same way it is in an in-memory tree firstthe block containing the root is read into memory the search algorithm then starts examining each of the records (orif it' not fullas many as the node actually holds)starting at when it finds record with greater keyit knows to go to the child whose link lies between this record and the preceding one this process continues until the correct node is found if leaf is reached without finding the specified keythe search is unsuccessful insertion the insertion process in -tree is somewhat different than it is in tree recall that in tree many nodes are not fulland in fact contain only one data item in particulara node split always produces two nodes with one item in each this is not an optimum approach in -tree in -tree it' important to keep the nodes as full as possible so that each disk accesswhich reads an entire nodecan acquire the maximum amount of data to help achieve this endthe insertion process differs from that of trees in three waysa node split divides the data items equallyhalf go to the newly created nodeand half remain in the old one node splits are performed from the bottom up rather than from the top down it' not the middle item in node that' promoted upwardbut the middle item in the sequence formed from the items in the node plus the new item we'll demonstrate these features of the insertion process by building small -treeas shown in figure there isn' room to show realistic number of records per nodeso we'll show only fourthus the tree is an order -tree
25,117
25,118
have already been inserted into the tree new data item with key of is insertedresulting in node split here' how the split is accomplished because it' the root that' being splittwo new nodes are created (as in tree) new root and new node to the right of the one being split to decide where the data items gothe insertion algorithm arranges their keys in orderin an internal buffer four of these keys are from the node being splitand the fifth is from the new item being inserted in figure these -item sequences are shown to the side of the tree in this first step the sequence is shown the center item in this sequence in this first stepis promoted to the new root node (in the figurean arrow indicates that the center item will go upward all the items to the left of center remain in the node being splitand all the items to the right go into the new right-hand node the result is shown in figure - (in our phone book example items would go into each child noderather than the shown in the figure in figure - we insert two more items and they fill up the left childas shown in figure - the next item to be inserted splits this left childwith the result shown in figure - here the has been promoted upward into the root nextthree items and are inserted into the tree the first two fill up the third childand the third splits itcausing the creation of new node and the promotion of the middle item to the root the result is shown in figure - again three items and are added to the tree the first two items fill up the second childand the third one splits itcausing the creation of new node and the promotion of the middle item to the rootas shown in figure - now the root is full howeversubsequent insertions don' necessarily cause node splitbecause nodes are split only when new item is inserted into full nodenot when full node is encountered in the search down the tree thus and are inserted in the second child without causing any splitsas shown in figure - howeverthe next item to be inserted 
does cause splitin fact it causes two of them the second node child is fullso it' splitas shown in figure - howeverthe promoted from this splithas no place to go because the root is full thereforethe root must be split as wellresulting in the arrangement of figure - notice that throughout the insertion process no node (except the rootis ever less than half fulland many are more than half full as we notedthis promotes efficiency because file access that reads node always acquires substantial amount of data efficiency of -trees because there are so many records per nodeand so many nodes per leveloperations on -trees are very fastconsidering that the data is stored on disk in our phone book example there are , records all the nodes in the -tree are at least half fullso they contain at least records and links to children the height of the tree is thus somewhat less than log (logarithm to the base of )where is , this is so there will be about levels in the tree thususing -treeonly six disk accesses are necessary to find any record in file of , records at milliseconds per accessthis takes about millisecondsor / of second this is dramatically faster than the binary search of sequentially ordered file the more records there are in nodethe fewer levels there are in the tree we've seen that there are levels in our -treeeven though the nodes hold only records in
25,119
would have if we use blocks with hundreds of recordswe can reduce the number of levels in the tree and further improve access times although searching is faster in -trees than in sequentially ordered disk filesit' for insertion and deletion that -trees show the greatest advantage let' first consider -tree insertion in which no nodes need to be split this is the most likely scenariobecause of the large number of records per node in our phone book exampleas we've seenonly accesses are required to find the insertion point then one more access is required to write the block containing the newly inserted record back to the diska total of accesses next let' see how things look if node must be split the node being split must be readhave half its records removedand be written back to disk the newly created node must be written to the diskand the parent must be read andfollowing the insertion of the promoted recordwritten back to disk this is accesses in addition to the six necessary to find the insertion pointfor total of this is major improvement over the , accesses required for insertion in sequential file in some versions of the -treeonly leaf nodes contain records non-leaf nodes contain only keys and block numbers this may result in faster operation because each block can hold many more block numbers the resulting higher-order tree will have fewer levelsand access speed will be increased howeverprogramming may be complicated because there are two kinds of nodesleaves and non-leaves indexing different approach to speeding up file access is to store records in sequential order but use file index along with the data itself file index is list of key/block pairsarranged with the keys in order recall that in our original phone book example we had , records of bytes eachstored records to blockin , blocks assuming our search key is the last nameevery entry in the index contains two itemsthe keysuch as jones the number of the block where the jones record is located within the 
file these numbers run from to , let' say we use string bytes long for the key (big enough for most last names)and bytes for the block number ( type int in javaeach entry in our index thus requires bytes this is only / the amount necessary for each record the entries in the index are arranged sequentially by last name the original records on the disk can be arranged in any convenient order this usually means that new records are simply appended to the end of the fileso the records are ordered by time of insertion this arrangement is shown in figure
25,120
index file in memory because it' so much smaller than the file containing actual recordsit may be that the index is small enough to fit entirely in main memory in our example there are , records each one has -byte entry in the indexso the index will be , or , , bytes long ( megabytesin modern computers there' no problem fitting this in memory the index can be stored on the diskbut read into memory whenever the database program is started up from then onoperations on the index can take place in memory at the end of the day (or perhaps more frequentlythe index can be written back to disk for permanent storage searching the index-in-memory approach allows much faster operations on the phone book file than are possible with file in which the records themselves are arranged sequentially for examplea binary search requires index accesses at microseconds per accessthat' only about / , of second then there' (inevitablythe time to read the actual record from the fileonce its block number has been found in the index howeverthis is only one disk access of (say milliseconds insertion to insert new item in an indexed file two steps are necessary we first insert its full record into the main filethen we insert an entryconsisting of the key and the block number where the new record is storedinto the index because the index is in sequential orderto insert new item we need to move half the index entrieson the average figuring microseconds to move byte in memorywe have , times times or about seconds to insert new entry this compares with five minutes for the unindexed sequential file (note that we don' need to move any records in the main filewe simply append the new record at the end of the file of courseyou can use more sophisticated approach to storing the index in memory you could store it as binary treetreeor red-black treefor example any of these would significantly reduce insertion and deletion times in any case the index-in-memory approach is much faster than the 
sequential-file approach. In some cases it will also be faster than a B-tree. The only actual disk accesses necessary for an insertion into an indexed file involve the new record itself. Usually the last block in the file is read into memory, the new record is
25,121
multiple indexes an advantage of the indexed approach is that multiple indexeseach with different keycan be created for the same file in one index the keys can be last namesin another telephone numbersin another addresses because the indexes are small compared with the filethis doesn' increase the total data storage very much of courseit does present more of challenge when items are deleted from the filebecause entries must be deleted from all the indexesbut we won' get into that here index too large for memory if the index is too large to fit in memoryit must be broken into blocks and stored on the disk for large files it may then be profitable to store the index itself as -tree in the main file the records are stored in any convenient order this arrangement can be very efficient appending records to the end of the main file is fast operationand the index entry for the new record is also quick to insert because the index is tree the result is very fast searching and insertion for large files note that when an index is arranged as -treeeach node contains number of child pointers and one fewer data items the child pointers are the block numbers of other nodes in the index the data items consist of key value and pointer to block in the main file don' confuse these two kinds of block pointers complex search criteria in complex searches the only practical approach may be to read every block in file sequentially suppose in our phone book example we wanted list of all entries in the phone book with first name frankwho lived in springfieldand who had phone number with three " digits in it (these were perhaps clues found scrawled on scrap of paper clutched in the hand of victim of foul play file organized by last names would be no help at all even if there were index files ordered by first names and citiesthere would be no convenient way to find which files contained both frank and springfield in such cases (which are quite common in many kinds of databasesthe fastest 
approach is probably to read the file sequentiallyblock by blockchecking each record to see if it meets the criteria sorting external files mergesort is the preferred algorithm for sorting external data this is becausemore so than most sorting techniquesdisk accesses tend to occur in adjacent records rather than random parts of the file recall from "recursion,that mergesort works recursively by calling itself to sort smaller and smaller sequences once two of the smallest sequences (one byte each in the internal-memory versionhave been sortedthey are then merged into sorted sequence twice as long larger and larger sequences are mergeduntil eventually the entire file is sorted the approach for external storage is similar howeverthe smallest sequence that can be read from the disk is block of records thusa two-stage process is necessary in the first phasea block is readits records are sorted internallyand the resulting sorted block is written back to disk the next block is similarly sorted and written back to disk this continues until all the blocks are internally sorted
25,122
written back to disk this continues until all pairs of blocks have been merged next each pair of -block sequences is merged into -block sequence each time the size of the sorted sequences doublesuntil the entire file is sorted figure shows the mergesort process on an external file the file consists of four blocks of four records eachfor total of records only three blocks can fit in internal memory (of course all these sizes would be much larger in real situation figure - shows the file before sortingthe number in each record is its key value figure mergesort on an external file internal sort of blocks in the first phase all the blocks in the file are sorted internally this is done by reading the block into memory and sorting it with any appropriate internal sorting algorithmsuch as quicksort (or for smaller numbers of recordsshellsort or insertion sortthe result of sorting the blocks internally is shown in figure - the dotted lines in the figure separate sorted recordssolid lines separate unsorted records second file may be used to hold the sorted blocksand we assume that availability of external storage is not problem it' often desirable to avoid modifying the original file merging in the second phase we want to merge the sorted blocks in the first pass we merge every pair of blocks into sorted -block sequence thus the two blocks - and - are merged into -- - also- and - are merged into -- - the result is shown in figure - third file is necessary to hold the result of this merge step in the second passthe two -record sequences are merged into -record sequenceand the sort is complete of course more merge steps would be required to sort larger filesthe number of such steps is proportional to log the merge steps can alternate between two files internal arrays because the computer' internal memory has room for only three blocksthe merging process must take place in stages let' say there are three arrayscalled arr arr and arr each of which can hold block
25,123
these two arrays are then merge-sorted into arr howeverbecause arr holds only one blockit becomes full before the sort is completed when it becomes fullits contents are written to disk the sort then continuesfilling up arr again this completes the sortand arr is again written to disk the following lists show the details of each of the three mergesorts mergesort read - into arr read - into arr merge into arr write to disk merge into arr write to disk mergesort read - into arr read - into arr merge into arr write to disk merge into arr write to disk mergesort read - into arr read - into arr merge into arr write to disk merge into arr (arr is now empty read - into arr merge into arr write to disk merge into arr (arr is now empty read - into arr merge into arr write to disk merge into arr write to disk this last sequence of steps is rather lengthyso it may be helpful to examine the details of the array contents as the steps are completed figure shows how these arrays look at various stages of mergesort
25,124
summary multiway tree has more keys and children than binary tree tree is multiway tree with up to three keys and four children per node in multiway treethe keys in node are arranged in ascending order in treeall insertions are made in leaf nodesand all leaf nodes are on the same level three kinds of nodes are possible in treea -node has one key and two childrena -node has two keys and three childrenand -node has three keys and four children there is no -node in tree in search in treeat each node the keys are examined if the search key is not found the next node will be child if the search key is less than key child if the search key is between key and key child if the search key is between key and key and child if the search key is greater than key insertion into tree requires that any full node be split on the way down the treeduring the search for the insertion point splitting the root creates two new nodessplitting any other node creates one new node the height of tree can increase only when the root is split there is one-to-one correspondence between tree and red-black tree to transform tree into red-black treemake each -node into black nodemake each -node into black parent with red childand make each -node into black parent with two red children when -node is transformed into parent and childeither node can become the parent
25,125
tree rotation in red-black tree corresponds to changing between the two possible orientations (slantswhen transforming -node the height of tree is less than log search times are proportional to the height the tree wastes space because many nodes are not even half full external storage means storing data outside of main memoryusually on disk external storage is largercheaper (per byte)and slower than main memory data in external storage is typically transferred to and from main memory block at time data can be arranged in external storage in sequential key order this gives fast search times but slow insertion (and deletiontimes -tree is multiway tree in which each node may have dozens or hundreds of keys and children there is always one more child than there are keys in -tree node for the best performancea -tree is typically organized so that node holds one block of data if the search criteria involve many keysa sequential search of all the records in file may be the most practical approach hash tables overview hash table is data structure that offers very fast insertion and searching when you first hear about themhash tables sound almost too good to be true no matter how many data items there areinsertion and searching (and sometimes deletioncan take close to constant timeo( in big notation in practice this is just few machine instructions for human user of hash table this is essentially instantaneous it' so fast that computer programs typically use hash tables when they need to look up tens of thousands of items in less than second (as in spelling checkershash tables are significantly faster than treeswhichas we learned in the preceding operate in relatively fast (logntime not only are they fasthash tables are relatively easy to program hash tables do have several disadvantages they're based on arraysand arrays are difficult to expand once they've been created for some kinds of hash tablesperformance may degrade catastrophically when the table becomes too fullso 
the programmer needs to have a fairly accurate idea of how many data items will need to be stored (or be prepared to periodically transfer data to a larger hash table, a time-consuming process). Also, there's no convenient way to visit the items in a hash table in any kind of order
25,126
elsewhere howeverif you don' need to visit items in orderand you can predict in advance the size of your databasehash tables are unparalleled in speed and convenience introduction to hashing in this section we'll introduce hash tables and hashing one important concept is how range of key values is transformed into range of array index values in hash table this is accomplished with hash function howeverfor certain kinds of keysno hash function is necessarythe key values can be used directly as array indices we'll look at this simpler situation first and then go on to show how hash functions can be used when keys aren' distributed in such an orderly fashion employee numbers as keys suppose you're writing program to access employee records for small company withsay , employees each employee record requires , bytes of storage thus you can store the entire database in only megabytewhich will easily fit in your computer' memory the company' personnel director has specified that she wants the fastest possible access to any individual record alsoevery employee has been given number from (for the founderto , (for the most recently hired workerthese employee numbers can be used as keys to access the recordsin factaccess by other keys is deemed unnecessary employees are seldom laid offbut even when they aretheir record remains in the database for reference (concerning retirement benefits and so onwhat sort of data structure should you use in this situationkeys are index numbers one possibility is simple array each employee record occupies one cell of the arrayand the index number of the cell is the employee number for that record this is shown in figure figure employee numbers as array indices as you knowaccessing specified array element is very fast if you know its index number the clerk looking up herman alcazar knows that he is employee number so he enters that numberand the program goes instantly to index number in the array single program statement is all that' 
necessary: empRecord rec = databaseArray[72]; It's also very quick to add a new item: you insert it just past the last occupied element. The next new record--for Jim Chan, the newly hired employee number 1,001--would go in cell 1,001. Again, a single statement inserts the new record
25,127
presumably the array is made somewhat larger than the current number of employeesto allow room for expansionbut not much expansion is anticipated not always so orderly the speed and simplicity of data access using this array-based database make it very attractive howeverit works in our example only because the keys are unusually well organized they run sequentially from to known maximumand this maximum is reasonable size for an array there are no deletionsso memory-wasting gaps don' develop in the sequence new items can be added sequentially at the end of the arrayand the array doesn' need to be very much larger than the current number of items dictionary in many situations the keys are not so well behaved as in the employee database just described the classic example is dictionary if you want to put every word of an english-language dictionaryfrom to zyzzyva (yesit' word)into your computer' memoryso they can be accessed quicklya hash table is good choice similar widely used application for hash tables is in computer-language compilerswhich maintain symbol table in hash table the symbol table holds all the variable and function names made up by the programmeralong with the addresses where they can be found in memory the program needs to access these names very quicklyso hash table is the preferred data structure let' say we want to store , -word english-language dictionary in main memory you would like every word to occupy its own cell in , -cell arrayso you can access the word using an index number this will make access very fast but what' the relationship of these index numbers to the wordsgiven the word morphosisfor examplehow do we find its index numberconverting words to numbers what we need is system for turning word into an appropriate index number to beginwe know that computers use various schemes for representing individual characters as numbers one such scheme is the ascii codein which is is and so onup to for howeverthe ascii code runs from to to 
accommodate capitalspunctuationand so on there are really only letters in english wordsso let' devise our own code-- simpler one that can potentially save memory space let' say is is is and so on up to for we'll also say blank is so we have characters (uppercase letters aren' used in this dictionary how do we combine the digits from individual letters into number that represents an entire wordthere are all sorts of approaches we'll look at two representative onesand their advantages and disadvantages add the digits simple approach to converting word to number might be to simply add the code numbers for each character say we want to convert the word cats to number first we convert the characters to digits using our homemade codec
25,128
then we add them thus in our dictionary the word cats would be stored in the array cell with index all the other english words would likewise be assigned an array index calculated by this process how well would this workfor the sake of argumentlet' restrict ourselves to -letter words then (remembering that blank is )the first word in the dictionaryawould be coded by + + + + + + + + + = the last potential word in the dictionary would be zzzzzzzzzz (ten zsour code obtained by adding its letters would be thus the total range of word codes is from to unfortunatelythere are , words in the dictionaryso there aren' enough index numbers to go around each array element will need to hold about words ( , divided by clearly this presents problems if we're thinking in terms of our one word-per-array element scheme maybe we could put subarray or linked list of words at each array element howeverthis would seriously degrade the access speed it would be quick to access the array elementbut slow to search through the words to find the one we wanted so our first attempt at converting words to numbers leaves something to be desired too many words have the same index (for examplewastingivetendmoantickbailsdredgeand hundreds of other words add to as cats does we conclude that this approach doesn' discriminate enoughso the resulting array has too few elements we need to spread out the range of possible indices multiply by powers let' try different way to map words to numbers if our array was too small beforelet' make sure it' big enough what would happen if we created an array in which every wordin fact every potential wordfrom to zzzzzzzzzzwas guaranteed to occupy its own unique array elementto do thiswe need to be sure that every character in word contributes in unique way to the final number we'll begin by thinking about an analogous situation with numbers instead of words recall that in an ordinary multi-digit numbereach digit position represents value times as big as the position 
to its right. Thus 7,546 really means 7*1000 + 5*100 + 4*10 + 6*1 or, writing the multipliers as powers of 10:
25,129
* * * * (an input routine in computer program performs similar series of multiplications and additions to convert sequence of digitsentered at the keyboardinto number stored in memory in this system we break number into its digitsmultiply them by appropriate powers of (because there are possible digits)and add the products in similar way we can decompose word into its lettersconvert the letters to their numerical equivalentsmultiply them by appropriate powers of (because there are possible charactersincluding the blank)and add the results this gives unique number for every word say we want to convert the word cats to number we convert the digits to numbers as shown earlier then we multiply each number by the appropriate power of and add the results * * * * calculating the powers gives * , * * * and multiplying the letter codes times the powers yields , which sums to , this process does indeed generate unique number for every potential word we just calculated four-letter word what happens with larger wordsunfortunately the range of numbers becomes rather large the largest -letter wordzzzzzzzzzztranslates into * * * * * * * * * * just by itself is more than , , , , so you can see that the sum will be huge an array stored in memory can' possibly have this many elements the problem is that this scheme assigns an array element to every potential wordwhether it' an actual english word or not thus there are cells for aaaaaaaaaaaaaaaaaaabaaaaaaaaacand so onup to zzzzzzzzzz only small fraction of these are necessary for real wordsso most array cells are empty this is shown in figure figure index for every potential our first scheme--adding the numbers--generated too few indices this latest scheme-
25,130
hashing what we need is way to compress the huge range of numbers we obtain from the numbers-multiplied-by-powers system into range that matches reasonably sized array how big an array are we talking about for our english dictionaryif we only have , wordsyou might assume our array should have approximately this many elements howeverit turns out we're going to need an array with about twice this many cells (it will become clear later why this is so so we need an array with , elements thus we look for way to squeeze range of to more than , , , , into the range to , simple approach is to use the modulo operator (%)which finds the remainder when one number is divided by another to see how this workslet' look at smaller and more comprehensible range suppose we squeeze numbers in the range to (we'll represent them by the variable largenumberinto the range to (the variable smallnumberthere are numbers in the range of small numbersso we'll say that variable smallrange has the value it doesn' really matter what the large range is (unless it overflows the program' variable sizethe java expression for the conversion is smallnumber largenumber smallrangethe remainders when any number is divided by are always in the range to for example % gives and % is this is shown in figure we've squeezed the range - into the range - -to- compression ratio figure range conversion similar expression can be used to compress the really huge numbers that uniquely represent every english word into index numbers that fit in our dictionary arrayarrayindex hugenumber arraysizethis is an example of hash function it hashes (convertsa number in large range into number in smaller range this smaller range corresponds to the index numbers in an array an array into which data is inserted using hash function is called hash table
25,131
to reviewwe convert word into huge number by multiplying each character in the word by an appropriate power of hugenumber ch * ch * ch * ch * ch * ch * ch * ch * ch * ch * thenusing the modulo (%operatorwe squeeze the resulting huge range of numbers into range about twice as big as the number of items we want to store this is an example of hash functionarraysize numberwords arrayindex hugenumber arraysizein the huge rangeeach number represents potential data item (an arrangement of letters)but few of these numbers represent actual data items (english wordsa hash function transforms these large numbers into the index numbers of much smaller array in this array we expect thaton the averagethere will be one word for every two cells some cells will have no wordsand some more than one practical implementation of this scheme runs into trouble because hugenumber will probably overflow its variable sizeeven for type long we'll see how to deal with this later collisions we pay price for squeezing large range into small one there' no longer guarantee that two words won' hash to the same array index this is similar to what happened when we added the letter codesbut the situation is nowhere near as bad when we added the lettersthere were only possible results (for words up to lettersnow we're spreading this out into , possible results even soit' impossible to avoid hashing several different words into the same array locationat least occasionally we' hoped that we could have one data item per index numberbut this turns out not to be possible the best we can do is hope that not too many words will hash to the same index perhaps you want to insert the word melioration into the array you hash the word to obtain its index numberbut find that the cell at that number is already occupied by the word demystifywhich happens to hash to the exact same number (for certain size arraythis situationshown in figure is called collision figure collision
25,132
but in fact we can work around the problem in variety of ways remember that we've specified an array with twice as many cells as data items thus perhaps half the cells are empty one approachwhen collision occursis to search the array in some systematic way for an empty celland insert the new item thereinstead of at the index specified by the hash function this approach is called open addressing if cats hashes to , but this location is already occupied by parsnipthen we might try to insert cats in , for example second approach (mentioned earlieris to create an array that consists of linked lists of words instead of the words themselves then when collision occursthe new item is simply inserted in the list at that index this is called separate chaining in the balance of this we'll discuss open addressing and separate chainingand then return to the question of hash functions open addressing in open addressingwhen data item can' be placed at the index calculated by the hash functionanother location in the array is sought we'll explore three methods of open addressingwhich vary in the method used to find the next vacant cell these methods are linear probingquadratic probingand double hashing linear probing in linear probing we search sequentially for vacant cells if , is occupied when we try to insert cats therewe go to , then , and so onincrementing the index until we find an empty cell this is called linear probing because it steps sequentially along the line of cells the hash workshop applet the hash workshop applet demonstrates linear probing when you start this appletyou'll see screen similar to figure figure the hash workshop applet in this applet the range of keys runs from to the initial size of the array is the hash function has to squeeze the range of keys down to match the array size it does this with the modulo (%operatoras we've seen beforearrayindex key arraysize
25,133
arrayindex key this hash function is simple enough that you can solve it mentally for given keykeep subtracting multiples of until you get number under for exampleto hash subtract giving and then againgiving this is the index number where the algorithm will place thus you can easily check that the algorithm has hashed key to the correct address (an array size of is even easier to figure outas key' last digit is the index it will hash to as with other appletsoperations are carried out by repeatedly pressing the same button for exampleto find data item with specified numberclick the find button repeatedly rememberfinish sequence with one button before using another button for exampledon' switch from clicking fill to some other button until the press any key message is displayed all the operations require you to type numerical value at the beginning of the sequence the find button requires you to type key valuefor examplewhile new requires the size of the new table the new button you can create new hash table of size you specify by using the new button the maximum size is this limitation results from the number of cells open addressingthat can be viewed in the applet window the initial size is also we use this number because it makes it easy to check if the hash values are correctbut as we'll see laterin general-purpose hash tablethe array size should be prime numberso would be better choice the fill button initially the hash table contains itemsso it' half full howeveryou can also fill it with specified number of data items using the fill button keep clicking filland when promptedtype the number of items to fill hash tables work best when they are not more than half or at the most two-thirds full ( items in -cell tableyou'll see that the filled cells aren' evenly distributed in the cells sometimes there' sequence of several empty cellsand sometimes sequence of filled cells let' call sequence of filled cells in hash table filled sequence as you add more and more 
itemsthe filled sequences become longer this is called clusteringand is shown in figure figure clustering
25,134
fill it too full (for exampleif you try to put items in -cell tableyou may think the program has stoppedbut be patient it' extremely inefficient at filling an almost-full array alsonote that if the hash table becomes completely full the algorithms all stop workingin this applet they assume that the table has at least one empty cell the find button the find button starts by applying the hash function to the key value you type into the number box this results in an array index the cell at this index may be the key you're looking forthis is the optimum situationand success will be reported immediately howeverit' also possible that this cell is already occupied by data item with some other key this is collisionyou'll see the red arrow pointing to an occupied cell following collisionthe search algorithm will look at the next cell in sequence the process of finding an appropriate cell following collision is called probe following collisionthe find algorithm simply steps along the array looking at each cell in sequence if it encounters an empty cell before finding the key it' looking forit knows the search has failed there' no use looking furtherbecause the insertion algorithm would have inserted the item at this cell (if not earlierfigure shows successful and unsuccessful linear probes figure linear probes the ins button the ins button inserts data itemwith key value that you type into the number boxinto the hash table it uses the same algorithm as the find button to locate the appropriate cell if the original cell is occupiedit will probe linearly for vacant cell when it finds oneit inserts the item try inserting some new data items type in -digit number and watch what happens most items will go into the first cell they trybut some will suffer collisionsand need to step along to find an empty cell the number of steps they take is the probe length most probe lengths are only few cells long sometimeshoweveryou may see probe lengths of or cellsor even longer as the array 
becomes excessively full notice which keys hash to the same index if the array size is the keys and so on up to all hash to index try inserting this sequence or similar one this will demonstrate the linear probe
25,135
isn' accomplished by simply removing data item from cellleaving it empty why notremember that during insertion the probe process steps along series of cellslooking for vacant one if cell is made empty in the middle of this sequence of full cellsthe find routine will give up when it sees the empty celleven if the desired cell can eventually be reached for this reason deleted item is replaced by an item with special key value that identifies it as deleted in this applet we assume all legitimate key values are positiveso the deleted value is chosen as - deleted items are marked with the special key *delthe insert button will insert new item at the first available empty cell or in *delitem the find button will treat *delitem as an existing item for the purposes of searching for another item further along if there are many deletionsthe hash table fills up with these ersatz *deldata itemswhich makes it less efficient for this reason many hash table implementations don' allow deletion if it is implementedit should be used sparingly duplicates allowedcan you allow data items with duplicate keys to be used in hash tablesthe fill routine in the hash applet doesn' allow duplicatesbut you can insert them with the insert button if you like then you'll see that only the first one can be accessed the only way to access second item with the same key is to delete the first one this isn' too convenient you could rewrite the find algorithm to look for all items with the same key instead of just the first one howeverit would then need to search through all the cells of every linear sequence it encountered this wastes time for all table accesseseven when no duplicates are involved in the majority of cases you probably want to forbid duplicates clustering try inserting more items into the hash table in the hash workshop applet as it gets more fullclusters grow larger clustering can result in very long probe lengths this means that it' very slow to access cells at the end of the sequence 
the more full the array isthe worse clustering becomes it' not problem when the array is half fulland still not too bad when it' two-thirds full beyond thishoweverperformance degrades seriously as the clusters grow larger and larger for this reason it' critical when designing hash table to ensure that it never becomes more than halfor at the most two-thirdsfull (we'll discuss the mathematical relationship between how full the hash table is and probe lengths at the end of this java code for linear probe hash table it' not hard to create methods to handle searchinsertionand deletion with linear-probe hash tables we'll show the java code for these methodsand then complete hash java program that puts them in context the find(method the find(method first calls hashfunc(to hash the search key to obtain the index number hashval the hashfunc(method applies the operator to the search key and the array sizeas we've seen before nextin while conditionfind(checks if the item at this index is empty (nullif notit checks if the item contains the search key if it doesit returns the item if it doesn'
25,136
next cell is occupied here' the code for find()public dataitem find(int key/(assumes table not fullint hashval hashfunc(key)/find item with key /hash the key while(hasharray[hashval!null/until empty cell/found the keyif(hasharray[hashvalidata =keyreturn hasharray[hashval]/yesreturn item ++hashval/go to next cell hashval %arraysize/wrap around if necessary return null/can' find item as hashval steps through the arrayit eventually reaches the end when this happens we want it to wrap around to the beginning we could check for this with an if statementsetting hashval to whenever it equaled the array size howeverwe can accomplish the same thing by applying the operator to hashval and the array size cautious programmers might not want to assume the table is not fullas is done here the table should not be allowed to become fullbut if it didthis method would loop forever for simplicity we don' check for this situation the insert(method the insert(method uses about the same algorithm as find(to locate where data item should go howeverit' looking for an empty cell or deleted item (key - )rather than specific item once this empty cell has been locatedinsert(places the new item into it public void insert(dataitem item/insert dataitem /(assumes table not fullint key item idata/extract key int hashval hashfunc(key)/hash the key /until empty cell or - while(hasharray[hashval!null &hasharray[hashvalidata !- ++hashval/go to next cell hashval %arraysize/wrap around if necessary hasharray[hashvalitem/insert item /end insert(the delete(method the delete(method finds an existing item using code similar to find(once the item is founddelete(writes over it with the special data item nonitemwhich is predefined with key of -
25,137
int hashval hashfunc(key)/delete dataitem /hash the key while(hasharray[hashval!null/until empty cell/found the keyif(hasharray[hashvalidata =keydataitem temp hasharray[hashval]/save item hasharray[hashvalnonitem/delete item return temp/return item ++hashval/go to next cell hashval %arraysize/wrap around if necessary return null/can' find item /end delete(the hash java program here' the complete hash java program dataitem object contains just one fieldan integer that is its key as in other data structures we've discussedthese objects could contain more dataor reference to an object of another class (such as employee or partnumberthe major field in class hashtable is an array called hasharray other fields are the size of the array and the special nonitem object used for deletions here' the listing for hash java/hash java /demonstrates hash table with linear probing /to run this programc:>java hashtableapp import java io */for / import java util */for stack class import java lang integer/for parseint(///////////////////////////////////////////////////////////////class dataitem /(could have more datapublic int idata/data item (key//public dataitem(int ii/constructor idata ii///end class dataitem ///////////////////////////////////////////////////////////////
25,138
dataitem[hasharrayint arraysizedataitem nonitem/array holds hash table /for deleted items /public hashtable(int size/constructor arraysize sizehasharray new dataitem[arraysize]nonitem new dataitem(- )/deleted item key is - /public void displaytable(system out print("table")for(int = <arraysizej++if(hasharray[ !nullsystem out print(hasharray[jidata")else system out print("*")system out println("")/public int hashfunc(int keyreturn key arraysize/hash function /public void insert(dataitem item/insert dataitem /(assumes table not fullint key item idata/extract key int hashval hashfunc(key)/hash the key /until empty cell or - while(hasharray[hashval!null &hasharray[hashvalidata ! ++hashval/go to next cell hashval %arraysize/wraparound if necessary hasharray[hashvalitem/insert item /end insert(
25,139
int hashval hashfunc(key)/hash the key while(hasharray[hashval!null/until empty cell/found the keyif(hasharray[hashvalidata =keydataitem temp hasharray[hashval]/save item hasharray[hashvalnonitem/delete item return temp/return item ++hashval/go to next cell hashval %arraysize/wraparound if necessary return null/can' find item /end delete(/public dataitem find(int key/find item with key int hashval hashfunc(key)/hash the key while(hasharray[hashval!null/until empty cell/found the keyif(hasharray[hashvalidata =keyreturn hasharray[hashval]/yesreturn item ++hashval/go to next cell hashval %arraysize/wraparound if necessary return null/can' find item //end class hashtable ///////////////////////////////////////////////////////////////class hashtableapp public static void main(string[argsthrows ioexception dataitem adataitemint akeysizenkeyspercell/get sizes puttext("enter size of hash table")size getint()puttext("enter initial number of items")
25,140
keyspercell /make table hashtable thehashtable new hashtable(size)for(int = <nj++/insert data akey (int)(java lang math random(keyspercell size)adataitem new dataitem(akey)thehashtable insert(adataitem)while(true/interact with user puttext("enter first letter of ")puttext("showinsertdeleteor find")char choice getchar()switch(choicecase ' 'thehashtable displaytable()breakcase ' 'puttext("enter key value to insert")akey getint()adataitem new dataitem(akey)thehashtable insert(adataitem)breakcase ' 'puttext("enter key value to delete")akey getint()thehashtable delete(akey)breakcase ' 'puttext("enter key value to find")akey getint()adataitem thehashtable find(akey)if(adataitem !nullsystem out println("found akey)else system out println("could not find akey)breakdefaultputtext("invalid entry\ ")/end switch /end while /end main(/
25,141
system out print( )system out flush()//public static string getstring(throws ioexception inputstreamreader isr new inputstreamreader(system in)bufferedreader br new bufferedreader(isr)string br readline()return //public static char getchar(throws ioexception string getstring()return charat( )//public static int getint(throws ioexception string getstring()return integer parseint( )///end class hashtableapp the main(routine in the hashtableapp class contains user interface that allows the user to show the contents of the hash table (enter )insert an item ( )delete an item ( )or find an item (finitiallyit asks the user to input the size of the hash table and the number of items in it you can make it almost any sizefrom few items to , (it may take little time to build larger tables than this don' use the (for showoption on tables of more than few hundred itemsthey scroll off the screen and it takes long time to display them variable in main()keyspercellspecifies the ratio of the range of keys to the size of the array in the listingit' set to this means that if you specify table size of the keys will range from to to see what' going onit' best to create tables with fewer than about itemsso all the items can be displayed on one line here' some sample interaction with hash javaenter size of hash table enter initial number of items enter first letter of showinsertdeleteor finds
25,142
enter first letter of showinsertdeleteor findf enter key value to find found enter first letter of showinsertdeleteor findi enter key value to insert enter first letter of showinsertdeleteor finds table * * * enter first letter of showinsertdeleteor findd enter key value to delete enter first letter of showinsertdeleteor finds table *- * * key values run from to ( times minus the *symbol indicates that cell is empty the item with key is inserted at location (the first item is numbered because % is notice how changes to - when this item is deleted expanding the array one option when hash table becomes too full is to expand its array in javaarrays have fixed size and can' be expanded your program could create newlarger arrayand then rehash the contents of the old small array into the new large one howeverthis is time-consuming process remember that the hash function calculates the location of given data item based on the array sizeso the locations in the large array won' be the same as those in small array you can'tthereforesimply copy the items from one array to the other you'll need to go through the old array in sequenceinserting each item into the new array with the insert(method java offers class vector that is an array-like data structure that can be expanded howeverit' not much help because of the need to rehash all data items when the table changes size expanding the array is only practical when there' plenty of time available to carry it out quadratic probing we've seen that clusters can occur in the linear probe approach to open addressing once cluster formsit tends to grow larger items that hash to any value in the range of the cluster will step along and insert themselves at the end of the clusterthus making it even bigger the bigger the cluster getsthe faster it grows it' like the crowd that gathers when someone faints at the shopping mall the first arrivals come because they saw the victim falllater arrivals gather because they wondered what everyone 
else was looking at. The larger the crowd grows, the more people are attracted to it. The ratio of the number of items in a table to the table's size is called the load factor. A table with 10,000 cells and 6,667 items has a load factor of 2/3:

loadFactor = nItems / arraySize;

Clusters can form even when the load factor isn't high. Parts of the hash table may consist of big clusters, while others are sparsely inhabited. Clusters reduce performance.
25,143
widely separated cells, instead of those adjacent to the primary hash site. The step is the square of the step number. In a linear probe, if the primary hash index is x, subsequent probes go to x+1, x+2, x+3, and so on. In quadratic probing, probes go to x+1, x+4, x+9, x+16, x+25, and so on. The distance from the initial probe is the square of the step number: x+1^2, x+2^2, x+3^2, x+4^2, x+5^2, and so on. The figure shows some quadratic probes.

Figure: Quadratic probes

It's as if the quadratic probe became increasingly desperate as its search lengthened. At first it calmly picks the adjacent cell. If that's occupied, it thinks it may be in a small cluster, so it tries something 4 cells away. If that's occupied, it becomes a little concerned, thinking it may be in a larger cluster, and tries 9 cells away. If that's occupied, it feels the first tinges of panic and jumps 16 cells away. Pretty soon it's flying hysterically all over the place, as you can see if you try searching with the HashDouble Workshop applet when the table is almost full.

The HashDouble Applet with Quadratic Probes

The HashDouble Workshop applet allows two different kinds of collision handling: quadratic probes and double hashing. (We'll look at double hashing in the next section.) This applet generates a display much like that of the Hash Workshop applet, except that it includes radio buttons to select quadratic probing or double hashing.

To see how quadratic probes look, start up this applet and create a new hash table using the New button. When you're asked to select double or quadratic probe, click the Quad button. Once the new table is created, fill it four-fifths full using the Fill button. This is too full, but it will generate longer probes so you can study the probe algorithm.

Incidentally, if you try to fill the hash table too full, you may see the message "Can't complete fill." This occurs when the probe sequences get very long. Every additional step in the probe sequence makes a bigger step size. If the sequence is too long, the step size will eventually exceed the capacity of its integer variable, so
the applet shuts down the fill process before this happens. Once the table is filled, select an existing key value and use the Find key to see if the
25,144
patienthoweveryou'll find key that requires three or four stepsand you'll see the step size lengthen for each step you can also use find to search for nonexistent keythis search continues until an empty cell is encountered importantalways make the array size prime number use instead of for example (other primes less than are and if the array size is not primean endless sequence of steps may occur during probe if this happens during fill operationthe applet will be paralyzed the problem with quadratic probes quadratic probes eliminate the clustering problem we saw with the linear probewhich is called primary clustering howeverquadratic probes suffer from different and more subtle clustering problem this occurs because all the keys that hash to particular cell follow the same sequence in trying to find vacant space let' say all hash to and are inserted in this order then will require one-step probe will require -step probeand will require -step probe each additional item with key that hashes to will require longer probe this phenomenon is called secondary clustering secondary clustering is not serious problembut quadratic probing is not often used because there' slightly better solution double hashing to eliminate secondary clustering as well as primary clusteringanother approach can be useddouble hashing (sometimes called rehashingsecondary clustering occurs because the algorithm that generates the sequence of steps in the quadratic probe always generates the same steps and so on what we need is way to generate probe sequences that depend on the key instead of being the same for every key then numbers with different keys that hash to the same index will use different probe sequences the solution is to hash the key second timeusing different hash functionand use the result as the step size for given key the step size remains constant throughout probebut it' different for different keys experience has shown that this secondary hash function must have certain 
characteristics:

- It must not be the same as the primary hash function.
- It must never output 0 (otherwise there would be no step; every probe would land on the same cell, and the algorithm would go into an endless loop).

Experts have discovered that functions of the following form work well:

stepSize = constant - (key % constant);

where constant is prime and smaller than the array size. For example:

stepSize = 5 - (key % 5);

This is the secondary hash function used in the Workshop applet. For any given key all the steps will be the same size, but different keys generate different step sizes. With this
25,145
figure double hashing the hashdouble applet with double hashing you can use the hashdouble workshop applet to see how double hashing works it starts up automatically in double-hashing modebut if it' in quadratic mode you can switch to double by creating new table with the new button and clicking the double button when prompted to best see probes at work you'll need to fill the table rather fullsay to about nine-tenths capacity or more even with such high load factorsmost data items will be found in the cell found by the first hash functiononly few will require extended probe sequences try finding existing keys when one needs probe sequenceyou'll see how all the steps are the same size for given keybut that the step size is different--between and -for different keys java code for double hashing here' the listing for hashdouble javawhich uses double hashing it' similar to the hash java programbut uses two hash functionsone for finding the initial indexand the second for generating the step size as beforethe user can show the table contentsinsert an itemdelete an itemand find an item /hashdouble java /demonstrates hash table with double hashing /to run this programc:>java hashdoubleapp import java io */for / import java util */for stack class import java lang integer/for parseint(///////////////////////////////////////////////////////////////class dataitem /(could have more itemspublic int idata/data item (key//public dataitem(int ii/constructor idata ii/
25,146
/end class dataitem ///////////////////////////////////////////////////////////////class hashtable dataitem[hasharrayint arraysizedataitem nonitem/array is the hash table /for deleted items /hashtable(int size/constructor arraysize sizehasharray new dataitem[arraysize]nonitem new dataitem(- )/public void displaytable(system out print("table")for(int = <arraysizej++if(hasharray[ !nullsystem out print(hasharray[jidata")else system out print("*")system out println("")/public int hashfunc (int keyreturn key arraysize/public int hashfunc (int key/non-zeroless than array sizedifferent from hf /array size must be relatively prime to and return key //insert dataitem public void insert(int keydataitem item
25,147
/(assumes table not fullint hashval hashfunc (key)/hash the key int stepsize hashfunc (key)/get step size /until empty cell or - while(hasharray[hashval!null &hasharray[hashvalidata !hashval +stepsizehashval %arraysizehasharray[hashvalitem/end insert(/add the step /for wraparound /insert item /public dataitem delete(int key/delete dataitem int hashval hashfunc (key)/hash the key int stepsize hashfunc (key)/get step size while(hasharray[hashval!null/until empty cell/is correct hashvalif(hasharray[hashvalidata =keydataitem temp hasharray[hashval]/save item hasharray[hashvalnonitem/delete item return temp/return item hashval +stepsize/add the step hashval %arraysize/for wraparound return null/can' find item /end delete(/public dataitem find(int key/find item with key /(assumes table not fullint hashval hashfunc (key)/hash the key int stepsize hashfunc (key)/get step size while(hasharray[hashval!null/until empty cell/is correct hashvalif(hasharray[hashvalidata =keyreturn hasharray[hashval]/yesreturn item hashval +stepsize/add the step hashval %arraysize/for wraparound return null/can' find item
25,148
///////////////////////////////////////////////////////////////class hashdoubleapp public static void main(string[argsthrows ioexception int akeydataitem adataitemint sizen/get sizes puttext("enter size of hash table")size getint()puttext("enter initial number of items") getint()/make table hashtable thehashtable new hashtable(size)for(int = <nj++/insert data akey (int)(java lang math random( size)adataitem new dataitem(akey)thehashtable insert(akeyadataitem)while(true/interact with user puttext("enter first letter of ")puttext("showinsertdeleteor find")char choice getchar()switch(choicecase ' 'thehashtable displaytable()breakcase ' 'puttext("enter key value to insert")akey getint()adataitem new dataitem(akey)thehashtable insert(akeyadataitem)breakcase ' 'puttext("enter key value to delete")akey getint()thehashtable delete(akey)breakcase ' 'puttext("enter key value to find")akey getint()
25,149
adataitem thehashtable find(akey)if(adataitem !nullsystem out println("found akey)else system out println("could not find akey)breakdefaultputtext("invalid entry\ ")/end switch /end while /end main(//public static void puttext(string ssystem out print( )system out flush()//public static string getstring(throws ioexception inputstreamreader isr new inputstreamreader(system in)bufferedreader br new bufferedreader(isr)string br readline()return //public static char getchar(throws ioexception string getstring()return charat( )//public static int getint(throws ioexception string getstring()return integer parseint( )///end class hashdoubleapp output and operation of this program are similar to those of hash java table shows what happens when items are inserted into -cell hash table using double hashing the step sizes run from to
25,150
Item Number | Key | Hash Value | Step Size | Cells in Probe Sequence
25,151
the array gets more fullthe probe sequences become quite long here' the resulting array of keys* * table size prime number double hashing requires that the size of the hash table is prime number to see whyimagine situation where the table size is not prime number for examplesuppose the array size is (indices from to )and that particular key hashes to an initial index of and step size of the probe sequence will be and so onrepeating endlessly only these three cells are ever examinedso the algorithm will never find the empty cells that might be waiting at and so on the algorithm will crash and burn if the array size were which is primethe probe sequence eventually visits every cell it' and so on and on if there is even one empty cellthe probe will find it using prime number as the array size makes it impossible for any number to divide it evenlyso the probe sequence will eventually check every cell similar effect occurs using the quadratic probe in that casehoweverthe step size gets larger with each stepand will eventually overflow the variable holding itthus preventing an endless loop in generaldouble hashing is the probe sequence of choice when open addressing is used separate chaining in open addressingcollisions are resolved by looking for an open cell in the hash table different approach is to install linked list at each index in the hash table data item' key is hashed to the index in the usual wayand the item is inserted into the linked list at that index other items that hash to the same index are simply added to the linked listthere' no need to search for empty cells in the primary array figure shows how separate chaining looks figure eparate chaining separate chaining is conceptually somewhat simpler than the various probe schemes used in open addressing howeverthe code is longer because it must include the mechanism for the linked listsusually in the form of an additional class the hashchain workshop applet
25,152
an array of linked listsas shown in figure figure the hashchain workshop applet each element of the array occupies one line of the displayand the linked lists extend from left to right initially there are cells in the array ( liststhis is more than fits on the screenyou can move the display up and down with the scrollbar to see the entire array the display shows up to six items per list you can create hash table with up to listsand use load factors up to higher load factors may cause the linked lists to exceed six items and run off the right edge of the screenmaking it impossible to see all the items (this may happen very occasionally even at the load factor experiment with the hashchain applet by inserting some new items with the ins button you'll see how the red arrow goes immediately to the correct list and inserts the item at the beginning of the list the lists in the hashchain applet are not sortedso insertion does not require searching through the list (the example program will demonstrate sorted lists try to find specified items using the find button during find operationif there are several items on the listthe red arrow must step through the items looking for the correct one for successful searchhalf the items in the list must be examined on the averageas we discussed in "linked lists for an unsuccessful search all the items must be examined load factors the load factor (the ratio of the number of items in hash table to its sizeis typically different in separate chaining than in open addressing in separate chaining it' normal to put or more items into an -cell arraythus the load factor can be or greater there' no problem with thissome locations will simply contain two or more items in their lists of courseif there are many items on the listsaccess time is reduced because access to specified item requires searching through an average of half the items on the list finding the initial cell takes fast ( timebut searching through list takes time proportional to 
the number of items on the list: O(M) time. Thus we don't want the lists to become too full. A load factor of 1, as shown in the Workshop applet, is common. With this load factor, roughly one third of the cells will be empty, one third will hold one item, and one third will hold two or more items. In open addressing, performance degrades badly as the load factor increases above one half or two thirds. In separate chaining the load factor can rise above 1 without hurting performance very much. This makes separate chaining a more robust mechanism.
25,153
table duplicates duplicates are allowed and may be generated in the fill process all items with the same key will be inserted in the same listso if you need to discover all of themyou must search the entire list in both successful and unsuccessful searches this lowers performance the find operation in the applet only finds the first of several duplicates deletion in separate chainingdeletion poses no special problems as it does in open addressing the algorithm hashes to the proper list and then deletes the item from the list because probes aren' usedit doesn' matter if the list at particular cell becomes empty we've included del button in the workshop applet to show how deletion works table size with separate chaining it' not so important to make the table size prime numberas it is with quadratic probes and double hashing there are no probes in separate chainingso there' no need to worry that probe will go into an endless sequence because the step size divides evenly into the array size on the other handcertain kinds of key distributions can cause data to cluster when the array size is not prime number we'll have more to say about this when we discuss hash functions buckets another approach similar to separate chaining is to use an array at each location in the hash tableinstead of linked list such arrays are called buckets this approach is not as efficient as the linked list approachhoweverbecause of the problem of choosing the size of the buckets if they're too small they may overflowand if they're too large they waste memory linked listswhich allocate memory dynamicallydon' have this problem java code for separate chaining the hashchain java program includes sortedlist class and an associated link class sorted lists don' speed up successful searchbut they do cut the time of an unsuccessful search in half (as soon as an item larger than the search key is reachedwhich on average is half the items in listthe search is declared failure deletion times are also cut in 
halfhoweverinsertion times are lengthenedbecause the new item can' just be inserted at the beginning of the listits proper place in the ordered list must be located before it' inserted if the lists are shortthe increase in insertion times may not be important in situations where many unsuccessful searches are anticipatedit may be worthwhile to use the slightly more complicated sorted listrather than an unsorted list howeveran unsorted list is preferred if insertion speed is more important the hashchain java programshown in listing begins by constructing hash table with table size and number of items entered by the user the user can then insertfindand delete itemsand display the list for the entire hash table to be viewed on the screenthe size of the table must be no greater than or so listing the hashchain java program
25,154
/demonstrates hash table with separate chaining /to run this programc:>java hashchainapp import java io */for / import java util */for stack class import java lang integer/for parseint(///////////////////////////////////////////////////////////////class link /(could be other itemspublic int idata/data item public link next/next link in list /public link(int it/constructor idatait/public void displaylink(/display this link system out print(idata ")/end class link ///////////////////////////////////////////////////////////////class sortedlist private link first/ref to first list item /public void sortedlist(/constructor first null/public void insert(link thelink/insert linkin order int key thelink idatalink previous null/start at first link current first/until end of listwhile(current !null &key current idata/or current keyprevious currentcurrent current next/go to next item if(previous==null/if beginning of listfirst thelink/first --new link else /not at beginningprevious next thelink/prev --new link thelink next current/new link --current /end insert(
25,155
/delete link /(assumes non-empty listlink previous null/start at first link current first/until end of listwhile(current !null &key !current idata/or key =currentprevious currentcurrent current next/go to next link /disconnect link if(previous==null/if beginning of list first first next/delete first link else /not at beginning previous next current next/delete current link /end delete(/public link find(int key/find link link current first/start at first /until end of listwhile(current !null &current idata <key/or key too smallif(current idata =key/is this the linkreturn current/found itreturn link current current next/go to next item return null/didn' find it /end find(/public void displaylist(system out print("list (first-->last)")link current first/start at beginning of list while(current !null/until end of listcurrent displaylink()/print data current current next/move to next link system out println("")/end class sortedlist ///////////////////////////////////////////////////////////////
25,156
private sortedlist[hasharrayprivate int arraysize/array of lists /public hashtable(int size/constructor arraysize sizehasharray new sortedlist[arraysize]/create array for(int = <arraysizej++/fill array hasharray[jnew sortedlist()/with lists /public void displaytable(for(int = <arraysizej++/for each cellsystem out print( ")/display cell number hasharray[jdisplaylist()/display list /public int hashfunc(int key/hash function return key arraysize/public void insert(link thelink/insert link int key thelink idataint hashval hashfunc(key)/hash the key hasharray[hashvalinsert(thelink)/insert at hashval /end insert(/public void delete(int key/delete link int hashval hashfunc(key)/hash the key hasharray[hashvaldelete(key)/delete link /end delete(/public link find(int key/find link
25,157
/hash the key link thelink hasharray[hashvalfind(key)/get link return thelink/return link //end class hashtable ///////////////////////////////////////////////////////////////class hashchainapp public static void main(string[argsthrows ioexception int akeylink adataitemint sizenkeyspercell /get sizes puttext("enter size of hash table")size getint()puttext("enter initial number of items") getint()/make table hashtable thehashtable new hashtable(size)for(int = <nj++/insert data akey (int)(java lang math random(keyspercell size)adataitem new link(akey)thehashtable insert(adataitem)while(true/interact with user puttext("enter first letter of ")puttext("showinsertdeleteor find")char choice getchar()switch(choicecase ' 'thehashtable displaytable()breakcase ' 'puttext("enter key value to insert")akey getint()adataitem new link(akey)thehashtable insert(adataitem)breakcase ' 'puttext("enter key value to delete")akey getint()thehashtable delete(akey)
25,158
breakcase ' 'puttext("enter key value to find")akey getint()adataitem thehashtable find(akey)if(adataitem !nullsystem out println("found akey)else system out println("could not find akey)breakdefaultputtext("invalid entry\ ")/end switch /end while /end main(//public static void puttext(string ssystem out print( )system out flush()//public static string getstring(throws ioexception inputstreamreader isr new inputstreamreader(system in)bufferedreader br new bufferedreader(isr)string br readline()return //public static char getchar(throws ioexception string getstring()return charat( )//public static int getint(throws ioexception string getstring()return integer parseint( )///end class hashchainapp here' the output when the user creates table with listsinserts items into itand displays it with the option
25,159
enter initial number of items enter first letter of showinsertdeleteor finds list (first-->last) list (first-->last) list (first-->last) list (first-->last) list (first-->last) list (first-->last) list (first-->last) list (first-->last) list (first-->last) list (first-->last) list (first-->last) list (first-->last) list (first-->last) list (first-->last) list (first-->last) list (first-->last) list (first-->last) list (first-->last) list (first-->last) list (first-->last)if you insert more items into this tableyou'll see the lists grow longerbut maintain their sorted order you can delete items as well we'll return to the question of when to use separate chaining when we discuss hash table efficiency later in this hash functions in this section we'll explore the issue of what makes good hash functionand see if we can improve the approach to hashing strings mentioned at the beginning of this quick computation good hash function is simpleso it can be computed quickly the major advantage of hash tables is their speed if the hash function is slowthis speed will be degraded hash function with many multiplications and divisions is not good idea (the bitmanipulation facilities of java or ++such as shifting bits right to divide number by multiple of can sometimes be used to good advantage the purpose of hash function is to take range of key values and transform them into index values in such way that the key values are distributed randomly across all the indices of the hash table keys may be completely random or not so random random keys so-called perfect hash function maps every key into different table location this is only possible for keys that are unusually well behavedand whose range is small enough to be used directly as array indices (as in the employee-number example at the beginning of this
25,160
compress larger range of keys into smaller range of index numbers the distribution of key values in particular database determines what the hash function needs to do in this we've assumed that the data was randomly distributed over its entire range in this situation the hash function index key arraysizeis satisfactory it involves only one mathematical operationand if the keys are truly random the resulting indices will be random tooand therefore well distributed non-random keys howeverdata is often distributed non-randomly imagine database that uses car-part numbers as keys perhaps these numbers are of the form -- this is interpreted as followsdigits - supplier number ( to currently up to digits - category code ( up to digits - month of introduction ( to digits - year of introduction ( to digits - serial number ( to but never exceeds digit toxic risk flag ( or digits - checksum (sum of other fieldsmodulo the key used for the part number shown would be , , , , , howeversuch keys are not randomly distributed the majority of numbers from to , , , , , can' actually occur (for examplesupplier numbers above category codes from that aren' multiples of and months from to alsothe checksum is not independent of the other numbers some work should be done to these part numbers to help ensure that they form range of more truly random numbers don' use non-data the key fields should be squeezed down until every bit counts for examplethe category codes should be changed to run from to alsothe checksum should be removed because it doesn' add any additional informationit' deliberately redundant various bit-twiddling techniques are appropriate for compressing the various fields in the key use all the data every part of the key (except non-dataas described aboveshould contribute to the hash function don' just use the first digits or some such expurgation the more data that contributes to the keythe more likely it is that the keys will hash evenly into the entire range of indices
25,161
we'll see how to handle overflow when we talk about hashing strings in moment to summarizethe trick is to find hash function that' simple and fastyet excludes the non-data parts of the key and uses all the data use prime number for the modulo base often the hash function involves using the modulo operator (%with the table size we've already seen that it' important for the table size to be prime number when using quadratic probe or double hashing howeverif the keys themselves may not be randomly distributedit' important for the table size to be prime number no matter what hashing system is used this is becauseif many keys share divisor with the array sizethey may tend to hash to the same locationcausing clustering using prime table size eliminates this possibility for exampleif the table size is multiple of in our car part examplethe category codes will all hash to index numbers that are multiples of howeverwith prime number such as you are guaranteed that no keys will divide into the table size the moral is to examine your keys carefullyand tailor your hash algorithm to remove any irregularity in the distribution of the keys hashing strings we saw at the beginning of this how to convert short strings to key numbers by multiplying digit codes by powers of constant in particularwe saw that the four-letter word cats could turn into number by calculating key * * * * this approach has the desirable attribute of involving all the characters in the input string the calculated key value can then be hashed into an array index in the usual wayindex (keyarraysizehere' java method that finds the key value of wordpublic static int hashfunc (string keyint hashval int pow / * etc for(int =key length()- >= --/right to left int letter key charat( /get char code hashval +pow letter/times power of pow * /next power of return hashval arraysize/end hashfunc (the loop starts at the rightmost letter in the word if there are lettersthis is - the numerical equivalent of the letteraccording 
to the code we devised at the beginning of this chapter (a=1, b=2, and so on), is placed in letter. This is then multiplied by a power of the constant.
25,162
the hashfunc (method is not as efficient as it might be aside from the character conversionthere are two multiplications and an addition inside the loop we can eliminate multiplication by taking advantage of mathematical identity called horner' method (horner was an english mathematician - this states that an expression like var * var * var * var * var * can be written as (((var * var )* var )* var )* var to evaluate thiswe can start inside the innermost parentheses and work outward if we translate this to java method we have the following codepublic static int hashfunc (string keyint hashval for(int = <key length() ++/left to right int letter key charat( /get char code hashval hashval letter/multiply and add return hashval arraysize/mod /end hashfunc (here we start with the leftmost letter of the word (which is somewhat more natural than starting on the right)and we have only one multiplication and one addition each time through the loop (aside from extracting the character from the stringthe hashfunc (method unfortunately can' handle strings longer than about letters longer strings cause the value of hashval to exceed the size of type int (if we used type longthe same problem would still arise for somewhat longer strings can we modify this basic approach so we don' overflow any variablesnotice that the key we eventually end up with is always less than the array sizebecause we apply the modulo operator it' not the final index that' too bigit' the intermediate key values it turns out that with horner' formulation we can apply the modulo (%operator at each step in the calculation this gives the same result as applying the modulo operator once at the endbut avoids overflow (it does add an operation inside the loop the hashfunc (method shows how this lookspublic static int hashfunc (string keyint hashval for(int = <key length() ++/left to right int letter key charat( /get char code hashval (hashval letterarraysize/mod return hashval/no mod /end hashfunc (
25,163
of instead of so that multiplication can be effected using the shift (>>operatorwhich is faster than the modulo (%operator you can use an approach similar to this to convert any kind of string to number suitable for hashing the strings can be wordsnamesor any other concatenation of characters hashing efficiency we've noted that insertion and searching in hash tables can approach ( time if no collision occursonly call to the hash function and single array reference are necessary to insert new item or find an existing item this is the minimum access time if collisions occuraccess times become dependent on the resulting probe lengths each cell accessed during probe adds another time increment to the search for vacant cell (for insertionor for an existing cell during an accessa cell must be checked to see if it' emptyand--in the case of searching or deletion--if it contains the desired item thus an individual search or insertion time is proportional to the length of the probe this is in addition to constant time for the hash function the average probe length (and therefore the average access timeis dependent on the load factor (the ratio of items in the table to the size of the tableas the load factor increasesprobe lengths grow longer we'll look at the relationship between probe lengths and load factors for the various kinds of hash tables we've studied open addressing the loss of efficiency with high load factors is more serious for the various open addressing schemes than for separate chaining in open addressingunsuccessful searches generally take longer than successful searches during probe sequencethe algorithm can stop as soon as it finds the desired itemwhich ison the average halfway through the probe sequence on the other handit must go all the way to the end of the sequence before it' sure it can' find an item linear probing the following equations show the relationship between probe length (pand load factor (lfor linear probing for successful search it' ( - 
and for an unsuccessful search it is P = (1 + 1/(1−L)²) / 2, where L is the load factor. These formulas are from Knuth (see the appendix, "Further Reading"), and their derivation is quite complicated. The figure shows the result of graphing these equations.
25,164
At a load factor of 1/2, a successful search takes 1.5 comparisons and an unsuccessful search takes 2.5. At a load factor of 2/3, the numbers are 2.0 and 5.0. At higher load factors the numbers become very large. The moral, as you can see, is that the load factor must be kept under 2/3, and preferably under 1/2. On the other hand, the lower the load factor, the more memory is needed for a given amount of data. The optimum load factor in a particular situation depends on the tradeoff between memory efficiency, which decreases with lower load factors, and speed, which increases. Quadratic probing and double hashing: quadratic probing and double hashing share their performance equations. These indicate a modest superiority over linear probing. For a successful search, the formula (again from Knuth) is −log₂(1 − loadFactor) / loadFactor. For an unsuccessful search it is 1 / (1 − loadFactor). The figure shows graphs of these formulas. At a load factor of 0.5, successful and unsuccessful searches both require an average of two probes. At a 2/3 load factor, the numbers are 2.37 and 3.0, and at 0.9 they're 3.67 and 10.0. Thus somewhat higher load factors can be tolerated for quadratic probing and double hashing than for linear probing.
25,165
Separate chaining: the efficiency analysis for separate chaining is different, and generally easier, than for open addressing. We want to know how long it takes to search for or insert an item into a separate-chaining hash table. We'll assume that the most time-consuming part of these operations is comparing the search key of the item with the keys of other items in the list. We'll also assume that the time required to hash to the appropriate list, and to determine when the end of a list has been reached, is equivalent to one key comparison. Thus all operations require 1 + nComps time, where nComps is the number of key comparisons. Let's say that the hash table consists of arraySize elements, each of which holds a list, and that N data items have been inserted in the table. Then, on the average, each list will hold N divided by arraySize items: average list length = N / arraySize. This is the same as the definition of the load factor: loadFactor = N / arraySize. So the average list length equals the load factor. Searching: in a successful search, the algorithm hashes to the appropriate list and then searches along the list for the item. On the average, half the items must be examined before the correct one is located. Thus the search time is 1 + loadFactor / 2. This is true whether the lists are ordered or not. In an unsuccessful search, if the lists are unordered, all the items must be searched, so the time is 1 + loadFactor.
25,166
figure separate-chaining performance for an ordered listonly half the items must be examined in an unsuccessful searchso the time is the same as for successful search in separate chaining it' typical to use load factor of about (the number of data items equals the array sizesmaller load factors don' improve performance significantlybut the time for all operations increases linearly with load factorso going beyond or so is generally bad idea insertion if the lists are not orderedinsertion is always immediatein the sense that no comparisons are necessary the hash function must still be computedso let' call the insertion time if the lists are orderedthenas with an unsuccessful searchan average of half the items in each list must be examinedso the insertion time is loadfactor open addressing versus separate chaining if open addressing is to be useddouble hashing seems to be the preferred system by small margin over quadratic probing the exception is the situation where plenty of memory is available and the data won' expand after the table is createdin this case linear probing is somewhat simpler to implement andif load factors below are usedcauses little performance penalty if the number of items that will be inserted in hash table isn' known when the table is createdseparate chaining is preferable to open addressing increasing the load factor causes major performance penalties in open addressingbut performance degrades only linearly in separate chaining when in doubtuse separate chaining its drawback is the need for linked list classbut the payoff is that adding more data than you anticipated won' cause performance to slow to crawl hashing and external storage at the end of the last we discussed using -trees as data structures for external
25,167
recall from the last that disk file is divided into blocks containing many recordsand that the time to access block is much larger than any internal processing on data in main memory for these reasons the overriding consideration in devising an external storage strategy is minimizing the number of block accesses on the other handexternal storage is not expensive per byteso it may be acceptable to use large amounts of itmore than is strictly required to hold the dataif by so doing we can speed up access time this is possible using hash tables table of file pointers the central feature in external hashing is hash table containing block numberswhich refer to blocks in external storage the hash table is sometimes called an index (in the sense of book' indexit can be stored in main memoryorif it is too largestored externally on diskwith only part of it being read into main memory at time even if it fits entirely in main memorya copy will probably be maintained on the diskand read into memory when the file is opened non-full blocks let' reuse the example from the last in which the block size is , bytesand record is bytes thus block can hold records every entry in the hash table points to one of these blocks let' say there are blocks in particular file the index (hash tablein main memory holds pointers to the file blockswhich start at at the beginning of the file and run up to in external hashing it' important that blocks don' become full thus we might store an average of records per block some blocks would have more recordsand some fewer there would be about records in the file this arrangement is shown in figure figure external hashing all records with keys that hash to the same value are located in the same block to find record with particular keythe search algorithm hashes the keyuses the hash value as an index to the hash tablegets the block number at that indexand reads the block this is an efficient process because only one block access is necessary to locate given 
item the downside is that considerable disk space is wasted because the blocks areby
25,168
to implement this scheme the hash function and the size of the hash table must be chosen with some careso that limited number of keys hash to the same value in our example we want only records per keyon the average full blocks even with good hash functiona block will occasionally become full this can be handled using variations of the collision-resolution schemes discussed for internal hash tablesopen addressing and separate chaining in open addressingif during insertion one block is found to be fullthe algorithm inserts the new record in neighboring block in linear probing this is the next blockbut it could also be selected using quadratic probe or double hashing in separate chainingspecial overflow blocks are made availablewhen primary block is found to be fullthe new record is inserted in the overflow block full blocks are undesirable because an additional disk access is necessary for the second blockthis doubles the access time howeverthis is acceptable if it happens rarely we've discussed only the simplest hash table implementation for external storage there are many more complex approaches that are beyond the scope of this book summary hash table is based on an array the range of key values is usually greater than the size of the array key value is hashed to an array index by hash function an english-language dictionary is typical example of database that can be efficiently handled with hash table the hashing of key to an already filled array cell is called collision collisions can be handled in two major waysopen addressing and separate chaining in open addressingdata items that hash to full array cell are placed in another cell in the array in separate chainingeach array element consists of linked list all data items hashing to given array index are inserted in that list we discussed three kinds of open addressinglinear probingquadratic probingand double hashing in linear probing the step size is always so if is the array index calculated by the hash 
function, the probe goes to x, x+1, x+2, x+3, and so on. The number of such steps required to find a specified item is called the probe length. In linear probing, contiguous sequences of filled cells appear. These are called primary clusters, and they reduce performance.
25,169
goes to x, x+1, x+4, x+9, x+16, and so on. Quadratic probing eliminates primary clustering, but suffers from the less severe secondary clustering. Secondary clustering occurs because all the keys that hash to the same value follow the same sequence of steps during a probe. All keys that hash to the same value follow the same probe sequence because the step size does not depend on the key, but only on the hash value. In double hashing the step size depends on the key, and is obtained from a secondary hash function. If the secondary hash function returns a value s in double hashing, the probe goes to x, x+s, x+2s, x+3s, x+4s, and so on, where s depends on the key, but remains constant during the probe. The load factor is the ratio of data items in a hash table to the array size. The maximum load factor in open addressing should be around 0.5. For double hashing at this load factor, searches will have an average probe length of 2. Search times go to infinity as load factors approach 1.0 in open addressing. It's crucial that an open-addressing hash table does not become too full. A load factor of 1.0 is appropriate for separate chaining. At this load factor a successful search has an average probe length of 1.5, and an unsuccessful search, 2.0. Probe lengths in separate chaining increase linearly with load factor. A string can be hashed by multiplying each character by a different power of a constant, adding the products, and using the modulo (%) operator to reduce the result to the size of the hash table. To avoid overflow, the modulo operator can be applied at each step in the process, if the polynomial is expressed using Horner's method. Hash table sizes should generally be prime numbers. This is especially important in quadratic probing and separate chaining. Hash tables can be used for external storage. One way to do this is to have the elements in the hash table contain disk-file block numbers. Heaps — overview: we saw in the chapter on stacks and queues that a priority queue is a data structure that offers convenient access to the data item with the smallest (or largest) key. This is useful when key
values indicate the order in which items should be accessed
25,170
and activities should be executed sooner than others and are therefore given higher priority another example is in weapons systemssay in navy cruiser variety of threats-airplanesmissilessubmarinesand so on--are detected and must be prioritized for examplea missile that' short distance from the cruiser is assigned higher priority than an aircraft long distance awayso that countermeasures (surface-to-air missilesfor examplecan deal with it first priority queues are also used internally in other computer algorithms in "weighted graphs,we'll see priority queues used in graph algorithmssuch as dijkstra' algorithm priority queue is an abstract data type (adtoffering methods that allow removal of the item with the maximum (or minimumkey valueinsertionand sometimes other activities as with other adtspriority queues can be implemented using variety of underlying structures in we saw priority queue implemented as an array the trouble with that approach is thateven though removal of the largest item is accomplished in fast ( timeinsertion requires slow (ntimebecause an average of half the items in the array must be moved to insert the new one in order in this we'll describe another structure that can be used to implement priority queuethe heap heap is kind of tree it offers both insertion and deletion in (logntime thus it' not quite as fast for deletionbut much faster for insertion it' the method of choice for implementing priority queues where speed is important and there will be many insertions (incidentallydon' confuse the term heapused here for special kind of binary treewith the same term used to mean the portion of computer memory available to programmer with new in languages like java and +heaps overview we saw in "stacks and queues,that priority queue is data structure that offers convenient access to the data item with the smallest (or largestkey this is useful when key values indicate the order in which items should be accessed priority queues may be used for task 
scheduling in computerswhere some programs and activities should be executed sooner than others and are therefore given higher priority another example is in weapons systemssay in navy cruiser variety of threats-airplanesmissilessubmarinesand so on--are detected and must be prioritized for examplea missile that' short distance from the cruiser is assigned higher priority than an aircraft long distance awayso that countermeasures (surface-to-air missilesfor examplecan deal with it first priority queues are also used internally in other computer algorithms in "weighted graphs,we'll see priority queues used in graph algorithmssuch as dijkstra' algorithm priority queue is an abstract data type (adtoffering methods that allow removal of the item with the maximum (or minimumkey valueinsertionand sometimes other activities as with other adtspriority queues can be implemented using variety of underlying structures in we saw priority queue implemented as an array the trouble with that approach is thateven though removal of the largest item is
25,171
half the items in the array must be moved to insert the new one in order in this we'll describe another structure that can be used to implement priority queuethe heap heap is kind of tree it offers both insertion and deletion in (logntime thus it' not quite as fast for deletionbut much faster for insertion it' the method of choice for implementing priority queues where speed is important and there will be many insertions (incidentallydon' confuse the term heapused here for special kind of binary treewith the same term used to mean the portion of computer memory available to programmer with new in languages like java and +the heap workshop applet the heap workshop applet demonstrates the operations we discussed in the last sectionit allows you to insert new items into heap and remove the largest item in addition you can change the priority of given item when you start up the heap workshop appletyou'll see display similar to figure figure the heap workshop applet there are four buttonsfillchngremand insfor fillchangeremoveand insert let' see how they work fill the heap contains nodes when the applet is first started using the fill key you can create new heap with any number of nodes from to press fill repeatedlyand type in the desired number when prompted change it' possible to change the priority of an existing node this is useful procedure in many situations for examplein our cruiser examplea threat such as an approaching airplane may reverse course away from the carrierits priority should be lowered to reflect this new developmentalthough the aircraft would remain in the priority queue until it was out of radar range to change the priority of noderepeatedly press the chng key when promptedclick on the node with the mouse this will position the red arrow on the node thenwhen promptedtype in the node' new priority
25,172
loweredthe node will trickle downward remove repeatedly pressing the rem button causes the node with the highest keylocated at the rootto be removed you'll see it disappearand then be replaced by the last (rightmostnode on the bottom row finally this node will trickle down until it reaches the position that reestablishes the heap order insert new node is always inserted initially in the first available array celljust to the right of the last node on the bottom row of the heap from there it trickles up to the appropriate position pressing the ins key repeatedly carries out this operation java code for heaps the complete code for heap java is shown later in this section before we get to itwe'll focus on the individual operations of insertionremovaland change here are some things to remember from about representing tree as an array for node at index in the arrayits parent is ( - its left child is * its right child is * these relationships can be seen in figure (remember that the symbolwhen applied to integersperforms integer divisionin which the answer is rounded to the lowest integer insertion we place the trickle-up algorithm in its own method the insert(methodwhich includes call to this trickleup(methodis straightforwardpublic boolean insert(int keyif(currentsize==maxsizereturn falsenode newnode new node(key)heaparray[currentsizenewnodetrickleup(currentsize++)return true/if array is full/failure /make new node /put it at the end /trickle it up /success /end insert(we check to make sure the array isn' full and then make new node using the key value passed as an argument this node is inserted at the end of the array finally the trickleup(routine is called to move this node up to its proper position in trickleup((shown belowthe argument is the index of the newly inserted item
25,173
inside the while loopthe variable index will trickle up the path toward the rootpointing to each node in turn the while loop runs as long as we haven' reached the root (index> and the key (idataof index' parent is less than the new node the body of the while loop executes one step of the trickle-up process it first copies the parent node into indexmoving the node down (this has the effect of moving the "holeupward then it moves index upward by giving it its parent' indexand giving its parent its parent' index public void trickleup(int indexint parent (index- node bottom heaparray[index]whileindex &heaparray[parentidata bottom idata heaparray[indexheaparray[parent]/move node down index parent/move index up parent (parent- /parent <its parent /end while heaparray[indexbottom/end trickleup(finallywhen the loop has exitedthe newly inserted nodewhich has been temporarily stored in bottomis inserted into the cell pointed to by index this is the first location where it' not larger than its parentso inserting it here satisfies the heap condition removal the removal algorithm is also not complicated if we subsume the trickle-down algorithm into its own routine we save the node from the rootcopy the last node (at index currentsize- into the rootand call trickledown(to place this node in its appropriate location public node remove(/delete item with max key /(assumes non-empty listnode root heaparray[ ]/save the root heaparray[ heaparray[--currentsize]/root <last trickledown( )/trickle down the root return root/return removed node /end remove(this method returns the node that was removedthe user of the heap usually needs to process it in some way the trickledown(routine is more complicated than trickleup(because we must determine which of the two children is larger first we save the node at index in variable called top if trickledown(has been called from remove()index is the rootbutas we'll seeit can be called from other routines as well the while loop will run as long as 
index is not on the bottom row--that isas long as it
25,174
leftand if socompare the children' keyssetting largerchild appropriately then we check if the key of the original node (now in topis greater than that of largerchildif sothe trickle-down process is complete and we exit the loop public void trickledown(int indexint largerchildnode top heaparray[index]while(index currentsize/ int leftchild *index+ int rightchild leftchild+ /save root /while node has at /least one child/find larger child if(rightchild currentsize &/(rightchild exists?heaparray[leftchildidata heaparray[rightchildidatalargerchild rightchildelse largerchild leftchild/top >largerchildif(top idata >heaparray[largerchildidatabreak/shift child up heaparray[indexheaparray[largerchild]index largerchild/go down /end while heaparray[indextop/index <root /end trickledown(on exiting the loop we need only restore the node stored in top to its appropriate positionpointed to by index key change once we've created the trickledown(and trickleup(methodsit' easy to implement an algorithm to change the priority (the keyof node and then trickle it up or down to its proper position the change(method accomplishes thispublic boolean change(int indexint newvalueif(index=currentsizereturn falseint oldvalue heaparray[indexidata/remember old heaparray[indexidata newvalue/change to new if(oldvalue newvaluetrickleup(index)else trickledown(index)return true/if raised/trickle it up /if lowered/trickle it down
25,175
/end change(this routine first checks that the index given in the first argument is validand if sochanges the idata field of the node at that index to the value specified as the second argument thenif the priority has been raisedthe node is trickled upif it' been loweredthe node is trickled down actuallythe difficult part of changing node' priority is not shown in this routinefinding the node you want to change in the change(method just shown we supply the index as an argumentand in the heap workshop applet the user simply clicks on the selected node in real-world application mechanism would be needed to find the appropriate nodeas we've seenthe only node to which we normally have convenient access in heap is the one with the largest key the problem can be solved in linear (ntime by searching the array sequentially ora separate data structure (perhaps hash tablecould be updated with the new index value whenever node was moved in the priority queue this would allow quick access to any node of coursekeeping second structure updated would itself be timeconsuming the array size we should note that the array sizeequivalent to the number of nodes in the heapis vital piece of information about the heap' state and critical field in the heap class nodes copied from the last position aren' erasedso the only way for algorithms to know the location of the last occupied cell is to refer to the current size of the array the heap java program the heap java program (see listing uses node class whose only field is the idata variable that serves as the node' key as usualthis class would hold many other fields in useful program the heap class contains the methods we discussedplus isempty(and displayheap()which outputs crude but comprehensible characterbased representation of the heap listing the heap java program /heap java /demonstrates heaps /to run this programc>java heapapp import java io */for / import java lang integer/for 
parseint(///////////////////////////////////////////////////////////////class node public int idata/data item (keypublic node(int keyidata key/end class node /constructor ///////////////////////////////////////////////////////////////
25,176
private node[heaparrayprivate int maxsizeprivate int currentsize/size of array /number of nodes in array /public heap(int mx/constructor maxsize mxcurrentsize heaparray new node[maxsize]/create array /public boolean isempty(return currentsize== /public boolean insert(int keyif(currentsize==maxsizereturn falsenode newnode new node(key)heaparray[currentsizenewnodetrickleup(currentsize++)return true/end insert(/public void trickleup(int indexint parent (index- node bottom heaparray[index]whileindex &heaparray[parentidata bottom idata heaparray[indexheaparray[parent]/move it down index parentparent (parent- /end while heaparray[indexbottom/end trickleup(/public node remove(/delete item with max key /(assumes non-empty listnode root heaparray[ ]
25,177
trickledown( )return root/end remove(/public void trickledown(int indexint largerchildnode top heaparray[index]/save root while(index currentsize/ /while node has at /least one childint leftchild *index+ int rightchild leftchild+ /find larger child if(rightchild currentsize &/(rightchild exists?heaparray[leftchildidata heaparray[rightchildidatalargerchild rightchildelse largerchild leftchild/top >largerchildif(top idata >heaparray[largerchildidatabreak/shift child up heaparray[indexheaparray[largerchild]index largerchild/go down /end while heaparray[indextop/root to index /end trickledown(/public boolean change(int indexint newvalueif(index=currentsizereturn falseint oldvalue heaparray[indexidata/remember old heaparray[indexidata newvalue/change to new if(oldvalue newvaluetrickleup(index)else trickledown(index)return true/end change(/if raised/trickle it up /if lowered/trickle it down /public void displayheap(system out print("heaparray")/array format
25,178
if(heaparray[ !nullsystem out printheaparray[midata ")else system out print"-")system out println()/heap format int nblanks int itemsperrow int column int /current item string dots "system out println(dots+dots)/dotted top line while(currentsize if(column = for(int = <nblanksk++system out print(')/for each heap item /first item in row/preceding blanks /display item system out print(heaparray[jidata)if(++ =currentsizebreak/doneif(++column==itemsperrow/end of rownblanks / /half the blanks itemsperrow * /twice the items column /start over on system out println()/new row else /next item on row for(int = <nblanks* - ++system out print(')/interim blanks /end for system out println("\ "+dots+dots)/dotted bottom line /end displayheap(//end class heap ///////////////////////////////////////////////////////////////class heapapp public static void main(string[argsthrows ioexception int valuevalue heap theheap new heap( )/make heapmax size boolean success
25,179
theheap insert( )theheap insert( )theheap insert( )theheap insert( )theheap insert( )theheap insert( )theheap insert( )theheap insert( )theheap insert( )/insert items while(true/until [ctrl]-[cputtext("enter first letter of ")puttext("showinsertremovechange")int choice getchar()switch(choicecase ' '/show theheap displayheap()breakcase ' '/insert puttext("enter value to insert")value getint()success theheap insert(value)if!success puttext("can' insertheap is full'\ ')breakcase ' '/remove if!theheap isempty(theheap remove()else puttext("can' removeheap is empty'\ ')breakcase ' '/change puttext("enter index of item")value getint()puttext("enter new priority")value getint()success theheap change(valuevalue )if!success puttext("can' changeinvalid index'\ ')breakdefaultputtext("invalid entry\ ")/end switch /end while /end main(/
25,180
      System.out.print(s);
      System.out.flush();
      }
//-------------------------------------------------------------
   public static String getString() throws IOException
      {
      InputStreamReader isr = new InputStreamReader(System.in);
      BufferedReader br = new BufferedReader(isr);
      String s = br.readLine();
      return s;
      }
//-------------------------------------------------------------
   public static char getChar() throws IOException
      {
      String s = getString();
      return s.charAt(0);
      }
//-------------------------------------------------------------
   public static int getInt() throws IOException
      {
      String s = getString();
      return Integer.parseInt(s);
      }
//-------------------------------------------------------------
   }  // end class HeapApp

The array places the heap's root at index 0. Some heap implementations start the array with the root at 1, using position 0 as a sentinel value with the largest possible key. This saves an instruction in some of the algorithms, but complicates things conceptually.

The main() routine in HeapApp creates a heap with a maximum size of 31 (dictated by the limitations of the display routine) and inserts into it nodes with random keys. Then it enters a loop in which the user can enter s, i, r, or c, for show, insert, remove, or change.

Here's some sample interaction with the program:

Enter first letter of show, insert, remove, change: s
heapArray:
25,181
enter first letter of showinsertremovechangei enter value to insert enter first letter of showinsertremovechanges heaparray enter first letter of showinsertremovechanger enter first letter of showinsertremovechanges heaparray enter first letter of showinsertremovechangethe user displays the heapadds an item with key of shows the heap againremoves the item with the greatest keyand shows the heap third time the show(routine displays both the array and the tree versions of the heap you'll need to use your imagination to fill in the connections between nodes expanding the heap array
25,182
the heap arraya new array can be createdand the data from the old array copied into it (unlike the situation with hash tableschanging the size of heap doesn' require reordering the data the copying operation takes linear timebut enlarging the array size shouldn' be necessary very oftenespecially if the array size is increased substantially each time it' expanded (by doubling itfor examplein javaa vector class object could be used instead of an arrayvectors can be expanded dynamically efficiency of heap operations for heap with substantial number of itemsit' the trickle-up and trickle-down algorithms that are the most time-consuming parts of the operations we've seen these algorithms spend time in looprepeatedly moving nodes up or down along path the number of copies necessary is bounded by the height of the heapif there are five levelsfour copies will carry the "holefrom the top to the bottom (we'll ignore the two moves used to transfer the end node to and from temporary storagethey're always necessary so they require constant time the trickleup(method has only one major operation in its loopcomparing the key of the new node with the node at the current location the trickledown(method needs two comparisonsone to find the largest childand second to compare this child with the "lastnode they must both copy node from top to bottom or bottom to top to complete the operation heap is special kind of binary treeand as we saw in the number of levels in binary tree equals log ( + )where is the number of nodes the trickleup(and trickledown(routines cycle through their loops - timesso the first takes time proportional to log nand the second somewhat more because of the extra comparison thus the heap operations we've talked about here all operate in (logntime heapsort the efficiency of the heap data structure lends itself to surprisingly simple and very efficient sorting algorithm called heapsort the basic idea is to insert the unordered items into heap using the normal 
insert() routine. Repeated application of the remove() routine will then remove the items in sorted order. Here's how that might look:

for(j=0; j<size; j++)
   theHeap.insert( anArray[j] );     // from unsorted array
for(j=0; j<size; j++)
   anArray[j] = theHeap.remove();    // to sorted array

Because insert() and remove() operate in O(log N) time, and each must be applied N times, the entire sort requires O(N*log N) time, which is the same as quicksort. However, it's not quite as fast as quicksort. Partly this is because there are more operations in the inner while loop in trickleDown() than in the inner loop in quicksort.

However, several tricks can make heapsort more efficient. The first saves time, and the second saves memory.

Trickling Down in Place
25,183
howeverif all the items are already in an arraythey can be rearranged into heap with only / applications of trickledown(this offers small speed advantage two correct subheaps make correct heap to see how this worksyou should know that trickledown(will create correct heap ifwhen an out-of-order item is placed at the rootboth the child subheaps of this root are correct heaps (the root can itself be the root of subheap as well as of the entire heap this is shown in figure figure both subtrees must be correct this suggests way to transform an unordered array into heap we can apply trickledown(to the nodes on the bottom of the (potentialheap--that isat the end of the array--and work our way upward to the root at index at each step the subheaps below us will already be correct heaps because we already applied trickledown(to them after we apply trickledown(to the rootthe unordered array will have been transformed into heap notice however that the nodes on the bottom row--those with no children--are already correct heapsbecause they are trees with only one nodethey have no relationships to be out of order therefore we don' need to apply trickledown(to these nodes we can start at node / - the rightmost node with childreninstead of - the last node thus we need only half as many trickle operations as we would using insert( times figure shows the order in which the trickle-down algorithm is appliedstarting at node in -node heap figure order of applying trickledown(the following code fragment applies trickledown(to all nodesexcept those on the bottom rowstarting at / - and working back to the rootfor( =size/ - >= --
25,184
A Recursive Approach

A recursive approach can also be used to form a heap from an array. A heapify() method is applied to the root. It calls itself for the root's two children, then for each of these children's two children, and so on. Eventually it works its way down to the bottom row, where it returns immediately whenever it finds a node with no children.

Once it has called itself for two child subtrees, heapify() then applies trickleDown() to the root of the subtree. This ensures that the subtree is a correct heap. Then heapify() returns and works on the subtree one level higher.

heapify(int index)              // transform array into heap
   {
   if(index > N/2-1)            // if node has no children,
      return;                   //    return
   heapify(index*2+2);          // turn right subtree into heap
   heapify(index*2+1);          // turn left subtree into heap
   trickleDown(index);          // apply trickle-down to this node
   }

This recursive approach is probably not quite as efficient as the simple loop.

Using the Same Array

Our initial code fragment showed unordered data in an array. This data was then inserted into a heap, and finally removed from the heap and written back to the array in sorted order. In this procedure two size-N arrays are required: the initial array and the array used by the heap.

In fact, the same array can be used both for the heap and for the initial array. This cuts in half the amount of memory needed for heapsort; no memory beyond the initial array is necessary.

We've already seen how trickleDown() can be applied to half the elements of an array to transform them into a heap. We transform the unordered array data into a heap in place; only one array is necessary for this. Thus the first step in heapsort requires only one array.

However, things are more complicated when we apply remove() repeatedly to the heap. Where are we going to put the items that are removed?

Each time an item is removed from the heap, an element at the end of the heap array becomes empty; the heap shrinks by one. We can put the recently removed item in this newly freed cell. As more items are removed, the heap array becomes smaller and smaller, while the
array of ordered data becomes larger and larger thus with little planning it' possible for the ordered array and the heap array to share the same space this is shown in figure
25,185
the heapsort java program we can put these two tricks--applying trickledown(without using insert()and using the same array for the initial data and the heap--together in program that performs heapsort listing shows how this looks listing the heapsort java program /heapsort java /demonstrates heap sort /to run this programc>java heapsortapp import java io */for / import java lang integer/for parseint(///////////////////////////////////////////////////////////////class node public int idatapublic node(int keyidata key/end class node /data item (key/constructor ///////////////////////////////////////////////////////////////class heap private node[heaparrayprivate int maxsizeprivate int currentsize/size of array /number of items in array /public heap(int mx/constructor
25,186
maxsize mxcurrentsize heaparray new node[maxsize]/public node remove(/delete item with max key /(assumes non-empty listnode root heaparray[ ]heaparray[ heaparray[--currentsize]trickledown( )return root/end remove(/public void trickledown(int indexint largerchildnode top heaparray[index]/save root while(index currentsize/ /not on bottom row int leftchild *index+ int rightchild leftchild+ /find larger child if(rightchild currentsize &/right ch existsheaparray[leftchildidata heaparray[rightchildidatalargerchild rightchildelse largerchild leftchild/top >largerchildif(top idata >heaparray[largerchildidatabreak/shift child up heaparray[indexheaparray[largerchild]index largerchild/go down /end while heaparray[indextop/root to index /end trickledown(/public void displayheap(int nblanks int itemsperrow int column int /current item string dots "system out println(dots+dots)/dotted top line
25,187
if(column = for(int = <nblanksk++system out print(')/for each heap item /first item in row/preceding blanks /display item system out print(heaparray[jidata)if(++ =currentsizebreak/doneif(++column==itemsperrow/end of rownblanks / /half the blanks itemsperrow * /twice the items column /start over on system out println()/new row else /next item on row for(int = <nblanks* - ++system out print(')/interim blanks /end for system out println("\ "+dots+dots)/dotted bottom line /end displayheap(/public void displayarray(for(int = <maxsizej++system out print(heaparray[jidata ")system out println("")/public void insertat(int indexnode newnodeheaparray[indexnewnode/public void incrementsize(currentsize++//end class heap ///////////////////////////////////////////////////////////////class heapsortapp public static void main(string[argsthrows ioexception
25,188
int sizejsystem out print("enter number of items")size getint()heap theheap new heap(size)for( = <sizej++/fill array with /random nodes int random (int)(java lang math random()* )node newnode new node(random)theheap insertat(jnewnode)theheap incrementsize()system out print("random")theheap displayarray()/display random array heap for( =size/ - >= --/make random array into theheap trickledown( )system out print("heaptheheap displayarray()theheap displayheap()")/dislay heap array /display heap for( =size- >= --/remove from heap and /store at array end node biggestnode theheap remove()theheap insertat(jbiggestnode)system out print("sorted")theheap displayarray()/display sorted array /end main(/public static string getstring(throws ioexception inputstreamreader isr new inputstreamreader(system in)bufferedreader br new bufferedreader(isr)string br readline()return //public static int getint(throws ioexception string getstring()return integer parseint( )/
25,189
/end class heapsortapp the heap class is much the same as in the heap java programexcept that to save space we've removed the trickleup(and insert(methodswhich aren' necessary for heapsort we've also added an insertat(method that allows direct insertion into the heap' array notice that this addition is not in the spirit of object-oriented programming the heap class interface is supposed to shield class users from the underlying implementation of the heap the underlying array should be invisiblebut insertat(allows direct access to it in this situation we accept the violation of oop principles because the array is so closely tied to the heap architecture an incrementsize(method is another addition to the heap class it might seem as though we could combine this with insertat()but when inserting into the array in its role as an ordered array we don' want to increase the heap sizeso we keep these functions separate the main(routine in the heapsortapp class gets the array size from the user fills the array with random data turns the array into heap with / applications of trickledown( removes the items from the heap and writes them back at the end of the array after each step the array contents are displayed the user selects the array size here' some sample output from heapsort javaenter number of items random heap sorted the efficiency of heapsort as we notedheapsort runs in ( *logntime although it may be slightly slower than quicksortan advantage over quicksort is that it is less sensitive to the initial distribution of data certain arrangements of key values can reduce quicksort to slow ( timewhereas heapsort runs in ( *logntime no matter how the data is distributed summary in an ascending priority queue the item with the largest key (or smallest in
25,190
priority queue is an abstract data type (adtthat offers methods for insertion of data and removal of the largest (or smallestitem heap is an efficient implementation of an adt priority queue heap offers removal of the largest itemand insertionin ( *logntime the largest item is always in the root heaps do not support ordered traversal of the datalocating an item with specific keyor deletion heap is usually implemented as an array representing complete binary tree the root is at index and the last item at index - each node has key less than its parents and greater than its children an item to be inserted is always placed in the first vacant cell of the arrayand then trickled up to its appropriate position when an item is removed from the rootit' replaced by the last item in the arraywhich is then trickled down to its appropriate position the trickle-up and trickle-down processes can be thought of as sequence of swapsbut are more efficiently implemented as sequence of copies the priority of an arbitrary item can be changed first its key is changedthenif the key was increasedthe item is trickled upwhile if the key was decreased the item is trickled down heapsort is an efficient sorting procedure that requires ( *logntime conceptually heapsort consists of making insertions into heapfollowed by removals heapsort can be made to run faster by applying the trickle-down algorithm directly to / items in the unsorted arrayrather than inserting items the same array can be used for the initial unordered datafor the heap arrayand for the final sorted data thus heapsort requires no extra memory
25,191
list graphs weighted graphs when to use what graphs overview graphs are one of the most versatile structures used in computer programming the sorts of problems that graphs can help solve are generally quite different from those we've dealt with thus far in this book if you're dealing with general kinds of data storage problemsyou probably won' need graphbut for some problems--and they tend to be interesting ones-- graph is indispensable our discussion of graphs is divided into two in this we'll cover the algorithms associated with unweighted graphsshow some algorithms that these graphs can representand present two workshop applets to model them in the next we'll look at the more complicated algorithms associated with weighted graphs introduction to graphs graphs are data structures rather like trees in factin mathematical sensea tree is kind of graph in computer programminghowevergraphs are used in different ways than trees the data structures examined previously in this book have an architecture dictated by the algorithms used on them for examplea binary tree is shaped the way it is because that shape makes it easy to search for data and insert new data the edges in tree represent quick ways to get from node to node graphson the other handoften have shape dictated by physical problem for examplenodes in graph may represent citieswhile edges may represent airline flight routes between the cities another more abstract example is graph representing the individual tasks necessary to complete project in the graphnodes may represent taskswhile directed edges (with an arrow at one endindicate which task must be completed before another in both casesthe shape of the graph arises from the specific real-world situation before going furtherwe must mention thatwhen discussing graphsnodes are called vertices (the singular is vertexthis is probably because the nomenclature for graphs is older than that for treeshaving arisen in mathematics centuries ago trees are more closely 
associated with computer science definitions figure - shows simplified map of the freeways in the vicinity of san josecalifornia figure - shows graph that models these freeways
25,192
in the graphcircles represent freeway interchanges and straight lines connecting the circles represent freeway segments the circles are verticesand the lines are edges the vertices are usually labeled in some way--oftenas shown herewith letters of the alphabet each edge is bounded by the two vertices at its ends the graph doesn' attempt to reflect the geographical positions shown on the mapit shows only the relationships of the vertices and the edges--that iswhich edges are connected to which vertex it doesn' concern itself with physical distances or directions alsoone edge may represent several different route numbersas in the case of the edge from to hwhich involves routes and it' the connectedness (or lack of itof one intersection to another that' importantnot the actual routes adjacency two vertices are said to be adjacent to one another if they are connected by single edge thus in figure vertices and are adjacentbut vertices and are not the vertices adjacent to given vertex are sometimes said to be its neighbors for examplethe neighbors of are ihand paths path is sequence of edges figure shows path from vertex to vertex that passes through vertices and we can call this path baej there can be more than one path between two verticesanother path from to is bcdj connected graphs graph is said to be connected if there is at least one path from every vertex to every other vertexas in the graph in figure - howeverif "you can' get there from here(as vermont farmers traditionally tell city slickers who stop to ask for directions)the
25,193
non-connected graph consists of several connected components in figure -ba and are one connected componentand and are another for simplicitythe algorithms we'll be discussing in this are written to apply to connected graphsor to one connected component of non-connected graph if appropriatesmall modifications will usually enable them to work with non-connected graphs as well directed and weighted graphs the graphs in figures and are non-directed graphs that means that the edges don' have directionyou can go either way on them thus you can go from vertex to vertex bor from vertex to vertex awith equal ease (this models freeways appropriatelybecause you can usually go either way on freeway howevergraphs are often used to model situations in which you can go in only one direction along an edgefrom to but not from to aas on one-way street such graph is said to be directed the allowed direction is typically shown with an arrowhead at the end of the edge in some graphsedges are given weighta number that can represent the physical distance between two verticesor the time it takes to get from one vertex to anotheror how much it costs to travel from vertex to vertex (on airline routesfor examplesuch graphs are called weighted graphs we'll explore them in the next we're going to begin this by discussing simple undirectedunweighted graphslater we'll explore directed unweighted graphs we have by no means covered all the definitions that apply to graphswe'll introduce more as we go along figure onnected and nonconnected graphs historical note one of the first mathematicians to work with graphs was leonhard euler in the early th century he solved famous problem dealing with the bridges in the town of konigsbergpoland this town included an island and seven bridgesas shown in figure -
25,194
the problemmuch discussed by the townsfolkwas to find way to walk across all seven bridges without recrossing any of them we won' recount euler' solution to the problemit turns out that there is no such path howeverthe key to his solution was to represent the problem as graphwith land areas as vertices and bridges as edgesas shown in figure - this is perhaps the first example of graph being used to represent problem in the real world representing graph in program it' all very well to think about graphs in the abstractas euler and other mathematicians did until the invention of the computerbut we want to represent graphs by using computer what sort of software structures are appropriate to model graphwe'll look at vertices firstand then at edges vertices in very abstract graph program you could simply number the vertices to - (where is the number of verticesyou wouldn' need any sort of variable to hold the verticesbecause their usefulness would result from their relationships with other vertices in most situationshowevera vertex represents some real-world objectand the object must be described using data items if vertex represents city in an airline route simulationfor exampleit may need to store the name of the cityits altitudeits locationand other such information thus it' usually convenient to represent vertex by an object of vertex class our example programs store only letter (like )used as label for identifying the vertexand flag for use in search algorithmsas we'll see later here' how the vertex class looksclass vertex public char label/label ( ' 'public boolean wasvisitedpublic vertex(char lablabel labwasvisited false/constructor /end class vertex vertex objects can be placed in an array and referred to using their index number in our
25,195
placed in list or some other data structure whatever structure is usedthis storage is for convenience only it has no relevance to how the vertices are connected by edges for thiswe need another mechanism edges in "red-black trees,we saw that computer program can represent trees in several ways mostly we examined trees in which each node contained references to its childrenbut we also mentioned that an array could be usedwith node' position in the array indicating its relationship to other nodes "heaps,described arrays used to represent kind of tree called heap graphhoweverdoesn' usually have the same kind of fixed organization as tree in binary treeeach node has maximum of two childrenbut in graph each vertex may be connected to an arbitrary number of other vertices for examplein figure -avertex is connected to three other verticeswhereas is connected to only one to model this sort of free-form organizationa different approach to representing edges is preferable to that used for trees two methods are commonly used for graphs:the adjacency matrix and the adjacency list (remember that one vertex is said to be adjacent to another if they're connected by single edge the adjacency matrix an adjacency matrix is two-dimensional array in which the elements indicate whether an edge is present between two vertices if graph has verticesthe adjacency matrix is an nxn array table shows the adjacency matrix for the graph in figure - table adjacency matrix the vertices are used as headings for both rows and columns an edge between two vertices is indicated by the absence of an edge is (you could also use boolean true/false values as you can seevertex is adjacent to all three other verticesb is adjacent to and dc is adjacent only to aand is adjacent to and in this examplethe "connectionof vertex to itself is indicated by so the diagonal from upper-left to lower-righta- to -dwhich is called the identity diagonalis all the entries on the identity diagonal don' convey any real 
informationso you can equally well put along itif that' more convenient in your program note that the triangular-shaped part of the matrix above the identity diagonal is mirror
25,196
may seem inefficientbut there' no convenient way to create triangular array in most computer languagesso it' simpler to accept the redundancy consequentlywhen you add an edge to the graphyou must make two entries in the adjacency matrix rather than one the adjacency list the other way to represent edges is with an adjacency list the list in adjacency list refers to linked list of the kind we examined in "recursion actuallyan adjacency list is an array of lists (or list of listseach individual list shows what vertices given vertex is adjacent to table shows the adjacency lists for the graph of figure - table adjacency lists vertex list containing adjacent vertices in this tablethe symbol indicates link in linked list each link in the list is vertex here the vertices are arranged in alphabetical order in each listalthough that' not really necessary don' confuse the contents of adjacency lists with paths the adjacency list shows which vertices are adjacent to--that isone edge away from-- given vertexnot paths from vertex to vertex later we'll discuss when to use an adjacency matrix as opposed to an adjacency list the workshop applets shown in this all use the adjacency matrix approachbut sometimes the list approach is more efficient adding vertices and edges to graph to add vertex to graphyou make new vertex object with new and insert it into your vertex arrayvertexlist in real-world program vertex might contain many data itemsbut for simplicity we'll assume that it contains only single character thus the creation of vertex looks something like thisvertexlist[nverts++new vertex(' ')this inserts vertex fwhere nverts is the number of vertices currently in the graph how you add an edge to graph depends on whether you're using an adjacency matrix or adjacency lists to represent the graph let' say that you're using an adjacency matrix and want to add an edge between vertices and these numbers correspond to the
25,197
adjacency matrix adjmatyou filled it with to insert the edgeyou say adjmat[ ][ adjmat[ ][ if you were using an adjacency listyou would add to the list for and to the list for the graph class let' look at class graph that contains methods for creating vertex list and an adjacency matrixand for adding vertices and edges to graph objectclass graph private final int max_verts private vertex vertexlist[]/array of vertices private int adjmat[][]/adjacency matrix private int nverts/current number of vertices /public graph(/constructor vertexlist new vertex[max_verts]/adjacency matrix adjmat new int[max_verts][max_verts]nverts for(int = <max_vertsj++/set adjacency for(int = <max_vertsk++/matrix to adjmat[ ][ /end constructor /public void addvertex(char lab/argument is label vertexlist[nverts++new vertex(lab)/public void addedge(int startint endadjmat[start][end adjmat[end][start /public void displayvertex(int vsystem out print(vertexlist[vlabel)
25,198
// ------------------------------------------------------------
   }  // end class Graph

Within the Graph class, vertices are identified by their index number in vertexList. We've already discussed most of the methods shown here. To display a vertex, we simply print out its one-character label. The adjacency matrix (or the adjacency list) provides information that is local to a given vertex. Specifically, it tells you which vertices are connected by a single edge to a given vertex. To answer more global questions about the arrangement of the vertices, we must resort to various algorithms. We'll begin with searches. Searches. One of the most fundamental operations to perform on a graph is finding which vertices can be reached from a specified vertex. For example, imagine trying to find out how many towns in the United States can be reached by passenger train from Kansas City (assuming that you don't mind changing trains). Some towns could be reached. Others couldn't be reached because they didn't have passenger rail service. Possibly others couldn't be reached, even though they had rail service, because their rail system (the narrow-gauge Hayfork-Hicksville RR, for example) didn't connect with the standard-gauge line you started on or any of the lines that could be reached from your line. Here's another situation in which you might need to find all the vertices reachable from a specified vertex. Imagine that you're designing a printed circuit board, like the ones inside your computer. (Open it up and take a look!) Various components — mostly integrated circuits (ICs) — are placed on the board, with pins from the ICs protruding through holes in the board. The ICs are soldered in place, and their pins are electrically connected to other pins by traces — thin metal lines applied to the surface of the circuit board, as shown in the figure. (No, you don't need to worry about the details of this figure.) Figure: Pins and traces on a circuit board. In a graph, each pin might be represented by a vertex, and each trace by an edge. On a circuit board there are many electrical circuits that aren't connected to each other, so the graph is by
no means a connected one. During the design process, therefore, it may be genuinely useful to create a graph and use it to find which pins are connected to the same electrical circuit. Assume that you've created such a graph. Now you need an algorithm that provides a systematic way to start at a specified vertex, and then move along edges to other vertices, in such a way that when it's done you are guaranteed that it has visited every
25,199
when we discussed binary trees, visit means to perform some operation on the vertex, such as displaying it. There are two common approaches to searching a graph: depth-first search (DFS) and breadth-first search (BFS). Both will eventually reach all connected vertices. The difference is that the depth-first search is implemented with a stack, whereas the breadth-first search is implemented with a queue. These mechanisms result, as we'll see, in the graph being searched in different ways. Depth-First Search. The depth-first search uses a stack to remember where it should go when it reaches a dead end. We'll show an example, encourage you to try similar examples with the GraphN Workshop applet, and then finally show some code that carries out the search. An Example. We'll discuss the idea behind the depth-first search in relation to the figure. The numbers in this figure show the order in which the vertices are visited. Figure: Depth-first search. To carry out the depth-first search, you pick a starting point — in this case, vertex A. You then do three things: visit this vertex, push it onto a stack so you can remember it, and mark it so you won't visit it again. Next you go to any vertex adjacent to A that hasn't yet been visited. We'll assume the vertices are selected in alphabetical order, so that brings up B. You visit B, mark it, and push it on the stack. Now what? You're at B, and you do the same thing as before: go to an adjacent vertex that hasn't been visited. This leads you to F. We can call this process Rule 1. Remember — Rule 1: If possible, visit an adjacent unvisited vertex, mark it, and push it on the stack. Applying Rule 1 again leads you to H. At this point, however, you need to do something else, because there are no unvisited vertices adjacent to H. Here's where Rule 2 comes in. Remember — Rule 2: If you can't follow Rule 1, then, if possible, pop a vertex off the stack. Following this rule, you pop H off the stack, which brings you back to F. F has no unvisited adjacent vertices, so you pop it. Ditto B. Now only A is left on the stack. A, however, does have unvisited adjacent vertices, so you visit the next one, C.
But C is the end of the line again, so you pop it and you're back to A. You visit D, G, and I, and then pop them all when you reach the dead end at I. Now you're back to A. You visit