summaryrefslogtreecommitdiff
path: root/defpager/backing.c
blob: aa9810c09925fe42644fe27902f121a04823c77a (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
/* Backing store management for GNU Hurd.
   Copyright (C) 1996 Free Software Foundation, Inc.
   Written by Thomas Bushnell, n/BSG.

   This file is part of the GNU Hurd.

   The GNU Hurd is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2, or (at
   your option) any later version.

   The GNU Hurd is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */


#include <hurd/store.h>

/* Store classes we are willing to open the backing store with.  Raw
   device stores, plus interleaved/concatenated combinations of them;
   the list is NULL-terminated as store_open requires.  */
const struct store_class *const permitted_classes[] = 
{
  &store_device_class, &store_ileave_class, &store_concat_class, 0
};

/* Allocation bitmap for the backing store, one bit per PAGE.  */
/* If a bit is SET the corresponding PAGE is free; a clear bit means
   the page is allocated. */
char *bmap;

/* Number of bytes in bmap (so bmap covers bmap_len * NBBY pages). */
size_t bmap_len;

/* Allocation rotor: next byte of bmap to examine when looking for a
   free page, advanced so successive allocations scan round-robin. */
char *bmap_rotor;

/* Protects bmap, bmap_len, and bmap_rotor. */
pthread_mutex_t bmap_lock = PTHREAD_MUTEX_INITIALIZER;

/* Open the backing store named NAME and set up the free-page bitmap.
   Returns 0 on success, or an error code if the store cannot be
   opened or the bitmap cannot be allocated.  */
error_t
init_backing (char *name)
{
  error_t err;
  int i;

  /* PERMITTED_CLASSES already decays to the `const struct store_class
     *const *' that store_open expects; taking its address would pass a
     pointer-to-array instead.  */
  err = store_open (name, STORE_NO_FILEIO, permitted_classes, &backing_store);
  if (err)
    return err;

  /* One bit per page; any partial trailing byte of the store is
     simply not used for paging.  */
  bmap_len = backing_store->size / vm_page_size / NBBY;
  bmap = malloc (bmap_len);
  if (! bmap)
    return ENOMEM;

  /* All pages start out free (every bit set).  */
  for (i = 0; i < bmap_len; i++)
    bmap[i] = 0xff;
  bmap_rotor = bmap;

  /* Mark the very first page as occupied.  This makes sure we never
     return zero offsets from allocate_backing_page (which
     conventionally means that there is no space left).  It also makes
     sure we don't tromp on the misfeature in Linux of using the first
     page for permanent data.  A clear bit means occupied, so the bit
     must be cleared here, not set.  */
  *bmap_rotor &= ~1;

  return 0;
}

int
allocate_backing_page ()
{
  int wrapped;
  int bit;
  int pfn;

  pthread_mutex_lock (&bmap_lock);

  wrapped = (bmap_rotor == bmap);

  while (!wrapped || bmap_rotor < bmap + bmap_len)
    {
      if (bmap[bmap_rotor])
	break;
      bmap_rotor++;
      if (bmap_rotor >= bmap + bmap_len)
	wrapped++;
    }
  
  if (wrapped == 2)
    {
      /* Didn't find one... */
      pthread_mutex_unlock (&bmap_lock);
      printf ("WARNING: Out of paging space; pageout failing.");
      return 0;
    }
  
  /* Find which bit */
  bit = ffs (*bmap_rotor);
  assert_backtrace (bit);
  bit--;
  
  /* Mark it */
  *bmap_rotor |= 1 << bit;
  
  /* Return the correct offset */
  pfn = (bmap_rotor - bmap) * 8 + bit;

  pthread_mutex_unlock (&bmap_lock);
  
  return pfn * (vm_page_size / store->block_size);
}


void
return_backing_pages (off_t *map, int maplen)
{
  int i;
  
  pthread_mutex_lock (&bmap_lock);
  for (i = 0; i < maplen; i++)
    {
      int pfn;
      char *b;
      int bit;

      pfn = map[i] / (vm_page_size / store->block_size);
      b = bmap + pfn & ~7;
      bit = pfn & 7;
      
      assert_backtrace ((*b & (1 << bit)) == 0);
      *b |= 1 << bit;
    }
  pthread_mutex_unlock (&bmap_lock);
}