vms buffer overflows and large memory allocation

* vms-lib.c (struct carsym_mem): Add limit.
	(vms_add_index): Heed limit.
	(vms_traverse_index): Catch buffer overflows.  Remove outdated fixme.
	(vms_lib_read_index): Set up limit.  Catch 32-bit overflow.
	Always return actual number read.
	(_bfd_vms_lib_archive_p): Catch buffer overflows.  Replace
	assertion with error exit.

Author: Alan Modra <amodra@gmail.com>
Date:   2020-02-24 11:52:03 +10:30
parent 23c4ec6f02
commit c893ce360a
2 changed files with 82 additions and 25 deletions
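
The headline change is the growth policy in vms_add_index: the table of carsyms still doubles when it fills up, but the doubling can no longer wrap unsigned arithmetic, and growth now stops at a caller-supplied cap. A stand-alone sketch of the same pattern follows; struct table, table_add and the demo in main are invented for illustration, and plain realloc stands in for BFD's objalloc/heap handling.

  #include <stdio.h>
  #include <stdlib.h>
  #include <stdbool.h>

  /* "entry" stands in for struct carsym; "limit" is the most entries the
     table will ever be allowed to hold.  */
  struct entry { unsigned int vbn, off; };

  struct table
  {
    struct entry *idx;
    unsigned int nbr;    /* Entries in use.  */
    unsigned int max;    /* Entries allocated.  */
    unsigned int limit;  /* Hard cap on entries.  */
  };

  static bool
  table_add (struct table *t, struct entry e)
  {
    if (t->nbr == t->max)
      {
        /* -33u / 2 is the largest max for which 2 * max + 32 still fits
           in an unsigned int, so the doubling below cannot wrap; the
           second test refuses to grow past the cap at all.  */
        if (t->max > -33u / 2 || t->max >= t->limit)
          return false;
        unsigned int newmax = 2 * t->max + 32;
        if (newmax > t->limit)
          newmax = t->limit;
        /* Guard the byte count too, in the spirit of _bfd_mul_overflow.  */
        if (newmax > (size_t) -1 / sizeof (struct entry))
          return false;
        struct entry *n = realloc (t->idx, newmax * sizeof (struct entry));
        if (n == NULL)
          return false;
        t->idx = n;
        t->max = newmax;
      }
    t->idx[t->nbr] = e;
    t->nbr++;
    return true;
  }

  int
  main (void)
  {
    struct table t = { NULL, 0, 0, 1000 };
    unsigned int i;

    for (i = 0; ; i++)
      if (!table_add (&t, (struct entry) { i, 0 }))
        break;
    printf ("stopped after %u entries (limit %u)\n", t.nbr, t.limit);
    free (t.idx);
    return 0;
  }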

--- a/bfd/ChangeLog
+++ b/bfd/ChangeLog

@@ -1,3 +1,13 @@
+2020-02-24  Alan Modra  <amodra@gmail.com>
+
+	* vms-lib.c (struct carsym_mem): Add limit.
+	(vms_add_index): Heed limit.
+	(vms_traverse_index): Catch buffer overflows.  Remove outdated fixme.
+	(vms_lib_read_index): Set up limit.  Catch 32-bit overflow.
+	Always return actual number read.
+	(_bfd_vms_lib_archive_p): Catch buffer overflows.  Replace
+	assertion with error exit.
+
 2020-02-22  Alan Modra  <amodra@gmail.com>
 
 	PR 25585

--- a/bfd/vms-lib.c
+++ b/bfd/vms-lib.c

@@ -120,6 +120,9 @@ struct carsym_mem
   /* Maximum number of entries.  */
   unsigned int max;
 
+  /* Do not allocate more than this number of entries.  */
+  unsigned int limit;
+
   /* If true, the table was reallocated on the heap.  If false, it is still
      in the BFD's objalloc.  */
   bfd_boolean realloced;
@@ -136,12 +139,14 @@ vms_add_index (struct carsym_mem *cs, char *name,
       struct carsym *n;
       size_t amt;
 
-      if (cs->max > -33u / 2)
+      if (cs->max > -33u / 2 || cs->max >= cs->limit)
         {
           bfd_set_error (bfd_error_file_too_big);
           return FALSE;
         }
       cs->max = 2 * cs->max + 32;
+      if (cs->max > cs->limit)
+        cs->max = cs->limit;
       if (_bfd_mul_overflow (cs->max, sizeof (struct carsym), &amt))
         {
           bfd_set_error (bfd_error_file_too_big);
@@ -243,6 +248,7 @@ vms_traverse_index (bfd *abfd, unsigned int vbn, struct carsym_mem *cs)
   file_ptr off;
   unsigned char *p;
   unsigned char *endp;
+  unsigned int n;
 
   /* Read the index block.  */
   BFD_ASSERT (sizeof (indexdef) == VMS_BLOCK_SIZE);
@@ -251,7 +257,10 @@ vms_traverse_index (bfd *abfd, unsigned int vbn, struct carsym_mem *cs)
 
   /* Traverse it.  */
   p = &indexdef.keys[0];
-  endp = p + bfd_getl16 (indexdef.used);
+  n = bfd_getl16 (indexdef.used);
+  if (n > sizeof (indexdef.keys))
+    return FALSE;
+  endp = p + n;
   while (p < endp)
     {
       unsigned int idx_vbn;
@@ -292,6 +301,8 @@ vms_traverse_index (bfd *abfd, unsigned int vbn, struct carsym_mem *cs)
 
       /* Point to the next index entry.  */
       p = keyname + keylen;
+      if (p > endp)
+        return FALSE;
 
       if (idx_off == RFADEF__C_INDEX)
         {
@@ -333,11 +344,17 @@ vms_traverse_index (bfd *abfd, unsigned int vbn, struct carsym_mem *cs)
               if (!vms_read_block (abfd, kvbn, kblk))
                 return FALSE;
 
+              if (koff > sizeof (kblk) - sizeof (struct vms_kbn))
+                return FALSE;
               kbn = (struct vms_kbn *)(kblk + koff);
               klen = bfd_getl16 (kbn->keylen);
+              if (klen > sizeof (kblk) - koff)
+                return FALSE;
               kvbn = bfd_getl32 (kbn->rfa.vbn);
               koff = bfd_getl16 (kbn->rfa.offset);
 
+              if (noff + klen > keylen)
+                return FALSE;
               memcpy (name + noff, kbn + 1, klen);
               noff += klen;
             }
@@ -368,7 +385,7 @@ vms_traverse_index (bfd *abfd, unsigned int vbn, struct carsym_mem *cs)
               || bfd_bread (&lhs, sizeof (lhs), abfd) != sizeof (lhs))
             return FALSE;
 
-          /* FIXME: this adds extra entries that were not accounted.  */
+          /* These extra entries may cause reallocation of CS.  */
           if (!vms_add_indexes_from_list (abfd, cs, name, &lhs.ng_g_rfa))
             return FALSE;
           if (!vms_add_indexes_from_list (abfd, cs, name, &lhs.ng_wk_rfa))
@@ -397,7 +414,8 @@ vms_lib_read_index (bfd *abfd, int idx, unsigned int *nbrel)
   struct vms_idd idd;
   unsigned int flags;
   unsigned int vbn;
-  struct carsym *csbuf;
+  ufile_ptr filesize;
+  size_t amt;
   struct carsym_mem csm;
 
   /* Read index desription.  */
@@ -411,14 +429,27 @@ vms_lib_read_index (bfd *abfd, int idx, unsigned int *nbrel)
       || !(flags & IDD__FLAGS_VARLENIDX))
     return NULL;
 
-  csbuf = bfd_alloc (abfd, *nbrel * sizeof (struct carsym));
-  if (csbuf == NULL)
-    return NULL;
-
-  csm.max = *nbrel;
+  filesize = bfd_get_file_size (abfd);
   csm.nbr = 0;
+  csm.max = *nbrel;
+  csm.limit = -1u;
   csm.realloced = FALSE;
-  csm.idx = csbuf;
+  if (filesize != 0)
+    {
+      /* Put an upper bound based on a file full of single char keys.
+         This is to prevent fuzzed binary silliness.  It is easily
+         possible to set up loops over file blocks that add syms
+         without end.  */
+      if (filesize / (sizeof (struct vms_rfa) + 2) <= -1u)
+        csm.limit = filesize / (sizeof (struct vms_rfa) + 2);
+    }
+  if (csm.max > csm.limit)
+    csm.max = csm.limit;
+  if (_bfd_mul_overflow (csm.max, sizeof (struct carsym), &amt))
+    return NULL;
+  csm.idx = bfd_alloc (abfd, amt);
+  if (csm.idx == NULL)
+    return NULL;
 
   /* Note: if the index is empty, there is no block to traverse.  */
   vbn = bfd_getl32 (idd.vbn);
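
The comment in the hunk above carries the reasoning behind csm.limit: each symbol a well-formed library names costs at least an RFA plus a couple of bytes of key data in the file, so a file of a given size cannot legitimately define more than about filesize / (sizeof (struct vms_rfa) + 2) entries, and anything beyond that is fuzzer noise. A minimal sketch of that arithmetic, with entry_limit and PER_ENTRY_MIN invented for the example (the real divisor is sizeof (struct vms_rfa) + 2):

  #include <stdio.h>

  /* PER_ENTRY_MIN stands in for sizeof (struct vms_rfa) + 2: the least a
     single index entry can cost in the file.  The value 8 is only for
     the demo.  */
  #define PER_ENTRY_MIN 8

  static unsigned int
  entry_limit (unsigned long long filesize)
  {
    unsigned int limit = -1u;   /* No cap if the size is unknown.  */

    /* The quotient is 64-bit; only narrow it into the 32-bit limit when
       it fits (the "catch 32-bit overflow" part of the change).  */
    if (filesize != 0 && filesize / PER_ENTRY_MIN <= -1u)
      limit = filesize / PER_ENTRY_MIN;
    return limit;
  }

  int
  main (void)
  {
    printf ("1 MiB library: at most %u entries\n", entry_limit (1024 * 1024));
    printf ("unknown size:  at most %u entries\n", entry_limit (0));
    return 0;
  }

For a 1 MiB library this cap works out to 131072 entries; a zero (unknown) file size leaves the cap at UINT_MAX, matching csm.limit = -1u above.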
@@ -429,7 +460,7 @@ vms_lib_read_index (bfd *abfd, int idx, unsigned int *nbrel)
       /* Note: in case of error, we can free what was allocated on the
          BFD's objalloc.  */
-      bfd_release (abfd, csbuf);
+      bfd_release (abfd, csm.idx);
       return NULL;
     }
@@ -437,14 +468,16 @@
     {
       /* There are more entries than the first estimate.  Allocate on
          the BFD's objalloc.  */
+      struct carsym *csbuf;
       csbuf = bfd_alloc (abfd, csm.nbr * sizeof (struct carsym));
       if (csbuf == NULL)
         return NULL;
       memcpy (csbuf, csm.idx, csm.nbr * sizeof (struct carsym));
       free (csm.idx);
-      *nbrel = csm.nbr;
+      csm.idx = csbuf;
     }
-  return csbuf;
+  *nbrel = csm.nbr;
+  return csm.idx;
 }
 
 /* Standard function.  */
@@ -568,6 +601,8 @@ _bfd_vms_lib_archive_p (bfd *abfd, enum vms_lib_kind kind)
           != sizeof (buf_reclen))
         goto err;
       reclen = bfd_getl32 (buf_reclen);
+      if (reclen < sizeof (struct vms_dcxmap))
+        goto err;
       buf = _bfd_malloc_and_read (abfd, reclen, reclen);
       if (buf == NULL)
         goto err;
@@ -578,39 +613,51 @@
         (abfd, tdata->nbr_dcxsbm * sizeof (struct dcxsbm_desc));
       for (i = 0; i < tdata->nbr_dcxsbm; i++)
         {
-          struct vms_dcxsbm *sbm = (struct vms_dcxsbm *) (buf + sbm_off);
+          struct vms_dcxsbm *sbm;
           struct dcxsbm_desc *sbmdesc = &tdata->dcxsbm[i];
           unsigned int sbm_len;
           unsigned int sbm_sz;
           unsigned int off;
-          unsigned char *data = (unsigned char *)sbm;
           unsigned char *buf1;
           unsigned int l, j;
 
+          if (sbm_off > reclen
+              || reclen - sbm_off < sizeof (struct vms_dcxsbm))
+            goto err;
+          sbm = (struct vms_dcxsbm *) (buf + sbm_off);
           sbm_sz = bfd_getl16 (sbm->size);
           sbm_off += sbm_sz;
-          BFD_ASSERT (sbm_off <= reclen);
 
           sbmdesc->min_char = sbm->min_char;
           BFD_ASSERT (sbmdesc->min_char == 0);
           sbmdesc->max_char = sbm->max_char;
           sbm_len = sbmdesc->max_char - sbmdesc->min_char + 1;
           l = (2 * sbm_len + 7) / 8;
-          BFD_ASSERT
-            (sbm_sz >= sizeof (struct vms_dcxsbm) + l + 3 * sbm_len
-             || (tdata->nbr_dcxsbm == 1
-                 && sbm_sz >= sizeof (struct vms_dcxsbm) + l + sbm_len));
+          if (sbm_sz < sizeof (struct vms_dcxsbm) + l + sbm_len
+              || (tdata->nbr_dcxsbm > 1
+                  && sbm_sz < sizeof (struct vms_dcxsbm) + l + 3 * sbm_len))
+            goto err;
           sbmdesc->flags = (unsigned char *)bfd_alloc (abfd, l);
-          memcpy (sbmdesc->flags, data + bfd_getl16 (sbm->flags), l);
+          off = bfd_getl16 (sbm->flags);
+          if (off > reclen - sbm_off
+              || reclen - sbm_off - off < l)
+            goto err;
+          memcpy (sbmdesc->flags, (bfd_byte *) sbm + off, l);
           sbmdesc->nodes = (unsigned char *)bfd_alloc (abfd, 2 * sbm_len);
-          memcpy (sbmdesc->nodes, data + bfd_getl16 (sbm->nodes), 2 * sbm_len);
+          off = bfd_getl16 (sbm->nodes);
+          if (off > reclen - sbm_off
+              || reclen - sbm_off - off < 2 * sbm_len)
+            goto err;
+          memcpy (sbmdesc->nodes, (bfd_byte *) sbm + off, 2 * sbm_len);
           off = bfd_getl16 (sbm->next);
           if (off != 0)
             {
+              if (off > reclen - sbm_off
+                  || reclen - sbm_off - off < 2 * sbm_len)
+                goto err;
               /* Read the 'next' array.  */
-              sbmdesc->next = (unsigned short *)bfd_alloc
-                (abfd, sbm_len * sizeof (unsigned short));
-              buf1 = data + off;
+              sbmdesc->next = (unsigned short *) bfd_alloc (abfd, 2 * sbm_len);
+              buf1 = (bfd_byte *) sbm + off;
               for (j = 0; j < sbm_len; j++)
                 sbmdesc->next[j] = bfd_getl16 (buf1 + j * 2);
             }
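
The checks added to _bfd_vms_lib_archive_p all follow one pattern: an offset and a length read from the file are validated against the bytes actually remaining in the record before they feed a memcpy, with the comparisons kept in subtracted form so the unsigned arithmetic cannot wrap. A stand-alone sketch of that pattern; copy_from_record and the demo record are invented, while the real code applies the check to the DCX submap flags, nodes and next tables.

  #include <stdio.h>
  #include <string.h>
  #include <stdbool.h>

  /* Before copying `len` bytes that a header claims live `off` bytes
     past position `base` inside a record of `reclen` bytes, verify the
     span really fits.  Keeping the comparisons in "bytes remaining"
     form means the unsigned arithmetic can never wrap.  */
  static bool
  copy_from_record (unsigned char *dst, size_t len,
                    const unsigned char *record, unsigned int reclen,
                    unsigned int base, unsigned int off)
  {
    if (base > reclen
        || off > reclen - base
        || reclen - base - off < len)
      return false;             /* Would read past the record.  */
    memcpy (dst, record + base + off, len);
    return true;
  }

  int
  main (void)
  {
    unsigned char record[64] = "0123456789";
    unsigned char out[16];

    /* In range: 4 bytes starting at record[2 + 3].  */
    printf ("%d\n", copy_from_record (out, 4, record, sizeof record, 2, 3));
    /* Fuzzed offset near the end of the record: rejected.  */
    printf ("%d\n", copy_from_record (out, 4, record, sizeof record, 2, 60));
    return 0;
  }

Writing the test as off > reclen - base (after first rejecting base > reclen) rather than base + off > reclen is what keeps the left-hand side from overflowing when the file supplies a large offset.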